diff options
Diffstat (limited to 'doc/CNN_V2.md')
| -rw-r--r-- | doc/CNN_V2.md | 14 |
1 file changed, 12 insertions, 2 deletions
diff --git a/doc/CNN_V2.md b/doc/CNN_V2.md index 78854ce..c827187 100644 --- a/doc/CNN_V2.md +++ b/doc/CNN_V2.md @@ -326,12 +326,13 @@ class CNNv2(nn.Module): kernel_sizes = [3, 3, 3] # Per-layer kernel sizes (e.g., [1,3,5]) num_layers = 3 # Number of CNN layers mip_level = 0 # Mip level for p0-p3: 0=orig, 1=half, 2=quarter, 3=eighth +grayscale_loss = False # Compute loss on grayscale (Y) instead of RGBA learning_rate = 1e-3 batch_size = 16 epochs = 5000 # Dataset: Input RGB, Target RGBA (preserves alpha channel from image) -# Model outputs RGBA, loss compares all 4 channels +# Model outputs RGBA, loss compares all 4 channels (or grayscale if --grayscale-loss) # Training loop (standard PyTorch f32) for epoch in range(epochs): @@ -344,7 +345,15 @@ for epoch in range(epochs): # Forward pass output = model(input_rgbd, static_feat) - loss = criterion(output, target_batch) + + # Loss computation (grayscale or RGBA) + if grayscale_loss: + # Convert RGBA to grayscale: Y = 0.299*R + 0.587*G + 0.114*B + output_gray = 0.299 * output[:, 0:1] + 0.587 * output[:, 1:2] + 0.114 * output[:, 2:3] + target_gray = 0.299 * target_batch[:, 0:1] + 0.587 * target_batch[:, 1:2] + 0.114 * target_batch[:, 2:3] + loss = criterion(output_gray, target_gray) + else: + loss = criterion(output, target_batch) # Backward pass optimizer.zero_grad() @@ -361,6 +370,7 @@ torch.save({ 'kernel_sizes': [3, 3, 3], # Per-layer kernel sizes 'num_layers': 3, 'mip_level': 0, # Mip level used for p0-p3 + 'grayscale_loss': False, # Whether grayscale loss was used 'features': ['p0', 'p1', 'p2', 'p3', 'uv.x', 'uv.y', 'sin10_x', 'bias'] }, 'epoch': epoch, |
