author    skal <pascal.massimino@gmail.com>  2026-02-13 16:12:24 +0100
committer skal <pascal.massimino@gmail.com>  2026-02-13 16:12:24 +0100
commit    b04816a400703ac6c364efb70ae84930d79ccb12 (patch)
tree      257acfe047ee79c6037db0dd983b91396139d5a4 /training/export_cnn_v2_shader.py
parent    b5e8abad0490e47b52d300d2d0c48425c3fac4f3 (diff)
CNN v2: Fix activation function mismatch between training and inference
Layer 0 now uses clamp [0,1] in both training and inference (it was using ReLU in the shaders).

- index.html: add an is_layer_0 flag to LayerParams, handle Layer 0 separately
- export_cnn_v2_shader.py: generate the correct activation for Layer 0

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
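For reference, the training-side activation selection that the exporter must now mirror could look like the sketch below. This is an assumption about the training code, which is not part of this commit; the helper name activation_for_layer and the use of PyTorch are illustrative only.

import torch

def activation_for_layer(x, layer_idx, is_output_layer):
    # Hypothetical training-side helper mirroring the exporter's per-layer
    # activation choice: Layer 0 and the output layer clamp to [0, 1],
    # middle layers use ReLU. This must stay in sync with
    # export_cnn_v2_shader.py, or the exported shaders will diverge from
    # the trained network.
    if is_output_layer or layer_idx == 0:
        return torch.clamp(x, 0.0, 1.0)
    return torch.relu(x)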
Diffstat (limited to 'training/export_cnn_v2_shader.py')
-rwxr-xr-x  training/export_cnn_v2_shader.py  7
1 file changed, 5 insertions(+), 2 deletions(-)
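The two activations agree for pre-activation sums up to 1.0 but diverge above it, which is exactly the training/inference mismatch this commit removes. A minimal standalone illustration (not part of the commit):

# ReLU (old shader behavior) vs clamp (training, and the fixed shader)
# on sample pre-activation sums; they disagree only once sum exceeds 1.0.
for s in (-0.5, 0.3, 1.7):
    relu_out = max(0.0, s)
    clamp_out = min(max(s, 0.0), 1.0)
    print(f"sum={s:+.1f}  ReLU={relu_out:.2f}  clamp={clamp_out:.2f}")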
diff --git a/training/export_cnn_v2_shader.py b/training/export_cnn_v2_shader.py
index ad5749c..dc475d8 100755
--- a/training/export_cnn_v2_shader.py
+++ b/training/export_cnn_v2_shader.py
@@ -35,9 +35,12 @@ def export_layer_shader(layer_idx, weights, kernel_size, output_dir, is_output_layer
     )
     radius = kernel_size // 2
-    activation = "" if is_output_layer else "output[c] = max(0.0, sum); // ReLU"
     if is_output_layer:
-        activation = "output[c] = clamp(sum, 0.0, 1.0); // Sigmoid approximation"
+        activation = "output[c] = clamp(sum, 0.0, 1.0); // Output layer"
+    elif layer_idx == 0:
+        activation = "output[c] = clamp(sum, 0.0, 1.0); // Layer 0: clamp [0,1]"
+    else:
+        activation = "output[c] = max(0.0, sum); // Middle layers: ReLU"
     shader_code = f"""// CNN v2 Layer {layer_idx} - Auto-generated (uniform 12D→4D)
 // Kernel: {kernel_size}×{kernel_size}, In: 12D (4 prev + 8 static), Out: 4D