From b04816a400703ac6c364efb70ae84930d79ccb12 Mon Sep 17 00:00:00 2001
From: skal
Date: Fri, 13 Feb 2026 16:12:24 +0100
Subject: CNN v2: Fix activation function mismatch between training and inference

Layer 0 now uses clamp [0,1] in both training and inference (was using ReLU
in shaders).

- index.html: Add is_layer_0 flag to LayerParams, handle Layer 0 separately
- export_cnn_v2_shader.py: Generate correct activation for Layer 0

Co-Authored-By: Claude Sonnet 4.5
---
 training/export_cnn_v2_shader.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/training/export_cnn_v2_shader.py b/training/export_cnn_v2_shader.py
index ad5749c..dc475d8 100755
--- a/training/export_cnn_v2_shader.py
+++ b/training/export_cnn_v2_shader.py
@@ -35,9 +35,12 @@ def export_layer_shader(layer_idx, weights, kernel_size, output_dir, is_output_l
     )
 
     radius = kernel_size // 2
-    activation = "" if is_output_layer else "output[c] = max(0.0, sum); // ReLU"
     if is_output_layer:
-        activation = "output[c] = clamp(sum, 0.0, 1.0); // Sigmoid approximation"
+        activation = "output[c] = clamp(sum, 0.0, 1.0); // Output layer"
+    elif layer_idx == 0:
+        activation = "output[c] = clamp(sum, 0.0, 1.0); // Layer 0: clamp [0,1]"
+    else:
+        activation = "output[c] = max(0.0, sum); // Middle layers: ReLU"
 
     shader_code = f"""// CNN v2 Layer {layer_idx} - Auto-generated (uniform 12D→4D)
 // Kernel: {kernel_size}×{kernel_size}, In: 12D (4 prev + 8 static), Out: 4D
-- 
cgit v1.2.3
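
For context, a minimal sketch of the training-side counterpart this commit keeps the shaders in sync with. It assumes the model is trained in PyTorch; the class and parameter names below are hypothetical and not taken from this repository. The point it illustrates is the per-layer activation rule the exporter now emits: layer 0 and the output layer clamp their pre-activations to [0,1], while the middle layers use ReLU.

    # Sketch only (assumption: PyTorch training code; names are hypothetical).
    # Mirrors the exported shaders: layer 0 and the output layer clamp to [0,1],
    # middle layers use ReLU.
    import torch
    import torch.nn as nn

    class CNNv2(nn.Module):
        def __init__(self, num_layers=4, kernel_size=3):
            super().__init__()
            pad = kernel_size // 2
            # Uniform 12D -> 4D convolutions (4 previous channels + 8 static channels in).
            self.convs = nn.ModuleList(
                nn.Conv2d(12, 4, kernel_size, padding=pad) for _ in range(num_layers)
            )

        def forward(self, prev, static):
            # prev: (B, 4, H, W) carried layer state, static: (B, 8, H, W) fixed features
            for i, conv in enumerate(self.convs):
                x = conv(torch.cat([prev, static], dim=1))
                is_output_layer = (i == len(self.convs) - 1)
                if is_output_layer or i == 0:
                    prev = x.clamp(0.0, 1.0)   # clamp [0,1], same as the generated shader
                else:
                    prev = torch.relu(x)       # middle layers: ReLU
            return prev

With this rule applied on both sides, the activation used during training for layer 0 matches the clamp the generated shader performs at inference, which is the mismatch the commit fixes.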