From ebceca338c902ffaa650f931a356c28a0659ebb1 Mon Sep 17 00:00:00 2001
From: skal
Date: Tue, 10 Feb 2026 18:46:45 +0100
Subject: refactor: Optimize CNN normalization to eliminate redundant conversions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Normalize textures once in fs_main instead of in every conv function.
Keep all intermediate layers in the [-1,1] range and denormalize only
for the final display.

Changes:
- train_cnn.py: Generator normalizes input once and keeps [-1,1] between layers
- cnn_conv*.wgsl: Remove texture normalization (inputs are already in [-1,1])
- cnn_layer.wgsl: Regenerated with the new normalization flow
- CNN_EFFECT.md: Updated documentation

Eliminates redundant [0,1]↔[-1,1] conversions, reducing shader complexity.

handoff(Claude): CNN normalization optimized, 35/36 tests passing.
---
 workspaces/main/shaders/cnn/cnn_conv5x5.wgsl | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'workspaces/main/shaders/cnn/cnn_conv5x5.wgsl')

diff --git a/workspaces/main/shaders/cnn/cnn_conv5x5.wgsl b/workspaces/main/shaders/cnn/cnn_conv5x5.wgsl
index 15eaf96..bfb4ebb 100644
--- a/workspaces/main/shaders/cnn/cnn_conv5x5.wgsl
+++ b/workspaces/main/shaders/cnn/cnn_conv5x5.wgsl
@@ -53,6 +53,8 @@ fn cnn_conv5x5_with_coord(
 }
 
 // 5×5 variant for 7→4 channels (RGBD output)
+// Assumes 'tex' and 'original' are already normalized to [-1,1]
+// UV coordinates remain in [0,1] and are normalized internally
 // weights: array<array<f32, 8>, 100> (25 positions × 4 channels, each with 7 weights + bias)
 fn cnn_conv5x5_7to4(
     tex: texture_2d<f32>,
@@ -64,8 +66,7 @@
 ) -> vec4<f32> {
     let step = 1.0 / resolution;
 
-    let gray_01 = 0.2126*original.r + 0.7152*original.g + 0.0722*original.b;
-    let gray = (gray_01 - 0.5) * 2.0;
+    let gray = 0.2126*original.r + 0.7152*original.g + 0.0722*original.b;
     let uv_norm = (uv - 0.5) * 2.0;
 
     var sum = vec4(0.0);
@@ -74,8 +75,7 @@
     for (var dy = -2; dy <= 2; dy++) {
         for (var dx = -2; dx <= 2; dx++) {
             let offset = vec2(f32(dx), f32(dy)) * step;
-            let rgbd_01 = textureSample(tex, samp, uv + offset);
-            let rgbd = (rgbd_01 - 0.5) * 2.0;
+            let rgbd = textureSample(tex, samp, uv + offset); // Already in [-1,1]
 
             let inputs = array(
                 rgbd.r, rgbd.g, rgbd.b, rgbd.a,
@@ -98,6 +98,8 @@
 }
 
 // 5×5 variant for 7→1 channel (scalar output)
+// Assumes 'tex' and 'original' are already normalized to [-1,1]
+// UV coordinates remain in [0,1] and are normalized internally
 // weights: array<array<f32, 8>, 25> (25 positions, each with 7 weights + bias)
 fn cnn_conv5x5_7to1(
     tex: texture_2d<f32>,
@@ -109,8 +111,7 @@
 ) -> f32 {
     let step = 1.0 / resolution;
 
-    let gray_01 = 0.2126*original.r + 0.7152*original.g + 0.0722*original.b;
-    let gray = (gray_01 - 0.5) * 2.0;
+    let gray = 0.2126*original.r + 0.7152*original.g + 0.0722*original.b;
     let uv_norm = (uv - 0.5) * 2.0;
 
     var sum = 0.0;
@@ -119,8 +120,7 @@
     for (var dy = -2; dy <= 2; dy++) {
        for (var dx = -2; dx <= 2; dx++) {
             let offset = vec2(f32(dx), f32(dy)) * step;
-            let rgbd_01 = textureSample(tex, samp, uv + offset);
-            let rgbd = (rgbd_01 - 0.5) * 2.0;
+            let rgbd = textureSample(tex, samp, uv + offset); // Already in [-1,1]
 
             sum += weights[pos][0] * rgbd.r;
             sum += weights[pos][1] * rgbd.g;
--
cgit v1.2.3
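
For illustration, the flow the commit message describes can be sketched as follows: the entry point normalizes the source texture once, the conv helpers consume [-1,1] values directly, and only the final output is mapped back to [0,1] for display. This is a minimal sketch assuming hypothetical binding and helper names (input_tex, samp, normalize_signed, denormalize); it is not the actual cnn_layer.wgsl code.

    // Minimal sketch, assuming hypothetical names; the real cnn_layer.wgsl may differ.
    @group(0) @binding(0) var input_tex: texture_2d<f32>;
    @group(0) @binding(1) var samp: sampler;

    // [0,1] -> [-1,1]: done once per sampled texel, in the entry point only.
    fn normalize_signed(c: vec4<f32>) -> vec4<f32> {
        return (c - 0.5) * 2.0;
    }

    // [-1,1] -> [0,1]: applied only to the final layer's output for display.
    fn denormalize(c: vec4<f32>) -> vec4<f32> {
        return c * 0.5 + 0.5;
    }

    @fragment
    fn fs_main(@location(0) uv: vec2<f32>) -> @location(0) vec4<f32> {
        // Normalize the source once; conv helpers such as cnn_conv5x5_7to4 now
        // assume their 'tex' and 'original' inputs are already in [-1,1].
        let original = normalize_signed(textureSample(input_tex, samp, uv));

        // ... intermediate conv layers stay in [-1,1]; placeholder shown here ...
        let rgbd = original;

        return denormalize(rgbd);
    }

Pushing both conversions to the entry point keeps every intermediate read and write in a single range, which is what allows the per-tap (value - 0.5) * 2.0 lines to be deleted from the conv functions in the diff above.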