// CNN layer shader - uses modular convolution snippets
// Supports multi-pass rendering with residual connections
// DO NOT EDIT - Generated by train_cnn.py

@group(0) @binding(0) var smplr: sampler;
@group(0) @binding(1) var txt: texture_2d<f32>;

#include "common_uniforms"
#include "cnn_activation"
#include "cnn_conv3x3"
#include "cnn_conv5x5"
#include "cnn_weights_generated"

struct CNNLayerParams {
    layer_index: i32,
    blend_amount: f32,
    _pad: vec2<f32>,
}

@group(0) @binding(2) var<uniform> uniforms: CommonUniforms;
@group(0) @binding(3) var<uniform> params: CNNLayerParams;
@group(0) @binding(4) var original_input: texture_2d<f32>;

// Full-screen triangle: one oversized triangle covers the whole viewport.
@vertex
fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> {
    var pos = array<vec2<f32>, 3>(
        vec2(-1.0, -1.0),
        vec2(3.0, -1.0),
        vec2(-1.0, 3.0)
    );
    return vec4(pos[i], 0.0, 1.0);
}

@fragment
fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> {
    let uv = p.xy / uniforms.resolution;
    let original_raw = textureSample(original_input, smplr, uv);
    let original = (original_raw - 0.5) * 2.0; // Normalize to [-1,1]
    let gray = dot(original.rgb, vec3(0.2126, 0.7152, 0.0722));

    var result = vec4<f32>(0.0);

    // Layer 0: 7→4 (RGBD output, normalizes [0,1] input)
    if (params.layer_index == 0) {
        result = cnn_conv3x3_7to4_src(txt, smplr, uv, uniforms.resolution, weights_layer0);
        result = cnn_tanh(result);
    } else if (params.layer_index == 1) {
        result = cnn_conv5x5_7to4(txt, smplr, uv, uniforms.resolution, gray, weights_layer1);
        result = cnn_tanh(result); // Keep in [-1,1]
    } else if (params.layer_index == 2) {
        let gray_out = cnn_conv3x3_7to1(txt, smplr, uv, uniforms.resolution, gray, weights_layer2);
        // gray_out in [0,1] (clamped to match PyTorch training)
        result = vec4(gray_out, gray_out, gray_out, 1.0);
        return mix(original_raw, result, params.blend_amount); // [0,1]
    }

    return result; // [-1,1]
}
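
// Host-side pass sequence: a sketch inferred from the layer_index branches above.
// The actual render loop lives outside this generated file, so texture bindings
// per pass are an assumption, not the confirmed setup.
//   pass 0: params.layer_index = 0, txt = unprocessed input, writes intermediate A (tanh, [-1,1])
//   pass 1: params.layer_index = 1, txt = intermediate A,    writes intermediate B (tanh, [-1,1])
//   pass 2: params.layer_index = 2, txt = intermediate B,    blends the grayscale result with
//           original_input via params.blend_amount (the residual connection), output in [0,1]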