// CNN layer shader - uses modular convolution snippets
// Supports multi-pass rendering with residual connections
// DO NOT EDIT - Generated by train_cnn.py

@group(0) @binding(0) var smplr: sampler;
@group(0) @binding(1) var txt: texture_2d<f32>;

#include "common_uniforms"
#include "cnn_activation"
#include "cnn_conv3x3"
#include "cnn_conv5x5"
#include "cnn_weights_generated"

struct CNNLayerParams {
    layer_index: i32,
    blend_amount: f32,
    _pad: vec2<f32>,
}

@group(0) @binding(2) var<uniform> uniforms: CommonUniforms;
@group(0) @binding(3) var<uniform> params: CNNLayerParams;
@group(0) @binding(4) var original_input: texture_2d<f32>;

@vertex
fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> {
    // Single oversized triangle covering the full screen
    var pos = array<vec2<f32>, 3>(
        vec2<f32>(-1.0, -1.0),
        vec2<f32>(3.0, -1.0),
        vec2<f32>(-1.0, 3.0)
    );
    return vec4<f32>(pos[i], 0.0, 1.0);
}

@fragment
fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> {
    let uv = p.xy / uniforms.resolution;
    let original = (textureSample(original_input, smplr, uv) - 0.5) * 2.0; // Normalize to [-1,1]

    var result = vec4<f32>(0.0);

    if (params.layer_index == 0) {
        // Layer 0: 7→4 (RGBD output)
        result = cnn_conv3x3_7to4_src(txt, smplr, uv, uniforms.resolution, weights_layer0);
        result = cnn_tanh(result); // Keep in [-1,1]
    } else if (params.layer_index == 1) {
        result = cnn_conv5x5_7to4(txt, smplr, uv, uniforms.resolution, original, weights_layer1);
        result = cnn_tanh(result); // Keep in [-1,1]
    } else if (params.layer_index == 2) {
        // Last layer: 7→1 grayscale channel
        let gray_out = cnn_conv3x3_7to1(txt, smplr, uv, uniforms.resolution, original, weights_layer2);
        // At this point, 'gray_out' is the value the training script learned to produce.
        // Everything below is display-only and excluded from training:
        result = vec4<f32>(gray_out, gray_out, gray_out, 1.0); // gray_out stays in [-1,1]
        let blended = mix(original, result, params.blend_amount);
        return (blended + 1.0) * 0.5; // Denormalize to [0,1] for display
    }

    return result;
}
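
// ---------------------------------------------------------------------------
// Usage sketch (an assumption about the host side, not produced by
// train_cnn.py): the layer_index branches above suggest three successive
// fullscreen passes, ping-ponging between two intermediate textures while
// 'original_input' stays bound to the untouched source frame so the residual
// connection is available in every pass:
//
//   pass 0: txt = source frame, layer_index = 0, render to texA
//   pass 1: txt = texA,         layer_index = 1, render to texB
//   pass 2: txt = texB,         layer_index = 2, render to the final target
//
// 'texA' and 'texB' are hypothetical names; only the branching on
// params.layer_index is defined by this shader.
// ---------------------------------------------------------------------------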