// CNN layer shader - uses modular convolution snippets
// Supports multi-pass rendering with residual connections
// DO NOT EDIT - Generated by train_cnn.py

@group(0) @binding(0) var smplr: sampler;
@group(0) @binding(1) var txt: texture_2d<f32>;

#include "common_uniforms"
#include "cnn_activation"
#include "cnn_conv3x3"
#include "cnn_weights_generated"

// Per-pass parameters supplied by the host application.
struct CNNLayerParams {
    layer_index: i32,    // which CNN layer this pass evaluates (0..2)
    blend_amount: f32,   // residual blend factor: 0 = original frame, 1 = layer output
    _pad: vec2<f32>,     // pads the struct to 16 bytes for uniform-buffer layout
};

// NOTE(review): struct-typed module-scope vars require an explicit address
// space in WGSL; restored var<uniform> here (it had been stripped).
@group(0) @binding(2) var<uniform> uniforms: CommonUniforms;
@group(0) @binding(3) var<uniform> params: CNNLayerParams;
@group(0) @binding(4) var original_input: texture_2d<f32>;

// Fullscreen-triangle vertex shader: three hardcoded clip-space vertices
// cover the viewport, so no vertex buffer is bound.
@vertex
fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> {
    var pos = array<vec2<f32>, 3>(
        vec2<f32>(-1.0, -1.0),
        vec2<f32>(3.0, -1.0),
        vec2<f32>(-1.0, 3.0)
    );
    return vec4<f32>(pos[i], 0.0, 1.0);
}

// Evaluates one CNN layer over the input texture and blends the result with
// the original (pre-network) frame by params.blend_amount.
@fragment
fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> {
    let uv = p.xy / uniforms.resolution;
    let layer_input = textureSample(txt, smplr, uv);
    let original = textureSample(original_input, smplr, uv);

    var result = vec4<f32>(0.0);

    // Layer 0 uses coordinate-aware convolution
    if (params.layer_index == 0) {
        result = cnn_conv3x3_with_coord(txt, smplr, uv, uniforms.resolution,
            rgba_weights_layer0, coord_weights_layer0, bias_layer0);
        result = cnn_tanh(result);
    } else if (params.layer_index == 1) {
        result = cnn_conv3x3(txt, smplr, uv, uniforms.resolution,
            weights_layer1, bias_layer1);
        result = cnn_tanh(result);
    } else if (params.layer_index == 2) {
        // Final layer is linear: no activation applied.
        result = cnn_conv3x3(txt, smplr, uv, uniforms.resolution,
            weights_layer2, bias_layer2);
    } else {
        // Unknown layer index: pass the sampled input through unchanged.
        result = layer_input;
    }

    return mix(original, result, params.blend_amount);
}