From 6944733a6a2f05c18e7e0b73f847a4c9144801fd Mon Sep 17 00:00:00 2001 From: skal Date: Tue, 10 Feb 2026 12:48:43 +0100 Subject: feat: Add multi-layer CNN support with framebuffer capture and blend control Implements automatic layer chaining and generic framebuffer capture API for multi-layer neural network effects with proper original input preservation. Key changes: - Effect::needs_framebuffer_capture() - generic API for pre-render capture - MainSequence: auto-capture to "captured_frame" auxiliary texture - CNNEffect: multi-layer support via layer_index/total_layers params - seq_compiler: expands "layers=N" to N chained effect instances - Shader: @binding(4) original_input available to all layers - Training: generates layer switches and original input binding - Blend: mix(original, result, blend_amount) uses layer 0 input Timeline syntax: CNNEffect layers=3 blend=0.7 Co-Authored-By: Claude Sonnet 4.5 --- workspaces/main/shaders/cnn/cnn_layer.wgsl | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) (limited to 'workspaces/main/shaders/cnn/cnn_layer.wgsl') diff --git a/workspaces/main/shaders/cnn/cnn_layer.wgsl b/workspaces/main/shaders/cnn/cnn_layer.wgsl index b2bab26..2285ef9 100644 --- a/workspaces/main/shaders/cnn/cnn_layer.wgsl +++ b/workspaces/main/shaders/cnn/cnn_layer.wgsl @@ -1,5 +1,6 @@ // CNN layer shader - uses modular convolution snippets // Supports multi-pass rendering with residual connections +// DO NOT EDIT - Generated by train_cnn.py @group(0) @binding(0) var smplr: sampler; @group(0) @binding(1) var txt: texture_2d<f32>; @@ -11,12 +12,13 @@ struct CNNLayerParams { layer_index: i32, - use_residual: i32, + blend_amount: f32, _pad: vec2<f32>, }; @group(0) @binding(2) var<uniform> uniforms: CommonUniforms; @group(0) @binding(3) var<uniform> params: CNNLayerParams; +@group(0) @binding(4) var original_input: texture_2d<f32>; @vertex fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> { var pos = array<vec2<f32>, 3>( @@ -27,6 +29,8 @@ struct
CNNLayerParams { @fragment fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> { let uv = p.xy / uniforms.resolution; + let input = textureSample(txt, smplr, uv); + let original = textureSample(original_input, smplr, uv); var result = vec4(0.0); // Layer 0 uses coordinate-aware convolution @@ -35,12 +39,19 @@ struct CNNLayerParams { rgba_weights_layer0, coord_weights_layer0, bias_layer0); result = cnn_tanh(result); } - - // Residual connection - if (params.use_residual != 0) { - let input = textureSample(txt, smplr, uv); - result = input + result * 0.3; + else if (params.layer_index == 1) { + result = cnn_conv3x3(txt, smplr, uv, uniforms.resolution, + weights_layer1, bias_layer1); + result = cnn_tanh(result); + } + else if (params.layer_index == 2) { + result = cnn_conv3x3(txt, smplr, uv, uniforms.resolution, + weights_layer2, bias_layer2); + } + else { + result = input; } - return result; + // Blend with ORIGINAL input from layer 0 + return mix(original, result, params.blend_amount); } -- cgit v1.2.3