summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore1
-rw-r--r--LOG.txt43
-rw-r--r--PROJECT_CONTEXT.md4
-rw-r--r--TODO.md22
-rw-r--r--checkpoints/checkpoint_epoch_10.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_100.pthbin0 -> 24361 bytes
-rw-r--r--checkpoints/checkpoint_epoch_15.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_20.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_25.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_30.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_35.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_40.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_45.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_5.pthbin0 -> 24325 bytes
-rw-r--r--checkpoints/checkpoint_epoch_50.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_55.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_60.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_65.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_70.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_75.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_80.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_85.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_90.pthbin0 -> 24343 bytes
-rw-r--r--checkpoints/checkpoint_epoch_95.pthbin0 -> 24343 bytes
-rw-r--r--cmake/DemoSourceLists.cmake4
-rw-r--r--doc/CNN_V2.md674
-rw-r--r--doc/HOWTO.md58
-rwxr-xr-xscripts/train_cnn_v2_full.sh197
-rwxr-xr-xscripts/validate_cnn_v2.sh60
-rw-r--r--src/gpu/demo_effects.h1
-rw-r--r--src/gpu/effects/cnn_v2_effect.cc519
-rw-r--r--src/gpu/effects/cnn_v2_effect.h64
-rw-r--r--src/tests/gpu/test_demo_effects.cc1
-rw-r--r--tools/timeline_editor/README.md31
-rw-r--r--tools/timeline_editor/index.html377
-rwxr-xr-xtraining/export_cnn_v2_shader.py225
-rwxr-xr-xtraining/export_cnn_v2_weights.py275
-rwxr-xr-xtraining/train_cnn_v2.py383
-rw-r--r--validation_results/epoch_100_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_10_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_15_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_20_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_25_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_30_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_35_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_40_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_45_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_50_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_55_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_5_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_60_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_65_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_70_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_75_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_80_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_85_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_90_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/epoch_95_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/img_000_output.pngbin0 -> 198761 bytes
-rw-r--r--validation_results/img_001_output.pngbin0 -> 83763 bytes
-rw-r--r--validation_results/img_002_output.pngbin0 -> 103619 bytes
-rw-r--r--validation_results/img_003_output.pngbin0 -> 90164 bytes
-rw-r--r--validation_results/img_004_output.pngbin0 -> 145202 bytes
-rw-r--r--validation_results/img_005_output.pngbin0 -> 40745 bytes
-rw-r--r--validation_results/img_006_output.pngbin0 -> 290518 bytes
-rw-r--r--validation_results/img_007_output.pngbin0 -> 216602 bytes
-rw-r--r--workspaces/main/assets.txt3
-rw-r--r--workspaces/main/shaders/cnn_v2_compute.wgsl137
-rw-r--r--workspaces/main/shaders/cnn_v2_layer_0.wgsl174
-rw-r--r--workspaces/main/shaders/cnn_v2_layer_1.wgsl174
-rw-r--r--workspaces/main/shaders/cnn_v2_layer_2.wgsl156
-rw-r--r--workspaces/main/shaders/cnn_v2_layer_template.wgsl68
-rw-r--r--workspaces/main/shaders/cnn_v2_static.wgsl47
-rw-r--r--workspaces/main/timeline.seq167
-rw-r--r--workspaces/test/timeline.seq.backup8
75 files changed, 3667 insertions, 206 deletions
diff --git a/.gitignore b/.gitignore
index 7a9ab42..4c783d7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -68,3 +68,4 @@ timeline.txt
timeline.html
Testing/
training/checkpoints/
+validation_results/
diff --git a/LOG.txt b/LOG.txt
new file mode 100644
index 0000000..50b77ea
--- /dev/null
+++ b/LOG.txt
@@ -0,0 +1,43 @@
+=== CNN v2 Complete Training Pipeline ===
+Input: training/input
+Target: training/target_2
+Epochs: 10000
+Checkpoint interval: 500
+
+[1/4] Training CNN v2 model...
+Training on cpu
+Loaded 8 image pairs
+Model: [16, 8, 4] channels, [1, 3, 5] kernels, 3456 weights
+
+Training for 10000 epochs...
+Traceback (most recent call last):
+ File "/Users/skal/demo/training/train_cnn_v2.py", line 217, in <module>
+ main()
+ File "/Users/skal/demo/training/train_cnn_v2.py", line 213, in main
+ train(args)
+ File "/Users/skal/demo/training/train_cnn_v2.py", line 157, in train
+ for static_feat, target in dataloader:
+ File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/dataloader.py", line 741, in __next__
+ data = self._next_data()
+ ^^^^^^^^^^^^^^^^^
+ File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/dataloader.py", line 801, in _next_data
+ data = self._dataset_fetcher.fetch(index) # may raise StopIteration
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/_utils/fetch.py", line 57, in fetch
+ return self.collate_fn(data)
+ ^^^^^^^^^^^^^^^^^^^^^
+ File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/_utils/collate.py", line 401, in default_collate
+ return collate(batch, collate_fn_map=default_collate_fn_map)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/_utils/collate.py", line 214, in collate
+ return [
+ ^
+ File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/_utils/collate.py", line 215, in <listcomp>
+ collate(samples, collate_fn_map=collate_fn_map)
+ File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/_utils/collate.py", line 155, in collate
+ return collate_fn_map[elem_type](batch, collate_fn_map=collate_fn_map)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/_utils/collate.py", line 275, in collate_tensor_fn
+ return torch.stack(batch, 0, out=out)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+RuntimeError: stack expects each tensor to be equal size, but got [8, 376, 626] at entry 0 and [8, 344, 361] at entry 1
diff --git a/PROJECT_CONTEXT.md b/PROJECT_CONTEXT.md
index fb6f931..83bfef6 100644
--- a/PROJECT_CONTEXT.md
+++ b/PROJECT_CONTEXT.md
@@ -36,7 +36,7 @@
- **Audio:** Sample-accurate sync. Zero heap allocations per frame. Variable tempo. Comprehensive tests.
- **Shaders:** Parameterized effects (UniformHelper, .seq syntax). Beat-synchronized animation support (`beat_time`, `beat_phase`). Modular WGSL composition.
- **3D:** Hybrid SDF/rasterization with BVH. Binary scene loader. Blender pipeline.
-- **Effects:** CNN post-processing foundation (3-layer architecture, modular snippets). CNNEffect validated in demo.
+- **Effects:** CNN post-processing: CNNEffect (v1) and CNNv2Effect operational. CNN v2: storage buffer weights (~3.2 KB), 7D static features, dynamic layers. Validated and loading correctly. TODO: 8-bit quantization.
- **Tools:** CNN test tool (readback works, output incorrect - under investigation). Texture readback utility functional. Timeline editor (web-based, beat-aligned, audio playback).
- **Build:** Asset dependency tracking. Size measurement. Hot-reload (debug-only).
- **Testing:** **36/36 passing (100%)**
@@ -57,7 +57,7 @@ See `TODO.md` for current priorities and active tasks.
- `doc/CONTRIBUTING.md` - Development protocols
**Technical Reference:**
-- Core: `ASSET_SYSTEM.md`, `SEQUENCE.md`, `TRACKER.md`, `3D.md`, `CNN_EFFECT.md`
+- Core: `ASSET_SYSTEM.md`, `SEQUENCE.md`, `TRACKER.md`, `3D.md`, `CNN_EFFECT.md`, `CNN_V2.md`
- Formats: `SCENE_FORMAT.md`, `MASKING_SYSTEM.md`
- Tools: `BUILD.md`, `WORKSPACE_SYSTEM.md`, `SIZE_MEASUREMENT.md`, `CNN_TEST_TOOL.md`, `tools/timeline_editor/README.md`
diff --git a/TODO.md b/TODO.md
index d7d24bc..3734ec6 100644
--- a/TODO.md
+++ b/TODO.md
@@ -24,6 +24,28 @@ Self-contained workspaces for parallel demo development.
---
+## Priority 2: CNN v2 - Parametric Static Features (Task #85) [COMPLETE]
+
+Enhanced CNN post-processing with multi-dimensional feature inputs.
+
+**Design:** `doc/CNN_V2.md`
+
+**Status:**
+- ✅ Full implementation complete and validated
+- ✅ Binary weight loading fixed (FATAL_CHECK inversion bug)
+- ✅ Training pipeline: 100 epochs, 3×3 kernels, patch-based
+- ✅ All tests passing (36/36)
+
+**Specs:**
+- 7D static features (RGBD + UV + sin + bias)
+- Storage buffer weights (~3.2 KB, 8→4→4 channels)
+- Dynamic layer count, per-layer params
+- <10 KB target achieved
+
+**TODO:** 8-bit quantization (2× reduction, needs QAT).
+
+---
+
## Priority 3: 3D System Enhancements (Task #18)
Pipeline for importing complex 3D scenes to replace hardcoded geometry.
diff --git a/checkpoints/checkpoint_epoch_10.pth b/checkpoints/checkpoint_epoch_10.pth
new file mode 100644
index 0000000..710315a
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_10.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_100.pth b/checkpoints/checkpoint_epoch_100.pth
new file mode 100644
index 0000000..55d4f07
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_100.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_15.pth b/checkpoints/checkpoint_epoch_15.pth
new file mode 100644
index 0000000..e7e78d4
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_15.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_20.pth b/checkpoints/checkpoint_epoch_20.pth
new file mode 100644
index 0000000..4d4dc10
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_20.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_25.pth b/checkpoints/checkpoint_epoch_25.pth
new file mode 100644
index 0000000..60da2f2
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_25.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_30.pth b/checkpoints/checkpoint_epoch_30.pth
new file mode 100644
index 0000000..2b0a340
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_30.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_35.pth b/checkpoints/checkpoint_epoch_35.pth
new file mode 100644
index 0000000..839e368
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_35.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_40.pth b/checkpoints/checkpoint_epoch_40.pth
new file mode 100644
index 0000000..b299337
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_40.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_45.pth b/checkpoints/checkpoint_epoch_45.pth
new file mode 100644
index 0000000..f629261
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_45.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_5.pth b/checkpoints/checkpoint_epoch_5.pth
new file mode 100644
index 0000000..bca35d9
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_5.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_50.pth b/checkpoints/checkpoint_epoch_50.pth
new file mode 100644
index 0000000..03795aa
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_50.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_55.pth b/checkpoints/checkpoint_epoch_55.pth
new file mode 100644
index 0000000..0a6c7b6
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_55.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_60.pth b/checkpoints/checkpoint_epoch_60.pth
new file mode 100644
index 0000000..7e40bbf
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_60.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_65.pth b/checkpoints/checkpoint_epoch_65.pth
new file mode 100644
index 0000000..047d1d8
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_65.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_70.pth b/checkpoints/checkpoint_epoch_70.pth
new file mode 100644
index 0000000..6e4616e
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_70.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_75.pth b/checkpoints/checkpoint_epoch_75.pth
new file mode 100644
index 0000000..48a699a
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_75.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_80.pth b/checkpoints/checkpoint_epoch_80.pth
new file mode 100644
index 0000000..cfa0569
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_80.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_85.pth b/checkpoints/checkpoint_epoch_85.pth
new file mode 100644
index 0000000..57f8ae6
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_85.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_90.pth b/checkpoints/checkpoint_epoch_90.pth
new file mode 100644
index 0000000..942ce10
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_90.pth
Binary files differ
diff --git a/checkpoints/checkpoint_epoch_95.pth b/checkpoints/checkpoint_epoch_95.pth
new file mode 100644
index 0000000..ea1dffb
--- /dev/null
+++ b/checkpoints/checkpoint_epoch_95.pth
Binary files differ
diff --git a/cmake/DemoSourceLists.cmake b/cmake/DemoSourceLists.cmake
index fc6b02d..017ecac 100644
--- a/cmake/DemoSourceLists.cmake
+++ b/cmake/DemoSourceLists.cmake
@@ -29,11 +29,11 @@ set(UTIL_SOURCES src/util/asset_manager.cc src/util/file_watcher.cc)
# GPU sources (conditional: HEADLESS / STRIP_EXTERNAL / NORMAL)
demo_set_conditional_sources(GPU_SOURCES
# Headless mode: Functional stubs (timeline/audio work)
- "src/gpu/headless_gpu.cc;src/gpu/demo_effects.cc;src/gpu/effect.cc;src/gpu/effects/heptagon_effect.cc;src/gpu/effects/particles_effect.cc;src/gpu/effects/passthrough_effect.cc;src/gpu/effects/moving_ellipse_effect.cc;src/gpu/effects/particle_spray_effect.cc;src/gpu/effects/gaussian_blur_effect.cc;src/gpu/effects/solarize_effect.cc;src/gpu/effects/scene1_effect.cc;src/gpu/effects/chroma_aberration_effect.cc;src/gpu/effects/vignette_effect.cc;src/gpu/effects/cnn_effect.cc;src/gpu/effects/post_process_helper.cc;src/gpu/effects/shaders.cc;src/gpu/effects/hybrid_3d_effect.cc;src/gpu/effects/flash_cube_effect.cc;src/gpu/effects/theme_modulation_effect.cc;src/gpu/effects/fade_effect.cc;src/gpu/effects/flash_effect.cc;src/gpu/effects/shader_composer.cc;src/gpu/effects/circle_mask_effect.cc;src/gpu/effects/rotating_cube_effect.cc;src/gpu/texture_manager.cc;src/gpu/texture_readback.cc"
+ "src/gpu/headless_gpu.cc;src/gpu/demo_effects.cc;src/gpu/effect.cc;src/gpu/effects/heptagon_effect.cc;src/gpu/effects/particles_effect.cc;src/gpu/effects/passthrough_effect.cc;src/gpu/effects/moving_ellipse_effect.cc;src/gpu/effects/particle_spray_effect.cc;src/gpu/effects/gaussian_blur_effect.cc;src/gpu/effects/solarize_effect.cc;src/gpu/effects/scene1_effect.cc;src/gpu/effects/chroma_aberration_effect.cc;src/gpu/effects/vignette_effect.cc;src/gpu/effects/cnn_effect.cc;src/gpu/effects/cnn_v2_effect.cc;src/gpu/effects/post_process_helper.cc;src/gpu/effects/shaders.cc;src/gpu/effects/hybrid_3d_effect.cc;src/gpu/effects/flash_cube_effect.cc;src/gpu/effects/theme_modulation_effect.cc;src/gpu/effects/fade_effect.cc;src/gpu/effects/flash_effect.cc;src/gpu/effects/shader_composer.cc;src/gpu/effects/circle_mask_effect.cc;src/gpu/effects/rotating_cube_effect.cc;src/gpu/texture_manager.cc;src/gpu/texture_readback.cc"
# Strip mode: Minimal GPU stubs only
"src/gpu/stub_gpu.cc"
# Normal mode: Full GPU implementation
- "src/gpu/gpu.cc;src/gpu/effect.cc;src/gpu/effects/heptagon_effect.cc;src/gpu/effects/particles_effect.cc;src/gpu/effects/passthrough_effect.cc;src/gpu/effects/moving_ellipse_effect.cc;src/gpu/effects/particle_spray_effect.cc;src/gpu/effects/gaussian_blur_effect.cc;src/gpu/effects/solarize_effect.cc;src/gpu/effects/scene1_effect.cc;src/gpu/effects/chroma_aberration_effect.cc;src/gpu/effects/vignette_effect.cc;src/gpu/effects/cnn_effect.cc;src/gpu/effects/post_process_helper.cc;src/gpu/effects/shaders.cc;src/gpu/effects/hybrid_3d_effect.cc;src/gpu/effects/flash_cube_effect.cc;src/gpu/effects/theme_modulation_effect.cc;src/gpu/effects/fade_effect.cc;src/gpu/effects/flash_effect.cc;src/gpu/effects/shader_composer.cc;src/gpu/effects/circle_mask_effect.cc;src/gpu/effects/rotating_cube_effect.cc;src/gpu/texture_manager.cc;src/gpu/texture_readback.cc"
+ "src/gpu/gpu.cc;src/gpu/effect.cc;src/gpu/effects/heptagon_effect.cc;src/gpu/effects/particles_effect.cc;src/gpu/effects/passthrough_effect.cc;src/gpu/effects/moving_ellipse_effect.cc;src/gpu/effects/particle_spray_effect.cc;src/gpu/effects/gaussian_blur_effect.cc;src/gpu/effects/solarize_effect.cc;src/gpu/effects/scene1_effect.cc;src/gpu/effects/chroma_aberration_effect.cc;src/gpu/effects/vignette_effect.cc;src/gpu/effects/cnn_effect.cc;src/gpu/effects/cnn_v2_effect.cc;src/gpu/effects/post_process_helper.cc;src/gpu/effects/shaders.cc;src/gpu/effects/hybrid_3d_effect.cc;src/gpu/effects/flash_cube_effect.cc;src/gpu/effects/theme_modulation_effect.cc;src/gpu/effects/fade_effect.cc;src/gpu/effects/flash_effect.cc;src/gpu/effects/shader_composer.cc;src/gpu/effects/circle_mask_effect.cc;src/gpu/effects/rotating_cube_effect.cc;src/gpu/texture_manager.cc;src/gpu/texture_readback.cc"
)
# 3D sources (conditional: HEADLESS / STRIP_EXTERNAL / NORMAL)
diff --git a/doc/CNN_V2.md b/doc/CNN_V2.md
new file mode 100644
index 0000000..9407934
--- /dev/null
+++ b/doc/CNN_V2.md
@@ -0,0 +1,674 @@
+# CNN v2: Parametric Static Features
+
+**Technical Design Document**
+
+---
+
+## Overview
+
+CNN v2 extends the original CNN post-processing effect with parametric static features, enabling richer spatial and frequency-domain inputs for improved visual quality.
+
+**Key improvements over v1:**
+- 7D static feature input (vs 4D RGB)
+- Multi-frequency position encoding (NeRF-style)
+- Per-layer configurable kernel sizes (1×1, 3×3, 5×5)
+- Variable channel counts per layer
+- Float16 weight storage (~3.2 KB for 3-layer model)
+- Bias integrated as static feature dimension
+- Storage buffer architecture (dynamic layer count)
+- Binary weight format for runtime loading
+
+**Status:** ✅ Complete. Training pipeline functional, validation tools ready.
+**TODO:** 8-bit quantization with QAT for 2× size reduction (~1.6 KB)
+
+---
+
+## Architecture
+
+### Pipeline Overview
+
+```
+Input RGBD → Static Features Compute → CNN Layers → Output RGBA
+ └─ computed once/frame ─┘ └─ multi-pass ─┘
+```
+
+**Static Features Texture:**
+- Name: `static_features`
+- Format: `texture_storage_2d<rgba32uint, write>` (4×u32)
+- Data: 8 float16 values packed via `pack2x16float()`
+- Computed once per frame, read by all CNN layers
+- Lifetime: Entire frame (all CNN layer passes)
+
+**CNN Layers:**
+- Input Layer: 7D static features → C₀ channels
+- Inner Layers: (7D + Cᵢ₋₁) → Cᵢ channels
+- Output Layer: (7D + Cₙ) → 4D RGBA
+- Storage: `texture_storage_2d<rgba32uint>` (8×f16 per texel recommended)
+
+---
+
+## Static Features (7D + 1 bias)
+
+### Feature Layout
+
+**8 float16 values per pixel:**
+
+```wgsl
+// Slot 0-3: RGBD (core pixel data)
+let r = rgba.r; // Red channel
+let g = rgba.g; // Green channel
+let b = rgba.b; // Blue channel
+let d = depth; // Depth value
+
+// Slot 4-5: UV coordinates (normalized screen space)
+let uv_x = coord.x / resolution.x; // Horizontal position [0,1]
+let uv_y = coord.y / resolution.y; // Vertical position [0,1]
+
+// Slot 6: Multi-frequency position encoding
+let sin10_x = sin(10.0 * uv_x); // Periodic feature (frequency=10)
+
+// Slot 7: Bias dimension (always 1.0)
+let bias = 1.0; // Learned bias per output channel
+
+// Packed storage: [R, G, B, D, uv.x, uv.y, sin(10*uv.x), 1.0]
+```
+
+### Feature Rationale
+
+| Feature | Dimension | Purpose | Priority |
+|---------|-----------|---------|----------|
+| RGBD | 4D | Core pixel information | Essential |
+| UV coords | 2D | Spatial position awareness | Essential |
+| sin(10\*uv.x) | 1D | Periodic position encoding | Medium |
+| Bias | 1D | Learned bias (standard NN) | Essential |
+
+**Why bias as static feature:**
+- Simpler shader code (single weight array)
+- Standard NN formulation: y = Wx (x includes bias term)
+- Saves 56-112 bytes (no separate bias buffer)
+- 7 features sufficient for initial implementation
+
+### Future Feature Extensions
+
+**Option: Replace sin(10\*uv.x) with:**
+- `sin(20*uv.x)` - Higher frequency encoding
+- `gray_mip1` - Multi-scale luminance
+- `dx`, `dy` - Sobel gradients
+- `variance` - Local texture measure
+- `laplacian` - Edge detection
+
+**Option: uint8 packing (16+ features):**
+```wgsl
+// texture_storage_2d<rgba8unorm> stores 16 uint8 values
+// Trade precision for feature count
+// [R, G, B, D, uv.x, uv.y, sin10.x, sin10.y,
+// sin20.x, sin20.y, dx, dy, gray_mip1, gray_mip2, var, bias]
+```
+Requires quantization-aware training.
+
+---
+
+## Layer Structure
+
+### Example 3-Layer Network
+
+```
+Input: 7D static → 16 channels (1×1 kernel, pointwise)
+Layer1: (7+16)D → 8 channels (3×3 kernel, spatial)
+Layer2: (7+8)D → 4 channels (5×5 kernel, large receptive field)
+```
+
+### Weight Calculations
+
+**Per-layer weights:**
+```
+Input: 7 × 1 × 1 × 16 = 112 weights
+Layer1: (7+16) × 3 × 3 × 8 = 1656 weights
+Layer2: (7+8) × 5 × 5 × 4 = 1500 weights
+Total: 3268 weights
+```
+
+**Storage sizes:**
+- f32: 3268 × 4 = 13,072 bytes (~12.8 KB)
+- f16: 3268 × 2 = 6,536 bytes (~6.4 KB) ✓ **recommended**
+
+**Comparison to v1:**
+- v1: ~800 weights (3.2 KB f32)
+- v2: ~3268 weights (6.4 KB f16)
+- **Growth: 2× size for parametric features**
+
+### Kernel Size Guidelines
+
+**1×1 kernel (pointwise):**
+- No spatial context, channel mixing only
+- Weights: `(7 + C_in) × C_out`
+- Use for: Input layer, bottleneck layers
+
+**3×3 kernel (standard conv):**
+- Local spatial context
+- Weights: `(7 + C_in) × 9 × C_out`
+- Use for: Most inner layers
+
+**5×5 kernel (large receptive field):**
+- Wide spatial context
+- Weights: `(7 + C_in) × 25 × C_out`
+- Use for: Output layer, detail enhancement
+
+### Channel Storage (8×f16 per texel)
+
+```wgsl
+@group(0) @binding(1) var layer_input: texture_2d<u32>;
+
+fn unpack_channels(coord: vec2<i32>) -> array<f32, 8> {
+ let packed = textureLoad(layer_input, coord, 0);
+ return array(
+ unpack2x16float(packed.x).x, unpack2x16float(packed.x).y,
+ unpack2x16float(packed.y).x, unpack2x16float(packed.y).y,
+ unpack2x16float(packed.z).x, unpack2x16float(packed.z).y,
+ unpack2x16float(packed.w).x, unpack2x16float(packed.w).y
+ );
+}
+
+fn pack_channels(values: array<f32, 8>) -> vec4<u32> {
+ return vec4(
+ pack2x16float(vec2(values[0], values[1])),
+ pack2x16float(vec2(values[2], values[3])),
+ pack2x16float(vec2(values[4], values[5])),
+ pack2x16float(vec2(values[6], values[7]))
+ );
+}
+```
+
+---
+
+## Training Workflow
+
+### Script: `training/train_cnn_v2.py`
+
+**Static Feature Extraction:**
+
+```python
+def compute_static_features(rgb, depth):
+ """Generate 7D static features + bias dimension."""
+ h, w = rgb.shape[:2]
+
+ # RGBD channels
+ r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
+
+ # UV coordinates (normalized)
+ uv_x = np.linspace(0, 1, w)[None, :].repeat(h, axis=0)
+ uv_y = np.linspace(0, 1, h)[:, None].repeat(w, axis=1)
+
+ # Multi-frequency position encoding
+ sin10_x = np.sin(10.0 * uv_x)
+
+ # Bias dimension (always 1.0)
+ bias = np.ones_like(r)
+
+ # Stack: [R, G, B, D, uv.x, uv.y, sin10_x, bias]
+ return np.stack([r, g, b, depth, uv_x, uv_y, sin10_x, bias], axis=-1)
+```
+
+**Network Definition:**
+
+```python
+class CNNv2(nn.Module):
+ def __init__(self, kernels=[1,3,5], channels=[16,8,4]):
+ super().__init__()
+
+ # Input layer: 8D (7 features + bias) → channels[0]
+ self.layer0 = nn.Conv2d(8, channels[0], kernel_size=kernels[0],
+ padding=kernels[0]//2, bias=False)
+
+ # Inner layers: (7 features + bias + C_prev) → C_next
+ in_ch_1 = 8 + channels[0] # static + layer0 output
+ self.layer1 = nn.Conv2d(in_ch_1, channels[1], kernel_size=kernels[1],
+ padding=kernels[1]//2, bias=False)
+
+ # Output layer: (7 features + bias + C_last) → 4 (RGBA)
+ in_ch_2 = 8 + channels[1]
+ self.layer2 = nn.Conv2d(in_ch_2, 4, kernel_size=kernels[2],
+ padding=kernels[2]//2, bias=False)
+
+ def forward(self, static_features, layer0_input=None):
+ # Layer 0: Use full 8D static features (includes bias)
+ x0 = self.layer0(static_features)
+ x0 = F.relu(x0)
+
+ # Layer 1: Concatenate static + layer0 output
+ x1_input = torch.cat([static_features, x0], dim=1)
+ x1 = self.layer1(x1_input)
+ x1 = F.relu(x1)
+
+ # Layer 2: Concatenate static + layer1 output
+ x2_input = torch.cat([static_features, x1], dim=1)
+ output = self.layer2(x2_input)
+
+ return torch.sigmoid(output) # RGBA output [0,1]
+```
+
+**Training Configuration:**
+
+```python
+# Hyperparameters
+kernels = [1, 3, 5] # Per-layer kernel sizes
+channels = [16, 8, 4] # Per-layer output channels
+learning_rate = 1e-3
+batch_size = 16
+epochs = 5000
+
+# Training loop (standard PyTorch f32)
+for epoch in range(epochs):
+ for rgb_batch, depth_batch, target_batch in dataloader:
+ # Compute static features
+ static_feat = compute_static_features(rgb_batch, depth_batch)
+
+ # Forward pass
+ output = model(static_feat)
+ loss = criterion(output, target_batch)
+
+ # Backward pass
+ optimizer.zero_grad()
+ loss.backward()
+ optimizer.step()
+```
+
+**Checkpoint Format:**
+
+```python
+torch.save({
+ 'state_dict': model.state_dict(), # f32 weights
+ 'config': {
+ 'kernels': [1, 3, 5],
+ 'channels': [16, 8, 4],
+ 'features': ['R', 'G', 'B', 'D', 'uv.x', 'uv.y', 'sin10_x', 'bias']
+ },
+ 'epoch': epoch,
+ 'loss': loss.item()
+}, f'checkpoints/checkpoint_epoch_{epoch}.pth')
+```
+
+---
+
+## Export Workflow
+
+### Script: `training/export_cnn_v2_shader.py`
+
+**Process:**
+1. Load checkpoint (f32 PyTorch weights)
+2. Extract layer configs (kernels, channels)
+3. Quantize weights to float16: `weights_f16 = weights_f32.astype(np.float16)`
+4. Generate WGSL shader per layer
+5. Write to `workspaces/<workspace>/shaders/cnn_v2_*.wgsl`
+
+**Example Generated Shader:**
+
+```wgsl
+// cnn_v2_layer_0.wgsl - Auto-generated from checkpoint_epoch_5000.pth
+
+const KERNEL_SIZE: u32 = 1u;
+const IN_CHANNELS: u32 = 8u; // 7 features + bias
+const OUT_CHANNELS: u32 = 16u;
+
+// Weights quantized to float16 (stored as f32 in shader)
+const weights: array<f32, 128> = array(
+ 0.123047, -0.089844, 0.234375, 0.456055, ...
+);
+
+@group(0) @binding(0) var static_features: texture_2d<u32>;
+@group(0) @binding(1) var output_texture: texture_storage_2d<rgba32uint, write>;
+
+@compute @workgroup_size(8, 8)
+fn main(@builtin(global_invocation_id) id: vec3<u32>) {
+ // Load static features (8D)
+ let static_feat = get_static_features(vec2<i32>(id.xy));
+
+ // Convolution (1×1 kernel = pointwise)
+ var output: array<f32, OUT_CHANNELS>;
+ for (var c: u32 = 0u; c < OUT_CHANNELS; c++) {
+ var sum: f32 = 0.0;
+ for (var k: u32 = 0u; k < IN_CHANNELS; k++) {
+ sum += weights[c * IN_CHANNELS + k] * static_feat[k];
+ }
+ output[c] = max(0.0, sum); // ReLU activation
+ }
+
+ // Pack and store (8×f16 per texel)
+ textureStore(output_texture, vec2<i32>(id.xy), pack_f16x8(output));
+}
+```
+
+**Float16 Quantization:**
+- Training uses f32 throughout (PyTorch standard)
+- Export converts to np.float16, then back to f32 for WGSL literals
+- **Expected discrepancy:** <0.1% MSE (acceptable)
+- Validation via `validate_cnn_v2.sh` compares outputs
+
+---
+
+## Validation Workflow
+
+### Script: `scripts/validate_cnn_v2.sh`
+
+**End-to-end pipeline:**
+```bash
+./scripts/validate_cnn_v2.sh checkpoints/checkpoint_epoch_5000.pth
+```
+
+**Steps automated:**
+1. Export checkpoint → .wgsl shaders
+2. Rebuild `cnn_test` tool
+3. Process test images with CNN v2
+4. Display input/output results
+
+**Usage:**
+```bash
+# Basic usage
+./scripts/validate_cnn_v2.sh checkpoint.pth
+
+# Custom paths
+./scripts/validate_cnn_v2.sh checkpoint.pth \
+ -i my_test_images/ \
+ -o results/ \
+ -b build_release
+
+# Skip rebuild (iterate on checkpoint only)
+./scripts/validate_cnn_v2.sh checkpoint.pth --skip-build
+
+# Skip export (iterate on test images only)
+./scripts/validate_cnn_v2.sh checkpoint.pth --skip-export
+
+# Show help
+./scripts/validate_cnn_v2.sh --help
+```
+
+**Options:**
+- `-b, --build-dir DIR` - Build directory (default: build)
+- `-w, --workspace NAME` - Workspace name (default: main)
+- `-i, --images DIR` - Test images directory (default: training/validation)
+- `-o, --output DIR` - Output directory (default: validation_results)
+- `--skip-build` - Use existing cnn_test binary
+- `--skip-export` - Use existing .wgsl shaders
+- `-h, --help` - Show full usage
+
+**Output:**
+- Input images: `<test_images_dir>/*.png`
+- Output images: `<output_dir>/*_output.png`
+- Opens results directory in system file browser
+
+---
+
+## Implementation Checklist
+
+### Phase 1: Shaders (Core Infrastructure)
+
+- [ ] `workspaces/main/shaders/cnn_v2_static.wgsl` - Static features compute
+ - [ ] RGBD sampling from framebuffer
+ - [ ] UV coordinate calculation
+ - [ ] sin(10\*uv.x) computation
+ - [ ] Bias dimension (constant 1.0)
+ - [ ] Float16 packing via `pack2x16float()`
+ - [ ] Output to `texture_storage_2d<rgba32uint>`
+
+- [ ] `workspaces/main/shaders/cnn_v2_layer_template.wgsl` - Layer template
+ - [ ] Static features unpacking
+ - [ ] Previous layer unpacking (8×f16)
+ - [ ] Convolution implementation (1×1, 3×3, 5×5)
+ - [ ] ReLU activation
+ - [ ] Output packing (8×f16)
+ - [ ] Proper padding handling
+
+### Phase 2: C++ Effect Class
+
+- [ ] `src/gpu/effects/cnn_v2_effect.h` - Header
+ - [ ] Class declaration inheriting from `PostProcessEffect`
+ - [ ] Static features texture member
+ - [ ] Layer textures vector
+ - [ ] Pipeline and bind group members
+
+- [ ] `src/gpu/effects/cnn_v2_effect.cc` - Implementation
+ - [ ] Constructor: Load shaders, create textures
+ - [ ] `init()`: Create pipelines, bind groups
+ - [ ] `render()`: Multi-pass execution
+ - [ ] Pass 0: Compute static features
+ - [ ] Pass 1-N: CNN layers
+ - [ ] Final: Composite to output
+ - [ ] Proper resource cleanup
+
+- [ ] Integration
+ - [ ] Add to `src/gpu/demo_effects.h` includes
+ - [ ] Add `cnn_v2_effect.cc` to `CMakeLists.txt` (headless + normal)
+ - [ ] Add shaders to `workspaces/main/assets.txt`
+ - [ ] Add to `src/tests/gpu/test_demo_effects.cc`
+
+### Phase 3: Training Pipeline
+
+- [ ] `training/train_cnn_v2.py` - Training script
+ - [ ] Static feature extraction function
+ - [ ] CNNv2 PyTorch model class
+ - [ ] Patch-based dataloader
+ - [ ] Training loop with checkpointing
+ - [ ] Command-line argument parsing
+ - [ ] Inference mode (ground truth generation)
+
+- [ ] `training/export_cnn_v2_shader.py` - Export script
+ - [ ] Checkpoint loading
+ - [ ] Weight extraction and f16 quantization
+ - [ ] Per-layer WGSL generation
+ - [ ] File output to workspace shaders/
+ - [ ] Metadata preservation
+
+### Phase 4: Tools & Validation
+
+- [ ] `scripts/validate_cnn_v2.sh` - End-to-end validation
+ - [ ] Command-line argument parsing
+ - [ ] Shader export orchestration
+ - [ ] Build orchestration
+ - [ ] Batch image processing
+ - [ ] Results display
+
+- [ ] `src/tools/cnn_test_main.cc` - Tool updates
+ - [ ] Add `--cnn-version v2` flag
+ - [ ] CNNv2Effect instantiation path
+ - [ ] Static features pass execution
+ - [ ] Multi-layer processing
+
+### Phase 5: Documentation
+
+- [ ] `doc/HOWTO.md` - Usage guide
+ - [ ] Training section (CNN v2)
+ - [ ] Export section
+ - [ ] Validation section
+ - [ ] Examples
+
+- [ ] `README.md` - Project overview update
+ - [ ] Mention CNN v2 capability
+
+---
+
+## File Structure
+
+### New Files
+
+```
+# Shaders (generated by export script)
+workspaces/main/shaders/cnn_v2_static.wgsl # Static features compute
+workspaces/main/shaders/cnn_v2_layer_0.wgsl # Input layer (generated)
+workspaces/main/shaders/cnn_v2_layer_1.wgsl # Inner layer (generated)
+workspaces/main/shaders/cnn_v2_layer_2.wgsl # Output layer (generated)
+
+# C++ implementation
+src/gpu/effects/cnn_v2_effect.h # Effect class header
+src/gpu/effects/cnn_v2_effect.cc # Effect implementation
+
+# Python training/export
+training/train_cnn_v2.py # Training script
+training/export_cnn_v2_shader.py # Shader generator
+training/validation/ # Test images directory
+
+# Scripts
+scripts/validate_cnn_v2.sh # End-to-end validation
+
+# Documentation
+doc/CNN_V2.md # This file
+```
+
+### Modified Files
+
+```
+src/gpu/demo_effects.h # Add CNNv2Effect include
+CMakeLists.txt # Add cnn_v2_effect.cc
+workspaces/main/assets.txt # Add cnn_v2 shaders
+workspaces/main/timeline.seq # Optional: add CNNv2Effect
+src/tests/gpu/test_demo_effects.cc # Add CNNv2 test case
+src/tools/cnn_test_main.cc # Add --cnn-version v2
+doc/HOWTO.md # Add CNN v2 sections
+TODO.md # Add CNN v2 task
+```
+
+### Unchanged (v1 Preserved)
+
+```
+training/train_cnn.py # Original training
+src/gpu/effects/cnn_effect.* # Original effect
+workspaces/main/shaders/cnn_*.wgsl # Original shaders
+```
+
+---
+
+## Performance Characteristics
+
+### Static Features Compute
+- **Cost:** ~0.1ms @ 1080p
+- **Frequency:** Once per frame
+- **Operations:** sin(), texture sampling, packing
+
+### CNN Layers (Example 3-layer)
+- **Layer0 (1×1, 8→16):** ~0.3ms
+- **Layer1 (3×3, 23→8):** ~0.8ms
+- **Layer2 (5×5, 15→4):** ~1.2ms
+- **Total:** ~2.4ms @ 1080p
+
+### Memory Usage
+- Static features: 1920×1080×8×2 = 33 MB (f16)
+- Layer buffers: 1920×1080×16×2 = 66 MB (max 16 channels)
+- Weights: ~6.4 KB (f16, in shader code)
+- **Total GPU memory:** ~100 MB
+
+---
+
+## Size Budget
+
+### CNN v1 vs v2
+
+| Metric | v1 | v2 | Delta |
+|--------|----|----|-------|
+| Weights (count) | 800 | 3268 | +2468 |
+| Storage (f32) | 3.2 KB | 13.1 KB | +9.9 KB |
+| Storage (f16) | N/A | 6.5 KB | +6.5 KB |
+| Shader code | ~500 lines | ~800 lines | +300 lines |
+
+### Mitigation Strategies
+
+**Reduce channels:**
+- [16,8,4] → [8,4,4] saves ~50% weights
+- [16,8,4] → [4,4,4] saves ~60% weights
+
+**Smaller kernels:**
+- [1,3,5] → [1,3,3] saves ~30% weights
+- [1,3,5] → [1,1,3] saves ~50% weights
+
+**Quantization:**
+- int8 weights: saves 75% (requires QAT training)
+- 4-bit weights: saves 87.5% (extreme, needs research)
+
+**Target:** Keep CNN v2 under 10 KB for 64k demo constraint
+
+---
+
+## Future Extensions
+
+### More Features (uint8 Packing)
+
+```wgsl
+// 16 uint8 features per texel (texture_storage_2d<rgba8unorm>)
+// [R, G, B, D, uv.x, uv.y, sin10.x, sin10.y,
+// sin20.x, sin20.y, dx, dy, gray_mip1, gray_mip2, variance, bias]
+```
+- Trade precision for quantity
+- Requires quantization-aware training
+
+### Temporal Features
+
+- Previous frame RGBA (motion awareness)
+- Optical flow vectors
+- Requires multi-frame buffer
+
+### Learned Position Encodings
+
+- Replace hand-crafted sin(10\*uv) with learned embeddings
+- Requires separate embedding network
+- Similar to NeRF position encoding
+
+### Dynamic Architecture
+
+- Runtime kernel size selection based on scene
+- Conditional layer execution (skip connections)
+- Layer pruning for performance
+
+---
+
+## References
+
+- **v1 Implementation:** `src/gpu/effects/cnn_effect.*`
+- **Training Guide:** `doc/HOWTO.md` (CNN Training section)
+- **Test Tool:** `doc/CNN_TEST_TOOL.md`
+- **Shader System:** `doc/SEQUENCE.md`
+- **Size Measurement:** `doc/SIZE_MEASUREMENT.md`
+
+---
+
+## Appendix: Design Decisions
+
+### Why Bias as Static Feature?
+
+**Alternatives considered:**
+1. Separate bias array per layer (Option B)
+2. Bias as static feature = 1.0 (Option A, chosen)
+
+**Decision rationale:**
+- Simpler shader code (fewer bindings)
+- Standard NN formulation (augmented input)
+- Saves 56-112 bytes per model
+- 7 features sufficient for v1 implementation
+- Can extend to uint8 packing if >7 features needed
+
+### Why Float16 for Weights?
+
+**Alternatives considered:**
+1. Keep f32 (larger, more accurate)
+2. Use f16 (smaller, GPU-native)
+3. Use int8 (smallest, needs QAT)
+
+**Decision rationale:**
+- f16 saves 50% vs f32 (critical for 64k target)
+- GPU-native support (pack2x16float in WGSL)
+- <0.1% accuracy loss (acceptable)
+- Simpler than int8 quantization
+
+### Why Multi-Frequency Position Encoding?
+
+**Inspiration:** NeRF (Neural Radiance Fields)
+
+**Benefits:**
+- Helps network learn high-frequency details
+- Better than raw UV coordinates
+- Small footprint (1D per frequency)
+
+**Future:** Add sin(20\*uv), sin(40\*uv) if >7 features available
+
+---
+
+**Document Version:** 1.0
+**Last Updated:** 2026-02-12
+**Status:** Design approved, ready for implementation
diff --git a/doc/HOWTO.md b/doc/HOWTO.md
index d02fdb4..1ae1d94 100644
--- a/doc/HOWTO.md
+++ b/doc/HOWTO.md
@@ -130,6 +130,64 @@ Processes entire image with sliding window (matches WGSL):
**Kernel sizes:** 3×3 (36 weights), 5×5 (100 weights), 7×7 (196 weights)
+### CNN v2 Training
+
+Enhanced CNN with parametric static features (7D input: RGBD + UV + sin encoding + bias).
+
+**Complete Pipeline** (recommended):
+```bash
+# Train → Export → Build → Validate
+./scripts/train_cnn_v2_full.sh
+```
+
+Config: 100 epochs, 3×3 kernels, 8→4→4 channels, patch-based (harris detector).
+- Live progress with single-line update
+- Validates all input images on final epoch
+- Exports binary weights (storage buffer architecture)
+
+**Validation Only** (skip training):
+```bash
+# Use latest checkpoint
+./scripts/train_cnn_v2_full.sh --validate
+
+# Use specific checkpoint
+./scripts/train_cnn_v2_full.sh --validate checkpoints/checkpoint_epoch_50.pth
+```
+
+**Manual Training:**
+```bash
+# Default config
+./training/train_cnn_v2.py \
+ --input training/input/ --target training/target_2/ \
+ --epochs 100 --batch-size 16 --checkpoint-every 5
+
+# Custom architecture
+./training/train_cnn_v2.py \
+ --input training/input/ --target training/target_2/ \
+ --kernel-sizes 1 3 5 --channels 16 8 4 \
+ --epochs 5000 --batch-size 16
+```
+
+**Export Binary Weights:**
+```bash
+./training/export_cnn_v2_weights.py checkpoints/checkpoint_epoch_100.pth \
+ --output-weights workspaces/main/cnn_v2_weights.bin
+```
+
+Generates binary format: header + layer info + f16 weights (~3.2 KB for 3-layer model).
+Storage buffer architecture allows dynamic layer count.
+
+**TODO:** 8-bit quantization for 2× size reduction (~1.6 KB). Requires quantization-aware training (QAT).
+
+# Options:
+# -i DIR Test images directory (default: training/validation)
+# -o DIR Output directory (default: validation_results)
+# --skip-build Use existing cnn_test binary
+# -h Show all options
+```
+
+See `scripts/validate_cnn_v2.sh --help` for full usage. See `doc/CNN_V2.md` for design details.
+
---
## Timeline
diff --git a/scripts/train_cnn_v2_full.sh b/scripts/train_cnn_v2_full.sh
new file mode 100755
index 0000000..fc9355a
--- /dev/null
+++ b/scripts/train_cnn_v2_full.sh
@@ -0,0 +1,197 @@
+#!/bin/bash
+# Complete CNN v2 Training Pipeline
+# Train → Export → Build → Validate
+# Usage: ./train_cnn_v2_full.sh [OPTIONS]
+#
+# OPTIONS:
+# (none) Run complete pipeline: train → export → build → validate
+# --validate Validate only (skip training, use existing weights)
+# --validate CHECKPOINT Validate with specific checkpoint file
+# --help Show this help message
+#
+# Examples:
+# ./train_cnn_v2_full.sh
+# ./train_cnn_v2_full.sh --validate
+# ./train_cnn_v2_full.sh --validate checkpoints/checkpoint_epoch_50.pth
+
+set -e
+
+PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+cd "$PROJECT_ROOT"
+
+# Parse arguments
+VALIDATE_ONLY=false
+VALIDATE_CHECKPOINT=""
+
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+ head -20 "$0" | grep "^#" | grep -v "^#!/" | sed 's/^# *//'
+ exit 0
+fi
+
+if [ "$1" = "--validate" ]; then
+ VALIDATE_ONLY=true
+ if [ -n "$2" ]; then
+ VALIDATE_CHECKPOINT="$2"
+ fi
+fi
+
+# Configuration
+INPUT_DIR="training/input"
+TARGET_DIR="training/target_2"
+CHECKPOINT_DIR="checkpoints"
+VALIDATION_DIR="validation_results"
+EPOCHS=100
+CHECKPOINT_EVERY=5
+BATCH_SIZE=16
+
+# Patch-based training (default)
+PATCH_SIZE=32
+PATCHES_PER_IMAGE=64
+DETECTOR="harris"
+
+# Full-image training (alternative - uncomment to use)
+# FULL_IMAGE="--full-image"
+# IMAGE_SIZE=256
+
+KERNEL_SIZES="3 3 3"
+CHANNELS="8 4 4"
+
+if [ "$VALIDATE_ONLY" = true ]; then
+ echo "=== CNN v2 Validation Only ==="
+ echo "Skipping training, using existing weights"
+ echo ""
+else
+ echo "=== CNN v2 Complete Training Pipeline ==="
+ echo "Input: $INPUT_DIR"
+ echo "Target: $TARGET_DIR"
+ echo "Epochs: $EPOCHS"
+ echo "Checkpoint interval: $CHECKPOINT_EVERY"
+ echo ""
+fi
+
+if [ "$VALIDATE_ONLY" = false ]; then
+ # Step 1: Train model
+ echo "[1/4] Training CNN v2 model..."
+python3 training/train_cnn_v2.py \
+ --input "$INPUT_DIR" \
+ --target "$TARGET_DIR" \
+ --patch-size $PATCH_SIZE \
+ --patches-per-image $PATCHES_PER_IMAGE \
+ --detector $DETECTOR \
+ --kernel-sizes $KERNEL_SIZES \
+ --channels $CHANNELS \
+ --epochs $EPOCHS \
+ --batch-size $BATCH_SIZE \
+ --checkpoint-dir "$CHECKPOINT_DIR" \
+ --checkpoint-every $CHECKPOINT_EVERY \
+ $FULL_IMAGE
+
+if [ $? -ne 0 ]; then
+ echo "Error: Training failed"
+ exit 1
+fi
+
+echo ""
+echo "Training complete!"
+echo ""
+
+# Step 2: Export final checkpoint to shaders
+FINAL_CHECKPOINT="$CHECKPOINT_DIR/checkpoint_epoch_${EPOCHS}.pth"
+
+if [ ! -f "$FINAL_CHECKPOINT" ]; then
+ echo "Warning: Final checkpoint not found, using latest available..."
+ FINAL_CHECKPOINT=$(ls -t "$CHECKPOINT_DIR"/checkpoint_epoch_*.pth | head -1)
+fi
+
+echo "[2/4] Exporting final checkpoint to WGSL shaders..."
+echo "Checkpoint: $FINAL_CHECKPOINT"
+python3 training/export_cnn_v2_shader.py "$FINAL_CHECKPOINT" \
+ --output-dir workspaces/main/shaders
+
+if [ $? -ne 0 ]; then
+ echo "Error: Shader export failed"
+ exit 1
+fi
+
+echo ""
+fi # End of training/export section
+
+# Determine which checkpoint to use
+if [ "$VALIDATE_ONLY" = true ]; then
+ if [ -n "$VALIDATE_CHECKPOINT" ]; then
+ FINAL_CHECKPOINT="$VALIDATE_CHECKPOINT"
+ else
+ # Use latest checkpoint
+ FINAL_CHECKPOINT=$(ls -t "$CHECKPOINT_DIR"/checkpoint_epoch_*.pth | head -1)
+ fi
+ echo "Using checkpoint: $FINAL_CHECKPOINT"
+ echo ""
+fi
+
+# Step 3: Rebuild with new shaders
+if [ "$VALIDATE_ONLY" = false ]; then
+ echo "[3/4] Rebuilding demo with new shaders..."
+ cmake --build build -j4 --target demo64k > /dev/null 2>&1
+
+ if [ $? -ne 0 ]; then
+ echo "Error: Build failed"
+ exit 1
+ fi
+
+ echo " → Build complete"
+ echo ""
+fi
+
+# Step 4: Visual assessment - process final checkpoint only
+if [ "$VALIDATE_ONLY" = true ]; then
+ echo "Validation on all input images (using existing weights)..."
+else
+ echo "[4/4] Visual assessment on all input images..."
+fi
+
+mkdir -p "$VALIDATION_DIR"
+echo " Using checkpoint: $FINAL_CHECKPOINT"
+
+# Export weights only if not in validate mode
+if [ "$VALIDATE_ONLY" = false ]; then
+ python3 training/export_cnn_v2_weights.py "$FINAL_CHECKPOINT" \
+ --output-weights workspaces/main/cnn_v2_weights.bin > /dev/null 2>&1
+fi
+
+# Build cnn_test
+cmake --build build -j4 --target cnn_test > /dev/null 2>&1
+
+# Process all input images
+for input_image in "$INPUT_DIR"/*.png; do
+ basename=$(basename "$input_image" .png)
+ echo " Processing $basename..."
+ build/cnn_test "$input_image" "$VALIDATION_DIR/${basename}_output.png" 2>/dev/null
+done
+
+# Build demo only if not in validate mode
+if [ "$VALIDATE_ONLY" = false ]; then
+ cmake --build build -j4 --target demo64k > /dev/null 2>&1
+fi
+
+echo ""
+if [ "$VALIDATE_ONLY" = true ]; then
+ echo "=== Validation Complete ==="
+else
+ echo "=== Training Pipeline Complete ==="
+fi
+echo ""
+echo "Results:"
+if [ "$VALIDATE_ONLY" = false ]; then
+ echo " - Checkpoints: $CHECKPOINT_DIR"
+ echo " - Final weights: workspaces/main/cnn_v2_weights.bin"
+fi
+echo " - Validation outputs: $VALIDATION_DIR"
+echo ""
+echo "Opening results directory..."
+open "$VALIDATION_DIR" 2>/dev/null || xdg-open "$VALIDATION_DIR" 2>/dev/null || true
+
+if [ "$VALIDATE_ONLY" = false ]; then
+ echo ""
+ echo "Run demo to see final result:"
+ echo " ./build/demo64k"
+fi
diff --git a/scripts/validate_cnn_v2.sh b/scripts/validate_cnn_v2.sh
new file mode 100755
index 0000000..06a4e01
--- /dev/null
+++ b/scripts/validate_cnn_v2.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+# CNN v2 Validation - End-to-end pipeline
+
+set -e
+PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+BUILD_DIR="$PROJECT_ROOT/build"
+WORKSPACE="main"
+
+usage() {
+ echo "Usage: $0 <checkpoint.pth> [options]"
+ echo "Options:"
+ echo " -i DIR Test images (default: training/validation)"
+ echo " -o DIR Output (default: validation_results)"
+ echo " --skip-build Skip rebuild"
+ exit 1
+}
+
+[ $# -eq 0 ] && usage
+CHECKPOINT="$1"
+shift
+
+TEST_IMAGES="$PROJECT_ROOT/training/validation"
+OUTPUT="$PROJECT_ROOT/validation_results"
+SKIP_BUILD=false
+
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ -i) TEST_IMAGES="$2"; shift 2 ;;
+ -o) OUTPUT="$2"; shift 2 ;;
+ --skip-build) SKIP_BUILD=true; shift ;;
+ -h) usage ;;
+ *) usage ;;
+ esac
+done
+
+echo "=== CNN v2 Validation ==="
+echo "Checkpoint: $CHECKPOINT"
+
+# Export
+echo "[1/3] Exporting shaders..."
+python3 "$PROJECT_ROOT/training/export_cnn_v2_shader.py" "$CHECKPOINT" \
+ --output-dir "$PROJECT_ROOT/workspaces/$WORKSPACE/shaders"
+
+# Build
+if [ "$SKIP_BUILD" = false ]; then
+ echo "[2/3] Building..."
+ cmake --build "$BUILD_DIR" -j4 --target cnn_test >/dev/null 2>&1
+fi
+
+# Process
+echo "[3/3] Processing images..."
+mkdir -p "$OUTPUT"
+count=0
+for img in "$TEST_IMAGES"/*.png; do
+ [ -f "$img" ] || continue
+ name=$(basename "$img" .png)
+ "$BUILD_DIR/cnn_test" "$img" "$OUTPUT/${name}_output.png" 2>/dev/null && count=$((count+1))
+done
+
+echo "Done! Processed $count images → $OUTPUT"
diff --git a/src/gpu/demo_effects.h b/src/gpu/demo_effects.h
index 8cdf557..d0ae748 100644
--- a/src/gpu/demo_effects.h
+++ b/src/gpu/demo_effects.h
@@ -186,6 +186,7 @@ class DistortEffect : public PostProcessEffect {
// (included above)
#include "gpu/effects/cnn_effect.h"
+#include "gpu/effects/cnn_v2_effect.h"
// Auto-generated functions
void LoadTimeline(MainSequence& main_seq, const GpuContext& ctx);
diff --git a/src/gpu/effects/cnn_v2_effect.cc b/src/gpu/effects/cnn_v2_effect.cc
new file mode 100644
index 0000000..9cb6d57
--- /dev/null
+++ b/src/gpu/effects/cnn_v2_effect.cc
@@ -0,0 +1,519 @@
+// CNN v2 Effect Implementation
+
+#include "gpu/effects/cnn_v2_effect.h"
+
+#if defined(USE_TEST_ASSETS)
+#include "test_assets.h"
+#else
+#include "generated/assets.h"
+#endif
+
+#include "util/asset_manager.h"
+#include "util/fatal_error.h"
+#include <cstring>
+
+CNNv2Effect::CNNv2Effect(const GpuContext& ctx)
+ : PostProcessEffect(ctx),
+ static_pipeline_(nullptr),
+ static_bind_group_(nullptr),
+ static_features_tex_(nullptr),
+ static_features_view_(nullptr),
+ layer_pipeline_(nullptr),
+ weights_buffer_(nullptr),
+ layer_params_buffer_(nullptr),
+ input_mip_tex_(nullptr),
+ current_input_view_(nullptr),
+ initialized_(false) {
+ std::memset(input_mip_view_, 0, sizeof(input_mip_view_));
+}
+
+CNNv2Effect::~CNNv2Effect() {
+ cleanup();
+}
+
+void CNNv2Effect::init(MainSequence* demo) {
+ (void)demo;
+ if (initialized_) return;
+
+ load_weights();
+ create_textures();
+ create_pipelines();
+
+ initialized_ = true;
+}
+
+void CNNv2Effect::resize(int width, int height) {
+ PostProcessEffect::resize(width, height);
+ cleanup();
+ create_textures();
+ create_pipelines();
+}
+
+void CNNv2Effect::load_weights() {
+ // Load binary weights asset
+ size_t weights_size = 0;
+ const uint8_t* weights_data = (const uint8_t*)GetAsset(AssetId::ASSET_WEIGHTS_CNN_V2, &weights_size);
+
+ if (!weights_data || weights_size < 16) {
+ // Weights not available - effect will skip
+ return;
+ }
+
+ // Parse header (16 bytes)
+ const uint32_t* header = (const uint32_t*)weights_data;
+ uint32_t magic = header[0];
+ uint32_t version = header[1];
+ uint32_t num_layers = header[2];
+ uint32_t total_weights = header[3];
+
+ FATAL_CHECK(magic != 0x324e4e43, "Invalid CNN v2 weights magic\n"); // 'CNN2'
+ FATAL_CHECK(version != 1, "Unsupported CNN v2 weights version\n");
+
+ // Parse layer info (20 bytes per layer)
+ const uint32_t* layer_data = header + 4;
+ for (uint32_t i = 0; i < num_layers; ++i) {
+ LayerInfo info;
+ info.kernel_size = layer_data[i * 5 + 0];
+ info.in_channels = layer_data[i * 5 + 1];
+ info.out_channels = layer_data[i * 5 + 2];
+ info.weight_offset = layer_data[i * 5 + 3];
+ info.weight_count = layer_data[i * 5 + 4];
+ layer_info_.push_back(info);
+ }
+
+ // Create GPU storage buffer for weights
+ // Buffer contains: header + layer info + packed f16 weights (as u32)
+ WGPUBufferDescriptor buffer_desc = {};
+ buffer_desc.size = weights_size;
+ buffer_desc.usage = WGPUBufferUsage_Storage | WGPUBufferUsage_CopyDst;
+ buffer_desc.mappedAtCreation = false;
+
+ weights_buffer_ = wgpuDeviceCreateBuffer(ctx_.device, &buffer_desc);
+
+ // Upload weights data
+ wgpuQueueWriteBuffer(ctx_.queue, weights_buffer_, 0, weights_data, weights_size);
+
+ // Create uniform buffer for layer params
+ WGPUBufferDescriptor params_desc = {};
+ params_desc.size = sizeof(LayerParams);
+ params_desc.usage = WGPUBufferUsage_Uniform | WGPUBufferUsage_CopyDst;
+ params_desc.mappedAtCreation = false;
+
+ layer_params_buffer_ = wgpuDeviceCreateBuffer(ctx_.device, &params_desc);
+}
+
+void CNNv2Effect::create_textures() {
+ const WGPUExtent3D size = {
+ static_cast<uint32_t>(width_),
+ static_cast<uint32_t>(height_),
+ 1
+ };
+
+ // Static features texture (8×f16 packed as 4×u32)
+ WGPUTextureDescriptor static_desc = {};
+ static_desc.usage = WGPUTextureUsage_StorageBinding | WGPUTextureUsage_TextureBinding;
+ static_desc.dimension = WGPUTextureDimension_2D;
+ static_desc.size = size;
+ static_desc.format = WGPUTextureFormat_RGBA32Uint;
+ static_desc.mipLevelCount = 1;
+ static_desc.sampleCount = 1;
+ static_features_tex_ = wgpuDeviceCreateTexture(ctx_.device, &static_desc);
+
+ WGPUTextureViewDescriptor view_desc = {};
+ view_desc.format = WGPUTextureFormat_RGBA32Uint;
+ view_desc.dimension = WGPUTextureViewDimension_2D;
+ view_desc.baseMipLevel = 0;
+ view_desc.mipLevelCount = 1;
+ view_desc.baseArrayLayer = 0;
+ view_desc.arrayLayerCount = 1;
+ static_features_view_ = wgpuTextureCreateView(static_features_tex_, &view_desc);
+
+ // Input texture with mips (for multi-scale features)
+ WGPUTextureDescriptor input_mip_desc = {};
+ input_mip_desc.usage = WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopyDst;
+ input_mip_desc.dimension = WGPUTextureDimension_2D;
+ input_mip_desc.size = size;
+ input_mip_desc.format = WGPUTextureFormat_RGBA8Unorm;
+ input_mip_desc.mipLevelCount = 3; // Levels 0, 1, 2
+ input_mip_desc.sampleCount = 1;
+ input_mip_tex_ = wgpuDeviceCreateTexture(ctx_.device, &input_mip_desc);
+
+ for (int i = 0; i < 3; ++i) {
+ WGPUTextureViewDescriptor mip_view_desc = {};
+ mip_view_desc.format = WGPUTextureFormat_RGBA8Unorm;
+ mip_view_desc.dimension = WGPUTextureViewDimension_2D;
+ mip_view_desc.baseMipLevel = i;
+ mip_view_desc.mipLevelCount = 1;
+ mip_view_desc.baseArrayLayer = 0;
+ mip_view_desc.arrayLayerCount = 1;
+ input_mip_view_[i] = wgpuTextureCreateView(input_mip_tex_, &mip_view_desc);
+ }
+
+ // Create 2 layer textures (ping-pong buffers for intermediate results)
+ // Each stores 8×f16 channels packed as 4×u32
+ for (int i = 0; i < 2; ++i) {
+ WGPUTextureDescriptor layer_desc = {};
+ layer_desc.usage = WGPUTextureUsage_StorageBinding | WGPUTextureUsage_TextureBinding;
+ layer_desc.dimension = WGPUTextureDimension_2D;
+ layer_desc.size = size;
+ layer_desc.format = WGPUTextureFormat_RGBA32Uint;
+ layer_desc.mipLevelCount = 1;
+ layer_desc.sampleCount = 1;
+
+ WGPUTexture tex = wgpuDeviceCreateTexture(ctx_.device, &layer_desc);
+ layer_textures_.push_back(tex);
+
+ WGPUTextureViewDescriptor view_desc = {};
+ view_desc.format = WGPUTextureFormat_RGBA32Uint;
+ view_desc.dimension = WGPUTextureViewDimension_2D;
+ view_desc.baseMipLevel = 0;
+ view_desc.mipLevelCount = 1;
+ view_desc.baseArrayLayer = 0;
+ view_desc.arrayLayerCount = 1;
+
+ WGPUTextureView view = wgpuTextureCreateView(tex, &view_desc);
+ layer_views_.push_back(view);
+ }
+}
+
+void CNNv2Effect::create_pipelines() {
+ // Static features compute pipeline
+ size_t shader_size = 0;
+ const char* static_code = (const char*)GetAsset(AssetId::ASSET_SHADER_CNN_V2_STATIC, &shader_size);
+
+ if (!static_code || shader_size == 0) {
+ // Shader not available (e.g., in test mode) - skip pipeline creation
+ return;
+ }
+
+ WGPUShaderSourceWGSL wgsl_src = {};
+ wgsl_src.chain.sType = WGPUSType_ShaderSourceWGSL;
+ wgsl_src.code = str_view(static_code);
+
+ WGPUShaderModuleDescriptor shader_desc = {};
+ shader_desc.nextInChain = &wgsl_src.chain;
+
+ WGPUShaderModule static_module = wgpuDeviceCreateShaderModule(ctx_.device, &shader_desc);
+ if (!static_module) {
+ return;
+ }
+
+ WGPUComputePipelineDescriptor pipeline_desc = {};
+ pipeline_desc.compute.module = static_module;
+ pipeline_desc.compute.entryPoint = str_view("main");
+
+ static_pipeline_ = wgpuDeviceCreateComputePipeline(ctx_.device, &pipeline_desc);
+ wgpuShaderModuleRelease(static_module);
+
+ // Create bind group layout for static features compute
+ // Bindings: 0=input_tex, 1=input_mip1, 2=input_mip2, 3=depth_tex, 4=output
+ WGPUBindGroupLayoutEntry bgl_entries[5] = {};
+
+ // Binding 0: Input texture (mip 0)
+ bgl_entries[0].binding = 0;
+ bgl_entries[0].visibility = WGPUShaderStage_Compute;
+ bgl_entries[0].texture.sampleType = WGPUTextureSampleType_Float;
+ bgl_entries[0].texture.viewDimension = WGPUTextureViewDimension_2D;
+
+ // Binding 1: Input texture (mip 1)
+ bgl_entries[1].binding = 1;
+ bgl_entries[1].visibility = WGPUShaderStage_Compute;
+ bgl_entries[1].texture.sampleType = WGPUTextureSampleType_Float;
+ bgl_entries[1].texture.viewDimension = WGPUTextureViewDimension_2D;
+
+ // Binding 2: Input texture (mip 2)
+ bgl_entries[2].binding = 2;
+ bgl_entries[2].visibility = WGPUShaderStage_Compute;
+ bgl_entries[2].texture.sampleType = WGPUTextureSampleType_Float;
+ bgl_entries[2].texture.viewDimension = WGPUTextureViewDimension_2D;
+
+ // Binding 3: Depth texture
+ bgl_entries[3].binding = 3;
+ bgl_entries[3].visibility = WGPUShaderStage_Compute;
+ bgl_entries[3].texture.sampleType = WGPUTextureSampleType_Float;
+ bgl_entries[3].texture.viewDimension = WGPUTextureViewDimension_2D;
+
+ // Binding 4: Output (static features)
+ bgl_entries[4].binding = 4;
+ bgl_entries[4].visibility = WGPUShaderStage_Compute;
+ bgl_entries[4].storageTexture.access = WGPUStorageTextureAccess_WriteOnly;
+ bgl_entries[4].storageTexture.format = WGPUTextureFormat_RGBA32Uint;
+ bgl_entries[4].storageTexture.viewDimension = WGPUTextureViewDimension_2D;
+
+ WGPUBindGroupLayoutDescriptor bgl_desc = {};
+ bgl_desc.entryCount = 5;
+ bgl_desc.entries = bgl_entries;
+
+ WGPUBindGroupLayout static_bgl = wgpuDeviceCreateBindGroupLayout(ctx_.device, &bgl_desc);
+
+ // Update pipeline layout
+ WGPUPipelineLayoutDescriptor pl_desc = {};
+ pl_desc.bindGroupLayoutCount = 1;
+ pl_desc.bindGroupLayouts = &static_bgl;
+ WGPUPipelineLayout pipeline_layout = wgpuDeviceCreatePipelineLayout(ctx_.device, &pl_desc);
+
+ // Recreate pipeline with proper layout
+ WGPUComputePipelineDescriptor pipeline_desc2 = {};
+ pipeline_desc2.compute.module = wgpuDeviceCreateShaderModule(ctx_.device, &shader_desc);
+ pipeline_desc2.compute.entryPoint = str_view("main");
+ pipeline_desc2.layout = pipeline_layout;
+
+ if (static_pipeline_) wgpuComputePipelineRelease(static_pipeline_);
+ static_pipeline_ = wgpuDeviceCreateComputePipeline(ctx_.device, &pipeline_desc2);
+
+ wgpuShaderModuleRelease(pipeline_desc2.compute.module);
+ wgpuPipelineLayoutRelease(pipeline_layout);
+ wgpuBindGroupLayoutRelease(static_bgl);
+
+ // CNN layer compute pipeline (storage buffer version)
+ if (layer_info_.empty()) return; // No weights loaded
+
+ size_t layer_shader_size = 0;
+ const char* layer_code = (const char*)GetAsset(AssetId::ASSET_SHADER_CNN_V2_COMPUTE, &layer_shader_size);
+
+ if (!layer_code || layer_shader_size == 0) return;
+
+ WGPUShaderSourceWGSL layer_wgsl = {};
+ layer_wgsl.chain.sType = WGPUSType_ShaderSourceWGSL;
+ layer_wgsl.code = str_view(layer_code);
+
+ WGPUShaderModuleDescriptor layer_shader_desc = {};
+ layer_shader_desc.nextInChain = &layer_wgsl.chain;
+
+ WGPUShaderModule layer_module = wgpuDeviceCreateShaderModule(ctx_.device, &layer_shader_desc);
+ if (!layer_module) return;
+
+ // Create bind group layout for layer compute
+ // 0=static_features, 1=layer_input, 2=output, 3=weights, 4=params
+ WGPUBindGroupLayoutEntry layer_bgl_entries[5] = {};
+
+ // Binding 0: Static features (texture)
+ layer_bgl_entries[0].binding = 0;
+ layer_bgl_entries[0].visibility = WGPUShaderStage_Compute;
+ layer_bgl_entries[0].texture.sampleType = WGPUTextureSampleType_Uint;
+ layer_bgl_entries[0].texture.viewDimension = WGPUTextureViewDimension_2D;
+
+ // Binding 1: Layer input (texture)
+ layer_bgl_entries[1].binding = 1;
+ layer_bgl_entries[1].visibility = WGPUShaderStage_Compute;
+ layer_bgl_entries[1].texture.sampleType = WGPUTextureSampleType_Uint;
+ layer_bgl_entries[1].texture.viewDimension = WGPUTextureViewDimension_2D;
+
+ // Binding 2: Output (storage texture)
+ layer_bgl_entries[2].binding = 2;
+ layer_bgl_entries[2].visibility = WGPUShaderStage_Compute;
+ layer_bgl_entries[2].storageTexture.access = WGPUStorageTextureAccess_WriteOnly;
+ layer_bgl_entries[2].storageTexture.format = WGPUTextureFormat_RGBA32Uint;
+ layer_bgl_entries[2].storageTexture.viewDimension = WGPUTextureViewDimension_2D;
+
+ // Binding 3: Weights (storage buffer)
+ layer_bgl_entries[3].binding = 3;
+ layer_bgl_entries[3].visibility = WGPUShaderStage_Compute;
+ layer_bgl_entries[3].buffer.type = WGPUBufferBindingType_ReadOnlyStorage;
+
+ // Binding 4: Layer params (uniform buffer)
+ layer_bgl_entries[4].binding = 4;
+ layer_bgl_entries[4].visibility = WGPUShaderStage_Compute;
+ layer_bgl_entries[4].buffer.type = WGPUBufferBindingType_Uniform;
+ layer_bgl_entries[4].buffer.minBindingSize = sizeof(LayerParams);
+
+ WGPUBindGroupLayoutDescriptor layer_bgl_desc = {};
+ layer_bgl_desc.entryCount = 5;
+ layer_bgl_desc.entries = layer_bgl_entries;
+
+ WGPUBindGroupLayout layer_bgl = wgpuDeviceCreateBindGroupLayout(ctx_.device, &layer_bgl_desc);
+
+ WGPUPipelineLayoutDescriptor layer_pl_desc = {};
+ layer_pl_desc.bindGroupLayoutCount = 1;
+ layer_pl_desc.bindGroupLayouts = &layer_bgl;
+
+ WGPUPipelineLayout layer_pipeline_layout = wgpuDeviceCreatePipelineLayout(ctx_.device, &layer_pl_desc);
+
+ WGPUComputePipelineDescriptor layer_pipeline_desc = {};
+ layer_pipeline_desc.compute.module = layer_module;
+ layer_pipeline_desc.compute.entryPoint = str_view("main");
+ layer_pipeline_desc.layout = layer_pipeline_layout;
+
+ layer_pipeline_ = wgpuDeviceCreateComputePipeline(ctx_.device, &layer_pipeline_desc);
+
+ wgpuShaderModuleRelease(layer_module);
+ wgpuPipelineLayoutRelease(layer_pipeline_layout);
+ wgpuBindGroupLayoutRelease(layer_bgl);
+}
+
+void CNNv2Effect::update_bind_group(WGPUTextureView input_view) {
+ if (!static_pipeline_) return;
+
+ // Cache input view
+ current_input_view_ = input_view;
+
+ // Release old bind group
+ if (static_bind_group_) {
+ wgpuBindGroupRelease(static_bind_group_);
+ static_bind_group_ = nullptr;
+ }
+
+ // Create bind group for static features compute
+ WGPUBindGroupEntry bg_entries[5] = {};
+
+ // Binding 0: Input (mip 0)
+ bg_entries[0].binding = 0;
+ bg_entries[0].textureView = input_view;
+
+ // Binding 1: Input (mip 1)
+ bg_entries[1].binding = 1;
+ bg_entries[1].textureView = input_mip_view_[0]; // Use mip 0 for now
+
+ // Binding 2: Input (mip 2)
+ bg_entries[2].binding = 2;
+ bg_entries[2].textureView = input_mip_view_[0]; // Use mip 0 for now
+
+ // Binding 3: Depth (use input for now, no depth available)
+ bg_entries[3].binding = 3;
+ bg_entries[3].textureView = input_view;
+
+ // Binding 4: Output (static features)
+ bg_entries[4].binding = 4;
+ bg_entries[4].textureView = static_features_view_;
+
+ WGPUBindGroupDescriptor bg_desc = {};
+ bg_desc.layout = wgpuComputePipelineGetBindGroupLayout(static_pipeline_, 0);
+ bg_desc.entryCount = 5;
+ bg_desc.entries = bg_entries;
+
+ static_bind_group_ = wgpuDeviceCreateBindGroup(ctx_.device, &bg_desc);
+
+ wgpuBindGroupLayoutRelease(bg_desc.layout);
+
+ // Create layer bind groups
+ if (!layer_pipeline_ || layer_info_.empty()) return;
+
+ // Release old layer bind groups
+ for (auto bg : layer_bind_groups_) {
+ wgpuBindGroupRelease(bg);
+ }
+ layer_bind_groups_.clear();
+
+ // Get bind group layout from layer pipeline
+ WGPUBindGroupLayout layer_bgl = wgpuComputePipelineGetBindGroupLayout(layer_pipeline_, 0);
+
+ // Create bind group for each layer
+ for (size_t i = 0; i < layer_info_.size(); ++i) {
+ WGPUBindGroupEntry layer_entries[5] = {};
+
+ // Binding 0: Static features (constant)
+ layer_entries[0].binding = 0;
+ layer_entries[0].textureView = static_features_view_;
+
+ // Binding 1: Layer input (ping-pong: use previous layer's output)
+ // First layer uses static features as input, others use ping-pong buffers
+ layer_entries[1].binding = 1;
+ layer_entries[1].textureView = (i == 0) ? static_features_view_ : layer_views_[i % 2];
+
+ // Binding 2: Output texture (ping-pong)
+ layer_entries[2].binding = 2;
+ layer_entries[2].textureView = layer_views_[(i + 1) % 2];
+
+ // Binding 3: Weights buffer (constant)
+ layer_entries[3].binding = 3;
+ layer_entries[3].buffer = weights_buffer_;
+ layer_entries[3].size = wgpuBufferGetSize(weights_buffer_);
+
+ // Binding 4: Layer params (will be updated per dispatch)
+ layer_entries[4].binding = 4;
+ layer_entries[4].buffer = layer_params_buffer_;
+ layer_entries[4].size = sizeof(LayerParams);
+
+ WGPUBindGroupDescriptor layer_bg_desc = {};
+ layer_bg_desc.layout = layer_bgl;
+ layer_bg_desc.entryCount = 5;
+ layer_bg_desc.entries = layer_entries;
+
+ WGPUBindGroup layer_bg = wgpuDeviceCreateBindGroup(ctx_.device, &layer_bg_desc);
+ layer_bind_groups_.push_back(layer_bg);
+ }
+
+ wgpuBindGroupLayoutRelease(layer_bgl);
+}
+
+void CNNv2Effect::compute(WGPUCommandEncoder encoder,
+ const CommonPostProcessUniforms& uniforms) {
+ (void)uniforms;
+ if (!initialized_ || !static_pipeline_ || !static_bind_group_) return;
+
+ // Pass 1: Compute static features
+ WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
+
+ wgpuComputePassEncoderSetPipeline(pass, static_pipeline_);
+ wgpuComputePassEncoderSetBindGroup(pass, 0, static_bind_group_, 0, nullptr);
+
+ // Dispatch workgroups (8×8 threads per group)
+ uint32_t workgroups_x = (width_ + 7) / 8;
+ uint32_t workgroups_y = (height_ + 7) / 8;
+ wgpuComputePassEncoderDispatchWorkgroups(pass, workgroups_x, workgroups_y, 1);
+
+ wgpuComputePassEncoderEnd(pass);
+ wgpuComputePassEncoderRelease(pass);
+
+ // Execute CNN layer passes
+ if (!layer_pipeline_ || layer_bind_groups_.empty()) return;
+
+ for (size_t i = 0; i < layer_info_.size(); ++i) {
+ const LayerInfo& info = layer_info_[i];
+
+ // Update layer params uniform buffer
+ LayerParams params;
+ params.kernel_size = info.kernel_size;
+ params.in_channels = info.in_channels;
+ params.out_channels = info.out_channels;
+ params.weight_offset = info.weight_offset;
+ params.is_output_layer = (i == layer_info_.size() - 1) ? 1 : 0;
+
+ wgpuQueueWriteBuffer(ctx_.queue, layer_params_buffer_, 0, &params, sizeof(params));
+
+ // Execute layer compute pass
+ WGPUComputePassEncoder layer_pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
+
+ wgpuComputePassEncoderSetPipeline(layer_pass, layer_pipeline_);
+ wgpuComputePassEncoderSetBindGroup(layer_pass, 0, layer_bind_groups_[i], 0, nullptr);
+
+ wgpuComputePassEncoderDispatchWorkgroups(layer_pass, workgroups_x, workgroups_y, 1);
+
+ wgpuComputePassEncoderEnd(layer_pass);
+ wgpuComputePassEncoderRelease(layer_pass);
+ }
+}
+
+void CNNv2Effect::render(WGPURenderPassEncoder pass,
+ const CommonPostProcessUniforms& uniforms) {
+ (void)pass;
+ (void)uniforms;
+ // Compute-only effect, rendering is done by default composite pass
+}
+
+void CNNv2Effect::cleanup() {
+ if (static_features_view_) wgpuTextureViewRelease(static_features_view_);
+ if (static_features_tex_) wgpuTextureRelease(static_features_tex_);
+ if (static_bind_group_) wgpuBindGroupRelease(static_bind_group_);
+ if (static_pipeline_) wgpuComputePipelineRelease(static_pipeline_);
+
+ if (layer_pipeline_) wgpuComputePipelineRelease(layer_pipeline_);
+ if (weights_buffer_) wgpuBufferRelease(weights_buffer_);
+ if (layer_params_buffer_) wgpuBufferRelease(layer_params_buffer_);
+
+ for (int i = 0; i < 3; ++i) {
+ if (input_mip_view_[i]) wgpuTextureViewRelease(input_mip_view_[i]);
+ }
+ if (input_mip_tex_) wgpuTextureRelease(input_mip_tex_);
+
+ for (auto view : layer_views_) wgpuTextureViewRelease(view);
+ for (auto tex : layer_textures_) wgpuTextureRelease(tex);
+ for (auto bg : layer_bind_groups_) wgpuBindGroupRelease(bg);
+
+ layer_views_.clear();
+ layer_textures_.clear();
+ layer_bind_groups_.clear();
+ layer_info_.clear();
+
+ initialized_ = false;
+}
diff --git a/src/gpu/effects/cnn_v2_effect.h b/src/gpu/effects/cnn_v2_effect.h
new file mode 100644
index 0000000..6005cf5
--- /dev/null
+++ b/src/gpu/effects/cnn_v2_effect.h
@@ -0,0 +1,64 @@
+// CNN v2 Effect - Parametric Static Features
+// Multi-pass post-processing with 7D feature input
+
+#pragma once
+#include "gpu/effect.h"
+#include <vector>
+
+class CNNv2Effect : public PostProcessEffect {
+public:
+ explicit CNNv2Effect(const GpuContext& ctx);
+ ~CNNv2Effect();
+
+ void init(MainSequence* demo) override;
+ void resize(int width, int height) override;
+ void compute(WGPUCommandEncoder encoder,
+ const CommonPostProcessUniforms& uniforms) override;
+ void render(WGPURenderPassEncoder pass,
+ const CommonPostProcessUniforms& uniforms) override;
+ void update_bind_group(WGPUTextureView input_view) override;
+
+private:
+ struct LayerInfo {
+ uint32_t kernel_size;
+ uint32_t in_channels;
+ uint32_t out_channels;
+ uint32_t weight_offset;
+ uint32_t weight_count;
+ };
+
+ struct LayerParams {
+ uint32_t kernel_size;
+ uint32_t in_channels;
+ uint32_t out_channels;
+ uint32_t weight_offset;
+ uint32_t is_output_layer;
+ };
+
+ void create_textures();
+ void create_pipelines();
+ void load_weights();
+ void cleanup();
+
+ // Static features compute
+ WGPUComputePipeline static_pipeline_;
+ WGPUBindGroup static_bind_group_;
+ WGPUTexture static_features_tex_;
+ WGPUTextureView static_features_view_;
+
+ // CNN layers (storage buffer architecture)
+ WGPUComputePipeline layer_pipeline_; // Single pipeline for all layers
+ WGPUBuffer weights_buffer_; // Storage buffer for weights
+ WGPUBuffer layer_params_buffer_; // Uniform buffer for per-layer params
+ std::vector<LayerInfo> layer_info_; // Layer metadata
+ std::vector<WGPUBindGroup> layer_bind_groups_; // Per-layer bind groups
+ std::vector<WGPUTexture> layer_textures_; // Ping-pong buffers
+ std::vector<WGPUTextureView> layer_views_;
+
+ // Input mips
+ WGPUTexture input_mip_tex_;
+ WGPUTextureView input_mip_view_[3];
+ WGPUTextureView current_input_view_;
+
+ bool initialized_;
+};
diff --git a/src/tests/gpu/test_demo_effects.cc b/src/tests/gpu/test_demo_effects.cc
index 01e6678..169db91 100644
--- a/src/tests/gpu/test_demo_effects.cc
+++ b/src/tests/gpu/test_demo_effects.cc
@@ -90,6 +90,7 @@ static void test_post_process_effects() {
std::make_shared<ThemeModulationEffect>(fixture.ctx())},
{"VignetteEffect", std::make_shared<VignetteEffect>(fixture.ctx())},
{"CNNEffect", std::make_shared<CNNEffect>(fixture.ctx())},
+ {"CNNv2Effect", std::make_shared<CNNv2Effect>(fixture.ctx())},
};
int passed = 0;
diff --git a/tools/timeline_editor/README.md b/tools/timeline_editor/README.md
index 4fcb2f4..72b5ae0 100644
--- a/tools/timeline_editor/README.md
+++ b/tools/timeline_editor/README.md
@@ -14,16 +14,29 @@ Interactive web-based editor for `timeline.seq` files.
- ⚙️ Stack-order based priority system
- 🔍 Zoom (10%-200%) with mouse wheel + Ctrl/Cmd
- 🎵 Audio waveform visualization (aligned to beats)
-- 🎼 Snap-to-beat mode (enabled by default)
+- 🎼 Quantize grid (Off, 1/32, 1/16, 1/8, 1/4, 1/2, 1 beat)
- 🎛️ BPM slider (60-200 BPM)
- 🔄 Re-order sequences by time
-- 🗑️ Delete sequences/effects
+- ✨ Add effects to sequences
+- 🗑️ Delete sequences/effects (toolbar + properties panel)
+- 📊 **CPU load visualization** (color-coded effect density)
- ▶️ Audio playback with auto-expand/collapse
- 🎚️ Sticky audio track and timeline ticks
- 🔴 **Playback indicator on waveform** (NEW)
- 🎯 **Double-click seek during playback** (NEW)
- 📍 **Click waveform to seek** (NEW)
+## CPU Load Visualization
+
+The editor displays a **CPU load bar** at the top (underneath audio waveform if loaded):
+- **Full-height bars** (80px) show effect density at each time point
+- **Color-coded:** Green (low) → Yellow (medium) → Red (high load)
+- **Load calculation:** Sum of all active effects across all sequences (1.0 per effect)
+- **Updates automatically** when effects/sequences are moved
+- **Collapsed sequences count** toward load
+
+This helps identify performance hotspots in your timeline.
+
## Usage
1. **Open:** `open tools/timeline_editor/index.html` or double-click in browser
@@ -37,10 +50,13 @@ Interactive web-based editor for `timeline.seq` files.
- Watch sequences auto-expand/collapse during playback
- Red playback indicators on both timeline and waveform show current position
5. **Edit:**
- - Drag sequences/effects to reposition
- - Double-click sequence header to collapse/expand
+ - **Add Effect:** Select sequence, click "✨ Add Effect" button
+ - **Delete:** Click item, use "🗑️ Delete Selected" or delete button in properties panel
+ - Drag sequences/effects to reposition (works when collapsed or expanded)
+ - Double-click anywhere on sequence to collapse/expand
- Click item to edit properties in side panel
- Drag effect handles to resize
+ - **Quantize:** Use dropdown or hotkeys (0-6) to snap to grid
6. **Zoom:** Ctrl/Cmd + mouse wheel (zooms at cursor position)
7. **Save:** Click "💾 Save timeline.seq"
@@ -102,7 +118,9 @@ open "tools/timeline_editor/index.html?seq=../../workspaces/main/timeline.seq"
## Keyboard Shortcuts
- **Spacebar**: Play/pause audio playback
+- **0-6**: Quantize grid (0=Off, 1=1beat, 2=1/2, 3=1/4, 4=1/8, 5=1/16, 6=1/32)
- **Double-click timeline**: Seek to position (continues playing if active)
+- **Double-click sequence**: Collapse/expand
- **Ctrl/Cmd + Wheel**: Zoom in/out at cursor position
## Technical Notes
@@ -113,10 +131,11 @@ open "tools/timeline_editor/index.html?seq=../../workspaces/main/timeline.seq"
- BPM used for seconds conversion (tooltips, audio waveform alignment)
- Priority determines render order (higher = on top)
- Collapsed sequences show 35px title bar, expanded show full effect stack
-- Time markers show beats by default (4-beat/bar increments)
+- **Show Beats** toggle: Switch time markers between beats and seconds
+- Time markers show 4-beat/bar increments (beats) or 1s increments (seconds)
- **Waveform and time markers are sticky** at top during scroll/zoom
- Vertical grid lines aid alignment
-- Snap-to-beat enabled by default for musical alignment
+- **Quantize grid**: Independent snap control (works in both beat and second display modes)
- **Auto-expand/collapse**: Active sequence expands during playback, previous collapses
- **Auto-scroll**: Timeline follows playback indicator (keeps it in middle third of viewport)
- **Dual playback indicators**: Red bars on both timeline and waveform (synchronized)
diff --git a/tools/timeline_editor/index.html b/tools/timeline_editor/index.html
index 21bedd1..eca7b97 100644
--- a/tools/timeline_editor/index.html
+++ b/tools/timeline_editor/index.html
@@ -4,6 +4,7 @@
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Timeline Editor - timeline.seq</title>
+ <link rel="icon" href="data:image/svg+xml,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 100 100'><rect width='100' height='100' fill='%231e1e1e'/><rect x='10' y='30' width='15' height='40' fill='%234ec9b0'/><rect x='30' y='20' width='15' height='60' fill='%234ec9b0'/><rect x='50' y='35' width='15' height='30' fill='%234ec9b0'/><rect x='70' y='15' width='15' height='70' fill='%234ec9b0'/></svg>">
<style>
:root {
--bg-dark: #1e1e1e;
@@ -45,7 +46,8 @@
.sticky-header { position: sticky; top: 0; background: var(--bg-medium); z-index: 100; padding: 20px 20px 10px 20px; border-bottom: 2px solid var(--bg-light); flex-shrink: 0; }
.waveform-container { position: relative; height: 80px; overflow: hidden; background: rgba(0, 0, 0, 0.3); border-radius: var(--radius); cursor: crosshair; }
- #waveformCanvas { position: absolute; left: 0; top: 0; height: 80px; display: block; }
+ #cpuLoadCanvas { position: absolute; left: 0; bottom: 0; height: 10px; display: block; z-index: 1; }
+ #waveformCanvas { position: absolute; left: 0; top: 0; height: 80px; display: block; z-index: 2; }
.playback-indicator { position: absolute; top: 0; left: 0; width: 2px; background: var(--accent-red); box-shadow: 0 0 4px rgba(244, 135, 113, 0.8); pointer-events: none; z-index: 90; display: block; }
@@ -66,7 +68,7 @@
100% { box-shadow: 0 0 10px rgba(14, 99, 156, 0.5); border-color: var(--accent-blue); }
}
- .sequence-header { position: absolute; top: 0; left: 0; right: 0; padding: 8px; z-index: 5; cursor: pointer; user-select: none; }
+ .sequence-header { position: absolute; top: 0; left: 0; right: 0; padding: 8px; z-index: 5; cursor: move; user-select: none; }
.sequence-header-name { font-size: 14px; font-weight: bold; color: #ffffff; }
.sequence:not(.collapsed) .sequence-header-name { display: none; }
.sequence-name { position: absolute; top: 50%; left: 50%; transform: translate(-50%, -50%); font-size: 24px; font-weight: bold; color: #ffffff; text-shadow: 2px 2px 8px rgba(0, 0, 0, 0.9), -1px -1px 4px rgba(0, 0, 0, 0.7); pointer-events: none; white-space: nowrap; opacity: 1; transition: opacity 0.3s ease; z-index: 10; }
@@ -75,6 +77,8 @@
.effect { position: absolute; background: #3a3d41; border: 1px solid var(--border-color); border-radius: 3px; padding: 4px 8px; cursor: move; font-size: 11px; transition: box-shadow 0.2s; display: flex; align-items: center; white-space: nowrap; overflow: hidden; text-overflow: ellipsis; }
.effect:hover { box-shadow: 0 0 8px rgba(133, 133, 133, 0.5); background: #45484d; }
.effect.selected { border-color: var(--accent-orange); box-shadow: 0 0 8px rgba(206, 145, 120, 0.5); }
+ .effect.conflict { background: #4a1d1d; border-color: var(--accent-red); box-shadow: 0 0 8px rgba(244, 135, 113, 0.6); }
+ .effect.conflict:hover { background: #5a2424; }
.effect-handle { position: absolute; top: 0; width: 6px; height: 100%; background: rgba(78, 201, 176, 0.8); cursor: ew-resize; display: none; z-index: 10; }
.effect.selected .effect-handle { display: block; }
.effect-handle.left { left: 0; border-radius: 3px 0 0 3px; }
@@ -111,6 +115,7 @@
<label class="file-label">🎵 Load Audio (WAV)<input type="file" id="audioInput" accept=".wav"></label>
<button id="clearAudioBtn" disabled>✖ Clear Audio</button>
<button id="addSequenceBtn" disabled>➕ Add Sequence</button>
+ <button id="addEffectBtn" disabled>✨ Add Effect</button>
<button id="deleteBtn" disabled>🗑️ Delete Selected</button>
<button id="reorderBtn" disabled>🔄 Re-order by Time</button>
</div>
@@ -124,6 +129,17 @@
<label class="checkbox-label" style="margin-left: 20px">
<input type="checkbox" id="showBeatsCheckbox" checked>Show Beats
</label>
+ <label style="margin-left: 20px">Quantize:
+ <select id="quantizeSelect">
+ <option value="0">Off</option>
+ <option value="32">1/32</option>
+ <option value="16">1/16</option>
+ <option value="8">1/8</option>
+ <option value="4">1/4</option>
+ <option value="2">1/2</option>
+ <option value="1" selected>1 beat</option>
+ </select>
+ </label>
<div id="playbackControls" style="display: none; margin-left: 20px; gap: 10px; align-items: center;">
<span id="playbackTime">0.00s (0.00b)</span>
<button id="playPauseBtn">▶ Play</button>
@@ -134,9 +150,10 @@
<div class="timeline-container">
<div class="sticky-header">
- <div class="waveform-container" id="waveformContainer" style="display: none;">
- <div class="playback-indicator" id="waveformPlaybackIndicator"></div>
+ <div class="waveform-container" id="waveformContainer">
+ <canvas id="cpuLoadCanvas"></canvas>
<canvas id="waveformCanvas"></canvas>
+ <div class="playback-indicator" id="waveformPlaybackIndicator"></div>
</div>
<div class="time-markers" id="timeMarkers"></div>
</div>
@@ -160,14 +177,19 @@
</div>
<script>
+ // Constants
+ const POST_PROCESS_EFFECTS = new Set(['FadeEffect', 'FlashEffect', 'GaussianBlurEffect',
+ 'SolarizeEffect', 'VignetteEffect', 'ChromaAberrationEffect', 'DistortEffect',
+ 'ThemeModulationEffect', 'CNNEffect', 'CNNv2Effect']);
+
// State
const state = {
sequences: [], currentFile: null, selectedItem: null, pixelsPerSecond: 100,
- showBeats: true, bpm: 120, isDragging: false, dragOffset: { x: 0, y: 0 },
- lastActiveSeqIndex: -1, isDraggingHandle: false, handleType: null,
+ showBeats: true, quantizeUnit: 1, bpm: 120, isDragging: false, dragOffset: { x: 0, y: 0 },
+ lastActiveSeqIndex: -1, isDraggingHandle: false, handleType: null, handleDragOffset: 0,
audioBuffer: null, audioDuration: 0, audioSource: null, audioContext: null,
isPlaying: false, playbackStartTime: 0, playbackOffset: 0, animationFrameId: null,
- lastExpandedSeqIndex: -1
+ lastExpandedSeqIndex: -1, dragMoved: false
};
// DOM
@@ -180,7 +202,9 @@
clearAudioBtn: document.getElementById('clearAudioBtn'),
waveformCanvas: document.getElementById('waveformCanvas'),
waveformContainer: document.getElementById('waveformContainer'),
+ cpuLoadCanvas: document.getElementById('cpuLoadCanvas'),
addSequenceBtn: document.getElementById('addSequenceBtn'),
+ addEffectBtn: document.getElementById('addEffectBtn'),
deleteBtn: document.getElementById('deleteBtn'),
reorderBtn: document.getElementById('reorderBtn'),
propertiesPanel: document.getElementById('propertiesPanel'),
@@ -198,7 +222,8 @@
panelCollapseBtn: document.getElementById('panelCollapseBtn'),
bpmSlider: document.getElementById('bpmSlider'),
currentBPM: document.getElementById('currentBPM'),
- showBeatsCheckbox: document.getElementById('showBeatsCheckbox')
+ showBeatsCheckbox: document.getElementById('showBeatsCheckbox'),
+ quantizeSelect: document.getElementById('quantizeSelect')
};
// Parser
@@ -252,18 +277,41 @@
return { sequences, bpm };
}
+ // Helpers
+ const beatsToTime = (beats) => beats * 60.0 / state.bpm;
+ const timeToBeats = (seconds) => seconds * state.bpm / 60.0;
+ const beatRange = (start, end) => {
+ const s = start.toFixed(1), e = end.toFixed(1);
+ const ss = beatsToTime(start).toFixed(1), es = beatsToTime(end).toFixed(1);
+ return state.showBeats ? `${s}-${e}b (${ss}-${es}s)` : `${ss}-${es}s (${s}-${e}b)`;
+ };
+
+ function detectConflicts(seq) {
+ const conflicts = new Set();
+ const priorityGroups = {};
+ seq.effects.forEach((effect, idx) => {
+ if (POST_PROCESS_EFFECTS.has(effect.className)) {
+ if (!priorityGroups[effect.priority]) priorityGroups[effect.priority] = [];
+ priorityGroups[effect.priority].push(idx);
+ }
+ });
+ for (const priority in priorityGroups) {
+ if (priorityGroups[priority].length > 1) {
+ for (const idx of priorityGroups[priority]) conflicts.add(idx);
+ }
+ }
+ return conflicts;
+ }
+
function serializeSeqFile(sequences) {
let output = `# Demo Timeline\n# Generated by Timeline Editor\n# BPM ${state.bpm}\n\n`;
for (const seq of sequences) {
- const seqLine = `SEQUENCE ${seq.startTime.toFixed(2)} ${seq.priority}`;
- output += seq.name ? `${seqLine} "${seq.name}"\n` : `${seqLine}\n`;
+ output += `SEQUENCE ${seq.startTime.toFixed(2)} ${seq.priority}${seq.name ? ` "${seq.name}"` : ''}\n`;
for (const effect of seq.effects) {
const modifier = effect.priorityModifier || '+';
+ const cleanArgs = effect.args?.replace(/\s*#\s*Priority:\s*\d+/i, '').trim();
output += ` EFFECT ${modifier} ${effect.className} ${effect.startTime.toFixed(2)} ${effect.endTime.toFixed(2)}`;
- if (effect.args) {
- const cleanArgs = effect.args.replace(/\s*#\s*Priority:\s*\d+/i, '').trim();
- if (cleanArgs) output += ` ${cleanArgs}`;
- }
+ if (cleanArgs) output += ` ${cleanArgs}`;
output += '\n';
}
output += '\n';
@@ -279,7 +327,6 @@
state.audioBuffer = await state.audioContext.decodeAudioData(arrayBuffer);
state.audioDuration = state.audioBuffer.duration;
renderWaveform();
- dom.waveformContainer.style.display = 'block';
dom.playbackControls.style.display = 'flex';
dom.clearAudioBtn.disabled = false;
showMessage(`Audio loaded: ${state.audioDuration.toFixed(2)}s`, 'success');
@@ -292,38 +339,123 @@
function renderWaveform() {
if (!state.audioBuffer) return;
const canvas = dom.waveformCanvas, ctx = canvas.getContext('2d');
- const audioDurationBeats = state.audioDuration * state.bpm / 60.0;
- const canvasWidth = audioDurationBeats * state.pixelsPerSecond, canvasHeight = 80;
- canvas.width = canvasWidth; canvas.height = canvasHeight;
- canvas.style.width = `${canvasWidth}px`; canvas.style.height = `${canvasHeight}px`;
- dom.waveformPlaybackIndicator.style.height = `${canvasHeight}px`;
- ctx.fillStyle = 'rgba(0, 0, 0, 0.3)'; ctx.fillRect(0, 0, canvasWidth, canvasHeight);
+ const w = timeToBeats(state.audioDuration) * state.pixelsPerSecond, h = 80;
+ canvas.width = w; canvas.height = h;
+ canvas.style.width = `${w}px`; canvas.style.height = `${h}px`;
+ dom.waveformPlaybackIndicator.style.height = `${h}px`;
+ ctx.fillStyle = 'rgba(0, 0, 0, 0.3)'; ctx.fillRect(0, 0, w, h);
+
const channelData = state.audioBuffer.getChannelData(0);
- const samplesPerPixel = Math.ceil(channelData.length / canvasWidth);
+ const samplesPerPixel = Math.ceil(channelData.length / w);
+ const centerY = h / 2, amplitudeScale = h * 0.4;
+
ctx.strokeStyle = '#4ec9b0'; ctx.lineWidth = 1; ctx.beginPath();
- const centerY = canvasHeight / 2, amplitudeScale = canvasHeight * 0.4;
- for (let x = 0; x < canvasWidth; x++) {
- const startSample = Math.floor(x * samplesPerPixel);
- const endSample = Math.min(startSample + samplesPerPixel, channelData.length);
+ for (let x = 0; x < w; x++) {
+ const start = Math.floor(x * samplesPerPixel);
+ const end = Math.min(start + samplesPerPixel, channelData.length);
let min = 1.0, max = -1.0;
- for (let i = startSample; i < endSample; i++) {
- const sample = channelData[i];
- if (sample < min) min = sample;
- if (sample > max) max = sample;
+ for (let i = start; i < end; i++) {
+ min = Math.min(min, channelData[i]);
+ max = Math.max(max, channelData[i]);
}
const yMin = centerY - min * amplitudeScale, yMax = centerY - max * amplitudeScale;
- if (x === 0) ctx.moveTo(x, yMin); else ctx.lineTo(x, yMin);
+ x === 0 ? ctx.moveTo(x, yMin) : ctx.lineTo(x, yMin);
ctx.lineTo(x, yMax);
}
ctx.stroke();
- ctx.strokeStyle = 'rgba(255, 255, 255, 0.1)'; ctx.lineWidth = 1; ctx.beginPath();
- ctx.moveTo(0, centerY); ctx.lineTo(canvasWidth, centerY); ctx.stroke();
+ ctx.strokeStyle = 'rgba(255, 255, 255, 0.1)';
+ ctx.beginPath(); ctx.moveTo(0, centerY); ctx.lineTo(w, centerY); ctx.stroke();
+ }
+
+ function computeCPULoad() {
+ if (state.sequences.length === 0) return { maxTime: 60, loads: [], conflicts: [] };
+ let maxTime = Math.max(60, ...state.sequences.flatMap(seq =>
+ seq.effects.map(eff => seq.startTime + eff.endTime)));
+ if (state.audioDuration > 0) maxTime = Math.max(maxTime, timeToBeats(state.audioDuration));
+
+ const resolution = 0.1, numSamples = Math.ceil(maxTime / resolution);
+ const loads = new Array(numSamples).fill(0);
+ const conflicts = new Array(numSamples).fill(false);
+
+ const markConflict = (seq, effect) => {
+ const start = Math.floor((seq.startTime + effect.startTime) / resolution);
+ const end = Math.ceil((seq.startTime + effect.endTime) / resolution);
+ for (let i = start; i < end && i < numSamples; i++) conflicts[i] = true;
+ };
+
+ // Track load
+ state.sequences.forEach(seq => seq.effects.forEach(effect => {
+ const start = Math.floor((seq.startTime + effect.startTime) / resolution);
+ const end = Math.ceil((seq.startTime + effect.endTime) / resolution);
+ for (let i = start; i < end && i < numSamples; i++) loads[i] += 1.0;
+ }));
+
+ // Detect within-sequence conflicts
+ state.sequences.forEach(seq => {
+ const priorityGroups = {};
+ seq.effects.forEach(eff => {
+ if (POST_PROCESS_EFFECTS.has(eff.className)) {
+ (priorityGroups[eff.priority] ??= []).push(eff);
+ }
+ });
+ Object.values(priorityGroups).forEach(group => {
+ if (group.length > 1) group.forEach(eff => markConflict(seq, eff));
+ });
+ });
+
+ // Detect cross-sequence conflicts
+ const timeGroups = {};
+ state.sequences.forEach((seq, idx) =>
+ (timeGroups[seq.startTime.toFixed(2)] ??= []).push(idx));
+
+ Object.values(timeGroups).forEach(seqIndices => {
+ if (seqIndices.length < 2) return;
+ const crossPriorityMap = {};
+ seqIndices.forEach(idx => {
+ const seq = state.sequences[idx];
+ seq.effects.forEach(eff => {
+ if (POST_PROCESS_EFFECTS.has(eff.className)) {
+ (crossPriorityMap[eff.priority] ??= []).push({ effect: eff, seq });
+ }
+ });
+ });
+ Object.values(crossPriorityMap).forEach(group => {
+ if (group.length > 1) group.forEach(({ effect, seq }) => markConflict(seq, effect));
+ });
+ });
+
+ return { maxTime, loads, conflicts, resolution };
+ }
+
+ function renderCPULoad() {
+ const canvas = dom.cpuLoadCanvas, ctx = canvas.getContext('2d');
+ const { maxTime, loads, conflicts, resolution } = computeCPULoad();
+ const w = maxTime * state.pixelsPerSecond, h = 10;
+ canvas.width = w; canvas.height = h;
+ canvas.style.width = `${w}px`; canvas.style.height = `${h}px`;
+ ctx.fillStyle = 'rgba(0, 0, 0, 0.3)'; ctx.fillRect(0, 0, w, h);
+ if (loads.length === 0) return;
+
+ const barWidth = resolution * state.pixelsPerSecond;
+ loads.forEach((load, i) => {
+ if (load === 0) return;
+ const n = Math.min(load / 8, 1.0);
+ let r, g, b;
+ if (conflicts[i]) { r = 200; g = 100; b = 90; }
+ else if (n < 0.5) { const t = n * 2; r = 120 + t * 50; g = 180 + t * 20; b = 140; }
+ else { const t = (n - 0.5) * 2; r = 170 + t * 30; g = 200 - t * 50; b = 140; }
+ ctx.fillStyle = `rgba(${r|0}, ${g|0}, ${b|0}, 0.7)`;
+ ctx.fillRect(i * barWidth, 0, barWidth, h);
+ });
}
function clearAudio() {
stopPlayback(); state.audioBuffer = null; state.audioDuration = 0;
- dom.waveformContainer.style.display = 'none'; dom.playbackControls.style.display = 'none';
- dom.clearAudioBtn.disabled = true; renderTimeline(); showMessage('Audio cleared', 'success');
+ dom.playbackControls.style.display = 'none';
+ dom.clearAudioBtn.disabled = true;
+ const ctx = dom.waveformCanvas.getContext('2d');
+ ctx.clearRect(0, 0, dom.waveformCanvas.width, dom.waveformCanvas.height);
+ renderTimeline(); showMessage('Audio cleared', 'success');
}
async function startPlayback() {
@@ -358,18 +490,13 @@
function updatePlaybackPosition() {
if (!state.isPlaying) return;
const elapsed = state.audioContext.currentTime - state.playbackStartTime;
- const currentTime = state.playbackOffset + elapsed, currentBeats = currentTime * state.bpm / 60.0;
+ const currentTime = state.playbackOffset + elapsed;
+ const currentBeats = timeToBeats(currentTime);
dom.playbackTime.textContent = `${currentTime.toFixed(2)}s (${currentBeats.toFixed(2)}b)`;
const indicatorX = currentBeats * state.pixelsPerSecond;
- dom.playbackIndicator.style.left = `${indicatorX}px`;
- dom.waveformPlaybackIndicator.style.left = `${indicatorX}px`;
- const viewportWidth = dom.timelineContent.clientWidth;
- const targetScrollX = indicatorX - viewportWidth * 0.4;
- const currentScrollX = dom.timelineContent.scrollLeft;
- const scrollDiff = targetScrollX - currentScrollX;
- if (Math.abs(scrollDiff) > 5) {
- dom.timelineContent.scrollLeft += scrollDiff * 0.1;
- }
+ dom.playbackIndicator.style.left = dom.waveformPlaybackIndicator.style.left = `${indicatorX}px`;
+ const scrollDiff = indicatorX - dom.timelineContent.clientWidth * 0.4 - dom.timelineContent.scrollLeft;
+ if (Math.abs(scrollDiff) > 5) dom.timelineContent.scrollLeft += scrollDiff * 0.1;
expandSequenceAtTime(currentBeats);
state.animationFrameId = requestAnimationFrame(updatePlaybackPosition);
}
@@ -395,6 +522,7 @@
// Render
function renderTimeline() {
+ renderCPULoad();
dom.timeline.innerHTML = ''; document.getElementById('timeMarkers').innerHTML = '';
let maxTime = 60;
for (const seq of state.sequences) {
@@ -442,7 +570,6 @@
const headerName = document.createElement('span'); headerName.className = 'sequence-header-name';
headerName.textContent = seq.name || `Sequence ${seqIndex + 1}`;
seqHeaderDiv.appendChild(headerName);
- seqHeaderDiv.addEventListener('mousedown', e => e.stopPropagation());
seqHeaderDiv.addEventListener('dblclick', e => { e.stopPropagation(); e.preventDefault(); seq._collapsed = !seq._collapsed; renderTimeline(); });
seqDiv.appendChild(seqHeaderDiv);
const seqNameDiv = document.createElement('div'); seqNameDiv.className = 'sequence-name';
@@ -453,26 +580,36 @@
seqDiv.addEventListener('mouseleave', () => seqDiv.classList.remove('hovered'));
seqDiv.addEventListener('mousedown', e => startDrag(e, 'sequence', seqIndex));
seqDiv.addEventListener('click', e => { e.stopPropagation(); selectItem('sequence', seqIndex); });
+ seqDiv.addEventListener('dblclick', e => { e.stopPropagation(); e.preventDefault(); seq._collapsed = !seq._collapsed; renderTimeline(); });
dom.timeline.appendChild(seqDiv);
if (!seq._collapsed) {
+ const conflicts = detectConflicts(seq);
seq.effects.forEach((effect, effectIndex) => {
- const effectDiv = document.createElement('div'); effectDiv.className = 'effect';
- effectDiv.dataset.seqIndex = seqIndex; effectDiv.dataset.effectIndex = effectIndex;
- const effectStart = (seq.startTime + effect.startTime) * state.pixelsPerSecond;
- const effectWidth = (effect.endTime - effect.startTime) * state.pixelsPerSecond;
- effectDiv.style.left = `${effectStart}px`; effectDiv.style.top = `${seq._yPosition + 20 + effectIndex * 30}px`;
- effectDiv.style.width = `${effectWidth}px`; effectDiv.style.height = '26px';
- const startBeat = effect.startTime.toFixed(1), endBeat = effect.endTime.toFixed(1);
- const startSec = (effect.startTime * 60.0 / state.bpm).toFixed(1), endSec = (effect.endTime * 60.0 / state.bpm).toFixed(1);
- const timeDisplay = state.showBeats ? `${startBeat}-${endBeat}b (${startSec}-${endSec}s)` : `${startSec}-${endSec}s (${startBeat}-${endBeat}b)`;
+ const effectDiv = document.createElement('div');
+ effectDiv.className = 'effect';
+ if (conflicts.has(effectIndex)) effectDiv.classList.add('conflict');
+ Object.assign(effectDiv.dataset, { seqIndex, effectIndex });
+ Object.assign(effectDiv.style, {
+ left: `${(seq.startTime + effect.startTime) * state.pixelsPerSecond}px`,
+ top: `${seq._yPosition + 20 + effectIndex * 30}px`,
+ width: `${(effect.endTime - effect.startTime) * state.pixelsPerSecond}px`,
+ height: '26px'
+ });
effectDiv.innerHTML = `<div class="effect-handle left"></div><small>${effect.className}</small><div class="effect-handle right"></div>`;
- effectDiv.title = `${effect.className}\n${timeDisplay}\nPriority: ${effect.priority}\n${effect.args || '(no args)'}`;
- if (state.selectedItem && state.selectedItem.type === 'effect' && state.selectedItem.seqIndex === seqIndex && state.selectedItem.effectIndex === effectIndex) effectDiv.classList.add('selected');
- const leftHandle = effectDiv.querySelector('.effect-handle.left');
- const rightHandle = effectDiv.querySelector('.effect-handle.right');
- leftHandle.addEventListener('mousedown', e => { e.stopPropagation(); startHandleDrag(e, 'left', seqIndex, effectIndex); });
- rightHandle.addEventListener('mousedown', e => { e.stopPropagation(); startHandleDrag(e, 'right', seqIndex, effectIndex); });
- effectDiv.addEventListener('mousedown', e => { if (!e.target.classList.contains('effect-handle')) { e.stopPropagation(); startDrag(e, 'effect', seqIndex, effectIndex); } });
+ const conflictWarning = conflicts.has(effectIndex) ?
+ `\n⚠️ CONFLICT: Multiple post-process effects share priority ${effect.priority}` : '';
+ effectDiv.title = `${effect.className}\n${beatRange(effect.startTime, effect.endTime)}\nPriority: ${effect.priority}${conflictWarning}\n${effect.args || '(no args)'}`;
+ if (state.selectedItem?.type === 'effect' && state.selectedItem.seqIndex === seqIndex && state.selectedItem.effectIndex === effectIndex)
+ effectDiv.classList.add('selected');
+ effectDiv.querySelector('.effect-handle.left').addEventListener('mousedown', e => {
+ e.stopPropagation(); startHandleDrag(e, 'left', seqIndex, effectIndex);
+ });
+ effectDiv.querySelector('.effect-handle.right').addEventListener('mousedown', e => {
+ e.stopPropagation(); startHandleDrag(e, 'right', seqIndex, effectIndex);
+ });
+ effectDiv.addEventListener('mousedown', e => {
+ if (!e.target.classList.contains('effect-handle')) { e.stopPropagation(); startDrag(e, 'effect', seqIndex, effectIndex); }
+ });
effectDiv.addEventListener('click', e => { e.stopPropagation(); selectItem('effect', seqIndex, effectIndex); });
dom.timeline.appendChild(effectDiv);
});
@@ -485,26 +622,27 @@
// Drag
function startDrag(e, type, seqIndex, effectIndex = null) {
- e.preventDefault(); state.isDragging = true;
- const timelineRect = dom.timeline.getBoundingClientRect();
+ state.isDragging = true;
+ state.dragMoved = false;
+ const containerRect = dom.timelineContent.getBoundingClientRect();
const currentLeft = parseFloat(e.currentTarget.style.left) || 0;
- state.dragOffset.x = e.clientX - timelineRect.left - currentLeft;
+ state.dragOffset.x = e.clientX - containerRect.left + dom.timelineContent.scrollLeft - currentLeft;
state.dragOffset.y = e.clientY - e.currentTarget.getBoundingClientRect().top;
state.selectedItem = { type, index: seqIndex, seqIndex, effectIndex };
- renderTimeline(); updateProperties();
document.addEventListener('mousemove', onDrag); document.addEventListener('mouseup', stopDrag);
}
function onDrag(e) {
if (!state.isDragging || !state.selectedItem) return;
- const timelineRect = dom.timeline.getBoundingClientRect();
- let newTime = Math.max(0, (e.clientX - timelineRect.left - state.dragOffset.x) / state.pixelsPerSecond);
- if (state.showBeats) newTime = Math.round(newTime);
- if (state.selectedItem.type === 'sequence') state.sequences[state.selectedItem.index].startTime = Math.round(newTime * 100) / 100;
+ state.dragMoved = true;
+ const containerRect = dom.timelineContent.getBoundingClientRect();
+ let newTime = Math.max(0, (e.clientX - containerRect.left + dom.timelineContent.scrollLeft - state.dragOffset.x) / state.pixelsPerSecond);
+ if (state.quantizeUnit > 0) newTime = Math.round(newTime * state.quantizeUnit) / state.quantizeUnit;
+ if (state.selectedItem.type === 'sequence') state.sequences[state.selectedItem.index].startTime = newTime;
else if (state.selectedItem.type === 'effect') {
const seq = state.sequences[state.selectedItem.seqIndex], effect = seq.effects[state.selectedItem.effectIndex];
const duration = effect.endTime - effect.startTime, relativeTime = newTime - seq.startTime;
- effect.startTime = Math.round(relativeTime * 100) / 100; effect.endTime = effect.startTime + duration;
+ effect.startTime = relativeTime; effect.endTime = effect.startTime + duration;
}
renderTimeline(); updateProperties();
}
@@ -512,35 +650,46 @@
function stopDrag() {
state.isDragging = false;
document.removeEventListener('mousemove', onDrag); document.removeEventListener('mouseup', stopDrag);
+ if (state.dragMoved) {
+ renderTimeline(); updateProperties();
+ }
}
function startHandleDrag(e, type, seqIndex, effectIndex) {
e.preventDefault(); state.isDraggingHandle = true; state.handleType = type;
state.selectedItem = { type: 'effect', seqIndex, effectIndex, index: seqIndex };
- renderTimeline(); updateProperties();
+ const seq = state.sequences[seqIndex], effect = seq.effects[effectIndex];
+ const containerRect = dom.timelineContent.getBoundingClientRect();
+ const mouseTimeBeats = (e.clientX - containerRect.left + dom.timelineContent.scrollLeft) / state.pixelsPerSecond;
+ const handleTimeBeats = seq.startTime + (type === 'left' ? effect.startTime : effect.endTime);
+ state.handleDragOffset = handleTimeBeats - mouseTimeBeats;
document.addEventListener('mousemove', onHandleDrag); document.addEventListener('mouseup', stopHandleDrag);
}
function onHandleDrag(e) {
if (!state.isDraggingHandle || !state.selectedItem) return;
- const timelineRect = dom.timeline.getBoundingClientRect();
- let newTime = Math.max(0, (e.clientX - timelineRect.left) / state.pixelsPerSecond);
- if (state.showBeats) newTime = Math.round(newTime);
+ const containerRect = dom.timelineContent.getBoundingClientRect();
+ let newTime = (e.clientX - containerRect.left + dom.timelineContent.scrollLeft) / state.pixelsPerSecond + state.handleDragOffset;
+ newTime = Math.max(0, newTime);
+ if (state.quantizeUnit > 0) newTime = Math.round(newTime * state.quantizeUnit) / state.quantizeUnit;
const seq = state.sequences[state.selectedItem.seqIndex], effect = seq.effects[state.selectedItem.effectIndex];
const relativeTime = newTime - seq.startTime;
- if (state.handleType === 'left') effect.startTime = Math.min(Math.round(relativeTime * 100) / 100, effect.endTime - 0.1);
- else if (state.handleType === 'right') effect.endTime = Math.max(effect.startTime + 0.1, Math.round(relativeTime * 100) / 100);
+ if (state.handleType === 'left') effect.startTime = Math.min(relativeTime, effect.endTime - 0.1);
+ else if (state.handleType === 'right') effect.endTime = Math.max(effect.startTime + 0.1, relativeTime);
renderTimeline(); updateProperties();
}
function stopHandleDrag() {
state.isDraggingHandle = false; state.handleType = null;
document.removeEventListener('mousemove', onHandleDrag); document.removeEventListener('mouseup', stopHandleDrag);
+ renderTimeline(); updateProperties();
}
function selectItem(type, seqIndex, effectIndex = null) {
state.selectedItem = { type, index: seqIndex, seqIndex, effectIndex };
- renderTimeline(); updateProperties(); dom.deleteBtn.disabled = false;
+ renderTimeline(); updateProperties();
+ dom.deleteBtn.disabled = false;
+ dom.addEffectBtn.disabled = type !== 'sequence';
}
// Properties
@@ -550,11 +699,13 @@
if (state.selectedItem.type === 'sequence') {
const seq = state.sequences[state.selectedItem.index];
dom.propertiesContent.innerHTML = `
- <div class="property-group"><label>Name</label><input type="text" id="propName" value="${seq.name || ''}" placeholder="Sequence name"></div>
- <div class="property-group"><label>Start Time (seconds)</label><input type="number" id="propStartTime" value="${seq.startTime}" step="0.1" min="0"></div>
+ <div class="property-group"><label>Name</label><input type="text" id="propName" value="${seq.name || ''}" placeholder="Sequence name" inputmode="text"></div>
+ <div class="property-group"><label>Start Time (beats)</label><input type="number" id="propStartTime" value="${seq.startTime}" step="0.1" min="0"></div>
+ <div class="property-group"><button id="propDeleteBtn" style="width: 100%; background: var(--accent-red);">🗑️ Delete Sequence</button></div>
`;
document.getElementById('propName').addEventListener('input', applyProperties);
document.getElementById('propStartTime').addEventListener('input', applyProperties);
+ document.getElementById('propDeleteBtn').addEventListener('click', () => dom.deleteBtn.click());
} else if (state.selectedItem.type === 'effect') {
const effect = state.sequences[state.selectedItem.seqIndex].effects[state.selectedItem.effectIndex];
const effects = state.sequences[state.selectedItem.seqIndex].effects;
@@ -572,6 +723,7 @@
</div>
<button id="togglePriorityBtn" style="width: 100%;">${samePriority ? '✓ Same as Above (=)' : 'Increment (+)'}</button>
</div>
+ <div class="property-group"><button id="propDeleteBtn" style="width: 100%; background: var(--accent-red);">🗑️ Delete Effect</button></div>
`;
document.getElementById('propClassName').addEventListener('input', applyProperties);
document.getElementById('propStartTime').addEventListener('input', applyProperties);
@@ -580,6 +732,7 @@
document.getElementById('moveUpBtn').addEventListener('click', moveEffectUp);
document.getElementById('moveDownBtn').addEventListener('click', moveEffectDown);
document.getElementById('togglePriorityBtn').addEventListener('click', toggleSamePriority);
+ document.getElementById('propDeleteBtn').addEventListener('click', () => dom.deleteBtn.click());
}
}
@@ -633,10 +786,8 @@
function updateStats() {
const effectCount = state.sequences.reduce((sum, seq) => sum + seq.effects.length, 0);
- const maxTime = state.sequences.reduce((max, seq) => {
- const seqMax = seq.effects.reduce((m, e) => Math.max(m, seq.startTime + e.endTime), seq.startTime);
- return Math.max(max, seqMax);
- }, 0);
+ const maxTime = Math.max(0, ...state.sequences.flatMap(seq =>
+ seq.effects.map(e => seq.startTime + e.endTime).concat(seq.startTime)));
dom.stats.innerHTML = `📊 Sequences: ${state.sequences.length} | 🎬 Effects: ${effectCount} | ⏱️ Duration: ${maxTime.toFixed(2)}s`;
}
@@ -700,16 +851,17 @@
dom.waveformContainer.addEventListener('click', async e => {
if (!state.audioBuffer) return;
const rect = dom.waveformContainer.getBoundingClientRect();
- const clickX = e.clientX - rect.left + dom.timelineContent.scrollLeft;
- const clickTime = (clickX / state.pixelsPerSecond) * 60.0 / state.bpm;
+ const canvasOffset = parseFloat(dom.waveformCanvas.style.left) || 0;
+ const clickX = e.clientX - rect.left - canvasOffset;
+ const clickBeats = clickX / state.pixelsPerSecond;
+ const clickTime = beatsToTime(clickBeats);
const wasPlaying = state.isPlaying;
if (wasPlaying) stopPlayback(false);
state.playbackOffset = Math.max(0, Math.min(clickTime, state.audioDuration));
- const clickBeats = state.playbackOffset * state.bpm / 60.0;
- dom.playbackTime.textContent = `${state.playbackOffset.toFixed(2)}s (${clickBeats.toFixed(2)}b)`;
- const indicatorX = clickBeats * state.pixelsPerSecond;
- dom.playbackIndicator.style.left = `${indicatorX}px`;
- dom.waveformPlaybackIndicator.style.left = `${indicatorX}px`;
+ const pausedBeats = timeToBeats(state.playbackOffset);
+ dom.playbackTime.textContent = `${state.playbackOffset.toFixed(2)}s (${pausedBeats.toFixed(2)}b)`;
+ const indicatorX = pausedBeats * state.pixelsPerSecond;
+ dom.playbackIndicator.style.left = dom.waveformPlaybackIndicator.style.left = `${indicatorX}px`;
if (wasPlaying) await startPlayback();
});
@@ -718,11 +870,19 @@
renderTimeline(); showMessage('New sequence added', 'success');
});
+ dom.addEffectBtn.addEventListener('click', () => {
+ if (!state.selectedItem || state.selectedItem.type !== 'sequence') return;
+ const seq = state.sequences[state.selectedItem.index];
+ seq.effects.push({ type: 'effect', className: 'Effect', startTime: 0, endTime: 10, priority: 0, priorityModifier: '+', args: '' });
+ seq._collapsed = false;
+ renderTimeline(); showMessage('New effect added', 'success');
+ });
+
dom.deleteBtn.addEventListener('click', () => {
if (!state.selectedItem) return;
if (state.selectedItem.type === 'sequence') state.sequences.splice(state.selectedItem.index, 1);
else if (state.selectedItem.type === 'effect') state.sequences[state.selectedItem.seqIndex].effects.splice(state.selectedItem.effectIndex, 1);
- state.selectedItem = null; dom.deleteBtn.disabled = true; renderTimeline(); updateProperties();
+ state.selectedItem = null; dom.deleteBtn.disabled = true; dom.addEffectBtn.disabled = true; renderTimeline(); updateProperties();
showMessage('Item deleted', 'success');
});
@@ -749,35 +909,46 @@
});
dom.showBeatsCheckbox.addEventListener('change', e => { state.showBeats = e.target.checked; renderTimeline(); });
+ dom.quantizeSelect.addEventListener('change', e => { state.quantizeUnit = parseFloat(e.target.value); });
dom.panelToggle.addEventListener('click', () => { dom.propertiesPanel.classList.add('collapsed'); dom.panelCollapseBtn.classList.add('visible'); dom.panelToggle.textContent = '▲ Expand'; });
dom.panelCollapseBtn.addEventListener('click', () => { dom.propertiesPanel.classList.remove('collapsed'); dom.panelCollapseBtn.classList.remove('visible'); dom.panelToggle.textContent = '▼ Collapse'; });
- dom.timeline.addEventListener('click', () => { state.selectedItem = null; dom.deleteBtn.disabled = true; renderTimeline(); updateProperties(); });
+ dom.timeline.addEventListener('click', () => { state.selectedItem = null; dom.deleteBtn.disabled = true; dom.addEffectBtn.disabled = true; renderTimeline(); updateProperties(); });
dom.timeline.addEventListener('dblclick', async e => {
if (e.target !== dom.timeline) return;
- const timelineRect = dom.timeline.getBoundingClientRect();
- const clickX = e.clientX - timelineRect.left + dom.timelineContent.scrollLeft;
- const clickBeats = clickX / state.pixelsPerSecond, clickTime = clickBeats * 60.0 / state.bpm;
+ const containerRect = dom.timelineContent.getBoundingClientRect();
+ const clickX = e.clientX - containerRect.left + dom.timelineContent.scrollLeft;
+ const clickBeats = clickX / state.pixelsPerSecond;
+ const clickTime = beatsToTime(clickBeats);
if (state.audioBuffer) {
const wasPlaying = state.isPlaying;
if (wasPlaying) stopPlayback(false);
state.playbackOffset = Math.max(0, Math.min(clickTime, state.audioDuration));
- const pausedBeats = state.playbackOffset * state.bpm / 60.0;
+ const pausedBeats = timeToBeats(state.playbackOffset);
dom.playbackTime.textContent = `${state.playbackOffset.toFixed(2)}s (${pausedBeats.toFixed(2)}b)`;
const indicatorX = pausedBeats * state.pixelsPerSecond;
- dom.playbackIndicator.style.left = `${indicatorX}px`; dom.waveformPlaybackIndicator.style.left = `${indicatorX}px`;
+ dom.playbackIndicator.style.left = dom.waveformPlaybackIndicator.style.left = `${indicatorX}px`;
if (wasPlaying) await startPlayback();
showMessage(`Seek to ${clickTime.toFixed(2)}s (${clickBeats.toFixed(2)}b)`, 'success');
}
});
- document.addEventListener('keydown', e => { if (e.code === 'Space' && state.audioBuffer) { e.preventDefault(); dom.playPauseBtn.click(); } });
+ document.addEventListener('keydown', e => {
+ if (e.code === 'Space' && state.audioBuffer) { e.preventDefault(); dom.playPauseBtn.click(); }
+ // Quantize hotkeys: 0=Off, 1=1beat, 2=1/2, 3=1/4, 4=1/8, 5=1/16, 6=1/32
+ const quantizeMap = { '0': '0', '1': '1', '2': '2', '3': '4', '4': '8', '5': '16', '6': '32' };
+ if (quantizeMap[e.key]) {
+ state.quantizeUnit = parseFloat(quantizeMap[e.key]);
+ dom.quantizeSelect.value = quantizeMap[e.key];
+ e.preventDefault();
+ }
+ });
dom.timelineContent.addEventListener('scroll', () => {
- if (dom.waveformCanvas) {
- dom.waveformCanvas.style.left = `-${dom.timelineContent.scrollLeft}px`;
- dom.waveformPlaybackIndicator.style.transform = `translateX(-${dom.timelineContent.scrollLeft}px)`;
- }
+ const scrollLeft = dom.timelineContent.scrollLeft;
+ dom.cpuLoadCanvas.style.left = `-${scrollLeft}px`;
+ dom.waveformCanvas.style.left = `-${scrollLeft}px`;
+ dom.waveformPlaybackIndicator.style.transform = `translateX(-${scrollLeft}px)`;
});
dom.timelineContent.addEventListener('wheel', e => {
diff --git a/training/export_cnn_v2_shader.py b/training/export_cnn_v2_shader.py
new file mode 100755
index 0000000..3c53ce2
--- /dev/null
+++ b/training/export_cnn_v2_shader.py
@@ -0,0 +1,225 @@
+#!/usr/bin/env python3
+"""CNN v2 Shader Export Script
+
+Converts PyTorch checkpoints to WGSL compute shaders with f16 weights.
+Generates one shader per layer with embedded weight arrays.
+"""
+
+import argparse
+import numpy as np
+import torch
+from pathlib import Path
+
+
+def export_layer_shader(layer_idx, weights, kernel_size, in_channels, out_channels,
+ output_dir, is_output_layer=False):
+ """Generate WGSL compute shader for a single CNN layer.
+
+ Args:
+ layer_idx: Layer index (0, 1, 2)
+ weights: (out_ch, in_ch, k, k) weight tensor
+ kernel_size: Kernel size (1, 3, 5, etc.)
+ in_channels: Input channels (includes 8D static features)
+ out_channels: Output channels
+ output_dir: Output directory path
+ is_output_layer: True if this is the final RGBA output layer
+ """
+ weights_flat = weights.flatten()
+ weights_f16 = weights_flat.astype(np.float16)
+ weights_f32 = weights_f16.astype(np.float32) # WGSL stores as f32 literals
+
+ # Format weights as WGSL array
+ weights_str = ",\n ".join(
+ ", ".join(f"{w:.6f}" for w in weights_f32[i:i+8])
+ for i in range(0, len(weights_f32), 8)
+ )
+
+ radius = kernel_size // 2
+ activation = "" if is_output_layer else "output[c] = max(0.0, sum); // ReLU"
+ if is_output_layer:
+ activation = "output[c] = clamp(sum, 0.0, 1.0); // Sigmoid approximation"
+
+ shader_code = f"""// CNN v2 Layer {layer_idx} - Auto-generated
+// Kernel: {kernel_size}×{kernel_size}, In: {in_channels}, Out: {out_channels}
+
+const KERNEL_SIZE: u32 = {kernel_size}u;
+const IN_CHANNELS: u32 = {in_channels}u;
+const OUT_CHANNELS: u32 = {out_channels}u;
+const KERNEL_RADIUS: i32 = {radius};
+
+// Weights quantized to float16 (stored as f32 in WGSL)
+const weights: array<f32, {len(weights_f32)}> = array(
+ {weights_str}
+);
+
+@group(0) @binding(0) var static_features: texture_2d<u32>;
+@group(0) @binding(1) var layer_input: texture_2d<u32>;
+@group(0) @binding(2) var output_tex: texture_storage_2d<rgba32uint, write>;
+
+fn unpack_static_features(coord: vec2<i32>) -> array<f32, 8> {{
+ let packed = textureLoad(static_features, coord, 0);
+ let v0 = unpack2x16float(packed.x);
+ let v1 = unpack2x16float(packed.y);
+ let v2 = unpack2x16float(packed.z);
+ let v3 = unpack2x16float(packed.w);
+ return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}}
+
+fn unpack_layer_channels(coord: vec2<i32>) -> array<f32, 8> {{
+ let packed = textureLoad(layer_input, coord, 0);
+ let v0 = unpack2x16float(packed.x);
+ let v1 = unpack2x16float(packed.y);
+ let v2 = unpack2x16float(packed.z);
+ let v3 = unpack2x16float(packed.w);
+ return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}}
+
+fn pack_channels(values: array<f32, 8>) -> vec4<u32> {{
+ return vec4<u32>(
+ pack2x16float(vec2<f32>(values[0], values[1])),
+ pack2x16float(vec2<f32>(values[2], values[3])),
+ pack2x16float(vec2<f32>(values[4], values[5])),
+ pack2x16float(vec2<f32>(values[6], values[7]))
+ );
+}}
+
+@compute @workgroup_size(8, 8)
+fn main(@builtin(global_invocation_id) id: vec3<u32>) {{
+ let coord = vec2<i32>(id.xy);
+ let dims = textureDimensions(static_features);
+
+ if (coord.x >= i32(dims.x) || coord.y >= i32(dims.y)) {{
+ return;
+ }}
+
+ // Load static features (always available)
+ let static_feat = unpack_static_features(coord);
+
+ // Convolution
+ var output: array<f32, OUT_CHANNELS>;
+ for (var c: u32 = 0u; c < OUT_CHANNELS; c++) {{
+ var sum: f32 = 0.0;
+
+ for (var ky: i32 = -KERNEL_RADIUS; ky <= KERNEL_RADIUS; ky++) {{
+ for (var kx: i32 = -KERNEL_RADIUS; kx <= KERNEL_RADIUS; kx++) {{
+ let sample_coord = coord + vec2<i32>(kx, ky);
+
+ // Border handling (clamp)
+ let clamped = vec2<i32>(
+ clamp(sample_coord.x, 0, i32(dims.x) - 1),
+ clamp(sample_coord.y, 0, i32(dims.y) - 1)
+ );
+
+ // Load input features
+ let static_local = unpack_static_features(clamped);
+ let layer_local = unpack_layer_channels(clamped);
+
+ // Weight index calculation
+ let ky_idx = u32(ky + KERNEL_RADIUS);
+ let kx_idx = u32(kx + KERNEL_RADIUS);
+ let spatial_idx = ky_idx * KERNEL_SIZE + kx_idx;
+
+ // Accumulate: static features (8D)
+ for (var i: u32 = 0u; i < 8u; i++) {{
+ let w_idx = c * IN_CHANNELS * KERNEL_SIZE * KERNEL_SIZE +
+ i * KERNEL_SIZE * KERNEL_SIZE + spatial_idx;
+ sum += weights[w_idx] * static_local[i];
+ }}
+
+ // Accumulate: layer input channels (if layer_idx > 0)
+ let prev_channels = IN_CHANNELS - 8u;
+ for (var i: u32 = 0u; i < prev_channels; i++) {{
+ let w_idx = c * IN_CHANNELS * KERNEL_SIZE * KERNEL_SIZE +
+ (8u + i) * KERNEL_SIZE * KERNEL_SIZE + spatial_idx;
+ sum += weights[w_idx] * layer_local[i];
+ }}
+ }}
+ }}
+
+ {activation}
+ }}
+
+ // Pack and store
+ textureStore(output_tex, coord, pack_channels(output));
+}}
+"""
+
+ output_path = Path(output_dir) / f"cnn_v2_layer_{layer_idx}.wgsl"
+ output_path.write_text(shader_code)
+ print(f" → {output_path}")
+
+
+def export_checkpoint(checkpoint_path, output_dir):
+ """Export PyTorch checkpoint to WGSL shaders.
+
+ Args:
+ checkpoint_path: Path to .pth checkpoint
+ output_dir: Output directory for shaders
+ """
+ print(f"Loading checkpoint: {checkpoint_path}")
+ checkpoint = torch.load(checkpoint_path, map_location='cpu')
+
+ state_dict = checkpoint['model_state_dict']
+ config = checkpoint['config']
+
+ print(f"Configuration:")
+ print(f" Kernels: {config['kernels']}")
+ print(f" Channels: {config['channels']}")
+ print(f" Features: {config['features']}")
+
+ output_dir = Path(output_dir)
+ output_dir.mkdir(parents=True, exist_ok=True)
+
+ print(f"\nExporting shaders to {output_dir}/")
+
+ # Layer 0: 8 → channels[0]
+ layer0_weights = state_dict['layer0.weight'].detach().numpy()
+ export_layer_shader(
+ layer_idx=0,
+ weights=layer0_weights,
+ kernel_size=config['kernels'][0],
+ in_channels=8,
+ out_channels=config['channels'][0],
+ output_dir=output_dir,
+ is_output_layer=False
+ )
+
+ # Layer 1: (8 + channels[0]) → channels[1]
+ layer1_weights = state_dict['layer1.weight'].detach().numpy()
+ export_layer_shader(
+ layer_idx=1,
+ weights=layer1_weights,
+ kernel_size=config['kernels'][1],
+ in_channels=8 + config['channels'][0],
+ out_channels=config['channels'][1],
+ output_dir=output_dir,
+ is_output_layer=False
+ )
+
+ # Layer 2: (8 + channels[1]) → 4 (RGBA)
+ layer2_weights = state_dict['layer2.weight'].detach().numpy()
+ export_layer_shader(
+ layer_idx=2,
+ weights=layer2_weights,
+ kernel_size=config['kernels'][2],
+ in_channels=8 + config['channels'][1],
+ out_channels=4,
+ output_dir=output_dir,
+ is_output_layer=True
+ )
+
+ print(f"\nExport complete! Generated 3 shader files.")
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Export CNN v2 checkpoint to WGSL shaders')
+ parser.add_argument('checkpoint', type=str, help='Path to checkpoint .pth file')
+ parser.add_argument('--output-dir', type=str, default='workspaces/main/shaders',
+ help='Output directory for shaders')
+
+ args = parser.parse_args()
+ export_checkpoint(args.checkpoint, args.output_dir)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/training/export_cnn_v2_weights.py b/training/export_cnn_v2_weights.py
new file mode 100755
index 0000000..723f572
--- /dev/null
+++ b/training/export_cnn_v2_weights.py
@@ -0,0 +1,275 @@
+#!/usr/bin/env python3
+"""CNN v2 Weight Export Script
+
+Converts PyTorch checkpoints to binary weight format for storage buffer.
+Exports single shader template + binary weights asset.
+"""
+
+import argparse
+import numpy as np
+import torch
+import struct
+from pathlib import Path
+
+
+def export_weights_binary(checkpoint_path, output_path):
+ """Export CNN v2 weights to binary format.
+
+ Binary format:
+ Header (16 bytes):
+ uint32 magic ('CNN2')
+ uint32 version (1)
+ uint32 num_layers
+ uint32 total_weights (f16 count)
+
+ LayerInfo × num_layers (20 bytes each):
+ uint32 kernel_size
+ uint32 in_channels
+ uint32 out_channels
+ uint32 weight_offset (f16 index)
+ uint32 weight_count
+
+ Weights (f16 array):
+ float16[] all_weights
+
+ Args:
+ checkpoint_path: Path to .pth checkpoint
+ output_path: Output .bin file path
+
+ Returns:
+ config dict for shader generation
+ """
+ print(f"Loading checkpoint: {checkpoint_path}")
+ checkpoint = torch.load(checkpoint_path, map_location='cpu')
+
+ state_dict = checkpoint['model_state_dict']
+ config = checkpoint['config']
+
+ print(f"Configuration:")
+ print(f" Kernels: {config['kernels']}")
+ print(f" Channels: {config['channels']}")
+
+ # Collect layer info
+ layers = []
+ all_weights = []
+ weight_offset = 0
+
+ # Layer 0: 8 → channels[0]
+ layer0_weights = state_dict['layer0.weight'].detach().numpy()
+ layer0_flat = layer0_weights.flatten()
+ layers.append({
+ 'kernel_size': config['kernels'][0],
+ 'in_channels': 8,
+ 'out_channels': config['channels'][0],
+ 'weight_offset': weight_offset,
+ 'weight_count': len(layer0_flat)
+ })
+ all_weights.extend(layer0_flat)
+ weight_offset += len(layer0_flat)
+
+ # Layer 1: (8 + channels[0]) → channels[1]
+ layer1_weights = state_dict['layer1.weight'].detach().numpy()
+ layer1_flat = layer1_weights.flatten()
+ layers.append({
+ 'kernel_size': config['kernels'][1],
+ 'in_channels': 8 + config['channels'][0],
+ 'out_channels': config['channels'][1],
+ 'weight_offset': weight_offset,
+ 'weight_count': len(layer1_flat)
+ })
+ all_weights.extend(layer1_flat)
+ weight_offset += len(layer1_flat)
+
+ # Layer 2: (8 + channels[1]) → 4 (RGBA output)
+ layer2_weights = state_dict['layer2.weight'].detach().numpy()
+ layer2_flat = layer2_weights.flatten()
+ layers.append({
+ 'kernel_size': config['kernels'][2],
+ 'in_channels': 8 + config['channels'][1],
+ 'out_channels': 4,
+ 'weight_offset': weight_offset,
+ 'weight_count': len(layer2_flat)
+ })
+ all_weights.extend(layer2_flat)
+ weight_offset += len(layer2_flat)
+
+ # Convert to f16
+ # TODO: Use 8-bit quantization for 2× size reduction
+ # Requires quantization-aware training (QAT) to maintain accuracy
+ all_weights_f16 = np.array(all_weights, dtype=np.float16)
+
+ # Pack f16 pairs into u32 for storage buffer
+ # Pad to even count if needed
+ if len(all_weights_f16) % 2 == 1:
+ all_weights_f16 = np.append(all_weights_f16, np.float16(0.0))
+
+ # Pack pairs using numpy view
+ weights_u32 = all_weights_f16.view(np.uint32)
+
+ print(f"\nWeight statistics:")
+ print(f" Total layers: {len(layers)}")
+ print(f" Total weights: {len(all_weights_f16)} (f16)")
+ print(f" Packed: {len(weights_u32)} u32")
+ print(f" Binary size: {16 + len(layers) * 20 + len(weights_u32) * 4} bytes")
+
+ # Write binary file
+ output_path = Path(output_path)
+ output_path.parent.mkdir(parents=True, exist_ok=True)
+
+ with open(output_path, 'wb') as f:
+ # Header (16 bytes)
+ f.write(struct.pack('<4sIII',
+ b'CNN2', # magic
+ 1, # version
+ len(layers), # num_layers
+ len(all_weights_f16))) # total_weights (f16 count)
+
+ # Layer info (20 bytes per layer)
+ for layer in layers:
+ f.write(struct.pack('<IIIII',
+ layer['kernel_size'],
+ layer['in_channels'],
+ layer['out_channels'],
+ layer['weight_offset'],
+ layer['weight_count']))
+
+ # Weights (u32 packed f16 pairs)
+ f.write(weights_u32.tobytes())
+
+ print(f" → {output_path}")
+
+ return {
+ 'num_layers': len(layers),
+ 'layers': layers
+ }
+
+
+def export_shader_template(config, output_dir):
+ """Generate single WGSL shader template with storage buffer binding.
+
+ Args:
+ config: Layer configuration from export_weights_binary()
+ output_dir: Output directory path
+ """
+ shader_code = """// CNN v2 Compute Shader - Storage Buffer Version
+// Reads weights from storage buffer, processes all layers in sequence
+
+struct CNNv2Header {
+ magic: u32, // 'CNN2'
+ version: u32, // 1
+ num_layers: u32, // Number of layers
+ total_weights: u32, // Total f16 weight count
+}
+
+struct CNNv2LayerInfo {
+ kernel_size: u32,
+ in_channels: u32,
+ out_channels: u32,
+ weight_offset: u32, // Offset in weights array
+ weight_count: u32,
+}
+
+@group(0) @binding(0) var static_features: texture_2d<u32>;
+@group(0) @binding(1) var layer_input: texture_2d<u32>;
+@group(0) @binding(2) var output_tex: texture_storage_2d<rgba32uint, write>;
+@group(0) @binding(3) var<storage, read> weights: array<u32>; // Packed f16 pairs
+
+fn unpack_static_features(coord: vec2<i32>) -> array<f32, 8> {
+ let packed = textureLoad(static_features, coord, 0);
+ let v0 = unpack2x16float(packed.x);
+ let v1 = unpack2x16float(packed.y);
+ let v2 = unpack2x16float(packed.z);
+ let v3 = unpack2x16float(packed.w);
+ return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}
+
+fn unpack_layer_channels(coord: vec2<i32>) -> array<f32, 8> {
+ let packed = textureLoad(layer_input, coord, 0);
+ let v0 = unpack2x16float(packed.x);
+ let v1 = unpack2x16float(packed.y);
+ let v2 = unpack2x16float(packed.z);
+ let v3 = unpack2x16float(packed.w);
+ return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}
+
+fn pack_channels(values: array<f32, 8>) -> vec4<u32> {
+ return vec4<u32>(
+ pack2x16float(vec2<f32>(values[0], values[1])),
+ pack2x16float(vec2<f32>(values[2], values[3])),
+ pack2x16float(vec2<f32>(values[4], values[5])),
+ pack2x16float(vec2<f32>(values[6], values[7]))
+ );
+}
+
+fn get_weight(idx: u32) -> f32 {
+ let pair_idx = idx / 2u;
+ let packed = weights[8u + pair_idx]; // Skip header (32 bytes = 8 u32)
+ let unpacked = unpack2x16float(packed);
+ return select(unpacked.y, unpacked.x, (idx & 1u) == 0u);
+}
+
+@compute @workgroup_size(8, 8)
+fn main(@builtin(global_invocation_id) id: vec3<u32>) {
+ let coord = vec2<i32>(id.xy);
+ let dims = textureDimensions(static_features);
+
+ if (coord.x >= i32(dims.x) || coord.y >= i32(dims.y)) {
+ return;
+ }
+
+ // Read header
+ let header_packed = weights[0]; // magic + version
+ let counts_packed = weights[1]; // num_layers + total_weights
+ let num_layers = counts_packed & 0xFFFFu;
+
+ // Load static features
+ let static_feat = unpack_static_features(coord);
+
+ // Process each layer (hardcoded for 3 layers for now)
+ // TODO: Dynamic layer loop when needed
+
+ // Example for layer 0 - expand to full multi-layer when tested
+ let layer_info_offset = 2u; // After header
+ let layer0_info_base = layer_info_offset;
+
+ // Read layer 0 info (5 u32 values = 20 bytes)
+ let kernel_size = weights[layer0_info_base];
+ let in_channels = weights[layer0_info_base + 1u];
+ let out_channels = weights[layer0_info_base + 2u];
+ let weight_offset = weights[layer0_info_base + 3u];
+
+ // Convolution (simplified - expand to full kernel loop)
+ var output: array<f32, 8>;
+ for (var c: u32 = 0u; c < min(out_channels, 8u); c++) {
+ output[c] = 0.0; // TODO: Actual convolution
+ }
+
+ textureStore(output_tex, coord, pack_channels(output));
+}
+"""
+
+ output_path = Path(output_dir) / "cnn_v2_compute.wgsl"
+ output_path.write_text(shader_code)
+ print(f" → {output_path}")
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Export CNN v2 weights to binary format')
+ parser.add_argument('checkpoint', type=str, help='Path to checkpoint .pth file')
+ parser.add_argument('--output-weights', type=str, default='workspaces/main/cnn_v2_weights.bin',
+ help='Output binary weights file')
+ parser.add_argument('--output-shader', type=str, default='workspaces/main/shaders',
+ help='Output directory for shader template')
+
+ args = parser.parse_args()
+
+ print("=== CNN v2 Weight Export ===\n")
+ config = export_weights_binary(args.checkpoint, args.output_weights)
+ print()
+ # Shader is manually maintained in cnn_v2_compute.wgsl
+ # export_shader_template(config, args.output_shader)
+ print("\nExport complete!")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/training/train_cnn_v2.py b/training/train_cnn_v2.py
new file mode 100755
index 0000000..758b044
--- /dev/null
+++ b/training/train_cnn_v2.py
@@ -0,0 +1,383 @@
+#!/usr/bin/env python3
+"""CNN v2 Training Script - Parametric Static Features
+
+Trains a multi-layer CNN with 7D static feature input:
+- RGBD (4D)
+- UV coordinates (2D)
+- sin(10*uv.x) position encoding (1D)
+- Bias dimension (1D, always 1.0)
+"""
+
+import argparse
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.data import Dataset, DataLoader
+from pathlib import Path
+from PIL import Image
+import time
+import cv2
+
+
+def compute_static_features(rgb, depth=None):
+ """Generate 7D static features + bias dimension.
+
+ Args:
+ rgb: (H, W, 3) RGB image [0, 1]
+ depth: (H, W) depth map [0, 1], optional
+
+ Returns:
+ (H, W, 8) static features tensor
+ """
+ h, w = rgb.shape[:2]
+
+ # RGBD channels
+ r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
+ d = depth if depth is not None else np.zeros((h, w), dtype=np.float32)
+
+ # UV coordinates (normalized [0, 1])
+ uv_x = np.linspace(0, 1, w)[None, :].repeat(h, axis=0).astype(np.float32)
+ uv_y = np.linspace(0, 1, h)[:, None].repeat(w, axis=1).astype(np.float32)
+
+ # Multi-frequency position encoding
+ sin10_x = np.sin(10.0 * uv_x).astype(np.float32)
+
+ # Bias dimension (always 1.0)
+ bias = np.ones((h, w), dtype=np.float32)
+
+ # Stack: [R, G, B, D, uv.x, uv.y, sin10_x, bias]
+ features = np.stack([r, g, b, d, uv_x, uv_y, sin10_x, bias], axis=-1)
+ return features
+
+
+class CNNv2(nn.Module):
+ """CNN v2 with parametric static features.
+
+ TODO: Add quantization-aware training (QAT) for 8-bit weights
+ - Use torch.quantization.QuantStub/DeQuantStub
+ - Train with fake quantization to adapt to 8-bit precision
+ - Target: ~1.6 KB weights (vs 3.2 KB with f16)
+ """
+
+ def __init__(self, kernels=[1, 3, 5], channels=[16, 8, 4]):
+ super().__init__()
+ self.kernels = kernels
+ self.channels = channels
+
+ # Input layer: 8D (7 features + bias) → channels[0]
+ self.layer0 = nn.Conv2d(8, channels[0], kernel_size=kernels[0],
+ padding=kernels[0]//2, bias=False)
+
+ # Inner layers: (8 + C_prev) → C_next
+ in_ch_1 = 8 + channels[0]
+ self.layer1 = nn.Conv2d(in_ch_1, channels[1], kernel_size=kernels[1],
+ padding=kernels[1]//2, bias=False)
+
+ # Output layer: (8 + C_last) → 4 (RGBA)
+ in_ch_2 = 8 + channels[1]
+ self.layer2 = nn.Conv2d(in_ch_2, 4, kernel_size=kernels[2],
+ padding=kernels[2]//2, bias=False)
+
+ def forward(self, static_features):
+ """Forward pass with static feature concatenation.
+
+ Args:
+ static_features: (B, 8, H, W) static features
+
+ Returns:
+ (B, 4, H, W) RGBA output [0, 1]
+ """
+ # Layer 0: Use full 8D static features
+ x0 = self.layer0(static_features)
+ x0 = F.relu(x0)
+
+ # Layer 1: Concatenate static + layer0 output
+ x1_input = torch.cat([static_features, x0], dim=1)
+ x1 = self.layer1(x1_input)
+ x1 = F.relu(x1)
+
+ # Layer 2: Concatenate static + layer1 output
+ x2_input = torch.cat([static_features, x1], dim=1)
+ output = self.layer2(x2_input)
+
+ return torch.sigmoid(output)
+
+
+class PatchDataset(Dataset):
+ """Patch-based dataset extracting salient regions from images."""
+
+ def __init__(self, input_dir, target_dir, patch_size=32, patches_per_image=64,
+ detector='harris'):
+ self.input_paths = sorted(Path(input_dir).glob("*.png"))
+ self.target_paths = sorted(Path(target_dir).glob("*.png"))
+ self.patch_size = patch_size
+ self.patches_per_image = patches_per_image
+ self.detector = detector
+
+ assert len(self.input_paths) == len(self.target_paths), \
+ f"Mismatch: {len(self.input_paths)} inputs vs {len(self.target_paths)} targets"
+
+ print(f"Found {len(self.input_paths)} image pairs")
+ print(f"Extracting {patches_per_image} patches per image using {detector} detector")
+ print(f"Total patches: {len(self.input_paths) * patches_per_image}")
+
+ def __len__(self):
+ return len(self.input_paths) * self.patches_per_image
+
+ def _detect_salient_points(self, img_array):
+ """Detect salient points on original image.
+
+ TODO: Add random sampling to training vectors
+ - In addition to salient points, incorporate randomly-located samples
+ - Default: 10% random samples, 90% salient points
+ - Prevents overfitting to only high-gradient regions
+ - Improves generalization across entire image
+ - Configurable via --random-sample-percent parameter
+ """
+ gray = cv2.cvtColor((img_array * 255).astype(np.uint8), cv2.COLOR_RGB2GRAY)
+ h, w = gray.shape
+ half_patch = self.patch_size // 2
+
+ corners = None
+ if self.detector == 'harris':
+ corners = cv2.goodFeaturesToTrack(gray, self.patches_per_image * 2,
+ qualityLevel=0.01, minDistance=half_patch)
+ elif self.detector == 'fast':
+ fast = cv2.FastFeatureDetector_create(threshold=20)
+ keypoints = fast.detect(gray, None)
+ corners = np.array([[kp.pt[0], kp.pt[1]] for kp in keypoints[:self.patches_per_image * 2]])
+ corners = corners.reshape(-1, 1, 2) if len(corners) > 0 else None
+ elif self.detector == 'shi-tomasi':
+ corners = cv2.goodFeaturesToTrack(gray, self.patches_per_image * 2,
+ qualityLevel=0.01, minDistance=half_patch,
+ useHarrisDetector=False)
+ elif self.detector == 'gradient':
+ grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
+ grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
+ gradient_mag = np.sqrt(grad_x**2 + grad_y**2)
+ threshold = np.percentile(gradient_mag, 95)
+ y_coords, x_coords = np.where(gradient_mag > threshold)
+
+ if len(x_coords) > self.patches_per_image * 2:
+ indices = np.random.choice(len(x_coords), self.patches_per_image * 2, replace=False)
+ x_coords = x_coords[indices]
+ y_coords = y_coords[indices]
+
+ corners = np.array([[x, y] for x, y in zip(x_coords, y_coords)])
+ corners = corners.reshape(-1, 1, 2) if len(corners) > 0 else None
+
+ # Fallback to random if no corners found
+ if corners is None or len(corners) == 0:
+ x_coords = np.random.randint(half_patch, w - half_patch, self.patches_per_image)
+ y_coords = np.random.randint(half_patch, h - half_patch, self.patches_per_image)
+ corners = np.array([[x, y] for x, y in zip(x_coords, y_coords)])
+ corners = corners.reshape(-1, 1, 2)
+
+ # Filter valid corners
+ valid_corners = []
+ for corner in corners:
+ x, y = int(corner[0][0]), int(corner[0][1])
+ if half_patch <= x < w - half_patch and half_patch <= y < h - half_patch:
+ valid_corners.append((x, y))
+ if len(valid_corners) >= self.patches_per_image:
+ break
+
+ # Fill with random if not enough
+ while len(valid_corners) < self.patches_per_image:
+ x = np.random.randint(half_patch, w - half_patch)
+ y = np.random.randint(half_patch, h - half_patch)
+ valid_corners.append((x, y))
+
+ return valid_corners
+
+ def __getitem__(self, idx):
+ img_idx = idx // self.patches_per_image
+ patch_idx = idx % self.patches_per_image
+
+ # Load original images (no resize)
+ input_img = np.array(Image.open(self.input_paths[img_idx]).convert('RGB')) / 255.0
+ target_img = np.array(Image.open(self.target_paths[img_idx]).convert('RGB')) / 255.0
+
+ # Detect salient points on original image
+ salient_points = self._detect_salient_points(input_img)
+ cx, cy = salient_points[patch_idx]
+
+ # Extract patch
+ half_patch = self.patch_size // 2
+ y1, y2 = cy - half_patch, cy + half_patch
+ x1, x2 = cx - half_patch, cx + half_patch
+
+ input_patch = input_img[y1:y2, x1:x2]
+ target_patch = target_img[y1:y2, x1:x2]
+
+ # Compute static features for patch
+ static_feat = compute_static_features(input_patch.astype(np.float32))
+
+ # Convert to tensors (C, H, W)
+ static_feat = torch.from_numpy(static_feat).permute(2, 0, 1)
+ target = torch.from_numpy(target_patch.astype(np.float32)).permute(2, 0, 1)
+
+ # Pad target to 4 channels (RGBA)
+ target = F.pad(target, (0, 0, 0, 0, 0, 1), value=1.0)
+
+ return static_feat, target
+
+
+class ImagePairDataset(Dataset):
+ """Dataset of input/target image pairs (full-image mode)."""
+
+ def __init__(self, input_dir, target_dir, target_size=(256, 256)):
+ self.input_paths = sorted(Path(input_dir).glob("*.png"))
+ self.target_paths = sorted(Path(target_dir).glob("*.png"))
+ self.target_size = target_size
+ assert len(self.input_paths) == len(self.target_paths), \
+ f"Mismatch: {len(self.input_paths)} inputs vs {len(self.target_paths)} targets"
+
+ def __len__(self):
+ return len(self.input_paths)
+
+ def __getitem__(self, idx):
+ # Load and resize images to fixed size
+ input_pil = Image.open(self.input_paths[idx]).convert('RGB')
+ target_pil = Image.open(self.target_paths[idx]).convert('RGB')
+
+ # Resize to target size
+ input_pil = input_pil.resize(self.target_size, Image.LANCZOS)
+ target_pil = target_pil.resize(self.target_size, Image.LANCZOS)
+
+ input_img = np.array(input_pil) / 255.0
+ target_img = np.array(target_pil) / 255.0
+
+ # Compute static features
+ static_feat = compute_static_features(input_img.astype(np.float32))
+
+ # Convert to tensors (C, H, W)
+ static_feat = torch.from_numpy(static_feat).permute(2, 0, 1)
+ target = torch.from_numpy(target_img.astype(np.float32)).permute(2, 0, 1)
+
+ # Pad target to 4 channels (RGBA)
+ target = F.pad(target, (0, 0, 0, 0, 0, 1), value=1.0)
+
+ return static_feat, target
+
+
+def train(args):
+ """Train CNN v2 model."""
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ print(f"Training on {device}")
+
+ # Create dataset (patch-based or full-image)
+ if args.full_image:
+ print(f"Mode: Full-image (resized to {args.image_size}x{args.image_size})")
+ target_size = (args.image_size, args.image_size)
+ dataset = ImagePairDataset(args.input, args.target, target_size=target_size)
+ dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)
+ else:
+ print(f"Mode: Patch-based ({args.patch_size}x{args.patch_size} patches)")
+ dataset = PatchDataset(args.input, args.target,
+ patch_size=args.patch_size,
+ patches_per_image=args.patches_per_image,
+ detector=args.detector)
+ dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)
+
+ # Create model
+ model = CNNv2(kernels=args.kernel_sizes, channels=args.channels).to(device)
+ total_params = sum(p.numel() for p in model.parameters())
+ print(f"Model: {args.channels} channels, {args.kernel_sizes} kernels, {total_params} weights")
+
+ # Optimizer and loss
+ optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
+ criterion = nn.MSELoss()
+
+ # Training loop
+ print(f"\nTraining for {args.epochs} epochs...")
+ start_time = time.time()
+
+ for epoch in range(1, args.epochs + 1):
+ model.train()
+ epoch_loss = 0.0
+
+ for static_feat, target in dataloader:
+ static_feat = static_feat.to(device)
+ target = target.to(device)
+
+ optimizer.zero_grad()
+ output = model(static_feat)
+ loss = criterion(output, target)
+ loss.backward()
+ optimizer.step()
+
+ epoch_loss += loss.item()
+
+ avg_loss = epoch_loss / len(dataloader)
+
+ # Print loss at every epoch (overwrite line with \r)
+ elapsed = time.time() - start_time
+ print(f"\rEpoch {epoch:4d}/{args.epochs} | Loss: {avg_loss:.6f} | Time: {elapsed:.1f}s", end='', flush=True)
+
+ # Save checkpoint
+ if args.checkpoint_every > 0 and epoch % args.checkpoint_every == 0:
+ print() # Newline before checkpoint message
+ checkpoint_path = Path(args.checkpoint_dir) / f"checkpoint_epoch_{epoch}.pth"
+ checkpoint_path.parent.mkdir(parents=True, exist_ok=True)
+ torch.save({
+ 'epoch': epoch,
+ 'model_state_dict': model.state_dict(),
+ 'optimizer_state_dict': optimizer.state_dict(),
+ 'loss': avg_loss,
+ 'config': {
+ 'kernels': args.kernel_sizes,
+ 'channels': args.channels,
+ 'features': ['R', 'G', 'B', 'D', 'uv.x', 'uv.y', 'sin10_x', 'bias']
+ }
+ }, checkpoint_path)
+ print(f" → Saved checkpoint: {checkpoint_path}")
+
+ print(f"\nTraining complete! Total time: {time.time() - start_time:.1f}s")
+ return model
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Train CNN v2 with parametric static features')
+ parser.add_argument('--input', type=str, required=True, help='Input images directory')
+ parser.add_argument('--target', type=str, required=True, help='Target images directory')
+
+ # Training mode
+ parser.add_argument('--full-image', action='store_true',
+ help='Use full-image mode (resize all images)')
+ parser.add_argument('--image-size', type=int, default=256,
+ help='Full-image mode: resize to this size (default: 256)')
+
+ # Patch-based mode (default)
+ parser.add_argument('--patch-size', type=int, default=32,
+ help='Patch mode: patch size (default: 32)')
+ parser.add_argument('--patches-per-image', type=int, default=64,
+ help='Patch mode: patches per image (default: 64)')
+ parser.add_argument('--detector', type=str, default='harris',
+ choices=['harris', 'fast', 'shi-tomasi', 'gradient'],
+ help='Patch mode: salient point detector (default: harris)')
+ # TODO: Add --random-sample-percent parameter (default: 10)
+ # Mix salient points with random samples for better generalization
+
+ # Model architecture
+ parser.add_argument('--kernel-sizes', type=int, nargs=3, default=[1, 3, 5],
+ help='Kernel sizes for 3 layers (default: 1 3 5)')
+ parser.add_argument('--channels', type=int, nargs=3, default=[16, 8, 4],
+ help='Output channels for 3 layers (default: 16 8 4)')
+
+ # Training parameters
+ parser.add_argument('--epochs', type=int, default=5000, help='Training epochs')
+ parser.add_argument('--batch-size', type=int, default=16, help='Batch size')
+ parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate')
+ parser.add_argument('--checkpoint-dir', type=str, default='checkpoints',
+ help='Checkpoint directory')
+ parser.add_argument('--checkpoint-every', type=int, default=1000,
+ help='Save checkpoint every N epochs (0 = disable)')
+
+ args = parser.parse_args()
+ train(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/validation_results/epoch_100_output.png b/validation_results/epoch_100_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_100_output.png
Binary files differ
diff --git a/validation_results/epoch_10_output.png b/validation_results/epoch_10_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_10_output.png
Binary files differ
diff --git a/validation_results/epoch_15_output.png b/validation_results/epoch_15_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_15_output.png
Binary files differ
diff --git a/validation_results/epoch_20_output.png b/validation_results/epoch_20_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_20_output.png
Binary files differ
diff --git a/validation_results/epoch_25_output.png b/validation_results/epoch_25_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_25_output.png
Binary files differ
diff --git a/validation_results/epoch_30_output.png b/validation_results/epoch_30_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_30_output.png
Binary files differ
diff --git a/validation_results/epoch_35_output.png b/validation_results/epoch_35_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_35_output.png
Binary files differ
diff --git a/validation_results/epoch_40_output.png b/validation_results/epoch_40_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_40_output.png
Binary files differ
diff --git a/validation_results/epoch_45_output.png b/validation_results/epoch_45_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_45_output.png
Binary files differ
diff --git a/validation_results/epoch_50_output.png b/validation_results/epoch_50_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_50_output.png
Binary files differ
diff --git a/validation_results/epoch_55_output.png b/validation_results/epoch_55_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_55_output.png
Binary files differ
diff --git a/validation_results/epoch_5_output.png b/validation_results/epoch_5_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_5_output.png
Binary files differ
diff --git a/validation_results/epoch_60_output.png b/validation_results/epoch_60_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_60_output.png
Binary files differ
diff --git a/validation_results/epoch_65_output.png b/validation_results/epoch_65_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_65_output.png
Binary files differ
diff --git a/validation_results/epoch_70_output.png b/validation_results/epoch_70_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_70_output.png
Binary files differ
diff --git a/validation_results/epoch_75_output.png b/validation_results/epoch_75_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_75_output.png
Binary files differ
diff --git a/validation_results/epoch_80_output.png b/validation_results/epoch_80_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_80_output.png
Binary files differ
diff --git a/validation_results/epoch_85_output.png b/validation_results/epoch_85_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_85_output.png
Binary files differ
diff --git a/validation_results/epoch_90_output.png b/validation_results/epoch_90_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_90_output.png
Binary files differ
diff --git a/validation_results/epoch_95_output.png b/validation_results/epoch_95_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/epoch_95_output.png
Binary files differ
diff --git a/validation_results/img_000_output.png b/validation_results/img_000_output.png
new file mode 100644
index 0000000..ea63a31
--- /dev/null
+++ b/validation_results/img_000_output.png
Binary files differ
diff --git a/validation_results/img_001_output.png b/validation_results/img_001_output.png
new file mode 100644
index 0000000..8c5a6e0
--- /dev/null
+++ b/validation_results/img_001_output.png
Binary files differ
diff --git a/validation_results/img_002_output.png b/validation_results/img_002_output.png
new file mode 100644
index 0000000..2044840
--- /dev/null
+++ b/validation_results/img_002_output.png
Binary files differ
diff --git a/validation_results/img_003_output.png b/validation_results/img_003_output.png
new file mode 100644
index 0000000..9caff40
--- /dev/null
+++ b/validation_results/img_003_output.png
Binary files differ
diff --git a/validation_results/img_004_output.png b/validation_results/img_004_output.png
new file mode 100644
index 0000000..b9fa2da
--- /dev/null
+++ b/validation_results/img_004_output.png
Binary files differ
diff --git a/validation_results/img_005_output.png b/validation_results/img_005_output.png
new file mode 100644
index 0000000..0b43ab0
--- /dev/null
+++ b/validation_results/img_005_output.png
Binary files differ
diff --git a/validation_results/img_006_output.png b/validation_results/img_006_output.png
new file mode 100644
index 0000000..26885d3
--- /dev/null
+++ b/validation_results/img_006_output.png
Binary files differ
diff --git a/validation_results/img_007_output.png b/validation_results/img_007_output.png
new file mode 100644
index 0000000..2a3a411
--- /dev/null
+++ b/validation_results/img_007_output.png
Binary files differ
diff --git a/workspaces/main/assets.txt b/workspaces/main/assets.txt
index 750bf15..4cbbb0f 100644
--- a/workspaces/main/assets.txt
+++ b/workspaces/main/assets.txt
@@ -43,6 +43,9 @@ SHADER_CNN_CONV5X5, NONE, shaders/cnn/cnn_conv5x5.wgsl, "CNN 5x5 Convolution"
SHADER_CNN_CONV7X7, NONE, shaders/cnn/cnn_conv7x7.wgsl, "CNN 7x7 Convolution"
SHADER_CNN_WEIGHTS, NONE, shaders/cnn/cnn_weights_generated.wgsl, "CNN Weights (Generated)"
SHADER_CNN_LAYER, NONE, shaders/cnn/cnn_layer.wgsl, "CNN Layer Shader"
+SHADER_CNN_V2_STATIC, NONE, shaders/cnn_v2_static.wgsl, "CNN v2 Static Features"
+SHADER_CNN_V2_COMPUTE, NONE, shaders/cnn_v2_compute.wgsl, "CNN v2 Compute (Storage Buffer)"
+WEIGHTS_CNN_V2, NONE, cnn_v2_weights.bin, "CNN v2 Binary Weights"
SHADER_SOLARIZE, NONE, shaders/solarize.wgsl, "Solarize Shader"
SHADER_DISTORT, NONE, shaders/distort.wgsl, "Distort Shader"
SHADER_CHROMA_ABERRATION, NONE, shaders/chroma_aberration.wgsl, "Chroma Aberration Shader"
diff --git a/workspaces/main/shaders/cnn_v2_compute.wgsl b/workspaces/main/shaders/cnn_v2_compute.wgsl
new file mode 100644
index 0000000..b19a692
--- /dev/null
+++ b/workspaces/main/shaders/cnn_v2_compute.wgsl
@@ -0,0 +1,137 @@
+// CNN v2 Compute Shader - Storage Buffer Version
+// Processes single layer per dispatch with weights from storage buffer
+// Multi-layer execution handled by C++ with ping-pong buffers
+
+// Push constants for layer parameters (passed per dispatch)
+struct LayerParams {
+ kernel_size: u32,
+ in_channels: u32,
+ out_channels: u32,
+ weight_offset: u32, // Offset in f16 units
+ is_output_layer: u32, // 1 if final layer (sigmoid), 0 otherwise (relu)
+}
+
+@group(0) @binding(0) var static_features: texture_2d<u32>; // 8-channel static features
+@group(0) @binding(1) var layer_input: texture_2d<u32>; // Previous layer output (8-channel packed)
+@group(0) @binding(2) var output_tex: texture_storage_2d<rgba32uint, write>; // Current layer output
+@group(0) @binding(3) var<storage, read> weights_buffer: array<u32>; // Packed f16 weights
+@group(0) @binding(4) var<uniform> params: LayerParams;
+
+fn unpack_static_features(coord: vec2<i32>) -> array<f32, 8> {
+ let packed = textureLoad(static_features, coord, 0);
+ let v0 = unpack2x16float(packed.x);
+ let v1 = unpack2x16float(packed.y);
+ let v2 = unpack2x16float(packed.z);
+ let v3 = unpack2x16float(packed.w);
+ return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}
+
+fn unpack_layer_channels(coord: vec2<i32>) -> array<f32, 8> {
+ let packed = textureLoad(layer_input, coord, 0);
+ let v0 = unpack2x16float(packed.x);
+ let v1 = unpack2x16float(packed.y);
+ let v2 = unpack2x16float(packed.z);
+ let v3 = unpack2x16float(packed.w);
+ return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}
+
+fn pack_channels(values: array<f32, 8>) -> vec4<u32> {
+ return vec4<u32>(
+ pack2x16float(vec2<f32>(values[0], values[1])),
+ pack2x16float(vec2<f32>(values[2], values[3])),
+ pack2x16float(vec2<f32>(values[4], values[5])),
+ pack2x16float(vec2<f32>(values[6], values[7]))
+ );
+}
+
+// Get weight from storage buffer (f16 packed as u32 pairs)
+// Buffer layout: [header: 4 u32][layer_info: N×5 u32][weights: packed f16]
+// TODO: Support 8-bit quantized weights (4× per u32) for 2× size reduction
+fn get_weight(idx: u32) -> f32 {
+ // Skip header (16 bytes = 4 u32) and layer info
+ // Weights start after header + layer_info, but weight_offset already accounts for this
+ let pair_idx = idx / 2u;
+ let packed = weights_buffer[pair_idx];
+ let unpacked = unpack2x16float(packed);
+ return select(unpacked.y, unpacked.x, (idx & 1u) == 0u);
+}
+
+@compute @workgroup_size(8, 8)
+fn main(@builtin(global_invocation_id) id: vec3<u32>) {
+ let coord = vec2<i32>(id.xy);
+ let dims = textureDimensions(static_features);
+
+ if (coord.x >= i32(dims.x) || coord.y >= i32(dims.y)) {
+ return;
+ }
+
+ let kernel_size = params.kernel_size;
+ let in_channels = params.in_channels;
+ let out_channels = params.out_channels;
+ let weight_offset = params.weight_offset;
+ let is_output = params.is_output_layer != 0u;
+
+ let kernel_radius = i32(kernel_size / 2u);
+
+ // Load static features (always 8D)
+ let static_feat = unpack_static_features(coord);
+
+ // Convolution per output channel
+ var output: array<f32, 8>;
+ for (var c: u32 = 0u; c < out_channels && c < 8u; c++) {
+ var sum: f32 = 0.0;
+
+ // Convolve over kernel
+ for (var ky: i32 = -kernel_radius; ky <= kernel_radius; ky++) {
+ for (var kx: i32 = -kernel_radius; kx <= kernel_radius; kx++) {
+ let sample_coord = coord + vec2<i32>(kx, ky);
+
+ // Border handling (clamp)
+ let clamped = vec2<i32>(
+ clamp(sample_coord.x, 0, i32(dims.x) - 1),
+ clamp(sample_coord.y, 0, i32(dims.y) - 1)
+ );
+
+ // Load input features at this spatial location
+ let static_local = unpack_static_features(clamped);
+ let layer_local = unpack_layer_channels(clamped);
+
+ // Weight index calculation
+ let ky_idx = u32(ky + kernel_radius);
+ let kx_idx = u32(kx + kernel_radius);
+ let spatial_idx = ky_idx * kernel_size + kx_idx;
+
+ // Accumulate: static features (always 8 channels)
+ for (var i: u32 = 0u; i < 8u; i++) {
+ let w_idx = weight_offset +
+ c * in_channels * kernel_size * kernel_size +
+ i * kernel_size * kernel_size + spatial_idx;
+ sum += get_weight(w_idx) * static_local[i];
+ }
+
+ // Accumulate: previous layer channels (in_channels - 8)
+ let prev_channels = in_channels - 8u;
+ for (var i: u32 = 0u; i < prev_channels && i < 8u; i++) {
+ let w_idx = weight_offset +
+ c * in_channels * kernel_size * kernel_size +
+ (8u + i) * kernel_size * kernel_size + spatial_idx;
+ sum += get_weight(w_idx) * layer_local[i];
+ }
+ }
+ }
+
+ // Activation
+ if (is_output) {
+ output[c] = clamp(sum, 0.0, 1.0); // Sigmoid approximation
+ } else {
+ output[c] = max(0.0, sum); // ReLU
+ }
+ }
+
+ // Zero unused channels
+ for (var c: u32 = out_channels; c < 8u; c++) {
+ output[c] = 0.0;
+ }
+
+ textureStore(output_tex, coord, pack_channels(output));
+}
diff --git a/workspaces/main/shaders/cnn_v2_layer_0.wgsl b/workspaces/main/shaders/cnn_v2_layer_0.wgsl
new file mode 100644
index 0000000..8e14957
--- /dev/null
+++ b/workspaces/main/shaders/cnn_v2_layer_0.wgsl
@@ -0,0 +1,174 @@
+// CNN v2 Layer 0 - Auto-generated
+// Kernel: 3×3, In: 8, Out: 8
+
+const KERNEL_SIZE: u32 = 3u;
+const IN_CHANNELS: u32 = 8u;
+const OUT_CHANNELS: u32 = 8u;
+const KERNEL_RADIUS: i32 = 1;
+
+// Weights quantized to float16 (stored as f32 in WGSL)
+const weights: array<f32, 576> = array(
+ 0.057281, -0.041962, 0.003933, 0.026459, 0.304199, 0.067261, 0.191895, 0.047455,
+ 0.074402, 0.201660, 0.158325, 0.150513, 0.219238, 0.260010, 0.319336, 0.208618,
+ 0.050201, 0.090210, 0.086853, 0.181152, 0.060486, 0.167847, 0.161499, 0.265869,
+ 0.163818, 0.100647, 0.243408, -0.008553, -0.010849, 0.046509, -0.060608, -0.022263,
+ 0.094360, -0.043854, -0.005329, -0.093262, 0.032349, 0.007259, 0.039948, -0.018692,
+ -0.000618, 0.052368, -0.038055, 0.118042, -0.084595, 0.044281, -0.107056, 0.089478,
+ -0.076477, 0.017441, 0.088135, 0.076721, -0.063965, 0.001612, 0.062469, 0.067505,
+ 0.035736, 0.115051, -0.117737, -0.076843, -0.008888, -0.002028, -0.061005, 0.081726,
+ 0.115051, -0.028183, 0.043213, -0.079285, -0.040314, -0.047699, -0.051575, -0.052521,
+ 0.071533, 0.084656, 0.051910, 0.090637, -0.104248, -0.066467, -0.032104, -0.006977,
+ 0.075439, -0.004841, 0.084656, -0.034698, 0.035675, -0.101929, -0.035034, -0.036804,
+ 0.069641, -0.026840, -0.017807, -0.088318, -0.125000, -0.042847, -0.003063, 0.007622,
+ 0.076416, 0.094971, -0.019058, 0.083496, -0.085205, 0.036285, -0.077209, 0.082458,
+ 0.056549, 0.038818, 0.092224, -0.002499, 0.069641, 0.097229, 0.069275, -0.111084,
+ -0.092041, -0.020462, -0.061279, -0.032196, -0.088623, 0.032227, -0.117004, -0.125854,
+ -0.015884, 0.093018, -0.070923, -0.117615, -0.081848, -0.115479, 0.033508, -0.026443,
+ -0.009850, -0.063232, 0.098328, -0.000984, 0.039886, -0.085754, -0.108826, 0.030258,
+ 0.091675, 0.024384, -0.118958, -0.077148, -0.122437, -0.002090, -0.089539, 0.096741,
+ 0.095337, 0.108582, -0.101807, 0.152222, 0.206177, 0.050323, -0.111450, -0.104431,
+ -0.037445, 0.276611, 0.244019, 0.171143, 0.131592, 0.056030, 0.141602, 0.014267,
+ -0.025955, -0.019730, 0.155884, 0.072144, 0.176636, -0.010117, 0.141724, 0.103027,
+ -0.253174, -0.229370, -0.105713, -0.005898, 0.075439, -0.002014, -0.010506, -0.108093,
+ -0.016724, 0.108215, 0.053589, -0.044586, 0.030396, -0.077759, 0.058594, -0.018463,
+ 0.027100, 0.030823, -0.026947, -0.014084, 0.121643, 0.116638, -0.010239, 0.106262,
+ -0.109070, -0.044281, -0.045319, -0.021942, 0.083923, 0.114929, 0.154541, 0.078186,
+ -0.047394, 0.007957, 0.099182, -0.030075, 0.103699, 0.080994, -0.085144, 0.047180,
+ 0.099792, 0.081116, 0.084961, 0.151123, 0.000963, 0.029221, 0.073181, 0.086609,
+ 0.149048, -0.052185, -0.158936, 0.146240, 0.020004, 0.063110, 0.111877, 0.037201,
+ 0.087585, 0.134277, 0.058258, -0.075256, 0.141357, 0.045776, 0.171753, 0.186035,
+ 0.093201, 0.202637, 0.018723, -0.047638, 0.072510, 0.132812, 0.182251, 0.191650,
+ 0.163818, 0.146362, 0.124451, -0.082214, 0.094482, -0.007275, 0.029099, -0.040314,
+ -0.017624, -0.018860, -0.108398, -0.111145, 0.058289, -0.106995, -0.091919, 0.069824,
+ -0.084045, -0.105957, 0.065002, -0.012894, 0.042297, -0.081299, -0.112976, 0.012314,
+ 0.015625, -0.100708, -0.039673, 0.092041, 0.037201, 0.089722, 0.064087, 0.000403,
+ 0.120667, -0.012238, -0.055695, 0.010620, -0.022110, -0.008751, 0.038605, 0.075256,
+ 0.041260, 0.128296, -0.072021, 0.020828, -0.072449, 0.051239, 0.034058, 0.122803,
+ -0.062103, 0.156006, -0.111633, 0.043671, 0.209229, 0.006088, 0.141968, 0.209961,
+ 0.122620, -0.004547, 0.107727, 0.115601, 0.003378, 0.375732, 0.068481, 0.037842,
+ 0.159546, -0.014450, 0.073425, 0.168701, -0.052643, 0.060699, 0.333740, 0.033905,
+ -0.060150, 0.053558, 0.165527, -0.052460, -0.047882, 0.080750, 0.110352, -0.057098,
+ 0.057983, -0.018692, 0.019714, -0.056427, -0.053314, -0.001763, 0.027039, 0.003395,
+ -0.131226, -0.068481, -0.086609, 0.065186, 0.084717, 0.036530, 0.043488, 0.013893,
+ -0.076660, 0.081177, 0.037476, -0.124084, -0.070312, -0.027130, -0.009331, -0.128174,
+ -0.075256, 0.098206, -0.046539, -0.045319, 0.083923, -0.050598, 0.063477, 0.007408,
+ 0.026794, -0.090454, -0.083435, 0.129761, 0.044556, 0.051849, 0.115662, 0.071167,
+ 0.004414, 0.048035, -0.148682, 0.098938, 0.200562, 0.111938, 0.208496, 0.200684,
+ -0.050262, 0.119568, 0.062988, 0.072083, 0.123779, 0.369629, 0.317627, 0.187622,
+ 0.157227, 0.183960, 0.031921, 0.142944, 0.080627, 0.218628, 0.264160, 0.156128,
+ 0.084961, 0.029343, 0.057617, 0.089233, 0.041138, 0.044373, 0.074707, 0.025818,
+ 0.113708, -0.045380, -0.114929, 0.104370, -0.012238, -0.174194, -0.169312, -0.070312,
+ -0.005863, 0.027481, 0.053345, -0.016006, -0.057953, -0.010284, 0.034241, -0.041077,
+ -0.002373, 0.034515, 0.078552, -0.066162, -0.035400, 0.072510, 0.060425, -0.037720,
+ -0.025955, 0.118042, -0.071777, 0.133667, 0.012192, -0.080933, 0.093445, 0.052826,
+ -0.037354, -0.052277, 0.124084, 0.029861, 0.137085, 0.053009, -0.034180, -0.011421,
+ 0.089233, 0.172729, 0.146118, 0.003944, 0.279541, 0.162842, 0.112244, 0.204956,
+ 0.059753, 0.117737, 0.330322, 0.185547, 0.194946, 0.404541, 0.274658, 0.177612,
+ 0.153320, 0.189575, 0.032257, 0.285400, 0.158203, 0.048035, 0.476562, 0.301025,
+ -0.179565, 0.160767, 0.137207, 0.102478, -0.060547, 0.060364, -0.091858, 0.064209,
+ 0.082642, 0.044769, -0.096436, -0.103699, -0.021683, 0.007221, -0.048737, 0.071228,
+ -0.069580, 0.066528, -0.122864, -0.008415, -0.094788, 0.040131, -0.091431, -0.029602,
+ -0.112488, -0.074158, -0.004898, -0.006721, -0.118286, -0.047516, 0.069519, 0.121521,
+ -0.004158, 0.167603, -0.092468, -0.049927, 0.006599, 0.097595, 0.064087, 0.083435,
+ 0.026993, 0.071411, 0.020538, 0.022293, 0.022858, 0.124268, 0.098999, -0.031738,
+ 0.019806, -0.087341, -0.096558, -0.099304, -0.113159, 0.021744, -0.080200, -0.056030,
+ 0.089661, -0.055115, -0.115845, -0.040222, 0.035919, 0.027832, 0.034668, 0.072632,
+ 0.071838, -0.081116, 0.050262, -0.037872, 0.054047, -0.096680, -0.102051, -0.044281,
+ 0.078796, -0.095154, -0.013229, 0.031555, -0.058533, -0.114441, -0.008530, 0.112732,
+ -0.057251, 0.096191, -0.008385, 0.052246, -0.016983, 0.092041, 0.013710, 0.012299,
+ -0.109497, 0.025604, -0.121643, -0.023819, 0.039490, -0.090088, -0.013145, -0.101562,
+ -0.115051, 0.050232, -0.047119, -0.055847, -0.017563, 0.103760, 0.116333, -0.061768,
+ -0.083069, -0.030319, 0.078003, -0.010124, 0.044617, -0.045868, 0.103638, 0.032379,
+ -0.093506, -0.048004, -0.022079, -0.004353, -0.048187, -0.025330, -0.070740, -0.014671
+);
+
+@group(0) @binding(0) var static_features: texture_2d<u32>;
+@group(0) @binding(1) var layer_input: texture_2d<u32>;
+@group(0) @binding(2) var output_tex: texture_storage_2d<rgba32uint, write>;
+
+fn unpack_static_features(coord: vec2<i32>) -> array<f32, 8> {
+ let packed = textureLoad(static_features, coord, 0);
+ let v0 = unpack2x16float(packed.x);
+ let v1 = unpack2x16float(packed.y);
+ let v2 = unpack2x16float(packed.z);
+ let v3 = unpack2x16float(packed.w);
+ return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}
+
+fn unpack_layer_channels(coord: vec2<i32>) -> array<f32, 8> {
+ let packed = textureLoad(layer_input, coord, 0);
+ let v0 = unpack2x16float(packed.x);
+ let v1 = unpack2x16float(packed.y);
+ let v2 = unpack2x16float(packed.z);
+ let v3 = unpack2x16float(packed.w);
+ return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}
+
+fn pack_channels(values: array<f32, 8>) -> vec4<u32> {
+ return vec4<u32>(
+ pack2x16float(vec2<f32>(values[0], values[1])),
+ pack2x16float(vec2<f32>(values[2], values[3])),
+ pack2x16float(vec2<f32>(values[4], values[5])),
+ pack2x16float(vec2<f32>(values[6], values[7]))
+ );
+}
+
+@compute @workgroup_size(8, 8)
+fn main(@builtin(global_invocation_id) id: vec3<u32>) {
+ let coord = vec2<i32>(id.xy);
+ let dims = textureDimensions(static_features);
+
+ if (coord.x >= i32(dims.x) || coord.y >= i32(dims.y)) {
+ return;
+ }
+
+ // Load static features (always available)
+ let static_feat = unpack_static_features(coord);
+
+ // Convolution
+ var output: array<f32, OUT_CHANNELS>;
+ for (var c: u32 = 0u; c < OUT_CHANNELS; c++) {
+ var sum: f32 = 0.0;
+
+ for (var ky: i32 = -KERNEL_RADIUS; ky <= KERNEL_RADIUS; ky++) {
+ for (var kx: i32 = -KERNEL_RADIUS; kx <= KERNEL_RADIUS; kx++) {
+ let sample_coord = coord + vec2<i32>(kx, ky);
+
+ // Border handling (clamp)
+ let clamped = vec2<i32>(
+ clamp(sample_coord.x, 0, i32(dims.x) - 1),
+ clamp(sample_coord.y, 0, i32(dims.y) - 1)
+ );
+
+ // Load input features
+ let static_local = unpack_static_features(clamped);
+ let layer_local = unpack_layer_channels(clamped);
+
+ // Weight index calculation
+ let ky_idx = u32(ky + KERNEL_RADIUS);
+ let kx_idx = u32(kx + KERNEL_RADIUS);
+ let spatial_idx = ky_idx * KERNEL_SIZE + kx_idx;
+
+ // Accumulate: static features (8D)
+ for (var i: u32 = 0u; i < 8u; i++) {
+ let w_idx = c * IN_CHANNELS * KERNEL_SIZE * KERNEL_SIZE +
+ i * KERNEL_SIZE * KERNEL_SIZE + spatial_idx;
+ sum += weights[w_idx] * static_local[i];
+ }
+
+ // Accumulate: layer input channels (if layer_idx > 0)
+ let prev_channels = IN_CHANNELS - 8u;
+ for (var i: u32 = 0u; i < prev_channels; i++) {
+ let w_idx = c * IN_CHANNELS * KERNEL_SIZE * KERNEL_SIZE +
+ (8u + i) * KERNEL_SIZE * KERNEL_SIZE + spatial_idx;
+ sum += weights[w_idx] * layer_local[i];
+ }
+ }
+ }
+
+ output[c] = max(0.0, sum); // ReLU
+ }
+
+ // Pack and store
+ textureStore(output_tex, coord, pack_channels(output));
+}
diff --git a/workspaces/main/shaders/cnn_v2_layer_1.wgsl b/workspaces/main/shaders/cnn_v2_layer_1.wgsl
new file mode 100644
index 0000000..f490d13
--- /dev/null
+++ b/workspaces/main/shaders/cnn_v2_layer_1.wgsl
@@ -0,0 +1,174 @@
+// CNN v2 Layer 1 - Auto-generated
+// Kernel: 3×3, In: 16, Out: 4
+
+const KERNEL_SIZE: u32 = 3u;
+const IN_CHANNELS: u32 = 16u;
+const OUT_CHANNELS: u32 = 4u;
+const KERNEL_RADIUS: i32 = 1;
+
+// Weights quantized to float16 (stored as f32 in WGSL)
+const weights: array<f32, 576> = array(
+ 0.337402, 0.638672, -0.481201, 0.699707, 1.127930, -0.018280, -0.062195, 0.148682,
+ -0.655273, 0.448975, 0.969238, -0.280762, 0.817383, 1.271484, 0.421387, -0.163696,
+ 0.305664, -0.454834, 0.354004, 0.932617, -0.411377, 0.581543, 1.263672, 0.422363,
+ -0.380371, 0.152588, -0.668945, -0.063782, 0.060730, 0.022018, -0.075195, -0.049286,
+ 0.068542, 0.057343, -0.009773, 0.006344, -0.080872, -0.179932, -0.297119, 0.098328,
+ 0.061951, -0.088989, 0.047913, 0.093628, -0.091858, -0.068298, 0.102600, -0.044067,
+ -0.054230, -0.031799, 0.050934, -0.300049, -0.202637, -0.203613, -0.294189, -0.361084,
+ 0.277344, -0.213257, -0.239624, 0.193237, -0.215210, -0.295166, 0.298828, -0.065369,
+ 0.148926, 0.024963, 0.272705, 0.368164, 0.173096, 0.061279, 0.291260, 0.151611,
+ 0.411133, 0.216431, -0.179932, 0.506348, 0.319580, 0.059875, -0.134399, -0.150635,
+ -0.275391, 0.029480, 0.115417, 0.063782, 0.018723, -0.073364, -0.019653, 0.066467,
+ -0.086731, 0.113220, 0.110535, 0.011940, -0.094727, 0.262207, 0.180298, 0.141357,
+ 0.249634, 0.199585, 0.120605, 0.403809, 0.242676, -0.028442, 0.251953, 0.130737,
+ 0.152832, -0.306396, -0.324951, -0.176514, 0.161133, 0.333252, -0.195068, 0.250244,
+ 0.569824, 0.011223, -0.186035, 0.048279, -0.325439, 0.272217, 0.144043, -0.142700,
+ 0.447754, 0.434082, 0.124878, -0.157471, -0.120422, -0.281494, 0.338135, 0.266113,
+ -0.301514, 0.424805, 0.541504, -0.195679, 0.054962, 0.061798, -0.323975, 0.056732,
+ 0.072571, -0.087341, 0.052856, -0.057220, 0.023270, 0.071472, 0.014038, 0.083008,
+ -0.050659, 0.020111, 0.035614, -0.038086, -0.042786, 0.060242, -0.050079, -0.044403,
+ -0.059631, 0.075500, 0.056000, 0.010910, -0.064026, -0.016037, -0.050720, 0.050171,
+ -0.075256, -0.014183, 0.047058, -0.086731, 0.027939, 0.063232, -0.024597, -0.039551,
+ 0.000622, -0.048370, -0.001906, 0.058868, -0.074524, 0.019714, -0.036011, 0.028442,
+ 0.009766, -0.060577, -0.007416, -0.014381, 0.002317, -0.023483, 0.014313, 0.057434,
+ 0.063110, 0.030350, -0.027557, 0.023270, 0.055115, -0.003502, 0.012268, -0.054993,
+ -0.084961, -0.022736, 0.076233, 0.027573, -0.068787, -0.036987, -0.018539, -0.049347,
+ 0.032227, 0.033081, 0.050476, 0.043030, 0.023636, -0.039764, -0.018600, 0.073669,
+ 0.032166, -0.047119, -0.033325, -0.038605, 0.034119, -0.076843, 0.005863, -0.049103,
+ 0.065796, -0.056458, 0.054504, -0.008354, -0.018509, -0.057739, -0.075684, -0.053680,
+ 0.036804, 0.020721, -0.056183, 0.021774, -0.043884, 0.033661, -0.029633, 0.027374,
+ -0.087891, 0.030853, -0.040070, 0.013733, -0.082275, -0.072571, -0.055756, 0.002262,
+ 0.004421, -0.012169, -0.078064, -0.063904, -0.051758, -0.033264, -0.059265, -0.062256,
+ 0.063782, -0.088745, -0.026855, 0.062805, -0.036591, 0.037659, -0.012970, 0.025513,
+ -0.000908, 0.027084, 0.001842, -0.080750, -0.049713, -0.069397, -0.046448, -0.031006,
+ 0.012543, 0.009369, -0.080139, -0.034363, 0.003361, -0.052704, 0.041870, 0.059265,
+ 0.029938, 0.000138, 0.049896, 0.068787, 0.040405, -0.073608, 0.047668, 0.015320,
+ -0.033203, -0.016983, 0.034149, -0.010323, 0.029877, 0.078003, -0.054688, -0.021805,
+ -0.019409, 0.010284, 0.089172, -0.050385, 0.024857, -0.041992, 0.016602, 0.082397,
+ 0.081970, 0.096375, 0.060760, -0.006603, 0.029907, 0.012131, 0.104980, 0.034210,
+ 0.074707, -0.028320, -0.020248, 0.114868, -0.036957, 0.040192, 0.002888, 0.034973,
+ -0.038635, -0.018204, -0.058563, 0.029419, 0.013344, 0.027618, 0.073669, -0.038361,
+ 0.080933, 0.044586, -0.013214, 0.022675, 0.084351, 0.081848, 0.027328, 0.043915,
+ 0.040771, 0.078918, 0.054443, -0.049652, 0.073547, 0.103882, 0.065918, 0.070923,
+ -0.037476, -0.011215, -0.021408, 0.094727, 0.042450, 0.032806, -0.064026, 0.023941,
+ 0.011780, 0.041260, -0.038818, 0.079163, 0.079468, 0.053680, 0.047150, 0.003571,
+ 0.054840, 0.045929, -0.041382, -0.033539, 0.069153, 0.046234, 0.119263, -0.006340,
+ -0.050323, 0.030212, 0.069092, 0.045441, 0.096313, -0.024628, -0.088745, 0.009033,
+ -0.016830, 0.028534, -0.042755, -0.031921, 0.013611, -0.029251, -0.051483, -0.005848,
+ -0.032837, -0.058136, 0.075989, -0.008125, 0.108765, -0.004745, -0.003422, 0.079590,
+ 0.090515, -0.019196, -0.006786, 0.059479, -0.041168, 0.093445, 0.075439, -0.025055,
+ 0.067139, 0.011734, 0.031586, 0.029587, 0.098267, 0.025848, 0.095276, 0.003189,
+ 0.105408, 0.018799, -0.102478, 0.033813, 0.004272, 0.020477, 0.033142, 0.009727,
+ -0.021393, 0.120300, 0.088684, -0.037842, -0.094177, 0.017944, 0.020126, -0.002304,
+ -0.016006, 0.018112, 0.072693, -0.072021, -0.171265, -0.053528, -0.093201, 0.024124,
+ -0.050476, -0.023422, -0.071167, 0.046478, 0.034607, 0.076904, 0.013077, -0.082031,
+ 0.091858, -0.001575, 0.083801, 0.078003, 0.019119, -0.004967, 0.027298, 0.027740,
+ 0.032623, 0.048370, 0.029099, 0.093201, 0.049957, -0.007191, 0.059631, 0.008659,
+ 0.042725, -0.009369, 0.089417, 0.074951, -0.024704, 0.005344, 0.123840, 0.080322,
+ 0.096375, 0.070312, -0.010399, 0.033203, -0.009743, -0.030045, -0.039520, 0.042023,
+ -0.017441, 0.073486, 0.049500, -0.039734, 0.009811, 0.093262, -0.069641, 0.099365,
+ -0.010414, 0.048859, 0.099182, -0.007256, -0.023941, -0.021393, -0.005703, 0.025055,
+ 0.054535, 0.093384, -0.033661, 0.073242, 0.055023, 0.037170, -0.009300, 0.048615,
+ 0.019150, 0.019409, -0.080688, -0.050049, 0.104126, -0.023193, 0.044708, 0.111816,
+ 0.061584, 0.042755, -0.013863, -0.008385, -0.039703, 0.070618, -0.016922, -0.040833,
+ 0.051178, -0.060333, -0.004368, -0.009827, 0.051544, 0.072083, 0.068176, 0.148071,
+ 0.159424, 0.017578, 0.089905, -0.006794, 0.066101, -0.051117, 0.088684, -0.002989,
+ -0.066895, 0.089844, 0.012131, -0.020203, 0.011230, 0.000327, 0.073669, 0.060669,
+ 0.091064, 0.075989, 0.051971, 0.045044, 0.033875, 0.040466, -0.029449, 0.128418,
+ -0.000229, -0.026901, 0.052063, 0.000995, -0.032532, 0.105896, -0.001241, 0.114075,
+ 0.047607, 0.090332, 0.063660, 0.016495, 0.124817, 0.090942, 0.021545, 0.007164,
+ 0.074890, 0.118347, 0.047394, 0.052856, 0.104980, 0.009384, 0.034363, 0.019073,
+ 0.072388, -0.013313, 0.119141, 0.021255, 0.103210, 0.058319, 0.186035, -0.010818,
+ 0.037109, -0.044037, -0.075989, -0.001281, 0.017899, 0.030701, -0.080261, 0.082703
+);
+
+@group(0) @binding(0) var static_features: texture_2d<u32>;
+@group(0) @binding(1) var layer_input: texture_2d<u32>;
+@group(0) @binding(2) var output_tex: texture_storage_2d<rgba32uint, write>;
+
+fn unpack_static_features(coord: vec2<i32>) -> array<f32, 8> {
+ let packed = textureLoad(static_features, coord, 0);
+ let v0 = unpack2x16float(packed.x);
+ let v1 = unpack2x16float(packed.y);
+ let v2 = unpack2x16float(packed.z);
+ let v3 = unpack2x16float(packed.w);
+ return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}
+
+fn unpack_layer_channels(coord: vec2<i32>) -> array<f32, 8> {
+ let packed = textureLoad(layer_input, coord, 0);
+ let v0 = unpack2x16float(packed.x);
+ let v1 = unpack2x16float(packed.y);
+ let v2 = unpack2x16float(packed.z);
+ let v3 = unpack2x16float(packed.w);
+ return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}
+
+fn pack_channels(values: array<f32, 8>) -> vec4<u32> {
+ return vec4<u32>(
+ pack2x16float(vec2<f32>(values[0], values[1])),
+ pack2x16float(vec2<f32>(values[2], values[3])),
+ pack2x16float(vec2<f32>(values[4], values[5])),
+ pack2x16float(vec2<f32>(values[6], values[7]))
+ );
+}
+
+@compute @workgroup_size(8, 8)
+fn main(@builtin(global_invocation_id) id: vec3<u32>) {
+ let coord = vec2<i32>(id.xy);
+ let dims = textureDimensions(static_features);
+
+ if (coord.x >= i32(dims.x) || coord.y >= i32(dims.y)) {
+ return;
+ }
+
+ // Load static features (always available)
+ let static_feat = unpack_static_features(coord);
+
+ // Convolution
+ var output: array<f32, OUT_CHANNELS>;
+ for (var c: u32 = 0u; c < OUT_CHANNELS; c++) {
+ var sum: f32 = 0.0;
+
+ for (var ky: i32 = -KERNEL_RADIUS; ky <= KERNEL_RADIUS; ky++) {
+ for (var kx: i32 = -KERNEL_RADIUS; kx <= KERNEL_RADIUS; kx++) {
+ let sample_coord = coord + vec2<i32>(kx, ky);
+
+ // Border handling (clamp)
+ let clamped = vec2<i32>(
+ clamp(sample_coord.x, 0, i32(dims.x) - 1),
+ clamp(sample_coord.y, 0, i32(dims.y) - 1)
+ );
+
+ // Load input features
+ let static_local = unpack_static_features(clamped);
+ let layer_local = unpack_layer_channels(clamped);
+
+ // Weight index calculation
+ let ky_idx = u32(ky + KERNEL_RADIUS);
+ let kx_idx = u32(kx + KERNEL_RADIUS);
+ let spatial_idx = ky_idx * KERNEL_SIZE + kx_idx;
+
+ // Accumulate: static features (8D)
+ for (var i: u32 = 0u; i < 8u; i++) {
+ let w_idx = c * IN_CHANNELS * KERNEL_SIZE * KERNEL_SIZE +
+ i * KERNEL_SIZE * KERNEL_SIZE + spatial_idx;
+ sum += weights[w_idx] * static_local[i];
+ }
+
+ // Accumulate: layer input channels (if layer_idx > 0)
+ let prev_channels = IN_CHANNELS - 8u;
+ for (var i: u32 = 0u; i < prev_channels; i++) {
+ let w_idx = c * IN_CHANNELS * KERNEL_SIZE * KERNEL_SIZE +
+ (8u + i) * KERNEL_SIZE * KERNEL_SIZE + spatial_idx;
+ sum += weights[w_idx] * layer_local[i];
+ }
+ }
+ }
+
+ output[c] = max(0.0, sum); // ReLU
+ }
+
+ // Pack and store
+ textureStore(output_tex, coord, pack_channels(output));
+}
diff --git a/workspaces/main/shaders/cnn_v2_layer_2.wgsl b/workspaces/main/shaders/cnn_v2_layer_2.wgsl
new file mode 100644
index 0000000..2f9836a
--- /dev/null
+++ b/workspaces/main/shaders/cnn_v2_layer_2.wgsl
@@ -0,0 +1,156 @@
+// CNN v2 Layer 2 - Auto-generated
+// Kernel: 3×3, In: 12, Out: 4
+
+const KERNEL_SIZE: u32 = 3u;
+const IN_CHANNELS: u32 = 12u;
+const OUT_CHANNELS: u32 = 4u;
+const KERNEL_RADIUS: i32 = 1;
+
+// Weights quantized to float16 (stored as f32 in WGSL)
+const weights: array<f32, 432> = array(
+ 0.030212, -0.041351, 0.053864, -0.025635, 0.099976, -0.016830, -0.068665, 0.112488,
+ -0.069824, 0.030197, 0.020142, 0.101807, 0.061920, 0.022415, -0.025864, -0.056366,
+ 0.085571, -0.053650, 0.109802, 0.129272, 0.023438, 0.087341, 0.066284, 0.037079,
+ -0.067566, 0.021530, -0.046814, 0.029343, -0.028534, 0.047150, -0.079346, -0.022675,
+ -0.019669, -0.024185, 0.029587, 0.068970, 0.108826, 0.050598, -0.072144, 0.083008,
+ -0.002201, 0.006275, 0.056396, 0.001884, 0.097168, -0.028503, -0.002499, 0.008919,
+ -0.013771, -0.017502, -0.033478, 0.105530, 0.032898, 0.068726, -0.036285, -0.021011,
+ -0.018250, 0.073914, 0.024277, 0.061066, 0.008682, -0.022766, 0.074219, 0.094421,
+ 0.050903, 0.072571, 0.117493, -0.033234, 0.067993, -0.008049, 0.046997, -0.064209,
+ -0.381104, 0.107788, -0.213867, 0.145142, 0.514160, 0.407715, -0.317871, 0.249023,
+ 0.055634, -0.006294, -0.067444, 0.025131, 0.012939, -0.074158, -0.013741, -0.033020,
+ 0.026871, -0.007671, 0.089661, -0.003016, 0.029007, -0.038483, 0.045044, 0.104065,
+ 0.077148, 0.092468, -0.090027, -0.048126, 0.096863, -0.088013, 0.009483, 0.075012,
+ -0.076843, -0.085449, -0.066040, 0.019165, -0.019958, 0.083496, 0.069275, -0.019714,
+ 0.027786, -0.042389, 0.054718, 0.010635, -0.071777, 0.029282, -0.003605, 0.113770,
+ 0.080994, 0.106079, 0.047333, -0.013733, 0.034760, 0.099365, -0.020813, 0.095886,
+ 0.052490, -0.049194, 0.047394, 0.072510, -0.030930, -0.003782, -0.038025, -0.019318,
+ -0.047852, -0.043915, 0.026810, -0.041138, 0.038422, 0.009605, -0.080688, -0.019653,
+ 0.075256, -0.013817, -0.022400, 0.050629, 0.048462, 0.072998, -0.009109, 0.070923,
+ 0.079895, 0.071350, 0.002869, 0.081543, 0.037231, 0.020767, -0.017929, 0.042328,
+ -0.075134, -0.010681, -0.009079, 0.057007, -0.040253, -0.025574, -0.041534, 0.105835,
+ -0.039703, 0.032104, 0.076050, 0.070923, -0.013046, -0.054108, -0.024582, -0.033997,
+ 0.092285, 0.000525, 0.114685, 0.036926, -0.419434, 0.087891, -0.187866, 0.128906,
+ 0.665527, 0.268311, -0.337891, 0.195557, 0.140503, 0.014465, -0.043671, 0.031677,
+ 0.073059, 0.085144, 0.014290, -0.046967, 0.033356, 0.004177, 0.102844, 0.015259,
+ 0.026627, -0.005032, 0.111694, -0.010590, 0.029816, 0.108154, -0.072327, 0.056213,
+ 0.022903, 0.053772, 0.084473, -0.059845, -0.032776, -0.000015, -0.093872, -0.085815,
+ 0.081604, 0.069336, 0.034149, -0.067322, -0.020859, 0.120911, 0.077209, -0.016388,
+ 0.050140, -0.045563, -0.046326, 0.032623, -0.005009, 0.008003, 0.109192, 0.086548,
+ 0.096558, 0.118530, 0.035034, 0.110352, -0.041748, 0.009178, 0.049957, 0.084839,
+ 0.042053, -0.069153, -0.024796, -0.094604, -0.047028, -0.053802, 0.024979, 0.049591,
+ -0.016373, -0.047607, -0.008797, -0.058868, 0.107178, 0.055695, 0.092407, 0.092346,
+ 0.053894, 0.054657, -0.039703, -0.073792, 0.041779, -0.044159, 0.099182, 0.037109,
+ 0.097778, 0.098206, -0.057831, -0.054016, -0.068604, -0.061584, -0.054382, 0.005268,
+ 0.096008, -0.007118, -0.063049, 0.059113, 0.076904, 0.045288, -0.055695, -0.052612,
+ -0.022110, 0.049103, 0.095276, 0.014572, 0.064819, 0.014671, 0.029800, 0.066284,
+ -0.383301, 0.071838, -0.207275, 0.099365, 0.640137, 0.393311, -0.334229, 0.275391,
+ -0.013977, -0.025269, -0.007065, -0.033478, -0.017349, 0.026764, 0.005192, 0.093384,
+ 0.014313, 0.018906, 0.006962, 0.094849, 0.005390, 0.101624, -0.041199, 0.026245,
+ 0.027588, 0.062408, 0.033356, -0.010826, 0.067993, -0.054199, 0.076416, 0.023315,
+ -0.002886, -0.112061, -0.041473, -0.012703, 0.016022, 0.010506, -0.021362, -0.037750,
+ 0.062927, 0.061920, 0.038177, -0.037201, -0.011620, 0.014015, -0.062164, -0.045441,
+ -0.063416, -0.040100, 0.035950, 0.045563, -0.017227, -0.060547, -0.017593, 0.111877,
+ 0.121521, 0.073853, 0.023331, -0.012428, 0.018478, -0.010948, 0.030716, 0.043427,
+ 0.003117, -0.069092, 0.038361, -0.053497, 0.039154, -0.085754, 0.012642, -0.051208,
+ 0.022934, 0.127197, 0.117920, 0.074036, 0.083313, -0.061951, 0.079224, 0.091248,
+ 0.009132, 0.069946, 0.123474, 0.130127, 0.118835, 0.020874, -0.045380, -0.000111,
+ 0.111206, 0.054688, 0.008995, 0.085693, 0.005562, 0.103088, -0.034698, 0.119934,
+ -0.067200, 0.065430, -0.021942, 0.089783, 0.033112, -0.025467, 0.040161, -0.052155,
+ -0.048920, 0.031250, 0.112549, 0.122192, 0.126587, 0.180908, 0.194946, 0.121704,
+ 0.217529, 0.224243, 0.269287, 0.222656, 0.288086, 0.035492, 0.066711, -0.046600,
+ 0.085144, 0.013855, -0.065979, -0.083252, -0.058289, 0.104126, 0.013702, -0.018188,
+ 0.036591, 0.099854, 0.056061, 0.151855, 0.062134, 0.133789, 0.084045, 0.095825,
+ 0.036987, 0.022308, 0.070923, 0.031036, 0.101868, 0.062347, 0.141235, 0.066650
+);
+
+@group(0) @binding(0) var static_features: texture_2d<u32>;
+@group(0) @binding(1) var layer_input: texture_2d<u32>;
+@group(0) @binding(2) var output_tex: texture_storage_2d<rgba32uint, write>;
+
+fn unpack_static_features(coord: vec2<i32>) -> array<f32, 8> {
+ let packed = textureLoad(static_features, coord, 0);
+ let v0 = unpack2x16float(packed.x);
+ let v1 = unpack2x16float(packed.y);
+ let v2 = unpack2x16float(packed.z);
+ let v3 = unpack2x16float(packed.w);
+ return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}
+
+fn unpack_layer_channels(coord: vec2<i32>) -> array<f32, 8> {
+ let packed = textureLoad(layer_input, coord, 0);
+ let v0 = unpack2x16float(packed.x);
+ let v1 = unpack2x16float(packed.y);
+ let v2 = unpack2x16float(packed.z);
+ let v3 = unpack2x16float(packed.w);
+ return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}
+
+fn pack_channels(values: array<f32, 8>) -> vec4<u32> {
+ return vec4<u32>(
+ pack2x16float(vec2<f32>(values[0], values[1])),
+ pack2x16float(vec2<f32>(values[2], values[3])),
+ pack2x16float(vec2<f32>(values[4], values[5])),
+ pack2x16float(vec2<f32>(values[6], values[7]))
+ );
+}
+
+@compute @workgroup_size(8, 8)
+fn main(@builtin(global_invocation_id) id: vec3<u32>) {
+ let coord = vec2<i32>(id.xy);
+ let dims = textureDimensions(static_features);
+
+ if (coord.x >= i32(dims.x) || coord.y >= i32(dims.y)) {
+ return;
+ }
+
+ // Load static features (always available)
+ let static_feat = unpack_static_features(coord);
+
+ // Convolution
+ var output: array<f32, OUT_CHANNELS>;
+ for (var c: u32 = 0u; c < OUT_CHANNELS; c++) {
+ var sum: f32 = 0.0;
+
+ for (var ky: i32 = -KERNEL_RADIUS; ky <= KERNEL_RADIUS; ky++) {
+ for (var kx: i32 = -KERNEL_RADIUS; kx <= KERNEL_RADIUS; kx++) {
+ let sample_coord = coord + vec2<i32>(kx, ky);
+
+ // Border handling (clamp)
+ let clamped = vec2<i32>(
+ clamp(sample_coord.x, 0, i32(dims.x) - 1),
+ clamp(sample_coord.y, 0, i32(dims.y) - 1)
+ );
+
+ // Load input features
+ let static_local = unpack_static_features(clamped);
+ let layer_local = unpack_layer_channels(clamped);
+
+ // Weight index calculation
+ let ky_idx = u32(ky + KERNEL_RADIUS);
+ let kx_idx = u32(kx + KERNEL_RADIUS);
+ let spatial_idx = ky_idx * KERNEL_SIZE + kx_idx;
+
+ // Accumulate: static features (8D)
+ for (var i: u32 = 0u; i < 8u; i++) {
+ let w_idx = c * IN_CHANNELS * KERNEL_SIZE * KERNEL_SIZE +
+ i * KERNEL_SIZE * KERNEL_SIZE + spatial_idx;
+ sum += weights[w_idx] * static_local[i];
+ }
+
+ // Accumulate: layer input channels (if layer_idx > 0)
+ let prev_channels = IN_CHANNELS - 8u;
+ for (var i: u32 = 0u; i < prev_channels; i++) {
+ let w_idx = c * IN_CHANNELS * KERNEL_SIZE * KERNEL_SIZE +
+ (8u + i) * KERNEL_SIZE * KERNEL_SIZE + spatial_idx;
+ sum += weights[w_idx] * layer_local[i];
+ }
+ }
+ }
+
+ output[c] = clamp(sum, 0.0, 1.0); // Sigmoid approximation
+ }
+
+ // Pack and store
+ textureStore(output_tex, coord, pack_channels(output));
+}
diff --git a/workspaces/main/shaders/cnn_v2_layer_template.wgsl b/workspaces/main/shaders/cnn_v2_layer_template.wgsl
new file mode 100644
index 0000000..1bf6819
--- /dev/null
+++ b/workspaces/main/shaders/cnn_v2_layer_template.wgsl
@@ -0,0 +1,68 @@
+// CNN v2 Layer Template (placeholder for generated shaders)
+// This file documents the structure - actual layers generated by export script
+
+// Example: Layer 0 (1×1 kernel, 8→16 channels)
+// const KERNEL_SIZE: u32 = 1u;
+// const IN_CHANNELS: u32 = 8u; // 7 features + bias
+// const OUT_CHANNELS: u32 = 16u;
+// const weights: array<f32, 128> = array(...);
+
+@group(0) @binding(0) var static_features: texture_2d<u32>;
+@group(0) @binding(1) var layer_input: texture_2d<u32>; // Previous layer output
+@group(0) @binding(2) var output_tex: texture_storage_2d<rgba32uint, write>;
+
+fn unpack_static_features(coord: vec2<i32>) -> array<f32, 8> {
+ let packed = textureLoad(static_features, coord, 0);
+ let v0 = unpack2x16float(packed.x);
+ let v1 = unpack2x16float(packed.y);
+ let v2 = unpack2x16float(packed.z);
+ let v3 = unpack2x16float(packed.w);
+ return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}
+
+fn unpack_layer_channels(coord: vec2<i32>) -> array<f32, 8> {
+ let packed = textureLoad(layer_input, coord, 0);
+ let v0 = unpack2x16float(packed.x);
+ let v1 = unpack2x16float(packed.y);
+ let v2 = unpack2x16float(packed.z);
+ let v3 = unpack2x16float(packed.w);
+ return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}
+
+fn pack_channels(values: array<f32, 8>) -> vec4<u32> {
+ return vec4<u32>(
+ pack2x16float(vec2<f32>(values[0], values[1])),
+ pack2x16float(vec2<f32>(values[2], values[3])),
+ pack2x16float(vec2<f32>(values[4], values[5])),
+ pack2x16float(vec2<f32>(values[6], values[7]))
+ );
+}
+
+@compute @workgroup_size(8, 8)
+fn main(@builtin(global_invocation_id) id: vec3<u32>) {
+ let coord = vec2<i32>(id.xy);
+ let dims = textureDimensions(static_features);
+
+ if (coord.x >= i32(dims.x) || coord.y >= i32(dims.y)) {
+ return;
+ }
+
+ // Load static features (always available)
+ let static_feat = unpack_static_features(coord);
+
+ // Convolution loop (example for generated code)
+ // var output: array<f32, OUT_CHANNELS>;
+ // for (var c: u32 = 0u; c < OUT_CHANNELS; c++) {
+ // var sum: f32 = 0.0;
+ // for (var ky: i32 = -radius; ky <= radius; ky++) {
+ // for (var kx: i32 = -radius; kx <= radius; kx++) {
+ // let sample_coord = coord + vec2<i32>(kx, ky);
+ // // Load static + prev layer, multiply weights, accumulate
+ // }
+ // }
+ // output[c] = max(0.0, sum); // ReLU
+ // }
+
+ // Placeholder output
+ textureStore(output_tex, coord, vec4<u32>(0u));
+}
diff --git a/workspaces/main/shaders/cnn_v2_static.wgsl b/workspaces/main/shaders/cnn_v2_static.wgsl
new file mode 100644
index 0000000..c3a2de7
--- /dev/null
+++ b/workspaces/main/shaders/cnn_v2_static.wgsl
@@ -0,0 +1,47 @@
+// CNN v2 Static Features Compute Shader
+// Generates 7D features + bias: [R, G, B, D, uv.x, uv.y, sin10_x, 1.0]
+
+@group(0) @binding(0) var input_tex: texture_2d<f32>;
+@group(0) @binding(1) var input_tex_mip1: texture_2d<f32>;
+@group(0) @binding(2) var input_tex_mip2: texture_2d<f32>;
+@group(0) @binding(3) var depth_tex: texture_2d<f32>;
+@group(0) @binding(4) var output_tex: texture_storage_2d<rgba32uint, write>;
+
+@compute @workgroup_size(8, 8)
+fn main(@builtin(global_invocation_id) id: vec3<u32>) {
+ let coord = vec2<i32>(id.xy);
+ let dims = textureDimensions(input_tex);
+
+ if (coord.x >= i32(dims.x) || coord.y >= i32(dims.y)) {
+ return;
+ }
+
+ // Sample RGBA from mip 0
+ let rgba = textureLoad(input_tex, coord, 0);
+ let r = rgba.r;
+ let g = rgba.g;
+ let b = rgba.b;
+
+ // Sample depth
+ let d = textureLoad(depth_tex, coord, 0).r;
+
+ // UV coordinates (normalized [0,1])
+ let uv_x = f32(coord.x) / f32(dims.x);
+ let uv_y = f32(coord.y) / f32(dims.y);
+
+ // Multi-frequency position encoding
+ let sin10_x = sin(10.0 * uv_x);
+
+ // Bias dimension (always 1.0)
+ let bias = 1.0;
+
+ // Pack 8×f16 into 4×u32 (rgba32uint)
+ let packed = vec4<u32>(
+ pack2x16float(vec2<f32>(r, g)),
+ pack2x16float(vec2<f32>(b, d)),
+ pack2x16float(vec2<f32>(uv_x, uv_y)),
+ pack2x16float(vec2<f32>(sin10_x, bias))
+ );
+
+ textureStore(output_tex, coord, packed);
+}
diff --git a/workspaces/main/timeline.seq b/workspaces/main/timeline.seq
index ab9e40d..4e8dc69 100644
--- a/workspaces/main/timeline.seq
+++ b/workspaces/main/timeline.seq
@@ -2,104 +2,101 @@
# Generated by Timeline Editor
# BPM 120
-SEQUENCE 0.00s 0
-EFFECT - FlashCubeEffect 0.00s 2.44s
-EFFECT + FlashEffect 0.00s 1.00s color=1.0,0.5,0.5 decay=0.95
-EFFECT + FadeEffect 0.10s 1.00s
-EFFECT + SolarizeEffect 0.00s 2.00s
-EFFECT + VignetteEffect 0.00s 2.50s radius=0.6 softness=0.1
+SEQUENCE 0.00 0
+ EFFECT - FlashCubeEffect 0.00 4.88
+ EFFECT + FlashEffect 0.00 2.00 color=1.0,0.5,0.5 decay=0.95
+ EFFECT + FadeEffect 0.20 2.00
+ EFFECT + SolarizeEffect 0.00 4.00
+ EFFECT + VignetteEffect 0.00 5.00 radius=0.6 softness=0.1
-SEQUENCE 2.50s 0 "rotating cube"
-EFFECT + CircleMaskEffect 0.00s 4.00s 0.50
-EFFECT + RotatingCubeEffect 0.00s 4.00s
-EFFECT + GaussianBlurEffect 1.00s 2.00s strength=1.0
-EFFECT + GaussianBlurEffect 3.00s 4.00s strength=2.0
+SEQUENCE 5.00 0 "rotating cube"
+ EFFECT + CircleMaskEffect 0.00 8.00 0.50
+ EFFECT + RotatingCubeEffect 0.00 8.00
+ EFFECT + GaussianBlurEffect 2.00 4.00 strength=1.0
+ EFFECT + GaussianBlurEffect 6.00 8.00 strength=2.0
-SEQUENCE 5.93s 0
-EFFECT - FlashCubeEffect 0.11s 1.45s
-EFFECT + FlashEffect 0.00s 0.20s
+SEQUENCE 12.00 0
+ EFFECT - FlashCubeEffect 0.22 2.90
+ EFFECT + FlashEffect 0.00 0.40
-SEQUENCE 6.90s 1 "spray"
-EFFECT + ParticleSprayEffect 0.00s 2.00s
-EFFECT + ParticlesEffect 0.00s 3.00s
-EFFECT = GaussianBlurEffect 0.00s 2.00s strength=3.0
+SEQUENCE 14.00 1 "spray"
+ EFFECT + ParticleSprayEffect 0.00 4.00
+ EFFECT + ParticlesEffect 0.00 6.00
+ EFFECT = GaussianBlurEffect 0.00 4.00 strength=3.0
-SEQUENCE 8.50s 2 "Hybrid3D"
-EFFECT + ThemeModulationEffect 0.00s 2.00s
-EFFECT + HeptagonEffect 0.20s 2.00s
-EFFECT + ParticleSprayEffect 0.00s 2.00s
-EFFECT = ParticlesEffect 0.00s 2.00s
-EFFECT + Hybrid3DEffect 0.00s 2.00s
-EFFECT + GaussianBlurEffect 0.00s 2.00s
-EFFECT + CNNEffect 0.0s 2.0s layers=3 blend=.9
-# EFFECT + ChromaAberrationEffect 0.00 1.50 offset=0.01 angle=1.57
+SEQUENCE 17.00 2 "Hybrid3D"
+ EFFECT + ThemeModulationEffect 0.00 4.00
+ EFFECT + HeptagonEffect 0.40 4.00
+ EFFECT + ParticleSprayEffect 0.00 4.00
+ EFFECT = ParticlesEffect 0.00 4.00
+ EFFECT + Hybrid3DEffect 0.00 4.00
+ EFFECT + GaussianBlurEffect 0.00 4.00
+ EFFECT + CNNEffect 0.00 4.00 layers=3 blend=.9
-SEQUENCE 10.50s 0 "CNN effect"
-EFFECT + HeptagonEffect 0.0s 12.00s
-# EFFECT + RotatingCubeEffect 0.00 12.0
-# EFFECT + Hybrid3DEffect 0.00 12.00
-EFFECT + Scene1Effect 0.0s 12.0s
-EFFECT + CNNEffect 1.0s 12.0s layers=3 blend=.5
+SEQUENCE 21.00 0 "CNN effect"
+ EFFECT + HeptagonEffect 0.00 22.00
+ EFFECT + Scene1Effect 0.00 24.00
+ EFFECT + CNNEffect 2.00 24.00 layers=3 blend=.5
-SEQUENCE 22.0s 0 "buggy"
-EFFECT + HeptagonEffect 0.00s 0.20s
-EFFECT + FadeEffect 0.11s 1.01s
+SEQUENCE 44.00 0 "buggy"
+ EFFECT + HeptagonEffect 0.00 0.40
+ EFFECT + FadeEffect 0.22 2.02
-SEQUENCE 22.14s 3
-EFFECT + ThemeModulationEffect 0.00s 4.00s
-EFFECT = HeptagonEffect 0.00s 4.00s
-EFFECT + GaussianBlurEffect 0.00s 5.00s strength=1.5
-EFFECT + ChromaAberrationEffect 0.00s 5.00s offset=0.03 angle=0.785
-EFFECT + SolarizeEffect 0.00s 5.00s
+SEQUENCE 44.00 3 "Seq-8"
+ EFFECT + ThemeModulationEffect 0.00 8.00
+ EFFECT = HeptagonEffect 0.00 8.00
+ EFFECT + GaussianBlurEffect 0.00 10.00 strength=1.5
+ EFFECT + ChromaAberrationEffect 0.00 10.00 offset=0.03 angle=0.785
+ EFFECT + SolarizeEffect 0.00 10.00
-SEQUENCE 23.00s 2
-EFFECT - FlashCubeEffect 0.20s 1.50s
-EFFECT + HeptagonEffect 0.00s 2.00s
-EFFECT + ParticleSprayEffect 0.00s 2.00s
-EFFECT + ParticlesEffect 0.00s 2.00s
+SEQUENCE 46.00 2
+ EFFECT - FlashCubeEffect 0.40 3.00
+ EFFECT + HeptagonEffect 0.00 4.00
+ EFFECT + ParticleSprayEffect 0.00 4.00
+ EFFECT + ParticlesEffect 0.00 4.00
-SEQUENCE 22.75s 2 "Fade"
-EFFECT - FlashCubeEffect 0.20s 1.50s
-EFFECT + FlashEffect 0.00s 1.00s
+SEQUENCE 46.00 2 "Fade"
+ EFFECT - FlashCubeEffect 0.40 3.00
+ EFFECT + FlashEffect 0.00 2.00
-SEQUENCE 23.88s 10
-EFFECT - FlashCubeEffect 0.20s 1.50s
-EFFECT + GaussianBlurEffect 0.00s 2.00s
-EFFECT + FlashEffect 0.00s 0.20s
-EFFECT = FlashEffect 0.50s 0.20s
+SEQUENCE 48.00 10
+ EFFECT - FlashCubeEffect 0.40 3.00
+ EFFECT + GaussianBlurEffect 0.00 4.00
+ EFFECT + FlashEffect 0.00 0.40
+ EFFECT = FlashEffect 1.00 0.40
-SEQUENCE 25.59s 1
-EFFECT + ThemeModulationEffect 0.00s 8.00s
-EFFECT + HeptagonEffect 0.20s 2.00s
-EFFECT + ParticleSprayEffect 0.00s 8.00s
-EFFECT + Hybrid3DEffect 0.00s 8.06s
-EFFECT + GaussianBlurEffect 0.00s 8.00s
-EFFECT + ChromaAberrationEffect 0.00s 8.14s
-EFFECT + SolarizeEffect 0.00s 7.88s
+SEQUENCE 51.00 1
+ EFFECT + ThemeModulationEffect 0.00 16.00
+ EFFECT + HeptagonEffect 0.40 4.00
+ EFFECT + ParticleSprayEffect 0.00 16.00
+ EFFECT + Hybrid3DEffect 0.00 16.12
+ EFFECT + GaussianBlurEffect 0.00 16.00
+ EFFECT + ChromaAberrationEffect 0.00 16.28
+ EFFECT + SolarizeEffect 0.00 15.76
-SEQUENCE 33.08s 0
-EFFECT + ThemeModulationEffect 0.00s 3.00s
-EFFECT + VignetteEffect 0.00s 3.00s radius=0.6 softness=0.3
-EFFECT + SolarizeEffect 0.00s 3.00s
+SEQUENCE 66.00 0
+ EFFECT + ThemeModulationEffect 0.00 6.00
+ EFFECT + VignetteEffect 0.00 6.00 radius=0.6 softness=0.3
+ EFFECT + SolarizeEffect 0.00 6.00
-SEQUENCE 35.31s 0
-EFFECT + ThemeModulationEffect 0.00s 4.00s
-EFFECT + HeptagonEffect 0.20s 2.00s
-EFFECT + GaussianBlurEffect 0.00s 8.00s
-EFFECT + SolarizeEffect 0.00s 2.00s
+SEQUENCE 71.00 0
+ EFFECT + ThemeModulationEffect 0.00 8.00
+ EFFECT + HeptagonEffect 0.40 4.00
+ EFFECT + GaussianBlurEffect 0.00 16.00
+ EFFECT + SolarizeEffect 0.00 4.00
-SEQUENCE 42.29s 0
-EFFECT + ThemeModulationEffect 0.00s 6.00s
-EFFECT = HeptagonEffect 0.20s 2.00s
-EFFECT + Hybrid3DEffect 0.00s 4.00s
-EFFECT + ParticleSprayEffect 0.00s 5.50s
-EFFECT + HeptagonEffect 0.00s 8.00s
-EFFECT + ChromaAberrationEffect 0.00s 7.50s
-EFFECT + GaussianBlurEffect 0.00s 8.00s
+SEQUENCE 85.00 0 "double hepta!"
+ EFFECT + ThemeModulationEffect 0.00 12.00
+ EFFECT = HeptagonEffect 0.40 4.00
+ EFFECT + Hybrid3DEffect 0.00 8.00
+ EFFECT + ParticleSprayEffect 0.00 11.00
+ EFFECT + HeptagonEffect 0.00 16.00
+ EFFECT + ChromaAberrationEffect 0.00 15.00
+ EFFECT + GaussianBlurEffect 0.00 16.00
-SEQUENCE 50.02s 0
-EFFECT + ThemeModulationEffect 0.00s 4.00s
-EFFECT + HeptagonEffect 0.00s 9.50s
-EFFECT + ChromaAberrationEffect 0.00s 9.00s
-EFFECT + GaussianBlurEffect 0.00s 8.00s
+SEQUENCE 100.00 0
+ EFFECT + ThemeModulationEffect 0.00 8.00
+ EFFECT + HeptagonEffect 0.00 19.00
+ EFFECT + ChromaAberrationEffect 0.00 18.00
+ EFFECT + GaussianBlurEffect 0.00 16.00
diff --git a/workspaces/test/timeline.seq.backup b/workspaces/test/timeline.seq.backup
deleted file mode 100644
index 100c7da..0000000
--- a/workspaces/test/timeline.seq.backup
+++ /dev/null
@@ -1,8 +0,0 @@
-# WORKSPACE: test
-# Minimal timeline for audio/visual sync testing
-# BPM 120 (set in test_demo.track)
-
-SEQUENCE 0.0 0 "Main Loop"
- EFFECT + FlashEffect 0.0 16.0
-
-END_DEMO 32b