| author | skal <pascal.massimino@gmail.com> | 2026-02-14 07:22:17 +0100 |
|---|---|---|
| committer | skal <pascal.massimino@gmail.com> | 2026-02-14 07:24:51 +0100 |
| commit | 0f53ed1ed8ed7c07cd7ea8e88e21b5be5d5494e5 (patch) | |
| tree | 0e1a8426c16e7c89b83038d5b90bb9d94c6d06e5 /tools/cnn_v2_test | |
| parent | 8dd77545b5ec2f45ce46b98dd7d94a3c4a13e290 (diff) | |
CNN v2: bilinear mip-level sampling and UI improvements
**CNN v2 Changes:**
- Replace point sampling with bilinear interpolation for mip-level features
- Add linear sampler (binding 6) to static features shader (see the bind-group sketch after this list)
- Update CNNv2Effect, cnn_test, and HTML tool
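The engine-side half of this change (CNNv2Effect, cnn_test) is outside this diff, which is limited to tools/cnn_v2_test. As a rough illustration only, wiring the sampler into the static-features bind group at binding 6 with the webgpu.h C API might look like the sketch below; the function and variable names are hypothetical, and the real code may use Dawn's C++ wrapper instead.

```cpp
#include <webgpu/webgpu.h>

#include <cstdint>
#include <vector>

// Hypothetical sketch: attach the new linear sampler at @binding(6) of the
// static-features bind group. `layout`, `base_entries`, and `linear_sampler`
// stand in for state that CNNv2Effect already owns; the matching bind group
// layout must also declare a filtering sampler at binding 6.
WGPUBindGroup MakeStaticFeaturesBindGroup(WGPUDevice device,
                                          WGPUBindGroupLayout layout,
                                          std::vector<WGPUBindGroupEntry> base_entries,
                                          WGPUSampler linear_sampler) {
  WGPUBindGroupEntry sampler_entry = {};
  sampler_entry.binding = 6;               // @binding(6) var linear_sampler: sampler;
  sampler_entry.sampler = linear_sampler;  // created with Linear filtering (see Technical below)
  base_entries.push_back(sampler_entry);

  WGPUBindGroupDescriptor desc = {};
  desc.layout = layout;
  desc.entryCount = static_cast<uint32_t>(base_entries.size());
  desc.entries = base_entries.data();
  return wgpuDeviceCreateBindGroup(device, &desc);
}
```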
**HTML Tool UI:**
- Move controls to floating bottom bar in central view
- Consolidate video controls + Blend/Depth/Save PNG in single container
- Increase left panel width: 300px → 315px (+5%)
- Remove per-frame debug messages (visualization, rendering logs)
**Technical:**
- WGSL: textureSampleLevel() with linear_sampler instead of textureLoad()
- C++: Create WGPUSampler with Linear filtering (sketched after this list)
- HTML: Change sampler from 'nearest' to 'linear'
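For the C++ bullet above, a minimal sketch of the sampler creation with the webgpu.h C API follows; `CreateLinearMipSampler` is a hypothetical name, and enum spellings (e.g. `WGPUMipmapFilterMode`) differ slightly between webgpu.h revisions.

```cpp
#include <webgpu/webgpu.h>

// Hypothetical helper: a sampler that filters bilinearly within a mip level
// and linearly between levels, matching the 'linear' sampler the HTML tool
// creates via device.createSampler() in the diff below.
WGPUSampler CreateLinearMipSampler(WGPUDevice device) {
  WGPUSamplerDescriptor desc = {};
  desc.addressModeU = WGPUAddressMode_ClampToEdge;
  desc.addressModeV = WGPUAddressMode_ClampToEdge;
  desc.addressModeW = WGPUAddressMode_ClampToEdge;
  desc.magFilter = WGPUFilterMode_Linear;           // bilinear within a mip level
  desc.minFilter = WGPUFilterMode_Linear;
  desc.mipmapFilter = WGPUMipmapFilterMode_Linear;  // interpolate between mip levels
  desc.lodMinClamp = 0.0f;
  desc.lodMaxClamp = 32.0f;                         // don't clamp away higher mips
  desc.maxAnisotropy = 1;
  return wgpuDeviceCreateSampler(device, &desc);
}
```

Since the static features shader passes an integral mip level to textureSampleLevel(), the visible effect is bilinear filtering within the selected mip; mipmapFilter only comes into play if a fractional level is ever requested.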
handoff(Claude): CNN v2 now uses bilinear mip-level sampling across all tools
Diffstat (limited to 'tools/cnn_v2_test')
| -rw-r--r-- | tools/cnn_v2_test/index.html | 84 |
1 file changed, 50 insertions, 34 deletions
diff --git a/tools/cnn_v2_test/index.html b/tools/cnn_v2_test/index.html
index 2ec934d..e226d0c 100644
--- a/tools/cnn_v2_test/index.html
+++ b/tools/cnn_v2_test/index.html
@@ -104,7 +104,7 @@
       background: #404040;
     }
     .left-sidebar {
-      width: 300px;
+      width: 315px;
       background: #2a2a2a;
       overflow-y: auto;
       display: flex;
@@ -135,6 +135,32 @@
       border: 1px solid #404040;
       z-index: 100;
     }
+    .bottom-controls-float {
+      position: absolute;
+      bottom: 16px;
+      left: 50%;
+      transform: translateX(-50%);
+      display: flex;
+      gap: 16px;
+      align-items: center;
+      background: rgba(42, 42, 42, 0.95);
+      padding: 8px 16px;
+      border-radius: 4px;
+      border: 1px solid #404040;
+      z-index: 100;
+    }
+    .bottom-controls-float .control-group {
+      display: flex;
+      gap: 8px;
+      align-items: center;
+    }
+    .bottom-controls-float #videoControls {
+      display: flex;
+      gap: 8px;
+      align-items: center;
+      padding-right: 16px;
+      border-right: 1px solid #404040;
+    }
     .main.drop-active::after {
       content: 'Drop PNG/video here';
       position: absolute;
@@ -312,19 +338,6 @@
 <body>
   <div class="header">
     <h1>CNN v2 Testing Tool</h1>
-    <div class="controls">
-      <div class="control-group">
-        <label>Blend:</label>
-        <input type="range" id="blend" min="0" max="1" step="0.01" value="1.0">
-        <span id="blendValue">1.0</span>
-      </div>
-      <div class="control-group">
-        <label>Depth:</label>
-        <input type="range" id="depth" min="0" max="1" step="0.01" value="1.0">
-        <span id="depthValue">1.0</span>
-      </div>
-      <button id="savePngBtn">Save PNG</button>
-    </div>
   </div>
   <video id="videoSource" muted loop></video>
   <div class="content">
@@ -358,10 +371,23 @@
       </div>
     </div>
     <div class="main" id="mainDrop">
-      <div class="video-controls-float" id="videoControls">
-        <button id="playPauseBtn" disabled>Play</button>
-        <button id="stepBackBtn" disabled>◄ Frame</button>
-        <button id="stepForwardBtn" disabled>Frame ►</button>
+      <div class="bottom-controls-float">
+        <div id="videoControls">
+          <button id="playPauseBtn" disabled>Play</button>
+          <button id="stepBackBtn" disabled>◄ Frame</button>
+          <button id="stepForwardBtn" disabled>Frame ►</button>
+        </div>
+        <div class="control-group">
+          <label>Blend:</label>
+          <input type="range" id="blend" min="0" max="1" step="0.01" value="1.0">
+          <span id="blendValue">1.0</span>
+        </div>
+        <div class="control-group">
+          <label>Depth:</label>
+          <input type="range" id="depth" min="0" max="1" step="0.01" value="1.0">
+          <span id="depthValue">1.0</span>
+        </div>
+        <button id="savePngBtn">Save PNG</button>
       </div>
       <canvas id="canvas"></canvas>
     </div>
@@ -409,7 +435,7 @@ fn vs_main(@builtin(vertex_index) idx: u32) -> @builtin(position) vec4<f32> {
 // Static features: 7D parametric features (RGBD + UV + sin(10*uv_x) + bias)
 const STATIC_SHADER = `
 @group(0) @binding(0) var input_tex: texture_2d<f32>;
-@group(0) @binding(1) var point_sampler: sampler;
+@group(0) @binding(1) var linear_sampler: sampler;
 @group(0) @binding(2) var depth_tex: texture_2d<f32>;
 @group(0) @binding(3) var output_tex: texture_storage_2d<rgba32uint, write>;
 @group(0) @binding(4) var<uniform> mip_level: u32;
@@ -420,9 +446,9 @@ fn main(@builtin(global_invocation_id) id: vec3<u32>) {
   let dims = textureDimensions(input_tex);
   if (coord.x >= i32(dims.x) || coord.y >= i32(dims.y)) { return; }
 
-  // Use normalized UV coords with point sampler (no filtering)
+  // Use normalized UV coords with linear sampler (bilinear filtering)
   let uv = (vec2<f32>(coord) + 0.5) / vec2<f32>(dims);
-  let rgba = textureSampleLevel(input_tex, point_sampler, uv, f32(mip_level));
+  let rgba = textureSampleLevel(input_tex, linear_sampler, uv, f32(mip_level));
 
   let p0 = rgba.r;
   let p1 = rgba.g;
@@ -1114,8 +1140,6 @@ class CNNTester {
     if (!source) return;
     const { width, height } = this.getDimensions();
 
-    this.log(`Running CNN pipeline (${this.weights.layers.length} layers)...`);
-
     this.context.configure({ device: this.device, format: this.format });
 
     // Create persistent input texture for original view with mipmaps
@@ -1203,9 +1227,9 @@ class CNNTester {
     if (!this.pointSampler) {
       this.pointSampler = this.device.createSampler({
-        magFilter: 'nearest',
-        minFilter: 'nearest',
-        mipmapFilter: 'nearest'
+        magFilter: 'linear',
+        minFilter: 'linear',
+        mipmapFilter: 'linear'
       });
     }
@@ -1464,8 +1488,6 @@ class CNNTester {
     const layerTex = this.layerOutputs[layerIdx];
     const { width, height } = this.getDimensions();
 
-    this.log(`Visualizing ${layerName} activations (${width}×${height})`);
-
     // Update channel labels based on layer type
     // Static features (layerIdx=0): 8 channels split into two views
     // CNN layers (layerIdx≥1): 4 channels per layer
@@ -1519,7 +1541,6 @@ class CNNTester {
       try {
         ctx.configure({ device: this.device, format: this.format });
-        this.log(`Canvas ${c}: ${width}×${height}, WebGPU context configured`);
       } catch (e) {
         this.log(`Failed to configure canvas ${c}: ${e.message}`, 'error');
         continue;
       }
@@ -1559,12 +1580,10 @@ class CNNTester {
       renderPass.end();
       this.device.queue.submit([encoder.finish()]);
-      this.log(`Submitted render for channel ${c}`);
     }
 
     // Wait for all renders to complete
     await this.device.queue.onSubmittedWorkDone();
-    this.log(`Rendered 4 channels for ${layerName}`);
 
     // Update active channel highlighting and preview
     this.updateChannelSelection();
@@ -1666,7 +1685,6 @@ class CNNTester {
     if (btn) btn.classList.add('active');
 
     const { kernelSize, inChannels, outChannels, weightOffset, min, max } = layer;
-    this.log(`Visualizing Layer ${cnnLayerIdx} weights: ${inChannels}→${outChannels}, ${kernelSize}×${kernelSize}`);
 
     const canvas = document.getElementById('weightsCanvas');
     const ctx = canvas.getContext('2d', { willReadFrequently: false });
@@ -1706,8 +1724,6 @@ class CNNTester {
         }
       }
     }
-
-    this.log(`Rendered ${outChannels} output channels (${width}×${height}px)`);
   }
 
   getWeightValue(idx) {
