From 91d42f2d057e077c267d6775cc109a801aa315c0 Mon Sep 17 00:00:00 2001
From: skal
Date: Thu, 12 Feb 2026 11:34:50 +0100
Subject: CNN v2: parametric static features - Phases 1-4
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Infrastructure for enhanced CNN post-processing with 7D feature input.

Phase 1: Shaders
- Static features compute (RGBD + UV + sin10_x + bias → 8×f16)
- Layer template (convolution skeleton, packing/unpacking)
- 3 mip level support for multi-scale features

Phase 2: C++ Effect
- CNNv2Effect class (multi-pass architecture)
- Texture management (static features, layer buffers)
- Build integration (CMakeLists, assets, tests)

Phase 3: Training Pipeline
- train_cnn_v2.py: PyTorch model with static feature concatenation
- export_cnn_v2_shader.py: f32→f16 quantization, WGSL generation
- Configurable architecture (kernels, channels)

Phase 4: Validation
- validate_cnn_v2.sh: End-to-end pipeline
- Checkpoint → shaders → build → test images

Tests: 36/36 passing

Next: Complete render pipeline implementation (bind groups, multi-pass)

Co-Authored-By: Claude Sonnet 4.5
---
 training/export_cnn_v2_shader.py | 225 +++++++++++++++++++++++++++++++++++++++
 training/train_cnn_v2.py         | 217 +++++++++++++++++++++++++++++++++++++
 2 files changed, 442 insertions(+)
 create mode 100755 training/export_cnn_v2_shader.py
 create mode 100755 training/train_cnn_v2.py
(limited to 'training')

diff --git a/training/export_cnn_v2_shader.py b/training/export_cnn_v2_shader.py
new file mode 100755
index 0000000..3c53ce2
--- /dev/null
+++ b/training/export_cnn_v2_shader.py
@@ -0,0 +1,225 @@
+#!/usr/bin/env python3
+"""CNN v2 Shader Export Script
+
+Converts PyTorch checkpoints to WGSL compute shaders with f16 weights.
+Generates one shader per layer with embedded weight arrays.
+"""
+
+import argparse
+import numpy as np
+import torch
+from pathlib import Path
+
+
+def export_layer_shader(layer_idx, weights, kernel_size, in_channels, out_channels,
+                        output_dir, is_output_layer=False):
+    """Generate WGSL compute shader for a single CNN layer.
+
+    Args:
+        layer_idx: Layer index (0, 1, 2)
+        weights: (out_ch, in_ch, k, k) weight tensor
+        kernel_size: Kernel size (1, 3, 5, etc.)
+        in_channels: Input channels (includes 8D static features)
+        out_channels: Output channels
+        output_dir: Output directory path
+        is_output_layer: True if this is the final RGBA output layer
+    """
+    weights_flat = weights.flatten()
+    weights_f16 = weights_flat.astype(np.float16)
+    weights_f32 = weights_f16.astype(np.float32)  # WGSL stores as f32 literals
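+    # Round-tripping f32 → f16 → f32 bakes the f16 rounding into the emitted
+    # literals: the shader constants are declared as f32, but they carry
+    # exactly the f16-quantized weight values.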
+
+    # Format weights as WGSL array
+    weights_str = ",\n    ".join(
+        ", ".join(f"{w:.6f}" for w in weights_f32[i:i+8])
+        for i in range(0, len(weights_f32), 8)
+    )
+
+    radius = kernel_size // 2
+    if is_output_layer:
+        activation = "output[c] = clamp(sum, 0.0, 1.0); // Sigmoid approximation"
+    else:
+        activation = "output[c] = max(0.0, sum); // ReLU"
+
+    shader_code = f"""// CNN v2 Layer {layer_idx} - Auto-generated
+// Kernel: {kernel_size}×{kernel_size}, In: {in_channels}, Out: {out_channels}
+
+const KERNEL_SIZE: u32 = {kernel_size}u;
+const IN_CHANNELS: u32 = {in_channels}u;
+const OUT_CHANNELS: u32 = {out_channels}u;
+const KERNEL_RADIUS: i32 = {radius};
+
+// Weights quantized to float16 (stored as f32 in WGSL)
+const weights: array<f32, {len(weights_f32)}> = array<f32, {len(weights_f32)}>(
+    {weights_str}
+);
+
+@group(0) @binding(0) var static_features: texture_2d<u32>;
+@group(0) @binding(1) var layer_input: texture_2d<u32>;
+@group(0) @binding(2) var output_tex: texture_storage_2d<rgba32uint, write>;
+
+fn unpack_static_features(coord: vec2<i32>) -> array<f32, 8> {{
+    let packed = textureLoad(static_features, coord, 0);
+    let v0 = unpack2x16float(packed.x);
+    let v1 = unpack2x16float(packed.y);
+    let v2 = unpack2x16float(packed.z);
+    let v3 = unpack2x16float(packed.w);
+    return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}}
+
+fn unpack_layer_channels(coord: vec2<i32>) -> array<f32, 8> {{
+    let packed = textureLoad(layer_input, coord, 0);
+    let v0 = unpack2x16float(packed.x);
+    let v1 = unpack2x16float(packed.y);
+    let v2 = unpack2x16float(packed.z);
+    let v3 = unpack2x16float(packed.w);
+    return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}}
+
+fn pack_channels(values: array<f32, 8>) -> vec4<u32> {{
+    return vec4<u32>(
+        pack2x16float(vec2<f32>(values[0], values[1])),
+        pack2x16float(vec2<f32>(values[2], values[3])),
+        pack2x16float(vec2<f32>(values[4], values[5])),
+        pack2x16float(vec2<f32>(values[6], values[7]))
+    );
+}}
+
+@compute @workgroup_size(8, 8)
+fn main(@builtin(global_invocation_id) id: vec3<u32>) {{
+    let coord = vec2<i32>(id.xy);
+    let dims = textureDimensions(static_features);
+
+    if (coord.x >= i32(dims.x) || coord.y >= i32(dims.y)) {{
+        return;
+    }}
+
+    // Load static features (always available)
+    let static_feat = unpack_static_features(coord);
+
+    // Convolution
+    var output: array<f32, 8>;
+    for (var c: u32 = 0u; c < OUT_CHANNELS; c++) {{
+        var sum: f32 = 0.0;
+
+        for (var ky: i32 = -KERNEL_RADIUS; ky <= KERNEL_RADIUS; ky++) {{
+            for (var kx: i32 = -KERNEL_RADIUS; kx <= KERNEL_RADIUS; kx++) {{
+                let sample_coord = coord + vec2<i32>(kx, ky);
+
+                // Border handling (clamp)
+                let clamped = vec2<i32>(
+                    clamp(sample_coord.x, 0, i32(dims.x) - 1),
+                    clamp(sample_coord.y, 0, i32(dims.y) - 1)
+                );
+
+                // Load input features
+                let static_local = unpack_static_features(clamped);
+                let layer_local = unpack_layer_channels(clamped);
+
+                // Weight index calculation
+                let ky_idx = u32(ky + KERNEL_RADIUS);
+                let kx_idx = u32(kx + KERNEL_RADIUS);
+                let spatial_idx = ky_idx * KERNEL_SIZE + kx_idx;
+
+                // Accumulate: static features (8D)
+                for (var i: u32 = 0u; i < 8u; i++) {{
+                    let w_idx = c * IN_CHANNELS * KERNEL_SIZE * KERNEL_SIZE +
+                                i * KERNEL_SIZE * KERNEL_SIZE + spatial_idx;
+                    sum += weights[w_idx] * static_local[i];
+                }}
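+
+                // Weight layout follows the flattened PyTorch Conv2d tensor
+                // (out_ch, in_ch, ky, kx), with the 8 static channels first.
+                // For layer 0, IN_CHANNELS is 8u, so prev_channels below is 0
+                // and the second accumulation loop never runs.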
+                // Accumulate: layer input channels (if layer_idx > 0)
+                let prev_channels = IN_CHANNELS - 8u;
+                for (var i: u32 = 0u; i < prev_channels; i++) {{
+                    let w_idx = c * IN_CHANNELS * KERNEL_SIZE * KERNEL_SIZE +
+                                (8u + i) * KERNEL_SIZE * KERNEL_SIZE + spatial_idx;
+                    sum += weights[w_idx] * layer_local[i];
+                }}
+            }}
+        }}
+
+        {activation}
+    }}
+
+    // Pack and store
+    textureStore(output_tex, coord, pack_channels(output));
+}}
+"""
+
+    output_path = Path(output_dir) / f"cnn_v2_layer_{layer_idx}.wgsl"
+    output_path.write_text(shader_code)
+    print(f"  → {output_path}")
+
+
+def export_checkpoint(checkpoint_path, output_dir):
+    """Export PyTorch checkpoint to WGSL shaders.
+
+    Args:
+        checkpoint_path: Path to .pth checkpoint
+        output_dir: Output directory for shaders
+    """
+    print(f"Loading checkpoint: {checkpoint_path}")
+    checkpoint = torch.load(checkpoint_path, map_location='cpu')
+
+    state_dict = checkpoint['model_state_dict']
+    config = checkpoint['config']
+
+    print("Configuration:")
+    print(f"  Kernels:  {config['kernels']}")
+    print(f"  Channels: {config['channels']}")
+    print(f"  Features: {config['features']}")
+
+    output_dir = Path(output_dir)
+    output_dir.mkdir(parents=True, exist_ok=True)
+
+    print(f"\nExporting shaders to {output_dir}/")
+
+    # Layer 0: 8 → channels[0]
+    layer0_weights = state_dict['layer0.weight'].detach().numpy()
+    export_layer_shader(
+        layer_idx=0,
+        weights=layer0_weights,
+        kernel_size=config['kernels'][0],
+        in_channels=8,
+        out_channels=config['channels'][0],
+        output_dir=output_dir,
+        is_output_layer=False
+    )
+
+    # Layer 1: (8 + channels[0]) → channels[1]
+    layer1_weights = state_dict['layer1.weight'].detach().numpy()
+    export_layer_shader(
+        layer_idx=1,
+        weights=layer1_weights,
+        kernel_size=config['kernels'][1],
+        in_channels=8 + config['channels'][0],
+        out_channels=config['channels'][1],
+        output_dir=output_dir,
+        is_output_layer=False
+    )
+
+    # Layer 2: (8 + channels[1]) → 4 (RGBA)
+    layer2_weights = state_dict['layer2.weight'].detach().numpy()
+    export_layer_shader(
+        layer_idx=2,
+        weights=layer2_weights,
+        kernel_size=config['kernels'][2],
+        in_channels=8 + config['channels'][1],
+        out_channels=4,
+        output_dir=output_dir,
+        is_output_layer=True
+    )
+
+    print("\nExport complete! Generated 3 shader files.")
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Export CNN v2 checkpoint to WGSL shaders')
+    parser.add_argument('checkpoint', type=str, help='Path to checkpoint .pth file')
+    parser.add_argument('--output-dir', type=str, default='workspaces/main/shaders',
+                        help='Output directory for shaders')
+
+    args = parser.parse_args()
+    export_checkpoint(args.checkpoint, args.output_dir)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/training/train_cnn_v2.py b/training/train_cnn_v2.py
new file mode 100755
index 0000000..fe148b4
--- /dev/null
+++ b/training/train_cnn_v2.py
@@ -0,0 +1,217 @@
+#!/usr/bin/env python3
+"""CNN v2 Training Script - Parametric Static Features
+
+Trains a multi-layer CNN with an 8D static feature input (7 features + bias):
+- RGBD (4D)
+- UV coordinates (2D)
+- sin(10*uv.x) position encoding (1D)
+- Bias dimension (1D, always 1.0)
+"""
+
+import argparse
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.data import Dataset, DataLoader
+from pathlib import Path
+from PIL import Image
+import time
+
+
+def compute_static_features(rgb, depth=None):
+    """Generate 7D static features + bias dimension.
+
+    Args:
+        rgb: (H, W, 3) RGB image [0, 1]
+        depth: (H, W) depth map [0, 1], optional
+
+    Returns:
+        (H, W, 8) static features tensor
+    """
+    h, w = rgb.shape[:2]
+
+    # RGBD channels
+    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
+    d = depth if depth is not None else np.zeros((h, w), dtype=np.float32)
+
+    # UV coordinates (normalized [0, 1])
+    uv_x = np.linspace(0, 1, w)[None, :].repeat(h, axis=0).astype(np.float32)
+    uv_y = np.linspace(0, 1, h)[:, None].repeat(w, axis=1).astype(np.float32)
+
+    # Multi-frequency position encoding
+    sin10_x = np.sin(10.0 * uv_x).astype(np.float32)
+
+    # Bias dimension (always 1.0)
+    bias = np.ones((h, w), dtype=np.float32)
+
+    # Stack: [R, G, B, D, uv.x, uv.y, sin10_x, bias]
+    features = np.stack([r, g, b, d, uv_x, uv_y, sin10_x, bias], axis=-1)
+    return features
+
+
+class CNNv2(nn.Module):
+    """CNN v2 with parametric static features."""
+
+    def __init__(self, kernels=[1, 3, 5], channels=[16, 8, 4]):
+        super().__init__()
+        self.kernels = kernels
+        self.channels = channels
+
+        # Input layer: 8D (7 features + bias) → channels[0]
+        self.layer0 = nn.Conv2d(8, channels[0], kernel_size=kernels[0],
+                                padding=kernels[0]//2, bias=False)
+
+        # Inner layers: (8 + C_prev) → C_next
+        in_ch_1 = 8 + channels[0]
+        self.layer1 = nn.Conv2d(in_ch_1, channels[1], kernel_size=kernels[1],
+                                padding=kernels[1]//2, bias=False)
+
+        # Output layer: (8 + C_last) → 4 (RGBA)
+        in_ch_2 = 8 + channels[1]
+        self.layer2 = nn.Conv2d(in_ch_2, 4, kernel_size=kernels[2],
+                                padding=kernels[2]//2, bias=False)
+
+    def forward(self, static_features):
+        """Forward pass with static feature concatenation.
+
+        Args:
+            static_features: (B, 8, H, W) static features
+
+        Returns:
+            (B, 4, H, W) RGBA output [0, 1]
+        """
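+        # Static features are re-injected at every layer (concatenated with
+        # the previous activation), mirroring the generated WGSL shaders,
+        # which sample the static-feature texture in each pass.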
+        # Layer 0: Use full 8D static features
+        x0 = self.layer0(static_features)
+        x0 = F.relu(x0)
+
+        # Layer 1: Concatenate static + layer0 output
+        x1_input = torch.cat([static_features, x0], dim=1)
+        x1 = self.layer1(x1_input)
+        x1 = F.relu(x1)
+
+        # Layer 2: Concatenate static + layer1 output
+        x2_input = torch.cat([static_features, x1], dim=1)
+        output = self.layer2(x2_input)
+
+        return torch.sigmoid(output)
+
+
+class ImagePairDataset(Dataset):
+    """Dataset of input/target image pairs."""
+
+    def __init__(self, input_dir, target_dir):
+        self.input_paths = sorted(Path(input_dir).glob("*.png"))
+        self.target_paths = sorted(Path(target_dir).glob("*.png"))
+        assert len(self.input_paths) == len(self.target_paths), \
+            f"Mismatch: {len(self.input_paths)} inputs vs {len(self.target_paths)} targets"
+
+    def __len__(self):
+        return len(self.input_paths)
+
+    def __getitem__(self, idx):
+        # Load images
+        input_img = np.array(Image.open(self.input_paths[idx]).convert('RGB')) / 255.0
+        target_img = np.array(Image.open(self.target_paths[idx]).convert('RGB')) / 255.0
+
+        # Compute static features
+        static_feat = compute_static_features(input_img.astype(np.float32))
+
+        # Convert to tensors (C, H, W)
+        static_feat = torch.from_numpy(static_feat).permute(2, 0, 1)
+        target = torch.from_numpy(target_img.astype(np.float32)).permute(2, 0, 1)
+
+        # Pad target to 4 channels (RGBA)
+        target = F.pad(target, (0, 0, 0, 0, 0, 1), value=1.0)
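+        # Note: F.pad pairs apply from the last dim backwards, i.e.
+        # (W_left, W_right, H_top, H_bottom, C_front, C_back), so
+        # (0, 0, 0, 0, 0, 1) appends one channel filled with 1.0
+        # (an opaque alpha plane).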
image pairs") + + # Create model + model = CNNv2(kernels=args.kernel_sizes, channels=args.channels).to(device) + total_params = sum(p.numel() for p in model.parameters()) + print(f"Model: {args.channels} channels, {args.kernel_sizes} kernels, {total_params} weights") + + # Optimizer and loss + optimizer = torch.optim.Adam(model.parameters(), lr=args.lr) + criterion = nn.MSELoss() + + # Training loop + print(f"\nTraining for {args.epochs} epochs...") + start_time = time.time() + + for epoch in range(1, args.epochs + 1): + model.train() + epoch_loss = 0.0 + + for static_feat, target in dataloader: + static_feat = static_feat.to(device) + target = target.to(device) + + optimizer.zero_grad() + output = model(static_feat) + loss = criterion(output, target) + loss.backward() + optimizer.step() + + epoch_loss += loss.item() + + avg_loss = epoch_loss / len(dataloader) + + if epoch % 100 == 0 or epoch == 1: + elapsed = time.time() - start_time + print(f"Epoch {epoch:4d}/{args.epochs} | Loss: {avg_loss:.6f} | Time: {elapsed:.1f}s") + + # Save checkpoint + if args.checkpoint_every > 0 and epoch % args.checkpoint_every == 0: + checkpoint_path = Path(args.checkpoint_dir) / f"checkpoint_epoch_{epoch}.pth" + checkpoint_path.parent.mkdir(parents=True, exist_ok=True) + torch.save({ + 'epoch': epoch, + 'model_state_dict': model.state_dict(), + 'optimizer_state_dict': optimizer.state_dict(), + 'loss': avg_loss, + 'config': { + 'kernels': args.kernel_sizes, + 'channels': args.channels, + 'features': ['R', 'G', 'B', 'D', 'uv.x', 'uv.y', 'sin10_x', 'bias'] + } + }, checkpoint_path) + print(f" → Saved checkpoint: {checkpoint_path}") + + print(f"\nTraining complete! Total time: {time.time() - start_time:.1f}s") + return model + + +def main(): + parser = argparse.ArgumentParser(description='Train CNN v2 with parametric static features') + parser.add_argument('--input', type=str, required=True, help='Input images directory') + parser.add_argument('--target', type=str, required=True, help='Target images directory') + parser.add_argument('--kernel-sizes', type=int, nargs=3, default=[1, 3, 5], + help='Kernel sizes for 3 layers (default: 1 3 5)') + parser.add_argument('--channels', type=int, nargs=3, default=[16, 8, 4], + help='Output channels for 3 layers (default: 16 8 4)') + parser.add_argument('--epochs', type=int, default=5000, help='Training epochs') + parser.add_argument('--batch-size', type=int, default=16, help='Batch size') + parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate') + parser.add_argument('--checkpoint-dir', type=str, default='checkpoints', + help='Checkpoint directory') + parser.add_argument('--checkpoint-every', type=int, default=1000, + help='Save checkpoint every N epochs (0 = disable)') + + args = parser.parse_args() + train(args) + + +if __name__ == '__main__': + main() -- cgit v1.2.3