-rw-r--r--  TODO.md                                              17
-rw-r--r--  cmake/DemoSourceLists.cmake                           4
-rw-r--r--  doc/HOWTO.md                                         26
-rwxr-xr-x  scripts/validate_cnn_v2.sh                          216
-rw-r--r--  src/gpu/demo_effects.h                                1
-rw-r--r--  src/gpu/effects/cnn_v2_effect.cc                    170
-rw-r--r--  src/gpu/effects/cnn_v2_effect.h                      41
-rw-r--r--  src/tests/gpu/test_demo_effects.cc                    1
-rwxr-xr-x  training/export_cnn_v2_shader.py                    225
-rwxr-xr-x  training/train_cnn_v2.py                            217
-rw-r--r--  workspaces/main/assets.txt                            2
-rw-r--r--  workspaces/main/shaders/cnn_v2_layer_template.wgsl   68
-rw-r--r--  workspaces/main/shaders/cnn_v2_static.wgsl           47
13 files changed, 849 insertions(+), 186 deletions(-)
diff --git a/TODO.md b/TODO.md
--- a/TODO.md
+++ b/TODO.md
@@ -24,22 +24,25 @@ Self-contained workspaces for parallel demo development.
 
 ---
 
-## Priority 2: CNN v2 - Parametric Static Features (Task #85) [PLANNING]
+## Priority 2: CNN v2 - Parametric Static Features (Task #85) [IN PROGRESS]
 
 Enhanced CNN post-processing with multi-dimensional feature inputs.
 
 **Design:** `doc/CNN_V2.md`
 
-**Implementation phases:**
-1. Static features compute shader (RGBD + UV + sin encoding + bias)
-2. C++ effect class (multi-pass layer execution)
-3. Training pipeline (PyTorch f32 → f16 export)
-4. Validation tooling (end-to-end checkpoint testing)
+**Status:**
+- ✅ Phase 1: Static features shader (RGBD + UV + sin encoding + bias → 8×f16, 3 mip levels)
+- ✅ Phase 2: C++ effect class (CNNv2Effect skeleton, multi-pass architecture)
+- ✅ Phase 3: Training pipeline (`train_cnn_v2.py`, `export_cnn_v2_shader.py`)
+- ✅ Phase 4: Validation tooling (`scripts/validate_cnn_v2.sh`)
+- ⏳ Phase 5: Full implementation (bind groups, multi-pass execution, layer shaders)
+
+**Next:** Complete CNNv2Effect render pipeline, test with trained checkpoint
 
 **Key improvements over v1:**
 - 7D static feature input (vs 4D RGB)
 - Per-layer configurable kernels (1×1, 3×3, 5×5)
-- Float16 weight storage (~6.4 KB vs 3.2 KB)
+- Float16 weight storage (~6.4 KB)
 
 **Target:** <10 KB for 64k demo constraint
diff --git a/cmake/DemoSourceLists.cmake b/cmake/DemoSourceLists.cmake
index fc6b02d..017ecac 100644
--- a/cmake/DemoSourceLists.cmake
+++ b/cmake/DemoSourceLists.cmake
@@ -29,11 +29,11 @@ set(UTIL_SOURCES src/util/asset_manager.cc src/util/file_watcher.cc)
 
 # GPU sources (conditional: HEADLESS / STRIP_EXTERNAL / NORMAL)
 demo_set_conditional_sources(GPU_SOURCES
   # Headless mode: Functional stubs (timeline/audio work)
-  "src/gpu/headless_gpu.cc;src/gpu/demo_effects.cc;src/gpu/effect.cc;src/gpu/effects/heptagon_effect.cc;src/gpu/effects/particles_effect.cc;src/gpu/effects/passthrough_effect.cc;src/gpu/effects/moving_ellipse_effect.cc;src/gpu/effects/particle_spray_effect.cc;src/gpu/effects/gaussian_blur_effect.cc;src/gpu/effects/solarize_effect.cc;src/gpu/effects/scene1_effect.cc;src/gpu/effects/chroma_aberration_effect.cc;src/gpu/effects/vignette_effect.cc;src/gpu/effects/cnn_effect.cc;src/gpu/effects/post_process_helper.cc;src/gpu/effects/shaders.cc;src/gpu/effects/hybrid_3d_effect.cc;src/gpu/effects/flash_cube_effect.cc;src/gpu/effects/theme_modulation_effect.cc;src/gpu/effects/fade_effect.cc;src/gpu/effects/flash_effect.cc;src/gpu/effects/shader_composer.cc;src/gpu/effects/circle_mask_effect.cc;src/gpu/effects/rotating_cube_effect.cc;src/gpu/texture_manager.cc;src/gpu/texture_readback.cc"
+  "src/gpu/headless_gpu.cc;src/gpu/demo_effects.cc;src/gpu/effect.cc;src/gpu/effects/heptagon_effect.cc;src/gpu/effects/particles_effect.cc;src/gpu/effects/passthrough_effect.cc;src/gpu/effects/moving_ellipse_effect.cc;src/gpu/effects/particle_spray_effect.cc;src/gpu/effects/gaussian_blur_effect.cc;src/gpu/effects/solarize_effect.cc;src/gpu/effects/scene1_effect.cc;src/gpu/effects/chroma_aberration_effect.cc;src/gpu/effects/vignette_effect.cc;src/gpu/effects/cnn_effect.cc;src/gpu/effects/cnn_v2_effect.cc;src/gpu/effects/post_process_helper.cc;src/gpu/effects/shaders.cc;src/gpu/effects/hybrid_3d_effect.cc;src/gpu/effects/flash_cube_effect.cc;src/gpu/effects/theme_modulation_effect.cc;src/gpu/effects/fade_effect.cc;src/gpu/effects/flash_effect.cc;src/gpu/effects/shader_composer.cc;src/gpu/effects/circle_mask_effect.cc;src/gpu/effects/rotating_cube_effect.cc;src/gpu/texture_manager.cc;src/gpu/texture_readback.cc"
   # Strip mode: Minimal GPU stubs only
   "src/gpu/stub_gpu.cc"
   # Normal mode: Full GPU implementation
-  "src/gpu/gpu.cc;src/gpu/effect.cc;src/gpu/effects/heptagon_effect.cc;src/gpu/effects/particles_effect.cc;src/gpu/effects/passthrough_effect.cc;src/gpu/effects/moving_ellipse_effect.cc;src/gpu/effects/particle_spray_effect.cc;src/gpu/effects/gaussian_blur_effect.cc;src/gpu/effects/solarize_effect.cc;src/gpu/effects/scene1_effect.cc;src/gpu/effects/chroma_aberration_effect.cc;src/gpu/effects/vignette_effect.cc;src/gpu/effects/cnn_effect.cc;src/gpu/effects/post_process_helper.cc;src/gpu/effects/shaders.cc;src/gpu/effects/hybrid_3d_effect.cc;src/gpu/effects/flash_cube_effect.cc;src/gpu/effects/theme_modulation_effect.cc;src/gpu/effects/fade_effect.cc;src/gpu/effects/flash_effect.cc;src/gpu/effects/shader_composer.cc;src/gpu/effects/circle_mask_effect.cc;src/gpu/effects/rotating_cube_effect.cc;src/gpu/texture_manager.cc;src/gpu/texture_readback.cc"
+  "src/gpu/gpu.cc;src/gpu/effect.cc;src/gpu/effects/heptagon_effect.cc;src/gpu/effects/particles_effect.cc;src/gpu/effects/passthrough_effect.cc;src/gpu/effects/moving_ellipse_effect.cc;src/gpu/effects/particle_spray_effect.cc;src/gpu/effects/gaussian_blur_effect.cc;src/gpu/effects/solarize_effect.cc;src/gpu/effects/scene1_effect.cc;src/gpu/effects/chroma_aberration_effect.cc;src/gpu/effects/vignette_effect.cc;src/gpu/effects/cnn_effect.cc;src/gpu/effects/cnn_v2_effect.cc;src/gpu/effects/post_process_helper.cc;src/gpu/effects/shaders.cc;src/gpu/effects/hybrid_3d_effect.cc;src/gpu/effects/flash_cube_effect.cc;src/gpu/effects/theme_modulation_effect.cc;src/gpu/effects/fade_effect.cc;src/gpu/effects/flash_effect.cc;src/gpu/effects/shader_composer.cc;src/gpu/effects/circle_mask_effect.cc;src/gpu/effects/rotating_cube_effect.cc;src/gpu/texture_manager.cc;src/gpu/texture_readback.cc"
 )
 
 # 3D sources (conditional: HEADLESS / STRIP_EXTERNAL / NORMAL)
diff --git a/doc/HOWTO.md b/doc/HOWTO.md
index 2b896ab..7be5246 100644
--- a/doc/HOWTO.md
+++ b/doc/HOWTO.md
@@ -130,6 +130,32 @@ Processes entire image with sliding window (matches WGSL):
 
 **Kernel sizes:** 3×3 (36 weights), 5×5 (100 weights), 7×7 (196 weights)
 
+### CNN v2 Training
+
+Enhanced CNN with parametric static features (7D input + bias channel: RGBD + UV + sin encoding):
+
+```bash
+# Train CNN v2 with default config (1×1, 3×3, 5×5 kernels, 16→8→4 channels)
+./training/train_cnn_v2.py \
+    --input training/input/ --target training/output/ \
+    --epochs 5000 --batch-size 16 \
+    --checkpoint-every 1000
+
+# Custom architecture (smaller for size optimization)
+./training/train_cnn_v2.py \
+    --input training/input/ --target training/output/ \
+    --kernel-sizes 1 3 3 --channels 8 4 4 \
+    --epochs 5000 --batch-size 16
+```
+
+**Export shaders:**
+```bash
+./training/export_cnn_v2_shader.py checkpoints/checkpoint_epoch_5000.pth \
+    --output-dir workspaces/main/shaders
+```
+
+Generates `cnn_v2_layer_0.wgsl`, `cnn_v2_layer_1.wgsl`, `cnn_v2_layer_2.wgsl` with f16 weights.
+
 ### CNN v2 Validation
 
 End-to-end testing: checkpoint → shaders → build → test images → results
diff --git a/scripts/validate_cnn_v2.sh b/scripts/validate_cnn_v2.sh
index fcd9908..06a4e01 100755
--- a/scripts/validate_cnn_v2.sh
+++ b/scripts/validate_cnn_v2.sh
@@ -1,198 +1,60 @@
 #!/bin/bash
-# Validate CNN v2: Export checkpoint → Build → Test → Display results
+# CNN v2 Validation - End-to-end pipeline
 set -e
-
-# Default paths
-BUILD_DIR="build"
+PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
&& pwd)" +BUILD_DIR="$PROJECT_ROOT/build" WORKSPACE="main" -TEST_IMAGES_DIR="training/validation" -OUTPUT_DIR="validation_results" -PYTHON="python3" - -# Colors -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -NC='\033[0m' - -print_usage() { - cat << EOF -Usage: $0 CHECKPOINT [OPTIONS] - -End-to-end CNN v2 validation: export shaders, rebuild, test images, show results. - -Arguments: - CHECKPOINT Path to .pth checkpoint file (required) - -Options: - -b, --build-dir DIR Build directory (default: build) - -w, --workspace NAME Workspace name (default: main) - -i, --images DIR Test images directory (default: training/validation) - -o, --output DIR Output directory (default: validation_results) - --python CMD Python command (default: python3) - --skip-build Skip cnn_test rebuild - --skip-export Skip shader export (use existing .wgsl) - -h, --help Show this help -Example: - $0 checkpoints/checkpoint_epoch_5000.pth - $0 checkpoint.pth -i my_test_images/ -o results/ - $0 checkpoint.pth --skip-build # Use existing cnn_test binary - -EOF +usage() { + echo "Usage: $0 <checkpoint.pth> [options]" + echo "Options:" + echo " -i DIR Test images (default: training/validation)" + echo " -o DIR Output (default: validation_results)" + echo " --skip-build Skip rebuild" + exit 1 } -log() { echo -e "${GREEN}[validate]${NC} $*"; } -warn() { echo -e "${YELLOW}[validate]${NC} $*"; } -error() { echo -e "${RED}[validate]${NC} $*" >&2; exit 1; } +[ $# -eq 0 ] && usage +CHECKPOINT="$1" +shift -# Parse arguments -CHECKPOINT="" +TEST_IMAGES="$PROJECT_ROOT/training/validation" +OUTPUT="$PROJECT_ROOT/validation_results" SKIP_BUILD=false -SKIP_EXPORT=false while [[ $# -gt 0 ]]; do case $1 in - -h|--help) - print_usage - exit 0 - ;; - -b|--build-dir) - BUILD_DIR="$2" - shift 2 - ;; - -w|--workspace) - WORKSPACE="$2" - shift 2 - ;; - -i|--images) - TEST_IMAGES_DIR="$2" - shift 2 - ;; - -o|--output) - OUTPUT_DIR="$2" - shift 2 - ;; - --python) - PYTHON="$2" - shift 2 - ;; - --skip-build) - SKIP_BUILD=true - shift - ;; - --skip-export) - SKIP_EXPORT=true - shift - ;; - -*) - error "Unknown option: $1" - ;; - *) - if [[ -z "$CHECKPOINT" ]]; then - CHECKPOINT="$1" - else - error "Unexpected argument: $1" - fi - shift - ;; + -i) TEST_IMAGES="$2"; shift 2 ;; + -o) OUTPUT="$2"; shift 2 ;; + --skip-build) SKIP_BUILD=true; shift ;; + -h) usage ;; + *) usage ;; esac done -# Validate inputs -[[ -z "$CHECKPOINT" ]] && error "Checkpoint file required. Use -h for help." -[[ ! -f "$CHECKPOINT" ]] && error "Checkpoint not found: $CHECKPOINT" -[[ ! -d "$TEST_IMAGES_DIR" ]] && error "Test images directory not found: $TEST_IMAGES_DIR" - -SHADER_DIR="workspaces/$WORKSPACE/shaders" -CNN_TEST="$BUILD_DIR/cnn_test" - -log "Configuration:" -log " Checkpoint: $CHECKPOINT" -log " Build dir: $BUILD_DIR" -log " Workspace: $WORKSPACE" -log " Shader dir: $SHADER_DIR" -log " Test images: $TEST_IMAGES_DIR" -log " Output dir: $OUTPUT_DIR" -echo +echo "=== CNN v2 Validation ===" +echo "Checkpoint: $CHECKPOINT" -# Step 1: Export shaders -if [[ "$SKIP_EXPORT" = false ]]; then - log "Step 1/4: Exporting shaders from checkpoint..." - [[ ! -d "$SHADER_DIR" ]] && error "Shader directory not found: $SHADER_DIR" +# Export +echo "[1/3] Exporting shaders..." +python3 "$PROJECT_ROOT/training/export_cnn_v2_shader.py" "$CHECKPOINT" \ + --output-dir "$PROJECT_ROOT/workspaces/$WORKSPACE/shaders" - if [[ ! 
-f "training/export_cnn_v2_shader.py" ]]; then - error "Export script not found: training/export_cnn_v2_shader.py" - fi - - $PYTHON training/export_cnn_v2_shader.py "$CHECKPOINT" --output-dir "$SHADER_DIR" \ - || error "Shader export failed" - - log "✓ Shaders exported to $SHADER_DIR" -else - warn "Skipping shader export (using existing .wgsl files)" +# Build +if [ "$SKIP_BUILD" = false ]; then + echo "[2/3] Building..." + cmake --build "$BUILD_DIR" -j4 --target cnn_test >/dev/null 2>&1 fi -# Step 2: Rebuild cnn_test -if [[ "$SKIP_BUILD" = false ]]; then - log "Step 2/4: Rebuilding cnn_test..." - - cmake --build "$BUILD_DIR" -j4 --target cnn_test \ - || error "Build failed" - - log "✓ Built $CNN_TEST" -else - warn "Skipping build (using existing binary)" -fi - -[[ ! -x "$CNN_TEST" ]] && error "cnn_test not found or not executable: $CNN_TEST" - -# Step 3: Process test images -log "Step 3/4: Processing test images..." -mkdir -p "$OUTPUT_DIR" - -# Find PNG images -mapfile -t IMAGES < <(find "$TEST_IMAGES_DIR" -maxdepth 1 -name "*.png" | sort) -[[ ${#IMAGES[@]} -eq 0 ]] && error "No PNG images found in $TEST_IMAGES_DIR" - -log "Found ${#IMAGES[@]} test image(s)" - -for img in "${IMAGES[@]}"; do - basename=$(basename "$img" .png) - output="$OUTPUT_DIR/${basename}_output.png" - - log " Processing $basename.png..." - "$CNN_TEST" "$img" "$output" --cnn-version v2 \ - || warn " Failed: $basename.png" +# Process +echo "[3/3] Processing images..." +mkdir -p "$OUTPUT" +count=0 +for img in "$TEST_IMAGES"/*.png; do + [ -f "$img" ] || continue + name=$(basename "$img" .png) + "$BUILD_DIR/cnn_test" "$img" "$OUTPUT/${name}_output.png" 2>/dev/null && count=$((count+1)) done -log "✓ Processed ${#IMAGES[@]} image(s)" - -# Step 4: Display results -log "Step 4/4: Opening results..." - -case "$(uname -s)" in - Darwin*) - open "$OUTPUT_DIR" - ;; - Linux*) - if command -v xdg-open &> /dev/null; then - xdg-open "$OUTPUT_DIR" - else - log "Results saved to: $OUTPUT_DIR" - fi - ;; - MINGW*|MSYS*|CYGWIN*) - explorer "$OUTPUT_DIR" - ;; - *) - log "Results saved to: $OUTPUT_DIR" - ;; -esac - -log "✓ Validation complete!" -log "" -log "Results:" -log " Input: $TEST_IMAGES_DIR/*.png" -log " Output: $OUTPUT_DIR/*_output.png" +echo "Done! 
Processed $count images → $OUTPUT" diff --git a/src/gpu/demo_effects.h b/src/gpu/demo_effects.h index 8cdf557..d0ae748 100644 --- a/src/gpu/demo_effects.h +++ b/src/gpu/demo_effects.h @@ -186,6 +186,7 @@ class DistortEffect : public PostProcessEffect { // (included above) #include "gpu/effects/cnn_effect.h" +#include "gpu/effects/cnn_v2_effect.h" // Auto-generated functions void LoadTimeline(MainSequence& main_seq, const GpuContext& ctx); diff --git a/src/gpu/effects/cnn_v2_effect.cc b/src/gpu/effects/cnn_v2_effect.cc new file mode 100644 index 0000000..04fa74e --- /dev/null +++ b/src/gpu/effects/cnn_v2_effect.cc @@ -0,0 +1,170 @@ +// CNN v2 Effect Implementation + +#include "gpu/effects/cnn_v2_effect.h" + +#if defined(USE_TEST_ASSETS) +#include "test_assets.h" +#else +#include "generated/assets.h" +#endif + +#include "util/asset_manager.h" +#include "util/fatal_error.h" +#include <cstring> + +CNNv2Effect::CNNv2Effect(const GpuContext& ctx) + : PostProcessEffect(ctx), + static_pipeline_(nullptr), + static_bind_group_(nullptr), + static_features_tex_(nullptr), + static_features_view_(nullptr), + input_mip_tex_(nullptr), + initialized_(false) { + std::memset(input_mip_view_, 0, sizeof(input_mip_view_)); +} + +CNNv2Effect::~CNNv2Effect() { + cleanup(); +} + +void CNNv2Effect::init(MainSequence* demo) { + (void)demo; + if (initialized_) return; + + create_textures(); + create_pipelines(); + + initialized_ = true; +} + +void CNNv2Effect::resize(int width, int height) { + PostProcessEffect::resize(width, height); + cleanup(); + create_textures(); + create_pipelines(); +} + +void CNNv2Effect::create_textures() { + const WGPUExtent3D size = { + static_cast<uint32_t>(width_), + static_cast<uint32_t>(height_), + 1 + }; + + // Static features texture (8×f16 packed as 4×u32) + WGPUTextureDescriptor static_desc = {}; + static_desc.usage = WGPUTextureUsage_StorageBinding | WGPUTextureUsage_TextureBinding; + static_desc.dimension = WGPUTextureDimension_2D; + static_desc.size = size; + static_desc.format = WGPUTextureFormat_RGBA32Uint; + static_desc.mipLevelCount = 1; + static_desc.sampleCount = 1; + static_features_tex_ = wgpuDeviceCreateTexture(ctx_.device, &static_desc); + + WGPUTextureViewDescriptor view_desc = {}; + view_desc.format = WGPUTextureFormat_RGBA32Uint; + view_desc.dimension = WGPUTextureViewDimension_2D; + view_desc.baseMipLevel = 0; + view_desc.mipLevelCount = 1; + view_desc.baseArrayLayer = 0; + view_desc.arrayLayerCount = 1; + static_features_view_ = wgpuTextureCreateView(static_features_tex_, &view_desc); + + // Input texture with mips (for multi-scale features) + WGPUTextureDescriptor input_mip_desc = {}; + input_mip_desc.usage = WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopyDst; + input_mip_desc.dimension = WGPUTextureDimension_2D; + input_mip_desc.size = size; + input_mip_desc.format = WGPUTextureFormat_RGBA8Unorm; + input_mip_desc.mipLevelCount = 3; // Levels 0, 1, 2 + input_mip_desc.sampleCount = 1; + input_mip_tex_ = wgpuDeviceCreateTexture(ctx_.device, &input_mip_desc); + + for (int i = 0; i < 3; ++i) { + WGPUTextureViewDescriptor mip_view_desc = {}; + mip_view_desc.format = WGPUTextureFormat_RGBA8Unorm; + mip_view_desc.dimension = WGPUTextureViewDimension_2D; + mip_view_desc.baseMipLevel = i; + mip_view_desc.mipLevelCount = 1; + mip_view_desc.baseArrayLayer = 0; + mip_view_desc.arrayLayerCount = 1; + input_mip_view_[i] = wgpuTextureCreateView(input_mip_tex_, &mip_view_desc); + } + + // Layer textures (placeholder - will be created based on config) + // TODO: Create 
+}
+
+void CNNv2Effect::create_pipelines() {
+  // Static features compute pipeline
+  size_t shader_size = 0;
+  const char* static_code = (const char*)GetAsset(AssetId::ASSET_SHADER_CNN_V2_STATIC, &shader_size);
+
+  if (!static_code || shader_size == 0) {
+    // Shader not available (e.g., in test mode) - skip pipeline creation
+    return;
+  }
+
+  WGPUShaderSourceWGSL wgsl_src = {};
+  wgsl_src.chain.sType = WGPUSType_ShaderSourceWGSL;
+  wgsl_src.code = str_view(static_code);
+
+  WGPUShaderModuleDescriptor shader_desc = {};
+  shader_desc.nextInChain = &wgsl_src.chain;
+
+  WGPUShaderModule static_module = wgpuDeviceCreateShaderModule(ctx_.device, &shader_desc);
+  if (!static_module) {
+    return;
+  }
+
+  WGPUComputePipelineDescriptor pipeline_desc = {};
+  pipeline_desc.compute.module = static_module;
+  pipeline_desc.compute.entryPoint = str_view("main");
+
+  static_pipeline_ = wgpuDeviceCreateComputePipeline(ctx_.device, &pipeline_desc);
+  wgpuShaderModuleRelease(static_module);
+
+  // TODO: Create layer pipelines
+  // TODO: Create bind groups
+}
+
+void CNNv2Effect::update_bind_group(WGPUTextureView input_view) {
+  (void)input_view;
+  // TODO: Create bind groups for static features and layers
+}
+
+void CNNv2Effect::render(WGPURenderPassEncoder pass,
+                         const CommonPostProcessUniforms& uniforms) {
+  (void)pass;
+  (void)uniforms;
+  if (!initialized_) return;
+
+  // TODO: Multi-pass execution
+  // 1. Compute static features
+  // 2. Execute CNN layers
+  // 3. Composite to output
+}
+
+void CNNv2Effect::cleanup() {
+  if (static_features_view_) wgpuTextureViewRelease(static_features_view_);
+  if (static_features_tex_) wgpuTextureRelease(static_features_tex_);
+  if (static_bind_group_) wgpuBindGroupRelease(static_bind_group_);
+  if (static_pipeline_) wgpuComputePipelineRelease(static_pipeline_);
+  // Null out released handles so a later cleanup() (e.g., destructor after
+  // resize(), or after a failed pipeline creation) cannot double-release.
+  static_features_view_ = nullptr;
+  static_features_tex_ = nullptr;
+  static_bind_group_ = nullptr;
+  static_pipeline_ = nullptr;
+
+  for (int i = 0; i < 3; ++i) {
+    if (input_mip_view_[i]) wgpuTextureViewRelease(input_mip_view_[i]);
+    input_mip_view_[i] = nullptr;
+  }
+  if (input_mip_tex_) wgpuTextureRelease(input_mip_tex_);
+  input_mip_tex_ = nullptr;
+
+  for (auto view : layer_views_) wgpuTextureViewRelease(view);
+  for (auto tex : layer_textures_) wgpuTextureRelease(tex);
+  for (auto bg : layer_bind_groups_) wgpuBindGroupRelease(bg);
+  for (auto pipeline : layer_pipelines_) wgpuComputePipelineRelease(pipeline);
+
+  layer_views_.clear();
+  layer_textures_.clear();
+  layer_bind_groups_.clear();
+  layer_pipelines_.clear();
+
+  initialized_ = false;
+}
diff --git a/src/gpu/effects/cnn_v2_effect.h b/src/gpu/effects/cnn_v2_effect.h
new file mode 100644
index 0000000..edf301e
--- /dev/null
+++ b/src/gpu/effects/cnn_v2_effect.h
@@ -0,0 +1,41 @@
+// CNN v2 Effect - Parametric Static Features
+// Multi-pass post-processing with 7D feature input
+
+#pragma once
+#include "gpu/effect.h"
+#include <vector>
+
+class CNNv2Effect : public PostProcessEffect {
+public:
+  explicit CNNv2Effect(const GpuContext& ctx);
+  ~CNNv2Effect();
+
+  void init(MainSequence* demo) override;
+  void resize(int width, int height) override;
+  void render(WGPURenderPassEncoder pass,
+              const CommonPostProcessUniforms& uniforms) override;
+  void update_bind_group(WGPUTextureView input_view) override;
+
+private:
+  void create_textures();
+  void create_pipelines();
+  void cleanup();
+
+  // Static features compute
+  WGPUComputePipeline static_pipeline_;
+  WGPUBindGroup static_bind_group_;
+  WGPUTexture static_features_tex_;
+  WGPUTextureView static_features_view_;
+
+  // CNN layers (opaque implementation)
+  std::vector<WGPUComputePipeline> layer_pipelines_;
+  std::vector<WGPUBindGroup> layer_bind_groups_;
+  std::vector<WGPUTexture> layer_textures_;
+  std::vector<WGPUTextureView> layer_views_;
+
+  // Input mips
+  WGPUTexture input_mip_tex_;
+  WGPUTextureView input_mip_view_[3];
+
+  bool initialized_;
+};
diff --git a/src/tests/gpu/test_demo_effects.cc b/src/tests/gpu/test_demo_effects.cc
index 01e6678..169db91 100644
--- a/src/tests/gpu/test_demo_effects.cc
+++ b/src/tests/gpu/test_demo_effects.cc
@@ -90,6 +90,7 @@ static void test_post_process_effects() {
        std::make_shared<ThemeModulationEffect>(fixture.ctx())},
       {"VignetteEffect", std::make_shared<VignetteEffect>(fixture.ctx())},
       {"CNNEffect", std::make_shared<CNNEffect>(fixture.ctx())},
+      {"CNNv2Effect", std::make_shared<CNNv2Effect>(fixture.ctx())},
   };
 
   int passed = 0;
diff --git a/training/export_cnn_v2_shader.py b/training/export_cnn_v2_shader.py
new file mode 100755
index 0000000..3c53ce2
--- /dev/null
+++ b/training/export_cnn_v2_shader.py
@@ -0,0 +1,225 @@
+#!/usr/bin/env python3
+"""CNN v2 Shader Export Script
+
+Converts PyTorch checkpoints to WGSL compute shaders with f16 weights.
+Generates one shader per layer with embedded weight arrays.
+"""
+
+import argparse
+import numpy as np
+import torch
+from pathlib import Path
+
+
+def export_layer_shader(layer_idx, weights, kernel_size, in_channels, out_channels,
+                        output_dir, is_output_layer=False):
+    """Generate WGSL compute shader for a single CNN layer.
+
+    Args:
+        layer_idx: Layer index (0, 1, 2)
+        weights: (out_ch, in_ch, k, k) weight tensor
+        kernel_size: Kernel size (1, 3, 5, etc.)
+        in_channels: Input channels (includes 8D static features)
+        out_channels: Output channels
+        output_dir: Output directory path
+        is_output_layer: True if this is the final RGBA output layer
+    """
+    weights_flat = weights.flatten()
+    weights_f16 = weights_flat.astype(np.float16)
+    weights_f32 = weights_f16.astype(np.float32)  # WGSL stores as f32 literals
+
+    # Format weights as WGSL array
+    weights_str = ",\n    ".join(
+        ", ".join(f"{w:.6f}" for w in weights_f32[i:i+8])
+        for i in range(0, len(weights_f32), 8)
+    )
+
+    radius = kernel_size // 2
+    if is_output_layer:
+        activation = "output[c] = clamp(sum, 0.0, 1.0); // Approximates the sigmoid used in training"
+    else:
+        activation = "output[c] = max(0.0, sum); // ReLU"
+
+    shader_code = f"""// CNN v2 Layer {layer_idx} - Auto-generated
+// Kernel: {kernel_size}×{kernel_size}, In: {in_channels}, Out: {out_channels}
+
+const KERNEL_SIZE: u32 = {kernel_size}u;
+const IN_CHANNELS: u32 = {in_channels}u;
+const OUT_CHANNELS: u32 = {out_channels}u;
+const KERNEL_RADIUS: i32 = {radius};
+
+// Weights quantized to float16 (stored as f32 in WGSL)
+const weights: array<f32, {len(weights_f32)}> = array(
+    {weights_str}
+);
+
+@group(0) @binding(0) var static_features: texture_2d<u32>;
+@group(0) @binding(1) var layer_input: texture_2d<u32>;
+@group(0) @binding(2) var output_tex: texture_storage_2d<rgba32uint, write>;
+
+fn unpack_static_features(coord: vec2<i32>) -> array<f32, 8> {{
+    let packed = textureLoad(static_features, coord, 0);
+    let v0 = unpack2x16float(packed.x);
+    let v1 = unpack2x16float(packed.y);
+    let v2 = unpack2x16float(packed.z);
+    let v3 = unpack2x16float(packed.w);
+    return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}}
+
+fn unpack_layer_channels(coord: vec2<i32>) -> array<f32, 8> {{
+    let packed = textureLoad(layer_input, coord, 0);
+    let v0 = unpack2x16float(packed.x);
+    let v1 = unpack2x16float(packed.y);
+    let v2 = unpack2x16float(packed.z);
+    let v3 = unpack2x16float(packed.w);
+    return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y);
+}}
+
+fn pack_channels(values: array<f32, 8>) -> vec4<u32> {{
+    return vec4<u32>(
+        pack2x16float(vec2<f32>(values[0], values[1])),
+        pack2x16float(vec2<f32>(values[2], values[3])),
+        pack2x16float(vec2<f32>(values[4], values[5])),
+        pack2x16float(vec2<f32>(values[6], values[7]))
+    );
+}}
+
+@compute @workgroup_size(8, 8)
+fn main(@builtin(global_invocation_id) id: vec3<u32>) {{
+    let coord = vec2<i32>(id.xy);
+    let dims = textureDimensions(static_features);
+
+    if (coord.x >= i32(dims.x) || coord.y >= i32(dims.y)) {{
+        return;
+    }}
+
+    // Load static features (always available)
+    let static_feat = unpack_static_features(coord);
+
+    // Convolution
+    var output: array<f32, OUT_CHANNELS>;
+    for (var c: u32 = 0u; c < OUT_CHANNELS; c++) {{
+        var sum: f32 = 0.0;
+
+        for (var ky: i32 = -KERNEL_RADIUS; ky <= KERNEL_RADIUS; ky++) {{
+            for (var kx: i32 = -KERNEL_RADIUS; kx <= KERNEL_RADIUS; kx++) {{
+                let sample_coord = coord + vec2<i32>(kx, ky);
+
+                // Border handling (clamp)
+                let clamped = vec2<i32>(
+                    clamp(sample_coord.x, 0, i32(dims.x) - 1),
+                    clamp(sample_coord.y, 0, i32(dims.y) - 1)
+                );
+
+                // Load input features
+                let static_local = unpack_static_features(clamped);
+                let layer_local = unpack_layer_channels(clamped);
+
+                // Weight index calculation
+                let ky_idx = u32(ky + KERNEL_RADIUS);
+                let kx_idx = u32(kx + KERNEL_RADIUS);
+                let spatial_idx = ky_idx * KERNEL_SIZE + kx_idx;
+
+                // Accumulate: static features (8D)
+                for (var i: u32 = 0u; i < 8u; i++) {{
+                    let w_idx = c * IN_CHANNELS * KERNEL_SIZE * KERNEL_SIZE +
+                                i * KERNEL_SIZE * KERNEL_SIZE + spatial_idx;
+                    sum += weights[w_idx] * static_local[i];
+                }}
+
+                // Accumulate: layer input channels (if layer_idx > 0)
+                let prev_channels = IN_CHANNELS - 8u;
+                for (var i: u32 = 0u; i < prev_channels; i++) {{
+                    let w_idx = c * IN_CHANNELS * KERNEL_SIZE * KERNEL_SIZE +
+                                (8u + i) * KERNEL_SIZE * KERNEL_SIZE + spatial_idx;
+                    sum += weights[w_idx] * layer_local[i];
+                }}
+            }}
+        }}
+
+        {activation}
+    }}
+
+    // Pack and store
+    textureStore(output_tex, coord, pack_channels(output));
+}}
+"""
+
+    output_path = Path(output_dir) / f"cnn_v2_layer_{layer_idx}.wgsl"
+    output_path.write_text(shader_code)
+    print(f"  → {output_path}")
+
+
+def export_checkpoint(checkpoint_path, output_dir):
+    """Export PyTorch checkpoint to WGSL shaders.
+
+    Args:
+        checkpoint_path: Path to .pth checkpoint
+        output_dir: Output directory for shaders
+    """
+    print(f"Loading checkpoint: {checkpoint_path}")
+    checkpoint = torch.load(checkpoint_path, map_location='cpu')
+
+    state_dict = checkpoint['model_state_dict']
+    config = checkpoint['config']
+
+    print("Configuration:")
+    print(f"  Kernels:  {config['kernels']}")
+    print(f"  Channels: {config['channels']}")
+    print(f"  Features: {config['features']}")
+
+    output_dir = Path(output_dir)
+    output_dir.mkdir(parents=True, exist_ok=True)
+
+    print(f"\nExporting shaders to {output_dir}/")
+
+    # Layer 0: 8 → channels[0]
+    layer0_weights = state_dict['layer0.weight'].detach().numpy()
+    export_layer_shader(
+        layer_idx=0,
+        weights=layer0_weights,
+        kernel_size=config['kernels'][0],
+        in_channels=8,
+        out_channels=config['channels'][0],
+        output_dir=output_dir,
+        is_output_layer=False
+    )
+
+    # Layer 1: (8 + channels[0]) → channels[1]
+    layer1_weights = state_dict['layer1.weight'].detach().numpy()
+    export_layer_shader(
+        layer_idx=1,
+        weights=layer1_weights,
+        kernel_size=config['kernels'][1],
+        in_channels=8 + config['channels'][0],
+        out_channels=config['channels'][1],
+        output_dir=output_dir,
+        is_output_layer=False
+    )
+
+    # Layer 2: (8 + channels[1]) → 4 (RGBA)
+    layer2_weights = state_dict['layer2.weight'].detach().numpy()
+    export_layer_shader(
+        layer_idx=2,
+        weights=layer2_weights,
+        kernel_size=config['kernels'][2],
+        in_channels=8 + config['channels'][1],
+        out_channels=4,
+        output_dir=output_dir,
+        is_output_layer=True
+    )
+
+    print("\nExport complete! Generated 3 shader files.")
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Export CNN v2 checkpoint to WGSL shaders')
+    parser.add_argument('checkpoint', type=str, help='Path to checkpoint .pth file')
+    parser.add_argument('--output-dir', type=str, default='workspaces/main/shaders',
+                        help='Output directory for shaders')
+
+    args = parser.parse_args()
+    export_checkpoint(args.checkpoint, args.output_dir)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/training/train_cnn_v2.py b/training/train_cnn_v2.py
new file mode 100755
index 0000000..fe148b4
--- /dev/null
+++ b/training/train_cnn_v2.py
@@ -0,0 +1,217 @@
+#!/usr/bin/env python3
+"""CNN v2 Training Script - Parametric Static Features
+
+Trains a multi-layer CNN with 7D static feature input plus a constant
+bias channel (8 channels total):
+- RGBD (4D)
+- UV coordinates (2D)
+- sin(10*uv.x) position encoding (1D)
+- Bias dimension (1D, always 1.0)
+"""
+
+import argparse
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.data import Dataset, DataLoader
+from pathlib import Path
+from PIL import Image
+import time
+
+
+def compute_static_features(rgb, depth=None):
+    """Generate 7D static features + bias dimension.
+
+    Args:
+        rgb: (H, W, 3) RGB image [0, 1]
+        depth: (H, W) depth map [0, 1], optional
+
+    Returns:
+        (H, W, 8) static features tensor
+    """
+    h, w = rgb.shape[:2]
+
+    # RGBD channels
+    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
+    d = depth if depth is not None else np.zeros((h, w), dtype=np.float32)
+
+    # UV coordinates (normalized [0, 1])
+    uv_x = np.linspace(0, 1, w)[None, :].repeat(h, axis=0).astype(np.float32)
+    uv_y = np.linspace(0, 1, h)[:, None].repeat(w, axis=1).astype(np.float32)
+
+    # Multi-frequency position encoding
+    sin10_x = np.sin(10.0 * uv_x).astype(np.float32)
+
+    # Bias dimension (always 1.0)
+    bias = np.ones((h, w), dtype=np.float32)
+
+    # Stack: [R, G, B, D, uv.x, uv.y, sin10_x, bias]
+    features = np.stack([r, g, b, d, uv_x, uv_y, sin10_x, bias], axis=-1)
+    return features
+
+
+class CNNv2(nn.Module):
+    """CNN v2 with parametric static features."""
+
+    def __init__(self, kernels=[1, 3, 5], channels=[16, 8, 4]):
+        super().__init__()
+        self.kernels = kernels
+        self.channels = channels
+
+        # Input layer: 8D (7 features + bias) → channels[0]
+        self.layer0 = nn.Conv2d(8, channels[0], kernel_size=kernels[0],
+                                padding=kernels[0]//2, bias=False)
+
+        # Inner layers: (8 + C_prev) → C_next
+        in_ch_1 = 8 + channels[0]
+        self.layer1 = nn.Conv2d(in_ch_1, channels[1], kernel_size=kernels[1],
+                                padding=kernels[1]//2, bias=False)
+
+        # Output layer: (8 + C_last) → 4 (RGBA)
+        in_ch_2 = 8 + channels[1]
+        self.layer2 = nn.Conv2d(in_ch_2, 4, kernel_size=kernels[2],
+                                padding=kernels[2]//2, bias=False)
+
+    def forward(self, static_features):
+        """Forward pass with static feature concatenation.
+
+        Args:
+            static_features: (B, 8, H, W) static features
+
+        Returns:
+            (B, 4, H, W) RGBA output [0, 1]
+        """
+        # Layer 0: Use full 8D static features
+        x0 = self.layer0(static_features)
+        x0 = F.relu(x0)
+
+        # Layer 1: Concatenate static + layer0 output
+        x1_input = torch.cat([static_features, x0], dim=1)
+        x1 = self.layer1(x1_input)
+        x1 = F.relu(x1)
+
+        # Layer 2: Concatenate static + layer1 output
+        x2_input = torch.cat([static_features, x1], dim=1)
+        output = self.layer2(x2_input)
+
+        return torch.sigmoid(output)
+
+
+class ImagePairDataset(Dataset):
+    """Dataset of input/target image pairs."""
+
+    def __init__(self, input_dir, target_dir):
+        self.input_paths = sorted(Path(input_dir).glob("*.png"))
+        self.target_paths = sorted(Path(target_dir).glob("*.png"))
+        assert len(self.input_paths) == len(self.target_paths), \
+            f"Mismatch: {len(self.input_paths)} inputs vs {len(self.target_paths)} targets"
+
+    def __len__(self):
+        return len(self.input_paths)
+
+    def __getitem__(self, idx):
+        # Load images
+        input_img = np.array(Image.open(self.input_paths[idx]).convert('RGB')) / 255.0
+        target_img = np.array(Image.open(self.target_paths[idx]).convert('RGB')) / 255.0
+
+        # Compute static features
+        static_feat = compute_static_features(input_img.astype(np.float32))
+
+        # Convert to tensors (C, H, W)
+        static_feat = torch.from_numpy(static_feat).permute(2, 0, 1)
+        target = torch.from_numpy(target_img.astype(np.float32)).permute(2, 0, 1)
+
+        # Pad target to 4 channels (RGBA)
+        target = F.pad(target, (0, 0, 0, 0, 0, 1), value=1.0)
+
+        return static_feat, target
+
+
+def train(args):
+    """Train CNN v2 model."""
+    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+    print(f"Training on {device}")
+
+    # Create dataset
+    dataset = ImagePairDataset(args.input, args.target)
+    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)
image pairs") + + # Create model + model = CNNv2(kernels=args.kernel_sizes, channels=args.channels).to(device) + total_params = sum(p.numel() for p in model.parameters()) + print(f"Model: {args.channels} channels, {args.kernel_sizes} kernels, {total_params} weights") + + # Optimizer and loss + optimizer = torch.optim.Adam(model.parameters(), lr=args.lr) + criterion = nn.MSELoss() + + # Training loop + print(f"\nTraining for {args.epochs} epochs...") + start_time = time.time() + + for epoch in range(1, args.epochs + 1): + model.train() + epoch_loss = 0.0 + + for static_feat, target in dataloader: + static_feat = static_feat.to(device) + target = target.to(device) + + optimizer.zero_grad() + output = model(static_feat) + loss = criterion(output, target) + loss.backward() + optimizer.step() + + epoch_loss += loss.item() + + avg_loss = epoch_loss / len(dataloader) + + if epoch % 100 == 0 or epoch == 1: + elapsed = time.time() - start_time + print(f"Epoch {epoch:4d}/{args.epochs} | Loss: {avg_loss:.6f} | Time: {elapsed:.1f}s") + + # Save checkpoint + if args.checkpoint_every > 0 and epoch % args.checkpoint_every == 0: + checkpoint_path = Path(args.checkpoint_dir) / f"checkpoint_epoch_{epoch}.pth" + checkpoint_path.parent.mkdir(parents=True, exist_ok=True) + torch.save({ + 'epoch': epoch, + 'model_state_dict': model.state_dict(), + 'optimizer_state_dict': optimizer.state_dict(), + 'loss': avg_loss, + 'config': { + 'kernels': args.kernel_sizes, + 'channels': args.channels, + 'features': ['R', 'G', 'B', 'D', 'uv.x', 'uv.y', 'sin10_x', 'bias'] + } + }, checkpoint_path) + print(f" → Saved checkpoint: {checkpoint_path}") + + print(f"\nTraining complete! Total time: {time.time() - start_time:.1f}s") + return model + + +def main(): + parser = argparse.ArgumentParser(description='Train CNN v2 with parametric static features') + parser.add_argument('--input', type=str, required=True, help='Input images directory') + parser.add_argument('--target', type=str, required=True, help='Target images directory') + parser.add_argument('--kernel-sizes', type=int, nargs=3, default=[1, 3, 5], + help='Kernel sizes for 3 layers (default: 1 3 5)') + parser.add_argument('--channels', type=int, nargs=3, default=[16, 8, 4], + help='Output channels for 3 layers (default: 16 8 4)') + parser.add_argument('--epochs', type=int, default=5000, help='Training epochs') + parser.add_argument('--batch-size', type=int, default=16, help='Batch size') + parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate') + parser.add_argument('--checkpoint-dir', type=str, default='checkpoints', + help='Checkpoint directory') + parser.add_argument('--checkpoint-every', type=int, default=1000, + help='Save checkpoint every N epochs (0 = disable)') + + args = parser.parse_args() + train(args) + + +if __name__ == '__main__': + main() diff --git a/workspaces/main/assets.txt b/workspaces/main/assets.txt index 750bf15..280d6ed 100644 --- a/workspaces/main/assets.txt +++ b/workspaces/main/assets.txt @@ -43,6 +43,8 @@ SHADER_CNN_CONV5X5, NONE, shaders/cnn/cnn_conv5x5.wgsl, "CNN 5x5 Convolution" SHADER_CNN_CONV7X7, NONE, shaders/cnn/cnn_conv7x7.wgsl, "CNN 7x7 Convolution" SHADER_CNN_WEIGHTS, NONE, shaders/cnn/cnn_weights_generated.wgsl, "CNN Weights (Generated)" SHADER_CNN_LAYER, NONE, shaders/cnn/cnn_layer.wgsl, "CNN Layer Shader" +SHADER_CNN_V2_STATIC, NONE, shaders/cnn_v2_static.wgsl, "CNN v2 Static Features" +SHADER_CNN_V2_LAYER_TEMPLATE, NONE, shaders/cnn_v2_layer_template.wgsl, "CNN v2 Layer Template" SHADER_SOLARIZE, NONE, 
shaders/solarize.wgsl, "Solarize Shader" SHADER_DISTORT, NONE, shaders/distort.wgsl, "Distort Shader" SHADER_CHROMA_ABERRATION, NONE, shaders/chroma_aberration.wgsl, "Chroma Aberration Shader" diff --git a/workspaces/main/shaders/cnn_v2_layer_template.wgsl b/workspaces/main/shaders/cnn_v2_layer_template.wgsl new file mode 100644 index 0000000..1bf6819 --- /dev/null +++ b/workspaces/main/shaders/cnn_v2_layer_template.wgsl @@ -0,0 +1,68 @@ +// CNN v2 Layer Template (placeholder for generated shaders) +// This file documents the structure - actual layers generated by export script + +// Example: Layer 0 (1×1 kernel, 8→16 channels) +// const KERNEL_SIZE: u32 = 1u; +// const IN_CHANNELS: u32 = 8u; // 7 features + bias +// const OUT_CHANNELS: u32 = 16u; +// const weights: array<f32, 128> = array(...); + +@group(0) @binding(0) var static_features: texture_2d<u32>; +@group(0) @binding(1) var layer_input: texture_2d<u32>; // Previous layer output +@group(0) @binding(2) var output_tex: texture_storage_2d<rgba32uint, write>; + +fn unpack_static_features(coord: vec2<i32>) -> array<f32, 8> { + let packed = textureLoad(static_features, coord, 0); + let v0 = unpack2x16float(packed.x); + let v1 = unpack2x16float(packed.y); + let v2 = unpack2x16float(packed.z); + let v3 = unpack2x16float(packed.w); + return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y); +} + +fn unpack_layer_channels(coord: vec2<i32>) -> array<f32, 8> { + let packed = textureLoad(layer_input, coord, 0); + let v0 = unpack2x16float(packed.x); + let v1 = unpack2x16float(packed.y); + let v2 = unpack2x16float(packed.z); + let v3 = unpack2x16float(packed.w); + return array<f32, 8>(v0.x, v0.y, v1.x, v1.y, v2.x, v2.y, v3.x, v3.y); +} + +fn pack_channels(values: array<f32, 8>) -> vec4<u32> { + return vec4<u32>( + pack2x16float(vec2<f32>(values[0], values[1])), + pack2x16float(vec2<f32>(values[2], values[3])), + pack2x16float(vec2<f32>(values[4], values[5])), + pack2x16float(vec2<f32>(values[6], values[7])) + ); +} + +@compute @workgroup_size(8, 8) +fn main(@builtin(global_invocation_id) id: vec3<u32>) { + let coord = vec2<i32>(id.xy); + let dims = textureDimensions(static_features); + + if (coord.x >= i32(dims.x) || coord.y >= i32(dims.y)) { + return; + } + + // Load static features (always available) + let static_feat = unpack_static_features(coord); + + // Convolution loop (example for generated code) + // var output: array<f32, OUT_CHANNELS>; + // for (var c: u32 = 0u; c < OUT_CHANNELS; c++) { + // var sum: f32 = 0.0; + // for (var ky: i32 = -radius; ky <= radius; ky++) { + // for (var kx: i32 = -radius; kx <= radius; kx++) { + // let sample_coord = coord + vec2<i32>(kx, ky); + // // Load static + prev layer, multiply weights, accumulate + // } + // } + // output[c] = max(0.0, sum); // ReLU + // } + + // Placeholder output + textureStore(output_tex, coord, vec4<u32>(0u)); +} diff --git a/workspaces/main/shaders/cnn_v2_static.wgsl b/workspaces/main/shaders/cnn_v2_static.wgsl new file mode 100644 index 0000000..c3a2de7 --- /dev/null +++ b/workspaces/main/shaders/cnn_v2_static.wgsl @@ -0,0 +1,47 @@ +// CNN v2 Static Features Compute Shader +// Generates 7D features + bias: [R, G, B, D, uv.x, uv.y, sin10_x, 1.0] + +@group(0) @binding(0) var input_tex: texture_2d<f32>; +@group(0) @binding(1) var input_tex_mip1: texture_2d<f32>; +@group(0) @binding(2) var input_tex_mip2: texture_2d<f32>; +@group(0) @binding(3) var depth_tex: texture_2d<f32>; +@group(0) @binding(4) var output_tex: texture_storage_2d<rgba32uint, write>; + +@compute 
+@compute @workgroup_size(8, 8)
+fn main(@builtin(global_invocation_id) id: vec3<u32>) {
+    let coord = vec2<i32>(id.xy);
+    let dims = textureDimensions(input_tex);
+
+    if (coord.x >= i32(dims.x) || coord.y >= i32(dims.y)) {
+        return;
+    }
+
+    // Sample RGBA from mip 0
+    let rgba = textureLoad(input_tex, coord, 0);
+    let r = rgba.r;
+    let g = rgba.g;
+    let b = rgba.b;
+
+    // Sample depth
+    let d = textureLoad(depth_tex, coord, 0).r;
+
+    // UV coordinates (normalized [0,1])
+    let uv_x = f32(coord.x) / f32(dims.x);
+    let uv_y = f32(coord.y) / f32(dims.y);
+
+    // Multi-frequency position encoding
+    let sin10_x = sin(10.0 * uv_x);
+
+    // Bias dimension (always 1.0)
+    let bias = 1.0;
+
+    // Pack 8×f16 into 4×u32 (rgba32uint)
+    let packed = vec4<u32>(
+        pack2x16float(vec2<f32>(r, g)),
+        pack2x16float(vec2<f32>(b, d)),
+        pack2x16float(vec2<f32>(uv_x, uv_y)),
+        pack2x16float(vec2<f32>(sin10_x, bias))
+    );
+
+    textureStore(output_tex, coord, packed);
+}

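Size-budget sanity check (a back-of-the-envelope sketch, not part of the patch): the layer shapes below mirror the `CNNv2` module in `train_cnn_v2.py` with its default configuration, and the byte count assumes 2 bytes per f16 weight as done by `export_cnn_v2_shader.py`.

```python
# Weight count for the default CNN v2 config: kernels [1, 3, 5],
# channels [16, 8, 4], with the 8 static feature channels (7D + bias)
# re-concatenated into the input of every layer.
kernels = [1, 3, 5]
channels = [16, 8, 4]
static = 8  # static feature channels fed to each layer

ins = [static, static + channels[0], static + channels[1]]  # 8, 24, 12
outs = [channels[0], channels[1], 4]                        # final layer emits RGBA

params = sum(i * o * k * k for i, o, k in zip(ins, outs, kernels))
print(params, "weights ->", params * 2, "bytes as f16")
# 128 + 1728 + 1200 = 3056 weights -> 6112 bytes (~6.0 KB)
```

By this count the default network lands around 6 KB of f16 weights, in the ballpark of the ~6.4 KB quoted in TODO.md and comfortably under the <10 KB target for the 64k constraint.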