summaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
Diffstat (limited to 'tools')
-rw-r--r--tools/cnn_test.cc953
-rw-r--r--tools/cnn_v2_test/index.html420
2 files changed, 1225 insertions, 148 deletions
diff --git a/tools/cnn_test.cc b/tools/cnn_test.cc
index c2983a9..4599512 100644
--- a/tools/cnn_test.cc
+++ b/tools/cnn_test.cc
@@ -28,6 +28,7 @@
#include <cstdlib>
#include <cstring>
#include <vector>
+#include <cmath>
// Helper to get asset string or empty string
static const char* SafeGetAsset(AssetId id) {
@@ -44,6 +45,9 @@ struct Args {
const char* save_intermediates = nullptr;
int num_layers = 3; // Default to 3 layers
bool debug_hex = false; // Print first 8 pixels as hex
+ int cnn_version = 1; // 1=CNNEffect, 2=CNNv2Effect
+ const char* weights_path = nullptr; // Optional .bin weights file
+ bool cnn_version_explicit = false; // Track if --cnn-version was explicitly set
};
// Parse command-line arguments
@@ -83,6 +87,15 @@ static bool parse_args(int argc, char** argv, Args* args) {
}
} else if (strcmp(argv[i], "--debug-hex") == 0) {
args->debug_hex = true;
+ } else if (strcmp(argv[i], "--cnn-version") == 0 && i + 1 < argc) {
+ args->cnn_version = atoi(argv[++i]);
+ args->cnn_version_explicit = true;
+ if (args->cnn_version < 1 || args->cnn_version > 2) {
+ fprintf(stderr, "Error: cnn-version must be 1 or 2\n");
+ return false;
+ }
+ } else if (strcmp(argv[i], "--weights") == 0 && i + 1 < argc) {
+ args->weights_path = argv[++i];
} else if (strcmp(argv[i], "--help") == 0) {
return false;
} else {
@@ -91,6 +104,21 @@ static bool parse_args(int argc, char** argv, Args* args) {
}
}
+ // Force CNN v2 when --weights is specified
+ if (args->weights_path) {
+ if (args->cnn_version_explicit && args->cnn_version != 2) {
+ fprintf(stderr, "WARNING: --cnn-version %d ignored (--weights forces CNN v2)\n",
+ args->cnn_version);
+ }
+ args->cnn_version = 2;
+
+ // Warn if --layers was specified (binary file config takes precedence)
+ if (args->num_layers != 3) { // 3 is the default
+ fprintf(stderr, "WARNING: --layers %d ignored (--weights loads layer config from .bin)\n",
+ args->num_layers);
+ }
+ }
+
return true;
}
@@ -100,9 +128,11 @@ static void print_usage(const char* prog) {
fprintf(stderr, "\nOPTIONS:\n");
fprintf(stderr, " --blend F Final blend amount (0.0-1.0, default: 1.0)\n");
fprintf(stderr, " --format ppm|png Output format (default: png)\n");
- fprintf(stderr, " --layers N Number of CNN layers (1-10, default: 3)\n");
+ fprintf(stderr, " --layers N Number of CNN layers (1-10, default: 3, ignored with --weights)\n");
fprintf(stderr, " --save-intermediates DIR Save intermediate layers to directory\n");
fprintf(stderr, " --debug-hex Print first 8 pixels as hex (debug)\n");
+ fprintf(stderr, " --cnn-version N CNN version: 1 (default) or 2 (ignored with --weights)\n");
+ fprintf(stderr, " --weights PATH Load weights from .bin (forces CNN v2, overrides layer config)\n");
fprintf(stderr, " --help Show this help\n");
}
@@ -160,6 +190,66 @@ static WGPUTexture load_texture(WGPUDevice device, WGPUQueue queue,
return texture;
}
+// Load a depth texture from the alpha channel of the PNG at `path`.
+//
+// The image must match `width`x`height` exactly. If the source file had an
+// alpha channel, each texel's depth is alpha/255 in [0, 1]; otherwise every
+// texel is 1.0 (far plane). Returns an R32Float texture the caller owns, or
+// nullptr (after a diagnostic) on load failure or size mismatch.
+// NOTE(review): assumes alpha encodes depth with 1.0 = far, matching the
+// training convention referenced at the call site — confirm if retraining.
+static WGPUTexture load_depth_from_alpha(WGPUDevice device, WGPUQueue queue,
+                                         const char* path, int width,
+                                         int height) {
+  int w, h, channels;
+  // Force 4 components so data stride is always 4 bytes per pixel;
+  // `channels` still reports the file's original component count.
+  uint8_t* data = stbi_load(path, &w, &h, &channels, 4);
+  if (!data || w != width || h != height) {
+    fprintf(stderr, "Error: failed to load depth from '%s'\n", path);
+    if (data) stbi_image_free(data);
+    return nullptr;
+  }
+
+  // Extract alpha channel (or use 1.0 if original was RGB)
+  std::vector<float> depth_data(width * height);
+  bool has_alpha = (channels == 4);
+  for (int i = 0; i < width * height; ++i) {
+    // Alpha is in data[i*4+3] (0-255), convert to float [0, 1]
+    // If no alpha channel, default to 1.0 (far plane)
+    depth_data[i] = has_alpha ? (data[i * 4 + 3] / 255.0f) : 1.0f;
+  }
+  stbi_image_free(data);
+
+  // Create R32Float depth texture (sampled by the static-feature pass).
+  const WGPUTextureDescriptor depth_desc = {
+      .usage = WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopyDst,
+      .dimension = WGPUTextureDimension_2D,
+      .size = {static_cast<uint32_t>(width), static_cast<uint32_t>(height), 1},
+      .format = WGPUTextureFormat_R32Float,
+      .mipLevelCount = 1,
+      .sampleCount = 1,
+  };
+  WGPUTexture depth_texture = wgpuDeviceCreateTexture(device, &depth_desc);
+  if (!depth_texture) {
+    fprintf(stderr, "Error: failed to create depth texture\n");
+    return nullptr;
+  }
+
+  // Upload the float depth data (tightly packed: width * 4 bytes per row).
+  const WGPUTexelCopyTextureInfo dst = {
+      .texture = depth_texture,
+      .mipLevel = 0
+  };
+  const WGPUTexelCopyBufferLayout layout = {
+      .bytesPerRow = static_cast<uint32_t>(width * sizeof(float)),
+      .rowsPerImage = static_cast<uint32_t>(height)
+  };
+  const WGPUExtent3D size = {
+      static_cast<uint32_t>(width),
+      static_cast<uint32_t>(height),
+      1
+  };
+  wgpuQueueWriteTexture(queue, &dst, depth_data.data(),
+                        depth_data.size() * sizeof(float), &layout, &size);
+
+  printf("Loaded depth from alpha: %dx%d (%s alpha)\n", width, height,
+         has_alpha ? "has" : "no");
+
+  return depth_texture;
+}
+
// Create CNN render pipeline (5 bindings)
// Takes both intermediate format (RGBA16Float) and final format (BGRA8Unorm)
static WGPURenderPipeline create_cnn_pipeline(WGPUDevice device,
@@ -236,6 +326,57 @@ static bool save_png(const char* path, const std::vector<uint8_t>& pixels,
return true;
}
+// Build a vertical composite of the per-layer debug PNGs in `dir`.
+// Each layer_<i>.png is grayscale and 4*width x height (the four CNN
+// channels side by side); this stacks num_layers of them vertically into
+// dir/layers_composite.png.
+//
+// Returns false (after a diagnostic) on any load/size mismatch or write
+// failure, and immediately for num_layers <= 0 (nothing to stack).
+static bool save_layer_composite(const char* dir, int width, int height, int num_layers) {
+  if (num_layers <= 0) return false;  // avoid a zero-height PNG write
+
+  // Each layer PNG is already 4x wide with 4 channels side-by-side
+  int layer_width = width * 4;
+
+  // Load all layer images (they're already grayscale)
+  std::vector<std::vector<uint8_t>> layers(num_layers);
+  for (int i = 0; i < num_layers; ++i) {
+    char path[512];
+    snprintf(path, sizeof(path), "%s/layer_%d.png", dir, i);
+
+    int w, h, channels;
+    uint8_t* data = stbi_load(path, &w, &h, &channels, 1); // Load as grayscale
+    if (!data || w != layer_width || h != height) {
+      if (data) stbi_image_free(data);
+      fprintf(stderr, "Warning: failed to load layer %d for composite (expected %dx%d, got %dx%d)\n",
+              i, layer_width, height, w, h);
+      return false;
+    }
+
+    layers[i].assign(data, data + (layer_width * height));
+    stbi_image_free(data);
+  }
+
+  // Stack layers vertically, one height-tall band per layer
+  int composite_height = height * num_layers;
+  std::vector<uint8_t> composite(layer_width * composite_height);
+
+  for (int layer = 0; layer < num_layers; ++layer) {
+    for (int y = 0; y < height; ++y) {
+      int src_row_offset = y * layer_width;
+      int dst_row_offset = (layer * height + y) * layer_width;
+      memcpy(&composite[dst_row_offset], &layers[layer][src_row_offset], layer_width);
+    }
+  }
+
+  // Save as grayscale PNG (stacked vertically)
+  char composite_path[512];
+  snprintf(composite_path, sizeof(composite_path), "%s/layers_composite.png", dir);
+  if (!stbi_write_png(composite_path, layer_width, composite_height, 1,
+                      composite.data(), layer_width)) {
+    fprintf(stderr, "Error: failed to write composite PNG\n");
+    return false;
+  }
+
+  // BUGFIX: report the actual layer count (message hardcoded "4 layers").
+  printf("Saved layer composite to '%s' (%dx%d, %d layers stacked vertically)\n",
+         composite_path, layer_width, composite_height, num_layers);
+  return true;
+}
+
// Save PPM output (fallback)
static bool save_ppm(const char* path, const std::vector<uint8_t>& pixels,
int width, int height) {
@@ -257,6 +398,803 @@ static bool save_ppm(const char* path, const std::vector<uint8_t>& pixels,
return true;
}
+// CNN v2 structures (matching CNNv2Effect)
+
+// Per-layer metadata parsed from the .bin weights blob (5 u32s per layer
+// in the file's layer table, read in this field order).
+struct CNNv2LayerInfo {
+  uint32_t kernel_size;    // Square convolution kernel size (k x k)
+  uint32_t in_channels;    // Input channel count
+  uint32_t out_channels;   // Output channel count
+  uint32_t weight_offset;  // Offset of this layer's weights within the blob
+  uint32_t weight_count;   // Number of weights belonging to this layer
+};
+
+// Uniform-buffer contents for one layer dispatch; bound with
+// minBindingSize == sizeof(CNNv2LayerParams), so the layout is expected
+// to match the layer compute shader's params struct.
+struct CNNv2LayerParams {
+  uint32_t kernel_size;      // Copied from CNNv2LayerInfo
+  uint32_t in_channels;
+  uint32_t out_channels;
+  uint32_t weight_offset;
+  uint32_t is_output_layer;  // 1 only on the last layer
+  float blend_amount;        // Final blend factor (args.blend)
+  uint32_t is_layer_0;       // 1 only on the first layer
+};
+
+// Uniform-buffer contents for the static-feature pass; padding keeps
+// sizeof at 16 bytes.
+struct CNNv2StaticFeatureParams {
+  uint32_t mip_level;   // From the version-2 weights header (0 for v1)
+  uint32_t padding[3];
+};
+
+// Read back an RGBA32Uint texture whose texels pack f16 feature values
+// (8 x f16 per texel; the first four are treated as RGBA) and convert to
+// 8-bit BGRA. Returns width*height*4 bytes in B,G,R,A order, or an empty
+// vector on mapping failure. Blocks until the GPU copy and map complete.
+static std::vector<uint8_t> readback_rgba32uint_to_bgra8(
+    WGPUDevice device, WGPUQueue queue, WGPUTexture texture,
+    int width, int height) {
+  // Create staging buffer. Rows are padded to 256 bytes as required by
+  // WebGPU texture-to-buffer copies.
+  const uint32_t bytes_per_row = width * 16; // 4×u32 per pixel
+  const uint32_t padded_bytes_per_row = (bytes_per_row + 255) & ~255;
+  const size_t buffer_size = padded_bytes_per_row * height;
+
+  WGPUBufferDescriptor buffer_desc = {};
+  buffer_desc.size = buffer_size;
+  buffer_desc.usage = WGPUBufferUsage_CopyDst | WGPUBufferUsage_MapRead;
+  buffer_desc.mappedAtCreation = false;
+
+  WGPUBuffer staging = wgpuDeviceCreateBuffer(device, &buffer_desc);
+
+  // Copy texture to buffer
+  WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+
+  WGPUTexelCopyTextureInfo src = {};
+  src.texture = texture;
+  src.mipLevel = 0;
+
+  WGPUTexelCopyBufferInfo dst = {};
+  dst.buffer = staging;
+  dst.layout.bytesPerRow = padded_bytes_per_row;
+  dst.layout.rowsPerImage = height;
+
+  WGPUExtent3D copy_size = {
+      static_cast<uint32_t>(width),
+      static_cast<uint32_t>(height),
+      1};
+
+  wgpuCommandEncoderCopyTextureToBuffer(encoder, &src, &dst, &copy_size);
+
+  WGPUCommandBuffer commands = wgpuCommandEncoderFinish(encoder, nullptr);
+  wgpuQueueSubmit(queue, 1, &commands);
+  wgpuCommandBufferRelease(commands);
+  wgpuCommandEncoderRelease(encoder);
+
+  // Wait for copy to complete
+  wgpuDevicePoll(device, true, nullptr);
+
+  // Map and read buffer. The callback records success in MapState.
+  struct MapState {
+    bool done = false;
+  };
+  MapState map_state;
+
+  auto map_cb = [](WGPUMapAsyncStatus status, WGPUStringView message,
+                   void* userdata1, void* userdata2) {
+    (void)message;
+    (void)userdata2;
+    MapState* state = (MapState*)userdata1;
+    state->done = (status == WGPUMapAsyncStatus_Success);
+  };
+
+  WGPUBufferMapCallbackInfo map_info = {};
+  map_info.mode = WGPUCallbackMode_AllowProcessEvents;
+  map_info.callback = map_cb;
+  map_info.userdata1 = &map_state;
+
+  wgpuBufferMapAsync(staging, WGPUMapMode_Read, 0, buffer_size, map_info);
+
+  // Wait for mapping to complete (bounded at 100 polls to avoid hanging).
+  for (int i = 0; i < 100 && !map_state.done; ++i) {
+    wgpuDevicePoll(device, true, nullptr);
+  }
+
+  if (!map_state.done) {
+    fprintf(stderr, "Error: Buffer mapping timed out\n");
+    wgpuBufferRelease(staging);
+    return std::vector<uint8_t>();
+  }
+
+  const uint32_t* mapped =
+      (const uint32_t*)wgpuBufferGetConstMappedRange(staging, 0, buffer_size);
+
+  std::vector<uint8_t> result(width * height * 4);
+
+  // Unpack f16 to u8 (BGRA). Rows are addressed through the padded stride.
+  for (int y = 0; y < height; ++y) {
+    const uint32_t* row =
+        (const uint32_t*)((const uint8_t*)mapped + y * padded_bytes_per_row);
+    for (int x = 0; x < width; ++x) {
+      // Read 4×u32 (8×f16)
+      uint32_t data[4];
+      data[0] = row[x * 4 + 0];
+      data[1] = row[x * 4 + 1];
+      data[2] = row[x * 4 + 2];
+      data[3] = row[x * 4 + 3];
+
+      // Extract RGBA channels (first 4 f16 values); each u32 holds two
+      // f16s, low half first.
+      uint16_t r16 = data[0] & 0xFFFF;
+      uint16_t g16 = (data[0] >> 16) & 0xFFFF;
+      uint16_t b16 = data[1] & 0xFFFF;
+      uint16_t a16 = (data[1] >> 16) & 0xFFFF;
+
+      // Convert f16 to f32 (simple decode of IEEE 754 binary16:
+      // 1 sign bit, 5 exponent bits with bias 15, 10 fraction bits).
+      auto f16_to_f32 = [](uint16_t h) -> float {
+        uint32_t sign = (h >> 15) & 1;
+        uint32_t exp = (h >> 10) & 0x1F;
+        uint32_t frac = h & 0x3FF;
+
+        if (exp == 0) {
+          if (frac == 0) return sign ? -0.0f : 0.0f;
+          // Denormal: value = frac * 2^-10 * 2^-14
+          float val = frac / 1024.0f / 16384.0f;
+          return sign ? -val : val;
+        }
+        if (exp == 31) {
+          return frac ? NAN : (sign ? -INFINITY : INFINITY);
+        }
+
+        int32_t e = exp - 15;
+        float val = (1.0f + frac / 1024.0f) * powf(2.0f, e);
+        return sign ? -val : val;
+      };
+
+      float r = f16_to_f32(r16);
+      float g = f16_to_f32(g16);
+      float b = f16_to_f32(b16);
+      float a = f16_to_f32(a16);
+
+      // Clamp to [0,1] and convert to u8 (round-to-nearest)
+      auto clamp_u8 = [](float v) -> uint8_t {
+        if (v <= 0.0f) return 0;
+        if (v >= 1.0f) return 255;
+        return static_cast<uint8_t>(v * 255.0f + 0.5f);
+      };
+
+      // Output byte order is B,G,R,A.
+      result[(y * width + x) * 4 + 0] = clamp_u8(b);
+      result[(y * width + x) * 4 + 1] = clamp_u8(g);
+      result[(y * width + x) * 4 + 2] = clamp_u8(r);
+      result[(y * width + x) * 4 + 3] = clamp_u8(a);
+    }
+  }
+
+  wgpuBufferUnmap(staging);
+  wgpuBufferRelease(staging);
+
+  return result;
+}
+
+// Read back an RGBA32Uint texture and lay its four channels out
+// side-by-side as a single grayscale image. The result is
+// (width*4) x height bytes: strips ordered R, G, B, A left to right.
+// Returns an empty vector if the underlying readback fails.
+static std::vector<uint8_t> readback_rgba32uint_to_composite(
+    WGPUDevice device, WGPUQueue queue, WGPUTexture texture,
+    int width, int height) {
+
+  // Decode the packed-f16 texture to 8-bit BGRA first.
+  std::vector<uint8_t> bgra = readback_rgba32uint_to_bgra8(device, queue, texture, width, height);
+  if (bgra.empty()) return {};
+
+  // Create 4x wide grayscale image (one channel per horizontal strip).
+  int composite_width = width * 4;
+  std::vector<uint8_t> composite(composite_width * height);
+
+  for (int y = 0; y < height; ++y) {
+    for (int x = 0; x < width; ++x) {
+      int src_idx = (y * width + x) * 4;
+      // bgra[] is B,G,R,A; strips are written in R,G,B,A order.
+      // (The channel bytes are copied through unchanged — no luminance
+      // conversion is needed since each strip shows a single channel.)
+      composite[y * composite_width + (0 * width + x)] = bgra[src_idx + 2];  // Channel 0 (R)
+      composite[y * composite_width + (1 * width + x)] = bgra[src_idx + 1];  // Channel 1 (G)
+      composite[y * composite_width + (2 * width + x)] = bgra[src_idx + 0];  // Channel 2 (B)
+      composite[y * composite_width + (3 * width + x)] = bgra[src_idx + 3];  // Channel 3 (A)
+    }
+  }
+
+  return composite;
+}
+
+// Process one image with the CNN v2 (compute / storage-buffer) pipeline.
+//
+// Loads the weight blob (from args.weights_path or the asset system),
+// parses its header and per-layer table, runs the static-feature pass and
+// then one compute pass per layer (ping-ponging between two RGBA32Uint
+// textures), and writes the final image to args.output_path as PNG or PPM.
+// When args.save_intermediates is set, per-layer debug composites are also
+// written. Returns true on success.
+static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
+                           WGPUInstance instance, WGPUTexture input_texture,
+                           int width, int height, const Args& args) {
+  (void)instance;  // Unused here; kept so the v1/v2 call sites match.
+  printf("Using CNN v2 (storage buffer architecture)\n");
+
+  // Load weights (from file or asset system)
+  size_t weights_size = 0;
+  const uint8_t* weights_data = nullptr;
+  std::vector<uint8_t> file_weights; // For file-based loading
+
+  if (args.weights_path) {
+    // Load from file
+    printf("Loading weights from '%s'...\n", args.weights_path);
+    FILE* f = fopen(args.weights_path, "rb");
+    if (!f) {
+      fprintf(stderr, "Error: failed to open weights file '%s'\n", args.weights_path);
+      return false;
+    }
+
+    fseek(f, 0, SEEK_END);
+    weights_size = ftell(f);
+    fseek(f, 0, SEEK_SET);
+
+    file_weights.resize(weights_size);
+    size_t read = fread(file_weights.data(), 1, weights_size, f);
+    fclose(f);
+
+    if (read != weights_size) {
+      fprintf(stderr, "Error: failed to read weights file\n");
+      return false;
+    }
+
+    weights_data = file_weights.data();
+  } else {
+    // Load from asset system
+    weights_data = (const uint8_t*)GetAsset(AssetId::ASSET_WEIGHTS_CNN_V2, &weights_size);
+  }
+
+  if (!weights_data || weights_size < 20) {
+    fprintf(stderr, "Error: CNN v2 weights not available\n");
+    return false;
+  }
+
+  // Parse header: magic, version, layer count, total weight count
+  const uint32_t* header = (const uint32_t*)weights_data;
+  uint32_t magic = header[0];
+  uint32_t version = header[1];
+  uint32_t num_layers = header[2];
+  uint32_t total_weights = header[3];
+
+  if (magic != 0x324e4e43) { // 'CNN2'
+    fprintf(stderr, "Error: Invalid CNN v2 weights magic\n");
+    return false;
+  }
+
+  uint32_t mip_level = 0;
+  if (version == 2) {
+    mip_level = header[4];
+  }
+
+  printf("Loaded CNN v2 weights: %u layers, %u weights, version %u\n",
+         num_layers, total_weights, version);
+
+  // Parse layer info (5 u32s per layer following the header)
+  const uint32_t header_u32_count = (version == 1) ? 4 : 5;
+
+  // BUGFIX: validate the blob actually contains the layer table before
+  // indexing into it (guards against truncated --weights files).
+  if (weights_size <
+      ((size_t)header_u32_count + (size_t)num_layers * 5) * sizeof(uint32_t)) {
+    fprintf(stderr, "Error: CNN v2 weights file truncated\n");
+    return false;
+  }
+
+  const uint32_t* layer_data = header + header_u32_count;
+  std::vector<CNNv2LayerInfo> layer_info;
+
+  for (uint32_t i = 0; i < num_layers; ++i) {
+    CNNv2LayerInfo info;
+    info.kernel_size = layer_data[i * 5 + 0];
+    info.in_channels = layer_data[i * 5 + 1];
+    info.out_channels = layer_data[i * 5 + 2];
+    info.weight_offset = layer_data[i * 5 + 3];
+    info.weight_count = layer_data[i * 5 + 4];
+    layer_info.push_back(info);
+
+    printf("  Layer %u: %ux%u conv, %u→%u channels, %u weights\n", i,
+           info.kernel_size, info.kernel_size, info.in_channels,
+           info.out_channels, info.weight_count);
+  }
+
+  // Create weights storage buffer (whole blob uploaded; shaders index it
+  // via each layer's weight_offset)
+  WGPUBufferDescriptor weights_buffer_desc = {};
+  weights_buffer_desc.size = weights_size;
+  weights_buffer_desc.usage = WGPUBufferUsage_Storage | WGPUBufferUsage_CopyDst;
+  weights_buffer_desc.mappedAtCreation = false;
+
+  WGPUBuffer weights_buffer =
+      wgpuDeviceCreateBuffer(device, &weights_buffer_desc);
+  wgpuQueueWriteBuffer(queue, weights_buffer, 0, weights_data, weights_size);
+
+  // Create input view
+  const WGPUTextureViewDescriptor view_desc = {
+      .format = WGPUTextureFormat_BGRA8Unorm,
+      .dimension = WGPUTextureViewDimension_2D,
+      .baseMipLevel = 0,
+      .mipLevelCount = 1,
+      .baseArrayLayer = 0,
+      .arrayLayerCount = 1,
+  };
+  WGPUTextureView input_view = wgpuTextureCreateView(input_texture, &view_desc);
+
+  // Create static features texture (RGBA32Uint)
+  const WGPUTextureDescriptor static_desc = {
+      .usage = WGPUTextureUsage_StorageBinding | WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopySrc,
+      .dimension = WGPUTextureDimension_2D,
+      .size = {static_cast<uint32_t>(width), static_cast<uint32_t>(height), 1},
+      .format = WGPUTextureFormat_RGBA32Uint,
+      .mipLevelCount = 1,
+      .sampleCount = 1,
+  };
+  WGPUTexture static_features_tex =
+      wgpuDeviceCreateTexture(device, &static_desc);
+  WGPUTextureView static_features_view =
+      wgpuTextureCreateView(static_features_tex, nullptr);
+
+  // Load depth from input alpha channel (or 1.0 if no alpha)
+  WGPUTexture depth_texture =
+      load_depth_from_alpha(device, queue, args.input_path, width, height);
+  if (!depth_texture) {
+    wgpuTextureViewRelease(static_features_view);
+    wgpuTextureRelease(static_features_tex);
+    wgpuBufferRelease(weights_buffer);
+    wgpuTextureViewRelease(input_view);
+    return false;
+  }
+  WGPUTextureView depth_view = wgpuTextureCreateView(depth_texture, nullptr);
+
+  // Create layer textures (ping-pong between the two across layers)
+  WGPUTexture layer_textures[2] = {
+      wgpuDeviceCreateTexture(device, &static_desc),
+      wgpuDeviceCreateTexture(device, &static_desc),
+  };
+  WGPUTextureView layer_views[2] = {
+      wgpuTextureCreateView(layer_textures[0], nullptr),
+      wgpuTextureCreateView(layer_textures[1], nullptr),
+  };
+
+  // Load shaders
+  const char* static_shader =
+      SafeGetAsset(AssetId::ASSET_SHADER_CNN_V2_STATIC);
+  const char* layer_shader =
+      SafeGetAsset(AssetId::ASSET_SHADER_CNN_V2_COMPUTE);
+
+  if (!static_shader[0] || !layer_shader[0]) {
+    fprintf(stderr, "Error: CNN v2 shaders not available\n");
+    wgpuTextureViewRelease(static_features_view);
+    wgpuTextureRelease(static_features_tex);
+    wgpuTextureViewRelease(depth_view);
+    wgpuTextureRelease(depth_texture);
+    wgpuTextureViewRelease(layer_views[0]);
+    wgpuTextureViewRelease(layer_views[1]);
+    wgpuTextureRelease(layer_textures[0]);
+    wgpuTextureRelease(layer_textures[1]);
+    wgpuBufferRelease(weights_buffer);
+    wgpuTextureViewRelease(input_view);
+    return false;
+  }
+
+  // Create static feature params buffer
+  WGPUBufferDescriptor static_params_desc = {};
+  static_params_desc.size = sizeof(CNNv2StaticFeatureParams);
+  static_params_desc.usage = WGPUBufferUsage_Uniform | WGPUBufferUsage_CopyDst;
+  static_params_desc.mappedAtCreation = false;
+
+  WGPUBuffer static_params_buffer =
+      wgpuDeviceCreateBuffer(device, &static_params_desc);
+
+  CNNv2StaticFeatureParams static_params;
+  static_params.mip_level = mip_level;
+  static_params.padding[0] = 0;
+  static_params.padding[1] = 0;
+  static_params.padding[2] = 0;
+  wgpuQueueWriteBuffer(queue, static_params_buffer, 0, &static_params,
+                       sizeof(static_params));
+
+  // Create static features compute pipeline
+  WGPUShaderSourceWGSL static_wgsl = {};
+  static_wgsl.chain.sType = WGPUSType_ShaderSourceWGSL;
+  static_wgsl.code = str_view(static_shader);
+
+  WGPUShaderModuleDescriptor static_module_desc = {};
+  static_module_desc.nextInChain = &static_wgsl.chain;
+
+  WGPUShaderModule static_module =
+      wgpuDeviceCreateShaderModule(device, &static_module_desc);
+
+  // Bind group layout: 0=input, 1=input_mip1, 2=input_mip2, 3=depth, 4=output,
+  // 5=params
+  WGPUBindGroupLayoutEntry static_bgl_entries[6] = {};
+  static_bgl_entries[0].binding = 0;
+  static_bgl_entries[0].visibility = WGPUShaderStage_Compute;
+  static_bgl_entries[0].texture.sampleType = WGPUTextureSampleType_Float;
+  static_bgl_entries[0].texture.viewDimension = WGPUTextureViewDimension_2D;
+
+  static_bgl_entries[1].binding = 1;
+  static_bgl_entries[1].visibility = WGPUShaderStage_Compute;
+  static_bgl_entries[1].texture.sampleType = WGPUTextureSampleType_Float;
+  static_bgl_entries[1].texture.viewDimension = WGPUTextureViewDimension_2D;
+
+  static_bgl_entries[2].binding = 2;
+  static_bgl_entries[2].visibility = WGPUShaderStage_Compute;
+  static_bgl_entries[2].texture.sampleType = WGPUTextureSampleType_Float;
+  static_bgl_entries[2].texture.viewDimension = WGPUTextureViewDimension_2D;
+
+  // Depth is R32Float, which is unfilterable without an extra feature.
+  static_bgl_entries[3].binding = 3;
+  static_bgl_entries[3].visibility = WGPUShaderStage_Compute;
+  static_bgl_entries[3].texture.sampleType = WGPUTextureSampleType_UnfilterableFloat;
+  static_bgl_entries[3].texture.viewDimension = WGPUTextureViewDimension_2D;
+
+  static_bgl_entries[4].binding = 4;
+  static_bgl_entries[4].visibility = WGPUShaderStage_Compute;
+  static_bgl_entries[4].storageTexture.access =
+      WGPUStorageTextureAccess_WriteOnly;
+  static_bgl_entries[4].storageTexture.format = WGPUTextureFormat_RGBA32Uint;
+  static_bgl_entries[4].storageTexture.viewDimension =
+      WGPUTextureViewDimension_2D;
+
+  static_bgl_entries[5].binding = 5;
+  static_bgl_entries[5].visibility = WGPUShaderStage_Compute;
+  static_bgl_entries[5].buffer.type = WGPUBufferBindingType_Uniform;
+  static_bgl_entries[5].buffer.minBindingSize =
+      sizeof(CNNv2StaticFeatureParams);
+
+  WGPUBindGroupLayoutDescriptor static_bgl_desc = {};
+  static_bgl_desc.entryCount = 6;
+  static_bgl_desc.entries = static_bgl_entries;
+
+  WGPUBindGroupLayout static_bgl =
+      wgpuDeviceCreateBindGroupLayout(device, &static_bgl_desc);
+
+  WGPUPipelineLayoutDescriptor static_pl_desc = {};
+  static_pl_desc.bindGroupLayoutCount = 1;
+  static_pl_desc.bindGroupLayouts = &static_bgl;
+
+  WGPUPipelineLayout static_pl =
+      wgpuDeviceCreatePipelineLayout(device, &static_pl_desc);
+
+  WGPUComputePipelineDescriptor static_pipeline_desc = {};
+  static_pipeline_desc.compute.module = static_module;
+  static_pipeline_desc.compute.entryPoint = str_view("main");
+  static_pipeline_desc.layout = static_pl;
+
+  WGPUComputePipeline static_pipeline =
+      wgpuDeviceCreateComputePipeline(device, &static_pipeline_desc);
+
+  wgpuShaderModuleRelease(static_module);
+  wgpuPipelineLayoutRelease(static_pl);
+
+  // Create static bind group (use input as all mips for simplicity)
+  WGPUBindGroupEntry static_bg_entries[6] = {};
+  static_bg_entries[0].binding = 0;
+  static_bg_entries[0].textureView = input_view;
+  static_bg_entries[1].binding = 1;
+  static_bg_entries[1].textureView = input_view;
+  static_bg_entries[2].binding = 2;
+  static_bg_entries[2].textureView = input_view;
+  static_bg_entries[3].binding = 3;
+  static_bg_entries[3].textureView = depth_view; // Depth from alpha channel (matches training)
+  static_bg_entries[4].binding = 4;
+  static_bg_entries[4].textureView = static_features_view;
+  static_bg_entries[5].binding = 5;
+  static_bg_entries[5].buffer = static_params_buffer;
+  static_bg_entries[5].size = sizeof(CNNv2StaticFeatureParams);
+
+  WGPUBindGroupDescriptor static_bg_desc = {};
+  static_bg_desc.layout = static_bgl;
+  static_bg_desc.entryCount = 6;
+  static_bg_desc.entries = static_bg_entries;
+
+  WGPUBindGroup static_bg = wgpuDeviceCreateBindGroup(device, &static_bg_desc);
+
+  wgpuBindGroupLayoutRelease(static_bgl);
+
+  // Create layer compute pipeline
+  WGPUShaderSourceWGSL layer_wgsl = {};
+  layer_wgsl.chain.sType = WGPUSType_ShaderSourceWGSL;
+  layer_wgsl.code = str_view(layer_shader);
+
+  WGPUShaderModuleDescriptor layer_module_desc = {};
+  layer_module_desc.nextInChain = &layer_wgsl.chain;
+
+  WGPUShaderModule layer_module =
+      wgpuDeviceCreateShaderModule(device, &layer_module_desc);
+
+  // Layer bind group layout:
+  // 0=static_features, 1=layer_input, 2=output, 3=weights, 4=params,
+  // 5=original
+  WGPUBindGroupLayoutEntry layer_bgl_entries[6] = {};
+  layer_bgl_entries[0].binding = 0;
+  layer_bgl_entries[0].visibility = WGPUShaderStage_Compute;
+  layer_bgl_entries[0].texture.sampleType = WGPUTextureSampleType_Uint;
+  layer_bgl_entries[0].texture.viewDimension = WGPUTextureViewDimension_2D;
+
+  layer_bgl_entries[1].binding = 1;
+  layer_bgl_entries[1].visibility = WGPUShaderStage_Compute;
+  layer_bgl_entries[1].texture.sampleType = WGPUTextureSampleType_Uint;
+  layer_bgl_entries[1].texture.viewDimension = WGPUTextureViewDimension_2D;
+
+  layer_bgl_entries[2].binding = 2;
+  layer_bgl_entries[2].visibility = WGPUShaderStage_Compute;
+  layer_bgl_entries[2].storageTexture.access =
+      WGPUStorageTextureAccess_WriteOnly;
+  layer_bgl_entries[2].storageTexture.format = WGPUTextureFormat_RGBA32Uint;
+  layer_bgl_entries[2].storageTexture.viewDimension =
+      WGPUTextureViewDimension_2D;
+
+  layer_bgl_entries[3].binding = 3;
+  layer_bgl_entries[3].visibility = WGPUShaderStage_Compute;
+  layer_bgl_entries[3].buffer.type = WGPUBufferBindingType_ReadOnlyStorage;
+
+  layer_bgl_entries[4].binding = 4;
+  layer_bgl_entries[4].visibility = WGPUShaderStage_Compute;
+  layer_bgl_entries[4].buffer.type = WGPUBufferBindingType_Uniform;
+  layer_bgl_entries[4].buffer.minBindingSize = sizeof(CNNv2LayerParams);
+
+  layer_bgl_entries[5].binding = 5;
+  layer_bgl_entries[5].visibility = WGPUShaderStage_Compute;
+  layer_bgl_entries[5].texture.sampleType = WGPUTextureSampleType_Float;
+  layer_bgl_entries[5].texture.viewDimension = WGPUTextureViewDimension_2D;
+
+  WGPUBindGroupLayoutDescriptor layer_bgl_desc = {};
+  layer_bgl_desc.entryCount = 6;
+  layer_bgl_desc.entries = layer_bgl_entries;
+
+  WGPUBindGroupLayout layer_bgl =
+      wgpuDeviceCreateBindGroupLayout(device, &layer_bgl_desc);
+
+  WGPUPipelineLayoutDescriptor layer_pl_desc = {};
+  layer_pl_desc.bindGroupLayoutCount = 1;
+  layer_pl_desc.bindGroupLayouts = &layer_bgl;
+
+  WGPUPipelineLayout layer_pl =
+      wgpuDeviceCreatePipelineLayout(device, &layer_pl_desc);
+
+  WGPUComputePipelineDescriptor layer_pipeline_desc = {};
+  layer_pipeline_desc.compute.module = layer_module;
+  layer_pipeline_desc.compute.entryPoint = str_view("main");
+  layer_pipeline_desc.layout = layer_pl;
+
+  WGPUComputePipeline layer_pipeline =
+      wgpuDeviceCreateComputePipeline(device, &layer_pipeline_desc);
+
+  wgpuShaderModuleRelease(layer_module);
+  wgpuPipelineLayoutRelease(layer_pl);
+
+  // Create layer params buffers (one uniform buffer per layer)
+  std::vector<WGPUBuffer> layer_params_buffers;
+  for (size_t i = 0; i < layer_info.size(); ++i) {
+    WGPUBufferDescriptor params_desc = {};
+    params_desc.size = sizeof(CNNv2LayerParams);
+    params_desc.usage = WGPUBufferUsage_Uniform | WGPUBufferUsage_CopyDst;
+    params_desc.mappedAtCreation = false;
+
+    WGPUBuffer buf = wgpuDeviceCreateBuffer(device, &params_desc);
+    layer_params_buffers.push_back(buf);
+  }
+
+  // Execute compute passes
+  WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+
+  // Pass 1: Static features
+  printf("Computing static features...\n");
+  WGPUComputePassEncoder static_pass =
+      wgpuCommandEncoderBeginComputePass(encoder, nullptr);
+  wgpuComputePassEncoderSetPipeline(static_pass, static_pipeline);
+  wgpuComputePassEncoderSetBindGroup(static_pass, 0, static_bg, 0, nullptr);
+
+  // 8x8 workgroups, rounded up to cover the full image
+  uint32_t workgroups_x = (width + 7) / 8;
+  uint32_t workgroups_y = (height + 7) / 8;
+  wgpuComputePassEncoderDispatchWorkgroups(static_pass, workgroups_x,
+                                           workgroups_y, 1);
+
+  wgpuComputePassEncoderEnd(static_pass);
+  wgpuComputePassEncoderRelease(static_pass);
+
+  // Save static features if requested
+  if (args.save_intermediates) {
+    // Submit and wait for static features to complete
+    WGPUCommandBuffer cmd = wgpuCommandEncoderFinish(encoder, nullptr);
+    wgpuQueueSubmit(queue, 1, &cmd);
+    wgpuCommandBufferRelease(cmd);
+    wgpuDevicePoll(device, true, nullptr);
+
+    // BUGFIX: release the finished encoder before replacing it (was leaked).
+    wgpuCommandEncoderRelease(encoder);
+
+    // Create new encoder for layers
+    encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+
+    char layer_path[512];
+    snprintf(layer_path, sizeof(layer_path), "%s/static_features.png",
+             args.save_intermediates);
+    printf("Saving static features to '%s'...\n", layer_path);
+
+    // Read back RGBA32Uint and create 8-channel grayscale composite
+    // Static features has 8 channels (packed as 4×u32), create 8x wide composite
+    std::vector<uint8_t> bgra = readback_rgba32uint_to_bgra8(
+        device, queue, static_features_tex, width, height);
+
+    if (!bgra.empty()) {
+      // Static features: 8 f16 values packed in 4×u32
+      // For now, just show first 4 channels (like layers)
+      // TODO: Show all 8 channels in 8x wide composite
+      std::vector<uint8_t> composite = readback_rgba32uint_to_composite(
+          device, queue, static_features_tex, width, height);
+      if (!composite.empty()) {
+        int composite_width = width * 4;
+        if (!stbi_write_png(layer_path, composite_width, height, 1,
+                            composite.data(), composite_width)) {
+          fprintf(stderr, "Error: failed to write static features PNG\n");
+        }
+      }
+    }
+  }
+
+  // Pass 2-N: CNN layers
+  for (size_t i = 0; i < layer_info.size(); ++i) {
+    const CNNv2LayerInfo& info = layer_info[i];
+
+    printf("Processing layer %zu/%zu (%ux%u, %u→%u channels)...\n", i + 1,
+           layer_info.size(), info.kernel_size, info.kernel_size,
+           info.in_channels, info.out_channels);
+
+    // Update layer params
+    CNNv2LayerParams params;
+    params.kernel_size = info.kernel_size;
+    params.in_channels = info.in_channels;
+    params.out_channels = info.out_channels;
+    params.weight_offset = info.weight_offset;
+    params.is_output_layer = (i == layer_info.size() - 1) ? 1 : 0;
+    params.blend_amount = args.blend;
+    params.is_layer_0 = (i == 0) ? 1 : 0;
+
+    wgpuQueueWriteBuffer(queue, layer_params_buffers[i], 0, &params,
+                         sizeof(params));
+
+    // Create bind group for this layer. Layer 0 reads the static features
+    // as its input; later layers ping-pong between the two layer textures.
+    WGPUBindGroupEntry layer_bg_entries[6] = {};
+    layer_bg_entries[0].binding = 0;
+    layer_bg_entries[0].textureView = static_features_view;
+
+    layer_bg_entries[1].binding = 1;
+    layer_bg_entries[1].textureView =
+        (i == 0) ? static_features_view : layer_views[i % 2];
+
+    layer_bg_entries[2].binding = 2;
+    layer_bg_entries[2].textureView = layer_views[(i + 1) % 2];
+
+    layer_bg_entries[3].binding = 3;
+    layer_bg_entries[3].buffer = weights_buffer;
+    layer_bg_entries[3].size = weights_size;
+
+    layer_bg_entries[4].binding = 4;
+    layer_bg_entries[4].buffer = layer_params_buffers[i];
+    layer_bg_entries[4].size = sizeof(CNNv2LayerParams);
+
+    layer_bg_entries[5].binding = 5;
+    layer_bg_entries[5].textureView = input_view;
+
+    WGPUBindGroupDescriptor layer_bg_desc = {};
+    layer_bg_desc.layout = layer_bgl;
+    layer_bg_desc.entryCount = 6;
+    layer_bg_desc.entries = layer_bg_entries;
+
+    WGPUBindGroup layer_bg =
+        wgpuDeviceCreateBindGroup(device, &layer_bg_desc);
+
+    WGPUComputePassEncoder layer_pass =
+        wgpuCommandEncoderBeginComputePass(encoder, nullptr);
+    wgpuComputePassEncoderSetPipeline(layer_pass, layer_pipeline);
+    wgpuComputePassEncoderSetBindGroup(layer_pass, 0, layer_bg, 0, nullptr);
+
+    wgpuComputePassEncoderDispatchWorkgroups(layer_pass, workgroups_x,
+                                             workgroups_y, 1);
+
+    wgpuComputePassEncoderEnd(layer_pass);
+    wgpuComputePassEncoderRelease(layer_pass);
+    wgpuBindGroupRelease(layer_bg);
+
+    // Save intermediate layer if requested
+    if (args.save_intermediates) {
+      // Submit and wait for layer to complete
+      WGPUCommandBuffer cmd = wgpuCommandEncoderFinish(encoder, nullptr);
+      wgpuQueueSubmit(queue, 1, &cmd);
+      wgpuCommandBufferRelease(cmd);
+      wgpuDevicePoll(device, true, nullptr);
+
+      // BUGFIX: release the finished encoder before replacing it (was leaked).
+      wgpuCommandEncoderRelease(encoder);
+
+      // Create new encoder for next layer
+      encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+
+      char layer_path[512];
+      snprintf(layer_path, sizeof(layer_path), "%s/layer_%zu.png",
+               args.save_intermediates, i);
+      printf("Saving intermediate layer %zu to '%s'...\n", i, layer_path);
+
+      // Read back RGBA32Uint and create 4-channel grayscale composite
+      WGPUTexture output_tex = layer_textures[(i + 1) % 2];
+      std::vector<uint8_t> composite = readback_rgba32uint_to_composite(
+          device, queue, output_tex, width, height);
+
+      if (!composite.empty()) {
+        int composite_width = width * 4;
+        if (!stbi_write_png(layer_path, composite_width, height, 1,
+                            composite.data(), composite_width)) {
+          fprintf(stderr, "Error: failed to write layer PNG\n");
+        }
+      }
+    }
+  }
+
+  WGPUCommandBuffer commands = wgpuCommandEncoderFinish(encoder, nullptr);
+  wgpuQueueSubmit(queue, 1, &commands);
+  wgpuCommandBufferRelease(commands);
+  wgpuCommandEncoderRelease(encoder);
+
+  wgpuDevicePoll(device, true, nullptr);
+
+  // Create layer composite if intermediates were saved
+  if (args.save_intermediates) {
+    save_layer_composite(args.save_intermediates, width, height, layer_info.size());
+  }
+
+  // Readback final result (from last layer's output texture: layer i writes
+  // layer_textures[(i+1)%2], so the last output is index size()%2)
+  printf("Reading pixels from GPU...\n");
+  size_t final_layer_idx = (layer_info.size()) % 2;
+  std::vector<uint8_t> pixels = readback_rgba32uint_to_bgra8(
+      device, queue, layer_textures[final_layer_idx], width, height);
+
+  if (pixels.empty()) {
+    fprintf(stderr, "Error: GPU readback failed\n");
+    for (auto buf : layer_params_buffers) wgpuBufferRelease(buf);
+    wgpuComputePipelineRelease(layer_pipeline);
+    wgpuBindGroupLayoutRelease(layer_bgl);
+    wgpuBindGroupRelease(static_bg);
+    wgpuComputePipelineRelease(static_pipeline);
+    wgpuBufferRelease(static_params_buffer);
+    wgpuTextureViewRelease(static_features_view);
+    wgpuTextureRelease(static_features_tex);
+    wgpuTextureViewRelease(depth_view);
+    wgpuTextureRelease(depth_texture);
+    wgpuTextureViewRelease(layer_views[0]);
+    wgpuTextureViewRelease(layer_views[1]);
+    wgpuTextureRelease(layer_textures[0]);
+    wgpuTextureRelease(layer_textures[1]);
+    wgpuBufferRelease(weights_buffer);
+    wgpuTextureViewRelease(input_view);
+    return false;
+  }
+
+  // Debug hex dump
+  if (args.debug_hex) {
+    printf("First 8 pixels (BGRA hex):\n");
+    for (int i = 0; i < 8 && i < width * height; ++i) {
+      const uint8_t b = pixels[i * 4 + 0];
+      const uint8_t g = pixels[i * 4 + 1];
+      const uint8_t r = pixels[i * 4 + 2];
+      const uint8_t a = pixels[i * 4 + 3];
+      printf("  [%d] 0x%02X%02X%02X%02X (RGBA)\n", i, r, g, b, a);
+    }
+  }
+
+  // Save output
+  bool success;
+  if (args.output_png) {
+    printf("Saving PNG to '%s'...\n", args.output_path);
+    success = save_png(args.output_path, pixels, width, height);
+  } else {
+    printf("Saving PPM to '%s'...\n", args.output_path);
+    success = save_ppm(args.output_path, pixels, width, height);
+  }
+
+  if (success) {
+    printf("Done! Output saved to '%s'\n", args.output_path);
+  }
+
+  // Cleanup
+  for (auto buf : layer_params_buffers) wgpuBufferRelease(buf);
+  wgpuComputePipelineRelease(layer_pipeline);
+  wgpuBindGroupLayoutRelease(layer_bgl);
+  wgpuBindGroupRelease(static_bg);
+  wgpuComputePipelineRelease(static_pipeline);
+  wgpuBufferRelease(static_params_buffer);
+  wgpuTextureViewRelease(static_features_view);
+  wgpuTextureRelease(static_features_tex);
+  // BUGFIX: the depth view/texture were released on the error paths but
+  // leaked on the success path.
+  wgpuTextureViewRelease(depth_view);
+  wgpuTextureRelease(depth_texture);
+  wgpuTextureViewRelease(layer_views[0]);
+  wgpuTextureViewRelease(layer_views[1]);
+  wgpuTextureRelease(layer_textures[0]);
+  wgpuTextureRelease(layer_textures[1]);
+  wgpuBufferRelease(weights_buffer);
+  wgpuTextureViewRelease(input_view);
+
+  return success;
+}
+
int main(int argc, char** argv) {
// Parse arguments
Args args;
@@ -292,6 +1230,19 @@ int main(int argc, char** argv) {
printf("Loaded %dx%d image from '%s'\n", width, height, args.input_path);
+ // Branch based on CNN version
+ if (args.cnn_version == 2) {
+ bool success = process_cnn_v2(device, queue, instance, input_texture,
+ width, height, args);
+ wgpuTextureRelease(input_texture);
+ SamplerCache::Get().clear();
+ fixture.shutdown();
+ return success ? 0 : 1;
+ }
+
+ // CNN v1 processing below
+ printf("Using CNN v1 (render pipeline architecture)\n");
+
// Create input texture view
const WGPUTextureViewDescriptor view_desc = {
.format = WGPUTextureFormat_BGRA8Unorm,
diff --git a/tools/cnn_v2_test/index.html b/tools/cnn_v2_test/index.html
index cab20ea..dd1e42b 100644
--- a/tools/cnn_v2_test/index.html
+++ b/tools/cnn_v2_test/index.html
@@ -68,14 +68,16 @@
input[type="range"] { width: 120px; }
input[type="number"] { width: 60px; background: #1a1a1a; color: #e0e0e0; border: 1px solid #404040; padding: 4px; }
.drop-zone {
- border: 2px dashed #404040;
- padding: 16px;
+ border: 3px dashed #606060;
+ padding: 20px;
text-align: center;
cursor: pointer;
transition: all 0.2s;
- font-size: 12px;
- background: #1a1a1a;
- border-radius: 4px;
+ font-size: 13px;
+ font-weight: bold;
+ background: #252525;
+ border-radius: 6px;
+ color: #4a9eff;
}
button {
background: #1a1a1a;
@@ -91,7 +93,7 @@
button:hover { border-color: #606060; background: #252525; }
button:disabled { opacity: 0.3; cursor: not-allowed; }
video { display: none; }
- .drop-zone:hover { border-color: #606060; background: #252525; }
+ .drop-zone:hover { border-color: #4a9eff; background: #2a3545; }
.drop-zone.active { border-color: #4a9eff; background: #1a2a3a; }
.drop-zone.error { border-color: #ff4a4a; background: #3a1a1a; }
.content {
@@ -240,18 +242,25 @@
flex-direction: column;
overflow: hidden;
}
- .layer-zoom {
+ .layer-preview {
background: #1a1a1a;
border: 1px solid #404040;
display: flex;
flex-direction: column;
overflow: hidden;
+ margin-top: 8px;
}
- .layer-zoom canvas {
+ .layer-preview canvas {
width: 100%;
height: 100%;
image-rendering: pixelated;
}
+ .layer-view.active {
+ border: 2px solid #ffffff;
+ }
+ .layer-view canvas {
+ cursor: pointer;
+ }
.layer-view-label {
background: #2a2a2a;
padding: 4px;
@@ -314,6 +323,7 @@
<input type="range" id="depth" min="0" max="1" step="0.01" value="1.0">
<span id="depthValue">1.0</span>
</div>
+ <button id="savePngBtn">Save PNG</button>
</div>
</div>
<video id="videoSource" muted loop></video>
@@ -378,7 +388,8 @@
// ============================================================================
// Default pre-trained weights (base64-encoded binary format)
-const DEFAULT_WEIGHTS_B64 = 'Q05OMgEAAAADAAAAEAUAAAMAAAAMAAAABAAAAAAAAACwAQAAAwAAAAwAAAAEAAAAsAEAALABAAADAAAADAAAAAQAAABgAwAAsAEAAFQ3KjroN2yxSjQHsFi5HyjQtSo62zsJOq00bTXTqxC2zCnntyo44zoWOuwpajKup861uis/tj4kqyl6GkOp6CjTLbukERc2K0M4ijm8NpysMjQysgW4kLAHuJo5hTvGOtI0IDTio3C0yTDLtpM2ajq1ObctXjNlHEu2vqolt/ut2KMzJxWfSKf1p4et1C1eGfgt0atJMCihIKTyLZekxxRkKBas7ygWKpqoFS8LqHkruBhVqBwsELComn0yviiZMkUuVamVMKww2i/nMN0p/C1YKGmgBi8XKVilMCrApWGtaiJpI5OtJalcrZSkR5ceKvWrwSfxroSulyRcpXymsxTUIEcmXSx+rdap4KxIrCisTivhF1+tGi6VraYry6mdLNun26kKLHYod6o9r3AsyR7TpMytuykIpbStQ6ZoIZSsJqw/rM8o5K0eIbOgYCkXqAQpHysapWOsKCZcKPwtz6OPrRAuAapTq9AnBib9rPetVK4Lr6WstayeHqEo+Sk0LBskyqc6FueiOKn/JTknFqvapjcsJiy6K0+scqHVoyWnhKZ1rtWmkiiJq18sqaU3rpUyxS3pt2E4vDEIt7gj6ipZtSU5+zWjtKQ8uDaLqEguii/Qtck1SyRMtOs4UTA6ss6zSq8TtfSeFSwlJaOspisuowSuhCbGK3QxQaslt+83HTT4tAWoNKiTtSs4XTZos1g86zcZrLopxDBMtJY09iletg46dC13qy+vdax4tjIl8ynULASrUyG2rGqsGRklq5qpPasootsi3So3LJimlqpVJC8x6imJLmSoCyzPLkOrDCQtokovPaqMKmcsYKwWpS0gcSnzL2YseqXkJqorki1RMawtGC+MMEE0MiyfsCAzGTNEsgAz9qkJtBguArUutnqugieqtBk1FJ1Yszc1RK8vtqgpNidgtfYzpy5CsBwoHKopJGaZdKxwJFaqyJp/q4kzJSwrs78xeTMXs6AziYror8YwTrN8tcukZy0htKM1nC2qsOc00rDYtBwtXa3GtN00qqo5sf8m7aoBLnGqBaZIrbMekSa7LEam56g6rPaojJ1VIfoqbiRjIwkutSxCKGsr559+L4awEqNOrvorhSTCLbAsF7DZosOle5xZLZCtuyloJHmnLyScKggf66WRqYSbwSGXrDOhoqoyrpwmDC3TrDkqvyGdrZGpvqyEpfEkfq1eKN+scRY8neSksKS1pFisyqcNpKCtraz3q90oW6ysoOsqDRrBKIWthqlxJwkqDSzqqMKYCptkJn8qcCbhpZWh/q1BqwCtT6a2pESmsBVwrn4rHau8FqQc2a15rFIrlyKEKE2pSiT5qCSuTagwq2GpP6y8KiWqIaXfrSSuxqlkpYgoZK1yriUrBCw4Iqqm8ixcreEmUCkErHstMCodLaun/a3HrE2tEyIYmzgppSGRquCpmqhdqF4s9i4vIsakEh8pKokzrDIRMyEumSTcjRCsKC6Eqg+qKamNKFupdCzyMmWqeS+FNBApFi4AMTK01aE5LYmxNTDDMIq34bB0IdKypK3wtDqxw7LBqiGw17SusK4pwS9nsYAh+CkAIcIsaLLaoRKxiyVPrGYoo60VMCywD7BGsYeszaswJmasuiiwJRuuH67mqlAmXCN2p46oKA3eq3OmLKkgqIsvrqqMKWgm/SFJqVkhJ7CksP4jc6wEhXUuwJHSLyooyKfpL16hS6UAqoauw6EQqCyjdyBlsVQntKmXLOovsiiMnDkwcSq9KFupySkOLb8jMSp2GNWoqiF9JaWqJzB/MryvxC3WLV+iUKVOspq2cSq8MOe1BK2bKHGsQiQbrOu18bgWtRKtBjGNqk6oJ7WwrbixkLTOseipITnzMFowX5
+KMiiziLRxqXe0ADXmpJKonLCQqvOs+CVAlc4mxK0AJ5epbyr7q0yg4yA5qtAiJS48qZIr/aEOHw8geS8DLo4ovSytqO0q8iSsLAim4aOaJUctsKwWKUyp0S1womMqXKmprUyrHi2+qVkmsqlfKf6sEq2qK32tJK38qjyriSzHrVSoti2wE7+sdqwGLCWgMCxFKLat9KTXLEGobaXEqAmuaav5rXOksCmMJaaoRqYbrccr6iifLGMqQShppjUtoqdVLRQiCCp3qgygL6g6nmeXPq6pLFYqj6w0KLkpMKbjqdes+SyaI/UrWyg5psaqHiMzpYEt1KywqvYbEKYVLparYSetrS2tDB8wqMOnf6xLphioHqmvLOoqCquZKoElf6lQrWSoxyLiqSwglqdtqRQf9hzipQatea0GrpiugKqaqbMrMqpToyMuASgbrbyroa0lKGAkFafjKeSm96sWLj4tHyGqL5kw4THeMj4wxC36KWMtJDSbp/SpUa5Ss+WlLZz0qlcl36pDJ+4rsCbcKQgrtSLyqawn/TmTluOjaijcLOAk3CdpJUYo3Ds8pIct6CvkLeiqyqIrHemwwjmzrvEcVakZLXacjSxEoeulOSn5pkCsJSpHJgiubCNuKvslwi2sIZknnyoTHRud/CzjoX8kGiysK56ooa9wqrcl8J30KJ0rdqipKB8m5KDsI/gg9p2aqsSiazFAlRuqtykkKxwo2qknrJIYMaxEpy6r7KotrYWcqqpnLnst0iAiMCswCzIvMoowUi4cK88q+zKarCio7a1esmyqeaxHrIOpRyw9Ig8tJKv5qDUrMh5SqSWeKTqcnw8aQCTLLd8qSCVpHlcsWDsHKEMt/SxuKCOv1qHwoj2wFToQrUCkhqoCLkwopgvToAslhqa+q/Qq56zYp1UqmKWiInUmeS5BqaQsVSkSqiOCcCqVJvwsEixYKtKsia7OrDMfnqEhJWcw+qkRL5elRhURprapYKsvJ4IjWyqgLDEh9SjsLfUsSSeZqiYpshTRHtyqPapurKKnrqjTLCMsAii8LwQx3DBgM9AwdyojLSAuMjP4qLSqiqzZs7ao6avJLTaoNKgErXotqSzOLT8s35Ikp1cX8jmOn3KnTilDLjEjwylOqEkqEzyhINYwpST1KBqsW6N5JsKwqDm/rxipzqgfMPGs0KzzqggrNKyXqmSsxhOMrDWtGh/wKCQrFyueKPMtG6Pvo9wP/yyIDnQc2CelIHSjca8eI0meGgqEIq4xPKzKMMwm06CIJbGd0Ci2rDsuYxM/MGWoNB14IGSoTimyqqMgr6k2qycpxaH7EI0wbiMQL4goXDHJMA4y+TBvMYssAC5XMUY0AzX2L74yoS5zMqmqTauXqX8oDakgKaGozqYSrF0onygXLtgeZ6WaLVYtRCzPLG0lk6vlKIcp/C66qJMuKSSAKK+f4KqHLognYijKKjUojynrpS8hHa2Tqqks4KsLLOMt4yyqrJUutS8QIeUuqjBEMd4qTyu/LXEtpy1rKlgtaigUL5wsSy1kJccV5KyJMCoy/qswMM4tIiLTMX2pwaRhLpsvBS5LMOwsbzI7LQ==';
+// Version 2: 4 layers (3×3, 5×5, 3×3, 3×3), 2496 f16 weights, mip_level=2
+const DEFAULT_WEIGHTS_B64 = 'Q05OMgIAAAAEAAAAwAkAAAIAAAADAAAADAAAAAQAAAAAAAAAsAEAAAUAAAAMAAAABAAAALABAACwBAAAAwAAAAwAAAAEAAAAYAYAALABAAADAAAADAAAAAQAAAAQCAAAsAEAAAU3faplMDmtR7gnMLqt6bSrLM4RCa/En4q257kVsmWz57aSHJMxz6wILJC0tLdBriWww7IULUehCClCo60dBiu1nWqsf60ZKn6ktCWKjrswATSfLwQunzJjKKWkN6hxLTMwbS2DJvgvUjFDL1YsQDFFL78ysC5OL/cvxC2kJ6qh0i1BLH2rzCrcKFUoeixTqwwopjD+rXmewCY6sYUtXCwwsaKqGjBcqoykKigRJYStaqjMp+siPi1BLI+tGatfK5Ii6C1qLY0tYSGFKz4wpzNdH1QuJDKmMJi0lLVAs0y2Q7YWtY21fLXusf+n8LDSsaethK3drB4rtSROKYOrLK53qrqu0REYLEUuVy1qEqohDSzgqk4sDKKSKi0clKcVKvupJ69rKTmw8q7qptatQK7OsFUw5Z5JKJ4udSp9LLQeui87LbcxljEgJ6Iw75jDLfUvIjCxnh0g763Lq/ItMqzDqP0sXCRcqnkl9qDlJUStSyR8oTuwA616IrAnNqo5JS4qDKeILmahyaHZI48tryiajuEs0aghLBcuny+aovQpAhj6Kqkwdy+8MZ0wLzBvKBStsrRAKJez+raaKAotBiVSqZqyk7b2sHO1e7cJsfGmQLACpWizBLP9LnWxYLWoJPeb/CY5ISokXqynJ4qtG6K1qpesL6zGqYssIDJRpnErRi3RL9kh1zBFLPkdGSNvKtEuvyywmgilbC43LNovbywCKj4pFzEbMmMuly2gMFYscCgzliIomSqZnpSnyK3hJJKsAasgJGMrfCyNqXwpqaYNq14wiyzWLrSn/yLbqm+tnauOpkKtRKdCrBcYQS0dnGAveqeBrD8sMiGpLkAugzEaLM6lLzAkL5YydzYnqGo15zh2MuSwJK0nqxI04jZ5LAs2TjilNeSc3yANLecrCzBCprUvfjUHMWCuFrAkItyq/an0JSUnvKnrrAosv5CRrTGvQKesntuur6v2rsyxzbCAsHYn1y5GrAGsASYUmawrpSLooRSy86sBqmaxAq67sD0lJalOKxOtkqx8H+wqgygMLhup8SzNKZuhcafWKUKs567KI1opDCsoplatAykJpc+skavUrK4p2iznLlMqcig4Le6mDKiaJpIsMiOgLGOtQqI7sFGworKfsTOq86ZIlru0dLCEoMqq4KzsI6I2MzixMocqSym8MwQtT7Njqrwy26rEthe2nTGxL/Gq+az8MPg1Tq6EqXmslqyArkKs/S73MqEwmyuzrUUxejLhKYaw0yUlMzgxAZULsZ4rhq8ssgarCjDTrPop0ywBLswwjbT7MMAxdq2fsEC04DZoOIovG7G4LwM1gTNnKDsuEbByrzyxvLLBKJgkGDQANSMy66wVrM21ebURriAluK5quFa3wLBsK2wvaDU7OEg3RDGWKVUzpTfPNG+tbrGcr3ytRKosr7yuCbB2rV6gZq3msWmtjqvmoNurP6YXrOIpf6l/J2irl6/iqK2jy6MCLkkhjSDQoAWWACo1JrWjP6nvKvmthay+KJ6rUqoKqaatHKyJrUOarydBo5yu/CUaKFoxFCW1CNgpri2WK02kgqvYqkotwqlIrdiiEa1aKZ2tXa6mrkax4KkYKp2vcKgErYsi2RvbqWapU6EAnMyqtyPBpYwdZyVZkwGl1yhhJ2QBPaUJqMmMJJ54IikpcqmUHzmacCDzq1Cr3yR9n8aizKlWKFiogapBFlknrimnHmemDqbVKHciNRyII5AsxZ0+Lf0Xmyh7LMIqDS2KK9EkxyxRHKgp2iL9K0QfxCwGLLEuwiqrLcWob6xpppasp6+lotypGrC9qdmpPKUuplagES2cpSyrsS
yHJTMi3Kk4KWAlSCaqKNMtR626rKaoj6koI1wqeivGI9cpuqQ9KQUkZyEJKOmquyW0JymirSjhprWgkBpKLFykzZyloWSrNKxrGaCtMi1MqL6t56lLqu+wbbTetYkqYDR1rB0wqir/sWQwNas8N9E4wq+9I6WwT6xuMDy1yC9tM/Kwka+btK8vJisnIJWeUa30LRkwDaqIsNqzWK9lLnEzKjEMqYMuWy8uMs0qI6xKLjcvxicEqYCv06zrrLusKK/lMeMz8CyCMmqxO7AtNpW38zFzL5i2Wq19tkCuBaTlt8Kv85Mlsg6wWLfgstutzDJVNAqZxCywrQgspDYOMS0mGbQCuf63QS7GJ4GsBLizuRS0mKyiKKMkBbLXseCufCr4qKUpah7Vqh8tV6eqLLQoGy1bMNEu6i4fMD4wZSvbjwOpmCBzLMmeJKddoYqkIic6qpqRY6nNqDiwIq5dqcmndqbnKnGkSCjmKBUsriySrHWsZyTaG7smSKxAIwolIi2zLX6unK5KqXCwKq03qyarcKWMqQmmd6tIodWtH6UvLg2tTadPJOOp2iGgny0ufyy+L7AvNClhpiEpC6qMqqMp7KTopJ4mmB2ylM6mrKhfKiQrTyiiKdGoQqjKJ6Umxip/qDiq/ChgKtmqIiwOr+CunZF7Kfot36poqkcthCx+Ksapg5T5pn0oNqOPq4osMSbSqQQmGqgXKhEl3yV1piyswazLK7QoQBTaqU8lIS13Ldch+qQqJ2AsPKfmp3Ink5Z2HhosR5z4qLIoGqkNLCct2Ck3KPGnUC0oJBQq7agOKyaq0qsqpAap8SylLg4qriy6M3MqKCtdKpMjSi86KigsGCz/n2erEyu7J/QRVCkpILUwcC35LI8qxiw6Knoq5jAAKo8wnieqLF0vVTAYMZw4Jyx2t/ayTjGWMoGzKbwus1w4QRxeJse1dTGSNJGwmCrEJV8uQKygKe4gjSqkrLeydiaMroS0FrQms8Uygi28qe2uXS2Ko4q1d7ZxszEpiDSBMoc0STWpNc0xJKSvrMWm6bCKsOC3CrEOJNC1Ga5Qubi7U6/+NRQ0AqnSuFoySDmKtJS0b7KcNAMmqi45IbMvGzjeMg2qSioPKVWtSK6EpaA1UTckMt2m16nwM5E2oDHBsZ+pniVpMc4vQy1epXkqHifBl7Mu36T/KzQorix4JAOmWyqJFVUqq67doiot2CxYME8i2JxVKhQt5ioYJsWp1KiSpL0lhq1JpWAgbCweKW2o1CrCIMsrcghkHUqW3hiTI5osYqMlB+WaLy0uKNUooKx4qdEezqRlJEapyKuUoEmoZyT7nqcoo6v3n4yqZaGcpNElwij3IkinQiAFIFQK2ygqIoKsiZxEI6ukqCf7KFSkgqSTqjEq8JZLJPufXKmFkaEj36lCKj2qURxfKkQouaqQhRIrGSmepKin7Cl8KEcuKI+ip4Evz6xIF0woVK/yHLyfLSj0ny+oWywSJHWmQaEomWos6ZTMpPWlY61pqLelZqYGpAidcyzQE5kneBr1pnQkJSwIqWYpIabdKA8oHKroGeCnYplOKzAmC51LJ0emp6o+rXAofCkCKV4w4x1sKCYjrKAgKa0r+BcPJDMmP6o2JW4pIqqtm4srTqgHlLWlsBBepaqrKq27rBat9aTlot8qkaw2o5sl76ivKDkjNyjzKKWY5KlHrQCr8SjxquarXqrlKB2xyyfZL1Sqq7LWpxA04zZwMkyvUiyHMig1ay+GJqenVq1Ao1awVLHQnrEqxTD/LO8kKB+NH1grfKsPsY6u+aIELLaj4LBmLBU0wDOlM8ksdKjbqPSqQykHJmYodC+WMcYuSCJ7psYvNDTaLqWw/qy7Myw4xjTnMIouQTV9OJ81YSlbLiIx3TVuMUcokrDzI0ow8CQQr9IvDyxsLnk0OTVhLmmobLAULN4zkyyZsGC0LK01L3Upw52Jroywlix0MCwr5qkQJkot9aWzsYuui66HrHykMa
9ZsDet96yBqXWvXbAXsraxIqgpsVOvtq5frF+iZa2WqROwcaP+qX2w+aW3rxWpI7Bwrlqu5K0LrxexX7DUrfOvhK3QrUGwP7BrsY2tU6yWr8qkpK18rn2rHCbloYmfaqM1nfSr7Sn1qjuk2KT2qyem4KXJJ4MdxaidqPWsa58zKTSsoKXAJUymz6rJpv+oGKsOJo2hSicHqA4oOiiRmr4k0BxBq8Ui16jTKvyq7ijmqHcpZanhHnGfMikxIiEk7S4Yq90sfKWSoZyntKg/qh+nJiifnAyvlKeXJMIdViKeoxEjLKvZpXymAqkhraCofK5SnTGmLqdkq7mjYCD8qV0qQKo0qrUo+KsZKVSs0iaULFUI8qS0mlWtiiqbGBegACwBoAErhaW1qMwqHSxfKVKpp6x7poiweKxCrdkivK48sJewrKdArHYnqyhoHbUnsagYK58qSjAgMcUwsCt0K/4rLC7mJGwtvStOMFQu0SzuJQUsBTBMLswqcJyEnVQsESn3ox2z9ai/qFqwES7tKP0vSChMoqQwVzR4LKaT+y/NK06q2y0LIi2wHrIcKZuzsrSHn/6xkrPssAovJzEipEQiDbDjr3SqIis5LGIoOSm6p1apeqGGrtAqJzCIJRuptqrApiktWTAwMB4xQizXKoIgASFFsLwweTHbLdQtqyzXoKYtay3SLeOke6wgoPWr/SpFKUEmDacWptSoMChJKm6s6azkHe+mfzFKKyamfi6bK/wr5atPqEMxUTAlKSeueiRxoSQjQqxQLRavgauKriOssymXLZOooa97pFoufTSppqgoVq05tEg196yCsQIy7bEitAItJ7RgtUEzxjGML/QmEKIlrPgjPDFaoTYoPDFcJRavtK4XrKmsk6zjsCwsTa4UsPQs9jI/I3ct1C6cMV+b5y7wJZ0tYTF9MGojdS/oLTShziM/MVmnxC8FKJUwRCUxIz8wiS4QLWipLCCYq9EseabMKnEll6kPqIawRq+xGcgjyCkgqKed7SB6qZcr6CwJLW+st6ePq7WuHycUrhqsSq7zsKuZtimgCXCrmKkqnIGp4LHNsX2wnqyBsH2xIbDhpwCzra1ss44wTCypKDCyyK23LRiwYKKPMJmxcaqZKcshCCYipoyxNa1Nsbwozi1+MB8lQ5mtsDel3jDnlbutxiPzsWmp5SpTHaqys7EstauTPqoRsOosf6g3sLOgeaAfKUIsWi/BJdosUSzdMM4pSy3kpGM0DjWvLWw0cjR4MWWqQaYMLo2rZSijJjstZiFaLBadMq0TseyjYi0VGsQt8yo5oZCgti/HMLciM6r3KgMk8K6OqKup9q0srT0xcaWMMMwra67qrhSfsZ3GrrIj2a2+pqSvdrEcrRQ0IDhgMB+PCDWVM8qjnJ5ZKOmw4C0dMGyuG6DGMQUvrq+Oq4UsTSzHMRg2ibbXs+Axa7N5sAqqnSoerQUmky8oKIiuUjGsoBitdKy9q6iw661pqg4thKnpkYmt+a3gseypGp5Co22fM6YSKJap66hwopmsmqhlrCMkZyiLL4KnGKupKvUmyCQbLFUrbSZerKahlaRoqCYm5SqYKW0rcS8WrAUkzaMcGlqpRK3bnresXy18IXapEKqHKFssXKCpKMUrfamapf4tKjBiKJGoU54HK+8q5qq4qVuiZiy4JuEsTixNMFQnlSSIIw4k1KzxpbMlDqyKqz6gra4SpcOw3a3Vq+qqC6tOq22eORvnpC8hRadkka2q/K7HHUiowawpqPInLyA0qYMlsihUqGGkWCb7K1WdWK5Dr5EhnKv5KHKlXqYnJ/2l9i0YKUYuMzHxpyCs/ChMkPEtwanxoFQqJi3Uq7Mseq3arXskWKc5pOAc7CZcqCwc5w7qKO4f3iaKIDsq/KRgLpWsQqn5rYYkxCWPoU0bx6hzGdkkqibtofEoxy8GpUupSCTiKiwvpij7LbiulqkErXetejFkL2+upqtUp0OwiLAPsdCpxLIlrKOyQ7
C2r3utIg0drZEl2y6oLkquoaX4rCysAa9GDRCwKrHDsNivAbHsqtioqiGvrqgJE66Kqw4rzKyDKgaomp6TK2EsDyc0oOSol6NZJkmsvyxorMss5pR0KBquEixPpjsgXCpsnXQocq2MrfGmoivvLBeacahmLROpe6kcGCSfdC03qL6i6yitHHohrxzqq4UiP6JMqF8qThOshWAVUqHupDsoohQuJSkv/ywqLiwlNjG7o++hxi3vIKmleCdyrH6wYatdsPWsjLCNol+sSTDpryCptbBDK+qs4zBpLGc0Nqc1rdo09jX5MqsrHi2xKOad8igwJxAoeSsiqgkqdChcLOYxJzGlMkAsUzCuKzskTjAOKhuplqjHqf8wzDKYIGefNDISqd8pIC23Ltwu7zC9KgMsQDL/JcgrryYzLJ0oTSoyqpkmLax+KuejVyqxr08ulZ2XpyQr5yxRsEMpwzD0KmEqoihRC6mwF6xOplwmjSSmpMep0SvhpOEndCluqLyvtCGgo3unOyy9IXKtmZ9yIK8hlqohrEUtxh0XKH0sGi18p6coHa3Tow6psqa/JRUMU6yiKbUoXigQpo2i7C18q3ur6CnWrSateC3/KY+jlCJ6o6qr+x8VJUkSFadyAgGpji0xraytBSd+rYksTqDAHQAtxSjkqMAmNqxhqNesEi5uKsqlFqo9Kg6seizOrdusAasErjmtoKv8rb8ph6cYLnMmcKlCLJ6pjiuIKpkpKK1UKvyq3RhVpZac+izlrYitWB+DrI4omKOZKikiZS1Fqicf+q25rJmsqKrYrNGt0JWRLWel2KfLqQ==';
// Reusable fullscreen quad vertex shader (2 triangles covering NDC)
const FULLSCREEN_QUAD_VS = `
@@ -398,7 +409,7 @@ fn vs_main(@builtin(vertex_index) idx: u32) -> @builtin(position) vec4<f32> {
// Static features: 7D parametric features (RGBD + UV + sin(10*uv_x) + bias)
const STATIC_SHADER = `
@group(0) @binding(0) var input_tex: texture_2d<f32>;
-@group(0) @binding(1) var input_sampler: sampler;
+@group(0) @binding(1) var point_sampler: sampler;
@group(0) @binding(2) var depth_tex: texture_2d<f32>;
@group(0) @binding(3) var output_tex: texture_storage_2d<rgba32uint, write>;
@group(0) @binding(4) var<uniform> mip_level: u32;
@@ -409,18 +420,25 @@ fn main(@builtin(global_invocation_id) id: vec3<u32>) {
let dims = textureDimensions(input_tex);
if (coord.x >= i32(dims.x) || coord.y >= i32(dims.y)) { return; }
+ // Use normalized UV coords with point sampler (no filtering)
let uv = (vec2<f32>(coord) + 0.5) / vec2<f32>(dims);
- let rgba = textureSampleLevel(input_tex, input_sampler, uv, f32(mip_level));
- let d = textureLoad(depth_tex, coord, 0).r;
+ let rgba = textureSampleLevel(input_tex, point_sampler, uv, f32(mip_level));
+
+ let p0 = rgba.r;
+ let p1 = rgba.g;
+ let p2 = rgba.b;
+ let p3 = textureLoad(depth_tex, coord, 0).r;
+
let uv_x = f32(coord.x) / f32(dims.x);
- let uv_y = 1.0 - (f32(coord.y) / f32(dims.y));
+ let uv_y = f32(coord.y) / f32(dims.y);
let sin20_y = sin(20.0 * uv_y);
+ let bias = 1.0;
let packed = vec4<u32>(
- pack2x16float(vec2<f32>(rgba.r, rgba.g)),
- pack2x16float(vec2<f32>(rgba.b, d)),
+ pack2x16float(vec2<f32>(p0, p1)),
+ pack2x16float(vec2<f32>(p2, p3)),
pack2x16float(vec2<f32>(uv_x, uv_y)),
- pack2x16float(vec2<f32>(sin20_y, 1.0))
+ pack2x16float(vec2<f32>(sin20_y, bias))
);
textureStore(output_tex, coord, packed);
}`;
@@ -662,6 +680,7 @@ class CNNTester {
this.fps = 30;
this.isProcessing = false;
this.mipLevel = 0;
+ this.selectedChannel = 0;
this.init();
}
@@ -780,7 +799,7 @@ class CNNTester {
}
this.log(` Weight buffer: ${weights.length} u32 (${nonZero} non-zero)`);
- return { layers, weights, mipLevel, fileSize: buffer.byteLength };
+ return { version, layers, weights, mipLevel, fileSize: buffer.byteLength };
}
unpackF16(packed) {
@@ -914,11 +933,12 @@ class CNNTester {
updateWeightsPanel() {
const panel = document.getElementById('weightsInfo');
- const { layers, mipLevel, fileSize } = this.weights;
+ const { version, layers, mipLevel, fileSize } = this.weights;
let html = `
<div style="margin-bottom: 12px;">
<div><strong>File Size:</strong> ${(fileSize / 1024).toFixed(2)} KB</div>
+ <div><strong>Version:</strong> ${version}</div>
<div><strong>CNN Layers:</strong> ${layers.length}</div>
<div><strong>Mip Level:</strong> ${mipLevel} (p0-p3 features)</div>
<div style="font-size: 9px; color: #808080; margin-top: 4px;">Static features (input) + ${layers.length} conv layers</div>
@@ -1118,19 +1138,6 @@ class CNNTester {
// Generate mipmaps
this.generateMipmaps(this.inputTexture, width, height);
- const depthTex = this.device.createTexture({
- size: [width, height],
- format: 'r32float',
- usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST
- });
- const depthData = new Float32Array(width * height).fill(this.depth);
- this.device.queue.writeTexture(
- { texture: depthTex },
- depthData,
- { bytesPerRow: width * 4 },
- [width, height]
- );
-
const staticTex = this.device.createTexture({
size: [width, height],
format: 'rgba32uint',
@@ -1196,20 +1203,49 @@ class CNNTester {
});
this.device.queue.writeBuffer(mipLevelBuffer, 0, new Uint32Array([this.mipLevel]));
- if (!this.linearSampler) {
- this.linearSampler = this.device.createSampler({
- magFilter: 'linear',
- minFilter: 'linear',
- mipmapFilter: 'linear'
+ if (!this.pointSampler) {
+ this.pointSampler = this.device.createSampler({
+ magFilter: 'nearest',
+ minFilter: 'nearest',
+ mipmapFilter: 'nearest'
});
}
+ // Extract depth from alpha channel (or 1.0 if no alpha)
+ const depthTex = this.device.createTexture({
+ size: [width, height, 1],
+ format: 'r32float',
+ usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST
+ });
+
+ // Read image data to extract alpha channel
+ const tempCanvas = document.createElement('canvas');
+ tempCanvas.width = width;
+ tempCanvas.height = height;
+ const tempCtx = tempCanvas.getContext('2d');
+ tempCtx.drawImage(source, 0, 0, width, height);
+ const imageData = tempCtx.getImageData(0, 0, width, height);
+ const pixels = imageData.data;
+
+ // Extract alpha channel (RGBA format: every 4th byte)
+ const depthData = new Float32Array(width * height);
+ for (let i = 0; i < width * height; i++) {
+ depthData[i] = pixels[i * 4 + 3] / 255.0; // Alpha channel [0, 255] → [0, 1]
+ }
+
+ this.device.queue.writeTexture(
+ { texture: depthTex },
+ depthData,
+ { bytesPerRow: width * 4 },
+ [width, height, 1]
+ );
+
const staticBG = this.device.createBindGroup({
layout: staticPipeline.getBindGroupLayout(0),
entries: [
{ binding: 0, resource: this.inputTexture.createView() },
- { binding: 1, resource: this.linearSampler },
- { binding: 2, resource: depthTex.createView() },
+ { binding: 1, resource: this.pointSampler },
+ { binding: 2, resource: depthTex.createView() }, // Depth from alpha (matches training)
{ binding: 3, resource: staticTex.createView() },
{ binding: 4, resource: { buffer: mipLevelBuffer } }
]
@@ -1237,7 +1273,9 @@ class CNNTester {
const isOutput = i === this.weights.layers.length - 1;
// Calculate absolute weight offset in f16 units (add header offset)
- const headerOffsetU32 = 4 + this.weights.layers.length * 5; // Header + layer info in u32
+ // Version 1: 4 u32 header, Version 2: 5 u32 header
+ const headerSizeU32 = (this.weights.version === 1) ? 4 : 5;
+ const headerOffsetU32 = headerSizeU32 + this.weights.layers.length * 5; // Header + layer info in u32
const absoluteWeightOffset = headerOffsetU32 * 2 + layer.weightOffset; // Convert to f16 units
const paramsData = new Uint32Array(7);
@@ -1357,10 +1395,11 @@ class CNNTester {
const label = `Layer ${i - 1}`;
html += `<button onclick="tester.visualizeLayer(${i})" id="layerBtn${i}">${label}</button>`;
}
+ html += `<button onclick="tester.saveCompositedLayer()" style="margin-left: 20px; background: #28a745;">Save Composited</button>`;
html += '</div>';
html += '<div class="layer-grid" id="layerGrid"></div>';
- html += '<div class="layer-zoom"><div class="layer-view-label">Zoom x4</div><canvas id="zoomCanvas"></canvas></div>';
+ html += '<div class="layer-preview"><div class="layer-view-label" id="previewLabel">Ch0</div><canvas id="previewCanvas"></canvas></div>';
panel.innerHTML = html;
this.log(`Layer visualization ready: ${this.layerOutputs.length} layers`);
@@ -1395,8 +1434,10 @@ class CNNTester {
<div class="layer-view-label" id="channelLabel${c}">Ch ${c}</div>
<canvas id="layerCanvas${c}"></canvas>
`;
+ div.onclick = () => this.selectChannel(c);
grid.appendChild(div);
}
+ this.selectedChannel = 0;
}
async visualizeLayer(layerIdx, channelOffset = 0) {
@@ -1486,7 +1527,7 @@ class CNNTester {
continue;
}
- const vizScale = layerIdx === 0 ? 1.0 : 0.5; // Static: 1.0, CNN layers: 0.5 (4 channels [0,1])
+ const vizScale = 1.0; // Always 1.0, shader clamps to [0,1]
const paramsBuffer = this.device.createBuffer({
size: 8,
usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
@@ -1527,42 +1568,37 @@ class CNNTester {
await this.device.queue.onSubmittedWorkDone();
this.log(`Rendered 4 channels for ${layerName}`);
- // Set up mouse tracking for zoom view
- this.setupZoomTracking(layerTex, channelOffset);
+ // Update active channel highlighting and preview
+ this.updateChannelSelection();
+ await this.renderChannelPreview();
}
- setupZoomTracking(layerTex, channelOffset) {
- const zoomCanvas = document.getElementById('zoomCanvas');
- if (!zoomCanvas) return;
-
- const width = this.isVideo ? this.video.videoWidth : this.image.width;
- const height = this.isVideo ? this.video.videoHeight : this.image.height;
- const zoomSize = 32; // Show 32x32 area
- zoomCanvas.width = zoomSize;
- zoomCanvas.height = zoomSize;
-
- // Add mousemove handlers to all layer canvases
- for (let c = 0; c < 4; c++) {
- const canvas = document.getElementById(`layerCanvas${c}`);
- if (!canvas) continue;
+ selectChannel(channelIdx) {
+ this.selectedChannel = channelIdx;
+ this.updateChannelSelection();
+ this.renderChannelPreview();
+ }
- const updateZoom = (e) => {
- const rect = canvas.getBoundingClientRect();
- const x = Math.floor((e.clientX - rect.left) / rect.width * width);
- const y = Math.floor((e.clientY - rect.top) / rect.height * height);
- this.renderZoom(layerTex, channelOffset, x, y, zoomSize);
- };
+ updateChannelSelection() {
+ const grid = document.getElementById('layerGrid');
+ if (!grid) return;
- canvas.onmousemove = updateZoom;
- canvas.onmouseenter = updateZoom;
- }
+ const views = grid.querySelectorAll('.layer-view');
+ views.forEach((view, idx) => {
+ view.classList.toggle('active', idx === this.selectedChannel);
+ });
}
- async renderZoom(layerTex, channelOffset, centerX, centerY, zoomSize) {
- const zoomCanvas = document.getElementById('zoomCanvas');
- if (!zoomCanvas || !this.device) return;
+ async renderChannelPreview() {
+ const previewCanvas = document.getElementById('previewCanvas');
+ const previewLabel = document.getElementById('previewLabel');
+ if (!previewCanvas || !this.device) return;
+
+ const { width, height } = this.getDimensions();
+ previewCanvas.width = width;
+ previewCanvas.height = height;
- const ctx = zoomCanvas.getContext('webgpu');
+ const ctx = previewCanvas.getContext('webgpu');
if (!ctx) return;
try {
@@ -1571,91 +1607,30 @@ class CNNTester {
return;
}
- const halfSize = Math.floor(zoomSize / 2);
- const width = this.isVideo ? this.video.videoWidth : this.image.width;
- const height = this.isVideo ? this.video.videoHeight : this.image.height;
-
- // Create shader for zoomed view (samples 4 channels and displays as 2x2 grid)
- const zoomShader = `
- @group(0) @binding(0) var layer_tex: texture_2d<u32>;
- @group(0) @binding(1) var<uniform> params: vec4<f32>; // centerX, centerY, channelOffset, scale
-
- @vertex
- fn vs_main(@builtin(vertex_index) idx: u32) -> @builtin(position) vec4<f32> {
- var pos = array<vec2<f32>, 6>(
- vec2<f32>(-1.0, -1.0), vec2<f32>(1.0, -1.0), vec2<f32>(-1.0, 1.0),
- vec2<f32>(-1.0, 1.0), vec2<f32>(1.0, -1.0), vec2<f32>(1.0, 1.0)
- );
- return vec4<f32>(pos[idx], 0.0, 1.0);
- }
-
- @fragment
- fn fs_main(@builtin(position) pos: vec4<f32>) -> @location(0) vec4<f32> {
- let dims = textureDimensions(layer_tex);
- let centerX = i32(params.x);
- let centerY = i32(params.y);
- let channelOffset = u32(params.z);
- let scale = params.w;
-
- // Map output pixel to source pixel
- let halfSize = 16;
- let localX = i32(pos.x) - halfSize;
- let localY = i32(pos.y) - halfSize;
- let srcX = clamp(centerX + localX, 0, i32(dims.x) - 1);
- let srcY = clamp(centerY + localY, 0, i32(dims.y) - 1);
-
- let coord = vec2<i32>(srcX, srcY);
- let packed = textureLoad(layer_tex, coord, 0);
- let v0 = unpack2x16float(packed.x);
- let v1 = unpack2x16float(packed.y);
- let v2 = unpack2x16float(packed.z);
- let v3 = unpack2x16float(packed.w);
-
- var channels: array<f32, 8>;
- channels[0] = v0.x;
- channels[1] = v0.y;
- channels[2] = v1.x;
- channels[3] = v1.y;
- channels[4] = v2.x;
- channels[5] = v2.y;
- channels[6] = v3.x;
- channels[7] = v3.y;
-
- // Determine which quadrant (channel) to show
- let quadX = i32(pos.x) / 16;
- let quadY = i32(pos.y) / 16;
- let channelIdx = min(channelOffset + u32(quadY * 2 + quadX), 7u);
+ // Update label
+ const channelLabel = document.getElementById(`channelLabel${this.selectedChannel}`);
+ if (channelLabel && previewLabel) {
+ previewLabel.textContent = channelLabel.textContent;
+ }
- let val = clamp(channels[channelIdx] * scale, 0.0, 1.0);
- return vec4<f32>(val, val, val, 1.0);
- }
- `;
+ // Render selected channel
+ const layerIdx = this.currentLayerIdx;
+ const channelOffset = this.currentChannelOffset;
+ const layerTex = this.layerOutputs[layerIdx];
+ if (!layerTex) return;
- if (!this.zoomPipeline) {
- this.zoomPipeline = this.device.createRenderPipeline({
- layout: 'auto',
- vertex: {
- module: this.device.createShaderModule({ code: zoomShader }),
- entryPoint: 'vs_main'
- },
- fragment: {
- module: this.device.createShaderModule({ code: zoomShader }),
- entryPoint: 'fs_main',
- targets: [{ format: this.format }]
- }
- });
- }
+ const vizScale = layerIdx === 0 ? 1.0 : 0.5;
+ const actualChannel = channelOffset + this.selectedChannel;
- const vizScale = channelOffset === 0 ? 1.0 : 0.5;
const paramsBuffer = this.device.createBuffer({
- size: 16,
+ size: 8,
usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
});
- const paramsData = new Float32Array([centerX, centerY, channelOffset, vizScale]);
+ const paramsData = new Float32Array([actualChannel, vizScale]);
this.device.queue.writeBuffer(paramsBuffer, 0, paramsData);
const bindGroup = this.device.createBindGroup({
- layout: this.zoomPipeline.getBindGroupLayout(0),
+ layout: this.layerVizPipeline.getBindGroupLayout(0),
entries: [
{ binding: 0, resource: layerTex.createView() },
{ binding: 1, resource: { buffer: paramsBuffer } }
@@ -1671,7 +1646,7 @@ class CNNTester {
}]
});
- renderPass.setPipeline(this.zoomPipeline);
+ renderPass.setPipeline(this.layerVizPipeline);
renderPass.setBindGroup(0, bindGroup);
renderPass.draw(6);
renderPass.end();
@@ -1770,6 +1745,156 @@ class CNNTester {
this.device.queue.submit([encoder.finish()]);
}
+
+ async savePNG() {
+ if (!this.image && !this.isVideo) {
+ this.log('No image loaded', 'error');
+ return;
+ }
+
+ if (!this.resultTexture) {
+ this.log('No result to save', 'error');
+ return;
+ }
+
+ try {
+ const { width, height } = this.getDimensions();
+
+ // GPU readback from result texture
+ const bytesPerRow = width * 16; // 4×u32 per pixel
+ const paddedBytesPerRow = Math.ceil(bytesPerRow / 256) * 256;
+ const bufferSize = paddedBytesPerRow * height;
+
+ const stagingBuffer = this.device.createBuffer({
+ size: bufferSize,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ
+ });
+
+ const encoder = this.device.createCommandEncoder();
+ encoder.copyTextureToBuffer(
+ { texture: this.resultTexture },
+ { buffer: stagingBuffer, bytesPerRow: paddedBytesPerRow, rowsPerImage: height },
+ { width, height, depthOrArrayLayers: 1 }
+ );
+ this.device.queue.submit([encoder.finish()]);
+
+ await stagingBuffer.mapAsync(GPUMapMode.READ);
+ const mapped = new Uint8Array(stagingBuffer.getMappedRange());
+
+ // Unpack f16 to RGBA8
+ const pixels = new Uint8Array(width * height * 4);
+ for (let y = 0; y < height; y++) {
+ const rowOffset = y * paddedBytesPerRow;
+ for (let x = 0; x < width; x++) {
+ const pixelOffset = rowOffset + x * 16;
+ const data = new Uint32Array(mapped.buffer, mapped.byteOffset + pixelOffset, 4);
+
+ // Unpack f16 (first 4 channels only)
+ const unpack = (u32, idx) => {
+ const h = (idx === 0) ? (u32 & 0xFFFF) : ((u32 >> 16) & 0xFFFF);
+ const sign = (h >> 15) & 1;
+ const exp = (h >> 10) & 0x1F;
+ const frac = h & 0x3FF;
+ if (exp === 0) return 0;
+ if (exp === 31) return sign ? 0 : 255;
+ const e = exp - 15;
+ const val = (1 + frac / 1024) * Math.pow(2, e);
+ return Math.max(0, Math.min(255, Math.round(val * 255)));
+ };
+
+ const outIdx = (y * width + x) * 4;
+ pixels[outIdx + 0] = unpack(data[0], 0); // R
+ pixels[outIdx + 1] = unpack(data[0], 1); // G
+ pixels[outIdx + 2] = unpack(data[1], 0); // B
+ pixels[outIdx + 3] = 255; // A
+ }
+ }
+
+ stagingBuffer.unmap();
+ stagingBuffer.destroy();
+
+ // Create blob from pixels
+ const canvas = document.createElement('canvas');
+ canvas.width = width;
+ canvas.height = height;
+ const ctx = canvas.getContext('2d');
+ const imageData = new ImageData(new Uint8ClampedArray(pixels), width, height);
+ ctx.putImageData(imageData, 0, 0);
+
+ const blob = await new Promise(resolve => canvas.toBlob(resolve, 'image/png'));
+ const url = URL.createObjectURL(blob);
+ const a = document.createElement('a');
+ const mode = ['cnn', 'original', 'diff'][this.viewMode];
+ a.href = url;
+ a.download = `output_${width}x${height}_${mode}.png`;
+ a.click();
+ URL.revokeObjectURL(url);
+
+ this.log(`Saved PNG: ${a.download}`);
+ this.setStatus(`Saved: ${a.download}`);
+ } catch (err) {
+ this.log(`Failed to save PNG: ${err.message}`, 'error');
+ this.setStatus(`Save failed: ${err.message}`, true);
+ }
+ }
+
+ async saveCompositedLayer() {
+ if (!this.currentLayerIdx) {
+ this.log('No layer selected for compositing', 'error');
+ return;
+ }
+
+ try {
+ const canvases = [];
+ for (let i = 0; i < 4; i++) {
+ const canvas = document.getElementById(`layerCanvas${i}`);
+ if (!canvas) {
+ this.log(`Canvas layerCanvas${i} not found`, 'error');
+ return;
+ }
+ canvases.push(canvas);
+ }
+
+ const width = canvases[0].width;
+ const height = canvases[0].height;
+ const compositedWidth = width * 4;
+
+ // Create composited canvas
+ const compositedCanvas = document.createElement('canvas');
+ compositedCanvas.width = compositedWidth;
+ compositedCanvas.height = height;
+ const ctx = compositedCanvas.getContext('2d');
+
+ // Composite horizontally
+ for (let i = 0; i < 4; i++) {
+ ctx.drawImage(canvases[i], i * width, 0);
+ }
+
+ // Convert to grayscale
+ const imageData = ctx.getImageData(0, 0, compositedWidth, height);
+ const pixels = imageData.data;
+ for (let i = 0; i < pixels.length; i += 4) {
+ const gray = 0.299 * pixels[i] + 0.587 * pixels[i + 1] + 0.114 * pixels[i + 2];
+ pixels[i] = pixels[i + 1] = pixels[i + 2] = gray;
+ }
+ ctx.putImageData(imageData, 0, 0);
+
+ // Save as PNG
+ const blob = await new Promise(resolve => compositedCanvas.toBlob(resolve, 'image/png'));
+ const url = URL.createObjectURL(blob);
+ const a = document.createElement('a');
+ a.href = url;
+ a.download = `composited_layer${this.currentLayerIdx}_${compositedWidth}x${height}.png`;
+ a.click();
+ URL.revokeObjectURL(url);
+
+ this.log(`Saved composited layer: ${a.download}`);
+ this.setStatus(`Saved: ${a.download}`);
+ } catch (err) {
+ this.log(`Failed to save composited layer: ${err.message}`, 'error');
+ this.setStatus(`Compositing failed: ${err.message}`, true);
+ }
+ }
}
const tester = new CNNTester();
@@ -1869,6 +1994,7 @@ document.getElementById('mipLevel').addEventListener('change', e => {
document.getElementById('playPauseBtn').addEventListener('click', () => tester.togglePlayPause());
document.getElementById('stepBackBtn').addEventListener('click', () => tester.stepFrame(-1));
document.getElementById('stepForwardBtn').addEventListener('click', () => tester.stepFrame(1));
+document.getElementById('savePngBtn').addEventListener('click', () => tester.savePNG());
document.addEventListener('keydown', e => {
if (e.code === 'Space') {