author    skal <pascal.massimino@gmail.com>  2026-02-14 15:14:25 +0100
committer skal <pascal.massimino@gmail.com>  2026-02-14 15:14:25 +0100
commit    8ce27b7e15f0fc65c8ee78950c7501660b936178 (patch)
tree      391f32111b9a30a0156709b6c1ed2fae7b435d57 /tools
parent    e38be0dbf5816338ff97e2ee2f9adfff2902dc2b (diff)
style: Apply clang-format to codebase
Diffstat (limited to 'tools')
-rw-r--r--  tools/asset_packer.cc       |  34
-rw-r--r--  tools/cnn_test.cc           | 272
-rw-r--r--  tools/seq_compiler.cc       |  37
-rw-r--r--  tools/shadertoy/template.cc |  10
-rw-r--r--  tools/shadertoy/template.h  |   2
-rw-r--r--  tools/tracker_compiler.cc   |  33
6 files changed, 218 insertions, 170 deletions
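
For reference, below is a minimal .clang-format sketch consistent with the reformatting in this diff. The project's actual configuration file is not part of this commit; the base style and option values here are assumptions inferred from the changes (80-column wrapping, single-line if statements split onto two lines, short inline operator bodies broken out, references attached to the type).

    # Assumed settings only -- the project's real .clang-format is not shown in this commit.
    BasedOnStyle: LLVM
    ColumnLimit: 80
    IndentWidth: 2
    AllowShortIfStatementsOnASingleLine: Never
    AllowShortFunctionsOnASingleLine: None
    DerivePointerAlignment: false
    PointerAlignment: Left

A configuration along these lines wraps long fprintf() argument lists at 80 columns, re-aligns continuation parameters to the opening parenthesis, and forbids single-statement if bodies on the same line, which matches the hunks that follow.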
diff --git a/tools/asset_packer.cc b/tools/asset_packer.cc
index af89a88..fdecb58 100644
--- a/tools/asset_packer.cc
+++ b/tools/asset_packer.cc
@@ -4,8 +4,8 @@
#include <algorithm> // For std::count
#include <cmath>
-#include <cstdio> // for simplicity, use fprintf() for output generation
-#include <cstring> // For std::memcpy
+#include <cstdio> // for simplicity, use fprintf() for output generation
+#include <cstring> // For std::memcpy
#include <filesystem> // For path normalization
#include <fstream>
#include <map>
@@ -52,8 +52,8 @@ static bool HasMeshExtension(const std::string& filename) {
struct AssetBuildInfo;
static bool ParseProceduralParams(const std::string& params_str,
- std::vector<float>* out_params,
- const std::string& asset_name) {
+ std::vector<float>* out_params,
+ const std::string& asset_name) {
size_t current_pos = 0;
while (current_pos < params_str.length()) {
size_t comma_pos = params_str.find(',', current_pos);
@@ -93,7 +93,7 @@ struct AssetBuildInfo {
};
static bool ParseProceduralFunction(const std::string& compression_type_str,
- AssetBuildInfo* info, bool is_gpu) {
+ AssetBuildInfo* info, bool is_gpu) {
const char* prefix = is_gpu ? "PROC_GPU(" : "PROC(";
size_t prefix_len = is_gpu ? 9 : 5;
@@ -144,15 +144,21 @@ static bool ParseProceduralFunction(const std::string& compression_type_str,
struct Vec3 {
float x, y, z;
- Vec3 operator+(const Vec3& o) const { return {x + o.x, y + o.y, z + o.z}; }
+ Vec3 operator+(const Vec3& o) const {
+ return {x + o.x, y + o.y, z + o.z};
+ }
Vec3 operator+=(const Vec3& o) {
x += o.x;
y += o.y;
z += o.z;
return *this;
}
- Vec3 operator-(const Vec3& o) const { return {x - o.x, y - o.y, z - o.z}; }
- Vec3 operator*(float s) const { return {x * s, y * s, z * s}; }
+ Vec3 operator-(const Vec3& o) const {
+ return {x - o.x, y - o.y, z - o.z};
+ }
+ Vec3 operator*(float s) const {
+ return {x * s, y * s, z * s};
+ }
static Vec3 cross(const Vec3& a, const Vec3& b) {
return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z,
a.x * b.y - a.y * b.x};
@@ -168,8 +174,8 @@ struct Vertex {
};
static bool ProcessMeshFile(const std::string& full_path,
- std::vector<uint8_t>* buffer,
- const std::string& asset_name) {
+ std::vector<uint8_t>* buffer,
+ const std::string& asset_name) {
std::ifstream obj_file(full_path);
if (!obj_file.is_open()) {
fprintf(stderr, "Error: Could not open mesh file: %s\n", full_path.c_str());
@@ -269,8 +275,8 @@ static bool ProcessMeshFile(const std::string& full_path,
for (const auto& face : raw_faces) {
for (int i = 0; i < 3; ++i) {
char key_buf[128];
- std::snprintf(key_buf, sizeof(key_buf), "%d/%d/%d", face.v[i],
- face.vt[i], face.vn[i]);
+ std::snprintf(key_buf, sizeof(key_buf), "%d/%d/%d", face.v[i], face.vt[i],
+ face.vn[i]);
std::string key = key_buf;
if (vertex_map.find(key) == vertex_map.end()) {
@@ -318,8 +324,8 @@ static bool ProcessMeshFile(const std::string& full_path,
}
static bool ProcessImageFile(const std::string& full_path,
- std::vector<uint8_t>* buffer,
- const std::string& asset_name) {
+ std::vector<uint8_t>* buffer,
+ const std::string& asset_name) {
int w, h, channels;
unsigned char* img_data =
stbi_load(full_path.c_str(), &w, &h, &channels, 4); // Force RGBA
diff --git a/tools/cnn_test.cc b/tools/cnn_test.cc
index 740f41d..7d060ae 100644
--- a/tools/cnn_test.cc
+++ b/tools/cnn_test.cc
@@ -5,30 +5,30 @@
#error "cnn_test requires STRIP_ALL=OFF (tool builds only)"
#endif
-#include "platform/platform.h"
-#include "gpu/gpu.h"
+#include "effects/cnn_effect.h"
+#include "generated/assets.h"
#include "gpu/bind_group_builder.h"
+#include "gpu/gpu.h"
#include "gpu/pipeline_builder.h"
-#include "gpu/sampler_cache.h"
-#include "gpu/texture_readback.h"
#include "gpu/post_process_helper.h"
-#include "effects/cnn_effect.h"
+#include "gpu/sampler_cache.h"
#include "gpu/shader_composer.h"
#include "gpu/shaders.h"
-#include "tests/common/webgpu_test_fixture.h"
+#include "gpu/texture_readback.h"
+#include "platform/platform.h"
#include "tests/common/offscreen_render_target.h"
-#include "generated/assets.h"
+#include "tests/common/webgpu_test_fixture.h"
#include "util/asset_manager.h"
#include "util/mini_math.h"
#include "stb_image.h"
#include "wgpu-native/examples/capture/stb_image_write.h"
+#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
-#include <cmath>
// Helper to get asset string or empty string
static const char* SafeGetAsset(AssetId id) {
@@ -43,11 +43,12 @@ struct Args {
float blend = 1.0f;
bool output_png = true; // Default to PNG
const char* save_intermediates = nullptr;
- int num_layers = 3; // Default to 3 layers
- bool debug_hex = false; // Print first 8 pixels as hex
- int cnn_version = 1; // 1=CNNEffect, 2=CNNv2Effect
+ int num_layers = 3; // Default to 3 layers
+ bool debug_hex = false; // Print first 8 pixels as hex
+ int cnn_version = 1; // 1=CNNEffect, 2=CNNv2Effect
const char* weights_path = nullptr; // Optional .bin weights file
- bool cnn_version_explicit = false; // Track if --cnn-version was explicitly set
+ bool cnn_version_explicit =
+ false; // Track if --cnn-version was explicitly set
};
// Parse command-line arguments
@@ -107,14 +108,17 @@ static bool parse_args(int argc, char** argv, Args* args) {
// Force CNN v2 when --weights is specified
if (args->weights_path) {
if (args->cnn_version_explicit && args->cnn_version != 2) {
- fprintf(stderr, "WARNING: --cnn-version %d ignored (--weights forces CNN v2)\n",
+ fprintf(stderr,
+ "WARNING: --cnn-version %d ignored (--weights forces CNN v2)\n",
args->cnn_version);
}
args->cnn_version = 2;
// Warn if --layers was specified (binary file config takes precedence)
- if (args->num_layers != 3) { // 3 is the default
- fprintf(stderr, "WARNING: --layers %d ignored (--weights loads layer config from .bin)\n",
+ if (args->num_layers != 3) { // 3 is the default
+ fprintf(stderr,
+ "WARNING: --layers %d ignored (--weights loads layer config from "
+ ".bin)\n",
args->num_layers);
}
}
@@ -126,20 +130,30 @@ static bool parse_args(int argc, char** argv, Args* args) {
static void print_usage(const char* prog) {
fprintf(stderr, "Usage: %s input.png output.png [OPTIONS]\n", prog);
fprintf(stderr, "\nOPTIONS:\n");
- fprintf(stderr, " --blend F Final blend amount (0.0-1.0, default: 1.0)\n");
+ fprintf(stderr,
+ " --blend F Final blend amount (0.0-1.0, default: "
+ "1.0)\n");
fprintf(stderr, " --format ppm|png Output format (default: png)\n");
- fprintf(stderr, " --layers N Number of CNN layers (1-10, default: 3, ignored with --weights)\n");
- fprintf(stderr, " --save-intermediates DIR Save intermediate layers to directory\n");
- fprintf(stderr, " --debug-hex Print first 8 pixels as hex (debug)\n");
- fprintf(stderr, " --cnn-version N CNN version: 1 (default) or 2 (ignored with --weights)\n");
- fprintf(stderr, " --weights PATH Load weights from .bin (forces CNN v2, overrides layer config)\n");
+ fprintf(stderr,
+ " --layers N Number of CNN layers (1-10, default: 3, "
+ "ignored with --weights)\n");
+ fprintf(stderr,
+ " --save-intermediates DIR Save intermediate layers to directory\n");
+ fprintf(stderr,
+ " --debug-hex Print first 8 pixels as hex (debug)\n");
+ fprintf(stderr,
+ " --cnn-version N CNN version: 1 (default) or 2 (ignored "
+ "with --weights)\n");
+ fprintf(stderr,
+ " --weights PATH Load weights from .bin (forces CNN v2, "
+ "overrides layer config)\n");
fprintf(stderr, " --help Show this help\n");
}
// Load PNG and upload to GPU texture
static WGPUTexture load_texture(WGPUDevice device, WGPUQueue queue,
- const char* path, int* out_width,
- int* out_height) {
+ const char* path, int* out_width,
+ int* out_height) {
int width, height, channels;
uint8_t* data = stbi_load(path, &width, &height, &channels, 4);
if (!data) {
@@ -192,13 +206,14 @@ static WGPUTexture load_texture(WGPUDevice device, WGPUQueue queue,
// Load PNG alpha channel as depth texture (or 1.0 if no alpha)
static WGPUTexture load_depth_from_alpha(WGPUDevice device, WGPUQueue queue,
- const char* path, int width,
- int height) {
+ const char* path, int width,
+ int height) {
int w, h, channels;
uint8_t* data = stbi_load(path, &w, &h, &channels, 4);
if (!data || w != width || h != height) {
fprintf(stderr, "Error: failed to load depth from '%s'\n", path);
- if (data) stbi_image_free(data);
+ if (data)
+ stbi_image_free(data);
return nullptr;
}
@@ -228,19 +243,13 @@ static WGPUTexture load_depth_from_alpha(WGPUDevice device, WGPUQueue queue,
}
// Write depth data
- const WGPUTexelCopyTextureInfo dst = {
- .texture = depth_texture,
- .mipLevel = 0
- };
+ const WGPUTexelCopyTextureInfo dst = {.texture = depth_texture,
+ .mipLevel = 0};
const WGPUTexelCopyBufferLayout layout = {
.bytesPerRow = static_cast<uint32_t>(width * sizeof(float)),
- .rowsPerImage = static_cast<uint32_t>(height)
- };
- const WGPUExtent3D size = {
- static_cast<uint32_t>(width),
- static_cast<uint32_t>(height),
- 1
- };
+ .rowsPerImage = static_cast<uint32_t>(height)};
+ const WGPUExtent3D size = {static_cast<uint32_t>(width),
+ static_cast<uint32_t>(height), 1};
wgpuQueueWriteTexture(queue, &dst, depth_data.data(),
depth_data.size() * sizeof(float), &layout, &size);
@@ -253,8 +262,8 @@ static WGPUTexture load_depth_from_alpha(WGPUDevice device, WGPUQueue queue,
// Create CNN render pipeline (5 bindings)
// Takes both intermediate format (RGBA16Float) and final format (BGRA8Unorm)
static WGPURenderPipeline create_cnn_pipeline(WGPUDevice device,
- WGPUTextureFormat format,
- bool is_final_layer) {
+ WGPUTextureFormat format,
+ bool is_final_layer) {
const char* shader_code = SafeGetAsset(AssetId::ASSET_SHADER_CNN_LAYER);
// Debug: check if shader loaded
@@ -274,14 +283,16 @@ static WGPURenderPipeline create_cnn_pipeline(WGPUDevice device,
.build(device);
// Use appropriate format: RGBA16Float for intermediate, BGRA8Unorm for final
- WGPUTextureFormat output_format =
- is_final_layer ? WGPUTextureFormat_BGRA8Unorm : WGPUTextureFormat_RGBA16Float;
+ WGPUTextureFormat output_format = is_final_layer
+ ? WGPUTextureFormat_BGRA8Unorm
+ : WGPUTextureFormat_RGBA16Float;
- WGPURenderPipeline pipeline = RenderPipelineBuilder(device)
- .shader(shader_code) // compose=true by default
- .bind_group_layout(bgl)
- .format(output_format)
- .build();
+ WGPURenderPipeline pipeline =
+ RenderPipelineBuilder(device)
+ .shader(shader_code) // compose=true by default
+ .bind_group_layout(bgl)
+ .format(output_format)
+ .build();
wgpuBindGroupLayoutRelease(bgl);
return pipeline;
@@ -289,7 +300,7 @@ static WGPURenderPipeline create_cnn_pipeline(WGPUDevice device,
// Begin render pass with clear
static WGPURenderPassEncoder begin_render_pass(WGPUCommandEncoder encoder,
- WGPUTextureView view) {
+ WGPUTextureView view) {
const WGPURenderPassColorAttachment color_attachment = {
.view = view,
.depthSlice = WGPU_DEPTH_SLICE_UNDEFINED,
@@ -328,7 +339,8 @@ static bool save_png(const char* path, const std::vector<uint8_t>& pixels,
// Create horizontal grayscale composite of layer outputs
// Each layer is already 4x wide (showing 4 channels), stack them vertically
-static bool save_layer_composite(const char* dir, int width, int height, int num_layers) {
+static bool save_layer_composite(const char* dir, int width, int height,
+ int num_layers) {
// Each layer PNG is already 4x wide with 4 channels side-by-side
int layer_width = width * 4;
@@ -341,8 +353,11 @@ static bool save_layer_composite(const char* dir, int width, int height, int num
int w, h, channels;
uint8_t* data = stbi_load(path, &w, &h, &channels, 1); // Load as grayscale
if (!data || w != layer_width || h != height) {
- if (data) stbi_image_free(data);
- fprintf(stderr, "Warning: failed to load layer %d for composite (expected %dx%d, got %dx%d)\n",
+ if (data)
+ stbi_image_free(data);
+ fprintf(stderr,
+ "Warning: failed to load layer %d for composite (expected %dx%d, "
+ "got %dx%d)\n",
i, layer_width, height, w, h);
return false;
}
@@ -359,13 +374,15 @@ static bool save_layer_composite(const char* dir, int width, int height, int num
for (int y = 0; y < height; ++y) {
int src_row_offset = y * layer_width;
int dst_row_offset = (layer * height + y) * layer_width;
- memcpy(&composite[dst_row_offset], &layers[layer][src_row_offset], layer_width);
+ memcpy(&composite[dst_row_offset], &layers[layer][src_row_offset],
+ layer_width);
}
}
// Save as grayscale PNG (stacked vertically)
char composite_path[512];
- snprintf(composite_path, sizeof(composite_path), "%s/layers_composite.png", dir);
+ snprintf(composite_path, sizeof(composite_path), "%s/layers_composite.png",
+ dir);
if (!stbi_write_png(composite_path, layer_width, composite_height, 1,
composite.data(), layer_width)) {
fprintf(stderr, "Error: failed to write composite PNG\n");
@@ -388,8 +405,8 @@ static bool save_ppm(const char* path, const std::vector<uint8_t>& pixels,
fprintf(f, "P6\n%d %d\n255\n", width, height);
for (int i = 0; i < width * height; ++i) {
- const uint8_t rgb[3] = {pixels[i * 4 + 2], // R
- pixels[i * 4 + 1], // G
+ const uint8_t rgb[3] = {pixels[i * 4 + 2], // R
+ pixels[i * 4 + 1], // G
pixels[i * 4 + 0]}; // B
fwrite(rgb, 1, 3, f);
}
@@ -423,9 +440,9 @@ struct CNNv2StaticFeatureParams {
};
// Convert RGBA32Uint (packed f16) texture to BGRA8Unorm
-static std::vector<uint8_t> readback_rgba32uint_to_bgra8(
- WGPUDevice device, WGPUQueue queue, WGPUTexture texture,
- int width, int height) {
+static std::vector<uint8_t>
+readback_rgba32uint_to_bgra8(WGPUDevice device, WGPUQueue queue,
+ WGPUTexture texture, int width, int height) {
// Create staging buffer
const uint32_t bytes_per_row = width * 16; // 4×u32 per pixel
const uint32_t padded_bytes_per_row = (bytes_per_row + 255) & ~255;
@@ -450,10 +467,8 @@ static std::vector<uint8_t> readback_rgba32uint_to_bgra8(
dst.layout.bytesPerRow = padded_bytes_per_row;
dst.layout.rowsPerImage = height;
- WGPUExtent3D copy_size = {
- static_cast<uint32_t>(width),
- static_cast<uint32_t>(height),
- 1};
+ WGPUExtent3D copy_size = {static_cast<uint32_t>(width),
+ static_cast<uint32_t>(height), 1};
wgpuCommandEncoderCopyTextureToBuffer(encoder, &src, &dst, &copy_size);
@@ -527,7 +542,8 @@ static std::vector<uint8_t> readback_rgba32uint_to_bgra8(
uint32_t frac = h & 0x3FF;
if (exp == 0) {
- if (frac == 0) return sign ? -0.0f : 0.0f;
+ if (frac == 0)
+ return sign ? -0.0f : 0.0f;
// Denormal
float val = frac / 1024.0f / 16384.0f;
return sign ? -val : val;
@@ -548,8 +564,10 @@ static std::vector<uint8_t> readback_rgba32uint_to_bgra8(
// Clamp to [0,1] and convert to u8
auto clamp_u8 = [](float v) -> uint8_t {
- if (v <= 0.0f) return 0;
- if (v >= 1.0f) return 255;
+ if (v <= 0.0f)
+ return 0;
+ if (v >= 1.0f)
+ return 255;
return static_cast<uint8_t>(v * 255.0f + 0.5f);
};
@@ -566,14 +584,16 @@ static std::vector<uint8_t> readback_rgba32uint_to_bgra8(
return result;
}
-// Read RGBA32Uint and create 4x wide grayscale composite (each channel side-by-side)
-static std::vector<uint8_t> readback_rgba32uint_to_composite(
- WGPUDevice device, WGPUQueue queue, WGPUTexture texture,
- int width, int height) {
-
+// Read RGBA32Uint and create 4x wide grayscale composite (each channel
+// side-by-side)
+static std::vector<uint8_t>
+readback_rgba32uint_to_composite(WGPUDevice device, WGPUQueue queue,
+ WGPUTexture texture, int width, int height) {
// First get BGRA8 data
- std::vector<uint8_t> bgra = readback_rgba32uint_to_bgra8(device, queue, texture, width, height);
- if (bgra.empty()) return {};
+ std::vector<uint8_t> bgra =
+ readback_rgba32uint_to_bgra8(device, queue, texture, width, height);
+ if (bgra.empty())
+ return {};
// Create 4x wide grayscale image (one channel per horizontal strip)
int composite_width = width * 4;
@@ -591,10 +611,14 @@ static std::vector<uint8_t> readback_rgba32uint_to_composite(
auto to_gray = [](uint8_t val) -> uint8_t { return val; };
// Place each channel in its horizontal strip
- composite[y * composite_width + (0 * width + x)] = to_gray(r); // Channel 0
- composite[y * composite_width + (1 * width + x)] = to_gray(g); // Channel 1
- composite[y * composite_width + (2 * width + x)] = to_gray(b); // Channel 2
- composite[y * composite_width + (3 * width + x)] = to_gray(a); // Channel 3
+ composite[y * composite_width + (0 * width + x)] =
+ to_gray(r); // Channel 0
+ composite[y * composite_width + (1 * width + x)] =
+ to_gray(g); // Channel 1
+ composite[y * composite_width + (2 * width + x)] =
+ to_gray(b); // Channel 2
+ composite[y * composite_width + (3 * width + x)] =
+ to_gray(a); // Channel 3
}
}
@@ -610,14 +634,15 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
// Load weights (from file or asset system)
size_t weights_size = 0;
const uint8_t* weights_data = nullptr;
- std::vector<uint8_t> file_weights; // For file-based loading
+ std::vector<uint8_t> file_weights; // For file-based loading
if (args.weights_path) {
// Load from file
printf("Loading weights from '%s'...\n", args.weights_path);
FILE* f = fopen(args.weights_path, "rb");
if (!f) {
- fprintf(stderr, "Error: failed to open weights file '%s'\n", args.weights_path);
+ fprintf(stderr, "Error: failed to open weights file '%s'\n",
+ args.weights_path);
return false;
}
@@ -637,7 +662,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
weights_data = file_weights.data();
} else {
// Load from asset system
- weights_data = (const uint8_t*)GetAsset(AssetId::ASSET_WEIGHTS_CNN_V2, &weights_size);
+ weights_data =
+ (const uint8_t*)GetAsset(AssetId::ASSET_WEIGHTS_CNN_V2, &weights_size);
}
if (!weights_data || weights_size < 20) {
@@ -652,7 +678,7 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
uint32_t num_layers = header[2];
uint32_t total_weights = header[3];
- if (magic != 0x324e4e43) { // 'CNN2'
+ if (magic != 0x324e4e43) { // 'CNN2'
fprintf(stderr, "Error: Invalid CNN v2 weights magic\n");
return false;
}
@@ -684,9 +710,10 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
info.out_channels, info.weight_count);
}
- // Create weights storage buffer (skip header + layer info, upload only weights)
- size_t header_size = 20; // 5 u32
- size_t layer_info_size = 20 * layer_info.size(); // 5 u32 per layer
+ // Create weights storage buffer (skip header + layer info, upload only
+ // weights)
+ size_t header_size = 20; // 5 u32
+ size_t layer_info_size = 20 * layer_info.size(); // 5 u32 per layer
size_t weights_offset = header_size + layer_info_size;
size_t weights_only_size = weights_size - weights_offset;
@@ -697,7 +724,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
WGPUBuffer weights_buffer =
wgpuDeviceCreateBuffer(device, &weights_buffer_desc);
- wgpuQueueWriteBuffer(queue, weights_buffer, 0, weights_data + weights_offset, weights_only_size);
+ wgpuQueueWriteBuffer(queue, weights_buffer, 0, weights_data + weights_offset,
+ weights_only_size);
// Create input view
WGPUTextureView input_view =
@@ -705,7 +733,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
// Create static features texture (RGBA32Uint)
const WGPUTextureDescriptor static_desc = {
- .usage = WGPUTextureUsage_StorageBinding | WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopySrc,
+ .usage = WGPUTextureUsage_StorageBinding |
+ WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopySrc,
.dimension = WGPUTextureDimension_2D,
.size = {static_cast<uint32_t>(width), static_cast<uint32_t>(height), 1},
.format = WGPUTextureFormat_RGBA32Uint,
@@ -740,10 +769,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
};
// Load shaders
- const char* static_shader =
- SafeGetAsset(AssetId::ASSET_SHADER_CNN_V2_STATIC);
- const char* layer_shader =
- SafeGetAsset(AssetId::ASSET_SHADER_CNN_V2_COMPUTE);
+ const char* static_shader = SafeGetAsset(AssetId::ASSET_SHADER_CNN_V2_STATIC);
+ const char* layer_shader = SafeGetAsset(AssetId::ASSET_SHADER_CNN_V2_COMPUTE);
if (!static_shader[0] || !layer_shader[0]) {
fprintf(stderr, "Error: CNN v2 shaders not available\n");
@@ -789,7 +816,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
linear_sampler_desc.lodMaxClamp = 32.0f;
linear_sampler_desc.maxAnisotropy = 1;
- WGPUSampler linear_sampler = wgpuDeviceCreateSampler(device, &linear_sampler_desc);
+ WGPUSampler linear_sampler =
+ wgpuDeviceCreateSampler(device, &linear_sampler_desc);
// Create static features compute pipeline
WGPUShaderSourceWGSL static_wgsl = {};
@@ -822,7 +850,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
static_bgl_entries[3].binding = 3;
static_bgl_entries[3].visibility = WGPUShaderStage_Compute;
- static_bgl_entries[3].texture.sampleType = WGPUTextureSampleType_UnfilterableFloat;
+ static_bgl_entries[3].texture.sampleType =
+ WGPUTextureSampleType_UnfilterableFloat;
static_bgl_entries[3].texture.viewDimension = WGPUTextureViewDimension_2D;
static_bgl_entries[4].binding = 4;
@@ -877,7 +906,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
static_bg_entries[2].binding = 2;
static_bg_entries[2].textureView = input_view;
static_bg_entries[3].binding = 3;
- static_bg_entries[3].textureView = depth_view; // Depth from alpha channel (matches training)
+ static_bg_entries[3].textureView =
+ depth_view; // Depth from alpha channel (matches training)
static_bg_entries[4].binding = 4;
static_bg_entries[4].textureView = static_features_view;
static_bg_entries[5].binding = 5;
@@ -992,7 +1022,7 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
uint32_t workgroups_x = (width + 7) / 8;
uint32_t workgroups_y = (height + 7) / 8;
wgpuComputePassEncoderDispatchWorkgroups(static_pass, workgroups_x,
- workgroups_y, 1);
+ workgroups_y, 1);
wgpuComputePassEncoderEnd(static_pass);
wgpuComputePassEncoderRelease(static_pass);
@@ -1014,7 +1044,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
printf("Saving static features to '%s'...\n", layer_path);
// Read back RGBA32Uint and create 8-channel grayscale composite
- // Static features has 8 channels (packed as 4×u32), create 8x wide composite
+ // Static features has 8 channels (packed as 4×u32), create 8x wide
+ // composite
std::vector<uint8_t> bgra = readback_rgba32uint_to_bgra8(
device, queue, static_features_tex, width, height);
@@ -1083,8 +1114,7 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
layer_bg_desc.entryCount = 6;
layer_bg_desc.entries = layer_bg_entries;
- WGPUBindGroup layer_bg =
- wgpuDeviceCreateBindGroup(device, &layer_bg_desc);
+ WGPUBindGroup layer_bg = wgpuDeviceCreateBindGroup(device, &layer_bg_desc);
WGPUComputePassEncoder layer_pass =
wgpuCommandEncoderBeginComputePass(encoder, nullptr);
@@ -1092,7 +1122,7 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
wgpuComputePassEncoderSetBindGroup(layer_pass, 0, layer_bg, 0, nullptr);
wgpuComputePassEncoderDispatchWorkgroups(layer_pass, workgroups_x,
- workgroups_y, 1);
+ workgroups_y, 1);
wgpuComputePassEncoderEnd(layer_pass);
wgpuComputePassEncoderRelease(layer_pass);
@@ -1138,7 +1168,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
// Create layer composite if intermediates were saved
if (args.save_intermediates) {
- save_layer_composite(args.save_intermediates, width, height, layer_info.size());
+ save_layer_composite(args.save_intermediates, width, height,
+ layer_info.size());
}
// Readback final result (from last layer's output texture)
@@ -1149,7 +1180,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
if (pixels.empty()) {
fprintf(stderr, "Error: GPU readback failed\n");
- for (auto buf : layer_params_buffers) wgpuBufferRelease(buf);
+ for (auto buf : layer_params_buffers)
+ wgpuBufferRelease(buf);
wgpuComputePipelineRelease(layer_pipeline);
wgpuBindGroupLayoutRelease(layer_bgl);
wgpuBindGroupRelease(static_bg);
@@ -1195,7 +1227,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
}
// Cleanup
- for (auto buf : layer_params_buffers) wgpuBufferRelease(buf);
+ for (auto buf : layer_params_buffers)
+ wgpuBufferRelease(buf);
wgpuComputePipelineRelease(layer_pipeline);
wgpuBindGroupLayoutRelease(layer_bgl);
wgpuBindGroupRelease(static_bg);
@@ -1250,8 +1283,8 @@ int main(int argc, char** argv) {
// Branch based on CNN version
if (args.cnn_version == 2) {
- bool success = process_cnn_v2(device, queue, instance, input_texture,
- width, height, args);
+ bool success = process_cnn_v2(device, queue, instance, input_texture, width,
+ height, args);
wgpuTextureRelease(input_texture);
SamplerCache::Get().clear();
fixture.shutdown();
@@ -1274,8 +1307,10 @@ int main(int argc, char** argv) {
if (!pipeline_intermediate || !pipeline_final) {
fprintf(stderr, "Error: failed to create CNN pipelines\n");
- if (pipeline_intermediate) wgpuRenderPipelineRelease(pipeline_intermediate);
- if (pipeline_final) wgpuRenderPipelineRelease(pipeline_final);
+ if (pipeline_intermediate)
+ wgpuRenderPipelineRelease(pipeline_intermediate);
+ if (pipeline_final)
+ wgpuRenderPipelineRelease(pipeline_final);
wgpuTextureViewRelease(input_view);
wgpuTextureRelease(input_texture);
SamplerCache::Get().clear();
@@ -1284,7 +1319,8 @@ int main(int argc, char** argv) {
}
// Get bind group layout from intermediate pipeline (same for both)
- WGPUBindGroupLayout bgl = wgpuRenderPipelineGetBindGroupLayout(pipeline_intermediate, 0);
+ WGPUBindGroupLayout bgl =
+ wgpuRenderPipelineGetBindGroupLayout(pipeline_intermediate, 0);
// Create uniform buffers
const WGPUBufferDescriptor common_uniform_desc = {
@@ -1363,15 +1399,14 @@ int main(int argc, char** argv) {
sizeof(layer_params));
// Build bind group
- WGPUBindGroup bind_group = BindGroupBuilder()
- .sampler(0, sampler)
- .texture(1, current_input)
- .buffer(2, common_uniform_buffer,
- sizeof(CommonPostProcessUniforms))
- .buffer(3, layer_params_buffer,
- sizeof(CNNLayerParams))
- .texture(4, original_view)
- .build(device, bgl);
+ WGPUBindGroup bind_group =
+ BindGroupBuilder()
+ .sampler(0, sampler)
+ .texture(1, current_input)
+ .buffer(2, common_uniform_buffer, sizeof(CommonPostProcessUniforms))
+ .buffer(3, layer_params_buffer, sizeof(CNNLayerParams))
+ .texture(4, original_view)
+ .build(device, bgl);
// Render to appropriate output texture with correct pipeline
bool is_final = (layer == NUM_LAYERS - 1);
@@ -1379,7 +1414,8 @@ int main(int argc, char** argv) {
if (is_final) {
// Final layer: use OffscreenRenderTarget (known working readback)
OffscreenRenderTarget rt(instance, device, width, height);
- WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+ WGPUCommandEncoder encoder =
+ wgpuDeviceCreateCommandEncoder(device, nullptr);
WGPURenderPassEncoder pass = begin_render_pass(encoder, rt.view());
wgpuRenderPassEncoderSetPipeline(pass, pipeline_final);
wgpuRenderPassEncoderSetBindGroup(pass, 0, bind_group, 0, nullptr);
@@ -1456,11 +1492,12 @@ int main(int argc, char** argv) {
}
printf("Done! Output saved to '%s'\n", args.output_path);
- break; // Exit loop after final layer
+ break; // Exit loop after final layer
} else {
// Intermediate layers: render to ping-pong textures
WGPUTextureView output_view = intermediate_views[dst_idx];
- WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+ WGPUCommandEncoder encoder =
+ wgpuDeviceCreateCommandEncoder(device, nullptr);
WGPURenderPassEncoder pass = begin_render_pass(encoder, output_view);
wgpuRenderPassEncoderSetPipeline(pass, pipeline_intermediate);
wgpuRenderPassEncoderSetBindGroup(pass, 0, bind_group, 0, nullptr);
@@ -1501,7 +1538,8 @@ int main(int argc, char** argv) {
if (!pixels.empty()) {
save_png(layer_path, pixels, width, height);
} else {
- fprintf(stderr, "Warning: failed to read intermediate layer %d\n", layer);
+ fprintf(stderr, "Warning: failed to read intermediate layer %d\n",
+ layer);
}
}
}
diff --git a/tools/seq_compiler.cc b/tools/seq_compiler.cc
index 2448a3b..5804031 100644
--- a/tools/seq_compiler.cc
+++ b/tools/seq_compiler.cc
@@ -63,17 +63,10 @@ parse_parameters(const std::string& args) {
bool is_post_process_effect(const std::string& class_name) {
// List of known post-process effects
static const std::vector<std::string> post_process_effects = {
- "FadeEffect",
- "FlashEffect",
- "GaussianBlurEffect",
- "SolarizeEffect",
- "VignetteEffect",
- "ChromaAberrationEffect",
- "DistortEffect",
- "ThemeModulationEffect",
- "CNNEffect",
- "PassthroughEffect",
- "CircleMaskEffect"};
+ "FadeEffect", "FlashEffect", "GaussianBlurEffect",
+ "SolarizeEffect", "VignetteEffect", "ChromaAberrationEffect",
+ "DistortEffect", "ThemeModulationEffect", "CNNEffect",
+ "PassthroughEffect", "CircleMaskEffect"};
return std::find(post_process_effects.begin(), post_process_effects.end(),
class_name) != post_process_effects.end();
}
@@ -112,7 +105,7 @@ float get_sequence_end(const SequenceEntry& seq) {
// Analyze timeline: find max time and sort sequences by start time
TimelineMetrics analyze_timeline(const std::vector<SequenceEntry>& sequences,
- const std::string& demo_end_time) {
+ const std::string& demo_end_time) {
float max_time = demo_end_time.empty() ? 0.0f : std::stof(demo_end_time);
for (const auto& seq : sequences) {
float seq_start = std::stof(seq.start_time);
@@ -343,7 +336,8 @@ void generate_gantt_chart(const std::string& output_file,
out << "\n\n";
// Draw sequences and effects
- for (size_t seq_idx = 0; seq_idx < metrics.sorted_sequences.size(); ++seq_idx) {
+ for (size_t seq_idx = 0; seq_idx < metrics.sorted_sequences.size();
+ ++seq_idx) {
const auto& seq = metrics.sorted_sequences[seq_idx];
float seq_start = std::stof(seq.start_time);
float seq_end = get_sequence_end(seq);
@@ -510,7 +504,8 @@ void generate_gantt_html(const std::string& output_file,
// Draw sequences and effects
int y_offset = margin_top;
- for (size_t seq_idx = 0; seq_idx < metrics.sorted_sequences.size(); ++seq_idx) {
+ for (size_t seq_idx = 0; seq_idx < metrics.sorted_sequences.size();
+ ++seq_idx) {
const auto& seq = metrics.sorted_sequences[seq_idx];
float seq_start = std::stof(seq.start_time);
float seq_end = get_sequence_end(seq);
@@ -903,7 +898,8 @@ int main(int argc, char* argv[]) {
}
}
- // Validate: detect cross-sequence priority collisions for concurrent sequences
+ // Validate: detect cross-sequence priority collisions for concurrent
+ // sequences
std::map<std::string, std::vector<size_t>> time_groups;
for (size_t i = 0; i < sequences.size(); ++i) {
time_groups[sequences[i].start_time].push_back(i);
@@ -912,7 +908,8 @@ int main(int argc, char* argv[]) {
for (const auto& [start_time, seq_indices] : time_groups) {
if (seq_indices.size() > 1) {
// Multiple sequences start at the same time
- std::map<int, std::vector<std::pair<std::string, size_t>>> cross_priority_map;
+ std::map<int, std::vector<std::pair<std::string, size_t>>>
+ cross_priority_map;
for (size_t seq_idx : seq_indices) {
const auto& seq = sequences[seq_idx];
@@ -933,12 +930,14 @@ int main(int argc, char* argv[]) {
"priority "
<< prio << ":\n";
for (const auto& [effect, seq_idx] : effects) {
- std::cerr << " - " << effect << " (sequence #" << seq_idx << ")\n";
+ std::cerr << " - " << effect << " (sequence #" << seq_idx
+ << ")\n";
}
std::cerr << " Post-process effects from different sequences at the "
"same time will be\n";
- std::cerr << " merged into a single render chain. Consider adjusting "
- "priorities to clarify order.\n";
+ std::cerr
+ << " merged into a single render chain. Consider adjusting "
+ "priorities to clarify order.\n";
}
}
}
diff --git a/tools/shadertoy/template.cc b/tools/shadertoy/template.cc
index 35c9b10..7636c0a 100644
--- a/tools/shadertoy/template.cc
+++ b/tools/shadertoy/template.cc
@@ -3,8 +3,8 @@
// TODO: Update description, rename class
#include "effects/shadertoy_effect.h"
-#include "gpu/shader_composer.h"
#include "generated/assets.h"
+#include "gpu/shader_composer.h"
// TODO: Rename class and adjust constructor parameters
ShaderToyEffect::ShaderToyEffect(const GpuContext& ctx) : Effect(ctx) {
@@ -34,8 +34,8 @@ void ShaderToyEffect::init(MainSequence* demo) {
// TODO: Update asset name to match your shader file
size_t shader_size;
- const char* shader_code = (const char*)GetAsset(
- AssetId::ASSET_SHADERTOY_SHADER, &shader_size);
+ const char* shader_code =
+ (const char*)GetAsset(AssetId::ASSET_SHADERTOY_SHADER, &shader_size);
std::string composed = ShaderComposer::Get().Compose({}, shader_code);
@@ -96,8 +96,8 @@ void ShaderToyEffect::init(MainSequence* demo) {
bind_group_ = wgpuDeviceCreateBindGroup(ctx_.device, &bg_desc);
}
-void ShaderToyEffect::render(WGPURenderPassEncoder pass, float time,
- float beat, float intensity, float aspect_ratio) {
+void ShaderToyEffect::render(WGPURenderPassEncoder pass, float time, float beat,
+ float intensity, float aspect_ratio) {
const CommonPostProcessUniforms uniforms = {
.resolution = {static_cast<float>(width_), static_cast<float>(height_)},
.aspect_ratio = aspect_ratio,
diff --git a/tools/shadertoy/template.h b/tools/shadertoy/template.h
index 82f8b39..74be9f2 100644
--- a/tools/shadertoy/template.h
+++ b/tools/shadertoy/template.h
@@ -26,7 +26,7 @@ class ShaderToyEffect : public Effect {
struct ShaderToyParams {
float param1;
float param2;
- float _pad[2]; // Padding to 16 bytes
+ float _pad[2]; // Padding to 16 bytes
};
static_assert(sizeof(ShaderToyParams) == 16,
"ShaderToyParams must be 16 bytes for WGSL alignment");
diff --git a/tools/tracker_compiler.cc b/tools/tracker_compiler.cc
index d12005d..de635cd 100644
--- a/tools/tracker_compiler.cc
+++ b/tools/tracker_compiler.cc
@@ -132,8 +132,8 @@ struct ResourceAnalysis {
// Analyze resource requirements from tracker data
ResourceAnalysis analyze_resources(const std::vector<Sample>& samples,
- const std::vector<Pattern>& patterns,
- const std::vector<Trigger>& score) {
+ const std::vector<Pattern>& patterns,
+ const std::vector<Trigger>& score) {
ResourceAnalysis result = {};
// Count sample types
@@ -168,9 +168,9 @@ ResourceAnalysis analyze_resources(const std::vector<Sample>& samples,
result.max_simultaneous_patterns * result.avg_events_per_pattern;
// Conservative recommendations with 50% safety margin
- result.min_spectrograms = result.asset_sample_count +
- (result.generated_sample_count *
- result.estimated_max_polyphony);
+ result.min_spectrograms =
+ result.asset_sample_count +
+ (result.generated_sample_count * result.estimated_max_polyphony);
result.recommended_spectrograms = (int)(result.min_spectrograms * 1.5f);
result.recommended_voices = result.estimated_max_polyphony * 2;
@@ -236,9 +236,10 @@ int validate_tracker_data(const std::vector<Sample>& samples,
errors++;
}
if (e.volume < 0.0f || e.volume > 2.0f) {
- fprintf(stderr,
- "WARNING: Pattern '%s' unusual volume: %.2f (expected 0.0-2.0)\n",
- p.name.c_str(), e.volume);
+ fprintf(
+ stderr,
+ "WARNING: Pattern '%s' unusual volume: %.2f (expected 0.0-2.0)\n",
+ p.name.c_str(), e.volume);
warnings++;
}
if (e.pan < -1.0f || e.pan > 1.0f) {
@@ -275,8 +276,8 @@ void write_sanitized_track(const char* output_path, float bpm,
for (const auto& s : samples) {
fprintf(out, "SAMPLE %s", s.name.c_str());
if (s.type == GENERATED) {
- fprintf(out, ", %.1f, %.2f, %.1f, %.2f, %d, %.1f", s.freq, s.dur,
- s.amp, s.attack, s.harmonics, s.harmonic_decay);
+ fprintf(out, ", %.1f, %.2f, %.1f, %.2f, %d, %.1f", s.freq, s.dur, s.amp,
+ s.attack, s.harmonics, s.harmonic_decay);
}
fprintf(out, "\n");
}
@@ -318,10 +319,12 @@ void write_sanitized_track(const char* output_path, float bpm,
// Write resource analysis to output file
void write_resource_analysis(FILE* out, const ResourceAnalysis& analysis,
- int total_samples) {
- fprintf(out, "// ============================================================\n");
+ int total_samples) {
+ fprintf(out,
+ "// ============================================================\n");
fprintf(out, "// RESOURCE USAGE ANALYSIS (for synth.h configuration)\n");
- fprintf(out, "// ============================================================\n");
+ fprintf(out,
+ "// ============================================================\n");
fprintf(out, "// Total samples: %d (%d assets + %d generated notes)\n",
total_samples, analysis.asset_sample_count,
analysis.generated_sample_count);
@@ -343,7 +346,9 @@ void write_resource_analysis(FILE* out, const ResourceAnalysis& analysis,
fprintf(out, "// NOTE: With spectrogram caching by note parameters,\n");
fprintf(out, "// MAX_SPECTROGRAMS could be reduced to ~%d\n",
analysis.asset_sample_count + analysis.generated_sample_count);
- fprintf(out, "// ============================================================\n\n");
+ fprintf(
+ out,
+ "// ============================================================\n\n");
}
int main(int argc, char** argv) {