author    skal <pascal.massimino@gmail.com>    2026-02-14 15:14:25 +0100
committer skal <pascal.massimino@gmail.com>    2026-02-14 15:14:25 +0100
commit    8ce27b7e15f0fc65c8ee78950c7501660b936178 (patch)
tree      391f32111b9a30a0156709b6c1ed2fae7b435d57
parent    e38be0dbf5816338ff97e2ee2f9adfff2902dc2b (diff)
style: Apply clang-format to codebase
-rw-r--r--  src/app/main.cc | 17
-rw-r--r--  src/app/test_demo.cc | 31
-rw-r--r--  src/audio/audio.cc | 4
-rw-r--r--  src/audio/audio.h | 3
-rw-r--r--  src/effects/chroma_aberration_effect.cc | 2
-rw-r--r--  src/effects/circle_mask_effect.cc | 6
-rw-r--r--  src/effects/cnn_effect.cc | 65
-rw-r--r--  src/effects/cnn_effect.h | 8
-rw-r--r--  src/effects/cnn_v2_effect.cc | 190
-rw-r--r--  src/effects/cnn_v2_effect.h | 17
-rw-r--r--  src/effects/distort_effect.cc | 3
-rw-r--r--  src/effects/fade_effect.cc | 3
-rw-r--r--  src/effects/fade_effect.h | 2
-rw-r--r--  src/effects/flash_effect.cc | 3
-rw-r--r--  src/effects/gaussian_blur_effect.cc | 2
-rw-r--r--  src/effects/heptagon_effect.cc | 3
-rw-r--r--  src/effects/hybrid_3d_effect.cc | 2
-rw-r--r--  src/effects/moving_ellipse_effect.cc | 5
-rw-r--r--  src/effects/particle_spray_effect.cc | 2
-rw-r--r--  src/effects/particles_effect.cc | 2
-rw-r--r--  src/effects/rotating_cube_effect.cc | 12
-rw-r--r--  src/effects/scene1_effect.cc | 3
-rw-r--r--  src/effects/theme_modulation_effect.cc | 7
-rw-r--r--  src/effects/vignette_effect.cc | 2
-rw-r--r--  src/gpu/bind_group_builder.h | 22
-rw-r--r--  src/gpu/demo_effects.h | 16
-rw-r--r--  src/gpu/effect.cc | 19
-rw-r--r--  src/gpu/effect.h | 17
-rw-r--r--  src/gpu/gpu.cc | 16
-rw-r--r--  src/gpu/gpu.h | 7
-rw-r--r--  src/gpu/headless_gpu.cc | 4
-rw-r--r--  src/gpu/pipeline_builder.h | 21
-rw-r--r--  src/gpu/post_process_helper.cc | 50
-rw-r--r--  src/gpu/post_process_helper.h | 14
-rw-r--r--  src/gpu/sampler_cache.h | 17
-rw-r--r--  src/gpu/shaders.cc | 6
-rw-r--r--  src/gpu/stub_gpu.cc | 4
-rw-r--r--  src/gpu/texture_manager.cc | 47
-rw-r--r--  src/gpu/texture_manager.h | 38
-rw-r--r--  src/gpu/texture_readback.cc | 31
-rw-r--r--  src/gpu/texture_readback.h | 21
-rw-r--r--  src/platform/stub_types.h | 25
-rw-r--r--  src/tests/3d/test_3d.cc | 11
-rw-r--r--  src/tests/3d/test_3d_physics.cc | 2
-rw-r--r--  src/tests/3d/test_3d_render.cc | 2
-rw-r--r--  src/tests/3d/test_mesh.cc | 2
-rw-r--r--  src/tests/3d/test_physics.cc | 12
-rw-r--r--  src/tests/audio/test_audio_engine.cc | 2
-rw-r--r--  src/tests/audio/test_silent_backend.cc | 2
-rw-r--r--  src/tests/audio/test_tracker.cc | 2
-rw-r--r--  src/tests/audio/test_tracker_timing.cc | 5
-rw-r--r--  src/tests/audio/test_variable_tempo.cc | 11
-rw-r--r--  src/tests/audio/test_wav_dump.cc | 2
-rw-r--r--  src/tests/common/audio_test_fixture.cc | 5
-rw-r--r--  src/tests/common/audio_test_fixture.h | 14
-rw-r--r--  src/tests/common/effect_test_fixture.cc | 3
-rw-r--r--  src/tests/common/effect_test_fixture.h | 18
-rw-r--r--  src/tests/common/offscreen_render_target.cc | 2
-rw-r--r--  src/tests/common/test_math_helpers.h | 5
-rw-r--r--  src/tests/gpu/test_demo_effects.cc | 4
-rw-r--r--  src/tests/gpu/test_effect_base.cc | 4
-rw-r--r--  src/tests/gpu/test_gpu_composite.cc | 18
-rw-r--r--  src/tests/gpu/test_post_process_helper.cc | 27
-rw-r--r--  src/tests/gpu/test_shader_assets.cc | 22
-rw-r--r--  src/tests/gpu/test_texture_manager.cc | 2
-rw-r--r--  src/tests/util/test_maths.cc | 32
-rw-r--r--  src/util/asset_manager.cc | 4
-rw-r--r--  src/util/file_watcher.cc | 2
-rw-r--r--  src/util/file_watcher.h | 6
-rw-r--r--  tools/asset_packer.cc | 34
-rw-r--r--  tools/cnn_test.cc | 272
-rw-r--r--  tools/seq_compiler.cc | 37
-rw-r--r--  tools/shadertoy/template.cc | 10
-rw-r--r--  tools/shadertoy/template.h | 2
-rw-r--r--  tools/tracker_compiler.cc | 33
75 files changed, 770 insertions, 608 deletions
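Note: the project's actual .clang-format file is not part of this commit, but the reflowed hunks below consistently show an 80-column limit, 2-space indentation, access specifiers indented by one space, short if-statements expanded onto two lines, and alphabetically sorted includes. A minimal .clang-format sketch matching those observations (an inference from the diff, not the repository's real config) would be:

    # Hypothetical reconstruction based on the formatting visible in this diff
    BasedOnStyle: LLVM
    ColumnLimit: 80
    IndentWidth: 2
    AccessModifierOffset: -1
    AllowShortIfStatementsOnASingleLine: Never
    SortIncludes: true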
diff --git a/src/app/main.cc b/src/app/main.cc
index 5d699ce..27d6312 100644
--- a/src/app/main.cc
+++ b/src/app/main.cc
@@ -225,8 +225,8 @@ int main(int argc, char** argv) {
if ((int)physical_time % 5 == 0 &&
physical_time - update_dt < (int)physical_time) {
- printf(" Progress: %.1fs / %.1fs (music: %.1fs)\r",
- physical_time, headless_duration, g_music_time);
+ printf(" Progress: %.1fs / %.1fs (music: %.1fs)\r", physical_time,
+ headless_duration, g_music_time);
fflush(stdout);
}
}
@@ -349,9 +349,11 @@ int main(int argc, char** argv) {
const float visual_peak = fminf(raw_peak * 8.0f, 1.0f);
// Beat calculation: convert audio time to musical beats
- const float absolute_beat_time = current_audio_time * g_tracker_score.bpm / 60.0f;
+ const float absolute_beat_time =
+ current_audio_time * g_tracker_score.bpm / 60.0f;
const int beat_number = (int)absolute_beat_time;
- const float beat_phase = fmodf(absolute_beat_time, 1.0f); // Fractional part (0.0 to 1.0)
+ const float beat_phase =
+ fmodf(absolute_beat_time, 1.0f); // Fractional part (0.0 to 1.0)
// Print beat/time info periodically for identifying sync points
// Use graphics time for the print interval to avoid excessive output if
@@ -366,9 +368,10 @@ int main(int argc, char** argv) {
current_physical_time, current_audio_time, g_music_time,
beat_number, beat_phase, visual_peak, g_tempo_scale);
} else {
- printf("[GraphicsT=%.2f, AudioT=%.2f, Beat=%d, Phase=%.2f, Peak=%.2f]\n",
- current_physical_time, current_audio_time, beat_number, beat_phase,
- visual_peak);
+ printf(
+ "[GraphicsT=%.2f, AudioT=%.2f, Beat=%d, Phase=%.2f, Peak=%.2f]\n",
+ current_physical_time, current_audio_time, beat_number, beat_phase,
+ visual_peak);
}
last_graphics_print_time = current_physical_time;
}
diff --git a/src/app/test_demo.cc b/src/app/test_demo.cc
index 2a91d37..39dbcba 100644
--- a/src/app/test_demo.cc
+++ b/src/app/test_demo.cc
@@ -20,10 +20,10 @@ extern float GetDemoDuration();
extern void LoadTimeline(MainSequence& main_seq, const GpuContext& ctx);
// Inline peak meter effect for debugging audio-visual sync
-#include "gpu/post_process_helper.h"
-#include "gpu/shader_composer.h"
#include "effects/cnn_effect.h"
#include "effects/cnn_v2_effect.h"
+#include "gpu/post_process_helper.h"
+#include "gpu/shader_composer.h"
class PeakMeterEffect : public PostProcessEffect {
public:
@@ -81,11 +81,11 @@ class PeakMeterEffect : public PostProcessEffect {
)";
// Compose shader with common_uniforms to get CommonUniforms definition
- std::string shader_code = ShaderComposer::Get().Compose(
- {"common_uniforms"}, shader_main);
+ std::string shader_code =
+ ShaderComposer::Get().Compose({"common_uniforms"}, shader_main);
- pipeline_ =
- create_post_process_pipeline(ctx_.device, ctx_.format, shader_code.c_str());
+ pipeline_ = create_post_process_pipeline(ctx_.device, ctx_.format,
+ shader_code.c_str());
}
void update_bind_group(WGPUTextureView input_view) override {
@@ -100,7 +100,7 @@ class PeakMeterEffect : public PostProcessEffect {
}
};
-static int g_cnn_version = 2; // Default to v2
+static int g_cnn_version = 2; // Default to v2
#if !defined(STRIP_ALL)
static void print_usage(const char* prog_name) {
@@ -368,9 +368,11 @@ int main(int argc, char** argv) {
const float visual_peak = fminf(raw_peak * 8.0f, 1.0f);
// Beat calculation: convert audio time to musical beats
- const float absolute_beat_time = current_audio_time * g_tracker_score.bpm / 60.0f;
+ const float absolute_beat_time =
+ current_audio_time * g_tracker_score.bpm / 60.0f;
const int beat_number = (int)absolute_beat_time;
- const float beat_phase = fmodf(absolute_beat_time, 1.0f); // Fractional part (0.0 to 1.0)
+ const float beat_phase =
+ fmodf(absolute_beat_time, 1.0f); // Fractional part (0.0 to 1.0)
#if !defined(STRIP_ALL)
// Log peak (either per-frame or per-beat)
@@ -401,9 +403,10 @@ int main(int argc, char** argv) {
current_physical_time, current_audio_time, g_music_time,
beat_number, beat_phase, visual_peak, g_tempo_scale);
} else {
- printf("[GraphicsT=%.2f, AudioT=%.2f, Beat=%d, Phase=%.2f, Peak=%.2f]\n",
- current_physical_time, current_audio_time, beat_number, beat_phase,
- visual_peak);
+ printf(
+ "[GraphicsT=%.2f, AudioT=%.2f, Beat=%d, Phase=%.2f, Peak=%.2f]\n",
+ current_physical_time, current_audio_time, beat_number, beat_phase,
+ visual_peak);
}
last_graphics_print_time = current_physical_time;
}
@@ -411,8 +414,8 @@ int main(int argc, char** argv) {
// Draw graphics using physical time and musical beat time
const float graphics_frame_time = (float)current_physical_time;
- gpu_draw(visual_peak, aspect_ratio, graphics_frame_time,
- absolute_beat_time, beat_phase);
+ gpu_draw(visual_peak, aspect_ratio, graphics_frame_time, absolute_beat_time,
+ beat_phase);
// Update audio systems (tracker, synth, etc.) based on audio time
// progression
diff --git a/src/audio/audio.cc b/src/audio/audio.cc
index c5bd3d9..d044b00 100644
--- a/src/audio/audio.cc
+++ b/src/audio/audio.cc
@@ -68,8 +68,8 @@ void audio_start() {
void audio_render_ahead(float music_time, float dt, float target_fill) {
// Target: maintain look-ahead buffer
const float target_lookahead = (target_fill < 0.0f)
- ? (float)RING_BUFFER_LOOKAHEAD_MS / 1000.0f
- : target_fill;
+ ? (float)RING_BUFFER_LOOKAHEAD_MS / 1000.0f
+ : target_fill;
// Render in small chunks to keep synth time synchronized with tracker
// Chunk size: one frame's worth of audio (~16.6ms @ 60fps)
diff --git a/src/audio/audio.h b/src/audio/audio.h
index 778d312..9d521e6 100644
--- a/src/audio/audio.h
+++ b/src/audio/audio.h
@@ -24,7 +24,8 @@ void audio_init();
void audio_start(); // Starts the audio device callback
// Ring buffer audio rendering (main thread fills buffer)
-// target_fill: Target buffer fill time in seconds (default: RING_BUFFER_LOOKAHEAD_MS/1000)
+// target_fill: Target buffer fill time in seconds (default:
+// RING_BUFFER_LOOKAHEAD_MS/1000)
void audio_render_ahead(float music_time, float dt, float target_fill = -1.0f);
// Get current playback time (in seconds) based on samples consumed
diff --git a/src/effects/chroma_aberration_effect.cc b/src/effects/chroma_aberration_effect.cc
index a096f5b..2a92225 100644
--- a/src/effects/chroma_aberration_effect.cc
+++ b/src/effects/chroma_aberration_effect.cc
@@ -2,8 +2,8 @@
// It implements the ChromaAberrationEffect with parameterization.
#include "gpu/demo_effects.h"
-#include "gpu/post_process_helper.h"
#include "gpu/gpu.h"
+#include "gpu/post_process_helper.h"
// --- ChromaAberrationEffect ---
diff --git a/src/effects/circle_mask_effect.cc b/src/effects/circle_mask_effect.cc
index 63c8f68..7a016d9 100644
--- a/src/effects/circle_mask_effect.cc
+++ b/src/effects/circle_mask_effect.cc
@@ -51,7 +51,8 @@ void CircleMaskEffect::init(MainSequence* demo) {
AssetId::ASSET_CIRCLE_MASK_RENDER_SHADER, &render_size);
// Compose shaders to resolve #include directives
- std::string composed_compute = ShaderComposer::Get().Compose({}, compute_shader);
+ std::string composed_compute =
+ ShaderComposer::Get().Compose({}, compute_shader);
WGPUShaderSourceWGSL compute_wgsl = {};
compute_wgsl.chain.sType = WGPUSType_ShaderSourceWGSL;
@@ -93,7 +94,8 @@ void CircleMaskEffect::init(MainSequence* demo) {
.build(ctx_.device, compute_layout);
wgpuBindGroupLayoutRelease(compute_layout);
- std::string composed_render = ShaderComposer::Get().Compose({}, render_shader);
+ std::string composed_render =
+ ShaderComposer::Get().Compose({}, render_shader);
WGPUShaderSourceWGSL render_wgsl = {};
render_wgsl.chain.sType = WGPUSType_ShaderSourceWGSL;
diff --git a/src/effects/cnn_effect.cc b/src/effects/cnn_effect.cc
index 4475180..49c5239 100644
--- a/src/effects/cnn_effect.cc
+++ b/src/effects/cnn_effect.cc
@@ -2,31 +2,32 @@
// Neural network-based stylization with modular WGSL
#include "effects/cnn_effect.h"
-#include "gpu/post_process_helper.h"
-#include "gpu/shaders.h"
-#include "gpu/shader_composer.h"
-#include "gpu/effect.h"
#include "gpu/bind_group_builder.h"
-#include "gpu/sampler_cache.h"
+#include "gpu/effect.h"
#include "gpu/pipeline_builder.h"
+#include "gpu/post_process_helper.h"
+#include "gpu/sampler_cache.h"
+#include "gpu/shader_composer.h"
+#include "gpu/shaders.h"
// Create custom pipeline with 5 bindings (includes original texture)
static WGPURenderPipeline create_cnn_pipeline(WGPUDevice device,
- WGPUTextureFormat format,
- const char* shader_code) {
- WGPUBindGroupLayout bgl = BindGroupLayoutBuilder()
- .sampler(0, WGPUShaderStage_Fragment)
- .texture(1, WGPUShaderStage_Fragment)
- .uniform(2, WGPUShaderStage_Vertex | WGPUShaderStage_Fragment)
- .uniform(3, WGPUShaderStage_Fragment)
- .texture(4, WGPUShaderStage_Fragment)
- .build(device);
+ WGPUTextureFormat format,
+ const char* shader_code) {
+ WGPUBindGroupLayout bgl =
+ BindGroupLayoutBuilder()
+ .sampler(0, WGPUShaderStage_Fragment)
+ .texture(1, WGPUShaderStage_Fragment)
+ .uniform(2, WGPUShaderStage_Vertex | WGPUShaderStage_Fragment)
+ .uniform(3, WGPUShaderStage_Fragment)
+ .texture(4, WGPUShaderStage_Fragment)
+ .build(device);
WGPURenderPipeline pipeline = RenderPipelineBuilder(device)
- .shader(shader_code)
- .bind_group_layout(bgl)
- .format(format)
- .build();
+ .shader(shader_code)
+ .bind_group_layout(bgl)
+ .format(format)
+ .build();
wgpuBindGroupLayoutRelease(bgl);
return pipeline;
@@ -36,16 +37,16 @@ CNNEffect::CNNEffect(const GpuContext& ctx)
: PostProcessEffect(ctx), layer_index_(0), total_layers_(1),
blend_amount_(1.0f), input_view_(nullptr), original_view_(nullptr),
bind_group_(nullptr) {
- pipeline_ = create_cnn_pipeline(ctx_.device, ctx_.format,
- cnn_layer_shader_wgsl);
+ pipeline_ =
+ create_cnn_pipeline(ctx_.device, ctx_.format, cnn_layer_shader_wgsl);
}
CNNEffect::CNNEffect(const GpuContext& ctx, const CNNEffectParams& params)
: PostProcessEffect(ctx), layer_index_(params.layer_index),
total_layers_(params.total_layers), blend_amount_(params.blend_amount),
input_view_(nullptr), original_view_(nullptr), bind_group_(nullptr) {
- pipeline_ = create_cnn_pipeline(ctx_.device, ctx_.format,
- cnn_layer_shader_wgsl);
+ pipeline_ =
+ create_cnn_pipeline(ctx_.device, ctx_.format, cnn_layer_shader_wgsl);
}
void CNNEffect::init(MainSequence* demo) {
@@ -78,7 +79,7 @@ void CNNEffect::resize(int width, int height) {
}
void CNNEffect::render(WGPURenderPassEncoder pass,
- const CommonPostProcessUniforms& uniforms) {
+ const CommonPostProcessUniforms& uniforms) {
if (!bind_group_) {
fprintf(stderr, "CNN render: no bind_group\n");
return;
@@ -114,13 +115,15 @@ void CNNEffect::update_bind_group(WGPUTextureView input_view) {
WGPUBindGroupLayout bgl = wgpuRenderPipelineGetBindGroupLayout(pipeline_, 0);
// Use clamp (not repeat) to match PyTorch Conv2d zero-padding behavior
- WGPUSampler sampler = SamplerCache::Get().get_or_create(ctx_.device, SamplerCache::clamp());
+ WGPUSampler sampler =
+ SamplerCache::Get().get_or_create(ctx_.device, SamplerCache::clamp());
- bind_group_ = BindGroupBuilder()
- .sampler(0, sampler)
- .texture(1, input_view_)
- .buffer(2, uniforms_.get().buffer, uniforms_.get().size)
- .buffer(3, params_buffer_.get().buffer, params_buffer_.get().size)
- .texture(4, original_view_ ? original_view_ : input_view_)
- .build(ctx_.device, bgl);
+ bind_group_ =
+ BindGroupBuilder()
+ .sampler(0, sampler)
+ .texture(1, input_view_)
+ .buffer(2, uniforms_.get().buffer, uniforms_.get().size)
+ .buffer(3, params_buffer_.get().buffer, params_buffer_.get().size)
+ .texture(4, original_view_ ? original_view_ : input_view_)
+ .build(ctx_.device, bgl);
}
diff --git a/src/effects/cnn_effect.h b/src/effects/cnn_effect.h
index 3e2b7ca..cdcd656 100644
--- a/src/effects/cnn_effect.h
+++ b/src/effects/cnn_effect.h
@@ -7,15 +7,15 @@
struct CNNLayerParams {
int layer_index;
- float blend_amount; // Blend: mix(input, output, blend_amount)
+ float blend_amount; // Blend: mix(input, output, blend_amount)
float _pad[2];
};
static_assert(sizeof(CNNLayerParams) == 16);
struct CNNEffectParams {
- int layer_index = 0; // Which layer to render (0-based)
- int total_layers = 1; // Total number of layers in the CNN
- float blend_amount = 1.0f; // Final blend with original input
+ int layer_index = 0; // Which layer to render (0-based)
+ int total_layers = 1; // Total number of layers in the CNN
+ float blend_amount = 1.0f; // Final blend with original input
};
class CNNEffect : public PostProcessEffect {
diff --git a/src/effects/cnn_v2_effect.cc b/src/effects/cnn_v2_effect.cc
index 4c10ed1..7127aae 100644
--- a/src/effects/cnn_v2_effect.cc
+++ b/src/effects/cnn_v2_effect.cc
@@ -15,38 +15,24 @@
#include <cstring>
CNNv2Effect::CNNv2Effect(const GpuContext& ctx)
- : PostProcessEffect(ctx),
- static_pipeline_(nullptr),
- static_bind_group_(nullptr),
- static_params_buffer_(nullptr),
- static_features_tex_(nullptr),
- static_features_view_(nullptr),
- linear_sampler_(nullptr),
- layer_pipeline_(nullptr),
- weights_buffer_(nullptr),
- input_mip_tex_(nullptr),
- current_input_view_(nullptr),
- blend_amount_(1.0f),
- mip_level_(0),
+ : PostProcessEffect(ctx), static_pipeline_(nullptr),
+ static_bind_group_(nullptr), static_params_buffer_(nullptr),
+ static_features_tex_(nullptr), static_features_view_(nullptr),
+ linear_sampler_(nullptr), layer_pipeline_(nullptr),
+ weights_buffer_(nullptr), input_mip_tex_(nullptr),
+ current_input_view_(nullptr), blend_amount_(1.0f), mip_level_(0),
initialized_(false) {
std::memset(input_mip_view_, 0, sizeof(input_mip_view_));
}
CNNv2Effect::CNNv2Effect(const GpuContext& ctx, const CNNv2EffectParams& params)
- : PostProcessEffect(ctx),
- static_pipeline_(nullptr),
- static_bind_group_(nullptr),
- static_params_buffer_(nullptr),
- static_features_tex_(nullptr),
- static_features_view_(nullptr),
- linear_sampler_(nullptr),
- layer_pipeline_(nullptr),
- weights_buffer_(nullptr),
- input_mip_tex_(nullptr),
- current_input_view_(nullptr),
- blend_amount_(params.blend_amount),
- mip_level_(0),
- initialized_(false) {
+ : PostProcessEffect(ctx), static_pipeline_(nullptr),
+ static_bind_group_(nullptr), static_params_buffer_(nullptr),
+ static_features_tex_(nullptr), static_features_view_(nullptr),
+ linear_sampler_(nullptr), layer_pipeline_(nullptr),
+ weights_buffer_(nullptr), input_mip_tex_(nullptr),
+ current_input_view_(nullptr), blend_amount_(params.blend_amount),
+ mip_level_(0), initialized_(false) {
std::memset(input_mip_view_, 0, sizeof(input_mip_view_));
}
@@ -56,7 +42,8 @@ CNNv2Effect::~CNNv2Effect() {
void CNNv2Effect::init(MainSequence* demo) {
(void)demo;
- if (initialized_) return;
+ if (initialized_)
+ return;
load_weights();
create_textures();
@@ -75,7 +62,8 @@ void CNNv2Effect::resize(int width, int height) {
void CNNv2Effect::load_weights() {
// Load binary weights asset
size_t weights_size = 0;
- const uint8_t* weights_data = (const uint8_t*)GetAsset(AssetId::ASSET_WEIGHTS_CNN_V2, &weights_size);
+ const uint8_t* weights_data =
+ (const uint8_t*)GetAsset(AssetId::ASSET_WEIGHTS_CNN_V2, &weights_size);
if (!weights_data || weights_size < 20) {
// Weights not available - effect will skip
@@ -89,12 +77,14 @@ void CNNv2Effect::load_weights() {
uint32_t num_layers = header[2];
uint32_t total_weights = header[3];
- FATAL_CHECK(magic != 0x324e4e43, "Invalid CNN v2 weights magic\n"); // 'CNN2'
+ FATAL_CHECK(magic != 0x324e4e43, "Invalid CNN v2 weights magic\n"); // 'CNN2'
- // Support both version 1 (16-byte header) and version 2 (20-byte header with mip_level)
- // TODO: Version 3 should include feature descriptor for arbitrary layout/ordering
+ // Support both version 1 (16-byte header) and version 2 (20-byte header with
+ // mip_level)
+ // TODO: Version 3 should include feature descriptor for arbitrary
+ // layout/ordering
if (version == 1) {
- mip_level_ = 0; // Default for v1
+ mip_level_ = 0; // Default for v1
} else if (version == 2) {
mip_level_ = header[4];
} else {
@@ -115,9 +105,10 @@ void CNNv2Effect::load_weights() {
layer_info_.push_back(info);
}
- // Create GPU storage buffer for weights (skip header + layer info, upload only weights)
- size_t header_size = 20; // 5 u32
- size_t layer_info_size = 20 * num_layers; // 5 u32 per layer
+ // Create GPU storage buffer for weights (skip header + layer info, upload
+ // only weights)
+ size_t header_size = 20; // 5 u32
+ size_t layer_info_size = 20 * num_layers; // 5 u32 per layer
size_t weights_offset = header_size + layer_info_size;
size_t weights_only_size = weights_size - weights_offset;
@@ -129,7 +120,8 @@ void CNNv2Effect::load_weights() {
weights_buffer_ = wgpuDeviceCreateBuffer(ctx_.device, &buffer_desc);
// Upload only weights (skip header + layer info)
- wgpuQueueWriteBuffer(ctx_.queue, weights_buffer_, 0, weights_data + weights_offset, weights_only_size);
+ wgpuQueueWriteBuffer(ctx_.queue, weights_buffer_, 0,
+ weights_data + weights_offset, weights_only_size);
// Create uniform buffers for layer params (one per layer)
for (uint32_t i = 0; i < num_layers; ++i) {
@@ -153,7 +145,9 @@ void CNNv2Effect::create_textures() {
// Input texture with mips (for multi-scale features)
TextureWithView input_mip = gpu_create_texture_2d(
ctx_.device, width_, height_, WGPUTextureFormat_RGBA8Unorm,
- (WGPUTextureUsage)(WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopyDst), 3);
+ (WGPUTextureUsage)(WGPUTextureUsage_TextureBinding |
+ WGPUTextureUsage_CopyDst),
+ 3);
input_mip_tex_ = input_mip.texture;
for (int i = 0; i < 3; ++i) {
@@ -195,7 +189,8 @@ void CNNv2Effect::create_pipelines() {
// Static features compute pipeline
size_t shader_size = 0;
- const char* static_code = (const char*)GetAsset(AssetId::ASSET_SHADER_CNN_V2_STATIC, &shader_size);
+ const char* static_code =
+ (const char*)GetAsset(AssetId::ASSET_SHADER_CNN_V2_STATIC, &shader_size);
if (!static_code || shader_size == 0) {
// Shader not available (e.g., in test mode) - skip pipeline creation
@@ -210,7 +205,8 @@ void CNNv2Effect::create_pipelines() {
shader_desc.nextInChain = &wgsl_src.chain;
// Create bind group layout for static features compute
- // Bindings: 0=input_tex, 1=input_mip1, 2=input_mip2, 3=depth_tex, 4=output, 5=params, 6=linear_sampler
+ // Bindings: 0=input_tex, 1=input_mip1, 2=input_mip2, 3=depth_tex, 4=output,
+ // 5=params, 6=linear_sampler
WGPUBindGroupLayout static_bgl =
BindGroupLayoutBuilder()
.texture(0, WGPUShaderStage_Compute)
@@ -227,28 +223,35 @@ void CNNv2Effect::create_pipelines() {
WGPUPipelineLayoutDescriptor pl_desc = {};
pl_desc.bindGroupLayoutCount = 1;
pl_desc.bindGroupLayouts = &static_bgl;
- WGPUPipelineLayout pipeline_layout = wgpuDeviceCreatePipelineLayout(ctx_.device, &pl_desc);
+ WGPUPipelineLayout pipeline_layout =
+ wgpuDeviceCreatePipelineLayout(ctx_.device, &pl_desc);
// Recreate pipeline with proper layout
WGPUComputePipelineDescriptor pipeline_desc2 = {};
- pipeline_desc2.compute.module = wgpuDeviceCreateShaderModule(ctx_.device, &shader_desc);
+ pipeline_desc2.compute.module =
+ wgpuDeviceCreateShaderModule(ctx_.device, &shader_desc);
pipeline_desc2.compute.entryPoint = str_view("main");
pipeline_desc2.layout = pipeline_layout;
- if (static_pipeline_) wgpuComputePipelineRelease(static_pipeline_);
- static_pipeline_ = wgpuDeviceCreateComputePipeline(ctx_.device, &pipeline_desc2);
+ if (static_pipeline_)
+ wgpuComputePipelineRelease(static_pipeline_);
+ static_pipeline_ =
+ wgpuDeviceCreateComputePipeline(ctx_.device, &pipeline_desc2);
wgpuShaderModuleRelease(pipeline_desc2.compute.module);
wgpuPipelineLayoutRelease(pipeline_layout);
wgpuBindGroupLayoutRelease(static_bgl);
// CNN layer compute pipeline (storage buffer version)
- if (layer_info_.empty()) return; // No weights loaded
+ if (layer_info_.empty())
+ return; // No weights loaded
size_t layer_shader_size = 0;
- const char* layer_code = (const char*)GetAsset(AssetId::ASSET_SHADER_CNN_V2_COMPUTE, &layer_shader_size);
+ const char* layer_code = (const char*)GetAsset(
+ AssetId::ASSET_SHADER_CNN_V2_COMPUTE, &layer_shader_size);
- if (!layer_code || layer_shader_size == 0) return;
+ if (!layer_code || layer_shader_size == 0)
+ return;
WGPUShaderSourceWGSL layer_wgsl = {};
layer_wgsl.chain.sType = WGPUSType_ShaderSourceWGSL;
@@ -257,11 +260,14 @@ void CNNv2Effect::create_pipelines() {
WGPUShaderModuleDescriptor layer_shader_desc = {};
layer_shader_desc.nextInChain = &layer_wgsl.chain;
- WGPUShaderModule layer_module = wgpuDeviceCreateShaderModule(ctx_.device, &layer_shader_desc);
- if (!layer_module) return;
+ WGPUShaderModule layer_module =
+ wgpuDeviceCreateShaderModule(ctx_.device, &layer_shader_desc);
+ if (!layer_module)
+ return;
// Create bind group layout for layer compute
- // 0=static_features, 1=layer_input, 2=output, 3=weights, 4=params, 5=original_input
+ // 0=static_features, 1=layer_input, 2=output, 3=weights, 4=params,
+ // 5=original_input
WGPUBindGroupLayout layer_bgl =
BindGroupLayoutBuilder()
.uint_texture(0, WGPUShaderStage_Compute)
@@ -277,14 +283,16 @@ void CNNv2Effect::create_pipelines() {
layer_pl_desc.bindGroupLayoutCount = 1;
layer_pl_desc.bindGroupLayouts = &layer_bgl;
- WGPUPipelineLayout layer_pipeline_layout = wgpuDeviceCreatePipelineLayout(ctx_.device, &layer_pl_desc);
+ WGPUPipelineLayout layer_pipeline_layout =
+ wgpuDeviceCreatePipelineLayout(ctx_.device, &layer_pl_desc);
WGPUComputePipelineDescriptor layer_pipeline_desc = {};
layer_pipeline_desc.compute.module = layer_module;
layer_pipeline_desc.compute.entryPoint = str_view("main");
layer_pipeline_desc.layout = layer_pipeline_layout;
- layer_pipeline_ = wgpuDeviceCreateComputePipeline(ctx_.device, &layer_pipeline_desc);
+ layer_pipeline_ =
+ wgpuDeviceCreateComputePipeline(ctx_.device, &layer_pipeline_desc);
wgpuShaderModuleRelease(layer_module);
wgpuPipelineLayoutRelease(layer_pipeline_layout);
@@ -292,7 +300,8 @@ void CNNv2Effect::create_pipelines() {
}
void CNNv2Effect::update_bind_group(WGPUTextureView input_view) {
- if (!static_pipeline_) return;
+ if (!static_pipeline_)
+ return;
// Cache input view
current_input_view_ = input_view;
@@ -303,7 +312,8 @@ void CNNv2Effect::update_bind_group(WGPUTextureView input_view) {
static_bind_group_ = nullptr;
}
- // Create bind group for static features compute (manual for storage texture binding)
+ // Create bind group for static features compute (manual for storage texture
+ // binding)
WGPUBindGroupEntry bg_entries[7] = {};
bg_entries[0].binding = 0;
bg_entries[0].textureView = input_view;
@@ -332,7 +342,8 @@ void CNNv2Effect::update_bind_group(WGPUTextureView input_view) {
wgpuBindGroupLayoutRelease(layout);
// Create layer bind groups
- if (!layer_pipeline_ || layer_info_.empty()) return;
+ if (!layer_pipeline_ || layer_info_.empty())
+ return;
// Release old layer bind groups
for (auto bg : layer_bind_groups_) {
@@ -341,7 +352,8 @@ void CNNv2Effect::update_bind_group(WGPUTextureView input_view) {
layer_bind_groups_.clear();
// Get bind group layout from layer pipeline
- WGPUBindGroupLayout layer_bgl = wgpuComputePipelineGetBindGroupLayout(layer_pipeline_, 0);
+ WGPUBindGroupLayout layer_bgl =
+ wgpuComputePipelineGetBindGroupLayout(layer_pipeline_, 0);
// Create bind group for each layer
for (size_t i = 0; i < layer_info_.size(); ++i) {
@@ -366,7 +378,8 @@ void CNNv2Effect::update_bind_group(WGPUTextureView input_view) {
void CNNv2Effect::compute(WGPUCommandEncoder encoder,
const CommonPostProcessUniforms& uniforms) {
- if (!initialized_ || !static_pipeline_ || !static_bind_group_) return;
+ if (!initialized_ || !static_pipeline_ || !static_bind_group_)
+ return;
float effective_blend = blend_amount_;
if (beat_modulated_) {
@@ -379,10 +392,12 @@ void CNNv2Effect::compute(WGPUCommandEncoder encoder,
static_params.padding[0] = 0;
static_params.padding[1] = 0;
static_params.padding[2] = 0;
- wgpuQueueWriteBuffer(ctx_.queue, static_params_buffer_, 0, &static_params, sizeof(static_params));
+ wgpuQueueWriteBuffer(ctx_.queue, static_params_buffer_, 0, &static_params,
+ sizeof(static_params));
// Pass 1: Compute static features
- WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
+ WGPUComputePassEncoder pass =
+ wgpuCommandEncoderBeginComputePass(encoder, nullptr);
wgpuComputePassEncoderSetPipeline(pass, static_pipeline_);
wgpuComputePassEncoderSetBindGroup(pass, 0, static_bind_group_, 0, nullptr);
@@ -396,7 +411,8 @@ void CNNv2Effect::compute(WGPUCommandEncoder encoder,
wgpuComputePassEncoderRelease(pass);
// Execute CNN layer passes
- if (!layer_pipeline_ || layer_bind_groups_.empty()) return;
+ if (!layer_pipeline_ || layer_bind_groups_.empty())
+ return;
// Update layer params (each layer has own buffer)
for (size_t i = 0; i < layer_info_.size(); ++i) {
@@ -411,14 +427,18 @@ void CNNv2Effect::compute(WGPUCommandEncoder encoder,
params.blend_amount = effective_blend;
params.is_layer_0 = (i == 0) ? 1 : 0;
- wgpuQueueWriteBuffer(ctx_.queue, layer_params_buffers_[i], 0, &params, sizeof(params));
+ wgpuQueueWriteBuffer(ctx_.queue, layer_params_buffers_[i], 0, &params,
+ sizeof(params));
- WGPUComputePassEncoder layer_pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
+ WGPUComputePassEncoder layer_pass =
+ wgpuCommandEncoderBeginComputePass(encoder, nullptr);
wgpuComputePassEncoderSetPipeline(layer_pass, layer_pipeline_);
- wgpuComputePassEncoderSetBindGroup(layer_pass, 0, layer_bind_groups_[i], 0, nullptr);
+ wgpuComputePassEncoderSetBindGroup(layer_pass, 0, layer_bind_groups_[i], 0,
+ nullptr);
- wgpuComputePassEncoderDispatchWorkgroups(layer_pass, workgroups_x, workgroups_y, 1);
+ wgpuComputePassEncoderDispatchWorkgroups(layer_pass, workgroups_x,
+ workgroups_y, 1);
wgpuComputePassEncoderEnd(layer_pass);
wgpuComputePassEncoderRelease(layer_pass);
@@ -433,26 +453,40 @@ void CNNv2Effect::render(WGPURenderPassEncoder pass,
}
void CNNv2Effect::cleanup() {
- if (static_features_view_) wgpuTextureViewRelease(static_features_view_);
- if (static_features_tex_) wgpuTextureRelease(static_features_tex_);
- if (static_bind_group_) wgpuBindGroupRelease(static_bind_group_);
- if (static_params_buffer_) wgpuBufferRelease(static_params_buffer_);
- if (static_pipeline_) wgpuComputePipelineRelease(static_pipeline_);
- if (linear_sampler_) wgpuSamplerRelease(linear_sampler_);
+ if (static_features_view_)
+ wgpuTextureViewRelease(static_features_view_);
+ if (static_features_tex_)
+ wgpuTextureRelease(static_features_tex_);
+ if (static_bind_group_)
+ wgpuBindGroupRelease(static_bind_group_);
+ if (static_params_buffer_)
+ wgpuBufferRelease(static_params_buffer_);
+ if (static_pipeline_)
+ wgpuComputePipelineRelease(static_pipeline_);
+ if (linear_sampler_)
+ wgpuSamplerRelease(linear_sampler_);
- if (layer_pipeline_) wgpuComputePipelineRelease(layer_pipeline_);
- if (weights_buffer_) wgpuBufferRelease(weights_buffer_);
- for (auto buf : layer_params_buffers_) wgpuBufferRelease(buf);
+ if (layer_pipeline_)
+ wgpuComputePipelineRelease(layer_pipeline_);
+ if (weights_buffer_)
+ wgpuBufferRelease(weights_buffer_);
+ for (auto buf : layer_params_buffers_)
+ wgpuBufferRelease(buf);
layer_params_buffers_.clear();
for (int i = 0; i < 3; ++i) {
- if (input_mip_view_[i]) wgpuTextureViewRelease(input_mip_view_[i]);
+ if (input_mip_view_[i])
+ wgpuTextureViewRelease(input_mip_view_[i]);
}
- if (input_mip_tex_) wgpuTextureRelease(input_mip_tex_);
+ if (input_mip_tex_)
+ wgpuTextureRelease(input_mip_tex_);
- for (auto view : layer_views_) wgpuTextureViewRelease(view);
- for (auto tex : layer_textures_) wgpuTextureRelease(tex);
- for (auto bg : layer_bind_groups_) wgpuBindGroupRelease(bg);
+ for (auto view : layer_views_)
+ wgpuTextureViewRelease(view);
+ for (auto tex : layer_textures_)
+ wgpuTextureRelease(tex);
+ for (auto bg : layer_bind_groups_)
+ wgpuBindGroupRelease(bg);
layer_views_.clear();
layer_textures_.clear();
diff --git a/src/effects/cnn_v2_effect.h b/src/effects/cnn_v2_effect.h
index d530d3b..7960b4f 100644
--- a/src/effects/cnn_v2_effect.h
+++ b/src/effects/cnn_v2_effect.h
@@ -11,7 +11,7 @@ struct CNNv2EffectParams {
};
class CNNv2Effect : public PostProcessEffect {
-public:
+ public:
explicit CNNv2Effect(const GpuContext& ctx);
explicit CNNv2Effect(const GpuContext& ctx, const CNNv2EffectParams& params);
~CNNv2Effect();
@@ -29,7 +29,7 @@ public:
beat_scale_ = scale;
}
-private:
+ private:
struct LayerInfo {
uint32_t kernel_size;
uint32_t in_channels;
@@ -67,12 +67,13 @@ private:
WGPUSampler linear_sampler_;
// CNN layers (storage buffer architecture)
- WGPUComputePipeline layer_pipeline_; // Single pipeline for all layers
- WGPUBuffer weights_buffer_; // Storage buffer for weights
- std::vector<WGPUBuffer> layer_params_buffers_; // Uniform buffers (one per layer)
- std::vector<LayerInfo> layer_info_; // Layer metadata
- std::vector<WGPUBindGroup> layer_bind_groups_; // Per-layer bind groups
- std::vector<WGPUTexture> layer_textures_; // Ping-pong buffers
+ WGPUComputePipeline layer_pipeline_; // Single pipeline for all layers
+ WGPUBuffer weights_buffer_; // Storage buffer for weights
+ std::vector<WGPUBuffer>
+ layer_params_buffers_; // Uniform buffers (one per layer)
+ std::vector<LayerInfo> layer_info_; // Layer metadata
+ std::vector<WGPUBindGroup> layer_bind_groups_; // Per-layer bind groups
+ std::vector<WGPUTexture> layer_textures_; // Ping-pong buffers
std::vector<WGPUTextureView> layer_views_;
// Input mips
diff --git a/src/effects/distort_effect.cc b/src/effects/distort_effect.cc
index 97622b2..f4e68d2 100644
--- a/src/effects/distort_effect.cc
+++ b/src/effects/distort_effect.cc
@@ -32,5 +32,6 @@ void DistortEffect::render(WGPURenderPassEncoder pass,
}
void DistortEffect::update_bind_group(WGPUTextureView v) {
- pp_update_bind_group(ctx_.device, pipeline_, &bind_group_, v, uniforms_.get(), params_buffer_);
+ pp_update_bind_group(ctx_.device, pipeline_, &bind_group_, v, uniforms_.get(),
+ params_buffer_);
}
\ No newline at end of file
diff --git a/src/effects/fade_effect.cc b/src/effects/fade_effect.cc
index fd2af69..1dff6bd 100644
--- a/src/effects/fade_effect.cc
+++ b/src/effects/fade_effect.cc
@@ -9,7 +9,8 @@ struct FadeParams {
float fade_amount;
float _pad[3];
};
-static_assert(sizeof(FadeParams) == 16, "FadeParams must be 16 bytes for WGSL alignment");
+static_assert(sizeof(FadeParams) == 16,
+ "FadeParams must be 16 bytes for WGSL alignment");
FadeEffect::FadeEffect(const GpuContext& ctx) : PostProcessEffect(ctx) {
const char* shader_code = R"(
diff --git a/src/effects/fade_effect.h b/src/effects/fade_effect.h
index 8cd7006..6993152 100644
--- a/src/effects/fade_effect.h
+++ b/src/effects/fade_effect.h
@@ -4,8 +4,8 @@
#pragma once
#include "gpu/effect.h"
-#include "gpu/post_process_helper.h"
#include "gpu/gpu.h"
+#include "gpu/post_process_helper.h"
#include "gpu/uniform_helper.h"
class FadeEffect : public PostProcessEffect {
diff --git a/src/effects/flash_effect.cc b/src/effects/flash_effect.cc
index 235412d..00b5217 100644
--- a/src/effects/flash_effect.cc
+++ b/src/effects/flash_effect.cc
@@ -66,7 +66,8 @@ void FlashEffect::update_bind_group(WGPUTextureView input_view) {
void FlashEffect::render(WGPURenderPassEncoder pass,
const CommonPostProcessUniforms& uniforms) {
// Trigger flash based on configured threshold
- if (uniforms.audio_intensity > params_.trigger_threshold && flash_intensity_ < 0.2f) {
+ if (uniforms.audio_intensity > params_.trigger_threshold &&
+ flash_intensity_ < 0.2f) {
flash_intensity_ = 0.8f; // Trigger flash
}
diff --git a/src/effects/gaussian_blur_effect.cc b/src/effects/gaussian_blur_effect.cc
index 0b4beae..6a0675d 100644
--- a/src/effects/gaussian_blur_effect.cc
+++ b/src/effects/gaussian_blur_effect.cc
@@ -2,8 +2,8 @@
// It implements the GaussianBlurEffect with parameterization.
#include "gpu/demo_effects.h"
-#include "gpu/post_process_helper.h"
#include "gpu/gpu.h"
+#include "gpu/post_process_helper.h"
// --- GaussianBlurEffect ---
diff --git a/src/effects/heptagon_effect.cc b/src/effects/heptagon_effect.cc
index 724eabb..273adc2 100644
--- a/src/effects/heptagon_effect.cc
+++ b/src/effects/heptagon_effect.cc
@@ -8,7 +8,8 @@
// --- HeptagonEffect ---
HeptagonEffect::HeptagonEffect(const GpuContext& ctx) : Effect(ctx) {
// uniforms_ is initialized by Effect base class
- ResourceBinding bindings[] = {{uniforms_.get(), WGPUBufferBindingType_Uniform}};
+ ResourceBinding bindings[] = {
+ {uniforms_.get(), WGPUBufferBindingType_Uniform}};
pass_ = gpu_create_render_pass(ctx_.device, ctx_.format, main_shader_wgsl,
bindings, 1);
pass_.vertex_count = 21;
diff --git a/src/effects/hybrid_3d_effect.cc b/src/effects/hybrid_3d_effect.cc
index 1cd773d..61f3165 100644
--- a/src/effects/hybrid_3d_effect.cc
+++ b/src/effects/hybrid_3d_effect.cc
@@ -95,7 +95,7 @@ static float ease_in_out_cubic(float t) {
}
void Hybrid3DEffect::render(WGPURenderPassEncoder pass,
- const CommonPostProcessUniforms& uniforms) {
+ const CommonPostProcessUniforms& uniforms) {
// Animate Objects
for (size_t i = 1; i < scene_.objects.size(); ++i) {
diff --git a/src/effects/moving_ellipse_effect.cc b/src/effects/moving_ellipse_effect.cc
index f595de9..e641927 100644
--- a/src/effects/moving_ellipse_effect.cc
+++ b/src/effects/moving_ellipse_effect.cc
@@ -2,13 +2,14 @@
// It implements the MovingEllipseEffect.
#include "gpu/demo_effects.h"
-#include "gpu/post_process_helper.h"
#include "gpu/gpu.h"
+#include "gpu/post_process_helper.h"
// --- MovingEllipseEffect ---
MovingEllipseEffect::MovingEllipseEffect(const GpuContext& ctx) : Effect(ctx) {
// uniforms_ is initialized by Effect base class
- ResourceBinding bindings[] = {{uniforms_.get(), WGPUBufferBindingType_Uniform}};
+ ResourceBinding bindings[] = {
+ {uniforms_.get(), WGPUBufferBindingType_Uniform}};
pass_ = gpu_create_render_pass(ctx_.device, ctx_.format, ellipse_shader_wgsl,
bindings, 1);
pass_.vertex_count = 3;
diff --git a/src/effects/particle_spray_effect.cc b/src/effects/particle_spray_effect.cc
index e250f5a..0b0dba1 100644
--- a/src/effects/particle_spray_effect.cc
+++ b/src/effects/particle_spray_effect.cc
@@ -2,8 +2,8 @@
// It implements the ParticleSprayEffect.
#include "gpu/demo_effects.h"
-#include "gpu/post_process_helper.h"
#include "gpu/gpu.h"
+#include "gpu/post_process_helper.h"
#include <vector>
// --- ParticleSprayEffect ---
diff --git a/src/effects/particles_effect.cc b/src/effects/particles_effect.cc
index 5762637..b05aecd 100644
--- a/src/effects/particles_effect.cc
+++ b/src/effects/particles_effect.cc
@@ -2,8 +2,8 @@
// It implements the ParticlesEffect.
#include "gpu/demo_effects.h"
-#include "gpu/post_process_helper.h"
#include "gpu/gpu.h"
+#include "gpu/post_process_helper.h"
#include <vector>
// --- ParticlesEffect ---
diff --git a/src/effects/rotating_cube_effect.cc b/src/effects/rotating_cube_effect.cc
index a42feaa..c03eccb 100644
--- a/src/effects/rotating_cube_effect.cc
+++ b/src/effects/rotating_cube_effect.cc
@@ -5,9 +5,9 @@
#include "effects/rotating_cube_effect.h"
#include "generated/assets.h"
#include "gpu/bind_group_builder.h"
-#include "gpu/shader_composer.h"
#include "gpu/gpu.h"
#include "gpu/sampler_cache.h"
+#include "gpu/shader_composer.h"
#include "util/asset_manager_utils.h"
RotatingCubeEffect::RotatingCubeEffect(const GpuContext& ctx) : Effect(ctx) {
@@ -39,12 +39,16 @@ void RotatingCubeEffect::init(MainSequence* demo) {
TextureWithView noise = gpu_create_texture_2d(
ctx_.device, 1, 1, WGPUTextureFormat_RGBA8Unorm,
- (WGPUTextureUsage)(WGPUTextureUsage_TextureBinding | WGPUTextureUsage_RenderAttachment), 1);
+ (WGPUTextureUsage)(WGPUTextureUsage_TextureBinding |
+ WGPUTextureUsage_RenderAttachment),
+ 1);
noise_texture_ = noise.texture;
noise_view_ = noise.view;
- noise_sampler_ = SamplerCache::Get().get_or_create(ctx_.device, SamplerCache::linear());
- mask_sampler_ = SamplerCache::Get().get_or_create(ctx_.device, SamplerCache::clamp());
+ noise_sampler_ =
+ SamplerCache::Get().get_or_create(ctx_.device, SamplerCache::linear());
+ mask_sampler_ =
+ SamplerCache::Get().get_or_create(ctx_.device, SamplerCache::clamp());
size_t shader_size;
const char* shader_code =
diff --git a/src/effects/scene1_effect.cc b/src/effects/scene1_effect.cc
index c75e511..3d6df3b 100644
--- a/src/effects/scene1_effect.cc
+++ b/src/effects/scene1_effect.cc
@@ -5,7 +5,8 @@
#include "gpu/gpu.h"
Scene1Effect::Scene1Effect(const GpuContext& ctx) : Effect(ctx) {
- ResourceBinding bindings[] = {{uniforms_.get(), WGPUBufferBindingType_Uniform}};
+ ResourceBinding bindings[] = {
+ {uniforms_.get(), WGPUBufferBindingType_Uniform}};
pass_ = gpu_create_render_pass(ctx_.device, ctx_.format, scene1_shader_wgsl,
bindings, 1);
pass_.vertex_count = 3;
diff --git a/src/effects/theme_modulation_effect.cc b/src/effects/theme_modulation_effect.cc
index 1c81d79..82bfeb8 100644
--- a/src/effects/theme_modulation_effect.cc
+++ b/src/effects/theme_modulation_effect.cc
@@ -10,7 +10,8 @@ struct ThemeModulationParams {
float theme_brightness;
float _pad[3];
};
-static_assert(sizeof(ThemeModulationParams) == 16, "ThemeModulationParams must be 16 bytes for WGSL alignment");
+static_assert(sizeof(ThemeModulationParams) == 16,
+ "ThemeModulationParams must be 16 bytes for WGSL alignment");
ThemeModulationEffect::ThemeModulationEffect(const GpuContext& ctx)
: PostProcessEffect(ctx) {
@@ -82,8 +83,8 @@ void ThemeModulationEffect::render(WGPURenderPassEncoder pass,
// Alternate between bright and dark every 4 seconds (2 pattern changes)
// Music patterns change every 2 seconds at 120 BPM
- float cycle_time = fmodf(uniforms.time, 8.0f); // 8 second cycle (4 patterns)
- bool is_dark_section = (cycle_time >= 4.0f); // Dark for second half
+ float cycle_time = fmodf(uniforms.time, 8.0f); // 8 second cycle (4 patterns)
+ bool is_dark_section = (cycle_time >= 4.0f); // Dark for second half
// Smooth transition between themes using a sine wave
float transition =
diff --git a/src/effects/vignette_effect.cc b/src/effects/vignette_effect.cc
index 0e5f68f..f5c3f05 100644
--- a/src/effects/vignette_effect.cc
+++ b/src/effects/vignette_effect.cc
@@ -2,8 +2,8 @@
// It implements the VignetteEffect.
#include "gpu/demo_effects.h"
-#include "gpu/post_process_helper.h"
#include "gpu/gpu.h"
+#include "gpu/post_process_helper.h"
VignetteEffect::VignetteEffect(const GpuContext& ctx)
: VignetteEffect(ctx, VignetteParams()) {
diff --git a/src/gpu/bind_group_builder.h b/src/gpu/bind_group_builder.h
index abce9dc..55b7291 100644
--- a/src/gpu/bind_group_builder.h
+++ b/src/gpu/bind_group_builder.h
@@ -24,23 +24,27 @@ typedef uint32_t WGPUShaderStageFlags;
class BindGroupLayoutBuilder {
std::vector<WGPUBindGroupLayoutEntry> entries_;
-public:
- BindGroupLayoutBuilder& uniform(uint32_t binding, WGPUShaderStageFlags vis, size_t min_size = 0) {
+ public:
+ BindGroupLayoutBuilder& uniform(uint32_t binding, WGPUShaderStageFlags vis,
+ size_t min_size = 0) {
WGPUBindGroupLayoutEntry e{};
e.binding = binding;
e.visibility = vis;
e.buffer.type = WGPUBufferBindingType_Uniform;
- if (min_size) e.buffer.minBindingSize = min_size;
+ if (min_size)
+ e.buffer.minBindingSize = min_size;
entries_.push_back(e);
return *this;
}
- BindGroupLayoutBuilder& storage(uint32_t binding, WGPUShaderStageFlags vis, size_t min_size = 0) {
+ BindGroupLayoutBuilder& storage(uint32_t binding, WGPUShaderStageFlags vis,
+ size_t min_size = 0) {
WGPUBindGroupLayoutEntry e{};
e.binding = binding;
e.visibility = vis;
e.buffer.type = WGPUBufferBindingType_ReadOnlyStorage;
- if (min_size) e.buffer.minBindingSize = min_size;
+ if (min_size)
+ e.buffer.minBindingSize = min_size;
entries_.push_back(e);
return *this;
}
@@ -55,7 +59,8 @@ public:
return *this;
}
- BindGroupLayoutBuilder& uint_texture(uint32_t binding, WGPUShaderStageFlags vis) {
+ BindGroupLayoutBuilder& uint_texture(uint32_t binding,
+ WGPUShaderStageFlags vis) {
WGPUBindGroupLayoutEntry e{};
e.binding = binding;
e.visibility = vis;
@@ -65,7 +70,8 @@ public:
return *this;
}
- BindGroupLayoutBuilder& storage_texture(uint32_t binding, WGPUShaderStageFlags vis,
+ BindGroupLayoutBuilder& storage_texture(uint32_t binding,
+ WGPUShaderStageFlags vis,
WGPUTextureFormat format) {
WGPUBindGroupLayoutEntry e{};
e.binding = binding;
@@ -97,7 +103,7 @@ public:
class BindGroupBuilder {
std::vector<WGPUBindGroupEntry> entries_;
-public:
+ public:
BindGroupBuilder& buffer(uint32_t binding, WGPUBuffer buf, size_t size) {
WGPUBindGroupEntry e{};
e.binding = binding;
diff --git a/src/gpu/demo_effects.h b/src/gpu/demo_effects.h
index beadafb..a9258af 100644
--- a/src/gpu/demo_effects.h
+++ b/src/gpu/demo_effects.h
@@ -7,16 +7,16 @@
#include "3d/scene.h"
#include "effect.h"
#include "effects/circle_mask_effect.h"
-#include "effects/fade_effect.h" // FadeEffect with full definition
+#include "effects/fade_effect.h" // FadeEffect with full definition
+#include "effects/flash_cube_effect.h"
#include "effects/flash_effect.h" // FlashEffect with params support
-#include "gpu/post_process_helper.h"
-#include "effects/rotating_cube_effect.h"
-#include "gpu/shaders.h"
-#include "effects/theme_modulation_effect.h" // ThemeModulationEffect with full definition
#include "effects/hybrid_3d_effect.h"
-#include "effects/flash_cube_effect.h"
+#include "effects/rotating_cube_effect.h"
#include "effects/scene1_effect.h"
+#include "effects/theme_modulation_effect.h" // ThemeModulationEffect with full definition
#include "gpu/gpu.h"
+#include "gpu/post_process_helper.h"
+#include "gpu/shaders.h"
#include "gpu/texture_manager.h"
#include "gpu/uniform_helper.h"
#include <memory>
@@ -165,7 +165,8 @@ struct DistortParams {
float strength = 0.01f; // Default distortion strength
float speed = 1.0f; // Default distortion speed
};
-static_assert(sizeof(DistortParams) == 8, "DistortParams must be 8 bytes for WGSL alignment");
+static_assert(sizeof(DistortParams) == 8,
+ "DistortParams must be 8 bytes for WGSL alignment");
class DistortEffect : public PostProcessEffect {
public:
@@ -180,7 +181,6 @@ class DistortEffect : public PostProcessEffect {
UniformBuffer<DistortParams> params_buffer_;
};
-
#include "effects/cnn_effect.h"
#include "effects/cnn_v2_effect.h"
diff --git a/src/gpu/effect.cc b/src/gpu/effect.cc
index 52128c4..ca98ebd 100644
--- a/src/gpu/effect.cc
+++ b/src/gpu/effect.cc
@@ -66,8 +66,8 @@ void Sequence::update_active_list(float seq_time) {
#if !defined(STRIP_ALL)
Effect* effect_ptr = item.effect.get();
const char* effect_name = typeid(*effect_ptr).name();
- printf(" [EFFECT START] <%s> (priority=%d, time=%.2f-%.2f)\n", effect_name,
- item.priority, item.start_time, item.end_time);
+ printf(" [EFFECT START] <%s> (priority=%d, time=%.2f-%.2f)\n",
+ effect_name, item.priority, item.start_time, item.end_time);
#endif
item.effect->start();
item.active = true;
@@ -173,8 +173,8 @@ void MainSequence::init(const GpuContext& ctx, int width, int height) {
passthrough_effect_->resize(width, height);
for (ActiveSequence& entry : sequences_) {
- entry.seq->resize(width, height); // Set dimensions FIRST
- entry.seq->init(this); // Then init with correct dimensions
+ entry.seq->resize(width, height); // Set dimensions FIRST
+ entry.seq->init(this); // Then init with correct dimensions
}
}
@@ -183,8 +183,8 @@ void MainSequence::add_sequence(std::shared_ptr<Sequence> seq, float start_time,
sequences_.push_back({seq, start_time, priority});
// If MainSequence is already initialized, init the new sequence immediately
if (gpu_ctx.device) {
- seq->resize(width_, height_); // Set dimensions FIRST
- seq->init(this); // Then init with correct dimensions
+ seq->resize(width_, height_); // Set dimensions FIRST
+ seq->init(this); // Then init with correct dimensions
}
std::sort(sequences_.begin(), sequences_.end(),
[](const ActiveSequence& a, const ActiveSequence& b) {
@@ -329,8 +329,8 @@ void MainSequence::render_frame(float global_time, float beat_time,
.colorAttachmentCount = 1, .colorAttachments = &capture_attachment};
WGPURenderPassEncoder capture_pass =
wgpuCommandEncoderBeginRenderPass(encoder, &capture_desc);
- wgpuRenderPassEncoderSetViewport(capture_pass, 0.0f, 0.0f,
- (float)width_, (float)height_, 0.0f, 1.0f);
+ wgpuRenderPassEncoderSetViewport(capture_pass, 0.0f, 0.0f, (float)width_,
+ (float)height_, 0.0f, 1.0f);
PostProcessEffect* passthrough =
(PostProcessEffect*)passthrough_effect_.get();
@@ -555,7 +555,8 @@ void MainSequence::simulate_until(float target_time, float step_rate,
}
for (const SequenceItem* item : scene_effects) {
CommonPostProcessUniforms test_uniforms = {
- .resolution = {static_cast<float>(width_), static_cast<float>(height_)},
+ .resolution = {static_cast<float>(width_),
+ static_cast<float>(height_)},
.aspect_ratio = aspect_ratio,
.time = t - item->start_time,
.beat_time = absolute_beat_time,
diff --git a/src/gpu/effect.h b/src/gpu/effect.h
index 5d835ad..30e43d1 100644
--- a/src/gpu/effect.h
+++ b/src/gpu/effect.h
@@ -47,16 +47,17 @@ class Effect {
return false;
}
- // Helper: get initialized CommonPostProcessUniforms based on current dimensions
- // If aspect_ratio < 0, computes from width_/height_
- CommonPostProcessUniforms get_common_uniforms(float time = 0.0f,
- float beat_time = 0.0f,
- float beat_phase = 0.0f,
- float intensity = 0.0f,
- float aspect_ratio = -1.0f) const {
+ // Helper: get initialized CommonPostProcessUniforms based on current
+ // dimensions If aspect_ratio < 0, computes from width_/height_
+ CommonPostProcessUniforms
+ get_common_uniforms(float time = 0.0f, float beat_time = 0.0f,
+ float beat_phase = 0.0f, float intensity = 0.0f,
+ float aspect_ratio = -1.0f) const {
return {
.resolution = {static_cast<float>(width_), static_cast<float>(height_)},
- .aspect_ratio = aspect_ratio < 0.0f ? static_cast<float>(width_) / static_cast<float>(height_) : aspect_ratio,
+ .aspect_ratio = aspect_ratio < 0.0f ? static_cast<float>(width_) /
+ static_cast<float>(height_)
+ : aspect_ratio,
.time = time,
.beat_time = beat_time,
.beat_phase = beat_phase,
diff --git a/src/gpu/gpu.cc b/src/gpu/gpu.cc
index 929a063..6d2c7d5 100644
--- a/src/gpu/gpu.cc
+++ b/src/gpu/gpu.cc
@@ -4,8 +4,8 @@
#include "gpu.h"
#include "effect.h"
-#include "gpu/shaders.h"
#include "gpu/shader_composer.h"
+#include "gpu/shaders.h"
#include "platform/platform.h"
#include <cassert>
@@ -81,7 +81,9 @@ TextureWithView gpu_create_storage_texture_2d(WGPUDevice device, uint32_t width,
WGPUTextureFormat format) {
return gpu_create_texture_2d(
device, width, height, format,
- (WGPUTextureUsage)(WGPUTextureUsage_StorageBinding | WGPUTextureUsage_TextureBinding), 1);
+ (WGPUTextureUsage)(WGPUTextureUsage_StorageBinding |
+ WGPUTextureUsage_TextureBinding),
+ 1);
}
TextureWithView gpu_create_post_process_texture(WGPUDevice device,
@@ -89,7 +91,9 @@ TextureWithView gpu_create_post_process_texture(WGPUDevice device,
WGPUTextureFormat format) {
return gpu_create_texture_2d(
device, width, height, format,
- (WGPUTextureUsage)(WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopySrc),
+ (WGPUTextureUsage)(WGPUTextureUsage_RenderAttachment |
+ WGPUTextureUsage_TextureBinding |
+ WGPUTextureUsage_CopySrc),
1);
}
@@ -451,10 +455,10 @@ void gpu_init(PlatformState* platform_state) {
platform_state->height);
}
-void gpu_draw(float audio_peak, float aspect_ratio, float time,
- float beat_time, float beat_phase) {
+void gpu_draw(float audio_peak, float aspect_ratio, float time, float beat_time,
+ float beat_phase) {
g_main_sequence.render_frame(time, beat_time, beat_phase, audio_peak,
- aspect_ratio, g_surface);
+ aspect_ratio, g_surface);
}
void gpu_resize(int width, int height) {
diff --git a/src/gpu/gpu.h b/src/gpu/gpu.h
index 0b0153b..3796517 100644
--- a/src/gpu/gpu.h
+++ b/src/gpu/gpu.h
@@ -42,8 +42,8 @@ struct RenderPass {
class MainSequence; // Forward declaration
void gpu_init(PlatformState* platform_state);
-void gpu_draw(float audio_peak, float aspect_ratio, float time,
- float beat_time, float beat_phase);
+void gpu_draw(float audio_peak, float aspect_ratio, float time, float beat_time,
+ float beat_phase);
void gpu_resize(int width, int height);
void gpu_shutdown();
@@ -101,7 +101,8 @@ TextureWithView gpu_create_post_process_texture(WGPUDevice device,
uint32_t width, uint32_t height,
WGPUTextureFormat format);
WGPUTextureView gpu_create_mip_view(WGPUTexture texture,
- WGPUTextureFormat format, uint32_t mip_level);
+ WGPUTextureFormat format,
+ uint32_t mip_level);
WGPUTextureView gpu_create_texture_view_2d(WGPUTexture texture,
WGPUTextureFormat format,
uint32_t mip_levels = 1);
diff --git a/src/gpu/headless_gpu.cc b/src/gpu/headless_gpu.cc
index 1eedc66..f204a78 100644
--- a/src/gpu/headless_gpu.cc
+++ b/src/gpu/headless_gpu.cc
@@ -47,8 +47,8 @@ void gpu_init(PlatformState* platform_state) {
}
}
-void gpu_draw(float audio_peak, float aspect_ratio, float time,
- float beat_time, float beat_phase) {
+void gpu_draw(float audio_peak, float aspect_ratio, float time, float beat_time,
+ float beat_phase) {
(void)audio_peak;
(void)aspect_ratio;
(void)time;
diff --git a/src/gpu/pipeline_builder.h b/src/gpu/pipeline_builder.h
index 68e34ea..87b9190 100644
--- a/src/gpu/pipeline_builder.h
+++ b/src/gpu/pipeline_builder.h
@@ -1,7 +1,7 @@
// WGPU render pipeline builder - reduces pipeline creation boilerplate
#pragma once
-#include <vector>
#include <string>
+#include <vector>
// Forward declarations (users must include gpu.h and shader_composer.h)
struct WGPUDeviceImpl;
@@ -13,8 +13,8 @@ typedef struct WGPURenderPipelineImpl* WGPURenderPipeline;
struct WGPUShaderModuleImpl;
typedef struct WGPUShaderModuleImpl* WGPUShaderModule;
-#include "platform/platform.h"
#include "gpu/shader_composer.h"
+#include "platform/platform.h"
class RenderPipelineBuilder {
WGPUDevice device_;
@@ -28,7 +28,7 @@ class RenderPipelineBuilder {
bool has_blend_ = false;
bool has_depth_ = false;
-public:
+ public:
explicit RenderPipelineBuilder(WGPUDevice device) : device_(device) {
desc_.primitive.topology = WGPUPrimitiveTopology_TriangleList;
desc_.primitive.cullMode = WGPUCullMode_None;
@@ -70,7 +70,8 @@ public:
return *this;
}
- RenderPipelineBuilder& depth(WGPUTextureFormat depth_fmt = WGPUTextureFormat_Depth24Plus) {
+ RenderPipelineBuilder&
+ depth(WGPUTextureFormat depth_fmt = WGPUTextureFormat_Depth24Plus) {
has_depth_ = true;
depth_.format = depth_fmt;
depth_.depthWriteEnabled = WGPUOptionalBool_True;
@@ -85,7 +86,8 @@ public:
WGPURenderPipeline build() {
color_.writeMask = WGPUColorWriteMask_All;
- if (has_blend_) color_.blend = &blend_;
+ if (has_blend_)
+ color_.blend = &blend_;
WGPUFragmentState fragment{};
fragment.module = shader_module_;
@@ -96,13 +98,16 @@ public:
WGPUPipelineLayoutDescriptor pl_desc{};
pl_desc.bindGroupLayoutCount = layouts_.size();
pl_desc.bindGroupLayouts = layouts_.data();
- WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device_, &pl_desc);
+ WGPUPipelineLayout layout =
+ wgpuDeviceCreatePipelineLayout(device_, &pl_desc);
desc_.layout = layout;
desc_.fragment = &fragment;
- if (has_depth_) desc_.depthStencil = &depth_;
+ if (has_depth_)
+ desc_.depthStencil = &depth_;
- WGPURenderPipeline pipeline = wgpuDeviceCreateRenderPipeline(device_, &desc_);
+ WGPURenderPipeline pipeline =
+ wgpuDeviceCreateRenderPipeline(device_, &desc_);
wgpuPipelineLayoutRelease(layout);
return pipeline;
}
diff --git a/src/gpu/post_process_helper.cc b/src/gpu/post_process_helper.cc
index c5bef27..2e8f6ad 100644
--- a/src/gpu/post_process_helper.cc
+++ b/src/gpu/post_process_helper.cc
@@ -3,31 +3,34 @@
#include "post_process_helper.h"
#include "demo_effects.h"
-#include "gpu/gpu.h"
-#include "gpu/shader_composer.h"
#include "gpu/bind_group_builder.h"
-#include "gpu/sampler_cache.h"
+#include "gpu/gpu.h"
#include "gpu/pipeline_builder.h"
+#include "gpu/sampler_cache.h"
+#include "gpu/shader_composer.h"
#include <cstring>
// Helper to create a standard post-processing pipeline
WGPURenderPipeline create_post_process_pipeline(WGPUDevice device,
WGPUTextureFormat format,
const char* shader_code) {
- WGPUBindGroupLayout bgl = BindGroupLayoutBuilder()
- .sampler(PP_BINDING_SAMPLER, WGPUShaderStage_Fragment)
- .texture(PP_BINDING_TEXTURE, WGPUShaderStage_Fragment)
- .uniform(PP_BINDING_UNIFORMS, WGPUShaderStage_Vertex | WGPUShaderStage_Fragment)
- .uniform(PP_BINDING_EFFECT_PARAMS, WGPUShaderStage_Fragment)
- .build(device);
+ WGPUBindGroupLayout bgl =
+ BindGroupLayoutBuilder()
+ .sampler(PP_BINDING_SAMPLER, WGPUShaderStage_Fragment)
+ .texture(PP_BINDING_TEXTURE, WGPUShaderStage_Fragment)
+ .uniform(PP_BINDING_UNIFORMS,
+ WGPUShaderStage_Vertex | WGPUShaderStage_Fragment)
+ .uniform(PP_BINDING_EFFECT_PARAMS, WGPUShaderStage_Fragment)
+ .build(device);
- const std::string composed_shader = ShaderComposer::Get().Compose({}, shader_code);
+ const std::string composed_shader =
+ ShaderComposer::Get().Compose({}, shader_code);
WGPURenderPipeline pipeline = RenderPipelineBuilder(device)
- .shader(composed_shader.c_str())
- .bind_group_layout(bgl)
- .format(format)
- .build();
+ .shader(composed_shader.c_str())
+ .bind_group_layout(bgl)
+ .format(format)
+ .build();
wgpuBindGroupLayoutRelease(bgl);
return pipeline;
@@ -47,14 +50,17 @@ void pp_update_bind_group(WGPUDevice device, WGPURenderPipeline pipeline,
wgpuBindGroupRelease(*bind_group);
WGPUBindGroupLayout bgl = wgpuRenderPipelineGetBindGroupLayout(pipeline, 0);
- WGPUSampler sampler = SamplerCache::Get().get_or_create(device, SamplerCache::linear());
+ WGPUSampler sampler =
+ SamplerCache::Get().get_or_create(device, SamplerCache::linear());
*bind_group = BindGroupBuilder()
- .sampler(PP_BINDING_SAMPLER, sampler)
- .texture(PP_BINDING_TEXTURE, input_view)
- .buffer(PP_BINDING_UNIFORMS, uniforms.buffer, uniforms.size)
- .buffer(PP_BINDING_EFFECT_PARAMS,
- effect_params.buffer ? effect_params.buffer : g_dummy_buffer.buffer,
- effect_params.buffer ? effect_params.size : g_dummy_buffer.size)
- .build(device, bgl);
+ .sampler(PP_BINDING_SAMPLER, sampler)
+ .texture(PP_BINDING_TEXTURE, input_view)
+ .buffer(PP_BINDING_UNIFORMS, uniforms.buffer, uniforms.size)
+ .buffer(PP_BINDING_EFFECT_PARAMS,
+ effect_params.buffer ? effect_params.buffer
+ : g_dummy_buffer.buffer,
+ effect_params.buffer ? effect_params.size
+ : g_dummy_buffer.size)
+ .build(device, bgl);
}
diff --git a/src/gpu/post_process_helper.h b/src/gpu/post_process_helper.h
index 1c649e7..5a7e9fd 100644
--- a/src/gpu/post_process_helper.h
+++ b/src/gpu/post_process_helper.h
@@ -8,13 +8,13 @@
// Uniform data common to all post-processing effects
struct CommonPostProcessUniforms {
- vec2 resolution; // Screen dimensions
- float aspect_ratio; // Width/height ratio
- float time; // Physical time in seconds (unaffected by tempo)
- float beat_time; // Musical time in beats (absolute, tempo-scaled)
- float beat_phase; // Fractional beat (0.0-1.0 within current beat)
- float audio_intensity;// Audio peak for beat sync
- float _pad; // Padding for 16-byte alignment
+ vec2 resolution; // Screen dimensions
+ float aspect_ratio; // Width/height ratio
+ float time; // Physical time in seconds (unaffected by tempo)
+ float beat_time; // Musical time in beats (absolute, tempo-scaled)
+ float beat_phase; // Fractional beat (0.0-1.0 within current beat)
+ float audio_intensity; // Audio peak for beat sync
+ float _pad; // Padding for 16-byte alignment
};
static_assert(sizeof(CommonPostProcessUniforms) == 32,
"CommonPostProcessUniforms must be 32 bytes for WGSL alignment");
diff --git a/src/gpu/sampler_cache.h b/src/gpu/sampler_cache.h
index 5df3958..7149921 100644
--- a/src/gpu/sampler_cache.h
+++ b/src/gpu/sampler_cache.h
@@ -16,10 +16,14 @@ struct SamplerSpec {
uint16_t anisotropy;
bool operator<(const SamplerSpec& o) const {
- if (u != o.u) return u < o.u;
- if (v != o.v) return v < o.v;
- if (mag != o.mag) return mag < o.mag;
- if (min != o.min) return min < o.min;
+ if (u != o.u)
+ return u < o.u;
+ if (v != o.v)
+ return v < o.v;
+ if (mag != o.mag)
+ return mag < o.mag;
+ if (min != o.min)
+ return min < o.min;
return anisotropy < o.anisotropy;
}
};
@@ -28,7 +32,7 @@ class SamplerCache {
std::map<SamplerSpec, WGPUSampler> cache_;
SamplerCache() = default;
-public:
+ public:
static SamplerCache& Get() {
static SamplerCache instance;
return instance;
@@ -36,7 +40,8 @@ public:
WGPUSampler get_or_create(WGPUDevice device, const SamplerSpec& spec) {
auto it = cache_.find(spec);
- if (it != cache_.end()) return it->second;
+ if (it != cache_.end())
+ return it->second;
WGPUSamplerDescriptor desc{};
desc.addressModeU = spec.u;
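
The reflowed if-chain in SamplerSpec::operator< implements an ordinary lexicographic ordering over the five fields; an equivalent formulation with std::tie, shown only for comparison and not part of this change:

    #include <tuple>
    bool operator<(const SamplerSpec& o) const {
      return std::tie(u, v, mag, min, anisotropy) <
             std::tie(o.u, o.v, o.mag, o.min, o.anisotropy);
    }
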
diff --git a/src/gpu/shaders.cc b/src/gpu/shaders.cc
index 1b50e8e..60db655 100644
--- a/src/gpu/shaders.cc
+++ b/src/gpu/shaders.cc
@@ -52,10 +52,12 @@ void InitShaderComposer() {
register_if_exists("ray_box", AssetId::ASSET_SHADER_RAY_BOX);
register_if_exists("ray_triangle", AssetId::ASSET_SHADER_RAY_TRIANGLE);
- register_if_exists("render/fullscreen_vs", AssetId::ASSET_SHADER_RENDER_FULLSCREEN_VS);
+ register_if_exists("render/fullscreen_vs",
+ AssetId::ASSET_SHADER_RENDER_FULLSCREEN_VS);
register_if_exists("math/color", AssetId::ASSET_SHADER_MATH_COLOR);
register_if_exists("math/utils", AssetId::ASSET_SHADER_MATH_UTILS);
- register_if_exists("render/raymarching", AssetId::ASSET_SHADER_RENDER_RAYMARCHING);
+ register_if_exists("render/raymarching",
+ AssetId::ASSET_SHADER_RENDER_RAYMARCHING);
register_if_exists("cnn_activation", AssetId::ASSET_SHADER_CNN_ACTIVATION);
register_if_exists("cnn_conv1x1", AssetId::ASSET_SHADER_CNN_CONV1X1);
diff --git a/src/gpu/stub_gpu.cc b/src/gpu/stub_gpu.cc
index 8d69996..95e647d 100644
--- a/src/gpu/stub_gpu.cc
+++ b/src/gpu/stub_gpu.cc
@@ -41,8 +41,8 @@ void gpu_init(PlatformState* platform_state) {
(void)platform_state;
}
-void gpu_draw(float audio_peak, float aspect_ratio, float time,
- float beat_time, float beat_phase) {
+void gpu_draw(float audio_peak, float aspect_ratio, float time, float beat_time,
+ float beat_phase) {
(void)audio_peak;
(void)aspect_ratio;
(void)time;
diff --git a/src/gpu/texture_manager.cc b/src/gpu/texture_manager.cc
index 7889283..5bca95b 100644
--- a/src/gpu/texture_manager.cc
+++ b/src/gpu/texture_manager.cc
@@ -125,8 +125,8 @@ WGPUTextureView TextureManager::get_texture_view(const std::string& name) {
}
WGPUComputePipeline TextureManager::get_or_create_compute_pipeline(
- const std::string& func_name, const char* shader_code,
- size_t uniform_size, int num_input_textures) {
+ const std::string& func_name, const char* shader_code, size_t uniform_size,
+ int num_input_textures) {
auto it = compute_pipelines_.find(func_name);
if (it != compute_pipelines_.end()) {
return it->second.pipeline;
@@ -149,7 +149,8 @@ WGPUComputePipeline TextureManager::get_or_create_compute_pipeline(
// Binding 1: uniform buffer
// Binding 2 to (2 + num_input_textures - 1): input textures
// Binding (2 + num_input_textures): sampler (if inputs > 0)
- const int max_entries = 2 + num_input_textures + (num_input_textures > 0 ? 1 : 0);
+ const int max_entries =
+ 2 + num_input_textures + (num_input_textures > 0 ? 1 : 0);
std::vector<WGPUBindGroupLayoutEntry> bgl_entries(max_entries);
// Binding 0: Output storage texture
@@ -177,7 +178,8 @@ WGPUComputePipeline TextureManager::get_or_create_compute_pipeline(
if (num_input_textures > 0) {
bgl_entries[2 + num_input_textures].binding = 2 + num_input_textures;
bgl_entries[2 + num_input_textures].visibility = WGPUShaderStage_Compute;
- bgl_entries[2 + num_input_textures].sampler.type = WGPUSamplerBindingType_Filtering;
+ bgl_entries[2 + num_input_textures].sampler.type =
+ WGPUSamplerBindingType_Filtering;
}
WGPUBindGroupLayoutDescriptor bgl_desc = {};
@@ -205,7 +207,8 @@ WGPUComputePipeline TextureManager::get_or_create_compute_pipeline(
wgpuShaderModuleRelease(shader_module);
// Cache pipeline
- ComputePipelineInfo info = {pipeline, shader_code, uniform_size, num_input_textures};
+ ComputePipelineInfo info = {pipeline, shader_code, uniform_size,
+ num_input_textures};
compute_pipelines_[func_name] = info;
return pipeline;
@@ -318,7 +321,8 @@ void TextureManager::create_gpu_noise_texture(
};
NoiseParams uniforms = {(uint32_t)params.width, (uint32_t)params.height,
params.params[0], params.params[1]};
- dispatch_compute("gen_noise", texture, params, &uniforms, sizeof(NoiseParams));
+ dispatch_compute("gen_noise", texture, params, &uniforms,
+ sizeof(NoiseParams));
WGPUTextureView view =
gpu_create_texture_view_2d(texture, WGPUTextureFormat_RGBA8Unorm);
@@ -361,15 +365,15 @@ void TextureManager::create_gpu_perlin_texture(
uint32_t octaves;
float _pad0;
};
- PerlinParams uniforms = {
- (uint32_t)params.width,
- (uint32_t)params.height,
- params.params[0],
- params.params[1],
- params.num_params > 2 ? params.params[2] : 1.0f,
- params.num_params > 3 ? params.params[3] : 0.5f,
- params.num_params > 4 ? (uint32_t)params.params[4] : 4u,
- 0.0f};
+ PerlinParams uniforms = {(uint32_t)params.width,
+ (uint32_t)params.height,
+ params.params[0],
+ params.params[1],
+ params.num_params > 2 ? params.params[2] : 1.0f,
+ params.num_params > 3 ? params.params[3] : 0.5f,
+ params.num_params > 4 ? (uint32_t)params.params[4]
+ : 4u,
+ 0.0f};
dispatch_compute("gen_perlin", texture, params, &uniforms,
sizeof(PerlinParams));
@@ -427,8 +431,8 @@ void TextureManager::create_gpu_grid_texture(
textures_[name] = gpu_tex;
#if !defined(STRIP_ALL)
- printf("Generated GPU grid texture: %s (%dx%d)\n", name.c_str(),
- params.width, params.height);
+ printf("Generated GPU grid texture: %s (%dx%d)\n", name.c_str(), params.width,
+ params.height);
#endif
}
@@ -634,7 +638,7 @@ void TextureManager::create_gpu_composite_texture(
// Dispatch composite shader
GpuProceduralParams params = {width, height, nullptr, 0};
dispatch_composite(shader_func, texture, params, uniform_data, uniform_size,
- input_views, sampler);
+ input_views, sampler);
// Create view
WGPUTextureView view =
@@ -653,11 +657,12 @@ void TextureManager::create_gpu_composite_texture(
name.c_str(), width, height, input_names.size());
#endif
}
-#endif // !defined(STRIP_GPU_COMPOSITE)
+#endif // !defined(STRIP_GPU_COMPOSITE)
#if !defined(STRIP_ALL)
-WGPUTextureView TextureManager::get_or_generate_gpu_texture(
- const std::string& name, const GpuProceduralParams& params) {
+WGPUTextureView
+TextureManager::get_or_generate_gpu_texture(const std::string& name,
+ const GpuProceduralParams& params) {
auto it = textures_.find(name);
if (it != textures_.end()) {
return it->second.view;
diff --git a/src/gpu/texture_manager.h b/src/gpu/texture_manager.h
index 5a2b9f8..ec30c7b 100644
--- a/src/gpu/texture_manager.h
+++ b/src/gpu/texture_manager.h
@@ -60,20 +60,18 @@ class TextureManager {
};
// GPU composite generation (multi-input textures)
- void create_gpu_composite_texture(const std::string& name,
- const std::string& shader_func,
- const char* shader_code,
- const void* uniform_data,
- size_t uniform_size,
- int width, int height,
- const std::vector<std::string>& input_names,
- SamplerType sampler = SamplerType::LinearClamp);
+ void create_gpu_composite_texture(
+ const std::string& name, const std::string& shader_func,
+ const char* shader_code, const void* uniform_data, size_t uniform_size,
+ int width, int height, const std::vector<std::string>& input_names,
+ SamplerType sampler = SamplerType::LinearClamp);
#endif
#if !defined(STRIP_ALL)
// On-demand lazy generation (stripped in final builds)
- WGPUTextureView get_or_generate_gpu_texture(const std::string& name,
- const GpuProceduralParams& params);
+ WGPUTextureView
+ get_or_generate_gpu_texture(const std::string& name,
+ const GpuProceduralParams& params);
#endif
// Retrieves a texture view by name (returns nullptr if not found)
@@ -87,20 +85,20 @@ class TextureManager {
int num_input_textures;
};
- WGPUComputePipeline get_or_create_compute_pipeline(const std::string& func_name,
- const char* shader_code,
- size_t uniform_size,
- int num_input_textures = 0);
+ WGPUComputePipeline
+ get_or_create_compute_pipeline(const std::string& func_name,
+ const char* shader_code, size_t uniform_size,
+ int num_input_textures = 0);
void dispatch_compute(const std::string& func_name, WGPUTexture target,
- const GpuProceduralParams& params, const void* uniform_data,
- size_t uniform_size);
+ const GpuProceduralParams& params,
+ const void* uniform_data, size_t uniform_size);
#if !defined(STRIP_GPU_COMPOSITE)
void dispatch_composite(const std::string& func_name, WGPUTexture target,
- const GpuProceduralParams& params,
- const void* uniform_data, size_t uniform_size,
- const std::vector<WGPUTextureView>& input_views,
- SamplerType sampler_type);
+ const GpuProceduralParams& params,
+ const void* uniform_data, size_t uniform_size,
+ const std::vector<WGPUTextureView>& input_views,
+ SamplerType sampler_type);
#endif
WGPUDevice device_;
diff --git a/src/gpu/texture_readback.cc b/src/gpu/texture_readback.cc
index e25da9e..bd3c79c 100644
--- a/src/gpu/texture_readback.cc
+++ b/src/gpu/texture_readback.cc
@@ -15,13 +15,9 @@ struct MapState {
WGPUMapAsyncStatus status = WGPUMapAsyncStatus_Unknown;
};
-std::vector<uint8_t> read_texture_pixels(
- WGPUInstance instance,
- WGPUDevice device,
- WGPUTexture texture,
- int width,
- int height) {
-
+std::vector<uint8_t> read_texture_pixels(WGPUInstance instance,
+ WGPUDevice device, WGPUTexture texture,
+ int width, int height) {
// Align bytes per row to 256 (COPY_BYTES_PER_ROW_ALIGNMENT)
const uint32_t bytes_per_pixel = 4; // BGRA8
const uint32_t unaligned_bytes_per_row = width * bytes_per_pixel;
@@ -99,7 +95,8 @@ std::vector<uint8_t> read_texture_pixels(
state->done = true;
};
WGPUBufferMapCallbackInfo map_info = {};
- map_info.mode = WGPUCallbackMode_AllowProcessEvents; // Fire during ProcessEvents
+ map_info.mode =
+ WGPUCallbackMode_AllowProcessEvents; // Fire during ProcessEvents
map_info.callback = map_cb;
map_info.userdata1 = &map_state;
wgpuBufferMapAsync(staging, WGPUMapMode_Read, 0, buffer_size, map_info);
@@ -178,13 +175,10 @@ static float fp16_to_float(uint16_t h) {
return result;
}
-std::vector<uint8_t> texture_readback_fp16_to_u8(
- WGPUDevice device,
- WGPUQueue queue,
- WGPUTexture texture,
- int width,
- int height) {
-
+std::vector<uint8_t> texture_readback_fp16_to_u8(WGPUDevice device,
+ WGPUQueue queue,
+ WGPUTexture texture, int width,
+ int height) {
// Align bytes per row to 256
const uint32_t bytes_per_pixel = 8; // RGBA16Float = 4 × 2 bytes
const uint32_t unaligned_bytes_per_row = width * bytes_per_pixel;
@@ -271,10 +265,9 @@ std::vector<uint8_t> texture_readback_fp16_to_u8(
std::vector<uint8_t> pixels(width * height * 4);
if (mapped_data) {
for (int y = 0; y < height; ++y) {
- const uint16_t* src_row =
- reinterpret_cast<const uint16_t*>(
- reinterpret_cast<const uint8_t*>(mapped_data) +
- y * aligned_bytes_per_row);
+ const uint16_t* src_row = reinterpret_cast<const uint16_t*>(
+ reinterpret_cast<const uint8_t*>(mapped_data) +
+ y * aligned_bytes_per_row);
for (int x = 0; x < width; ++x) {
float r = fp16_to_float(src_row[x * 4 + 0]);
float g = fp16_to_float(src_row[x * 4 + 1]);
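
Both readback paths in this file pad bytesPerRow to a multiple of 256 before the texture-to-buffer copy, as the comments above note. A minimal sketch of that alignment step (the helper name is illustrative):

    #include <cstdint>
    // WebGPU requires bytesPerRow of texture-to-buffer copies to be a
    // multiple of 256 (COPY_BYTES_PER_ROW_ALIGNMENT).
    constexpr uint32_t kCopyRowAlignment = 256;
    inline uint32_t align_bytes_per_row(uint32_t unaligned) {
      return (unaligned + kCopyRowAlignment - 1) & ~(kCopyRowAlignment - 1);
    }
    // Example: a 300-pixel-wide BGRA8 row is 300 * 4 = 1200 bytes, padded to 1280.
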
diff --git a/src/gpu/texture_readback.h b/src/gpu/texture_readback.h
index 8230e13..a99d572 100644
--- a/src/gpu/texture_readback.h
+++ b/src/gpu/texture_readback.h
@@ -7,27 +7,22 @@
#if !defined(STRIP_ALL)
#include "platform/platform.h"
-#include <vector>
#include <cstdint>
+#include <vector>
// Read texture pixels to CPU memory (synchronous, blocking)
// Format: BGRA8Unorm (4 bytes per pixel)
// Returns: width * height * 4 bytes
-std::vector<uint8_t> read_texture_pixels(
- WGPUInstance instance,
- WGPUDevice device,
- WGPUTexture texture,
- int width,
- int height);
+std::vector<uint8_t> read_texture_pixels(WGPUInstance instance,
+ WGPUDevice device, WGPUTexture texture,
+ int width, int height);
// Read RGBA16Float texture and convert to BGRA8Unorm for saving
// Converts [-1,1] float range to [0,255] uint8 range
// Returns: width * height * 4 bytes (BGRA8)
-std::vector<uint8_t> texture_readback_fp16_to_u8(
- WGPUDevice device,
- WGPUQueue queue,
- WGPUTexture texture,
- int width,
- int height);
+std::vector<uint8_t> texture_readback_fp16_to_u8(WGPUDevice device,
+ WGPUQueue queue,
+ WGPUTexture texture, int width,
+ int height);
#endif // !defined(STRIP_ALL)
diff --git a/src/platform/stub_types.h b/src/platform/stub_types.h
index ff9cccf..f2810ea 100644
--- a/src/platform/stub_types.h
+++ b/src/platform/stub_types.h
@@ -107,7 +107,9 @@ struct WGPURenderPassColorAttachment {
WGPUTextureView view;
WGPULoadOp loadOp;
WGPUStoreOp storeOp;
- struct { float r, g, b, a; } clearValue;
+ struct {
+ float r, g, b, a;
+ } clearValue;
uint32_t depthSlice;
};
struct WGPUComputePassDescriptor {};
@@ -117,9 +119,15 @@ struct WGPUSurfaceTexture {
WGPUTexture texture;
WGPUSurfaceGetCurrentTextureStatus status;
};
-struct WGPUColor { float r, g, b, a; };
-struct WGPUExtent3D { uint32_t width, height, depthOrArrayLayers; };
-struct WGPUOrigin3D { uint32_t x, y, z; };
+struct WGPUColor {
+ float r, g, b, a;
+};
+struct WGPUExtent3D {
+ uint32_t width, height, depthOrArrayLayers;
+};
+struct WGPUOrigin3D {
+ uint32_t x, y, z;
+};
struct WGPUImageCopyTexture {};
struct WGPUImageCopyBuffer {};
struct WGPUTextureDataLayout {};
@@ -135,7 +143,8 @@ struct WGPUStringView {
};
static inline WGPUStringView str_view(const char* str) {
- if (!str) return {nullptr, 0};
+ if (!str)
+ return {nullptr, 0};
return {str, strlen(str)};
}
@@ -145,8 +154,10 @@ static inline WGPUStringView label_view(const char* str) {
}
// Platform shims (no-ops)
-static inline void platform_wgpu_wait_any(WGPUInstance) {}
-static inline void platform_wgpu_set_error_callback(WGPUDevice, void*) {}
+static inline void platform_wgpu_wait_any(WGPUInstance) {
+}
+static inline void platform_wgpu_set_error_callback(WGPUDevice, void*) {
+}
// Constants
#define WGPU_DEPTH_SLICE_UNDEFINED 0xffffffff
diff --git a/src/tests/3d/test_3d.cc b/src/tests/3d/test_3d.cc
index 7132b33..13edd45 100644
--- a/src/tests/3d/test_3d.cc
+++ b/src/tests/3d/test_3d.cc
@@ -1,10 +1,10 @@
// This file is part of the 64k demo project.
// It tests the 3D system components (Camera, Object, Scene).
+#include "../common/test_math_helpers.h"
#include "3d/camera.h"
#include "3d/object.h"
#include "3d/scene.h"
-#include "../common/test_math_helpers.h"
#include <cassert>
#include <iostream>
@@ -76,7 +76,8 @@ void test_object_transform() {
vec4 original_space_t =
inv_model_t *
vec4(translated_point.x, translated_point.y, translated_point.z, 1.0);
- assert(test_near(original_space_t.x, 0.0f, 0.001f) && test_near(original_space_t.y, 0.0f, 0.001f) &&
+ assert(test_near(original_space_t.x, 0.0f, 0.001f) &&
+ test_near(original_space_t.y, 0.0f, 0.001f) &&
test_near(original_space_t.z, 0.0f, 0.001f));
// Model matrix with rotation (90 deg Y) and translation (5,0,0)
@@ -88,10 +89,12 @@ void test_object_transform() {
// Translates to (5,0,-1)
vec4 p_trs(1, 0, 0, 1);
vec4 transformed_p = model_trs * p_trs;
- assert(test_near(transformed_p.x, 5.0f, 0.001f) && test_near(transformed_p.z, -1.0f, 0.001f));
+ assert(test_near(transformed_p.x, 5.0f, 0.001f) &&
+ test_near(transformed_p.z, -1.0f, 0.001f));
// Apply inverse to transformed point to get back original point
vec4 original_space_trs = inv_model_trs * transformed_p;
- assert(test_near(original_space_trs.x, 1.0f, 0.001f) && test_near(original_space_trs.y, 0.0f, 0.001f) &&
+ assert(test_near(original_space_trs.x, 1.0f, 0.001f) &&
+ test_near(original_space_trs.y, 0.0f, 0.001f) &&
test_near(original_space_trs.z, 0.0f, 0.001f));
}
diff --git a/src/tests/3d/test_3d_physics.cc b/src/tests/3d/test_3d_physics.cc
index 9e4f71b..26b9bfa 100644
--- a/src/tests/3d/test_3d_physics.cc
+++ b/src/tests/3d/test_3d_physics.cc
@@ -1,6 +1,7 @@
// This file is part of the 64k demo project.
// Standalone "mini-demo" for testing the 3D physics engine.
+#include "../common/test_3d_helpers.h"
#include "3d/bvh.h"
#include "3d/camera.h"
#include "3d/object.h"
@@ -12,7 +13,6 @@
#include "gpu/texture_manager.h"
#include "platform/platform.h"
#include "procedural/generator.h"
-#include "../common/test_3d_helpers.h"
#include <cmath>
#include <cstdio>
#include <cstring>
diff --git a/src/tests/3d/test_3d_render.cc b/src/tests/3d/test_3d_render.cc
index 10de907..9398649 100644
--- a/src/tests/3d/test_3d_render.cc
+++ b/src/tests/3d/test_3d_render.cc
@@ -1,6 +1,7 @@
// This file is part of the 64k demo project.
// Standalone "mini-demo" for testing the 3D renderer.
+#include "../common/test_3d_helpers.h"
#include "3d/camera.h"
#include "3d/object.h"
#include "3d/renderer.h"
@@ -11,7 +12,6 @@
#include "gpu/texture_manager.h"
#include "platform/platform.h"
#include "procedural/generator.h"
-#include "../common/test_3d_helpers.h"
#include <cmath>
#include <cstdio>
#include <cstring>
diff --git a/src/tests/3d/test_mesh.cc b/src/tests/3d/test_mesh.cc
index d4ce097..2a13125 100644
--- a/src/tests/3d/test_mesh.cc
+++ b/src/tests/3d/test_mesh.cc
@@ -1,6 +1,7 @@
// This file is part of the 64k demo project.
// Standalone test for loading and rendering a single mesh from a .obj file.
+#include "../common/test_3d_helpers.h"
#include "3d/camera.h"
#include "3d/object.h"
#include "3d/renderer.h"
@@ -11,7 +12,6 @@
#include "platform/platform.h"
#include "procedural/generator.h"
#include "util/asset_manager_utils.h"
-#include "../common/test_3d_helpers.h"
#include <algorithm>
#include <cmath>
#include <cstdio>
diff --git a/src/tests/3d/test_physics.cc b/src/tests/3d/test_physics.cc
index c1c5c32..6c1f814 100644
--- a/src/tests/3d/test_physics.cc
+++ b/src/tests/3d/test_physics.cc
@@ -1,10 +1,10 @@
// This file is part of the 64k demo project.
// It tests the CPU-side SDF library and BVH for physics and collision.
+#include "../common/test_math_helpers.h"
#include "3d/bvh.h"
#include "3d/physics.h"
#include "3d/sdf_cpu.h"
-#include "../common/test_math_helpers.h"
#include <cassert>
#include <iostream>
@@ -46,18 +46,22 @@ void test_calc_normal() {
// Sphere normal at (1,0,0) should be (1,0,0)
auto sphere_sdf = [](vec3 p) { return sdf::sdSphere(p, 1.0f); };
vec3 n = sdf::calc_normal({1, 0, 0}, sphere_sdf);
- assert(test_near(n.x, 1.0f, 0.001f) && test_near(n.y, 0.0f, 0.001f) && test_near(n.z, 0.0f, 0.001f));
+ assert(test_near(n.x, 1.0f, 0.001f) && test_near(n.y, 0.0f, 0.001f) &&
+ test_near(n.z, 0.0f, 0.001f));
// Box normal at side
auto box_sdf = [](vec3 p) { return sdf::sdBox(p, {1, 1, 1}); };
n = sdf::calc_normal({1, 0, 0}, box_sdf);
- assert(test_near(n.x, 1.0f, 0.001f) && test_near(n.y, 0.0f, 0.001f) && test_near(n.z, 0.0f, 0.001f));
+ assert(test_near(n.x, 1.0f, 0.001f) && test_near(n.y, 0.0f, 0.001f) &&
+ test_near(n.z, 0.0f, 0.001f));
// Plane normal should be n
vec3 plane_n(0, 1, 0);
auto plane_sdf = [plane_n](vec3 p) { return sdf::sdPlane(p, plane_n, 1.0f); };
n = sdf::calc_normal({0, 0, 0}, plane_sdf);
- assert(test_near(n.x, plane_n.x, 0.001f) && test_near(n.y, plane_n.y, 0.001f) && test_near(n.z, plane_n.z, 0.001f));
+ assert(test_near(n.x, plane_n.x, 0.001f) &&
+ test_near(n.y, plane_n.y, 0.001f) &&
+ test_near(n.z, plane_n.z, 0.001f));
}
void test_bvh() {
diff --git a/src/tests/audio/test_audio_engine.cc b/src/tests/audio/test_audio_engine.cc
index 72c1653..3f0ad4d 100644
--- a/src/tests/audio/test_audio_engine.cc
+++ b/src/tests/audio/test_audio_engine.cc
@@ -1,10 +1,10 @@
// This file is part of the 64k demo project.
// Unit tests for AudioEngine lifecycle and resource management.
+#include "../common/audio_test_fixture.h"
#include "audio/audio_engine.h"
#include "audio/tracker.h"
#include "generated/assets.h"
-#include "../common/audio_test_fixture.h"
#include <assert.h>
#include <stdio.h>
diff --git a/src/tests/audio/test_silent_backend.cc b/src/tests/audio/test_silent_backend.cc
index cc98139..3dc1cd4 100644
--- a/src/tests/audio/test_silent_backend.cc
+++ b/src/tests/audio/test_silent_backend.cc
@@ -2,11 +2,11 @@
// It tests the SilentBackend for audio testing without hardware.
// Verifies audio.cc functionality using silent backend.
+#include "../common/audio_test_fixture.h"
#include "audio/audio.h"
#include "audio/audio_engine.h"
#include "audio/backend/silent_backend.h"
#include "audio/synth.h"
-#include "../common/audio_test_fixture.h"
#include <assert.h>
#include <stdio.h>
diff --git a/src/tests/audio/test_tracker.cc b/src/tests/audio/test_tracker.cc
index 1112e91..ab2505d 100644
--- a/src/tests/audio/test_tracker.cc
+++ b/src/tests/audio/test_tracker.cc
@@ -1,11 +1,11 @@
// This file is part of the 64k demo project.
// It tests the core functionality of the audio tracker engine.
+#include "../common/audio_test_fixture.h"
#include "audio/audio_engine.h"
#include "audio/gen.h"
#include "audio/synth.h"
#include "audio/tracker.h"
-#include "../common/audio_test_fixture.h"
// #include "generated/music_data.h" // Will be generated by tracker_compiler
#include <assert.h>
#include <stdio.h>
diff --git a/src/tests/audio/test_tracker_timing.cc b/src/tests/audio/test_tracker_timing.cc
index 7295de3..1c2d6fd 100644
--- a/src/tests/audio/test_tracker_timing.cc
+++ b/src/tests/audio/test_tracker_timing.cc
@@ -2,12 +2,12 @@
// It tests tracker timing and synchronization using MockAudioBackend.
// Verifies pattern triggers occur at correct times with proper BPM scaling.
+#include "../common/audio_test_fixture.h"
#include "audio/audio.h"
#include "audio/audio_engine.h"
#include "audio/backend/mock_audio_backend.h"
#include "audio/synth.h"
#include "audio/tracker.h"
-#include "../common/audio_test_fixture.h"
#include <assert.h>
#include <cmath>
#include <stdio.h>
@@ -15,8 +15,7 @@
#if !defined(STRIP_ALL)
// Helper: Setup audio engine for testing
-static AudioTestFixture*
-setup_audio_test(MockAudioBackend& backend) {
+static AudioTestFixture* setup_audio_test(MockAudioBackend& backend) {
audio_set_backend(&backend);
return new AudioTestFixture();
}
diff --git a/src/tests/audio/test_variable_tempo.cc b/src/tests/audio/test_variable_tempo.cc
index da056c5..9c9a471 100644
--- a/src/tests/audio/test_variable_tempo.cc
+++ b/src/tests/audio/test_variable_tempo.cc
@@ -2,11 +2,11 @@
// It tests variable tempo system with music_time scaling.
// Verifies 2x speed-up and 2x slow-down reset tricks.
+#include "../common/audio_test_fixture.h"
#include "audio/audio.h"
#include "audio/audio_engine.h"
#include "audio/backend/mock_audio_backend.h"
#include "audio/tracker.h"
-#include "../common/audio_test_fixture.h"
#include <assert.h>
#include <cmath>
#include <stdio.h>
@@ -14,8 +14,7 @@
#if !defined(STRIP_ALL)
// Helper: Setup audio engine for testing
-static AudioTestFixture*
-setup_audio_test(MockAudioBackend& backend) {
+static AudioTestFixture* setup_audio_test(MockAudioBackend& backend) {
audio_set_backend(&backend);
AudioTestFixture* fixture = new AudioTestFixture();
fixture->load_music(&g_tracker_score, g_tracker_samples,
@@ -98,7 +97,8 @@ void test_2x_speedup_reset_trick() {
// Phase 1: Accelerate from 1.0x to 2.0x over 5 seconds
printf(" Phase 1: Accelerating 1.0x → 2.0x\n");
auto accel_fn = [](float t) { return fminf(1.0f + (t / 5.0f), 2.0f); };
- simulate_tempo_fn(fixture->engine(), music_time, physical_time, 5.0f, dt, accel_fn);
+ simulate_tempo_fn(fixture->engine(), music_time, physical_time, 5.0f, dt,
+ accel_fn);
const float tempo_scale = accel_fn(physical_time);
printf(" After 5s physical: tempo=%.2fx, music_time=%.3f\n", tempo_scale,
@@ -132,7 +132,8 @@ void test_2x_slowdown_reset_trick() {
// Phase 1: Decelerate from 1.0x to 0.5x over 5 seconds
printf(" Phase 1: Decelerating 1.0x → 0.5x\n");
auto decel_fn = [](float t) { return fmaxf(1.0f - (t / 10.0f), 0.5f); };
- simulate_tempo_fn(fixture->engine(), music_time, physical_time, 5.0f, dt, decel_fn);
+ simulate_tempo_fn(fixture->engine(), music_time, physical_time, 5.0f, dt,
+ decel_fn);
const float tempo_scale = decel_fn(physical_time);
printf(" After 5s physical: tempo=%.2fx, music_time=%.3f\n", tempo_scale,
diff --git a/src/tests/audio/test_wav_dump.cc b/src/tests/audio/test_wav_dump.cc
index 9175153..a0f2a4a 100644
--- a/src/tests/audio/test_wav_dump.cc
+++ b/src/tests/audio/test_wav_dump.cc
@@ -1,11 +1,11 @@
// This file is part of the 64k demo project.
// Regression test for WAV dump backend to prevent format mismatches.
+#include "../common/audio_test_fixture.h"
#include "audio/audio.h"
#include "audio/audio_engine.h"
#include "audio/backend/wav_dump_backend.h"
#include "audio/ring_buffer.h"
-#include "../common/audio_test_fixture.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>
diff --git a/src/tests/common/audio_test_fixture.cc b/src/tests/common/audio_test_fixture.cc
index 42bf27f..13a5234 100644
--- a/src/tests/common/audio_test_fixture.cc
+++ b/src/tests/common/audio_test_fixture.cc
@@ -12,8 +12,7 @@ AudioTestFixture::~AudioTestFixture() {
}
void AudioTestFixture::load_music(const TrackerScore* score,
- const NoteParams* samples,
- const AssetId* assets,
- uint32_t count) {
+ const NoteParams* samples,
+ const AssetId* assets, uint32_t count) {
m_engine.load_music_data(score, samples, assets, count);
}
diff --git a/src/tests/common/audio_test_fixture.h b/src/tests/common/audio_test_fixture.h
index 328e167..fefc37a 100644
--- a/src/tests/common/audio_test_fixture.h
+++ b/src/tests/common/audio_test_fixture.h
@@ -9,18 +9,18 @@
// RAII wrapper for AudioEngine lifecycle
class AudioTestFixture {
-public:
+ public:
AudioTestFixture(); // Calls engine.init()
~AudioTestFixture(); // Calls engine.shutdown()
- AudioEngine& engine() { return m_engine; }
+ AudioEngine& engine() {
+ return m_engine;
+ }
// Helper: Load tracker music data
- void load_music(const TrackerScore* score,
- const NoteParams* samples,
- const AssetId* assets,
- uint32_t count);
+ void load_music(const TrackerScore* score, const NoteParams* samples,
+ const AssetId* assets, uint32_t count);
-private:
+ private:
AudioEngine m_engine;
};
diff --git a/src/tests/common/effect_test_fixture.cc b/src/tests/common/effect_test_fixture.cc
index b403ef6..70765c3 100644
--- a/src/tests/common/effect_test_fixture.cc
+++ b/src/tests/common/effect_test_fixture.cc
@@ -4,7 +4,8 @@
#include "effect_test_fixture.h"
#include <stdio.h>
-EffectTestFixture::EffectTestFixture() {}
+EffectTestFixture::EffectTestFixture() {
+}
EffectTestFixture::~EffectTestFixture() {
if (m_initialized) {
diff --git a/src/tests/common/effect_test_fixture.h b/src/tests/common/effect_test_fixture.h
index 399b5ed..3b01370 100644
--- a/src/tests/common/effect_test_fixture.h
+++ b/src/tests/common/effect_test_fixture.h
@@ -2,13 +2,13 @@
// Simplifies GPU effect test setup
#pragma once
-#include "webgpu_test_fixture.h"
#include "audio_test_fixture.h"
#include "gpu/sequence.h"
+#include "webgpu_test_fixture.h"
// Combined WebGPU + AudioEngine + MainSequence fixture
class EffectTestFixture {
-public:
+ public:
EffectTestFixture();
~EffectTestFixture();
@@ -16,11 +16,17 @@ public:
bool init();
// Accessors
- GpuContext ctx() const { return m_gpu.ctx(); }
- MainSequence& sequence() { return m_sequence; }
- AudioEngine& audio() { return m_audio.engine(); }
+ GpuContext ctx() const {
+ return m_gpu.ctx();
+ }
+ MainSequence& sequence() {
+ return m_sequence;
+ }
+ AudioEngine& audio() {
+ return m_audio.engine();
+ }
-private:
+ private:
WebGPUTestFixture m_gpu;
AudioTestFixture m_audio;
MainSequence m_sequence;
diff --git a/src/tests/common/offscreen_render_target.cc b/src/tests/common/offscreen_render_target.cc
index d322a7c..da2fa8f 100644
--- a/src/tests/common/offscreen_render_target.cc
+++ b/src/tests/common/offscreen_render_target.cc
@@ -61,6 +61,6 @@ std::vector<uint8_t> OffscreenRenderTarget::read_pixels() {
#if !defined(STRIP_ALL)
return read_texture_pixels(instance_, device_, texture_, width_, height_);
#else
- return std::vector<uint8_t>(); // Should never be called in STRIP_ALL builds
+ return std::vector<uint8_t>(); // Should never be called in STRIP_ALL builds
#endif
}
diff --git a/src/tests/common/test_math_helpers.h b/src/tests/common/test_math_helpers.h
index 99e7f9d..24d0f3a 100644
--- a/src/tests/common/test_math_helpers.h
+++ b/src/tests/common/test_math_helpers.h
@@ -2,8 +2,8 @@
// Common floating-point comparison helpers
#pragma once
-#include <cmath>
#include "util/mini_math.h"
+#include <cmath>
// Floating-point comparison with epsilon tolerance
inline bool test_near(float a, float b, float epsilon = 1e-6f) {
@@ -12,7 +12,6 @@ inline bool test_near(float a, float b, float epsilon = 1e-6f) {
// Vector comparison
inline bool test_near_vec3(vec3 a, vec3 b, float epsilon = 1e-6f) {
- return test_near(a.x, b.x, epsilon) &&
- test_near(a.y, b.y, epsilon) &&
+ return test_near(a.x, b.x, epsilon) && test_near(a.y, b.y, epsilon) &&
test_near(a.z, b.z, epsilon);
}
diff --git a/src/tests/gpu/test_demo_effects.cc b/src/tests/gpu/test_demo_effects.cc
index 7c61691..4234901 100644
--- a/src/tests/gpu/test_demo_effects.cc
+++ b/src/tests/gpu/test_demo_effects.cc
@@ -11,10 +11,10 @@
// test_scene_effects()
#include "../common/effect_test_helpers.h"
+#include "../common/webgpu_test_fixture.h"
+#include "effects/cnn_effect.h"
#include "gpu/demo_effects.h"
#include "gpu/effect.h"
-#include "effects/cnn_effect.h"
-#include "../common/webgpu_test_fixture.h"
#include <cassert>
#include <cstdio>
#include <cstring>
diff --git a/src/tests/gpu/test_effect_base.cc b/src/tests/gpu/test_effect_base.cc
index 08cf0a1..f049dff 100644
--- a/src/tests/gpu/test_effect_base.cc
+++ b/src/tests/gpu/test_effect_base.cc
@@ -3,10 +3,10 @@
// Verifies effect initialization, activation, and basic rendering.
#include "../common/effect_test_helpers.h"
-#include "gpu/demo_effects.h"
-#include "gpu/effect.h"
#include "../common/offscreen_render_target.h"
#include "../common/webgpu_test_fixture.h"
+#include "gpu/demo_effects.h"
+#include "gpu/effect.h"
#include <cassert>
#include <cstdio>
#include <memory>
diff --git a/src/tests/gpu/test_gpu_composite.cc b/src/tests/gpu/test_gpu_composite.cc
index e5ac788..28c76df 100644
--- a/src/tests/gpu/test_gpu_composite.cc
+++ b/src/tests/gpu/test_gpu_composite.cc
@@ -52,9 +52,9 @@ int main() {
} blend_uni = {256, 256, 0.5f, 0.0f};
std::vector<std::string> blend_inputs = {"noise_a", "noise_b"};
- tex_mgr.create_gpu_composite_texture("blended", "gen_blend",
- gen_blend_compute_wgsl, &blend_uni,
- sizeof(blend_uni), 256, 256, blend_inputs);
+ tex_mgr.create_gpu_composite_texture(
+ "blended", "gen_blend", gen_blend_compute_wgsl, &blend_uni,
+ sizeof(blend_uni), 256, 256, blend_inputs);
WGPUTextureView blended_view = tex_mgr.get_texture_view("blended");
if (!blended_view) {
@@ -72,9 +72,9 @@ int main() {
} mask_uni = {256, 256};
std::vector<std::string> mask_inputs = {"noise_a", "grid"};
- tex_mgr.create_gpu_composite_texture("masked", "gen_mask", gen_mask_compute_wgsl,
- &mask_uni, sizeof(mask_uni), 256, 256,
- mask_inputs);
+ tex_mgr.create_gpu_composite_texture("masked", "gen_mask",
+ gen_mask_compute_wgsl, &mask_uni,
+ sizeof(mask_uni), 256, 256, mask_inputs);
WGPUTextureView masked_view = tex_mgr.get_texture_view("masked");
if (!masked_view) {
@@ -92,9 +92,9 @@ int main() {
} blend2_uni = {256, 256, 0.7f, 0.0f};
std::vector<std::string> blend2_inputs = {"blended", "masked"};
- tex_mgr.create_gpu_composite_texture("final", "gen_blend",
- gen_blend_compute_wgsl, &blend2_uni,
- sizeof(blend2_uni), 256, 256, blend2_inputs);
+ tex_mgr.create_gpu_composite_texture(
+ "final", "gen_blend", gen_blend_compute_wgsl, &blend2_uni,
+ sizeof(blend2_uni), 256, 256, blend2_inputs);
WGPUTextureView final_view = tex_mgr.get_texture_view("final");
if (!final_view) {
diff --git a/src/tests/gpu/test_post_process_helper.cc b/src/tests/gpu/test_post_process_helper.cc
index 42b5d79..575291d 100644
--- a/src/tests/gpu/test_post_process_helper.cc
+++ b/src/tests/gpu/test_post_process_helper.cc
@@ -2,10 +2,10 @@
// It tests post-processing helper functions (pipeline and bind group creation).
// Validates that helpers can create valid WebGPU resources.
-#include "gpu/demo_effects.h"
-#include "gpu/gpu.h"
#include "../common/offscreen_render_target.h"
#include "../common/webgpu_test_fixture.h"
+#include "gpu/demo_effects.h"
+#include "gpu/gpu.h"
#include <cassert>
#include <cstdio>
@@ -15,10 +15,11 @@ extern WGPURenderPipeline create_post_process_pipeline(WGPUDevice device,
const char* shader_code);
extern void pp_update_bind_group(WGPUDevice device, WGPURenderPipeline pipeline,
WGPUBindGroup* bind_group,
- WGPUTextureView input_view,
- GpuBuffer uniforms, GpuBuffer effect_params);
+ WGPUTextureView input_view, GpuBuffer uniforms,
+ GpuBuffer effect_params);
-// Helpers are now in gpu.h (gpu_create_post_process_texture, gpu_create_texture_view_2d)
+// Helpers are now in gpu.h (gpu_create_post_process_texture,
+// gpu_create_texture_view_2d)
// Minimal valid post-process shader for testing
static const char* test_shader = R"(
@@ -66,8 +67,8 @@ static void test_bind_group_creation(WebGPUTestFixture& fixture) {
assert(pipeline != nullptr && "Pipeline required for bind group test");
// Create input texture with TEXTURE_BINDING usage
- TextureWithView input =
- gpu_create_post_process_texture(fixture.device(), 256, 256, fixture.format());
+ TextureWithView input = gpu_create_post_process_texture(
+ fixture.device(), 256, 256, fixture.format());
// Create uniform buffers using gpu_create_buffer
GpuBuffer uniforms = gpu_create_buffer(
@@ -103,10 +104,10 @@ static void test_bind_group_update(WebGPUTestFixture& fixture) {
WGPURenderPipeline pipeline = create_post_process_pipeline(
fixture.device(), fixture.format(), test_shader);
- TextureWithView texture1 =
- gpu_create_post_process_texture(fixture.device(), 256, 256, fixture.format());
- TextureWithView texture2 =
- gpu_create_post_process_texture(fixture.device(), 512, 512, fixture.format());
+ TextureWithView texture1 = gpu_create_post_process_texture(
+ fixture.device(), 256, 256, fixture.format());
+ TextureWithView texture2 = gpu_create_post_process_texture(
+ fixture.device(), 512, 512, fixture.format());
GpuBuffer uniforms = gpu_create_buffer(
fixture.device(), 16, WGPUBufferUsage_Uniform | WGPUBufferUsage_CopyDst);
@@ -150,8 +151,8 @@ static void test_full_setup(WebGPUTestFixture& fixture) {
assert(pipeline != nullptr && "Pipeline creation failed");
// Create input texture (with TEXTURE_BINDING usage)
- TextureWithView input =
- gpu_create_post_process_texture(fixture.device(), 256, 256, fixture.format());
+ TextureWithView input = gpu_create_post_process_texture(
+ fixture.device(), 256, 256, fixture.format());
// Create output texture (can use OffscreenRenderTarget for this)
OffscreenRenderTarget output_target(fixture.instance(), fixture.device(), 256,
diff --git a/src/tests/gpu/test_shader_assets.cc b/src/tests/gpu/test_shader_assets.cc
index 5619a61..7f2811e 100644
--- a/src/tests/gpu/test_shader_assets.cc
+++ b/src/tests/gpu/test_shader_assets.cc
@@ -70,16 +70,18 @@ int main() {
all_passed &=
validate_shader(AssetId::ASSET_SHADER_GAUSSIAN_BLUR, "GAUSSIAN_BLUR",
{"@vertex", "vs_main", "@fragment", "fs_main"});
- all_passed &= validate_shader(AssetId::ASSET_SHADER_SOLARIZE, "SOLARIZE",
- {"#include \"render/fullscreen_vs\"", "@fragment", "fs_main"});
- all_passed &= validate_shader(AssetId::ASSET_SHADER_DISTORT, "DISTORT",
- {"#include \"render/fullscreen_vs\"", "@fragment", "fs_main"});
- all_passed &= validate_shader(AssetId::ASSET_SHADER_CHROMA_ABERRATION,
- "CHROMA_ABERRATION",
- {"#include \"render/fullscreen_vs\"", "@fragment", "fs_main"});
- all_passed &= validate_shader(AssetId::ASSET_SHADER_VIGNETTE,
- "VIGNETTE",
- {"#include \"render/fullscreen_vs\"", "@fragment", "fs_main"});
+ all_passed &= validate_shader(
+ AssetId::ASSET_SHADER_SOLARIZE, "SOLARIZE",
+ {"#include \"render/fullscreen_vs\"", "@fragment", "fs_main"});
+ all_passed &= validate_shader(
+ AssetId::ASSET_SHADER_DISTORT, "DISTORT",
+ {"#include \"render/fullscreen_vs\"", "@fragment", "fs_main"});
+ all_passed &= validate_shader(
+ AssetId::ASSET_SHADER_CHROMA_ABERRATION, "CHROMA_ABERRATION",
+ {"#include \"render/fullscreen_vs\"", "@fragment", "fs_main"});
+ all_passed &= validate_shader(
+ AssetId::ASSET_SHADER_VIGNETTE, "VIGNETTE",
+ {"#include \"render/fullscreen_vs\"", "@fragment", "fs_main"});
all_passed &=
validate_shader(AssetId::ASSET_SHADER_VISUAL_DEBUG, "VISUAL_DEBUG",
{"@vertex", "vs_main", "@fragment", "fs_main"});
diff --git a/src/tests/gpu/test_texture_manager.cc b/src/tests/gpu/test_texture_manager.cc
index 54a1a8a..b38e69f 100644
--- a/src/tests/gpu/test_texture_manager.cc
+++ b/src/tests/gpu/test_texture_manager.cc
@@ -2,9 +2,9 @@
// It tests the TextureManager for procedural texture generation and management.
// Tests all public methods with both success and failure cases.
+#include "../common/webgpu_test_fixture.h"
#include "gpu/texture_manager.h"
#include "procedural/generator.h"
-#include "../common/webgpu_test_fixture.h"
#include <cassert>
#include <cstdio>
#include <cstring>
diff --git a/src/tests/util/test_maths.cc b/src/tests/util/test_maths.cc
index 4233adc..ceee756 100644
--- a/src/tests/util/test_maths.cc
+++ b/src/tests/util/test_maths.cc
@@ -2,8 +2,8 @@
// It tests the mathematical utility functions.
// Verifies vector operations, matrix transformations, and interpolation.
-#include "util/mini_math.h"
#include "../common/test_math_helpers.h"
+#include "util/mini_math.h"
#include <cassert>
#include <iostream>
#include <vector>
@@ -64,7 +64,8 @@ void test_vec3_special() {
// Cross Product
vec3 c = vec3::cross(v, v2);
- assert(test_near(c.x, 0, 0.001f) && test_near(c.y, 0, 0.001f) && test_near(c.z, 1, 0.001f));
+ assert(test_near(c.x, 0, 0.001f) && test_near(c.y, 0, 0.001f) &&
+ test_near(c.z, 1, 0.001f));
}
// Tests quaternion rotation, look_at, and slerp
@@ -84,11 +85,13 @@ void test_quat() {
quat half_pi_rot = quat::from_axis({0, 1, 0}, 3.14159f); // 180 deg Y
vec3 rotated_half_pi = half_pi_rot.rotate(v);
- assert(test_near(rotated_half_pi.x, -1.0f, 0.001f)); // Rotated 180 deg around Y
+ assert(
+ test_near(rotated_half_pi.x, -1.0f, 0.001f)); // Rotated 180 deg around Y
vec3 zero_vec(0, 0, 0);
vec3 rotated_zero_vec = q.rotate(zero_vec);
- assert(test_near(rotated_zero_vec.x, 0.0f, 0.001f) && test_near(rotated_zero_vec.y, 0.0f, 0.001f) &&
+ assert(test_near(rotated_zero_vec.x, 0.0f, 0.001f) &&
+ test_near(rotated_zero_vec.y, 0.0f, 0.001f) &&
test_near(rotated_zero_vec.z, 0.0f, 0.001f));
// Look At
@@ -96,21 +99,26 @@ void test_quat() {
// The local forward vector (0,0,-1) should be transformed to (1,0,0)
quat l = quat::look_at({0, 0, 0}, {10, 0, 0}, {0, 1, 0});
vec3 f = l.rotate({0, 0, -1});
- assert(test_near(f.x, 1.0f, 0.001f) && test_near(f.y, 0.0f, 0.001f) && test_near(f.z, 0.0f, 0.001f));
+ assert(test_near(f.x, 1.0f, 0.001f) && test_near(f.y, 0.0f, 0.001f) &&
+ test_near(f.z, 0.0f, 0.001f));
// Slerp Midpoint
quat q1(0, 0, 0, 1);
quat q2 = quat::from_axis({0, 1, 0}, 1.5708f); // 90 deg
quat mid = slerp(q1, q2, 0.5f); // 45 deg
- assert(test_near(mid.y, 0.3826f, 0.001f)); // sin(pi/8)
+ assert(test_near(mid.y, 0.3826f, 0.001f)); // sin(pi/8)
// Slerp edge cases
quat slerp_mid_edge = slerp(q1, q2, 0.0f);
- assert(test_near(slerp_mid_edge.w, q1.w, 0.001f) && test_near(slerp_mid_edge.x, q1.x, 0.001f) &&
- test_near(slerp_mid_edge.y, q1.y, 0.001f) && test_near(slerp_mid_edge.z, q1.z, 0.001f));
+ assert(test_near(slerp_mid_edge.w, q1.w, 0.001f) &&
+ test_near(slerp_mid_edge.x, q1.x, 0.001f) &&
+ test_near(slerp_mid_edge.y, q1.y, 0.001f) &&
+ test_near(slerp_mid_edge.z, q1.z, 0.001f));
slerp_mid_edge = slerp(q1, q2, 1.0f);
- assert(test_near(slerp_mid_edge.w, q2.w, 0.001f) && test_near(slerp_mid_edge.x, q2.x, 0.001f) &&
- test_near(slerp_mid_edge.y, q2.y, 0.001f) && test_near(slerp_mid_edge.z, q2.z, 0.001f));
+ assert(test_near(slerp_mid_edge.w, q2.w, 0.001f) &&
+ test_near(slerp_mid_edge.x, q2.x, 0.001f) &&
+ test_near(slerp_mid_edge.y, q2.y, 0.001f) &&
+ test_near(slerp_mid_edge.z, q2.z, 0.001f));
// FromTo
quat from_to_test =
@@ -174,8 +182,8 @@ void test_ease() {
// Midpoint/Logic tests
assert(ease::out_cubic(0.5f) >
0.5f); // Out curves should exceed linear value early
- assert(
- test_near(ease::in_out_quad(0.5f), 0.5f, 0.001f)); // Symmetric curves hit 0.5 at 0.5
+ assert(test_near(ease::in_out_quad(0.5f), 0.5f,
+ 0.001f)); // Symmetric curves hit 0.5 at 0.5
assert(ease::out_expo(0.5f) > 0.5f); // Exponential out should be above linear
}
diff --git a/src/util/asset_manager.cc b/src/util/asset_manager.cc
index 274f0f9..0baa063 100644
--- a/src/util/asset_manager.cc
+++ b/src/util/asset_manager.cc
@@ -199,7 +199,7 @@ void DropAsset(AssetId asset_id, const uint8_t* asset) {
// Note: This only works for assets that read from disk at runtime.
// Compiled-in assets cannot be hot-reloaded.
bool ReloadAssetsFromFile(const char* config_path) {
- (void)config_path; // Unused - just for API consistency
+ (void)config_path; // Unused - just for API consistency
// Clear cache to force reload
for (size_t i = 0; i < (size_t)AssetId::ASSET_LAST_ID; ++i) {
@@ -212,4 +212,4 @@ bool ReloadAssetsFromFile(const char* config_path) {
fprintf(stderr, "[ReloadAssets] Cache cleared\n");
return true;
}
-#endif // !defined(STRIP_ALL)
+#endif // !defined(STRIP_ALL)
diff --git a/src/util/file_watcher.cc b/src/util/file_watcher.cc
index 22eb824..ddaea1b 100644
--- a/src/util/file_watcher.cc
+++ b/src/util/file_watcher.cc
@@ -41,4 +41,4 @@ time_t FileWatcher::get_mtime(const char* path) {
return 0;
}
-#endif // !defined(STRIP_ALL)
+#endif // !defined(STRIP_ALL)
diff --git a/src/util/file_watcher.h b/src/util/file_watcher.h
index 2766a43..c183e8a 100644
--- a/src/util/file_watcher.h
+++ b/src/util/file_watcher.h
@@ -5,9 +5,9 @@
#if !defined(STRIP_ALL)
+#include <ctime>
#include <string>
#include <vector>
-#include <ctime>
class FileWatcher {
public:
@@ -28,6 +28,6 @@ class FileWatcher {
time_t get_mtime(const char* path);
};
-#endif // !defined(STRIP_ALL)
+#endif // !defined(STRIP_ALL)
-#endif // FILE_WATCHER_H_
+#endif // FILE_WATCHER_H_
diff --git a/tools/asset_packer.cc b/tools/asset_packer.cc
index af89a88..fdecb58 100644
--- a/tools/asset_packer.cc
+++ b/tools/asset_packer.cc
@@ -4,8 +4,8 @@
#include <algorithm> // For std::count
#include <cmath>
-#include <cstdio> // for simplicity, use fprintf() for output generation
-#include <cstring> // For std::memcpy
+#include <cstdio> // for simplicity, use fprintf() for output generation
+#include <cstring> // For std::memcpy
#include <filesystem> // For path normalization
#include <fstream>
#include <map>
@@ -52,8 +52,8 @@ static bool HasMeshExtension(const std::string& filename) {
struct AssetBuildInfo;
static bool ParseProceduralParams(const std::string& params_str,
- std::vector<float>* out_params,
- const std::string& asset_name) {
+ std::vector<float>* out_params,
+ const std::string& asset_name) {
size_t current_pos = 0;
while (current_pos < params_str.length()) {
size_t comma_pos = params_str.find(',', current_pos);
@@ -93,7 +93,7 @@ struct AssetBuildInfo {
};
static bool ParseProceduralFunction(const std::string& compression_type_str,
- AssetBuildInfo* info, bool is_gpu) {
+ AssetBuildInfo* info, bool is_gpu) {
const char* prefix = is_gpu ? "PROC_GPU(" : "PROC(";
size_t prefix_len = is_gpu ? 9 : 5;
@@ -144,15 +144,21 @@ static bool ParseProceduralFunction(const std::string& compression_type_str,
struct Vec3 {
float x, y, z;
- Vec3 operator+(const Vec3& o) const { return {x + o.x, y + o.y, z + o.z}; }
+ Vec3 operator+(const Vec3& o) const {
+ return {x + o.x, y + o.y, z + o.z};
+ }
Vec3 operator+=(const Vec3& o) {
x += o.x;
y += o.y;
z += o.z;
return *this;
}
- Vec3 operator-(const Vec3& o) const { return {x - o.x, y - o.y, z - o.z}; }
- Vec3 operator*(float s) const { return {x * s, y * s, z * s}; }
+ Vec3 operator-(const Vec3& o) const {
+ return {x - o.x, y - o.y, z - o.z};
+ }
+ Vec3 operator*(float s) const {
+ return {x * s, y * s, z * s};
+ }
static Vec3 cross(const Vec3& a, const Vec3& b) {
return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z,
a.x * b.y - a.y * b.x};
@@ -168,8 +174,8 @@ struct Vertex {
};
static bool ProcessMeshFile(const std::string& full_path,
- std::vector<uint8_t>* buffer,
- const std::string& asset_name) {
+ std::vector<uint8_t>* buffer,
+ const std::string& asset_name) {
std::ifstream obj_file(full_path);
if (!obj_file.is_open()) {
fprintf(stderr, "Error: Could not open mesh file: %s\n", full_path.c_str());
@@ -269,8 +275,8 @@ static bool ProcessMeshFile(const std::string& full_path,
for (const auto& face : raw_faces) {
for (int i = 0; i < 3; ++i) {
char key_buf[128];
- std::snprintf(key_buf, sizeof(key_buf), "%d/%d/%d", face.v[i],
- face.vt[i], face.vn[i]);
+ std::snprintf(key_buf, sizeof(key_buf), "%d/%d/%d", face.v[i], face.vt[i],
+ face.vn[i]);
std::string key = key_buf;
if (vertex_map.find(key) == vertex_map.end()) {
@@ -318,8 +324,8 @@ static bool ProcessMeshFile(const std::string& full_path,
}
static bool ProcessImageFile(const std::string& full_path,
- std::vector<uint8_t>* buffer,
- const std::string& asset_name) {
+ std::vector<uint8_t>* buffer,
+ const std::string& asset_name) {
int w, h, channels;
unsigned char* img_data =
stbi_load(full_path.c_str(), &w, &h, &channels, 4); // Force RGBA
diff --git a/tools/cnn_test.cc b/tools/cnn_test.cc
index 740f41d..7d060ae 100644
--- a/tools/cnn_test.cc
+++ b/tools/cnn_test.cc
@@ -5,30 +5,30 @@
#error "cnn_test requires STRIP_ALL=OFF (tool builds only)"
#endif
-#include "platform/platform.h"
-#include "gpu/gpu.h"
+#include "effects/cnn_effect.h"
+#include "generated/assets.h"
#include "gpu/bind_group_builder.h"
+#include "gpu/gpu.h"
#include "gpu/pipeline_builder.h"
-#include "gpu/sampler_cache.h"
-#include "gpu/texture_readback.h"
#include "gpu/post_process_helper.h"
-#include "effects/cnn_effect.h"
+#include "gpu/sampler_cache.h"
#include "gpu/shader_composer.h"
#include "gpu/shaders.h"
-#include "tests/common/webgpu_test_fixture.h"
+#include "gpu/texture_readback.h"
+#include "platform/platform.h"
#include "tests/common/offscreen_render_target.h"
-#include "generated/assets.h"
+#include "tests/common/webgpu_test_fixture.h"
#include "util/asset_manager.h"
#include "util/mini_math.h"
#include "stb_image.h"
#include "wgpu-native/examples/capture/stb_image_write.h"
+#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
-#include <cmath>
// Helper to get asset string or empty string
static const char* SafeGetAsset(AssetId id) {
@@ -43,11 +43,12 @@ struct Args {
float blend = 1.0f;
bool output_png = true; // Default to PNG
const char* save_intermediates = nullptr;
- int num_layers = 3; // Default to 3 layers
- bool debug_hex = false; // Print first 8 pixels as hex
- int cnn_version = 1; // 1=CNNEffect, 2=CNNv2Effect
+ int num_layers = 3; // Default to 3 layers
+ bool debug_hex = false; // Print first 8 pixels as hex
+ int cnn_version = 1; // 1=CNNEffect, 2=CNNv2Effect
const char* weights_path = nullptr; // Optional .bin weights file
- bool cnn_version_explicit = false; // Track if --cnn-version was explicitly set
+ bool cnn_version_explicit =
+ false; // Track if --cnn-version was explicitly set
};
// Parse command-line arguments
@@ -107,14 +108,17 @@ static bool parse_args(int argc, char** argv, Args* args) {
// Force CNN v2 when --weights is specified
if (args->weights_path) {
if (args->cnn_version_explicit && args->cnn_version != 2) {
- fprintf(stderr, "WARNING: --cnn-version %d ignored (--weights forces CNN v2)\n",
+ fprintf(stderr,
+ "WARNING: --cnn-version %d ignored (--weights forces CNN v2)\n",
args->cnn_version);
}
args->cnn_version = 2;
// Warn if --layers was specified (binary file config takes precedence)
- if (args->num_layers != 3) { // 3 is the default
- fprintf(stderr, "WARNING: --layers %d ignored (--weights loads layer config from .bin)\n",
+ if (args->num_layers != 3) { // 3 is the default
+ fprintf(stderr,
+ "WARNING: --layers %d ignored (--weights loads layer config from "
+ ".bin)\n",
args->num_layers);
}
}
@@ -126,20 +130,30 @@ static bool parse_args(int argc, char** argv, Args* args) {
static void print_usage(const char* prog) {
fprintf(stderr, "Usage: %s input.png output.png [OPTIONS]\n", prog);
fprintf(stderr, "\nOPTIONS:\n");
- fprintf(stderr, " --blend F Final blend amount (0.0-1.0, default: 1.0)\n");
+ fprintf(stderr,
+ " --blend F Final blend amount (0.0-1.0, default: "
+ "1.0)\n");
fprintf(stderr, " --format ppm|png Output format (default: png)\n");
- fprintf(stderr, " --layers N Number of CNN layers (1-10, default: 3, ignored with --weights)\n");
- fprintf(stderr, " --save-intermediates DIR Save intermediate layers to directory\n");
- fprintf(stderr, " --debug-hex Print first 8 pixels as hex (debug)\n");
- fprintf(stderr, " --cnn-version N CNN version: 1 (default) or 2 (ignored with --weights)\n");
- fprintf(stderr, " --weights PATH Load weights from .bin (forces CNN v2, overrides layer config)\n");
+ fprintf(stderr,
+ " --layers N Number of CNN layers (1-10, default: 3, "
+ "ignored with --weights)\n");
+ fprintf(stderr,
+ " --save-intermediates DIR Save intermediate layers to directory\n");
+ fprintf(stderr,
+ " --debug-hex Print first 8 pixels as hex (debug)\n");
+ fprintf(stderr,
+ " --cnn-version N CNN version: 1 (default) or 2 (ignored "
+ "with --weights)\n");
+ fprintf(stderr,
+ " --weights PATH Load weights from .bin (forces CNN v2, "
+ "overrides layer config)\n");
fprintf(stderr, " --help Show this help\n");
}
// Load PNG and upload to GPU texture
static WGPUTexture load_texture(WGPUDevice device, WGPUQueue queue,
- const char* path, int* out_width,
- int* out_height) {
+ const char* path, int* out_width,
+ int* out_height) {
int width, height, channels;
uint8_t* data = stbi_load(path, &width, &height, &channels, 4);
if (!data) {
@@ -192,13 +206,14 @@ static WGPUTexture load_texture(WGPUDevice device, WGPUQueue queue,
// Load PNG alpha channel as depth texture (or 1.0 if no alpha)
static WGPUTexture load_depth_from_alpha(WGPUDevice device, WGPUQueue queue,
- const char* path, int width,
- int height) {
+ const char* path, int width,
+ int height) {
int w, h, channels;
uint8_t* data = stbi_load(path, &w, &h, &channels, 4);
if (!data || w != width || h != height) {
fprintf(stderr, "Error: failed to load depth from '%s'\n", path);
- if (data) stbi_image_free(data);
+ if (data)
+ stbi_image_free(data);
return nullptr;
}
@@ -228,19 +243,13 @@ static WGPUTexture load_depth_from_alpha(WGPUDevice device, WGPUQueue queue,
}
// Write depth data
- const WGPUTexelCopyTextureInfo dst = {
- .texture = depth_texture,
- .mipLevel = 0
- };
+ const WGPUTexelCopyTextureInfo dst = {.texture = depth_texture,
+ .mipLevel = 0};
const WGPUTexelCopyBufferLayout layout = {
.bytesPerRow = static_cast<uint32_t>(width * sizeof(float)),
- .rowsPerImage = static_cast<uint32_t>(height)
- };
- const WGPUExtent3D size = {
- static_cast<uint32_t>(width),
- static_cast<uint32_t>(height),
- 1
- };
+ .rowsPerImage = static_cast<uint32_t>(height)};
+ const WGPUExtent3D size = {static_cast<uint32_t>(width),
+ static_cast<uint32_t>(height), 1};
wgpuQueueWriteTexture(queue, &dst, depth_data.data(),
depth_data.size() * sizeof(float), &layout, &size);
@@ -253,8 +262,8 @@ static WGPUTexture load_depth_from_alpha(WGPUDevice device, WGPUQueue queue,
// Create CNN render pipeline (5 bindings)
// Takes both intermediate format (RGBA16Float) and final format (BGRA8Unorm)
static WGPURenderPipeline create_cnn_pipeline(WGPUDevice device,
- WGPUTextureFormat format,
- bool is_final_layer) {
+ WGPUTextureFormat format,
+ bool is_final_layer) {
const char* shader_code = SafeGetAsset(AssetId::ASSET_SHADER_CNN_LAYER);
// Debug: check if shader loaded
@@ -274,14 +283,16 @@ static WGPURenderPipeline create_cnn_pipeline(WGPUDevice device,
.build(device);
// Use appropriate format: RGBA16Float for intermediate, BGRA8Unorm for final
- WGPUTextureFormat output_format =
- is_final_layer ? WGPUTextureFormat_BGRA8Unorm : WGPUTextureFormat_RGBA16Float;
+ WGPUTextureFormat output_format = is_final_layer
+ ? WGPUTextureFormat_BGRA8Unorm
+ : WGPUTextureFormat_RGBA16Float;
- WGPURenderPipeline pipeline = RenderPipelineBuilder(device)
- .shader(shader_code) // compose=true by default
- .bind_group_layout(bgl)
- .format(output_format)
- .build();
+ WGPURenderPipeline pipeline =
+ RenderPipelineBuilder(device)
+ .shader(shader_code) // compose=true by default
+ .bind_group_layout(bgl)
+ .format(output_format)
+ .build();
wgpuBindGroupLayoutRelease(bgl);
return pipeline;
@@ -289,7 +300,7 @@ static WGPURenderPipeline create_cnn_pipeline(WGPUDevice device,
// Begin render pass with clear
static WGPURenderPassEncoder begin_render_pass(WGPUCommandEncoder encoder,
- WGPUTextureView view) {
+ WGPUTextureView view) {
const WGPURenderPassColorAttachment color_attachment = {
.view = view,
.depthSlice = WGPU_DEPTH_SLICE_UNDEFINED,
@@ -328,7 +339,8 @@ static bool save_png(const char* path, const std::vector<uint8_t>& pixels,
// Create horizontal grayscale composite of layer outputs
// Each layer is already 4x wide (showing 4 channels), stack them vertically
-static bool save_layer_composite(const char* dir, int width, int height, int num_layers) {
+static bool save_layer_composite(const char* dir, int width, int height,
+ int num_layers) {
// Each layer PNG is already 4x wide with 4 channels side-by-side
int layer_width = width * 4;
@@ -341,8 +353,11 @@ static bool save_layer_composite(const char* dir, int width, int height, int num
int w, h, channels;
uint8_t* data = stbi_load(path, &w, &h, &channels, 1); // Load as grayscale
if (!data || w != layer_width || h != height) {
- if (data) stbi_image_free(data);
- fprintf(stderr, "Warning: failed to load layer %d for composite (expected %dx%d, got %dx%d)\n",
+ if (data)
+ stbi_image_free(data);
+ fprintf(stderr,
+ "Warning: failed to load layer %d for composite (expected %dx%d, "
+ "got %dx%d)\n",
i, layer_width, height, w, h);
return false;
}
@@ -359,13 +374,15 @@ static bool save_layer_composite(const char* dir, int width, int height, int num
for (int y = 0; y < height; ++y) {
int src_row_offset = y * layer_width;
int dst_row_offset = (layer * height + y) * layer_width;
- memcpy(&composite[dst_row_offset], &layers[layer][src_row_offset], layer_width);
+ memcpy(&composite[dst_row_offset], &layers[layer][src_row_offset],
+ layer_width);
}
}
// Save as grayscale PNG (stacked vertically)
char composite_path[512];
- snprintf(composite_path, sizeof(composite_path), "%s/layers_composite.png", dir);
+ snprintf(composite_path, sizeof(composite_path), "%s/layers_composite.png",
+ dir);
if (!stbi_write_png(composite_path, layer_width, composite_height, 1,
composite.data(), layer_width)) {
fprintf(stderr, "Error: failed to write composite PNG\n");
@@ -388,8 +405,8 @@ static bool save_ppm(const char* path, const std::vector<uint8_t>& pixels,
fprintf(f, "P6\n%d %d\n255\n", width, height);
for (int i = 0; i < width * height; ++i) {
- const uint8_t rgb[3] = {pixels[i * 4 + 2], // R
- pixels[i * 4 + 1], // G
+ const uint8_t rgb[3] = {pixels[i * 4 + 2], // R
+ pixels[i * 4 + 1], // G
pixels[i * 4 + 0]}; // B
fwrite(rgb, 1, 3, f);
}
@@ -423,9 +440,9 @@ struct CNNv2StaticFeatureParams {
};
// Convert RGBA32Uint (packed f16) texture to BGRA8Unorm
-static std::vector<uint8_t> readback_rgba32uint_to_bgra8(
- WGPUDevice device, WGPUQueue queue, WGPUTexture texture,
- int width, int height) {
+static std::vector<uint8_t>
+readback_rgba32uint_to_bgra8(WGPUDevice device, WGPUQueue queue,
+ WGPUTexture texture, int width, int height) {
// Create staging buffer
const uint32_t bytes_per_row = width * 16; // 4×u32 per pixel
const uint32_t padded_bytes_per_row = (bytes_per_row + 255) & ~255;
@@ -450,10 +467,8 @@ static std::vector<uint8_t> readback_rgba32uint_to_bgra8(
dst.layout.bytesPerRow = padded_bytes_per_row;
dst.layout.rowsPerImage = height;
- WGPUExtent3D copy_size = {
- static_cast<uint32_t>(width),
- static_cast<uint32_t>(height),
- 1};
+ WGPUExtent3D copy_size = {static_cast<uint32_t>(width),
+ static_cast<uint32_t>(height), 1};
wgpuCommandEncoderCopyTextureToBuffer(encoder, &src, &dst, &copy_size);
@@ -527,7 +542,8 @@ static std::vector<uint8_t> readback_rgba32uint_to_bgra8(
uint32_t frac = h & 0x3FF;
if (exp == 0) {
- if (frac == 0) return sign ? -0.0f : 0.0f;
+ if (frac == 0)
+ return sign ? -0.0f : 0.0f;
// Denormal
float val = frac / 1024.0f / 16384.0f;
return sign ? -val : val;
@@ -548,8 +564,10 @@ static std::vector<uint8_t> readback_rgba32uint_to_bgra8(
// Clamp to [0,1] and convert to u8
auto clamp_u8 = [](float v) -> uint8_t {
- if (v <= 0.0f) return 0;
- if (v >= 1.0f) return 255;
+ if (v <= 0.0f)
+ return 0;
+ if (v >= 1.0f)
+ return 255;
return static_cast<uint8_t>(v * 255.0f + 0.5f);
};
@@ -566,14 +584,16 @@ static std::vector<uint8_t> readback_rgba32uint_to_bgra8(
return result;
}
-// Read RGBA32Uint and create 4x wide grayscale composite (each channel side-by-side)
-static std::vector<uint8_t> readback_rgba32uint_to_composite(
- WGPUDevice device, WGPUQueue queue, WGPUTexture texture,
- int width, int height) {
-
+// Read RGBA32Uint and create 4x wide grayscale composite (each channel
+// side-by-side)
+static std::vector<uint8_t>
+readback_rgba32uint_to_composite(WGPUDevice device, WGPUQueue queue,
+ WGPUTexture texture, int width, int height) {
// First get BGRA8 data
- std::vector<uint8_t> bgra = readback_rgba32uint_to_bgra8(device, queue, texture, width, height);
- if (bgra.empty()) return {};
+ std::vector<uint8_t> bgra =
+ readback_rgba32uint_to_bgra8(device, queue, texture, width, height);
+ if (bgra.empty())
+ return {};
// Create 4x wide grayscale image (one channel per horizontal strip)
int composite_width = width * 4;
@@ -591,10 +611,14 @@ static std::vector<uint8_t> readback_rgba32uint_to_composite(
auto to_gray = [](uint8_t val) -> uint8_t { return val; };
// Place each channel in its horizontal strip
- composite[y * composite_width + (0 * width + x)] = to_gray(r); // Channel 0
- composite[y * composite_width + (1 * width + x)] = to_gray(g); // Channel 1
- composite[y * composite_width + (2 * width + x)] = to_gray(b); // Channel 2
- composite[y * composite_width + (3 * width + x)] = to_gray(a); // Channel 3
+ composite[y * composite_width + (0 * width + x)] =
+ to_gray(r); // Channel 0
+ composite[y * composite_width + (1 * width + x)] =
+ to_gray(g); // Channel 1
+ composite[y * composite_width + (2 * width + x)] =
+ to_gray(b); // Channel 2
+ composite[y * composite_width + (3 * width + x)] =
+ to_gray(a); // Channel 3
}
}
@@ -610,14 +634,15 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
// Load weights (from file or asset system)
size_t weights_size = 0;
const uint8_t* weights_data = nullptr;
- std::vector<uint8_t> file_weights; // For file-based loading
+ std::vector<uint8_t> file_weights; // For file-based loading
if (args.weights_path) {
// Load from file
printf("Loading weights from '%s'...\n", args.weights_path);
FILE* f = fopen(args.weights_path, "rb");
if (!f) {
- fprintf(stderr, "Error: failed to open weights file '%s'\n", args.weights_path);
+ fprintf(stderr, "Error: failed to open weights file '%s'\n",
+ args.weights_path);
return false;
}
@@ -637,7 +662,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
weights_data = file_weights.data();
} else {
// Load from asset system
- weights_data = (const uint8_t*)GetAsset(AssetId::ASSET_WEIGHTS_CNN_V2, &weights_size);
+ weights_data =
+ (const uint8_t*)GetAsset(AssetId::ASSET_WEIGHTS_CNN_V2, &weights_size);
}
if (!weights_data || weights_size < 20) {
@@ -652,7 +678,7 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
uint32_t num_layers = header[2];
uint32_t total_weights = header[3];
- if (magic != 0x324e4e43) { // 'CNN2'
+ if (magic != 0x324e4e43) { // 'CNN2'
fprintf(stderr, "Error: Invalid CNN v2 weights magic\n");
return false;
}
@@ -684,9 +710,10 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
info.out_channels, info.weight_count);
}
- // Create weights storage buffer (skip header + layer info, upload only weights)
- size_t header_size = 20; // 5 u32
- size_t layer_info_size = 20 * layer_info.size(); // 5 u32 per layer
+ // Create weights storage buffer (skip header + layer info, upload only
+ // weights)
+ size_t header_size = 20; // 5 u32
+ size_t layer_info_size = 20 * layer_info.size(); // 5 u32 per layer
size_t weights_offset = header_size + layer_info_size;
size_t weights_only_size = weights_size - weights_offset;
@@ -697,7 +724,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
WGPUBuffer weights_buffer =
wgpuDeviceCreateBuffer(device, &weights_buffer_desc);
- wgpuQueueWriteBuffer(queue, weights_buffer, 0, weights_data + weights_offset, weights_only_size);
+ wgpuQueueWriteBuffer(queue, weights_buffer, 0, weights_data + weights_offset,
+ weights_only_size);
// Create input view
WGPUTextureView input_view =
@@ -705,7 +733,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
// Create static features texture (RGBA32Uint)
const WGPUTextureDescriptor static_desc = {
- .usage = WGPUTextureUsage_StorageBinding | WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopySrc,
+ .usage = WGPUTextureUsage_StorageBinding |
+ WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopySrc,
.dimension = WGPUTextureDimension_2D,
.size = {static_cast<uint32_t>(width), static_cast<uint32_t>(height), 1},
.format = WGPUTextureFormat_RGBA32Uint,
@@ -740,10 +769,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
};
// Load shaders
- const char* static_shader =
- SafeGetAsset(AssetId::ASSET_SHADER_CNN_V2_STATIC);
- const char* layer_shader =
- SafeGetAsset(AssetId::ASSET_SHADER_CNN_V2_COMPUTE);
+ const char* static_shader = SafeGetAsset(AssetId::ASSET_SHADER_CNN_V2_STATIC);
+ const char* layer_shader = SafeGetAsset(AssetId::ASSET_SHADER_CNN_V2_COMPUTE);
if (!static_shader[0] || !layer_shader[0]) {
fprintf(stderr, "Error: CNN v2 shaders not available\n");
@@ -789,7 +816,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
linear_sampler_desc.lodMaxClamp = 32.0f;
linear_sampler_desc.maxAnisotropy = 1;
- WGPUSampler linear_sampler = wgpuDeviceCreateSampler(device, &linear_sampler_desc);
+ WGPUSampler linear_sampler =
+ wgpuDeviceCreateSampler(device, &linear_sampler_desc);
// Create static features compute pipeline
WGPUShaderSourceWGSL static_wgsl = {};
@@ -822,7 +850,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
static_bgl_entries[3].binding = 3;
static_bgl_entries[3].visibility = WGPUShaderStage_Compute;
- static_bgl_entries[3].texture.sampleType = WGPUTextureSampleType_UnfilterableFloat;
+ static_bgl_entries[3].texture.sampleType =
+ WGPUTextureSampleType_UnfilterableFloat;
static_bgl_entries[3].texture.viewDimension = WGPUTextureViewDimension_2D;
static_bgl_entries[4].binding = 4;
@@ -877,7 +906,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
static_bg_entries[2].binding = 2;
static_bg_entries[2].textureView = input_view;
static_bg_entries[3].binding = 3;
- static_bg_entries[3].textureView = depth_view; // Depth from alpha channel (matches training)
+ static_bg_entries[3].textureView =
+ depth_view; // Depth from alpha channel (matches training)
static_bg_entries[4].binding = 4;
static_bg_entries[4].textureView = static_features_view;
static_bg_entries[5].binding = 5;
@@ -992,7 +1022,7 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
uint32_t workgroups_x = (width + 7) / 8;
uint32_t workgroups_y = (height + 7) / 8;
wgpuComputePassEncoderDispatchWorkgroups(static_pass, workgroups_x,
- workgroups_y, 1);
+ workgroups_y, 1);
wgpuComputePassEncoderEnd(static_pass);
wgpuComputePassEncoderRelease(static_pass);
@@ -1014,7 +1044,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
printf("Saving static features to '%s'...\n", layer_path);
// Read back RGBA32Uint and create 8-channel grayscale composite
- // Static features has 8 channels (packed as 4×u32), create 8x wide composite
+ // Static features has 8 channels (packed as 4×u32), create 8x wide
+ // composite
std::vector<uint8_t> bgra = readback_rgba32uint_to_bgra8(
device, queue, static_features_tex, width, height);
@@ -1083,8 +1114,7 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
layer_bg_desc.entryCount = 6;
layer_bg_desc.entries = layer_bg_entries;
- WGPUBindGroup layer_bg =
- wgpuDeviceCreateBindGroup(device, &layer_bg_desc);
+ WGPUBindGroup layer_bg = wgpuDeviceCreateBindGroup(device, &layer_bg_desc);
WGPUComputePassEncoder layer_pass =
wgpuCommandEncoderBeginComputePass(encoder, nullptr);
@@ -1092,7 +1122,7 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
wgpuComputePassEncoderSetBindGroup(layer_pass, 0, layer_bg, 0, nullptr);
wgpuComputePassEncoderDispatchWorkgroups(layer_pass, workgroups_x,
- workgroups_y, 1);
+ workgroups_y, 1);
wgpuComputePassEncoderEnd(layer_pass);
wgpuComputePassEncoderRelease(layer_pass);
@@ -1138,7 +1168,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
// Create layer composite if intermediates were saved
if (args.save_intermediates) {
- save_layer_composite(args.save_intermediates, width, height, layer_info.size());
+ save_layer_composite(args.save_intermediates, width, height,
+ layer_info.size());
}
// Readback final result (from last layer's output texture)
@@ -1149,7 +1180,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
if (pixels.empty()) {
fprintf(stderr, "Error: GPU readback failed\n");
- for (auto buf : layer_params_buffers) wgpuBufferRelease(buf);
+ for (auto buf : layer_params_buffers)
+ wgpuBufferRelease(buf);
wgpuComputePipelineRelease(layer_pipeline);
wgpuBindGroupLayoutRelease(layer_bgl);
wgpuBindGroupRelease(static_bg);
@@ -1195,7 +1227,8 @@ static bool process_cnn_v2(WGPUDevice device, WGPUQueue queue,
}
// Cleanup
- for (auto buf : layer_params_buffers) wgpuBufferRelease(buf);
+ for (auto buf : layer_params_buffers)
+ wgpuBufferRelease(buf);
wgpuComputePipelineRelease(layer_pipeline);
wgpuBindGroupLayoutRelease(layer_bgl);
wgpuBindGroupRelease(static_bg);
@@ -1250,8 +1283,8 @@ int main(int argc, char** argv) {
// Branch based on CNN version
if (args.cnn_version == 2) {
- bool success = process_cnn_v2(device, queue, instance, input_texture,
- width, height, args);
+ bool success = process_cnn_v2(device, queue, instance, input_texture, width,
+ height, args);
wgpuTextureRelease(input_texture);
SamplerCache::Get().clear();
fixture.shutdown();
@@ -1274,8 +1307,10 @@ int main(int argc, char** argv) {
if (!pipeline_intermediate || !pipeline_final) {
fprintf(stderr, "Error: failed to create CNN pipelines\n");
- if (pipeline_intermediate) wgpuRenderPipelineRelease(pipeline_intermediate);
- if (pipeline_final) wgpuRenderPipelineRelease(pipeline_final);
+ if (pipeline_intermediate)
+ wgpuRenderPipelineRelease(pipeline_intermediate);
+ if (pipeline_final)
+ wgpuRenderPipelineRelease(pipeline_final);
wgpuTextureViewRelease(input_view);
wgpuTextureRelease(input_texture);
SamplerCache::Get().clear();
@@ -1284,7 +1319,8 @@ int main(int argc, char** argv) {
}
// Get bind group layout from intermediate pipeline (same for both)
- WGPUBindGroupLayout bgl = wgpuRenderPipelineGetBindGroupLayout(pipeline_intermediate, 0);
+ WGPUBindGroupLayout bgl =
+ wgpuRenderPipelineGetBindGroupLayout(pipeline_intermediate, 0);
// Create uniform buffers
const WGPUBufferDescriptor common_uniform_desc = {
@@ -1363,15 +1399,14 @@ int main(int argc, char** argv) {
sizeof(layer_params));
// Build bind group
- WGPUBindGroup bind_group = BindGroupBuilder()
- .sampler(0, sampler)
- .texture(1, current_input)
- .buffer(2, common_uniform_buffer,
- sizeof(CommonPostProcessUniforms))
- .buffer(3, layer_params_buffer,
- sizeof(CNNLayerParams))
- .texture(4, original_view)
- .build(device, bgl);
+ WGPUBindGroup bind_group =
+ BindGroupBuilder()
+ .sampler(0, sampler)
+ .texture(1, current_input)
+ .buffer(2, common_uniform_buffer, sizeof(CommonPostProcessUniforms))
+ .buffer(3, layer_params_buffer, sizeof(CNNLayerParams))
+ .texture(4, original_view)
+ .build(device, bgl);
// Render to appropriate output texture with correct pipeline
bool is_final = (layer == NUM_LAYERS - 1);
@@ -1379,7 +1414,8 @@ int main(int argc, char** argv) {
if (is_final) {
// Final layer: use OffscreenRenderTarget (known working readback)
OffscreenRenderTarget rt(instance, device, width, height);
- WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+ WGPUCommandEncoder encoder =
+ wgpuDeviceCreateCommandEncoder(device, nullptr);
WGPURenderPassEncoder pass = begin_render_pass(encoder, rt.view());
wgpuRenderPassEncoderSetPipeline(pass, pipeline_final);
wgpuRenderPassEncoderSetBindGroup(pass, 0, bind_group, 0, nullptr);
@@ -1456,11 +1492,12 @@ int main(int argc, char** argv) {
}
printf("Done! Output saved to '%s'\n", args.output_path);
- break; // Exit loop after final layer
+ break; // Exit loop after final layer
} else {
// Intermediate layers: render to ping-pong textures
WGPUTextureView output_view = intermediate_views[dst_idx];
- WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+ WGPUCommandEncoder encoder =
+ wgpuDeviceCreateCommandEncoder(device, nullptr);
WGPURenderPassEncoder pass = begin_render_pass(encoder, output_view);
wgpuRenderPassEncoderSetPipeline(pass, pipeline_intermediate);
wgpuRenderPassEncoderSetBindGroup(pass, 0, bind_group, 0, nullptr);
@@ -1501,7 +1538,8 @@ int main(int argc, char** argv) {
if (!pixels.empty()) {
save_png(layer_path, pixels, width, height);
} else {
- fprintf(stderr, "Warning: failed to read intermediate layer %d\n", layer);
+ fprintf(stderr, "Warning: failed to read intermediate layer %d\n",
+ layer);
}
}
}
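The readback helpers reformatted above unpack IEEE-754 half-precision values out of the RGBA32Uint texture (each u32 carries packed f16 bits) before clamping to 8-bit, and they pad staging-buffer rows to WebGPU's 256-byte bytesPerRow alignment for texture-to-buffer copies. A minimal sketch of the f16 decode those hunks are built around, assuming one half in the low 16 bits of each word; the helper name is illustrative, not the one used in cnn_test.cc, and the Inf/NaN branch is an assumption the hunks above do not show:

#include <cmath>
#include <cstdint>

// Illustrative f16 -> f32 decode (not the tool's actual helper): the zero and
// denormal branches mirror the hunks above; the Inf/NaN branch is assumed.
static float half_to_float(uint16_t h) {
  const uint32_t sign = (h >> 15) & 0x1;
  const uint32_t exp = (h >> 10) & 0x1F;
  const uint32_t frac = h & 0x3FF;
  if (exp == 0) {
    if (frac == 0)
      return sign ? -0.0f : 0.0f;
    const float val = frac / 1024.0f / 16384.0f;  // denormal: frac * 2^-24
    return sign ? -val : val;
  }
  if (exp == 31)  // assumed Inf/NaN handling
    return frac == 0 ? (sign ? -INFINITY : INFINITY) : NAN;
  // normal: (1 + frac/1024) * 2^(exp - 15)
  const float val = (1.0f + frac / 1024.0f) * std::ldexp(1.0f, (int)exp - 15);
  return sign ? -val : val;
}

The (bytes_per_row + 255) & ~255 rounding in the same helpers exists because WebGPU rejects texture-to-buffer copies whose bytesPerRow is not a multiple of 256.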
diff --git a/tools/seq_compiler.cc b/tools/seq_compiler.cc
index 2448a3b..5804031 100644
--- a/tools/seq_compiler.cc
+++ b/tools/seq_compiler.cc
@@ -63,17 +63,10 @@ parse_parameters(const std::string& args) {
bool is_post_process_effect(const std::string& class_name) {
// List of known post-process effects
static const std::vector<std::string> post_process_effects = {
- "FadeEffect",
- "FlashEffect",
- "GaussianBlurEffect",
- "SolarizeEffect",
- "VignetteEffect",
- "ChromaAberrationEffect",
- "DistortEffect",
- "ThemeModulationEffect",
- "CNNEffect",
- "PassthroughEffect",
- "CircleMaskEffect"};
+ "FadeEffect", "FlashEffect", "GaussianBlurEffect",
+ "SolarizeEffect", "VignetteEffect", "ChromaAberrationEffect",
+ "DistortEffect", "ThemeModulationEffect", "CNNEffect",
+ "PassthroughEffect", "CircleMaskEffect"};
return std::find(post_process_effects.begin(), post_process_effects.end(),
class_name) != post_process_effects.end();
}
@@ -112,7 +105,7 @@ float get_sequence_end(const SequenceEntry& seq) {
// Analyze timeline: find max time and sort sequences by start time
TimelineMetrics analyze_timeline(const std::vector<SequenceEntry>& sequences,
- const std::string& demo_end_time) {
+ const std::string& demo_end_time) {
float max_time = demo_end_time.empty() ? 0.0f : std::stof(demo_end_time);
for (const auto& seq : sequences) {
float seq_start = std::stof(seq.start_time);
@@ -343,7 +336,8 @@ void generate_gantt_chart(const std::string& output_file,
out << "\n\n";
// Draw sequences and effects
- for (size_t seq_idx = 0; seq_idx < metrics.sorted_sequences.size(); ++seq_idx) {
+ for (size_t seq_idx = 0; seq_idx < metrics.sorted_sequences.size();
+ ++seq_idx) {
const auto& seq = metrics.sorted_sequences[seq_idx];
float seq_start = std::stof(seq.start_time);
float seq_end = get_sequence_end(seq);
@@ -510,7 +504,8 @@ void generate_gantt_html(const std::string& output_file,
// Draw sequences and effects
int y_offset = margin_top;
- for (size_t seq_idx = 0; seq_idx < metrics.sorted_sequences.size(); ++seq_idx) {
+ for (size_t seq_idx = 0; seq_idx < metrics.sorted_sequences.size();
+ ++seq_idx) {
const auto& seq = metrics.sorted_sequences[seq_idx];
float seq_start = std::stof(seq.start_time);
float seq_end = get_sequence_end(seq);
@@ -903,7 +898,8 @@ int main(int argc, char* argv[]) {
}
}
- // Validate: detect cross-sequence priority collisions for concurrent sequences
+ // Validate: detect cross-sequence priority collisions for concurrent
+ // sequences
std::map<std::string, std::vector<size_t>> time_groups;
for (size_t i = 0; i < sequences.size(); ++i) {
time_groups[sequences[i].start_time].push_back(i);
@@ -912,7 +908,8 @@ int main(int argc, char* argv[]) {
for (const auto& [start_time, seq_indices] : time_groups) {
if (seq_indices.size() > 1) {
// Multiple sequences start at the same time
- std::map<int, std::vector<std::pair<std::string, size_t>>> cross_priority_map;
+ std::map<int, std::vector<std::pair<std::string, size_t>>>
+ cross_priority_map;
for (size_t seq_idx : seq_indices) {
const auto& seq = sequences[seq_idx];
@@ -933,12 +930,14 @@ int main(int argc, char* argv[]) {
"priority "
<< prio << ":\n";
for (const auto& [effect, seq_idx] : effects) {
- std::cerr << " - " << effect << " (sequence #" << seq_idx << ")\n";
+ std::cerr << " - " << effect << " (sequence #" << seq_idx
+ << ")\n";
}
std::cerr << " Post-process effects from different sequences at the "
"same time will be\n";
- std::cerr << " merged into a single render chain. Consider adjusting "
- "priorities to clarify order.\n";
+ std::cerr
+ << " merged into a single render chain. Consider adjusting "
+ "priorities to clarify order.\n";
}
}
}
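The validation pass touched in the last hunks groups sequences by identical start time and then warns when post-process effects from different sequences claim the same priority, since those effects get merged into a single render chain. A compact sketch of that shape of check, with simplified stand-in types (SeqStub and its fields are assumptions for illustration, not seq_compiler's actual SequenceEntry):

#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct SeqStub {  // illustrative stand-in for a parsed sequence entry
  std::string start_time;
  std::vector<std::pair<std::string, int>> effects;  // (class name, priority)
};

// Warn when two sequences that start together reuse the same priority.
static void warn_priority_collisions(const std::vector<SeqStub>& sequences) {
  std::map<std::string, std::vector<size_t>> time_groups;
  for (size_t i = 0; i < sequences.size(); ++i)
    time_groups[sequences[i].start_time].push_back(i);

  for (const auto& [start_time, seq_indices] : time_groups) {
    if (seq_indices.size() < 2)
      continue;  // collisions only matter for concurrent sequences
    std::map<int, std::vector<std::pair<std::string, size_t>>> by_priority;
    for (size_t idx : seq_indices)
      for (const auto& [name, prio] : sequences[idx].effects)
        by_priority[prio].push_back({name, idx});
    for (const auto& [prio, hits] : by_priority)
      if (hits.size() > 1)
        fprintf(stderr, "WARNING: priority %d reused at t=%s by %zu effects\n",
                prio, start_time.c_str(), hits.size());
  }
}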
diff --git a/tools/shadertoy/template.cc b/tools/shadertoy/template.cc
index 35c9b10..7636c0a 100644
--- a/tools/shadertoy/template.cc
+++ b/tools/shadertoy/template.cc
@@ -3,8 +3,8 @@
// TODO: Update description, rename class
#include "effects/shadertoy_effect.h"
-#include "gpu/shader_composer.h"
#include "generated/assets.h"
+#include "gpu/shader_composer.h"
// TODO: Rename class and adjust constructor parameters
ShaderToyEffect::ShaderToyEffect(const GpuContext& ctx) : Effect(ctx) {
@@ -34,8 +34,8 @@ void ShaderToyEffect::init(MainSequence* demo) {
// TODO: Update asset name to match your shader file
size_t shader_size;
- const char* shader_code = (const char*)GetAsset(
- AssetId::ASSET_SHADERTOY_SHADER, &shader_size);
+ const char* shader_code =
+ (const char*)GetAsset(AssetId::ASSET_SHADERTOY_SHADER, &shader_size);
std::string composed = ShaderComposer::Get().Compose({}, shader_code);
@@ -96,8 +96,8 @@ void ShaderToyEffect::init(MainSequence* demo) {
bind_group_ = wgpuDeviceCreateBindGroup(ctx_.device, &bg_desc);
}
-void ShaderToyEffect::render(WGPURenderPassEncoder pass, float time,
- float beat, float intensity, float aspect_ratio) {
+void ShaderToyEffect::render(WGPURenderPassEncoder pass, float time, float beat,
+ float intensity, float aspect_ratio) {
const CommonPostProcessUniforms uniforms = {
.resolution = {static_cast<float>(width_), static_cast<float>(height_)},
.aspect_ratio = aspect_ratio,
diff --git a/tools/shadertoy/template.h b/tools/shadertoy/template.h
index 82f8b39..74be9f2 100644
--- a/tools/shadertoy/template.h
+++ b/tools/shadertoy/template.h
@@ -26,7 +26,7 @@ class ShaderToyEffect : public Effect {
struct ShaderToyParams {
float param1;
float param2;
- float _pad[2]; // Padding to 16 bytes
+ float _pad[2]; // Padding to 16 bytes
};
static_assert(sizeof(ShaderToyParams) == 16,
"ShaderToyParams must be 16 bytes for WGSL alignment");
diff --git a/tools/tracker_compiler.cc b/tools/tracker_compiler.cc
index d12005d..de635cd 100644
--- a/tools/tracker_compiler.cc
+++ b/tools/tracker_compiler.cc
@@ -132,8 +132,8 @@ struct ResourceAnalysis {
// Analyze resource requirements from tracker data
ResourceAnalysis analyze_resources(const std::vector<Sample>& samples,
- const std::vector<Pattern>& patterns,
- const std::vector<Trigger>& score) {
+ const std::vector<Pattern>& patterns,
+ const std::vector<Trigger>& score) {
ResourceAnalysis result = {};
// Count sample types
@@ -168,9 +168,9 @@ ResourceAnalysis analyze_resources(const std::vector<Sample>& samples,
result.max_simultaneous_patterns * result.avg_events_per_pattern;
// Conservative recommendations with 50% safety margin
- result.min_spectrograms = result.asset_sample_count +
- (result.generated_sample_count *
- result.estimated_max_polyphony);
+ result.min_spectrograms =
+ result.asset_sample_count +
+ (result.generated_sample_count * result.estimated_max_polyphony);
result.recommended_spectrograms = (int)(result.min_spectrograms * 1.5f);
result.recommended_voices = result.estimated_max_polyphony * 2;
@@ -236,9 +236,10 @@ int validate_tracker_data(const std::vector<Sample>& samples,
errors++;
}
if (e.volume < 0.0f || e.volume > 2.0f) {
- fprintf(stderr,
- "WARNING: Pattern '%s' unusual volume: %.2f (expected 0.0-2.0)\n",
- p.name.c_str(), e.volume);
+ fprintf(
+ stderr,
+ "WARNING: Pattern '%s' unusual volume: %.2f (expected 0.0-2.0)\n",
+ p.name.c_str(), e.volume);
warnings++;
}
if (e.pan < -1.0f || e.pan > 1.0f) {
@@ -275,8 +276,8 @@ void write_sanitized_track(const char* output_path, float bpm,
for (const auto& s : samples) {
fprintf(out, "SAMPLE %s", s.name.c_str());
if (s.type == GENERATED) {
- fprintf(out, ", %.1f, %.2f, %.1f, %.2f, %d, %.1f", s.freq, s.dur,
- s.amp, s.attack, s.harmonics, s.harmonic_decay);
+ fprintf(out, ", %.1f, %.2f, %.1f, %.2f, %d, %.1f", s.freq, s.dur, s.amp,
+ s.attack, s.harmonics, s.harmonic_decay);
}
fprintf(out, "\n");
}
@@ -318,10 +319,12 @@ void write_sanitized_track(const char* output_path, float bpm,
// Write resource analysis to output file
void write_resource_analysis(FILE* out, const ResourceAnalysis& analysis,
- int total_samples) {
- fprintf(out, "// ============================================================\n");
+ int total_samples) {
+ fprintf(out,
+ "// ============================================================\n");
fprintf(out, "// RESOURCE USAGE ANALYSIS (for synth.h configuration)\n");
- fprintf(out, "// ============================================================\n");
+ fprintf(out,
+ "// ============================================================\n");
fprintf(out, "// Total samples: %d (%d assets + %d generated notes)\n",
total_samples, analysis.asset_sample_count,
analysis.generated_sample_count);
@@ -343,7 +346,9 @@ void write_resource_analysis(FILE* out, const ResourceAnalysis& analysis,
fprintf(out, "// NOTE: With spectrogram caching by note parameters,\n");
fprintf(out, "// MAX_SPECTROGRAMS could be reduced to ~%d\n",
analysis.asset_sample_count + analysis.generated_sample_count);
- fprintf(out, "// ============================================================\n\n");
+ fprintf(
+ out,
+ "// ============================================================\n\n");
}
int main(int argc, char** argv) {
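As a quick worked example of the resource analysis printed above (all counts hypothetical): with 3 asset samples, 10 generated samples, and an estimated max polyphony of 4, min_spectrograms = 3 + 10 * 4 = 43, recommended_spectrograms = (int)(43 * 1.5) = 64, and recommended_voices = 4 * 2 = 8; with spectrogram caching by note parameters, the emitted note suggests roughly 3 + 10 = 13 would suffice.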