Diffstat (limited to 'src')
-rw-r--r--  src/audio/backend/wav_dump_backend.cc  |  2
-rw-r--r--  src/gpu/effect.cc                       | 22
-rw-r--r--  src/gpu/effect.h                        | 13
-rw-r--r--  src/gpu/effects/flash_effect.cc         |  2
-rw-r--r--  src/gpu/effects/post_process_helper.h   | 13
-rw-r--r--  src/gpu/effects/shader_composer.cc      |  6
-rw-r--r--  src/gpu/gpu.cc                          |  6
-rw-r--r--  src/gpu/gpu.h                           |  3
-rw-r--r--  src/gpu/headless_gpu.cc                 |  6
-rw-r--r--  src/gpu/sampler_cache.h                 |  7
-rw-r--r--  src/gpu/stub_gpu.cc                     |  6
-rw-r--r--  src/gpu/texture_readback.cc             |  1
-rw-r--r--  src/main.cc                             | 49
-rw-r--r--  src/test_demo.cc                        | 69
-rw-r--r--  src/tests/assets/test_sequence.cc       | 10
-rw-r--r--  src/tests/audio/test_wav_dump.cc        |  8
16 files changed, 126 insertions(+), 97 deletions(-)
diff --git a/src/audio/backend/wav_dump_backend.cc b/src/audio/backend/wav_dump_backend.cc
index 3f72c87..7427fa9 100644
--- a/src/audio/backend/wav_dump_backend.cc
+++ b/src/audio/backend/wav_dump_backend.cc
@@ -123,7 +123,7 @@ void WavDumpBackend::write_wav_header(FILE* file, uint32_t num_samples) {
const uint32_t bits_per_sample = 16;
const uint32_t byte_rate = sample_rate * num_channels * bits_per_sample / 8;
const uint16_t block_align = num_channels * bits_per_sample / 8;
- const uint32_t data_size = num_samples * num_channels * bits_per_sample / 8;
+ const uint32_t data_size = num_samples * bits_per_sample / 8;
// RIFF header
fwrite("RIFF", 1, 4, file);
diff --git a/src/gpu/effect.cc b/src/gpu/effect.cc
index 58e011c..3ee2acd 100644
--- a/src/gpu/effect.cc
+++ b/src/gpu/effect.cc
@@ -226,7 +226,8 @@ void MainSequence::resize(int width, int height) {
}
}
-void MainSequence::render_frame(float global_time, float beat, float peak,
+void MainSequence::render_frame(float global_time, float beat_time,
+ float beat_phase, float peak,
float aspect_ratio, WGPUSurface surface) {
WGPUCommandEncoder encoder =
wgpuDeviceCreateCommandEncoder(gpu_ctx.device, nullptr);
@@ -260,11 +261,12 @@ void MainSequence::render_frame(float global_time, float beat, float peak,
// Construct common uniforms once (reused for all effects)
CommonPostProcessUniforms base_uniforms = {
.resolution = {static_cast<float>(width_), static_cast<float>(height_)},
- ._pad = {0.0f, 0.0f},
.aspect_ratio = aspect_ratio,
.time = 0.0f, // Will be set per-effect
- .beat = beat,
+ .beat_time = beat_time,
+ .beat_phase = beat_phase,
.audio_intensity = peak,
+ ._pad = 0.0f,
};
for (const SequenceItem* item : scene_effects) {
@@ -455,13 +457,9 @@ void MainSequence::register_auxiliary_texture(const char* name, int width,
int height) {
const std::string key(name);
- // Check if already exists
+ // Check if already exists (silent, idempotent registration is valid)
auto it = auxiliary_textures_.find(key);
if (it != auxiliary_textures_.end()) {
-#if !defined(STRIP_ALL)
- fprintf(stderr, "Warning: Auxiliary texture '%s' already registered\n",
- name);
-#endif /* !defined(STRIP_ALL) */
return;
}
@@ -564,7 +562,8 @@ void MainSequence::simulate_until(float target_time, float step_rate,
for (float t = 0.0f; t < target_time; t += step_rate) {
WGPUCommandEncoder encoder =
wgpuDeviceCreateCommandEncoder(gpu_ctx.device, nullptr);
- float beat = fmodf(t * bpm / 60.0f, 1.0f);
+ float absolute_beat_time = t * bpm / 60.0f;
+ float beat_phase = fmodf(absolute_beat_time, 1.0f);
std::vector<SequenceItem*> scene_effects, post_effects;
for (ActiveSequence& entry : sequences_) {
if (t >= entry.start_time) {
@@ -575,11 +574,12 @@ void MainSequence::simulate_until(float target_time, float step_rate,
for (const SequenceItem* item : scene_effects) {
CommonPostProcessUniforms test_uniforms = {
.resolution = {static_cast<float>(width_), static_cast<float>(height_)},
- ._pad = {0.0f, 0.0f},
.aspect_ratio = aspect_ratio,
.time = t - item->start_time,
- .beat = beat,
+ .beat_time = absolute_beat_time,
+ .beat_phase = beat_phase,
.audio_intensity = 0.0f,
+ ._pad = 0.0f,
};
item->effect->compute(encoder, test_uniforms);
}
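
The beat_time/beat_phase pair introduced here separates absolute musical time from the wrapping fraction; a minimal sketch of the conversion, assuming a constant BPM as in simulate_until above:

// Minimal sketch of the beat split used throughout this change: beat_time
// grows monotonically in beats, beat_phase wraps into [0, 1) every beat.
#include <cmath>

struct BeatClock {
  float beat_time;   // absolute musical time in beats (tempo-scaled)
  float beat_phase;  // fractional position within the current beat
};

BeatClock beats_from_seconds(float audio_time_seconds, float bpm) {
  BeatClock c;
  c.beat_time = audio_time_seconds * bpm / 60.0f;
  c.beat_phase = fmodf(c.beat_time, 1.0f);
  return c;
}
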
diff --git a/src/gpu/effect.h b/src/gpu/effect.h
index ed90ac7..b9709a4 100644
--- a/src/gpu/effect.h
+++ b/src/gpu/effect.h
@@ -49,16 +49,19 @@ class Effect {
// Helper: get initialized CommonPostProcessUniforms based on current dimensions
// If aspect_ratio < 0, computes from width_/height_
- CommonPostProcessUniforms get_common_uniforms(float time = 0.0f, float beat = 0.0f,
+ CommonPostProcessUniforms get_common_uniforms(float time = 0.0f,
+ float beat_time = 0.0f,
+ float beat_phase = 0.0f,
float intensity = 0.0f,
float aspect_ratio = -1.0f) const {
return {
.resolution = {static_cast<float>(width_), static_cast<float>(height_)},
- ._pad = {0.0f, 0.0f},
.aspect_ratio = aspect_ratio < 0.0f ? static_cast<float>(width_) / static_cast<float>(height_) : aspect_ratio,
.time = time,
- .beat = beat,
+ .beat_time = beat_time,
+ .beat_phase = beat_phase,
.audio_intensity = intensity,
+ ._pad = 0.0f,
};
}
@@ -130,8 +133,8 @@ class MainSequence {
void init_test(const GpuContext& ctx);
void add_sequence(std::shared_ptr<Sequence> seq, float start_time,
int priority = 0);
- void render_frame(float global_time, float beat, float peak,
- float aspect_ratio, WGPUSurface surface);
+ void render_frame(float global_time, float beat_time, float beat_phase,
+ float peak, float aspect_ratio, WGPUSurface surface);
void resize(int width, int height);
void shutdown();
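
Callers must now pass beat_time before beat_phase; a hypothetical call site for the widened render_frame (the numeric values and include path are placeholders, not taken from the real main loop):

// Hypothetical call site for the widened signature; values and the include
// path are assumptions for illustration only.
#include "gpu/effect.h"

void example_frame(MainSequence& seq, WGPUSurface surface) {
  const float time = 1.25f;       // physical seconds since demo start
  const float beat_time = 2.5f;   // absolute beats at the current BPM
  const float beat_phase = 0.5f;  // halfway through the current beat
  const float peak = 0.3f;        // audio peak in [0, 1]
  seq.render_frame(time, beat_time, beat_phase, peak, 16.0f / 9.0f, surface);
}
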
diff --git a/src/gpu/effects/flash_effect.cc b/src/gpu/effects/flash_effect.cc
index 4357c34..e53cbce 100644
--- a/src/gpu/effects/flash_effect.cc
+++ b/src/gpu/effects/flash_effect.cc
@@ -77,7 +77,7 @@ void FlashEffect::render(WGPURenderPassEncoder pass,
// Animate color based on time and beat
const float r = params_.color[0] * (0.5f + 0.5f * sinf(uniforms.time * 0.5f));
const float g = params_.color[1] * (0.5f + 0.5f * cosf(uniforms.time * 0.7f));
- const float b = params_.color[2] * (1.0f + 0.3f * uniforms.beat);
+ const float b = params_.color[2] * (1.0f + 0.3f * uniforms.beat_phase);
// Update uniforms with computed (animated) values
const FlashUniforms u = {
diff --git a/src/gpu/effects/post_process_helper.h b/src/gpu/effects/post_process_helper.h
index 23cde0e..1c649e7 100644
--- a/src/gpu/effects/post_process_helper.h
+++ b/src/gpu/effects/post_process_helper.h
@@ -8,12 +8,13 @@
// Uniform data common to all post-processing effects
struct CommonPostProcessUniforms {
- vec2 resolution;
- float _pad[2]; // Padding for 16-byte alignment
- float aspect_ratio;
- float time;
- float beat;
- float audio_intensity;
+ vec2 resolution; // Screen dimensions
+ float aspect_ratio; // Width/height ratio
+ float time; // Physical time in seconds (unaffected by tempo)
+ float beat_time; // Musical time in beats (absolute, tempo-scaled)
+ float beat_phase; // Fractional beat (0.0-1.0 within current beat)
+ float audio_intensity;// Audio peak for beat sync
+ float _pad; // Padding for 16-byte alignment
};
static_assert(sizeof(CommonPostProcessUniforms) == 32,
"CommonPostProcessUniforms must be 32 bytes for WGSL alignment");
diff --git a/src/gpu/effects/shader_composer.cc b/src/gpu/effects/shader_composer.cc
index fe3ad74..9234b7a 100644
--- a/src/gpu/effects/shader_composer.cc
+++ b/src/gpu/effects/shader_composer.cc
@@ -89,6 +89,9 @@ ShaderComposer::Compose(const std::vector<std::string>& dependencies,
void ShaderComposer::VerifyIncludes() const {
#if !defined(STRIP_ALL)
+ // Known placeholders that get substituted at composition time
+ std::set<std::string> known_placeholders = {"render/scene_query_mode"};
+
std::set<std::string> missing;
for (const auto& [name, code] : snippets_) {
std::istringstream stream(code);
@@ -99,7 +102,8 @@ void ShaderComposer::VerifyIncludes() const {
size_t end = line.find('"', start + 1);
if (start != std::string::npos && end != std::string::npos) {
std::string included = line.substr(start + 1, end - start - 1);
- if (snippets_.find(included) == snippets_.end()) {
+ if (snippets_.find(included) == snippets_.end() &&
+ known_placeholders.find(included) == known_placeholders.end()) {
missing.insert(included);
}
}
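
The check above now tolerates placeholder includes that only exist at composition time; a simplified, standalone version of that scan (the real VerifyIncludes walks snippets_ and reports to stderr):

// Simplified standalone sketch of the include check: scan a snippet for
// #include "name" lines and flag names that are neither registered snippets
// nor known composition-time placeholders.
#include <set>
#include <sstream>
#include <string>

std::set<std::string> find_missing_includes(
    const std::string& code, const std::set<std::string>& snippets,
    const std::set<std::string>& known_placeholders) {
  std::set<std::string> missing;
  std::istringstream stream(code);
  std::string line;
  while (std::getline(stream, line)) {
    if (line.find("#include") == std::string::npos) continue;
    const size_t start = line.find('"');
    const size_t end =
        (start == std::string::npos) ? std::string::npos : line.find('"', start + 1);
    if (start == std::string::npos || end == std::string::npos) continue;
    const std::string included = line.substr(start + 1, end - start - 1);
    if (snippets.count(included) == 0 && known_placeholders.count(included) == 0) {
      missing.insert(included);
    }
  }
  return missing;
}
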
diff --git a/src/gpu/gpu.cc b/src/gpu/gpu.cc
index e89a2f0..41f5bcf 100644
--- a/src/gpu/gpu.cc
+++ b/src/gpu/gpu.cc
@@ -381,8 +381,10 @@ void gpu_init(PlatformState* platform_state) {
platform_state->height);
}
-void gpu_draw(float audio_peak, float aspect_ratio, float time, float beat) {
- g_main_sequence.render_frame(time, beat, audio_peak, aspect_ratio, g_surface);
+void gpu_draw(float audio_peak, float aspect_ratio, float time,
+ float beat_time, float beat_phase) {
+ g_main_sequence.render_frame(time, beat_time, beat_phase, audio_peak,
+ aspect_ratio, g_surface);
}
void gpu_resize(int width, int height) {
diff --git a/src/gpu/gpu.h b/src/gpu/gpu.h
index 8c59aee..c7ee89f 100644
--- a/src/gpu/gpu.h
+++ b/src/gpu/gpu.h
@@ -42,7 +42,8 @@ struct RenderPass {
class MainSequence; // Forward declaration
void gpu_init(PlatformState* platform_state);
-void gpu_draw(float audio_peak, float aspect_ratio, float time, float beat);
+void gpu_draw(float audio_peak, float aspect_ratio, float time,
+ float beat_time, float beat_phase);
void gpu_resize(int width, int height);
void gpu_shutdown();
diff --git a/src/gpu/headless_gpu.cc b/src/gpu/headless_gpu.cc
index 1a649d3..1eedc66 100644
--- a/src/gpu/headless_gpu.cc
+++ b/src/gpu/headless_gpu.cc
@@ -47,11 +47,13 @@ void gpu_init(PlatformState* platform_state) {
}
}
-void gpu_draw(float audio_peak, float aspect_ratio, float time, float beat) {
+void gpu_draw(float audio_peak, float aspect_ratio, float time,
+ float beat_time, float beat_phase) {
(void)audio_peak;
(void)aspect_ratio;
(void)time;
- (void)beat;
+ (void)beat_time;
+ (void)beat_phase;
}
void gpu_resize(int width, int height) {
diff --git a/src/gpu/sampler_cache.h b/src/gpu/sampler_cache.h
index 0f012a8..5df3958 100644
--- a/src/gpu/sampler_cache.h
+++ b/src/gpu/sampler_cache.h
@@ -58,4 +58,11 @@ public:
return {WGPUAddressMode_ClampToEdge, WGPUAddressMode_ClampToEdge,
WGPUFilterMode_Linear, WGPUFilterMode_Linear, 1};
}
+
+ void clear() {
+ for (auto& pair : cache_) {
+ wgpuSamplerRelease(pair.second);
+ }
+ cache_.clear();
+ }
};
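
clear() presumably exists so cached samplers can be released before the device is torn down; a hedged usage sketch (how the cache instance is reached here is an assumption, not the real wiring):

// Sketch of the intended teardown ordering: drop every cached WGPUSampler via
// the new clear() before releasing the device. Passing the cache by reference
// is an assumption made for illustration.
#include <webgpu/webgpu.h>
#include "gpu/sampler_cache.h"

void example_shutdown(SamplerCache& samplers, WGPUDevice device) {
  samplers.clear();           // releases all cached samplers
  wgpuDeviceRelease(device);  // no dangling sampler handles remain
}
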
diff --git a/src/gpu/stub_gpu.cc b/src/gpu/stub_gpu.cc
index 0b4185c..8d69996 100644
--- a/src/gpu/stub_gpu.cc
+++ b/src/gpu/stub_gpu.cc
@@ -41,11 +41,13 @@ void gpu_init(PlatformState* platform_state) {
(void)platform_state;
}
-void gpu_draw(float audio_peak, float aspect_ratio, float time, float beat) {
+void gpu_draw(float audio_peak, float aspect_ratio, float time,
+ float beat_time, float beat_phase) {
(void)audio_peak;
(void)aspect_ratio;
(void)time;
- (void)beat;
+ (void)beat_time;
+ (void)beat_phase;
}
void gpu_resize(int width, int height) {
diff --git a/src/gpu/texture_readback.cc b/src/gpu/texture_readback.cc
index f3e4056..e25da9e 100644
--- a/src/gpu/texture_readback.cc
+++ b/src/gpu/texture_readback.cc
@@ -71,6 +71,7 @@ std::vector<uint8_t> read_texture_pixels(
wgpuQueueSubmit(queue, 1, &commands);
wgpuCommandBufferRelease(commands);
wgpuCommandEncoderRelease(encoder);
+ wgpuQueueRelease(queue); // Release the queue reference
// Wait for copy to complete before mapping
wgpuDevicePoll(device, true, nullptr);
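
The added wgpuQueueRelease presumably balances a wgpuDeviceGetQueue call earlier in read_texture_pixels (outside this hunk); the pairing, sketched in isolation:

// Sketch of the acquire/release pairing the new line completes; the earlier
// wgpuDeviceGetQueue call is assumed from context, as it is outside this hunk.
#include <webgpu/webgpu.h>

void submit_and_release(WGPUDevice device, WGPUCommandEncoder encoder) {
  WGPUCommandBuffer commands = wgpuCommandEncoderFinish(encoder, nullptr);
  WGPUQueue queue = wgpuDeviceGetQueue(device);  // takes a queue reference
  wgpuQueueSubmit(queue, 1, &commands);
  wgpuCommandBufferRelease(commands);
  wgpuCommandEncoderRelease(encoder);
  wgpuQueueRelease(queue);  // drop the reference taken by wgpuDeviceGetQueue
}
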
diff --git a/src/main.cc b/src/main.cc
index 6132841..45a642a 100644
--- a/src/main.cc
+++ b/src/main.cc
@@ -13,6 +13,9 @@
#include "audio/backend/wav_dump_backend.h"
#include "util/file_watcher.h"
#include <vector>
+#if defined(DEMO_HEADLESS)
+#include <csignal>
+#endif
#endif
#include "generated/assets.h" // Include generated asset header
#include "gpu/demo_effects.h" // For GetDemoDuration()
@@ -24,6 +27,17 @@
#include <cstdlib>
#include <cstring>
+#if !defined(STRIP_ALL) && defined(DEMO_HEADLESS)
+static WavDumpBackend* g_wav_backend_ptr = nullptr;
+static void signal_handler(int sig) {
+ if (g_wav_backend_ptr != nullptr) {
+ g_wav_backend_ptr->shutdown();
+ g_wav_backend_ptr = nullptr;
+ }
+ exit(sig);
+}
+#endif
+
int main(int argc, char** argv) {
PlatformState platform_state;
bool fullscreen_enabled = false;
@@ -93,6 +107,11 @@ int main(int argc, char** argv) {
if (dump_wav) {
wav_backend.set_output_file(wav_output_file);
audio_set_backend(&wav_backend);
+#if defined(DEMO_HEADLESS)
+ g_wav_backend_ptr = &wav_backend;
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+#endif
printf("WAV dump mode enabled: %s\n", wav_output_file);
}
#endif
@@ -262,6 +281,9 @@ int main(int argc, char** argv) {
printf("\nWAV dump complete: %.2fs physical, %.2fs music time\n",
physical_time, g_music_time);
+#if defined(DEMO_HEADLESS)
+ g_wav_backend_ptr = nullptr;
+#endif
audio_shutdown();
gpu_shutdown();
platform_shutdown(&platform_state);
@@ -269,6 +291,7 @@ int main(int argc, char** argv) {
}
#endif
+#if !defined(DEMO_HEADLESS)
int last_width = platform_state.width;
int last_height = platform_state.height;
@@ -325,11 +348,10 @@ int main(int argc, char** argv) {
const float raw_peak = audio_get_realtime_peak();
const float visual_peak = fminf(raw_peak * 8.0f, 1.0f);
- // Beat calculation should use audio time to align with audio events.
- // The graphics loop time (current_physical_time) is used for frame rate.
- const float beat_time = current_audio_time * g_tracker_score.bpm / 60.0f;
- const int beat_number = (int)beat_time;
- const float beat = fmodf(beat_time, 1.0f); // Fractional part (0.0 to 1.0)
+ // Beat calculation: convert audio time to musical beats
+ const float absolute_beat_time = current_audio_time * g_tracker_score.bpm / 60.0f;
+ const int beat_number = (int)absolute_beat_time;
+ const float beat_phase = fmodf(absolute_beat_time, 1.0f); // Fractional part (0.0 to 1.0)
// Print beat/time info periodically for identifying sync points
// Use graphics time for the print interval to avoid excessive output if
@@ -339,20 +361,21 @@ int main(int argc, char** argv) {
0.5f) { // Print every 0.5 seconds
if (tempo_test_enabled) {
printf(
- "[GraphicsT=%.2f, AudioT=%.2f, MusicT=%.2f, Beat=%d, Frac=%.2f, "
+ "[GraphicsT=%.2f, AudioT=%.2f, MusicT=%.2f, Beat=%d, Phase=%.2f, "
"Peak=%.2f, Tempo=%.2fx]\n",
current_physical_time, current_audio_time, g_music_time,
- beat_number, beat, visual_peak, g_tempo_scale);
+ beat_number, beat_phase, visual_peak, g_tempo_scale);
} else {
- printf("[GraphicsT=%.2f, AudioT=%.2f, Beat=%d, Frac=%.2f, Peak=%.2f]\n",
- current_physical_time, current_audio_time, beat_number, beat,
+ printf("[GraphicsT=%.2f, AudioT=%.2f, Beat=%d, Phase=%.2f, Peak=%.2f]\n",
+ current_physical_time, current_audio_time, beat_number, beat_phase,
visual_peak);
}
last_graphics_print_time = current_physical_time;
}
- // Draw graphics using the graphics frame time and synchronized audio events
- gpu_draw(visual_peak, aspect_ratio, (float)current_physical_time, beat);
+ // Draw graphics using physical time and musical beat time
+ gpu_draw(visual_peak, aspect_ratio, (float)current_physical_time,
+ absolute_beat_time, beat_phase);
last_frame_time = current_physical_time;
// Update audio systems (tracker, synth, etc.) based on audio time
@@ -360,8 +383,12 @@ int main(int argc, char** argv) {
audio_update();
}
+#if !defined(STRIP_ALL) && defined(DEMO_HEADLESS)
+ g_wav_backend_ptr = nullptr;
+#endif
audio_shutdown();
gpu_shutdown();
platform_shutdown(&platform_state);
+#endif /* !defined(DEMO_HEADLESS) */
return 0;
}
\ No newline at end of file
diff --git a/src/test_demo.cc b/src/test_demo.cc
index b8e9381..9cbeae2 100644
--- a/src/test_demo.cc
+++ b/src/test_demo.cc
@@ -21,33 +21,25 @@ extern void LoadTimeline(MainSequence& main_seq, const GpuContext& ctx);
// Inline peak meter effect for debugging audio-visual sync
#include "gpu/effects/post_process_helper.h"
+#include "gpu/effects/shader_composer.h"
+
class PeakMeterEffect : public PostProcessEffect {
public:
PeakMeterEffect(const GpuContext& ctx) : PostProcessEffect(ctx) {
- // Use standard post-process binding macros
- const char* shader_code = R"(
+ // Use ShaderComposer to include CommonUniforms from common_uniforms.wgsl
+ const char* shader_main = R"(
struct VertexOutput {
@builtin(position) position: vec4<f32>,
@location(0) uv: vec2<f32>,
};
- struct Uniforms {
- resolution: vec2<f32>,
- _pad0: f32,
- _pad1: f32,
- aspect_ratio: f32,
- time: f32,
- beat: f32,
- audio_intensity: f32,
- };
-
struct EffectParams {
unused: f32,
};
@group(0) @binding(0) var inputSampler: sampler;
@group(0) @binding(1) var inputTexture: texture_2d<f32>;
- @group(0) @binding(2) var<uniform> uniforms: Uniforms;
+ @group(0) @binding(2) var<uniform> uniforms: CommonUniforms;
@group(0) @binding(3) var<uniform> params: EffectParams;
@vertex
@@ -86,32 +78,23 @@ class PeakMeterEffect : public PostProcessEffect {
}
)";
+ // Compose shader with common_uniforms to get CommonUniforms definition
+ std::string shader_code = ShaderComposer::Get().Compose(
+ {"common_uniforms"}, shader_main);
+
pipeline_ =
- create_post_process_pipeline(ctx_.device, ctx_.format, shader_code);
+ create_post_process_pipeline(ctx_.device, ctx_.format, shader_code.c_str());
}
- void update_bind_group(WGPUTextureView input_view) {
+ void update_bind_group(WGPUTextureView input_view) override {
pp_update_bind_group(ctx_.device, pipeline_, &bind_group_, input_view,
uniforms_.get(), {});
}
- void render(WGPURenderPassEncoder pass, float time, float beat,
- float peak_value, float aspect_ratio) {
- (void)time;
- (void)beat;
-
- CommonPostProcessUniforms u = {
- .resolution = {(float)width_, (float)height_},
- .aspect_ratio = aspect_ratio,
- .time = time,
- .beat = beat,
- .audio_intensity = peak_value,
- };
- uniforms_.update(ctx_.queue, u);
-
- wgpuRenderPassEncoderSetPipeline(pass, pipeline_);
- wgpuRenderPassEncoderSetBindGroup(pass, 0, bind_group_, 0, nullptr);
- wgpuRenderPassEncoderDraw(pass, 3, 1, 0, 0); // Full-screen triangle
+ void render(WGPURenderPassEncoder pass,
+ const CommonPostProcessUniforms& uniforms) override {
+ uniforms_.update(ctx_.queue, uniforms);
+ PostProcessEffect::render(pass, uniforms);
}
};
@@ -347,11 +330,10 @@ int main(int argc, char** argv) {
const float raw_peak = audio_get_realtime_peak();
const float visual_peak = fminf(raw_peak * 8.0f, 1.0f);
- // Beat calculation should use audio time to align with audio events.
- // The graphics loop time (current_physical_time) is used for frame rate.
- const float beat_time = current_audio_time * g_tracker_score.bpm / 60.0f;
- const int beat_number = (int)beat_time;
- const float beat = fmodf(beat_time, 1.0f); // Fractional part (0.0 to 1.0)
+ // Beat calculation: convert audio time to musical beats
+ const float absolute_beat_time = current_audio_time * g_tracker_score.bpm / 60.0f;
+ const int beat_number = (int)absolute_beat_time;
+ const float beat_phase = fmodf(absolute_beat_time, 1.0f); // Fractional part (0.0 to 1.0)
#if !defined(STRIP_ALL)
// Log peak (either per-frame or per-beat)
@@ -377,22 +359,23 @@ int main(int argc, char** argv) {
if (current_physical_time - last_graphics_print_time >= 0.5f) {
if (tempo_test_enabled) {
printf(
- "[GraphicsT=%.2f, AudioT=%.2f, MusicT=%.2f, Beat=%d, Frac=%.2f, "
+ "[GraphicsT=%.2f, AudioT=%.2f, MusicT=%.2f, Beat=%d, Phase=%.2f, "
"Peak=%.2f, Tempo=%.2fx]\n",
current_physical_time, current_audio_time, g_music_time,
- beat_number, beat, visual_peak, g_tempo_scale);
+ beat_number, beat_phase, visual_peak, g_tempo_scale);
} else {
- printf("[GraphicsT=%.2f, AudioT=%.2f, Beat=%d, Frac=%.2f, Peak=%.2f]\n",
- current_physical_time, current_audio_time, beat_number, beat,
+ printf("[GraphicsT=%.2f, AudioT=%.2f, Beat=%d, Phase=%.2f, Peak=%.2f]\n",
+ current_physical_time, current_audio_time, beat_number, beat_phase,
visual_peak);
}
last_graphics_print_time = current_physical_time;
}
#endif
- // Draw graphics using the graphics frame time and synchronized audio events
+ // Draw graphics using physical time and musical beat time
const float graphics_frame_time = (float)current_physical_time;
- gpu_draw(visual_peak, aspect_ratio, graphics_frame_time, beat);
+ gpu_draw(visual_peak, aspect_ratio, graphics_frame_time,
+ absolute_beat_time, beat_phase);
// Update audio systems (tracker, synth, etc.) based on audio time
// progression
diff --git a/src/tests/assets/test_sequence.cc b/src/tests/assets/test_sequence.cc
index 44aac46..157b462 100644
--- a/src/tests/assets/test_sequence.cc
+++ b/src/tests/assets/test_sequence.cc
@@ -96,7 +96,7 @@ void test_effect_lifecycle() {
main_seq.add_sequence(seq1, 0.0f, 0);
// Before effect starts
- main_seq.render_frame(0.5f, 0, 0, 1.0f,
+ main_seq.render_frame(0.5f, 0, 0, 0, 1.0f,
dummy_surface); // This will still call real render, but
// test counts only init
assert(effect1->init_calls == 1);
@@ -105,26 +105,26 @@ void test_effect_lifecycle() {
assert(effect1->end_calls == 0);
// Effect starts
- main_seq.render_frame(1.0f, 0, 0, 1.0f, dummy_surface);
+ main_seq.render_frame(1.0f, 0, 0, 0, 1.0f, dummy_surface);
assert(effect1->start_calls == 1);
// assert(effect1->render_calls == 1); // No longer checking render calls
// directly from here
assert(effect1->end_calls == 0);
// During effect
- main_seq.render_frame(2.0f, 0, 0, 1.0f, dummy_surface);
+ main_seq.render_frame(2.0f, 0, 0, 0, 1.0f, dummy_surface);
assert(effect1->start_calls == 1);
// assert(effect1->render_calls == 2);
assert(effect1->end_calls == 0);
// Effect ends
- main_seq.render_frame(3.0f, 0, 0, 1.0f, dummy_surface);
+ main_seq.render_frame(3.0f, 0, 0, 0, 1.0f, dummy_surface);
assert(effect1->start_calls == 1);
// assert(effect1->render_calls == 2); // Render not called on end frame
assert(effect1->end_calls == 1);
// After effect ends
- main_seq.render_frame(3.5f, 0, 0, 1.0f, dummy_surface);
+ main_seq.render_frame(3.5f, 0, 0, 0, 1.0f, dummy_surface);
assert(effect1->start_calls == 1);
// assert(effect1->render_calls == 2);
assert(effect1->end_calls == 1);
diff --git a/src/tests/audio/test_wav_dump.cc b/src/tests/audio/test_wav_dump.cc
index eb14652..85b168d 100644
--- a/src/tests/audio/test_wav_dump.cc
+++ b/src/tests/audio/test_wav_dump.cc
@@ -134,12 +134,8 @@ void test_wav_format_matches_live_audio() {
const uint32_t expected_min_size = expected_bytes_per_sec * 1.5;
const uint32_t expected_max_size = expected_bytes_per_sec * 2.5;
- // For now, accept if stereo format is correct (main regression test goal)
- if (header.data_size < expected_min_size ||
- header.data_size > expected_max_size) {
- printf(" WARNING: Data size outside expected range\n");
- // Don't fail on this for now - stereo format is the critical check
- }
+ assert(header.data_size >= expected_min_size);
+ assert(header.data_size <= expected_max_size);
// Verify file contains actual audio data (not all zeros)
fseek(f, sizeof(WavHeader), SEEK_SET);