Diffstat (limited to 'src/audio')
-rw-r--r--  src/audio/audio.cc                      |   8
-rw-r--r--  src/audio/audio.h                       |   6
-rw-r--r--  src/audio/backend/miniaudio_backend.cc  |  14
-rw-r--r--  src/audio/backend/silent_backend.cc     |   2
-rw-r--r--  src/audio/backend/silent_backend.h      |  20
-rw-r--r--  src/audio/backend/wav_dump_backend.cc   |   3
-rw-r--r--  src/audio/fft.cc                        |   8
-rw-r--r--  src/audio/fft.h                         |   5
-rw-r--r--  src/audio/gen.cc                        |  16
-rw-r--r--  src/audio/ring_buffer.cc                |   5
-rw-r--r--  src/audio/ring_buffer.h                 |   7
-rw-r--r--  src/audio/spectral_brush.cc             | 145
-rw-r--r--  src/audio/spectral_brush.h              |  56
-rw-r--r--  src/audio/synth.cc                      |   5
-rw-r--r--  src/audio/tracker.cc                    |  37
-rw-r--r--  src/audio/tracker.h                     |   9
16 files changed, 177 insertions(+), 169 deletions(-)
diff --git a/src/audio/audio.cc b/src/audio/audio.cc
index 67345cf..2d667bc 100644
--- a/src/audio/audio.cc
+++ b/src/audio/audio.cc
@@ -1,6 +1,6 @@
// This file is part of the 64k demo project.
// It manages the low-level audio device and high-level audio state.
-// Now uses backend abstraction for testability.
+// Uses backend abstraction for testability.
#include "audio.h"
#include "audio_backend.h"
@@ -172,6 +172,12 @@ float audio_get_playback_time() {
(RING_BUFFER_SAMPLE_RATE * RING_BUFFER_CHANNELS);
}
+float audio_get_render_time() {
+ const int64_t total_samples = g_ring_buffer.get_total_written();
+ return (float)total_samples /
+ (RING_BUFFER_SAMPLE_RATE * RING_BUFFER_CHANNELS);
+}
+
float audio_get_realtime_peak() {
if (g_audio_backend == nullptr) {
return 0.0f;
diff --git a/src/audio/audio.h b/src/audio/audio.h
index 14fe615..e063a57 100644
--- a/src/audio/audio.h
+++ b/src/audio/audio.h
@@ -27,8 +27,14 @@ void audio_start(); // Starts the audio device callback
void audio_render_ahead(float music_time, float dt);
// Get current playback time (in seconds) based on samples consumed
+// This is the ring buffer READ position (what's being played NOW)
float audio_get_playback_time();
+// Get current render time (in seconds) based on samples written
+// This is the ring buffer WRITE position (where we're currently rendering)
+// Use this for calculating sample-accurate trigger offsets
+float audio_get_render_time();
+
// Get peak amplitude of samples currently being played (real-time sync)
// Returns: Peak amplitude in range [0.0, 1.0+]
// Use this for visual effects to ensure audio-visual synchronization
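A minimal caller-side sketch of the offset calculation the comment above describes. offset_for_event() is a hypothetical helper, not part of this diff; the 32 kHz rate matches the SAMPLE_RATE constants used elsewhere in this directory, and whether the synth expects frames or interleaved samples is not visible here.

#include "audio.h"

// Hypothetical helper, assuming events and render time share the same
// (non tempo-scaled) clock: convert an absolute event time into a sample
// offset relative to the ring buffer's write (render) position.
static int offset_for_event(float event_time_sec) {
  const float render_time_sec = audio_get_render_time();  // write position
  const float delta_sec = event_time_sec - render_time_sec;
  int offset = (int)(delta_sec * 32000.0f);                // samples @ 32 kHz
  return offset > 0 ? offset : 0;                          // late -> fire now
}
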
diff --git a/src/audio/backend/miniaudio_backend.cc b/src/audio/backend/miniaudio_backend.cc
index 3be9fb0..ffa0852 100644
--- a/src/audio/backend/miniaudio_backend.cc
+++ b/src/audio/backend/miniaudio_backend.cc
@@ -68,10 +68,10 @@ void MiniaudioBackend::audio_callback(ma_device* pDevice, void* pOutput,
// Check for re-entrant calls
FATAL_CODE_BEGIN
- if (callback_reentry > 0) {
- FATAL_ERROR("Callback re-entered! depth=%d", callback_reentry);
- }
- callback_reentry++;
+ if (callback_reentry > 0) {
+ FATAL_ERROR("Callback re-entered! depth=%d", callback_reentry);
+ }
+ callback_reentry++;
FATAL_CODE_END
// Check if frameCount changed unexpectedly
@@ -162,16 +162,16 @@ void MiniaudioBackend::audio_callback(ma_device* pDevice, void* pOutput,
// (At 128ms callbacks: 0.5^3.9 ≈ 0.07 after ~500ms = 1 beat)
// TODO: Make decay rate configurable based on BPM from tracker/MainSequence
if (frame_peak > realtime_peak_) {
- realtime_peak_ = frame_peak; // Attack: instant
+ realtime_peak_ = frame_peak; // Attack: instant
} else {
- realtime_peak_ *= 0.5f; // Decay: 50% per callback
+ realtime_peak_ *= 0.5f; // Decay: 50% per callback
}
}
#if defined(DEBUG_LOG_AUDIO)
// Clear reentry flag
FATAL_CODE_BEGIN
- callback_reentry--;
+ callback_reentry--;
FATAL_CODE_END
#endif /* defined(DEBUG_LOG_AUDIO) */
}
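A tiny sketch of the decay arithmetic quoted in the comment above. The 128 ms callback period and the "~500 ms = 1 beat" figure come from that comment; the helper itself is illustrative, not backend code.

#include <cmath>

// Peak remaining after `seconds` of silence, given a 50% decay per callback.
static float decayed_peak(float peak, float seconds) {
  const float callback_period_sec = 0.128f;               // ~128 ms callbacks
  const float callbacks = seconds / callback_period_sec;  // ≈ 3.9 for 0.5 s
  return peak * powf(0.5f, callbacks);                    // 1.0 -> ≈ 0.07
}
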
diff --git a/src/audio/backend/silent_backend.cc b/src/audio/backend/silent_backend.cc
index 637dd68..6615eff 100644
--- a/src/audio/backend/silent_backend.cc
+++ b/src/audio/backend/silent_backend.cc
@@ -33,7 +33,7 @@ float SilentBackend::get_realtime_peak() {
}
void SilentBackend::on_voice_triggered(float timestamp, int spectrogram_id,
- float volume, float pan) {
+ float volume, float pan) {
// Track voice triggers for testing
(void)timestamp;
(void)spectrogram_id;
diff --git a/src/audio/backend/silent_backend.h b/src/audio/backend/silent_backend.h
index f7da42d..2d52858 100644
--- a/src/audio/backend/silent_backend.h
+++ b/src/audio/backend/silent_backend.h
@@ -24,13 +24,23 @@ class SilentBackend : public AudioBackend {
float get_realtime_peak() override;
// Test inspection interface
- bool is_initialized() const { return initialized_; }
- bool is_started() const { return started_; }
- int get_frames_rendered() const { return frames_rendered_.load(); }
- int get_voice_trigger_count() const { return voice_trigger_count_.load(); }
+ bool is_initialized() const {
+ return initialized_;
+ }
+ bool is_started() const {
+ return started_;
+ }
+ int get_frames_rendered() const {
+ return frames_rendered_.load();
+ }
+ int get_voice_trigger_count() const {
+ return voice_trigger_count_.load();
+ }
// Manual control for testing edge cases
- void set_peak(float peak) { test_peak_ = peak; }
+ void set_peak(float peak) {
+ test_peak_ = peak;
+ }
void reset_stats() {
frames_rendered_ = 0;
voice_trigger_count_ = 0;
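A hypothetical test sketch for the inspection hooks above. The harness function and the include path are illustrative; only the SilentBackend calls themselves come from this diff.

#include <cassert>
#include "silent_backend.h"

static void silent_backend_smoke_test() {
  SilentBackend backend;
  backend.reset_stats();     // counters back to zero
  backend.set_peak(0.25f);   // inject a fake peak for visual-sync tests
  // ... drive rendering through the backend here ...
  assert(backend.get_frames_rendered() >= 0);
  assert(backend.get_voice_trigger_count() >= 0);
}
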
diff --git a/src/audio/backend/wav_dump_backend.cc b/src/audio/backend/wav_dump_backend.cc
index 1158fb2..3f72c87 100644
--- a/src/audio/backend/wav_dump_backend.cc
+++ b/src/audio/backend/wav_dump_backend.cc
@@ -57,7 +57,8 @@ void WavDumpBackend::write_audio(const float* samples, int num_samples) {
// - MiniaudioBackend passes float samples directly to miniaudio without
// clamping (see miniaudio_backend.cc:140)
// - Miniaudio internally converts float→int16 and handles overflow
- // - We replicate this: no clamping, count out-of-range samples for diagnostics
+ // - We replicate this: no clamping, count out-of-range samples for
+ // diagnostics
//
// If miniaudio's sample handling changes (e.g., they add clamping or
// different overflow behavior), this code MUST be updated to match.
diff --git a/src/audio/fft.cc b/src/audio/fft.cc
index 3f8e706..64d7b1a 100644
--- a/src/audio/fft.cc
+++ b/src/audio/fft.cc
@@ -109,7 +109,7 @@ void dct_fft(const float* input, float* output, size_t N) {
// Reorder input: even indices first, then odd indices reversed
// [x[0], x[2], x[4], ...] followed by [x[N-1], x[N-3], x[N-5], ...]
for (size_t i = 0; i < N / 2; i++) {
- real[i] = input[2 * i]; // Even indices: 0, 2, 4, ...
+ real[i] = input[2 * i]; // Even indices: 0, 2, 4, ...
real[N - 1 - i] = input[2 * i + 1]; // Odd indices reversed: N-1, N-3, ...
}
memset(imag, 0, N * sizeof(float));
@@ -153,7 +153,7 @@ void idct_fft(const float* input, float* output, size_t N) {
// FFT[k] = DCT[k] * exp(+j*pi*k/(2*N)) / normalization
// Note: DCT-III (inverse of DCT-II) requires factor of 2 for AC terms
for (size_t k = 0; k < N; k++) {
- const float angle = PI * k / (2.0f * N); // Positive angle for inverse
+ const float angle = PI * k / (2.0f * N); // Positive angle for inverse
const float wr = cosf(angle);
const float wi = sinf(angle);
@@ -178,8 +178,8 @@ void idct_fft(const float* input, float* output, size_t N) {
// Even output indices come from first half of FFT output
// Odd output indices come from second half (reversed)
for (size_t i = 0; i < N / 2; i++) {
- output[2 * i] = real[i]; // Even positions
- output[2 * i + 1] = real[N - 1 - i]; // Odd positions (reversed)
+ output[2 * i] = real[i]; // Even positions
+ output[2 * i + 1] = real[N - 1 - i]; // Odd positions (reversed)
}
delete[] real;
diff --git a/src/audio/fft.h b/src/audio/fft.h
index 81a12d4..8c10afd 100644
--- a/src/audio/fft.h
+++ b/src/audio/fft.h
@@ -1,6 +1,7 @@
// Fast Fourier Transform (FFT) implementation
// Radix-2 Cooley-Tukey algorithm for power-of-2 sizes
-// This implementation matches the JavaScript version in tools/spectral_editor/dct.js
+// This implementation matches the JavaScript version in
+// tools/spectral_editor/dct.js
#ifndef AUDIO_FFT_H_
#define AUDIO_FFT_H_
@@ -31,4 +32,4 @@ void dct_fft(const float* input, float* output, size_t N);
// N must be a power of 2
void idct_fft(const float* input, float* output, size_t N);
-#endif /* AUDIO_FFT_H_ */
+#endif /* AUDIO_FFT_H_ */
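A small round-trip sketch for the transform pair declared here. N, the test signal, and the print loop are illustrative; whether the round trip reproduces the input exactly depends on the pair's normalization, which the gen.cc comments below describe as orthonormal.

#include <cmath>
#include <cstdio>
#include "fft.h"

int main() {
  const size_t N = 8;                    // must be a power of 2
  float x[N], spec[N], back[N];
  for (size_t i = 0; i < N; ++i) x[i] = sinf(0.3f * (float)i);
  dct_fft(x, spec, N);                   // forward DCT-II via FFT
  idct_fft(spec, back, N);               // inverse (DCT-III)
  for (size_t i = 0; i < N; ++i)
    printf("%zu: %+.5f -> %+.5f\n", i, x[i], back[i]);
  return 0;
}
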
diff --git a/src/audio/gen.cc b/src/audio/gen.cc
index 0757b4d..cd36d54 100644
--- a/src/audio/gen.cc
+++ b/src/audio/gen.cc
@@ -70,15 +70,17 @@ std::vector<float> generate_note_spectrogram(const NoteParams& params,
fdct_512(pcm_chunk, dct_chunk);
// Scale up to compensate for orthonormal normalization
- // Old non-orthonormal DCT had no sqrt scaling, so output was ~sqrt(N/2) larger
- // Scale factor: sqrt(DCT_SIZE / 2) = sqrt(256) = 16
+ // Old non-orthonormal DCT had no sqrt scaling, so output was ~sqrt(N/2)
+  // larger. Scale factor: sqrt(DCT_SIZE / 2) = sqrt(256) = 16
//
- // HOWEVER: After removing synthesis windowing (commit f998bfc), audio is louder.
- // The old synthesis incorrectly applied Hamming window to spectrum (reducing energy by 0.63x).
- // New synthesis is correct (no window), but procedural notes with 16x scaling are too loud.
+ // HOWEVER: After removing synthesis windowing (commit f998bfc), audio is
+ // louder. The old synthesis incorrectly applied Hamming window to spectrum
+ // (reducing energy by 0.63x). New synthesis is correct (no window), but
+ // procedural notes with 16x scaling are too loud.
//
- // Analysis applies Hamming window (0.63x energy). With 16x scaling: 0.63 × 16 ≈ 10x.
- // Divide by 2.5 to match the relative loudness increase: 16 / 2.5 = 6.4
+ // Analysis applies Hamming window (0.63x energy). With 16x scaling: 0.63 ×
+ // 16 ≈ 10x. Divide by 2.5 to match the relative loudness increase: 16 / 2.5
+ // = 6.4
const float scale_factor = sqrtf(DCT_SIZE / 2.0f) / 2.5f;
// Copy to buffer with scaling
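The arithmetic behind scale_factor, written out. DCT_SIZE == 512 is inferred from the sqrt(256) = 16 step in the comment above; the last line simply restates the expression in the hunk.

  // sqrt(DCT_SIZE / 2) = sqrt(512 / 2) = sqrt(256) = 16  (orthonormal compensation)
  // 16 / 2.5 = 6.4                                       (loudness trim after f998bfc)
  const float scale_factor = sqrtf(512 / 2.0f) / 2.5f;    // ≈ 6.4
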
diff --git a/src/audio/ring_buffer.cc b/src/audio/ring_buffer.cc
index a7e5d9e..7cedb56 100644
--- a/src/audio/ring_buffer.cc
+++ b/src/audio/ring_buffer.cc
@@ -9,7 +9,7 @@
AudioRingBuffer::AudioRingBuffer()
: capacity_(RING_BUFFER_CAPACITY_SAMPLES), write_pos_(0), read_pos_(0),
- total_read_(0) {
+ total_read_(0), total_written_(0) {
memset(buffer_, 0, sizeof(buffer_));
}
@@ -81,6 +81,9 @@ int AudioRingBuffer::write(const float* samples, int count) {
write_pos_.store(remainder, std::memory_order_release);
}
+ // Track total samples written for render timing
+ total_written_.fetch_add(to_write, std::memory_order_release);
+
return to_write;
}
diff --git a/src/audio/ring_buffer.h b/src/audio/ring_buffer.h
index b19c1ea..80b375f 100644
--- a/src/audio/ring_buffer.h
+++ b/src/audio/ring_buffer.h
@@ -42,6 +42,11 @@ class AudioRingBuffer {
return total_read_.load(std::memory_order_acquire);
}
+ // Get total samples written (for render timing)
+ int64_t get_total_written() const {
+ return total_written_.load(std::memory_order_acquire);
+ }
+
// Clear buffer (for seeking)
void clear();
@@ -52,4 +57,6 @@ class AudioRingBuffer {
std::atomic<int> write_pos_; // Write position (0 to capacity-1)
std::atomic<int> read_pos_; // Read position (0 to capacity-1)
std::atomic<int64_t> total_read_; // Total samples read (for playback time)
+ std::atomic<int64_t>
+ total_written_; // Total samples written (for render timing)
};
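A sketch of how the two monotonic counters relate. It assumes the read-side accessor is named get_total_read(), mirroring get_total_written() (only the latter appears in this diff), and that the RING_BUFFER_* constants used in audio.cc are visible here.

#include "ring_buffer.h"

// Seconds of audio rendered ahead of the device callback.
static float buffered_seconds(const AudioRingBuffer& rb) {
  const int64_t pending = rb.get_total_written() - rb.get_total_read();
  return (float)pending / (RING_BUFFER_SAMPLE_RATE * RING_BUFFER_CHANNELS);
}
// audio_get_render_time() - audio_get_playback_time() yields the same value.
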
diff --git a/src/audio/spectral_brush.cc b/src/audio/spectral_brush.cc
index c6eb64d..000b258 100644
--- a/src/audio/spectral_brush.cc
+++ b/src/audio/spectral_brush.cc
@@ -1,6 +1,7 @@
// This file is part of the 64k demo project.
// It implements the "Spectral Brush" primitive for procedural audio generation.
-// Implementation of Bezier curves, vertical profiles, and spectrogram rendering.
+// Implementation of Bezier curves, vertical profiles, and spectrogram
+// rendering.
#include "spectral_brush.h"
@@ -11,9 +12,8 @@ static const float SAMPLE_RATE = 32000.0f;
// Evaluate linear Bezier interpolation between control points
float evaluate_bezier_linear(const float* control_frames,
- const float* control_values,
- int n_points,
- float frame) {
+ const float* control_values, int n_points,
+ float frame) {
if (n_points == 0) {
return 0.0f;
}
@@ -33,8 +33,8 @@ float evaluate_bezier_linear(const float* control_frames,
for (int i = 0; i < n_points - 1; ++i) {
if (frame >= control_frames[i] && frame <= control_frames[i + 1]) {
// Linear interpolation: value = v0 + (v1 - v0) * t
- const float t =
- (frame - control_frames[i]) / (control_frames[i + 1] - control_frames[i]);
+ const float t = (frame - control_frames[i]) /
+ (control_frames[i + 1] - control_frames[i]);
return control_values[i] * (1.0f - t) + control_values[i + 1] * t;
}
}
@@ -49,72 +49,67 @@ uint32_t spectral_brush_rand(uint32_t seed) {
// X_{n+1} = (a * X_n + c) mod m
const uint32_t a = 1664525;
const uint32_t c = 1013904223;
- return a * seed + c; // Implicit mod 2^32
+ return a * seed + c; // Implicit mod 2^32
}
// Evaluate vertical profile at distance from curve center
-float evaluate_profile(ProfileType type, float distance, float param1, float param2) {
+float evaluate_profile(ProfileType type, float distance, float param1,
+ float param2) {
switch (type) {
- case PROFILE_GAUSSIAN: {
- // Gaussian: exp(-(dist^2 / sigma^2))
- // param1 = sigma (width in bins)
- const float sigma = param1;
- if (sigma <= 0.0f) {
- return 0.0f;
- }
- return expf(-(distance * distance) / (sigma * sigma));
+ case PROFILE_GAUSSIAN: {
+ // Gaussian: exp(-(dist^2 / sigma^2))
+ // param1 = sigma (width in bins)
+ const float sigma = param1;
+ if (sigma <= 0.0f) {
+ return 0.0f;
}
+ return expf(-(distance * distance) / (sigma * sigma));
+ }
- case PROFILE_DECAYING_SINUSOID: {
- // Decaying sinusoid: exp(-decay * dist) * cos(omega * dist)
- // param1 = decay rate
- // param2 = oscillation frequency (omega)
- const float decay = param1;
- const float omega = param2;
- const float envelope = expf(-decay * distance);
- const float oscillation = cosf(omega * distance);
- return envelope * oscillation;
- }
+ case PROFILE_DECAYING_SINUSOID: {
+ // Decaying sinusoid: exp(-decay * dist) * cos(omega * dist)
+ // param1 = decay rate
+ // param2 = oscillation frequency (omega)
+ const float decay = param1;
+ const float omega = param2;
+ const float envelope = expf(-decay * distance);
+ const float oscillation = cosf(omega * distance);
+ return envelope * oscillation;
+ }
- case PROFILE_NOISE: {
- // Random noise: deterministic RNG based on distance
- // param1 = amplitude scale
- // param2 = seed
- const float amplitude = param1;
- const uint32_t seed = (uint32_t)(param2) + (uint32_t)(distance * 1000.0f);
- const uint32_t rand_val = spectral_brush_rand(seed);
- // Map to [0, 1]
- const float normalized = (float)(rand_val % 10000) / 10000.0f;
- return amplitude * normalized;
- }
+ case PROFILE_NOISE: {
+ // Random noise: deterministic RNG based on distance
+ // param1 = amplitude scale
+ // param2 = seed
+ const float amplitude = param1;
+ const uint32_t seed = (uint32_t)(param2) + (uint32_t)(distance * 1000.0f);
+ const uint32_t rand_val = spectral_brush_rand(seed);
+ // Map to [0, 1]
+ const float normalized = (float)(rand_val % 10000) / 10000.0f;
+ return amplitude * normalized;
+ }
}
return 0.0f;
}
// Internal implementation: Render Bezier curve with profile
-static void draw_bezier_curve_impl(float* spectrogram,
- int dct_size,
- int num_frames,
- const float* control_frames,
- const float* control_freqs_hz,
- const float* control_amps,
- int n_control_points,
- ProfileType profile_type,
- float profile_param1,
- float profile_param2,
- bool additive) {
+static void draw_bezier_curve_impl(
+ float* spectrogram, int dct_size, int num_frames,
+ const float* control_frames, const float* control_freqs_hz,
+ const float* control_amps, int n_control_points, ProfileType profile_type,
+ float profile_param1, float profile_param2, bool additive) {
if (n_control_points < 1) {
- return; // Nothing to draw
+ return; // Nothing to draw
}
// For each frame in the spectrogram
for (int f = 0; f < num_frames; ++f) {
// 1. Evaluate Bezier curve at this frame
- const float freq_hz =
- evaluate_bezier_linear(control_frames, control_freqs_hz, n_control_points, (float)f);
- const float amplitude =
- evaluate_bezier_linear(control_frames, control_amps, n_control_points, (float)f);
+ const float freq_hz = evaluate_bezier_linear(
+ control_frames, control_freqs_hz, n_control_points, (float)f);
+ const float amplitude = evaluate_bezier_linear(control_frames, control_amps,
+ n_control_points, (float)f);
// 2. Convert frequency (Hz) to frequency bin index
// Nyquist frequency = SAMPLE_RATE / 2
@@ -125,7 +120,8 @@ static void draw_bezier_curve_impl(float* spectrogram,
// 3. Apply vertical profile around freq_bin_0
for (int b = 0; b < dct_size; ++b) {
const float dist = fabsf(b - freq_bin_0);
- const float profile_val = evaluate_profile(profile_type, dist, profile_param1, profile_param2);
+ const float profile_val =
+ evaluate_profile(profile_type, dist, profile_param1, profile_param2);
const float contribution = amplitude * profile_val;
const int idx = f * dct_size + b;
@@ -139,33 +135,24 @@ static void draw_bezier_curve_impl(float* spectrogram,
}
// Draw spectral brush (overwrites spectrogram content)
-void draw_bezier_curve(float* spectrogram,
- int dct_size,
- int num_frames,
+void draw_bezier_curve(float* spectrogram, int dct_size, int num_frames,
const float* control_frames,
- const float* control_freqs_hz,
- const float* control_amps,
- int n_control_points,
- ProfileType profile_type,
- float profile_param1,
- float profile_param2) {
- draw_bezier_curve_impl(spectrogram, dct_size, num_frames, control_frames, control_freqs_hz,
- control_amps, n_control_points, profile_type, profile_param1,
- profile_param2, false);
+ const float* control_freqs_hz, const float* control_amps,
+ int n_control_points, ProfileType profile_type,
+ float profile_param1, float profile_param2) {
+ draw_bezier_curve_impl(spectrogram, dct_size, num_frames, control_frames,
+ control_freqs_hz, control_amps, n_control_points,
+ profile_type, profile_param1, profile_param2, false);
}
// Draw spectral brush (adds to existing spectrogram content)
-void draw_bezier_curve_add(float* spectrogram,
- int dct_size,
- int num_frames,
- const float* control_frames,
- const float* control_freqs_hz,
- const float* control_amps,
- int n_control_points,
- ProfileType profile_type,
- float profile_param1,
- float profile_param2) {
- draw_bezier_curve_impl(spectrogram, dct_size, num_frames, control_frames, control_freqs_hz,
- control_amps, n_control_points, profile_type, profile_param1,
- profile_param2, true);
+void draw_bezier_curve_add(float* spectrogram, int dct_size, int num_frames,
+ const float* control_frames,
+ const float* control_freqs_hz,
+ const float* control_amps, int n_control_points,
+ ProfileType profile_type, float profile_param1,
+ float profile_param2) {
+ draw_bezier_curve_impl(spectrogram, dct_size, num_frames, control_frames,
+ control_freqs_hz, control_amps, n_control_points,
+ profile_type, profile_param1, profile_param2, true);
}
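A usage sketch for the brush API, compositing a Gaussian sweep with a noise layer via the additive variant. Buffer size, frequencies, and profile parameters are illustrative, not the project's presets.

#include <cstring>
#include "spectral_brush.h"

static void paint_example(float* spectrogram, int dct_size, int num_frames) {
  memset(spectrogram, 0, sizeof(float) * dct_size * num_frames);
  const float frames[2] = {0.0f, (float)(num_frames - 1)};
  const float freqs[2]  = {4000.0f, 500.0f};  // downward sweep in Hz
  const float amps[2]   = {1.0f, 0.2f};       // fade out along the sweep
  // Base layer: smooth Gaussian ridge around the curve (sigma = 2 bins).
  draw_bezier_curve(spectrogram, dct_size, num_frames, frames, freqs, amps,
                    2, PROFILE_GAUSSIAN, 2.0f);
  // Grit layer: low-amplitude noise added along the same curve (seed = 7).
  draw_bezier_curve_add(spectrogram, dct_size, num_frames, frames, freqs,
                        amps, 2, PROFILE_NOISE, 0.1f, 7.0f);
}
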
diff --git a/src/audio/spectral_brush.h b/src/audio/spectral_brush.h
index 3125f35..be27bbb 100644
--- a/src/audio/spectral_brush.h
+++ b/src/audio/spectral_brush.h
@@ -1,6 +1,7 @@
// This file is part of the 64k demo project.
// It implements the "Spectral Brush" primitive for procedural audio generation.
-// Spectral brushes trace Bezier curves through spectrograms with vertical profiles.
+// Spectral brushes trace Bezier curves through spectrograms with vertical
+// profiles.
#pragma once
@@ -8,9 +9,9 @@
// Profile types for vertical distribution around central Bezier curve
enum ProfileType {
- PROFILE_GAUSSIAN = 0, // Smooth harmonic falloff
- PROFILE_DECAYING_SINUSOID = 1, // Resonant/metallic texture
- PROFILE_NOISE = 2 // Random texture/grit
+ PROFILE_GAUSSIAN = 0, // Smooth harmonic falloff
+ PROFILE_DECAYING_SINUSOID = 1, // Resonant/metallic texture
+ PROFILE_NOISE = 2 // Random texture/grit
};
// Evaluate linear Bezier interpolation at given frame
@@ -18,11 +19,11 @@ enum ProfileType {
// control_values: Array of values at control points (freq_hz or amplitude)
// n_points: Number of control points
// frame: Frame number to evaluate at
-// Returns: Interpolated value at frame (linearly interpolated between control points)
+// Returns: Interpolated value at frame (linearly interpolated between control
+// points)
float evaluate_bezier_linear(const float* control_frames,
- const float* control_values,
- int n_points,
- float frame);
+ const float* control_values, int n_points,
+ float frame);
// Draw a spectral brush stroke onto a spectrogram
// Traces a Bezier curve through time-frequency space with a vertical profile
@@ -34,32 +35,24 @@ float evaluate_bezier_linear(const float* control_frames,
// control_amps: Amplitude values at control points (0.0-1.0 typical)
// n_control_points: Number of control points (minimum 2 for a curve)
// profile_type: Type of vertical profile to apply
-// profile_param1: First parameter (sigma for Gaussian, decay for sinusoid, amplitude for noise)
-// profile_param2: Second parameter (unused for Gaussian, frequency for sinusoid, seed for noise)
-void draw_bezier_curve(float* spectrogram,
- int dct_size,
- int num_frames,
+// profile_param1: First parameter (sigma for Gaussian, decay for sinusoid,
+//                 amplitude for noise)
+// profile_param2: Second parameter (unused for Gaussian, frequency for
+//                 sinusoid, seed for noise)
+void draw_bezier_curve(float* spectrogram, int dct_size, int num_frames,
const float* control_frames,
- const float* control_freqs_hz,
- const float* control_amps,
- int n_control_points,
- ProfileType profile_type,
- float profile_param1,
- float profile_param2 = 0.0f);
+ const float* control_freqs_hz, const float* control_amps,
+ int n_control_points, ProfileType profile_type,
+ float profile_param1, float profile_param2 = 0.0f);
// Additive variant of draw_bezier_curve (adds to existing spectrogram content)
// Use for compositing multiple profiles (e.g., Gaussian + Noise)
// Parameters same as draw_bezier_curve()
-void draw_bezier_curve_add(float* spectrogram,
- int dct_size,
- int num_frames,
- const float* control_frames,
- const float* control_freqs_hz,
- const float* control_amps,
- int n_control_points,
- ProfileType profile_type,
- float profile_param1,
- float profile_param2 = 0.0f);
+void draw_bezier_curve_add(float* spectrogram, int dct_size, int num_frames,
+ const float* control_frames,
+ const float* control_freqs_hz,
+ const float* control_amps, int n_control_points,
+ ProfileType profile_type, float profile_param1,
+ float profile_param2 = 0.0f);
// Evaluate vertical profile at given distance from central curve
// type: Profile type (Gaussian, Decaying Sinusoid, Noise)
@@ -67,9 +60,7 @@ void draw_bezier_curve_add(float* spectrogram,
// param1: First profile parameter
// param2: Second profile parameter
// Returns: Profile amplitude at given distance (0.0-1.0 range typically)
-float evaluate_profile(ProfileType type,
- float distance,
- float param1,
+float evaluate_profile(ProfileType type, float distance, float param1,
float param2);
// Home-brew deterministic RNG for noise profile
@@ -77,4 +68,3 @@ float evaluate_profile(ProfileType type,
// seed: Input seed value
// Returns: Pseudo-random uint32_t value
uint32_t spectral_brush_rand(uint32_t seed);
-
diff --git a/src/audio/synth.cc b/src/audio/synth.cc
index d66c502..e790c12 100644
--- a/src/audio/synth.cc
+++ b/src/audio/synth.cc
@@ -43,7 +43,7 @@ static struct {
static Voice g_voices[MAX_VOICES];
static volatile float g_current_output_peak =
- 0.0f; // Global peak for visualization
+ 0.0f; // Global peak for visualization
static float g_tempo_scale = 1.0f; // Playback speed multiplier
#if !defined(STRIP_ALL)
@@ -201,7 +201,8 @@ void synth_trigger_voice(int spectrogram_id, float volume, float pan,
v.buffer_pos = DCT_SIZE; // Force IDCT on first render
v.fractional_pos =
0.0f; // Initialize fractional position for tempo scaling
- v.start_sample_offset = start_offset_samples; // NEW: Sample-accurate timing
+ v.start_sample_offset =
+ start_offset_samples; // NEW: Sample-accurate timing
v.active_spectral_data =
g_synth_data.active_spectrogram_data[spectrogram_id];
diff --git a/src/audio/tracker.cc b/src/audio/tracker.cc
index 93a1c49..2bb4159 100644
--- a/src/audio/tracker.cc
+++ b/src/audio/tracker.cc
@@ -172,8 +172,10 @@ static int get_free_pattern_slot() {
}
// Helper to trigger a single note event (OPTIMIZED with caching)
-// start_offset_samples: How many samples into the future to trigger (for sample-accurate timing)
-static void trigger_note_event(const TrackerEvent& event, int start_offset_samples) {
+// start_offset_samples: How many samples into the future to trigger (for
+// sample-accurate timing)
+static void trigger_note_event(const TrackerEvent& event,
+ int start_offset_samples) {
#if defined(DEBUG_LOG_TRACKER)
// VALIDATION: Check sample_id bounds
if (event.sample_id >= g_tracker_samples_count) {
@@ -209,13 +211,15 @@ static void trigger_note_event(const TrackerEvent& event, int start_offset_sampl
}
// Trigger voice with sample-accurate offset
- synth_trigger_voice(cached_synth_id, event.volume, event.pan, start_offset_samples);
+ synth_trigger_voice(cached_synth_id, event.volume, event.pan,
+ start_offset_samples);
}
void tracker_update(float music_time_sec) {
// Unit-less timing: 1 unit = 4 beats (by convention)
const float BEATS_PER_UNIT = 4.0f;
- const float unit_duration_sec = (BEATS_PER_UNIT / g_tracker_score.bpm) * 60.0f;
+ const float unit_duration_sec =
+ (BEATS_PER_UNIT / g_tracker_score.bpm) * 60.0f;
// Step 1: Process new pattern triggers
while (g_last_trigger_idx < g_tracker_score.num_triggers) {
@@ -239,9 +243,10 @@ void tracker_update(float music_time_sec) {
}
// Step 2: Update all active patterns and trigger individual events
- // Get current audio playback position for sample-accurate timing
- const float current_playback_time = audio_get_playback_time();
- const float SAMPLE_RATE = 32000.0f; // Audio sample rate
+  // NOTE: We trigger events immediately when their time passes (no sample
+  // offsets). This gives ~16ms quantization (60fps), which is acceptable.
+  // Sample offsets don't work with tempo scaling because music_time and
+  // render_time are in different time domains (tempo-scaled vs physical).
for (int i = 0; i < MAX_SPECTROGRAMS; ++i) {
if (!g_active_patterns[i].active)
@@ -261,21 +266,9 @@ void tracker_update(float music_time_sec) {
if (event.unit_time > elapsed_units)
break; // This event hasn't reached its time yet
- // Calculate exact trigger time for this event
- const float event_trigger_time = active.start_music_time +
- (event.unit_time * unit_duration_sec);
-
- // Calculate sample-accurate offset from current playback position
- const float time_delta = event_trigger_time - current_playback_time;
- int sample_offset = (int)(time_delta * SAMPLE_RATE);
-
- // Clamp to 0 if negative (event is late, play immediately)
- if (sample_offset < 0) {
- sample_offset = 0;
- }
-
- // Trigger this event as an individual voice with sample-accurate timing
- trigger_note_event(event, sample_offset);
+ // Trigger this event immediately (no sample offset)
+ // Timing quantization: ~16ms at 60fps, acceptable for rhythm
+ trigger_note_event(event, 0);
active.next_event_idx++;
}
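A worked example of the unit clock used above. The numbers are illustrative; the real BPM comes from the tracker_compiler-generated score.

// 1 unit = 4 beats, so unit_duration_sec = (4 / bpm) * 60.
const float bpm = 120.0f;
const float unit_duration_sec = (4.0f / bpm) * 60.0f;  // 2.0 s per unit
// An event with unit_time = 0.5 inside a pattern fires
//   0.5 * 2.0 s = 1.0 s
// after the pattern's start_music_time. With the change above it is triggered
// on the first tracker_update() after that moment, i.e. within one frame
// (~16 ms at 60 fps) rather than sample-accurately.
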
diff --git a/src/audio/tracker.h b/src/audio/tracker.h
index 4cd011b..3ef06a1 100644
--- a/src/audio/tracker.h
+++ b/src/audio/tracker.h
@@ -8,7 +8,7 @@
#include <cstdint>
struct TrackerEvent {
- float unit_time; // Unit-less time within pattern (0.0 to pattern.unit_length)
+ float unit_time; // Unit-less time within pattern (0.0 to pattern.unit_length)
uint16_t sample_id;
float volume;
float pan;
@@ -17,11 +17,12 @@ struct TrackerEvent {
struct TrackerPattern {
const TrackerEvent* events;
uint32_t num_events;
- float unit_length; // Pattern duration in units (typically 1.0 for 4-beat patterns)
+ float unit_length; // Pattern duration in units (typically 1.0 for 4-beat
+ // patterns)
};
struct TrackerPatternTrigger {
- float unit_time; // Unit-less time when pattern triggers
+ float unit_time; // Unit-less time when pattern triggers
uint16_t pattern_id;
// Modifiers could be added here
};
@@ -29,7 +30,7 @@ struct TrackerPatternTrigger {
struct TrackerScore {
const TrackerPatternTrigger* triggers;
uint32_t num_triggers;
- float bpm; // BPM is used only for playback scaling (1 unit = 4 beats)
+ float bpm; // BPM is used only for playback scaling (1 unit = 4 beats)
};
// Global music data generated by tracker_compiler