Diffstat (limited to 'src/audio')
-rw-r--r--  src/audio/audio.cc        |  6
-rw-r--r--  src/audio/audio.h         |  6
-rw-r--r--  src/audio/ring_buffer.cc  |  5
-rw-r--r--  src/audio/ring_buffer.h   | 12
-rw-r--r--  src/audio/synth.cc        | 17
-rw-r--r--  src/audio/synth.h         |  3
-rw-r--r--  src/audio/tracker.cc      | 51
-rw-r--r--  src/audio/tracker.h       |  8
8 files changed, 84 insertions(+), 24 deletions(-)
diff --git a/src/audio/audio.cc b/src/audio/audio.cc
index 67345cf..74536e5 100644
--- a/src/audio/audio.cc
+++ b/src/audio/audio.cc
@@ -172,6 +172,12 @@ float audio_get_playback_time() {
(RING_BUFFER_SAMPLE_RATE * RING_BUFFER_CHANNELS);
}
+float audio_get_render_time() {
+ const int64_t total_samples = g_ring_buffer.get_total_written();
+ return (float)total_samples /
+ (RING_BUFFER_SAMPLE_RATE * RING_BUFFER_CHANNELS);
+}
+
float audio_get_realtime_peak() {
if (g_audio_backend == nullptr) {
return 0.0f;
diff --git a/src/audio/audio.h b/src/audio/audio.h
index 14fe615..e063a57 100644
--- a/src/audio/audio.h
+++ b/src/audio/audio.h
@@ -27,8 +27,14 @@ void audio_start(); // Starts the audio device callback
void audio_render_ahead(float music_time, float dt);
// Get current playback time (in seconds) based on samples consumed
+// This is the ring buffer READ position (what's being played NOW)
float audio_get_playback_time();
+// Get current render time (in seconds) based on samples written
+// This is the ring buffer WRITE position (where we're currently rendering)
+// Use this for calculating sample-accurate trigger offsets
+float audio_get_render_time();
+
// Get peak amplitude of samples currently being played (real-time sync)
// Returns: Peak amplitude in range [0.0, 1.0+]
// Use this for visual effects to ensure audio-visual synchronization
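Note: the gap between these two clocks is the render-ahead latency. A minimal usage sketch under the assumptions of this patch (the helper name audio_get_render_ahead_sec is hypothetical and not part of the API; both accessors divide a sample counter by RING_BUFFER_SAMPLE_RATE * RING_BUFFER_CHANNELS, so the difference is directly comparable):

    // Hypothetical helper, not in this patch: how far the renderer is ahead
    // of playback, in seconds. Values near zero mean the ring buffer is close
    // to underrunning; the tracker.cc comment below suggests ~0.4 s is typical.
    static float audio_get_render_ahead_sec() {
        return audio_get_render_time() - audio_get_playback_time();
    }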
diff --git a/src/audio/ring_buffer.cc b/src/audio/ring_buffer.cc
index a7e5d9e..7cedb56 100644
--- a/src/audio/ring_buffer.cc
+++ b/src/audio/ring_buffer.cc
@@ -9,7 +9,7 @@
AudioRingBuffer::AudioRingBuffer()
: capacity_(RING_BUFFER_CAPACITY_SAMPLES), write_pos_(0), read_pos_(0),
- total_read_(0) {
+ total_read_(0), total_written_(0) {
memset(buffer_, 0, sizeof(buffer_));
}
@@ -81,6 +81,9 @@ int AudioRingBuffer::write(const float* samples, int count) {
write_pos_.store(remainder, std::memory_order_release);
}
+ // Track total samples written for render timing
+ total_written_.fetch_add(to_write, std::memory_order_release);
+
return to_write;
}
diff --git a/src/audio/ring_buffer.h b/src/audio/ring_buffer.h
index b19c1ea..324447a 100644
--- a/src/audio/ring_buffer.h
+++ b/src/audio/ring_buffer.h
@@ -42,6 +42,11 @@ class AudioRingBuffer {
return total_read_.load(std::memory_order_acquire);
}
+ // Get total samples written (for render timing)
+ int64_t get_total_written() const {
+ return total_written_.load(std::memory_order_acquire);
+ }
+
// Clear buffer (for seeking)
void clear();
@@ -49,7 +54,8 @@ class AudioRingBuffer {
float buffer_[RING_BUFFER_CAPACITY_SAMPLES];
int capacity_; // Total capacity in samples
- std::atomic<int> write_pos_; // Write position (0 to capacity-1)
- std::atomic<int> read_pos_; // Read position (0 to capacity-1)
- std::atomic<int64_t> total_read_; // Total samples read (for playback time)
+ std::atomic<int> write_pos_; // Write position (0 to capacity-1)
+ std::atomic<int> read_pos_; // Read position (0 to capacity-1)
+ std::atomic<int64_t> total_read_; // Total samples read (for playback time)
+ std::atomic<int64_t> total_written_; // Total samples written (for render timing)
};
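Note: the new counter mirrors the existing read-side pattern: the producer bumps total_written_ with release ordering after copying samples into the buffer, and readers observe it with acquire loads. A consumer-side sketch (the free function is illustrative only, and it assumes get_total_read() is const like the new accessor):

    // Illustrative only: samples queued but not yet played is simply
    // written-minus-read, using the two acquire-loading accessors above.
    int64_t samples_buffered(const AudioRingBuffer& rb) {
        return rb.get_total_written() - rb.get_total_read();
    }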
diff --git a/src/audio/synth.cc b/src/audio/synth.cc
index 2072bb4..d66c502 100644
--- a/src/audio/synth.cc
+++ b/src/audio/synth.cc
@@ -30,6 +30,8 @@ struct Voice {
int buffer_pos;
float fractional_pos; // Fractional sample position for tempo scaling
+ int start_sample_offset; // Samples to wait before producing audio output
+
const volatile float* active_spectral_data;
};
@@ -152,7 +154,8 @@ void synth_commit_update(int spectrogram_id) {
new_active_ptr, __ATOMIC_RELEASE);
}
-void synth_trigger_voice(int spectrogram_id, float volume, float pan) {
+void synth_trigger_voice(int spectrogram_id, float volume, float pan,
+ int start_offset_samples) {
if (spectrogram_id < 0 || spectrogram_id >= MAX_SPECTROGRAMS ||
!g_synth_data.spectrogram_registered[spectrogram_id]) {
#if defined(DEBUG_LOG_SYNTH)
@@ -174,6 +177,11 @@ void synth_trigger_voice(int spectrogram_id, float volume, float pan) {
pan, spectrogram_id);
pan = (pan < -1.0f) ? -1.0f : 1.0f;
}
+ if (start_offset_samples < 0) {
+ DEBUG_SYNTH("[SYNTH WARNING] Negative start_offset=%d, clamping to 0\n",
+ start_offset_samples);
+ start_offset_samples = 0;
+ }
#endif
for (int i = 0; i < MAX_VOICES; ++i) {
@@ -193,6 +201,7 @@ void synth_trigger_voice(int spectrogram_id, float volume, float pan) {
v.buffer_pos = DCT_SIZE; // Force IDCT on first render
v.fractional_pos =
0.0f; // Initialize fractional position for tempo scaling
+ v.start_sample_offset = start_offset_samples; // NEW: Sample-accurate timing
v.active_spectral_data =
g_synth_data.active_spectrogram_data[spectrogram_id];
@@ -223,6 +232,12 @@ void synth_render(float* output_buffer, int num_frames) {
if (!v.active)
continue;
+ // NEW: Skip this sample if we haven't reached the trigger offset yet
+ if (v.start_sample_offset > 0) {
+ v.start_sample_offset--;
+ continue; // Don't produce audio until offset elapsed
+ }
+
if (v.buffer_pos >= DCT_SIZE) {
if (v.current_spectral_frame >= v.total_spectral_frames) {
v.active = false;
diff --git a/src/audio/synth.h b/src/audio/synth.h
index ba96167..b2625b3 100644
--- a/src/audio/synth.h
+++ b/src/audio/synth.h
@@ -38,7 +38,8 @@ void synth_unregister_spectrogram(int spectrogram_id);
float* synth_begin_update(int spectrogram_id);
void synth_commit_update(int spectrogram_id);
-void synth_trigger_voice(int spectrogram_id, float volume, float pan);
+void synth_trigger_voice(int spectrogram_id, float volume, float pan,
+ int start_offset_samples = 0);
void synth_render(float* output_buffer, int num_frames);
void synth_set_tempo_scale(
float tempo_scale); // Set playback speed (1.0 = normal)
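Note: because the new parameter is defaulted, existing call sites compile unchanged; within this patch only the tracker passes an explicit offset. A short illustration (identifier and values are placeholders):

    // Placeholder values for illustration only.
    synth_trigger_voice(kick_id, 0.8f, 0.0f);       // old form: starts on the next rendered sample
    synth_trigger_voice(kick_id, 0.8f, 0.0f, 320);  // new form: stays silent for 320 samples (10 ms at 32 kHz) first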
diff --git a/src/audio/tracker.cc b/src/audio/tracker.cc
index 7ad5a67..1cccc57 100644
--- a/src/audio/tracker.cc
+++ b/src/audio/tracker.cc
@@ -172,7 +172,8 @@ static int get_free_pattern_slot() {
}
// Helper to trigger a single note event (OPTIMIZED with caching)
-static void trigger_note_event(const TrackerEvent& event) {
+// start_offset_samples: How many samples into the future to trigger (for sample-accurate timing)
+static void trigger_note_event(const TrackerEvent& event, int start_offset_samples) {
#if defined(DEBUG_LOG_TRACKER)
// VALIDATION: Check sample_id bounds
if (event.sample_id >= g_tracker_samples_count) {
@@ -207,24 +208,29 @@ static void trigger_note_event(const TrackerEvent& event) {
return;
}
- // Trigger voice directly with cached spectrogram
- synth_trigger_voice(cached_synth_id, event.volume, event.pan);
+ // Trigger voice with sample-accurate offset
+ synth_trigger_voice(cached_synth_id, event.volume, event.pan, start_offset_samples);
}
void tracker_update(float music_time_sec) {
+ // Unit-less timing: 1 unit = 4 beats (by convention)
+ const float BEATS_PER_UNIT = 4.0f;
+ const float unit_duration_sec = (BEATS_PER_UNIT / g_tracker_score.bpm) * 60.0f;
+
// Step 1: Process new pattern triggers
while (g_last_trigger_idx < g_tracker_score.num_triggers) {
const TrackerPatternTrigger& trigger =
g_tracker_score.triggers[g_last_trigger_idx];
- if (trigger.time_sec > music_time_sec)
+ const float trigger_time_sec = trigger.unit_time * unit_duration_sec;
+ if (trigger_time_sec > music_time_sec)
break;
// Add this pattern to active patterns list
const int slot = get_free_pattern_slot();
if (slot != -1) {
g_active_patterns[slot].pattern_id = trigger.pattern_id;
- g_active_patterns[slot].start_music_time = trigger.time_sec;
+ g_active_patterns[slot].start_music_time = trigger_time_sec;
g_active_patterns[slot].next_event_idx = 0;
g_active_patterns[slot].active = true;
}
@@ -233,7 +239,10 @@ void tracker_update(float music_time_sec) {
}
// Step 2: Update all active patterns and trigger individual events
- const float beat_duration = 60.0f / g_tracker_score.bpm;
+ // Get current audio RENDER position (write position) for sample-accurate timing
+ // This is where we're currently writing to the ring buffer (~400ms ahead of playback)
+ const float current_render_time = audio_get_render_time();
+ const float SAMPLE_RATE = 32000.0f; // Audio sample rate
for (int i = 0; i < MAX_SPECTROGRAMS; ++i) {
if (!g_active_patterns[i].active)
@@ -242,25 +251,39 @@ void tracker_update(float music_time_sec) {
ActivePattern& active = g_active_patterns[i];
const TrackerPattern& pattern = g_tracker_patterns[active.pattern_id];
- // Calculate elapsed beats since pattern started
+ // Calculate elapsed unit-less time since pattern started
const float elapsed_music_time = music_time_sec - active.start_music_time;
- const float elapsed_beats = elapsed_music_time / beat_duration;
+ const float elapsed_units = elapsed_music_time / unit_duration_sec;
- // Trigger all events that have passed their beat time
+ // Trigger all events that have passed their unit time
while (active.next_event_idx < pattern.num_events) {
const TrackerEvent& event = pattern.events[active.next_event_idx];
- if (event.beat > elapsed_beats)
+ if (event.unit_time > elapsed_units)
break; // This event hasn't reached its time yet
- // Trigger this event as an individual voice
- trigger_note_event(event);
+ // Calculate exact trigger time for this event
+ const float event_trigger_time = active.start_music_time +
+ (event.unit_time * unit_duration_sec);
+
+ // Calculate sample-accurate offset from current RENDER position (write pos)
+ // This is where we're currently writing to the buffer, not where playback is
+ const float time_delta = event_trigger_time - current_render_time;
+ int sample_offset = (int)(time_delta * SAMPLE_RATE);
+
+ // Clamp to 0 if negative (event is late, play immediately)
+ if (sample_offset < 0) {
+ sample_offset = 0;
+ }
+
+ // Trigger this event as an individual voice with sample-accurate timing
+ trigger_note_event(event, sample_offset);
active.next_event_idx++;
}
- // If all events have been triggered, mark pattern as complete
- if (active.next_event_idx >= pattern.num_events) {
+ // Pattern remains active until full duration elapses
+ if (elapsed_units >= pattern.unit_length) {
active.active = false;
}
}
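Note: a worked example of the unit-to-seconds math (numbers are illustrative). At 120 BPM one unit is (4 / 120) * 60 = 2.0 s, so a pattern triggered at unit_time 8 starts at 16.0 s of music time, and an event inside it at unit_time 0.25 fires at 16.5 s. If the ring buffer's write position corresponds to 16.4 s when tracker_update() runs, the event is handed to the synth with a (16.5 - 16.4) * 32000 = 3200-sample offset.

    // Same numbers as above, in code form (illustrative constants only).
    const float bpm = 120.0f;
    const float unit_duration_sec = (4.0f / bpm) * 60.0f;                            // 2.0 s per unit
    const float start_music_time = 8.0f * unit_duration_sec;                         // 16.0 s
    const float event_trigger_time = start_music_time + 0.25f * unit_duration_sec;   // 16.5 s
    const float current_render_time = 16.4f;                                         // write position, ~0.4 s ahead of playback
    const int sample_offset = (int)((event_trigger_time - current_render_time) * 32000.0f);  // 3200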
diff --git a/src/audio/tracker.h b/src/audio/tracker.h
index 336f77f..4cd011b 100644
--- a/src/audio/tracker.h
+++ b/src/audio/tracker.h
@@ -8,7 +8,7 @@
#include <cstdint>
struct TrackerEvent {
- float beat;
+ float unit_time; // Unit-less time within pattern (0.0 to pattern.unit_length)
uint16_t sample_id;
float volume;
float pan;
@@ -17,11 +17,11 @@ struct TrackerEvent {
struct TrackerPattern {
const TrackerEvent* events;
uint32_t num_events;
- float num_beats;
+ float unit_length; // Pattern duration in units (typically 1.0 for 4-beat patterns)
};
struct TrackerPatternTrigger {
- float time_sec;
+ float unit_time; // Unit-less time when pattern triggers
uint16_t pattern_id;
// Modifiers could be added here
};
@@ -29,7 +29,7 @@ struct TrackerPatternTrigger {
struct TrackerScore {
const TrackerPatternTrigger* triggers;
uint32_t num_triggers;
- float bpm;
+ float bpm; // BPM is used only for playback scaling (1 unit = 4 beats)
};
// Global music data generated by tracker_compiler
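Note: for orientation, a minimal hand-written score in the new unit-based layout might look as follows. This is a sketch only: it assumes the structs contain exactly the fields visible in this diff, and real data is emitted by tracker_compiler rather than written by hand.

    // Sketch only; assumes the struct fields shown in this diff are complete.
    static const TrackerEvent kKickEvents[] = {
        // unit_time, sample_id, volume, pan
        {0.00f, 0, 1.0f, 0.0f},
        {0.25f, 0, 1.0f, 0.0f},
        {0.50f, 0, 1.0f, 0.0f},
        {0.75f, 0, 1.0f, 0.0f},
    };
    static const TrackerPattern kKickPattern = {kKickEvents, 4, /*unit_length=*/1.0f};
    static const TrackerPatternTrigger kTriggers[] = {
        {/*unit_time=*/0.0f, /*pattern_id=*/0},
        {1.0f, 0},
        {2.0f, 0},
    };
    static const TrackerScore kScore = {kTriggers, 3, /*bpm=*/120.0f};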