 src/audio/tracker.cc | 27 +++++++--------------------
 1 file changed, 7 insertions(+), 20 deletions(-)
diff --git a/src/audio/tracker.cc b/src/audio/tracker.cc
index 1cccc57..fd25e84 100644
--- a/src/audio/tracker.cc
+++ b/src/audio/tracker.cc
@@ -239,10 +239,10 @@ void tracker_update(float music_time_sec) {
   }
 
   // Step 2: Update all active patterns and trigger individual events
-  // Get current audio RENDER position (write position) for sample-accurate timing
-  // This is where we're currently writing to the ring buffer (~400ms ahead of playback)
-  const float current_render_time = audio_get_render_time();
-  const float SAMPLE_RATE = 32000.0f;  // Audio sample rate
+  // NOTE: We trigger events immediately when their time passes (no sample offsets)
+  // This gives ~16ms quantization (60fps) which is acceptable
+  // Sample offsets don't work with tempo scaling because music_time and render_time
+  // are in different time domains (tempo-scaled vs physical)
 
   for (int i = 0; i < MAX_SPECTROGRAMS; ++i) {
     if (!g_active_patterns[i].active)
@@ -262,22 +262,9 @@ void tracker_update(float music_time_sec) {
       if (event.unit_time > elapsed_units)
         break;  // This event hasn't reached its time yet
 
-      // Calculate exact trigger time for this event
-      const float event_trigger_time = active.start_music_time +
-                                       (event.unit_time * unit_duration_sec);
-
-      // Calculate sample-accurate offset from current RENDER position (write pos)
-      // This is where we're currently writing to the buffer, not where playback is
-      const float time_delta = event_trigger_time - current_render_time;
-      int sample_offset = (int)(time_delta * SAMPLE_RATE);
-
-      // Clamp to 0 if negative (event is late, play immediately)
-      if (sample_offset < 0) {
-        sample_offset = 0;
-      }
-
-      // Trigger this event as an individual voice with sample-accurate timing
-      trigger_note_event(event, sample_offset);
+      // Trigger this event immediately (no sample offset)
+      // Timing quantization: ~16ms at 60fps, acceptable for rhythm
+      trigger_note_event(event, 0);
       active.next_event_idx++;
     }
 
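For context on the "different time domains" comment, here is a minimal, illustrative sketch, not code from this repository: `tempo_scale`, the hard-coded times, and the 60 fps update rate are assumptions. It shows why converting a delta between a tempo-scaled music clock and the physical render clock into a sample offset breaks down, and how large the remaining per-frame quantization actually is.

```cpp
// Illustrative sketch only -- names and values are assumptions, not repo code.
#include <cstdio>

int main() {
  const float SAMPLE_RATE = 32000.0f;  // matches the constant removed in the diff
  const float tempo_scale = 1.5f;      // hypothetical: music clock runs 1.5x physical time

  // One physical second of rendering advances the music clock by tempo_scale
  // seconds, so music time and render time live in different domains.
  float render_time = 1.0f;                       // physical seconds (write position)
  float music_time  = render_time * tempo_scale;  // tempo-scaled seconds

  // Subtracting across domains (as a sample-offset calculation effectively does
  // once tempo scaling is introduced) yields a meaningless duration:
  float cross_domain_delta = music_time - render_time;           // 0.5 "seconds"
  int   bogus_offset = (int)(cross_domain_delta * SAMPLE_RATE);  // ~16000 samples of error

  // Triggering immediately instead costs at most one update tick of jitter:
  float tick_sec = 1.0f / 60.0f;                       // ~16.7 ms at 60 fps
  int   tick_samples = (int)(tick_sec * SAMPLE_RATE);  // ~533 samples

  std::printf("bogus cross-domain offset: %d samples\n", bogus_offset);
  std::printf("worst-case frame quantization: ~%d samples (~%.1f ms)\n",
              tick_samples, tick_sec * 1000.0f);
  return 0;
}
```

Under these assumptions, the patch trades up to roughly 533 samples (~16.7 ms) of worst-case trigger jitter for event timing that stays consistent when the tempo is scaled.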
