Diffstat (limited to 'src')
 src/audio/audio.cc                      |   6
 src/audio/audio.h                       |   6
 src/audio/ring_buffer.cc                |   5
 src/audio/ring_buffer.h                 |  12
 src/audio/synth.cc                      |  17
 src/audio/synth.h                       |   3
 src/audio/tracker.cc                    |  51
 src/audio/tracker.h                     |   8
 src/generated/music_data.cc             | 355
 src/generated/test_demo_music.cc        |  40
 src/gpu/effects/flash_effect.cc         |  10
 src/gpu/effects/post_process_helper.cc  |  15
 src/gpu/effects/post_process_helper.h   |   6
 src/main.cc                             |   1
 src/test_demo.cc                        |  42
15 files changed, 249 insertions, 328 deletions
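Note on the change below: the main functional change is sample-accurate event scheduling. Tracker events and pattern triggers now use unit-less times (1 unit = 4 beats by convention), and each event is converted into a sample offset measured from the ring buffer's write position rather than from playback time. A minimal sketch of that conversion, using the constants that appear in tracker.cc in this diff (32 kHz sample rate, 4 beats per unit); the helper name is illustrative, not part of the commit:

// Sketch only: mirrors the arithmetic added to tracker_update() below.
// Assumes 1 unit = 4 beats and a 32 kHz render rate, as in tracker.cc.
static int compute_start_offset_samples(float event_unit_time,
                                        float pattern_start_sec, float bpm,
                                        float render_time_sec) {
  const float kBeatsPerUnit = 4.0f;
  const float kSampleRate = 32000.0f;
  const float unit_duration_sec = (kBeatsPerUnit / bpm) * 60.0f;
  const float event_time_sec =
      pattern_start_sec + event_unit_time * unit_duration_sec;
  // Distance between the event and the current write head, in samples.
  const float delta_sec = event_time_sec - render_time_sec;
  const int offset = (int)(delta_sec * kSampleRate);
  return offset < 0 ? 0 : offset;  // late events fire immediately
}

At the score's 120 BPM one unit lasts two seconds, so the 0.5-unit spacing in the new SCORE_TRIGGERS corresponds to a pattern start every second.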
diff --git a/src/audio/audio.cc b/src/audio/audio.cc
index 67345cf..74536e5 100644
--- a/src/audio/audio.cc
+++ b/src/audio/audio.cc
@@ -172,6 +172,12 @@ float audio_get_playback_time() {
          (RING_BUFFER_SAMPLE_RATE * RING_BUFFER_CHANNELS);
 }
 
+float audio_get_render_time() {
+  const int64_t total_samples = g_ring_buffer.get_total_written();
+  return (float)total_samples /
+         (RING_BUFFER_SAMPLE_RATE * RING_BUFFER_CHANNELS);
+}
+
 float audio_get_realtime_peak() {
   if (g_audio_backend == nullptr) {
     return 0.0f;
diff --git a/src/audio/audio.h b/src/audio/audio.h
index 14fe615..e063a57 100644
--- a/src/audio/audio.h
+++ b/src/audio/audio.h
@@ -27,8 +27,14 @@ void audio_start();  // Starts the audio device callback
 void audio_render_ahead(float music_time, float dt);
 
 // Get current playback time (in seconds) based on samples consumed
+// This is the ring buffer READ position (what's being played NOW)
 float audio_get_playback_time();
 
+// Get current render time (in seconds) based on samples written
+// This is the ring buffer WRITE position (where we're currently rendering)
+// Use this for calculating sample-accurate trigger offsets
+float audio_get_render_time();
+
 // Get peak amplitude of samples currently being played (real-time sync)
 // Returns: Peak amplitude in range [0.0, 1.0+]
 // Use this for visual effects to ensure audio-visual synchronization
diff --git a/src/audio/ring_buffer.cc b/src/audio/ring_buffer.cc
index a7e5d9e..7cedb56 100644
--- a/src/audio/ring_buffer.cc
+++ b/src/audio/ring_buffer.cc
@@ -9,7 +9,7 @@ AudioRingBuffer::AudioRingBuffer()
     : capacity_(RING_BUFFER_CAPACITY_SAMPLES),
       write_pos_(0),
       read_pos_(0),
-      total_read_(0) {
+      total_read_(0), total_written_(0) {
   memset(buffer_, 0, sizeof(buffer_));
 }
 
@@ -81,6 +81,9 @@ int AudioRingBuffer::write(const float* samples, int count) {
     write_pos_.store(remainder, std::memory_order_release);
   }
 
+  // Track total samples written for render timing
+  total_written_.fetch_add(to_write, std::memory_order_release);
+
   return to_write;
 }
diff --git a/src/audio/ring_buffer.h b/src/audio/ring_buffer.h
index b19c1ea..324447a 100644
--- a/src/audio/ring_buffer.h
+++ b/src/audio/ring_buffer.h
@@ -42,6 +42,11 @@ class AudioRingBuffer {
     return total_read_.load(std::memory_order_acquire);
   }
 
+  // Get total samples written (for render timing)
+  int64_t get_total_written() const {
+    return total_written_.load(std::memory_order_acquire);
+  }
+
   // Clear buffer (for seeking)
   void clear();
 
@@ -49,7 +54,8 @@ class AudioRingBuffer {
   float buffer_[RING_BUFFER_CAPACITY_SAMPLES];
   int capacity_;  // Total capacity in samples
 
-  std::atomic<int> write_pos_;       // Write position (0 to capacity-1)
-  std::atomic<int> read_pos_;        // Read position (0 to capacity-1)
-  std::atomic<int64_t> total_read_;  // Total samples read (for playback time)
+  std::atomic<int> write_pos_;          // Write position (0 to capacity-1)
+  std::atomic<int> read_pos_;           // Read position (0 to capacity-1)
+  std::atomic<int64_t> total_read_;     // Total samples read (for playback time)
+  std::atomic<int64_t> total_written_;  // Total samples written (for render timing)
 };
diff --git a/src/audio/synth.cc b/src/audio/synth.cc
index 2072bb4..d66c502 100644
--- a/src/audio/synth.cc
+++ b/src/audio/synth.cc
@@ -30,6 +30,8 @@ struct Voice {
   int buffer_pos;
   float fractional_pos;  // Fractional sample position for tempo scaling
 
+  int start_sample_offset;  // Samples to wait before producing audio output
+
   const volatile float* active_spectral_data;
 };
 
@@ -152,7 +154,8 @@ void synth_commit_update(int spectrogram_id) {
                    new_active_ptr, __ATOMIC_RELEASE);
 }
 
-void synth_trigger_voice(int spectrogram_id, float volume, float pan) {
+void synth_trigger_voice(int spectrogram_id, float volume, float pan,
+                         int start_offset_samples) {
   if (spectrogram_id < 0 || spectrogram_id >= MAX_SPECTROGRAMS ||
       !g_synth_data.spectrogram_registered[spectrogram_id]) {
 #if defined(DEBUG_LOG_SYNTH)
@@ -174,6 +177,11 @@ void synth_trigger_voice(int spectrogram_id, float volume, float pan) {
                 pan, spectrogram_id);
     pan = (pan < -1.0f) ? -1.0f : 1.0f;
   }
+  if (start_offset_samples < 0) {
+    DEBUG_SYNTH("[SYNTH WARNING] Negative start_offset=%d, clamping to 0\n",
+                start_offset_samples);
+    start_offset_samples = 0;
+  }
 #endif
 
   for (int i = 0; i < MAX_VOICES; ++i) {
@@ -193,6 +201,7 @@ void synth_trigger_voice(int spectrogram_id, float volume, float pan) {
 
     v.buffer_pos = DCT_SIZE;  // Force IDCT on first render
     v.fractional_pos = 0.0f;  // Initialize fractional position for tempo scaling
+    v.start_sample_offset = start_offset_samples;  // NEW: Sample-accurate timing
 
     v.active_spectral_data =
         g_synth_data.active_spectrogram_data[spectrogram_id];
@@ -223,6 +232,12 @@ void synth_render(float* output_buffer, int num_frames) {
       if (!v.active)
         continue;
 
+      // NEW: Skip this sample if we haven't reached the trigger offset yet
+      if (v.start_sample_offset > 0) {
+        v.start_sample_offset--;
+        continue;  // Don't produce audio until offset elapsed
+      }
+
       if (v.buffer_pos >= DCT_SIZE) {
         if (v.current_spectral_frame >= v.total_spectral_frames) {
           v.active = false;
diff --git a/src/audio/synth.h b/src/audio/synth.h
index ba96167..b2625b3 100644
--- a/src/audio/synth.h
+++ b/src/audio/synth.h
@@ -38,7 +38,8 @@ void synth_unregister_spectrogram(int spectrogram_id);
 float* synth_begin_update(int spectrogram_id);
 void synth_commit_update(int spectrogram_id);
 
-void synth_trigger_voice(int spectrogram_id, float volume, float pan);
+void synth_trigger_voice(int spectrogram_id, float volume, float pan,
+                         int start_offset_samples = 0);
 void synth_render(float* output_buffer, int num_frames);
 void synth_set_tempo_scale(
     float tempo_scale);  // Set playback speed (1.0 = normal)
diff --git a/src/audio/tracker.cc b/src/audio/tracker.cc
index 7ad5a67..1cccc57 100644
--- a/src/audio/tracker.cc
+++ b/src/audio/tracker.cc
@@ -172,7 +172,8 @@ static int get_free_pattern_slot() {
 }
 
 // Helper to trigger a single note event (OPTIMIZED with caching)
-static void trigger_note_event(const TrackerEvent& event) {
+// start_offset_samples: How many samples into the future to trigger (for sample-accurate timing)
+static void trigger_note_event(const TrackerEvent& event, int start_offset_samples) {
 #if defined(DEBUG_LOG_TRACKER)
   // VALIDATION: Check sample_id bounds
   if (event.sample_id >= g_tracker_samples_count) {
@@ -207,24 +208,29 @@ static void trigger_note_event(const TrackerEvent& event) {
     return;
   }
 
-  // Trigger voice directly with cached spectrogram
-  synth_trigger_voice(cached_synth_id, event.volume, event.pan);
+  // Trigger voice with sample-accurate offset
+  synth_trigger_voice(cached_synth_id, event.volume, event.pan, start_offset_samples);
 }
 
 void tracker_update(float music_time_sec) {
+  // Unit-less timing: 1 unit = 4 beats (by convention)
+  const float BEATS_PER_UNIT = 4.0f;
+  const float unit_duration_sec = (BEATS_PER_UNIT / g_tracker_score.bpm) * 60.0f;
+
   // Step 1: Process new pattern triggers
   while (g_last_trigger_idx < g_tracker_score.num_triggers) {
     const TrackerPatternTrigger& trigger =
         g_tracker_score.triggers[g_last_trigger_idx];
-    if (trigger.time_sec > music_time_sec)
+    const float trigger_time_sec = trigger.unit_time * unit_duration_sec;
+    if (trigger_time_sec > music_time_sec)
       break;
 
     // Add this pattern to active patterns list
     const int slot = get_free_pattern_slot();
     if (slot != -1) {
       g_active_patterns[slot].pattern_id = trigger.pattern_id;
-      g_active_patterns[slot].start_music_time = trigger.time_sec;
+      g_active_patterns[slot].start_music_time = trigger_time_sec;
       g_active_patterns[slot].next_event_idx = 0;
       g_active_patterns[slot].active = true;
     }
@@ -233,7 +239,10 @@ void tracker_update(float music_time_sec) {
   }
 
   // Step 2: Update all active patterns and trigger individual events
-  const float beat_duration = 60.0f / g_tracker_score.bpm;
+  // Get current audio RENDER position (write position) for sample-accurate timing
+  // This is where we're currently writing to the ring buffer (~400ms ahead of playback)
+  const float current_render_time = audio_get_render_time();
+  const float SAMPLE_RATE = 32000.0f;  // Audio sample rate
 
   for (int i = 0; i < MAX_SPECTROGRAMS; ++i) {
     if (!g_active_patterns[i].active)
@@ -242,25 +251,39 @@ void tracker_update(float music_time_sec) {
     ActivePattern& active = g_active_patterns[i];
     const TrackerPattern& pattern = g_tracker_patterns[active.pattern_id];
 
-    // Calculate elapsed beats since pattern started
+    // Calculate elapsed unit-less time since pattern started
     const float elapsed_music_time = music_time_sec - active.start_music_time;
-    const float elapsed_beats = elapsed_music_time / beat_duration;
+    const float elapsed_units = elapsed_music_time / unit_duration_sec;
 
-    // Trigger all events that have passed their beat time
+    // Trigger all events that have passed their unit time
     while (active.next_event_idx < pattern.num_events) {
       const TrackerEvent& event = pattern.events[active.next_event_idx];
-      if (event.beat > elapsed_beats)
+      if (event.unit_time > elapsed_units)
         break;  // This event hasn't reached its time yet
 
-      // Trigger this event as an individual voice
-      trigger_note_event(event);
+      // Calculate exact trigger time for this event
+      const float event_trigger_time = active.start_music_time +
+                                       (event.unit_time * unit_duration_sec);
+
+      // Calculate sample-accurate offset from current RENDER position (write pos)
+      // This is where we're currently writing to the buffer, not where playback is
+      const float time_delta = event_trigger_time - current_render_time;
+      int sample_offset = (int)(time_delta * SAMPLE_RATE);
+
+      // Clamp to 0 if negative (event is late, play immediately)
+      if (sample_offset < 0) {
+        sample_offset = 0;
+      }
+
+      // Trigger this event as an individual voice with sample-accurate timing
+      trigger_note_event(event, sample_offset);
       active.next_event_idx++;
     }
 
-    // If all events have been triggered, mark pattern as complete
-    if (active.next_event_idx >= pattern.num_events) {
+    // Pattern remains active until full duration elapses
+    if (elapsed_units >= pattern.unit_length) {
       active.active = false;
     }
   }
diff --git a/src/audio/tracker.h b/src/audio/tracker.h
index 336f77f..4cd011b 100644
--- a/src/audio/tracker.h
+++ b/src/audio/tracker.h
@@ -8,7 +8,7 @@
 #include <cstdint>
 
 struct TrackerEvent {
-  float beat;
+  float unit_time;  // Unit-less time within pattern (0.0 to pattern.unit_length)
   uint16_t sample_id;
   float volume;
   float pan;
@@ -17,11 +17,11 @@ struct TrackerEvent {
 struct TrackerPattern {
   const TrackerEvent* events;
   uint32_t num_events;
-  float num_beats;
+  float unit_length;  // Pattern duration in units (typically 1.0 for 4-beat patterns)
 };
 
 struct TrackerPatternTrigger {
-  float time_sec;
+  float unit_time;  // Unit-less time when pattern triggers
   uint16_t pattern_id;
   // Modifiers could be added here
 };
@@ -29,7 +29,7 @@ struct TrackerPatternTrigger {
 struct TrackerScore {
   const TrackerPatternTrigger* triggers;
   uint32_t num_triggers;
-  float bpm;
+  float bpm;  // BPM is used only for playback scaling (1 unit = 4 beats)
 };
 
 // Global music data generated by tracker_compiler
diff --git a/src/generated/music_data.cc b/src/generated/music_data.cc
index 0852e93..8d6f482 100644
--- a/src/generated/music_data.cc
+++ b/src/generated/music_data.cc
@@ -7,307 +7,162 @@ const NoteParams g_tracker_samples[] = {
   { 0 }, // ASSET_KICK_1 (ASSET)
   { 0 }, // ASSET_KICK_2 (ASSET)
-  { 0 }, // ASSET_KICK_2 (ASSET)
   { 0 }, // ASSET_SNARE_1 (ASSET)
   { 0 }, // ASSET_SNARE_2 (ASSET)
   { 0 }, // ASSET_SNARE_3 (ASSET)
-  { 0 }, // ASSET_SNARE_3 (ASSET)
-  { 0 }, // ASSET_HIHAT_1 (ASSET)
-  { 0 }, // ASSET_HIHAT_2 (ASSET)
-  { 0 }, // ASSET_HIHAT_3 (ASSET)
-  { 0 }, // ASSET_HIHAT_3 (ASSET)
-  { 0 }, // ASSET_CRASH_1 (ASSET)
   { 0 }, // ASSET_RIDE_1 (ASSET)
-  { 0 }, // ASSET_SPLASH_1 (ASSET)
-  { 0 }, // ASSET_BASS_1 (ASSET)
-  { 82.4f, 0.50f, 1.0f, 0.01f, 0.0f, 0.0f, 0.0f, 3, 0.6f, 0.0f, 0.0f }, // NOTE_E2
-  { 98.0f, 0.50f, 1.0f, 0.01f, 0.0f, 0.0f, 0.0f, 3, 0.6f, 0.0f, 0.0f }, // NOTE_G2
-  { 73.4f, 0.50f, 1.0f, 0.01f, 0.0f, 0.0f, 0.0f, 3, 0.6f, 0.0f, 0.0f }, // NOTE_D2
-  { 65.4f, 0.50f, 1.0f, 0.01f, 0.0f, 0.0f, 0.0f, 3, 0.6f, 0.0f, 0.0f }, // NOTE_C2
 };
-const uint32_t g_tracker_samples_count = 19;
+const uint32_t g_tracker_samples_count = 6;
 
 const AssetId g_tracker_sample_assets[] = {
   AssetId::ASSET_KICK_1,
   AssetId::ASSET_KICK_2,
-  AssetId::ASSET_KICK_2,
   AssetId::ASSET_SNARE_1,
   AssetId::ASSET_SNARE_2,
   AssetId::ASSET_SNARE_3,
-  AssetId::ASSET_SNARE_3,
-  AssetId::ASSET_HIHAT_1,
-  AssetId::ASSET_HIHAT_2,
-  AssetId::ASSET_HIHAT_3,
-  AssetId::ASSET_HIHAT_3,
-  AssetId::ASSET_CRASH_1,
   AssetId::ASSET_RIDE_1,
-  AssetId::ASSET_SPLASH_1,
-  AssetId::ASSET_BASS_1,
-  AssetId::ASSET_LAST_ID,
-  AssetId::ASSET_LAST_ID,
-  AssetId::ASSET_LAST_ID,
-  AssetId::ASSET_LAST_ID,
 };
 
-static const TrackerEvent PATTERN_EVENTS_kick_basic[] = {
-  { 0.0f, 0, 1.0f, 0.0f },
-  { 2.0f, 0, 1.0f, 0.0f },
-  { 2.5f, 2, 0.7f, -0.2f },
-};
-static const TrackerEvent PATTERN_EVENTS_kick_varied[] = {
-  { 0.0f, 2, 1.0f, 0.0f },
-  { 2.0f, 0, 0.9f, 0.0f },
-  { 2.5f, 0, 0.7f, 0.2f },
+static const TrackerEvent PATTERN_EVENTS_kick_1[] = {
+  { 0.00f, 0, 1.0f, 0.0f },
+  { 0.50f, 0, 1.0f, 0.0f },
 };
-static const TrackerEvent PATTERN_EVENTS_kick_dense[] = {
-  { 0.0f, 0, 1.0f, 0.0f },
-  { 0.5f, 2, 0.6f, -0.2f },
-  { 1.0f, 0, 0.9f, 0.0f },
-  { 1.5f, 2, 0.6f, 0.2f },
-  { 2.0f, 0, 1.0f, 0.0f },
-  { 2.5f, 2, 0.6f, -0.2f },
-  { 3.0f, 0, 0.9f, 0.0f },
-  { 3.5f, 2, 0.6f, 0.2f },
+static const TrackerEvent PATTERN_EVENTS_kick_2[] = {
+  { 0.00f, 1, 1.0f, 0.0f },
+  { 0.50f, 1, 1.0f, 0.0f },
 };
-static const TrackerEvent PATTERN_EVENTS_snare_basic[] = {
-  { 1.0f, 3, 1.1f, 0.1f },
-  { 3.0f, 3, 1.1f, 0.1f },
+static const TrackerEvent PATTERN_EVENTS_snare_1[] = {
+  { 0.25f, 2, 1.0f, 0.0f },
+  { 0.75f, 2, 1.0f, 0.0f },
 };
-static const TrackerEvent PATTERN_EVENTS_snare_varied[] = {
-  { 1.0f, 4, 1.0f, -0.1f },
-  { 3.0f, 0, 1.1f, 0.1f },
+static const TrackerEvent PATTERN_EVENTS_snare_2[] = {
+  { 0.25f, 3, 1.0f, 0.0f },
+  { 0.75f, 3, 1.0f, 0.0f },
 };
-static const TrackerEvent PATTERN_EVENTS_snare_dense[] = {
-  { 0.5f, 6, 0.9f, 0.0f },
-  { 1.0f, 3, 1.1f, 0.1f },
-  { 1.5f, 0, 0.9f, 0.0f },
-  { 2.5f, 6, 0.9f, 0.0f },
-  { 3.0f, 4, 1.0f, 0.1f },
-  { 3.5f, 0, 0.9f, 0.0f },
-};
-static const TrackerEvent PATTERN_EVENTS_hihat_basic[] = {
-  { 0.0f, 8, 0.7f, -0.3f },
-  { 0.5f, 7, 0.3f, 0.3f },
-  { 1.0f, 8, 0.7f, -0.3f },
-  { 1.5f, 7, 0.3f, 0.3f },
-  { 2.0f, 8, 0.7f, -0.3f },
-  { 2.5f, 7, 0.3f, 0.3f },
-  { 3.0f, 8, 0.7f, -0.3f },
-  { 3.5f, 7, 0.3f, 0.3f },
-};
-static const TrackerEvent PATTERN_EVENTS_hihat_varied[] = {
-  { 0.0f, 10, 0.7f, -0.3f },
-  { 0.5f, 7, 0.3f, 0.3f },
-  { 1.0f, 0, 0.6f, -0.2f },
-  { 1.5f, 7, 0.3f, 0.3f },
-  { 2.0f, 10, 0.7f, -0.3f },
-  { 2.5f, 7, 0.3f, 0.3f },
-  { 3.0f, 0, 0.6f, -0.2f },
-  { 3.5f, 7, 0.3f, 0.3f },
-};
-static const TrackerEvent PATTERN_EVENTS_crash[] = {
-  { 0.0f, 11, 0.9f, 0.0f },
+static const TrackerEvent PATTERN_EVENTS_snare_3[] = {
+  { 0.25f, 4, 1.0f, 0.0f },
+  { 0.75f, 4, 1.0f, 0.0f },
 };
 static const TrackerEvent PATTERN_EVENTS_ride[] = {
-  { 0.0f, 12, 0.8f, 0.2f },
-};
-static const TrackerEvent PATTERN_EVENTS_ride_fast[] = {
-  { 0.0f, 12, 0.8f, 0.2f },
-  { 0.5f, 12, 0.6f, 0.2f },
-  { 1.0f, 12, 0.8f, 0.2f },
-  { 1.5f, 12, 0.6f, 0.2f },
-  { 2.0f, 12, 0.8f, 0.2f },
-  { 2.5f, 12, 0.6f, 0.2f },
-  { 3.0f, 12, 0.8f, 0.2f },
-  { 3.5f, 12, 0.6f, 0.2f },
-};
-static const TrackerEvent PATTERN_EVENTS_splash[] = {
-  { 0.0f, 13, 0.7f, -0.2f },
-};
-static const TrackerEvent PATTERN_EVENTS_bass_e_soft[] = {
-  { 0.0f, 15, 0.4f, 0.0f },
-  { 2.0f, 15, 0.3f, 0.0f },
-};
-static const TrackerEvent PATTERN_EVENTS_bass_e[] = {
-  { 0.0f, 15, 0.5f, 0.0f },
-  { 1.0f, 15, 0.4f, 0.0f },
-  { 2.0f, 15, 0.5f, 0.0f },
-  { 2.5f, 15, 0.3f, 0.0f },
-  { 3.0f, 15, 0.4f, 0.0f },
-};
-static const TrackerEvent PATTERN_EVENTS_bass_eg[] = {
-  { 0.0f, 15, 0.5f, 0.0f },
-  { 1.0f, 15, 0.4f, 0.0f },
-  { 2.0f, 16, 0.5f, 0.0f },
-  { 3.0f, 16, 0.4f, 0.0f },
-};
-static const TrackerEvent PATTERN_EVENTS_bass_progression[] = {
-  { 0.0f, 15, 0.5f, 0.0f },
-  { 1.0f, 17, 0.4f, 0.0f },
-  { 2.0f, 18, 0.5f, 0.0f },
-  { 3.0f, 16, 0.4f, 0.0f },
-};
-static const TrackerEvent PATTERN_EVENTS_bass_synco_1[] = {
-  { 0.0f, 15, 0.6f, 0.0f },
-  { 0.2f, 15, 0.5f, 0.1f },
-  { 0.8f, 15, 0.6f, -0.1f },
-  { 1.5f, 15, 0.5f, 0.0f },
-  { 2.0f, 15, 0.6f, 0.0f },
-  { 2.8f, 16, 0.6f, 0.1f },
-  { 3.2f, 15, 0.5f, 0.0f },
-};
-static const TrackerEvent PATTERN_EVENTS_bass_synco_2[] = {
-  { 0.0f, 15, 0.6f, 0.0f },
-  { 0.5f, 17, 0.6f, -0.1f },
-  { 1.2f, 15, 0.5f, 0.1f },
-  { 1.8f, 17, 0.5f, 0.0f },
-  { 2.0f, 18, 0.6f, 0.0f },
-  { 2.5f, 15, 0.5f, 0.1f },
-  { 3.0f, 16, 0.6f, 0.0f },
-  { 3.5f, 15, 0.5f, -0.1f },
-};
-static const TrackerEvent PATTERN_EVENTS_bass_synco_3[] = {
-  { 0.0f, 15, 0.6f, 0.0f },
-  { 0.2f, 15, 0.5f, 0.0f },
-  { 0.5f, 15, 0.6f, 0.1f },
-  { 1.0f, 16, 0.6f, 0.0f },
-  { 1.5f, 15, 0.5f, -0.1f },
-  { 2.2f, 17, 0.6f, 0.0f },
-  { 2.8f, 15, 0.5f, 0.1f },
-  { 3.5f, 15, 0.6f, 0.0f },
+  { 0.00f, 5, 0.7f, 0.2f },
+  { 0.25f, 5, 0.6f, 0.2f },
+  { 0.50f, 5, 0.7f, 0.2f },
+  { 0.75f, 5, 0.6f, 0.2f },
 };
 
 const TrackerPattern g_tracker_patterns[] = {
-  { PATTERN_EVENTS_kick_basic, 3, 4.0f },  // kick_basic
-  { PATTERN_EVENTS_kick_varied, 3, 4.0f },  // kick_varied
-  { PATTERN_EVENTS_kick_dense, 8, 4.0f },  // kick_dense
-  { PATTERN_EVENTS_snare_basic, 2, 4.0f },  // snare_basic
-  { PATTERN_EVENTS_snare_varied, 2, 4.0f },  // snare_varied
-  { PATTERN_EVENTS_snare_dense, 6, 4.0f },  // snare_dense
-  { PATTERN_EVENTS_hihat_basic, 8, 4.0f },  // hihat_basic
-  { PATTERN_EVENTS_hihat_varied, 8, 4.0f },  // hihat_varied
-  { PATTERN_EVENTS_crash, 1, 4.0f },  // crash
-  { PATTERN_EVENTS_ride, 1, 4.0f },  // ride
-  { PATTERN_EVENTS_ride_fast, 8, 4.0f },  // ride_fast
-  { PATTERN_EVENTS_splash, 1, 4.0f },  // splash
-  { PATTERN_EVENTS_bass_e_soft, 2, 4.0f },  // bass_e_soft
-  { PATTERN_EVENTS_bass_e, 5, 4.0f },  // bass_e
-  { PATTERN_EVENTS_bass_eg, 4, 4.0f },  // bass_eg
-  { PATTERN_EVENTS_bass_progression, 4, 4.0f },  // bass_progression
-  { PATTERN_EVENTS_bass_synco_1, 7, 4.0f },  // bass_synco_1
-  { PATTERN_EVENTS_bass_synco_2, 8, 4.0f },  // bass_synco_2
-  { PATTERN_EVENTS_bass_synco_3, 8, 4.0f },  // bass_synco_3
+  { PATTERN_EVENTS_kick_1, 2, 1.00f },  // kick_1
+  { PATTERN_EVENTS_kick_2, 2, 1.00f },  // kick_2
+  { PATTERN_EVENTS_snare_1, 2, 1.00f },  // snare_1
+  { PATTERN_EVENTS_snare_2, 2, 1.00f },  // snare_2
+  { PATTERN_EVENTS_snare_3, 2, 1.00f },  // snare_3
+  { PATTERN_EVENTS_ride, 4, 1.00f },  // ride
 };
-const uint32_t g_tracker_patterns_count = 19;
+const uint32_t g_tracker_patterns_count = 6;
 
 static const TrackerPatternTrigger SCORE_TRIGGERS[] = {
-  { 0.0f, 8 },
   { 0.0f, 0 },
-  { 0.0f, 6 },
+  { 0.0f, 2 },
+  { 0.5f, 0 },
+  { 0.5f, 2 },
+  { 1.0f, 0 },
+  { 1.0f, 2 },
+  { 1.5f, 0 },
+  { 1.5f, 2 },
   { 2.0f, 0 },
   { 2.0f, 3 },
-  { 2.0f, 6 },
-  { 4.0f, 9 },
+  { 2.5f, 0 },
+  { 2.5f, 3 },
+  { 3.0f, 0 },
+  { 3.0f, 3 },
+  { 3.5f, 0 },
+  { 3.5f, 3 },
   { 4.0f, 1 },
-  { 4.0f, 3 },
-  { 4.0f, 7 },
+  { 4.0f, 4 },
+  { 4.5f, 1 },
+  { 4.5f, 4 },
+  { 5.0f, 1 },
+  { 5.0f, 4 },
+  { 5.5f, 1 },
+  { 5.5f, 4 },
   { 6.0f, 1 },
-  { 6.0f, 4 },
-  { 6.0f, 7 },
-  { 8.0f, 11 },
+  { 6.0f, 2 },
+  { 6.5f, 1 },
+  { 6.5f, 2 },
+  { 7.0f, 1 },
+  { 7.0f, 2 },
+  { 7.5f, 1 },
+  { 7.5f, 2 },
   { 8.0f, 0 },
   { 8.0f, 3 },
-  { 8.0f, 6 },
-  { 8.0f, 12 },
+  { 8.0f, 5 },
+  { 8.5f, 0 },
+  { 8.5f, 3 },
+  { 8.5f, 5 },
+  { 9.0f, 0 },
+  { 9.0f, 3 },
+  { 9.0f, 5 },
+  { 9.5f, 0 },
+  { 9.5f, 3 },
+  { 9.5f, 5 },
   { 10.0f, 1 },
   { 10.0f, 4 },
-  { 10.0f, 7 },
-  { 10.0f, 12 },
-  { 12.0f, 9 },
+  { 10.0f, 5 },
+  { 10.5f, 1 },
+  { 10.5f, 4 },
+  { 10.5f, 5 },
+  { 11.0f, 1 },
+  { 11.0f, 4 },
+  { 11.0f, 5 },
+  { 11.5f, 1 },
+  { 11.5f, 4 },
+  { 11.5f, 5 },
   { 12.0f, 0 },
-  { 12.0f, 3 },
-  { 12.0f, 6 },
-  { 12.0f, 13 },
+  { 12.0f, 2 },
+  { 12.0f, 5 },
+  { 12.5f, 0 },
+  { 12.5f, 2 },
+  { 12.5f, 5 },
+  { 13.0f, 0 },
+  { 13.0f, 2 },
+  { 13.0f, 5 },
+  { 13.5f, 0 },
+  { 13.5f, 2 },
+  { 13.5f, 5 },
   { 14.0f, 1 },
-  { 14.0f, 4 },
-  { 14.0f, 7 },
-  { 14.0f, 14 },
-  { 16.0f, 8 },
-  { 16.0f, 2 },
-  { 16.0f, 5 },
-  { 16.0f, 7 },
-  { 16.0f, 13 },
-  { 18.0f, 2 },
-  { 18.0f, 5 },
-  { 18.0f, 6 },
-  { 18.0f, 15 },
-  { 20.0f, 9 },
-  { 20.0f, 2 },
-  { 20.0f, 5 },
-  { 20.0f, 7 },
-  { 20.0f, 13 },
-  { 22.0f, 2 },
-  { 22.0f, 5 },
-  { 22.0f, 6 },
-  { 22.0f, 14 },
-  { 24.0f, 11 },
-  { 24.0f, 2 },
-  { 24.0f, 5 },
-  { 24.0f, 7 },
-  { 24.0f, 15 },
-  { 26.0f, 2 },
-  { 26.0f, 5 },
-  { 26.0f, 6 },
-  { 26.0f, 13 },
-  { 28.0f, 10 },
-  { 28.0f, 0 },
-  { 28.0f, 4 },
-  { 28.0f, 7 },
-  { 28.0f, 14 },
-  { 30.0f, 1 },
-  { 30.0f, 3 },
-  { 30.0f, 6 },
-  { 30.0f, 15 },
-  { 31.0f, 6 },
-  { 32.0f, 8 },
-  { 32.0f, 10 },
-  { 32.0f, 2 },
-  { 32.0f, 5 },
-  { 32.0f, 7 },
-  { 32.0f, 16 },
-  { 34.0f, 10 },
-  { 34.0f, 2 },
-  { 34.0f, 5 },
-  { 34.0f, 6 },
-  { 34.0f, 17 },
-  { 36.0f, 10 },
-  { 36.0f, 2 },
-  { 36.0f, 5 },
-  { 36.0f, 7 },
-  { 36.0f, 18 },
-  { 38.0f, 8 },
+  { 14.0f, 3 },
+  { 14.0f, 5 },
+  { 14.5f, 1 },
+  { 14.5f, 3 },
+  { 14.5f, 5 },
+  { 15.0f, 1 },
+  { 15.0f, 3 },
+  { 15.0f, 5 },
+  { 15.5f, 1 },
+  { 15.5f, 3 },
+  { 15.5f, 5 },
 };
 
 const TrackerScore g_tracker_score = {
-  SCORE_TRIGGERS, 85, 120.0f
+  SCORE_TRIGGERS, 80, 120.0f
 };
 
 // ============================================================
 // RESOURCE USAGE ANALYSIS (for synth.h configuration)
 // ============================================================
-// Total samples: 19 (15 assets + 4 generated notes)
-// Max simultaneous pattern triggers: 6
-// Estimated max polyphony: 24 voices
+// Total samples: 6 (6 assets + 0 generated notes)
+// Max simultaneous pattern triggers: 3
+// Estimated max polyphony: 6 voices
 //
 // REQUIRED (minimum to avoid pool exhaustion):
-//   MAX_VOICES: 24
-//   MAX_SPECTROGRAMS: 111 (no caching)
+//   MAX_VOICES: 6
+//   MAX_SPECTROGRAMS: 6 (no caching)
 //
 // RECOMMENDED (with 50% safety margin):
-//   MAX_VOICES: 48
-//   MAX_SPECTROGRAMS: 166 (no caching)
+//   MAX_VOICES: 12
+//   MAX_SPECTROGRAMS: 9 (no caching)
 //
 // NOTE: With spectrogram caching by note parameters,
-//       MAX_SPECTROGRAMS could be reduced to ~19
+//       MAX_SPECTROGRAMS could be reduced to ~6
 // ============================================================
diff --git a/src/generated/test_demo_music.cc b/src/generated/test_demo_music.cc
index 3fdd2a1..f77984e 100644
--- a/src/generated/test_demo_music.cc
+++ b/src/generated/test_demo_music.cc
@@ -20,36 +20,36 @@ const AssetId g_tracker_sample_assets[] = {
 };
 
 static const TrackerEvent PATTERN_EVENTS_drums_basic[] = {
-  { 0.0f, 0, 1.0f, 0.0f },
-  { 0.0f, 3, 0.5f, 0.0f },
-  { 1.0f, 1, 0.9f, 0.0f },
-  { 2.0f, 0, 1.0f, 0.0f },
-  { 3.0f, 1, 0.9f, 0.0f },
+  { 0.00f, 0, 1.0f, 0.0f },
+  { 0.00f, 3, 0.5f, 0.0f },
+  { 0.25f, 1, 0.9f, 0.0f },
+  { 0.50f, 0, 1.0f, 0.0f },
+  { 0.75f, 1, 0.9f, 0.0f },
 };
 static const TrackerEvent PATTERN_EVENTS_drums_with_crash[] = {
-  { 0.0f, 0, 1.0f, 0.0f },
-  { 0.0f, 2, 0.9f, 0.0f },
-  { 0.0f, 3, 0.5f, 0.0f },
-  { 1.0f, 1, 0.9f, 0.0f },
-  { 2.0f, 0, 1.0f, 0.0f },
-  { 3.0f, 1, 0.9f, 0.0f },
+  { 0.00f, 0, 1.0f, 0.0f },
+  { 0.00f, 2, 0.9f, 0.0f },
+  { 0.00f, 3, 0.5f, 0.0f },
+  { 0.25f, 1, 0.9f, 0.0f },
+  { 0.50f, 0, 1.0f, 0.0f },
+  { 0.75f, 1, 0.9f, 0.0f },
 };
 
 const TrackerPattern g_tracker_patterns[] = {
-  { PATTERN_EVENTS_drums_basic, 5, 4.0f },  // drums_basic
-  { PATTERN_EVENTS_drums_with_crash, 6, 4.0f },  // drums_with_crash
+  { PATTERN_EVENTS_drums_basic, 5, 1.00f },  // drums_basic
+  { PATTERN_EVENTS_drums_with_crash, 6, 1.00f },  // drums_with_crash
 };
 const uint32_t g_tracker_patterns_count = 2;
 
 static const TrackerPatternTrigger SCORE_TRIGGERS[] = {
   { 0.0f, 0 },
-  { 2.0f, 0 },
-  { 4.0f, 1 },
-  { 6.0f, 0 },
-  { 8.0f, 0 },
-  { 10.0f, 0 },
-  { 12.0f, 1 },
-  { 14.0f, 0 },
+  { 1.0f, 0 },
+  { 2.0f, 1 },
+  { 3.0f, 0 },
+  { 4.0f, 0 },
+  { 5.0f, 0 },
+  { 6.0f, 1 },
+  { 7.0f, 0 },
 };
 
 const TrackerScore g_tracker_score = {
diff --git a/src/gpu/effects/flash_effect.cc b/src/gpu/effects/flash_effect.cc
index 217a7bb..5aebe2d 100644
--- a/src/gpu/effects/flash_effect.cc
+++ b/src/gpu/effects/flash_effect.cc
@@ -15,7 +15,7 @@ FlashEffect::FlashEffect(const GpuContext& ctx)
     struct Uniforms {
       flash_intensity: f32,
-      _pad0: f32,
+      intensity: f32,
       _pad1: f32,
       _pad2: f32,
     };
@@ -42,7 +42,9 @@ FlashEffect::FlashEffect(const GpuContext& ctx)
      let color = textureSample(inputTexture, inputSampler, input.uv);
      // Add white flash: blend towards white based on flash intensity
      let white = vec3<f32>(1.0, 1.0, 1.0);
-     let flashed = mix(color.rgb, white, uniforms.flash_intensity);
+     let green = vec3<f32>(0.0, 1.0, 0.0);
+     var flashed = mix(color.rgb, green, uniforms.intensity);
+     if (input.uv.y > .5) { flashed = mix(color.rgb, white, uniforms.flash_intensity); }
      return vec4<f32>(flashed, color.a);
    }
  )";
@@ -68,9 +70,9 @@ void FlashEffect::render(WGPURenderPassEncoder pass, float time, float beat,
   }
 
   // Exponential decay
-  flash_intensity_ *= 0.85f;
+  flash_intensity_ *= 0.98f;
 
-  float uniforms[4] = {flash_intensity_, 0.0f, 0.0f, 0.0f};
+  float uniforms[4] = {flash_intensity_, intensity, 0.0f, 0.0f};
   wgpuQueueWriteBuffer(ctx_.queue, uniforms_.buffer, 0, uniforms,
                        sizeof(uniforms));
   wgpuRenderPassEncoderSetPipeline(pass, pipeline_);
diff --git a/src/gpu/effects/post_process_helper.cc b/src/gpu/effects/post_process_helper.cc
index db89d77..0a2ac22 100644
--- a/src/gpu/effects/post_process_helper.cc
+++ b/src/gpu/effects/post_process_helper.cc
@@ -1,6 +1,7 @@
 // This file is part of the 64k demo project.
 // It implements helper functions for post-processing effects.
 
+#include "post_process_helper.h"
 #include "../demo_effects.h"
 #include "gpu/gpu.h"
 #include <cstring>
@@ -18,15 +19,15 @@ WGPURenderPipeline create_post_process_pipeline(WGPUDevice device,
       wgpuDeviceCreateShaderModule(device, &shader_desc);
 
   WGPUBindGroupLayoutEntry bgl_entries[3] = {};
-  bgl_entries[0].binding = 0;
+  bgl_entries[0].binding = PP_BINDING_SAMPLER;
   bgl_entries[0].visibility = WGPUShaderStage_Fragment;
   bgl_entries[0].sampler.type = WGPUSamplerBindingType_Filtering;
-  bgl_entries[1].binding = 1;
+  bgl_entries[1].binding = PP_BINDING_TEXTURE;
   bgl_entries[1].visibility = WGPUShaderStage_Fragment;
   bgl_entries[1].texture.sampleType = WGPUTextureSampleType_Float;
   bgl_entries[1].texture.viewDimension = WGPUTextureViewDimension_2D;
-  bgl_entries[2].binding = 2;
-  bgl_entries[2].visibility = WGPUShaderStage_Fragment;
+  bgl_entries[2].binding = PP_BINDING_UNIFORMS;
+  bgl_entries[2].visibility = WGPUShaderStage_Vertex | WGPUShaderStage_Fragment;
   bgl_entries[2].buffer.type = WGPUBufferBindingType_Uniform;
 
   WGPUBindGroupLayoutDescriptor bgl_desc = {};
@@ -74,11 +75,11 @@ void pp_update_bind_group(WGPUDevice device, WGPURenderPipeline pipeline,
   sd.maxAnisotropy = 1;
   WGPUSampler sampler = wgpuDeviceCreateSampler(device, &sd);
   WGPUBindGroupEntry bge[3] = {};
-  bge[0].binding = 0;
+  bge[0].binding = PP_BINDING_SAMPLER;
   bge[0].sampler = sampler;
-  bge[1].binding = 1;
+  bge[1].binding = PP_BINDING_TEXTURE;
   bge[1].textureView = input_view;
-  bge[2].binding = 2;
+  bge[2].binding = PP_BINDING_UNIFORMS;
   bge[2].buffer = uniforms.buffer;
   bge[2].size = uniforms.size;
   WGPUBindGroupDescriptor bgd = {
diff --git a/src/gpu/effects/post_process_helper.h b/src/gpu/effects/post_process_helper.h
index 1986ff3..45757cf 100644
--- a/src/gpu/effects/post_process_helper.h
+++ b/src/gpu/effects/post_process_helper.h
@@ -5,7 +5,13 @@
 #include "gpu/gpu.h"
 
+// Standard post-process bind group layout (group 0):
+#define PP_BINDING_SAMPLER 0   // Sampler for input texture
+#define PP_BINDING_TEXTURE 1   // Input texture (previous render pass)
+#define PP_BINDING_UNIFORMS 2  // Custom uniforms buffer
+
 // Helper to create a standard post-processing pipeline
+// Uniforms are accessible to both vertex and fragment shaders
 WGPURenderPipeline create_post_process_pipeline(WGPUDevice device,
                                                 WGPUTextureFormat format,
                                                 const char* shader_code);
diff --git a/src/main.cc b/src/main.cc
index 0e6fd71..89e21f1 100644
--- a/src/main.cc
+++ b/src/main.cc
@@ -115,6 +115,7 @@ int main(int argc, char** argv) {
     } else {
       g_tempo_scale = 1.0f;  // Reset to normal
     }
+    g_tempo_scale = 1.0f;
 
 #if !defined(STRIP_ALL)
     // Debug output when tempo changes significantly
diff --git a/src/test_demo.cc b/src/test_demo.cc
index 9635f88..491968c 100644
--- a/src/test_demo.cc
+++ b/src/test_demo.cc
@@ -23,6 +23,7 @@ class PeakMeterEffect : public PostProcessEffect {
  public:
   PeakMeterEffect(const GpuContext& ctx)
       : PostProcessEffect(ctx) {
+    // Use standard post-process binding macros
     const char* shader_code = R"(
       struct VertexOutput {
         @builtin(position) position: vec4<f32>,
@@ -43,6 +44,7 @@ class PeakMeterEffect : public PostProcessEffect {
       @vertex
       fn vs_main(@builtin(vertex_index) vertexIndex: u32) -> VertexOutput {
         var output: VertexOutput;
+        // Full-screen triangle (required for post-process pass-through)
         var pos = array<vec2<f32>, 3>(
           vec2<f32>(-1.0, -1.0),
           vec2<f32>(3.0, -1.0),
@@ -55,30 +57,23 @@ class PeakMeterEffect : public PostProcessEffect {
 
       @fragment
       fn fs_main(input: VertexOutput) -> @location(0) vec4<f32> {
-        let color = textureSample(inputTexture, inputSampler, input.uv);
-
-        // Draw red horizontal bar in middle of screen
-        // Bar height: 5% of screen height
-        // Bar width: proportional to peak_value (0.0 to 1.0)
-        let bar_height = 0.05;
-        let bar_center_y = 0.5;
-        let bar_y_min = bar_center_y - bar_height * 0.5;
-        let bar_y_max = bar_center_y + bar_height * 0.5;
-
-        // Bar extends from left (0.0) to peak_value position
-        let bar_x_max = uniforms.peak_value;
-
-        // Check if current pixel is inside the bar
+        // Bar dimensions
+        let bar_y_min = 0.005;
+        let bar_y_max = 0.015;
+        let bar_x_min = 0.015;
+        let bar_x_max = 0.250;
         let in_bar_y = input.uv.y >= bar_y_min && input.uv.y <= bar_y_max;
-        let in_bar_x = input.uv.x <= bar_x_max;
+        let in_bar_x = input.uv.x >= bar_x_min && input.uv.x <= bar_x_max;
 
+        // Optimization: Return bar color early (avoids texture sampling for ~5% of pixels)
         if (in_bar_y && in_bar_x) {
-          // Red bar
-          return vec4<f32>(1.0, 0.0, 0.0, 1.0);
-        } else {
-          // Original color
-          return color;
+          let uv_x = (input.uv.x - bar_x_min) / (bar_x_max - bar_x_min);
+          let factor = step(uv_x, uniforms.peak_value);
+          return mix(vec4<f32>(0.0, 0.0, 0.0, 1.0), vec4<f32>(1.0, 0.0, 0.0,1.0), factor);
         }
+
+        // Pass through input texture for rest of screen
+        return textureSample(inputTexture, inputSampler, input.uv);
       }
     )";
@@ -102,7 +97,7 @@ class PeakMeterEffect : public PostProcessEffect {
 
     wgpuRenderPassEncoderSetPipeline(pass, pipeline_);
     wgpuRenderPassEncoderSetBindGroup(pass, 0, bind_group_, 0, nullptr);
-    wgpuRenderPassEncoderDraw(pass, 3, 1, 0, 0);
+    wgpuRenderPassEncoderDraw(pass, 3, 1, 0, 0);  // Full-screen triangle
   }
 };
 
@@ -316,7 +311,9 @@ int main(int argc, char** argv) {
     if (peak_log) {
       if (log_peaks_fine) {
         // Log every frame for fine-grained analysis
-        fprintf(peak_log, "%d %.6f %.6f %d\n", frame_number, audio_time, raw_peak, beat_number);
+        // Use platform_get_time() for high-resolution timestamps (not audio_time which advances in chunks)
+        const double frame_time = platform_get_time();
+        fprintf(peak_log, "%d %.6f %.6f %d\n", frame_number, frame_time, raw_peak, beat_number);
       } else if (beat_number != last_beat_logged) {
         // Log only at beat boundaries
         fprintf(peak_log, "%d %.6f %.6f\n", beat_number, audio_time, raw_peak);
@@ -350,7 +347,6 @@ int main(int argc, char** argv) {
     printf("Peak log written to '%s'\n", log_peaks_file);
   }
 #endif
-  audio_shutdown();
 
   gpu_shutdown();
   platform_shutdown(&platform_state);
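
Editor's note on the timing model this diff relies on: the ring buffer now keeps two monotonic counters, total_read_ (advanced by the device callback) and the new total_written_ (advanced by write()), and audio.cc exposes them as playback time and render time respectively. A hedged sketch of that relationship, assuming interleaved stereo at 32 kHz; the struct and constant names here are illustrative, not the project's actual ones (the real values live in ring_buffer.h):

#include <atomic>
#include <cstdint>

constexpr int kSampleRate = 32000;  // RING_BUFFER_SAMPLE_RATE (assumed)
constexpr int kChannels = 2;        // RING_BUFFER_CHANNELS (assumed)

struct RingBufferClocks {
  std::atomic<int64_t> total_read{0};     // advanced by the audio device callback
  std::atomic<int64_t> total_written{0};  // advanced by synth rendering via write()

  // What audio_get_playback_time() reports: the sample being heard right now.
  float playback_time() const {
    return (float)total_read.load(std::memory_order_acquire) /
           (kSampleRate * kChannels);
  }
  // What the new audio_get_render_time() reports: where writing is happening.
  float render_time() const {
    return (float)total_written.load(std::memory_order_acquire) /
           (kSampleRate * kChannels);
  }
  // Write-ahead margin: the budget available for sample-accurate offsets.
  float write_ahead() const { return render_time() - playback_time(); }
};

Because the tracker computes offsets against the render time, a voice's start_sample_offset simply counts down inside synth_render() and the note starts on its target sample, independent of how far ahead of playback the renderer happens to be.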
