// This file is part of the 64k demo project.
// It manages the low-level audio device and high-level audio state.
// Now uses backend abstraction for testability.

#include "audio.h"
#include "audio_backend.h"
#include "miniaudio_backend.h"
#include "ring_buffer.h"
#include "synth.h"
#include "util/asset_manager.h"

#define MINIAUDIO_IMPLEMENTATION
#include "miniaudio.h"

#include <stdio.h>  // printf

// Global ring buffer for audio streaming
static AudioRingBuffer g_ring_buffer;

// Pending write buffer for partially written samples
// Maximum size: one chunk (533 frames @ 60fps = 1066 samples stereo)
#define MAX_PENDING_SAMPLES 2048
static float g_pending_buffer[MAX_PENDING_SAMPLES];
static int g_pending_samples = 0;  // How many samples are waiting to be written

// Global backend pointer for audio abstraction
static AudioBackend* g_audio_backend = nullptr;
static MiniaudioBackend g_default_backend;
static bool g_using_default_backend = false;

#if !defined(STRIP_ALL)
// Allow tests to inject a custom backend
void audio_set_backend(AudioBackend* backend) {
    g_audio_backend = backend;
}

// Get current backend (for tests)
AudioBackend* audio_get_backend() {
    return g_audio_backend;
}
#endif /* !defined(STRIP_ALL) */

int register_spec_asset(AssetId id) {
    size_t size;
    const uint8_t* data = GetAsset(id, &size);
    if (!data || size < sizeof(SpecHeader)) return -1;

    const SpecHeader* header = (const SpecHeader*)data;
    const float* spectral_data = (const float*)(data + sizeof(SpecHeader));

    Spectrogram spec;
    spec.spectral_data_a = spectral_data;
    spec.spectral_data_b = spectral_data;  // No double-buffer for static assets
    spec.num_frames = header->num_frames;
    return synth_register_spectrogram(&spec);
}

void audio_init() {
    synth_init();

    // Clear pending buffer
    g_pending_samples = 0;

    // Use default backend if none set
    if (g_audio_backend == nullptr) {
        g_audio_backend = &g_default_backend;
        g_using_default_backend = true;
    }

    g_audio_backend->init();
}

void audio_start() {
    if (g_audio_backend == nullptr) {
        printf("Cannot start: audio not initialized.\n");
        return;
    }
    g_audio_backend->start();
}

void audio_render_ahead(float music_time, float dt) {
    // Target: maintain look-ahead buffer
    const float target_lookahead = (float)RING_BUFFER_LOOKAHEAD_MS / 1000.0f;

    // Render in small chunks to keep synth time synchronized with tracker.
    // Chunk size: one frame's worth of audio (~16.6ms @ 60fps)
    const int chunk_frames = (int)(dt * RING_BUFFER_SAMPLE_RATE);
    const int chunk_samples = chunk_frames * RING_BUFFER_CHANNELS;
    if (chunk_frames <= 0) return;

    // Keep rendering small chunks until the buffer is full enough
    while (true) {
        // First, try to flush any pending samples from previous partial writes
        if (g_pending_samples > 0) {
            const int written = g_ring_buffer.write(g_pending_buffer, g_pending_samples);
            if (written > 0) {
                // Some or all samples were written;
                // move the remaining samples to the front of the buffer
                const int remaining = g_pending_samples - written;
                if (remaining > 0) {
                    for (int i = 0; i < remaining; ++i) {
                        g_pending_buffer[i] = g_pending_buffer[written + i];
                    }
                }
                g_pending_samples = remaining;

                // Notify backend of the frames that made it into the ring buffer
                if (g_audio_backend != nullptr) {
                    g_audio_backend->on_frames_rendered(written / RING_BUFFER_CHANNELS);
                }
            }

            // If samples are still pending, the ring buffer is full - wait for consumption
            if (g_pending_samples > 0) break;
        }

        // Check current buffer state
        const int buffered_samples = g_ring_buffer.available_read();
        const float buffered_time =
            (float)buffered_samples / (RING_BUFFER_SAMPLE_RATE * RING_BUFFER_CHANNELS);

        // Stop if the buffer is full enough
        if (buffered_time >= target_lookahead) break;

        // Check if the buffer has space for this chunk
        const int available_space = g_ring_buffer.available_write();
        if (available_space < chunk_samples) {
            // Buffer is too full, wait for the audio callback to consume more
            break;
        }

        // Allocate temporary buffer (stereo)
        float* temp_buffer = new float[chunk_samples];

        // Render audio from synth (advances synth state incrementally)
        synth_render(temp_buffer, chunk_frames);

        // Write to ring buffer
        const int written = g_ring_buffer.write(temp_buffer, chunk_samples);

        // If partial write, save remaining samples to the pending buffer
        if (written < chunk_samples) {
            const int remaining = chunk_samples - written;
            if (remaining <= MAX_PENDING_SAMPLES) {
                for (int i = 0; i < remaining; ++i) {
                    g_pending_buffer[i] = temp_buffer[written + i];
                }
                g_pending_samples = remaining;
            }
        }

        // Notify backend of frames rendered (count frames sent to synth)
        if (g_audio_backend != nullptr) {
            g_audio_backend->on_frames_rendered(chunk_frames);
        }

        delete[] temp_buffer;

        // If we couldn't write everything, stop and retry next frame
        if (written < chunk_samples) break;
    }
}
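
// Typical per-frame usage of the streaming API above (illustrative sketch only,
// not code from this project; names like running, get_delta_time and music_time
// bookkeeping are hypothetical main-loop details):
//
//   audio_init();
//   audio_start();
//   while (running) {
//       float dt = get_delta_time();           // hypothetical frame timer
//       audio_render_ahead(music_time, dt);    // keep the ring buffer ahead of playback
//       float t = audio_get_playback_time();   // audio clock, e.g. to sync visuals
//       music_time += dt;
//   }
//   audio_shutdown();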

float audio_get_playback_time() {
    const int64_t total_samples = g_ring_buffer.get_total_read();
    return (float)total_samples / (RING_BUFFER_SAMPLE_RATE * RING_BUFFER_CHANNELS);
}

// Expose ring buffer for backends
AudioRingBuffer* audio_get_ring_buffer() {
    return &g_ring_buffer;
}

#if !defined(STRIP_ALL)
void audio_render_silent(float duration_sec) {
    const int sample_rate = 32000;
    const int chunk_size = 512;
    int total_frames = (int)(duration_sec * sample_rate);
    float buffer[chunk_size * 2];  // Stereo

    while (total_frames > 0) {
        int frames_to_render = (total_frames > chunk_size) ? chunk_size : total_frames;
        synth_render(buffer, frames_to_render);
        total_frames -= frames_to_render;

        // Notify backend of frames rendered (for mock tracking)
        if (g_audio_backend != nullptr) {
            g_audio_backend->on_frames_rendered(frames_to_render);
        }
    }
}
#endif /* !defined(STRIP_ALL) */

void audio_update() {
}

void audio_shutdown() {
    if (g_audio_backend != nullptr) {
        g_audio_backend->shutdown();
    }
    synth_shutdown();

    // Clear backend pointer if using default
    if (g_using_default_backend) {
        g_audio_backend = nullptr;
        g_using_default_backend = false;
    }
}
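
// Example of injecting a backend for tests via audio_set_backend() (sketch only;
// it assumes AudioBackend in audio_backend.h exposes init/start/shutdown/
// on_frames_rendered as overridable virtual methods, which is inferred from the
// calls above and may not match the real interface exactly):
//
//   struct CountingBackend : AudioBackend {
//       int frames = 0;
//       void init() override {}
//       void start() override {}
//       void shutdown() override {}
//       void on_frames_rendered(int n) override { frames += n; }
//   };
//
//   CountingBackend backend;
//   audio_set_backend(&backend);
//   audio_init();
//   audio_render_silent(1.0f);   // ~32000 frames at the 32 kHz rate used above
//   audio_shutdown();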