// This file is part of the 64k demo project.
// It manages the low-level audio device and high-level audio state.
// Uses an AudioBackend abstraction for testability.
#include "audio.h"
#include "audio_backend.h"
#include "miniaudio_backend.h"
#include "ring_buffer.h"
#include "synth.h"
#include "util/asset_manager.h"
#define MINIAUDIO_IMPLEMENTATION
#include "miniaudio.h"
#include <stdint.h> // int64_t
#include <stdio.h>
// Global ring buffer for audio streaming
static AudioRingBuffer g_ring_buffer;
// Pending write buffer for partially written samples
// Holds at most one chunk (~533 frames per 60 fps tick = 1066 stereo samples)
#define MAX_PENDING_SAMPLES 2048
static float g_pending_buffer[MAX_PENDING_SAMPLES];
static int g_pending_samples = 0; // How many samples are waiting to be written
// Global backend pointer for audio abstraction
static AudioBackend* g_audio_backend = nullptr;
static MiniaudioBackend g_default_backend;
static bool g_using_default_backend = false;
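
// Data-flow sketch: audio_render_ahead() is the producer, pulling samples from
// the synth and writing them into g_ring_buffer; the active backend is
// expected to consume them and feed the audio device (it gets the buffer via
// audio_get_ring_buffer() below). g_pending_buffer only holds the tail of a
// chunk that did not fit into the ring buffer on a given frame.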
#if !defined(STRIP_ALL)
// Allow tests to inject a custom backend.
void audio_set_backend(AudioBackend* backend) {
    g_audio_backend = backend;
}

// Get the current backend (for tests).
AudioBackend* audio_get_backend() {
    return g_audio_backend;
}
#endif /* !defined(STRIP_ALL) */
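
// Usage sketch (illustrative only; MockBackend is a hypothetical test double
// implementing the AudioBackend interface):
//
//     MockBackend mock;
//     audio_set_backend(&mock);    // inject before audio_init(), otherwise
//     audio_init();                // the default miniaudio backend is used
//     audio_render_ahead(0.0f, 1.0f / 60.0f);
//     audio_shutdown();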
// Load a baked spectrogram asset (a SpecHeader followed by float spectral
// data) and register it with the synth. Returns -1 if the asset is missing
// or too small to contain a header.
int register_spec_asset(AssetId id) {
    size_t size;
    const uint8_t* data = GetAsset(id, &size);
    if (!data || size < sizeof(SpecHeader))
        return -1;
    const SpecHeader* header = (const SpecHeader*)data;
    const float* spectral_data = (const float*)(data + sizeof(SpecHeader));
    Spectrogram spec;
    spec.spectral_data_a = spectral_data;
    spec.spectral_data_b = spectral_data; // No double-buffering for static assets
    spec.num_frames = header->num_frames;
    return synth_register_spectrogram(&spec);
}
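
// Example call (illustrative; ASSET_SPEC_PAD is a hypothetical asset id, and
// synth_register_spectrogram() is assumed to return a non-negative index on
// success):
//
//     const int pad_spec = register_spec_asset(ASSET_SPEC_PAD);
//     if (pad_spec < 0) { /* asset missing or malformed */ }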
void audio_init() {
    synth_init();

    // Clear pending buffer
    g_pending_samples = 0;

    // Use default backend if none set
    if (g_audio_backend == nullptr) {
        g_audio_backend = &g_default_backend;
        g_using_default_backend = true;
    }
    g_audio_backend->init();
}
void audio_start() {
    if (g_audio_backend == nullptr) {
        printf("Cannot start: audio not initialized.\n");
        return;
    }
    g_audio_backend->start();
}
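
// Typical startup order (illustrative sketch; the asset id is hypothetical):
//
//     audio_init();                         // synth + backend setup
//     register_spec_asset(ASSET_SPEC_PAD);  // register baked spectrograms
//     audio_start();                        // start the backend / device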
void audio_render_ahead(float music_time, float dt) {
    // Note: music_time is currently unused; playback position is derived from
    // the ring buffer's read counter (see audio_get_playback_time()).

    // Target: maintain the look-ahead buffer.
    const float target_lookahead =
        (float)RING_BUFFER_LOOKAHEAD_MS / 1000.0f;

    // Render in small chunks to keep synth time synchronized with the tracker.
    // Chunk size: one frame's worth of audio (~16.6 ms at 60 fps).
    const int chunk_frames = (int)(dt * RING_BUFFER_SAMPLE_RATE);
    const int chunk_samples = chunk_frames * RING_BUFFER_CHANNELS;
    if (chunk_frames <= 0) return;

    // Keep rendering small chunks until the buffer is full enough.
    while (true) {
        // First, try to flush any pending samples from previous partial writes.
        if (g_pending_samples > 0) {
            const int written = g_ring_buffer.write(g_pending_buffer, g_pending_samples);
            if (written > 0) {
                // Some or all samples were written; move the remainder to the
                // front of the pending buffer.
                const int remaining = g_pending_samples - written;
                if (remaining > 0) {
                    for (int i = 0; i < remaining; ++i) {
                        g_pending_buffer[i] = g_pending_buffer[written + i];
                    }
                }
                g_pending_samples = remaining;
                // Notify backend (for testing/tracking)
#if !defined(STRIP_ALL)
                if (g_audio_backend != nullptr) {
                    g_audio_backend->on_frames_rendered(written / RING_BUFFER_CHANNELS);
                }
#endif
            }
            // If samples are still pending, the ring buffer is full; wait for
            // the audio callback to consume some before rendering more.
            if (g_pending_samples > 0) break;
        }

        // Check current buffer state.
        const int buffered_samples = g_ring_buffer.available_read();
        const float buffered_time =
            (float)buffered_samples / (RING_BUFFER_SAMPLE_RATE * RING_BUFFER_CHANNELS);

        // Stop once the buffer holds enough look-ahead.
        if (buffered_time >= target_lookahead) break;

        // Check available space and render a chunk that fits.
        const int available_space = g_ring_buffer.available_write();
        if (available_space == 0) {
            // Buffer is completely full; wait for the audio callback to consume.
            break;
        }

        // Render the smaller of the desired chunk size and the available space.
        const int actual_samples = (available_space < chunk_samples) ? available_space : chunk_samples;
        const int actual_frames = actual_samples / RING_BUFFER_CHANNELS;

        // Allocate a temporary buffer (stereo interleaved).
        float* temp_buffer = new float[actual_samples];

        // Render audio from the synth (advances synth state incrementally).
        synth_render(temp_buffer, actual_frames);

        // Write to the ring buffer.
        const int written = g_ring_buffer.write(temp_buffer, actual_samples);

        // On a partial write, save the remaining samples to the pending buffer.
        // (A remainder larger than MAX_PENDING_SAMPLES would be dropped, but
        // chunk sizes for a ~60 fps dt stay well below that limit.)
        if (written < actual_samples) {
            const int remaining = actual_samples - written;
            if (remaining <= MAX_PENDING_SAMPLES) {
                for (int i = 0; i < remaining; ++i) {
                    g_pending_buffer[i] = temp_buffer[written + i];
                }
                g_pending_samples = remaining;
            }
        }

        // Notify backend of frames rendered (count frames sent to the synth).
#if !defined(STRIP_ALL)
        if (g_audio_backend != nullptr) {
            g_audio_backend->on_frames_rendered(actual_frames);
        }
#endif
        delete[] temp_buffer;

        // If we couldn't write everything, stop and retry next frame.
        if (written < actual_samples) break;
    }
}
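
// Per-frame usage sketch (illustrative): the main loop keeps the look-ahead
// buffer topped up and can drive visuals from the audio clock.
//
//     audio_render_ahead(music_time, dt);         // dt is the frame delta, ~1/60 s
//     const float t = audio_get_playback_time();  // seconds of audio consumed so far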
float audio_get_playback_time() {
    const int64_t total_samples = g_ring_buffer.get_total_read();
    return (float)total_samples /
           (RING_BUFFER_SAMPLE_RATE * RING_BUFFER_CHANNELS);
}
// Expose the ring buffer for backends.
AudioRingBuffer* audio_get_ring_buffer() {
    return &g_ring_buffer;
}
#if !defined(STRIP_ALL)
// Advance the synth state without producing any device output: audio is
// rendered into a local scratch buffer and discarded.
void audio_render_silent(float duration_sec) {
    const int sample_rate = 32000;
    const int chunk_size = 512;
    int total_frames = (int)(duration_sec * sample_rate);
    float buffer[chunk_size * 2]; // Stereo scratch buffer
    while (total_frames > 0) {
        int frames_to_render =
            (total_frames > chunk_size) ? chunk_size : total_frames;
        synth_render(buffer, frames_to_render);
        total_frames -= frames_to_render;
        // Notify backend of frames rendered (for mock tracking)
        if (g_audio_backend != nullptr) {
            g_audio_backend->on_frames_rendered(frames_to_render);
        }
    }
}
#endif /* !defined(STRIP_ALL) */
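
// Example (illustrative): advance the song by ten seconds without audible
// output, e.g. when seeking or warming up state in a test build:
//
//     audio_render_silent(10.0f);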
void audio_update() {
    // Currently a no-op.
}
void audio_shutdown() {
    if (g_audio_backend != nullptr) {
        g_audio_backend->shutdown();
    }
    synth_shutdown();
    // Clear the backend pointer if using the default backend
    if (g_using_default_backend) {
        g_audio_backend = nullptr;
        g_using_default_backend = false;
    }
}