#include "synth.h" #include "audio/window.h" #include #include // For memset struct Voice { bool active; int spectrogram_id; float volume; float pan_left; float pan_right; int current_spectral_frame; int total_spectral_frames; float time_domain_buffer[DCT_SIZE]; int buffer_pos; const volatile float *active_spectral_data; }; static struct { Spectrogram spectrograms[MAX_SPECTROGRAMS]; const volatile float *active_spectrogram_data[MAX_SPECTROGRAMS]; bool spectrogram_registered[MAX_SPECTROGRAMS]; } g_synth_data; static Voice g_voices[MAX_VOICES]; static float g_current_output_peak = 0.0f; // Global peak for visualization void synth_init() { memset(&g_synth_data, 0, sizeof(g_synth_data)); memset(g_voices, 0, sizeof(g_voices)); g_current_output_peak = 0.0f; } void synth_shutdown() { // Nothing to do here since we are not allocating memory } int synth_register_spectrogram(const Spectrogram *spec) { for (int i = 0; i < MAX_SPECTROGRAMS; ++i) { if (!g_synth_data.spectrogram_registered[i]) { g_synth_data.spectrograms[i] = *spec; g_synth_data.active_spectrogram_data[i] = spec->spectral_data_a; g_synth_data.spectrogram_registered[i] = true; return i; } } return -1; // No free slots } void synth_unregister_spectrogram(int spectrogram_id) { if (spectrogram_id >= 0 && spectrogram_id < MAX_SPECTROGRAMS) { g_synth_data.spectrogram_registered[spectrogram_id] = false; } } float *synth_begin_update(int spectrogram_id) { if (spectrogram_id < 0 || spectrogram_id >= MAX_SPECTROGRAMS || !g_synth_data.spectrogram_registered[spectrogram_id]) { return nullptr; } const volatile float *active_ptr = g_synth_data.active_spectrogram_data[spectrogram_id]; if (active_ptr == g_synth_data.spectrograms[spectrogram_id].spectral_data_a) { return g_synth_data.spectrograms[spectrogram_id].spectral_data_b; } else { return g_synth_data.spectrograms[spectrogram_id].spectral_data_a; } } void synth_commit_update(int spectrogram_id) { if (spectrogram_id < 0 || spectrogram_id >= MAX_SPECTROGRAMS || !g_synth_data.spectrogram_registered[spectrogram_id]) { return; } const volatile float *old_active_ptr = g_synth_data.active_spectrogram_data[spectrogram_id]; const float *new_active_ptr = (old_active_ptr == g_synth_data.spectrograms[spectrogram_id].spectral_data_a) ? g_synth_data.spectrograms[spectrogram_id].spectral_data_b : g_synth_data.spectrograms[spectrogram_id].spectral_data_a; // Atomic swap using GCC/Clang builtins for thread safety __atomic_store_n( (const float **)&g_synth_data.active_spectrogram_data[spectrogram_id], new_active_ptr, __ATOMIC_RELEASE); } void synth_trigger_voice(int spectrogram_id, float volume, float pan) { if (spectrogram_id < 0 || spectrogram_id >= MAX_SPECTROGRAMS || !g_synth_data.spectrogram_registered[spectrogram_id]) { return; } for (int i = 0; i < MAX_VOICES; ++i) { if (!g_voices[i].active) { Voice &v = g_voices[i]; v.active = true; v.spectrogram_id = spectrogram_id; v.volume = volume; // Simple linear panning v.pan_left = (pan > 0.0f) ? (1.0f - pan) : 1.0f; v.pan_right = (pan < 0.0f) ? 
(1.0f + pan) : 1.0f; v.current_spectral_frame = 0; v.total_spectral_frames = g_synth_data.spectrograms[spectrogram_id].num_frames; v.buffer_pos = DCT_SIZE; // Force IDCT on first render v.active_spectral_data = g_synth_data.active_spectrogram_data[spectrogram_id]; return; // Voice triggered } } } void synth_render(float *output_buffer, int num_frames) { float window[WINDOW_SIZE]; hamming_window_512(window); float current_peak_in_frame = 0.0f; for (int i = 0; i < num_frames; ++i) { float left_sample = 0.0f; float right_sample = 0.0f; for (int v_idx = 0; v_idx < MAX_VOICES; ++v_idx) { Voice &v = g_voices[v_idx]; if (!v.active) continue; if (v.buffer_pos >= DCT_SIZE) { if (v.current_spectral_frame >= v.total_spectral_frames) { v.active = false; continue; } // Fetch the latest active spectrogram pointer for this voice v.active_spectral_data = g_synth_data.active_spectrogram_data[v.spectrogram_id]; const float *spectral_frame = (const float *)v.active_spectral_data + (v.current_spectral_frame * DCT_SIZE); float windowed_frame[DCT_SIZE]; for (int j = 0; j < DCT_SIZE; ++j) { windowed_frame[j] = spectral_frame[j] * window[j]; } idct_512(windowed_frame, v.time_domain_buffer); v.buffer_pos = 0; v.current_spectral_frame++; } float voice_sample = v.time_domain_buffer[v.buffer_pos] * v.volume; left_sample += voice_sample * v.pan_left; right_sample += voice_sample * v.pan_right; v.buffer_pos++; } output_buffer[i * 2] = left_sample; output_buffer[i * 2 + 1] = right_sample; current_peak_in_frame = fmaxf(current_peak_in_frame, fmaxf(fabsf(left_sample), fabsf(right_sample))); } g_current_output_peak = current_peak_in_frame; } int synth_get_active_voice_count() { int count = 0; for (int i = 0; i < MAX_VOICES; ++i) { if (g_voices[i].active) { count++; } } return count; } float synth_get_output_peak() { return g_current_output_peak; }
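
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the engine). The call sites
// below, including the split between a control thread and an audio callback
// and all variable names, are assumptions about how this API is driven:
//
//   // Control / UI thread:
//   synth_init();
//   int id = synth_register_spectrogram(&spec);
//   float *back = synth_begin_update(id);  // inactive buffer, safe to write
//   // ... fill back with num_frames * DCT_SIZE spectral coefficients ...
//   synth_commit_update(id);               // atomically publish the buffer
//   synth_trigger_voice(id, 0.8f, 0.0f);   // volume 0.8, centered pan
//
//   // Audio thread, once per callback (interleaved stereo output):
//   synth_render(output_buffer, frames_in_callback);
// ---------------------------------------------------------------------------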