summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    skal <pascal.massimino@gmail.com>    2026-02-13 08:55:18 +0100
committer skal <pascal.massimino@gmail.com>    2026-02-13 08:55:18 +0100
commit 65f6866b985fb3d0993fc2b6798c74015fb4fa6a (patch)
tree   5c2b552bfc7dff5345c23713588462cf15273525
parent 6a726a83d534e338e9badcfc26cb13cbc003967b (diff)
Refactor: Extract duplicate logic in compiler tools
Consolidate repeated timeline/resource analysis code to improve
maintainability and reduce duplication.

seq_compiler.cc changes:
- Extract timeline analysis (max time, sorting) into analyze_timeline()
- Extract sequence end calculation into get_sequence_end()
- Reduces ~45 lines of duplicate code

tracker_compiler.cc changes:
- Extract resource analysis into ResourceAnalysis struct
- Consolidate sample counting and recommendations
- Reduces ~75 lines of duplicate code

Both tools verified with successful builds.

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
-rw-r--r--    tools/seq_compiler.cc        130
-rw-r--r--    tools/tracker_compiler.cc    186
2 files changed, 159 insertions(+), 157 deletions(-)
diff --git a/tools/seq_compiler.cc b/tools/seq_compiler.cc
index daf1294..2448a3b 100644
--- a/tools/seq_compiler.cc
+++ b/tools/seq_compiler.cc
@@ -91,20 +91,55 @@ int calculate_tick_interval(float max_time) {
return 20;
}
-// Analyze effect stacking depth across the timeline
-void analyze_effect_depth(const std::vector<SequenceEntry>& sequences,
- const std::string& demo_end_time,
- float sample_rate = 10.0f) {
- // Find max time for analysis
+// Timeline analysis result: max time and sequences sorted by start time
+struct TimelineMetrics {
+ float max_time;
+ std::vector<SequenceEntry> sorted_sequences;
+};
+
+// Calculate sequence end time (explicit or derived from latest effect)
+float get_sequence_end(const SequenceEntry& seq) {
+ float seq_start = std::stof(seq.start_time);
+ if (seq.end_time != "-1.0") {
+ return seq_start + std::stof(seq.end_time);
+ }
+ float seq_end = seq_start;
+ for (const auto& eff : seq.effects) {
+ seq_end = std::max(seq_end, seq_start + std::stof(eff.end));
+ }
+ return seq_end;
+}
+
+// Analyze timeline: find max time and sort sequences by start time
+TimelineMetrics analyze_timeline(const std::vector<SequenceEntry>& sequences,
+ const std::string& demo_end_time) {
float max_time = demo_end_time.empty() ? 0.0f : std::stof(demo_end_time);
for (const auto& seq : sequences) {
float seq_start = std::stof(seq.start_time);
for (const auto& eff : seq.effects) {
- float eff_end = seq_start + std::stof(eff.end);
- max_time = std::max(max_time, eff_end);
+ max_time = std::max(max_time, seq_start + std::stof(eff.end));
+ }
+ if (seq.end_time != "-1.0") {
+ max_time = std::max(max_time, seq_start + std::stof(seq.end_time));
}
}
+ std::vector<SequenceEntry> sorted = sequences;
+ std::sort(sorted.begin(), sorted.end(),
+ [](const SequenceEntry& a, const SequenceEntry& b) {
+ return std::stof(a.start_time) < std::stof(b.start_time);
+ });
+
+ return {max_time, sorted};
+}
+
+// Analyze effect stacking depth across the timeline
+void analyze_effect_depth(const std::vector<SequenceEntry>& sequences,
+ const std::string& demo_end_time,
+ float sample_rate = 10.0f) {
+ TimelineMetrics metrics = analyze_timeline(sequences, demo_end_time);
+ float max_time = metrics.max_time;
+
if (max_time <= 0.0f) {
std::cout << "\n=== Effect Depth Analysis ===\n";
std::cout << "No effects found in timeline.\n";
@@ -122,10 +157,7 @@ void analyze_effect_depth(const std::vector<SequenceEntry>& sequences,
for (const auto& seq : sequences) {
float seq_start = std::stof(seq.start_time);
- float seq_end = seq_start;
- if (seq.end_time != "-1.0") {
- seq_end = seq_start + std::stof(seq.end_time);
- }
+ float seq_end = get_sequence_end(seq);
for (const auto& eff : seq.effects) {
float eff_start = seq_start + std::stof(eff.start);
@@ -268,18 +300,8 @@ void generate_gantt_chart(const std::string& output_file,
return;
}
- // Find max time for the chart
- float max_time = demo_end_time.empty() ? 0.0f : std::stof(demo_end_time);
- for (const auto& seq : sequences) {
- float seq_start = std::stof(seq.start_time);
- for (const auto& eff : seq.effects) {
- float eff_end = seq_start + std::stof(eff.end);
- max_time = std::max(max_time, eff_end);
- }
- if (seq.end_time != "-1.0") {
- max_time = std::max(max_time, seq_start + std::stof(seq.end_time));
- }
- }
+ TimelineMetrics metrics = analyze_timeline(sequences, demo_end_time);
+ float max_time = metrics.max_time;
// Chart configuration
const int chart_width = 100;
@@ -320,28 +342,11 @@ void generate_gantt_chart(const std::string& output_file,
}
out << "\n\n";
- // Sort sequences by start time for better readability
- std::vector<SequenceEntry> sorted_sequences = sequences;
- std::sort(sorted_sequences.begin(), sorted_sequences.end(),
- [](const SequenceEntry& a, const SequenceEntry& b) {
- return std::stof(a.start_time) < std::stof(b.start_time);
- });
-
// Draw sequences and effects
- for (size_t seq_idx = 0; seq_idx < sorted_sequences.size(); ++seq_idx) {
- const auto& seq = sorted_sequences[seq_idx];
+ for (size_t seq_idx = 0; seq_idx < metrics.sorted_sequences.size(); ++seq_idx) {
+ const auto& seq = metrics.sorted_sequences[seq_idx];
float seq_start = std::stof(seq.start_time);
- float seq_end = seq_start; // Start at sequence start
-
- // Check if sequence has explicit end time
- if (seq.end_time != "-1.0") {
- seq_end = seq_start + std::stof(seq.end_time);
- } else {
- // Calculate implicit end from latest effect
- for (const auto& eff : seq.effects) {
- seq_end = std::max(seq_end, seq_start + std::stof(eff.end));
- }
- }
+ float seq_end = get_sequence_end(seq);
// Draw sequence bar
out << "SEQ@" << seq_start << "s";
@@ -398,7 +403,7 @@ void generate_gantt_chart(const std::string& output_file,
}
// Add separator between sequences
- if (seq_idx < sorted_sequences.size() - 1) {
+ if (seq_idx < metrics.sorted_sequences.size() - 1) {
out << " ";
for (int i = 0; i < chart_width; ++i) {
out << "─";
@@ -429,18 +434,8 @@ void generate_gantt_html(const std::string& output_file,
return;
}
- // Find max time for the chart
- float max_time = demo_end_time.empty() ? 0.0f : std::stof(demo_end_time);
- for (const auto& seq : sequences) {
- float seq_start = std::stof(seq.start_time);
- for (const auto& eff : seq.effects) {
- float eff_end = seq_start + std::stof(eff.end);
- max_time = std::max(max_time, eff_end);
- }
- if (seq.end_time != "-1.0") {
- max_time = std::max(max_time, seq_start + std::stof(seq.end_time));
- }
- }
+ TimelineMetrics metrics = analyze_timeline(sequences, demo_end_time);
+ float max_time = metrics.max_time;
const int svg_width = 1400;
const int row_height = 30;
@@ -513,27 +508,12 @@ void generate_gantt_html(const std::string& output_file,
<< "\" y2=\"" << svg_height - 20 << "\" class=\"time-marker\"/>\n";
}
- // Sort sequences by start time for better readability
- std::vector<SequenceEntry> sorted_sequences = sequences;
- std::sort(sorted_sequences.begin(), sorted_sequences.end(),
- [](const SequenceEntry& a, const SequenceEntry& b) {
- return std::stof(a.start_time) < std::stof(b.start_time);
- });
-
// Draw sequences and effects
int y_offset = margin_top;
- for (size_t seq_idx = 0; seq_idx < sorted_sequences.size(); ++seq_idx) {
- const auto& seq = sorted_sequences[seq_idx];
+ for (size_t seq_idx = 0; seq_idx < metrics.sorted_sequences.size(); ++seq_idx) {
+ const auto& seq = metrics.sorted_sequences[seq_idx];
float seq_start = std::stof(seq.start_time);
- float seq_end = seq_start; // Start at sequence start
-
- if (seq.end_time != "-1.0") {
- seq_end = seq_start + std::stof(seq.end_time);
- } else {
- for (const auto& eff : seq.effects) {
- seq_end = std::max(seq_end, seq_start + std::stof(eff.end));
- }
- }
+ float seq_end = get_sequence_end(seq);
int x1 = margin_left + (int)(seq_start * time_scale);
int x2 = margin_left + (int)(seq_end * time_scale);
@@ -592,7 +572,7 @@ void generate_gantt_html(const std::string& output_file,
}
// Add separator between sequences
- if (seq_idx < sorted_sequences.size() - 1) {
+ if (seq_idx < metrics.sorted_sequences.size() - 1) {
out << " <!-- Separator -->\n";
out << " <line x1=\"" << margin_left << "\" y1=\"" << (y_offset + 5)
<< "\" x2=\"" << (svg_width - 50) << "\" y2=\"" << (y_offset + 5)
diff --git a/tools/tracker_compiler.cc b/tools/tracker_compiler.cc
index 6784209..43b4185 100644
--- a/tools/tracker_compiler.cc
+++ b/tools/tracker_compiler.cc
@@ -116,6 +116,95 @@ struct Trigger {
std::string pattern_name;
};
+// Resource usage analysis for synth configuration
+struct ResourceAnalysis {
+ int asset_sample_count;
+ int generated_sample_count;
+ int max_simultaneous_patterns;
+ int avg_events_per_pattern;
+ int estimated_max_polyphony;
+ int min_spectrograms;
+ int recommended_spectrograms;
+ int recommended_voices;
+};
+
+// Analyze resource requirements from tracker data
+ResourceAnalysis analyze_resources(const std::vector<Sample>& samples,
+ const std::vector<Pattern>& patterns,
+ const std::vector<Trigger>& score) {
+ ResourceAnalysis result = {};
+
+ // Count sample types
+ for (const auto& s : samples) {
+ if (s.type == ASSET) {
+ result.asset_sample_count++;
+ } else {
+ result.generated_sample_count++;
+ }
+ }
+
+ // Calculate maximum simultaneous pattern triggers
+ std::map<float, int> time_pattern_count;
+ for (const auto& t : score) {
+ time_pattern_count[t.time]++;
+ }
+
+ for (const auto& entry : time_pattern_count) {
+ if (entry.second > result.max_simultaneous_patterns) {
+ result.max_simultaneous_patterns = entry.second;
+ }
+ }
+
+ // Calculate average events per pattern
+ int total_events = 0;
+ for (const auto& p : patterns) {
+ total_events += p.events.size();
+ }
+ result.avg_events_per_pattern =
+ patterns.empty() ? 0 : total_events / patterns.size();
+ result.estimated_max_polyphony =
+ result.max_simultaneous_patterns * result.avg_events_per_pattern;
+
+ // Conservative recommendations with 50% safety margin
+ result.min_spectrograms = result.asset_sample_count +
+ (result.generated_sample_count *
+ result.estimated_max_polyphony);
+ result.recommended_spectrograms = (int)(result.min_spectrograms * 1.5f);
+ result.recommended_voices = result.estimated_max_polyphony * 2;
+
+ return result;
+}
+
+// Write resource analysis to output file
+void write_resource_analysis(FILE* out, const ResourceAnalysis& analysis,
+ int total_samples) {
+ fprintf(out, "// ============================================================\n");
+ fprintf(out, "// RESOURCE USAGE ANALYSIS (for synth.h configuration)\n");
+ fprintf(out, "// ============================================================\n");
+ fprintf(out, "// Total samples: %d (%d assets + %d generated notes)\n",
+ total_samples, analysis.asset_sample_count,
+ analysis.generated_sample_count);
+ fprintf(out, "// Max simultaneous pattern triggers: %d\n",
+ analysis.max_simultaneous_patterns);
+ fprintf(out, "// Estimated max polyphony: %d voices\n",
+ analysis.estimated_max_polyphony);
+ fprintf(out, "// \n");
+ fprintf(out, "// REQUIRED (minimum to avoid pool exhaustion):\n");
+ fprintf(out, "// MAX_VOICES: %d\n", analysis.estimated_max_polyphony);
+ fprintf(out, "// MAX_SPECTROGRAMS: %d (no caching)\n",
+ analysis.min_spectrograms);
+ fprintf(out, "// \n");
+ fprintf(out, "// RECOMMENDED (with 50%% safety margin):\n");
+ fprintf(out, "// MAX_VOICES: %d\n", analysis.recommended_voices);
+ fprintf(out, "// MAX_SPECTROGRAMS: %d (no caching)\n",
+ analysis.recommended_spectrograms);
+ fprintf(out, "// \n");
+ fprintf(out, "// NOTE: With spectrogram caching by note parameters,\n");
+ fprintf(out, "// MAX_SPECTROGRAMS could be reduced to ~%d\n",
+ analysis.asset_sample_count + analysis.generated_sample_count);
+ fprintf(out, "// ============================================================\n\n");
+}
+
int main(int argc, char** argv) {
if (argc < 3) {
fprintf(stderr, "Usage: %s <input.track> <output.cc>\n", argv[0]);
@@ -314,102 +403,35 @@ int main(int argc, char** argv) {
fprintf(out_file, " SCORE_TRIGGERS, %zu, %.1ff\n", score.size(), bpm);
fprintf(out_file, "};\n\n");
- // ============================================================================
- // RESOURCE USAGE ANALYSIS
- // ============================================================================
-
- // Count unique samples
- int asset_sample_count = 0;
- int generated_sample_count = 0;
- for (const auto& s : samples) {
- if (s.type == ASSET) {
- asset_sample_count++;
- } else {
- generated_sample_count++;
- }
- }
-
- // Calculate maximum simultaneous pattern triggers
- std::map<float, int> time_pattern_count;
- for (const auto& t : score) {
- time_pattern_count[t.time]++;
- }
+ // Analyze resource requirements
+ ResourceAnalysis analysis = analyze_resources(samples, patterns, score);
- int max_simultaneous_patterns = 0;
- for (const auto& entry : time_pattern_count) {
- if (entry.second > max_simultaneous_patterns) {
- max_simultaneous_patterns = entry.second;
- }
- }
-
- // Calculate maximum polyphony (events per pattern on average)
- int total_events = 0;
- for (const auto& p : patterns) {
- total_events += p.events.size();
- }
- const int avg_events_per_pattern =
- patterns.empty() ? 0 : total_events / patterns.size();
- const int estimated_max_polyphony =
- max_simultaneous_patterns * avg_events_per_pattern;
-
- // Conservative recommendations with safety margins
- // - Each asset sample needs 1 spectrogram slot (shared across all events)
- // - Each generated note needs 1 spectrogram slot PER EVENT (no caching yet)
- // - Add 50% safety margin for peak moments
- const int min_spectrograms =
- asset_sample_count + (generated_sample_count * estimated_max_polyphony);
- const int recommended_spectrograms = (int)(min_spectrograms * 1.5f);
- const int recommended_voices = estimated_max_polyphony * 2;
-
- fprintf(out_file,
- "// ============================================================\n");
- fprintf(out_file, "// RESOURCE USAGE ANALYSIS (for synth.h configuration)\n");
- fprintf(out_file,
- "// ============================================================\n");
- fprintf(out_file, "// Total samples: %d (%d assets + %d generated notes)\n",
- (int)samples.size(), asset_sample_count, generated_sample_count);
- fprintf(out_file, "// Max simultaneous pattern triggers: %d\n",
- max_simultaneous_patterns);
- fprintf(out_file, "// Estimated max polyphony: %d voices\n",
- estimated_max_polyphony);
- fprintf(out_file, "// \n");
- fprintf(out_file, "// REQUIRED (minimum to avoid pool exhaustion):\n");
- fprintf(out_file, "// MAX_VOICES: %d\n", estimated_max_polyphony);
- fprintf(out_file, "// MAX_SPECTROGRAMS: %d (no caching)\n",
- min_spectrograms);
- fprintf(out_file, "// \n");
- fprintf(out_file, "// RECOMMENDED (with 50%% safety margin):\n");
- fprintf(out_file, "// MAX_VOICES: %d\n", recommended_voices);
- fprintf(out_file, "// MAX_SPECTROGRAMS: %d (no caching)\n",
- recommended_spectrograms);
- fprintf(out_file, "// \n");
- fprintf(out_file, "// NOTE: With spectrogram caching by note parameters,\n");
- fprintf(out_file, "// MAX_SPECTROGRAMS could be reduced to ~%d\n",
- asset_sample_count + generated_sample_count);
- fprintf(
- out_file,
- "// ============================================================\n\n");
+ // Write analysis to output file
+ write_resource_analysis(out_file, analysis, samples.size());
fclose(out_file);
+ // Print compilation summary
printf("Tracker compilation successful.\n");
printf(" Patterns: %zu\n", patterns.size());
printf(" Score triggers: %zu\n", score.size());
printf(" Samples: %d (%d assets + %d generated)\n", (int)samples.size(),
- asset_sample_count, generated_sample_count);
- printf(" Max simultaneous patterns: %d\n", max_simultaneous_patterns);
- printf(" Estimated max polyphony: %d voices\n", estimated_max_polyphony);
+ analysis.asset_sample_count, analysis.generated_sample_count);
+ printf(" Max simultaneous patterns: %d\n",
+ analysis.max_simultaneous_patterns);
+ printf(" Estimated max polyphony: %d voices\n",
+ analysis.estimated_max_polyphony);
printf("\n");
printf("RESOURCE REQUIREMENTS:\n");
- printf(" Required MAX_VOICES: %d\n", estimated_max_polyphony);
+ printf(" Required MAX_VOICES: %d\n", analysis.estimated_max_polyphony);
printf(" Required MAX_SPECTROGRAMS: %d (without caching)\n",
- min_spectrograms);
+ analysis.min_spectrograms);
printf(" Recommended MAX_VOICES: %d (with safety margin)\n",
- recommended_voices);
+ analysis.recommended_voices);
printf(" Recommended MAX_SPECTROGRAMS: %d (with safety margin)\n",
- recommended_spectrograms);
+ analysis.recommended_spectrograms);
printf(" With caching: MAX_SPECTROGRAMS could be ~%d\n",
- asset_sample_count + generated_sample_count);
+ analysis.asset_sample_count + analysis.generated_sample_count);
return 0;
}