author     skal <pascal.massimino@gmail.com>    2026-02-16 08:26:45 +0100
committer  skal <pascal.massimino@gmail.com>    2026-02-16 08:26:45 +0100
commit     9d1d4df877f96f1970dce2ab30cfae49d3d796e1 (patch)
tree       681298bc46a58890f2b5581c16a05a4272ef4ed3
parent     7947a1230e526eb17ca0252f81d19c18811bd355 (diff)
feat(sequence): Phase 1 - Sequence v2 foundation
- Add Node system with typed buffers (u8x4_norm, f32x4, f16x8, depth24)
- Add NodeRegistry with aliasing support for ping-pong optimization
- Add SequenceV2 base class with DAG execution
- Add EffectV2 base class with multi-input/multi-output
- Add comprehensive tests (5 test cases, all passing)
- Corrected FATAL_CHECK usage (checks ERROR conditions, not success)

Phase 1 complete: Core v2 architecture functional.
Next: Phase 2 compiler (seq_compiler_v2.py)

handoff(Claude): Phase 1 foundation complete, all tests passing (35/35)
-rw-r--r--   cmake/DemoSourceLists.cmake           2
-rw-r--r--   cmake/DemoTests.cmake                 8
-rw-r--r--   doc/SEQUENCE_v2.md                   93
-rw-r--r--   src/gpu/effect_v2.cc                 11
-rw-r--r--   src/gpu/effect_v2.h                  47
-rw-r--r--   src/gpu/sequence_v2.cc              207
-rw-r--r--   src/gpu/sequence_v2.h               107
-rw-r--r--   src/tests/gpu/test_sequence_v2.cc   184
8 files changed, 659 insertions, 0 deletions
diff --git a/cmake/DemoSourceLists.cmake b/cmake/DemoSourceLists.cmake
index 2d6cf42..fd4bdcb 100644
--- a/cmake/DemoSourceLists.cmake
+++ b/cmake/DemoSourceLists.cmake
@@ -29,6 +29,8 @@ set(UTIL_SOURCES src/util/asset_manager.cc src/util/file_watcher.cc)
# Common effect sources (shared between headless and normal modes)
set(COMMON_GPU_EFFECTS
src/gpu/effect.cc
+ src/gpu/sequence_v2.cc
+ src/gpu/effect_v2.cc
src/effects/heptagon_effect.cc
src/effects/particles_effect.cc
src/effects/passthrough_effect.cc
diff --git a/cmake/DemoTests.cmake b/cmake/DemoTests.cmake
index b511620..3e54ea2 100644
--- a/cmake/DemoTests.cmake
+++ b/cmake/DemoTests.cmake
@@ -234,6 +234,14 @@ add_demo_test(test_gpu_composite GpuCompositeTest gpu
target_link_libraries(test_gpu_composite PRIVATE 3d gpu audio procedural util ${DEMO_LIBS})
demo_add_asset_deps(test_gpu_composite shaders)
+# Sequence v2 Test (Foundation)
+add_demo_test(test_sequence_v2 SequenceV2Test gpu
+ src/tests/gpu/test_sequence_v2.cc
+ src/tests/common/webgpu_test_fixture.cc
+ ${PLATFORM_SOURCES}
+ ${GEN_DEMO_CC})
+target_link_libraries(test_sequence_v2 PRIVATE 3d gpu audio procedural util ${DEMO_LIBS})
+demo_add_asset_deps(test_sequence_v2 shaders)
# Subsystem test targets
add_custom_target(run_audio_tests
diff --git a/doc/SEQUENCE_v2.md b/doc/SEQUENCE_v2.md
new file mode 100644
index 0000000..74692a6
--- /dev/null
+++ b/doc/SEQUENCE_v2.md
@@ -0,0 +1,93 @@
+# Sequence v2 System Documentation
+
+This document describes the high-level ideas for an improved Sequence + Effects system "version 2".
+
+## Goal
+
+* more flexibility and less boilerplate
+* fully configurable by text (.seq)
+* no compatibility with the previous version of sequences
+* sequences can be stacked (with priority) and arranged in a timeline to create a demo
+
+## Structure
+
+### Sequence
+
+A 'Sequence' unit consists of:
+ * A predeclared set of named assets and Effects that are used during the sequence
+ * a start time and an end time
+ * a globally-visible set of internal time-dependent params (UniformsSequenceParams) derived from GlobalParams
+ * a globally-visible set of fixed param values (UniformsSequenceStaticParams) used to configure effects
+ * an input in RGBAu8 format ("Source"). Black by default (RGBA=0,0,0,255). Buffer format: u8x4_norm (see below)
+ * an output in RGBAu8 format ("Sink"). This output goes to the next sequence in the stack or to the physical display
+ * the sink of a Sequence is the source of the next Sequence in the stack, or goes to the screen if it's the last sequence in the stack
+ * three programmatic sections: 'Preprocess', 'EffectFlow' (time-ordered set of effects), 'Postprocess'
+ * a set of internal buffers (Nodes = compute buffers or textures, used as inputs or render targets by the effect stack)
+ * these Nodes are visible to the final post-process effect
+ * Nodes persist for the whole sequence
+ * at compile time, one can detect which Nodes can actually be ping-pong buffers
+ * each Sequence's preprocess and postprocess are unique and attached to the sequence
+
+### Effects
+
+The effect unit consists of:
+ * An 'Effect' within the Sequence uses the UniformsSequenceParams and GlobalParams to update its internal state
+ * Receives updated InputNodes from the previous effects (an InputNode = buffer, texture, g-buffer, ...)
+ * processes them with a shader or C++ code (3d engine, etc.)
+ * fills its OutputNode, passed to the next effect or the final postprocessing step
+ * uses the sequence's assets and params only
+ * if needed, an effect predefines an alias of one of its internal buffers as its declared 'postprocess_output' OutputNode
+ * by default, postprocess_output = sequence RGBA Source
+ * Effects are not attached to a particular Sequence, and can be used in any Sequence (see the sketch below)
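+
+A minimal sketch of what an Effect could look like on top of the EffectV2 base class added in this commit. The blur example and the node names ("scene_color", "blurred") are hypothetical, not part of this patch:
+
+```cpp
+// Hypothetical single-input/single-output effect built on EffectV2.
+class BlurEffect : public EffectV2 {
+ public:
+  explicit BlurEffect(const GpuContext& ctx)
+      : EffectV2(ctx, /*inputs=*/{"scene_color"}, /*outputs=*/{"blurred"}) {}
+
+  void render(WGPUCommandEncoder encoder,
+              const UniformsSequenceParams& params,
+              NodeRegistry& nodes) override {
+    // Resolve the Nodes declared by name; the registry owns the textures.
+    WGPUTextureView src = nodes.get_view("scene_color");
+    WGPUTextureView dst = nodes.get_view("blurred");
+    // ... encode the actual blur passes here (shader setup omitted) ...
+    (void)encoder; (void)params; (void)src; (void)dst;
+  }
+};
+```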
+
+### Preprocess:
+ * the preprocessing step sets up the local UniformsSequenceParams (prepares timers, objects' transforms and camera position, fills uniform buffers, etc.)
+ * it's a single function attached to the Sequence unit (virtual method?)
+
+### Postprocess:
+ * the unique (virtual) post-process function is responsible for the final assembly into the OutputNode
+ * it can take any internal Node as input (most likely the last effect's output) and produces the final Sink content
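+
+As a rough sketch, a concrete sequence could override the virtual preprocess()/postprocess() hooks exposed by the SequenceV2 base class added in this commit. The sequence name and the "last_effect_output" node are made up for illustration:
+
+```cpp
+// Hypothetical sequence overriding the pre/post virtual hooks.
+class TunnelSequence : public SequenceV2 {
+ public:
+  using SequenceV2::SequenceV2;
+
+  void preprocess(float seq_time, float beat_time, float beat_phase,
+                  float audio_intensity) override {
+    // Let the base fill UniformsSequenceParams and upload the uniform buffer...
+    SequenceV2::preprocess(seq_time, beat_time, beat_phase, audio_intensity);
+    // ...then update any sequence-local state (camera, transforms, timers, ...).
+  }
+
+  void postprocess(WGPUCommandEncoder encoder) override {
+    // Final assembly: any internal Node can be read to produce the Sink content.
+    WGPUTextureView last = nodes_.get_view("last_effect_output");
+    (void)encoder; (void)last;  // actual composite pass omitted
+  }
+};
+```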
+
+## How it works
+
+### initialization
+
+at initialization time, the sequence knows, from compile time:
+ * the list of assets it needs to be ready at start time
+ * the list of effects needed, their input nodes, their output nodes
+ * the list of internal textures, buffers, render targets and depth buffers it will need for its effect stack to work
+ * which internal Node buffers can actually be ping-pong buffers, to optimize resources
+ * generally speaking, the lifetime of a buffer during a frame is known at compile time and can be optimized
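+
+A sketch, using the NodeRegistry API from this commit, of what that compile-time knowledge could translate to at init time. Node names are illustrative, and this boilerplate is ultimately expected to be emitted by the sequence compiler (Phase 2):
+
+```cpp
+// Illustrative init-time node setup (width/height of 0 fall back to the registry defaults).
+NodeRegistry registry(ctx.device, /*default_width=*/1280, /*default_height=*/720);
+registry.declare_node("scene_color", NodeType::U8X4_NORM, 0, 0);
+registry.declare_node("scene_depth", NodeType::DEPTH24, 0, 0);
+// Lifetime analysis decided these two names can share one texture,
+// so the second one is declared as an alias (ping-pong optimization).
+registry.declare_node("blur_a", NodeType::F32X4, 0, 0);
+registry.declare_aliased_node("blur_b", "blur_a");
+```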
+
+### Flow
+ * preprocess: prepare internal state at time t: UniformsSequenceParams
+ * for each effect:
+ ** bind the InputNodes (previous effects' buffers, compute buffers, etc.)
+ ** run the compute, vertex and fragment passes
+ ** fill its output Nodes
+ * postprocess: turn the pre-declared input Nodes into the unique Sink node, ready for the next sequence, or display
+
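+In terms of the classes added by this commit, one frame of this flow roughly maps to the driver sketch below. The driver itself is not part of this patch, and the submit details are assumptions:
+
+```cpp
+// Rough per-frame driver for a SequenceV2-derived `seq`, given a GpuContext `ctx`.
+seq.preprocess(seq_time, beat_time, beat_phase, audio_intensity);  // fill UniformsSequenceParams
+
+WGPUCommandEncoderDescriptor enc_desc = {};
+WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(ctx.device, &enc_desc);
+
+seq.render_effects(encoder);  // walk the effect DAG in topological order
+seq.postprocess(encoder);     // final assembly into the Sink node
+
+WGPUCommandBuffer cmd = wgpuCommandEncoderFinish(encoder, nullptr);
+wgpuQueueSubmit(ctx.queue, 1, &cmd);
+wgpuCommandBufferRelease(cmd);
+wgpuCommandEncoderRelease(encoder);
+```
+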
+## Requirement
+
+ * unified code to drive the engine flow
+ * textual description of the sequence/effects arrangement (similar to the .seq format)
+ * update the HTML editor tool
+ * unified description of buffers, textures and compute buffers at compile time
+ ** example of a sequence's internal Node declarations (a hypothetical compiler-generated counterpart is sketched at the end of this section):
+ "NODE Name1 u8x16_norm"
+ "NODE Name2 f32x4"
+ "NODE Name3 f16x8"
+ "NODE Name4 u8x4_norm" <- Source/Sink of a sequence, four bytes
+ * each effect
+ ** declares its requirements as input Nodes
+ ** declares its output Nodes
+ ** can access the C++ and WebGPU versions of GlobalParams, UniformsSequenceParams and UniformsSequenceStaticParams
+ ** is not attached to a particular sequence, but can be used in many
+ * validation and optimization at compile time (generating C++ code) by seq_compiler
+ * compilation can generate boilerplate preprocess() / postprocess() functions
+ * no backward compatibility needed. Sequence v1 can go.
+ * the HTML Editor's overall code, look and functionality need to be preserved, though; they just need to be adapted to v2
+ ** basically, the sequence box will need to show input Nodes and output Nodes and have an editable property panel for these
+ ** same for effects: ability to edit their input Node and output Node names
+ * a lot of v1 effects can be deleted (solarized, chroma aberration, etc.): they will be implemented ad-hoc in postprocess() with a single macro call within the final shader
+ * need a minimal migration plan for code.
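+
+As a sketch only (the real seq_compiler_v2.py is Phase 2), the NODE declarations above could be compiled into NodeRegistry boilerplate along these lines. The function name and the text-to-NodeType mapping are assumptions; 'u8x16_norm' has no NodeType in this patch, so Name1 is left out:
+
+```cpp
+// Hypothetical compiler-generated node setup for the example declarations.
+void declare_generated_nodes(NodeRegistry& registry) {
+  registry.declare_node("Name2", NodeType::F32X4, 0, 0);      // "NODE Name2 f32x4"
+  registry.declare_node("Name3", NodeType::F16X8, 0, 0);      // "NODE Name3 f16x8"
+  registry.declare_node("Name4", NodeType::U8X4_NORM, 0, 0);  // "NODE Name4 u8x4_norm" (Source/Sink)
+}
+```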
diff --git a/src/gpu/effect_v2.cc b/src/gpu/effect_v2.cc
new file mode 100644
index 0000000..79a14b4
--- /dev/null
+++ b/src/gpu/effect_v2.cc
@@ -0,0 +1,11 @@
+// EffectV2 implementation
+
+#include "gpu/effect_v2.h"
+#include "util/fatal_error.h"
+
+EffectV2::EffectV2(const GpuContext& ctx, const std::vector<std::string>& inputs,
+ const std::vector<std::string>& outputs)
+ : ctx_(ctx), input_nodes_(inputs), output_nodes_(outputs) {
+ FATAL_CHECK(inputs.empty(), "Effect must have at least one input\n");
+ FATAL_CHECK(outputs.empty(), "Effect must have at least one output\n");
+}
diff --git a/src/gpu/effect_v2.h b/src/gpu/effect_v2.h
new file mode 100644
index 0000000..0d4e18d
--- /dev/null
+++ b/src/gpu/effect_v2.h
@@ -0,0 +1,47 @@
+// EffectV2: Base class for v2 effects with multi-input/multi-output support
+
+#pragma once
+
+#include "gpu/gpu.h"
+#include "gpu/sequence_v2.h"
+#include <string>
+#include <vector>
+
+class NodeRegistry;
+
+class EffectV2 {
+ public:
+ EffectV2(const GpuContext& ctx, const std::vector<std::string>& inputs,
+ const std::vector<std::string>& outputs);
+ virtual ~EffectV2() = default;
+
+ // Optional: Declare temporary nodes (e.g., multi-pass intermediate buffers)
+ virtual void declare_nodes(NodeRegistry& registry) {
+ (void)registry;
+ }
+
+ // Render effect (multi-input/multi-output)
+ virtual void render(WGPUCommandEncoder encoder,
+ const UniformsSequenceParams& params,
+ NodeRegistry& nodes) = 0;
+
+ // Resize notification
+ virtual void resize(int width, int height) {
+ width_ = width;
+ height_ = height;
+ }
+
+ const std::vector<std::string>& input_nodes() const {
+ return input_nodes_;
+ }
+ const std::vector<std::string>& output_nodes() const {
+ return output_nodes_;
+ }
+
+ protected:
+ const GpuContext& ctx_;
+ std::vector<std::string> input_nodes_;
+ std::vector<std::string> output_nodes_;
+ int width_ = 1280;
+ int height_ = 720;
+};
diff --git a/src/gpu/sequence_v2.cc b/src/gpu/sequence_v2.cc
new file mode 100644
index 0000000..c3f9aea
--- /dev/null
+++ b/src/gpu/sequence_v2.cc
@@ -0,0 +1,207 @@
+// Sequence v2 implementation
+
+#include "gpu/sequence_v2.h"
+#include "gpu/effect_v2.h"
+#include "util/fatal_error.h"
+#include <algorithm>
+
+// NodeRegistry implementation
+
+NodeRegistry::NodeRegistry(WGPUDevice device, int default_width,
+ int default_height)
+ : device_(device), default_width_(default_width),
+ default_height_(default_height) {
+ // Reserve source/sink as implicit nodes (managed externally by MainSequence)
+}
+
+NodeRegistry::~NodeRegistry() {
+ for (auto& [name, node] : nodes_) {
+ if (node.view) {
+ wgpuTextureViewRelease(node.view);
+ }
+ for (auto& mip_view : node.mip_views) {
+ wgpuTextureViewRelease(mip_view);
+ }
+ if (node.texture) {
+ wgpuTextureRelease(node.texture);
+ }
+ }
+}
+
+void NodeRegistry::declare_node(const std::string& name, NodeType type,
+ int width, int height) {
+ FATAL_CHECK(nodes_.find(name) != nodes_.end(),
+ "Node already declared: %s\n", name.c_str());
+
+ if (width <= 0)
+ width = default_width_;
+ if (height <= 0)
+ height = default_height_;
+
+ Node node;
+ node.type = type;
+ node.width = width;
+ node.height = height;
+ create_texture(node);
+
+ nodes_[name] = node;
+}
+
+void NodeRegistry::declare_aliased_node(const std::string& name,
+ const std::string& alias_of) {
+ FATAL_CHECK(nodes_.find(alias_of) == nodes_.end(),
+ "Alias target does not exist: %s\n", alias_of.c_str());
+ FATAL_CHECK(aliases_.find(name) != aliases_.end(), "Alias already exists: %s\n",
+ name.c_str());
+
+ aliases_[name] = alias_of;
+}
+
+WGPUTextureView NodeRegistry::get_view(const std::string& name) {
+ // Check aliases first
+ auto alias_it = aliases_.find(name);
+ if (alias_it != aliases_.end()) {
+ return get_view(alias_it->second);
+ }
+
+ auto it = nodes_.find(name);
+ FATAL_CHECK(it == nodes_.end(), "Node not found: %s\n", name.c_str());
+ return it->second.view;
+}
+
+std::vector<WGPUTextureView>
+NodeRegistry::get_output_views(const std::vector<std::string>& names) {
+ std::vector<WGPUTextureView> views;
+ views.reserve(names.size());
+ for (const auto& name : names) {
+ views.push_back(get_view(name));
+ }
+ return views;
+}
+
+void NodeRegistry::resize(int width, int height) {
+ default_width_ = width;
+ default_height_ = height;
+
+ for (auto& [name, node] : nodes_) {
+ // Release old texture
+ if (node.view) {
+ wgpuTextureViewRelease(node.view);
+ }
+ for (auto& mip_view : node.mip_views) {
+ wgpuTextureViewRelease(mip_view);
+ }
+ if (node.texture) {
+ wgpuTextureRelease(node.texture);
+ }
+
+ // Recreate with new dimensions
+ node.width = width;
+ node.height = height;
+ create_texture(node);
+ }
+}
+
+bool NodeRegistry::has_node(const std::string& name) const {
+ return nodes_.find(name) != nodes_.end() ||
+ aliases_.find(name) != aliases_.end();
+}
+
+void NodeRegistry::create_texture(Node& node) {
+ WGPUTextureFormat format;
+ WGPUTextureUsage usage;
+
+ switch (node.type) {
+ case NodeType::U8X4_NORM:
+ format = WGPUTextureFormat_RGBA8Unorm;
+ usage = WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_TextureBinding;
+ break;
+ case NodeType::F32X4:
+ format = WGPUTextureFormat_RGBA32Float;
+ usage = WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_TextureBinding;
+ break;
+ case NodeType::F16X8:
+ format = WGPUTextureFormat_RGBA16Float; // WebGPU doesn't have 8-channel, use RGBA16
+ usage = WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_TextureBinding;
+ break;
+ case NodeType::DEPTH24:
+ format = WGPUTextureFormat_Depth24Plus;
+ usage = WGPUTextureUsage_RenderAttachment;
+ break;
+ case NodeType::COMPUTE_F32:
+ format = WGPUTextureFormat_RGBA32Float;
+ usage = WGPUTextureUsage_StorageBinding | WGPUTextureUsage_TextureBinding;
+ break;
+ }
+
+ WGPUTextureDescriptor desc = {};
+ desc.usage = usage;
+ desc.dimension = WGPUTextureDimension_2D;
+ desc.size = {static_cast<unsigned int>(node.width),
+ static_cast<unsigned int>(node.height), 1};
+ desc.format = format;
+ desc.mipLevelCount = 1;
+ desc.sampleCount = 1;
+
+ node.texture = wgpuDeviceCreateTexture(device_, &desc);
+ FATAL_CHECK(node.texture == nullptr, "Failed to create texture\n");
+
+ WGPUTextureViewDescriptor view_desc = {};
+ view_desc.format = format;
+ view_desc.dimension = WGPUTextureViewDimension_2D;
+ view_desc.baseMipLevel = 0;
+ view_desc.mipLevelCount = 1;
+ view_desc.baseArrayLayer = 0;
+ view_desc.arrayLayerCount = 1;
+ view_desc.aspect = (node.type == NodeType::DEPTH24)
+ ? WGPUTextureAspect_DepthOnly
+ : WGPUTextureAspect_All;
+
+ node.view = wgpuTextureCreateView(node.texture, &view_desc);
+ FATAL_CHECK(node.view == nullptr, "Failed to create texture view\n");
+}
+
+// SequenceV2 implementation
+
+SequenceV2::SequenceV2(const GpuContext& ctx, int width, int height)
+ : ctx_(ctx), width_(width), height_(height),
+ nodes_(ctx.device, width, height) {
+ uniforms_buffer_.init(ctx.device);
+}
+
+void SequenceV2::preprocess(float seq_time, float beat_time, float beat_phase,
+ float audio_intensity) {
+ params_.resolution = {static_cast<float>(width_), static_cast<float>(height_)};
+ params_.aspect_ratio =
+ static_cast<float>(width_) / static_cast<float>(height_);
+ params_.time = seq_time;
+ params_.beat_time = beat_time;
+ params_.beat_phase = beat_phase;
+ params_.audio_intensity = audio_intensity;
+ params_._pad = 0.0f;
+
+ uniforms_buffer_.update(ctx_.queue, params_);
+}
+
+void SequenceV2::postprocess(WGPUCommandEncoder encoder) {
+ (void)encoder;
+ // Default: No-op (last effect writes to sink directly)
+}
+
+void SequenceV2::render_effects(WGPUCommandEncoder encoder) {
+ // Execute DAG in topological order (pre-sorted by compiler)
+ for (const auto& dag_node : effect_dag_) {
+ dag_node.effect->render(encoder, params_, nodes_);
+ }
+}
+
+void SequenceV2::resize(int width, int height) {
+ width_ = width;
+ height_ = height;
+ nodes_.resize(width, height);
+
+ // Notify effects
+ for (auto& dag_node : effect_dag_) {
+ dag_node.effect->resize(width, height);
+ }
+}
diff --git a/src/gpu/sequence_v2.h b/src/gpu/sequence_v2.h
new file mode 100644
index 0000000..9cd93c6
--- /dev/null
+++ b/src/gpu/sequence_v2.h
@@ -0,0 +1,107 @@
+// Sequence v2: Explicit node system with DAG effect routing
+// Replaces implicit framebuffer ping-pong with compile-time optimized nodes
+
+#pragma once
+
+#include "gpu/gpu.h"
+#include "gpu/uniform_helper.h"
+#include "util/mini_math.h"
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+class EffectV2;
+
+enum class NodeType {
+ U8X4_NORM, // RGBAu8 normalized (0-1) - default Source/Sink
+ F32X4, // RGBA float32
+ F16X8, // 8-channel float16
+ DEPTH24, // Depth buffer
+ COMPUTE_F32, // Compute buffer
+};
+
+struct Node {
+ NodeType type;
+ int width;
+ int height;
+ WGPUTexture texture;
+ WGPUTextureView view;
+ std::vector<WGPUTextureView> mip_views; // For multi-target render
+};
+
+struct UniformsSequenceParams {
+ vec2 resolution;
+ float aspect_ratio;
+ float time; // Per-sequence relative time
+ float beat_time; // Musical beats
+ float beat_phase; // Fractional beat 0.0-1.0
+ float audio_intensity;
+ float _pad;
+};
+static_assert(sizeof(UniformsSequenceParams) == 32,
+ "UniformsSequenceParams must be 32 bytes for WGSL alignment");
+
+class NodeRegistry {
+ public:
+ NodeRegistry(WGPUDevice device, int default_width, int default_height);
+ ~NodeRegistry();
+
+ // Declare new node with explicit type/dimensions
+ void declare_node(const std::string& name, NodeType type, int width,
+ int height);
+
+ // Declare aliased node (ping-pong optimization)
+ void declare_aliased_node(const std::string& name,
+ const std::string& alias_of);
+
+ // Retrieve views
+ WGPUTextureView get_view(const std::string& name);
+ std::vector<WGPUTextureView>
+ get_output_views(const std::vector<std::string>& names);
+
+ // Resize all nodes
+ void resize(int width, int height);
+
+ // Check if node exists
+ bool has_node(const std::string& name) const;
+
+ private:
+ WGPUDevice device_;
+ int default_width_;
+ int default_height_;
+ std::map<std::string, Node> nodes_;
+ std::map<std::string, std::string> aliases_; // name -> backing node name
+
+ void create_texture(Node& node);
+};
+
+struct EffectDAGNode {
+ std::shared_ptr<EffectV2> effect;
+ std::vector<std::string> input_nodes;
+ std::vector<std::string> output_nodes;
+ int execution_order; // Topologically sorted
+};
+
+class SequenceV2 {
+ public:
+ SequenceV2(const GpuContext& ctx, int width, int height);
+ virtual ~SequenceV2() = default;
+
+ // Virtual methods (most sequences use defaults)
+ virtual void preprocess(float seq_time, float beat_time, float beat_phase,
+ float audio_intensity);
+ virtual void postprocess(WGPUCommandEncoder encoder);
+ virtual void render_effects(WGPUCommandEncoder encoder);
+
+ void resize(int width, int height);
+
+ protected:
+ const GpuContext& ctx_;
+ int width_;
+ int height_;
+ NodeRegistry nodes_;
+ std::vector<EffectDAGNode> effect_dag_;
+ UniformsSequenceParams params_;
+ UniformBuffer<UniformsSequenceParams> uniforms_buffer_;
+};
diff --git a/src/tests/gpu/test_sequence_v2.cc b/src/tests/gpu/test_sequence_v2.cc
new file mode 100644
index 0000000..54b544e
--- /dev/null
+++ b/src/tests/gpu/test_sequence_v2.cc
@@ -0,0 +1,184 @@
+// Test file for Sequence v2 system
+// Phase 1: Foundation tests (NodeRegistry, SequenceV2 base class)
+
+#include "gpu/sequence_v2.h"
+#include "gpu/effect_v2.h"
+#include "tests/common/webgpu_test_fixture.h"
+#include <cassert>
+#include <cstdio>
+
+// Simple test effect for DAG execution
+class TestEffectV2 : public EffectV2 {
+ public:
+ TestEffectV2(const GpuContext& ctx, const std::vector<std::string>& inputs,
+ const std::vector<std::string>& outputs)
+ : EffectV2(ctx, inputs, outputs), render_called_(false) {
+ }
+
+ void render(WGPUCommandEncoder encoder, const UniformsSequenceParams& params,
+ NodeRegistry& nodes) override {
+ (void)encoder;
+ (void)params;
+ (void)nodes;
+ render_called_ = true;
+ }
+
+ bool was_render_called() const {
+ return render_called_;
+ }
+
+ private:
+ bool render_called_;
+};
+
+// Test: NodeRegistry basic allocation
+void test_node_registry_basic() {
+ WebGPUTestFixture fixture;
+ if (!fixture.init()) {
+ fprintf(stderr, "Skipping test_node_registry_basic (no GPU)\n");
+ return;
+ }
+
+ NodeRegistry registry(fixture.ctx().device, 1280, 720);
+
+ // Declare node
+ registry.declare_node("test_node", NodeType::U8X4_NORM, 1280, 720);
+
+ // Verify node exists
+ assert(registry.has_node("test_node"));
+
+ // Get view (should not crash)
+ WGPUTextureView view = registry.get_view("test_node");
+ assert(view != nullptr);
+
+ printf("PASS: NodeRegistry basic allocation\n");
+}
+
+// Test: NodeRegistry aliased nodes (ping-pong optimization)
+void test_node_registry_aliased() {
+ WebGPUTestFixture fixture;
+ if (!fixture.init()) {
+ fprintf(stderr, "Skipping test_node_registry_aliased (no GPU)\n");
+ return;
+ }
+
+ NodeRegistry registry(fixture.ctx().device, 1280, 720);
+
+ // Declare backing node
+ registry.declare_node("frame_a", NodeType::U8X4_NORM, 1280, 720);
+
+ // Declare aliased node
+ registry.declare_aliased_node("frame_b", "frame_a");
+
+ // Both should resolve to same view
+ WGPUTextureView view_a = registry.get_view("frame_a");
+ WGPUTextureView view_b = registry.get_view("frame_b");
+ assert(view_a == view_b);
+
+ printf("PASS: NodeRegistry aliased nodes\n");
+}
+
+// Test: NodeRegistry multi-output views
+void test_node_registry_multi_output() {
+ WebGPUTestFixture fixture;
+ if (!fixture.init()) {
+ fprintf(stderr, "Skipping test_node_registry_multi_output (no GPU)\n");
+ return;
+ }
+
+ NodeRegistry registry(fixture.ctx().device, 1280, 720);
+
+ // Declare multiple nodes
+ registry.declare_node("output1", NodeType::U8X4_NORM, 1280, 720);
+ registry.declare_node("output2", NodeType::F32X4, 1280, 720);
+
+ // Get multiple views
+ std::vector<std::string> names = {"output1", "output2"};
+ std::vector<WGPUTextureView> views = registry.get_output_views(names);
+
+ assert(views.size() == 2);
+ assert(views[0] != nullptr);
+ assert(views[1] != nullptr);
+
+ printf("PASS: NodeRegistry multi-output views\n");
+}
+
+// Test: SequenceV2 default preprocess
+void test_sequence_v2_preprocess() {
+ WebGPUTestFixture fixture;
+ if (!fixture.init()) {
+ fprintf(stderr, "Skipping test_sequence_v2_preprocess (no GPU)\n");
+ return;
+ }
+
+ SequenceV2 seq(fixture.ctx(), 1280, 720);
+
+ // Call preprocess with test values
+ seq.preprocess(1.0f, 4.0f, 0.5f, 0.8f);
+
+ // No crash = success (params updated internally)
+ printf("PASS: SequenceV2 preprocess\n");
+}
+
+// Test: SequenceV2 DAG execution
+void test_sequence_v2_dag_execution() {
+ WebGPUTestFixture fixture;
+ if (!fixture.init()) {
+ fprintf(stderr, "Skipping test_sequence_v2_dag_execution (no GPU)\n");
+ return;
+ }
+
+ // Create sequence
+ class TestSequence : public SequenceV2 {
+ public:
+ TestSequence(const GpuContext& ctx)
+ : SequenceV2(ctx, 1280, 720),
+ effect1_(std::make_shared<TestEffectV2>(ctx, std::vector<std::string>{"source"},
+ std::vector<std::string>{"temp"})),
+ effect2_(std::make_shared<TestEffectV2>(ctx, std::vector<std::string>{"temp"},
+ std::vector<std::string>{"sink"})) {
+ // Build DAG (2 effects in sequence)
+ effect_dag_.push_back(
+ {effect1_, {"source"}, {"temp"}, 0});
+ effect_dag_.push_back(
+ {effect2_, {"temp"}, {"sink"}, 1});
+ }
+
+ std::shared_ptr<TestEffectV2> effect1_;
+ std::shared_ptr<TestEffectV2> effect2_;
+ };
+
+ TestSequence seq(fixture.ctx());
+
+ // Create command encoder
+ WGPUCommandEncoderDescriptor enc_desc = {};
+ WGPUCommandEncoder encoder =
+ wgpuDeviceCreateCommandEncoder(fixture.ctx().device, &enc_desc);
+
+ // Execute DAG
+ seq.render_effects(encoder);
+
+ // Verify both effects called
+ assert(seq.effect1_->was_render_called());
+ assert(seq.effect2_->was_render_called());
+
+ // Cleanup
+ WGPUCommandBuffer cmd = wgpuCommandEncoderFinish(encoder, nullptr);
+ wgpuCommandBufferRelease(cmd);
+ wgpuCommandEncoderRelease(encoder);
+
+ printf("PASS: SequenceV2 DAG execution\n");
+}
+
+int main() {
+ printf("Running Sequence v2 tests...\n");
+
+ test_node_registry_basic();
+ test_node_registry_aliased();
+ test_node_registry_multi_output();
+ test_sequence_v2_preprocess();
+ test_sequence_v2_dag_execution();
+
+ printf("All Sequence v2 tests passed!\n");
+ return 0;
+}