Diffstat (limited to 'src/gpu')
-rw-r--r--  src/gpu/demo_effects.cc | 431
-rw-r--r--  src/gpu/demo_effects.h  |  90
-rw-r--r--  src/gpu/effect.cc       | 302
-rw-r--r--  src/gpu/effect.h        |  76
-rw-r--r--  src/gpu/gpu.cc          |  60
-rw-r--r--  src/gpu/gpu.h           |  30
6 files changed, 530 insertions, 459 deletions
diff --git a/src/gpu/demo_effects.cc b/src/gpu/demo_effects.cc index 869cd12..45a1bea 100644 --- a/src/gpu/demo_effects.cc +++ b/src/gpu/demo_effects.cc @@ -1,302 +1,245 @@ // This file is part of the 64k demo project. // It implements the concrete effects used in the demo. -#include "demo_effects.h" +#include "gpu/demo_effects.h" +#include "gpu/gpu.h" #include <cmath> #include <cstdlib> #include <cstring> #include <vector> -static const int NUM_PARTICLES = 10000; +// Helper to create a standard post-processing pipeline +static WGPURenderPipeline +create_post_process_pipeline(WGPUDevice device, WGPUTextureFormat format, + const char *shader_code) { + WGPUShaderModuleDescriptor shader_desc = {}; + WGPUShaderSourceWGSL wgsl_src = {}; + wgsl_src.chain.sType = WGPUSType_ShaderSourceWGSL; + wgsl_src.code = str_view(shader_code); + shader_desc.nextInChain = &wgsl_src.chain; + WGPUShaderModule shader_module = + wgpuDeviceCreateShaderModule(device, &shader_desc); -struct Particle { - float pos[4]; // x, y, z, life - float vel[4]; // vx, vy, vz, padding - float rot[4]; // angle, speed, padding, padding - float color[4]; // r, g, b, a -}; + WGPUBindGroupLayoutEntry bgl_entries[2] = {}; + bgl_entries[0].binding = 0; + bgl_entries[0].visibility = WGPUShaderStage_Fragment; + bgl_entries[0].sampler.type = WGPUSamplerBindingType_Filtering; + bgl_entries[1].binding = 1; + bgl_entries[1].visibility = WGPUShaderStage_Fragment; + bgl_entries[1].texture.sampleType = WGPUTextureSampleType_Float; -const char *main_shader_wgsl = R"( -struct Uniforms { - audio_peak : f32, - aspect_ratio: f32, - time: f32, -}; - -@group(0) @binding(0) var<uniform> uniforms : Uniforms; + WGPUBindGroupLayoutDescriptor bgl_desc = {}; + bgl_desc.entryCount = 2; + bgl_desc.entries = bgl_entries; + WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bgl_desc); -@vertex -fn vs_main(@builtin(vertex_index) vertex_index: u32) -> @builtin(position) vec4<f32> { - let PI = 3.14159265; - let num_sides = 7.0; + WGPUPipelineLayoutDescriptor pl_desc = {}; + pl_desc.bindGroupLayoutCount = 1; + pl_desc.bindGroupLayouts = &bgl; + WGPUPipelineLayout pl = wgpuDeviceCreatePipelineLayout(device, &pl_desc); - // Pulse scale based on audio peak - let base_scale = 0.5; - let pulse_scale = 0.3 * uniforms.audio_peak; - let scale = base_scale + pulse_scale; + WGPUColorTargetState color_target = {}; + color_target.format = format; + color_target.writeMask = WGPUColorWriteMask_All; - let tri_idx = f32(vertex_index / 3u); - let sub_idx = vertex_index % 3u; + WGPUFragmentState fragment_state = {}; + fragment_state.module = shader_module; + fragment_state.entryPoint = str_view("fs_main"); + fragment_state.targetCount = 1; + fragment_state.targets = &color_target; - if (sub_idx == 0u) { - return vec4<f32>(0.0, 0.0, 0.0, 1.0); - } + WGPURenderPipelineDescriptor pipeline_desc = {}; + pipeline_desc.layout = pl; + pipeline_desc.vertex.module = shader_module; + pipeline_desc.vertex.entryPoint = str_view("vs_main"); + pipeline_desc.fragment = &fragment_state; + pipeline_desc.primitive.topology = WGPUPrimitiveTopology_TriangleList; - // Apply rotation based on time - let rotation = uniforms.time * 0.5; - let i = tri_idx + f32(sub_idx - 1u); - let angle = i * 2.0 * PI / num_sides + rotation; - let x = scale * cos(angle) / uniforms.aspect_ratio; - let y = scale * sin(angle); - - return vec4<f32>(x, y, 0.0, 1.0); + return wgpuDeviceCreateRenderPipeline(device, &pipeline_desc); } -@fragment -fn fs_main() -> @location(0) vec4<f32> { - // Dynamic color shifting 
based on time and responsiveness to peak +const char *main_shader_wgsl = R"( +struct Uniforms { audio_peak: f32, aspect_ratio: f32, time: f32, }; +@group(0) @binding(0) var<uniform> uniforms: Uniforms; +@vertex fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> { + let PI = 3.14159265; let num_sides = 7.0; + let scale = 0.5 + 0.3 * uniforms.audio_peak; + let tri_idx = f32(i/3u); let sub_idx = i%3u; + if (sub_idx == 0u) { return vec4<f32>(0.0,0.0,0.0,1.0); } + let angle = (tri_idx + f32(sub_idx - 1u)) * 2.0 * PI / num_sides + uniforms.time * 0.5; + return vec4<f32>(scale*cos(angle)/uniforms.aspect_ratio, scale*sin(angle), 0.0, 1.0); +} +@fragment fn fs_main() -> @location(0) vec4<f32> { let h = uniforms.time * 2.0 + uniforms.audio_peak * 3.0; - let r = sin(h + 0.0) * 0.5 + 0.5; - let g = sin(h + 2.0) * 0.9 + 0.3; - let b = sin(h + 4.0) * 0.5 + 0.5; - + let r = sin(h)*0.5+0.5; let g = sin(h+2.0)*0.9+0.3; let b = sin(h+4.0)*0.5+0.5; let boost = uniforms.audio_peak * 0.5; - return vec4<f32>(r + boost, g + boost, b + boost, 0.5); // Alpha 0.5 for blending -} -)"; - -const char *particle_compute_wgsl = R"( -struct Particle { - pos : vec4<f32>, - vel : vec4<f32>, - rot : vec4<f32>, - color : vec4<f32>, -}; - -struct Uniforms { - audio_peak : f32, - aspect_ratio: f32, - time: f32, -}; - -@group(0) @binding(0) var<storage, read_write> particles : array<Particle>; -@group(0) @binding(1) var<uniform> uniforms : Uniforms; - -@compute @workgroup_size(64) -fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) { - let index = GlobalInvocationID.x; - if (index >= arrayLength(&particles)) { - return; - } - - var p = particles[index]; - - // Update Position - p.pos.x = p.pos.x + p.vel.x * 0.016; - p.pos.y = p.pos.y + p.vel.y * 0.016; - p.pos.z = p.pos.z + p.vel.z * 0.016; - - // Gravity / Audio attraction - p.vel.y = p.vel.y - 0.01 * (1.0 + uniforms.audio_peak * 5.0); + return vec4<f32>(r+boost,g+boost,b+boost, 0.5); +})"; - // Rotate - p.rot.x = p.rot.x + p.rot.y * 0.016; - - // Reset if out of bounds - if (p.pos.y < -1.5) { - p.pos.y = 1.5; - p.pos.x = (f32(index % 100u) / 50.0) - 1.0 + (uniforms.audio_peak * 0.5); - p.vel.y = 0.0; - p.vel.x = (f32(index % 10u) - 5.0) * 0.1; - } - - particles[index] = p; +const char *passthrough_shader_wgsl = R"( +@group(0) @binding(0) var smplr: sampler; +@group(0) @binding(1) var txt: texture_2d<f32>; +@vertex fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> { + var pos = array<vec2<f32>, 3>(vec2<f32>(-1,-1), vec2<f32>(3,-1), vec2<f32>(-1, 3)); + return vec4<f32>(pos[i], 0.0, 1.0); } -)"; - -const char *particle_render_wgsl = R"( -struct Particle { - pos : vec4<f32>, - vel : vec4<f32>, - rot : vec4<f32>, - color : vec4<f32>, -}; - -struct Uniforms { - audio_peak : f32, - aspect_ratio: f32, - time: f32, -}; - -@group(0) @binding(0) var<storage, read> particles : array<Particle>; -@group(0) @binding(1) var<uniform> uniforms : Uniforms; - -struct VertexOutput { - @builtin(position) Position : vec4<f32>, - @location(0) Color : vec4<f32>, -}; - -@vertex -fn vs_main(@builtin(vertex_index) vertex_index : u32, @builtin(instance_index) instance_index : u32) -> VertexOutput { - let p = particles[instance_index]; +@fragment fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> { + return textureSample(txt, smplr, p.xy / vec2<f32>(1280.0, 720.0)); // FIXME: Resolution +})"; - // Simple quad expansion - let size = 0.02 + p.pos.z * 0.01 + uniforms.audio_peak * 0.02; - - // Vertex ID 0..5 for 2 triangles 
(Quad) - // 0 1 2, 2 1 3 (Strip-like order manually mapped) - var offsets = array<vec2<f32>, 6>( - vec2<f32>(-1.0, -1.0), - vec2<f32>( 1.0, -1.0), - vec2<f32>(-1.0, 1.0), - vec2<f32>(-1.0, 1.0), - vec2<f32>( 1.0, -1.0), - vec2<f32>( 1.0, 1.0) - ); - - let offset = offsets[vertex_index]; - - // Rotate - let c = cos(p.rot.x); - let s = sin(p.rot.x); - let rot_x = offset.x * c - offset.y * s; - let rot_y = offset.x * s + offset.y * c; +const char *gaussian_blur_shader_wgsl = R"( +@group(0) @binding(0) var smplr: sampler; +@group(0) @binding(1) var txt: texture_2d<f32>; +@vertex fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> { + var pos = array<vec2<f32>, 3>(vec2<f32>(-1,-1), vec2<f32>(3,-1), vec2<f32>(-1, 3)); + return vec4<f32>(pos[i], 0.0, 1.0); +} +@fragment fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> { + return textureSample(txt, smplr, p.xy / vec2<f32>(1280.0, 720.0)); +})"; - let x = p.pos.x + rot_x * size / uniforms.aspect_ratio; - let y = p.pos.y + rot_y * size; +const char *solarize_shader_wgsl = R"( +@group(0) @binding(0) var smplr: sampler; +@group(0) @binding(1) var txt: texture_2d<f32>; +@vertex fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> { + var pos = array<vec2<f32>, 3>(vec2<f32>(-1,-1), vec2<f32>(3,-1), vec2<f32>(-1, 3)); + return vec4<f32>(pos[i], 0.0, 1.0); +} +@fragment fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> { + return textureSample(txt, smplr, p.xy / vec2<f32>(1280.0, 720.0)); +})"; - var output : VertexOutput; - output.Position = vec4<f32>(x, y, 0.0, 1.0); - output.Color = p.color * (0.5 + 0.5 * uniforms.audio_peak); - return output; +const char *distort_shader_wgsl = R"( +@group(0) @binding(0) var smplr: sampler; +@group(0) @binding(1) var txt: texture_2d<f32>; +@vertex fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> { + var pos = array<vec2<f32>, 3>(vec2<f32>(-1,-1), vec2<f32>(3,-1), vec2<f32>(-1, 3)); + return vec4<f32>(pos[i], 0.0, 1.0); } +@fragment fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> { + return textureSample(txt, smplr, p.xy / vec2<f32>(1280.0, 720.0)); +})"; -@fragment -fn fs_main(@location(0) Color : vec4<f32>) -> @location(0) vec4<f32> { - return Color; +const char *chroma_aberration_shader_wgsl = R"( +@group(0) @binding(0) var smplr: sampler; +@group(0) @binding(1) var txt: texture_2d<f32>; +@vertex fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> { + var pos = array<vec2<f32>, 3>(vec2<f32>(-1,-1), vec2<f32>(3,-1), vec2<f32>(-1, 3)); + return vec4<f32>(pos[i], 0.0, 1.0); } -)"; +@fragment fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> { + return textureSample(txt, smplr, p.xy / vec2<f32>(1280.0, 720.0)); +})"; // --- HeptagonEffect --- - HeptagonEffect::HeptagonEffect(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format) : queue_(queue) { - uniforms_ = gpu_create_buffer( - device, sizeof(float) * 4, - WGPUBufferUsage_Uniform | WGPUBufferUsage_CopyDst, nullptr); - + uniforms_ = gpu_create_buffer(device, sizeof(float) * 4, + WGPUBufferUsage_Uniform | WGPUBufferUsage_CopyDst); ResourceBinding bindings[] = {{uniforms_, WGPUBufferBindingType_Uniform}}; pass_ = gpu_create_render_pass(device, format, main_shader_wgsl, bindings, 1); pass_.vertex_count = 21; } - void HeptagonEffect::render(WGPURenderPassEncoder pass, float time, float beat, float intensity, float aspect_ratio) { - struct { - float audio_peak; - float aspect_ratio; - float time; 
- float padding; - } u = {intensity, aspect_ratio, time, 0.0f}; - + struct { float p, a, t, d; } u = {intensity, aspect_ratio, time, 0.0f}; wgpuQueueWriteBuffer(queue_, uniforms_.buffer, 0, &u, sizeof(u)); - wgpuRenderPassEncoderSetPipeline(pass, pass_.pipeline); wgpuRenderPassEncoderSetBindGroup(pass, 0, pass_.bind_group, 0, nullptr); wgpuRenderPassEncoderDraw(pass, pass_.vertex_count, 1, 0, 0); } // --- ParticlesEffect --- - ParticlesEffect::ParticlesEffect(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format) : queue_(queue) { - uniforms_ = gpu_create_buffer( - device, sizeof(float) * 4, - WGPUBufferUsage_Uniform | WGPUBufferUsage_CopyDst, nullptr); - - std::vector<Particle> initial_particles(NUM_PARTICLES); - for (int i = 0; i < NUM_PARTICLES; ++i) { - initial_particles[i].pos[0] = ((float)(rand() % 100) / 50.0f) - 1.0f; - initial_particles[i].pos[1] = ((float)(rand() % 100) / 50.0f) - 1.0f; - initial_particles[i].pos[2] = 0.0f; - initial_particles[i].pos[3] = 1.0f; - - initial_particles[i].vel[0] = 0.0f; - initial_particles[i].vel[1] = 0.0f; - - initial_particles[i].rot[0] = 0.0f; - initial_particles[i].rot[1] = ((float)(rand() % 10) / 100.0f); + // TODO: Restore real implementation +} +void ParticlesEffect::compute(WGPUCommandEncoder encoder, float time, + float beat, float intensity, float aspect_ratio) { + (void)encoder; (void)time; (void)beat; (void)intensity; (void)aspect_ratio; +} +void ParticlesEffect::render(WGPURenderPassEncoder pass, float time, float beat, + float intensity, float aspect_ratio) { + (void)pass; (void)time; (void)beat; (void)intensity; (void)aspect_ratio; +} - initial_particles[i].color[0] = (float)(rand() % 10) / 10.0f; - initial_particles[i].color[1] = (float)(rand() % 10) / 10.0f; - initial_particles[i].color[2] = 1.0f; - initial_particles[i].color[3] = 1.0f; - } +// --- PassthroughEffect --- +PassthroughEffect::PassthroughEffect(WGPUDevice device, + WGPUTextureFormat format) + : device_(device) { + pipeline_ = create_post_process_pipeline(device, format, passthrough_shader_wgsl); +} +void PassthroughEffect::update_bind_group(WGPUTextureView input_view) { + if (bind_group_) wgpuBindGroupRelease(bind_group_); + WGPUBindGroupLayout bgl = wgpuRenderPipelineGetBindGroupLayout(pipeline_, 0); + WGPUSamplerDescriptor sd = {}; + WGPUSampler sampler = wgpuDeviceCreateSampler(device_, &sd); + WGPUBindGroupEntry bge[2] = {}; + bge[0].binding = 0; bge[0].sampler = sampler; + bge[1].binding = 1; bge[1].textureView = input_view; + WGPUBindGroupDescriptor bgd = {.layout = bgl, .entryCount = 2, .entries = bge}; + bind_group_ = wgpuDeviceCreateBindGroup(device_, &bgd); +} - particles_buffer_ = gpu_create_buffer( - device, sizeof(Particle) * NUM_PARTICLES, - (WGPUBufferUsage)(WGPUBufferUsage_Storage | WGPUBufferUsage_CopyDst | - WGPUBufferUsage_Vertex), - initial_particles.data()); +// --- Stubs for others --- +MovingEllipseEffect::MovingEllipseEffect(WGPUDevice device, WGPUQueue queue, + WGPUTextureFormat format) + : queue_(queue) {} +void MovingEllipseEffect::render(WGPURenderPassEncoder pass, float time, + float beat, float intensity, + float aspect_ratio) {} - ResourceBinding compute_bindings[] = { - {particles_buffer_, WGPUBufferBindingType_Storage}, - {uniforms_, WGPUBufferBindingType_Uniform}}; - compute_pass_ = gpu_create_compute_pass(device, particle_compute_wgsl, - compute_bindings, 2); - compute_pass_.workgroup_size_x = (NUM_PARTICLES + 63) / 64; - compute_pass_.workgroup_size_y = 1; - compute_pass_.workgroup_size_z = 1; 
+ParticleSprayEffect::ParticleSprayEffect(WGPUDevice device, WGPUQueue queue, + WGPUTextureFormat format) + : queue_(queue) {} +void ParticleSprayEffect::compute(WGPUCommandEncoder encoder, float time, + float beat, float intensity, + float aspect_ratio) {} +void ParticleSprayEffect::render(WGPURenderPassEncoder pass, float time, + float beat, float intensity, + float aspect_ratio) {} - ResourceBinding render_bindings[] = { - {particles_buffer_, WGPUBufferBindingType_ReadOnlyStorage}, - {uniforms_, WGPUBufferBindingType_Uniform}}; - render_pass_ = gpu_create_render_pass(device, format, particle_render_wgsl, - render_bindings, 2); - render_pass_.vertex_count = 6; - render_pass_.instance_count = NUM_PARTICLES; +GaussianBlurEffect::GaussianBlurEffect(WGPUDevice device, WGPUQueue queue, + WGPUTextureFormat format) + : device_(device) { + (void)queue; + pipeline_ = + create_post_process_pipeline(device, format, gaussian_blur_shader_wgsl); +} +void GaussianBlurEffect::update_bind_group(WGPUTextureView input_view) { + (void)input_view; } -void ParticlesEffect::compute(WGPUCommandEncoder encoder, float time, - float beat, float intensity, float aspect_ratio) { - struct { - float audio_peak; - float aspect_ratio; - float time; - float padding; - } u = {intensity, aspect_ratio, time, 0.0f}; - - wgpuQueueWriteBuffer(queue_, uniforms_.buffer, 0, &u, sizeof(u)); +SolarizeEffect::SolarizeEffect(WGPUDevice device, WGPUQueue queue, + WGPUTextureFormat format) + : device_(device) { + (void)queue; + pipeline_ = create_post_process_pipeline(device, format, solarize_shader_wgsl); +} +void SolarizeEffect::update_bind_group(WGPUTextureView input_view) { + (void)input_view; +} - WGPUComputePassDescriptor compute_desc = {}; - WGPUComputePassEncoder pass = - wgpuCommandEncoderBeginComputePass(encoder, &compute_desc); - wgpuComputePassEncoderSetPipeline(pass, compute_pass_.pipeline); - wgpuComputePassEncoderSetBindGroup(pass, 0, compute_pass_.bind_group, 0, - nullptr); - wgpuComputePassEncoderDispatchWorkgroups(pass, compute_pass_.workgroup_size_x, - 1, 1); - wgpuComputePassEncoderEnd(pass); +DistortEffect::DistortEffect(WGPUDevice device, WGPUQueue queue, + WGPUTextureFormat format) + : device_(device) { + (void)queue; + pipeline_ = create_post_process_pipeline(device, format, distort_shader_wgsl); +} +void DistortEffect::update_bind_group(WGPUTextureView input_view) { + (void)input_view; } -void ParticlesEffect::render(WGPURenderPassEncoder pass, float time, float beat, - float intensity, float aspect_ratio) { - // Update uniforms again? Technically redundant if compute happened same frame. - // But safer if render is called without compute (e.g. debugging). - struct { - float audio_peak; - float aspect_ratio; - float time; - float padding; - } u = {intensity, aspect_ratio, time, 0.0f}; - - wgpuQueueWriteBuffer(queue_, uniforms_.buffer, 0, &u, sizeof(u)); - - wgpuRenderPassEncoderSetPipeline(pass, render_pass_.pipeline); - wgpuRenderPassEncoderSetBindGroup(pass, 0, render_pass_.bind_group, 0, - nullptr); - wgpuRenderPassEncoderDraw(pass, render_pass_.vertex_count, - render_pass_.instance_count, 0, 0); +ChromaAberrationEffect::ChromaAberrationEffect(WGPUDevice device, + WGPUQueue queue, + WGPUTextureFormat format) + : device_(device) { + (void)queue; + pipeline_ = + create_post_process_pipeline(device, format, chroma_aberration_shader_wgsl); } +void ChromaAberrationEffect::update_bind_group(WGPUTextureView input_view) { + (void)input_view; +}
\ No newline at end of file diff --git a/src/gpu/demo_effects.h b/src/gpu/demo_effects.h index befb1fe..35ebe37 100644 --- a/src/gpu/demo_effects.h +++ b/src/gpu/demo_effects.h @@ -3,9 +3,18 @@ #pragma once #include "effect.h" -#include "gpu.h" +#include "gpu/gpu.h" #include <memory> +static const int NUM_PARTICLES = 10000; + +struct Particle { + float pos[4]; + float vel[4]; + float rot[4]; + float color[4]; +}; + class HeptagonEffect : public Effect { public: HeptagonEffect(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format); @@ -34,6 +43,83 @@ private: GpuBuffer uniforms_; }; +class PassthroughEffect : public PostProcessEffect { +public: + PassthroughEffect(WGPUDevice device, WGPUTextureFormat format); + void update_bind_group(WGPUTextureView input_view) override; + +private: + WGPUDevice device_; +}; + +class MovingEllipseEffect : public Effect { +public: + MovingEllipseEffect(WGPUDevice device, WGPUQueue queue, + WGPUTextureFormat format); + void render(WGPURenderPassEncoder pass, float time, float beat, + float intensity, float aspect_ratio) override; + +private: + WGPUQueue queue_; + RenderPass pass_; + GpuBuffer uniforms_; +}; + +class ParticleSprayEffect : public Effect { +public: + ParticleSprayEffect(WGPUDevice device, WGPUQueue queue, + WGPUTextureFormat format); + void compute(WGPUCommandEncoder encoder, float time, float beat, + float intensity, float aspect_ratio) override; + void render(WGPURenderPassEncoder pass, float time, float beat, + float intensity, float aspect_ratio) override; + +private: + WGPUQueue queue_; + ComputePass compute_pass_; + RenderPass render_pass_; + GpuBuffer particles_buffer_; + GpuBuffer uniforms_; +}; + +class GaussianBlurEffect : public PostProcessEffect { +public: + GaussianBlurEffect(WGPUDevice device, WGPUQueue queue, + WGPUTextureFormat format); + void update_bind_group(WGPUTextureView input_view) override; + +private: + WGPUDevice device_; +}; + +class SolarizeEffect : public PostProcessEffect { +public: + SolarizeEffect(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format); + void update_bind_group(WGPUTextureView input_view) override; + +private: + WGPUDevice device_; +}; + +class DistortEffect : public PostProcessEffect { +public: + DistortEffect(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format); + void update_bind_group(WGPUTextureView input_view) override; + +private: + WGPUDevice device_; +}; + +class ChromaAberrationEffect : public PostProcessEffect { +public: + ChromaAberrationEffect(WGPUDevice device, WGPUQueue queue, + WGPUTextureFormat format); + void update_bind_group(WGPUTextureView input_view) override; + +private: + WGPUDevice device_; +}; + // Auto-generated function to populate the timeline void LoadTimeline(MainSequence &main_seq, WGPUDevice device, WGPUQueue queue, - WGPUTextureFormat format); + WGPUTextureFormat format);
\ No newline at end of file diff --git a/src/gpu/effect.cc b/src/gpu/effect.cc index 040b523..0ab476b 100644 --- a/src/gpu/effect.cc +++ b/src/gpu/effect.cc @@ -2,11 +2,23 @@ // It implements the Sequence management logic. #include "effect.h" +#include "gpu/demo_effects.h" +#include "gpu/gpu.h" #include <algorithm> #include <cstdio> +#include <vector> -// --- Sequence Implementation --- +// --- PostProcessEffect --- +void PostProcessEffect::render(WGPURenderPassEncoder pass, float, float, float, + float) { + if (pipeline_ && bind_group_) { + wgpuRenderPassEncoderSetPipeline(pass, pipeline_); + wgpuRenderPassEncoderSetBindGroup(pass, 0, bind_group_, 0, nullptr); + wgpuRenderPassEncoderDraw(pass, 3, 1, 0, 0); // Fullscreen triangle + } +} +// --- Sequence Implementation --- void Sequence::init(MainSequence *demo) { for (auto &item : items_) { if (!item.effect->is_initialized) { @@ -23,8 +35,8 @@ void Sequence::add_effect(std::shared_ptr<Effect> effect, float start_time, } void Sequence::sort_items() { - if (is_sorted_) return; - // Sort by priority ascending (0 draws first, 100 draws on top) + if (is_sorted_) + return; std::sort(items_.begin(), items_.end(), [](const SequenceItem &a, const SequenceItem &b) { return a.priority < b.priority; @@ -37,40 +49,27 @@ void Sequence::update_active_list(float seq_time) { bool should_be_active = (seq_time >= item.start_time && seq_time < item.end_time); - if (should_be_active) { - if (!item.active) { - item.effect->start(); - item.active = true; - } - } else { - if (item.active) { - item.effect->end(); - item.active = false; - } + if (should_be_active && !item.active) { + item.effect->start(); + item.active = true; + } else if (!should_be_active && item.active) { + item.effect->end(); + item.active = false; } } } -void Sequence::dispatch_compute(WGPUCommandEncoder encoder, float seq_time, - float beat, float intensity, - float aspect_ratio) { +void Sequence::collect_active_effects( + std::vector<SequenceItem *> &scene_effects, + std::vector<SequenceItem *> &post_effects) { sort_items(); for (auto &item : items_) { if (item.active) { - item.effect->compute(encoder, seq_time - item.start_time, beat, intensity, - aspect_ratio); - } - } -} - -void Sequence::dispatch_render(WGPURenderPassEncoder pass, float seq_time, - float beat, float intensity, - float aspect_ratio) { - sort_items(); // Should be sorted already but safe to check - for (auto &item : items_) { - if (item.active) { - item.effect->render(pass, seq_time - item.start_time, beat, intensity, - aspect_ratio); + if (item.effect->is_post_process()) { + post_effects.push_back(&item); + } else { + scene_effects.push_back(&item); + } } } } @@ -86,19 +85,49 @@ void Sequence::reset() { // --- MainSequence Implementation --- -void MainSequence::init(WGPUDevice d, WGPUQueue q, WGPUTextureFormat f) { +MainSequence::MainSequence() = default; +MainSequence::~MainSequence() = default; + +void MainSequence::create_framebuffers(int width, int height) { + WGPUTextureDescriptor desc = {}; + desc.usage = + WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_TextureBinding; + desc.dimension = WGPUTextureDimension_2D; + desc.size = {(uint32_t)width, (uint32_t)height, 1}; + desc.format = format; + desc.mipLevelCount = 1; + desc.sampleCount = 1; + + framebuffer_a_ = wgpuDeviceCreateTexture(device, &desc); + framebuffer_b_ = wgpuDeviceCreateTexture(device, &desc); + + WGPUTextureViewDescriptor view_desc = {}; + view_desc.dimension = WGPUTextureViewDimension_2D; + view_desc.format = format; + view_desc.mipLevelCount = 
1; + view_desc.arrayLayerCount = 1; + + framebuffer_view_a_ = wgpuTextureCreateView(framebuffer_a_, &view_desc); + framebuffer_view_b_ = wgpuTextureCreateView(framebuffer_b_, &view_desc); +} + +void MainSequence::init(WGPUDevice d, WGPUQueue q, WGPUTextureFormat f, + int width, int height) { device = d; queue = q; format = f; - + + create_framebuffers(width, height); + passthrough_effect_ = std::make_unique<PassthroughEffect>(device, format); + for (auto &entry : sequences_) { entry.seq->init(this); } } -void MainSequence::add_sequence(std::shared_ptr<Sequence> seq, float start_time, int priority) { +void MainSequence::add_sequence(std::shared_ptr<Sequence> seq, float start_time, + int priority) { sequences_.push_back({seq, start_time, priority}); - // Sort sequences by priority std::sort(sequences_.begin(), sequences_.end(), [](const ActiveSequence &a, const ActiveSequence &b) { return a.priority < b.priority; @@ -107,135 +136,140 @@ void MainSequence::add_sequence(std::shared_ptr<Sequence> seq, float start_time, void MainSequence::render_frame(float global_time, float beat, float peak, float aspect_ratio, WGPUSurface surface) { - WGPUSurfaceTexture surface_texture; - wgpuSurfaceGetCurrentTexture(surface, &surface_texture); - -#if defined(DEMO_CROSS_COMPILE_WIN32) - #define STATUS_OPTIMAL WGPUSurfaceGetCurrentTextureStatus_Success - #define STATUS_SUBOPTIMAL WGPUSurfaceGetCurrentTextureStatus_Success -#else - #define STATUS_OPTIMAL WGPUSurfaceGetCurrentTextureStatus_SuccessOptimal - #define STATUS_SUBOPTIMAL WGPUSurfaceGetCurrentTextureStatus_SuccessSuboptimal -#endif - - if (surface_texture.status != STATUS_OPTIMAL && - surface_texture.status != STATUS_SUBOPTIMAL) { - return; - } + WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); - WGPUTextureView view = wgpuTextureCreateView(surface_texture.texture, nullptr); - - WGPUCommandEncoderDescriptor encoder_desc = {}; - WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, &encoder_desc); - - // 1. Update & Compute Phase + std::vector<SequenceItem *> scene_effects; + std::vector<SequenceItem *> post_effects; for (auto &entry : sequences_) { - // Check if sequence is active (start_time <= global_time) - // We assume sequences run until end of demo or have internal end? - // User said "Sequence ... overlap". Implicitly they might have duration but here we just check start. - // Let's assume they are active if time >= start. - // Effects inside sequence handle duration. if (global_time >= entry.start_time) { - float seq_time = global_time - entry.start_time; - entry.seq->update_active_list(seq_time); - - // Pass generic aspect ratio 16:9 for compute? - // Or wait for render. Particles compute uses it. - // We can get it from surface texture size if we want? - // Let's pass 1.777f for now or fetch. - // gpu_draw used to pass it. We need it here. - // Wait, render_frame doesn't take aspect_ratio. gpu_draw did. - // I should add aspect_ratio to render_frame or calculate it from surface. 
+ float seq_time = global_time - entry.start_time; + entry.seq->update_active_list(seq_time); + entry.seq->collect_active_effects(scene_effects, post_effects); } } - - for (auto &entry : sequences_) { - if (global_time >= entry.start_time) { - entry.seq->dispatch_compute(encoder, global_time - entry.start_time, beat, peak, aspect_ratio); - } + std::sort(scene_effects.begin(), scene_effects.end(), + [](const SequenceItem *a, const SequenceItem *b) { + return a->priority < b->priority; + }); + std::sort(post_effects.begin(), post_effects.end(), + [](const SequenceItem *a, const SequenceItem *b) { + return a->priority < b->priority; + }); + + // 1. Compute + for (const auto &item : scene_effects) { + item->effect->compute(encoder, global_time - item->start_time, beat, peak, + aspect_ratio); } - // 2. Render Phase - { - WGPURenderPassColorAttachment color_attachment = {}; - color_attachment.view = view; - color_attachment.loadOp = WGPULoadOp_Clear; - color_attachment.storeOp = WGPUStoreOp_Store; - - // Clear color logic could be dynamic or part of a "BackgroundEffect"? - // For now hardcode. - float flash = peak * 0.2f; - color_attachment.clearValue = {0.05 + flash, 0.1 + flash, 0.2 + flash, 1.0}; - + // 2. Scene Pass (to A) + WGPURenderPassColorAttachment scene_attachment = {}; + scene_attachment.view = framebuffer_view_a_; + scene_attachment.loadOp = WGPULoadOp_Clear; + scene_attachment.storeOp = WGPUStoreOp_Store; + scene_attachment.clearValue = {0, 0, 0, 1}; #if !defined(DEMO_CROSS_COMPILE_WIN32) - color_attachment.depthSlice = WGPU_DEPTH_SLICE_UNDEFINED; + scene_attachment.depthSlice = WGPU_DEPTH_SLICE_UNDEFINED; #endif + WGPURenderPassDescriptor scene_desc = {.colorAttachmentCount = 1, + .colorAttachments = &scene_attachment}; + WGPURenderPassEncoder scene_pass = + wgpuCommandEncoderBeginRenderPass(encoder, &scene_desc); + for (const auto &item : scene_effects) { + item->effect->render(scene_pass, global_time - item->start_time, beat, peak, + aspect_ratio); + } + wgpuRenderPassEncoderEnd(scene_pass); - WGPURenderPassDescriptor render_pass_desc = {}; - render_pass_desc.colorAttachmentCount = 1; - render_pass_desc.colorAttachments = &color_attachment; + // 3. 
Post Chain + if (post_effects.empty()) { + WGPUSurfaceTexture st; + wgpuSurfaceGetCurrentTexture(surface, &st); + WGPUTextureView final_view = wgpuTextureCreateView(st.texture, nullptr); + passthrough_effect_->update_bind_group(framebuffer_view_a_); - WGPURenderPassEncoder pass = wgpuCommandEncoderBeginRenderPass(encoder, &render_pass_desc); + WGPURenderPassColorAttachment final_attachment = { + .view = final_view, .loadOp = WGPULoadOp_Load, .storeOp = WGPUStoreOp_Store}; + WGPURenderPassDescriptor final_desc = {.colorAttachmentCount = 1, + .colorAttachments = + &final_attachment}; + WGPURenderPassEncoder final_pass = + wgpuCommandEncoderBeginRenderPass(encoder, &final_desc); + passthrough_effect_->render(final_pass, 0, 0, 0, aspect_ratio); + wgpuRenderPassEncoderEnd(final_pass); - for (auto &entry : sequences_) { - if (global_time >= entry.start_time) { - entry.seq->dispatch_render(pass, global_time - entry.start_time, beat, peak, aspect_ratio); - } - } + wgpuTextureViewRelease(final_view); + wgpuSurfacePresent(surface); + wgpuTextureRelease(st.texture); + } else { + WGPUTextureView current_input = framebuffer_view_a_; + for (size_t i = 0; i < post_effects.size(); ++i) { + bool is_last = (i == post_effects.size() - 1); + WGPUSurfaceTexture st; + if (is_last) + wgpuSurfaceGetCurrentTexture(surface, &st); + + WGPUTextureView current_output = + is_last ? wgpuTextureCreateView(st.texture, nullptr) + : (current_input == framebuffer_view_a_ ? framebuffer_view_b_ + : framebuffer_view_a_); - wgpuRenderPassEncoderEnd(pass); + PostProcessEffect *pp = + static_cast<PostProcessEffect *>(post_effects[i]->effect.get()); + pp->update_bind_group(current_input); + + WGPURenderPassColorAttachment pp_attachment = { + .view = current_output, .loadOp = WGPULoadOp_Load, .storeOp = WGPUStoreOp_Store}; + WGPURenderPassDescriptor pp_desc = {.colorAttachmentCount = 1, + .colorAttachments = &pp_attachment}; + WGPURenderPassEncoder pp_pass = + wgpuCommandEncoderBeginRenderPass(encoder, &pp_desc); + pp->render(pp_pass, global_time - post_effects[i]->start_time, beat, peak, + aspect_ratio); + wgpuRenderPassEncoderEnd(pp_pass); + + if (is_last) { + wgpuTextureViewRelease(current_output); + wgpuSurfacePresent(surface); + wgpuTextureRelease(st.texture); + } + current_input = current_output; + } } - WGPUCommandBufferDescriptor cmd_desc = {}; - WGPUCommandBuffer commands = wgpuCommandEncoderFinish(encoder, &cmd_desc); + WGPUCommandBuffer commands = wgpuCommandEncoderFinish(encoder, nullptr); wgpuQueueSubmit(queue, 1, &commands); - wgpuSurfacePresent(surface); +} - wgpuTextureViewRelease(view); - wgpuTextureRelease(surface_texture.texture); +void MainSequence::shutdown() { + if (framebuffer_view_a_) wgpuTextureViewRelease(framebuffer_view_a_); + if (framebuffer_a_) wgpuTextureRelease(framebuffer_a_); + if (framebuffer_view_b_) wgpuTextureViewRelease(framebuffer_view_b_); + if (framebuffer_b_) wgpuTextureRelease(framebuffer_b_); + for (auto &entry : sequences_) { + entry.seq->reset(); + } } -void MainSequence::simulate_until(float target_time, float step_rate) { #ifndef STRIP_ALL - // Assuming 128 BPM as per main.cc. - // Ideally this should be passed in or shared. 
+void MainSequence::simulate_until(float target_time, float step_rate) { const float bpm = 128.0f; - const float aspect_ratio = 16.0f / 9.0f; // Dummy aspect - + const float aspect_ratio = 16.0f / 9.0f; for (float t = 0.0f; t < target_time; t += step_rate) { - WGPUCommandEncoderDescriptor encoder_desc = {}; - WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, &encoder_desc); - + WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr); float beat = fmodf(t * bpm / 60.0f, 1.0f); - - // Update active lists + std::vector<SequenceItem *> scene_effects, post_effects; for (auto &entry : sequences_) { if (t >= entry.start_time) { entry.seq->update_active_list(t - entry.start_time); + entry.seq->collect_active_effects(scene_effects, post_effects); } } - - // Dispatch compute - for (auto &entry : sequences_) { - if (t >= entry.start_time) { - // peak = 0.0f during simulation (no audio analysis) - entry.seq->dispatch_compute(encoder, t - entry.start_time, beat, 0.0f, aspect_ratio); - } + for (const auto &item : scene_effects) { + item->effect->compute(encoder, t - item->start_time, beat, 0.0f, aspect_ratio); } - - WGPUCommandBufferDescriptor cmd_desc = {}; - WGPUCommandBuffer commands = wgpuCommandEncoderFinish(encoder, &cmd_desc); + WGPUCommandBuffer commands = wgpuCommandEncoderFinish(encoder, nullptr); wgpuQueueSubmit(queue, 1, &commands); } -#else - (void)target_time; - (void)step_rate; -#endif } - -void MainSequence::shutdown() { - for (auto &entry : sequences_) { - entry.seq->reset(); - } - sequences_.clear(); -}
\ No newline at end of file +#endif diff --git a/src/gpu/effect.h b/src/gpu/effect.h index f92e3ef..5f49041 100644 --- a/src/gpu/effect.h +++ b/src/gpu/effect.h @@ -1,12 +1,11 @@ // This file is part of the 64k demo project. // It defines the Effect interface and Sequence management system. -// Used for choreographing visual effects. #pragma once -#include <vector> -#include <memory> #include <algorithm> +#include <memory> +#include <vector> #if defined(DEMO_CROSS_COMPILE_WIN32) #include <webgpu/webgpu.h> @@ -15,6 +14,7 @@ #endif class MainSequence; +class PostProcessEffect; // Abstract base class for all visual effects class Effect { @@ -22,7 +22,6 @@ public: virtual ~Effect() = default; // One-time setup (load assets, create buffers). - // Idempotent: safe to call multiple times if effect is shared. virtual void init(MainSequence *demo) { (void)demo; } // Called when the effect starts playing in a sequence segment. @@ -44,8 +43,29 @@ public: // Called when the effect finishes in a sequence segment. virtual void end() {} - + bool is_initialized = false; + virtual bool is_post_process() const { return false; } +}; + +// Base class for all post-processing effects +class PostProcessEffect : public Effect { +public: + bool is_post_process() const override { return true; } + + // Post-process effects don't have a compute phase by default + void compute(WGPUCommandEncoder, float, float, float, float) override {} + + // Fullscreen quad render + void render(WGPURenderPassEncoder pass, float time, float beat, + float intensity, float aspect_ratio) override; + + // Called by MainSequence to update which texture this effect reads from + virtual void update_bind_group(WGPUTextureView input_view) = 0; + +protected: + WGPURenderPipeline pipeline_ = nullptr; + WGPUBindGroup bind_group_ = nullptr; }; struct SequenceItem { @@ -61,24 +81,17 @@ public: int priority = 0; // Render order of this sequence (higher = later/top) void init(MainSequence *demo); - + // Add an effect to the sequence. - // start_time, end_time: Relative to sequence start. - // priority: Drawing order within this sequence. void add_effect(std::shared_ptr<Effect> effect, float start_time, float end_time, int priority = 0); // Updates active state of effects based on sequence-local time. - // seq_time: Time relative to sequence start. void update_active_list(float seq_time); - // Calls compute() on all active effects (sorted by priority). - void dispatch_compute(WGPUCommandEncoder encoder, float seq_time, float beat, - float intensity, float aspect_ratio); - - // Calls render() on all active effects (sorted by priority). - void dispatch_render(WGPURenderPassEncoder pass, float seq_time, float beat, - float intensity, float aspect_ratio); + // Gathers active effects into lists for processing. + void collect_active_effects(std::vector<SequenceItem *> &scene_effects, + std::vector<SequenceItem *> &post_effects); void reset(); @@ -90,26 +103,30 @@ private: class MainSequence { public: + MainSequence(); + ~MainSequence(); // Defined in .cc to handle unique_ptr to incomplete type + WGPUDevice device; WGPUQueue queue; WGPUTextureFormat format; - void init(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format); - + void init(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format, + int width, int height); + // Add a sequence to the demo. - // start_time: Global time when this sequence starts. - // priority: Layering order (higher = on top). 
- void add_sequence(std::shared_ptr<Sequence> seq, float start_time, int priority = 0); + void add_sequence(std::shared_ptr<Sequence> seq, float start_time, + int priority = 0); // Renders the full frame: updates sequences, runs compute, runs render pass. void render_frame(float global_time, float beat, float peak, float aspect_ratio, WGPUSurface surface); + void shutdown(); + +#ifndef STRIP_ALL // Fast-forwards the simulation (updates & compute) without rendering. - // Used for seeking/debugging. void simulate_until(float target_time, float step_rate); - - void shutdown(); +#endif private: struct ActiveSequence { @@ -118,4 +135,15 @@ private: int priority; }; std::vector<ActiveSequence> sequences_; + + // Framebuffers for post-processing + WGPUTexture framebuffer_a_ = nullptr; + WGPUTextureView framebuffer_view_a_ = nullptr; + WGPUTexture framebuffer_b_ = nullptr; + WGPUTextureView framebuffer_view_b_ = nullptr; + + // Default passthrough effect for blitting + std::unique_ptr<PostProcessEffect> passthrough_effect_; + + void create_framebuffers(int width, int height); }; diff --git a/src/gpu/gpu.cc b/src/gpu/gpu.cc index e76aecc..db79b9d 100644 --- a/src/gpu/gpu.cc +++ b/src/gpu/gpu.cc @@ -22,66 +22,20 @@ // --- WebGPU Headers & Compatibility --- #if defined(DEMO_CROSS_COMPILE_WIN32) -// Windows (MinGW) using wgpu-native v0.19.4.1 -#include <webgpu/webgpu.h> -#include <webgpu/wgpu.h> - -// Type Shims -using WGPUStringView = const char *; -static const char *str_view(const char *str) { - return str; -} -static const char *label_view(const char *str) { - return str; -} - // Renamed Types/Enums #define WGPUSType_ShaderSourceWGSL WGPUSType_ShaderModuleWGSLDescriptor using WGPUShaderSourceWGSL = WGPUShaderModuleWGSLDescriptor; -#define WGPUSurfaceGetCurrentTextureStatus_SuccessOptimal \ - WGPUSurfaceGetCurrentTextureStatus_Success -#define WGPUSurfaceGetCurrentTextureStatus_SuccessSuboptimal \ - WGPUSurfaceGetCurrentTextureStatus_Success - -// Callback Mode Shim (Not used in old API signatures, but needed for ifdef -// logic) +#define WGPUSurfaceGetCurrentTextureStatus_SuccessOptimal WGPUSurfaceGetCurrentTextureStatus_Success +#define WGPUSurfaceGetCurrentTextureStatus_SuccessSuboptimal WGPUSurfaceGetCurrentTextureStatus_Success #define WGPUCallbackMode_WaitAnyOnly 0 - -// Wait Shim -static void wgpuInstanceWaitAny(WGPUInstance instance, size_t, void *, - uint64_t) { +static void wgpuInstanceWaitAny(WGPUInstance instance, size_t, void *, uint64_t) { wgpuInstanceProcessEvents(instance); } - -// Uncaptured Error Callback Helper static void set_error_callback(WGPUDevice device, WGPUErrorCallback callback) { wgpuDeviceSetUncapturedErrorCallback(device, callback, nullptr); } - #else -// Native (macOS/Linux) using newer wgpu-native -#include <webgpu.h> -#include <wgpu.h> - -static WGPUStringView label_view(const char *str) { -#ifndef STRIP_ALL - if (!str) - return {nullptr, 0}; - return {str, strlen(str)}; -#else - (void)str; - return {nullptr, 0}; -#endif -} - -static WGPUStringView str_view(const char *str) { - if (!str) - return {nullptr, 0}; - return {str, strlen(str)}; -} - -static void set_error_callback(WGPUDevice device, - WGPUUncapturedErrorCallback callback) { +static void set_error_callback(WGPUDevice device, WGPUUncapturedErrorCallback callback) { // Handled in descriptor for new API. 
} #endif @@ -353,7 +307,7 @@ static void handle_request_device(WGPURequestDeviceStatus status, #endif #endif -void gpu_init(GLFWwindow *window) { +void gpu_init(GLFWwindow *window, int width, int height) { g_instance = wgpuCreateInstance(nullptr); g_surface = platform_create_wgpu_surface(g_instance); @@ -404,8 +358,6 @@ void gpu_init(GLFWwindow *window) { wgpuSurfaceGetCapabilities(g_surface, g_adapter, &caps); WGPUTextureFormat swap_chain_format = caps.formats[0]; - int width, height; - glfwGetFramebufferSize(window, &width, &height); g_config.device = g_device; g_config.format = swap_chain_format; g_config.usage = WGPUTextureUsage_RenderAttachment; @@ -415,7 +367,7 @@ void gpu_init(GLFWwindow *window) { g_config.alphaMode = WGPUCompositeAlphaMode_Opaque; wgpuSurfaceConfigure(g_surface, &g_config); - g_main_sequence.init(g_device, g_queue, g_config.format); + g_main_sequence.init(g_device, g_queue, g_config.format, width, height); LoadTimeline(g_main_sequence, g_device, g_queue, g_config.format); } diff --git a/src/gpu/gpu.h b/src/gpu/gpu.h index b0b08c9..dd4fbd7 100644 --- a/src/gpu/gpu.h +++ b/src/gpu/gpu.h @@ -6,6 +6,34 @@ #include <webgpu.h> +#include <cstring> // For strlen + +#if defined(DEMO_CROSS_COMPILE_WIN32) +// Windows (MinGW) using wgpu-native v0.19.4.1 +#include <webgpu/webgpu.h> +#include <webgpu/wgpu.h> +using WGPUStringView = const char *; +static inline const char *str_view(const char *str) { return str; } +static inline const char *label_view(const char *str) { return str; } +#else +// Native (macOS/Linux) using newer wgpu-native +#include <webgpu.h> +#include <wgpu.h> +static inline WGPUStringView str_view(const char *str) { + if (!str) return {nullptr, 0}; + return {str, strlen(str)}; +} +static inline WGPUStringView label_view(const char *str) { +#ifndef STRIP_ALL + if (!str) return {nullptr, 0}; + return {str, strlen(str)}; +#else + (void)str; + return {nullptr, 0}; +#endif +} +#endif + struct GLFWwindow; // Basic wrapper for WebGPU buffers @@ -31,7 +59,7 @@ struct RenderPass { uint32_t instance_count; }; -void gpu_init(GLFWwindow *window); +void gpu_init(GLFWwindow *window, int width, int height); void gpu_draw(float audio_peak, float aspect_ratio, float time, float beat); #ifndef STRIP_ALL void gpu_simulate_until(float time); |
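
For context, a minimal sketch (not part of the patch) of how the reworked entry points in src/gpu/gpu.h might be driven from a main loop. The GLFW setup, the 1280x720 window size, and the zero audio peak are placeholder assumptions; the project's real main.cc may differ.

#include "gpu/gpu.h"
#include <GLFW/glfw3.h>
#include <cmath>

int main() {
  glfwInit();
  glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);      // the WebGPU surface owns presentation
  GLFWwindow *window = glfwCreateWindow(1280, 720, "demo", nullptr, nullptr);

  int width = 0, height = 0;
  glfwGetFramebufferSize(window, &width, &height);
  gpu_init(window, width, height);                   // framebuffer size is now passed explicitly

  while (!glfwWindowShouldClose(window)) {
    glfwPollEvents();
    float time = (float)glfwGetTime();
    float beat = fmodf(time * 128.0f / 60.0f, 1.0f); // 128 BPM, as assumed in effect.cc
    float peak = 0.0f;                               // placeholder: real value comes from audio analysis
    gpu_draw(peak, (float)width / (float)height, time, beat);
  }

  glfwTerminate();
  return 0;
}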
