// This file is part of the 64k demo project.
// It implements the concrete effects used in the demo.

#include "gpu/demo_effects.h"
#include "gpu/gpu.h"

#include <webgpu/webgpu.h>

// Helper to create a standard post-processing pipeline: a fullscreen triangle
// that samples an input texture through a filtering sampler.
static WGPURenderPipeline create_post_process_pipeline(WGPUDevice device,
                                                       WGPUTextureFormat format,
                                                       const char *shader_code) {
    WGPUShaderModuleDescriptor shader_desc = {};
    WGPUShaderSourceWGSL wgsl_src = {};
    wgsl_src.chain.sType = WGPUSType_ShaderSourceWGSL;
    wgsl_src.code = str_view(shader_code);
    shader_desc.nextInChain = &wgsl_src.chain;
    WGPUShaderModule shader_module = wgpuDeviceCreateShaderModule(device, &shader_desc);

    WGPUBindGroupLayoutEntry bgl_entries[2] = {};
    bgl_entries[0].binding = 0;
    bgl_entries[0].visibility = WGPUShaderStage_Fragment;
    bgl_entries[0].sampler.type = WGPUSamplerBindingType_Filtering;
    bgl_entries[1].binding = 1;
    bgl_entries[1].visibility = WGPUShaderStage_Fragment;
    bgl_entries[1].texture.sampleType = WGPUTextureSampleType_Float;

    WGPUBindGroupLayoutDescriptor bgl_desc = {};
    bgl_desc.entryCount = 2;
    bgl_desc.entries = bgl_entries;
    WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bgl_desc);

    WGPUPipelineLayoutDescriptor pl_desc = {};
    pl_desc.bindGroupLayoutCount = 1;
    pl_desc.bindGroupLayouts = &bgl;
    WGPUPipelineLayout pl = wgpuDeviceCreatePipelineLayout(device, &pl_desc);

    WGPUColorTargetState color_target = {};
    color_target.format = format;
    color_target.writeMask = WGPUColorWriteMask_All;

    WGPUFragmentState fragment_state = {};
    fragment_state.module = shader_module;
    fragment_state.entryPoint = str_view("fs_main");
    fragment_state.targetCount = 1;
    fragment_state.targets = &color_target;

    WGPURenderPipelineDescriptor pipeline_desc = {};
    pipeline_desc.layout = pl;
    pipeline_desc.vertex.module = shader_module;
    pipeline_desc.vertex.entryPoint = str_view("vs_main");
    pipeline_desc.fragment = &fragment_state;
    pipeline_desc.primitive.topology = WGPUPrimitiveTopology_TriangleList;
    // Spec defaults, set explicitly: a zeroed multisample state (count 0,
    // mask 0) does not pass validation.
    pipeline_desc.multisample.count = 1;
    pipeline_desc.multisample.mask = 0xFFFFFFFFu;
    return wgpuDeviceCreateRenderPipeline(device, &pipeline_desc);
}

const char *main_shader_wgsl = R"(
struct Uniforms {
    audio_peak: f32,
    aspect_ratio: f32,
    time: f32,
};
@group(0) @binding(0) var<uniform> uniforms: Uniforms;

@vertex fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> {
    let PI = 3.14159265;
    let num_sides = 7.0;
    let scale = 0.5 + 0.3 * uniforms.audio_peak;
    let tri_idx = f32(i / 3u);
    let sub_idx = i % 3u;
    if (sub_idx == 0u) {
        return vec4<f32>(0.0, 0.0, 0.0, 1.0);
    }
    let angle = (tri_idx + f32(sub_idx - 1u)) * 2.0 * PI / num_sides + uniforms.time * 0.5;
    return vec4<f32>(scale * cos(angle) / uniforms.aspect_ratio, scale * sin(angle), 0.0, 1.0);
}

@fragment fn fs_main() -> @location(0) vec4<f32> {
    let h = uniforms.time * 2.0 + uniforms.audio_peak * 3.0;
    let r = sin(h) * 0.5 + 0.5;
    let g = sin(h + 2.0) * 0.9 + 0.3;
    let b = sin(h + 4.0) * 0.5 + 0.5;
    let boost = uniforms.audio_peak * 0.5;
    return vec4<f32>(r + boost, g + boost, b + boost, 0.5);
}
)";

const char *passthrough_shader_wgsl = R"(
@group(0) @binding(0) var smplr: sampler;
@group(0) @binding(1) var txt: texture_2d<f32>;

@vertex fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> {
    var pos = array<vec2<f32>, 3>(vec2<f32>(-1.0, -1.0), vec2<f32>(3.0, -1.0), vec2<f32>(-1.0, 3.0));
    return vec4<f32>(pos[i], 0.0, 1.0);
}

@fragment fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> {
    return textureSample(txt, smplr, p.xy / vec2<f32>(1280.0, 720.0)); // FIXME: Resolution is hard-coded here and in the other post-process shaders.
}
)";

// The post-process shaders below are currently passthrough placeholders; the
// real blur/solarize/distort/aberration effects are not implemented yet.
const char *gaussian_blur_shader_wgsl = R"(
@group(0) @binding(0) var smplr: sampler;
@group(0) @binding(1) var txt: texture_2d<f32>;

@vertex fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> {
    var pos = array<vec2<f32>, 3>(vec2<f32>(-1.0, -1.0), vec2<f32>(3.0, -1.0), vec2<f32>(-1.0, 3.0));
    return vec4<f32>(pos[i], 0.0, 1.0);
}

@fragment fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> {
    return textureSample(txt, smplr, p.xy / vec2<f32>(1280.0, 720.0));
}
)";
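// gaussian_blur_shader_wgsl above is still identical to the passthrough
// shader; the blur itself is not written yet. A minimal sketch of one possible
// direction (a horizontal 5-tap pass) is shown below for orientation only.
// It is not wired in anywhere, the weights are illustrative, and the 1280x720
// constant mirrors the FIXME in the passthrough shader:
//
//   @fragment fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> {
//       let res = vec2<f32>(1280.0, 720.0);
//       let uv = p.xy / res;
//       let texel = vec2<f32>(1.0, 0.0) / res; // one-texel horizontal offset
//       var sum = textureSample(txt, smplr, uv) * 0.4;
//       sum = sum + textureSample(txt, smplr, uv + texel) * 0.15;
//       sum = sum + textureSample(txt, smplr, uv - texel) * 0.15;
//       sum = sum + textureSample(txt, smplr, uv + 2.0 * texel) * 0.15;
//       sum = sum + textureSample(txt, smplr, uv - 2.0 * texel) * 0.15;
//       return sum;
//   }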
const char *solarize_shader_wgsl = R"(
@group(0) @binding(0) var smplr: sampler;
@group(0) @binding(1) var txt: texture_2d<f32>;

@vertex fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> {
    var pos = array<vec2<f32>, 3>(vec2<f32>(-1.0, -1.0), vec2<f32>(3.0, -1.0), vec2<f32>(-1.0, 3.0));
    return vec4<f32>(pos[i], 0.0, 1.0);
}

@fragment fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> {
    return textureSample(txt, smplr, p.xy / vec2<f32>(1280.0, 720.0));
}
)";

const char *distort_shader_wgsl = R"(
@group(0) @binding(0) var smplr: sampler;
@group(0) @binding(1) var txt: texture_2d<f32>;

@vertex fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> {
    var pos = array<vec2<f32>, 3>(vec2<f32>(-1.0, -1.0), vec2<f32>(3.0, -1.0), vec2<f32>(-1.0, 3.0));
    return vec4<f32>(pos[i], 0.0, 1.0);
}

@fragment fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> {
    return textureSample(txt, smplr, p.xy / vec2<f32>(1280.0, 720.0));
}
)";

const char *chroma_aberration_shader_wgsl = R"(
@group(0) @binding(0) var smplr: sampler;
@group(0) @binding(1) var txt: texture_2d<f32>;

@vertex fn vs_main(@builtin(vertex_index) i: u32) -> @builtin(position) vec4<f32> {
    var pos = array<vec2<f32>, 3>(vec2<f32>(-1.0, -1.0), vec2<f32>(3.0, -1.0), vec2<f32>(-1.0, 3.0));
    return vec4<f32>(pos[i], 0.0, 1.0);
}

@fragment fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> {
    return textureSample(txt, smplr, p.xy / vec2<f32>(1280.0, 720.0));
}
)";

// --- HeptagonEffect ---

HeptagonEffect::HeptagonEffect(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format)
    : queue_(queue) {
    uniforms_ = gpu_create_buffer(device, sizeof(float) * 4,
                                  WGPUBufferUsage_Uniform | WGPUBufferUsage_CopyDst);
    ResourceBinding bindings[] = {{uniforms_, WGPUBufferBindingType_Uniform}};
    pass_ = gpu_create_render_pass(device, format, main_shader_wgsl, bindings, 1);
    pass_.vertex_count = 21; // 7 sides, one triangle (3 vertices) per side
}

void HeptagonEffect::render(WGPURenderPassEncoder pass, float time, float beat,
                            float intensity, float aspect_ratio) {
    struct {
        float p, a, t, d;
    } u = {intensity, aspect_ratio, time, 0.0f};
    wgpuQueueWriteBuffer(queue_, uniforms_.buffer, 0, &u, sizeof(u));
    wgpuRenderPassEncoderSetPipeline(pass, pass_.pipeline);
    wgpuRenderPassEncoderSetBindGroup(pass, 0, pass_.bind_group, 0, nullptr);
    wgpuRenderPassEncoderDraw(pass, pass_.vertex_count, 1, 0, 0);
}

// --- ParticlesEffect ---

ParticlesEffect::ParticlesEffect(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format)
    : queue_(queue) {
    // TODO: Restore real implementation
}

void ParticlesEffect::compute(WGPUCommandEncoder encoder, float time, float beat,
                              float intensity, float aspect_ratio) {
    (void)encoder;
    (void)time;
    (void)beat;
    (void)intensity;
    (void)aspect_ratio;
}

void ParticlesEffect::render(WGPURenderPassEncoder pass, float time, float beat,
                             float intensity, float aspect_ratio) {
    (void)pass;
    (void)time;
    (void)beat;
    (void)intensity;
    (void)aspect_ratio;
}

// --- PassthroughEffect ---

PassthroughEffect::PassthroughEffect(WGPUDevice device, WGPUTextureFormat format)
    : device_(device) {
    pipeline_ = create_post_process_pipeline(device, format, passthrough_shader_wgsl);
}

void PassthroughEffect::update_bind_group(WGPUTextureView input_view) {
    if (bind_group_) {
        wgpuBindGroupRelease(bind_group_);
    }
    WGPUBindGroupLayout bgl = wgpuRenderPipelineGetBindGroupLayout(pipeline_, 0);
    WGPUSamplerDescriptor sd = {};
    WGPUSampler sampler = wgpuDeviceCreateSampler(device_, &sd);
    WGPUBindGroupEntry bge[2] = {};
    bge[0].binding = 0;
    bge[0].sampler = sampler;
    bge[1].binding = 1;
    bge[1].textureView = input_view;
    WGPUBindGroupDescriptor bgd = {.layout = bgl, .entryCount = 2, .entries = bge};
    bind_group_ = wgpuDeviceCreateBindGroup(device_, &bgd);
}
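// Sketch only (this helper is not called anywhere in this file): how a
// pipeline built by create_post_process_pipeline would typically be drawn
// once its bind group has been updated. The vertex stage emits a hard-coded
// fullscreen triangle, so a plain 3-vertex draw with no vertex buffer is
// enough. The function name and signature are hypothetical, not part of the
// project headers.
[[maybe_unused]] static void draw_post_process(WGPURenderPassEncoder pass,
                                               WGPURenderPipeline pipeline,
                                               WGPUBindGroup bind_group) {
    wgpuRenderPassEncoderSetPipeline(pass, pipeline);
    wgpuRenderPassEncoderSetBindGroup(pass, 0, bind_group, 0, nullptr);
    wgpuRenderPassEncoderDraw(pass, 3, 1, 0, 0); // one fullscreen triangle
}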
// --- Stubs for others ---

MovingEllipseEffect::MovingEllipseEffect(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format)
    : queue_(queue) {}

void MovingEllipseEffect::render(WGPURenderPassEncoder pass, float time, float beat,
                                 float intensity, float aspect_ratio) {}

ParticleSprayEffect::ParticleSprayEffect(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format)
    : queue_(queue) {}

void ParticleSprayEffect::compute(WGPUCommandEncoder encoder, float time, float beat,
                                  float intensity, float aspect_ratio) {}

void ParticleSprayEffect::render(WGPURenderPassEncoder pass, float time, float beat,
                                 float intensity, float aspect_ratio) {}

GaussianBlurEffect::GaussianBlurEffect(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format)
    : device_(device) {
    (void)queue;
    pipeline_ = create_post_process_pipeline(device, format, gaussian_blur_shader_wgsl);
}

void GaussianBlurEffect::update_bind_group(WGPUTextureView input_view) {
    (void)input_view;
}

SolarizeEffect::SolarizeEffect(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format)
    : device_(device) {
    (void)queue;
    pipeline_ = create_post_process_pipeline(device, format, solarize_shader_wgsl);
}

void SolarizeEffect::update_bind_group(WGPUTextureView input_view) {
    (void)input_view;
}

DistortEffect::DistortEffect(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format)
    : device_(device) {
    (void)queue;
    pipeline_ = create_post_process_pipeline(device, format, distort_shader_wgsl);
}

void DistortEffect::update_bind_group(WGPUTextureView input_view) {
    (void)input_view;
}

ChromaAberrationEffect::ChromaAberrationEffect(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format)
    : device_(device) {
    (void)queue;
    pipeline_ = create_post_process_pipeline(device, format, chroma_aberration_shader_wgsl);
}

void ChromaAberrationEffect::update_bind_group(WGPUTextureView input_view) {
    (void)input_view;
}
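// The blur/solarize/distort/aberration effects above still build their
// pipelines from passthrough placeholder shaders, and their update_bind_group
// stubs do nothing yet. As a rough illustration of where
// chroma_aberration_shader_wgsl could go, the fragment stage might sample the
// colour channels at slightly shifted coordinates (sketch only, not wired in;
// the offset and the hard-coded resolution are illustrative):
//
//   @fragment fn fs_main(@builtin(position) p: vec4<f32>) -> @location(0) vec4<f32> {
//       let res = vec2<f32>(1280.0, 720.0);
//       let uv = p.xy / res;
//       let shift = vec2<f32>(2.0, 0.0) / res; // two-texel horizontal split
//       let r = textureSample(txt, smplr, uv + shift).r;
//       let g = textureSample(txt, smplr, uv).g;
//       let b = textureSample(txt, smplr, uv - shift).b;
//       return vec4<f32>(r, g, b, 1.0);
//   }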