// This file is part of the 64k demo project.
// It implements the Renderer3D class.

#include "3d/renderer.h"

#include <algorithm>
#include <cstring>
#include <vector>

// Simple cube geometry (triangle list), 36 vertices.
// Currently unused on the CPU side: vs_main in kShaderCode embeds the same positions.
static const float kCubeVertices[] = {
    // Front face
    -1.0, -1.0,  1.0,   1.0, -1.0,  1.0,   1.0,  1.0,  1.0,
    -1.0, -1.0,  1.0,   1.0,  1.0,  1.0,  -1.0,  1.0,  1.0,
    // Back face
    -1.0, -1.0, -1.0,  -1.0,  1.0, -1.0,   1.0,  1.0, -1.0,
    -1.0, -1.0, -1.0,   1.0,  1.0, -1.0,   1.0, -1.0, -1.0,
    // Top face
    -1.0,  1.0, -1.0,  -1.0,  1.0,  1.0,   1.0,  1.0,  1.0,
    -1.0,  1.0, -1.0,   1.0,  1.0,  1.0,   1.0,  1.0, -1.0,
    // Bottom face
    -1.0, -1.0, -1.0,   1.0, -1.0, -1.0,   1.0, -1.0,  1.0,
    -1.0, -1.0, -1.0,   1.0, -1.0,  1.0,  -1.0, -1.0,  1.0,
    // Right face
     1.0, -1.0, -1.0,   1.0,  1.0, -1.0,   1.0,  1.0,  1.0,
     1.0, -1.0, -1.0,   1.0,  1.0,  1.0,   1.0, -1.0,  1.0,
    // Left face
    -1.0, -1.0, -1.0,  -1.0, -1.0,  1.0,  -1.0,  1.0,  1.0,
    -1.0, -1.0, -1.0,  -1.0,  1.0,  1.0,  -1.0,  1.0, -1.0,
};

static const char* kShaderCode = R"(
struct GlobalUniforms {
    view_proj: mat4x4<f32>,
    camera_pos: vec3<f32>,
    time: f32,
}

struct ObjectData {
    model: mat4x4<f32>,
    color: vec4<f32>,
    params: vec4<f32>,
}

struct ObjectsBuffer {
    objects: array<ObjectData>,
}

@group(0) @binding(0) var<uniform> globals: GlobalUniforms;
@group(0) @binding(1) var<storage, read> object_data: ObjectsBuffer;

struct VertexOutput {
    @builtin(position) position: vec4<f32>,
    @location(0) local_pos: vec3<f32>,
    @location(1) color: vec4<f32>,
    @location(2) @interpolate(flat) instance_index: u32,
    @location(3) world_pos: vec3<f32>,
}

@vertex
fn vs_main(@builtin(vertex_index) vertex_index: u32,
           @builtin(instance_index) instance_index: u32) -> VertexOutput {
    var pos = array<vec3<f32>, 36>(
        vec3(-1.0, -1.0,  1.0), vec3( 1.0, -1.0,  1.0), vec3( 1.0,  1.0,  1.0),
        vec3(-1.0, -1.0,  1.0), vec3( 1.0,  1.0,  1.0), vec3(-1.0,  1.0,  1.0),
        vec3(-1.0, -1.0, -1.0), vec3(-1.0,  1.0, -1.0), vec3( 1.0,  1.0, -1.0),
        vec3(-1.0, -1.0, -1.0), vec3( 1.0,  1.0, -1.0), vec3( 1.0, -1.0, -1.0),
        vec3(-1.0,  1.0, -1.0), vec3(-1.0,  1.0,  1.0), vec3( 1.0,  1.0,  1.0),
        vec3(-1.0,  1.0, -1.0), vec3( 1.0,  1.0,  1.0), vec3( 1.0,  1.0, -1.0),
        vec3(-1.0, -1.0, -1.0), vec3( 1.0, -1.0, -1.0), vec3( 1.0, -1.0,  1.0),
        vec3(-1.0, -1.0, -1.0), vec3( 1.0, -1.0,  1.0), vec3(-1.0, -1.0,  1.0),
        vec3( 1.0, -1.0, -1.0), vec3( 1.0,  1.0, -1.0), vec3( 1.0,  1.0,  1.0),
        vec3( 1.0, -1.0, -1.0), vec3( 1.0,  1.0,  1.0), vec3( 1.0, -1.0,  1.0),
        vec3(-1.0, -1.0, -1.0), vec3(-1.0, -1.0,  1.0), vec3(-1.0,  1.0,  1.0),
        vec3(-1.0, -1.0, -1.0), vec3(-1.0,  1.0,  1.0), vec3(-1.0,  1.0, -1.0)
    );

    let p = pos[vertex_index];
    let obj = object_data.objects[instance_index];

    // Model -> World -> Clip
    let world_pos = obj.model * vec4(p, 1.0);
    let clip_pos = globals.view_proj * world_pos;

    var out: VertexOutput;
    out.position = clip_pos;
    out.local_pos = p;  // Proxy geometry local coords (-1 to 1)
    out.color = obj.color;
    out.instance_index = instance_index;
    out.world_pos = world_pos.xyz;
    return out;
}

// --- SDF primitives ---
// All primitives are centered at the origin.

fn sdSphere(p: vec3<f32>, r: f32) -> f32 {
    return length(p) - r;
}

fn sdBox(p: vec3<f32>, b: vec3<f32>) -> f32 {
    let q = abs(p) - b;
    return length(max(q, vec3(0.0))) + min(max(q.x, max(q.y, q.z)), 0.0);
}

fn sdTorus(p: vec3<f32>, t: vec2<f32>) -> f32 {
    let q = vec2(length(p.xz) - t.x, p.y);
    return length(q) - t.y;
}

// --- Dispatchers ---
// Shape IDs: 0 = Cube (wireframe proxy), 1 = Sphere, 2 = Box, 3 = Torus
fn get_dist(p: vec3<f32>, shape: f32) -> f32 {
    if (shape == 1.0) { return sdSphere(p, 0.9); }
    if (shape == 2.0) { return sdBox(p, vec3(0.7)); }
    if (shape == 3.0) { return sdTorus(p, vec2(0.6, 0.25)); }
    return 100.0;
}
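// The outward normal of a signed distance field is the normalized gradient of
// the distance function; get_normal() below approximates that gradient with
// central differences of step 0.001, using swizzles of e = (0.001, 0.0) to
// offset one axis at a time.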
// Analytical normals where possible, fall back to numerical.
fn get_normal(p: vec3<f32>, shape: f32) -> vec3<f32> {
    if (shape == 1.0) {
        // Sphere: centered at the origin, so the normal is simply the direction.
        return normalize(p);
    }
    // Finite (central) differences for the others.
    let e = vec2(0.001, 0.0);
    return normalize(vec3(
        get_dist(p + e.xyy, shape) - get_dist(p - e.xyy, shape),
        get_dist(p + e.yxy, shape) - get_dist(p - e.yxy, shape),
        get_dist(p + e.yyx, shape) - get_dist(p - e.yyx, shape)
    ));
}

@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    let obj = object_data.objects[in.instance_index];
    let shape = obj.params.x;

    // Case 0: the central cube (wireframe/solid box logic) - rasterized proxy only.
    if (shape == 0.0) {
        let d = abs(in.local_pos);
        let edge_dist = max(max(d.x, d.y), d.z);
        var col = in.color.rgb;
        if (edge_dist > 0.95) {
            col = vec3(1.0, 1.0, 1.0);  // White edges
        } else {
            // Simple face shading
            let normal = normalize(cross(dpdx(in.local_pos), dpdy(in.local_pos)));
            let light = normalize(vec3(0.5, 1.0, 0.5));
            let diff = max(dot(normal, light), 0.2);
            col = col * diff;
        }
        return vec4(col, 1.0);
    }

    // Case 1+: raymarch inside the proxy box.
    let center = vec3(obj.model[3].x, obj.model[3].y, obj.model[3].z);
    // Scale: assume a uniform scale in the model matrix.
    let scale = length(vec3(obj.model[0].x, obj.model[0].y, obj.model[0].z));

    let ro = globals.camera_pos;
    let rd = normalize(in.world_pos - globals.camera_pos);

    // Start marching at the proxy surface.
    var t = length(in.world_pos - ro);
    var p = ro + rd * t;

    // Extract the rotation (normalized columns of the model matrix).
    let rot = mat3x3<f32>(
        obj.model[0].xyz / scale,
        obj.model[1].xyz / scale,
        obj.model[2].xyz / scale
    );

    var hit = false;

    // Raymarch loop
    for (var i = 0; i < 40; i++) {
        // Transform p into local unscaled space for the SDF evaluation:
        // q = inv(R) * (p - center) / scale
        let q = transpose(rot) * (p - center) / scale;
        let d_local = get_dist(q, shape);
        let d_world = d_local * scale;
        if (d_world < 0.001) {
            hit = true;
            break;
        }
        if (d_world > 3.0 * scale) {
            break;
        }
        p = p + rd * d_world;
    }

    if (!hit) {
        discard;
    }

    // Shading: recompute the local position at the hit point.
    let q_hit = transpose(rot) * (p - center) / scale;

    // Normal calculation: compute the normal in local space, then rotate it to world space.
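    // Because the model matrix is assumed to be rotation * uniform scale, the
    // normalized columns gathered in `rot` form an orthonormal basis, so
    // inverse(rot) == transpose(rot) and the usual inverse-transpose "normal
    // matrix" reduces to `rot` itself; the uniform scale only affects the
    // normal's length, which the normalize() below removes.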
    let n_local = get_normal(q_hit, shape);
    let n_world = rot * n_local;
    let normal = normalize(n_world);

    let light_dir = normalize(vec3(1.0, 1.0, 1.0));
    let diff = max(dot(normal, light_dir), 0.0);
    let amb = 0.1;
    let lighting = diff + amb;

    return vec4(in.color.rgb * lighting, 1.0);
}
)";

void Renderer3D::init(WGPUDevice device, WGPUQueue queue, WGPUTextureFormat format) {
    device_ = device;
    queue_ = queue;
    format_ = format;
    create_default_resources();
    create_pipeline();
}

void Renderer3D::shutdown() {
    if (pipeline_) wgpuRenderPipelineRelease(pipeline_);
    if (bind_group_) wgpuBindGroupRelease(bind_group_);
    if (global_uniform_buffer_) wgpuBufferRelease(global_uniform_buffer_);
    if (object_storage_buffer_) wgpuBufferRelease(object_storage_buffer_);
    if (depth_view_) wgpuTextureViewRelease(depth_view_);
    if (depth_texture_) wgpuTextureRelease(depth_texture_);
}

void Renderer3D::resize(int width, int height) {
    if (width == width_ && height == height_) return;
    width_ = width;
    height_ = height;

    if (depth_view_) wgpuTextureViewRelease(depth_view_);
    if (depth_texture_) wgpuTextureRelease(depth_texture_);

    WGPUTextureDescriptor desc = {};
    desc.usage = WGPUTextureUsage_RenderAttachment;
    desc.dimension = WGPUTextureDimension_2D;
    desc.size = {(uint32_t)width, (uint32_t)height, 1};
    desc.format = WGPUTextureFormat_Depth24Plus;  // Common depth format
    desc.mipLevelCount = 1;
    desc.sampleCount = 1;
    depth_texture_ = wgpuDeviceCreateTexture(device_, &desc);

    WGPUTextureViewDescriptor view_desc = {};
    view_desc.format = WGPUTextureFormat_Depth24Plus;
    view_desc.dimension = WGPUTextureViewDimension_2D;
    view_desc.aspect = WGPUTextureAspect_DepthOnly;
    view_desc.arrayLayerCount = 1;
    view_desc.mipLevelCount = 1;
    depth_view_ = wgpuTextureCreateView(depth_texture_, &view_desc);
}

void Renderer3D::create_default_resources() {
    // Uniform buffer
    global_uniform_buffer_ =
        gpu_create_buffer(device_, sizeof(GlobalUniforms),
                          WGPUBufferUsage_Uniform | WGPUBufferUsage_CopyDst, nullptr).buffer;

    // Storage buffer
    size_t storage_size = sizeof(ObjectData) * kMaxObjects;
    object_storage_buffer_ =
        gpu_create_buffer(device_, storage_size,
                          WGPUBufferUsage_Storage | WGPUBufferUsage_CopyDst, nullptr).buffer;
}
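// GlobalUniforms and ObjectData are declared in "3d/renderer.h" (not shown here).
// For the sizeof() values above to match the WGSL structs in kShaderCode, the
// host-side structs presumably follow WGSL's alignment rules; a layout sketch
// under that assumption (field names taken from the shader):
//
//   struct GlobalUniforms {   // 80 bytes total
//       mat4  view_proj;      // offset  0, 64 bytes
//       vec3  camera_pos;     // offset 64, 12 bytes (16-byte aligned)
//       float time;           // offset 76, fills the vec3 padding
//   };
//
//   struct ObjectData {       // 96 bytes total (= array stride in the storage buffer)
//       mat4  model;          // offset  0, 64 bytes
//       vec4  color;          // offset 64, 16 bytes
//       vec4  params;         // offset 80, params.x carries the shape ID
//   };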
void Renderer3D::create_pipeline() {
    // Bind group layout
    WGPUBindGroupLayoutEntry entries[2] = {};
    // Binding 0: globals (uniform)
    entries[0].binding = 0;
    entries[0].visibility = WGPUShaderStage_Vertex | WGPUShaderStage_Fragment;
    entries[0].buffer.type = WGPUBufferBindingType_Uniform;
    entries[0].buffer.minBindingSize = sizeof(GlobalUniforms);
    // Binding 1: object data (read-only storage)
    entries[1].binding = 1;
    entries[1].visibility = WGPUShaderStage_Vertex | WGPUShaderStage_Fragment;
    entries[1].buffer.type = WGPUBufferBindingType_ReadOnlyStorage;
    entries[1].buffer.minBindingSize = sizeof(ObjectData) * kMaxObjects;

    WGPUBindGroupLayoutDescriptor bgl_desc = {};
    bgl_desc.entryCount = 2;
    bgl_desc.entries = entries;
    WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device_, &bgl_desc);

    // Bind group
    WGPUBindGroupEntry bg_entries[2] = {};
    bg_entries[0].binding = 0;
    bg_entries[0].buffer = global_uniform_buffer_;
    bg_entries[0].size = sizeof(GlobalUniforms);
    bg_entries[1].binding = 1;
    bg_entries[1].buffer = object_storage_buffer_;
    bg_entries[1].size = sizeof(ObjectData) * kMaxObjects;

    WGPUBindGroupDescriptor bg_desc = {};
    bg_desc.layout = bgl;
    bg_desc.entryCount = 2;
    bg_desc.entries = bg_entries;
    bind_group_ = wgpuDeviceCreateBindGroup(device_, &bg_desc);

    // Pipeline layout
    WGPUPipelineLayoutDescriptor pl_desc = {};
    pl_desc.bindGroupLayoutCount = 1;
    pl_desc.bindGroupLayouts = &bgl;
    WGPUPipelineLayout pipeline_layout = wgpuDeviceCreatePipelineLayout(device_, &pl_desc);

    // Shader code
    const char* shader_source = kShaderCode;

    // Shader module
#if defined(DEMO_CROSS_COMPILE_WIN32)
    WGPUShaderModuleWGSLDescriptor wgsl_desc = {};
    wgsl_desc.chain.sType = WGPUSType_ShaderModuleWGSLDescriptor;
    wgsl_desc.code = shader_source;
    WGPUShaderModuleDescriptor shader_desc = {};
    shader_desc.nextInChain = (const WGPUChainedStruct*)&wgsl_desc.chain;
#else
    WGPUShaderSourceWGSL wgsl_desc = {};
    wgsl_desc.chain.sType = WGPUSType_ShaderSourceWGSL;
    wgsl_desc.code = {shader_source, strlen(shader_source)};
    WGPUShaderModuleDescriptor shader_desc = {};
    shader_desc.nextInChain = (const WGPUChainedStruct*)&wgsl_desc.chain;
#endif
    WGPUShaderModule shader_module = wgpuDeviceCreateShaderModule(device_, &shader_desc);

    // Depth-stencil state
    WGPUDepthStencilState depth_stencil = {};
    depth_stencil.format = WGPUTextureFormat_Depth24Plus;
    depth_stencil.depthWriteEnabled = WGPUOptionalBool_True;
    depth_stencil.depthCompare = WGPUCompareFunction_Less;

    // Render pipeline
    WGPURenderPipelineDescriptor desc = {};
    desc.layout = pipeline_layout;

    // Vertex stage
    desc.vertex.module = shader_module;
#if defined(DEMO_CROSS_COMPILE_WIN32)
    desc.vertex.entryPoint = "vs_main";
#else
    desc.vertex.entryPoint = {"vs_main", 7};
#endif

    // Fragment stage
    WGPUColorTargetState color_target = {};
    color_target.format = format_;
    color_target.writeMask = WGPUColorWriteMask_All;

    WGPUFragmentState fragment = {};
    fragment.module = shader_module;
#if defined(DEMO_CROSS_COMPILE_WIN32)
    fragment.entryPoint = "fs_main";
#else
    fragment.entryPoint = {"fs_main", 7};
#endif
    fragment.targetCount = 1;
    fragment.targets = &color_target;
    desc.fragment = &fragment;

    desc.primitive.topology = WGPUPrimitiveTopology_TriangleList;
    desc.primitive.cullMode = WGPUCullMode_Back;
    desc.primitive.frontFace = WGPUFrontFace_CCW;
    desc.depthStencil = &depth_stencil;
    desc.multisample.count = 1;
    desc.multisample.mask = 0xFFFFFFFF;

    pipeline_ = wgpuDeviceCreateRenderPipeline(device_, &desc);

    wgpuBindGroupLayoutRelease(bgl);
    wgpuPipelineLayoutRelease(pipeline_layout);
    wgpuShaderModuleRelease(shader_module);
}

void Renderer3D::update_uniforms(const Scene& scene, const Camera& camera, float time) {
    // Update globals
    GlobalUniforms globals;
    globals.view_proj = camera.get_projection_matrix() * camera.get_view_matrix();
    globals.camera_pos = camera.position;
    globals.time = time;
    wgpuQueueWriteBuffer(queue_, global_uniform_buffer_, 0, &globals, sizeof(GlobalUniforms));

    // Update objects
    std::vector<ObjectData> obj_data;
    obj_data.reserve(scene.objects.size());
    for (const auto& obj : scene.objects) {
        ObjectData data;
        data.model = obj.get_model_matrix();
        data.color = obj.color;

        // Map the ObjectType enum to the float shape ID used by the shader.
        float type_id = 0.0f;
        if (obj.type == ObjectType::SPHERE) type_id = 1.0f;
        else if (obj.type == ObjectType::CUBE) type_id = 0.0f;
        else if (obj.type == ObjectType::TORUS) type_id = 3.0f;
        else if (obj.type == ObjectType::BOX) type_id = 2.0f;
        data.params = vec4(type_id, 0, 0, 0);

        obj_data.push_back(data);
        if (obj_data.size() >= kMaxObjects) break;
    }

    if (!obj_data.empty()) {
        wgpuQueueWriteBuffer(queue_, object_storage_buffer_, 0, obj_data.data(),
                             obj_data.size() * sizeof(ObjectData));
    }
}
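// Note: the shape IDs written above must stay in sync with the get_dist()
// dispatcher in kShaderCode (0 = cube proxy, 1 = sphere, 2 = box, 3 = torus).
// Object i in scene.objects becomes instance i of the draw call in render(),
// which is how fs_main looks up its ObjectData via @builtin(instance_index);
// objects beyond kMaxObjects are silently dropped.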
void Renderer3D::render(const Scene& scene, const Camera& camera, float time,
                        WGPUTextureView target_view, WGPUTextureView depth_view_opt) {
    update_uniforms(scene, camera, time);

    WGPUTextureView depth_view = depth_view_opt ? depth_view_opt : depth_view_;
    if (!depth_view) return;  // Should have been created by resize()

    WGPURenderPassColorAttachment color_attachment = {};
    gpu_init_color_attachment(color_attachment, target_view);
    color_attachment.clearValue = {0.05, 0.05, 0.1, 1.0};  // Dark blue-ish background

    WGPURenderPassDepthStencilAttachment depth_attachment = {};
    depth_attachment.view = depth_view;
    depth_attachment.depthLoadOp = WGPULoadOp_Clear;
    depth_attachment.depthStoreOp = WGPUStoreOp_Store;
    depth_attachment.depthClearValue = 1.0f;

    WGPURenderPassDescriptor pass_desc = {};
    pass_desc.colorAttachmentCount = 1;
    pass_desc.colorAttachments = &color_attachment;
    pass_desc.depthStencilAttachment = &depth_attachment;

    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device_, nullptr);
    WGPURenderPassEncoder pass = wgpuCommandEncoderBeginRenderPass(encoder, &pass_desc);

    wgpuRenderPassEncoderSetPipeline(pass, pipeline_);
    wgpuRenderPassEncoderSetBindGroup(pass, 0, bind_group_, 0, nullptr);

    // Draw all objects in one call: 36 vertices (cube proxy), one instance per object.
    uint32_t instance_count = (uint32_t)std::min((size_t)kMaxObjects, scene.objects.size());
    if (instance_count > 0) {
        wgpuRenderPassEncoderDraw(pass, 36, instance_count, 0, 0);
    }

    wgpuRenderPassEncoderEnd(pass);

    WGPUCommandBuffer commands = wgpuCommandEncoderFinish(encoder, nullptr);
    wgpuQueueSubmit(queue_, 1, &commands);

    wgpuRenderPassEncoderRelease(pass);
    wgpuCommandBufferRelease(commands);
    wgpuCommandEncoderRelease(encoder);
}
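// For reference, a typical call sequence from the demo's frame loop might look
// like the sketch below; the surrounding names (surface_format, surface_view,
// window dimensions, time) are illustrative and not part of this file:
//
//   renderer.init(device, queue, surface_format);
//   renderer.resize(window_width, window_height);  // and again on every resize
//   // per frame:
//   renderer.render(scene, camera, time, surface_view, /*depth_view_opt=*/nullptr);
//   // on exit:
//   renderer.shutdown();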