// This file is part of the 64k demo project.
// Standalone test for loading and rendering a single mesh from a .obj file.
#include "3d/camera.h"
#include "3d/object.h"
#include "3d/renderer.h"
#include "3d/scene.h"
#include "gpu/effects/shaders.h"
#include "gpu/texture_manager.h"
#include "platform.h"
#include "procedural/generator.h"

#include <webgpu/webgpu.h>

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <map>
#include <string>
#include <vector>

// Global State
static Renderer3D g_renderer;
static TextureManager g_textures;
static Scene g_scene;
static Camera g_camera;

static WGPUDevice g_device = nullptr;
static WGPUQueue g_queue = nullptr;
static WGPUSurface g_surface = nullptr;
static WGPUAdapter g_adapter = nullptr;
static WGPUTextureFormat g_format = WGPUTextureFormat_Undefined;

// Test-specific storage for mesh buffers
static Renderer3D::MeshGpuData g_mesh_gpu_data;

// Callbacks for asynchronous WGPU initialization (matches test_3d_render.cc)
void on_adapter_request_ended(WGPURequestAdapterStatus status, WGPUAdapter adapter,
                              WGPUStringView message, void* userdata, void* user2) {
  (void)message;
  (void)user2;
  if (status == WGPURequestAdapterStatus_Success) {
    *(WGPUAdapter*)userdata = adapter;
  } else {
    fprintf(stderr, "Failed to request adapter.\n");  // Avoid WGPUStringView::s issues
  }
}

void on_device_request_ended(WGPURequestDeviceStatus status, WGPUDevice device,
                             WGPUStringView message, void* userdata, void* user2) {
  (void)message;
  (void)user2;
  if (status == WGPURequestDeviceStatus_Success) {
    *(WGPUDevice*)userdata = device;
  } else {
    fprintf(stderr, "Failed to request device.\n");  // Avoid WGPUStringView::s issues
  }
}

// --- WGPU Boilerplate ---
void init_wgpu(WGPUInstance instance, PlatformState* platform_state) {
  if (!instance) {
    fprintf(stderr, "Failed to create WGPU instance.\n");
    exit(1);
  }

  g_surface = platform_create_wgpu_surface(instance, platform_state);
  if (!g_surface) {
    fprintf(stderr, "Failed to create WGPU surface.\n");
    exit(1);
  }

  // Request Adapter
  WGPURequestAdapterOptions adapter_opts = {};
  adapter_opts.compatibleSurface = g_surface;
  adapter_opts.powerPreference = WGPUPowerPreference_HighPerformance;

  WGPURequestAdapterCallbackInfo adapter_callback_info = {};
  adapter_callback_info.mode = WGPUCallbackMode_WaitAnyOnly;
  adapter_callback_info.callback = on_adapter_request_ended;
  adapter_callback_info.userdata1 = &g_adapter;  // Corrected to userdata1
  wgpuInstanceRequestAdapter(instance, &adapter_opts, adapter_callback_info);

  // Busy-wait for adapter
  while (!g_adapter) {
    platform_wgpu_wait_any(instance);
  }

  // Request Device
  WGPUDeviceDescriptor device_desc = {};
  WGPURequestDeviceCallbackInfo device_callback_info = {};
  device_callback_info.mode = WGPUCallbackMode_WaitAnyOnly;
  device_callback_info.callback = on_device_request_ended;
  device_callback_info.userdata1 = &g_device;  // Corrected to userdata1
  wgpuAdapterRequestDevice(g_adapter, &device_desc, device_callback_info);

  // Busy-wait for device
  while (!g_device) {
    platform_wgpu_wait_any(instance);
  }

  g_queue = wgpuDeviceGetQueue(g_device);

  WGPUSurfaceCapabilities caps = {};
  wgpuSurfaceGetCapabilities(g_surface, g_adapter, &caps);
  g_format = caps.formats[0];

  WGPUSurfaceConfiguration config = {};
  config.device = g_device;
  config.format = g_format;
  config.usage = WGPUTextureUsage_RenderAttachment;
  config.width = platform_state->width;
  config.height = platform_state->height;
  config.presentMode = WGPUPresentMode_Fifo;
  config.alphaMode = WGPUCompositeAlphaMode_Opaque;
  wgpuSurfaceConfigure(g_surface, &config);
}

// --- OBJ Loading Logic ---
#include <cmath>  // For std::sqrt
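// Minimal vector helper used only by the OBJ loader below; the renderer's own math types
// (vec3/vec4/quat) are used everywhere else. It exists so we can generate smooth per-vertex
// normals for files that do not ship any.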
struct Vec3 {
  float x, y, z;
  Vec3 operator+(const Vec3& o) const { return {x + o.x, y + o.y, z + o.z}; }
  Vec3& operator+=(const Vec3& o) {
    x += o.x;
    y += o.y;
    z += o.z;
    return *this;
  }
  Vec3 operator-(const Vec3& o) const { return {x - o.x, y - o.y, z - o.z}; }
  Vec3 operator*(float s) const { return {x * s, y * s, z * s}; }
  static Vec3 cross(const Vec3& a, const Vec3& b) {
    return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x};
  }
  Vec3 normalize() const {
    float len = std::sqrt(x * x + y * y + z * z);
    if (len > 1e-6f) return {x / len, y / len, z / len};
    return {0, 0, 0};
  }
};

bool load_obj_and_create_buffers(const char* path, Object3D& out_obj) {
  std::ifstream obj_file(path);
  if (!obj_file.is_open()) {
    fprintf(stderr, "Error: Could not open mesh file: %s\n", path);
    return false;
  }

  std::vector<float> v_pos, v_norm, v_uv;
  struct RawFace {
    int v[3], vt[3], vn[3];
  };
  std::vector<RawFace> raw_faces;
  std::vector<MeshVertex> final_vertices;
  std::vector<uint32_t> final_indices;
  std::map<std::string, uint32_t> vertex_map;

  std::string obj_line;
  while (std::getline(obj_file, obj_line)) {
    if (obj_line.compare(0, 2, "v ") == 0) {
      float x, y, z;
      sscanf(obj_line.c_str(), "v %f %f %f", &x, &y, &z);
      v_pos.insert(v_pos.end(), {x, y, z});
    } else if (obj_line.compare(0, 3, "vn ") == 0) {
      float x, y, z;
      sscanf(obj_line.c_str(), "vn %f %f %f", &x, &y, &z);
      v_norm.insert(v_norm.end(), {x, y, z});
    } else if (obj_line.compare(0, 3, "vt ") == 0) {
      float u, v;
      sscanf(obj_line.c_str(), "vt %f %f", &u, &v);
      v_uv.insert(v_uv.end(), {u, v});
    } else if (obj_line.compare(0, 2, "f ") == 0) {
      char s1[64], s2[64], s3[64];
      if (sscanf(obj_line.c_str(), "f %s %s %s", s1, s2, s3) == 3) {
        std::string parts[3] = {s1, s2, s3};
        RawFace face = {};
        for (int i = 0; i < 3; ++i) {
          // Handle v//vn format
          if (parts[i].find("//") != std::string::npos) {
            sscanf(parts[i].c_str(), "%d//%d", &face.v[i], &face.vn[i]);
            face.vt[i] = 0;
          } else {
            // Handles v/vt/vn, v/vt, and bare v formats.
            int res = sscanf(parts[i].c_str(), "%d/%d/%d", &face.v[i], &face.vt[i], &face.vn[i]);
            if (res == 2) {
              face.vn[i] = 0;
            } else if (res == 1) {
              face.vt[i] = 0;
              face.vn[i] = 0;
            }
          }
        }
        raw_faces.push_back(face);
      }
    }
  }

  // Generate smooth per-vertex normals by averaging face normals when the file provides none.
  if (v_norm.empty() && !v_pos.empty()) {
    std::vector<Vec3> temp_normals(v_pos.size() / 3, {0, 0, 0});
    for (auto& face : raw_faces) {
      int i0 = face.v[0] - 1, i1 = face.v[1] - 1, i2 = face.v[2] - 1;
      Vec3 p0 = {v_pos[i0 * 3], v_pos[i0 * 3 + 1], v_pos[i0 * 3 + 2]};
      Vec3 p1 = {v_pos[i1 * 3], v_pos[i1 * 3 + 1], v_pos[i1 * 3 + 2]};
      Vec3 p2 = {v_pos[i2 * 3], v_pos[i2 * 3 + 1], v_pos[i2 * 3 + 2]};
      Vec3 n = Vec3::cross(p1 - p0, p2 - p0).normalize();
      temp_normals[i0] += n;
      temp_normals[i1] += n;
      temp_normals[i2] += n;
    }
    for (const auto& n : temp_normals) {
      Vec3 norm = n.normalize();
      v_norm.insert(v_norm.end(), {norm.x, norm.y, norm.z});
    }
    // Generated normals are per-position, so normal indices simply mirror position indices.
    for (auto& face : raw_faces) {
      face.vn[0] = face.v[0];
      face.vn[1] = face.v[1];
      face.vn[2] = face.v[2];
    }
  }

  // De-duplicate vertices: each distinct v/vt/vn combination becomes one MeshVertex.
  for (const auto& face : raw_faces) {
    for (int i = 0; i < 3; ++i) {
      char key_buf[128];
      snprintf(key_buf, sizeof(key_buf), "%d/%d/%d", face.v[i], face.vt[i], face.vn[i]);
      std::string key = key_buf;
      if (vertex_map.find(key) == vertex_map.end()) {
        vertex_map[key] = (uint32_t)final_vertices.size();
        MeshVertex v = {};
        if (face.v[i] > 0) {
          v.p[0] = v_pos[(face.v[i] - 1) * 3];
          v.p[1] = v_pos[(face.v[i] - 1) * 3 + 1];
          v.p[2] = v_pos[(face.v[i] - 1) * 3 + 2];
        }
        if (face.vn[i] > 0) {
          v.n[0] = v_norm[(face.vn[i] - 1) * 3];
          v.n[1] = v_norm[(face.vn[i] - 1) * 3 + 1];
          v.n[2] = v_norm[(face.vn[i] - 1) * 3 + 2];
        }
        if (face.vt[i] > 0) {
          v.u[0] = v_uv[(face.vt[i] - 1) * 2];
          v.u[1] = v_uv[(face.vt[i] - 1) * 2 + 1];
        }
        final_vertices.push_back(v);
      }
      final_indices.push_back(vertex_map[key]);
    }
  }

  if (final_vertices.empty()) return false;
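  // Re-centering on the AABB midpoint (below) keeps the fixed camera in main() aimed at the
  // model no matter where the OBJ's author placed its origin; the half-extents are stored on
  // the object as local_extent.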
  // Calculate AABB and center the mesh
  float min_x = 1e10f, min_y = 1e10f, min_z = 1e10f;
  float max_x = -1e10f, max_y = -1e10f, max_z = -1e10f;
  for (const auto& v : final_vertices) {
    min_x = std::min(min_x, v.p[0]);
    min_y = std::min(min_y, v.p[1]);
    min_z = std::min(min_z, v.p[2]);
    max_x = std::max(max_x, v.p[0]);
    max_y = std::max(max_y, v.p[1]);
    max_z = std::max(max_z, v.p[2]);
  }
  float cx = (min_x + max_x) * 0.5f;
  float cy = (min_y + max_y) * 0.5f;
  float cz = (min_z + max_z) * 0.5f;
  for (auto& v : final_vertices) {
    v.p[0] -= cx;
    v.p[1] -= cy;
    v.p[2] -= cz;
  }
  out_obj.local_extent =
      vec3((max_x - min_x) * 0.5f, (max_y - min_y) * 0.5f, (max_z - min_z) * 0.5f);

  // Upload vertex and index data to the GPU.
  g_mesh_gpu_data.num_indices = final_indices.size();
  g_mesh_gpu_data.vertex_buffer =
      gpu_create_buffer(g_device, final_vertices.size() * sizeof(MeshVertex),
                        WGPUBufferUsage_Vertex | WGPUBufferUsage_CopyDst, final_vertices.data())
          .buffer;
  g_mesh_gpu_data.index_buffer =
      gpu_create_buffer(g_device, final_indices.size() * sizeof(uint32_t),
                        WGPUBufferUsage_Index | WGPUBufferUsage_CopyDst, final_indices.data())
          .buffer;

  out_obj.type = ObjectType::MESH;
  // Keep a CPU-side copy of the vertices so debug mode can visualize normals later.
  out_obj.user_data = new std::vector<MeshVertex>(final_vertices);

  // This test doesn't use the asset system, so we override the renderer's internal cache lookup
  // by manually setting the buffers on the renderer object. This is a HACK for this specific tool.
  g_renderer.override_mesh_buffers(&g_mesh_gpu_data);
  return true;
}

int main(int argc, char** argv) {
  if (argc < 2) {
    printf("Usage: %s <mesh.obj> [--debug]\n", argv[0]);
    return 1;
  }
  const char* obj_path = argv[1];
  bool debug_mode = (argc > 2 && strcmp(argv[2], "--debug") == 0);

  printf("Loading mesh: %s\n", obj_path);

  PlatformState platform_state = platform_init(false, 1280, 720);
  WGPUInstance instance = wgpuCreateInstance(nullptr);
  init_wgpu(instance, &platform_state);

  InitShaderComposer();
  g_renderer.init(g_device, g_queue, g_format);
  g_renderer.resize(platform_state.width, platform_state.height);
  if (debug_mode) {
    Renderer3D::SetDebugEnabled(true);
  }

  g_textures.init(g_device, g_queue);
  ProceduralTextureDef noise_def;
  noise_def.width = 256;
  noise_def.height = 256;
  noise_def.gen_func = procedural::gen_noise;
  noise_def.params = {1234.0f, 16.0f};
  g_textures.create_procedural_texture("noise", noise_def);
  g_renderer.set_noise_texture(g_textures.get_texture_view("noise"));

  // --- Create Scene ---
  Object3D floor(ObjectType::BOX);
  floor.position = vec3(0, -2.0f, 0);
  floor.scale = vec3(25.0f, 0.2f, 25.0f);
  floor.color = vec4(0.5f, 0.5f, 0.5f, 1.0f);
  g_scene.add_object(floor);

  Object3D mesh_obj;
  if (!load_obj_and_create_buffers(obj_path, mesh_obj)) {
    printf("Failed to load or process OBJ file.\n");
    return 1;
  }
  mesh_obj.color = vec4(1.0f, 0.7f, 0.2f, 1.0f);
  mesh_obj.position = {0, 1.5, 0};  // Elevate a bit more
  g_scene.add_object(mesh_obj);

  g_camera.position = vec3(0, 3, 5);
  g_camera.target = vec3(0, 1.5, 0);

  // --- Main Loop ---
  while (!platform_should_close(&platform_state)) {
    platform_poll(&platform_state);
    float time = (float)platform_state.time;
    g_camera.aspect_ratio = platform_state.aspect_ratio;

    // Spin the loaded mesh (scene object 1; object 0 is the floor).
    g_scene.objects[1].rotation = quat::from_axis({0.5f, 1.0f, 0.0f}, time);

    if (debug_mode) {
      auto* vertices = (std::vector<MeshVertex>*)g_scene.objects[1].user_data;
      g_renderer.GetVisualDebug().add_mesh_normals(g_scene.objects[1].get_model_matrix(),
                                                   (uint32_t)vertices->size(), vertices->data());
    }

    WGPUSurfaceTexture surface_tex;
    wgpuSurfaceGetCurrentTexture(g_surface, &surface_tex);
    if (surface_tex.status == WGPUSurfaceGetCurrentTextureStatus_SuccessOptimal) {
      // WGPUSurfaceGetCurrentTextureStatus_Success is 0
      WGPUTextureView view = wgpuTextureCreateView(surface_tex.texture, nullptr);
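      // Render the floor and the loaded mesh into the acquired backbuffer view, then present.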
      g_renderer.render(g_scene, g_camera, time, view);
      wgpuTextureViewRelease(view);
      wgpuSurfacePresent(g_surface);
    }
    wgpuTextureRelease(surface_tex.texture);  // Release here, after present, outside the if block
  }

#if !defined(STRIP_ALL)
  Renderer3D::SetDebugEnabled(false);  // Reset debug mode
#endif

  delete (std::vector<MeshVertex>*)g_scene.objects[1].user_data;
  wgpuBufferRelease(g_mesh_gpu_data.vertex_buffer);
  wgpuBufferRelease(g_mesh_gpu_data.index_buffer);

  g_renderer.shutdown();
  g_textures.shutdown();
  platform_shutdown(&platform_state);
  return 0;
}
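// Example invocation (binary name and asset path are illustrative, not taken from the build
// scripts):
//   ./test_obj_render assets/model.obj --debug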