1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
|
// CNN post-processing effect implementation
// Neural network-based stylization with modular WGSL
#include "gpu/effects/cnn_effect.h"
#include "gpu/effects/post_process_helper.h"
#include "gpu/effects/shaders.h"
#include "gpu/effects/shader_composer.h"
#include "gpu/effect.h"
// Create custom pipeline with 5 bindings (includes original texture).
// Composes the WGSL via ShaderComposer, builds an explicit bind group layout
// (sampler, input texture, shared uniforms, effect params, original texture),
// and returns the render pipeline. Intermediate API objects are released
// before returning — the pipeline retains its own references.
static WGPURenderPipeline create_cnn_pipeline(WGPUDevice device,
                                              WGPUTextureFormat format,
                                              const char* shader_code) {
  std::string composed_shader = ShaderComposer::Get().Compose({}, shader_code);

  WGPUShaderModuleDescriptor shader_desc = {};
  WGPUShaderSourceWGSL wgsl_src = {};
  wgsl_src.chain.sType = WGPUSType_ShaderSourceWGSL;
  wgsl_src.code = str_view(composed_shader.c_str());
  shader_desc.nextInChain = &wgsl_src.chain;
  WGPUShaderModule shader_module =
      wgpuDeviceCreateShaderModule(device, &shader_desc);

  // Layout must match the bind group built in CNNEffect::update_bind_group.
  WGPUBindGroupLayoutEntry bgl_entries[5] = {};
  bgl_entries[0].binding = 0;  // sampler
  bgl_entries[0].visibility = WGPUShaderStage_Fragment;
  bgl_entries[0].sampler.type = WGPUSamplerBindingType_Filtering;
  bgl_entries[1].binding = 1;  // input texture
  bgl_entries[1].visibility = WGPUShaderStage_Fragment;
  bgl_entries[1].texture.sampleType = WGPUTextureSampleType_Float;
  bgl_entries[1].texture.viewDimension = WGPUTextureViewDimension_2D;
  bgl_entries[2].binding = 2;  // uniforms (vertex stage reads them too)
  bgl_entries[2].visibility = WGPUShaderStage_Vertex | WGPUShaderStage_Fragment;
  bgl_entries[2].buffer.type = WGPUBufferBindingType_Uniform;
  bgl_entries[3].binding = 3;  // effect params
  bgl_entries[3].visibility = WGPUShaderStage_Fragment;
  bgl_entries[3].buffer.type = WGPUBufferBindingType_Uniform;
  bgl_entries[4].binding = 4;  // original texture (captured frame)
  bgl_entries[4].visibility = WGPUShaderStage_Fragment;
  bgl_entries[4].texture.sampleType = WGPUTextureSampleType_Float;
  bgl_entries[4].texture.viewDimension = WGPUTextureViewDimension_2D;

  WGPUBindGroupLayoutDescriptor bgl_desc = {};
  bgl_desc.entryCount = 5;
  bgl_desc.entries = bgl_entries;
  WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bgl_desc);

  WGPUPipelineLayoutDescriptor pl_desc = {};
  pl_desc.bindGroupLayoutCount = 1;
  pl_desc.bindGroupLayouts = &bgl;
  WGPUPipelineLayout pl = wgpuDeviceCreatePipelineLayout(device, &pl_desc);

  WGPUColorTargetState color_target = {};
  color_target.format = format;
  color_target.writeMask = WGPUColorWriteMask_All;
  WGPUFragmentState fragment_state = {};
  fragment_state.module = shader_module;
  fragment_state.entryPoint = str_view("fs_main");
  fragment_state.targetCount = 1;
  fragment_state.targets = &color_target;

  WGPURenderPipelineDescriptor pipeline_desc = {};
  pipeline_desc.layout = pl;
  pipeline_desc.vertex.module = shader_module;
  pipeline_desc.vertex.entryPoint = str_view("vs_main");
  pipeline_desc.fragment = &fragment_state;
  pipeline_desc.primitive.topology = WGPUPrimitiveTopology_TriangleList;
  pipeline_desc.multisample.count = 1;
  pipeline_desc.multisample.mask = 0xFFFFFFFF;
  WGPURenderPipeline pipeline =
      wgpuDeviceCreateRenderPipeline(device, &pipeline_desc);

  // Fix: the pipeline holds its own references to these objects; without the
  // releases below, one shader module, one bind group layout, and one
  // pipeline layout leaked per pipeline created.
  wgpuPipelineLayoutRelease(pl);
  wgpuBindGroupLayoutRelease(bgl);
  wgpuShaderModuleRelease(shader_module);
  return pipeline;
}
// Default configuration: a single layer at index 0, fully blended.
CNNEffect::CNNEffect(const GpuContext& ctx) : PostProcessEffect(ctx) {
  layer_index_ = 0;
  total_layers_ = 1;
  blend_amount_ = 1.0f;
  input_view_ = nullptr;
  original_view_ = nullptr;
  bind_group_ = nullptr;
  pipeline_ =
      create_cnn_pipeline(ctx_.device, ctx_.format, cnn_layer_shader_wgsl);
}
// Parameterized configuration: layer position and blend come from params.
CNNEffect::CNNEffect(const GpuContext& ctx, const CNNEffectParams& params)
    : PostProcessEffect(ctx) {
  layer_index_ = params.layer_index;
  total_layers_ = params.total_layers;
  blend_amount_ = params.blend_amount;
  input_view_ = nullptr;
  original_view_ = nullptr;
  bind_group_ = nullptr;
  pipeline_ =
      create_cnn_pipeline(ctx_.device, ctx_.format, cnn_layer_shader_wgsl);
}
// One-time setup: initialize the params buffer, register the shared
// "captured_frame" auxiliary texture, and upload the initial layer params.
void CNNEffect::init(MainSequence* demo) {
  PostProcessEffect::init(demo);
  demo_ = demo;
  params_buffer_.init(ctx_.device);

  // Only the first layer registers the auxiliary texture; every layer later
  // reads it back as the original (pre-network) input.
  const bool is_first_layer = (layer_index_ == 0);
  if (is_first_layer) {
    demo_->register_auxiliary_texture("captured_frame", width_, height_);
  }

  const CNNLayerParams layer_params = {layer_index_, blend_amount_,
                                       {0.0f, 0.0f}};
  params_buffer_.update(ctx_.queue, layer_params);
}
// Encode the layer's draw: bind pipeline + resources and emit a fullscreen
// triangle (3 vertices, positions generated in vs_main).
void CNNEffect::render(WGPURenderPassEncoder pass, float time, float beat,
                       float intensity, float aspect_ratio) {
  // Nothing to draw until update_bind_group() has produced a bind group.
  if (bind_group_ == nullptr) {
    return;
  }
  wgpuRenderPassEncoderSetPipeline(pass, pipeline_);
  wgpuRenderPassEncoderSetBindGroup(pass, 0, bind_group_, 0, nullptr);
  wgpuRenderPassEncoderDraw(pass, 3, 1, 0, 0);
}
// Rebuild the bind group for a new input view. The original (pre-network)
// frame is fetched from the "captured_frame" auxiliary texture; if it is not
// available, the input view doubles as the original. Intermediate API objects
// are released after the bind group is created — it retains its own
// references.
void CNNEffect::update_bind_group(WGPUTextureView input_view) {
  input_view_ = input_view;

  // All layers: get captured frame (original input from layer 0).
  if (demo_) {
    original_view_ = demo_->get_auxiliary_view("captured_frame");
  }

  // Drop the previous bind group before building its replacement.
  if (bind_group_)
    wgpuBindGroupRelease(bind_group_);

  WGPUBindGroupLayout bgl = wgpuRenderPipelineGetBindGroupLayout(pipeline_, 0);

  WGPUSamplerDescriptor sd = {};
  sd.magFilter = WGPUFilterMode_Linear;
  sd.minFilter = WGPUFilterMode_Linear;
  sd.maxAnisotropy = 1;
  WGPUSampler sampler = wgpuDeviceCreateSampler(ctx_.device, &sd);

  // Entries mirror the layout built in create_cnn_pipeline.
  WGPUBindGroupEntry bge[5] = {};
  bge[0].binding = 0;
  bge[0].sampler = sampler;
  bge[1].binding = 1;
  bge[1].textureView = input_view_;
  bge[2].binding = 2;
  bge[2].buffer = uniforms_.get().buffer;
  bge[2].size = uniforms_.get().size;
  bge[3].binding = 3;
  bge[3].buffer = params_buffer_.get().buffer;
  bge[3].size = params_buffer_.get().size;
  bge[4].binding = 4;
  bge[4].textureView = original_view_ ? original_view_ : input_view_;  // Fallback

  WGPUBindGroupDescriptor bgd = {};
  bgd.layout = bgl;
  bgd.entryCount = 5;
  bgd.entries = bge;
  bind_group_ = wgpuDeviceCreateBindGroup(ctx_.device, &bgd);

  // Fix: the bind group holds its own references; without these releases a
  // sampler and a bind group layout (wgpuRenderPipelineGetBindGroupLayout
  // returns an owned reference) leaked on every rebuild.
  wgpuSamplerRelease(sampler);
  wgpuBindGroupLayoutRelease(bgl);
}
|