1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
|
// CNN post-processing effect implementation
// Neural network-based stylization with modular WGSL
#include "gpu/effects/cnn_effect.h"
#include "gpu/effects/post_process_helper.h"
#include "gpu/effects/shaders.h"
#include "gpu/effects/shader_composer.h"
#include "gpu/effect.h"
#include "gpu/bind_group_builder.h"
#include "gpu/sampler_cache.h"
#include "gpu/pipeline_builder.h"
// Builds the CNN layer pipeline. Its bind group layout exposes five
// bindings: linear sampler (0), input texture (1), common post-process
// uniforms (2), per-layer CNN params (3), and the original captured
// texture (4).
static WGPURenderPipeline create_cnn_pipeline(WGPUDevice device,
                                              WGPUTextureFormat format,
                                              const char* shader_code) {
  WGPUBindGroupLayout layout =
      BindGroupLayoutBuilder()
          .sampler(0, WGPUShaderStage_Fragment)
          .texture(1, WGPUShaderStage_Fragment)
          .uniform(2, WGPUShaderStage_Vertex | WGPUShaderStage_Fragment)
          .uniform(3, WGPUShaderStage_Fragment)
          .texture(4, WGPUShaderStage_Fragment)
          .build(device);

  WGPURenderPipeline result = RenderPipelineBuilder(device)
                                  .shader(shader_code)
                                  .bind_group_layout(layout)
                                  .format(format)
                                  .build();

  // The pipeline holds its own reference to the layout; drop ours.
  wgpuBindGroupLayoutRelease(layout);
  return result;
}
// Default construction: a single-layer configuration (layer 0 of 1) at
// full blend. Texture views and the bind group start null; they are
// supplied later via update_bind_group().
CNNEffect::CNNEffect(const GpuContext& ctx)
    : PostProcessEffect(ctx), layer_index_(0), total_layers_(1),
      blend_amount_(1.0f), input_view_(nullptr), original_view_(nullptr),
      bind_group_(nullptr) {
  // All layers share the same WGSL shader; layer behavior is selected via
  // the params uniform buffer, not via separate shader variants.
  pipeline_ = create_cnn_pipeline(ctx_.device, ctx_.format,
                                  cnn_layer_shader_wgsl);
}
// Parameterized construction: position within a multi-layer CNN chain
// (layer_index/total_layers) and blend amount come from `params`. Views
// and bind group start null until update_bind_group() is called.
CNNEffect::CNNEffect(const GpuContext& ctx, const CNNEffectParams& params)
    : PostProcessEffect(ctx), layer_index_(params.layer_index),
      total_layers_(params.total_layers), blend_amount_(params.blend_amount),
      input_view_(nullptr), original_view_(nullptr), bind_group_(nullptr) {
  // Same shared shader as the default constructor; per-layer behavior is
  // driven by the uniform data uploaded in init().
  pipeline_ = create_cnn_pipeline(ctx_.device, ctx_.format,
                                  cnn_layer_shader_wgsl);
}
// Hooks the effect into the demo sequence and uploads the initial layer
// parameters. Only layer 0 registers the shared "captured_frame" auxiliary
// texture, which every layer later samples as the original input.
void CNNEffect::init(MainSequence* demo) {
  PostProcessEffect::init(demo);
  demo_ = demo;

  // First layer owns the captured-frame registration; later layers reuse it.
  if (layer_index_ == 0)
    demo_->register_auxiliary_texture("captured_frame", width_, height_);

  params_buffer_.init(ctx_.device);
  const CNNLayerParams layer_params = {layer_index_, blend_amount_,
                                       {0.0f, 0.0f}};
  params_buffer_.update(ctx_.queue, layer_params);
}
// Issues the fullscreen-triangle draw (3 vertices, no vertex buffer) for
// this CNN layer. The time/beat/intensity/aspect arguments are part of the
// effect interface but are not consumed here — all shader inputs come from
// the bind group built in update_bind_group().
void CNNEffect::render(WGPURenderPassEncoder pass, float time, float beat,
                       float intensity, float aspect_ratio) {
  if (bind_group_) {
    wgpuRenderPassEncoderSetPipeline(pass, pipeline_);
    wgpuRenderPassEncoderSetBindGroup(pass, 0, bind_group_, 0, nullptr);
    wgpuRenderPassEncoderDraw(pass, 3, 1, 0, 0);
  } else {
    // Drawing without a bind group would be invalid; skip and report.
    fprintf(stderr, "CNN render: no bind_group\n");
  }
}
// Rebuilds the bind group around a new input texture view.
//
// Fix: wgpuRenderPipelineGetBindGroupLayout returns a NEW reference each
// call in the webgpu.h C API; the original code never released it, leaking
// one layout reference per rebuild. We now release it after building the
// bind group, matching how create_cnn_pipeline releases its layout.
void CNNEffect::update_bind_group(WGPUTextureView input_view) {
  input_view_ = input_view;

  // Refresh the common uniforms — resolution/aspect drive UV calculation in
  // the shader. NOTE(review): time/beat/audio are uploaded as zeros here and
  // render() never rewrites them; confirm the CNN shader ignores these fields.
  const CommonPostProcessUniforms u = {
      .resolution = {(float)width_, (float)height_},
      .aspect_ratio = (float)width_ / (float)height_,
      .time = 0.0f,
      .beat = 0.0f,
      .audio_intensity = 0.0f,
  };
  uniforms_.update(ctx_.queue, u);

  // All layers sample the frame captured by layer 0 as the "original" input.
  if (demo_) {
    original_view_ = demo_->get_auxiliary_view("captured_frame");
  }

  // The old bind group references stale views; drop it before rebuilding.
  if (bind_group_)
    wgpuBindGroupRelease(bind_group_);

  WGPUBindGroupLayout bgl = wgpuRenderPipelineGetBindGroupLayout(pipeline_, 0);
  WGPUSampler sampler =
      SamplerCache::Get().get_or_create(ctx_.device, SamplerCache::linear());
  bind_group_ = BindGroupBuilder()
                    .sampler(0, sampler)
                    .texture(1, input_view_)
                    .buffer(2, uniforms_.get().buffer, uniforms_.get().size)
                    .buffer(3, params_buffer_.get().buffer,
                            params_buffer_.get().size)
                    // Fall back to the live input until the captured frame
                    // has been registered/produced.
                    .texture(4, original_view_ ? original_view_ : input_view_)
                    .build(ctx_.device, bgl);

  // Release the per-call layout reference to avoid a per-rebuild leak.
  wgpuBindGroupLayoutRelease(bgl);
}
|