// GPU texture readback utility implementation
// Extracts texture pixels to CPU memory for offline processing
#include "gpu/texture_readback.h"
#if !defined(STRIP_ALL)
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>
#if defined(__EMSCRIPTEN__)
#include <emscripten.h>  // emscripten_sleep
#endif
// Callback state for async buffer mapping
struct MapState {
bool done = false;
WGPUMapAsyncStatus status = WGPUMapAsyncStatus_Unknown;
};
std::vector<uint8_t> read_texture_pixels(
WGPUInstance instance,
WGPUDevice device,
WGPUTexture texture,
int width,
int height) {
// Align bytes per row to 256 (COPY_BYTES_PER_ROW_ALIGNMENT)
const uint32_t bytes_per_pixel = 4; // BGRA8
const uint32_t unaligned_bytes_per_row = width * bytes_per_pixel;
const uint32_t aligned_bytes_per_row =
((unaligned_bytes_per_row + 255) / 256) * 256;
const size_t buffer_size = aligned_bytes_per_row * height;
std::vector<uint8_t> pixels(width * height * bytes_per_pixel);
// Create staging buffer for readback (with aligned size)
const WGPUBufferDescriptor buffer_desc = {
.usage = WGPUBufferUsage_CopyDst | WGPUBufferUsage_MapRead,
.size = buffer_size,
};
WGPUBuffer staging = wgpuDeviceCreateBuffer(device, &buffer_desc);
assert(staging && "Failed to create staging buffer");
// Create command encoder for copy operation
const WGPUCommandEncoderDescriptor enc_desc = {};
WGPUCommandEncoder encoder =
wgpuDeviceCreateCommandEncoder(device, &enc_desc);
// Copy texture to buffer
const WGPUTexelCopyTextureInfo src = {
.texture = texture,
.mipLevel = 0,
.origin = {0, 0, 0},
};
  const WGPUTexelCopyBufferInfo dst = {
      .layout =
          {
              .bytesPerRow = aligned_bytes_per_row,
              .rowsPerImage = static_cast<uint32_t>(height),
          },
      .buffer = staging,
  };
const WGPUExtent3D copy_size = {static_cast<uint32_t>(width),
static_cast<uint32_t>(height), 1};
  wgpuCommandEncoderCopyTextureToBuffer(encoder, &src, &dst, &copy_size);
// Submit commands
WGPUCommandBuffer commands = wgpuCommandEncoderFinish(encoder, nullptr);
WGPUQueue queue = wgpuDeviceGetQueue(device);
wgpuQueueSubmit(queue, 1, &commands);
wgpuCommandBufferRelease(commands);
wgpuCommandEncoderRelease(encoder);
wgpuQueueRelease(queue); // Release the queue reference
// Wait for copy to complete before mapping
wgpuDevicePoll(device, true, nullptr);
  // Map buffer for reading (callback API differs between the Win32
  // cross-compile target and native builds)
#if defined(DEMO_CROSS_COMPILE_WIN32)
// Win32: Old callback API
MapState map_state = {};
  auto map_cb = [](WGPUBufferMapAsyncStatus status, void* userdata) {
    MapState* state = static_cast<MapState*>(userdata);
    // The legacy callback reports WGPUBufferMapAsyncStatus; record success in
    // the shared MapState (on failure the status stays Unknown).
    if (status == WGPUBufferMapAsyncStatus_Success) {
      state->status = WGPUMapAsyncStatus_Success;
    }
    state->done = true;
  };
wgpuBufferMapAsync(staging, WGPUMapMode_Read, 0, buffer_size, map_cb,
&map_state);
#else
// Native: New callback info API
MapState map_state = {};
auto map_cb = [](WGPUMapAsyncStatus status, WGPUStringView message,
void* userdata, void* user2) {
(void)message;
(void)user2;
MapState* state = static_cast<MapState*>(userdata);
state->status = status;
state->done = true;
};
WGPUBufferMapCallbackInfo map_info = {};
map_info.mode = WGPUCallbackMode_AllowProcessEvents; // Fire during ProcessEvents
map_info.callback = map_cb;
map_info.userdata1 = &map_state;
wgpuBufferMapAsync(staging, WGPUMapMode_Read, 0, buffer_size, map_info);
#endif
// Wait for mapping to complete (synchronous blocking)
for (int i = 0; i < 100 && !map_state.done; ++i) {
#if defined(__EMSCRIPTEN__)
emscripten_sleep(10);
#else
wgpuDevicePoll(device, true, nullptr);
#endif
}
if (!map_state.done || map_state.status != WGPUMapAsyncStatus_Success) {
wgpuBufferRelease(staging);
    return pixels;  // Zero-filled (not empty) buffer on timeout or failure
}
// Copy data from mapped buffer (handle row padding)
const uint8_t* mapped_data = static_cast<const uint8_t*>(
wgpuBufferGetConstMappedRange(staging, 0, buffer_size));
if (mapped_data) {
    // If rows carry 256-byte alignment padding, copy row by row to strip it
if (aligned_bytes_per_row != unaligned_bytes_per_row) {
for (int y = 0; y < height; ++y) {
memcpy(pixels.data() + y * unaligned_bytes_per_row,
mapped_data + y * aligned_bytes_per_row,
unaligned_bytes_per_row);
}
} else {
// No padding, direct copy
memcpy(pixels.data(), mapped_data, pixels.size());
}
}
// Cleanup
wgpuBufferUnmap(staging);
wgpuBufferRelease(staging);
return pixels;
}
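// Usage sketch (illustrative; assumes `texture` was created with
// WGPUTextureUsage_CopySrc and a 4-byte-per-pixel format such as BGRA8Unorm,
// which is the layout this helper expects). On failure the returned buffer is
// zero-filled rather than empty.
//
//   std::vector<uint8_t> bgra =
//       read_texture_pixels(instance, device, texture, width, height);
//   // Channel c (0 = B, 1 = G, 2 = R, 3 = A) of pixel (x, y):
//   // uint8_t value = bgra[(y * width + x) * 4 + c];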
// Half-float (FP16) to float conversion
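// IEEE 754 binary16 layout: 1 sign bit, 5 exponent bits (bias 15), 10 mantissa
// bits. Widening to binary32 rebiases the exponent (bias 127, hence the +112
// below) and shifts the mantissa to the top of the 23-bit field (<< 13).
// Spot checks: 0x3C00 → 1.0f, 0xBC00 → -1.0f, 0x0001 → 2^-24, 0x7C00 → +inf.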
static float fp16_to_float(uint16_t h) {
  // Cast before shifting so the sign bit lands in bit 31 without signed overflow
  uint32_t sign = static_cast<uint32_t>(h & 0x8000) << 16;
uint32_t exp = (h & 0x7C00) >> 10;
uint32_t mant = (h & 0x03FF);
if (exp == 0) {
if (mant == 0) {
// Zero
uint32_t bits = sign;
float result;
memcpy(&result, &bits, sizeof(float));
return result;
}
// Denormalized
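    // Normalize: shift the mantissa up until the implicit leading 1
    // (bit 0x400) appears, decrementing the exponent for each shift.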
exp = 1;
while ((mant & 0x400) == 0) {
mant <<= 1;
exp--;
}
mant &= 0x3FF;
} else if (exp == 31) {
// Inf or NaN
uint32_t bits = sign | 0x7F800000 | (mant << 13);
float result;
memcpy(&result, &bits, sizeof(float));
return result;
}
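  // For subnormal inputs the loop above already adjusted `exp`, so the same
  // rebias applies here.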
uint32_t bits = sign | ((exp + 112) << 23) | (mant << 13);
float result;
memcpy(&result, &bits, sizeof(float));
return result;
}
std::vector<uint8_t> texture_readback_fp16_to_u8(
WGPUDevice device,
WGPUQueue queue,
WGPUTexture texture,
int width,
int height) {
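  // Assumes an RGBA16Float source whose channels hold signed values in [-1, 1]
  // (e.g. normals or velocities); output is remapped to bytes in BGRA order.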
// Align bytes per row to 256
const uint32_t bytes_per_pixel = 8; // RGBA16Float = 4 × 2 bytes
const uint32_t unaligned_bytes_per_row = width * bytes_per_pixel;
const uint32_t aligned_bytes_per_row =
((unaligned_bytes_per_row + 255) / 256) * 256;
const size_t buffer_size = aligned_bytes_per_row * height;
// Create staging buffer
const WGPUBufferDescriptor buffer_desc = {
.usage = WGPUBufferUsage_CopyDst | WGPUBufferUsage_MapRead,
.size = buffer_size,
};
WGPUBuffer staging = wgpuDeviceCreateBuffer(device, &buffer_desc);
if (!staging) {
return {};
}
// Copy texture to buffer
WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
const WGPUTexelCopyTextureInfo src = {
.texture = texture,
.mipLevel = 0,
.origin = {0, 0, 0},
};
  const WGPUTexelCopyBufferInfo dst = {
      .layout =
          {
              .bytesPerRow = aligned_bytes_per_row,
              .rowsPerImage = static_cast<uint32_t>(height),
          },
      .buffer = staging,
  };
const WGPUExtent3D copy_size = {static_cast<uint32_t>(width),
static_cast<uint32_t>(height), 1};
  wgpuCommandEncoderCopyTextureToBuffer(encoder, &src, &dst, &copy_size);
WGPUCommandBuffer commands = wgpuCommandEncoderFinish(encoder, nullptr);
wgpuQueueSubmit(queue, 1, &commands);
wgpuCommandBufferRelease(commands);
wgpuCommandEncoderRelease(encoder);
wgpuDevicePoll(device, true, nullptr);
// Map buffer
#if defined(DEMO_CROSS_COMPILE_WIN32)
MapState map_state = {};
  auto map_cb = [](WGPUBufferMapAsyncStatus status, void* userdata) {
    MapState* state = static_cast<MapState*>(userdata);
    // Legacy callback: translate the old status enum into the shared MapState
    // (on failure the status stays Unknown).
    if (status == WGPUBufferMapAsyncStatus_Success) {
      state->status = WGPUMapAsyncStatus_Success;
    }
    state->done = true;
  };
wgpuBufferMapAsync(staging, WGPUMapMode_Read, 0, buffer_size, map_cb,
&map_state);
#else
MapState map_state = {};
auto map_cb = [](WGPUMapAsyncStatus status, WGPUStringView message,
void* userdata, void* user2) {
(void)message;
(void)user2;
MapState* state = static_cast<MapState*>(userdata);
state->status = status;
state->done = true;
};
WGPUBufferMapCallbackInfo map_info = {};
map_info.mode = WGPUCallbackMode_AllowProcessEvents;
map_info.callback = map_cb;
map_info.userdata1 = &map_state;
wgpuBufferMapAsync(staging, WGPUMapMode_Read, 0, buffer_size, map_info);
#endif
for (int i = 0; i < 100 && !map_state.done; ++i) {
wgpuDevicePoll(device, true, nullptr);
}
if (!map_state.done || map_state.status != WGPUMapAsyncStatus_Success) {
wgpuBufferRelease(staging);
return {};
}
// Convert FP16 to U8 ([-1,1] → [0,255])
const uint16_t* mapped_data = static_cast<const uint16_t*>(
wgpuBufferGetConstMappedRange(staging, 0, buffer_size));
std::vector<uint8_t> pixels(width * height * 4);
if (mapped_data) {
for (int y = 0; y < height; ++y) {
const uint16_t* src_row =
reinterpret_cast<const uint16_t*>(
reinterpret_cast<const uint8_t*>(mapped_data) +
y * aligned_bytes_per_row);
for (int x = 0; x < width; ++x) {
float r = fp16_to_float(src_row[x * 4 + 0]);
float g = fp16_to_float(src_row[x * 4 + 1]);
float b = fp16_to_float(src_row[x * 4 + 2]);
float a = fp16_to_float(src_row[x * 4 + 3]);
        // Convert [-1,1] → [0,1], clamping so out-of-range values cannot
        // overflow the uint8_t cast, then scale to [0,255]
        auto to_u8 = [](float v) {
          v = (v + 1.0f) * 0.5f;
          v = v < 0.0f ? 0.0f : (v > 1.0f ? 1.0f : v);
          return static_cast<uint8_t>(v * 255.0f);
        };
        const int idx = (y * width + x) * 4;
        pixels[idx + 0] = to_u8(b);  // B
        pixels[idx + 1] = to_u8(g);  // G
        pixels[idx + 2] = to_u8(r);  // R
        pixels[idx + 3] = to_u8(a);  // A
}
}
}
wgpuBufferUnmap(staging);
wgpuBufferRelease(staging);
return pixels;
}
#endif // !defined(STRIP_ALL)