Skip to content

Commit 8452940

Browse files
committed
Handling async pipeline errors more gracefully
1 parent 7a6ef73 commit 8452940

File tree

3 files changed

+145
-111
lines changed

src/webgpu/shader/execution/expression/expression.ts

+23-14
Original file line numberDiff line numberDiff line change
@@ -220,12 +220,12 @@ type PipelineCache = Map<String, GPUComputePipeline>;
220220
* @param create the function used to construct a value, if not found in the cache
221221
* @returns the value, either fetched from the cache, or newly built.
222222
*/
223-
function getOrCreate<K, V>(map: Map<K, V>, key: K, create: () => V) {
223+
async function getOrCreate<K, V>(map: Map<K, V>, key: K, create: () => Promise<V>) {
224224
const existing = map.get(key);
225225
if (existing !== undefined) {
226226
return existing;
227227
}
228-
const value = create();
228+
const value = await create();
229229
map.set(key, value);
230230
return value;
231231
}
@@ -307,16 +307,24 @@ export async function run(
307307
};
308308

309309
const processBatch = async (batchCases: CaseList) => {
310-
const checkBatch = await submitBatch(
311-
t,
312-
shaderBuilder,
313-
parameterTypes,
314-
resultType,
315-
batchCases,
316-
cfg.inputSource,
317-
pipelineCache
318-
);
319-
checkBatch();
310+
try {
311+
const checkBatch = await submitBatch(
312+
t,
313+
shaderBuilder,
314+
parameterTypes,
315+
resultType,
316+
batchCases,
317+
cfg.inputSource,
318+
pipelineCache
319+
);
320+
checkBatch();
321+
} catch (err) {
322+
if (err instanceof GPUPipelineError) {
323+
t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
324+
} else {
325+
throw err;
326+
}
327+
}
320328
void t.queue.onSubmittedWorkDone().finally(batchFinishedCallback);
321329
};
322330

@@ -993,6 +1001,7 @@ async function buildPipeline(
9931001
const module = t.device.createShaderModule({ code: source });
9941002

9951003
// build the pipeline
1004+
9961005
const pipeline = await t.device.createComputePipelineAsync({
9971006
layout: 'auto',
9981007
compute: { module, entryPoint: 'main' },
@@ -1037,12 +1046,12 @@ async function buildPipeline(
10371046
}
10381047

10391048
// build the compute pipeline, if the shader hasn't been compiled already.
1040-
const pipeline = getOrCreate(pipelineCache, source, () => {
1049+
const pipeline = await getOrCreate(pipelineCache, source, () => {
10411050
// build the shader module
10421051
const module = t.device.createShaderModule({ code: source });
10431052

10441053
// build the pipeline
1045-
return t.device.createComputePipeline({
1054+
return t.device.createComputePipelineAsync({
10461055
layout: 'auto',
10471056
compute: { module, entryPoint: 'main' },
10481057
});

src/webgpu/shader/execution/robust_access.spec.ts

+33-25
Original file line numberDiff line numberDiff line change
@@ -62,35 +62,43 @@ fn main() {
6262

6363
t.debug(source);
6464
const module = t.device.createShaderModule({ code: source });
65-
const pipeline = await t.device.createComputePipelineAsync({
66-
layout,
67-
compute: { module, entryPoint: 'main' },
68-
});
69-
70-
const group = t.device.createBindGroup({
71-
layout: pipeline.getBindGroupLayout(1),
72-
entries: [
73-
{ binding: 0, resource: { buffer: constantsBuffer } },
74-
{ binding: 1, resource: { buffer: resultBuffer } },
75-
],
76-
});
7765

78-
const testGroup = t.device.createBindGroup({
79-
layout: pipeline.getBindGroupLayout(0),
80-
entries: testBindings,
81-
});
66+
try {
67+
const pipeline = await t.device.createComputePipelineAsync({
68+
layout,
69+
compute: { module, entryPoint: 'main' },
70+
});
8271

83-
const encoder = t.device.createCommandEncoder();
84-
const pass = encoder.beginComputePass();
85-
pass.setPipeline(pipeline);
86-
pass.setBindGroup(0, testGroup, dynamicOffsets);
87-
pass.setBindGroup(1, group);
88-
pass.dispatchWorkgroups(1);
89-
pass.end();
72+
const group = t.device.createBindGroup({
73+
layout: pipeline.getBindGroupLayout(1),
74+
entries: [
75+
{ binding: 0, resource: { buffer: constantsBuffer } },
76+
{ binding: 1, resource: { buffer: resultBuffer } },
77+
],
78+
});
9079

91-
t.queue.submit([encoder.finish()]);
80+
const testGroup = t.device.createBindGroup({
81+
layout: pipeline.getBindGroupLayout(0),
82+
entries: testBindings,
83+
});
9284

93-
t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
85+
const encoder = t.device.createCommandEncoder();
86+
const pass = encoder.beginComputePass();
87+
pass.setPipeline(pipeline);
88+
pass.setBindGroup(0, testGroup, dynamicOffsets);
89+
pass.setBindGroup(1, group);
90+
pass.dispatchWorkgroups(1);
91+
pass.end();
92+
93+
t.queue.submit([encoder.finish()]);
94+
t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
95+
} catch (err) {
96+
if (err instanceof GPUPipelineError) {
97+
t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
98+
} else {
99+
throw err;
100+
}
101+
}
94102
}
95103

96104
/** Fill an ArrayBuffer with sentinel values, except clear a region to zero. */

src/webgpu/shader/execution/zero_init.spec.ts

+89-72
Original file line numberDiff line numberDiff line change
@@ -446,101 +446,118 @@ g.test('compute,zero_init')
446446
],
447447
});
448448

449-
const fillPipeline = await t.device.createComputePipelineAsync({
450-
layout: t.device.createPipelineLayout({ bindGroupLayouts: [fillLayout] }),
451-
label: 'Workgroup Fill Pipeline',
449+
try {
450+
const fillPipeline = await t.device.createComputePipelineAsync({
451+
layout: t.device.createPipelineLayout({ bindGroupLayouts: [fillLayout] }),
452+
label: 'Workgroup Fill Pipeline',
453+
compute: {
454+
module: t.device.createShaderModule({
455+
code: wgsl,
456+
}),
457+
entryPoint: 'fill',
458+
},
459+
});
460+
461+
const inputBuffer = t.makeBufferWithContents(
462+
new Uint32Array([...iterRange(wg_memory_limits / 4, _i => 0xdeadbeef)]),
463+
GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
464+
);
465+
t.trackForCleanup(inputBuffer);
466+
const outputBuffer = t.device.createBuffer({
467+
size: wg_memory_limits,
468+
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
469+
});
470+
t.trackForCleanup(outputBuffer);
471+
472+
const bg = t.device.createBindGroup({
473+
layout: fillPipeline.getBindGroupLayout(0),
474+
entries: [
475+
{
476+
binding: 0,
477+
resource: {
478+
buffer: inputBuffer,
479+
},
480+
},
481+
{
482+
binding: 1,
483+
resource: {
484+
buffer: outputBuffer,
485+
},
486+
},
487+
],
488+
});
489+
490+
const e = t.device.createCommandEncoder();
491+
const p = e.beginComputePass();
492+
p.setPipeline(fillPipeline);
493+
p.setBindGroup(0, bg);
494+
p.dispatchWorkgroups(1);
495+
p.end();
496+
t.queue.submit([e.finish()]);
497+
} catch (err) {
498+
if (err instanceof GPUPipelineError) {
499+
t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
500+
return;
501+
} else {
502+
throw err;
503+
}
504+
}
505+
}
506+
507+
try {
508+
const pipeline = await t.device.createComputePipelineAsync({
509+
layout: 'auto',
452510
compute: {
453511
module: t.device.createShaderModule({
454512
code: wgsl,
455513
}),
456-
entryPoint: 'fill',
514+
entryPoint: 'main',
457515
},
458516
});
459517

460-
const inputBuffer = t.makeBufferWithContents(
461-
new Uint32Array([...iterRange(wg_memory_limits / 4, _i => 0xdeadbeef)]),
462-
GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
463-
);
464-
t.trackForCleanup(inputBuffer);
465-
const outputBuffer = t.device.createBuffer({
466-
size: wg_memory_limits,
518+
const resultBuffer = t.device.createBuffer({
519+
size: 4,
467520
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
468521
});
469-
t.trackForCleanup(outputBuffer);
522+
t.trackForCleanup(resultBuffer);
470523

471-
const bg = t.device.createBindGroup({
472-
layout: fillPipeline.getBindGroupLayout(0),
524+
const zeroBuffer = t.device.createBuffer({
525+
size: 4,
526+
usage: GPUBufferUsage.UNIFORM,
527+
});
528+
t.trackForCleanup(zeroBuffer);
529+
530+
const bindGroup = t.device.createBindGroup({
531+
layout: pipeline.getBindGroupLayout(0),
473532
entries: [
474533
{
475534
binding: 0,
476535
resource: {
477-
buffer: inputBuffer,
536+
buffer: resultBuffer,
478537
},
479538
},
480539
{
481540
binding: 1,
482541
resource: {
483-
buffer: outputBuffer,
542+
buffer: zeroBuffer,
484543
},
485544
},
486545
],
487546
});
488547

489-
const e = t.device.createCommandEncoder();
490-
const p = e.beginComputePass();
491-
p.setPipeline(fillPipeline);
492-
p.setBindGroup(0, bg);
493-
p.dispatchWorkgroups(1);
494-
p.end();
495-
t.queue.submit([e.finish()]);
548+
const encoder = t.device.createCommandEncoder();
549+
const pass = encoder.beginComputePass();
550+
pass.setPipeline(pipeline);
551+
pass.setBindGroup(0, bindGroup);
552+
pass.dispatchWorkgroups(1);
553+
pass.end();
554+
t.queue.submit([encoder.finish()]);
555+
t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
556+
} catch (err) {
557+
if (err instanceof GPUPipelineError) {
558+
t.fail(`Pipeline Creation Error, ${err.reason}: ${err.message}`);
559+
} else {
560+
throw err;
561+
}
496562
}
497-
498-
const pipeline = await t.device.createComputePipelineAsync({
499-
layout: 'auto',
500-
compute: {
501-
module: t.device.createShaderModule({
502-
code: wgsl,
503-
}),
504-
entryPoint: 'main',
505-
},
506-
});
507-
508-
const resultBuffer = t.device.createBuffer({
509-
size: 4,
510-
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
511-
});
512-
t.trackForCleanup(resultBuffer);
513-
514-
const zeroBuffer = t.device.createBuffer({
515-
size: 4,
516-
usage: GPUBufferUsage.UNIFORM,
517-
});
518-
t.trackForCleanup(zeroBuffer);
519-
520-
const bindGroup = t.device.createBindGroup({
521-
layout: pipeline.getBindGroupLayout(0),
522-
entries: [
523-
{
524-
binding: 0,
525-
resource: {
526-
buffer: resultBuffer,
527-
},
528-
},
529-
{
530-
binding: 1,
531-
resource: {
532-
buffer: zeroBuffer,
533-
},
534-
},
535-
],
536-
});
537-
538-
const encoder = t.device.createCommandEncoder();
539-
const pass = encoder.beginComputePass();
540-
pass.setPipeline(pipeline);
541-
pass.setBindGroup(0, bindGroup);
542-
pass.dispatchWorkgroups(1);
543-
pass.end();
544-
t.queue.submit([encoder.finish()]);
545-
t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
546563
});

0 commit comments

Comments (0)