tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

compute_pass.spec.ts (8689B)


// Human-readable suite description surfaced by the CTS test runner/query UI.
export const description = `
Stress tests covering GPUComputePassEncoder usage.
`;
      4 
      5 import { makeTestGroup } from '../../common/framework/test_group.js';
      6 import { assert, iterRange } from '../../common/util/util.js';
      7 import { GPUTest } from '../../webgpu/gpu_test.js';
      8 
// Test group that all cases in this file register against; the CTS runner
// discovers the cases through this exported `g`.
export const g = makeTestGroup(GPUTest);
     10 
     11 g.test('many')
     12  .desc(
     13    `Tests execution of a huge number of compute passes using the same
     14 GPUComputePipeline.`
     15  )
     16  .fn(t => {
     17    const kNumElements = 64;
     18    const data = new Uint32Array([...iterRange(kNumElements, x => x)]);
     19    const buffer = t.makeBufferWithContents(data, GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC);
     20    const pipeline = t.device.createComputePipeline({
     21      layout: 'auto',
     22      compute: {
     23        module: t.device.createShaderModule({
     24          code: `
     25            struct Buffer { data: array<u32>, };
     26            @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
     27            @compute @workgroup_size(1) fn main(
     28                @builtin(global_invocation_id) id: vec3<u32>) {
     29              buffer.data[id.x] = buffer.data[id.x] + 1u;
     30            }
     31          `,
     32        }),
     33        entryPoint: 'main',
     34      },
     35    });
     36    const bindGroup = t.device.createBindGroup({
     37      layout: pipeline.getBindGroupLayout(0),
     38      entries: [{ binding: 0, resource: { buffer } }],
     39    });
     40    const kNumIterations = 250_000;
     41    for (let i = 0; i < kNumIterations; ++i) {
     42      const encoder = t.device.createCommandEncoder();
     43      const pass = encoder.beginComputePass();
     44      pass.setPipeline(pipeline);
     45      pass.setBindGroup(0, bindGroup);
     46      pass.dispatchWorkgroups(kNumElements);
     47      pass.end();
     48      t.device.queue.submit([encoder.finish()]);
     49    }
     50    t.expectGPUBufferValuesEqual(
     51      buffer,
     52      new Uint32Array([...iterRange(kNumElements, x => x + kNumIterations)])
     53    );
     54  });
     55 
     56 g.test('pipeline_churn')
     57  .desc(
     58    `Tests execution of a huge number of compute passes which each use a different
     59 GPUComputePipeline.`
     60  )
     61  .fn(t => {
     62    const buffer = t.makeBufferWithContents(
     63      new Uint32Array([0]),
     64      GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC
     65    );
     66    const kNumIterations = 10_000;
     67    const stages = iterRange(kNumIterations, i => ({
     68      module: t.device.createShaderModule({
     69        code: `
     70        struct Buffer { data: u32, };
     71        @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
     72        @compute @workgroup_size(1) fn main${i}() {
     73          buffer.data = buffer.data + 1u;
     74        }
     75        `,
     76      }),
     77      entryPoint: `main${i}`,
     78    }));
     79    for (const compute of stages) {
     80      const encoder = t.device.createCommandEncoder();
     81      const pipeline = t.device.createComputePipeline({ layout: 'auto', compute });
     82      const bindGroup = t.device.createBindGroup({
     83        layout: pipeline.getBindGroupLayout(0),
     84        entries: [{ binding: 0, resource: { buffer } }],
     85      });
     86      const pass = encoder.beginComputePass();
     87      pass.setPipeline(pipeline);
     88      pass.setBindGroup(0, bindGroup);
     89      pass.dispatchWorkgroups(1);
     90      pass.end();
     91      t.device.queue.submit([encoder.finish()]);
     92    }
     93    t.expectGPUBufferValuesEqual(buffer, new Uint32Array([kNumIterations]));
     94  });
     95 
     96 g.test('bind_group_churn')
     97  .desc(
     98    `Tests execution of compute passes which switch between a huge number of bind
     99 groups.`
    100  )
    101  .fn(t => {
    102    const kNumElements = 64;
    103    const data = new Uint32Array([...iterRange(kNumElements, x => x)]);
    104    const buffer1 = t.makeBufferWithContents(
    105      data,
    106      GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC
    107    );
    108    const buffer2 = t.makeBufferWithContents(
    109      data,
    110      GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC
    111    );
    112    const module = t.device.createShaderModule({
    113      code: `
    114        struct Buffer { data: array<u32>, };
    115        @group(0) @binding(0) var<storage, read_write> buffer1: Buffer;
    116        @group(0) @binding(1) var<storage, read_write> buffer2: Buffer;
    117        @compute @workgroup_size(1) fn main(
    118            @builtin(global_invocation_id) id: vec3<u32>) {
    119          buffer1.data[id.x] = buffer1.data[id.x] + 1u;
    120          buffer2.data[id.x] = buffer2.data[id.x] + 2u;
    121        }
    122      `,
    123    });
    124    const kNumIterations = 250_000;
    125    const pipeline = t.device.createComputePipeline({
    126      layout: 'auto',
    127      compute: { module, entryPoint: 'main' },
    128    });
    129    const encoder = t.device.createCommandEncoder();
    130    const pass = encoder.beginComputePass();
    131    pass.setPipeline(pipeline);
    132    for (let i = 0; i < kNumIterations; ++i) {
    133      const buffer1Binding = i % 2;
    134      const buffer2Binding = buffer1Binding ^ 1;
    135      const bindGroup = t.device.createBindGroup({
    136        layout: pipeline.getBindGroupLayout(0),
    137        entries: [
    138          { binding: buffer1Binding, resource: { buffer: buffer1 } },
    139          { binding: buffer2Binding, resource: { buffer: buffer2 } },
    140        ],
    141      });
    142      pass.setBindGroup(0, bindGroup);
    143      pass.dispatchWorkgroups(kNumElements);
    144    }
    145    pass.end();
    146    t.device.queue.submit([encoder.finish()]);
    147    const kTotalAddition = (kNumIterations / 2) * 3;
    148    t.expectGPUBufferValuesEqual(
    149      buffer1,
    150      new Uint32Array([...iterRange(kNumElements, x => x + kTotalAddition)])
    151    );
    152    t.expectGPUBufferValuesEqual(
    153      buffer2,
    154      new Uint32Array([...iterRange(kNumElements, x => x + kTotalAddition)])
    155    );
    156  });
    157 
    158 g.test('many_dispatches')
    159  .desc(`Tests execution of compute passes with a huge number of dispatch calls`)
    160  .fn(t => {
    161    const kNumElements = 64;
    162    const data = new Uint32Array([...iterRange(kNumElements, x => x)]);
    163    const buffer = t.makeBufferWithContents(data, GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC);
    164    const module = t.device.createShaderModule({
    165      code: `
    166        struct Buffer { data: array<u32>, };
    167        @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
    168        @compute @workgroup_size(1) fn main(
    169            @builtin(global_invocation_id) id: vec3<u32>) {
    170          buffer.data[id.x] = buffer.data[id.x] + 1u;
    171        }
    172      `,
    173    });
    174    const kNumIterations = 1_000_000;
    175    const pipeline = t.device.createComputePipeline({
    176      layout: 'auto',
    177      compute: { module, entryPoint: 'main' },
    178    });
    179    const encoder = t.device.createCommandEncoder();
    180    const pass = encoder.beginComputePass();
    181    pass.setPipeline(pipeline);
    182    const bindGroup = t.device.createBindGroup({
    183      layout: pipeline.getBindGroupLayout(0),
    184      entries: [{ binding: 0, resource: { buffer } }],
    185    });
    186    pass.setBindGroup(0, bindGroup);
    187    for (let i = 0; i < kNumIterations; ++i) {
    188      pass.dispatchWorkgroups(kNumElements);
    189    }
    190    pass.end();
    191    t.device.queue.submit([encoder.finish()]);
    192    t.expectGPUBufferValuesEqual(
    193      buffer,
    194      new Uint32Array([...iterRange(kNumElements, x => x + kNumIterations)])
    195    );
    196  });
    197 
    198 g.test('huge_dispatches')
    199  .desc(`Tests execution of compute passes with huge dispatch calls`)
    200  .fn(async t => {
    201    const kDimensions = [512, 512, 128];
    202    kDimensions.forEach(x => {
    203      assert(x <= t.device.limits.maxComputeWorkgroupsPerDimension);
    204    });
    205 
    206    const kNumElements = kDimensions[0] * kDimensions[1] * kDimensions[2];
    207    const data = new Uint32Array([...iterRange(kNumElements, x => x)]);
    208    const buffer = t.makeBufferWithContents(data, GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC);
    209    const module = t.device.createShaderModule({
    210      code: `
    211        struct Buffer { data: array<u32>, };
    212        @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
    213        @compute @workgroup_size(1) fn main(
    214            @builtin(global_invocation_id) id: vec3<u32>) {
    215          let index = (id.z * 512u + id.y) * 512u + id.x;
    216          buffer.data[index] = buffer.data[index] + 1u;
    217        }
    218      `,
    219    });
    220    const kNumIterations = 16;
    221    const pipeline = t.device.createComputePipeline({
    222      layout: 'auto',
    223      compute: { module, entryPoint: 'main' },
    224    });
    225    const bindGroup = t.device.createBindGroup({
    226      layout: pipeline.getBindGroupLayout(0),
    227      entries: [{ binding: 0, resource: { buffer } }],
    228    });
    229    for (let i = 0; i < kNumIterations; ++i) {
    230      const encoder = t.device.createCommandEncoder();
    231      const pass = encoder.beginComputePass();
    232      pass.setBindGroup(0, bindGroup);
    233      pass.setPipeline(pipeline);
    234      pass.dispatchWorkgroups(kDimensions[0], kDimensions[1], kDimensions[2]);
    235      pass.end();
    236      t.device.queue.submit([encoder.finish()]);
    237      await t.device.queue.onSubmittedWorkDone();
    238    }
    239    t.expectGPUBufferValuesEqual(
    240      buffer,
    241      new Uint32Array([...iterRange(kNumElements, x => x + kNumIterations)])
    242    );
    243  });