tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

parallel-dispatch.https.any.js (11823B)


      1 // META: title=test parallel WebNN API dispatch calls
      2 // META: global=window,worker
      3 // META: variant=?cpu
      4 // META: variant=?gpu
      5 // META: variant=?npu
      6 // META: script=../resources/utils.js
      7 // META: timeout=long
      8 
      9 'use strict';
     10 
     11 // https://www.w3.org/TR/webnn/#api-mlcontext-dispatch
     12 
// Shared MLContext used by every test in this file; initialized once in
// promise_setup below.
let mlContext;
     14 
     15 
// Skip tests if WebNN is unimplemented.
promise_setup(async () => {
 assert_implements(navigator.ml, 'missing navigator.ml');
 // `contextOptions` is provided by ../resources/utils.js, presumably derived
 // from the ?cpu/?gpu/?npu variant query string — confirm against utils.js.
 mlContext = await navigator.ml.createContext(contextOptions);
});
     21 
     22 function buildMulGraph(context, operandDescriptor, multiplier) {
     23  // Construct a simple graph: A = B * `multiplier`.
     24  const builder = new MLGraphBuilder(context);
     25  const inputOperand = builder.input('input', operandDescriptor);
     26  const constantOperand =
     27      builder.constant(operandDescriptor, Float32Array.from([multiplier]));
     28  const outputOperand = builder.mul(inputOperand, constantOperand);
     29  return builder.build({'output': outputOperand});
     30 }
     31 
     32 promise_test(async () => {
     33  const operandDescriptor = {
     34    dataType: 'float32',
     35    shape: [1],
     36    readable: true,
     37    writable: true,
     38  };
     39 
     40  const [mlGraph, inputTensor1, inputTensor2, outputTensor] =
     41      await Promise.all([
     42        buildMulGraph(mlContext, operandDescriptor, 2),
     43        mlContext.createTensor(operandDescriptor),
     44        mlContext.createTensor(operandDescriptor),
     45        mlContext.createTensor(operandDescriptor)
     46      ]);
     47 
     48  mlContext.writeTensor(inputTensor1, Float32Array.from([1]));
     49  mlContext.writeTensor(inputTensor2, Float32Array.from([10]));
     50 
     51  let readTensorPromises = [];
     52 
     53  mlContext.dispatch(
     54      mlGraph, {'input': inputTensor1}, {'output': outputTensor});
     55 
     56  // Don't await tensor readback before dispatching again.
     57  readTensorPromises.push(mlContext.readTensor(outputTensor));
     58 
     59  mlContext.dispatch(
     60      mlGraph, {'input': inputTensor2}, {'output': outputTensor});
     61 
     62  readTensorPromises.push(mlContext.readTensor(outputTensor));
     63 
     64  const actualOutputs =
     65      await Promise.all(readTensorPromises.map(async (promise) => {
     66        const output = await promise;
     67        return new Float32Array(output)[0];
     68      }));
     69 
     70  assert_array_equals(actualOutputs, [2, 20]);
     71 }, 'dispatch queues behind readTensor');
     72 
     73 promise_test(async () => {
     74  const operandDescriptor = {
     75    dataType: 'float32',
     76    shape: [1],
     77    readable: true,
     78    writable: true,
     79  };
     80  const mlGraph = await buildMulGraph(mlContext, operandDescriptor, 3);
     81 
     82  // write/dispatch/read, write/dispatch/read, ...
     83  const testInputs = [1, 2, 3, 4];
     84  const actualOutputs = await Promise.all(testInputs.map(async (input) => {
     85    const [inputTensor, outputTensor] = await Promise.all([
     86      mlContext.createTensor(operandDescriptor),
     87      mlContext.createTensor(operandDescriptor)
     88    ]);
     89 
     90    mlContext.writeTensor(inputTensor, Float32Array.from([input]));
     91 
     92    mlContext.dispatch(
     93        mlGraph, {'input': inputTensor}, {'output': outputTensor});
     94 
     95    const output = await mlContext.readTensor(outputTensor);
     96    return new Float32Array(output)[0];
     97  }));
     98 
     99  assert_array_equals(actualOutputs, [3, 6, 9, 12]);
    100 }, 'same graph: write/dispatch/read, write/dispatch/read, ...');
    101 
    102 promise_test(async () => {
    103  const operandDescriptor = {
    104    dataType: 'float32',
    105    shape: [1],
    106    readable: true,
    107    writable: true,
    108  };
    109  const mlGraph = await buildMulGraph(mlContext, operandDescriptor, 10);
    110 
    111  // write/write...
    112  const testInputs = [1, 2, 3, 4];
    113  const inputAndOutputTensors =
    114      await Promise.all(testInputs.map(async (testInput) => {
    115        const [inputTensor, outputTensor] = await Promise.all([
    116          mlContext.createTensor(operandDescriptor),
    117          mlContext.createTensor(operandDescriptor)
    118        ]);
    119 
    120        mlContext.writeTensor(inputTensor, Float32Array.from([testInput]));
    121        return [inputTensor, outputTensor];
    122      }));
    123 
    124  // dispatch/read, dispatch/read, ...
    125  let readTensorPromises = [];
    126  for (let i = 0; i < testInputs.length; i++) {
    127    mlContext.dispatch(
    128        mlGraph, {'input': inputAndOutputTensors[i][0]},
    129        {'output': inputAndOutputTensors[i][1]});
    130    readTensorPromises.push(mlContext.readTensor(inputAndOutputTensors[i][1]));
    131  };
    132 
    133  const actualOutputs =
    134      await Promise.all(readTensorPromises.map(async (promise) => {
    135        const output = await promise;
    136        return new Float32Array(output)[0];
    137      }));
    138 
    139  assert_array_equals(actualOutputs, [10, 20, 30, 40]);
    140 }, 'same graph: write/write..., dispatch/read, dispatch/read, ...');
    141 
    142 promise_test(async () => {
    143  const operandDescriptor = {
    144    dataType: 'float32',
    145    shape: [1],
    146    readable: true,
    147    writable: true,
    148  };
    149  const mlGraph = await buildMulGraph(mlContext, operandDescriptor, 9);
    150 
    151  // write/write...
    152  const testInputs = [1, 2, 3, 4];
    153  const inputAndOutputTensors =
    154      await Promise.all(testInputs.map(async (testInput) => {
    155        const [inputTensor, outputTensor] = await Promise.all([
    156          mlContext.createTensor(operandDescriptor),
    157          mlContext.createTensor(operandDescriptor)
    158        ]);
    159 
    160        mlContext.writeTensor(inputTensor, Float32Array.from([testInput]));
    161        return [inputTensor, outputTensor];
    162      }));
    163 
    164  // dispatch/dispatch...
    165  for (let i = 0; i < testInputs.length; i++) {
    166    mlContext.dispatch(
    167        mlGraph, {'input': inputAndOutputTensors[i][0]},
    168        {'output': inputAndOutputTensors[i][1]});
    169  }
    170 
    171  // read/read...
    172  const actualOutputs = await Promise.all(
    173      inputAndOutputTensors.map(async (inputAndOutputTensor) => {
    174        const output = await mlContext.readTensor(inputAndOutputTensor[1]);
    175        return new Float32Array(output)[0];
    176      }));
    177 
    178  assert_array_equals(actualOutputs, [9, 18, 27, 36]);
    179 }, 'same graph: write/write..., dispatch/dispatch..., read/read...');
    180 
    181 promise_test(async () => {
    182  const operandDescriptor = {
    183    dataType: 'float32',
    184    shape: [1],
    185    readable: true,
    186    writable: true,
    187  };
    188  const mlGraph = await buildMulGraph(mlContext, operandDescriptor, 2);
    189 
    190  const tensors = await Promise.all([
    191    mlContext.createTensor(operandDescriptor),
    192    mlContext.createTensor(operandDescriptor),
    193    mlContext.createTensor(operandDescriptor),
    194    mlContext.createTensor(operandDescriptor),
    195    mlContext.createTensor(operandDescriptor)
    196  ]);
    197 
    198  mlContext.writeTensor(tensors[0], Float32Array.from([1]));
    199 
    200  // dispatch/dispatch...
    201  for (let i = 0; i < tensors.length - 1; i++) {
    202    mlContext.dispatch(
    203        mlGraph, {'input': tensors[i]}, {'output': tensors[i + 1]});
    204  }
    205 
    206  // read/read...
    207  const actualOutputs = await Promise.all(tensors.map(async (tensor) => {
    208    const output = await mlContext.readTensor(tensor);
    209    return new Float32Array(output)[0];
    210  }));
    211 
    212  assert_array_equals(actualOutputs, [1, 2, 4, 8, 16]);
    213 }, 'same graph serial inputs: dispatch/dispatch..., read/read...');
    214 
    215 promise_test(async () => {
    216  const operandDescriptor = {
    217    dataType: 'float32',
    218    shape: [1],
    219    readable: true,
    220    writable: true,
    221  };
    222 
    223  // write/write...
    224  const testInputs = [1, 2, 3, 4];
    225  const graphsAndTensors =
    226      await Promise.all(testInputs.map(async (testInput) => {
    227        const [graph, inputTensor, outputTensor] = await Promise.all([
    228          buildMulGraph(mlContext, operandDescriptor, testInput),
    229          mlContext.createTensor(operandDescriptor),
    230          mlContext.createTensor(operandDescriptor)
    231        ]);
    232 
    233        mlContext.writeTensor(inputTensor, Float32Array.from([testInput]));
    234        return [graph, inputTensor, outputTensor];
    235      }));
    236 
    237  // dispatch/read, dispatch/read, ...
    238  let readTensorPromises = [];
    239  for (let i = 0; i < graphsAndTensors.length; i++) {
    240    mlContext.dispatch(
    241        graphsAndTensors[i][0], {'input': graphsAndTensors[i][1]},
    242        {'output': graphsAndTensors[i][2]});
    243    readTensorPromises.push(mlContext.readTensor(graphsAndTensors[i][2]));
    244  };
    245 
    246  const actualOutputs =
    247      await Promise.all(readTensorPromises.map(async (promise) => {
    248        const output = await promise;
    249        return new Float32Array(output)[0];
    250      }));
    251 
    252  assert_array_equals(actualOutputs, [1, 4, 9, 16]);
    253 }, 'different graphs: write/write..., dispatch/read, dispatch/read, ...');
    254 
    255 promise_test(async () => {
    256  const operandDescriptor = {
    257    dataType: 'float32',
    258    shape: [1],
    259    readable: true,
    260    writable: true,
    261  };
    262 
    263  // write/write...
    264  const testInputs = [1, 2, 3, 4];
    265  const graphsAndTensors =
    266      await Promise.all(testInputs.map(async (testInput) => {
    267        const [graph, inputTensor, outputTensor] = await Promise.all([
    268          buildMulGraph(mlContext, operandDescriptor, testInput * 2),
    269          mlContext.createTensor(operandDescriptor),
    270          mlContext.createTensor(operandDescriptor)
    271        ]);
    272 
    273        mlContext.writeTensor(inputTensor, Float32Array.from([testInput]));
    274        return [graph, inputTensor, outputTensor];
    275      }));
    276 
    277  // dispatch/dispatch...
    278  for (let i = 0; i < graphsAndTensors.length; i++) {
    279    mlContext.dispatch(
    280        graphsAndTensors[i][0], {'input': graphsAndTensors[i][1]},
    281        {'output': graphsAndTensors[i][2]});
    282  };
    283 
    284  // read/read...
    285  const actualOutputs =
    286      await Promise.all(graphsAndTensors.map(async (graphAndTensors) => {
    287        const output = await mlContext.readTensor(graphAndTensors[2]);
    288        return new Float32Array(output)[0];
    289      }));
    290 
    291  assert_array_equals(actualOutputs, [2, 8, 18, 32]);
    292 }, 'different graphs: write/write..., dispatch/dispatch..., read/read...');
    293 
    294 promise_test(async () => {
    295  const operandDescriptor = {
    296    dataType: 'float32',
    297    shape: [1],
    298    readable: true,
    299    writable: true,
    300  };
    301 
    302  const graphs = await Promise.all([3, 2].map(async (multiplier) => {
    303    return buildMulGraph(mlContext, operandDescriptor, multiplier);
    304  }));
    305 
    306  const tensors = await Promise.all([
    307    mlContext.createTensor(operandDescriptor),
    308    mlContext.createTensor(operandDescriptor),
    309    mlContext.createTensor(operandDescriptor),
    310    mlContext.createTensor(operandDescriptor),
    311    mlContext.createTensor(operandDescriptor)
    312  ]);
    313 
    314  mlContext.writeTensor(tensors[0], Float32Array.from([1]));
    315 
    316  // dispatch/dispatch...
    317  for (let i = 0; i < tensors.length - 1; i++) {
    318    mlContext.dispatch(
    319        graphs[i % 2], {'input': tensors[i]}, {'output': tensors[i + 1]});
    320  }
    321 
    322  // read/read...
    323  const actualOutputs = await Promise.all(tensors.map(async (tensor) => {
    324    const output = await mlContext.readTensor(tensor);
    325    return new Float32Array(output)[0];
    326  }));
    327 
    328  assert_array_equals(actualOutputs, [1, 3, 6, 18, 36]);
    329 }, 'different graphs serial inputs: dispatch/dispatch..., read/read...');
    330 
    331 promise_test(async () => {
    332  const operandDescriptor = {
    333    dataType: 'float32',
    334    shape: [1],
    335    readable: true,
    336    writable: true,
    337  };
    338 
    339  const graphs = await Promise.all([2, 3].map(async (multiplier) => {
    340    return buildMulGraph(mlContext, operandDescriptor, multiplier);
    341  }));
    342 
    343  const tensors = await Promise.all([
    344    mlContext.createTensor(operandDescriptor),
    345    mlContext.createTensor(operandDescriptor)
    346  ]);
    347 
    348  // Write to the tensor which will be initially used as an input.
    349  mlContext.writeTensor(tensors[0], Float32Array.from([1]));
    350 
    351  // Double the value in one tensor, sticking the result in the other tensor.
    352  //
    353  // tensors[0]  tensors[1]
    354  //     1
    355  //        >---->  2
    356  //     6  <----<
    357  //        >---->  12
    358  //     36 <----<
    359  //        >---->  72
    360  //    216 <----<
    361 
    362  // dispatch/dispatch...
    363  for (let i = 0; i < 6; i++) {
    364    mlContext.dispatch(
    365        graphs[i % 2], {'input': tensors[i % 2]},
    366        {'output': tensors[(i + 1) % 2]});
    367  };
    368 
    369  // read/read...
    370  const actualOutputs = await Promise.all(tensors.map(async (tensor) => {
    371    const output = await mlContext.readTensor(tensor);
    372    return new Float32Array(output)[0];
    373  }));
    374 
    375  assert_array_equals(actualOutputs, [216, 72]);
    376 }, 'different graphs using the same tensors');