tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

tensor.https.any.js (59818B)


      1 // META: title=test WebNN API tensor operations
      2 // META: global=window,worker
      3 // META: variant=?cpu
      4 // META: variant=?gpu
      5 // META: variant=?npu
      6 // META: script=../resources/utils_validation.js
      7 // META: script=../resources/utils.js
      8 // META: timeout=long
      9 
     10 'use strict';
     11 
     12 // https://www.w3.org/TR/webnn/#api-mltensor
     13 
     14 const bytesPerDataType = (dataType) => {
     15  if (dataType === 'int8' || dataType === 'uint8') {
     16    return 1;
     17  } else if (dataType === 'float16') {
     18    return 2;
     19  } else if (
     20      dataType === 'float32' || dataType === 'int32' || dataType === 'uint32') {
     21    return 4;
     22  } else if (dataType === 'int64' || dataType === 'uint64') {
     23    return 8;
     24  } else {
     25    throw new AssertionError(`Data type '${dataType}' is not supported`);
     26  }
     27 };
     28 
     29 const sizeOfDescriptor = (descriptor) => {
     30  return descriptor.shape.reduce(
     31      (accumulator, currentValue) => accumulator * currentValue,
     32      bytesPerDataType(descriptor.dataType));
     33 };
     34 
     35 const getDescriptorFromTensor = (tensor) => {
     36  return {
     37    dataType: tensor.dataType,
     38    shape: tensor.shape,
     39    readable: tensor.readable,
     40    writable: tensor.writable,
     41  };
     42 };
     43 
     44 
     45 /**
     46 * WebNN destroy tensor twice test.
     47 * @param {String} testName - The name of the test operation.
     48 */
     49 const testDestroyTensor = (testName) => {
     50  let mlContext;
     51  promise_setup(async () => {
     52    try {
     53      mlContext = await navigator.ml.createContext(contextOptions);
     54    } catch (e) {
     55      throw new AssertionError(
     56          `Unable to create context for ${variant} variant. ${e}`);
     57    }
     58 
     59    try {
     60      const mlTensor =
     61          await mlContext.createTensor({dataType: 'int32', shape: [2, 3]});
     62    } catch (e) {
     63      throw new AssertionError(
     64          `Unable to create tensor for ${variant} variant. ${e}`);
     65    }
     66  });
     67  promise_test(async () => {
     68    let mlTensor =
     69        await mlContext.createTensor({dataType: 'int32', shape: [2, 3]});
     70    mlTensor.destroy();
     71    mlTensor.destroy();
     72  }, `${testName}`);
     73 };
     74 
     75 /**
     76 * WebNN create tensor test.
     77 * @param {String} testName - The name of the test operation.
     78 * @param {MLTensorDescriptor} tensorDescriptor - The intended tensor specs.
     79 */
     80 const testCreateTensor = (testName, tensorDescriptor) => {
     81  let mlContext;
     82 
     83  promise_setup(async () => {
     84    try {
     85      mlContext = await navigator.ml.createContext(contextOptions);
     86    } catch (e) {
     87      throw new AssertionError(
     88          `Unable to create context for ${variant} variant. ${e}`);
     89    }
     90  });
     91  promise_test(async t => {
     92    if (!mlContext.opSupportLimits().input.dataTypes.includes(
     93            tensorDescriptor.dataType)) {
     94      await promise_rejects_js(
     95          t, TypeError, mlContext.createTensor(tensorDescriptor));
     96      return;
     97    }
     98 
     99    const mlTensor = await mlContext.createTensor(tensorDescriptor);
    100    assert_equals(
    101        mlTensor.dataType, tensorDescriptor.dataType,
    102        'tensor data types do not match');
    103    assert_array_equals(
    104        mlTensor.shape, tensorDescriptor.shape, 'tensor shapes do not match');
    105  }, `${testName} / ${tensorDescriptor.dataType}`);
    106 };
    107 
    108 /**
    109 * Same as above, but expect creating the tensor to fail.
    110 * @param {String} testName - The name of the test operation.
    111 * @param {MLTensorDescriptor} tensorDescriptor - The intended tensor specs.
    112 */
    113 const testCreateTensorFails = (testName, tensorDescriptor) => {
    114  let mlContext;
    115 
    116  promise_setup(async () => {
    117    try {
    118      mlContext = await navigator.ml.createContext(contextOptions);
    119    } catch (e) {
    120      throw new AssertionError(
    121          `Unable to create context for ${variant} variant. ${e}`);
    122    }
    123  });
    124  promise_test(async t => {
    125    await promise_rejects_js(
    126        t, TypeError, mlContext.createTensor(tensorDescriptor));
    127  }, `${testName} / ${tensorDescriptor.dataType}`);
    128 };
    129 
    130 /**
    131 * WebNN create constant tensor test.
    132 * @param {String} testName - The name of the test operation.
    133 * @param {MLOperandDescriptor} descriptor - The intended operand specs.
    134 */
    135 const testCreateConstantTensor = (testName, descriptor) => {
    136  let mlContext;
    137  let isConstantTensorSupported = false;
    138  promise_setup(async () => {
    139    try {
    140      mlContext = await navigator.ml.createContext(contextOptions);
    141    } catch (error) {
    142      throw new AssertionError(
    143          `Unable to create context for ${variant} variant. ${error}`);
    144    }
    145 
    146    // Check if WebNN has constant tensor support.
    147    try {
    148      await mlContext.createConstantTensor(
    149          {
    150            dataType: 'float32',
    151            shape: [1],
    152          },
    153          new Float32Array([0xAA]));
    154      isConstantTensorSupported = true;
    155    } catch (error) {
    156      if (error.name !== 'NotSupportedError') {
    157        throw error;
    158      }
    159    }
    160  });
    161 
    162  promise_test(async t => {
    163    if (!isConstantTensorSupported) {
    164      return;
    165    }
    166 
    167    const inputData =
    168        new TypedArrayDict[descriptor.dataType](sizeOfShape(descriptor.shape))
    169            .fill(0xAA);
    170    if (!mlContext.opSupportLimits().constant.dataTypes.includes(
    171            descriptor.dataType)) {
    172      await promise_rejects_js(
    173          t, TypeError, mlContext.createConstantTensor(descriptor, inputData));
    174      return;
    175    }
    176 
    177    const mlTensor =
    178        await mlContext.createConstantTensor(descriptor, inputData);
    179    assert_true(mlTensor.constant, 'constant tensors should be constant.');
    180    assert_false(mlTensor.readable, 'constant tensors should not be readable.');
    181    assert_false(mlTensor.writable, 'constant tensors should not be writable.');
    182  }, `${testName} / ${descriptor.dataType}`);
    183 
    184  promise_test(async t => {
    185    if (!isConstantTensorSupported) {
    186      return;
    187    }
    188 
    189    try {
    190      const inputDataTooBig = new TypedArrayDict[descriptor.dataType](
    191          sizeOfShape(descriptor.shape) + 1);
    192      await promise_rejects_js(
    193          t, TypeError,
    194          mlContext.createConstantTensor(descriptor, inputDataTooBig));
    195    } catch (error) {
    196      if (error instanceof RangeError) {
    197        return;  // Skip test when dataType is too big.
    198      } else {
    199        throw error;
    200      }
    201    }
    202  }, `${testName} / ${descriptor.dataType} / source data too big`);
    203 
    204  promise_test(async t => {
    205    if (!isConstantTensorSupported) {
    206      return;
    207    }
    208 
    209    try {
    210      const inputDataTooSmall = new TypedArrayDict[descriptor.dataType](
    211          sizeOfShape(descriptor.shape) - 1);
    212      await promise_rejects_js(
    213          t, TypeError,
    214          mlContext.createConstantTensor(descriptor, inputDataTooSmall));
    215    } catch (error) {
    216      if (error instanceof RangeError) {
    217        return;  // Skip test when dataType is too big.
    218      } else {
    219        throw error;
    220      }
    221    }
    222  }, `${testName} / ${descriptor.dataType} / source data too small`);
    223 };
    224 
    225 /**
    226 * Same as above, but expect constant tensor creation to fail.
    227 * @param {String} testName - The name of the test operation.
    228 * @param {MLOperandDescriptor} descriptor - The intended operand specs.
    229 */
    230 const testCreateConstantTensorFails = (testName, descriptor) => {
    231  let mlContext;
    232 
    233  promise_setup(async () => {
    234    try {
    235      mlContext = await navigator.ml.createContext(contextOptions);
    236    } catch (error) {
    237      throw new AssertionError(
    238          `Unable to create context for ${variant} variant. ${error}`);
    239    }
    240  });
    241 
    242  promise_test(async t => {
    243    await promise_rejects_js(
    244        t, TypeError,
    245        mlContext.createConstantTensor(
    246            descriptor,
    247            new TypedArrayDict[descriptor.dataType](
    248                sizeOfShape(descriptor.shape))));
    249  }, `${testName} / ${descriptor.dataType}`);
    250 };
    251 
// Creating a tensor whose byte length exceeds maxTensorByteLength must
// reject with a TypeError.
// NOTE(review): this test uses a bare global `context` (presumably provided
// by ../resources/utils.js) rather than creating an mlContext like the other
// tests in this file — confirm the helper scripts define it.
promise_test(async t => {
  const tensorDescriptor = {
    dataType: 'int32',
    // One int32 element past the limit; assumes maxTensorByteLength + 1 is
    // divisible by 4 — TODO confirm, otherwise the dimension is fractional.
    shape: [(context.opSupportLimits().maxTensorByteLength + 1) / 4],
    writable: true,
  };
  await promise_rejects_js(
    t, TypeError, context.createTensor(tensorDescriptor));
}, `create too large tensor byte length that exceeds limit`);
    261 
    262 /**
    263 * Asserts the tensor data in MLTensor matches expected.
    264 * @param {MLContext} mlContext - The context used to create the tensor.
    265 * @param {MLTensor} mlTensor - The tensor to read and compare data.
    266 * @param {Array} expected - Array of the expected data in the tensor.
    267 */
    268 const assert_tensor_data_equals = async (mlContext, mlTensor, expected) => {
    269  const actual = await mlContext.readTensor(mlTensor);
    270  assert_array_equals(
    271      new expected.constructor(actual), expected,
    272      'Read tensor data equals expected data.');
    273 };
    274 
    275 /**
    276 * WebNN write tensor operation test.
    277 * @param {String} testName - The name of the test operation.
    278 */
    279 const testWriteTensor = (testName) => {
    280  let mlContext;
    281  promise_setup(async () => {
    282    try {
    283      mlContext = await navigator.ml.createContext(contextOptions);
    284    } catch (e) {
    285      throw new AssertionError(
    286          `Unable to create context for ${variant} variant. ${e}`);
    287    }
    288 
    289    try {
    290      const mlTensor =
    291          await mlContext.createTensor({dataType: 'int32', shape: [2, 3]});
    292    } catch (e) {
    293      throw new AssertionError(
    294          `Unable to create tensor for ${variant} variant. ${e}`);
    295    }
    296  });
    297 
    298  if ('SharedArrayBuffer' in globalThis) {
    299    promise_test(async () => {
    300      const tensorDescriptor = {
    301        dataType: 'int32',
    302        shape: [4],
    303        readable: true,
    304        writable: true,
    305      };
    306      const tensorByteLength = sizeOfDescriptor(tensorDescriptor);
    307 
    308      // Required to use SharedArrayBuffer.
    309      assert_true(
    310          self.crossOriginIsolated,
    311          'The page is served with COOP and COEP, it should be cross-origin-isolated.');
    312 
    313      let arrayBuffer = new ArrayBuffer(tensorByteLength);
    314      let arrayBufferView = new Int32Array(arrayBuffer);
    315      arrayBufferView.fill(7);
    316 
    317      let sharedArrayBuffer = new SharedArrayBuffer(tensorByteLength);
    318      let sharedArrayBufferView = new Int32Array(sharedArrayBuffer);
    319      sharedArrayBufferView.fill(7);
    320 
    321      const tensors = await Promise.all([
    322        mlContext.createTensor(tensorDescriptor),
    323        mlContext.createTensor(tensorDescriptor),
    324        mlContext.createTensor(tensorDescriptor),
    325        mlContext.createTensor(tensorDescriptor)
    326      ]);
    327 
    328      mlContext.writeTensor(tensors[0], arrayBuffer);
    329      mlContext.writeTensor(tensors[2], arrayBufferView);
    330      mlContext.writeTensor(tensors[1], sharedArrayBuffer);
    331      mlContext.writeTensor(tensors[3], sharedArrayBufferView);
    332 
    333      await Promise.all(tensors.map(async (tensor) => {
    334        assert_tensor_data_equals(mlContext, tensor, arrayBufferView);
    335      }));
    336    }, `${testName} / write with different kinds of buffers`);
    337  }
    338 
    339  promise_test(async () => {
    340    const tensorDescriptor = {
    341      dataType: 'int32',
    342      shape: [1],
    343      writable: true,
    344    };
    345    let mlTensor = await mlContext.createTensor(tensorDescriptor);
    346 
    347    const tensorByteLength = sizeOfDescriptor(tensorDescriptor);
    348 
    349    // Writing with a buffer larger than the source tensor.
    350    assert_throws_js(
    351        TypeError,
    352        () => mlContext.writeTensor(
    353            mlTensor, new ArrayBuffer(tensorByteLength + 1)));
    354    // Writing with a buffer smaller than the source tensor.
    355    assert_throws_js(
    356        TypeError,
    357        () => mlContext.writeTensor(
    358            mlTensor, new ArrayBuffer(tensorByteLength - 1)));
    359  }, `${testName} / write with buffer of wrong size`);
    360 
    361  promise_test(async () => {
    362    const tensorDescriptor = {
    363      dataType: 'int32',
    364      shape: [2, 2],
    365      writable: true,
    366    };
    367    let mlTensor = await mlContext.createTensor(tensorDescriptor);
    368 
    369    // Writing data to a destroyed MLTensor should throw.
    370    mlTensor.destroy();
    371 
    372    assert_throws_dom(
    373        'InvalidStateError',
    374        () => mlContext.writeTensor(
    375            mlTensor, new Uint8Array(sizeOfDescriptor(tensorDescriptor))));
    376  }, `${testName} / destroy`);
    377 
    378  promise_test(async () => {
    379    const tensorDescriptor = {
    380      dataType: 'int32',
    381      shape: [2, 3],
    382      writable: true,
    383    };
    384    let mlTensor = await mlContext.createTensor(tensorDescriptor);
    385 
    386    let anotherMLContext = await navigator.ml.createContext(contextOptions);
    387    let anotherMLTensor = await anotherMLContext.createTensor(tensorDescriptor);
    388 
    389    let inputData =
    390        new Uint8Array(sizeOfDescriptor(tensorDescriptor)).fill(0xAA);
    391    assert_throws_js(
    392        TypeError, () => mlContext.writeTensor(anotherMLTensor, inputData));
    393    assert_throws_js(
    394        TypeError, () => anotherMLContext.writeTensor(mlTensor, inputData));
    395  }, `${testName} / context_mismatch`);
    396 
    397  promise_test(async () => {
    398    let mlTensor = await mlContext.createTensor({
    399      dataType: 'int32',
    400      shape: [],
    401      readable: true,
    402      writable: true,
    403    });
    404 
    405    const inputData = Int32Array.from([0xAAAABBBB]);
    406    mlContext.writeTensor(mlTensor, inputData);
    407    await assert_tensor_data_equals(mlContext, mlTensor, inputData);
    408  }, `${testName} / scalar`);
    409 
    410  promise_test(async () => {
    411    const tensorDescriptor = {
    412      dataType: 'int32',
    413      shape: [2, 2],
    414      readable: true,
    415      writable: true,
    416    };
    417    let mlTensor = await mlContext.createTensor(tensorDescriptor);
    418 
    419    const tensorByteLength = sizeOfDescriptor(tensorDescriptor);
    420    let inputBuffer = new ArrayBuffer(tensorByteLength);
    421 
    422    const int32View = new Int32Array(inputBuffer);
    423    int32View.fill(0xBBBBBBBB);
    424 
    425    mlContext.writeTensor(mlTensor, int32View);
    426 
    427    // Writing to a detached buffer should fail.
    428    const detachedBuffer = inputBuffer.transfer();
    429    assert_true(inputBuffer.detached, 'array buffer should be detached.');
    430 
    431    assert_throws_js(
    432        TypeError, () => mlContext.writeTensor(mlTensor, inputBuffer));
    433 
    434    await assert_tensor_data_equals(
    435        mlContext, mlTensor, new Int32Array(detachedBuffer));
    436  }, `${testName} / detached`);
    437 };
    438 
    439 /**
    440 * WebNN read tensor operation test.
    441 * @param {String} testName - The name of the test operation.
    442 */
    443 const testReadTensor = (testName) => {
    444  let mlContext;
    445  promise_setup(async () => {
    446    try {
    447      mlContext = await navigator.ml.createContext(contextOptions);
    448    } catch (e) {
    449      throw new AssertionError(
    450          `Unable to create context for ${variant} variant. ${e}`);
    451    }
    452 
    453    try {
    454      const mlTensor =
    455          await mlContext.createTensor({dataType: 'int32', shape: [2, 3]});
    456    } catch (e) {
    457      throw new AssertionError(
    458          `Unable to create tensor for ${variant} variant. ${e}`);
    459    }
    460  });
    461 
    462  promise_test(async t => {
    463    let mlTensor = await mlContext.createTensor({
    464      dataType: 'int32',
    465      shape: [2, 2],
    466      readable: true,
    467    });
    468 
    469    // Reading a destroyed MLTensor should reject.
    470    mlTensor.destroy();
    471 
    472    await promise_rejects_dom(
    473        t, 'InvalidStateError', mlContext.readTensor(mlTensor));
    474  }, `${testName} / read_after_destroy`);
    475 
    476  promise_test(async t => {
    477    let mlTensor = await mlContext.createTensor({
    478      dataType: 'int32',
    479      shape: [2, 3],
    480      readable: true,
    481    });
    482 
    483    let promise = mlContext.readTensor(mlTensor);
    484    let anotherPromise = mlContext.readTensor(mlTensor);
    485 
    486    mlTensor.destroy();
    487 
    488    await promise_rejects_dom(t, 'InvalidStateError', promise);
    489    await promise_rejects_dom(t, 'InvalidStateError', anotherPromise);
    490  }, `${testName} / read_before_destroy`);
    491 
    492  promise_test(async () => {
    493    let mlTensor = await mlContext.createTensor({
    494      dataType: 'int32',
    495      shape: [1024],
    496      readable: true,
    497    });
    498 
    499    await assert_tensor_data_equals(mlContext, mlTensor, new Uint32Array(1024));
    500  }, `${testName} / uninitialized`);
    501 
    502  promise_test(async () => {
    503    let mlTensor = await mlContext.createTensor({
    504      dataType: 'int32',
    505      shape: [1],
    506      readable: true,
    507      writable: true,
    508    });
    509 
    510    mlContext.writeTensor(mlTensor, Uint8Array.from([0xAA, 0xAA, 0xAA, 0xAA]));
    511 
    512    // Write over previously-written data.
    513    mlContext.writeTensor(mlTensor, Uint32Array.from([0xBBBBBBBB]));
    514    await assert_tensor_data_equals(
    515        mlContext, mlTensor, Uint32Array.from([0xBBBBBBBB]));
    516  }, `${testName} / overwrite`);
    517 
    518  promise_test(async t => {
    519    const tensorDescriptor = {
    520      dataType: 'int32',
    521      shape: [2, 3],
    522      readable: true,
    523    };
    524    let mlTensor = await mlContext.createTensor(tensorDescriptor);
    525 
    526    let anotherMLContext = await navigator.ml.createContext(contextOptions);
    527    let anotherMLTensor = await anotherMLContext.createTensor(tensorDescriptor);
    528 
    529    await promise_rejects_js(
    530        t, TypeError, mlContext.readTensor(anotherMLTensor));
    531    await promise_rejects_js(
    532        t, TypeError, anotherMLContext.readTensor(mlTensor));
    533  }, `${testName} / context_mismatch`);
    534 
    535  promise_test(async () => {
    536    // Create a 128k tensor to test the data pipe.
    537    let mlTensor = await mlContext.createTensor({
    538      dataType: 'int32',
    539      shape: [2, 128, 128],
    540      readable: true,
    541    });
    542 
    543    // Read to an array larger than the 128k mlTensor
    544    const largeArray = new Int32Array(140000);
    545    await mlContext.readTensor(mlTensor, largeArray);
    546  }, `${testName} / read with larger array`);
    547 };
    548 
    549 /**
    550 * WebNN dispatch tensor operation test.
    551 * @param {String} testName - The name of the test operation.
    552 */
    553 const testDispatchTensor = (testName) => {
    554  let mlContext;
    555  let mlGraph;
    556  const shape = [3, 5];
    557  let inputs = {};
    558  let outputs = {};
    559  let isConstantTensorSupported = false;
    560  promise_setup(async () => {
    561    try {
    562      mlContext = await navigator.ml.createContext(contextOptions);
    563    } catch (e) {
    564      throw new AssertionError(
    565          `Unable to create context for ${variant} variant. ${e}`);
    566    }
    567 
    568    // Check if WebNN has constant tensor support.
    569    try {
    570      await mlContext.createConstantTensor(
    571          {
    572            dataType: 'float32',
    573            shape: [1],
    574          },
    575          new Float32Array([0xAA]));
    576      isConstantTensorSupported = true;
    577    } catch (error) {
    578      if (error.name !== 'NotSupportedError') {
    579        throw error;
    580      }
    581    }
    582 
    583    // Construct a simple graph: A = B + C, with two outputs.
    584    const builder = new MLGraphBuilder(mlContext);
    585    const tensorDescriptor = {
    586      dataType: 'float32',
    587      shape: shape,
    588      readable: true,
    589      writable: true,
    590    };
    591    const lhsOperand = builder.input('lhs', tensorDescriptor);
    592    const rhsOperand = builder.input('rhs', tensorDescriptor);
    593    const output1Operand = builder.add(lhsOperand, rhsOperand);
    594    const output2Operand = builder.add(lhsOperand, rhsOperand);
    595    mlGraph = await builder.build(
    596        {'output1': output1Operand, 'output2': output2Operand});
    597 
    598    try {
    599      const mlTensor =
    600          await mlContext.createTensor({dataType: 'int32', shape: [2, 3]});
    601    } catch (e) {
    602      throw new AssertionError(
    603          `Unable to create tensor for ${variant} variant. ${e}`);
    604    }
    605 
    606    inputs = {
    607      'lhs': await mlContext.createTensor(tensorDescriptor),
    608      'rhs': await mlContext.createTensor(tensorDescriptor),
    609    };
    610    outputs = {
    611      'output1': await mlContext.createTensor(tensorDescriptor),
    612      'output2': await mlContext.createTensor(tensorDescriptor),
    613    };
    614  });
    615 
    616  promise_test(async () => {
    617    let anotherMLContext = await navigator.ml.createContext(contextOptions);
    618 
    619    // Control case, same context.
    620    mlContext.dispatch(mlGraph, inputs, outputs);
    621 
    622    // Test the wrong context being used for inputs.
    623    const lhsTensor = await anotherMLContext.createTensor(
    624        getDescriptorFromTensor(inputs['lhs']));
    625    assert_throws_js(
    626        TypeError,
    627        () => mlContext.dispatch(
    628            mlGraph, {
    629              'lhs': lhsTensor,
    630              'rhs': inputs['rhs'],
    631            },
    632            outputs));
    633 
    634    // Test the wrong context being used for outputs.
    635    const outputTensor1 = await anotherMLContext.createTensor(
    636        getDescriptorFromTensor(outputs['output1']));
    637    assert_throws_js(TypeError, () => mlContext.dispatch(mlGraph, inputs, {
    638      'output1': outputTensor1,
    639      'output2': outputs['output2'],
    640    }));
    641  }, `${testName} / context_mismatch`);
    642 
    643  promise_test(async () => {
    644    // Control case, valid tensors.
    645    mlContext.dispatch(mlGraph, inputs, outputs);
    646 
    647    // Input is a different shape.
    648    const lhsTensor = await mlContext.createTensor({
    649      dataType: inputs['lhs'].dataType,
    650      // Input rank is too high.
    651      shape: inputs['lhs'].shape.concat([2])
    652    });
    653 
    654    assert_throws_js(
    655        TypeError,
    656        () => mlContext.dispatch(
    657            mlGraph, {
    658              'lhs': lhsTensor,
    659              'rhs': inputs['rhs'],
    660            },
    661            outputs));
    662 
    663    const rhsTensor = await mlContext.createTensor({
    664      dataType: inputs['rhs'].dataType,
    665      // Input rank is too low.
    666      shape: inputs['rhs'].shape.slice(1)
    667    });
    668 
    669    assert_throws_js(
    670        TypeError,
    671        () => mlContext.dispatch(
    672            mlGraph, {
    673              'lhs': inputs['lhs'],
    674              'rhs': rhsTensor,
    675            },
    676            outputs));
    677 
    678    // Output is a different shape. Dimension value is too large.
    679    let output1WrongShape = [...outputs['output1'].shape];
    680    output1WrongShape[0] += 2;
    681    const outputTensor1 = await mlContext.createTensor(
    682        {dataType: outputs['output1'].dataType, shape: output1WrongShape});
    683 
    684    assert_throws_js(TypeError, () => mlContext.dispatch(mlGraph, inputs, {
    685      'output1': outputTensor1,
    686      'output2': outputs['output2'],
    687    }));
    688 
    689    // Output is a different shape. Dimension value is too small.
    690    let output2WrongShape = [...outputs['output2'].shape];
    691    output2WrongShape[1] -= 1;
    692    const outputTensor2 = await mlContext.createTensor(
    693        {dataType: outputs['output2'].dataType, shape: output2WrongShape});
    694 
    695    assert_throws_js(TypeError, () => mlContext.dispatch(mlGraph, inputs, {
    696      'output1': outputs['output1'],
    697      'output2': outputTensor2,
    698    }));
    699  }, `${testName} / invalid shape`);
    700 
    701  promise_test(async () => {
    702    // Control case, valid tensors.
    703    mlContext.dispatch(mlGraph, inputs, outputs);
    704 
    705    // Inputs are a different data type.
    706    const inputWrongDataType = 'int32';
    707    assert_not_equals(inputs['lhs'].dataType, inputWrongDataType);
    708    assert_not_equals(inputs['rhs'].dataType, inputWrongDataType);
    709    assert_throws_js(
    710        TypeError,
    711        () => mlContext.dispatch(
    712            mlGraph, {
    713              'lhs': mlContext.createTensor(
    714                  {dataType: inputWrongDataType, shape: inputs['lhs'].shape}),
    715              'rhs': inputs['rhs'],
    716            },
    717            outputs));
    718 
    719    assert_throws_js(
    720        TypeError,
    721        () => mlContext.dispatch(
    722            mlGraph, {
    723              'lhs': inputs['lhs'],
    724              'rhs': mlContext.createTensor(
    725                  {dataType: inputWrongDataType, shape: inputs['rhs'].shape}),
    726            },
    727            outputs));
    728 
    729    // Outputs are a different data type.
    730    const outputWrongDataType = 'int32';
    731    assert_not_equals(outputs['output1'].dataType, outputWrongDataType);
    732    assert_not_equals(outputs['output2'].dataType, outputWrongDataType);
    733    const outputTensor1 = await mlContext.createTensor(
    734        {dataType: outputWrongDataType, shape: outputs['output1'].shape});
    735 
    736    assert_throws_js(TypeError, () => mlContext.dispatch(mlGraph, inputs, {
    737      'output1': outputTensor1,
    738      'output2': outputs['output2'],
    739    }));
    740 
    741    const outputTensor2 = await mlContext.createTensor(
    742        {dataType: outputWrongDataType, shape: outputs['output2'].shape});
    743 
    744    assert_throws_js(TypeError, () => mlContext.dispatch(mlGraph, inputs, {
    745      'output1': outputs['output1'],
    746      'output2': outputTensor2,
    747    }));
    748  }, `${testName} / invalid data type`);
    749 
    750  promise_test(async () => {
    751    // Control case, valid names.
    752    mlContext.dispatch(mlGraph, inputs, outputs);
    753 
    754    // No names is invalid.
    755    assert_throws_js(TypeError, () => mlContext.dispatch(mlGraph, {}, {}));
    756 
    757    // Input name is invalid.
    758    assert_throws_js(
    759        TypeError,
    760        () => mlContext.dispatch(
    761            mlGraph, {
    762              'aDifferentInputName': inputs['lhs'],
    763              'rhs': inputs['rhs'],
    764            },
    765            outputs));
    766 
    767    assert_throws_js(
    768        TypeError,
    769        () => mlContext.dispatch(
    770            mlGraph, {
    771              'lhs': inputs['lhs'],
    772              'aDifferentInputName': inputs['rhs'],
    773            },
    774            outputs));
    775 
    776    // Output name is invalid.
    777    assert_throws_js(TypeError, () => mlContext.dispatch(mlGraph, inputs, {
    778      'aDifferentOutputName': outputs['output1'],
    779      'output2': outputs['output2'],
    780    }));
    781 
    782    assert_throws_js(TypeError, () => mlContext.dispatch(mlGraph, inputs, {
    783      'output1': outputs['output1'],
    784      'aDifferentOutputName': outputs['output2'],
    785    }));
    786 
    787    // Too few named inputs is invalid.
    788    assert_throws_js(
    789        TypeError,
    790        () => mlContext.dispatch(
    791            mlGraph, {
    792              'lhs': inputs['lhs'],
    793            },
    794            outputs));
    795 
    796    // Too many named inputs is invalid.
    797    const anotherRhsTensor =
    798        await mlContext.createTensor(getDescriptorFromTensor(inputs['rhs']));
    799    assert_throws_js(
    800        TypeError,
    801        () => mlContext.dispatch(
    802            mlGraph, {
    803              'lhs': inputs['lhs'],
    804              'rhs': inputs['rhs'],
    805              'aDifferentInputName': anotherRhsTensor,
    806            },
    807            outputs));
    808 
    809    // Too few named outputs is invalid.
    810    assert_throws_js(TypeError, () => mlContext.dispatch(mlGraph, inputs, {
    811      'output1': outputs['output1']
    812    }));
    813 
    814    // Too many named outputs is invalid.
    815    const anotherOutputTensor2 = await mlContext.createTensor(
    816        getDescriptorFromTensor(outputs['output2']));
    817    assert_throws_js(TypeError, () => mlContext.dispatch(mlGraph, inputs, {
    818      'output1': outputs['output1'],
    819      'output2': outputs['output2'],
    820      'aDifferentOutputName': anotherOutputTensor2,
    821    }));
    822  }, `${testName} / invalid_name`);
    823 
    824  promise_test(async () => {
    825    // Control case, valid tensors.
    826    mlContext.dispatch(mlGraph, inputs, outputs);
    827 
    828    // Same tensor used as outputs more than once is invalid.
    829    assert_throws_js(TypeError, () => mlContext.dispatch(mlGraph, inputs, {
    830      'output1': outputs['output1'],
    831      'output2': outputs['output1'],
    832    }));
    833 
    834    // Same tensor used as input and output is invalid.
    835    assert_throws_js(TypeError, () => mlContext.dispatch(mlGraph, inputs, {
    836      'output1': inputs['lhs'],
    837      'output2': outputs['output2'],
    838    }));
    839 
    840    assert_throws_js(
    841        TypeError,
    842        () => mlContext.dispatch(
    843            mlGraph, {
    844              'lhs': outputs['output1'],
    845              'rhs': inputs['rhs'],
    846            },
    847            outputs));
    848 
    849    // Tensor that does not exist is invalid.
    850    assert_throws_js(
    851        TypeError,
    852        () => mlContext.dispatch(
    853            mlGraph, {
    854              'lhs': undefined,
    855              'rhs': inputs['rhs'],
    856            },
    857            outputs));
    858 
    859    assert_throws_js(TypeError, () => mlContext.dispatch(mlGraph, inputs, {
    860      'output1': undefined,
    861      'output2': outputs['output2'],
    862    }));
    863  }, `${testName} / invalid_tensor`);
    864 
    865  promise_test(async () => {
    866    const dispatchInputs = {
    867      'lhs':
    868          await mlContext.createTensor(getDescriptorFromTensor(inputs['lhs'])),
    869      'rhs':
    870          await mlContext.createTensor(getDescriptorFromTensor(inputs['rhs'])),
    871    };
    872 
    873    const dispatch1Outputs = {
    874      'output1': await mlContext.createTensor(
    875          getDescriptorFromTensor(outputs['output1'])),
    876      'output2': await mlContext.createTensor(
    877          getDescriptorFromTensor(outputs['output2'])),
    878    };
    879 
    880    const dispatch2Outputs = {
    881      'output1': await mlContext.createTensor(
    882          getDescriptorFromTensor(outputs['output1'])),
    883      'output2': await mlContext.createTensor(
    884          getDescriptorFromTensor(outputs['output2'])),
    885    };
    886 
    887    // Initialize inputs
    888    const inputData =
    889        new TypedArrayDict['float32'](sizeOfShape(shape)).fill(1.0);
    890    mlContext.writeTensor(dispatchInputs['lhs'], inputData);
    891    mlContext.writeTensor(dispatchInputs['rhs'], inputData);
    892 
    893    // Output_1 = LHS + RHS = 1 + 1 = 2
    894    mlContext.dispatch(mlGraph, dispatchInputs, dispatch1Outputs);
    895 
    896    // Output_2 = LHS + RHS = 1 + 1 = 2
    897    mlContext.dispatch(mlGraph, dispatchInputs, dispatch2Outputs);
    898 
    899    await assert_tensor_data_equals(
    900        mlContext, dispatch1Outputs['output1'],
    901        new Float32Array(sizeOfShape(shape)).fill(2.0));
    902 
    903    await assert_tensor_data_equals(
    904        mlContext, dispatch1Outputs['output2'],
    905        new Float32Array(sizeOfShape(shape)).fill(2.0));
    906 
    907    await assert_tensor_data_equals(
    908        mlContext, dispatch2Outputs['output1'],
    909        new Float32Array(sizeOfShape(shape)).fill(2.0));
    910 
    911    await assert_tensor_data_equals(
    912        mlContext, dispatch2Outputs['output2'],
    913        new Float32Array(sizeOfShape(shape)).fill(2.0));
    914  }, `${testName} / same_inputs`);
    915 
    916  promise_test(async () => {
    917    const dispatch1Inputs = {
    918      'lhs':
    919          await mlContext.createTensor(getDescriptorFromTensor(inputs['lhs'])),
    920      'rhs':
    921          await mlContext.createTensor(getDescriptorFromTensor(inputs['rhs'])),
    922    };
    923 
    924    const dispatch2Inputs = {
    925      'lhs':
    926          await mlContext.createTensor(getDescriptorFromTensor(inputs['lhs'])),
    927      'rhs':
    928          await mlContext.createTensor(getDescriptorFromTensor(inputs['rhs'])),
    929    };
    930 
    931    const dispatchOutputs = {
    932      'output1': await mlContext.createTensor(
    933          getDescriptorFromTensor(outputs['output1'])),
    934      'output2': await mlContext.createTensor(
    935          getDescriptorFromTensor(outputs['output2'])),
    936    };
    937 
    938    // Initialize inputs
    939    const input1Data =
    940        new TypedArrayDict['float32'](sizeOfShape(shape)).fill(1.0);
    941    mlContext.writeTensor(dispatch1Inputs['lhs'], input1Data);
    942    mlContext.writeTensor(dispatch1Inputs['rhs'], input1Data);
    943 
    944    const input2Data =
    945        new TypedArrayDict['float32'](sizeOfShape(shape)).fill(2.0);
    946    mlContext.writeTensor(dispatch2Inputs['lhs'], input2Data);
    947    mlContext.writeTensor(dispatch2Inputs['rhs'], input2Data);
    948 
    949    // Output = LHS_1 + RHS_1 = 1 + 1 = 2
    950    mlContext.dispatch(mlGraph, dispatch1Inputs, dispatchOutputs);
    951 
    952    // Output = LHS_2 + RHS_2 = 2 + 2 = 4
    953    mlContext.dispatch(mlGraph, dispatch2Inputs, dispatchOutputs);
    954 
    955    await assert_tensor_data_equals(
    956        mlContext, dispatchOutputs['output1'],
    957        new Float32Array(sizeOfShape(shape)).fill(4.0));
    958 
    959    await assert_tensor_data_equals(
    960        mlContext, dispatchOutputs['output2'],
    961        new Float32Array(sizeOfShape(shape)).fill(4.0));
    962  }, `${testName} / same_outputs`);
    963 
    964  promise_test(async () => {
    965    const dispatchInputs = {
    966      'lhs':
    967          await mlContext.createTensor(getDescriptorFromTensor(inputs['lhs'])),
    968      'rhs':
    969          await mlContext.createTensor(getDescriptorFromTensor(inputs['rhs'])),
    970    };
    971 
    972    const dispatchOutputs = {
    973      'output1': await mlContext.createTensor(
    974          getDescriptorFromTensor(outputs['output1'])),
    975      'output2': await mlContext.createTensor(
    976          getDescriptorFromTensor(outputs['output2'])),
    977    };
    978 
    979    // Initialize inputs
    980    const inputData =
    981        new TypedArrayDict['float32'](sizeOfShape(shape)).fill(1.0);
    982    mlContext.writeTensor(dispatchInputs['lhs'], inputData);
    983    mlContext.writeTensor(dispatchInputs['rhs'], inputData);
    984 
    985    // Output = LHS + RHS = 1 + 1 = 2
    986    mlContext.dispatch(mlGraph, dispatchInputs, dispatchOutputs);
    987    mlContext.dispatch(mlGraph, dispatchInputs, dispatchOutputs);
    988 
    989    await assert_tensor_data_equals(
    990        mlContext, dispatchOutputs['output1'],
    991        new Float32Array(sizeOfShape(shape)).fill(2.0));
    992 
    993    await assert_tensor_data_equals(
    994        mlContext, dispatchOutputs['output2'],
    995        new Float32Array(sizeOfShape(shape)).fill(2.0));
    996  }, `${testName} / same_inputs_and_outputs`);
    997 
    998  promise_test(async () => {
    999    const dispatchInputs = {
   1000      'lhs':
   1001          await mlContext.createTensor(getDescriptorFromTensor(inputs['lhs'])),
   1002      'rhs':
   1003          await mlContext.createTensor(getDescriptorFromTensor(inputs['rhs'])),
   1004    };
   1005 
   1006    const dispatch1Outputs = {
   1007      'output1': await mlContext.createTensor(
   1008          getDescriptorFromTensor(outputs['output1'])),
   1009      'output2': await mlContext.createTensor(
   1010          getDescriptorFromTensor(outputs['output2'])),
   1011    };
   1012 
   1013    const dispatch2Outputs = {
   1014      'output1': await mlContext.createTensor(
   1015          getDescriptorFromTensor(outputs['output1'])),
   1016      'output2': await mlContext.createTensor(
   1017          getDescriptorFromTensor(outputs['output2'])),
   1018    };
   1019 
   1020    // Initialize inputs
   1021    const inputData =
   1022        new TypedArrayDict['float32'](sizeOfShape(shape)).fill(1.0);
   1023    mlContext.writeTensor(dispatchInputs['lhs'], inputData);
   1024    mlContext.writeTensor(dispatchInputs['rhs'], inputData);
   1025 
   1026    // Output_1 = LHS + RHS = 1 + 1 = 2
   1027    mlContext.dispatch(mlGraph, dispatchInputs, dispatch1Outputs);
   1028 
   1029    // Output_2 = Output_1_LHS + Output_1_RHS = 2 + 2 = 4
   1030    mlContext.dispatch(
   1031        mlGraph, {
   1032          'lhs': dispatch1Outputs['output1'],
   1033          'rhs': dispatch1Outputs['output2'],
   1034        },
   1035        dispatch2Outputs);
   1036 
   1037    // Output_1 = Output_2_LHS + Output_2_RHS = 4 + 4 = 8
   1038    mlContext.dispatch(
   1039        mlGraph, {
   1040          'lhs': dispatch2Outputs['output1'],
   1041          'rhs': dispatch2Outputs['output2'],
   1042        },
   1043        dispatch1Outputs);
   1044 
   1045    await assert_tensor_data_equals(
   1046        mlContext, dispatch1Outputs['output1'],
   1047        new Float32Array(sizeOfShape(shape)).fill(8));
   1048 
   1049    await assert_tensor_data_equals(
   1050        mlContext, dispatch1Outputs['output2'],
   1051        new Float32Array(sizeOfShape(shape)).fill(8));
   1052  }, `${testName} / outputs_as_inputs`);
   1053 
   1054  promise_test(async () => {
   1055    // Construct a simple graph: OUTPUT = LHS - RHS.
   1056    const builder = new MLGraphBuilder(mlContext);
   1057    const operandType = {dataType: 'float32', shape};
   1058    const lhsOperand = builder.input('lhs', operandType);
   1059    const rhsOperand = builder.input('rhs', operandType);
   1060    const graph =
   1061        await builder.build({'output': builder.sub(lhsOperand, rhsOperand)});
   1062 
   1063    const lhsTensor =
   1064        await mlContext.createTensor(getDescriptorFromTensor(inputs['lhs']));
   1065    const rhsTensor =
   1066        await mlContext.createTensor(getDescriptorFromTensor(inputs['rhs']));
   1067 
   1068    const dispatchOutputs = {
   1069      'output': await mlContext.createTensor(
   1070          getDescriptorFromTensor(outputs['output1']))
   1071    };
   1072 
   1073    // Initialize inputs
   1074    mlContext.writeTensor(
   1075        lhsTensor, new TypedArrayDict['float32'](sizeOfShape(shape)).fill(5.0));
   1076    mlContext.writeTensor(
   1077        rhsTensor, new TypedArrayDict['float32'](sizeOfShape(shape)).fill(3.0));
   1078 
   1079    // Output = LHS - RHS = 5 - 3 = 2
   1080    mlContext.dispatch(
   1081        graph, {
   1082          'lhs': lhsTensor,
   1083          'rhs': rhsTensor,
   1084        },
   1085        dispatchOutputs);
   1086 
   1087    await assert_tensor_data_equals(
   1088        mlContext, dispatchOutputs['output'],
   1089        new Float32Array(sizeOfShape(shape)).fill(2));
   1090 
   1091    // Output = RHS - LHS = 3 - 5 = -2
   1092    mlContext.dispatch(
   1093        graph, {
   1094          'lhs': rhsTensor,
   1095          'rhs': lhsTensor,
   1096        },
   1097        dispatchOutputs);
   1098 
   1099    await assert_tensor_data_equals(
   1100        mlContext, dispatchOutputs['output'],
   1101        new Float32Array(sizeOfShape(shape)).fill(-2));
   1102  }, `${testName} / same name diff input tensors`);
   1103 
   1104  promise_test(async () => {
   1105    const dispatchInputs = {
   1106      'lhs':
   1107          await mlContext.createTensor(getDescriptorFromTensor(inputs['lhs'])),
   1108      'rhs':
   1109          await mlContext.createTensor(getDescriptorFromTensor(inputs['rhs'])),
   1110    };
   1111 
   1112    const outputTensor1 = await mlContext.createTensor(
   1113        getDescriptorFromTensor(outputs['output1']));
   1114    const outputTensor2 = await mlContext.createTensor(
   1115        getDescriptorFromTensor(outputs['output2']));
   1116 
   1117    // Initialize inputs
   1118    const inputData1 =
   1119        new TypedArrayDict['float32'](sizeOfShape(shape)).fill(1.0);
   1120    mlContext.writeTensor(dispatchInputs['lhs'], inputData1);
   1121    mlContext.writeTensor(dispatchInputs['rhs'], inputData1);
   1122 
   1123    // Output = LHS + RHS = 1 + 1 = 2
   1124    mlContext.dispatch(mlGraph, dispatchInputs, {
   1125      'output1': outputTensor1,
   1126      'output2': outputTensor2,
   1127    });
   1128 
   1129    // Output = LHS + RHS = 2 + 2 = 4
   1130    const inputData2 =
   1131        new TypedArrayDict['float32'](sizeOfShape(shape)).fill(2.0);
   1132    mlContext.writeTensor(dispatchInputs['lhs'], inputData2);
   1133    mlContext.writeTensor(dispatchInputs['rhs'], inputData2);
   1134 
   1135    mlContext.dispatch(mlGraph, dispatchInputs, {
   1136      'output1': outputTensor1,
   1137      'output2': await mlContext.createTensor(
   1138          getDescriptorFromTensor(outputs['output2'])),
   1139    });
   1140 
   1141    // Ensure the last dispatch() did not modify the original second output
   1142    // tensor.
   1143    await assert_tensor_data_equals(
   1144        mlContext, outputTensor2, new Float32Array(sizeOfShape(shape)).fill(2));
   1145  }, `${testName} / same name diff outputs tensors`);
   1146 
 promise_test(async () => {
   const dispatchInputs = {
     'lhs':
         await mlContext.createTensor(getDescriptorFromTensor(inputs['lhs'])),
     'rhs':
         await mlContext.createTensor(getDescriptorFromTensor(inputs['rhs'])),
   };

   const dispatchOutputs = {
     'output1': await mlContext.createTensor(
         getDescriptorFromTensor(outputs['output1'])),
     'output2': await mlContext.createTensor(
         getDescriptorFromTensor(outputs['output2'])),
   };

   // Initialize inputs
   const inputData =
       new TypedArrayDict['float32'](sizeOfShape(shape)).fill(1.0);
   mlContext.writeTensor(dispatchInputs['lhs'], inputData);
   mlContext.writeTensor(dispatchInputs['rhs'], inputData);

   // Control dispatch before any tensor is destroyed.
   // Output = LHS + RHS = 1 + 1 = 2
   mlContext.dispatch(mlGraph, dispatchInputs, dispatchOutputs);

   // Check destroyed input tensors cannot be re-used in subsequent dispatches.
   // Destroy 'lhs', then rebind the same input name to a freshly created
   // replacement tensor holding twos.
   dispatchInputs['lhs'].destroy();
   dispatchInputs['lhs'] =
       await mlContext.createTensor(getDescriptorFromTensor(inputs['lhs']));

   const newInputData =
       new TypedArrayDict['float32'](sizeOfShape(shape)).fill(2.0);
   mlContext.writeTensor(dispatchInputs['lhs'], newInputData);

   // Output = LHS + RHS = 2 + 1 = 3
   mlContext.dispatch(mlGraph, dispatchInputs, dispatchOutputs);

   await assert_tensor_data_equals(
       mlContext, dispatchOutputs['output1'],
       new Float32Array(sizeOfShape(shape)).fill(3));

   // Repeat the destroy-and-replace sequence for 'rhs'.
   dispatchInputs['rhs'].destroy();
   dispatchInputs['rhs'] =
       await mlContext.createTensor(getDescriptorFromTensor(inputs['rhs']));
   mlContext.writeTensor(dispatchInputs['rhs'], newInputData);

   // Output = LHS + RHS = 2 + 2 = 4
   mlContext.dispatch(mlGraph, dispatchInputs, dispatchOutputs);

   await assert_tensor_data_equals(
       mlContext, dispatchOutputs['output1'],
       new Float32Array(sizeOfShape(shape)).fill(4));
 }, `${testName} / same name diff inputs tensors destroy`);
   1199 
 promise_test(async () => {
   const dispatchInputs = {
     'lhs':
         await mlContext.createTensor(getDescriptorFromTensor(inputs['lhs'])),
     'rhs':
         await mlContext.createTensor(getDescriptorFromTensor(inputs['rhs'])),
   };

   const dispatchOutputs = {
     'output1': await mlContext.createTensor(
         getDescriptorFromTensor(outputs['output1'])),
     'output2': await mlContext.createTensor(
         getDescriptorFromTensor(outputs['output2'])),
   };

   // Initialize inputs
   const inputData =
       new TypedArrayDict['float32'](sizeOfShape(shape)).fill(1.0);
   mlContext.writeTensor(dispatchInputs['lhs'], inputData);
   mlContext.writeTensor(dispatchInputs['rhs'], inputData);

   // Control dispatch before any tensor is destroyed.
   // Output = LHS + RHS = 1 + 1 = 2
   mlContext.dispatch(mlGraph, dispatchInputs, dispatchOutputs);

   // Check destroyed output tensors cannot be re-used in subsequent
   // dispatches.
   // Destroy 'output1', then rebind the same output name to a freshly
   // created replacement tensor before dispatching again.
   dispatchOutputs['output1'].destroy();
   dispatchOutputs['output1'] = await mlContext.createTensor(
       getDescriptorFromTensor(outputs['output1']));

   const newInputData =
       new TypedArrayDict['float32'](sizeOfShape(shape)).fill(2.0);
   mlContext.writeTensor(dispatchInputs['lhs'], newInputData);

   // Output = LHS + RHS = 2 + 1 = 3
   mlContext.dispatch(mlGraph, dispatchInputs, dispatchOutputs);

   await assert_tensor_data_equals(
       mlContext, dispatchOutputs['output1'],
       new Float32Array(sizeOfShape(shape)).fill(3));
 }, `${testName} / same name diff outputs tensors destroy`);
   1241 
   1242  promise_test(async () => {
   1243    if (!isConstantTensorSupported) {
   1244      return;
   1245    }
   1246 
   1247    let constantTensor = await mlContext.createConstantTensor(
   1248        {
   1249          dataType: 'float32',
   1250          shape: shape,
   1251        },
   1252        new Float32Array(sizeOfShape(shape)).fill(3.0));
   1253 
   1254    const builder = new MLGraphBuilder(mlContext);
   1255    const lhsConstantOperand = builder.constant(constantTensor);
   1256    const rhsConstantOperand = builder.constant(constantTensor);
   1257    const outputOperand = builder.add(lhsConstantOperand, rhsConstantOperand);
   1258    const graphWithOnlyConstants =
   1259        await builder.build({'output': outputOperand});
   1260 
   1261    const outputTensor = await mlContext.createTensor(
   1262        getDescriptorFromTensor(outputs['output1']));
   1263 
   1264    // Output = LHS + RHS = 3 + 3 = 6
   1265    mlContext.dispatch(graphWithOnlyConstants, {}, {'output': outputTensor});
   1266 
   1267    await assert_tensor_data_equals(
   1268        mlContext, outputTensor,
   1269        new Float32Array(sizeOfShape(shape)).fill(6.0));
   1270  }, `${testName} / same constant same graph`);
   1271 
   1272  promise_test(async () => {
   1273    if (!isConstantTensorSupported) {
   1274      return;
   1275    }
   1276 
   1277    const rhsConstantTensor = await mlContext.createConstantTensor(
   1278        {
   1279          dataType: 'float32',
   1280          shape: shape,
   1281        },
   1282        new Float32Array(sizeOfShape(shape)).fill(3.0));
   1283 
   1284    const lhsInputOperandDesc = {dataType: 'float32', shape};
   1285 
   1286    let graphWithConstants;
   1287    {
   1288      const builder = new MLGraphBuilder(mlContext);
   1289      const lhsOperand = builder.input('lhs', lhsInputOperandDesc);
   1290      const rhsConstantOperand = builder.constant(rhsConstantTensor);
   1291      const outputOperand = builder.sub(lhsOperand, rhsConstantOperand);
   1292      graphWithConstants = await builder.build({'output': outputOperand});
   1293    }
   1294 
   1295    const lhsTensor =
   1296        await mlContext.createTensor(getDescriptorFromTensor(inputs['lhs']));
   1297    mlContext.writeTensor(
   1298        lhsTensor, new Float32Array(sizeOfShape(shape)).fill(5.0));
   1299 
   1300    const outputTensor = await mlContext.createTensor(
   1301        getDescriptorFromTensor(outputs['output1']));
   1302 
   1303    // Output = LHS - RHS = 5 - 3 = 2
   1304    mlContext.dispatch(
   1305        graphWithConstants, {
   1306          'lhs': lhsTensor,
   1307        },
   1308        {'output': outputTensor});
   1309 
   1310    // Create another graph reusing the same constants.
   1311    {
   1312      const builder = new MLGraphBuilder(mlContext);
   1313      const lhsOperand = builder.input('lhs', lhsInputOperandDesc);
   1314      const rhsConstantOperand = builder.constant(rhsConstantTensor);
   1315      const outputOperand = builder.sub(lhsOperand, rhsConstantOperand);
   1316      graphWithConstants = await builder.build({'output': outputOperand});
   1317    }
   1318 
   1319    mlContext.writeTensor(
   1320        lhsTensor, new Float32Array(sizeOfShape(shape)).fill(4.0));
   1321 
   1322    // Output = LHS - RHS = 4 - 3 = 1
   1323    mlContext.dispatch(
   1324        graphWithConstants, {
   1325          'lhs': lhsTensor,
   1326        },
   1327        {'output': outputTensor});
   1328 
   1329    await assert_tensor_data_equals(
   1330        mlContext, outputTensor,
   1331        new Float32Array(sizeOfShape(shape)).fill(1.0));
   1332  }, `${testName} / same constant multiple graphs`);
   1333 
   1334  promise_test(async () => {
   1335    // Construct a simple graph: OUTPUT = IDENTITY(INPUT) to test whether the default
   1336    // tensor is initialized to zero.
   1337    const builder = new MLGraphBuilder(mlContext);
   1338    const inputOperand = builder.input('input', {dataType: 'int32', shape: [1024]});
   1339    const graph = await builder.build({'output': builder.identity(inputOperand)});
   1340 
   1341    const inputTensor = await mlContext.createTensor({
   1342      dataType: inputOperand.dataType,
   1343      shape: inputOperand.shape
   1344    });
   1345 
   1346    const outputTensor = await mlContext.createTensor({
   1347      dataType: inputOperand.dataType,
   1348      shape: inputOperand.shape,
   1349      readable: true
   1350    });
   1351 
   1352    mlContext.dispatch(graph, {'input': inputTensor}, {'output': outputTensor});
   1353    await assert_tensor_data_equals(mlContext, outputTensor, new Uint32Array(1024));
   1354  }, `${testName} / default tensor uninitialized`);
   1355 };
   1356 
   1357 /**
   1358 * Asserts a gpu buffer data matches expected.
   1359 * @param {GPUDevice} gpuDevice - The device used to create the context.
   1360 * @param {GPUBuffer} gpuBuffer - The buffer to read and compare data.
   1361 * @param {Array} expected - Array of the expected data in the tensor.
   1362 */
   1363 const assert_gpu_buffer_data_equals =
   1364    async (gpuDevice, gpuBuffer, expected) => {
   1365  const gpuReadbackBuffer = gpuDevice.createBuffer({
   1366    size: expected.byteLength,
   1367    usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
   1368  });
   1369 
   1370  const gpuCommandEncoder = gpuDevice.createCommandEncoder();
   1371  gpuCommandEncoder.copyBufferToBuffer(
   1372      gpuBuffer, 0, gpuReadbackBuffer, 0, expected.byteLength);
   1373  gpuDevice.queue.submit([gpuCommandEncoder.finish()]);
   1374 
   1375  await gpuReadbackBuffer.mapAsync(GPUMapMode.READ);
   1376  const outputData =
   1377      new expected.constructor(gpuReadbackBuffer.getMappedRange());
   1378  assert_array_equals(outputData, expected);
   1379  gpuReadbackBuffer.unmap();
   1380 };
   1381 
   1382 /**
   1383 * Export to GPU operation test.
   1384 * @param {String} testName - The name of the test operation.
   1385 */
   1386 const testExportToGPU = (testName, dataType) => {
   1387  let gpuAdapter;
   1388  let gpuDevice;
   1389  let mlContext;
   1390  let mlGraph;
   1391  const shape = [24, 2];
   1392  let gpuComputePipeline;
   1393  let isExportToGPUSupported = true;
   1394  const typedArray = dataType == 'float16' ? Float16Array : Float32Array;
   1395 
 promise_setup(async () => {
   // Initialize GPU. Mark export unsupported rather than failing when no
   // adapter is available.
   gpuAdapter = navigator.gpu && await navigator.gpu.requestAdapter();
   if (!gpuAdapter) {
     isExportToGPUSupported = false;
     return;
   }

   // 'shader-f16' is required so the custom op below can address f16 buffers.
   gpuDevice =
       await gpuAdapter.requestDevice({requiredFeatures: ['shader-f16']});
   if (!gpuDevice) {
     isExportToGPUSupported = false;
     return;
   }

   // Construct a GPU custom op which increments each number of the input
   // buffer by 1.
   const bufferType = dataType == 'float16' ? 'f16' : 'f32';
   const gpuComputeShaderCode = `
      ${bufferType == 'f16' ? 'enable f16;' : ''}
       @group(0) @binding(0) var<storage, read> inputBuffer: array<${
       bufferType}>;
       @group(0) @binding(1) var<storage, read_write> outputBuffer: array<${
       bufferType}>;

       @compute @workgroup_size(1)
       fn main(@builtin(global_invocation_id) global_id: vec3<u32>) {
           let index = global_id.x;
           outputBuffer[index] = inputBuffer[index] + 1.0${
       bufferType == 'f16' ? 'h' : ''};
       }`;

   const gpuShaderModule =
       gpuDevice.createShaderModule({code: gpuComputeShaderCode});

   gpuComputePipeline = gpuDevice.createComputePipeline({
     layout: 'auto',
     compute: {module: gpuShaderModule, entryPoint: 'main'},
   });

   // Initialize WebNN
   try {
     mlContext = await navigator.ml.createContext(contextOptions);
   } catch (e) {
     throw new AssertionError(
         `Unable to create context for ${variant} variant. ${e}`);
   }

   // Check if WebNN interop is supported by probing a single export; a
   // NotSupportedError here makes every test below return early as a no-op.
   try {
     let mlTensor =
         await mlContext.createExportableTensor({dataType, shape}, gpuDevice);
     await mlContext.exportToGPU(mlTensor);
   } catch (e) {
     if (e.name === 'NotSupportedError') {
       isExportToGPUSupported = false;
       return;
     }
     throw e;
   }

   // Construct a simple graph: OUTPUT = LHS + RHS.
   const mlBuilder = new MLGraphBuilder(mlContext);
   const mlOperandDescriptor = {dataType, shape};
   const lhsOperand = mlBuilder.input('lhs', mlOperandDescriptor);
   const rhsOperand = mlBuilder.input('rhs', mlOperandDescriptor);
   mlGraph = await mlBuilder.build(
       {'output': mlBuilder.add(lhsOperand, rhsOperand)});
 });
   1465 
   1466  const dispatchGPU =
   1467      (gpuDevice, gpuPipeline, gpuInputBuffer, gpuOutputBuffer, inputData) => {
   1468        const gpuBindGroup = gpuDevice.createBindGroup({
   1469          layout: gpuPipeline.getBindGroupLayout(0),
   1470          entries: [
   1471            {binding: 0, resource: {buffer: gpuInputBuffer}},
   1472            {binding: 1, resource: {buffer: gpuOutputBuffer}},
   1473          ],
   1474        });
   1475 
   1476        const gpuCommandEncoder = gpuDevice.createCommandEncoder();
   1477        {
   1478          const gpuComputePass = gpuCommandEncoder.beginComputePass();
   1479          gpuComputePass.setPipeline(gpuPipeline);
   1480          gpuComputePass.setBindGroup(0, gpuBindGroup);
   1481          gpuComputePass.dispatchWorkgroups(
   1482              inputData.byteLength / inputData.BYTES_PER_ELEMENT);
   1483          gpuComputePass.end();
   1484        }
   1485        gpuDevice.queue.submit([gpuCommandEncoder.finish()]);
   1486      };
   1487 
   1488  promise_test(async () => {
   1489    if (!isExportToGPUSupported) {
   1490      return;
   1491    }
   1492 
   1493    const mlTensorDescriptor = {dataType, shape};
   1494    const mlTensor = await mlContext.createExportableTensor(mlTensorDescriptor,
   1495      gpuDevice);
   1496 
   1497    const gpuTensorBuffer = await mlContext.exportToGPU(mlTensor);
   1498 
   1499    assert_equals(
   1500        gpuTensorBuffer.usage,
   1501        GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC |
   1502            GPUBufferUsage.COPY_DST);
   1503    assert_equals(gpuTensorBuffer.size, sizeOfDescriptor(mlTensorDescriptor));
   1504  }, `${testName} / export tensor`);
   1505 
   1506  promise_test(async t => {
   1507    if (!isExportToGPUSupported) {
   1508      return;
   1509    }
   1510 
   1511    const mlTensor = await mlContext.createTensor({dataType, shape});
   1512 
   1513    await promise_rejects_js(t, TypeError, mlContext.exportToGPU(mlTensor));
   1514  }, `${testName} / export wrong tensor`);
   1515 
   1516  promise_test(async t => {
   1517    if (!isExportToGPUSupported) {
   1518      return;
   1519    }
   1520 
   1521    const maxBufferSizeOOB = gpuDevice.limits.maxBufferSize + 1;
   1522    const elementSize = typedArray.BYTES_PER_ELEMENT;
   1523    const shape = [maxBufferSizeOOB / elementSize];
   1524 
   1525    const mlTensor =
   1526        await mlContext.createExportableTensor({dataType, shape}, gpuDevice);
   1527 
   1528    await mlContext.exportToGPU(mlTensor);
   1529  }, `${testName} / export big tensor`)
   1530 
   1531  promise_test(async () => {
   1532    if (!isExportToGPUSupported) {
   1533      return;
   1534    }
   1535 
   1536    const mlTensorDescriptor =
   1537        {dataType, shape, readable: true, writable: true};
   1538 
   1539    let mlTensor = await mlContext.createExportableTensor(mlTensorDescriptor, gpuDevice);
   1540    const inputData = new typedArray(sizeOfShape(shape)).fill(1.0);
   1541    mlContext.writeTensor(mlTensor, inputData);
   1542 
   1543    const gpuTensorBuffer = await mlContext.exportToGPU(mlTensor);
   1544    gpuTensorBuffer.destroy();
   1545 
   1546    await assert_tensor_data_equals(mlContext, mlTensor, inputData);
   1547  }, `${testName} / export then destroy buffer`);
   1548 
   1549  promise_test(async () => {
   1550    if (!isExportToGPUSupported) {
   1551      return;
   1552    }
   1553 
   1554    const mlTensorDescriptor = {dataType, shape, writable: true};
   1555 
   1556    let mlTensor = await mlContext.createExportableTensor(mlTensorDescriptor, gpuDevice);
   1557 
   1558    const inputData = new typedArray(sizeOfShape(shape)).fill(1.0);
   1559    mlContext.writeTensor(mlTensor, inputData);
   1560 
   1561    const gpuTensorBuffer = await mlContext.exportToGPU(mlTensor);
   1562    mlTensor.destroy();
   1563 
   1564    await assert_gpu_buffer_data_equals(gpuDevice, gpuTensorBuffer, inputData);
   1565  }, `${testName} / export then destroy tensor`);
   1566 
   1567  promise_test(async () => {
   1568    if (!isExportToGPUSupported) {
   1569      return;
   1570    }
   1571 
   1572    const mlTensor =
   1573        await mlContext.createExportableTensor({dataType, shape}, gpuDevice);
   1574    await mlContext.exportToGPU(mlTensor);
   1575    assert_throws_js(
   1576        TypeError,
   1577        () => mlContext.writeTensor(
   1578            mlTensor, new typedArray([1.0, 2.0, 3.0, 4.0])));
   1579  }, `${testName} / write tensor after export`);
   1580 
   1581  promise_test(async t => {
   1582    if (!isExportToGPUSupported) {
   1583      return;
   1584    }
   1585 
   1586    const mlTensor =
   1587        await mlContext.createExportableTensor({dataType, shape}, gpuDevice);
   1588 
   1589    // Second call rejects because the first export is still pending and multiple
   1590    // exports aren’t allowed.
   1591    let export_promise = mlContext.exportToGPU(mlTensor);
   1592    await promise_rejects_js(t, TypeError, mlContext.exportToGPU(mlTensor));
   1593 
   1594    let gpuTensorBuffer1 = await export_promise;
   1595    let gpuTensorBuffer2 = await mlContext.exportToGPU(mlTensor);
   1596    assert_equals(
   1597        gpuTensorBuffer1, gpuTensorBuffer2, 'Same buffers should be returned.');
   1598  }, `${testName} / export twice`);
   1599 
   1600  promise_test(async () => {
   1601    if (!isExportToGPUSupported) {
   1602      return;
   1603    }
   1604 
   1605    // Initialize the tensor buffers from WebNN.
   1606    let mlTensorInput = await mlContext.createExportableTensor(
   1607        {dataType, shape, writable: true}, gpuDevice);
   1608 
   1609    const inputData1 = new typedArray(sizeOfShape(shape)).fill(1.0);
   1610    mlContext.writeTensor(mlTensorInput, inputData1);
   1611 
   1612    let mlTensorOutput =
   1613        await mlContext.createExportableTensor({dataType, shape}, gpuDevice);
   1614 
   1615    let gpuTensorBufferInput = await mlContext.exportToGPU(mlTensorInput);
   1616    let gpuTensorBufferOutput = await mlContext.exportToGPU(mlTensorOutput);
   1617 
   1618    dispatchGPU(
   1619        gpuDevice, gpuComputePipeline, gpuTensorBufferInput,
   1620        gpuTensorBufferOutput, inputData1);
   1621 
   1622    gpuTensorBufferInput.destroy();
   1623    gpuTensorBufferOutput.destroy();
   1624 
   1625    // Write different data to the input tensor.
   1626    const inputData2 = new typedArray(sizeOfShape(shape)).fill(2.0);
   1627    mlContext.writeTensor(mlTensorInput, inputData2);
   1628 
   1629    gpuTensorBufferInput = await mlContext.exportToGPU(mlTensorInput);
   1630    gpuTensorBufferOutput = await mlContext.exportToGPU(mlTensorOutput);
   1631 
   1632    dispatchGPU(
   1633        gpuDevice, gpuComputePipeline, gpuTensorBufferInput,
   1634        gpuTensorBufferOutput, inputData2);
   1635 
   1636    await assert_gpu_buffer_data_equals(
   1637        gpuDevice, gpuTensorBufferOutput, inputData2.map(x => x + 1));
   1638  }, `${testName} / dispatch gpu twice`);
   1639 
   1640  promise_test(async () => {
   1641    if (!isExportToGPUSupported) {
   1642      return;
   1643    }
   1644 
   1645    // Initialize the tensor buffers from WebNN.
   1646    let mlTensorInput = await mlContext.createExportableTensor(
   1647        {dataType, shape, writable: true}, gpuDevice);
   1648 
   1649    const inputData = new typedArray(sizeOfShape(shape)).fill(1.0);
   1650    mlContext.writeTensor(mlTensorInput, inputData);
   1651 
   1652    let mlTensorOutput = await mlContext.createExportableTensor(
   1653        {dataType, shape, readable: true}, gpuDevice);
   1654 
   1655    let gpuTensorBufferInput = await mlContext.exportToGPU(mlTensorInput);
   1656    let gpuTensorBufferOutput = await mlContext.exportToGPU(mlTensorOutput);
   1657 
   1658    gpuTensorBufferInput.destroy();
   1659    gpuTensorBufferOutput.destroy();
   1660 
   1661    mlContext.dispatch(
   1662        mlGraph, {
   1663          'lhs': mlTensorInput,
   1664          'rhs': mlTensorInput,
   1665        },
   1666        {
   1667          'output': mlTensorOutput,
   1668        });
   1669 
   1670    await assert_tensor_data_equals(
   1671        mlContext, mlTensorOutput, inputData.map(x => x + 1));
   1672  }, `${testName} / webnn dispatch only`);
   1673 
 promise_test(async () => {
   if (!isExportToGPUSupported) {
     return;
   }

   // Interleaves a WebGPU compute dispatch, a WebNN graph dispatch, and a
   // second WebGPU dispatch on the same pair of tensors, then checks the
   // final value. The GPU pipeline presumably computes output = input + 1
   // (consistent with the 'dispatch gpu twice' test) and mlGraph computes
   // lhs + rhs (consistent with the 'webnn dispatch only' test) —
   // TODO(review): confirm against their definitions earlier in this file.

   // Initialize the tensor buffers from WebNN.
   let mlTensorInput = await mlContext.createExportableTensor(
       {dataType, shape, writable: true}, gpuDevice);

   const inputData = new typedArray(sizeOfShape(shape)).fill(1.0);
   mlContext.writeTensor(mlTensorInput, inputData);

   let mlTensorOutput =
       await mlContext.createExportableTensor({dataType, shape}, gpuDevice);

   let gpuTensorBufferInput = await mlContext.exportToGPU(mlTensorInput);
   let gpuTensorBufferOutput = await mlContext.exportToGPU(mlTensorOutput);

   // GPU pass: output becomes 1.0 + 1 = 2.0.
   dispatchGPU(
       gpuDevice, gpuComputePipeline, gpuTensorBufferInput,
       gpuTensorBufferOutput, inputData);

   // Destroy the exported buffers before handing the tensors back to a
   // WebNN dispatch.
   gpuTensorBufferInput.destroy();
   gpuTensorBufferOutput.destroy();

   // WebNN pass writes into the input tensor: 2.0 + 2.0 = 4.0.
   mlContext.dispatch(
       mlGraph, {
         'lhs': mlTensorOutput,
         'rhs': mlTensorOutput,
       },
       {
         'output': mlTensorInput,
       });

   // Re-export both tensors and run the GPU pass once more:
   // 4.0 + 1 = 5.0.
   gpuTensorBufferInput = await mlContext.exportToGPU(mlTensorInput);
   gpuTensorBufferOutput = await mlContext.exportToGPU(mlTensorOutput);

   dispatchGPU(
       gpuDevice, gpuComputePipeline, gpuTensorBufferInput,
       gpuTensorBufferOutput, inputData);

   await assert_gpu_buffer_data_equals(
       gpuDevice, gpuTensorBufferOutput,
       new typedArray(sizeOfShape(shape)).fill(5.0));
 }, `${testName} / dispatch from webgpu then webnn`);
   1719 
   1720  promise_test(async () => {
   1721    if (!isExportToGPUSupported) {
   1722      return;
   1723    }
   1724 
   1725    let anotherMLContext = await navigator.ml.createContext(contextOptions);
   1726 
   1727    let mlTensor = await anotherMLContext.createExportableTensor(
   1728        {dataType, shape, writable: true}, gpuDevice);
   1729 
   1730    const inputData = new typedArray(sizeOfShape(shape)).fill(1.0);
   1731    anotherMLContext.writeTensor(mlTensor, inputData);
   1732 
   1733    const gpuTensorBuffer = await anotherMLContext.exportToGPU(mlTensor);
   1734 
   1735    anotherMLContext.destroy();
   1736 
   1737    await assert_gpu_buffer_data_equals(gpuDevice, gpuTensorBuffer, inputData);
   1738  }, `${testName} / destroy context after export`);
   1739 
   1740  promise_test(async t => {
   1741    if (!isExportToGPUSupported) {
   1742      return;
   1743    }
   1744 
   1745    let anotherGPUAdapter = await navigator.gpu.requestAdapter();
   1746    let anotherGPUDevice = await anotherGPUAdapter.requestDevice(
   1747        {requiredFeatures: ['shader-f16']});
   1748    let anotherMLContext = await navigator.ml.createContext(contextOptions);
   1749 
   1750    let mlTensor = await anotherMLContext.createExportableTensor(
   1751        {dataType, shape, readable: true, writable: true}, anotherGPUDevice);
   1752 
   1753    const inputData = new typedArray(sizeOfShape(shape)).fill(1.0);
   1754    anotherMLContext.writeTensor(mlTensor, inputData);
   1755 
   1756    const gpuTensorBuffer = await anotherMLContext.exportToGPU(mlTensor);
   1757 
   1758    anotherGPUDevice.destroy();
   1759 
   1760    gpuTensorBuffer.destroy();
   1761 
   1762    await assert_tensor_data_equals(anotherMLContext, mlTensor, inputData);
   1763  }, `${testName} / destroy device after export`);
   1764 
   1765  promise_test(async t => {
   1766    if (!isExportToGPUSupported) {
   1767      return;
   1768    }
   1769 
   1770    let anotherGPUAdapter = await navigator.gpu.requestAdapter();
   1771    let anotherGPUDevice = await anotherGPUAdapter.requestDevice(
   1772        {requiredFeatures: ['shader-f16']});
   1773    let anotherMLContext = await navigator.ml.createContext(contextOptions);
   1774 
   1775    let mlTensor = await anotherMLContext.createExportableTensor(
   1776        {dataType, shape, readable: true, writable: true}, anotherGPUDevice);
   1777    const inputData = new typedArray(sizeOfShape(shape)).fill(1.0);
   1778    anotherMLContext.writeTensor(mlTensor, inputData);
   1779 
   1780    anotherGPUDevice.destroy();
   1781 
   1782    await promise_rejects_dom(
   1783        t, 'InvalidStateError', anotherMLContext.exportToGPU(mlTensor));
   1784  }, `${testName} / destroy device before export`);
   1785 };
   1786 
// Suite driver: register the MLTensor tests only when WebNN is available.
if (navigator.ml) {
 // Tensor creation across several data types and ranks.
 testCreateTensor('create', {dataType: 'float16', shape: [2, 3]});
 testCreateTensor('create', {dataType: 'float32', shape: [1, 5]});
 testCreateTensor('create', {dataType: 'int32', shape: [4]});
 testCreateTensor('create', {dataType: 'uint8', shape: [3, 2, 4]});

 // Invalid descriptors: a zero-sized dimension and an overflowing size.
 testCreateTensorFails(
     'createFailsEmptyDimension', {dataType: 'int32', shape: [2, 0, 3]});
 testCreateTensorFails('createFailsTooLarge', {
   dataType: 'int32',
   shape: [kMaxUnsignedLong, kMaxUnsignedLong, kMaxUnsignedLong]
 });

 // Constant-tensor creation, valid and invalid.
 testCreateConstantTensor('createConstant', {dataType: 'int32', shape: [4]});
 testCreateConstantTensor(
     'createConstant', {dataType: 'uint8', shape: [3, 2, 4]});

 testCreateConstantTensorFails(
     'createConstantFailsEmptyDimension',
     {dataType: 'int32', shape: [2, 0, 3]});

 // Lifecycle, I/O, dispatch, and WebGPU-interop coverage.
 testDestroyTensor('destroyTwice');
 testReadTensor('read');
 testWriteTensor('write');
 testDispatchTensor('dispatch');
 testExportToGPU('interop float16', 'float16');
 testExportToGPU('interop float32', 'float32');
} else {
 // Register a single failing test so the missing API is reported rather
 // than silently skipped.
 test(() => assert_implements(navigator.ml, 'missing navigator.ml'));
}