tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit 4f25d9d732e74c0793c8e0ee57bdc0b57660d768
parent 7156d0685138b6c9c8869b730a5871352dbcad47
Author: Phillis Tang <phillis@chromium.org>
Date:   Mon, 17 Nov 2025 21:50:19 +0000

Bug 2000522 [wpt PR 56046] - webnn: add webgpu interop float16 test, a=testonly

Automatic update from web-platform-tests
webnn: add webgpu interop float16 test

Add test coverage for float16. Also update the tensor shape to not use
the same number for width and height.

Change-Id: Iec6eb063b03c71747eae56229984247d0c282616
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/7129702
Reviewed-by: Reilly Grant <reillyg@chromium.org>
Commit-Queue: Phillis Tang <phillis@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1545337}

--

wpt-commits: 8aa2da3d7d031b2f651605e8007d173e5c87cabf
wpt-pr: 56046

Diffstat:
Mtesting/web-platform/tests/webnn/conformance_tests/tensor.https.any.js | 165++++++++++++++++++++++++++++++++-----------------------------------------------
1 file changed, 66 insertions(+), 99 deletions(-)

diff --git a/testing/web-platform/tests/webnn/conformance_tests/tensor.https.any.js b/testing/web-platform/tests/webnn/conformance_tests/tensor.https.any.js @@ -1361,14 +1361,16 @@ const assert_gpu_buffer_data_equals = * Export to GPU operation test. * @param {String} testName - The name of the test operation. */ -const testExportToGPU = (testName) => { +const testExportToGPU = (testName, dataType) => { let gpuAdapter; let gpuDevice; let mlContext; let mlGraph; - const shape = [2, 2]; + const shape = [24, 2]; let gpuComputePipeline; let isExportToGPUSupported = true; + const typedArray = dataType == 'float16' ? Float16Array : Float32Array; + promise_setup(async () => { // Initialize GPU gpuAdapter = navigator.gpu && await navigator.gpu.requestAdapter(); @@ -1377,7 +1379,8 @@ const testExportToGPU = (testName) => { return; } - gpuDevice = await gpuAdapter.requestDevice(); + gpuDevice = + await gpuAdapter.requestDevice({requiredFeatures: ['shader-f16']}); if (!gpuDevice) { isExportToGPUSupported = false; return; @@ -1385,18 +1388,24 @@ const testExportToGPU = (testName) => { // Construct a GPU custom op which increments each number of the input // buffer by 1. + const bufferType = dataType == 'float16' ? 'f16' : 'f32'; const gpuComputeShaderCode = ` - @group(0) @binding(0) var<storage, read> inputBuffer: array<f32>; - @group(0) @binding(1) var<storage, read_write> outputBuffer: array<f32>; + ${bufferType == 'f16' ? 'enable f16;' : ''} + @group(0) @binding(0) var<storage, read> inputBuffer: array<${ + bufferType}>; + @group(0) @binding(1) var<storage, read_write> outputBuffer: array<${ + bufferType}>; @compute @workgroup_size(1) fn main(@builtin(global_invocation_id) global_id: vec3<u32>) { let index = global_id.x; - outputBuffer[index] = inputBuffer[index] + 1.0; + outputBuffer[index] = inputBuffer[index] + 1.0${ + bufferType == 'f16' ? 
'h' : ''}; }`; const gpuShaderModule = gpuDevice.createShaderModule({code: gpuComputeShaderCode}); + gpuComputePipeline = gpuDevice.createComputePipeline({ layout: 'auto', compute: {module: gpuShaderModule, entryPoint: 'main'}, @@ -1412,10 +1421,8 @@ const testExportToGPU = (testName) => { // Check if WebNN interop is supported. try { - let mlTensor = await mlContext.createExportableTensor({ - dataType: 'float32', - shape: shape, - }, gpuDevice); + let mlTensor = + await mlContext.createExportableTensor({dataType, shape}, gpuDevice); await mlContext.exportToGPU(mlTensor); } catch (e) { if (e.name === 'NotSupportedError') { @@ -1427,7 +1434,7 @@ const testExportToGPU = (testName) => { // Construct a simple graph: OUTPUT = LHS + RHS. const mlBuilder = new MLGraphBuilder(mlContext); - const mlOperandDescriptor = {dataType: 'float32', shape}; + const mlOperandDescriptor = {dataType, shape}; const lhsOperand = mlBuilder.input('lhs', mlOperandDescriptor); const rhsOperand = mlBuilder.input('rhs', mlOperandDescriptor); mlGraph = await mlBuilder.build( @@ -1461,13 +1468,10 @@ const testExportToGPU = (testName) => { return; } - const mlTensorDescriptor = { - dataType: 'float32', - shape: shape, - }; - + const mlTensorDescriptor = {dataType, shape}; const mlTensor = await mlContext.createExportableTensor(mlTensorDescriptor, gpuDevice); + const gpuTensorBuffer = await mlContext.exportToGPU(mlTensor); assert_equals( @@ -1482,10 +1486,7 @@ const testExportToGPU = (testName) => { return; } - const mlTensor = await mlContext.createTensor({ - dataType: 'float32', - shape: shape, - }); + const mlTensor = await mlContext.createTensor({dataType, shape}); await promise_rejects_js(t, TypeError, mlContext.exportToGPU(mlTensor)); }, `${testName} / export wrong tensor`); @@ -1496,13 +1497,11 @@ const testExportToGPU = (testName) => { } const maxBufferSizeOOB = gpuDevice.limits.maxBufferSize + 1; - const elementSize = Float32Array.BYTES_PER_ELEMENT; + const elementSize = 
typedArray.BYTES_PER_ELEMENT; const shape = [maxBufferSizeOOB / elementSize]; - const mlTensor = await mlContext.createExportableTensor({ - dataType: 'float32', - shape: shape - }, gpuDevice); + const mlTensor = + await mlContext.createExportableTensor({dataType, shape}, gpuDevice); await mlContext.exportToGPU(mlTensor); }, `${testName} / export big tensor`) @@ -1512,15 +1511,11 @@ const testExportToGPU = (testName) => { return; } - const mlTensorDescriptor = { - dataType: 'float32', - shape: shape, - readable: true, - writable: true - }; + const mlTensorDescriptor = + {dataType, shape, readable: true, writable: true}; let mlTensor = await mlContext.createExportableTensor(mlTensorDescriptor, gpuDevice); - const inputData = new Float32Array(sizeOfShape(shape)).fill(1.0); + const inputData = new typedArray(sizeOfShape(shape)).fill(1.0); mlContext.writeTensor(mlTensor, inputData); const gpuTensorBuffer = await mlContext.exportToGPU(mlTensor); @@ -1534,15 +1529,11 @@ const testExportToGPU = (testName) => { return; } - const mlTensorDescriptor = { - dataType: 'float32', - shape: shape, - writable: true - }; + const mlTensorDescriptor = {dataType, shape, writable: true}; let mlTensor = await mlContext.createExportableTensor(mlTensorDescriptor, gpuDevice); - const inputData = new Float32Array(sizeOfShape(shape)).fill(1.0); + const inputData = new typedArray(sizeOfShape(shape)).fill(1.0); mlContext.writeTensor(mlTensor, inputData); const gpuTensorBuffer = await mlContext.exportToGPU(mlTensor); @@ -1556,15 +1547,13 @@ const testExportToGPU = (testName) => { return; } - const mlTensor = await mlContext.createExportableTensor({ - dataType: 'float32', - shape: shape, - }, gpuDevice); + const mlTensor = + await mlContext.createExportableTensor({dataType, shape}, gpuDevice); await mlContext.exportToGPU(mlTensor); assert_throws_js( TypeError, () => mlContext.writeTensor( - mlTensor, new Float32Array([1.0, 2.0, 3.0, 4.0]))); + mlTensor, new typedArray([1.0, 2.0, 3.0, 4.0]))); }, 
`${testName} / write tensor after export`); promise_test(async t => { @@ -1572,10 +1561,8 @@ const testExportToGPU = (testName) => { return; } - const mlTensor = await mlContext.createExportableTensor({ - dataType: 'float32', - shape: shape, - }, gpuDevice); + const mlTensor = + await mlContext.createExportableTensor({dataType, shape}, gpuDevice); // Second call rejects because the first export is still pending and multiple // exports aren't allowed. await promise_rejects_js( t, TypeError, mlContext.exportToGPU(mlTensor)); }, `${testName} / export tensor twice`); promise_test(async () => { if (!isExportToGPUSupported) { return; } @@ -1594,17 +1581,14 @@ const testExportToGPU = (testName) => { } // Initialize the tensor buffers from WebNN. 
- let mlTensorInput = await mlContext.createExportableTensor({ - dataType: 'float32', - shape: shape, - writable: true - }, gpuDevice); + let mlTensorInput = await mlContext.createExportableTensor( + {dataType, shape, writable: true}, gpuDevice); - const inputData = new Float32Array(sizeOfShape(shape)).fill(1.0); + const inputData = new typedArray(sizeOfShape(shape)).fill(1.0); mlContext.writeTensor(mlTensorInput, inputData); - let mlTensorOutput = await mlContext.createExportableTensor({ - dataType: 'float32', - shape: shape, - readable: true - }, gpuDevice); + let mlTensorOutput = await mlContext.createExportableTensor( + {dataType, shape, readable: true}, gpuDevice); let gpuTensorBufferInput = await mlContext.exportToGPU(mlTensorInput); let gpuTensorBufferOutput = await mlContext.exportToGPU(mlTensorOutput); @@ -1677,17 +1655,14 @@ const testExportToGPU = (testName) => { } // Initialize the tensor buffers from WebNN. - let mlTensorInput = await mlContext.createExportableTensor({ - dataType: 'float32', - shape: shape, - writable: true, - }, gpuDevice); + let mlTensorInput = await mlContext.createExportableTensor( + {dataType, shape, writable: true}, gpuDevice); - const inputData = new Float32Array(sizeOfShape(shape)).fill(1.0); + const inputData = new typedArray(sizeOfShape(shape)).fill(1.0); mlContext.writeTensor(mlTensorInput, inputData); - let mlTensorOutput = await mlContext.createExportableTensor( - {dataType: 'float32', shape: shape}, gpuDevice); + let mlTensorOutput = + await mlContext.createExportableTensor({dataType, shape}, gpuDevice); let gpuTensorBufferInput = await mlContext.exportToGPU(mlTensorInput); let gpuTensorBufferOutput = await mlContext.exportToGPU(mlTensorOutput); @@ -1717,7 +1692,7 @@ const testExportToGPU = (testName) => { await assert_gpu_buffer_data_equals( gpuDevice, gpuTensorBufferOutput, - new Float32Array(sizeOfShape(shape)).fill(5.0)); + new typedArray(sizeOfShape(shape)).fill(5.0)); }, `${testName} / dispatch from webgpu then 
webnn`); promise_test(async () => { @@ -1727,13 +1702,10 @@ const testExportToGPU = (testName) => { let anotherMLContext = await navigator.ml.createContext(contextOptions); - let mlTensor = await anotherMLContext.createExportableTensor({ - dataType: 'float32', - shape: shape, - writable: true, - }, gpuDevice); + let mlTensor = await anotherMLContext.createExportableTensor( + {dataType, shape, writable: true}, gpuDevice); - const inputData = new Float32Array(sizeOfShape(shape)).fill(1.0); + const inputData = new typedArray(sizeOfShape(shape)).fill(1.0); anotherMLContext.writeTensor(mlTensor, inputData); const gpuTensorBuffer = await anotherMLContext.exportToGPU(mlTensor); @@ -1749,17 +1721,14 @@ const testExportToGPU = (testName) => { } let anotherGPUAdapter = await navigator.gpu.requestAdapter(); - let anotherGPUDevice = await anotherGPUAdapter.requestDevice(); + let anotherGPUDevice = await anotherGPUAdapter.requestDevice( + {requiredFeatures: ['shader-f16']}); let anotherMLContext = await navigator.ml.createContext(contextOptions); - let mlTensor = await anotherMLContext.createExportableTensor({ - dataType: 'float32', - shape: shape, - readable: true, - writable: true, - }, anotherGPUDevice); + let mlTensor = await anotherMLContext.createExportableTensor( + {dataType, shape, readable: true, writable: true}, anotherGPUDevice); - const inputData = new Float32Array(sizeOfShape(shape)).fill(1.0); + const inputData = new typedArray(sizeOfShape(shape)).fill(1.0); anotherMLContext.writeTensor(mlTensor, inputData); const gpuTensorBuffer = await anotherMLContext.exportToGPU(mlTensor); @@ -1777,16 +1746,13 @@ const testExportToGPU = (testName) => { } let anotherGPUAdapter = await navigator.gpu.requestAdapter(); - let anotherGPUDevice = await anotherGPUAdapter.requestDevice(); + let anotherGPUDevice = await anotherGPUAdapter.requestDevice( + {requiredFeatures: ['shader-f16']}); let anotherMLContext = await navigator.ml.createContext(contextOptions); - let mlTensor = await 
anotherMLContext.createExportableTensor({ - dataType: 'float32', - shape: shape, - readable: true, - writable: true - }, anotherGPUDevice); - const inputData = new Float32Array(sizeOfShape(shape)).fill(1.0); + let mlTensor = await anotherMLContext.createExportableTensor( + {dataType, shape, readable: true, writable: true}, anotherGPUDevice); + const inputData = new typedArray(sizeOfShape(shape)).fill(1.0); anotherMLContext.writeTensor(mlTensor, inputData); anotherGPUDevice.destroy(); @@ -1821,7 +1787,8 @@ if (navigator.ml) { testReadTensor('read'); testWriteTensor('write'); testDispatchTensor('dispatch'); - testExportToGPU('interop'); + testExportToGPU('interop float16', 'float16'); + testExportToGPU('interop float32', 'float32'); } else { test(() => assert_implements(navigator.ml, 'missing navigator.ml')); }