tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit a5f6d62acb8d6be0e1f12fbd483d94fd4e8bc436
parent c2555944096afeaa8edacd7cf220f0280d90fc1e
Author: Enrico Galli <enrico.galli@intel.com>
Date:   Fri, 31 Oct 2025 08:57:19 +0000

Bug 1997014 [wpt PR 55743] - WebNN: Implement createExportableTensor(GPUDevice), a=testonly

Automatic update from web-platform-tests
WebNN: Implement createExportableTensor(GPUDevice)

Move GPUDevice from createContext to createExportableTensor. This
enables cross-device use cases.

Bug: 440128357
Change-Id: I15ce77a84ce2f057025d38861370e984770a2d86
Cq-Include-Trybots: luci.chromium.try:win11-blink-rel
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/7070083
Reviewed-by: Reilly Grant <reillyg@chromium.org>
Reviewed-by: ningxin hu <ningxin.hu@intel.com>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Commit-Queue: Enrico Galli <enrico.galli@intel.com>
Cr-Commit-Position: refs/heads/main@{#1537045}

--

wpt-commits: 19c9e8d74f7e105aae94f26c6d910297f7f520e5
wpt-pr: 55743

Diffstat:
Mtesting/web-platform/tests/webnn/conformance_tests/tensor.https.any.js | 95+++++++++++++++++++++++++++++++++----------------------------------------------
1 file changed, 39 insertions(+), 56 deletions(-)

diff --git a/testing/web-platform/tests/webnn/conformance_tests/tensor.https.any.js b/testing/web-platform/tests/webnn/conformance_tests/tensor.https.any.js @@ -38,7 +38,6 @@ const getDescriptorFromTensor = (tensor) => { shape: tensor.shape, readable: tensor.readable, writable: tensor.writable, - exportableToGPU: tensor.exportableToGPU, }; }; @@ -1405,7 +1404,7 @@ const testExportToGPU = (testName) => { // Initialize WebNN try { - mlContext = await navigator.ml.createContext(gpuDevice); + mlContext = await navigator.ml.createContext(contextOptions); } catch (e) { throw new AssertionError( `Unable to create context for ${variant} variant. ${e}`); @@ -1413,11 +1412,10 @@ const testExportToGPU = (testName) => { // Check if WebNN interop is supported. try { - let mlTensor = await mlContext.createTensor({ + let mlTensor = await mlContext.createExportableTensor({ dataType: 'float32', shape: shape, - exportableToGPU: true, - }); + }, gpuDevice); await mlContext.exportToGPU(mlTensor); } catch (e) { if (e.name === 'NotSupportedError') { @@ -1466,10 +1464,10 @@ const testExportToGPU = (testName) => { const mlTensorDescriptor = { dataType: 'float32', shape: shape, - exportableToGPU: true }; - const mlTensor = await mlContext.createTensor(mlTensorDescriptor); + const mlTensor = await mlContext.createExportableTensor(mlTensorDescriptor, + gpuDevice); const gpuTensorBuffer = await mlContext.exportToGPU(mlTensor); assert_equals( @@ -1487,7 +1485,6 @@ const testExportToGPU = (testName) => { const mlTensor = await mlContext.createTensor({ dataType: 'float32', shape: shape, - exportableToGPU: false, }); await promise_rejects_js(t, TypeError, mlContext.exportToGPU(mlTensor)); @@ -1502,11 +1499,10 @@ const testExportToGPU = (testName) => { const elementSize = Float32Array.BYTES_PER_ELEMENT; const shape = [maxBufferSizeOOB / elementSize]; - const mlTensor = await mlContext.createTensor({ + const mlTensor = await mlContext.createExportableTensor({ dataType: 'float32', - shape: shape, - 
exportableToGPU: true, - }); + shape: shape + }, gpuDevice); await mlContext.exportToGPU(mlTensor); }, `${testName} / export big tensor`) @@ -1519,12 +1515,11 @@ const testExportToGPU = (testName) => { const mlTensorDescriptor = { dataType: 'float32', shape: shape, - exportableToGPU: true, readable: true, writable: true }; - let mlTensor = await mlContext.createTensor(mlTensorDescriptor); + let mlTensor = await mlContext.createExportableTensor(mlTensorDescriptor, gpuDevice); const inputData = new Float32Array(sizeOfShape(shape)).fill(1.0); mlContext.writeTensor(mlTensor, inputData); @@ -1542,11 +1537,10 @@ const testExportToGPU = (testName) => { const mlTensorDescriptor = { dataType: 'float32', shape: shape, - exportableToGPU: true, writable: true }; - let mlTensor = await mlContext.createTensor(mlTensorDescriptor); + let mlTensor = await mlContext.createExportableTensor(mlTensorDescriptor, gpuDevice); const inputData = new Float32Array(sizeOfShape(shape)).fill(1.0); mlContext.writeTensor(mlTensor, inputData); @@ -1562,11 +1556,10 @@ const testExportToGPU = (testName) => { return; } - const mlTensor = await mlContext.createTensor({ + const mlTensor = await mlContext.createExportableTensor({ dataType: 'float32', shape: shape, - exportableToGPU: true, - }); + }, gpuDevice); await mlContext.exportToGPU(mlTensor); assert_throws_js( TypeError, @@ -1579,11 +1572,10 @@ const testExportToGPU = (testName) => { return; } - const mlTensor = await mlContext.createTensor({ + const mlTensor = await mlContext.createExportableTensor({ dataType: 'float32', shape: shape, - exportableToGPU: true, - }); + }, gpuDevice); // Second call rejects because the first export is still pending and multiple // exports aren’t allowed. @@ -1602,18 +1594,17 @@ const testExportToGPU = (testName) => { } // Initialize the tensor buffers from WebNN. 
- let mlTensorInput = await mlContext.createTensor({ + let mlTensorInput = await mlContext.createExportableTensor({ dataType: 'float32', shape: shape, - exportableToGPU: true, writable: true - }); + }, gpuDevice); const inputData1 = new Float32Array(sizeOfShape(shape)).fill(1.0); mlContext.writeTensor(mlTensorInput, inputData1); - let mlTensorOutput = await mlContext.createTensor( - {dataType: 'float32', shape: shape, exportableToGPU: true}); + let mlTensorOutput = await mlContext.createExportableTensor( + {dataType: 'float32', shape: shape}, gpuDevice); let gpuTensorBufferInput = await mlContext.exportToGPU(mlTensorInput); let gpuTensorBufferOutput = await mlContext.exportToGPU(mlTensorOutput); @@ -1646,22 +1637,20 @@ const testExportToGPU = (testName) => { } // Initialize the tensor buffers from WebNN. - let mlTensorInput = await mlContext.createTensor({ + let mlTensorInput = await mlContext.createExportableTensor({ dataType: 'float32', shape: shape, - exportableToGPU: true, writable: true - }); + }, gpuDevice); const inputData = new Float32Array(sizeOfShape(shape)).fill(1.0); mlContext.writeTensor(mlTensorInput, inputData); - let mlTensorOutput = await mlContext.createTensor({ + let mlTensorOutput = await mlContext.createExportableTensor({ dataType: 'float32', shape: shape, - exportableToGPU: true, readable: true - }); + }, gpuDevice); let gpuTensorBufferInput = await mlContext.exportToGPU(mlTensorInput); let gpuTensorBufferOutput = await mlContext.exportToGPU(mlTensorOutput); @@ -1688,18 +1677,17 @@ const testExportToGPU = (testName) => { } // Initialize the tensor buffers from WebNN. 
- let mlTensorInput = await mlContext.createTensor({ + let mlTensorInput = await mlContext.createExportableTensor({ dataType: 'float32', shape: shape, - exportableToGPU: true, - writable: true - }); + writable: true, + }, gpuDevice); const inputData = new Float32Array(sizeOfShape(shape)).fill(1.0); mlContext.writeTensor(mlTensorInput, inputData); - let mlTensorOutput = await mlContext.createTensor( - {dataType: 'float32', shape: shape, exportableToGPU: true}); + let mlTensorOutput = await mlContext.createExportableTensor( + {dataType: 'float32', shape: shape}, gpuDevice); let gpuTensorBufferInput = await mlContext.exportToGPU(mlTensorInput); let gpuTensorBufferOutput = await mlContext.exportToGPU(mlTensorOutput); @@ -1737,14 +1725,13 @@ const testExportToGPU = (testName) => { return; } - let anotherMLContext = await navigator.ml.createContext(gpuDevice); + let anotherMLContext = await navigator.ml.createContext(contextOptions); - let mlTensor = await anotherMLContext.createTensor({ + let mlTensor = await anotherMLContext.createExportableTensor({ dataType: 'float32', shape: shape, - exportableToGPU: true, - writable: true - }); + writable: true, + }, gpuDevice); const inputData = new Float32Array(sizeOfShape(shape)).fill(1.0); anotherMLContext.writeTensor(mlTensor, inputData); @@ -1763,15 +1750,14 @@ const testExportToGPU = (testName) => { let anotherGPUAdapter = await navigator.gpu.requestAdapter(); let anotherGPUDevice = await anotherGPUAdapter.requestDevice(); - let anotherMLContext = await navigator.ml.createContext(anotherGPUDevice); + let anotherMLContext = await navigator.ml.createContext(contextOptions); - let mlTensor = await anotherMLContext.createTensor({ + let mlTensor = await anotherMLContext.createExportableTensor({ dataType: 'float32', shape: shape, - exportableToGPU: true, readable: true, - writable: true - }); + writable: true, + }, anotherGPUDevice); const inputData = new Float32Array(sizeOfShape(shape)).fill(1.0); 
anotherMLContext.writeTensor(mlTensor, inputData); @@ -1782,8 +1768,7 @@ const testExportToGPU = (testName) => { gpuTensorBuffer.destroy(); - await promise_rejects_dom( - t, 'InvalidStateError', anotherMLContext.readTensor(mlTensor)); + await assert_tensor_data_equals(anotherMLContext, mlTensor, inputData); }, `${testName} / destroy device after export`); promise_test(async t => { @@ -1793,16 +1778,14 @@ const testExportToGPU = (testName) => { let anotherGPUAdapter = await navigator.gpu.requestAdapter(); let anotherGPUDevice = await anotherGPUAdapter.requestDevice(); - let anotherMLContext = await navigator.ml.createContext(anotherGPUDevice); + let anotherMLContext = await navigator.ml.createContext(contextOptions); - let mlTensor = await anotherMLContext.createTensor({ + let mlTensor = await anotherMLContext.createExportableTensor({ dataType: 'float32', shape: shape, - exportableToGPU: true, readable: true, writable: true - }); - + }, anotherGPUDevice); const inputData = new Float32Array(sizeOfShape(shape)).fill(1.0); anotherMLContext.writeTensor(mlTensor, inputData);