tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit f7175e3f7e36da4e130351bf57b4c278b1bc2d3d
parent 9816ee147508d064e9911db22965f5dff5164110
Author: Mike Wasserman <msw@chromium.org>
Date:   Thu,  6 Nov 2025 21:33:03 +0000

Bug 1997482 [wpt PR 55781] - Prompt API: Move WPTs to web_tests/external/wpt/ai/language-model, a=testonly

Automatic update from web-platform-tests
Prompt API: Move WPTs to web_tests/external/wpt/ai/language-model

Basic initial export of internal tests; future refinements planned.
Reuse shared utils; cleanup LanguageModel utils and individual tests.
Use external image and media files, delete internal ones.

Bug: 452743283
Change-Id: I01757bf1e50c3c5c865f43ca5ee4cf5f7f9d920d
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6648569
Commit-Queue: Mike Wasserman <msw@chromium.org>
Reviewed-by: Nathan Memmott <memmott@chromium.org>
Reviewed-by: Mike Wasserman <msw@chromium.org>
Auto-Submit: Mike Wasserman <msw@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1538290}

--

wpt-commits: 3b6950649d63d2eb5fa402bc618ef4e3a5d663a5
wpt-pr: 55781

Diffstat:
Atesting/web-platform/tests/ai/language-model/language-model-abort.tentative.https.window.js | 39+++++++++++++++++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-availability-available-multimodal.tentative.https.window.js | 46++++++++++++++++++++++++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-availability-available.tentative.https.window.js | 58++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-availability.tentative.https.window.js | 51+++++++++++++++++++++++++++++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-clone.tentative.https.window.js | 37+++++++++++++++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-create-multimodal.tentative.https.window.js | 55+++++++++++++++++++++++++++++++++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-create-user-activation.tentative.https.window.js | 23+++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-create.tentative.https.window.js | 134+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-destroy.tentative.https.window.js | 47+++++++++++++++++++++++++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-from-detached-iframe.tentative.https.window.js | 24++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-iframe.tentative.https.html | 59+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-measure-input-usage.tentative.https.window.js | 20++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-params.tentative.https.window.js | 17+++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-prompt-context-destroyed.tentative.https.window.js | 19+++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-prompt-gc.tentative.https.window.js | 19+++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-prompt-monitor-callback-exception.tentative.https.window.js | 25+++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-prompt-multimodal.tentative.https.window.js | 310+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-prompt-streaming-gc.tentative.https.window.js | 28++++++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-prompt-streaming.tentative.https.window.js | 32++++++++++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-prompt.tentative.https.window.js | 72++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-quota-exceeded.tentative.https.window.js | 20++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/language-model-response-json-schema.tentative.https.window.js | 90+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Atesting/web-platform/tests/ai/language-model/resources/iframe-helper.html | 26++++++++++++++++++++++++++
Mtesting/web-platform/tests/ai/resources/util.js | 13+++++++++++++
24 files changed, 1264 insertions(+), 0 deletions(-)

diff --git a/testing/web-platform/tests/ai/language-model/language-model-abort.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-abort.tentative.https.window.js @@ -0,0 +1,39 @@ +// META: title=Language Model Abort +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +promise_test(async t => { + await testAbortPromise(t, signal => { + return createLanguageModel({ + signal: signal + }); + }); +}, "Aborting LanguageModel.create()."); + +promise_test(async t => { + const session = await createLanguageModel(); + await testAbortPromise(t, signal => { + return session.clone({ + signal: signal + }); + }); +}, "Aborting LanguageModel.clone()."); + +promise_test(async t => { + const session = await createLanguageModel(); + await testAbortPromise(t, signal => { + return session.prompt(kTestPrompt, { signal: signal }); + }); +}, "Aborting LanguageModel.prompt()."); + +promise_test(async t => { + const session = await createLanguageModel(); + await testAbortReadableStream(t, signal => { + return session.promptStreaming( + kTestPrompt, { signal: signal } + ); + }); +}, "Aborting LanguageModel.promptStreaming()."); diff --git a/testing/web-platform/tests/ai/language-model/language-model-availability-available-multimodal.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-availability-available-multimodal.tentative.https.window.js @@ -0,0 +1,46 @@ +// META: title=Language Model Availability Available Multimodal +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +// These tests depend on some level of model availability, whereas those in +// language-model-api-availability-available.https.window.js have no availability requirements. 
+ +promise_test(async () => { + await ensureLanguageModel({expectedInputs: [{type: 'audio'}]}); +}, 'LanguageModel.availability() is available with multimodal audio option'); + +promise_test(async () => { + await ensureLanguageModel({expectedInputs: [{type: 'image'}]}); +}, 'LanguageModel.availability() is available with multimodal image option'); + +promise_test(async () => { + await ensureLanguageModel({expectedInputs: [{type: 'audio'}, {type: 'image'}]}); + const kSupportedCreateOptions = [ + { expectedInputs: [{type: 'audio'}] }, + { expectedInputs: [{type: 'image'}] }, + { expectedInputs: [{type: 'audio'}, {type: 'image'}, {type: 'text'}] }, + { expectedInputs: [{type: 'audio', languages: ['en']}] }, + { expectedInputs: [{type: 'image', languages: ['en']}] }, + { expectedInputs: [{type: 'audio', languages: ['en']}, + {type: 'image', languages: ['en']}, + {type: 'text', languages: ['en']}] }, + ]; + for (const options of kSupportedCreateOptions) { + const availability = await LanguageModel.availability(options); + assert_in_array(availability, kValidAvailabilities, JSON.stringify(options)); + } +}, 'LanguageModel.availability() returns available with supported multimodal options'); + +promise_test(async () => { + await ensureLanguageModel({expectedInputs: [{type: 'audio'}, {type: 'image'}]}); + const kUnsupportedCreateOptions = [ + { expectedInputs: [{type: 'audio', languages: ['unk']}] }, // Language not supported. + { expectedInputs: [{type: 'image', languages: ['unk']}] }, // Language not supported. 
+ ]; + for (const options of kUnsupportedCreateOptions) { + assert_equals(await LanguageModel.availability(options), 'unavailable', JSON.stringify(options)); + } +}, 'LanguageModel.availability() returns unavailable with unsupported multimodal options'); diff --git a/testing/web-platform/tests/ai/language-model/language-model-availability-available.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-availability-available.tentative.https.window.js @@ -0,0 +1,58 @@ +// META: title=Language Model Availability Available +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +// These tests depend on some level of model availability, whereas those in +// language-model-api-availability-available.https.window.js have no availability requirements. + +promise_test(async () => { + await ensureLanguageModel(); +}, 'LanguageModel.availability() is available with no options'); + +promise_test(async () => { + await ensureLanguageModel(); + // An array of supported test option values. + const kCreateOptionsSpec = [ + { topK: [1, 1.5, 2, 3, 99] }, // Nominally int 1-10+. + { temperature: [0, 0.5, 1, 2] }, // Nominally float 0-1. + { expectedInputs: [undefined, [], [{type: 'text'}], [{type: 'text', languages: ['en']}]] }, + { expectedOutputs: [undefined, [], [{type: 'text'}], [{type: 'text', languages: ['en']}]] }, + ]; + for (const options of generateOptionCombinations(kCreateOptionsSpec)) { + const availability = await LanguageModel.availability(options); + assert_in_array(availability, kValidAvailabilities, JSON.stringify(options)); + } +}, 'LanguageModel.availability() returns available with supported options'); + +promise_test(async () => { + await ensureLanguageModel(); + // An array of unsupported test options. + const kUnsupportedCreateOptions = [ + { expectedInputs: [{type: 'text', languages: ['unk']}] }, // Language not supported. 
+ { expectedOutputs: [{type: 'text', languages: ['unk']}] }, // Language not supported. + { expectedOutputs: [{type: 'image'}] }, // Type not supported. + { expectedOutputs: [{type: 'audio'}] }, // Type not supported. + { topK: 0, temperature: 0.5 }, // zero topK not supported. + { topK: -3, temperature: 0.5 }, // negative topK not supported. + { topK: 3, temperature: -0.5 }, // negative temperature not supported. + { topK: 3 }, // topK without temperature not supported. + { temperature: 0.5 }, // temperature without topK not supported. + ]; + for (const options of kUnsupportedCreateOptions) { + assert_equals(await LanguageModel.availability(options), 'unavailable', JSON.stringify(options)); + } +}, 'LanguageModel.availability() returns unavailable with unsupported options'); + +promise_test(async t => { + await ensureLanguageModel(); + // An array of invalid test options. + const kInvalidCreateOptions = [ + { expectedInputs: [{type: 'soup'}] }, // Type not supported. + ]; + for (const options of kInvalidCreateOptions) { + await promise_rejects_js(t, TypeError, LanguageModel.availability(options)); + } +}, 'LanguageModel.availability() rejects with invalid options'); diff --git a/testing/web-platform/tests/ai/language-model/language-model-availability.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-availability.tentative.https.window.js @@ -0,0 +1,51 @@ +// META: title=Language Model Availability +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +// These tests have no availability requirements, they only test the API shape. 
+ +promise_test(async () => { + assert_true(!!LanguageModel); + assert_equals(typeof LanguageModel.availability, 'function'); +}, 'LanguageModel.availability() is defined'); + +promise_test(async () => { + const availability = await LanguageModel.availability(); + assert_in_array(availability, kValidAvailabilities); +}, 'LanguageModel.availability() returns a valid value with no options'); + +promise_test(async (t) => { + return promise_rejects_js(t, RangeError, LanguageModel.availability({ + expectedInputs: [{type: 'text', languages: ['en-abc-invalid']}] + })); +}, 'LanguageModel.availability() rejects when given invalid language tags'); + +promise_test(async () => { + // An array of plausible test option values. + const kCreateOptionsSpec = [ + {topK: [undefined, -2, 0, 1, 1.5, 3, 99]}, // Nominally int 1-10+. + {temperature: [undefined, -0.5, 0, 0.6, 1, 7]}, // Nominally float 0-1. + {expectedInputs: [undefined, [], [{type: 'text'}], + [{type: 'text'}, {type: 'audio'}, {type: 'image'}], + [{type: 'text', languages: ['en', 'ja', 'ko']}], + [{type: 'audio', languages: ['es']}, {type: 'image', languages: ['fr']}], + ]}, + {expectedOutputs: [undefined, [], [{type: 'text'}], + [{type: 'text'}, {type: 'audio'}, {type: 'image'}], + [{type: 'text', languages: ['en', 'ja', 'ko']}], + [{type: 'audio', languages: ['es']}, {type: 'image', languages: ['fr']}], + ]}, + {initialPrompts: [undefined, [], [{role: 'system', content: 'have fun'}], + [{role: 'system', content: 'have fun'}, {role: 'user', content: 'be good'}], + [{role: 'system', content: 'be good'}, {role: 'system', content: 'be bad'}], + [{role: 'system', content: 'have fun'}, {role: 'system', content: 'be bad'}], + ]}, + ]; + for (const options of generateOptionCombinations(kCreateOptionsSpec)) { + const availability = await LanguageModel.availability(options); + assert_in_array(availability, kValidAvailabilities, JSON.stringify(options)); + } +}, 'LanguageModel.availability() returns a valid value with plausible 
options'); diff --git a/testing/web-platform/tests/ai/language-model/language-model-clone.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-clone.tentative.https.window.js @@ -0,0 +1,37 @@ +// META: title=Language Model Clone +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +promise_test(async () => { + await ensureLanguageModel(); + + // Start a new session and test it. + const session = await createLanguageModel(); + const result = await session.prompt(kTestPrompt); + assert_equals(typeof result, 'string'); + + // Clone a session and test it. + const cloned_session = await session.clone(); + assert_equals( + cloned_session.inputQuota, session.inputQuota, + 'cloned session should have the same inputQuota as the original session.' + ); + assert_equals( + cloned_session.inputUsage, session.inputUsage, + 'cloned session should have the same inputUsage as the original session.' + ); + assert_equals( + cloned_session.topK, session.topK, + 'cloned session should have the same topK as the original session.' + ); + assert_equals( + cloned_session.temperature, session.temperature, + 'cloned session should have the same temperature as the original session.' 
+ ); + + const clone_result = await cloned_session.prompt(kTestPrompt); + assert_equals(typeof clone_result, 'string'); +}); diff --git a/testing/web-platform/tests/ai/language-model/language-model-create-multimodal.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-create-multimodal.tentative.https.window.js @@ -0,0 +1,55 @@ +// META: title=Language Model Create Multimodal +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +const kValidImagePath = '/images/computer.jpg'; +const kValidAudioPath = '/media/speech.wav'; + +promise_test(async () => { + await ensureLanguageModel({expectedInputs: [{type: 'audio'}, {type: 'image'}]}); + const kSupportedCreateOptions = [ + { expectedInputs: [{type: 'audio'}] }, + { expectedInputs: [{type: 'image'}] }, + { expectedInputs: [{type: 'audio'}, {type: 'image'}, {type: 'text'}] }, + { expectedInputs: [{type: 'audio', languages: ['en']}] }, + { expectedInputs: [{type: 'image', languages: ['en']}] }, + { expectedInputs: [{type: 'audio', languages: ['en']}, + {type: 'image', languages: ['en']}, + {type: 'text', languages: ['en']}] }, + ]; + for (const options of kSupportedCreateOptions) { + assert_true(!!await createLanguageModel(options), JSON.stringify(options)); + } +}, 'LanguageModel.create() succeeds with supported multimodal type and language options'); + +promise_test(async () => { + await ensureLanguageModel({expectedInputs: [{type: 'audio'}, {type: 'image'}]}); + const audioContent = { type:'audio', value: await (await fetch(kValidAudioPath)).blob() }; + const imageContent = { type:'image', value: await (await fetch(kValidImagePath)).blob() }; + const kSupportedCreateOptions = [ + { expectedInputs: [{type: 'audio'}], initialPrompts: [{role: 'user', content: [audioContent]}] }, + { expectedInputs: [{type: 'image'}], initialPrompts: [{role: 'user', content: [imageContent]}] }, + { expectedInputs: [{type: 'audio'}, 
{type: 'image'}], + initialPrompts: [{role: 'user', content: [audioContent, imageContent]}] }, + ]; + for (const options of kSupportedCreateOptions) { + // TODO(crbug.com/419599702): Ensure the model actually gets initialPrompts. + assert_true(!!await createLanguageModel(options), JSON.stringify(options)); + } +}, 'LanguageModel.create() succeeds with supported multimodal initialPrompts'); + +promise_test(async t => { + await ensureLanguageModel({expectedInputs: [{type: 'audio'}, {type: 'image'}]}); + const audioContent = { type:'audio', value: await (await fetch(kValidAudioPath)).blob() }; + const imageContent = { type:'image', value: await (await fetch(kValidImagePath)).blob() }; + const kUnsupportedCreateOptions = [ + { expectedInputs: [{type: 'audio'}], initialPrompts: [{role: 'user', content: [imageContent]}] }, + { expectedInputs: [{type: 'image'}], initialPrompts: [{role: 'user', content: [audioContent]}] }, + ]; + for (const options of kUnsupportedCreateOptions) { + await promise_rejects_dom(t, 'NotSupportedError', createLanguageModel(options), JSON.stringify(options)); + } +}, 'LanguageModel.create() fails with unsupported multimodal initialPrompts'); diff --git a/testing/web-platform/tests/ai/language-model/language-model-create-user-activation.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-create-user-activation.tentative.https.window.js @@ -0,0 +1,23 @@ +// META: title=Language Model Create User Activation +// META: script=/resources/testdriver.js +// META: script=/resources/testdriver-vendor.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +// Mocked model download state may be shared between test cases in the same file +// (see e.g. `EchoAIManagerImpl`), so this test case is kept in a separate file. +// TODO(crbug.com/390246212): Support model state controls for WPTs. +promise_test(async t => { + // Create requires user activation when availability is 'downloadable'. 
+ assert_implements_optional(await LanguageModel.availability() == 'downloadable'); + assert_false(navigator.userActivation.isActive); + await promise_rejects_dom(t, 'NotAllowedError', LanguageModel.create()); + await test_driver.bless('LanguageModel.create', LanguageModel.create); + + // Create does not require user activation when availability is 'available'. + assert_equals(await LanguageModel.availability(), 'available'); + assert_false(navigator.userActivation.isActive); + await LanguageModel.create(); +}, 'Create requires user activation when availability is "downloadable"'); diff --git a/testing/web-platform/tests/ai/language-model/language-model-create.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-create.tentative.https.window.js @@ -0,0 +1,134 @@ +// META: title=Language Model Create +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +promise_test(async t => { + await ensureLanguageModel(); +}, 'Ensure sessions can be created'); + +promise_test(async t => { + let session = await createLanguageModel(); + assert_true(session instanceof LanguageModel); + + assert_equals(typeof session.prompt, 'function'); + assert_equals(typeof session.promptStreaming, 'function'); + assert_equals(typeof session.append, 'function'); + assert_equals(typeof session.measureInputUsage, 'function'); + assert_equals(typeof session.clone, 'function'); + assert_equals(typeof session.destroy, 'function'); + + assert_equals(typeof session.inputUsage, 'number'); + assert_equals(typeof session.inputQuota, 'number'); + assert_equals(typeof session.topK, 'number'); + assert_equals(typeof session.temperature, 'number'); + + assert_equals(typeof session.onquotaoverflow, 'object'); +}, 'LanguageModel.create() returns a valid object with default options'); + +promise_test(async t => { + await testMonitor(createLanguageModel); +}, 'LanguageModel.create() notifies its monitor on 
downloadprogress'); + +promise_test(async t => { + await testCreateMonitorWithAbort(t, createLanguageModel); +}, 'Progress events are not emitted after aborted.'); + +promise_test(async t => { + let session = await createLanguageModel({ topK: 3, temperature: 0.6 }); + assert_true(!!session); +}, 'Create with topK and temperature'); + +promise_test(async t => { + let result = createLanguageModel({ topK: 3 }); + await promise_rejects_dom( + t, 'NotSupportedError', result, + 'Initializing a new session must either specify both topK and temperature, or neither of them.'); +}, 'Create with only topK should fail'); + +promise_test(async t => { + let result = createLanguageModel({ temperature: 0.5 }); + await promise_rejects_dom( + t, 'NotSupportedError', result, + 'Initializing a new session must either specify both topK and temperature, or neither of them.'); +}, 'Create with only temperature should fail'); + +promise_test(async t => { + let result = createLanguageModel({ topK: 3, temperature: -0.5 }); + await promise_rejects_js(t, RangeError, result); +}, 'Create with negative temperature should fail'); + +promise_test(async t => { + let result = createLanguageModel({ topK: 0, temperature: 0.5 }); + await promise_rejects_js(t, RangeError, result); +}, 'Create with zero topK should fail'); + +promise_test(async t => { + let result = createLanguageModel({ topK: -2, temperature: 0.5 }); + await promise_rejects_js(t, RangeError, result); +}, 'Create with negative topK should fail'); + +promise_test(async t => { + let session = await createLanguageModel({ topK: 1.5, temperature: 0.5 }); + assert_true(!!session); + assert_equals(session.topK, 1); +}, 'Create with fractional topK should be rounded down'); + +promise_test(async t => { + let session = await createLanguageModel({ + initialPrompts: [ + {role: 'system', content: 'you are a robot'}, + {role: 'user', content: 'hello'}, {role: 'assistant', content: 'hello'} + ] + }); + assert_true(!!session); +}, 'Create with 
initialPrompts'); + +promise_test(async t => { + let session = await createLanguageModel({initialPrompts: []}); + assert_true(!!session); +}, 'Create with empty initialPrompts'); + +promise_test(async t => { + let session = await createLanguageModel({ + initialPrompts: [ + {role: 'user', content: 'hello'}, {role: 'assistant', content: 'hello'} + ] + }); + assert_true(!!session); +}, 'Create with initialPrompts without system role'); + +promise_test(async t => { + let result = createLanguageModel({ + initialPrompts: [ + {role: 'user', content: 'hello'}, {role: 'assistant', content: 'hello'}, + {role: 'system', content: 'you are a robot'} + ] + }); + await promise_rejects_js(t, TypeError, result); +}, 'Create with system role not ordered first should fail'); + +promise_test(async t => { + let result = createLanguageModel({ + initialPrompts: [ + {role: 'system', content: 'you are a robot'}, + {role: 'system', content: 'you are a kitten'}, + {role: 'user', content: 'hello'}, {role: 'assistant', content: 'hello'} + ] + }); + await promise_rejects_js(t, TypeError, result); +}, 'Create multiple system role entries should fail'); + +promise_test(async (t) => { + return promise_rejects_js(t, RangeError, LanguageModel.create({ + expectedInputs: [{type: 'text', languages: ['en-abc-invalid']}] + })); +}, 'LanguageModel.create() rejects when given invalid language tags'); + +promise_test(async (t) => { + let session = await LanguageModel.create( + {expectedInputs: [{type: 'text', languages: ['EN']}]}); + assert_true(!!session); +}, 'LanguageModel.create() canonicalizes language tags'); diff --git a/testing/web-platform/tests/ai/language-model/language-model-destroy.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-destroy.tentative.https.window.js @@ -0,0 +1,47 @@ +// META: title=Language Model Destroy +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + 
+promise_test(async t => { + await ensureLanguageModel(); + + // Start a new session. + const session = await createLanguageModel(); + + // Calling `session.destroy()` immediately after `session.prompt()` will + // trigger the "The model execution session has been destroyed." exception. + let result = session.prompt(kTestPrompt); + session.destroy(); + await promise_rejects_dom( + t, "InvalidStateError", result, + "The model execution session has been destroyed." + ); + + // Calling `session.prompt()` after `session.destroy()` will trigger the + // "The model execution session has been destroyed." exception. + await promise_rejects_dom( + t, "InvalidStateError", session.prompt(kTestPrompt), + "The model execution session has been destroyed." + ); + + // After destroying the session, the properties should be still accessible. + assert_equals( + typeof session.inputQuota, "number", + "inputQuota must be accessible." + ); + assert_equals( + typeof session.inputUsage, "number", + "inputUsage must be accessible." + ); + assert_equals( + typeof session.temperature, "number", + "temperature must be accessible." + ); + assert_equals( + typeof session.topK, "number", + "topK must be accessible." + ); +}); diff --git a/testing/web-platform/tests/ai/language-model/language-model-from-detached-iframe.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-from-detached-iframe.tentative.https.window.js @@ -0,0 +1,24 @@ +// META: title=Language Model From Detached Iframe +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +promise_test(async t => { + assert_true(!!LanguageModel); + // Create the iframe and append it to the document. 
+ let iframe = document.createElement("iframe"); + document.childNodes[document.childNodes.length - 1].appendChild(iframe); + let iframeWindow = iframe.contentWindow; + iframeWindow.languageModel = iframeWindow.LanguageModel; + let iframeDOMException = iframeWindow.DOMException; + // Detach the iframe. + iframe.remove(); + // Calling `LanguageModel.availability()` from an invalid script state will trigger + // the "The execution context is not valid." exception. + await promise_rejects_dom( + t, 'InvalidStateError', iframeDOMException, iframeWindow.languageModel.availability(), + "The promise should be rejected with InvalidStateError if the execution context is invalid." + ); +}); diff --git a/testing/web-platform/tests/ai/language-model/language-model-iframe.tentative.https.html b/testing/web-platform/tests/ai/language-model/language-model-iframe.tentative.https.html @@ -0,0 +1,59 @@ +<!DOCTYPE html> +<meta name="timeout" content="long"> +<script src="/resources/testdriver.js"></script> +<script src='/resources/testdriver-vendor.js'></script> +<script src="/resources/testharness.js"></script> +<script src="/resources/testharnessreport.js"></script> +<script src="/common/get-host-info.sub.js"></script> +<script src="../resources/util.js"></script> +<body></body> +<script> +'use strict'; + +const { HTTPS_ORIGIN, HTTPS_NOTSAMESITE_ORIGIN } = get_host_info(); +const PATH = location.pathname.substring(0, location.pathname.lastIndexOf('/') + 1); +const IFRAME_PATH = PATH + 'resources/iframe-helper.html'; + +promise_test(async t => { + const src = HTTPS_NOTSAMESITE_ORIGIN + IFRAME_PATH; + const iframe = await load_iframe(src, /*permissionPolicy=*/''); + await promise_rejects_dom(t, 'NotAllowedError', run_iframe_test(iframe, 'LanguageModelCreate')); + iframe.remove(); +}, 'Throw a \'NotAllowedError\' when creating LanguageModel within cross-origin iframe'); + +promise_test(async t => { + const src = HTTPS_NOTSAMESITE_ORIGIN + IFRAME_PATH; + const iframe = await 
load_iframe(src, /*permissionPolicy=*/'language-model'); + assert_equals(await run_iframe_test(iframe, 'LanguageModelCreate'), 'Success'); + iframe.remove(); +}, 'LanguageModel can be created within cross-origin iframe with permission policy'); + +promise_test(async t => { + const src = HTTPS_ORIGIN + IFRAME_PATH; + const iframe = await load_iframe(src, /*permissionPolicy=*/''); + assert_equals(await run_iframe_test(iframe, 'LanguageModelCreate'), 'Success'); + iframe.remove(); +}, 'LanguageModel can be used within same-origin iframe'); + +promise_test(async t => { + const src = HTTPS_NOTSAMESITE_ORIGIN + IFRAME_PATH; + const iframe = await load_iframe(src, /*permissionPolicy=*/''); + assert_equals(await run_iframe_test(iframe, 'LanguageModelAvailability'), 'unavailable'); + iframe.remove(); +}, 'LanguageModel is unavailable within cross-origin iframe'); + +promise_test(async t => { + const src = HTTPS_NOTSAMESITE_ORIGIN + IFRAME_PATH; + const iframe = await load_iframe(src, /*permissionPolicy=*/'language-model'); + assert_in_array(await run_iframe_test(iframe, 'LanguageModelAvailability'), kAvailableAvailabilities); + iframe.remove(); +}, 'LanguageModel is available within cross-origin iframe with permission policy'); + +promise_test(async t => { + const src = HTTPS_ORIGIN + IFRAME_PATH; + const iframe = await load_iframe(src, /*permissionPolicy=*/''); + assert_in_array(await run_iframe_test(iframe, 'LanguageModelAvailability'), kAvailableAvailabilities); + iframe.remove(); +}, 'LanguageModel is available within same-origin iframe'); + +</script> diff --git a/testing/web-platform/tests/ai/language-model/language-model-measure-input-usage.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-measure-input-usage.tentative.https.window.js @@ -0,0 +1,20 @@ +// META: title=Language Model Measure Input Usage +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + 
+promise_test(async t => { + await ensureLanguageModel(); + + // Start a new session. + const session = await createLanguageModel(); + + // Test the measureInputUsage() API. + let result = await session.measureInputUsage("This is a prompt."); + assert_true( + typeof result === "number" && result > 0, + "The counting result should be a positive number." + ); +}); diff --git a/testing/web-platform/tests/ai/language-model/language-model-params.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-params.tentative.https.window.js @@ -0,0 +1,17 @@ +// META: title=Language Model Params +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +promise_test(async () => { + await ensureLanguageModel(); + + const params = await LanguageModel.params(); + assert_true(!!params); + assert_equals(typeof params.maxTopK, "number"); + assert_equals(typeof params.defaultTopK, "number"); + assert_equals(typeof params.maxTemperature, "number"); + assert_equals(typeof params.defaultTemperature, "number"); +}); diff --git a/testing/web-platform/tests/ai/language-model/language-model-prompt-context-destroyed.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-prompt-context-destroyed.tentative.https.window.js @@ -0,0 +1,19 @@ +// META: title=Language Model Prompt Context Destroyed +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +promise_test(async t => { + assert_true(!!LanguageModel); + // Create the iframe and append it to the document. + const iframe = document.createElement('iframe'); + document.childNodes[document.childNodes.length - 1].appendChild(iframe); + + await test_driver.bless(); + const session = await iframe.contentWindow.LanguageModel.create(); + session.prompt(kTestPrompt); + // Detach the iframe. 
+ iframe.remove(); +}, 'Detaching iframe while running prompt() should not cause memory leak'); diff --git a/testing/web-platform/tests/ai/language-model/language-model-prompt-gc.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-prompt-gc.tentative.https.window.js @@ -0,0 +1,19 @@ +// META: title=Language Model Prompt GC +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +promise_test(async () => { + await ensureLanguageModel(); + + // Start a new session. + const session = await createLanguageModel(); + // Test the prompt API. + const promptPromise = session.prompt(kTestPrompt); + // Run GC. + gc(); + const result = await promptPromise; + assert_equals(typeof result, "string"); +}, 'Prompt API must continue even after GC has been performed.'); diff --git a/testing/web-platform/tests/ai/language-model/language-model-prompt-monitor-callback-exception.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-prompt-monitor-callback-exception.tentative.https.window.js @@ -0,0 +1,25 @@ +// META: title=Language Model Prompt Monitor Callback Exception +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +// Test that the exception from the callback will be re-thrown by the session +// creation, and the session won't be created. +promise_test(async t => { + // Make sure the model availability is `downloadable`. + const availability = await LanguageModel.availability(); + if (availability === "downloadable") { + const error = new Error("test"); + const sessionPromise = createLanguageModel({ + // Start a new session with callback that will throw error. 
+ monitor(m) { + throw error; + } + }); + await promise_rejects_exactly( + t, error, sessionPromise + ); + } +}); diff --git a/testing/web-platform/tests/ai/language-model/language-model-prompt-multimodal.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-prompt-multimodal.tentative.https.window.js @@ -0,0 +1,310 @@ +// META: title=Language Model Prompt Multimodal +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +const kPrompt = 'describe this'; +const kValidImagePath = '/images/computer.jpg'; +const kValidAudioPath = '/media/speech.wav'; +const kValidSVGImagePath = '/images/pattern.svg'; +const kValidVideoPath = '/media/test.webm'; + +const kImageOptions = {expectedInputs: [{type: 'image'}]}; +const kAudioOptions = {expectedInputs: [{type: 'audio'}]}; + +function messageWithContent(prompt, type, value) { + return [{ + role: 'user', + content: [{type: 'text', value: prompt}, {type: type, value: value}] + }]; +} + +/***************************************** + * General tests + *****************************************/ + +promise_test(async t => { + await ensureLanguageModel(kImageOptions); + const newImage = new Image(); + newImage.src = kValidImagePath; + const session = await createLanguageModel(kImageOptions); + // TODO(crbug.com/409615288): Expect a TypeError according to the spec. 
+ return promise_rejects_dom( + t, 'SyntaxError', + session.prompt(messageWithContent(kPrompt, 'text', newImage))); +}, 'Prompt with type:"text" and image content should reject'); + +promise_test(async t => { + await ensureLanguageModel(kImageOptions); + const newImage = new Image(); + newImage.src = kValidImagePath; + const session = await createLanguageModel(kImageOptions); + return promise_rejects_dom(t, 'NotSupportedError', session.prompt([ + {role: 'assistant', content: [{type: 'image', value: newImage}]} + ])); +}, 'Prompt with assistant role should reject with multimodal input'); + +/***************************************** + * Image tests + *****************************************/ + +promise_test(async (t) => { + await ensureLanguageModel(); + const newImage = new Image(); + newImage.src = kValidImagePath; + const session = await createLanguageModel(); + return promise_rejects_dom( + t, 'NotSupportedError', + session.prompt(messageWithContent(kPrompt, 'image', newImage))); +}, 'Prompt image without `image` expectedInput'); + +promise_test(async () => { + const blob = await (await fetch(kValidImagePath)).blob(); + const options = { + expectedInputs: [{type: 'image'}], + initialPrompts: messageWithContent(kPrompt, 'image', blob) + }; + await ensureLanguageModel(options); + const session = await LanguageModel.create(options); + const tokenLength = await session.measureInputUsage(options.initialPrompts); + assert_greater_than(tokenLength, 0); + assert_equals(session.inputUsage, tokenLength); + assert_regexp_match( + await session.prompt([{role: 'system', content: ''}]), + /<image>/); +}, 'Test Image initialPrompt'); + +promise_test(async () => { + await ensureLanguageModel(kImageOptions); + const blob = await (await fetch(kValidImagePath)).blob(); + const session = await createLanguageModel(kImageOptions); + const result = + await session.prompt(messageWithContent(kPrompt, 'image', blob)); + assert_regexp_match(result, /<image>/); +}, 'Prompt with Blob image 
content'); + +promise_test(async () => { + await ensureLanguageModel(kImageOptions); + const blob = await (await fetch(kValidImagePath)).blob(); + const bitmap = await createImageBitmap(blob); + const session = await createLanguageModel(kImageOptions); + const result = + await session.prompt(messageWithContent(kPrompt, 'image', bitmap)); + assert_regexp_match(result, /<image>/); +}, 'Prompt with ImageBitmap image content'); + +promise_test(async () => { + await ensureLanguageModel(kImageOptions); + const blob = await (await fetch(kValidImagePath)).blob(); + const bitmap = await createImageBitmap(blob); + const frame = new VideoFrame(bitmap, {timestamp: 1}); + const session = await createLanguageModel(kImageOptions); + const result = + await session.prompt(messageWithContent(kPrompt, 'image', frame)); + frame.close(); // Avoid JS garbage collection warning. + assert_regexp_match(result, /<image>/); +}, 'Prompt with VideoFrame image content'); + +promise_test(async () => { + await ensureLanguageModel(kImageOptions); + const canvas = new OffscreenCanvas(512, 512); + // Requires a context to convert to a bitmap. 
+ var context = canvas.getContext('2d'); + context.fillRect(10, 10, 200, 200); + const session = await createLanguageModel(kImageOptions); + const result = + await session.prompt(messageWithContent(kPrompt, 'image', canvas)); + assert_regexp_match(result, /<image>/); +}, 'Prompt with OffscreenCanvas image content'); + +promise_test(async () => { + await ensureLanguageModel(kImageOptions); + const session = await createLanguageModel(kImageOptions); + const result = await session.prompt( + messageWithContent(kPrompt, 'image', new ImageData(256, 256))); + assert_regexp_match(result, /<image>/); +}, 'Prompt with ImageData image content'); + +promise_test(async () => { + await ensureLanguageModel(kImageOptions); + const newImage = new Image(); + newImage.src = kValidImagePath; + const session = await createLanguageModel(kImageOptions); + const result = + await session.prompt(messageWithContent(kPrompt, 'image', newImage)); + assert_regexp_match(result, /<image>/); +}, 'Prompt with HTMLImageElement image content'); + +promise_test(async () => { + await ensureLanguageModel(kImageOptions); + var canvas = document.createElement('canvas'); + canvas.width = 1224; + canvas.height = 768; + const session = await createLanguageModel(kImageOptions); + const result = + await session.prompt(messageWithContent(kPrompt, 'image', canvas)); + assert_regexp_match(result, /<image>/); +}, 'Prompt with HTMLCanvasElement image content'); + +promise_test(async () => { + await ensureLanguageModel(kImageOptions); + const imageData = await fetch(kValidImagePath); + const session = await createLanguageModel(kImageOptions); + const result = await session.prompt( + messageWithContent(kPrompt, 'image', await imageData.arrayBuffer())); + assert_regexp_match(result, /<image>/); +}, 'Prompt with ArrayBuffer image content'); + +promise_test(async () => { + await ensureLanguageModel(kImageOptions); + const imageData = await fetch(kValidImagePath); + const session = await 
createLanguageModel(kImageOptions); + const result = await session.prompt(messageWithContent( + kPrompt, 'image', new DataView(await imageData.arrayBuffer()))); + assert_regexp_match(result, /<image>/); +}, 'Prompt with ArrayBufferView image content'); + +promise_test(async (t) => { + await ensureLanguageModel(kImageOptions); + const imageData = await fetch(kValidImagePath); + const session = await createLanguageModel(kImageOptions); + const buffer = await imageData.arrayBuffer(); + // Add 256 bytes of padding in front of the image data. + const bufferView = new Uint8Array(buffer); + const newBufferArray = new ArrayBuffer(256 + buffer.byteLength); + const imageView = new Uint8Array(newBufferArray, 256, buffer.byteLength); + imageView.set(bufferView); + + const result = + await session.prompt(messageWithContent(kPrompt, 'image', imageView)); + assert_regexp_match(result, /<image>/); + + // Offset causes 56 bytes of blank data, resulting in a decoding error. + await promise_rejects_dom( + t, 'InvalidStateError', + session.prompt(messageWithContent( + kPrompt, 'image', + new Uint8Array(newBufferArray, 200, buffer.byteLength)))); +}, 'Prompt with ArrayBufferView image content with an offset.'); + + +promise_test(async () => { + await ensureLanguageModel(kImageOptions); + const newImage = new Image(); + newImage.src = kValidSVGImagePath; + const session = await createLanguageModel(kImageOptions); + const result = + await session.prompt(messageWithContent( + kPrompt, 'image', newImage)); + assert_regexp_match(result, /<image>/); +}, 'Prompt with HTMLImageElement image content (with SVG)'); + + +promise_test(async () => { + await ensureLanguageModel(kImageOptions); + const svg = document.createElementNS('http://www.w3.org/2000/svg', 'svg'); + svg.setAttribute('width', '100'); + svg.setAttribute('height', '100'); + const svgImage = + document.createElementNS('http://www.w3.org/2000/svg', 'image'); + svgImage.setAttribute('href', kValidImagePath); + 
svgImage.setAttribute('decoding', 'sync'); + svg.appendChild(svgImage); + document.body.appendChild(svg); + + // Must wait for the SVG and image to load first. + // TODO(crbug.com/417260923): Make prompt Api await the image to be loaded. + const {promise, resolve} = Promise.withResolvers(); + svgImage.addEventListener('load', resolve); + await promise; + const session = await createLanguageModel(kImageOptions); + const result = + await session.prompt(messageWithContent( + kPrompt, 'image', svgImage)); + assert_regexp_match(result, /<image>/); +}, 'Prompt with SVGImageElement image content'); + +promise_test(async () => { + await ensureLanguageModel(kImageOptions); + var video = document.createElement('video'); + video.src = kValidVideoPath; + video.width = 1224; + video.height = 768; + // Video must have frames fetched. See crbug.com/417249941#comment3 + await video.play(); + const session = await createLanguageModel(kImageOptions); + const result = + await session.prompt(messageWithContent( + kPrompt, 'image', video)); + assert_regexp_match(result, /<image>/); +}, 'Prompt with HTMLVideoElement image content'); + +/***************************************** + * Audio tests + *****************************************/ + +promise_test(async (t) => { + await ensureLanguageModel(); + const blob = await (await fetch(kValidAudioPath)).blob(); + const session = await createLanguageModel(); + return promise_rejects_dom( + t, 'NotSupportedError', + session.prompt(messageWithContent(kPrompt, 'audio', blob))); +}, 'Prompt audio without `audio` expectedInput'); + +promise_test(async () => { + const blob = await (await fetch(kValidAudioPath)).blob(); + const options = { + expectedInputs: [{type: 'audio'}], + initialPrompts: messageWithContent(kPrompt, 'audio', blob) + }; + await ensureLanguageModel(options); + const session = await LanguageModel.create(options); + const tokenLength = await session.measureInputUsage(options.initialPrompts); + assert_greater_than(tokenLength, 0); 
+ assert_equals(session.inputUsage, tokenLength); + assert_regexp_match( + await session.prompt([{role: 'system', content: ''}]), + /<audio>/); +}, 'Test Audio initialPrompt'); + +promise_test(async () => { + await ensureLanguageModel(kAudioOptions); + const blob = await (await fetch(kValidAudioPath)).blob(); + const session = await createLanguageModel(kAudioOptions); + const result = + await session.prompt(messageWithContent(kPrompt, 'audio', blob)); + assert_regexp_match(result, /<audio>/); +}, 'Prompt with Blob audio content'); + +promise_test(async (t) => { + await ensureLanguageModel(kAudioOptions); + const blob = await (await fetch(kValidImagePath)).blob(); + const session = await createLanguageModel(kAudioOptions); + // TODO(crbug.com/409615288): Expect a TypeError according to the spec. + return promise_rejects_dom( + t, 'DataError', + session.prompt(messageWithContent(kPrompt, 'audio', blob))); +}, 'Prompt audio with blob containing invalid audio data.'); + +promise_test(async () => { + await ensureLanguageModel(kAudioOptions); + const audio_data = await fetch(kValidAudioPath); + const audioCtx = new AudioContext(); + const buffer = await audioCtx.decodeAudioData(await audio_data.arrayBuffer()); + const session = await createLanguageModel(kAudioOptions); + const result = + await session.prompt(messageWithContent(kPrompt, 'audio', buffer)); + assert_regexp_match(result, /<audio>/); +}, 'Prompt with AudioBuffer'); + +promise_test(async () => { + await ensureLanguageModel(kAudioOptions); + const audio_data = await fetch(kValidAudioPath); + const session = await createLanguageModel(kAudioOptions); + const result = await session.prompt( + messageWithContent(kPrompt, 'audio', await audio_data.arrayBuffer())); + assert_regexp_match(result, /<audio>/); +}, 'Prompt with BufferSource - ArrayBuffer'); diff --git a/testing/web-platform/tests/ai/language-model/language-model-prompt-streaming-gc.tentative.https.window.js 
b/testing/web-platform/tests/ai/language-model/language-model-prompt-streaming-gc.tentative.https.window.js @@ -0,0 +1,28 @@ +// META: title=Language Model Prompt Streaming GC +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +promise_test(async t => { + await ensureLanguageModel(); + + // Start a new session. + const session = await createLanguageModel(); + // Test the streaming prompt API. + const streamingResponse = + session.promptStreaming(kTestPrompt); + // Run GC. + gc(); + assert_equals( + Object.prototype.toString.call(streamingResponse), + "[object ReadableStream]" + ); + let result = ""; + for await (const value of streamingResponse) { + result += value; + gc(); + } + assert_greater_than(result.length, 0, "The result should not be empty."); +}, 'Prompt Streaming API must continue even after GC has been performed.'); diff --git a/testing/web-platform/tests/ai/language-model/language-model-prompt-streaming.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-prompt-streaming.tentative.https.window.js @@ -0,0 +1,32 @@ +// META: title=Language Model Prompt Streaming +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +promise_test(async t => { + await ensureLanguageModel(); + + // Start a new session. + const session = await createLanguageModel(); + // Test the streaming prompt API. 
+ const streamingResponse = + session.promptStreaming(kTestPrompt); + assert_equals( + Object.prototype.toString.call(streamingResponse), + "[object ReadableStream]" + ); + const reader = streamingResponse.getReader(); + let result = ""; + while (true) { + const { value, done } = await reader.read(); + if (done) { + break; + } + if (value) { + result += value; + } + } + assert_greater_than(result.length, 0, "The result should not be empty."); +}); diff --git a/testing/web-platform/tests/ai/language-model/language-model-prompt.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-prompt.tentative.https.window.js @@ -0,0 +1,72 @@ +// META: title=Language Model Prompt +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +promise_test(async () => { + await ensureLanguageModel(); + const session = await createLanguageModel(); + const result = await session.prompt(kTestPrompt); + assert_equals(typeof result, 'string'); +}, 'Simple LanguageModel.prompt() call'); + +promise_test(async (t) => { + await ensureLanguageModel(); + const session = await createLanguageModel(); + assert_true(!!(await session.prompt([]))); + // Invalid input should be stringified. 
+ assert_regexp_match(await session.prompt({}), /\[object Object\]/); +}, 'Check empty input'); + +promise_test(async (t) => { + await ensureLanguageModel(); + const session = await createLanguageModel(); + assert_regexp_match(await session.prompt('shorthand'), /shorthand/); + assert_regexp_match( + await session.prompt([{role: 'system', content: 'shorthand'}]), + /shorthand/); +}, 'Check Shorthand'); + +promise_test(async () => { + const options = { + initialPrompts: + [{role: 'user', content: [{type: 'text', value: 'The word of the day is regurgitation.'}]}] + }; + await ensureLanguageModel(options); + const session = await LanguageModel.create(options); + const tokenLength = await session.measureInputUsage(options.initialPrompts); + assert_greater_than(tokenLength, 0); + assert_equals(session.inputUsage, tokenLength); + assert_regexp_match( + await session.prompt([{role: 'system', content: ''}]), + /regurgitation/); +}, 'Test that initialPrompt counts towards session inputUsage'); + +promise_test(async () => { + await ensureLanguageModel(); + const session = await createLanguageModel(); + const promise = new Promise(resolve => { + session.addEventListener("quotaoverflow", resolve); + }); + // Make sure there is something to evict. + const kLongPrompt = kTestPrompt.repeat(10); + const usage = await session.measureInputUsage(kLongPrompt); + assert_greater_than(session.inputQuota, usage); + await session.prompt(kLongPrompt); + // Generate a repeated kLongPrompt string that exceeds inputQuota. + assert_greater_than(session.inputUsage, 0); + const repeatCount = session.inputQuota / session.inputUsage; + const promptString = kLongPrompt.repeat(repeatCount); + // The prompt promise succeeds, while causing older input to be evicted. 
+ await Promise.all([promise, session.prompt(promptString)]); +}, 'The `quotaoverflow` event is fired when overall usage exceeds the quota'); + +promise_test(async t => { + await ensureLanguageModel(); + const session = await createLanguageModel(); + const promptString = kTestPrompt.repeat(session.inputQuota); + const requested = await session.measureInputUsage(promptString); + await promise_rejects_quotaexceedederror(t, session.prompt(promptString), requested, session.inputQuota); +}, 'Test that prompt input exceeding the total quota rejects'); diff --git a/testing/web-platform/tests/ai/language-model/language-model-quota-exceeded.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-quota-exceeded.tentative.https.window.js @@ -0,0 +1,20 @@ +// META: title=Language Model Quota Exceeded +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +promise_test(async t => { + await ensureLanguageModel(); + + // Start a new session to get the max tokens. 
+ const session = await createLanguageModel(); + const inputQuota = session.inputQuota; + const initialPrompt = kTestPrompt.repeat(inputQuota); + const requested = await session.measureInputUsage(initialPrompt); + + const promise = createLanguageModel( + { initialPrompts: [ { role: "system", content: initialPrompt } ] }); + await promise_rejects_quotaexceedederror(t, promise, requested, inputQuota); +}, "QuotaExceededError is thrown when initial prompts are too large."); diff --git a/testing/web-platform/tests/ai/language-model/language-model-response-json-schema.tentative.https.window.js b/testing/web-platform/tests/ai/language-model/language-model-response-json-schema.tentative.https.window.js @@ -0,0 +1,90 @@ +// META: title=Language Model Response JSON Schema +// META: script=/resources/testdriver.js +// META: script=../resources/util.js +// META: timeout=long + +'use strict'; + +promise_test(async t => { + await ensureLanguageModel(); + const session = await createLanguageModel(); + // Circular reference is not valid. + const invalidRepsonseJsonSchema = {}; + invalidRepsonseJsonSchema.self = invalidRepsonseJsonSchema; + await promise_rejects_dom(t, 'NotSupportedError', + session.prompt(kTestPrompt, { responseConstraint: invalidRepsonseJsonSchema }), + 'Response json schema is invalid - it should be an object that can be stringified into a JSON string.'); +}, 'Prompt API should fail if an invalid response json schema is provided'); + +promise_test(async t => { + await ensureLanguageModel(); + const session = await createLanguageModel(); + const validRepsonseJsonSchema = { + type: "object", + required: ["Rating"], + additionalProperties: false, + properties: { + Rating: { + type: "number", + minimum: 0, + maximum: 5, + }, + }, + }; + const promptPromise = session.prompt('hello', { responseConstraint : validRepsonseJsonSchema }); + // Both the prompt and schema should be present. 
+ assert_regexp_match(await promptPromise, /hello.*Rating/s); +}, 'Prompt API should work when a valid response json schema is provided.'); + +promise_test(async t => { + await ensureLanguageModel(); + const session = await createLanguageModel(); + const validRepsonseJsonSchema = { + type: "object", + required: ["Rating"], + additionalProperties: false, + properties: { + Rating: { + type: "number", + minimum: 0, + maximum: 5, + }, + }, + }; + const promptPromise = session.prompt([ + {role: 'user', content: 'hello'}, + {role: 'assistant', content: 'prefix', prefix: true} + ], { responseConstraint : validRepsonseJsonSchema }); + // Both the prompt and schema should be present, but prefix should be last. + assert_regexp_match(await promptPromise, /hello.*Rating.*prefix/s); +}, 'Prompt API should work when a valid response json schema and model prefix is provided.'); + +promise_test(async t => { + await ensureLanguageModel(); + const session = await createLanguageModel(); + const validRepsonseJsonSchema = { + type: "object", + required: ["Rating"], + additionalProperties: false, + properties: { + Rating: { + type: "number", + minimum: 0, + maximum: 5, + }, + }, + }; + const promptPromise = session.prompt('hello', { + responseConstraint : validRepsonseJsonSchema, + omitResponseConstraintInput : true + }); + assert_regexp_match(await promptPromise, /hello$/); +}, 'Prompt API should omit response schema from input.'); + +promise_test(async t => { + await ensureLanguageModel(); + const session = await createLanguageModel(); + const promptPromise = session.prompt(kTestPrompt, { responseConstraint : /hello/ }); + const result = await promptPromise; + assert_true(typeof result === "string"); +}, 'Prompt API should work when a valid regex constraint is provided.'); diff --git a/testing/web-platform/tests/ai/language-model/resources/iframe-helper.html b/testing/web-platform/tests/ai/language-model/resources/iframe-helper.html @@ -0,0 +1,26 @@ +<!DOCTYPE HTML> +<meta 
charset="utf-8"> +<script src="/resources/testdriver.js"></script> +<script src="/resources/testdriver-vendor.js"></script> +<body></body> +<script> +test_driver.set_test_context(parent); + +window.onmessage = async message => { + const { id, type } = message.data; + try { + switch (type) { + case 'LanguageModelCreate': + await test_driver.bless('LanguageModel.create', LanguageModel.create, window); + parent.postMessage({id, success: 'Success'}, '*'); + break; + case 'LanguageModelAvailability': + const availability = await LanguageModel.availability(); + parent.postMessage({id, success: availability}, '*'); + break; + } + } catch (err) { + parent.postMessage({id, err: err}, '*'); + } +}; +</script> diff --git a/testing/web-platform/tests/ai/resources/util.js b/testing/web-platform/tests/ai/resources/util.js @@ -225,6 +225,11 @@ function load_iframe(src, permission_policy) { return promise; } +async function createLanguageModel(options = {}) { + await test_driver.bless(); + return LanguageModel.create(options); +} + async function createSummarizer(options = {}) { await test_driver.bless(); return await Summarizer.create(options); @@ -245,6 +250,14 @@ async function createProofreader(options = {}) { return await Proofreader.create(options); } +async function ensureLanguageModel(options = {}) { + assert_true(!!LanguageModel); + const availability = await LanguageModel.availability(options); + assert_in_array(availability, kValidAvailabilities); + // Yield PRECONDITION_FAILED if the API is unavailable on this device. + assert_implements_optional(availability != 'unavailable', 'API unavailable'); +}; + async function testDestroy(t, createMethod, options, instanceMethods) { const instance = await createMethod(options);