commit 0a5ce11727d93fa8604865bbf15bde956afa2cc8
parent afe4eafc19b96a438ac6252fb50e23bcc6ae3024
Author: Greg Tatum <tatum.creative@gmail.com>
Date: Fri, 31 Oct 2025 15:04:31 +0000
Bug 1996291 - Update tests documentation and some naming conventions r=firefox-ai-ml-reviewers,tarek
I read through all of the tests here, and while doing so I updated the
documentation so that it would make more sense to me what the individual
tests were doing. Some of these changes are a bit stylistic, but I feel
they are justified as they ensure we have more explanatory documentation
rather than just re-stating the test name in plain English.
Differential Revision: https://phabricator.services.mozilla.com/D269979
Diffstat:
5 files changed, 62 insertions(+), 18 deletions(-)
diff --git a/toolkit/components/ml/tests/browser/browser_ml_engine_lifetime.js b/toolkit/components/ml/tests/browser/browser_ml_engine_lifetime.js
@@ -3,17 +3,20 @@
"use strict";
-const RAW_PIPELINE_OPTIONS = { taskName: "moz-echo", timeoutMS: -1 };
-const PIPELINE_OPTIONS = new PipelineOptions({
+const MOZ_ECHO_OPTIONS_RAW = { taskName: "moz-echo", timeoutMS: -1 };
+const MOZ_ECHO_OPTIONS = new PipelineOptions({
taskName: "moz-echo",
timeoutMS: -1,
});
+/**
+ * Performs a basic engine initialization and run.
+ */
add_task(async function test_ml_engine_basics() {
const { cleanup, remoteClients } = await setup();
info("Get the engine");
- const engineInstance = await createEngine(RAW_PIPELINE_OPTIONS);
+ const engineInstance = await createEngine(MOZ_ECHO_OPTIONS_RAW);
info("Check the inference process is running");
Assert.equal(await checkForRemoteType("inference"), true);
@@ -42,18 +45,20 @@ add_task(async function test_ml_engine_basics() {
await cleanup();
});
+/**
+ * Tests that a failed Wasm download triggers a rejection.
+ */
add_task(async function test_ml_engine_wasm_rejection() {
const { cleanup, remoteClients } = await setup();
info("Get the engine");
- const engineInstance = await createEngine(RAW_PIPELINE_OPTIONS);
+ const engineInstance = await createEngine(MOZ_ECHO_OPTIONS_RAW);
info("Run the inference");
const inferencePromise = engineInstance.run({ data: "This gets echoed." });
info("Wait for the pending downloads.");
await remoteClients["ml-onnx-runtime"].rejectPendingDownloads(1);
- //await remoteClients.models.resolvePendingDownloads(1);
let error;
try {
@@ -83,8 +88,10 @@ add_task(async function test_ml_engine_parallel() {
let sleepTimes = [300, 1000, 700, 0, 500, 900, 400, 800, 600, 100];
let numCalls = 10;
+ const enginesSeen = new Set();
async function run(x) {
- const engineInstance = await createEngine(RAW_PIPELINE_OPTIONS);
+ const engineInstance = await createEngine(MOZ_ECHO_OPTIONS_RAW);
+ enginesSeen.add(engineInstance);
let msg = `${x} - This gets echoed.`;
let res = engineInstance.run({
@@ -117,6 +124,8 @@ add_task(async function test_ml_engine_parallel() {
);
}
+ Assert.equal(enginesSeen.size, 1, "Only one engine was created.");
+
ok(
!EngineProcess.areAllEnginesTerminated(),
"The engine process is still active."
@@ -134,14 +143,13 @@ add_task(async function test_ml_engine_model_error() {
const { cleanup, remoteClients } = await setup();
info("Get the engine");
- const engineInstance = await createEngine(RAW_PIPELINE_OPTIONS);
+ const engineInstance = await createEngine(MOZ_ECHO_OPTIONS_RAW);
info("Run the inference with a throwing example.");
const inferencePromise = engineInstance.run("throw");
info("Wait for the pending downloads.");
await remoteClients["ml-onnx-runtime"].resolvePendingDownloads(1);
- //await remoteClients.models.resolvePendingDownloads(1);
let error;
try {
@@ -167,7 +175,7 @@ add_task(async function test_ml_engine_destruction() {
const { cleanup, remoteClients } = await setup();
info("Get engineInstance");
- const engineInstance = await createEngine(PIPELINE_OPTIONS);
+ const engineInstance = await createEngine(MOZ_ECHO_OPTIONS);
info("Run the inference");
const inferencePromise = engineInstance.run({ data: "This gets echoed." });
@@ -200,20 +208,19 @@ add_task(async function test_ml_engine_destruction() {
});
/**
- * Tests that the engineInstanceModel's internal errors are correctly surfaced.
+ * Tests creating an engine after an error.
*/
add_task(async function test_ml_engine_model_error() {
const { cleanup, remoteClients } = await setup();
info("Get the engine");
- const engineInstance = await createEngine(RAW_PIPELINE_OPTIONS);
+ const engineInstance = await createEngine(MOZ_ECHO_OPTIONS_RAW);
info("Run the inference with a throwing example.");
const inferencePromise = engineInstance.run("throw");
info("Wait for the pending downloads.");
await remoteClients["ml-onnx-runtime"].resolvePendingDownloads(1);
- //await remoteClients.models.resolvePendingDownloads(1);
let error;
try {
@@ -232,7 +239,7 @@ add_task(async function test_ml_engine_model_error() {
});
/**
- * Tests that we display a nice error message when the pref is off
+ * Tests that we display a nice error message when the "browser.ml.enable" pref is off.
*/
add_task(async function test_pref_is_off() {
await SpecialPowers.pushPrefEnv({
@@ -392,6 +399,9 @@ add_task(async function test_ml_dupe_engines() {
await cleanup();
});
+/**
+ * Tests that a worker can have an infinite timeout.
+ */
add_task(async function test_ml_engine_infinite_worker() {
const { cleanup, remoteClients } = await setup();
diff --git a/toolkit/components/ml/tests/browser/browser_ml_engine_pipeline_options.js b/toolkit/components/ml/tests/browser/browser_ml_engine_pipeline_options.js
@@ -3,6 +3,9 @@
"use strict";
+/**
+ * Test that model PipelineOptions can override the defaults.
+ */
add_task(async function test_ml_engine_override_options() {
const { cleanup, remoteClients } = await setup();
@@ -42,6 +45,9 @@ add_task(async function test_ml_engine_override_options() {
await cleanup();
});
+/**
+ * Verify that features such as the dtype can be picked up via Remote Settings.
+ */
add_task(async function test_ml_engine_pick_feature_id() {
// one record sent back from RS contains featureId
const records = [
@@ -148,6 +154,9 @@ add_task(async function test_ml_generic_pipeline() {
await cleanup();
});
+/**
+ * Test out the default precision values.
+ */
add_task(async function test_q8_by_default() {
const { cleanup, remoteClients } = await setup();
@@ -192,6 +201,10 @@ add_task(async function test_q8_by_default() {
await cleanup();
});
+/**
+ * Tests that the preference-based overrides only apply to the SAFE_OVERRIDE_OPTIONS
+ * defined in MLEngineChild.sys.mjs.
+ */
add_task(
async function test_override_ml_engine_pipeline_options_in_allow_list() {
const { cleanup, remoteClients } = await setup();
@@ -287,6 +300,9 @@ add_task(async function test_override_ml_pipeline_options_not_in_allow_list() {
await cleanup();
});
+/**
+ * Test that an unsanctioned modelId does not get used.
+ */
add_task(async function test_override_ml_pipeline_options_unsafe_options() {
const { cleanup, remoteClients } = await setup();
await SpecialPowers.pushPrefEnv({
@@ -340,6 +356,9 @@ add_task(async function test_override_ml_pipeline_options_unsafe_options() {
await cleanup();
});
+/**
+ * Check that DEFAULT_MODELS are used to pick a preferred model for a given task.
+ */
add_task(async function test_ml_engine_blessed_model() {
const { cleanup, remoteClients } = await setup();
@@ -442,6 +461,9 @@ add_task(async function test_ml_engine_two_tasknames_in_rs() {
await cleanup();
});
+/**
+ * The modelHub should be applied to the PipelineOptions.
+ */
add_task(async function test_ml_engine_model_hub_applied() {
const options = {
taskName: "moz-echo",
@@ -716,7 +738,7 @@ const pipelineOptionsCases = [
];
/**
- * Testing PipelineOption validation
+ * Go through all of the pipeline validation test cases.
*/
add_task(async function test_pipeline_options_validation() {
pipelineOptionsCases.forEach(testCase => {
@@ -741,6 +763,9 @@ add_task(async function test_pipeline_options_validation() {
});
});
+/**
+ * The pipeline should only initialize when there is enough memory.
+ */
add_task(async function test_ml_engine_not_enough_memory() {
const { cleanup } = await setup({
prefs: [
@@ -768,7 +793,8 @@ add_task(async function test_ml_engine_not_enough_memory() {
});
/**
- * Test threading support
+ * This tests that threading is supported. On certain machines this could be false,
+ * but should be true for our testing infrastructure.
*/
add_task(async function test_ml_threading_support() {
const { cleanup, remoteClients } = await setup();
diff --git a/toolkit/components/ml/tests/browser/browser_ml_engine_rs_hub.js b/toolkit/components/ml/tests/browser/browser_ml_engine_rs_hub.js
@@ -3,6 +3,9 @@
"use strict";
+/**
+ * Tests the return values when using the default hub.
+ */
add_task(async function test_hub_by_default() {
const { cleanup, remoteClients } = await setup();
@@ -45,7 +48,7 @@ add_task(async function test_hub_by_default() {
});
/**
- * Tests a custom model hub
+ * Tests that the pipeline can use a custom model hub.
*/
add_task(async function test_ml_custom_hub() {
const { cleanup, remoteClients } = await setup();
diff --git a/toolkit/components/ml/tests/browser/browser_ml_openai.js b/toolkit/components/ml/tests/browser/browser_ml_openai.js
@@ -25,6 +25,9 @@ const SHARED_TOOLS = [
},
];
+/**
+ * Test that createEngine successfully talks to the OpenAI client.
+ */
add_task(async function test_openai_client() {
const records = [
{
diff --git a/toolkit/components/ml/tests/browser/head.js b/toolkit/components/ml/tests/browser/head.js
@@ -31,6 +31,9 @@ const { HttpServer } = ChromeUtils.importESModule(
const MS_PER_SEC = 1000;
const IndexedDBCache = TestIndexedDBCache;
+/**
+ * @type {import("../../../ml/content/EngineProcess.sys.mjs")}
+ */
const {
createEngine,
PipelineOptions,
@@ -50,8 +53,7 @@ Services.scriptloader.loadSubScript(
);
/**
- * Sets up the stage for a test
- *
+ * Mock out remote settings and set some default preferences for the testing environment.
*/
async function setup({
disabled = false,