commit 5e5bc3588184589b95d535234e074d0beb98f2c2
parent 3cf7d867803e8af44117a4442e500a8cca4df959
Author: Christopher DiPersio <cdipersio@mozilla.com>
Date: Mon, 1 Dec 2025 22:03:05 +0000
Bug 2003328 - Implement createOpenAIEngine and prompt rendering r=cgopal,tzhang,ai-models-reviewers
Differential Revision: https://phabricator.services.mozilla.com/D274634
Diffstat:
5 files changed, 212 insertions(+), 1 deletion(-)
diff --git a/browser/base/content/test/static/browser_all_files_referenced.js b/browser/base/content/test/static/browser_all_files_referenced.js
@@ -345,6 +345,10 @@ var allowlist = [
{
file: "moz-src:///browser/components/aiwindow/models/ChatUtils.mjs",
},
+ // Bug 2003328 - Implement createOpenAIEngine and prompt rendering
+ {
+ file: "moz-src:///browser/components/aiwindow/models/Utils.mjs",
+ },
];
if (AppConstants.NIGHTLY_BUILD) {
diff --git a/browser/components/aiwindow/models/Utils.mjs b/browser/components/aiwindow/models/Utils.mjs
@@ -0,0 +1,96 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+
+/**
+ * This module defines utility functions and classes needed for invoking LLMs such as:
+ * - Creating and running OpenAI engine instances
+ * - Rendering prompts from raw strings with placeholder replacement
+ */
+
+/* eslint-disable-next-line mozilla/reject-import-system-module-from-non-system */
+import { createEngine } from "chrome://global/content/ml/EngineProcess.sys.mjs";
+
+/**
+ * openAIEngine class
+ *
+ * Contains methods to create engine instances and estimate token usage.
+ */
+export class openAIEngine {
+ /**
+ * Exposing createEngine for testing purposes.
+ */
+ static _createEngine = createEngine;
+
+ static async build(engineId = "smart-openai") {
+ const engine = new openAIEngine();
+ engine.engineInstance = await openAIEngine.#createOpenAIEngine(engineId);
+ return engine;
+ }
+
+ /**
+ * Creates an OpenAI engine instance
+ *
+ * @param {string} engineId The identifier for the engine instance
+ * @returns {Promise<object>} The configured engine instance
+ */
+ static async #createOpenAIEngine(engineId) {
+ try {
+ const engineInstance = await openAIEngine._createEngine({
+ apiKey: Services.prefs.getStringPref("browser.aiwindow.apiKey"),
+ backend: "openai",
+ baseURL: Services.prefs.getStringPref("browser.aiwindow.endpoint"),
+ engineId,
+ modelId: Services.prefs.getStringPref("browser.aiwindow.model"),
+ modelRevision: "main",
+ taskName: "text-generation",
+ });
+ return engineInstance;
+ } catch (error) {
+ console.error("Failed to create OpenAI engine:", error);
+ throw error;
+ }
+ }
+
+ /**
+ * Wrapper around engine.run to send message to the LLM
+ * Will eventually use `usage` from the LiteLLM API response for token telemetry
+ *
+ * @param {object} content OpenAI formatted messages to be sent to the LLM
+ * @returns {object} LLM response
+ */
+ async run(content) {
+ return await this.engineInstance.run(content);
+ }
+
+ /**
+ * Wrapper around engine.runWithGenerator to send message to the LLM
+ * Will eventually use `usage` from the LiteLLM API response for token telemetry
+ *
+ * @param {object} options OpenAI formatted messages with streaming and tooling options to be sent to the LLM
+ * @returns {object} LLM response
+ */
+ runWithGenerator(options) {
+ return this.engineInstance.runWithGenerator(options);
+ }
+}
+
+/**
+ * Renders a prompt from a string, replacing placeholders with provided strings.
+ *
+ * @param {string} rawPromptContent The raw prompt as a string
+ * @param {object} stringsToReplace A plain object mapping placeholder strings to their replacements
+ * @returns {Promise<string>} The rendered prompt
+ */
+export async function renderPrompt(rawPromptContent, stringsToReplace = {}) {
+ let finalPromptContent = rawPromptContent;
+
+ for (const [orig, repl] of Object.entries(stringsToReplace)) {
+ const regex = new RegExp(`{${orig}}`, "g");
+ finalPromptContent = finalPromptContent.replace(regex, repl);
+ }
+
+ return finalPromptContent;
+}
diff --git a/browser/components/aiwindow/models/moz.build b/browser/components/aiwindow/models/moz.build
@@ -9,6 +9,7 @@ MOZ_SRC_FILES += [
"ChatUtils.mjs",
"InsightsHistorySource.sys.mjs",
"IntentClassifier.sys.mjs",
+ "Utils.mjs",
]
XPCSHELL_TESTS_MANIFESTS += [
diff --git a/browser/components/aiwindow/models/tests/xpcshell/test_Utils.js b/browser/components/aiwindow/models/tests/xpcshell/test_Utils.js
@@ -0,0 +1,106 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const { openAIEngine, renderPrompt } = ChromeUtils.importESModule(
+ "moz-src:///browser/components/aiwindow/models/Utils.mjs"
+);
+
+const { sinon } = ChromeUtils.importESModule(
+ "resource://testing-common/Sinon.sys.mjs"
+);
+
+/**
+ * Constants for preference keys and test values
+ */
+const PREF_API_KEY = "browser.aiwindow.apiKey";
+const PREF_ENDPOINT = "browser.aiwindow.endpoint";
+const PREF_MODEL = "browser.aiwindow.model";
+
+const API_KEY = "fake-key";
+const ENDPOINT = "https://api.fake-endpoint.com/v1";
+const MODEL = "fake-model";
+
+/**
+ * Cleans up preferences after testing
+ */
+registerCleanupFunction(() => {
+ for (let pref of [PREF_API_KEY, PREF_ENDPOINT, PREF_MODEL]) {
+ if (Services.prefs.prefHasUserValue(pref)) {
+ Services.prefs.clearUserPref(pref);
+ }
+ }
+});
+
+/**
+ * Tests the creation of an OpenAI engine instance
+ */
+add_task(async function test_createOpenAIEngine() {
+ Services.prefs.setStringPref(PREF_API_KEY, API_KEY);
+ Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT);
+ Services.prefs.setStringPref(PREF_MODEL, MODEL);
+
+ const sb = sinon.createSandbox();
+ try {
+ // Fake engine to stub out actual engine creation
+ const fakeEngine = {
+ runWithGenerator() {
+ throw new Error("not used");
+ },
+ };
+
+ const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine);
+ const engine = await openAIEngine.build();
+ Assert.strictEqual(
+ engine.engineInstance,
+ fakeEngine,
+ "Should return engine from _createEngine"
+ );
+ Assert.ok(stub.calledOnce, "_createEngine should be called once");
+
+ // Test preferences were read correctly
+ const opts = stub.firstCall.args[0];
+ Assert.equal(opts.apiKey, API_KEY, "apiKey should come from pref");
+ Assert.equal(opts.backend, "openai", "backend should be openai");
+ Assert.equal(opts.baseURL, ENDPOINT, "baseURL should come from pref");
+ Assert.equal(
+ opts.engineId,
+ "smart-openai",
+ "engineId should be smart-openai"
+ );
+ Assert.equal(opts.modelId, MODEL, "modelId should come from pref");
+ Assert.equal(opts.modelRevision, "main", "modelRevision should be main");
+ Assert.equal(
+ opts.taskName,
+ "text-generation",
+ "taskName should be text-generation"
+ );
+ } finally {
+ sb.restore();
+ }
+});
+
+/**
+ * Tests rendering a prompt from a string with placeholder string replacements
+ */
+add_task(async function test_renderPrompt() {
+ // Render the test prompt with replacements
+ const test_prompt = `
+This is a test prompt.
+{testToReplace1}
+
+This is more content. {testToReplace2}
+
+{testToReplace3} Here's the last line.`.trim();
+ const promptContent = await renderPrompt(test_prompt, {
+ testToReplace1: "replaced1",
+ testToReplace2: "replaced2",
+ testToReplace3: "replaced3",
+ });
+
+ Assert.equal(
+ promptContent,
+ "This is a test prompt.\nreplaced1\n\nThis is more content. replaced2\n\nreplaced3 Here's the last line.",
+ "Should render the prompt correctly with provided replacement strings"
+ );
+});
diff --git a/browser/components/aiwindow/models/tests/xpcshell/xpcshell.toml b/browser/components/aiwindow/models/tests/xpcshell/xpcshell.toml
@@ -1,5 +1,7 @@
[DEFAULT]
-run-if = ["os != 'android'"]
+run-if = [
+ "os != 'android'",
+]
head = "head.js"
firefox-appdir = "browser"
support-files = []
@@ -8,4 +10,6 @@ support-files = []
["test_InsightsHistorySource.js"]
+["test_Utils.js"]
+
["test_intent_classifier.js"]