tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit 38b2bf6eb66ba1d401d211873670ff4c8ae59059
parent dc4e9d14937fb0fff9c2cc427826df18002bd4fb
Author: Christopher DiPersio <cdipersio@mozilla.com>
Date:   Tue, 30 Dec 2025 21:20:43 +0000

Bug 2006939 - Add required headers to OpenAIEngine class r=tzhang,npodgurski,ai-models-reviewers,ai-ondevice-reviewers,gregtatum

Differential Revision: https://phabricator.services.mozilla.com/D277215

Diffstat:
Mbrowser/components/aiwindow/models/Chat.sys.mjs | 23+----------------------
Mbrowser/components/aiwindow/models/Insights.sys.mjs | 8+++++++-
Mbrowser/components/aiwindow/models/InsightsManager.sys.mjs | 1+
Mbrowser/components/aiwindow/models/TitleGeneration.sys.mjs | 5++++-
Mbrowser/components/aiwindow/models/Utils.sys.mjs | 59++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
Mbrowser/components/aiwindow/models/tests/xpcshell/test_Utils.js | 9+++++++++
Mtoolkit/components/ml/content/EngineProcess.sys.mjs | 19+++++++++++++++++++
Mtoolkit/components/ml/content/backends/OpenAIPipeline.mjs | 43++++++++++++++++++++++++++++++++++++-------
8 files changed, 131 insertions(+), 36 deletions(-)

diff --git a/browser/components/aiwindow/models/Chat.sys.mjs b/browser/components/aiwindow/models/Chat.sys.mjs @@ -4,14 +4,8 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -/* eslint-disable-next-line mozilla/reject-import-system-module-from-non-system */ -import { getFxAccountsSingleton } from "resource://gre/modules/FxAccounts.sys.mjs"; import { openAIEngine } from "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs"; import { - OAUTH_CLIENT_ID, - SCOPE_PROFILE, -} from "resource://gre/modules/FxAccountsCommon.sys.mjs"; -import { toolsConfig, getOpenTabs, searchBrowsingHistory, @@ -28,21 +22,6 @@ export const Chat = { get_page_content: GetPageContent.getPageContent.bind(GetPageContent), }, - async _getFxAccountToken() { - try { - const fxAccounts = getFxAccountsSingleton(); - const token = await fxAccounts.getOAuthToken({ - // Scope needs to be updated in accordance with https://bugzilla.mozilla.org/show_bug.cgi?id=2005290 - scope: SCOPE_PROFILE, - client_id: OAUTH_CLIENT_ID, - }); - return token; - } catch (error) { - console.warn("Error obtaining FxA token:", error); - return null; - } - }, - /** * Stream assistant output with tool-call support. * Yields assistant text chunks as they arrive. 
If the model issues tool calls, @@ -56,7 +35,7 @@ export const Chat = { const engineInstance = await openAIEngine.build(); // Note FXA token fetching disabled for now - this is still in progress // We can flip this switch on when more reliable - const fxAccountToken = await this._getFxAccountToken(); + const fxAccountToken = await openAIEngine.getFxAccountToken(); // We'll mutate a local copy of the thread as we loop // We also filter out empty assistant messages because diff --git a/browser/components/aiwindow/models/Insights.sys.mjs b/browser/components/aiwindow/models/Insights.sys.mjs @@ -24,7 +24,7 @@ * */ -import { renderPrompt } from "./Utils.sys.mjs"; +import { renderPrompt, openAIEngine } from "./Utils.sys.mjs"; import { HISTORY, @@ -365,6 +365,7 @@ function normalizeInsightList(parsed) { */ export async function generateInitialInsightsList(engine, sources) { const promptText = await buildInitialInsightsGenerationPrompt(sources); + const response = await engine.run({ args: [ { @@ -374,6 +375,7 @@ export async function generateInitialInsightsList(engine, sources) { { role: "user", content: promptText }, ], responseFormat: { type: "json_schema", schema: INITIAL_INSIGHTS_SCHEMA }, + fxAccountToken: await openAIEngine.getFxAccountToken(), }); const parsed = parseAndExtractJSON(response, []); @@ -397,6 +399,7 @@ export async function deduplicateInsights( existingInsightsList, newInsightsList ); + const response = await engine.run({ args: [ { @@ -409,6 +412,7 @@ export async function deduplicateInsights( type: "json_schema", schema: INSIGHTS_DEDUPLICATION_SCHEMA, }, + fxAccountToken: await openAIEngine.getFxAccountToken(), }); const parsed = parseAndExtractJSON(response, { unique_insights: [] }); @@ -440,6 +444,7 @@ export async function deduplicateInsights( export async function filterSensitiveInsights(engine, insightsList) { const sensitivityFilterPrompt = await buildInsightsSensitivityFilterPrompt(insightsList); + const response = await engine.run({ args: [ { 
@@ -452,6 +457,7 @@ export async function filterSensitiveInsights(engine, insightsList) { type: "json_schema", schema: INSIGHTS_NON_SENSITIVE_SCHEMA, }, + fxAccountToken: await openAIEngine.getFxAccountToken(), }); const parsed = parseAndExtractJSON(response, { non_sensitive_insights: [] }); diff --git a/browser/components/aiwindow/models/InsightsManager.sys.mjs b/browser/components/aiwindow/models/InsightsManager.sys.mjs @@ -429,6 +429,7 @@ export class InsightsManager { type: "json_schema", schema: INSIGHTS_MESSAGE_CLASSIFY_SCHEMA, }, + fxAccountToken: await openAIEngine.getFxAccountToken(), }); const parsed = parseAndExtractJSON(response, { diff --git a/browser/components/aiwindow/models/TitleGeneration.sys.mjs b/browser/components/aiwindow/models/TitleGeneration.sys.mjs @@ -60,7 +60,10 @@ export async function generateChatTitle(message, current_tab) { ]; // Call the LLM - const response = await engine.run({ messages }); + const response = await engine.run({ + messages, + fxAccountToken: await openAIEngine.getFxAccountToken(), + }); // Extract the generated title from the response const title = diff --git a/browser/components/aiwindow/models/Utils.sys.mjs b/browser/components/aiwindow/models/Utils.sys.mjs @@ -11,6 +11,11 @@ */ import { createEngine } from "chrome://global/content/ml/EngineProcess.sys.mjs"; +import { getFxAccountsSingleton } from "resource://gre/modules/FxAccounts.sys.mjs"; +import { + OAUTH_CLIENT_ID, + SCOPE_PROFILE, +} from "resource://gre/modules/FxAccountsCommon.sys.mjs"; /** * openAIEngine class @@ -23,19 +28,61 @@ export class openAIEngine { */ static _createEngine = createEngine; - static async build(engineId = "smart-openai") { + /** + * Returns an OpenAIEngine instance with the specified engine and service types + * + * @param {string} engineId The identifier for the engine instance + * @param {string} serviceType The type of message to be sent ("ai", "memories", "s2s") + * @returns {Promise<openAIEngine>} The OpenAIEngine instance + */ 
+ static async build(engineId = "smart-openai", serviceType = "ai") { const engine = new openAIEngine(); - engine.engineInstance = await openAIEngine.#createOpenAIEngine(engineId); + engine.engineInstance = await openAIEngine.#createOpenAIEngine( + engineId, + serviceType + ); return engine; } /** + * Retrieves the Firefox account token + * + * @returns {Promise<string|null>} The Firefox account token (string) or null + */ + static async getFxAccountToken() { + try { + const fxAccounts = getFxAccountsSingleton(); + return await fxAccounts.getOAuthToken({ + // Scope needs to be updated in accordance with https://bugzilla.mozilla.org/show_bug.cgi?id=2005290 + scope: SCOPE_PROFILE, + client_id: OAUTH_CLIENT_ID, + }); + } catch (error) { + console.warn("Error obtaining FxA token:", error); + return null; + } + } + + /** * Creates an OpenAI engine instance * - * @param {string} engineId The identifier for the engine instance - * @returns {Promise<object>} The configured engine instance + * @param {string} engineId The identifier for the engine instance + * @param {string} serviceType The type of message to be sent ("ai", "memories", "s2s") + * @returns {Promise<object>} The configured engine instance */ - static async #createOpenAIEngine(engineId) { + static async #createOpenAIEngine(engineId, serviceType) { + const extraHeadersPref = Services.prefs.getStringPref( + "browser.aiwindow.extraHeaders", + "{}" + ); + let extraHeaders = {}; + try { + extraHeaders = JSON.parse(extraHeadersPref); + } catch (e) { + console.error("Failed to parse extra headers from prefs:", e); + Services.prefs.clearUserPref("browser.aiwindow.extraHeaders"); + } + try { const engineInstance = await openAIEngine._createEngine({ apiKey: Services.prefs.getStringPref("browser.aiwindow.apiKey"), @@ -45,6 +92,8 @@ export class openAIEngine { modelId: Services.prefs.getStringPref("browser.aiwindow.model"), modelRevision: "main", taskName: "text-generation", + serviceType, + extraHeaders, }); return 
engineInstance; } catch (error) { diff --git a/browser/components/aiwindow/models/tests/xpcshell/test_Utils.js b/browser/components/aiwindow/models/tests/xpcshell/test_Utils.js @@ -16,10 +16,12 @@ const { sinon } = ChromeUtils.importESModule( const PREF_API_KEY = "browser.aiwindow.apiKey"; const PREF_ENDPOINT = "browser.aiwindow.endpoint"; const PREF_MODEL = "browser.aiwindow.model"; +const PREF_EXTRA_HEADERS = "browser.aiwindow.extraHeaders"; const API_KEY = "fake-key"; const ENDPOINT = "https://api.fake-endpoint.com/v1"; const MODEL = "fake-model"; +const EXTRA_HEADERS = '{"x-fastly-request": "fake-key"}'; /** * Cleans up preferences after testing @@ -39,6 +41,7 @@ add_task(async function test_createOpenAIEngine() { Services.prefs.setStringPref(PREF_API_KEY, API_KEY); Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT); Services.prefs.setStringPref(PREF_MODEL, MODEL); + Services.prefs.setStringPref(PREF_EXTRA_HEADERS, EXTRA_HEADERS); const sb = sinon.createSandbox(); try { @@ -75,6 +78,12 @@ add_task(async function test_createOpenAIEngine() { "text-generation", "taskName should be text-generation" ); + Assert.equal(opts.serviceType, "ai", "serviceType should be ai"); + Assert.deepEqual( + opts.extraHeaders, + JSON.parse(EXTRA_HEADERS), + "extraHeaders should come from pref" + ); } finally { sb.restore(); } diff --git a/toolkit/components/ml/content/EngineProcess.sys.mjs b/toolkit/components/ml/content/EngineProcess.sys.mjs @@ -596,6 +596,21 @@ export class PipelineOptions { staticEmbeddingsOptions = null; /** + * The service type for an OpenAIPipeline. + * + * @type {"ai" | "memories" | "s2s" | null} + */ + serviceType = null; + + /** + * This option allows for extra headers to be passed to + * OpenAI-API-compatible endpoints + * + * @type {?Record<string, string>} + */ + extraHeaders = null; + + /** * Create a PipelineOptions instance. * * @param {object} options - The options for the pipeline. Must include mandatory fields. 
@@ -801,6 +816,8 @@ export class PipelineOptions { "baseURL", "apiKey", "staticEmbeddingsOptions", + "serviceType", + "extraHeaders", ]; if (options instanceof PipelineOptions) { @@ -946,6 +963,8 @@ export class PipelineOptions { baseURL: this.baseURL, apiKey: this.apiKey, staticEmbeddingsOptions: this.staticEmbeddingsOptions, + serviceType: this.serviceType, + extraHeaders: this.extraHeaders, }; } diff --git a/toolkit/components/ml/content/backends/OpenAIPipeline.mjs b/toolkit/components/ml/content/backends/OpenAIPipeline.mjs @@ -326,20 +326,49 @@ export class OpenAIPipeline { ) { lazy.console.debug("Running OpenAI pipeline"); try { - const { baseURL, apiKey, modelId } = this.#options; + const { baseURL, apiKey, modelId, serviceType, extraHeaders, engineId } = + this.#options; const fxAccountToken = request.fxAccountToken ? request.fxAccountToken : null; - const defaultHeaders = fxAccountToken - ? { - Authorization: `Bearer ${fxAccountToken}`, - "service-type": "ai", + + let isFastlyRequest = false; + if (extraHeaders) { + for (const headerKey of Object.keys(extraHeaders)) { + if (headerKey.toLowerCase() == "x-fastly-request") { + isFastlyRequest = true; + break; } - : undefined; + } + } + + /** @type {Record<string, string>} */ + let authHeaders; + if (isFastlyRequest) { + // If the x-fastly-request extra header is present, we want to hit the LiteLLM + // endpoint directly, so don't use an FxA token + authHeaders = { + authorization: `Bearer ${apiKey}`, + }; + } else if (fxAccountToken) { + // Use a Firefox account token if available + authHeaders = { + authorization: `Bearer ${fxAccountToken}`, + "service-type": serviceType || "ai", + }; + } else { + // Don't use any authentication headers + authHeaders = {}; + } + const client = new OpenAIPipeline.OpenAILib.OpenAI({ baseURL: baseURL ? baseURL : "http://localhost:11434/v1", apiKey: apiKey || "ollama", - ...(defaultHeaders ? 
{ defaultHeaders } : {}), + defaultHeaders: { + ...authHeaders, + ...extraHeaders, + "x-engine-id": engineId, + }, }); const stream = request.streamOptions?.enabled || false; const tools = request.tools || [];