tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit 4f38e3b955f0013dea44374eb0b9f318feec38f2
parent 62be651d7a1eb3b66cc7cef83ab627a0f8d8a7c9
Author: Nick Grato <ngrato@gmail.com>
Date:   Wed, 10 Dec 2025 21:51:50 +0000

Bug 2003598 - Add Chat service with fetch with history r=tzhang,ai-models-reviewers

Bringing over work done for the Smart Assist Engine, specifically the FxA integration and the fetch-with-history streaming function.

Differential Revision: https://phabricator.services.mozilla.com/D274803

Diffstat:
M browser/base/content/test/static/browser_all_files_referenced.js | 4 ++++
Abrowser/components/aiwindow/models/Chat.sys.mjs | 154+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
M browser/components/aiwindow/models/moz.build | 1 +
Abrowser/components/aiwindow/models/tests/xpcshell/test_Chat.js | 292+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
M browser/components/aiwindow/models/tests/xpcshell/xpcshell.toml | 2 ++
5 files changed, 453 insertions(+), 0 deletions(-)

diff --git a/browser/base/content/test/static/browser_all_files_referenced.js b/browser/base/content/test/static/browser_all_files_referenced.js @@ -335,6 +335,10 @@ var allowlist = [ { file: "moz-src:///browser/components/aiwindow/models/IntentClassifier.sys.mjs", }, + // Bug 2003598 - Add Chat service with fetch with history (backed out due to unused file) + { + file: "moz-src:///browser/components/aiwindow/models/Chat.sys.mjs", + }, // Bug 2002840 - add function to return real time info injection message & tests (backed out due to unused file) { file: "moz-src:///browser/components/aiwindow/models/ChatUtils.sys.mjs", diff --git a/browser/components/aiwindow/models/Chat.sys.mjs b/browser/components/aiwindow/models/Chat.sys.mjs @@ -0,0 +1,154 @@ +/** + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + */ + +/* eslint-disable-next-line mozilla/reject-import-system-module-from-non-system */ +import { getFxAccountsSingleton } from "resource://gre/modules/FxAccounts.sys.mjs"; +import { openAIEngine } from "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs"; +import { + OAUTH_CLIENT_ID, + SCOPE_PROFILE, +} from "resource://gre/modules/FxAccountsCommon.sys.mjs"; + +/** + * Chat + */ +export const Chat = { + toolMap: {}, // TODO can import toolMap + + async _getFxAccountToken() { + try { + const fxAccounts = getFxAccountsSingleton(); + const token = await fxAccounts.getOAuthToken({ + // Scope needs to be updated in accordance with https://bugzilla.mozilla.org/show_bug.cgi?id=2005290 + scope: SCOPE_PROFILE, + client_id: OAUTH_CLIENT_ID, + }); + return token; + } catch (error) { + console.warn("Error obtaining FxA token:", error); + return null; + } + }, + + /** + * Stream assistant output with tool-call support. + * Yields assistant text chunks as they arrive. 
If the model issues tool calls, + * we execute them locally, append results to the conversation, and continue + * streaming the model’s follow-up answer. Repeats until no more tool calls. + * + * @param {Array<{role:string, content?:string, tool_call_id?:string, tool_calls?:any}>} messages + * @yields {string} Assistant text chunks + */ + async *fetchWithHistory(messages) { + const engineInstance = await openAIEngine.build(); + // Note FXA token fetching disabled for now - this is still in progress + // We can flip this switch on when more realiable + // const fxAccountToken = await this._getFxAccountToken(); + + // We'll mutate a local copy of the thread as we loop + let convo = Array.isArray(messages) ? [...messages] : []; + + // Helper to run the model once (streaming) on current convo + const streamModelResponse = () => + engineInstance.runWithGenerator({ + streamOptions: { enabled: true }, + // fxAccountToken, + tool_choice: "auto", + // tools: Add your tools configuration here, + args: convo, + }); + + // Keep calling until the model finishes without requesting tools + while (true) { + let pendingToolCalls = null; + + // 1) First pass: stream tokens; capture any toolCalls + for await (const chunk of streamModelResponse()) { + // Stream assistant text to the UI + if (chunk?.text) { + yield chunk.text; + } + + // Capture tool calls (do not echo raw tool plumbing to the user) + if (chunk?.toolCalls?.length) { + pendingToolCalls = chunk.toolCalls; + } + } + + // 2) Watch for tool calls; if none, we are done + if (!pendingToolCalls || pendingToolCalls.length === 0) { + return; + } + + // 3) Build the assistant tool_calls message exactly as expected by the API + const assistantToolMsg = { + role: "assistant", + tool_calls: pendingToolCalls.map(toolCall => ({ + id: toolCall.id, + type: "function", + function: { + name: toolCall.function.name, + arguments: toolCall.function.arguments, + }, + })), + }; + + // 4) Execute each tool locally and create a tool message with 
the result + const toolResultMessages = []; + for (const toolCall of pendingToolCalls) { + const { id, function: functionSpec } = toolCall; + const name = functionSpec?.name || ""; + let toolParams = {}; + + try { + toolParams = functionSpec?.arguments + ? JSON.parse(functionSpec.arguments) + : {}; + } catch { + toolResultMessages.push({ + role: "tool", + tool_call_id: id, + content: JSON.stringify({ error: "Invalid JSON arguments" }), + }); + continue; + } + + let result; + try { + // Call the appropriate tool by name + const toolFunc = this.toolMap[name]; + if (typeof toolFunc !== "function") { + throw new Error(`No such tool: ${name}`); + } + + result = await toolFunc(toolParams); + + // Create special tool call log message to show in the UI log panel + const assistantToolCallLogMsg = { + role: "assistant", + content: `Tool Call: ${name} with parameters: ${JSON.stringify( + toolParams + )}`, + type: "tool_call_log", + result, + }; + convo.push(assistantToolCallLogMsg); + yield assistantToolCallLogMsg; + } catch (e) { + result = { error: `Tool execution failed: ${String(e)}` }; + } + + toolResultMessages.push({ + role: "tool", + tool_call_id: id, + content: typeof result === "string" ? result : JSON.stringify(result), + }); + } + + convo = [...convo, assistantToolMsg, ...toolResultMessages]; + } + }, +}; diff --git a/browser/components/aiwindow/models/moz.build b/browser/components/aiwindow/models/moz.build @@ -10,6 +10,7 @@ DIRS += [ ] MOZ_SRC_FILES += [ + "Chat.sys.mjs", "ChatUtils.sys.mjs", "Insights.sys.mjs", "InsightsConstants.sys.mjs", diff --git a/browser/components/aiwindow/models/tests/xpcshell/test_Chat.js b/browser/components/aiwindow/models/tests/xpcshell/test_Chat.js @@ -0,0 +1,292 @@ +/* Any copyright is dedicated to the Public Domain. 
+ * http://creativecommons.org/publicdomain/zero/1.0/ */ + +const { Chat } = ChromeUtils.importESModule( + "moz-src:///browser/components/aiwindow/models/Chat.sys.mjs" +); +const { openAIEngine } = ChromeUtils.importESModule( + "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs" +); + +const { sinon } = ChromeUtils.importESModule( + "resource://testing-common/Sinon.sys.mjs" +); + +// Prefs for aiwindow +const PREF_API_KEY = "browser.aiwindow.apiKey"; +const PREF_ENDPOINT = "browser.aiwindow.endpoint"; +const PREF_MODEL = "browser.aiwindow.model"; + +// Clean prefs after all tests +registerCleanupFunction(() => { + for (let pref of [PREF_API_KEY, PREF_ENDPOINT, PREF_MODEL]) { + if (Services.prefs.prefHasUserValue(pref)) { + Services.prefs.clearUserPref(pref); + } + } +}); + +add_task(async function test_openAIEngine_build_uses_prefs() { + Services.prefs.setStringPref(PREF_API_KEY, "test-key-123"); + Services.prefs.setStringPref(PREF_ENDPOINT, "https://example.test/v1"); + Services.prefs.setStringPref(PREF_MODEL, "gpt-fake"); + + const sb = sinon.createSandbox(); + try { + const fakeEngineInstance = { + runWithGenerator() { + throw new Error("not used"); + }, + }; + const stub = sb + .stub(openAIEngine, "_createEngine") + .resolves(fakeEngineInstance); + + const engine = await openAIEngine.build(); + + Assert.ok( + engine instanceof openAIEngine, + "Should return openAIEngine instance" + ); + Assert.strictEqual( + engine.engineInstance, + fakeEngineInstance, + "Should store engine instance" + ); + Assert.ok(stub.calledOnce, "_createEngine should be called once"); + + const opts = stub.firstCall.args[0]; + Assert.equal(opts.apiKey, "test-key-123", "apiKey should come from pref"); + Assert.equal( + opts.baseURL, + "https://example.test/v1", + "baseURL should come from pref" + ); + Assert.equal(opts.modelId, "gpt-fake", "modelId should come from pref"); + } finally { + sb.restore(); + } +}); + +add_task(async function 
test_Chat_fetchWithHistory_streams_and_forwards_args() { + const sb = sinon.createSandbox(); + try { + let capturedArgs = null; + let capturedOptions = null; + + // Fake openAIEngine instance that directly has runWithGenerator method + const fakeEngine = { + runWithGenerator(options) { + capturedArgs = options.args; + capturedOptions = options; + async function* gen() { + yield { text: "Hello" }; + yield { text: " from" }; + yield { text: " fake engine!" }; + yield {}; // ignored by Chat + // No toolCalls yielded, so loop will exit after first iteration + } + return gen(); + }, + }; + + sb.stub(openAIEngine, "build").resolves(fakeEngine); + // sb.stub(Chat, "_getFxAccountToken").resolves("mock_token"); + + const messages = [ + { role: "system", content: "You are helpful" }, + { role: "user", content: "Hi there" }, + ]; + + // Collect streamed output + let acc = ""; + for await (const chunk of Chat.fetchWithHistory(messages)) { + if (typeof chunk === "string") { + acc += chunk; + } + } + + Assert.equal( + acc, + "Hello from fake engine!", + "Should concatenate streamed chunks" + ); + Assert.deepEqual( + capturedArgs, + messages, + "Should forward messages as args to runWithGenerator()" + ); + Assert.deepEqual( + capturedOptions.streamOptions.enabled, + true, + "Should enable streaming in runWithGenerator()" + ); + } finally { + sb.restore(); + } +}); + +add_task(async function test_Chat_fetchWithHistory_handles_tool_calls() { + const sb = sinon.createSandbox(); + try { + let callCount = 0; + const fakeEngine = { + runWithGenerator(_options) { + callCount++; + async function* gen() { + if (callCount === 1) { + // First call: yield text and tool call + yield { text: "I'll help you with that. " }; + yield { + toolCalls: [ + { + id: "call_123", + function: { + name: "test_tool", + arguments: JSON.stringify({ param: "value" }), + }, + }, + ], + }; + } else { + // Second call: after tool execution + yield { text: "Tool executed successfully!" 
}; + } + } + return gen(); + }, + }; + + // Mock tool function + Chat.toolMap.test_tool = sb.stub().resolves("tool result"); + + sb.stub(openAIEngine, "build").resolves(fakeEngine); + // sb.stub(Chat, "_getFxAccountToken").resolves("mock_token"); + + const messages = [{ role: "user", content: "Use the test tool" }]; + + let textOutput = ""; + let toolCallLogs = []; + for await (const chunk of Chat.fetchWithHistory(messages)) { + if (typeof chunk === "string") { + textOutput += chunk; + } else if (chunk?.type === "tool_call_log") { + toolCallLogs.push(chunk); + } + } + + Assert.equal( + textOutput, + "I'll help you with that. Tool executed successfully!", + "Should yield text from both model calls" + ); + Assert.equal(toolCallLogs.length, 1, "Should have one tool call log"); + Assert.ok( + toolCallLogs[0].content.includes("test_tool"), + "Tool call log should mention tool name" + ); + Assert.ok(Chat.toolMap.test_tool.calledOnce, "Tool should be called once"); + Assert.deepEqual( + Chat.toolMap.test_tool.firstCall.args[0], + { param: "value" }, + "Tool should receive correct parameters" + ); + Assert.equal( + callCount, + 2, + "Engine should be called twice (initial + after tool)" + ); + } finally { + sb.restore(); + delete Chat.toolMap.test_tool; + } +}); + +add_task( + async function test_Chat_fetchWithHistory_propagates_engine_build_error() { + const sb = sinon.createSandbox(); + try { + const err = new Error("engine build failed"); + sb.stub(openAIEngine, "build").rejects(err); + // sb.stub(Chat, "_getFxAccountToken").resolves("mock_token"); + + const messages = [{ role: "user", content: "Hi" }]; + + const consume = async () => { + for await (const _chunk of Chat.fetchWithHistory(messages)) { + void _chunk; + } + }; + + await Assert.rejects( + consume(), + e => e === err, + "Should propagate the same error thrown by openAIEngine.build" + ); + } finally { + sb.restore(); + } + } +); + +add_task( + async function 
test_Chat_fetchWithHistory_handles_invalid_tool_arguments() { + const sb = sinon.createSandbox(); + try { + let callCount = 0; + const fakeEngine = { + runWithGenerator(_options) { + callCount++; + async function* gen() { + if (callCount === 1) { + // First call: yield text and invalid tool call + yield { text: "Using tool with bad args: " }; + yield { + toolCalls: [ + { + id: "call_456", + function: { + name: "test_tool", + arguments: "invalid json {", + }, + }, + ], + }; + } else { + // Second call: no more tool calls, should exit loop + yield { text: "Done." }; + } + } + return gen(); + }, + }; + + Chat.toolMap.test_tool = sb.stub().resolves("should not be called"); + + sb.stub(openAIEngine, "build").resolves(fakeEngine); + // sb.stub(Chat, "_getFxAccountToken").resolves("mock_token"); + + const messages = [{ role: "user", content: "Test bad JSON" }]; + + let textOutput = ""; + for await (const chunk of Chat.fetchWithHistory(messages)) { + if (typeof chunk === "string") { + textOutput += chunk; + } + } + + Assert.equal( + textOutput, + "Using tool with bad args: Done.", + "Should yield text from both calls" + ); + Assert.ok( + Chat.toolMap.test_tool.notCalled, + "Tool should not be called with invalid JSON" + ); + } finally { + sb.restore(); + delete Chat.toolMap.test_tool; + } + } +); diff --git a/browser/components/aiwindow/models/tests/xpcshell/xpcshell.toml b/browser/components/aiwindow/models/tests/xpcshell/xpcshell.toml @@ -6,6 +6,8 @@ head = "head.js" firefox-appdir = "browser" support-files = [] +["test_Chat.js"] + ["test_ChatUtils.js"] ["test_Insights.js"]