tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit f97a902a366f6b7d3822d929f6633917c344f800
parent 769c83f717efea84e6d4291be370837c2d8a89fc
Author: Nick Grato <ngrato@gmail.com>
Date:   Mon,  6 Oct 2025 15:15:41 +0000

Bug 1990599 - example tool call from chat r=Mardak,firefox-ai-ml-reviewers,tarek

Similar to bug 1990387 (adding to our OpenAI engine capability) and building on basic chat bug 1989035, we'll likely need ways to call tools, so here we get at least one dummy tool working so that it unblocks others adding more tools.

It could even be a functional tool, like getting the current page content.

Differential Revision: https://phabricator.services.mozilla.com/D266883

Diffstat:
Mbrowser/components/genai/SmartAssistEngine.sys.mjs | 176++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------
Mbrowser/components/genai/content/smart-assist.css | 29+++++++++++++++++++++++++++++
Mbrowser/components/genai/content/smart-assist.mjs | 102+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------
Mtoolkit/components/ml/actors/MLEngineParent.sys.mjs | 1+
Mtoolkit/components/ml/content/backends/OpenAIPipeline.mjs | 125+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------
Mtoolkit/components/ml/tests/browser/browser_ml_engine.js | 190+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Mtoolkit/components/ml/tests/browser/head.js | 218+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
7 files changed, 798 insertions(+), 43 deletions(-)

diff --git a/browser/components/genai/SmartAssistEngine.sys.mjs b/browser/components/genai/SmartAssistEngine.sys.mjs @@ -4,12 +4,68 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +const lazy = {}; +ChromeUtils.defineESModuleGetters(lazy, { + BrowserWindowTracker: "resource:///modules/BrowserWindowTracker.sys.mjs", +}); + +/* eslint-disable-next-line mozilla/reject-import-system-module-from-non-system */ import { createEngine } from "chrome://global/content/ml/EngineProcess.sys.mjs"; +const toolsConfig = [ + { + type: "function", + function: { + name: "search_open_tabs", + description: + "Searches the user's open tabs for tabs that match the given type", + parameters: { + type: "object", + properties: { + type: { + type: "string", + description: + "the type of tabs I am looking for ie news, sports, etc", + }, + }, + required: ["type"], + }, + }, + }, +]; + +/** + * Searches the user's open tabs for tabs that match the given type + * + * @param {object} args.type - type of tabs to search for + * @returns + */ + +const search_open_tabs = ({ type }) => { + let win = lazy.BrowserWindowTracker.getTopWindow(); + let gBrowser = win.gBrowser; + let tabs = gBrowser.tabs; + const tabData = tabs.map(tab => { + return { + title: tab.label, + url: tab.linkedBrowser.currentURI.spec, + }; + }); + + return { + query: type, + allTabs: tabData, + }; +}; + /** * Smart Assist Engine */ export const SmartAssistEngine = { + toolMap: { + search_open_tabs, + }, + /** * Exposing createEngine for testing purposes. 
*/ @@ -21,7 +77,6 @@ export const SmartAssistEngine = { * * @returns {Promise<object>} The configured engine instance */ - async createOpenAIEngine() { try { const engineInstance = await this._createEngine({ @@ -34,32 +89,125 @@ export const SmartAssistEngine = { modelRevision: "main", taskName: "text-generation", }); - return engineInstance; } catch (error) { console.error("Failed to create OpenAI engine:", error); - throw error; } }, /** - * Fetches a response from the OpenAI engine with message history. + * Stream assistant output with tool-call support. + * Yields assistant text chunks as they arrive. If the model issues tool calls, + * we execute them locally, append results to the conversation, and continue + * streaming the model’s follow-up answer. Repeats until no more tool calls. * - * @param {Array} messages - Array of message objects with role and content - * @returns {string} AI response + * @param {Array<{role:string, content?:string, tool_call_id?:string, tool_calls?:any}>} messages + * @yields {string} Assistant text chunks */ - async *fetchWithHistory(messages) { const engineInstance = await this.createOpenAIEngine(); - // Use runWithGenerator to get streaming chunks directly - for await (const chunk of engineInstance.runWithGenerator({ - streamOptions: { enabled: true }, - args: messages, - })) { - if (chunk.text) { - yield chunk.text; + + // We'll mutate a local copy of the thread as we loop + let convo = Array.isArray(messages) ? 
[...messages] : []; + + // Helper to run the model once (streaming) on current convo + const streamModelResponse = () => + engineInstance.runWithGenerator({ + streamOptions: { enabled: true }, + tool_choice: "auto", + tools: toolsConfig, + args: convo, + }); + + // Keep calling until the model finishes without requesting tools + while (true) { + let pendingToolCalls = null; + + // 1) First pass: stream tokens; capture any toolCalls + for await (const chunk of streamModelResponse()) { + // Stream assistant text to the UI + if (chunk?.text) { + yield chunk.text; + } + + // Capture tool calls (do not echo raw tool plumbing to the user) + if (chunk?.toolCalls?.length) { + pendingToolCalls = chunk.toolCalls; + } + } + + // 2) Watch for tool calls; if none, we are done + if (!pendingToolCalls || pendingToolCalls.length === 0) { + return; + } + + // 3) Build the assistant tool_calls message exactly as expected by the API + const assistantToolMsg = { + role: "assistant", + tool_calls: pendingToolCalls.map(toolCall => ({ + id: toolCall.id, + type: "function", + function: { + name: toolCall.function.name, + arguments: toolCall.function.arguments, + }, + })), + }; + + // 4) Execute each tool locally and create a tool message with the result + const toolResultMessages = []; + for (const toolCall of pendingToolCalls) { + const { id, function: functionSpec } = toolCall; + const name = functionSpec?.name || ""; + let toolParams = {}; + + try { + toolParams = functionSpec?.arguments + ? 
JSON.parse(functionSpec.arguments) + : {}; + } catch { + toolResultMessages.push({ + role: "tool", + tool_call_id: id, + content: JSON.stringify({ error: "Invalid JSON arguments" }), + }); + continue; + } + + let result; + try { + // Call the appropriate tool by name + const toolFunc = this.toolMap[name]; + if (typeof toolFunc !== "function") { + throw new Error(`No such tool: ${name}`); + } + + result = await toolFunc(toolParams); + + // Create special tool call log message to show in the UI log panel + const assistantToolCallLogMsg = { + role: "assistant", + content: `Tool Call: ${name} with parameters: ${JSON.stringify( + toolParams + )}`, + type: "tool_call_log", + result, + }; + convo.push(assistantToolCallLogMsg); + yield assistantToolCallLogMsg; + } catch (e) { + result = { error: `Tool execution failed: ${String(e)}` }; + } + + toolResultMessages.push({ + role: "tool", + tool_call_id: id, + content: typeof result === "string" ? result : JSON.stringify(result), + }); } + + convo = [...convo, assistantToolMsg, ...toolResultMessages]; } }, }; diff --git a/browser/components/genai/content/smart-assist.css b/browser/components/genai/content/smart-assist.css @@ -18,6 +18,7 @@ margin-bottom: var(--space-medium); padding: var(--space-medium); border-radius: var(--border-radius-small); + font-size: 14px; } .message.user { @@ -37,3 +38,31 @@ display: flex; justify-content: flex-end; } + +.log-header { + display: flex; + justify-content: space-between; + align-items: center; +} + +.log-entries { + display: flex; + gap: var(--space-medium); + margin-block-end: var(--space-medium); + flex-direction: column; + font-size: 12px; +} + +.log-title { + font-weight: bold; + font-size: var(--font-size-medium); + margin-block-end: var(--space-medium); + display: block; +} + +.log-entry { + border: 1px solid currentColor; + padding: var(--space-medium); + border-radius: var(--border-radius-small); + overflow-wrap: break-word; +} diff --git 
a/browser/components/genai/content/smart-assist.mjs b/browser/components/genai/content/smart-assist.mjs @@ -25,8 +25,10 @@ export class SmartAssist extends MozLitElement { userPrompt: { type: String }, aiResponse: { type: String }, conversationState: { type: Array }, + logState: { type: Array }, mode: { type: String }, // "tab" | "sidebar" overrideNewTab: { type: Boolean }, + showLog: { type: Boolean }, }; constructor() { @@ -37,6 +39,8 @@ export class SmartAssist extends MozLitElement { this.conversationState = [ { role: "system", content: "You are a helpful assistant" }, ]; + this.logState = []; + this.showLog = false; this.mode = "sidebar"; this.overrideNewTab = Services.prefs.getBoolPref( "browser.ml.smartAssist.overrideNewTab" @@ -61,6 +65,11 @@ export class SmartAssist extends MozLitElement { this.conversationState = [...this.conversationState, chatEntry]; }; + _updatelogState = chatEntry => { + const entryWithDate = { ...chatEntry, date: new Date().toLocaleString() }; + this.logState = [...this.logState, entryWithDate]; + }; + _handlePromptInput = e => { const value = e.target.value; this.userPrompt = value; @@ -87,7 +96,17 @@ export class SmartAssist extends MozLitElement { ); for await (const chunk of stream) { + // Check to see if chunk is special tool calling log and add to logState + if (chunk.type === "tool_call_log") { + this._updatelogState({ + content: chunk.content, + result: chunk.result || "No result", + }); + continue; + } acc += chunk; + // append to the latest assistant message + this.conversationState[latestAssistantMessageIndex] = { ...this.conversationState[latestAssistantMessageIndex], content: acc, @@ -135,16 +154,20 @@ export class SmartAssist extends MozLitElement { rel="stylesheet" href="chrome://browser/content/genai/content/smart-assist.css" /> - <div> - ${this.mode === "sidebar" - ? 
html` <sidebar-panel-header - data-l10n-id="genai-smart-assist-sidebar-title" - data-l10n-attrs="heading" - view="viewGenaiSmartAssistSidebar" - ></sidebar-panel-header>` - : ""} - - <div class="wrapper"> + <div class="wrapper"> + ${ + this.mode === "sidebar" + ? html` <sidebar-panel-header + data-l10n-id="genai-smart-assist-sidebar-title" + data-l10n-attrs="heading" + view="viewGenaiSmartAssistSidebar" + ></sidebar-panel-header>` + : "" + } + + <div> + + <!-- Conversation Panel --> <div> ${this.conversationState .filter(msg => msg.role !== "system") @@ -158,6 +181,42 @@ export class SmartAssist extends MozLitElement { </div>` )} </div> + + <!-- Log Panel --> + ${ + this.logState.length !== 0 + ? html` <div class="log-panel"> + <div class="log-header"> + <span class="log-title">Log</span> + <moz-button + type="ghost" + iconSrc="chrome://global/skin/icons/arrow-down.svg" + @click=${() => { + this.showLog = !this.showLog; + }} + > + </moz-button> + </div> + <div class="log-entries"> + ${this.logState.map( + data => + html`<div class="log-entry"> + <div><b>Message</b> : ${data.content}</div> + <div><b>Date</b> : ${data.date}</div> + <div> + <b>Tool Response</b> : + ${JSON.stringify(data.result)} + </div> + </div>` + )} + </div> + </div>` + : html`` + } + + </div> + + <!-- User Input --> <textarea .value=${this.userPrompt} class="prompt-textarea" @@ -172,16 +231,19 @@ export class SmartAssist extends MozLitElement { Submit </moz-button> - ${this.mode === "sidebar" - ? html`<div class="footer"> - <moz-checkbox - type="checkbox" - label="Mock Full Page Experience" - @change=${e => this._onToggleFullPage(e)} - ?checked=${this.overrideNewTab} - ></moz-checkbox> - </div>` - : ""} + <!-- Footer - New Tab Override --> + ${ + this.mode === "sidebar" + ? 
html`<div class="footer"> + <moz-checkbox + type="checkbox" + label="Mock Full Page Experience" + @change=${e => this._onToggleFullPage(e)} + ?checked=${this.overrideNewTab} + ></moz-checkbox> + </div>` + : "" + } </div> </div> `; diff --git a/toolkit/components/ml/actors/MLEngineParent.sys.mjs b/toolkit/components/ml/actors/MLEngineParent.sys.mjs @@ -1542,6 +1542,7 @@ export class MLEngine { text: chunk.metadata.text, tokens: chunk.metadata.tokens, isPrompt: chunk.metadata.isPrompt, + toolCalls: chunk.metadata.toolCalls, }; chunkPromise = responseChunkResolvers.getAndAdvanceChunkPromise(); } else if (this.#port === null) { diff --git a/toolkit/components/ml/content/backends/OpenAIPipeline.mjs b/toolkit/components/ml/content/backends/OpenAIPipeline.mjs @@ -89,20 +89,27 @@ export class OpenAIPipeline { * @param {boolean} args.isDone - Whether this is the final progress update */ #sendProgress(args) { - const { content, requestId, inferenceProgressCallback, port, isDone } = - args; + const { + content, + requestId, + inferenceProgressCallback, + port, + isDone, + toolCalls, + } = args; port?.postMessage({ text: content, + ...(toolCalls ? { toolCalls } : {}), ...(isDone && { done: true, finalOutput: content }), ok: true, }); - inferenceProgressCallback?.({ ok: true, metadata: { text: content, requestId, tokens: [], + ...(toolCalls ? { toolCalls } : {}), }, type: Progress.ProgressType.INFERENCE, statusText: isDone @@ -112,6 +119,66 @@ export class OpenAIPipeline { } /** + * Finalizes the tool calls by sorting them by index and returning an array of call objects. 
+ * + * @param {map} acc + * @returns {Array} + */ + + #finalizeToolCalls(acc) { + // Convert Map entries ([index, call]) to an array + const entries = Array.from(acc.entries()); + // Sort by the numeric index + entries.sort((a, b) => a[0] - b[0]); + // Return just the call objects (drop the index) + return entries.map(([_index, call]) => call); + } + + /** + * Because we are streaming here, we may get multiple partial tool_calls deltas and need to merge them together. + * This helper does that by looking at the index property on each tool call fragment. + * + * @param {map} acc - Accumulated tool calls map + * @param {Array} deltas - New tool call fragments to merge + * @returns {map} Merged tool calls map + */ + #mergeToolDeltas(acc, deltas) { + // If no deltas, return acc unchanged + if (!Array.isArray(deltas) || deltas.length === 0) { + return acc; + } + + let next = new Map(acc); // shallow copy to keep immutability + + for (const toolCall of deltas) { + const idx = toolCall.index ?? 0; + const existing = next.get(idx) || { + id: null, + type: "function", + function: { name: "", arguments: "" }, + }; + const nameFrag = toolCall.function?.name ?? ""; + const argsFrag = toolCall.function?.arguments ?? ""; + + // Merge fragments into previous entry + const merged = { + id: toolCall.id ?? existing.id, + type: "function", + function: { + name: nameFrag + ? existing.function.name + nameFrag + : existing.function.name, + arguments: argsFrag + ? existing.function.arguments + argsFrag + : existing.function.arguments, + }, + }; + next.set(idx, merged); + } + return next; + } + + /** * Handles streaming response from the OpenAI API. * Processes each chunk as it arrives and sends progress updates. 
* @@ -132,25 +199,55 @@ export class OpenAIPipeline { inferenceProgressCallback, port, } = args; + const stream = await client.chat.completions.create(completionParams); + let streamOutput = ""; + let toolAcc = new Map(); + let sawToolCallsFinish = false; for await (const chunk of stream) { - const content = chunk.choices[0]?.delta?.content || ""; - if (content) { - streamOutput += content; + const choice = chunk?.choices?.[0]; + const delta = choice?.delta ?? {}; + + // Normal text tokens + if (delta.content) { + streamOutput += delta.content; + this.#sendProgress({ + content: delta.content, + requestId, + inferenceProgressCallback, + port, + isDone: false, + }); + } + + if (Array.isArray(delta.tool_calls) && delta.tool_calls.length) { + toolAcc = this.#mergeToolDeltas(toolAcc, delta.tool_calls); + } + + // If the model signals it wants tools now + if (choice?.finish_reason === "tool_calls") { + sawToolCallsFinish = true; + const toolCalls = this.#finalizeToolCalls(toolAcc); + + // Emit the completed tool calls to the caller so they can execute them. this.#sendProgress({ - content, + content: "", // no user-visible text here requestId, inferenceProgressCallback, port, isDone: false, + toolCalls, }); + + // Typically end this assistant turn here. + break; } } + // Final message: does not carry full content to avoid duplication this.#sendProgress({ - content: "", requestId, inferenceProgressCallback, port, @@ -160,6 +257,9 @@ export class OpenAIPipeline { return { finalOutput: streamOutput, metrics: [], + ...(sawToolCallsFinish + ? 
{ toolCalls: this.#finalizeToolCalls(toolAcc) } + : {}), }; } @@ -184,8 +284,11 @@ export class OpenAIPipeline { inferenceProgressCallback, port, } = args; + const completion = await client.chat.completions.create(completionParams); - const output = completion.choices[0].message.content; + const message = completion.choices[0].message; + const output = message.content || ""; + const toolCalls = message.tool_calls || null; this.#sendProgress({ content: output, @@ -193,11 +296,13 @@ export class OpenAIPipeline { inferenceProgressCallback, port, isDone: true, + toolCalls, }); return { finalOutput: output, metrics: [], + ...(toolCalls ? { toolCalls } : {}), }; } @@ -227,11 +332,13 @@ export class OpenAIPipeline { apiKey: apiKey || "ollama", }); const stream = request.streamOptions?.enabled || false; + const tools = request.tools || []; const completionParams = { model: modelId, messages: request.args, stream, + tools, }; const args = { diff --git a/toolkit/components/ml/tests/browser/browser_ml_engine.js b/toolkit/components/ml/tests/browser/browser_ml_engine.js @@ -35,6 +35,28 @@ async function checkForRemoteType(remoteType) { return false; } +const SHARED_TOOLS = [ + { + type: "function", + function: { + name: "search_open_tabs", + description: "Search open tabs by type.", + parameters: { + type: "object", + properties: { type: { type: "string" } }, + required: ["type"], + }, + }, + }, +]; + +const BASE_ENGINE_OPTIONS = { + featureId: "about-inference", + taskName: "text-generation", + modelId: "qwen3:0.6b", + modelRevision: "main", +}; + /** * End to End test that the engine is indeed initialized with wllama when it is the * best-llama. 
@@ -1970,3 +1992,171 @@ add_task(async function test_openai_client() { await stopMockOpenAI(mockServer); } }); + +add_task(async function test_openai_client_tools_non_streaming() { + const records = [ + { + ...BASE_ENGINE_OPTIONS, + id: "74a71cfd-1734-44e6-85c0-69cf3e874138", + }, + ]; + const { cleanup } = await setup({ records }); + const { server: mockServer, port } = startMockOpenAI(); + + const engineInstance = await createEngine({ + ...BASE_ENGINE_OPTIONS, + apiKey: "ollama", + baseURL: `http://localhost:${port}/v1`, + backend: "openai", + }); + + // First request: ask with tools; server responds with tool_calls + const requestWithTools = { + args: [ + { role: "system", content: "You are a helpful assistant." }, + { role: "user", content: "Find my open news tabs." }, + ], + tools: SHARED_TOOLS, + }; + + try { + info("Run request that triggers tool calls"); + const result1 = await engineInstance.run(requestWithTools); + + // The pipeline should surface toolCalls from the OpenAI message + Assert.ok(result1.toolCalls, "toolCalls should exist on the result"); + Assert.equal(result1.toolCalls.length, 1, "Exactly one tool call"); + Assert.equal( + result1.toolCalls[0].function.name, + "search_open_tabs", + "Tool name should match" + ); + + // Second request: append assistant tool_calls + our tool result + const assistantToolCallsMsg = { + role: "assistant", + tool_calls: result1.toolCalls.map(tc => ({ + id: tc.id, + type: "function", + function: { + name: tc.function.name, + arguments: tc.function.arguments, + }, + })), + }; + + const toolResultMsg = { + role: "tool", + tool_call_id: result1.toolCalls[0].id, + content: JSON.stringify({ query: "news", allTabs: [] }), + }; + + const followup = await engineInstance.run({ + args: [...requestWithTools.args, assistantToolCallsMsg, toolResultMsg], + tools: requestWithTools.tools, // still valid to include + }); + + Assert.equal( + followup.finalOutput, + "Here are the tabs I found for you.", + "Should get assistant 
follow-up after tool result" + ); + } finally { + await EngineProcess.destroyMLEngine(); + await cleanup(); + await stopMockOpenAI(mockServer); + } +}); + +add_task(async function test_openai_client_tools_streaming() { + const records = [ + { + ...BASE_ENGINE_OPTIONS, + id: "b3b2b661-daa6-4b7f-8d3c-7db0df0dbeef", + }, + ]; + const { cleanup } = await setup({ records }); + const { server: mockServer, port } = startMockOpenAI(); + + const engineInstance = await createEngine({ + ...BASE_ENGINE_OPTIONS, + apiKey: "ollama", + baseURL: `http://localhost:${port}/v1`, + backend: "openai", + }); + + const starter = { + args: [ + { role: "system", content: "You are a helpful assistant." }, + { role: "user", content: "Find my open news tabs." }, + ], + tools: SHARED_TOOLS, + streamOptions: { enabled: true }, + }; + + try { + // --- First turn: expect tool_calls via streaming --- + const gen = engineInstance.runWithGenerator(starter); + + let toolCalls = null; + for await (const chunk of gen) { + // Your MLEngineParent + OpenAIPipeline put toolCalls onto the yielded chunk + if (chunk.toolCalls && chunk.toolCalls.length) { + toolCalls = chunk.toolCalls; + break; // we end the turn when model asks for tools + } + // (Optional) you could accumulate chunk.text here; expected empty in this turn + } + + Assert.ok(toolCalls, "Should receive toolCalls via streaming"); + Assert.equal(toolCalls.length, 1, "One tool call"); + Assert.equal( + toolCalls[0].function.name, + "search_open_tabs", + "Tool name should match" + ); + + // --- Second turn: send tool result, stream final answer --- + const assistantToolCallsMsg = { + role: "assistant", + tool_calls: toolCalls.map(tc => ({ + id: tc.id, + type: "function", + function: { + name: tc.function.name, + arguments: tc.function.arguments, + }, + })), + }; + + const toolResultMsg = { + role: "tool", + tool_call_id: toolCalls[0].id, + content: JSON.stringify({ query: "news", allTabs: [] }), + }; + + const gen2 = 
engineInstance.runWithGenerator({ + args: [...starter.args, assistantToolCallsMsg, toolResultMsg], + tools: SHARED_TOOLS, + streamOptions: { enabled: true }, + }); + + let final = ""; + for await (const chunk of gen2) { + if (chunk.text) { + final += chunk.text; + } + } + + Assert.ok(final.length, "Should stream some final content"); + Assert.equal( + final, + "Here are the tabs I found for you.", + "Should stream the expected assistant follow-up" + ); + } finally { + await EngineProcess.destroyMLEngine(); + await cleanup(); + await stopMockOpenAI(mockServer); + } +}); diff --git a/toolkit/components/ml/tests/browser/head.js b/toolkit/components/ml/tests/browser/head.js @@ -793,6 +793,224 @@ function startMockOpenAI({ echo = "This gets echoed." } = {}) { } info("bodyText: " + bodyText); + let body; + try { + body = JSON.parse(bodyText || "{}"); + } catch (_) { + body = {}; + } + + const wantsStream = !!body.stream; + const tools = Array.isArray(body.tools) ? body.tools : []; + const askedForTools = tools.length; + const messages = Array.isArray(body.messages) ? 
body.messages : []; + const hasToolResult = messages.some(m => m && m.role === "tool"); + + // ---- SSE helpers (for streaming mode) ---- + function startSSE() { + response.setStatusLine(request.httpVersion, 200, "OK"); + response.setHeader( + "Content-Type", + "text/event-stream; charset=utf-8", + false + ); + response.setHeader("Cache-Control", "no-cache", false); + response.setHeader("Access-Control-Allow-Origin", "*", false); + response.processAsync(); + } + function sendSSE(obj) { + const line = `data: ${JSON.stringify(obj)}\n\n`; + response.write(line); + } + function endSSE() { + response.write("data: [DONE]\n\n"); + response.finish(); + } + + // =========================== + // STREAMING BRANCHES (SSE) + // =========================== + if (wantsStream && askedForTools && !hasToolResult) { + // First turn: stream partial tool_calls, then finish with "tool_calls" + startSSE(); + + // Partial 1: name/args prefix + sendSSE({ + id: "chatcmpl-mock-tools-stream-1", + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: "qwen3:0.6b", + choices: [ + { + index: 0, + delta: { + content: "", + tool_calls: [ + { + index: 0, + id: "call_1", + type: "function", + function: { name: "search_", arguments: '{ "type": "ne' }, + }, + ], + }, + finish_reason: null, + }, + ], + }); + + // Partial 2: complete name/args + sendSSE({ + id: "chatcmpl-mock-tools-stream-2", + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: "qwen3:0.6b", + choices: [ + { + index: 0, + delta: { + content: "", + tool_calls: [ + { + index: 0, + id: "call_1", + type: "function", + function: { name: "open_tabs", arguments: 'ws" }' }, + }, + ], + }, + finish_reason: null, + }, + ], + }); + + // Signal the turn ends with tool calls + sendSSE({ + id: "chatcmpl-mock-tools-stream-3", + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: "qwen3:0.6b", + choices: [{ index: 0, delta: {}, finish_reason: "tool_calls" }], 
+ }); + + endSSE(); + return; + } + + if (wantsStream && askedForTools && hasToolResult) { + // Second turn (after tool result): stream normal assistant text + startSSE(); + + sendSSE({ + id: "chatcmpl-mock-tools-stream-4", + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: "qwen3:0.6b", + choices: [ + { + index: 0, + delta: { content: "Here are the tabs " }, + finish_reason: null, + }, + ], + }); + + sendSSE({ + id: "chatcmpl-mock-tools-stream-5", + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: "qwen3:0.6b", + choices: [ + { + index: 0, + delta: { content: "I found for you." }, + finish_reason: "stop", + }, + ], + }); + + endSSE(); + return; + } + + // =========================== + // NON-STREAMING BRANCHES + // =========================== + + // First turn w/ tools: return tool_calls message (finish_reason: tool_calls) + if (askedForTools && !hasToolResult) { + const payload = { + id: "chatcmpl-mock-tools-1", + object: "chat.completion", + created: Math.floor(Date.now() / 1000), + model: "qwen3:0.6b", + choices: [ + { + index: 0, + message: { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_1", + type: "function", + function: { + name: "search_open_tabs", + arguments: JSON.stringify({ type: "news" }), + }, + }, + ], + }, + finish_reason: "tool_calls", + }, + ], + usage: { prompt_tokens: 10, completion_tokens: 0, total_tokens: 10 }, + echo, + }; + + response.setStatusLine(request.httpVersion, 200, "OK"); + response.setHeader( + "Content-Type", + "application/json; charset=utf-8", + false + ); + response.setHeader("Access-Control-Allow-Origin", "*", false); + response.write(JSON.stringify(payload)); + return; + } + + // Second turn w/ tools (after tool result): normal assistant message + if (askedForTools && hasToolResult) { + const payload = { + id: "chatcmpl-mock-tools-2", + object: "chat.completion", + created: Math.floor(Date.now() / 1000), + model: "qwen3:0.6b", + 
choices: [ + { + index: 0, + message: { + role: "assistant", + content: "Here are the tabs I found for you.", + }, + finish_reason: "stop", + }, + ], + usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 }, + echo, + }; + + response.setStatusLine(request.httpVersion, 200, "OK"); + response.setHeader( + "Content-Type", + "application/json; charset=utf-8", + false + ); + response.setHeader("Access-Control-Allow-Origin", "*", false); + response.write(JSON.stringify(payload)); + return; + } + const payload = { id: "chatcmpl-mock-1", object: "chat.completion",