commit 14c08f0368ead8bfdddec62f43e0bb5c8fd61289
parent 628102e30e4929c64d70d5f15ee4b4c04ad6e84d
Author: Molly Shillabeer <mshillabeer@mozilla.com>
Date: Mon, 22 Dec 2025 16:49:05 +0000
Bug 2006433 - Move conversation starter and follow up inference to ai window r=tzhang,ai-models-reviewers
Differential Revision: https://phabricator.services.mozilla.com/D276702
Diffstat:
7 files changed, 1618 insertions(+), 0 deletions(-)
diff --git a/browser/base/content/test/static/browser_all_files_referenced.js b/browser/base/content/test/static/browser_all_files_referenced.js
@@ -367,6 +367,10 @@ var allowlist = [
{
file: "moz-src:///browser/components/aiwindow/models/InsightsConversationScheduler.sys.mjs",
},
+ // Bug 2006433 - Implement conversation starter/followup inference
+ {
+ file: "moz-src:///browser/components/aiwindow/models/ConversationSuggestions.sys.mjs",
+ },
];
if (AppConstants.NIGHTLY_BUILD) {
diff --git a/browser/components/aiwindow/models/ConversationSuggestions.sys.mjs b/browser/components/aiwindow/models/ConversationSuggestions.sys.mjs
@@ -0,0 +1,261 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+
+// conversation starter/followup generation functions
+
+import {
+ openAIEngine,
+ renderPrompt,
+} from "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs";
+
+import {
+ conversationStarterPrompt,
+ conversationFollowupPrompt,
+ conversationInsightsPrompt,
+} from "moz-src:///browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs";
+
+import { MESSAGE_ROLE } from "moz-src:///browser/components/aiwindow/ui/modules/ChatStore.sys.mjs";
+
+import { InsightsManager } from "moz-src:///browser/components/aiwindow/models/InsightsManager.sys.mjs";
+
+// Max number of insights to include in prompts; bounds prompt length
+// (and therefore token cost) when user insights are interpolated.
+const MAX_NUM_INSIGHTS = 8;
+
+/**
+ * Helper to trim conversation history to recent messages, dropping empty messages, tool calls and responses
+ *
+ * @param {Array} messages - Array of chat messages
+ * @param {number} maxMessages - Max number of messages to keep (default 15)
+ * @returns {Array} Trimmed array of user/assistant messages
+ */
+export function trimConversation(messages, maxMessages = 15) {
+ const out = [];
+
+ for (const m of messages) {
+ if (
+ (m.role === MESSAGE_ROLE.USER || m.role === MESSAGE_ROLE.ASSISTANT) &&
+ m.content &&
+ m.content.trim()
+ ) {
+ const roleString = m.role === MESSAGE_ROLE.USER ? "user" : "assistant";
+ out.push({ role: roleString, content: m.content });
+ }
+ }
+
+ return out.slice(-maxMessages);
+}
+
+/**
+ * Helper to add insights to base prompt if applicable
+ *
+ * @param {string} base - base prompt
+ * @returns {Promise<string>} - prompt with insights added if applicable
+ */
+export async function addInsightsToPrompt(base) {
+ let insightSummaries =
+ await InsightsGetterForSuggestionPrompts.getInsightSummariesForPrompt(
+ MAX_NUM_INSIGHTS
+ );
+ if (insightSummaries.length) {
+ const insightsBlock = insightSummaries.map(s => `- ${s}`).join("\n");
+ const insightPrompt = await renderPrompt(conversationInsightsPrompt, {
+ insights: insightsBlock,
+ });
+ return `${base}\n${insightPrompt}`;
+ }
+ return base;
+}
+
+/**
+ * Cleans inference output into array of prompts
+ *
+ * @param {*} result - Inference output result object
+ * @returns {Array<string>} - Cleaned array of prompts
+ */
+export function cleanInferenceOutput(result) {
+ const text = (result.finalOutput || "").trim();
+ const lines = text
+ .split(/\n+/)
+ .map(l => l.trim())
+ .filter(Boolean);
+
+ const prompts = lines
+ .map(line => line.replace(/^[-*\d.)\[\]]+\s*/, ""))
+ .filter(p => p.length)
+ .map(p => p.replace(/\.$/, "").replace(/^[^:]*:\s*/, ""));
+ return prompts;
+}
+
+/**
+ * Format object to JSON string safely
+ *
+ * @param {*} obj - Object to format
+ * @returns {string} JSON string or string representation
+ */
+const formatJson = obj => {
+  try {
+    return JSON.stringify(obj);
+  } catch {
+    // JSON.stringify throws on circular structures; fall back to the default
+    // string conversion rather than propagating the error into prompt building.
+    return String(obj);
+  }
+};
+
+/**
+ * Generates conversation starter prompts based on tab context + (optional) user insights
+ *
+ * @param {Array} contextTabs - Array of tab objects with title, url, favicon
+ * @param {number} n - Number of suggestions to generate (default 6)
+ * @param {boolean} useInsights - Whether to include user insights in prompt (default false)
+ * @returns {Promise<Array>} Array of {text, type} suggestion objects
+ */
+export async function generateConversationStartersSidebar(
+ contextTabs = [],
+ n = 2,
+ useInsights = false
+) {
+ try {
+ const today = new Date().toISOString().slice(0, 10);
+
+ // Format current tab (first in context or empty)
+ const currentTab = contextTabs.length
+ ? formatJson({ title: contextTabs[0].title, url: contextTabs[0].url })
+ : "No current tab";
+
+ // Format opened tabs
+ let openedTabs;
+ if (contextTabs.length >= 1) {
+ openedTabs =
+ contextTabs.length === 1
+ ? "Only current tab is open"
+ : formatJson(
+ contextTabs.slice(1).map(t => ({ title: t.title, url: t.url }))
+ );
+ } else {
+ openedTabs = "No tabs available";
+ }
+
+ // Base template
+ const base = await renderPrompt(conversationStarterPrompt, {
+ current_tab: currentTab,
+ open_tabs: openedTabs,
+ n: String(n),
+ date: today,
+ });
+
+ let filled = useInsights
+ ? await addInsightsToPrompt(base, useInsights)
+ : base;
+
+ const engineInstance = await openAIEngine.build("starter");
+
+ const result = await engineInstance.run({
+ messages: [
+ {
+ role: "system",
+ content: "Return only the requested suggestions, one per line.",
+ },
+ { role: "user", content: filled },
+ ],
+ });
+
+ const prompts = cleanInferenceOutput(result);
+
+ return prompts.slice(0, n).map(t => ({ text: t, type: "chat" }));
+ } catch (e) {
+ console.warn(
+ "[ConversationSuggestions][sidebar-conversation-starters] failed:",
+ e
+ );
+ return [];
+ }
+}
+
+/**
+ * Generates followup prompt suggestions based on conversation history
+ *
+ * @param {Array} conversationHistory - Array of chat messages
+ * @param {object} currentTab - Current tab object with title, url
+ * @param {number} n - Number of suggestions to generate (default 6)
+ * @param {boolean} useInsights - Whether to include user insights in prompt (default false)
+ * @returns {Promise<Array>} Array of {text, type} suggestion objects
+ */
+export async function generateFollowupPrompts(
+ conversationHistory,
+ currentTab,
+ n = 2,
+ useInsights = false
+) {
+ try {
+ const today = new Date().toISOString().slice(0, 10);
+ const convo = trimConversation(conversationHistory);
+ const currentTabStr =
+ currentTab && Object.keys(currentTab).length
+ ? formatJson({ title: currentTab.title, url: currentTab.url })
+ : "No tab";
+ const base = await renderPrompt(conversationFollowupPrompt, {
+ current_tab: currentTabStr,
+ conversation: formatJson(convo),
+ n: String(n),
+ date: today,
+ });
+
+ let filled = useInsights
+ ? await addInsightsToPrompt(base, useInsights)
+ : base;
+
+ const engineInstance = await openAIEngine.build("followup");
+
+ const result = await engineInstance.run({
+ messages: [
+ {
+ role: "system",
+ content: "Return only the requested suggestions, one per line.",
+ },
+ { role: "user", content: filled },
+ ],
+ });
+
+ const prompts = cleanInferenceOutput(result);
+
+ return prompts.slice(0, n).map(t => ({ text: t, type: "chat" }));
+ } catch (e) {
+ console.warn("[ConversationSuggestions][followup-prompts] failed:", e);
+ return [];
+ }
+}
+
+export const InsightsGetterForSuggestionPrompts = {
+ /**
+ * Gets the requested number of unique insight summaries for prompt inclusion
+ *
+ * @param {number} maxInsights - Max number of insights to return (default MAX_NUM_INSIGHTS)
+ * @returns {Promise<Array>} Array of string insight summaries
+ */
+
+ async getInsightSummariesForPrompt(maxInsights) {
+ const insightSummaries = [];
+ const insightEntries = (await InsightsManager.getAllInsights()) || {};
+ const seenSummaries = new Set();
+
+ for (const { insight_summary } of insightEntries) {
+ const summaryText = String(insight_summary ?? "").trim();
+ if (!summaryText) {
+ continue;
+ }
+ const lower = summaryText.toLowerCase();
+ if (seenSummaries.has(lower)) {
+ continue;
+ }
+ seenSummaries.add(lower);
+ insightSummaries.push(summaryText);
+ if (insightSummaries.length >= maxInsights) {
+ break;
+ }
+ }
+
+ return insightSummaries;
+ },
+};
diff --git a/browser/components/aiwindow/models/moz.build b/browser/components/aiwindow/models/moz.build
@@ -16,6 +16,7 @@ BROWSER_CHROME_MANIFESTS += [
MOZ_SRC_FILES += [
"Chat.sys.mjs",
"ChatUtils.sys.mjs",
+ "ConversationSuggestions.sys.mjs",
"Insights.sys.mjs",
"InsightsChatSource.sys.mjs",
"InsightsConstants.sys.mjs",
diff --git a/browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs b/browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs
@@ -0,0 +1,120 @@
+/**
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+
+// Shared prompt section describing the assistant's tools and hard capability
+// limits; interpolated into the starter and followup templates below.
+const assistantLimitations = `The following tools are available to the browser assistant:
+- get_open_tabs(): Access the user's browser and return a list of the most recently browsed data
+- get_page_content(url): Retrieve cleaned text content of the provided browser page URL
+- search_browsing_history(search_term, start_ts, end_ts): Retrieve pages from the user's past browsing history, optionally filtered by topic and/or time range
+
+Browser Assistant Capabilities & Limitations:
+1. The browser assistant is not agentic; the human user performs all actions.
+- The assistant can:
+- Provide information, comparisons, explanations, and instructions
+- Suggest next steps, links, or search queries for users to act on
+- Summarize, analyze, or explain visible content
+- The assistant cannot:
+- Click, scroll, or type on webpages
+- Fill or submit forms
+- Make purchases or reservations
+- Change browser settings, themes, or extensions
+- Execute multi-step or autonomous web tasks
+2. The browser assistant can read only visible page content.
+- Accessible: current tab, open tabs, fully opened emails or messages
+- Not accessible: unopened messages/emails, passwords, cookies, payment info, private/incognito browsing data, local or system-level files
+3. The assistant will decline to answer when it identifies agentic or unsafe requests.`;
+
+// Version metadata for the starter prompt (presumably used to track prompt
+// revisions — confirm against prompt-consumer code).
+export const conversationStarterPromptMetadata = {
+  version: "0.1",
+};
+// Template placeholders filled by renderPrompt: {date}, {current_tab},
+// {open_tabs}, {n}.
+export const conversationStarterPrompt = `You are an expert in suggesting conversation starters for a browser assistant.
+
+========
+Today's date:
+{date}
+
+========
+Current Tab:
+{current_tab}
+
+========
+Open Tabs:
+{open_tabs}
+
+========
+${assistantLimitations}
+
+========
+Task:
+Generate exactly {n} conversation starter suggestions that can help the user begin a chat with the browser assistant about the current tab.
+
+Rules:
+- Each suggestion must be under 8 words; fewer is better. Be concise and specific
+- All suggestions must be about the current tab, you can assume what the content of the page is based on the title and url
+- Use context from open tabs only if they are related to the current tab to enhance suggestions (eg comparison); ignore unrelated tabs
+- Provide diverse suggestions; avoid duplicates across suggestions
+- Suggestions should be common questions or requests that make logical sense
+- Do not generate suggestions requiring clicking, scrolling, opening new pages, submitting forms, saving, sharing, or other behaviors that violate browser assistant capabilities
+- Prioritize suggestions that help the user engage with the current tab in new ways
+- Each suggestion must reference a specific element from the current tab when possible. Avoid generic phrasing.
+- Do not use words that imply personal traits unless the current context contains those attributes (eg “family-friendly”, “healthy”, “budget-conscious”)
+- Fallback suggestions may only be used if the current tab provides no actionable information: "What can you do with this content?", "Explain key ideas from this page"
+- Suggestions should make sense for the content type of the current tab (recipe, social media, email, video, article, product page, landing page, round up, comparison, etc)
+- Suggestions must be equally spread across 3 intent categories:
+  - Plan: turn scattered info into steps eg) plan an activity, make a list, compare
+  - Consume: transform page content eg) get key points, explain, analyze
+  - Create: edit or respond to existing content eg) draft, proofread, rephrase
+
+Return ONLY the suggestions, one per line, no numbering, no extra formatting. Sort from most to least relevant.`;
+
+export const conversationFollowupPromptMetadata = {
+ version: "0.1",
+};
+export const conversationFollowupPrompt = `You are an expert suggesting next responses or queries for a user during a conversation with an AI browser assistant.
+
+========
+Today's date:
+{date}
+
+========
+Current Tab:
+{current_tab}
+
+========
+Conversation History (latest last):
+{conversation}
+
+========
+${assistantLimitations}
+
+========
+Generate {n} suggested next responses or queries that the user might want to message next.
+
+Rules:
+- Each suggestions must be under 8 words; fewer is better.
+- Focus on conversational topics that the browser assistant can help with
+- Stay relevant to the current tab and recent assistant replies; assume there are no other open tabs
+- If the most recent browser assistant reply ended with a question, generate at least 1 suggestion that directly and logically answers that question.
+- Assume the user has already taken any actions requested by the browser assistant when responding to questions.
+ - eg) If the assistant asked "Would you like me to generate a summary?", one suggestion should be "Yes, summarize the article"
+- Consider the content type of the current tab (recipe, social media, email, video, article, product page, landing page, round up, comparison, etc)
+- Suggestions should focus on 3 main intents, use these as inspiration: plan steps/lists, transform content (summarize, analyze, explain), respond to existing content (draft reply, proofread, rephrase)
+- Do not repeat earlier user messages verbatim
+- Provide diverse and helpful suggestions based on the conversation
+- Suggestions should not violate browser assistant capabilities & limitations
+
+Return ONLY the suggestions, one per line, no numbering, no extra formatting.`;
+
+// Version metadata for the insights section (presumably used to track prompt
+// revisions — confirm against prompt-consumer code).
+export const conversationInsightsPromptMetadata = {
+  version: "0.1",
+};
+// Optional section appended to either template when user insights exist.
+// Placeholder filled by renderPrompt: {insights} (newline-separated bullets).
+export const conversationInsightsPrompt = `========
+User Insights:
+{insights}
+
+Guideline:
+- Only use insights that are relevant to the current tab; ignore irrelevant insights
+- Do not repeat insights verbatim or reveal sensitive details; just use them to inform suggestion generation
+- Do not invent new personal attributes or insights; prefer neutral phrasing when unsure`;
diff --git a/browser/components/aiwindow/models/prompts/moz.build b/browser/components/aiwindow/models/prompts/moz.build
@@ -7,6 +7,7 @@ with Files("**"):
MOZ_SRC_FILES += [
"AssistantPrompts.sys.mjs",
+ "ConversationSuggestionsPrompts.sys.mjs",
"InsightsPrompts.sys.mjs",
"TitleGenerationPrompts.sys.mjs",
]
diff --git a/browser/components/aiwindow/models/tests/xpcshell/test_ConversationSuggestions.js b/browser/components/aiwindow/models/tests/xpcshell/test_ConversationSuggestions.js
@@ -0,0 +1,1229 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+do_get_profile();
+
+const {
+ trimConversation,
+ addInsightsToPrompt,
+ cleanInferenceOutput,
+ generateConversationStartersSidebar,
+ generateFollowupPrompts,
+ InsightsGetterForSuggestionPrompts,
+} = ChromeUtils.importESModule(
+ "moz-src:///browser/components/aiwindow/models/ConversationSuggestions.sys.mjs"
+);
+
+const { openAIEngine } = ChromeUtils.importESModule(
+ "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs"
+);
+const { InsightsManager } = ChromeUtils.importESModule(
+ "moz-src:///browser/components/aiwindow/models/InsightsManager.sys.mjs"
+);
+const { MESSAGE_ROLE } = ChromeUtils.importESModule(
+ "moz-src:///browser/components/aiwindow/ui/modules/ChatStore.sys.mjs"
+);
+const { sinon } = ChromeUtils.importESModule(
+ "resource://testing-common/Sinon.sys.mjs"
+);
+
+/**
+ * Constants for preference keys and test values
+ */
+const PREF_API_KEY = "browser.aiwindow.apiKey";
+const PREF_ENDPOINT = "browser.aiwindow.endpoint";
+const PREF_MODEL = "browser.aiwindow.model";
+
+// Dummy engine configuration; the network is never hit because the engine
+// factory is stubbed in every task below.
+const API_KEY = "test-api-key";
+const ENDPOINT = "https://api.test-endpoint.com/v1";
+const MODEL = "test-model";
+
+/**
+ * Cleans up preferences after testing
+ */
+registerCleanupFunction(() => {
+ for (let pref of [PREF_API_KEY, PREF_ENDPOINT, PREF_MODEL]) {
+ if (Services.prefs.prefHasUserValue(pref)) {
+ Services.prefs.clearUserPref(pref);
+ }
+ }
+});
+
+/**
+ * Tests for trimConversation function
+ */
+add_task(async function test_trimConversation() {
+  const cases = [
+    {
+      input: [
+        // empty case
+      ],
+      expected: [],
+    },
+    {
+      input: [
+        // more than 15 messages
+        ...Array.from({ length: 20 }, (_, i) => ({
+          role: i % 2 === 0 ? MESSAGE_ROLE.USER : MESSAGE_ROLE.ASSISTANT,
+          content: `Message ${i + 1}`,
+        })),
+      ],
+      expected: [
+        // last 15 of the 20: "Message 6" through "Message 20";
+        // (i + 5) keeps the role parity aligned with the dropped prefix
+        ...Array.from({ length: 15 }, (_, i) => ({
+          role: (i + 5) % 2 === 0 ? "user" : "assistant",
+          content: `Message ${i + 6}`,
+        })),
+      ],
+    },
+    {
+      input: [
+        // should remove tool call/responses
+        { role: MESSAGE_ROLE.USER, content: "What's the weather like?" },
+        {
+          role: MESSAGE_ROLE.ASSISTANT,
+          content: "",
+          tool_call: { name: "get_weather", arguments: "{}" },
+        },
+        {
+          role: MESSAGE_ROLE.TOOL,
+          content: "Here are the latest news articles.",
+        },
+      ],
+      expected: [{ role: "user", content: "What's the weather like?" }],
+    },
+    {
+      input: [
+        // should remove system message
+        { role: MESSAGE_ROLE.SYSTEM, content: "System message" },
+        { role: MESSAGE_ROLE.USER, content: "Hello" },
+        { role: MESSAGE_ROLE.ASSISTANT, content: "Hi there!" },
+      ],
+      expected: [
+        { role: "user", content: "Hello" },
+        { role: "assistant", content: "Hi there!" },
+      ],
+    },
+    {
+      input: [
+        // should remove messages with empty content
+        { role: MESSAGE_ROLE.USER, content: "\n" },
+        { role: MESSAGE_ROLE.ASSISTANT, content: " " },
+      ],
+      expected: [],
+    },
+    {
+      input: [
+        // no valid messages
+        { role: MESSAGE_ROLE.SYSTEM, content: "System message" },
+        {
+          role: MESSAGE_ROLE.ASSISTANT,
+          content: "",
+          tool_call: { name: "get_info", arguments: "{}" },
+        },
+      ],
+      expected: [],
+    },
+    {
+      input: [
+        // should slice after filtering invalid messages
+        // (20 raw messages, but only 10 valid — all 10 must survive)
+        ...Array(10)
+          .fill(0)
+          .flatMap((_, i) => [
+            {
+              role: MESSAGE_ROLE.USER,
+              content: `User message ${i + 1}`,
+            },
+            { role: MESSAGE_ROLE.SYSTEM, content: "System message" },
+          ]),
+      ],
+      expected: [
+        ...Array.from({ length: 10 }, (_, i) => ({
+          role: "user",
+          content: `User message ${i + 1}`,
+        })),
+      ],
+    },
+  ];
+
+  for (const { input, expected } of cases) {
+    const result = trimConversation(input);
+    Assert.deepEqual(
+      result,
+      expected,
+      "trimConversation should return only user/assistant messages with content"
+    );
+  }
+});
+
+/**
+ * Test for addInsightsToPrompt function when there are insights
+ */
+add_task(async function test_addInsightsToPrompt_have_insights() {
+  const sb = sinon.createSandbox();
+  try {
+    const basePrompt = "Base prompt content.";
+    const fakeInsights = ["Insight summary 1", "Insight summary 2"];
+    // Stub the insights getter so no real InsightsManager lookup happens
+    const insightsStub = sb
+      .stub(InsightsGetterForSuggestionPrompts, "getInsightSummariesForPrompt")
+      .resolves(fakeInsights);
+    const promptWithInsights = await addInsightsToPrompt(basePrompt);
+    Assert.ok(
+      insightsStub.calledOnce,
+      "getInsightSummariesForPrompt should be called"
+    );
+    // Summaries are rendered as "- <summary>" bullet lines inside the prompt
+    Assert.ok(
+      promptWithInsights.includes("- Insight summary 1") &&
+        promptWithInsights.includes("- Insight summary 2"),
+      "Prompt should include insights"
+    );
+  } finally {
+    sb.restore();
+  }
+});
+
+/**
+ * Test for addInsightsToPrompt function when there are no insights
+ */
+add_task(async function test_addInsightsToPrompt_dont_have_insights() {
+  const sb = sinon.createSandbox();
+  try {
+    const basePrompt = "Base prompt content.";
+    const fakeInsights = [];
+    // Stub the getter to return no insights; the base prompt should pass
+    // through addInsightsToPrompt completely unchanged.
+    const insightsStub = sb
+      .stub(InsightsGetterForSuggestionPrompts, "getInsightSummariesForPrompt")
+      .resolves(fakeInsights);
+    const promptWithInsights = await addInsightsToPrompt(basePrompt);
+    Assert.ok(
+      insightsStub.calledOnce,
+      "getInsightSummariesForPrompt should be called"
+    );
+    Assert.equal(
+      promptWithInsights,
+      basePrompt,
+      "Prompt should be unchanged when no insights"
+    );
+  } finally {
+    sb.restore();
+  }
+});
+
+/**
+ * Tests for cleanInferenceOutput function
+ */
+add_task(function test_cleanInferenceOutput() {
+ const sb = sinon.createSandbox();
+ try {
+ const cases = [
+ {
+ input: {
+ finalOutput: `- Suggestion 1\n\n* Suggestion 2\n1. Suggestion 3.\n2)Suggestion 4\n[5] Suggestion 5\n6.\nLabel: Suggestion 6\nSuggestion 7.\nSuggestion 8?`,
+ },
+ expected: [
+ "Suggestion 1",
+ "Suggestion 2",
+ "Suggestion 3",
+ "Suggestion 4",
+ "Suggestion 5",
+ "Suggestion 6",
+ "Suggestion 7",
+ "Suggestion 8?",
+ ],
+ },
+ {
+ input: { finalOutput: `Suggestion X\nSuggestion Y\nSuggestion Z` },
+ expected: ["Suggestion X", "Suggestion Y", "Suggestion Z"],
+ },
+ {
+ input: { finalOutput: "" },
+ expected: [],
+ },
+ ];
+
+ for (const { input, expected } of cases) {
+ const result = cleanInferenceOutput(input);
+ Assert.deepEqual(
+ result,
+ expected,
+ "cleanInferenceOutput should return expected output"
+ );
+ }
+ } finally {
+ sb.restore();
+ }
+});
+
+/**
+ * Tests for generateConversationStartersSidebar successfully generating suggestions
+ */
+add_task(async function test_generateConversationStartersSidebar_happy_path() {
+  Services.prefs.setStringPref(PREF_API_KEY, API_KEY);
+  Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT);
+  Services.prefs.setStringPref(PREF_MODEL, MODEL);
+
+  const sb = sinon.createSandbox();
+  try {
+    // Mock the openAIEngine and insights response
+    // Engine emits 6 noisy lines; only the first n (3) cleaned ones survive
+    const fakeEngine = {
+      run: sb.stub().resolves({
+        finalOutput: `1. Suggestion 1\n\n- Suggestion 2\nLabel: Suggestion 3.\nSuggestion 4\nSuggestion 5\nSuggestion 6`,
+      }),
+    };
+    sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine);
+
+    const fakeInsights = ["Insight summary 1", "Insight summary 2"];
+    const insightsStub = sb
+      .stub(InsightsGetterForSuggestionPrompts, "getInsightSummariesForPrompt")
+      .resolves(fakeInsights);
+
+    const n = 3;
+    const contextTabs = [
+      { title: "Current Tab", url: "https://current.example.com" },
+      { title: "Tab 2", url: "https://tab2.example.com" },
+    ];
+
+    // useInsights = true: both tab context and insights should reach the prompt
+    const result = await generateConversationStartersSidebar(
+      contextTabs,
+      n,
+      true
+    );
+    Assert.ok(fakeEngine.run.calledOnce, "Engine run should be called once");
+    Assert.ok(
+      insightsStub.calledOnce,
+      "getInsightSummariesForPrompt should be called once"
+    );
+
+    // Verify the prompt content
+    const callArgs = fakeEngine.run.firstCall.args[0];
+    Assert.equal(
+      callArgs.messages.length,
+      2,
+      "run should be called with 2 messages"
+    );
+    Assert.ok(
+      callArgs.messages[1].content.includes(
+        '{"title":"Current Tab","url":"https://current.example.com"}'
+      ),
+      "Prompt should include current tab info"
+    );
+    Assert.ok(
+      callArgs.messages[1].content.includes(
+        '{"title":"Tab 2","url":"https://tab2.example.com"}'
+      ),
+      "Prompt should include other tab info"
+    );
+    Assert.ok(
+      callArgs.messages[1].content.includes(
+        "\n- Insight summary 1\n- Insight summary 2"
+      ),
+      "Prompt should include insight summaries"
+    );
+
+    Assert.deepEqual(
+      result,
+      [
+        { text: "Suggestion 1", type: "chat" },
+        { text: "Suggestion 2", type: "chat" },
+        { text: "Suggestion 3", type: "chat" },
+      ],
+      "Suggestions should match expected values"
+    );
+  } finally {
+    sb.restore();
+  }
+});
+
+/**
+ * Tests for generateConversationStartersSidebar without including insights
+ */
+add_task(
+  async function test_generateConversationStartersSidebar_without_insights() {
+    Services.prefs.setStringPref(PREF_API_KEY, API_KEY);
+    Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT);
+    Services.prefs.setStringPref(PREF_MODEL, MODEL);
+
+    const sb = sinon.createSandbox();
+    try {
+      // Mock the openAIEngine and insights response
+      const fakeEngine = {
+        run: sb.stub().resolves({
+          finalOutput: `1. Suggestion 1\n\n- Suggestion 2\nLabel: Suggestion 3.\nSuggestion 4\nSuggestion 5\nSuggestion 6`,
+        }),
+      };
+      sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine);
+
+      const fakeInsights = ["Insight summary 1", "Insight summary 2"];
+      const insightsStub = sb
+        .stub(
+          InsightsGetterForSuggestionPrompts,
+          "getInsightSummariesForPrompt"
+        )
+        .resolves(fakeInsights);
+
+      const n = 3;
+      const contextTabs = [
+        { title: "Current Tab", url: "https://current.example.com" },
+        { title: "Tab 2", url: "https://tab2.example.com" },
+      ];
+
+      // useInsights = false: the insights getter must never be consulted
+      const result = await generateConversationStartersSidebar(
+        contextTabs,
+        n,
+        false
+      );
+      Assert.ok(fakeEngine.run.calledOnce, "Engine run should be called once");
+      Assert.ok(
+        !insightsStub.calledOnce,
+        "getInsightSummariesForPrompt shouldn't be called"
+      );
+
+      // Verify the prompt content
+      const callArgs = fakeEngine.run.firstCall.args[0];
+      Assert.equal(
+        callArgs.messages.length,
+        2,
+        "run should be called with 2 messages"
+      );
+      Assert.ok(
+        callArgs.messages[1].content.includes(
+          '{"title":"Current Tab","url":"https://current.example.com"}'
+        ),
+        "Prompt should include current tab info"
+      );
+      Assert.ok(
+        callArgs.messages[1].content.includes(
+          '{"title":"Tab 2","url":"https://tab2.example.com"}'
+        ),
+        "Prompt should include other tab info"
+      );
+      Assert.ok(
+        !callArgs.messages[1].content.includes(
+          "\n- Insight summary 1\n- Insight summary 2"
+        ),
+        "Prompt should not include insight summaries"
+      );
+
+      Assert.deepEqual(
+        result,
+        [
+          { text: "Suggestion 1", type: "chat" },
+          { text: "Suggestion 2", type: "chat" },
+          { text: "Suggestion 3", type: "chat" },
+        ],
+        "Suggestions should match expected values"
+      );
+    } finally {
+      sb.restore();
+    }
+  }
+);
+
+/**
+ * Tests for generateConversationStartersSidebar when no insights are returned
+ */
+add_task(
+  async function test_generateConversationStartersSidebar_no_insights_returned() {
+    Services.prefs.setStringPref(PREF_API_KEY, API_KEY);
+    Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT);
+    Services.prefs.setStringPref(PREF_MODEL, MODEL);
+
+    const sb = sinon.createSandbox();
+    try {
+      // Mock the openAIEngine and insights response
+      const fakeEngine = {
+        run: sb.stub().resolves({
+          finalOutput: `1. Suggestion 1\n\n- Suggestion 2\nLabel: Suggestion 3.\nSuggestion 4\nSuggestion 5\nSuggestion 6`,
+        }),
+      };
+      sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine);
+
+      // useInsights = true but the getter yields nothing: the getter is still
+      // called, yet no insights block should appear in the prompt
+      const fakeInsights = [];
+      const insightsStub = sb
+        .stub(
+          InsightsGetterForSuggestionPrompts,
+          "getInsightSummariesForPrompt"
+        )
+        .resolves(fakeInsights);
+
+      const n = 3;
+      const contextTabs = [
+        { title: "Current Tab", url: "https://current.example.com" },
+        { title: "Tab 2", url: "https://tab2.example.com" },
+      ];
+
+      const result = await generateConversationStartersSidebar(
+        contextTabs,
+        n,
+        true
+      );
+      Assert.ok(fakeEngine.run.calledOnce, "Engine run should be called once");
+      Assert.ok(
+        insightsStub.calledOnce,
+        "getInsightSummariesForPrompt should be called once"
+      );
+
+      // Verify the prompt content
+      const callArgs = fakeEngine.run.firstCall.args[0];
+      Assert.equal(
+        callArgs.messages.length,
+        2,
+        "run should be called with 2 messages"
+      );
+      Assert.ok(
+        callArgs.messages[1].content.includes(
+          '{"title":"Current Tab","url":"https://current.example.com"}'
+        ),
+        "Prompt should include current tab info"
+      );
+      Assert.ok(
+        callArgs.messages[1].content.includes(
+          '{"title":"Tab 2","url":"https://tab2.example.com"}'
+        ),
+        "Prompt should include other tab info"
+      );
+      Assert.ok(
+        !callArgs.messages[1].content.includes("\nUser Insights:\n"),
+        "Prompt shouldn't include user insights block"
+      );
+
+      Assert.deepEqual(
+        result,
+        [
+          { text: "Suggestion 1", type: "chat" },
+          { text: "Suggestion 2", type: "chat" },
+          { text: "Suggestion 3", type: "chat" },
+        ],
+        "Suggestions should match expected values"
+      );
+    } finally {
+      sb.restore();
+    }
+  }
+);
+
+/**
+ * Tests for generateConversationStartersSidebar when no tabs are provided
+ */
+add_task(async function test_generateConversationStartersSidebar_no_tabs() {
+  Services.prefs.setStringPref(PREF_API_KEY, API_KEY);
+  Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT);
+  Services.prefs.setStringPref(PREF_MODEL, MODEL);
+
+  const sb = sinon.createSandbox();
+  try {
+    // Mock the openAIEngine and insights response
+    const fakeEngine = {
+      run: sb.stub().resolves({
+        finalOutput: `1. Suggestion 1\n\n- Suggestion 2\nLabel: Suggestion 3.\nSuggestion 4\nSuggestion 5\nSuggestion 6`,
+      }),
+    };
+    sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine);
+
+    const fakeInsights = ["Insight summary 1", "Insight summary 2"];
+    const insightsStub = sb
+      .stub(InsightsGetterForSuggestionPrompts, "getInsightSummariesForPrompt")
+      .resolves(fakeInsights);
+
+    // Empty tab context: prompt should use both fallback strings
+    const n = 3;
+    const contextTabs = [];
+
+    const result = await generateConversationStartersSidebar(
+      contextTabs,
+      n,
+      true
+    );
+    Assert.ok(fakeEngine.run.calledOnce, "Engine run should be called once");
+    Assert.ok(
+      insightsStub.calledOnce,
+      "getInsightSummariesForPrompt should be called once"
+    );
+
+    // Verify the prompt content
+    const callArgs = fakeEngine.run.firstCall.args[0];
+    Assert.equal(
+      callArgs.messages.length,
+      2,
+      "run should be called with 2 messages"
+    );
+    Assert.ok(
+      callArgs.messages[1].content.includes("\nNo current tab\n"),
+      "Prompt should indicate no current tab"
+    );
+    Assert.ok(
+      callArgs.messages[1].content.includes("\nNo tabs available\n"),
+      "Prompt should indicate no tabs available"
+    );
+    Assert.ok(
+      callArgs.messages[1].content.includes(
+        "\n- Insight summary 1\n- Insight summary 2"
+      ),
+      "Prompt should include insight summaries"
+    );
+
+    Assert.deepEqual(
+      result,
+      [
+        { text: "Suggestion 1", type: "chat" },
+        { text: "Suggestion 2", type: "chat" },
+        { text: "Suggestion 3", type: "chat" },
+      ],
+      "Suggestions should match expected values"
+    );
+  } finally {
+    sb.restore();
+  }
+});
+
+/**
+ * Tests for generateConversationStartersSidebar with one tab provided
+ */
+add_task(async function test_generateConversationStartersSidebar_one_tab() {
+  Services.prefs.setStringPref(PREF_API_KEY, API_KEY);
+  Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT);
+  Services.prefs.setStringPref(PREF_MODEL, MODEL);
+
+  const sb = sinon.createSandbox();
+  try {
+    // Mock the openAIEngine and insights response
+    const fakeEngine = {
+      run: sb.stub().resolves({
+        finalOutput: `1. Suggestion 1\n\n- Suggestion 2\nLabel: Suggestion 3.\nSuggestion 4\nSuggestion 5\nSuggestion 6`,
+      }),
+    };
+    sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine);
+
+    const fakeInsights = ["Insight summary 1", "Insight summary 2"];
+    const insightsStub = sb
+      .stub(InsightsGetterForSuggestionPrompts, "getInsightSummariesForPrompt")
+      .resolves(fakeInsights);
+
+    // Single-tab context: open-tabs section should use its fallback string
+    const n = 3;
+    const contextTabs = [
+      { title: "Only Tab", url: "https://only.example.com" },
+    ];
+
+    const result = await generateConversationStartersSidebar(
+      contextTabs,
+      n,
+      true
+    );
+    Assert.ok(fakeEngine.run.calledOnce, "Engine run should be called once");
+    Assert.ok(
+      insightsStub.calledOnce,
+      "getInsightSummariesForPrompt should be called once"
+    );
+
+    // Verify the prompt content
+    const callArgs = fakeEngine.run.firstCall.args[0];
+    Assert.equal(
+      callArgs.messages.length,
+      2,
+      "run should be called with 2 messages"
+    );
+    Assert.ok(
+      callArgs.messages[1].content.includes(
+        '\n{"title":"Only Tab","url":"https://only.example.com"}'
+      ),
+      "Prompt should include current tab info"
+    );
+    Assert.ok(
+      callArgs.messages[1].content.includes("\nOnly current tab is open\n"),
+      "Prompt should indicate only current tab is open"
+    );
+    Assert.ok(
+      callArgs.messages[1].content.includes(
+        "\n- Insight summary 1\n- Insight summary 2"
+      ),
+      "Prompt should include insight summaries"
+    );
+
+    Assert.deepEqual(
+      result,
+      [
+        { text: "Suggestion 1", type: "chat" },
+        { text: "Suggestion 2", type: "chat" },
+        { text: "Suggestion 3", type: "chat" },
+      ],
+      "Suggestions should match expected values"
+    );
+  } finally {
+    sb.restore();
+  }
+});
+
+/**
+ * Tests that generateConversationStartersSidebar handles engine errors gracefully
+ */
+add_task(
+  async function test_generateConversationStartersSidebar_engine_error() {
+    Services.prefs.setStringPref(PREF_API_KEY, API_KEY);
+    Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT);
+    Services.prefs.setStringPref(PREF_MODEL, MODEL);
+
+    const sb = sinon.createSandbox();
+    try {
+      // Mock the openAIEngine and insights response; run() rejects to
+      // simulate an inference failure.
+      const fakeEngine = {
+        run: sb.stub().rejects(new Error("Engine failure")),
+      };
+      sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine);
+
+      const fakeInsights = ["Insight summary 1", "Insight summary 2"];
+      sb.stub(
+        InsightsGetterForSuggestionPrompts,
+        "getInsightSummariesForPrompt"
+      ).resolves(fakeInsights);
+
+      const n = 3;
+      const contextTabs = [
+        { title: "Only Tab", url: "https://only.example.com" },
+      ];
+
+      // The error must be swallowed and surfaced as an empty suggestion list.
+      const result = await generateConversationStartersSidebar(
+        contextTabs,
+        n,
+        true
+      );
+      Assert.deepEqual(result, [], "Should return empty array on engine error");
+    } finally {
+      sb.restore();
+    }
+  }
+);
+
+/**
+ * Tests for generateFollowupPrompts successfully generating suggestions
+ */
+add_task(async function test_generateFollowupPrompts_happy_path() {
+  Services.prefs.setStringPref(PREF_API_KEY, API_KEY);
+  Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT);
+  Services.prefs.setStringPref(PREF_MODEL, MODEL);
+
+  const sb = sinon.createSandbox();
+  try {
+    // Mock the openAIEngine and insights response
+    const fakeEngine = {
+      run: sb.stub().resolves({
+        finalOutput: `1. Suggestion 1\n\n- Suggestion 2.\nSuggestion 3.\nSuggestion 4\nSuggestion 5\nSuggestion 6`,
+      }),
+    };
+    sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine);
+
+    const fakeInsights = ["Insight summary 1", "Insight summary 2"];
+    const insightsStub = sb
+      .stub(InsightsGetterForSuggestionPrompts, "getInsightSummariesForPrompt")
+      .resolves(fakeInsights);
+
+    const n = 2;
+    const conversationHistory = [
+      { role: MESSAGE_ROLE.USER, content: "Hello" },
+      { role: MESSAGE_ROLE.ASSISTANT, content: "Hi there!" },
+    ];
+    const currentTab = {
+      title: "Current Tab",
+      url: "https://current.example.com",
+    };
+
+    // Using insights
+    const result = await generateFollowupPrompts(
+      conversationHistory,
+      currentTab,
+      n,
+      true
+    );
+    Assert.ok(fakeEngine.run.calledOnce, "Engine run should be called once");
+    Assert.ok(
+      insightsStub.calledOnce,
+      "getInsightSummariesForPrompt should be called once"
+    );
+
+    // Verify the prompt content: tab, history, and insights all present.
+    const callArgs = fakeEngine.run.firstCall.args[0];
+    Assert.equal(
+      callArgs.messages.length,
+      2,
+      "run should be called with 2 messages"
+    );
+    Assert.ok(
+      callArgs.messages[1].content.includes(
+        '{"title":"Current Tab","url":"https://current.example.com"}'
+      ),
+      "Prompt should include current tab info"
+    );
+    Assert.ok(
+      callArgs.messages[1].content.includes(
+        '[{"role":"user","content":"Hello"},{"role":"assistant","content":"Hi there!"}]'
+      ),
+      "Prompt should include conversation history"
+    );
+    Assert.ok(
+      callArgs.messages[1].content.includes(
+        "\n- Insight summary 1\n- Insight summary 2"
+      ),
+      "Prompt should include insight summaries"
+    );
+
+    // Only the first n of the six raw suggestions should be returned.
+    Assert.deepEqual(
+      result,
+      [
+        { text: "Suggestion 1", type: "chat" },
+        { text: "Suggestion 2", type: "chat" },
+      ],
+      "Suggestions should match"
+    );
+  } finally {
+    sb.restore();
+  }
+});
+
+/**
+ * Tests for generateFollowupPrompts without including insights
+ */
+add_task(async function test_generateFollowupPrompts_no_insights() {
+ Services.prefs.setStringPref(PREF_API_KEY, API_KEY);
+ Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT);
+ Services.prefs.setStringPref(PREF_MODEL, MODEL);
+
+ const sb = sinon.createSandbox();
+ try {
+ // Mock the openAIEngine and insights response
+ const fakeEngine = {
+ run: sb.stub().resolves({
+ finalOutput: `1. Suggestion 1\n\n- Suggestion 2.\nSuggestion 3.\nSuggestion 4\nSuggestion 5\nSuggestion 6`,
+ }),
+ };
+ sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine);
+
+ const fakeInsights = ["Insight summary 1", "Insight summary 2"];
+ const insightsStub = sb
+ .stub(InsightsGetterForSuggestionPrompts, "getInsightSummariesForPrompt")
+ .resolves(fakeInsights);
+
+ const n = 2;
+ const conversationHistory = [
+ { role: MESSAGE_ROLE.USER, content: "Hello" },
+ { role: MESSAGE_ROLE.ASSISTANT, content: "Hi there!" },
+ ];
+ const currentTab = {
+ title: "Current Tab",
+ url: "https://current.example.com",
+ };
+
+ const result = await generateFollowupPrompts(
+ conversationHistory,
+ currentTab,
+ n,
+ false
+ );
+ Assert.ok(fakeEngine.run.calledOnce, "Engine run should be called once");
+ Assert.ok(
+ !insightsStub.calledOnce,
+ "getInsightSummariesForPrompt shouldn't be called"
+ );
+
+ const callArgs = fakeEngine.run.firstCall.args[0];
+ Assert.equal(
+ callArgs.messages.length,
+ 2,
+ "run should be called with 2 messages"
+ );
+ Assert.ok(
+ callArgs.messages[1].content.includes(
+ '{"title":"Current Tab","url":"https://current.example.com"}'
+ ),
+ "Prompt should include current tab info"
+ );
+ Assert.ok(
+ callArgs.messages[1].content.includes(
+ '[{"role":"user","content":"Hello"},{"role":"assistant","content":"Hi there!"}]'
+ ),
+ "Prompt should include conversation history"
+ );
+ Assert.ok(
+ !callArgs.messages[1].content.includes(
+ "\n- Insight summary 1\n- Insight summary 2"
+ ),
+ "Prompt shouldn't include insight summaries"
+ );
+
+ Assert.deepEqual(
+ result,
+ [
+ { text: "Suggestion 1", type: "chat" },
+ { text: "Suggestion 2", type: "chat" },
+ ],
+ "Suggestions should match"
+ );
+ } finally {
+ sb.restore();
+ }
+});
+
+/**
+ * Tests for generateFollowupPrompts when no insights are returned
+ */
+add_task(async function test_generateFollowupPrompts_no_insights_returned() {
+  Services.prefs.setStringPref(PREF_API_KEY, API_KEY);
+  Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT);
+  Services.prefs.setStringPref(PREF_MODEL, MODEL);
+
+  const sb = sinon.createSandbox();
+  try {
+    // Mock the openAIEngine and insights response
+    const fakeEngine = {
+      run: sb.stub().resolves({
+        finalOutput: `1. Suggestion 1\n\n- Suggestion 2.\nSuggestion 3.\nSuggestion 4\nSuggestion 5\nSuggestion 6`,
+      }),
+    };
+    sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine);
+
+    // Insights are requested but the store yields none; the prompt must
+    // then omit the whole user-insights block.
+    const fakeInsights = [];
+    const insightsStub = sb
+      .stub(InsightsGetterForSuggestionPrompts, "getInsightSummariesForPrompt")
+      .resolves(fakeInsights);
+
+    const n = 2;
+    const conversationHistory = [
+      { role: MESSAGE_ROLE.USER, content: "Hello" },
+      { role: MESSAGE_ROLE.ASSISTANT, content: "Hi there!" },
+    ];
+    const currentTab = {
+      title: "Current Tab",
+      url: "https://current.example.com",
+    };
+
+    // Using insights
+    const result = await generateFollowupPrompts(
+      conversationHistory,
+      currentTab,
+      n,
+      true
+    );
+    Assert.ok(fakeEngine.run.calledOnce, "Engine run should be called once");
+    Assert.ok(
+      insightsStub.calledOnce,
+      "getInsightSummariesForPrompt should be called once"
+    );
+
+    const callArgs = fakeEngine.run.firstCall.args[0];
+    Assert.equal(
+      callArgs.messages.length,
+      2,
+      "run should be called with 2 messages"
+    );
+    Assert.ok(
+      callArgs.messages[1].content.includes(
+        '{"title":"Current Tab","url":"https://current.example.com"}'
+      ),
+      "Prompt should include current tab info"
+    );
+    Assert.ok(
+      callArgs.messages[1].content.includes(
+        '[{"role":"user","content":"Hello"},{"role":"assistant","content":"Hi there!"}]'
+      ),
+      "Prompt should include conversation history"
+    );
+    Assert.ok(
+      !callArgs.messages[1].content.includes("\nUser Insights:\n"),
+      "Prompt shouldn't include user insights block"
+    );
+
+    Assert.deepEqual(
+      result,
+      [
+        { text: "Suggestion 1", type: "chat" },
+        { text: "Suggestion 2", type: "chat" },
+      ],
+      "Suggestions should match"
+    );
+  } finally {
+    sb.restore();
+  }
+});
+
+/**
+ * Tests for generateFollowupPrompts without a current tab
+ */
+add_task(async function test_generateFollowupPrompts_no_current_tab() {
+ Services.prefs.setStringPref(PREF_API_KEY, API_KEY);
+ Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT);
+ Services.prefs.setStringPref(PREF_MODEL, MODEL);
+
+ const sb = sinon.createSandbox();
+ try {
+ // Mock the openAIEngine and insights response
+ const fakeEngine = {
+ run: sb.stub().resolves({
+ finalOutput: `1. Suggestion 1\n\n- Suggestion 2.\nSuggestion 3.\nSuggestion 4\nSuggestion 5\nSuggestion 6`,
+ }),
+ };
+ sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine);
+
+ const fakeInsights = [];
+ const insightsStub = sb
+ .stub(InsightsGetterForSuggestionPrompts, "getInsightSummariesForPrompt")
+ .resolves(fakeInsights);
+
+ const n = 2;
+ const conversationHistory = [
+ { role: MESSAGE_ROLE.USER, content: "Hello" },
+ { role: MESSAGE_ROLE.ASSISTANT, content: "Hi there!" },
+ ];
+ const currentTab = {};
+
+ const result = await generateFollowupPrompts(
+ conversationHistory,
+ currentTab,
+ n,
+ false
+ );
+ Assert.ok(fakeEngine.run.calledOnce, "Engine run should be called once");
+ Assert.ok(
+ !insightsStub.calledOnce,
+ "getInsightSummariesForPrompt shouldn't be called"
+ );
+
+ const callArgs = fakeEngine.run.firstCall.args[0];
+ Assert.equal(
+ callArgs.messages.length,
+ 2,
+ "run should be called with 2 messages"
+ );
+ Assert.ok(
+ callArgs.messages[1].content.includes("\nNo tab\n"),
+ "Prompt shouldn't include any tab info"
+ );
+ Assert.ok(
+ callArgs.messages[1].content.includes(
+ '[{"role":"user","content":"Hello"},{"role":"assistant","content":"Hi there!"}]'
+ ),
+ "Prompt should include conversation history"
+ );
+ Assert.ok(
+ !callArgs.messages[1].content.includes("\nUser Insights:\n"),
+ "Prompt shouldn't include user insights block"
+ );
+
+ Assert.deepEqual(
+ result,
+ [
+ { text: "Suggestion 1", type: "chat" },
+ { text: "Suggestion 2", type: "chat" },
+ ],
+ "Suggestions should match"
+ );
+ } finally {
+ sb.restore();
+ }
+});
+
+/**
+ * Tests that generateFollowupPrompts handles engine errors gracefully
+ */
+add_task(async function test_generateFollowupPrompts_engine_error() {
+  Services.prefs.setStringPref(PREF_API_KEY, API_KEY);
+  Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT);
+  Services.prefs.setStringPref(PREF_MODEL, MODEL);
+
+  const sb = sinon.createSandbox();
+  try {
+    // Mock the openAIEngine and insights response; run() rejects to
+    // simulate an inference failure.
+    const fakeEngine = {
+      run: sb.stub().rejects(new Error("Engine failure")),
+    };
+    sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine);
+
+    const fakeInsights = [];
+    sb.stub(
+      InsightsGetterForSuggestionPrompts,
+      "getInsightSummariesForPrompt"
+    ).resolves(fakeInsights);
+
+    const n = 2;
+    const conversationHistory = [
+      { role: MESSAGE_ROLE.USER, content: "Hello" },
+      { role: MESSAGE_ROLE.ASSISTANT, content: "Hi there!" },
+    ];
+    const currentTab = {};
+
+    // The error must be swallowed and surfaced as an empty suggestion list.
+    const result = await generateFollowupPrompts(
+      conversationHistory,
+      currentTab,
+      n,
+      false
+    );
+    Assert.deepEqual(result, [], "Should return empty array on engine error");
+  } finally {
+    sb.restore();
+  }
+});
+
+/**
+ * Tests for getInsightSummariesForPrompt happy path
+ */
+add_task(async function test_getInsightSummariesForPrompt_happy_path() {
+  const sb = sinon.createSandbox();
+  try {
+    // Mock the InsightStore to return fixed insights
+    const fakeInsights = [
+      {
+        insight_summary: "Insight summary 1",
+      },
+      {
+        insight_summary: "Insight summary 2",
+      },
+      {
+        insight_summary: "Insight summary 3",
+      },
+    ];
+
+    sb.stub(InsightsManager, "getAllInsights").resolves(fakeInsights);
+
+    // maxInsights caps the result: only the first 2 of 3 summaries survive.
+    const maxInsights = 2;
+    const summaries =
+      await InsightsGetterForSuggestionPrompts.getInsightSummariesForPrompt(
+        maxInsights
+      );
+
+    Assert.deepEqual(
+      summaries,
+      ["Insight summary 1", "Insight summary 2"],
+      "Insight summaries should match expected values"
+    );
+  } finally {
+    sb.restore();
+  }
+});
+
+/**
+ * Tests for getInsightSummariesForPrompt when no insights are returned
+ */
+add_task(async function test_getInsightSummariesForPrompt_no_insights() {
+  const sb = sinon.createSandbox();
+  try {
+    // Mock the InsightStore to return fixed insights (none in this case).
+    const fakeInsights = [];
+
+    sb.stub(InsightsManager, "getAllInsights").resolves(fakeInsights);
+
+    const maxInsights = 2;
+    const summaries =
+      await InsightsGetterForSuggestionPrompts.getInsightSummariesForPrompt(
+        maxInsights
+      );
+
+    // An empty store must yield an empty list, not an error.
+    Assert.equal(
+      summaries.length,
+      0,
+      `getInsightSummariesForPrompt(${maxInsights}) should return 0 summaries`
+    );
+  } finally {
+    sb.restore();
+  }
+});
+
+/**
+ * Tests for getInsightSummariesForPrompt with fewer insights than maxInsights
+ */
+add_task(async function test_getInsightSummariesForPrompt_too_few_insights() {
+  const sb = sinon.createSandbox();
+  try {
+    // Mock the InsightStore to return fixed insights
+    const fakeInsights = [
+      {
+        insight_summary: "Insight summary 1",
+      },
+    ];
+
+    sb.stub(InsightsManager, "getAllInsights").resolves(fakeInsights);
+
+    // Cap (2) exceeds available insights (1): all available are returned.
+    const maxInsights = 2;
+    const summaries =
+      await InsightsGetterForSuggestionPrompts.getInsightSummariesForPrompt(
+        maxInsights
+      );
+
+    Assert.deepEqual(
+      summaries,
+      ["Insight summary 1"],
+      "Insight summaries should match expected values"
+    );
+  } finally {
+    sb.restore();
+  }
+});
+
+/**
+ * Tests for getInsightSummariesForPrompt handling duplicate summaries
+ */
+add_task(async function test_getInsightSummariesForPrompt_duplicates() {
+  const sb = sinon.createSandbox();
+  try {
+    // Mock the InsightStore to return fixed insights. The first two differ
+    // only in case, so dedup is expected to be case-insensitive and keep
+    // the first occurrence.
+    const fakeInsights = [
+      {
+        insight_summary: "Duplicate summary",
+      },
+      {
+        insight_summary: "duplicate summary",
+      },
+      {
+        insight_summary: "Unique summary",
+      },
+    ];
+
+    sb.stub(InsightsManager, "getAllInsights").resolves(fakeInsights);
+
+    const maxInsights = 2;
+    const summaries =
+      await InsightsGetterForSuggestionPrompts.getInsightSummariesForPrompt(
+        maxInsights
+      );
+
+    Assert.deepEqual(
+      summaries,
+      ["Duplicate summary", "Unique summary"],
+      "Insight summaries should match expected values"
+    );
+  } finally {
+    sb.restore();
+  }
+});
+
+/**
+ * Tests for getInsightSummariesForPrompt handling empty and whitespace-only summaries
+ */
+add_task(
+  async function test_getInsightSummariesForPrompt_empty_and_whitespace() {
+    const sb = sinon.createSandbox();
+    try {
+      // Mock the InsightStore to return fixed insights. Blank and
+      // whitespace-only summaries must be filtered out and must not count
+      // toward the maxInsights cap.
+      const fakeInsights = [
+        {
+          insight_summary: " \n",
+        },
+        {
+          insight_summary: "",
+        },
+        {
+          insight_summary: "Valid summary",
+        },
+      ];
+
+      sb.stub(InsightsManager, "getAllInsights").resolves(fakeInsights);
+
+      const maxInsights = 2;
+      const summaries =
+        await InsightsGetterForSuggestionPrompts.getInsightSummariesForPrompt(
+          maxInsights
+        );
+
+      Assert.deepEqual(
+        summaries,
+        ["Valid summary"],
+        "Insight summaries should match expected values"
+      );
+    } finally {
+      sb.restore();
+    }
+  }
+);
diff --git a/browser/components/aiwindow/models/tests/xpcshell/xpcshell.toml b/browser/components/aiwindow/models/tests/xpcshell/xpcshell.toml
@@ -10,6 +10,8 @@ support-files = []
["test_ChatUtils.js"]
+["test_ConversationSuggestions.js"]
+
["test_Insights.js"]
["test_InsightsChatSource.js"]