tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit 8c19fa8bd6e924436ee5f8126b12b50f324348ba
parent 9c30754d54423cd68a32a9a4ece6510d9cb13845
Author: Tom Zhang <tzhang@mozilla.com>
Date:   Fri,  9 Jan 2026 01:26:56 +0000

Bug 2005766 - Load model/prompt/inference configs from remote settings with local default as fallback (except for memories) r=ai-models-reviewers,bjohns,mshillabeer,ai-frontend-reviewers

Also adapt title-generation and conversation starter/followup scripts to this change.
Also fixed an import error in conversation suggestions.
Also created defaults for memories to avoid breaking code.

Differential Revision: https://phabricator.services.mozilla.com/D277387

Diffstat:
M browser/components/aiwindow/models/Chat.sys.mjs | 11 +++++++++--
M browser/components/aiwindow/models/ConversationSuggestions.sys.mjs | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------
M browser/components/aiwindow/models/TitleGeneration.sys.mjs | 16 +++++++++++-----
M browser/components/aiwindow/models/Utils.sys.mjs | 440 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
M browser/components/aiwindow/models/memories/Memories.sys.mjs | 2 +-
M browser/components/aiwindow/models/memories/MemoriesManager.sys.mjs | 9 ++++++++-
M browser/components/aiwindow/models/prompts/AssistantPrompts.sys.mjs | 3 +--
M browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs | 13 ++++++++-----
M browser/components/aiwindow/models/prompts/TitleGenerationPrompts.sys.mjs | 3 +++
A browser/components/aiwindow/models/tests/browser/ai-window-prompts-remote-settings-snapshot.json | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
M browser/components/aiwindow/models/tests/browser/browser.toml | 2 ++
A browser/components/aiwindow/models/tests/browser/browser_utils_loadConfig.js | 131 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A browser/components/aiwindow/models/tests/xpcshell/ai-window-prompts-remote-settings-snapshot.json | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
M browser/components/aiwindow/models/tests/xpcshell/test_Chat.js | 89 ++++++++++++++++++++++++++++++++++++++++++++++---------------------------
M browser/components/aiwindow/models/tests/xpcshell/test_ConversationSuggestions.js | 128 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
M browser/components/aiwindow/models/tests/xpcshell/test_Memories.js | 115 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------
M browser/components/aiwindow/models/tests/xpcshell/test_Utils.js | 13 +++++++------
A browser/components/aiwindow/models/tests/xpcshell/test_Utils_RemoteSettings.js | 374 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
M browser/components/aiwindow/models/tests/xpcshell/xpcshell.toml | 6 +++++-
M browser/components/aiwindow/ui/modules/ChatConversation.sys.mjs | 2 ++
20 files changed, 1491 insertions(+), 109 deletions(-)

diff --git a/browser/components/aiwindow/models/Chat.sys.mjs b/browser/components/aiwindow/models/Chat.sys.mjs @@ -5,7 +5,10 @@ */ import { ToolRoleOpts } from "moz-src:///browser/components/aiwindow/ui/modules/ChatMessage.sys.mjs"; -import { openAIEngine } from "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs"; +import { + MODEL_FEATURES, + openAIEngine, +} from "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs"; import { toolsConfig, getOpenTabs, @@ -39,11 +42,14 @@ export const Chat = { // @todo Bug 2007046 // Update this with correct model id + // Move engineInstance initialization up to access engineInstance.model const modelId = "qwen3-235b-a22b-instruct-2507-maas"; const toolRoleOpts = new ToolRoleOpts(modelId); const currentTurn = conversation.currentTurnIndex(); - const engineInstance = await openAIEngine.build(); + const engineInstance = await openAIEngine.build(MODEL_FEATURES.CHAT); + const config = engineInstance.getConfig(engineInstance.feature); + const inferenceParams = config?.parameters || {}; // Helper to run the model once (streaming) on current convo const streamModelResponse = () => @@ -53,6 +59,7 @@ export const Chat = { tool_choice: "auto", tools: toolsConfig, args: conversation.getMessagesInOpenAiFormat(), + ...inferenceParams, }); // Keep calling until the model finishes without requesting tools diff --git a/browser/components/aiwindow/models/ConversationSuggestions.sys.mjs b/browser/components/aiwindow/models/ConversationSuggestions.sys.mjs @@ -9,14 +9,9 @@ import { openAIEngine, renderPrompt, + MODEL_FEATURES, } from "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs"; -import { - conversationStarterPrompt, - conversationFollowupPrompt, - conversationMemoriesPrompt, -} from "moz-src:///browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs"; - import { MESSAGE_ROLE } from "moz-src:///browser/components/aiwindow/ui/modules/ChatStore.sys.mjs"; import { MemoriesManager } from 
"moz-src:///browser/components/aiwindow/models/memories/MemoriesManager.sys.mjs"; @@ -52,9 +47,10 @@ export function trimConversation(messages, maxMessages = 15) { * Helper to add memories to base prompt if applicable * * @param {string} base - base prompt + * @param {string} conversationMemoriesPrompt - the memories prompt template * @returns {Promise<string>} - prompt with memories added if applicable */ -export async function addMemoriesToPrompt(base) { +export async function addMemoriesToPrompt(base, conversationMemoriesPrompt) { let memorySummaries = await MemoriesGetterForSuggestionPrompts.getMemorySummariesForPrompt( MAX_NUM_MEMORIES @@ -196,19 +192,39 @@ export async function generateConversationStartersSidebar( openedTabs = "No tabs available"; } + // Build engine and load prompt + const engineInstance = await openAIEngine.build( + MODEL_FEATURES.CONVERSATION_SUGGESTIONS_SIDEBAR_STARTER + ); + + const conversationStarterPrompt = await engineInstance.loadPrompt( + MODEL_FEATURES.CONVERSATION_SUGGESTIONS_SIDEBAR_STARTER + ); + + const assistantLimitations = await engineInstance.loadPrompt( + MODEL_FEATURES.CONVERSATION_SUGGESTIONS_ASSISTANT_LIMITATIONS + ); + // Base template const base = await renderPrompt(conversationStarterPrompt, { current_tab: currentTab, open_tabs: openedTabs, n: String(n), date: today, + assistant_limitations: assistantLimitations, }); - let filled = useMemories - ? 
await addMemoriesToPrompt(base, useMemories) - : base; + let filled = base; + if (useMemories) { + const conversationMemoriesPrompt = await engineInstance.loadPrompt( + MODEL_FEATURES.CONVERSATION_SUGGESTIONS_MEMORIES + ); + filled = await addMemoriesToPrompt(base, conversationMemoriesPrompt); + } - const engineInstance = await openAIEngine.build("starter"); + // Get config for inference parameters + const config = engineInstance.getConfig(engineInstance.feature); + const inferenceParams = config?.parameters || {}; const result = await engineInstance.run({ messages: [ @@ -218,6 +234,7 @@ export async function generateConversationStartersSidebar( }, { role: "user", content: filled }, ], + ...inferenceParams, }); const prompts = cleanInferenceOutput(result); @@ -254,18 +271,41 @@ export async function generateFollowupPrompts( currentTab && Object.keys(currentTab).length ? formatJson({ title: currentTab.title, url: currentTab.url }) : "No tab"; + + // Build engine and load prompt + const engineInstance = await openAIEngine.build( + MODEL_FEATURES.CONVERSATION_SUGGESTIONS_FOLLOWUP + ); + + const conversationFollowupPrompt = await engineInstance.loadPrompt( + MODEL_FEATURES.CONVERSATION_SUGGESTIONS_FOLLOWUP + ); + + const assistantLimitationsFollowup = await engineInstance.loadPrompt( + MODEL_FEATURES.CONVERSATION_SUGGESTIONS_ASSISTANT_LIMITATIONS + ); + const base = await renderPrompt(conversationFollowupPrompt, { current_tab: currentTabStr, conversation: formatJson(convo), n: String(n), date: today, + assistant_limitations: assistantLimitationsFollowup, }); - let filled = useMemories - ? 
await addMemoriesToPrompt(base, useMemories) - : base; + let filled = base; + if (useMemories) { + const conversationMemoriesPrompt = await engineInstance.loadPrompt( + MODEL_FEATURES.CONVERSATION_SUGGESTIONS_MEMORIES + ); + filled = await addMemoriesToPrompt(base, conversationMemoriesPrompt); + } - const engineInstance = await openAIEngine.build("followup"); + // Get config for inference parameters + const config = engineInstance.getConfig( + MODEL_FEATURES.CONVERSATION_SUGGESTIONS_FOLLOWUP + ); + const inferenceParams = config?.parameters || {}; const result = await engineInstance.run({ messages: [ @@ -275,6 +315,7 @@ export async function generateFollowupPrompts( }, { role: "user", content: filled }, ], + ...inferenceParams, }); const prompts = cleanInferenceOutput(result); diff --git a/browser/components/aiwindow/models/TitleGeneration.sys.mjs b/browser/components/aiwindow/models/TitleGeneration.sys.mjs @@ -7,8 +7,8 @@ import { openAIEngine, renderPrompt, + MODEL_FEATURES, } from "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs"; -import { titleGenerationPrompt } from "moz-src:///browser/components/aiwindow/models/prompts/TitleGenerationPrompts.sys.mjs"; /** * Generate a default title from the first four words of a message. 
@@ -43,13 +43,14 @@ function generateDefaultTitle(message) { */ export async function generateChatTitle(message, current_tab) { try { - // Build the OpenAI engines - const engine = await openAIEngine.build(); + // Build the OpenAI engine + const engine = await openAIEngine.build(MODEL_FEATURES.TITLE_GENERATION); const tabInfo = current_tab || { url: "", title: "", description: "" }; - // Render the prompt with actual values - const systemPrompt = await renderPrompt(titleGenerationPrompt, { + // Load and render the prompt with actual values + const rawPrompt = await engine.loadPrompt(MODEL_FEATURES.TITLE_GENERATION); + const systemPrompt = await renderPrompt(rawPrompt, { current_tab: JSON.stringify(tabInfo), }); @@ -59,10 +60,15 @@ export async function generateChatTitle(message, current_tab) { { role: "user", content: message }, ]; + // Get config for inference parameters if exists + const config = engine.getConfig(engine.feature); + const inferenceParams = config?.parameters || {}; + // Call the LLM const response = await engine.run({ messages, fxAccountToken: await openAIEngine.getFxAccountToken(), + ...inferenceParams, }); // Extract the generated title from the response diff --git a/browser/components/aiwindow/models/Utils.sys.mjs b/browser/components/aiwindow/models/Utils.sys.mjs @@ -16,6 +16,180 @@ import { OAUTH_CLIENT_ID, SCOPE_PROFILE, } from "resource://gre/modules/FxAccountsCommon.sys.mjs"; +import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs"; + +const lazy = XPCOMUtils.declareLazy({ + RemoteSettings: "resource://services-settings/remote-settings.sys.mjs", +}); + +const MODEL_PREF = "browser.aiwindow.model"; + +/** + * Default engine ID used for all AI Window features + */ +export const DEFAULT_ENGINE_ID = "smart-openai"; + +/** + * Service types for different AI Window features + */ +export const SERVICE_TYPES = Object.freeze({ + AI: "ai", + MEMORIES: "memories", +}); + +/** + * Observer for model preference changes. 
+ * Invalidates the Remote Settings client cache when user changes their model preference. + */ +const modelPrefObserver = { + observe(_subject, topic, data) { + if (topic === "nsPref:changed" && data === MODEL_PREF) { + console.warn( + "Model preference changed, invalidating Remote Settings cache" + ); + openAIEngine._remoteClient = null; + } + }, +}; +Services.prefs.addObserver(MODEL_PREF, modelPrefObserver); + +/** + * Feature identifiers for AI Window model, configurations and prompts. + * These are used to look up model configs, prompts, and inference parameters + * from Remote Settings or local defaults. + */ +export const MODEL_FEATURES = Object.freeze({ + CHAT: "chat", + TITLE_GENERATION: "title-generation", + CONVERSATION_SUGGESTIONS_SIDEBAR_STARTER: + "conversation-suggestions-sidebar-starter", + CONVERSATION_SUGGESTIONS_FOLLOWUP: "conversation-suggestions-followup", + CONVERSATION_SUGGESTIONS_ASSISTANT_LIMITATIONS: + "conversation-suggestions-assistant-limitations", + CONVERSATION_SUGGESTIONS_MEMORIES: "conversation-suggestions-memories", + // TODO: update with actual memories prompts identifiers + MEMORIES: "memories", +}); + +/** + * Default model IDs for each feature. + * These are Mozilla's recommended models, used when user hasn't configured + * custom settings or when remote setting retrieval fails. 
+ */ +export const DEFAULT_MODEL = Object.freeze({ + [MODEL_FEATURES.CHAT]: "qwen3-235b-a22b-instruct-2507-maas", + [MODEL_FEATURES.TITLE_GENERATION]: "qwen3-235b-a22b-instruct-2507-maas", + [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_SIDEBAR_STARTER]: + "qwen3-235b-a22b-instruct-2507-maas", + [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_FOLLOWUP]: + "qwen3-235b-a22b-instruct-2507-maas", + [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_ASSISTANT_LIMITATIONS]: + "qwen3-235b-a22b-instruct-2507-maas", + [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_INSIGHTS]: + "qwen3-235b-a22b-instruct-2507-maas", + // TODO: update with actual memories default model + [MODEL_FEATURES.MEMORIES]: "qwen3-235b-a22b-instruct-2507-maas", +}); + +/** + * Major version compatibility requirements for each feature. + * When incrementing a feature's major version: + * - Update this constant + * - Ensure Remote Settings has configs for the new major version + * - Old clients will continue using old major version + */ +export const FEATURE_MAJOR_VERSIONS = Object.freeze({ + [MODEL_FEATURES.CHAT]: 1, + [MODEL_FEATURES.TITLE_GENERATION]: 1, + [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_SIDEBAR_STARTER]: 1, + [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_FOLLOWUP]: 1, + [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_ASSISTANT_LIMITATIONS]: 1, + [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_INSIGHTS]: 1, + // TODO: add major version for memories prompts +}); + +/** + * Remote Settings configuration record structure + * + * @typedef {object} RemoteSettingsConfig + * @property {string} feature - Feature identifier + * @property {string} model - Model identifier for LLM inference + * @property {string} prompts - Prompt template content + * @property {string} version - Version string in "v{major}.{minor}" format + * @property {boolean} [is_default] - Whether this is the default config for the feature + * @property {object} [parameters] - Optional inference parameters (e.g., temperature) + * @property {string[]} [additional_components] - 
Optional list of dependent feature configs + */ + +/** + * Parses a version string in the format "v{major}.{minor}". + * + * @param {string} versionString - Version string to parse (e.g., "v1.2") + * @returns {object|null} Parsed version with major and minor numbers, or null if invalid + */ +function parseVersion(versionString) { + const match = /^v(\d+)\.(\d+)$/.exec(versionString || ""); + if (!match) { + return null; + } + return { + major: Number(match[1]), + minor: Number(match[2]), + original: versionString, + }; +} + +/** + * Selects the main configuration for a feature based on version and model preferences. + * + * Remote Settings maintains only the latest minor version for each (feature, model, major_version) combination. + * + * Selection logic: + * 1. Filter to configs matching the required major version + * 2. If user has model preference, find that model's config + * 3. Otherwise, find the default config (is_default: true) + * + * @param {Array} featureConfigs - All configs for the feature from Remote Settings + * @param {object} options - Selection options + * @param {number} options.majorVersion - Required major version for the feature + * @param {string} options.userModel - User's preferred model (empty string if none) + * @returns {object|null} Selected config or null if no match + */ +function selectMainConfig(featureConfigs, { majorVersion, userModel }) { + // Filter to configs matching the required major version + const sameMajor = featureConfigs.filter(config => { + const parsed = parseVersion(config.version); + return parsed && parsed.major === majorVersion; + }); + + if (sameMajor.length === 0) { + return null; + } + + // If user specified a model preference, find that model's config + if (userModel) { + const userModelConfig = sameMajor.find( + config => config.model === userModel + ); + if (userModelConfig) { + return userModelConfig; + } + // User's model not found in this major version - fall through to defaults + console.warn( + `User 
model "${userModel}" not found for major version ${majorVersion}, using default` + ); + } + + // No user model pref OR user's model not found: use default + const defaultConfig = sameMajor.find(config => config.is_default === true); + if (defaultConfig) { + return defaultConfig; + } + + // No default found - this shouldn't happen with proper Remote Settings data + console.warn(`No default config found for major version ${majorVersion}`); + return null; +} /** * openAIEngine class @@ -29,18 +203,263 @@ export class openAIEngine { static _createEngine = createEngine; /** - * Returns an OpenAIEngine instance with the specified engine and service types + * The Remote Settings collection name for AI window prompt configurations + */ + static RS_AI_WINDOW_COLLECTION = "ai-window-prompts"; + + /** + * Cached Remote Settings client + * Cache is invalidated when user changes MODEL_PREF pref via modelPrefObserver * - * @param {string} engineId The identifier for the engine instance - * @param {string} serviceType The type of message to be sent ("ai", "memories", "s2s") - * @returns {Promise<openAIEngine>} The OpenAIEngine instance + * @type {RemoteSettingsClient | null} + */ + static _remoteClient = null; + + /** + * Configuration map: { featureName: configObject } + * + * @type {object | null} + */ + #configs = null; + + /** + * Main feature name + * + * @type {string | null} */ - static async build(engineId = "smart-openai", serviceType = "ai") { + feature = null; + + /** + * Resolved model name for LLM inference + * + * @type {string | null} + */ + model = null; + + /** + * Gets the Remote Settings client for AI window configurations. 
+ * + * @returns {RemoteSettingsClient} + */ + static getRemoteClient() { + if (openAIEngine._remoteClient) { + return openAIEngine._remoteClient; + } + + const client = lazy.RemoteSettings(openAIEngine.RS_AI_WINDOW_COLLECTION, { + bucketName: "main", + }); + + openAIEngine._remoteClient = client; + return client; + } + + /** + * Applies default configuration fallback when Remote Settings selection fails + * + * @param {string} feature - The feature identifier + * @private + */ + _applyDefaultConfig(feature) { + this.feature = feature; + this.model = DEFAULT_MODEL[feature]; + this.#configs = {}; + } + + /** + * Loads configuration from Remote Settings with version-aware selection. + * + * Selection logic: + * 1. Filters configs by feature and major version compatibility + * 2. If user has model preference, finds latest minor for that model + * 3. Otherwise, finds latest minor among default configs + * 4. Falls back to latest minor overall if no defaults + * 5. Falls back to local defaults if no matching major version + * + * @param {string} feature - The feature identifier from MODEL_FEATURES + * @returns {Promise<void>} + * Sets this.feature to the feature name + * Sets this.model to the selected model ID + * Sets this.#configs to contain feature's and additional_components' configs + */ + async loadConfig(feature) { + const client = openAIEngine.getRemoteClient(); + const allRecords = await client.get(); + + // Filter to configs for this feature + const featureConfigs = allRecords.filter( + record => record.feature === feature + ); + + // Fallback to default if no remote settings records for given feature + if (!featureConfigs.length) { + console.warn( + `No Remote Settings records found for feature: ${feature}, using default` + ); + this._applyDefaultConfig(feature); + return; + } + + const majorVersion = FEATURE_MAJOR_VERSIONS[feature]; + const userModel = Services.prefs.getStringPref(MODEL_PREF, ""); + + // Find matching config with version and provided 
userModel pref + const mainConfig = selectMainConfig(featureConfigs, { + majorVersion, + userModel, + }); + + if (!mainConfig) { + console.warn( + `No matching model config found for feature: ${feature} with major version ${majorVersion}, using default` + ); + this._applyDefaultConfig(feature); + return; + } + + // Store the selected configuration + this.feature = feature; + this.model = mainConfig.model; + + // Build configsMap for looking up additional_components + const configsMap = new Map(allRecords.map(r => [r.feature, r])); + + // Build configs map: { featureName: configObject } + this.#configs = {}; + this.#configs[feature] = mainConfig; + + // Add additional_components if exists + // This field lists what other remote settings configs are needed + // as dependency to the current feature. + if (mainConfig.additional_components) { + for (const componentFeature of mainConfig.additional_components) { + const componentConfig = configsMap.get(componentFeature); + if (componentConfig) { + this.#configs[componentFeature] = componentConfig; + } else { + console.warn( + `Additional component "${componentFeature}" not found in Remote Settings` + ); + } + } + } + } + + /** + * Gets the configuration for a specific feature. + * + * @param {string} [feature] - The feature identifier. Defaults to the main feature. + * @returns {object|null} The feature's configuration object + */ + getConfig(feature) { + const targetFeature = feature || this.feature; + return this.#configs?.[targetFeature] || null; + } + + /** + * Loads a prompt for the specified feature. + * Tries Remote Settings first, then falls back to local prompts. 
+ * + * @param {string} feature - The feature identifier + * @returns {Promise<string>} The prompt content + */ + async loadPrompt(feature) { + // Try loading from Remote Settings first + const config = this.getConfig(feature); + if (config?.prompts) { + return config.prompts; + } + + console.warn( + `No Remote Settings prompt for ${feature}, falling back to local` + ); + + // Fall back to local prompts + try { + return await this.#loadLocalPrompt(feature); + } catch (error) { + throw new Error(`Failed to load prompt for ${feature}: ${error.message}`); + } + } + + /** + * Loads a prompt from local prompt files. + * + * @param {string} feature - The feature identifier + * @returns {Promise<string>} The prompt content from local files + */ + async #loadLocalPrompt(feature) { + switch (feature) { + case MODEL_FEATURES.CHAT: { + const { assistantPrompt } = await import( + "moz-src:///browser/components/aiwindow/models/prompts/AssistantPrompts.sys.mjs" + ); + return assistantPrompt; + } + case MODEL_FEATURES.TITLE_GENERATION: { + const { titleGenerationPrompt } = await import( + "moz-src:///browser/components/aiwindow/models/prompts/TitleGenerationPrompts.sys.mjs" + ); + return titleGenerationPrompt; + } + case MODEL_FEATURES.CONVERSATION_SUGGESTIONS_SIDEBAR_STARTER: { + const { conversationStarterPrompt } = await import( + "moz-src:///browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs" + ); + return conversationStarterPrompt; + } + case MODEL_FEATURES.CONVERSATION_SUGGESTIONS_FOLLOWUP: { + const { conversationFollowupPrompt } = await import( + "moz-src:///browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs" + ); + return conversationFollowupPrompt; + } + case MODEL_FEATURES.CONVERSATION_SUGGESTIONS_ASSISTANT_LIMITATIONS: { + const { assistantLimitations } = await import( + "moz-src:///browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs" + ); + return assistantLimitations; + } + 
case MODEL_FEATURES.CONVERSATION_SUGGESTIONS_MEMORIES: { + const { conversationMemoriesPrompt } = await import( + "moz-src:///browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs" + ); + return conversationMemoriesPrompt; + } + // TODO: add local memories prompts imports for each feature + default: + throw new Error(`No local prompt found for feature: ${feature}`); + } + } + + /** + * Builds an openAIEngine instance with configuration loaded from Remote Settings. + * + * @param {string} feature + * The feature name to use to retrieve remote settings for prompts. + * @param {string} engineId + * The engine ID for MLEngine creation. Defaults to DEFAULT_ENGINE_ID. + * @param {string} serviceType + * The type of message to be sent ("ai", "memories", "s2s"). + * Defaults to SERVICE_TYPES.AI. + * @returns {Promise<object>} + * Promise that will resolve to the configured engine instance. + */ + static async build( + feature, + engineId = DEFAULT_ENGINE_ID, + serviceType = SERVICE_TYPES.AI + ) { const engine = new openAIEngine(); + + await engine.loadConfig(feature); + engine.engineInstance = await openAIEngine.#createOpenAIEngine( engineId, - serviceType + serviceType, + engine.model ); + return engine; } @@ -68,9 +487,10 @@ export class openAIEngine { * * @param {string} engineId The identifier for the engine instance * @param {string} serviceType The type of message to be sent ("ai", "memories", "s2s") + * @param {string | null} modelId The resolved model ID (already contains fallback logic) * @returns {Promise<object>} The configured engine instance */ - static async #createOpenAIEngine(engineId, serviceType) { + static async #createOpenAIEngine(engineId, serviceType, modelId = null) { const extraHeadersPref = Services.prefs.getStringPref( "browser.aiwindow.extraHeaders", "{}" @@ -85,11 +505,11 @@ export class openAIEngine { try { const engineInstance = await openAIEngine._createEngine({ - apiKey: 
Services.prefs.getStringPref("browser.aiwindow.apiKey"), + apiKey: Services.prefs.getStringPref("browser.aiwindow.apiKey", ""), backend: "openai", - baseURL: Services.prefs.getStringPref("browser.aiwindow.endpoint"), + baseURL: Services.prefs.getStringPref("browser.aiwindow.endpoint", ""), engineId, - modelId: Services.prefs.getStringPref("browser.aiwindow.model"), + modelId, modelRevision: "main", taskName: "text-generation", serviceType, diff --git a/browser/components/aiwindow/models/memories/Memories.sys.mjs b/browser/components/aiwindow/models/memories/Memories.sys.mjs @@ -17,7 +17,7 @@ * 3. `existingMemoriesList`: an array of existing memory summary strings to deduplicate against * * Example Usage: - * const engine = await openAIEngine.build(); + * const engine = await openAIEngine.build(MODEL_FEATURES.MEMORIES, DEFAULT_ENGINE_ID, SERVICE_TYPES.MEMORIES); * const sources = {history: [domainItems, titleItems, searchItems]}; * const existingMemoriesList = [...]; // Array of existing memory summary strings; this should be fetched from memory storage * const newMemories = await generateMemories(engine, sources, existingMemoriesList); diff --git a/browser/components/aiwindow/models/memories/MemoriesManager.sys.mjs b/browser/components/aiwindow/models/memories/MemoriesManager.sys.mjs @@ -11,8 +11,11 @@ import { } from "moz-src:///browser/components/aiwindow/models/memories/MemoriesHistorySource.sys.mjs"; import { getRecentChats } from "./MemoriesChatSource.sys.mjs"; import { + DEFAULT_ENGINE_ID, + MODEL_FEATURES, openAIEngine, renderPrompt, + SERVICE_TYPES, } from "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs"; import { MemoryStore } from "moz-src:///browser/components/aiwindow/services/MemoryStore.sys.mjs"; import { @@ -64,7 +67,11 @@ export class MemoriesManager { */ static async ensureOpenAIEngine() { if (!this.#openAIEnginePromise) { - this.#openAIEnginePromise = await openAIEngine.build(); + this.#openAIEnginePromise = await openAIEngine.build( 
+ MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); } return this.#openAIEnginePromise; } diff --git a/browser/components/aiwindow/models/prompts/AssistantPrompts.sys.mjs b/browser/components/aiwindow/models/prompts/AssistantPrompts.sys.mjs @@ -5,9 +5,8 @@ */ export const assistantPromptMetadata = { - version: "0.1", + version: "v1.0", }; - export const assistantPrompt = `You are a very knowledgeable personal browser assistant, designed to assist the user in navigating the web. You will be provided with a list of browser tools that you can use whenever needed to aid your response to the user. Your internal knowledge cutoff date is: July, 2024. diff --git a/browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs b/browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs @@ -4,7 +4,10 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ -const assistantLimitations = `The following tools are available to the browser assistant: +export const assistantLimitationstMetadata = { + version: "v1.0", +}; +export const assistantLimitations = `The following tools are available to the browser assistant: - get_open_tabs(): Access the user's browser and return a list of the most recently browsed data - get_page_content(url): Retrieve cleaned text content of the provided browser page URL - search_browsing_history(search_term, start_ts, end_ts): Retrieve pages from the user's past browsing history, optionally filtered by topic and/or time range @@ -27,7 +30,7 @@ Browser Assistant Capabilities & Limitations: 3. The assistant will decline to answer when it identifies agentic or unsafe requests.`; export const conversationStarterPromptMetadata = { - version: "0.1", + version: "v1.0", }; export const conversationStarterPrompt = `You are an expert in suggesting conversation starters for a browser assistant. 
@@ -44,7 +47,7 @@ Open Tabs: {open_tabs} ======== -${assistantLimitations} +{assistant_limitations} ======== Task: @@ -70,7 +73,7 @@ Rules: Return ONLY the suggestions, one per line, no numbering, no extra formatting. Sort from most to least relevant.`; export const conversationFollowupPromptMetadata = { - version: "0.1", + version: "v1.0", }; export const conversationFollowupPrompt = `You are an expert suggesting next responses or queries for a user during a conversation with an AI browser assistant. @@ -87,7 +90,7 @@ Conversation History (latest last): {conversation} ======== -${assistantLimitations} +{assistant_limitations} ======== Generate {n} suggested next responses or queries that the user might want to message next. diff --git a/browser/components/aiwindow/models/prompts/TitleGenerationPrompts.sys.mjs b/browser/components/aiwindow/models/prompts/TitleGenerationPrompts.sys.mjs @@ -2,6 +2,9 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ +export const titleGenerationPromptMetadata = { + version: "v1.0", +}; export const titleGenerationPrompt = `Generate a concise chat title using only the current user message and the current context. Rules: diff --git a/browser/components/aiwindow/models/tests/browser/ai-window-prompts-remote-settings-snapshot.json b/browser/components/aiwindow/models/tests/browser/ai-window-prompts-remote-settings-snapshot.json @@ -0,0 +1,86 @@ +[ + { + "model": "", + "feature": "conversation-suggestions-assistant-limitations", + "prompts": "You can do this and cannot do that.", + "version": "v1.0", + "is_default": true, + "parameters": {}, + "additional_components": [] + }, + { + "model": "qwen3-235b-a22b-instruct-2507-maas", + "feature": "conversation-suggestions-sidebar-starter", + "prompts": "Suggest a conversation starter message. 
{assistant_limitations}", + "version": "v1.0", + "is_default": true, + "parameters": {}, + "additional_components": [ + "conversation-suggestions-assistant-limitations", + "conversation-suggestions-memories" + ] + }, + { + "model": "qwen3-235b-a22b-instruct-2507-maas", + "feature": "chat", + "prompts": "You are a helpful browser assistant.", + "version": "v1.0", + "is_default": true, + "parameters": { + "temperature": 1.0 + }, + "additional_components": [] + }, + { + "model": "gpt-oss-120b", + "feature": "chat", + "prompts": "You are a helpful browser assistant.", + "version": "v1.0", + "is_default": false, + "parameters": { + "temperature": 1.0 + }, + "additional_components": [] + }, + { + "model": "gemini-2.5-flash-lite", + "feature": "chat", + "prompts": "You are a helpful browser assistant.", + "version": "v1.0", + "is_default": false, + "parameters": { + "temperature": 1.0 + }, + "additional_components": [] + }, + { + "model": "", + "feature": "conversation-suggestions-memories", + "prompts": "Use the following memories {memories}.", + "version": "v1.0", + "is_default": true, + "parameters": {}, + "additional_components": [] + }, + { + "model": "qwen3-235b-a22b-instruct-2507-maas", + "feature": "conversation-suggestions-followup", + "prompts": "Suggest next conversation message. 
{assistant_limitations}",
+    "version": "v1.0",
+    "is_default": true,
+    "parameters": {},
+    "additional_components": [
+      "conversation-suggestions-assistant-limitations",
+      "conversation-suggestions-memories"
+    ]
+  },
+  {
+    "model": "qwen3-235b-a22b-instruct-2507-maas",
+    "feature": "title-generation",
+    "prompts": "Generate a chat title based on current conversation.",
+    "version": "v1.0",
+    "is_default": true,
+    "parameters": {},
+    "additional_components": []
+  }
+]
diff --git a/browser/components/aiwindow/models/tests/browser/browser.toml b/browser/components/aiwindow/models/tests/browser/browser.toml
@@ -1,6 +1,7 @@
 [DEFAULT]
 support-files = [
   "head.js",
+  "ai-window-prompts-remote-settings-snapshot.json",
 ]
 prefs = [
   "browser.aiwindow.enabled=true",
@@ -9,3 +10,4 @@ prefs = [
 ["browser_getCurrentTabMetadata.js"]
 ["browser_get_page_content.js"]
 window_attributes = "ai-window"
+["browser_utils_loadConfig.js"]
diff --git a/browser/components/aiwindow/models/tests/browser/browser_utils_loadConfig.js b/browser/components/aiwindow/models/tests/browser/browser_utils_loadConfig.js
@@ -0,0 +1,131 @@
+/* Any copyright is dedicated to the Public Domain. 
+ http://creativecommons.org/publicdomain/zero/1.0/ */ + +"use strict"; + +const { openAIEngine, MODEL_FEATURES } = ChromeUtils.importESModule( + "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs" +); + +const { RemoteSettings } = ChromeUtils.importESModule( + "resource://services-settings/remote-settings.sys.mjs" +); + +async function loadRemoteSettingsSnapshot() { + const chromeUrl = getRootDirectory(gTestPath); + const snapshotUrl = `${chromeUrl}ai-window-prompts-remote-settings-snapshot.json`; + + const response = await fetch(snapshotUrl); + if (!response.ok) { + throw new Error(`Failed to load snapshot: ${response.statusText}`); + } + return response.json(); +} + +add_setup(async function () { + const snapshotData = await loadRemoteSettingsSnapshot(); + + // Populate Remote Settings with snapshot data + const client = RemoteSettings("ai-window-prompts"); + await client.db.clear(); + + for (const record of snapshotData) { + await client.db.create({ + id: `${record.feature}-${record.model || "default"}-${record.version}`, + ...record, + }); + } + + await client.db.importChanges({}, Date.now()); + + registerCleanupFunction(async () => { + await client.db.clear(); + }); +}); + +add_task(async function test_loadConfig_chat_feature() { + const engine = new openAIEngine(); + await engine.loadConfig(MODEL_FEATURES.CHAT); + const config = engine.getConfig(engine.feature); + + info("Loaded config for 'chat' feature:"); + info(` Model: ${engine.model}`); + info(` Feature: ${engine.feature}`); + if (config) { + info(` Config version: ${config.version}`); + info(` Config model: ${config.model}`); + info(` Has prompts: ${!!config.prompts}`); + info(` Prompts: ${config.prompts}`); + } + + Assert.equal(engine.feature, "chat", "Feature should be set to 'chat'"); + Assert.equal( + engine.model, + "qwen3-235b-a22b-instruct-2507-maas", + "Model should be loaded from remote settings" + ); + Assert.ok(config, "Config should not be null or undefined"); + Assert.notEqual( 
+ JSON.stringify(config), + "{}", + "Config should not be an empty object" + ); + Assert.equal(config.version, "v1.0", "Version should be v1.0"); + Assert.equal( + config.prompts, + "You are a helpful browser assistant.", + "Prompts should be loaded from remote settings" + ); + Assert.equal( + config.parameters.temperature, + 1.0, + "Temperature parameter should be loaded" + ); +}); + +add_task(async function test_loadConfig_with_additional_components() { + const engine = new openAIEngine(); + await engine.loadConfig( + MODEL_FEATURES.CONVERSATION_SUGGESTIONS_SIDEBAR_STARTER + ); + + const mainConfig = engine.getConfig( + MODEL_FEATURES.CONVERSATION_SUGGESTIONS_SIDEBAR_STARTER + ); + + info("Testing additional_components loading:"); + info(` Main feature: ${engine.feature}`); + info(` Model: ${engine.model}`); + if (mainConfig) { + info( + ` Additional components: ${mainConfig.additional_components.join(", ")}` + ); + } + + Assert.ok(mainConfig, "Main config should be loaded"); + Assert.ok( + Array.isArray(mainConfig.additional_components), + "additional_components should be an array" + ); + Assert.equal( + mainConfig.additional_components.length, + 2, + "Should have 2 additional components" + ); + + const limitationsConfig = engine.getConfig( + "conversation-suggestions-assistant-limitations" + ); + Assert.ok( + limitationsConfig, + "Assistant limitations component should be loaded" + ); + Assert.ok( + limitationsConfig.prompts, + "Assistant limitations should have prompts" + ); + + const memoriesConfig = engine.getConfig("conversation-suggestions-memories"); + Assert.ok(memoriesConfig, "Memories component should be loaded"); + Assert.ok(memoriesConfig.prompts, "Memories should have prompts"); +}); diff --git a/browser/components/aiwindow/models/tests/xpcshell/ai-window-prompts-remote-settings-snapshot.json b/browser/components/aiwindow/models/tests/xpcshell/ai-window-prompts-remote-settings-snapshot.json @@ -0,0 +1,86 @@ +[ + { + "model": "", + "feature": 
"conversation-suggestions-assistant-limitations", + "prompts": "You can do this and cannot do that.", + "version": "v1.0", + "is_default": true, + "parameters": {}, + "additional_components": [] + }, + { + "model": "qwen3-235b-a22b-instruct-2507-maas", + "feature": "conversation-suggestions-sidebar-starter", + "prompts": "Suggest a conversation starter message. {assistant_limitations}", + "version": "v1.0", + "is_default": true, + "parameters": {}, + "additional_components": [ + "conversation-suggestions-assistant-limitations", + "conversation-suggestions-memories" + ] + }, + { + "model": "qwen3-235b-a22b-instruct-2507-maas", + "feature": "chat", + "prompts": "You are a helpful browser assistant.", + "version": "v1.0", + "is_default": true, + "parameters": { + "temperature": 1.0 + }, + "additional_components": [] + }, + { + "model": "gpt-oss-120b", + "feature": "chat", + "prompts": "You are a helpful browser assistant.", + "version": "v1.0", + "is_default": false, + "parameters": { + "temperature": 1.0 + }, + "additional_components": [] + }, + { + "model": "gemini-2.5-flash-lite", + "feature": "chat", + "prompts": "You are a helpful browser assistant.", + "version": "v1.0", + "is_default": false, + "parameters": { + "temperature": 1.0 + }, + "additional_components": [] + }, + { + "model": "", + "feature": "conversation-suggestions-memories", + "prompts": "Use the following memories {memories}.", + "version": "v1.0", + "is_default": true, + "parameters": {}, + "additional_components": [] + }, + { + "model": "qwen3-235b-a22b-instruct-2507-maas", + "feature": "conversation-suggestions-followup", + "prompts": "Suggest next conversation message. 
{assistant_limitations}",
+    "version": "v1.0",
+    "is_default": true,
+    "parameters": {},
+    "additional_components": [
+      "conversation-suggestions-assistant-limitations",
+      "conversation-suggestions-memories"
+    ]
+  },
+  {
+    "model": "qwen3-235b-a22b-instruct-2507-maas",
+    "feature": "title-generation",
+    "prompts": "Generate a chat title based on current conversation.",
+    "version": "v1.0",
+    "is_default": true,
+    "parameters": {},
+    "additional_components": []
+  }
+]
diff --git a/browser/components/aiwindow/models/tests/xpcshell/test_Chat.js b/browser/components/aiwindow/models/tests/xpcshell/test_Chat.js
@@ -12,7 +12,7 @@ const { SYSTEM_PROMPT_TYPE, MESSAGE_ROLE } = ChromeUtils.importESModule(
 const { Chat } = ChromeUtils.importESModule(
   "moz-src:///browser/components/aiwindow/models/Chat.sys.mjs"
 );
-const { openAIEngine } = ChromeUtils.importESModule(
+const { MODEL_FEATURES, openAIEngine } = ChromeUtils.importESModule(
   "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs"
 );
@@ -52,47 +52,53 @@ add_task(async function test_Chat_real_tools_are_registered() {
   );
 });
-add_task(async function test_openAIEngine_build_uses_prefs() {
-  Services.prefs.setStringPref(PREF_API_KEY, "test-key-123");
-  Services.prefs.setStringPref(PREF_ENDPOINT, "https://example.test/v1");
-  Services.prefs.setStringPref(PREF_MODEL, "gpt-fake");
+add_task(
+  async function test_openAIEngine_build_with_chat_feature_and_nonexistent_model() {
+    Services.prefs.setStringPref(PREF_API_KEY, "test-key-123");
+    Services.prefs.setStringPref(PREF_ENDPOINT, "https://example.test/v1");
+    Services.prefs.setStringPref(PREF_MODEL, "nonexistent-model");
-  const sb = sinon.createSandbox();
-  try {
-    const fakeEngineInstance = {
-      runWithGenerator() {
-        throw new Error("not 
used"); + }, + }; + const stub = sb + .stub(openAIEngine, "_createEngine") + .resolves(fakeEngineInstance); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build(MODEL_FEATURES.CHAT); - Assert.ok( - engine instanceof openAIEngine, - "Should return openAIEngine instance" - ); - Assert.strictEqual( - engine.engineInstance, - fakeEngineInstance, - "Should store engine instance" - ); - Assert.ok(stub.calledOnce, "_createEngine should be called once"); + Assert.ok( + engine instanceof openAIEngine, + "Should return openAIEngine instance" + ); + Assert.strictEqual( + engine.engineInstance, + fakeEngineInstance, + "Should store engine instance" + ); + Assert.ok(stub.calledOnce, "_createEngine should be called once"); - const opts = stub.firstCall.args[0]; - Assert.equal(opts.apiKey, "test-key-123", "apiKey should come from pref"); - Assert.equal( - opts.baseURL, - "https://example.test/v1", - "baseURL should come from pref" - ); - Assert.equal(opts.modelId, "gpt-fake", "modelId should come from pref"); - } finally { - sb.restore(); + const opts = stub.firstCall.args[0]; + Assert.equal(opts.apiKey, "test-key-123", "apiKey should come from pref"); + Assert.equal( + opts.baseURL, + "https://example.test/v1", + "baseURL should come from pref" + ); + Assert.equal( + opts.modelId, + "qwen3-235b-a22b-instruct-2507-maas", + "modelId should fallback to default" + ); + } finally { + sb.restore(); + } } -}); +); add_task(async function test_Chat_fetchWithHistory_streams_and_forwards_args() { const sb = sinon.createSandbox(); @@ -114,6 +120,9 @@ add_task(async function test_Chat_fetchWithHistory_streams_and_forwards_args() { } return gen(); }, + getConfig() { + return {}; + }, }; sb.stub(openAIEngine, "build").resolves(fakeEngine); @@ -189,6 +198,9 @@ add_task(async function test_Chat_fetchWithHistory_handles_tool_calls() { } return gen(); }, + getConfig() { + return {}; + }, }; // Mock tool function @@ -314,6 +326,9 @@ add_task( } return gen(); }, + 
getConfig() { + return {}; + }, }; Chat.toolMap.test_tool = sb.stub().resolves("should not be called"); diff --git a/browser/components/aiwindow/models/tests/xpcshell/test_ConversationSuggestions.js b/browser/components/aiwindow/models/tests/xpcshell/test_ConversationSuggestions.js @@ -23,7 +23,7 @@ const { MemoriesManager } = ChromeUtils.importESModule( "moz-src:///browser/components/aiwindow/models/memories/MemoriesManager.sys.mjs" ); const { MESSAGE_ROLE } = ChromeUtils.importESModule( - "moz-src:///browser/components/aiwindow/ui/modules/ChatStore.sys.mjs" + "moz-src:///browser/components/aiwindow/ui/modules/ChatConstants.sys.mjs" ); const { sinon } = ChromeUtils.importESModule( "resource://testing-common/Sinon.sys.mjs" @@ -42,6 +42,18 @@ const API_KEY = "test-api-key"; const ENDPOINT = "https://api.test-endpoint.com/v1"; const MODEL = "test-model"; +async function loadRemoteSettingsSnapshot() { + const file = do_get_file("ai-window-prompts-remote-settings-snapshot.json"); + const data = await IOUtils.readUTF8(file.path); + return JSON.parse(data); +} + +let REAL_REMOTE_SETTINGS_SNAPSHOT; + +add_setup(async function () { + REAL_REMOTE_SETTINGS_SNAPSHOT = await loadRemoteSettingsSnapshot(); +}); + /** * Cleans up preferences after testing */ @@ -176,7 +188,12 @@ add_task(async function test_addMemoriesToPrompt_have_memories() { const memoriesStub = sb .stub(MemoriesGetterForSuggestionPrompts, "getMemorySummariesForPrompt") .resolves(fakeMemories); - const promptWithMemories = await addMemoriesToPrompt(basePrompt); + const conversationMemoriesPrompt = "Memories block:\n{memories}"; + const promptWithMemories = await addMemoriesToPrompt( + basePrompt, + conversationMemoriesPrompt + ); + Assert.ok( memoriesStub.calledOnce, "getMemorySummariesForPrompt should be called" @@ -202,7 +219,12 @@ add_task(async function test_addMemoriesToPrompt_dont_have_memories() { const memoriesStub = sb .stub(MemoriesGetterForSuggestionPrompts, "getMemorySummariesForPrompt") 
.resolves(fakeMemories); - const promptWithMemories = await addMemoriesToPrompt(basePrompt); + const conversationMemoriesPrompt = "Memories block:\n{memories}"; + const promptWithMemories = await addMemoriesToPrompt( + basePrompt, + conversationMemoriesPrompt + ); + Assert.ok( memoriesStub.calledOnce, "getMemorySummariesForPrompt should be called" @@ -870,6 +892,55 @@ add_task( ); /** + * Tests that assistant limitations are included in conversation starter prompts + */ +add_task( + async function test_generateConversationStartersSidebar_includes_assistant_limitations() { + Services.prefs.setStringPref(PREF_API_KEY, API_KEY); + Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT); + Services.prefs.setStringPref(PREF_MODEL, MODEL); + + const sb = sinon.createSandbox(); + try { + const fakeEngine = { + run: sb.stub().resolves({ + finalOutput: `Suggestion 1\nSuggestion 2\nSuggestion 3`, + }), + }; + sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); + + sb.stub(openAIEngine, "getRemoteClient").returns({ + get: sb.stub().resolves(REAL_REMOTE_SETTINGS_SNAPSHOT), + }); + + sb.stub( + MemoriesGetterForSuggestionPrompts, + "getMemorySummariesForPrompt" + ).resolves([]); + + const n = 3; + const contextTabs = [ + { title: "Test Tab", url: "https://test.example.com" }, + ]; + + await generateConversationStartersSidebar(contextTabs, n, false); + + Assert.ok(fakeEngine.run.calledOnce, "Engine run should be called once"); + + const callArgs = fakeEngine.run.firstCall.args[0]; + Assert.ok( + callArgs.messages[1].content.includes( + "You can do this and cannot do that." 
+ ), + "Prompt should include assistant limitations from remote settings" + ); + } finally { + sb.restore(); + } + } +); + +/** * Tests for generateFollowupPrompts successfully generating suggestions */ add_task(async function test_generateFollowupPrompts_happy_path() { @@ -1236,6 +1307,57 @@ add_task(async function test_generateFollowupPrompts_engine_error() { }); /** + * Tests that assistant limitations are included in followup prompts + */ +add_task( + async function test_generateFollowupPrompts_includes_assistant_limitations() { + Services.prefs.setStringPref(PREF_API_KEY, API_KEY); + Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT); + Services.prefs.setStringPref(PREF_MODEL, MODEL); + + const sb = sinon.createSandbox(); + try { + const fakeEngine = { + run: sb.stub().resolves({ + finalOutput: `Suggestion 1\nSuggestion 2`, + }), + }; + sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); + + sb.stub(openAIEngine, "getRemoteClient").returns({ + get: sb.stub().resolves(REAL_REMOTE_SETTINGS_SNAPSHOT), + }); + + sb.stub( + MemoriesGetterForSuggestionPrompts, + "getMemorySummariesForPrompt" + ).resolves([]); + + const n = 2; + const conversationHistory = [ + { role: MESSAGE_ROLE.USER, content: "Hello" }, + { role: MESSAGE_ROLE.ASSISTANT, content: "Hi there!" }, + ]; + const currentTab = { title: "Test", url: "https://test.example.com" }; + + await generateFollowupPrompts(conversationHistory, currentTab, n, false); + + Assert.ok(fakeEngine.run.calledOnce, "Engine run should be called once"); + + const callArgs = fakeEngine.run.firstCall.args[0]; + Assert.ok( + callArgs.messages[1].content.includes( + "You can do this and cannot do that." 
+ ), + "Prompt should include assistant limitations from remote settings" + ); + } finally { + sb.restore(); + } + } +); + +/** * Tests for getMemorySummariesForPrompt happy path */ add_task(async function test_getMemorySummariesForPrompt_happy_path() { diff --git a/browser/components/aiwindow/models/tests/xpcshell/test_Memories.js b/browser/components/aiwindow/models/tests/xpcshell/test_Memories.js @@ -18,9 +18,10 @@ const { const { getRecentChats } = ChromeUtils.importESModule( "moz-src:///browser/components/aiwindow/models/memories/MemoriesChatSource.sys.mjs" ); -const { openAIEngine } = ChromeUtils.importESModule( - "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs" -); +const { DEFAULT_ENGINE_ID, MODEL_FEATURES, openAIEngine, SERVICE_TYPES } = + ChromeUtils.importESModule( + "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs" + ); const { sinon } = ChromeUtils.importESModule( "resource://testing-common/Sinon.sys.mjs" ); @@ -454,7 +455,11 @@ add_task(async function test_generateInitialMemoriesList_happy_path() { // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").returns(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const [domainItems, titleItems, searchItems] = @@ -532,7 +537,11 @@ add_task( // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").returns(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const [domainItems, titleItems, searchItems] = @@ -566,7 +575,11 @@ add_task( // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").returns(fakeEngine); - 
const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const [domainItems, titleItems, searchItems] = @@ -600,7 +613,11 @@ add_task( // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").returns(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const [domainItems, titleItems, searchItems] = @@ -664,7 +681,11 @@ add_task( // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").returns(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const [domainItems, titleItems, searchItems] = @@ -732,7 +753,11 @@ add_task(async function test_deduplicateMemoriesList_happy_path() { // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const dedupedMemoriesList = await deduplicateMemories( @@ -791,7 +816,11 @@ add_task(async function test_deduplicateMemoriesList_sad_path_empty_output() { // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, 
"_createEngine should be called once"); const dedupedMemoriesList = await deduplicateMemories( @@ -825,7 +854,11 @@ add_task( // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const dedupedMemoriesList = await deduplicateMemories( @@ -866,7 +899,11 @@ add_task( // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const dedupedMemoriesList = await deduplicateMemories( @@ -907,7 +944,11 @@ add_task( // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const dedupedMemoriesList = await deduplicateMemories( @@ -969,7 +1010,11 @@ add_task( // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const dedupedMemoriesList = await deduplicateMemories( @@ -1023,7 +1068,11 @@ add_task( // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); - const engine = await openAIEngine.build(); + const 
engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const dedupedMemoriesList = await deduplicateMemories( @@ -1075,7 +1124,11 @@ add_task(async function test_filterSensitiveMemories_happy_path() { // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const nonSensitiveMemoriesList = await filterSensitiveMemories( @@ -1125,7 +1178,11 @@ add_task(async function test_filterSensitiveMemories_sad_path_empty_output() { // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const nonSensitiveMemoriesList = await filterSensitiveMemories( @@ -1165,7 +1222,11 @@ add_task( // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const nonSensitiveMemoriesList = await filterSensitiveMemories( @@ -1208,7 +1269,11 @@ add_task( // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); 
Assert.ok(stub.calledOnce, "_createEngine should be called once"); const nonSensitiveMemoriesList = await filterSensitiveMemories( @@ -1253,7 +1318,11 @@ add_task( // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const nonSensitiveMemoriesList = await filterSensitiveMemories( @@ -1300,7 +1369,11 @@ add_task( // Check that the stub was called const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build( + MODEL_FEATURES.MEMORIES, + DEFAULT_ENGINE_ID, + SERVICE_TYPES.MEMORIES + ); Assert.ok(stub.calledOnce, "_createEngine should be called once"); const nonSensitiveMemoriesList = await filterSensitiveMemories( diff --git a/browser/components/aiwindow/models/tests/xpcshell/test_Utils.js b/browser/components/aiwindow/models/tests/xpcshell/test_Utils.js @@ -2,9 +2,10 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ -const { openAIEngine, renderPrompt } = ChromeUtils.importESModule( - "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs" -); +const { MODEL_FEATURES, openAIEngine, renderPrompt } = + ChromeUtils.importESModule( + "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs" + ); const { sinon } = ChromeUtils.importESModule( "resource://testing-common/Sinon.sys.mjs" @@ -37,7 +38,7 @@ registerCleanupFunction(() => { /** * Tests the creation of an OpenAI engine instance */ -add_task(async function test_createOpenAIEngine() { +add_task(async function test_createOpenAIEngine_with_chat_feature() { Services.prefs.setStringPref(PREF_API_KEY, API_KEY); Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT); Services.prefs.setStringPref(PREF_MODEL, MODEL); @@ -53,7 +54,7 @@ add_task(async function test_createOpenAIEngine() { }; const stub = sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); - const engine = await openAIEngine.build(); + const engine = await openAIEngine.build(MODEL_FEATURES.CHAT); Assert.strictEqual( engine.engineInstance, fakeEngine, @@ -71,7 +72,7 @@ add_task(async function test_createOpenAIEngine() { "smart-openai", "engineId should be smart-openai" ); - Assert.equal(opts.modelId, MODEL, "modelId should come from pref"); + Assert.ok(opts.modelId, "modelId should be set"); Assert.equal(opts.modelRevision, "main", "modelRevision should be main"); Assert.equal( opts.taskName, diff --git a/browser/components/aiwindow/models/tests/xpcshell/test_Utils_RemoteSettings.js b/browser/components/aiwindow/models/tests/xpcshell/test_Utils_RemoteSettings.js @@ -0,0 +1,374 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +const { openAIEngine, MODEL_FEATURES } = ChromeUtils.importESModule( + "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs" +); + +const { sinon } = ChromeUtils.importESModule( + "resource://testing-common/Sinon.sys.mjs" +); + +const PREF_API_KEY = "browser.aiwindow.apiKey"; +const PREF_ENDPOINT = "browser.aiwindow.endpoint"; +const PREF_MODEL = "browser.aiwindow.model"; + +const API_KEY = "fake-key"; +const ENDPOINT = "https://api.fake-endpoint.com/v1"; + +async function loadRemoteSettingsSnapshot() { + const file = do_get_file("ai-window-prompts-remote-settings-snapshot.json"); + const data = await IOUtils.readUTF8(file.path); + return JSON.parse(data); +} + +let REAL_REMOTE_SETTINGS_SNAPSHOT; + +add_setup(async function () { + REAL_REMOTE_SETTINGS_SNAPSHOT = await loadRemoteSettingsSnapshot(); +}); + +registerCleanupFunction(() => { + for (let pref of [PREF_API_KEY, PREF_ENDPOINT, PREF_MODEL]) { + if (Services.prefs.prefHasUserValue(pref)) { + Services.prefs.clearUserPref(pref); + } + } +}); + +add_task(async function test_loadConfig_basic_with_real_snapshot() { + Services.prefs.setStringPref(PREF_API_KEY, API_KEY); + Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT); + + const sb = sinon.createSandbox(); + try { + const fakeEngine = { + runWithGenerator() { + throw new Error("not used"); + }, + }; + sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); + + sb.stub(openAIEngine, "getRemoteClient").returns({ + get: sb.stub().resolves(REAL_REMOTE_SETTINGS_SNAPSHOT), + }); + + const engine = new openAIEngine(); + + await engine.loadConfig(MODEL_FEATURES.CHAT); + + Assert.equal( + engine.feature, + MODEL_FEATURES.CHAT, + "Feature should be set correctly" + ); + Assert.ok(engine.model, "Model should be loaded from remote settings"); + + const config = engine.getConfig(MODEL_FEATURES.CHAT); + Assert.ok(config, "Config should be loaded"); + Assert.ok(config.prompts, "Prompts should be loaded from remote settings"); + Assert.ok( + 
config.prompts.includes("browser assistant"), + "Prompts should contain expected content" + ); + } finally { + sb.restore(); + } +}); + +add_task(async function test_loadConfig_with_user_pref_model() { + Services.prefs.setStringPref(PREF_API_KEY, API_KEY); + Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT); + Services.prefs.setStringPref(PREF_MODEL, "gpt-oss-120b"); + + const sb = sinon.createSandbox(); + try { + const fakeEngine = { + runWithGenerator() { + throw new Error("not used"); + }, + }; + sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); + + sb.stub(openAIEngine, "getRemoteClient").returns({ + get: sb.stub().resolves(REAL_REMOTE_SETTINGS_SNAPSHOT), + }); + + const engine = new openAIEngine(); + + await engine.loadConfig(MODEL_FEATURES.CHAT); + + Assert.equal( + engine.model, + "gpt-oss-120b", + "User pref model should filter to matching configs" + ); + const config = engine.getConfig(MODEL_FEATURES.CHAT); + Assert.equal( + config.model, + "gpt-oss-120b", + "Selected config should be for user's preferred model" + ); + } finally { + sb.restore(); + Services.prefs.clearUserPref(PREF_MODEL); + } +}); + +add_task(async function test_loadConfig_no_records() { + Services.prefs.setStringPref(PREF_API_KEY, API_KEY); + Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT); + + const sb = sinon.createSandbox(); + try { + const fakeEngine = { + runWithGenerator() { + throw new Error("not used"); + }, + }; + sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); + + sb.stub(openAIEngine, "getRemoteClient").returns({ + get: sb.stub().resolves([]), + }); + + const engine = new openAIEngine(); + + await engine.loadConfig(MODEL_FEATURES.CHAT); + + Assert.equal( + engine.model, + "qwen3-235b-a22b-instruct-2507-maas", + "Should fall back to default model when remote settings returns no records" + ); + Assert.equal( + engine.feature, + MODEL_FEATURES.CHAT, + "Should set feature when remote settings returns no records" + ); + } finally { + sb.restore(); 
+ } +}); + +add_task(async function test_loadConfig_filters_by_major_version() { + Services.prefs.setStringPref(PREF_API_KEY, API_KEY); + Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT); + + const sb = sinon.createSandbox(); + try { + const fakeEngine = { + runWithGenerator() { + throw new Error("not used"); + }, + }; + sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); + + // Add a v2.0 record to test data + const recordsWithV2 = [ + ...REAL_REMOTE_SETTINGS_SNAPSHOT, + { + model: "future-model", + feature: "chat", + prompts: "Future version prompt", + version: "v2.0", + is_default: true, + }, + ]; + + sb.stub(openAIEngine, "getRemoteClient").returns({ + get: sb.stub().resolves(recordsWithV2), + }); + + const engine = new openAIEngine(); + await engine.loadConfig(MODEL_FEATURES.CHAT); + + const config = engine.getConfig(MODEL_FEATURES.CHAT); + // Should get v1.x, not v2.0 + Assert.ok(config.version.startsWith("v1."), "Should select v1.x, not v2.0"); + } finally { + sb.restore(); + } +}); + +add_task(async function test_loadConfig_fallback_when_user_model_not_found() { + Services.prefs.setStringPref(PREF_API_KEY, API_KEY); + Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT); + Services.prefs.setStringPref(PREF_MODEL, "nonexistent-model"); + + const sb = sinon.createSandbox(); + try { + const fakeEngine = { + runWithGenerator() { + throw new Error("not used"); + }, + }; + sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); + + sb.stub(openAIEngine, "getRemoteClient").returns({ + get: sb.stub().resolves(REAL_REMOTE_SETTINGS_SNAPSHOT), + }); + + const engine = new openAIEngine(); + await engine.loadConfig(MODEL_FEATURES.CHAT); + + // Should fall back to default model + Assert.notEqual( + engine.model, + "nonexistent-model", + "Should not use invalid user model" + ); + const config = engine.getConfig(MODEL_FEATURES.CHAT); + Assert.equal(config.is_default, true, "Should fall back to default config"); + Assert.equal( + config.model, + 
engine.model, + "Engine model should match the default config's model" + ); + Assert.equal(config.version, "v1.0", "Should use v1.0"); + } finally { + sb.restore(); + Services.prefs.clearUserPref(PREF_MODEL); + } +}); + +add_task(async function test_loadPrompt_from_remote_settings() { + Services.prefs.setStringPref(PREF_API_KEY, API_KEY); + Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT); + + const sb = sinon.createSandbox(); + try { + const fakeEngine = { + runWithGenerator() { + throw new Error("not used"); + }, + }; + sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); + + sb.stub(openAIEngine, "getRemoteClient").returns({ + get: sb.stub().resolves(REAL_REMOTE_SETTINGS_SNAPSHOT), + }); + + const engine = new openAIEngine(); + await engine.loadConfig(MODEL_FEATURES.TITLE_GENERATION); + + const prompt = await engine.loadPrompt(MODEL_FEATURES.TITLE_GENERATION); + + Assert.ok(prompt, "Prompt should be loaded from remote settings"); + Assert.ok( + prompt.includes("title") || prompt.includes("conversation"), + "Prompt should contain expected content for title generation" + ); + } finally { + sb.restore(); + } +}); + +add_task(async function test_loadPrompt_fallback_to_local() { + Services.prefs.setStringPref(PREF_API_KEY, API_KEY); + Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT); + + const sb = sinon.createSandbox(); + try { + const fakeEngine = { + runWithGenerator() { + throw new Error("not used"); + }, + }; + sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); + + sb.stub(openAIEngine, "getRemoteClient").returns({ + get: sb.stub().resolves([]), + }); + + const engine = new openAIEngine(); + await engine.loadConfig(MODEL_FEATURES.TITLE_GENERATION); + + const prompt = await engine.loadPrompt(MODEL_FEATURES.TITLE_GENERATION); + + Assert.ok(prompt, "Prompt should fallback to local prompt"); + Assert.ok( + prompt.includes("Generate a concise chat title"), + "Should load local prompt when remote settings has no config" + ); + } finally { + 
sb.restore(); + } +}); + +add_task(async function test_build_with_feature() { + Services.prefs.setStringPref(PREF_API_KEY, API_KEY); + Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT); + + const sb = sinon.createSandbox(); + try { + const fakeEngine = { + runWithGenerator() { + throw new Error("not used"); + }, + }; + const createEngineStub = sb + .stub(openAIEngine, "_createEngine") + .resolves(fakeEngine); + + sb.stub(openAIEngine, "getRemoteClient").returns({ + get: sb.stub().resolves(REAL_REMOTE_SETTINGS_SNAPSHOT), + }); + + const engine = await openAIEngine.build(MODEL_FEATURES.CHAT); + + Assert.ok(engine.engineInstance, "Engine instance should be created"); + Assert.equal(engine.feature, MODEL_FEATURES.CHAT, "Feature should be set"); + Assert.ok(engine.model, "Model should be loaded from remote settings"); + + const opts = createEngineStub.firstCall.args[0]; + Assert.ok(opts.modelId, "Model should be passed to engine creation"); + Assert.equal( + opts.modelId, + engine.model, + "Model passed to engine should match loaded model" + ); + } finally { + sb.restore(); + } +}); + +add_task(async function test_inference_params_from_config() { + Services.prefs.setStringPref(PREF_API_KEY, API_KEY); + Services.prefs.setStringPref(PREF_ENDPOINT, ENDPOINT); + + const sb = sinon.createSandbox(); + try { + const fakeEngine = { + runWithGenerator() { + throw new Error("not used"); + }, + }; + sb.stub(openAIEngine, "_createEngine").resolves(fakeEngine); + + sb.stub(openAIEngine, "getRemoteClient").returns({ + get: sb.stub().resolves(REAL_REMOTE_SETTINGS_SNAPSHOT), + }); + + const engine = new openAIEngine(); + await engine.loadConfig(MODEL_FEATURES.CHAT); + + const config = engine.getConfig(MODEL_FEATURES.CHAT); + Assert.ok(config, "Config should be loaded"); + + const inferenceParams = config?.parameters || {}; + Assert.equal( + typeof inferenceParams, + "object", + "Inference parameters should be an object" + ); + Assert.equal( + inferenceParams.temperature, + 1.0, + 
"Temperature should be loaded from parameters" + ); + } finally { + sb.restore(); + } +}); diff --git a/browser/components/aiwindow/models/tests/xpcshell/xpcshell.toml b/browser/components/aiwindow/models/tests/xpcshell/xpcshell.toml @@ -4,7 +4,9 @@ run-if = [ ] head = "head.js" firefox-appdir = "browser" -support-files = [] +support-files = [ + "ai-window-prompts-remote-settings-snapshot.json", +] ["test_Chat.js"] @@ -40,4 +42,6 @@ support-files = [] ["test_Utils.js"] +["test_Utils_RemoteSettings.js"] + ["test_intent_classifier.js"] diff --git a/browser/components/aiwindow/ui/modules/ChatConversation.sys.mjs b/browser/components/aiwindow/ui/modules/ChatConversation.sys.mjs @@ -234,6 +234,8 @@ export class ChatConversation { */ async generatePrompt(prompt, pageUrl) { if (!this.#messages.length) { + // TODO: Bug 2008865 + // switch to use remote settings prompt accessed via engine.loadPrompt(feature) this.addSystemMessage(SYSTEM_PROMPT_TYPE.TEXT, assistantPrompt); }