Utils.sys.mjs (18834B)
1 /** 2 * This Source Code Form is subject to the terms of the Mozilla Public 3 * License, v. 2.0. If a copy of the MPL was not distributed with this 4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. 5 */ 6 7 /** 8 * This module defines utility functions and classes needed for invoking LLMs such as: 9 * - Creating and running OpenAI engine instances 10 * - Rendering prompts from files 11 */ 12 13 import { createEngine } from "chrome://global/content/ml/EngineProcess.sys.mjs"; 14 import { getFxAccountsSingleton } from "resource://gre/modules/FxAccounts.sys.mjs"; 15 import { 16 OAUTH_CLIENT_ID, 17 SCOPE_PROFILE, 18 } from "resource://gre/modules/FxAccountsCommon.sys.mjs"; 19 import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs"; 20 21 const lazy = XPCOMUtils.declareLazy({ 22 RemoteSettings: "resource://services-settings/remote-settings.sys.mjs", 23 }); 24 25 const MODEL_PREF = "browser.aiwindow.model"; 26 27 /** 28 * Default engine ID used for all AI Window features 29 */ 30 export const DEFAULT_ENGINE_ID = "smart-openai"; 31 32 /** 33 * Service types for different AI Window features 34 */ 35 export const SERVICE_TYPES = Object.freeze({ 36 AI: "ai", 37 MEMORIES: "memories", 38 }); 39 40 /** 41 * Observer for model preference changes. 42 * Invalidates the Remote Settings client cache when user changes their model preference. 43 */ 44 const modelPrefObserver = { 45 observe(_subject, topic, data) { 46 if (topic === "nsPref:changed" && data === MODEL_PREF) { 47 console.warn( 48 "Model preference changed, invalidating Remote Settings cache" 49 ); 50 openAIEngine._remoteClient = null; 51 } 52 }, 53 }; 54 Services.prefs.addObserver(MODEL_PREF, modelPrefObserver); 55 56 /** 57 * Feature identifiers for AI Window model, configurations and prompts. 58 * These are used to look up model configs, prompts, and inference parameters 59 * from Remote Settings or local defaults. 
60 */ 61 export const MODEL_FEATURES = Object.freeze({ 62 CHAT: "chat", 63 TITLE_GENERATION: "title-generation", 64 CONVERSATION_SUGGESTIONS_SIDEBAR_STARTER: 65 "conversation-suggestions-sidebar-starter", 66 CONVERSATION_SUGGESTIONS_FOLLOWUP: "conversation-suggestions-followup", 67 CONVERSATION_SUGGESTIONS_ASSISTANT_LIMITATIONS: 68 "conversation-suggestions-assistant-limitations", 69 CONVERSATION_SUGGESTIONS_MEMORIES: "conversation-suggestions-memories", 70 // TODO: update with actual memories prompts identifiers 71 MEMORIES: "memories", 72 }); 73 74 /** 75 * Default model IDs for each feature. 76 * These are Mozilla's recommended models, used when user hasn't configured 77 * custom settings or when remote setting retrieval fails. 78 */ 79 export const DEFAULT_MODEL = Object.freeze({ 80 [MODEL_FEATURES.CHAT]: "qwen3-235b-a22b-instruct-2507-maas", 81 [MODEL_FEATURES.TITLE_GENERATION]: "qwen3-235b-a22b-instruct-2507-maas", 82 [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_SIDEBAR_STARTER]: 83 "qwen3-235b-a22b-instruct-2507-maas", 84 [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_FOLLOWUP]: 85 "qwen3-235b-a22b-instruct-2507-maas", 86 [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_ASSISTANT_LIMITATIONS]: 87 "qwen3-235b-a22b-instruct-2507-maas", 88 [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_INSIGHTS]: 89 "qwen3-235b-a22b-instruct-2507-maas", 90 // TODO: update with actual memories default model 91 [MODEL_FEATURES.MEMORIES]: "qwen3-235b-a22b-instruct-2507-maas", 92 }); 93 94 /** 95 * Major version compatibility requirements for each feature. 
96 * When incrementing a feature's major version: 97 * - Update this constant 98 * - Ensure Remote Settings has configs for the new major version 99 * - Old clients will continue using old major version 100 */ 101 export const FEATURE_MAJOR_VERSIONS = Object.freeze({ 102 [MODEL_FEATURES.CHAT]: 1, 103 [MODEL_FEATURES.TITLE_GENERATION]: 1, 104 [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_SIDEBAR_STARTER]: 1, 105 [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_FOLLOWUP]: 1, 106 [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_ASSISTANT_LIMITATIONS]: 1, 107 [MODEL_FEATURES.CONVERSATION_SUGGESTIONS_INSIGHTS]: 1, 108 // TODO: add major version for memories prompts 109 }); 110 111 /** 112 * Remote Settings configuration record structure 113 * 114 * @typedef {object} RemoteSettingsConfig 115 * @property {string} feature - Feature identifier 116 * @property {string} model - Model identifier for LLM inference 117 * @property {string} prompts - Prompt template content 118 * @property {string} version - Version string in "v{major}.{minor}" format 119 * @property {boolean} [is_default] - Whether this is the default config for the feature 120 * @property {object} [parameters] - Optional inference parameters (e.g., temperature) 121 * @property {string[]} [additional_components] - Optional list of dependent feature configs 122 */ 123 124 /** 125 * Parses a version string in the format "v{major}.{minor}". 126 * 127 * @param {string} versionString - Version string to parse (e.g., "v1.2") 128 * @returns {object|null} Parsed version with major and minor numbers, or null if invalid 129 */ 130 function parseVersion(versionString) { 131 const match = /^v(\d+)\.(\d+)$/.exec(versionString || ""); 132 if (!match) { 133 return null; 134 } 135 return { 136 major: Number(match[1]), 137 minor: Number(match[2]), 138 original: versionString, 139 }; 140 } 141 142 /** 143 * Selects the main configuration for a feature based on version and model preferences. 
144 * 145 * Remote Settings maintains only the latest minor version for each (feature, model, major_version) combination. 146 * 147 * Selection logic: 148 * 1. Filter to configs matching the required major version 149 * 2. If user has model preference, find that model's config 150 * 3. Otherwise, find the default config (is_default: true) 151 * 152 * @param {Array} featureConfigs - All configs for the feature from Remote Settings 153 * @param {object} options - Selection options 154 * @param {number} options.majorVersion - Required major version for the feature 155 * @param {string} options.userModel - User's preferred model (empty string if none) 156 * @returns {object|null} Selected config or null if no match 157 */ 158 function selectMainConfig(featureConfigs, { majorVersion, userModel }) { 159 // Filter to configs matching the required major version 160 const sameMajor = featureConfigs.filter(config => { 161 const parsed = parseVersion(config.version); 162 return parsed && parsed.major === majorVersion; 163 }); 164 165 if (sameMajor.length === 0) { 166 return null; 167 } 168 169 // If user specified a model preference, find that model's config 170 if (userModel) { 171 const userModelConfig = sameMajor.find( 172 config => config.model === userModel 173 ); 174 if (userModelConfig) { 175 return userModelConfig; 176 } 177 // User's model not found in this major version - fall through to defaults 178 console.warn( 179 `User model "${userModel}" not found for major version ${majorVersion}, using default` 180 ); 181 } 182 183 // No user model pref OR user's model not found: use default 184 const defaultConfig = sameMajor.find(config => config.is_default === true); 185 if (defaultConfig) { 186 return defaultConfig; 187 } 188 189 // No default found - this shouldn't happen with proper Remote Settings data 190 console.warn(`No default config found for major version ${majorVersion}`); 191 return null; 192 } 193 194 /** 195 * openAIEngine class 196 * 197 * Contains methods 
to create engine instances and estimate token usage. 198 */ 199 export class openAIEngine { 200 /** 201 * Exposing createEngine for testing purposes. 202 */ 203 static _createEngine = createEngine; 204 205 /** 206 * The Remote Settings collection name for AI window prompt configurations 207 */ 208 static RS_AI_WINDOW_COLLECTION = "ai-window-prompts"; 209 210 /** 211 * Cached Remote Settings client 212 * Cache is invalidated when user changes MODEL_PREF pref via modelPrefObserver 213 * 214 * @type {RemoteSettingsClient | null} 215 */ 216 static _remoteClient = null; 217 218 /** 219 * Configuration map: { featureName: configObject } 220 * 221 * @type {object | null} 222 */ 223 #configs = null; 224 225 /** 226 * Main feature name 227 * 228 * @type {string | null} 229 */ 230 feature = null; 231 232 /** 233 * Resolved model name for LLM inference 234 * 235 * @type {string | null} 236 */ 237 model = null; 238 239 /** 240 * Gets the Remote Settings client for AI window configurations. 241 * 242 * @returns {RemoteSettingsClient} 243 */ 244 static getRemoteClient() { 245 if (openAIEngine._remoteClient) { 246 return openAIEngine._remoteClient; 247 } 248 249 const client = lazy.RemoteSettings(openAIEngine.RS_AI_WINDOW_COLLECTION, { 250 bucketName: "main", 251 }); 252 253 openAIEngine._remoteClient = client; 254 return client; 255 } 256 257 /** 258 * Applies default configuration fallback when Remote Settings selection fails 259 * 260 * @param {string} feature - The feature identifier 261 * @private 262 */ 263 _applyDefaultConfig(feature) { 264 this.feature = feature; 265 this.model = DEFAULT_MODEL[feature]; 266 this.#configs = {}; 267 } 268 269 /** 270 * Loads configuration from Remote Settings with version-aware selection. 271 * 272 * Selection logic: 273 * 1. Filters configs by feature and major version compatibility 274 * 2. If user has model preference, finds latest minor for that model 275 * 3. Otherwise, finds latest minor among default configs 276 * 4. 
Falls back to latest minor overall if no defaults 277 * 5. Falls back to local defaults if no matching major version 278 * 279 * @param {string} feature - The feature identifier from MODEL_FEATURES 280 * @returns {Promise<void>} 281 * Sets this.feature to the feature name 282 * Sets this.model to the selected model ID 283 * Sets this.#configs to contain feature's and additional_components' configs 284 */ 285 async loadConfig(feature) { 286 const client = openAIEngine.getRemoteClient(); 287 const allRecords = await client.get(); 288 289 // Filter to configs for this feature 290 const featureConfigs = allRecords.filter( 291 record => record.feature === feature 292 ); 293 294 // Fallback to default if no remote settings records for given feature 295 if (!featureConfigs.length) { 296 console.warn( 297 `No Remote Settings records found for feature: ${feature}, using default` 298 ); 299 this._applyDefaultConfig(feature); 300 return; 301 } 302 303 const majorVersion = FEATURE_MAJOR_VERSIONS[feature]; 304 const userModel = Services.prefs.getStringPref(MODEL_PREF, ""); 305 306 // Find matching config with version and provided userModel pref 307 const mainConfig = selectMainConfig(featureConfigs, { 308 majorVersion, 309 userModel, 310 }); 311 312 if (!mainConfig) { 313 console.warn( 314 `No matching model config found for feature: ${feature} with major version ${majorVersion}, using default` 315 ); 316 this._applyDefaultConfig(feature); 317 return; 318 } 319 320 // Store the selected configuration 321 this.feature = feature; 322 this.model = mainConfig.model; 323 324 // Build configsMap for looking up additional_components 325 const configsMap = new Map(allRecords.map(r => [r.feature, r])); 326 327 // Build configs map: { featureName: configObject } 328 this.#configs = {}; 329 this.#configs[feature] = mainConfig; 330 331 // Add additional_components if exists 332 // This field lists what other remote settings configs are needed 333 // as dependency to the current feature. 
334 if (mainConfig.additional_components) { 335 for (const componentFeature of mainConfig.additional_components) { 336 const componentConfig = configsMap.get(componentFeature); 337 if (componentConfig) { 338 this.#configs[componentFeature] = componentConfig; 339 } else { 340 console.warn( 341 `Additional component "${componentFeature}" not found in Remote Settings` 342 ); 343 } 344 } 345 } 346 } 347 348 /** 349 * Gets the configuration for a specific feature. 350 * 351 * @param {string} [feature] - The feature identifier. Defaults to the main feature. 352 * @returns {object|null} The feature's configuration object 353 */ 354 getConfig(feature) { 355 const targetFeature = feature || this.feature; 356 return this.#configs?.[targetFeature] || null; 357 } 358 359 /** 360 * Loads a prompt for the specified feature. 361 * Tries Remote Settings first, then falls back to local prompts. 362 * 363 * @param {string} feature - The feature identifier 364 * @returns {Promise<string>} The prompt content 365 */ 366 async loadPrompt(feature) { 367 // Try loading from Remote Settings first 368 const config = this.getConfig(feature); 369 if (config?.prompts) { 370 return config.prompts; 371 } 372 373 console.warn( 374 `No Remote Settings prompt for ${feature}, falling back to local` 375 ); 376 377 // Fall back to local prompts 378 try { 379 return await this.#loadLocalPrompt(feature); 380 } catch (error) { 381 throw new Error(`Failed to load prompt for ${feature}: ${error.message}`); 382 } 383 } 384 385 /** 386 * Loads a prompt from local prompt files. 
387 * 388 * @param {string} feature - The feature identifier 389 * @returns {Promise<string>} The prompt content from local files 390 */ 391 async #loadLocalPrompt(feature) { 392 switch (feature) { 393 case MODEL_FEATURES.CHAT: { 394 const { assistantPrompt } = await import( 395 "moz-src:///browser/components/aiwindow/models/prompts/AssistantPrompts.sys.mjs" 396 ); 397 return assistantPrompt; 398 } 399 case MODEL_FEATURES.TITLE_GENERATION: { 400 const { titleGenerationPrompt } = await import( 401 "moz-src:///browser/components/aiwindow/models/prompts/TitleGenerationPrompts.sys.mjs" 402 ); 403 return titleGenerationPrompt; 404 } 405 case MODEL_FEATURES.CONVERSATION_SUGGESTIONS_SIDEBAR_STARTER: { 406 const { conversationStarterPrompt } = await import( 407 "moz-src:///browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs" 408 ); 409 return conversationStarterPrompt; 410 } 411 case MODEL_FEATURES.CONVERSATION_SUGGESTIONS_FOLLOWUP: { 412 const { conversationFollowupPrompt } = await import( 413 "moz-src:///browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs" 414 ); 415 return conversationFollowupPrompt; 416 } 417 case MODEL_FEATURES.CONVERSATION_SUGGESTIONS_ASSISTANT_LIMITATIONS: { 418 const { assistantLimitations } = await import( 419 "moz-src:///browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs" 420 ); 421 return assistantLimitations; 422 } 423 case MODEL_FEATURES.CONVERSATION_SUGGESTIONS_MEMORIES: { 424 const { conversationMemoriesPrompt } = await import( 425 "moz-src:///browser/components/aiwindow/models/prompts/ConversationSuggestionsPrompts.sys.mjs" 426 ); 427 return conversationMemoriesPrompt; 428 } 429 // TODO: add local memories prompts imports for each feature 430 default: 431 throw new Error(`No local prompt found for feature: ${feature}`); 432 } 433 } 434 435 /** 436 * Builds an openAIEngine instance with configuration loaded from Remote Settings. 
437 * 438 * @param {string} feature 439 * The feature name to use to retrieve remote settings for prompts. 440 * @param {string} engineId 441 * The engine ID for MLEngine creation. Defaults to DEFAULT_ENGINE_ID. 442 * @param {string} serviceType 443 * The type of message to be sent ("ai", "memories", "s2s"). 444 * Defaults to SERVICE_TYPES.AI. 445 * @returns {Promise<object>} 446 * Promise that will resolve to the configured engine instance. 447 */ 448 static async build( 449 feature, 450 engineId = DEFAULT_ENGINE_ID, 451 serviceType = SERVICE_TYPES.AI 452 ) { 453 const engine = new openAIEngine(); 454 455 await engine.loadConfig(feature); 456 457 engine.engineInstance = await openAIEngine.#createOpenAIEngine( 458 engineId, 459 serviceType, 460 engine.model 461 ); 462 463 return engine; 464 } 465 466 /** 467 * Retrieves the Firefox account token 468 * 469 * @returns {Promise<string|null>} The Firefox account token (string) or null 470 */ 471 static async getFxAccountToken() { 472 try { 473 const fxAccounts = getFxAccountsSingleton(); 474 return await fxAccounts.getOAuthToken({ 475 // Scope needs to be updated in accordance with https://bugzilla.mozilla.org/show_bug.cgi?id=2005290 476 scope: SCOPE_PROFILE, 477 client_id: OAUTH_CLIENT_ID, 478 }); 479 } catch (error) { 480 console.warn("Error obtaining FxA token:", error); 481 return null; 482 } 483 } 484 485 /** 486 * Creates an OpenAI engine instance 487 * 488 * @param {string} engineId The identifier for the engine instance 489 * @param {string} serviceType The type of message to be sent ("ai", "memories", "s2s") 490 * @param {string | null} modelId The resolved model ID (already contains fallback logic) 491 * @returns {Promise<object>} The configured engine instance 492 */ 493 static async #createOpenAIEngine(engineId, serviceType, modelId = null) { 494 const extraHeadersPref = Services.prefs.getStringPref( 495 "browser.aiwindow.extraHeaders", 496 "{}" 497 ); 498 let extraHeaders = {}; 499 try { 500 extraHeaders = 
JSON.parse(extraHeadersPref); 501 } catch (e) { 502 console.error("Failed to parse extra headers from prefs:", e); 503 Services.prefs.clearUserPref("browser.aiwindow.extraHeaders"); 504 } 505 506 try { 507 const engineInstance = await openAIEngine._createEngine({ 508 apiKey: Services.prefs.getStringPref("browser.aiwindow.apiKey", ""), 509 backend: "openai", 510 baseURL: Services.prefs.getStringPref("browser.aiwindow.endpoint", ""), 511 engineId, 512 modelId, 513 modelRevision: "main", 514 taskName: "text-generation", 515 serviceType, 516 extraHeaders, 517 }); 518 return engineInstance; 519 } catch (error) { 520 console.error("Failed to create OpenAI engine:", error); 521 throw error; 522 } 523 } 524 525 /** 526 * Wrapper around engine.run to send message to the LLM 527 * Will eventually use `usage` from the LiteLLM API response for token telemetry 528 * 529 * @param {Map<string, any>} content OpenAI formatted messages to be sent to the LLM 530 * @returns {object} LLM response 531 */ 532 async run(content) { 533 return await this.engineInstance.run(content); 534 } 535 536 /** 537 * Wrapper around engine.runWithGenerator to send message to the LLM 538 * Will eventually use `usage` from the LiteLLM API response for token telemetry 539 * 540 * @param {Map<string, any>} options OpenAI formatted messages with streaming and tooling options to be sent to the LLM 541 * @returns {object} LLM response 542 */ 543 runWithGenerator(options) { 544 return this.engineInstance.runWithGenerator(options); 545 } 546 } 547 548 /** 549 * Renders a prompt from a string, replacing placeholders with provided strings. 
/**
 * Substitutes every `{placeholder}` token in a prompt template with its
 * replacement value.
 *
 * Replacement is literal: placeholder names containing regex metacharacters
 * (e.g. "a.b", "x+y") and replacement values containing `$`-patterns
 * (e.g. "$&") are handled verbatim. The previous implementation built a
 * `new RegExp("{" + name + "}", "g")` from the raw name (regex injection)
 * and let String.prototype.replace expand `$`-patterns in values.
 *
 * @param {string} rawPromptContent The raw prompt as a string
 * @param {object} stringsToReplace A map of placeholder names to their replacements
 * @returns {Promise<string>} The rendered prompt
 */
export async function renderPrompt(rawPromptContent, stringsToReplace = {}) {
  let finalPromptContent = rawPromptContent;

  for (const [placeholder, replacement] of Object.entries(stringsToReplace)) {
    // split/join performs a literal global replacement with no regex
    // compilation and no `$`-substitution semantics.
    finalPromptContent = finalPromptContent
      .split(`{${placeholder}}`)
      .join(replacement);
  }

  return finalPromptContent;
}