tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

ConversationSuggestions.sys.mjs (11279B)


      1 /**
      2 * This Source Code Form is subject to the terms of the Mozilla Public
      3 * License, v. 2.0. If a copy of the MPL was not distributed with this
      4 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
      5 */
      6 
      7 // conversation starter/followup generation functions
      8 
      9 import {
     10  openAIEngine,
     11  renderPrompt,
     12  MODEL_FEATURES,
     13 } from "moz-src:///browser/components/aiwindow/models/Utils.sys.mjs";
     14 
     15 import { MESSAGE_ROLE } from "moz-src:///browser/components/aiwindow/ui/modules/ChatStore.sys.mjs";
     16 
     17 import { MemoriesManager } from "moz-src:///browser/components/aiwindow/models/memories/MemoriesManager.sys.mjs";
     18 
     19 // Max number of memories to include in prompts
     20 const MAX_NUM_MEMORIES = 8;
     21 
     22 /**
     23 * Helper to trim conversation history to recent messages, dropping empty messages, tool calls and responses
     24 *
     25 * @param {Array} messages - Array of chat messages
     26 * @param {number} maxMessages - Max number of messages to keep (default 15)
     27 * @returns {Array} Trimmed array of user/assistant messages
     28 */
     29 export function trimConversation(messages, maxMessages = 15) {
     30  const out = [];
     31 
     32  for (const m of messages) {
     33    if (
     34      (m.role === MESSAGE_ROLE.USER || m.role === MESSAGE_ROLE.ASSISTANT) &&
     35      m.content &&
     36      m.content.trim()
     37    ) {
     38      const roleString = m.role === MESSAGE_ROLE.USER ? "user" : "assistant";
     39      out.push({ role: roleString, content: m.content });
     40    }
     41  }
     42 
     43  return out.slice(-maxMessages);
     44 }
     45 
     46 /**
     47 * Helper to add memories to base prompt if applicable
     48 *
     49 * @param {string} base - base prompt
     50 * @param {string} conversationMemoriesPrompt - the memories prompt template
     51 * @returns {Promise<string>} - prompt with memories added if applicable
     52 */
     53 export async function addMemoriesToPrompt(base, conversationMemoriesPrompt) {
     54  let memorySummaries =
     55    await MemoriesGetterForSuggestionPrompts.getMemorySummariesForPrompt(
     56      MAX_NUM_MEMORIES
     57    );
     58  if (memorySummaries.length) {
     59    const memoriesBlock = memorySummaries.map(s => `- ${s}`).join("\n");
     60    const memoryPrompt = await renderPrompt(conversationMemoriesPrompt, {
     61      memories: memoriesBlock,
     62    });
     63    return `${base}\n${memoryPrompt}`;
     64  }
     65  return base;
     66 }
     67 
     68 /**
     69 * Cleans inference output into array of prompts
     70 *
     71 * @param {*} result - Inference output result object
     72 * @returns {Array<string>} - Cleaned array of prompts
     73 */
     74 export function cleanInferenceOutput(result) {
     75  const text = (result.finalOutput || "").trim();
     76  const lines = text
     77    .split(/\n+/)
     78    .map(l => l.trim())
     79    .filter(Boolean);
     80 
     81  const prompts = lines
     82    .map(line => line.replace(/^[-*\d.)\[\]]+\s*/, ""))
     83    .filter(p => p.length)
     84    .map(p => p.replace(/\.$/, "").replace(/^[^:]*:\s*/, ""));
     85  return prompts;
     86 }
     87 
     88 /**
     89 * Format object to JSON string safely
     90 *
     91 * @param {*} obj - Object to format
     92 * @returns {string} JSON string or string representation
     93 */
     94 const formatJson = obj => {
     95  try {
     96    return JSON.stringify(obj);
     97  } catch {
     98    return String(obj);
     99  }
    100 };
    101 
    102 export const NewTabStarterGenerator = {
    103  writingPrompts: [
    104    "Write a first draft",
    105    "Improve writing",
    106    "Proofread a message",
    107  ],
    108 
    109  planningPrompts: ["Simplify a topic", "Brainstorm ideas", "Help make a plan"],
    110 
    111  // TODO: discuss with design about updating phrasing to "pages" instead of "tabs"
    112  browsingPrompts: [
    113    { text: "Find tabs in history", minTabs: 0, needsHistory: true },
    114    { text: "Summarize tabs", minTabs: 1, needsHistory: false },
    115    { text: "Compare tabs", minTabs: 2, needsHistory: false },
    116  ],
    117 
    118  getRandom(arr) {
    119    return arr[Math.floor(Math.random() * arr.length)];
    120  },
    121 
    122  /**
    123   * Generate conversation starter prompts based on number of open tabs and browsing history prefs.
    124   * "places.history.enabled" covers "Remember browsing and download history" while
    125   * "browser.privatebrowsing.autostart" covers "Always use private mode" and "Never remember history".
    126   * We need to check both prefs to cover all cases where history can be disabled.
    127   *
    128   * @param {number} tabCount - number of open tabs
    129   * @returns {Promise<Array>} Array of {text, type} suggestion objects
    130   */
    131  async getPrompts(tabCount) {
    132    const historyEnabled = Services.prefs.getBoolPref("places.history.enabled");
    133    const privateBrowsing = Services.prefs.getBoolPref(
    134      "browser.privatebrowsing.autostart"
    135    );
    136    const validBrowsingPrompts = this.browsingPrompts.filter(
    137      p =>
    138        tabCount >= p.minTabs &&
    139        (!p.needsHistory || (historyEnabled && !privateBrowsing))
    140    );
    141 
    142    const writingPrompt = this.getRandom(this.writingPrompts);
    143    const planningPrompt = this.getRandom(this.planningPrompts);
    144    const browsingPrompt = validBrowsingPrompts.length
    145      ? this.getRandom(validBrowsingPrompts)
    146      : null;
    147 
    148    const prompts = [
    149      { text: writingPrompt, type: "chat" },
    150      { text: planningPrompt, type: "chat" },
    151    ];
    152 
    153    if (browsingPrompt) {
    154      prompts.push({ text: browsingPrompt.text, type: "chat" });
    155    }
    156 
    157    return prompts;
    158  },
    159 };
    160 
    161 /**
    162 * Generates conversation starter prompts based on tab context + (optional) user memories
    163 *
    164 * @param {Array} contextTabs - Array of tab objects with title, url, favicon
     165 * @param {number} n - Number of suggestions to generate (default 2)
    166 * @param {boolean} useMemories - Whether to include user memories in prompt (default false)
    167 * @returns {Promise<Array>} Array of {text, type} suggestion objects
    168 */
    169 export async function generateConversationStartersSidebar(
    170  contextTabs = [],
    171  n = 2,
    172  useMemories = false
    173 ) {
    174  try {
    175    const today = new Date().toISOString().slice(0, 10);
    176 
    177    // Format current tab (first in context or empty)
    178    const currentTab = contextTabs.length
    179      ? formatJson({ title: contextTabs[0].title, url: contextTabs[0].url })
    180      : "No current tab";
    181 
    182    // Format opened tabs
    183    let openedTabs;
    184    if (contextTabs.length >= 1) {
    185      openedTabs =
    186        contextTabs.length === 1
    187          ? "Only current tab is open"
    188          : formatJson(
    189              contextTabs.slice(1).map(t => ({ title: t.title, url: t.url }))
    190            );
    191    } else {
    192      openedTabs = "No tabs available";
    193    }
    194 
    195    // Build engine and load prompt
    196    const engineInstance = await openAIEngine.build(
    197      MODEL_FEATURES.CONVERSATION_SUGGESTIONS_SIDEBAR_STARTER
    198    );
    199 
    200    const conversationStarterPrompt = await engineInstance.loadPrompt(
    201      MODEL_FEATURES.CONVERSATION_SUGGESTIONS_SIDEBAR_STARTER
    202    );
    203 
    204    const assistantLimitations = await engineInstance.loadPrompt(
    205      MODEL_FEATURES.CONVERSATION_SUGGESTIONS_ASSISTANT_LIMITATIONS
    206    );
    207 
    208    // Base template
    209    const base = await renderPrompt(conversationStarterPrompt, {
    210      current_tab: currentTab,
    211      open_tabs: openedTabs,
    212      n: String(n),
    213      date: today,
    214      assistant_limitations: assistantLimitations,
    215    });
    216 
    217    let filled = base;
    218    if (useMemories) {
    219      const conversationMemoriesPrompt = await engineInstance.loadPrompt(
    220        MODEL_FEATURES.CONVERSATION_SUGGESTIONS_MEMORIES
    221      );
    222      filled = await addMemoriesToPrompt(base, conversationMemoriesPrompt);
    223    }
    224 
    225    // Get config for inference parameters
    226    const config = engineInstance.getConfig(engineInstance.feature);
    227    const inferenceParams = config?.parameters || {};
    228 
    229    const result = await engineInstance.run({
    230      messages: [
    231        {
    232          role: "system",
    233          content: "Return only the requested suggestions, one per line.",
    234        },
    235        { role: "user", content: filled },
    236      ],
    237      ...inferenceParams,
    238    });
    239 
    240    const prompts = cleanInferenceOutput(result);
    241 
    242    return prompts.slice(0, n).map(t => ({ text: t, type: "chat" }));
    243  } catch (e) {
    244    console.warn(
    245      "[ConversationSuggestions][sidebar-conversation-starters] failed:",
    246      e
    247    );
    248    return [];
    249  }
    250 }
    251 
    252 /**
    253 * Generates followup prompt suggestions based on conversation history
    254 *
    255 * @param {Array} conversationHistory - Array of chat messages
    256 * @param {object} currentTab - Current tab object with title, url
     257 * @param {number} n - Number of suggestions to generate (default 2)
    258 * @param {boolean} useMemories - Whether to include user memories in prompt (default false)
    259 * @returns {Promise<Array>} Array of {text, type} suggestion objects
    260 */
    261 export async function generateFollowupPrompts(
    262  conversationHistory,
    263  currentTab,
    264  n = 2,
    265  useMemories = false
    266 ) {
    267  try {
    268    const today = new Date().toISOString().slice(0, 10);
    269    const convo = trimConversation(conversationHistory);
    270    const currentTabStr =
    271      currentTab && Object.keys(currentTab).length
    272        ? formatJson({ title: currentTab.title, url: currentTab.url })
    273        : "No tab";
    274 
    275    // Build engine and load prompt
    276    const engineInstance = await openAIEngine.build(
    277      MODEL_FEATURES.CONVERSATION_SUGGESTIONS_FOLLOWUP
    278    );
    279 
    280    const conversationFollowupPrompt = await engineInstance.loadPrompt(
    281      MODEL_FEATURES.CONVERSATION_SUGGESTIONS_FOLLOWUP
    282    );
    283 
    284    const assistantLimitationsFollowup = await engineInstance.loadPrompt(
    285      MODEL_FEATURES.CONVERSATION_SUGGESTIONS_ASSISTANT_LIMITATIONS
    286    );
    287 
    288    const base = await renderPrompt(conversationFollowupPrompt, {
    289      current_tab: currentTabStr,
    290      conversation: formatJson(convo),
    291      n: String(n),
    292      date: today,
    293      assistant_limitations: assistantLimitationsFollowup,
    294    });
    295 
    296    let filled = base;
    297    if (useMemories) {
    298      const conversationMemoriesPrompt = await engineInstance.loadPrompt(
    299        MODEL_FEATURES.CONVERSATION_SUGGESTIONS_MEMORIES
    300      );
    301      filled = await addMemoriesToPrompt(base, conversationMemoriesPrompt);
    302    }
    303 
    304    // Get config for inference parameters
    305    const config = engineInstance.getConfig(
    306      MODEL_FEATURES.CONVERSATION_SUGGESTIONS_FOLLOWUP
    307    );
    308    const inferenceParams = config?.parameters || {};
    309 
    310    const result = await engineInstance.run({
    311      messages: [
    312        {
    313          role: "system",
    314          content: "Return only the requested suggestions, one per line.",
    315        },
    316        { role: "user", content: filled },
    317      ],
    318      ...inferenceParams,
    319    });
    320 
    321    const prompts = cleanInferenceOutput(result);
    322 
    323    return prompts.slice(0, n).map(t => ({ text: t, type: "chat" }));
    324  } catch (e) {
    325    console.warn("[ConversationSuggestions][followup-prompts] failed:", e);
    326    return [];
    327  }
    328 }
    329 
    330 export const MemoriesGetterForSuggestionPrompts = {
    331  /**
    332   * Gets the requested number of unique memory summaries for prompt inclusion
    333   *
    334   * @param {number} maxMemories - Max number of memories to return (default MAX_NUM_MEMORIES)
    335   * @returns {Promise<Array>} Array of string memory summaries
    336   */
    337 
    338  async getMemorySummariesForPrompt(maxMemories) {
    339    const memorySummaries = [];
    340    const memoryEntries = (await MemoriesManager.getAllMemories()) || {};
    341    const seenSummaries = new Set();
    342 
    343    for (const { memory_summary } of memoryEntries) {
    344      const summaryText = String(memory_summary ?? "").trim();
    345      if (!summaryText) {
    346        continue;
    347      }
    348      const lower = summaryText.toLowerCase();
    349      if (seenSummaries.has(lower)) {
    350        continue;
    351      }
    352      seenSummaries.add(lower);
    353      memorySummaries.push(summaryText);
    354      if (memorySummaries.length >= maxMemories) {
    355        break;
    356      }
    357    }
    358 
    359    return memorySummaries;
    360  },
    361 };