commit a4c4ec5b39cf29292a49e2f36b868963a7b4dec9
parent a8e957529a2aeb0af1aaac76a56906af1b1f554b
Author: Taimur Hasan <taimurhasan@proton.me>
Date: Wed, 22 Oct 2025 18:16:28 +0000
Bug 1969847 - Renamed Request and Response for Differentiation Fix r=gregtatum
Differential Revision: https://phabricator.services.mozilla.com/D269451
Diffstat:
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/toolkit/components/ml/actors/MLEngineParent.sys.mjs b/toolkit/components/ml/actors/MLEngineParent.sys.mjs
@@ -910,13 +910,13 @@ class ResponseOrChunkResolvers {
* potentially large amounts of memory to run models, with the speed and ease of running
* the engine.
*
- * @typedef {object} Request
+ * @typedef {object} EngineRunRequest
* @property {?string} [id] - The identifier for tracking this request. If not provided, an id will be auto-generated. Each inference callback will reference this id.
* @property {any[]} args - The arguments to pass to the pipeline. The required arguments depend on your model. See [Hugging Face Transformers documentation](https://huggingface.co/docs/transformers.js/en/api/models) for more details.
* @property {?object} options - The generation options to pass to the model. Refer to the [GenerationConfigType documentation](https://huggingface.co/docs/transformers.js/en/api/utils/generation#module_utils/generation..GenerationConfigType) for available options.
 * @property {?Uint8Array} [data] - For the imageToText model, this is the array containing the image data.
*
- * @template Response
+ * @template EngineRunResponse
*/
export class MLEngine {
/**
@@ -936,7 +936,7 @@ export class MLEngine {
/**
* Tie together a message id to a resolved response.
*
- * @type {Map<number, PromiseWithResolvers<Request>>}
+ * @type {Map<number, PromiseWithResolvers<EngineRunRequest>>}
*/
#requests = new Map();
@@ -1442,8 +1442,8 @@ export class MLEngine {
/**
* Run the inference request
*
- * @param {Request} request
- * @returns {Promise<Response>}
+ * @param {EngineRunRequest} request
+ * @returns {Promise<EngineRunResponse>}
*/
async run(request) {
const resolvers = Promise.withResolvers();
@@ -1521,8 +1521,8 @@ export class MLEngine {
/**
* Run the inference request using an async generator function.
*
- * @param {Request} request - The inference request containing the input data.
- * @returns {AsyncGenerator<Response, Response, unknown>} An async generator yielding chunks of generated responses.
+ * @param {EngineRunRequest} request - The inference request containing the input data.
+ * @returns {AsyncGenerator<EngineRunResponse, EngineRunResponse, unknown>} An async generator yielding chunks of generated responses.
*/
runWithGenerator = async function* (request) {
lazy.console.debug(`runWithGenerator called for request ${request}`);