commit 8e3e96c53183d48fc2391176e0a9e0efc2347c7c
parent eba2838e1be12a09749961082df41a92433a3bca
Author: Florian Quèze <florian@queze.net>
Date: Fri, 31 Oct 2025 20:38:47 +0000
Bug 1997047 - Add taskcluster tasks for the script that generates JSON files used by the xpcshell timings dashboard, r=ahal,taskgraph-reviewers,frontend-codestyle-reviewers.
Differential Revision: https://phabricator.services.mozilla.com/D270480
Diffstat:
8 files changed, 2144 insertions(+), 11 deletions(-)
diff --git a/.cron.yml b/.cron.yml
@@ -463,3 +463,13 @@ jobs:
- mozilla-central
when:
- {weekday: 'Monday', hour: 3, minute: 0}
+
+ - name: test-info-xpcshell-timings-daily
+ job:
+ type: decision-task
+ treeherder-symbol: test-info(xpcshell-timings-daily)
+ target-tasks-method: test-info-xpcshell-timings-daily
+ run-on-projects:
+ - mozilla-central
+ when:
+ - {hour: 3, minute: 0}
diff --git a/eslint-file-globals.config.mjs b/eslint-file-globals.config.mjs
@@ -460,6 +460,8 @@ export default [
"testing/raptor/browsertime/utils/profiling.js",
"testing/raptor/browsertime/utils/support_measurements.js",
"testing/raptor/browsertime/welcome.js",
+ "testing/timings/fetch-xpcshell-data.js",
+ "testing/timings/profile-worker.js",
"testing/web-platform/tests/webrtc/third_party/sdp/sdp.js",
"testing/xpcshell/moz-http2/moz-http2-child.js",
"testing/xpcshell/moz-http2/moz-http2.js",
diff --git a/taskcluster/gecko_taskgraph/target_tasks.py b/taskcluster/gecko_taskgraph/target_tasks.py
@@ -1736,3 +1736,10 @@ def target_tasks_os_integration(full_task_graph, parameters, graph_config):
@register_target_task("weekly-test-info")
def target_tasks_weekly_test_info(full_task_graph, parameters, graph_config):
return ["source-test-file-metadata-test-info-all"]
+
+
+@register_target_task("test-info-xpcshell-timings-daily")
+def target_tasks_test_info_xpcshell_timings_daily(
+ full_task_graph, parameters, graph_config
+):
+ return ["source-test-file-metadata-test-info-xpcshell-timings-daily"]
diff --git a/taskcluster/kinds/source-test/file-metadata.yml b/taskcluster/kinds/source-test/file-metadata.yml
@@ -127,3 +127,50 @@ test-info-all:
./mach test-info testrun-report --output-file /builds/worker/artifacts/test-run-info.json &&
./mach test-info report --show-tests --show-summary --show-testruns --verbose --output-file /builds/worker/artifacts/test-info-all-tests.json --config-matrix-output-file /builds/worker/artifacts/test-info-testrun-matrix.json --runcounts-input-file /builds/worker/artifacts/test-run-info.json &&
./mach test-info report --show-annotations --output-file /builds/worker/artifacts/test-info-manifest-conditions.json
+
+test-info-xpcshell-timings-daily:
+ description: Generate daily xpcshell test timing data from recent pushes
+ treeherder:
+ symbol: test-info(xpcshell-timings-daily)
+ tier: 2
+ index:
+ product: source
+ job-name: test-info-xpcshell-timings
+ worker:
+ docker-image: {in-tree: debian12-amd64-build}
+ max-run-time: 3600
+ env:
+ MACH_BUILD_PYTHON_NATIVE_PACKAGE_SOURCE: system
+ run-on-projects: []
+ fetches:
+ toolchain:
+ - linux64-node
+ use-python: "3.11"
+ run:
+ using: run-task
+ cwd: '{checkout}'
+ command: >-
+ source taskcluster/scripts/misc/source-test-common.sh &&
+ ./mach test-info xpcshell-timings --days 21 --output-dir /builds/worker/artifacts/
+
+test-info-xpcshell-timings-rev:
+ description: Generate xpcshell test timing data for a specific revision
+ treeherder:
+ symbol: test-info(xpcshell-timings)
+ tier: 2
+ worker:
+ docker-image: {in-tree: debian12-amd64-build}
+ max-run-time: 1800
+ env:
+ MACH_BUILD_PYTHON_NATIVE_PACKAGE_SOURCE: system
+ run-on-projects: []
+ fetches:
+ toolchain:
+ - linux64-node
+ use-python: "3.11"
+ run:
+ using: run-task
+ cwd: '{checkout}'
+ command: >-
+ source taskcluster/scripts/misc/source-test-common.sh &&
+ ./mach test-info xpcshell-timings --revision=current --output-dir /builds/worker/artifacts/
diff --git a/testing/mach_commands.py b/testing/mach_commands.py
@@ -3,18 +3,23 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
+import json
import logging
import os
+import subprocess
import sys
from datetime import date, timedelta
# ruff linter deprecates List, required for Python 3.8 compatibility
from typing import List, Optional # noqa UP035
+from urllib.parse import urlparse
import requests
from mach.decorators import Command, CommandArgument, SubCommand
-from mozbuild.base import BuildEnvironmentNotFoundException
+from mozbuild.base import BuildEnvironmentNotFoundException, MozbuildObject
from mozbuild.base import MachCommandConditions as conditions
+from mozbuild.nodeutil import find_node_executable
+from mozsystemmonitor.resourcemonitor import SystemResourceMonitor
UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).
@@ -256,8 +261,6 @@ def addtest(
proc = None
if editor:
- import subprocess
-
proc = subprocess.Popen(f"{editor} {' '.join(paths)}", shell=True)
if proc:
@@ -586,8 +589,6 @@ def executable_name(name):
help="Extra arguments to pass down to the test harness.",
)
def run_jstests(command_context, shell, params):
- import subprocess
-
command_context.virtualenv_manager.ensure()
python = command_context.virtualenv_manager.python_path
@@ -620,8 +621,6 @@ def run_jstests(command_context, shell, params):
help="Extra arguments to pass down to the test harness.",
)
def run_jittests(command_context, shell, cgc, params):
- import subprocess
-
command_context.virtualenv_manager.ensure()
python = command_context.virtualenv_manager.python_path
@@ -660,8 +659,6 @@ def run_jittests(command_context, shell, cgc, params):
"omitted, the entire test suite is executed.",
)
def run_jsapitests(command_context, list=False, frontend_only=False, test_name=None):
- import subprocess
-
jsapi_tests_cmd = [
os.path.join(command_context.bindir, executable_name("jsapi-tests"))
]
@@ -682,8 +679,6 @@ def run_jsapitests(command_context, list=False, frontend_only=False, test_name=N
def run_check_js_msg(command_context):
- import subprocess
-
command_context.virtualenv_manager.ensure()
python = command_context.virtualenv_manager.python_path
@@ -722,6 +717,129 @@ def test_info(command_context):
"""
+class TestInfoNodeRunner(MozbuildObject):
+ """Helper that runs the xpcshell timings fetch script with node."""
+
+ def run_node_cmd(self, monitor, days=1, revision=None, output_dir=None):
+ """Run fetch-xpcshell-data.js with node and return its exit code."""
+
+ self.test_timings_dir = os.path.join(self.topsrcdir, "testing", "timings")
+ test_runner_script = os.path.join(
+ self.test_timings_dir, "fetch-xpcshell-data.js"
+ )
+
+ # Build the command to run
+ node_binary, _ = find_node_executable()
+ cmd = [node_binary, test_runner_script]
+
+ if revision:
+ cmd.extend(["--revision", revision])
+ else:
+ cmd.extend(["--days", str(days)])
+
+ if output_dir:
+ cmd.extend(["--output-dir", os.path.abspath(output_dir)])
+
+ print(f"Running: {' '.join(cmd)}")
+ print(f"Working directory: {self.test_timings_dir}")
+
+ try:
+ # Run the test runner and capture stdout line by line
+ process = subprocess.Popen(
+ cmd,
+ cwd=self.test_timings_dir,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True,
+ bufsize=1,
+ )
+
+ for line in process.stdout:
+ line = line.rstrip()
+ # Print to console
+ print(line)
+
+ # Add as instant event marker to profile (skip empty lines)
+ if line:
+ monitor.record_event(line)
+
+ process.wait()
+ return process.returncode
+ except FileNotFoundError:
+ print(
+ "ERROR: Node.js not found. Please ensure Node.js is installed and in your PATH."
+ )
+ return 1
+ except Exception as e:
+ print(f"ERROR: Failed to run TestInfo node command: {e}")
+ return 1
+
+
+@SubCommand(
+ "test-info",
+ "xpcshell-timings",
+ description="Collect timing information for XPCShell test jobs.",
+)
+@CommandArgument(
+ "--days",
+ default=1,
+ help="Number of days to download and aggregate, starting with yesterday",
+)
+@CommandArgument(
+ "--revision",
+ default="",
+ help="revision to fetch data for ('mozilla-central:<revision id>', '<revision id>' for a try push or 'current' to take the revision from the environment)",
+)
+@CommandArgument("--output-dir", help="Directory where the generated JSON files are written.")
+def test_info_xpcshell_timings(command_context, days, output_dir, revision=None):
+ # Start resource monitoring with 0.1s sampling rate
+ monitor = SystemResourceMonitor(poll_interval=0.1)
+ monitor.start()
+
+ try:
+ # Equivalent of running: node fetch-xpcshell-data.js --days <days>
+ runner = TestInfoNodeRunner.from_environment(
+ cwd=os.getcwd(), detect_virtualenv_mozinfo=False
+ )
+
+ # Handle 'current' special value to use current build's revision
+ if revision == "current":
+ rev = os.environ.get("MOZ_SOURCE_CHANGESET", "")
+ repo = os.environ.get("MOZ_SOURCE_REPO", "")
+
+ if rev and repo:
+ # Extract project name from repository URL
+ # e.g., https://hg.mozilla.org/try -> try
+ # e.g., https://hg.mozilla.org/mozilla-central -> mozilla-central
+ parsed_url = urlparse(repo)
+ project = os.path.basename(parsed_url.path)
+ revision = f"{project}:{rev}"
+ elif revision and ":" not in revision:
+ # Bare revision ID without project prefix - assume it's a try push
+ revision = f"try:{revision}"
+
+ runner.run_node_cmd(
+ monitor, days=days, revision=revision, output_dir=output_dir
+ )
+ finally:
+ # Stop resource monitoring and save profile
+ if output_dir:
+ monitor.stop(upload_dir=output_dir)
+ profile_path = os.path.join(output_dir, "profile_resource-usage.json")
+ else:
+ monitor.stop()
+ # This is where ./mach resource-usage will find the profile.
+ profile_path = command_context._get_state_filename(
+ "profile_build_resources.json"
+ )
+ with open(profile_path, "w", encoding="utf-8", newline="\n") as fh:
+ to_write = json.dumps(monitor.as_profile(), separators=(",", ":"))
+ fh.write(to_write)
+ print(f"Resource usage profile saved to: {profile_path}")
+ if not output_dir:
+ print("View it with: ./mach resource-usage")
+
+
@SubCommand(
"test-info",
"tests",
diff --git a/testing/timings/JSON_FORMAT.md b/testing/timings/JSON_FORMAT.md
@@ -0,0 +1,380 @@
+# XPCShell JSON Data Format Documentation
+
+This document describes the JSON file formats created by `fetch-xpcshell-data.js`.
+
+## Overview
+
+The script generates two types of JSON files for each date or try commit:
+
+1. **Test timing data**: `xpcshell-{date}.json` or `xpcshell-try-{revision}.json`
+2. **Resource usage data**: `xpcshell-{date}-resources.json` or `xpcshell-try-{revision}-resources.json`
+
+Both formats use string tables and index-based lookups to minimize file size.
+
+---
+
+## Test Timing Data Format
+
+### Top-Level Structure
+
+```json
+{
+ "metadata": { ... },
+ "tables": { ... },
+ "taskInfo": { ... },
+ "testInfo": { ... },
+ "testRuns": [ ... ]
+}
+```
+
+### metadata
+
+Contains information about the data collection:
+
+```json
+{
+ "date": "2025-10-14", // Date of the data (for date-based queries)
+ "revision": "abc123...", // Try commit revision (for try-based queries)
+ "pushId": 12345, // Treeherder push ID (for try-based queries)
+ "startTime": 1760400000, // Unix timestamp (seconds) used as base for relative timestamps
+ "generatedAt": "2025-10-15T14:24:33.451Z", // ISO timestamp when file was created
+ "jobCount": 3481, // Number of jobs fetched
+ "processedJobCount": 3481 // Number of jobs successfully processed
+}
+```
+
+### tables
+
+String tables for efficient storage. All strings are deduplicated and stored once, sorted by frequency (most frequently used first for better compression):
+
+```json
+{
+ "jobNames": [ // Job names (e.g., "test-linux1804-64/opt-xpcshell")
+ "test-linux1804-64/opt-xpcshell",
+ "test-macosx1015-64/debug-xpcshell",
+ ...
+ ],
+ "testPaths": [ // Test file paths (e.g., "dom/indexedDB/test/unit")
+ "dom/indexedDB/test/unit",
+ "toolkit/components/extensions/test/xpcshell",
+ ...
+ ],
+ "testNames": [ // Test filenames (e.g., "test_foo.js")
+ "test_foo.js",
+ "test_bar.js",
+ ...
+ ],
+ "repositories": [ // Repository names
+ "mozilla-central",
+ "autoland",
+ "try",
+ ...
+ ],
+ "statuses": [ // Test run statuses
+ "PASS-PARALLEL",
+ "PASS-SEQUENTIAL",
+ "SKIP",
+ "FAIL-PARALLEL",
+ "TIMEOUT-SEQUENTIAL",
+ "CRASH",
+ "EXPECTED-FAIL",
+ ...
+ ],
+ "taskIds": [ // TaskCluster task IDs with retry (always includes .retryId)
+ "YJJe4a0CRIqbAmcCo8n63w.0", // Retry 0
+ "XPPf5b1DRJrcBndDp9o74x.1", // Retry 1
+ ...
+ ],
+ "messages": [ // SKIP status messages (only for tests that were skipped)
+ "skip-if: os == 'linux'",
+ "disabled due to bug 123456",
+ ...
+ ],
+ "crashSignatures": [ // Crash signatures (only for crashed tests)
+ "mozilla::dom::Something::Crash",
+ "EMPTY: no crashing thread identified",
+ ...
+ ]
+}
+```
+
+### taskInfo
+
+Maps task IDs to their associated job names and repositories. These are parallel arrays indexed by `taskIdId`:
+
+```json
+{
+ "repositoryIds": [0, 1, 0, 2, ...], // Index into tables.repositories
+ "jobNameIds": [0, 0, 1, 1, ...] // Index into tables.jobNames
+}
+```
+
+**Example lookup:**
+```javascript
+const taskIdId = 5;
+const taskId = tables.taskIds[taskIdId]; // "YJJe4a0CRIqbAmcCo8n63w.0"
+const repository = tables.repositories[taskInfo.repositoryIds[taskIdId]]; // "mozilla-central"
+const jobName = tables.jobNames[taskInfo.jobNameIds[taskIdId]]; // "test-linux1804-64/opt-xpcshell"
+```
+
+### testInfo
+
+Maps test IDs to their test paths and names. These are parallel arrays indexed by `testId`:
+
+```json
+{
+ "testPathIds": [0, 0, 1, 2, ...], // Index into tables.testPaths
+ "testNameIds": [0, 1, 2, 3, ...] // Index into tables.testNames
+}
+```
+
+**Example lookup:**
+```javascript
+const testId = 10;
+const testPath = tables.testPaths[testInfo.testPathIds[testId]]; // "dom/indexedDB/test/unit"
+const testName = tables.testNames[testInfo.testNameIds[testId]]; // "test_foo.js"
+const fullPath = testPath ? `${testPath}/${testName}` : testName;
+```
+
+### testRuns
+
+A 2D sparse array structure: `testRuns[testId][statusId]`
+
+- First dimension: `testId` (index into testInfo arrays)
+- Second dimension: `statusId` (index into tables.statuses)
+
+Each `testRuns[testId][statusId]` contains data for all runs of that test with that specific status. If a test never had a particular status, that array position contains `null`:
+
+```json
+[
+ // testId 0
+ [
+ // statusId 0 (e.g., "PASS-PARALLEL")
+ {
+ "taskIdIds": [5, 12, 18, ...], // Indices into tables.taskIds
+ "durations": [1234, 1456, 1289, ...], // Test durations in milliseconds
+ "timestamps": [0, 15, 23, ...] // Differential compressed timestamps (seconds relative to metadata.startTime)
+ },
+ // statusId 1 - this test never had that status
+ null,
+ // statusId 2 (e.g., "SKIP")
+ {
+ "taskIdIds": [45, 67, ...],
+ "durations": [0, 0, ...],
+ "timestamps": [100, 200, ...],
+ "messageIds": [5, 5, ...] // Only present for SKIP status - indices into tables.messages (null if no message)
+ },
+ // statusId 3 (e.g., "CRASH")
+ {
+ "taskIdIds": [89, ...],
+ "durations": [5678, ...],
+ "timestamps": [300, ...],
+ "crashSignatureIds": [2, ...], // Only present for CRASH status - indices into tables.crashSignatures (null if none)
+ "minidumps": ["12345678-abcd-1234-abcd-1234567890ab", ...] // Only present for CRASH status - minidump IDs or null
+ }
+ ],
+ // testId 1
+ [ ... ],
+ ...
+]
+```
+
+**Timestamp decompression:**
+```javascript
+// Timestamps are differentially compressed
+let currentTime = metadata.startTime; // Base timestamp in seconds
+const decompressedTimestamps = statusGroup.timestamps.map(diff => {
+ currentTime += diff;
+ return currentTime;
+});
+```
+
+**Example: Get all runs of a specific test:**
+```javascript
+const testId = 10;
+const testGroup = testRuns[testId];
+
+for (let statusId = 0; statusId < testGroup.length; statusId++) {
+ const statusGroup = testGroup[statusId];
+ if (!statusGroup) continue; // This test never had this status
+
+ const status = tables.statuses[statusId];
+ console.log(`Status: ${status}, Runs: ${statusGroup.taskIdIds.length}`);
+
+ // Decompress timestamps
+ let currentTime = metadata.startTime;
+ for (let i = 0; i < statusGroup.taskIdIds.length; i++) {
+ currentTime += statusGroup.timestamps[i];
+ const taskId = tables.taskIds[statusGroup.taskIdIds[i]];
+ const duration = statusGroup.durations[i];
+ console.log(` Task: ${taskId}, Duration: ${duration}ms, Time: ${currentTime}`);
+ }
+}
+```
+
+---
+
+## Resource Usage Data Format
+
+### Top-Level Structure
+
+```json
+{
+ "jobNames": [ ... ],
+ "repositories": [ ... ],
+ "machineInfos": [ ... ],
+ "jobs": { ... }
+}
+```
+
+### Lookup Tables
+
+```json
+{
+ "jobNames": [ // Base job names without chunk numbers
+ "test-linux1804-64/opt-xpcshell",
+ "test-macosx1015-64/debug-xpcshell",
+ ...
+ ],
+ "repositories": [ // Repository names
+ "mozilla-central",
+ "autoland",
+ ...
+ ],
+ "machineInfos": [ // Machine specifications (memory in GB, rounded to 1 decimal)
+ {
+ "logicalCPUs": 8,
+ "physicalCPUs": 4,
+ "mainMemory": 15.6 // GB
+ },
+ {
+ "logicalCPUs": 16,
+ "physicalCPUs": 8,
+ "mainMemory": 31.4
+ },
+ ...
+ ]
+}
+```
+
+### jobs
+
+Parallel arrays containing resource usage data for each job, sorted by start time:
+
+```json
+{
+ "jobNameIds": [0, 0, 1, 1, ...], // Indices into jobNames array
+ "chunks": [1, 2, 1, 2, ...], // Chunk numbers (null if job name has no chunk)
+ "taskIds": ["YJJe4a0CRIqbAmcCo8n63w", "XPPf5b1DRJrcBndDp9o74x.1", ...], // Task IDs (format: "taskId" for retry 0, "taskId.retryId" for retry > 0)
+ "repositoryIds": [0, 0, 1, 1, ...], // Indices into repositories array
+ "startTimes": [0, 150, 23, 45, ...], // Differential compressed timestamps (seconds)
+ "machineInfoIds": [0, 0, 1, 1, ...], // Indices into machineInfos array
+ "maxMemories": [1234567890, ...], // Maximum memory used (bytes)
+ "idleTimes": [12345, ...], // Time with <50% of one core used (milliseconds)
+ "singleCoreTimes": [45678, ...], // Time using ~1 core (0.75-1.25 cores, milliseconds)
+ "cpuBuckets": [ // CPU usage time distribution (milliseconds per bucket)
+ [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000], // Job 0: [0-10%, 10-20%, ..., 90-100%]
+ [150, 250, 350, 450, 550, 650, 750, 850, 950, 1050], // Job 1
+ ...
+ ]
+}
+```
+
+**CPU Buckets Explanation:**
+- Array of 10 values representing time spent in each CPU usage range
+- Bucket 0: 0-10% CPU usage
+- Bucket 1: 10-20% CPU usage
+- ...
+- Bucket 9: 90-100% CPU usage
+- Values are in milliseconds
+
+**Idle Time Calculation:**
+- Idle = CPU usage < (50% of one core)
+- For 8-core machine: idle = CPU usage < 6.25%
+- For 16-core machine: idle = CPU usage < 3.125%
+
+**Single Core Time Calculation:**
+- Single core = CPU usage between 0.75 and 1.25 cores
+- For 8-core machine: 9.375% - 15.625%
+- For 16-core machine: 4.6875% - 7.8125%
+
+**Start Time Decompression:**
+```javascript
+let currentTime = 0; // Start times are relative to each other
+const decompressedStartTimes = jobs.startTimes.map(diff => {
+ currentTime += diff;
+ return currentTime;
+});
+```
+
+**Example: Get full information for a job:**
+```javascript
+const jobIndex = 5;
+const jobName = jobNames[jobs.jobNameIds[jobIndex]];
+const chunk = jobs.chunks[jobIndex]; // May be null
+const fullJobName = chunk !== null ? `${jobName}-${chunk}` : jobName;
+const taskId = jobs.taskIds[jobIndex];
+const repository = repositories[jobs.repositoryIds[jobIndex]];
+const machineInfo = machineInfos[jobs.machineInfoIds[jobIndex]];
+
+// Decompress start time
+let currentTime = 0;
+for (let i = 0; i <= jobIndex; i++) {
+ currentTime += jobs.startTimes[i];
+}
+const startTime = currentTime; // seconds since epoch
+
+const maxMemoryGB = jobs.maxMemories[jobIndex] / (1024 * 1024 * 1024);
+const idleTimeSeconds = jobs.idleTimes[jobIndex] / 1000;
+const singleCoreTimeSeconds = jobs.singleCoreTimes[jobIndex] / 1000;
+const cpuDistribution = jobs.cpuBuckets[jobIndex];
+const totalTime = cpuDistribution.reduce((sum, val) => sum + val, 0);
+const idlePercent = (jobs.idleTimes[jobIndex] / totalTime) * 100;
+```
+
+---
+
+## Data Compression Techniques
+
+The format uses several compression techniques to minimize file size:
+
+1. **String Tables**: All repeated strings (job names, test paths, etc.) are stored once and referenced by index
+2. **Frequency Sorting**: Strings are sorted by usage frequency (most common first) so that frequently-used items have smaller index values, reducing the number of digits in the serialized JSON
+3. **Differential Compression**: Timestamps are stored as differences from the previous value
+4. **Parallel Arrays**: Instead of arrays of objects, data is stored in parallel arrays to avoid repeating key names
+5. **Sparse Arrays**: In testRuns, status groups that don't exist are stored as `null`
+6. **Combined IDs**: TaskCluster task IDs and retry IDs are combined into a single string format: `"taskId.retryId"`
+7. **Chunk Extraction**: Job chunk numbers are extracted and stored separately from base job names
+
+---
+
+## Index File Format
+
+The `index.json` file lists all available dates:
+
+```json
+{
+ "dates": [
+ "2025-10-15",
+ "2025-10-14",
+ "2025-10-13",
+ ...
+ ]
+}
+```
+
+Dates are sorted in descending order (newest first).
+
+---
+
+## Notes
+
+- All timestamps in test timing data are in **seconds**
+- All durations are in **milliseconds**
+- Memory values in machineInfos are in **GB** (rounded to 1 decimal place)
+- Memory values in jobs.maxMemories are in **bytes**
+- The `testRuns` array is sparse - `testRuns[testId][statusId]` may be `null` if that test never had that status
+- **Task ID formats differ between files:**
+ - Test timing data: Always includes retry suffix (e.g., `"YJJe4a0CRIqbAmcCo8n63w.0"`)
+ - Resource usage data: Omits `.0` for retry 0 (e.g., `"YJJe4a0CRIqbAmcCo8n63w"`), includes suffix for retries > 0 (e.g., `"YJJe4a0CRIqbAmcCo8n63w.1"`)
+- The data structure is optimized for sequential access patterns used by the dashboards
diff --git a/testing/timings/fetch-xpcshell-data.js b/testing/timings/fetch-xpcshell-data.js
@@ -0,0 +1,1197 @@
+#!/usr/bin/env node
+
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const fs = require("fs");
+const path = require("path");
+const { Worker } = require("worker_threads");
+const os = require("os");
+
+const MAX_WORKERS = Math.min(32, os.cpus().length);
+
+const TASKCLUSTER_BASE_URL =
+ process.env.TASKCLUSTER_PROXY_URL ||
+ process.env.TASKCLUSTER_ROOT_URL ||
+ "https://firefox-ci-tc.services.mozilla.com";
+
+// Check for --output-dir parameter
+const OUTPUT_DIR = (() => {
+ const outputDirIndex = process.argv.findIndex(arg => arg === "--output-dir");
+ if (outputDirIndex !== -1 && outputDirIndex + 1 < process.argv.length) {
+ return process.argv[outputDirIndex + 1];
+ }
+ return "./xpcshell-data";
+})();
+
+const PROFILE_CACHE_DIR = "./profile-cache";
+
+let previousRunData = null;
+let allJobsCache = null;
+
+if (!fs.existsSync(OUTPUT_DIR)) {
+ fs.mkdirSync(OUTPUT_DIR, { recursive: true });
+}
+if (!fs.existsSync(PROFILE_CACHE_DIR)) {
+ fs.mkdirSync(PROFILE_CACHE_DIR, { recursive: true });
+}
+
+// Get date in YYYY-MM-DD format
+function getDateString(daysAgo = 0) {
+ const date = new Date();
+ date.setDate(date.getDate() - daysAgo);
+ return date.toISOString().split("T")[0];
+}
+
+async function fetchJson(url) {
+ const response = await fetch(url);
+ if (!response.ok) {
+ return null;
+ }
+ return response.json();
+}
+
+// Fetch commit push data from Treeherder API
+async function fetchCommitData(project, revision) {
+ console.log(`Fetching commit data for ${project}:${revision}...`);
+
+ const result = await fetchJson(
+ `https://treeherder.mozilla.org/api/project/${project}/push/?full=true&count=10&revision=${revision}`
+ );
+
+ if (!result || !result.results || result.results.length === 0) {
+ throw new Error(
+ `No push found for revision ${revision} on project ${project}`
+ );
+ }
+
+ const pushId = result.results[0].id;
+ console.log(`Found push ID: ${pushId}`);
+ return pushId;
+}
+
+// Fetch jobs from push
+async function fetchPushJobs(project, pushId) {
+ console.log(`Fetching jobs for push ID ${pushId}...`);
+
+ let allJobs = [];
+ let propertyNames = [];
+ let url = `https://treeherder.mozilla.org/api/jobs/?push_id=${pushId}`;
+
+ // The /jobs/ API is paginated, keep fetching until next is null
+ while (url) {
+ const result = await fetchJson(url);
+ if (!result) {
+ throw new Error(`Failed to fetch jobs for push ID ${pushId}`);
+ }
+
+ allJobs = allJobs.concat(result.results || []);
+ if (!propertyNames.length) {
+ propertyNames = result.job_property_names || [];
+ }
+
+ url = result.next;
+ }
+
+ // Get field indices dynamically
+ const jobTypeNameIndex = propertyNames.indexOf("job_type_name");
+ const taskIdIndex = propertyNames.indexOf("task_id");
+ const retryIdIndex = propertyNames.indexOf("retry_id");
+ const lastModifiedIndex = propertyNames.indexOf("last_modified");
+
+ const xpcshellJobs = allJobs
+ .filter(
+ job => job[jobTypeNameIndex] && job[jobTypeNameIndex].includes("xpcshell")
+ )
+ .map(job => ({
+ name: job[jobTypeNameIndex],
+ task_id: job[taskIdIndex],
+ retry_id: job[retryIdIndex] || 0,
+ start_time: job[lastModifiedIndex],
+ repository: project,
+ }));
+
+ console.log(
+ `Found ${xpcshellJobs.length} xpcshell jobs out of ${allJobs.length} total jobs`
+ );
+ return xpcshellJobs;
+}
+
+// Fetch xpcshell test data from treeherder database for a specific date
+async function fetchXpcshellData(targetDate) {
+ console.log(`Fetching xpcshell test data for ${targetDate}...`);
+
+ // Fetch data from the treeherder database if not already cached
+ if (!allJobsCache) {
+ console.log(`Querying treeherder database...`);
+ const result = await fetchJson(
+ "https://sql.telemetry.mozilla.org/api/queries/110630/results.json?api_key=Pyybfsna2r5KQkwYgSk9zqbYfc6Dv0rhxL99DFi1"
+ );
+
+ if (!result) {
+ throw new Error("Failed to fetch data from treeherder database");
+ }
+
+ const allJobs = result.query_result.data.rows;
+
+ // Cache only xpcshell jobs
+ allJobsCache = allJobs.filter(job => job.name.includes("xpcshell"));
+ console.log(
+ `Cached ${allJobsCache.length} xpcshell jobs from treeherder database (out of ${allJobs.length} total jobs)`
+ );
+ }
+
+ // Filter cached jobs for the target date
+ return allJobsCache.filter(job => job.start_time.startsWith(targetDate));
+}
+
+// Process jobs using worker threads with dynamic job distribution
+async function processJobsWithWorkers(jobs, targetDate = null) {
+ if (jobs.length === 0) {
+ return [];
+ }
+
+ const dateStr = targetDate ? ` for ${targetDate}` : "";
+ console.log(
+ `Processing ${jobs.length} jobs${dateStr} using ${MAX_WORKERS} workers...`
+ );
+
+ const jobQueue = [...jobs];
+ const results = [];
+ const workers = [];
+ let completedJobs = 0;
+ let lastProgressTime = 0;
+
+ return new Promise((resolve, reject) => {
+ // Track worker states
+ const workerStates = new Map();
+
+ // Create workers
+ for (let i = 0; i < MAX_WORKERS; i++) {
+ const worker = new Worker(path.join(__dirname, "profile-worker.js"), {
+ workerData: {
+ profileCacheDir: PROFILE_CACHE_DIR,
+ taskclusterBaseUrl: TASKCLUSTER_BASE_URL,
+ },
+ });
+
+ workers.push(worker);
+ workerStates.set(worker, { id: i + 1, ready: false, jobsProcessed: 0 });
+
+ worker.on("message", message => {
+ const workerState = workerStates.get(worker);
+
+ if (message.type === "ready") {
+ workerState.ready = true;
+ assignNextJob(worker);
+ } else if (message.type === "jobComplete") {
+ workerState.jobsProcessed++;
+ completedJobs++;
+
+ if (message.result) {
+ results.push(message.result);
+ }
+
+ // Show progress at most once per second, or on first/last job
+ const now = Date.now();
+ if (
+ completedJobs === 1 ||
+ completedJobs === jobs.length ||
+ now - lastProgressTime >= 1000
+ ) {
+ const percentage = Math.round((completedJobs / jobs.length) * 100);
+ const paddedCompleted = completedJobs
+ .toString()
+ .padStart(jobs.length.toString().length);
+ const paddedPercentage = percentage.toString().padStart(3); // Pad to 3 chars for alignment (0-100%)
+ console.log(
+ ` ${paddedPercentage}% ${paddedCompleted}/${jobs.length}`
+ );
+ lastProgressTime = now;
+ }
+
+ // Assign next job or finish
+ assignNextJob(worker);
+ } else if (message.type === "finished") {
+ checkAllComplete();
+ } else if (message.type === "error") {
+ reject(new Error(`Worker ${workerState.id} error: ${message.error}`));
+ }
+ });
+
+ worker.on("error", error => {
+ reject(
+ new Error(
+ `Worker ${workerStates.get(worker).id} thread error: ${error.message}`
+ )
+ );
+ });
+
+ worker.on("exit", code => {
+ if (code !== 0) {
+ reject(
+ new Error(
+ `Worker ${workerStates.get(worker).id} stopped with exit code ${code}`
+ )
+ );
+ }
+ });
+ }
+
+ function assignNextJob(worker) {
+ if (jobQueue.length) {
+ const job = jobQueue.shift();
+ worker.postMessage({ type: "job", job });
+ } else {
+ // No more jobs, tell worker to finish
+ worker.postMessage({ type: "shutdown" });
+ }
+ }
+
+ let resolved = false;
+ let workersFinished = 0;
+
+ function checkAllComplete() {
+ if (resolved) {
+ return;
+ }
+
+ workersFinished++;
+
+ if (workersFinished >= MAX_WORKERS) {
+ resolved = true;
+
+ // Terminate all workers to ensure clean exit
+ workers.forEach(worker => worker.terminate());
+
+ resolve(results);
+ }
+ }
+ });
+}
+
+// Create string tables and store raw data efficiently
+function createDataTables(jobResults) {
+ const tables = {
+ jobNames: [],
+ testPaths: [],
+ testNames: [],
+ repositories: [],
+ statuses: [],
+ taskIds: [],
+ messages: [],
+ crashSignatures: [],
+ };
+
+ // Maps for O(1) string lookups
+ const stringMaps = {
+ jobNames: new Map(),
+ testPaths: new Map(),
+ testNames: new Map(),
+ repositories: new Map(),
+ statuses: new Map(),
+ taskIds: new Map(),
+ messages: new Map(),
+ crashSignatures: new Map(),
+ };
+
+ // Task info maps task ID index to repository and job name indexes
+ const taskInfo = {
+ repositoryIds: [],
+ jobNameIds: [],
+ };
+
+ // Test info maps test ID index to test path and name indexes
+ const testInfo = {
+ testPathIds: [],
+ testNameIds: [],
+ };
+
+ // Map for fast testId lookup: fullPath -> testId
+ const testIdMap = new Map();
+
+ // Test runs grouped by test ID, then by status ID
+ // testRuns[testId] = array of status groups for that test
+ const testRuns = [];
+
+ function findStringIndex(tableName, string) {
+ const table = tables[tableName];
+ const map = stringMaps[tableName];
+
+ let index = map.get(string);
+ if (index === undefined) {
+ index = table.length;
+ table.push(string);
+ map.set(string, index);
+ }
+ return index;
+ }
+
+ for (const result of jobResults) {
+ if (!result || !result.timings) {
+ continue;
+ }
+
+ const jobNameId = findStringIndex("jobNames", result.jobName);
+ const repositoryId = findStringIndex("repositories", result.repository);
+
+ for (const timing of result.timings) {
+ const fullPath = timing.path;
+
+ // Check if we already have this test
+ let testId = testIdMap.get(fullPath);
+ if (testId === undefined) {
+ // New test - need to process path/name split and create entry
+ const lastSlashIndex = fullPath.lastIndexOf("/");
+
+ let testPath, testName;
+ if (lastSlashIndex === -1) {
+ // No directory, just the filename
+ testPath = "";
+ testName = fullPath;
+ } else {
+ testPath = fullPath.substring(0, lastSlashIndex);
+ testName = fullPath.substring(lastSlashIndex + 1);
+ }
+
+ const testPathId = findStringIndex("testPaths", testPath);
+ const testNameId = findStringIndex("testNames", testName);
+
+ testId = testInfo.testPathIds.length;
+ testInfo.testPathIds.push(testPathId);
+ testInfo.testNameIds.push(testNameId);
+ testIdMap.set(fullPath, testId);
+ }
+
+ const statusId = findStringIndex("statuses", timing.status || "UNKNOWN");
+ const taskIdString = `${result.taskId}.${result.retryId}`;
+ const taskIdId = findStringIndex("taskIds", taskIdString);
+
+ // Store task info only once per unique task ID
+ if (taskInfo.repositoryIds[taskIdId] === undefined) {
+ taskInfo.repositoryIds[taskIdId] = repositoryId;
+ taskInfo.jobNameIds[taskIdId] = jobNameId;
+ }
+
+ // Initialize test group if it doesn't exist
+ if (!testRuns[testId]) {
+ testRuns[testId] = [];
+ }
+
+ // Initialize status group within test if it doesn't exist
+ let statusGroup = testRuns[testId][statusId];
+ if (!statusGroup) {
+ statusGroup = {
+ taskIdIds: [],
+ durations: [],
+ timestamps: [],
+ };
+ // Only include messageIds array for SKIP status
+ if (timing.status === "SKIP") {
+ statusGroup.messageIds = [];
+ }
+ // Only include crash data arrays for CRASH status
+ if (timing.status === "CRASH") {
+ statusGroup.crashSignatureIds = [];
+ statusGroup.minidumps = [];
+ }
+ testRuns[testId][statusId] = statusGroup;
+ }
+
+ // Add test run to the appropriate test/status group
+ statusGroup.taskIdIds.push(taskIdId);
+ statusGroup.durations.push(Math.round(timing.duration));
+ statusGroup.timestamps.push(timing.timestamp);
+
+ // Store message ID for SKIP status (or null if no message)
+ if (timing.status === "SKIP") {
+ const messageId = timing.message
+ ? findStringIndex("messages", timing.message)
+ : null;
+ statusGroup.messageIds.push(messageId);
+ }
+
+ // Store crash data for CRASH status (or null if not available)
+ if (timing.status === "CRASH") {
+ const crashSignatureId = timing.crashSignature
+ ? findStringIndex("crashSignatures", timing.crashSignature)
+ : null;
+ statusGroup.crashSignatureIds.push(crashSignatureId);
+ statusGroup.minidumps.push(timing.minidump || null);
+ }
+ }
+ }
+
+ return {
+ tables,
+ taskInfo,
+ testInfo,
+ testRuns,
+ };
+}
+
/**
 * Sort every string table by reference frequency (descending) and remap all
 * index references in taskInfo, testInfo and testRuns to match.
 *
 * Ties are broken by comparing the string values, which makes the output
 * deterministic; placing the most-referenced strings at the smallest indices
 * also makes the serialized JSON shorter and more compressible.
 *
 * @param {Object} dataStructure - { tables, taskInfo, testInfo, testRuns }
 *   as produced by createDataTables.
 * @returns {Object} A new { tables, taskInfo, testInfo, testRuns } with
 *   sorted tables and remapped indices; the input structure is not modified.
 */
function sortStringTablesByFrequency(dataStructure) {
  const { tables, taskInfo, testInfo, testRuns } = dataStructure;

  // Count frequency of each index for each table
  const frequencyCounts = {
    jobNames: new Array(tables.jobNames.length).fill(0),
    testPaths: new Array(tables.testPaths.length).fill(0),
    testNames: new Array(tables.testNames.length).fill(0),
    repositories: new Array(tables.repositories.length).fill(0),
    statuses: new Array(tables.statuses.length).fill(0),
    taskIds: new Array(tables.taskIds.length).fill(0),
    messages: new Array(tables.messages.length).fill(0),
    crashSignatures: new Array(tables.crashSignatures.length).fill(0),
  };

  // Count taskInfo references.
  // taskInfo arrays are sparse (indexed by taskIdId), hence the undefined checks.
  for (const jobNameId of taskInfo.jobNameIds) {
    if (jobNameId !== undefined) {
      frequencyCounts.jobNames[jobNameId]++;
    }
  }
  for (const repositoryId of taskInfo.repositoryIds) {
    if (repositoryId !== undefined) {
      frequencyCounts.repositories[repositoryId]++;
    }
  }

  // Count testInfo references
  for (const testPathId of testInfo.testPathIds) {
    frequencyCounts.testPaths[testPathId]++;
  }
  for (const testNameId of testInfo.testNameIds) {
    frequencyCounts.testNames[testNameId]++;
  }

  // Count testRuns references
  for (const testGroup of testRuns) {
    if (!testGroup) {
      continue;
    }

    testGroup.forEach((statusGroup, statusId) => {
      if (!statusGroup) {
        return;
      }

      // A status is referenced once per run stored under it.
      frequencyCounts.statuses[statusId] += statusGroup.taskIdIds.length;

      for (const taskIdId of statusGroup.taskIdIds) {
        frequencyCounts.taskIds[taskIdId]++;
      }

      if (statusGroup.messageIds) {
        for (const messageId of statusGroup.messageIds) {
          if (messageId !== null) {
            frequencyCounts.messages[messageId]++;
          }
        }
      }

      if (statusGroup.crashSignatureIds) {
        for (const crashSigId of statusGroup.crashSignatureIds) {
          if (crashSigId !== null) {
            frequencyCounts.crashSignatures[crashSigId]++;
          }
        }
      }
    });
  }

  // Create sorted tables and index mappings (sorted by frequency descending)
  const sortedTables = {};
  const indexMaps = {};

  for (const [tableName, table] of Object.entries(tables)) {
    const counts = frequencyCounts[tableName];

    // Create array with value, oldIndex, and count
    const indexed = table.map((value, oldIndex) => ({
      value,
      oldIndex,
      count: counts[oldIndex],
    }));

    // Sort by count descending, then by value for deterministic order when counts are equal
    indexed.sort((a, b) => {
      if (b.count !== a.count) {
        return b.count - a.count;
      }
      return a.value.localeCompare(b.value);
    });

    // Extract sorted values and create mapping (oldIndex -> newIndex)
    sortedTables[tableName] = indexed.map(item => item.value);
    indexMaps[tableName] = new Map(
      indexed.map((item, newIndex) => [item.oldIndex, newIndex])
    );
  }

  // Remap taskInfo indices
  // taskInfo arrays are indexed by taskIdId, and when taskIds get remapped,
  // we need to rebuild the arrays at the new indices
  const sortedTaskInfo = {
    repositoryIds: [],
    jobNameIds: [],
  };

  for (
    let oldTaskIdId = 0;
    oldTaskIdId < taskInfo.repositoryIds.length;
    oldTaskIdId++
  ) {
    const newTaskIdId = indexMaps.taskIds.get(oldTaskIdId);
    sortedTaskInfo.repositoryIds[newTaskIdId] = indexMaps.repositories.get(
      taskInfo.repositoryIds[oldTaskIdId]
    );
    sortedTaskInfo.jobNameIds[newTaskIdId] = indexMaps.jobNames.get(
      taskInfo.jobNameIds[oldTaskIdId]
    );
  }

  // Remap testInfo indices
  const sortedTestInfo = {
    testPathIds: testInfo.testPathIds.map(oldId =>
      indexMaps.testPaths.get(oldId)
    ),
    testNameIds: testInfo.testNameIds.map(oldId =>
      indexMaps.testNames.get(oldId)
    ),
  };

  // Remap testRuns indices (taskIds, messages, crash signatures)
  const sortedTestRuns = testRuns.map(testGroup => {
    if (!testGroup) {
      return testGroup;
    }

    return testGroup.map(statusGroup => {
      if (!statusGroup) {
        return statusGroup;
      }

      const remapped = {
        taskIdIds: statusGroup.taskIdIds.map(oldId =>
          indexMaps.taskIds.get(oldId)
        ),
        durations: statusGroup.durations,
        timestamps: statusGroup.timestamps,
      };

      // Remap message IDs for SKIP status
      if (statusGroup.messageIds) {
        remapped.messageIds = statusGroup.messageIds.map(oldId =>
          oldId === null ? null : indexMaps.messages.get(oldId)
        );
      }

      // Remap crash data for CRASH status
      if (statusGroup.crashSignatureIds) {
        remapped.crashSignatureIds = statusGroup.crashSignatureIds.map(oldId =>
          oldId === null ? null : indexMaps.crashSignatures.get(oldId)
        );
      }
      if (statusGroup.minidumps) {
        remapped.minidumps = statusGroup.minidumps;
      }

      return remapped;
    });
  });

  // Remap statusId positions in testRuns (move status groups to their new positions)
  const finalTestRuns = sortedTestRuns.map(testGroup => {
    if (!testGroup) {
      return testGroup;
    }

    const remappedGroup = [];
    testGroup.forEach((statusGroup, oldStatusId) => {
      if (!statusGroup) {
        return;
      }
      const newStatusId = indexMaps.statuses.get(oldStatusId);
      remappedGroup[newStatusId] = statusGroup;
    });

    return remappedGroup;
  });

  return {
    tables: sortedTables,
    taskInfo: sortedTaskInfo,
    testInfo: sortedTestInfo,
    testRuns: finalTestRuns,
  };
}
+
+// Create resource usage data structure
+function createResourceUsageData(jobResults) {
+ const jobNames = [];
+ const jobNameMap = new Map();
+ const repositories = [];
+ const repositoryMap = new Map();
+ const machineInfos = [];
+ const machineInfoMap = new Map();
+
+ // Collect all job data first
+ const jobDataList = [];
+
+ for (const result of jobResults) {
+ if (!result || !result.resourceUsage) {
+ continue;
+ }
+
+ // Extract chunk number from job name (e.g., "test-linux1804-64/opt-xpcshell-1" -> "test-linux1804-64/opt-xpcshell", chunk: 1)
+ let jobNameBase = result.jobName;
+ let chunkNumber = null;
+ const match = result.jobName.match(/^(.+)-(\d+)$/);
+ if (match) {
+ jobNameBase = match[1];
+ chunkNumber = parseInt(match[2], 10);
+ }
+
+ // Get or create job name index
+ let jobNameId = jobNameMap.get(jobNameBase);
+ if (jobNameId === undefined) {
+ jobNameId = jobNames.length;
+ jobNames.push(jobNameBase);
+ jobNameMap.set(jobNameBase, jobNameId);
+ }
+
+ // Get or create repository index
+ let repositoryId = repositoryMap.get(result.repository);
+ if (repositoryId === undefined) {
+ repositoryId = repositories.length;
+ repositories.push(result.repository);
+ repositoryMap.set(result.repository, repositoryId);
+ }
+
+ // Get or create machine info index
+ const machineInfo = result.resourceUsage.machineInfo;
+ const machineInfoKey = JSON.stringify(machineInfo);
+ let machineInfoId = machineInfoMap.get(machineInfoKey);
+ if (machineInfoId === undefined) {
+ machineInfoId = machineInfos.length;
+ machineInfos.push(machineInfo);
+ machineInfoMap.set(machineInfoKey, machineInfoId);
+ }
+
+ // Combine taskId and retryId (omit .0 for retry 0)
+ const taskIdString =
+ result.retryId === 0
+ ? result.taskId
+ : `${result.taskId}.${result.retryId}`;
+
+ jobDataList.push({
+ jobNameId,
+ chunk: chunkNumber,
+ taskId: taskIdString,
+ repositoryId,
+ startTime: result.startTime,
+ machineInfoId,
+ maxMemory: result.resourceUsage.maxMemory,
+ idleTime: result.resourceUsage.idleTime,
+ singleCoreTime: result.resourceUsage.singleCoreTime,
+ cpuBuckets: result.resourceUsage.cpuBuckets,
+ });
+ }
+
+ // Sort by start time
+ jobDataList.sort((a, b) => a.startTime - b.startTime);
+
+ // Apply differential compression to start times and build parallel arrays
+ const jobs = {
+ jobNameIds: [],
+ chunks: [],
+ taskIds: [],
+ repositoryIds: [],
+ startTimes: [],
+ machineInfoIds: [],
+ maxMemories: [],
+ idleTimes: [],
+ singleCoreTimes: [],
+ cpuBuckets: [],
+ };
+
+ let previousStartTime = 0;
+ for (const jobData of jobDataList) {
+ jobs.jobNameIds.push(jobData.jobNameId);
+ jobs.chunks.push(jobData.chunk);
+ jobs.taskIds.push(jobData.taskId);
+ jobs.repositoryIds.push(jobData.repositoryId);
+
+ // Differential compression: store difference from previous
+ const timeDiff = jobData.startTime - previousStartTime;
+ jobs.startTimes.push(timeDiff);
+ previousStartTime = jobData.startTime;
+
+ jobs.machineInfoIds.push(jobData.machineInfoId);
+ jobs.maxMemories.push(jobData.maxMemory);
+ jobs.idleTimes.push(jobData.idleTime);
+ jobs.singleCoreTimes.push(jobData.singleCoreTime);
+ jobs.cpuBuckets.push(jobData.cpuBuckets);
+ }
+
+ return {
+ jobNames,
+ repositories,
+ machineInfos,
+ jobs,
+ };
+}
+
+// Helper to save a JSON file and log its size
+function saveJsonFile(data, filePath) {
+ fs.writeFileSync(filePath, JSON.stringify(data));
+
+ const stats = fs.statSync(filePath);
+ const fileSizeBytes = stats.size;
+
+ // Use MB for files >= 1MB, otherwise KB
+ if (fileSizeBytes >= 1024 * 1024) {
+ const fileSizeMB = Math.round(fileSizeBytes / (1024 * 1024));
+ const formattedBytes = fileSizeBytes.toLocaleString();
+ console.log(
+ `Saved ${filePath} - ${fileSizeMB}MB (${formattedBytes} bytes)`
+ );
+ } else {
+ const fileSizeKB = Math.round(fileSizeBytes / 1024);
+ console.log(`Saved ${filePath} - ${fileSizeKB}KB`);
+ }
+}
+
/**
 * Process a list of treeherder jobs into the two output data structures:
 * the test-timings structure (`testData`) and the resource-usage structure
 * (`resourceData`).
 *
 * Pipeline: fetch and parse profiles in worker threads, intern everything
 * into data tables, sort string tables by frequency, then convert absolute
 * test timestamps into differentially-compressed offsets relative to
 * `startTime`.
 *
 * @param {Array} jobs - Jobs to process (handed to processJobsWithWorkers).
 * @param {string} targetLabel - Label used in log messages only.
 * @param {number} startTime - Reference time in Unix seconds; all test
 *   timestamps in the output are stored relative to it.
 * @param {Object} metadata - Extra fields merged into testData.metadata.
 * @returns {Promise<?Object>} { testData, resourceData }, or null when there
 *   are no jobs or no test runs could be extracted.
 */
async function processJobsAndCreateData(
  jobs,
  targetLabel,
  startTime,
  metadata
) {
  if (jobs.length === 0) {
    console.log(`No jobs found for ${targetLabel}.`);
    return null;
  }

  // Process jobs to extract test timings
  const jobProcessingStart = Date.now();
  const jobResults = await processJobsWithWorkers(jobs, targetLabel);
  const jobProcessingTime = Date.now() - jobProcessingStart;
  console.log(
    `Successfully processed ${jobResults.length} jobs in ${jobProcessingTime}ms`
  );

  // Create efficient data tables
  const dataTablesStart = Date.now();
  let dataStructure = createDataTables(jobResults);
  const dataTablesTime = Date.now() - dataTablesStart;
  console.log(`Created data tables in ${dataTablesTime}ms:`);

  // Check if any test runs were extracted
  const hasTestRuns = !!dataStructure.testRuns.length;
  if (!hasTestRuns) {
    console.log(`No test run data extracted for ${targetLabel}`);
    return null;
  }

  // Total number of runs across all tests and statuses (for logging only).
  const totalRuns = dataStructure.testRuns.reduce((sum, testGroup) => {
    if (!testGroup) {
      return sum;
    }
    return (
      sum +
      testGroup.reduce(
        (testSum, statusGroup) =>
          testSum + (statusGroup ? statusGroup.taskIdIds.length : 0),
        0
      )
    );
  }, 0);
  console.log(
    `  ${dataStructure.testInfo.testPathIds.length} tests, ${totalRuns} runs, ${dataStructure.tables.taskIds.length} tasks, ${dataStructure.tables.jobNames.length} job names, ${dataStructure.tables.statuses.length} statuses`
  );

  // Sort string tables by frequency for deterministic output and better compression
  const sortingStart = Date.now();
  dataStructure = sortStringTablesByFrequency(dataStructure);
  const sortingTime = Date.now() - sortingStart;
  console.log(`Sorted string tables by frequency in ${sortingTime}ms`);

  // Convert absolute timestamps to relative and apply differential compression (in place)
  for (const testGroup of dataStructure.testRuns) {
    if (!testGroup) {
      continue;
    }

    for (const statusGroup of testGroup) {
      if (!statusGroup) {
        continue;
      }

      // Convert timestamps to relative in place: the /1000 turns the stored
      // millisecond timestamps into seconds before subtracting startTime.
      for (let i = 0; i < statusGroup.timestamps.length; i++) {
        statusGroup.timestamps[i] =
          Math.floor(statusGroup.timestamps[i] / 1000) - startTime;
      }

      // Map to array of objects including crash data if present, so that the
      // parallel arrays stay aligned through the sort below.
      const runs = statusGroup.timestamps.map((ts, i) => {
        const run = {
          timestamp: ts,
          taskIdId: statusGroup.taskIdIds[i],
          duration: statusGroup.durations[i],
        };
        // Include crash data if this is a CRASH status group
        if (statusGroup.crashSignatureIds) {
          run.crashSignatureId = statusGroup.crashSignatureIds[i];
        }
        if (statusGroup.minidumps) {
          run.minidump = statusGroup.minidumps[i];
        }
        // Include message data if this is a SKIP status group
        if (statusGroup.messageIds) {
          run.messageId = statusGroup.messageIds[i];
        }
        return run;
      });

      // Sort by timestamp
      runs.sort((a, b) => a.timestamp - b.timestamp);

      // Apply differential compression in place for timestamps
      let previousTimestamp = 0;
      for (const run of runs) {
        const currentTimestamp = run.timestamp;
        run.timestamp = currentTimestamp - previousTimestamp;
        previousTimestamp = currentTimestamp;
      }

      // Update in place
      statusGroup.taskIdIds = runs.map(run => run.taskIdId);
      statusGroup.durations = runs.map(run => run.duration);
      statusGroup.timestamps = runs.map(run => run.timestamp);
      // Update crash data arrays if present
      if (statusGroup.crashSignatureIds) {
        statusGroup.crashSignatureIds = runs.map(run => run.crashSignatureId);
      }
      if (statusGroup.minidumps) {
        statusGroup.minidumps = runs.map(run => run.minidump);
      }
      // Update message data arrays if present
      if (statusGroup.messageIds) {
        statusGroup.messageIds = runs.map(run => run.messageId);
      }
    }
  }

  // Build output with metadata
  return {
    testData: {
      metadata: {
        ...metadata,
        startTime,
        generatedAt: new Date().toISOString(),
        jobCount: jobs.length,
        processedJobCount: jobResults.length,
      },
      tables: dataStructure.tables,
      taskInfo: dataStructure.taskInfo,
      testInfo: dataStructure.testInfo,
      testRuns: dataStructure.testRuns,
    },
    resourceData: createResourceUsageData(jobResults),
  };
}
+
/**
 * Fetch and process xpcshell test data for a single pushed revision, writing
 * the timings JSON and its companion resource-usage JSON to OUTPUT_DIR.
 *
 * @param {string} project - Repository name (e.g. "try", "autoland").
 * @param {string} revision - Changeset hash identifying the push.
 * @param {boolean} [forceRefetch=false] - Regenerate even when cached output
 *   already exists.
 * @returns {Promise<?Object>} The processed output, or null when the data
 *   already exists, no jobs were found, or an error occurred.
 */
async function processRevisionData(project, revision, forceRefetch = false) {
  console.log(`Fetching xpcshell test data for ${project}:${revision}`);
  console.log(`=== Processing ${project}:${revision} ===`);

  const cacheFile = path.join(
    OUTPUT_DIR,
    `xpcshell-${project}-${revision}.json`
  );

  // Check if we already have data for this revision
  if (fs.existsSync(cacheFile) && !forceRefetch) {
    console.log(`Data for ${project}:${revision} already exists. Skipping.`);
    return null;
  }

  if (forceRefetch) {
    console.log(
      `Force flag detected, re-fetching data for ${project}:${revision}...`
    );
  }

  try {
    // Fetch push ID from revision
    const pushId = await fetchCommitData(project, revision);

    // Fetch jobs for the push
    const jobs = await fetchPushJobs(project, pushId);

    if (jobs.length === 0) {
      console.log(`No xpcshell jobs found for ${project}:${revision}.`);
      return null;
    }

    // Use the start_time of the first job as the reference start time.
    // jobs is known non-empty here, so no fallback is needed.
    const startTime = Math.floor(new Date(jobs[0].start_time).getTime() / 1000);

    const output = await processJobsAndCreateData(
      jobs,
      `${project}-${revision}`,
      startTime,
      {
        project,
        revision,
        pushId,
      }
    );

    if (!output) {
      return null;
    }

    // Save the timings file and the companion resource-usage file.
    saveJsonFile(output.testData, cacheFile);
    const resourceCacheFile = path.join(
      OUTPUT_DIR,
      `xpcshell-${project}-${revision}-resources.json`
    );
    saveJsonFile(output.resourceData, resourceCacheFile);

    return output;
  } catch (error) {
    console.error(`Error processing ${project}:${revision}:`, error);
    return null;
  }
}
+
// Load metadata from the previous scheduled run via the Taskcluster index,
// so per-date files that already exist there can be re-downloaded instead of
// regenerated. On success this populates the module-level `previousRunData`;
// on any failure it just logs and leaves it unset.
async function fetchPreviousRunData() {
  try {
    // The current task's routes tell us which index this task publishes to.
    const taskUrl = `${TASKCLUSTER_BASE_URL}/api/queue/v1/task/${process.env.TASK_ID}`;
    const taskData = await fetchJson(taskUrl);
    if (!taskData) {
      console.log(`Failed to fetch task info from ${taskUrl}`);
      return;
    }

    // Find a route that starts with "index." and contains ".latest."
    const routes = taskData.routes || [];
    const latestRoute = routes.find(
      r => r.startsWith("index.") && r.includes(".latest.")
    );
    if (!latestRoute) {
      console.log(
        `No route found with 'index.' prefix and '.latest.' in name. Available routes: ${JSON.stringify(routes)}`
      );
      return;
    }

    // The index name is the route without its "index." prefix.
    const indexName = latestRoute.slice("index.".length);
    console.log(`Using index: ${indexName}`);

    // Base URL of the previous run's public artifacts; reused later by
    // processDateData when re-downloading per-date files.
    const artifactsUrl = `${TASKCLUSTER_BASE_URL}/api/index/v1/task/${indexName}/artifacts/public`;

    // The previous run's index.json lists which dates it produced.
    const indexUrl = `${artifactsUrl}/index.json`;
    console.log(`Fetching previous run data from ${indexUrl}`);
    const indexData = await fetchJson(indexUrl);
    if (!indexData) {
      console.log(`Failed to fetch index.json from ${indexUrl}`);
      return;
    }

    const dates = indexData.dates || [];
    console.log(`Found ${dates.length} dates in previous run`);

    previousRunData = {
      dates: new Set(dates),
      artifactsUrl,
    };

    console.log("Previous run metadata loaded\n");
  } catch (error) {
    console.log(`Error fetching previous run metadata: ${error.message}`);
  }
}
+
// Generate (or reuse from the previous run's artifacts) the timings and
// resource-usage JSON files for a single UTC date.
async function processDateData(targetDate, forceRefetch = false) {
  const timingsFilename = `xpcshell-${targetDate}.json`;
  const resourcesFilename = `xpcshell-${targetDate}-resources.json`;
  const timingsPath = path.join(OUTPUT_DIR, timingsFilename);
  const resourcesPath = path.join(OUTPUT_DIR, resourcesFilename);

  // Nothing to do when the data is already on disk (unless forced).
  if (!forceRefetch && fs.existsSync(timingsPath)) {
    console.log(`Data for ${targetDate} already exists. Skipping.`);
    return;
  }

  // Reuse the previous run's artifacts when this date is available there.
  if (!forceRefetch && previousRunData?.dates.has(targetDate)) {
    console.log(`Fetching ${targetDate} from previous run...`);
    try {
      const [timings, resources] = await Promise.all([
        fetchJson(`${previousRunData.artifactsUrl}/${timingsFilename}`),
        fetchJson(`${previousRunData.artifactsUrl}/${resourcesFilename}`),
      ]);

      if (timings && resources) {
        saveJsonFile(timings, timingsPath);
        saveJsonFile(resources, resourcesPath);
        return;
      }
      console.log(`  Failed to fetch from previous run, will regenerate`);
    } catch (error) {
      console.log(
        `  Error fetching from previous run: ${error.message}, will regenerate`
      );
    }
  }

  if (forceRefetch) {
    console.log(`Force flag detected, re-fetching data for ${targetDate}...`);
  }

  try {
    const jobs = await fetchXpcshellData(targetDate);
    if (!jobs.length) {
      console.log(`No jobs found for ${targetDate}.`);
      return;
    }

    // Output timestamps are stored relative to midnight UTC, in seconds.
    const startOfDay = new Date(targetDate + "T00:00:00.000Z");
    const startTime = Math.floor(startOfDay.getTime() / 1000);

    const output = await processJobsAndCreateData(jobs, targetDate, startTime, {
      date: targetDate,
    });
    if (output) {
      saveJsonFile(output.testData, timingsPath);
      saveJsonFile(output.resourceData, resourcesPath);
    }
  } catch (error) {
    console.error(`Error processing ${targetDate}:`, error);
  }
}
+
/**
 * Entry point. Supported command-line flags:
 *   --force            regenerate files even when they already exist
 *   --days N           number of past days to process (1-30, default 3)
 *   --revision p:rev   process one push identified by project:revision
 *   --try rev          shortcut for --revision try:rev
 * Without --revision/--try, processes the last N days and rewrites index.json
 * listing every per-date file present in OUTPUT_DIR.
 */
async function main() {
  const forceRefetch = process.argv.includes("--force");

  // Check for --days parameter
  let numDays = 3;
  const daysIndex = process.argv.findIndex(arg => arg === "--days");
  if (daysIndex !== -1 && daysIndex + 1 < process.argv.length) {
    // Explicit radix, for consistency with the other parseInt calls in this file.
    const daysValue = parseInt(process.argv[daysIndex + 1], 10);
    if (!isNaN(daysValue) && daysValue > 0 && daysValue <= 30) {
      numDays = daysValue;
    } else {
      console.error("Error: --days must be a number between 1 and 30");
      process.exit(1);
    }
  }

  // Previous-run metadata is only reachable when running in a Taskcluster task.
  if (process.env.TASK_ID) {
    await fetchPreviousRunData();
  }

  // Check for --revision parameter (format: project:revision)
  const revisionIndex = process.argv.findIndex(arg => arg === "--revision");
  if (revisionIndex !== -1 && revisionIndex + 1 < process.argv.length) {
    const revisionArg = process.argv[revisionIndex + 1];
    const parts = revisionArg.split(":");

    if (parts.length !== 2) {
      console.error(
        "Error: --revision must be in format project:revision (e.g., try:abc123 or autoland:def456)"
      );
      process.exit(1);
    }

    const [project, revision] = parts;
    const output = await processRevisionData(project, revision, forceRefetch);

    if (output) {
      console.log("Successfully processed revision data.");
    } else {
      console.log("\nNo data was successfully processed.");
    }
    return;
  }

  // Check for --try option (shortcut for --revision try:...)
  const tryIndex = process.argv.findIndex(arg => arg === "--try");
  if (tryIndex !== -1 && tryIndex + 1 < process.argv.length) {
    const revision = process.argv[tryIndex + 1];
    const output = await processRevisionData("try", revision, forceRefetch);

    if (output) {
      console.log("Successfully processed try commit data.");
    } else {
      console.log("\nNo data was successfully processed.");
    }
    return;
  }

  // Fetch data for the specified number of days
  const dates = [];
  for (let i = 1; i <= numDays; i++) {
    dates.push(getDateString(i));
  }

  console.log(
    `Fetching xpcshell test data for the last ${numDays} day${numDays > 1 ? "s" : ""}: ${dates.join(", ")}`
  );

  for (const date of dates) {
    console.log(`\n=== Processing ${date} ===`);
    await processDateData(date, forceRefetch);
  }

  // Create index file with available dates
  const indexFile = path.join(OUTPUT_DIR, "index.json");
  const availableDates = [];

  // Scan for all xpcshell-*.json files in the output directory
  const files = fs.readdirSync(OUTPUT_DIR);
  files.forEach(file => {
    const match = file.match(/^xpcshell-(\d{4}-\d{2}-\d{2})\.json$/);
    if (match) {
      availableDates.push(match[1]);
    }
  });

  // Sort dates in descending order (newest first)
  availableDates.sort((a, b) => b.localeCompare(a));

  fs.writeFileSync(
    indexFile,
    JSON.stringify({ dates: availableDates }, null, 2)
  );
  console.log(
    `\nIndex file saved as ${indexFile} with ${availableDates.length} dates`
  );
}

main().catch(console.error);
diff --git a/testing/timings/profile-worker.js b/testing/timings/profile-worker.js
@@ -0,0 +1,372 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const { parentPort, workerData } = require("worker_threads");
+const fs = require("fs");
+const path = require("path");
+const zlib = require("zlib");
+
+// Extract parallel execution time ranges from markers
+function extractParallelRanges(markers) {
+ const parallelRanges = [];
+
+ for (let i = 0; i < markers.length; i++) {
+ const data = markers.data[i];
+ // Look for markers with type: "Text" and text: "parallel"
+ if (data?.type === "Text" && data.text === "parallel") {
+ parallelRanges.push({
+ start: markers.startTime[i],
+ end: markers.endTime[i],
+ });
+ }
+ }
+
+ return parallelRanges;
+}
+
+// Check if a test time overlaps with any parallel execution range
+function isInParallelRange(testStart, testEnd, parallelRanges) {
+ for (const range of parallelRanges) {
+ // Check if test overlaps with parallel range
+ if (testStart < range.end && testEnd > range.start) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Extract resource usage information from profile
+function extractResourceUsage(profile) {
+ if (!profile || !profile.threads || !profile.threads[0]) {
+ return null;
+ }
+
+ const thread = profile.threads[0];
+ const { markers } = thread;
+
+ if (!markers || !markers.data) {
+ return null;
+ }
+
+ // Extract machine info from profile metadata
+ // Convert memory to GB with 1 decimal place to avoid grouping issues from tiny variations
+ const machineInfo = {
+ logicalCPUs: profile.meta?.logicalCPUs || null,
+ physicalCPUs: profile.meta?.physicalCPUs || null,
+ mainMemory: profile.meta?.mainMemory
+ ? parseFloat((profile.meta.mainMemory / (1024 * 1024 * 1024)).toFixed(1))
+ : null,
+ };
+
+ let maxMemory = 0;
+ let idleTime = 0;
+ let singleCoreTime = 0;
+ // CPU buckets: [0-10%, 10-20%, 20-30%, ..., 90-100%]
+ const cpuBuckets = new Array(10).fill(0);
+
+ // Calculate thresholds based on core count
+ const oneCorePct = machineInfo.logicalCPUs
+ ? 100 / machineInfo.logicalCPUs
+ : 12.5;
+ const idleThreshold = oneCorePct / 2;
+ // Single-core range: 0.75 to 1.25 cores (to account for slight variations)
+ const singleCoreMin = oneCorePct * 0.75;
+ const singleCoreMax = oneCorePct * 1.25;
+
+ // Process markers to gather resource usage
+ for (let i = 0; i < markers.length; i++) {
+ const data = markers.data[i];
+ if (!data) {
+ continue;
+ }
+
+ const duration = markers.endTime[i] - markers.startTime[i];
+
+ if (data.type === "Mem") {
+ if (data.used > maxMemory) {
+ maxMemory = data.used;
+ }
+ } else if (data.type === "CPU") {
+ // Parse CPU percentage (e.g., "21.4%" -> 21.4)
+ const cpuPercent = parseFloat(data.cpuPercent);
+ if (isNaN(cpuPercent)) {
+ continue;
+ }
+
+ if (cpuPercent < idleThreshold) {
+ idleTime += duration;
+ }
+
+ // Check if it's in the single-core range
+ if (cpuPercent >= singleCoreMin && cpuPercent <= singleCoreMax) {
+ singleCoreTime += duration;
+ }
+
+ // Compute bucket index: 0-10% -> bucket 0, 10-20% -> bucket 1, etc.
+ const bucketIndex = Math.min(Math.floor(cpuPercent / 10), 9);
+ cpuBuckets[bucketIndex] += duration;
+ }
+ }
+
+ return {
+ machineInfo,
+ maxMemory,
+ idleTime,
+ singleCoreTime,
+ cpuBuckets,
+ };
+}
+
/**
 * Extract per-test timing entries from a profile's first thread.
 *
 * Each returned entry has { path, duration, status, timestamp }, plus an
 * optional `message` (structured logs only) and, for CRASH results,
 * `crashSignature` / `minidump` matched from Crash markers within the test's
 * time range. TIMEOUT/FAIL/PASS statuses get a -PARALLEL or -SEQUENTIAL
 * suffix when the profile contains parallel-execution markers.
 *
 * @param {?Object} profile - Profile with thread markers and a stringArray.
 * @returns {Array<Object>} Timing entries; empty when no usable markers.
 */
// eslint-disable-next-line complexity
function extractTestTimings(profile) {
  if (!profile || !profile.threads || !profile.threads[0]) {
    return [];
  }

  const thread = profile.threads[0];
  const { markers, stringArray } = thread;

  if (!markers || !markers.data || !markers.name || !stringArray) {
    return [];
  }

  // First, extract parallel execution ranges
  const parallelRanges = extractParallelRanges(markers);

  // Extract crash markers for later matching with CRASH status tests
  const crashMarkers = [];
  for (let i = 0; i < markers.length; i++) {
    const data = markers.data[i];
    if (data?.type !== "Crash" || !data.test) {
      continue;
    }
    crashMarkers.push({
      testPath: data.test,
      startTime: markers.startTime[i],
      signature: data.signature || null,
      minidump: data.minidump || null,
    });
  }

  // Marker names are interned: "test" markers carry this stringArray index.
  const testStringId = stringArray.indexOf("test");
  const timings = [];

  for (let i = 0; i < markers.length; i++) {
    if (markers.name[i] !== testStringId) {
      continue;
    }

    const data = markers.data[i];
    if (!data) {
      continue;
    }

    let testPath = null;
    let status = "UNKNOWN";
    let message = null;

    // Handle both structured and plain text logs
    if (data.type === "Test") {
      // Structured log format
      testPath = data.test || data.name;
      status = data.status || "UNKNOWN";
      // Normalize line breaks in message (convert \r\n to \n)
      message = data.message ? data.message.replace(/\r\n/g, "\n") : null;

      // Check if this is an expected failure (FAIL status but green color)
      if (status === "FAIL" && data.color === "green") {
        status = "EXPECTED-FAIL";
      }
      // Add execution context suffix to timeout, fail, and pass statuses
      else if (
        ["TIMEOUT", "FAIL", "PASS"].includes(status) &&
        parallelRanges.length
      ) {
        status += isInParallelRange(
          markers.startTime[i],
          markers.endTime[i],
          parallelRanges
        )
          ? "-PARALLEL"
          : "-SEQUENTIAL";
      }
      // Keep other statuses as-is

      // Extract the actual test file path from the test field
      // Format: "xpcshell-parent-process.toml:dom/indexedDB/test/unit/test_fileListUpgrade.js"
      if (testPath && testPath.includes(":")) {
        testPath = testPath.split(":")[1];
      }
    } else if (data.type === "Text") {
      // Plain text log format
      testPath = data.text;

      // Skip text markers like "replaying full log for ..."
      if (testPath?.startsWith("replaying full log for ")) {
        continue;
      }

      // We don't have status information in markers from plain text logs
      status = "UNKNOWN";
    } else {
      continue;
    }

    // Only keep actual JS test files.
    if (!testPath || !testPath.endsWith(".js")) {
      continue;
    }

    const testStartTime = markers.startTime[i];
    const testEndTime = markers.endTime[i];

    const timing = {
      path: testPath,
      duration: testEndTime - testStartTime,
      status,
      // Absolute timestamp: marker times are relative to the profile start.
      timestamp: profile.meta.startTime + testStartTime,
    };
    if (message) {
      timing.message = message;
    }

    // For CRASH status, find matching crash marker within the test's time range
    if (status === "CRASH") {
      const matchingCrash = crashMarkers.find(
        crash =>
          crash.testPath === data.test &&
          crash.startTime >= testStartTime &&
          crash.startTime <= testEndTime
      );
      if (matchingCrash) {
        if (matchingCrash.signature) {
          timing.crashSignature = matchingCrash.signature;
        }
        if (matchingCrash.minidump) {
          timing.minidump = matchingCrash.minidump;
        }
      }
    }

    timings.push(timing);
  }

  return timings;
}
+
// Fetch the resource-usage profile for one task run from TaskCluster,
// caching the JSON gzipped on disk. Returns the parsed profile, or null when
// the artifact is missing or the fetch fails.
async function fetchResourceProfile(taskId, retryId = 0) {
  const cachePath = path.join(
    workerData.profileCacheDir,
    `${taskId}-${retryId}.json.gz`
  );

  // Fast path: reuse the on-disk gzipped cache when present.
  if (fs.existsSync(cachePath)) {
    try {
      const decompressed = zlib.gunzipSync(fs.readFileSync(cachePath));
      return JSON.parse(decompressed.toString("utf-8"));
    } catch (error) {
      console.warn(
        `Error reading cached gzipped profile ${taskId}: ${error.message}`
      );
      // Fall through to a network fetch.
    }
  }

  const url = `${workerData.taskclusterBaseUrl}/api/queue/v1/task/${taskId}/runs/${retryId}/artifacts/public/test_info/profile_resource-usage.json`;

  try {
    const response = await fetch(url);
    if (!response.ok) {
      return null;
    }

    const profile = await response.json();

    // Best effort: cache the profile (gzipped) for future runs.
    try {
      fs.writeFileSync(cachePath, zlib.gzipSync(JSON.stringify(profile)));
    } catch (error) {
      console.warn(`Error caching profile ${taskId}: ${error.message}`);
    }

    return profile;
  } catch (error) {
    console.error(`Error fetching profile for task ${taskId}:`, error.message);
    return null;
  }
}
+
// Extract test timings and resource usage for a single job. Returns null when
// the job has no task ID, no profile could be fetched, or no timings were
// found in the profile.
async function processJob(job) {
  const jobName = job.name;
  const taskId = job.task_id;
  const retryId = job.retry_id || 0;

  if (!taskId) {
    return null;
  }

  // No per-job logging here, to avoid interleaving worker output with the
  // main thread's.
  const profile = await fetchResourceProfile(taskId, retryId);
  if (!profile) {
    return null;
  }

  const timings = extractTestTimings(profile);
  if (!timings.length) {
    return null;
  }

  // Normalize start_time to a Unix timestamp in seconds when it's a string.
  const startTime =
    typeof job.start_time === "string"
      ? Math.floor(new Date(job.start_time).getTime() / 1000)
      : job.start_time;

  return {
    jobName,
    taskId,
    retryId,
    repository: job.repository,
    startTime,
    timings,
    resourceUsage: extractResourceUsage(profile),
  };
}
+
// Main worker loop: announce readiness, then process jobs one at a time as
// the parent assigns them, replying with "jobComplete" after each job and
// with the accumulated results on "shutdown".
async function main() {
  try {
    const results = [];

    // Signal worker is ready for jobs
    parentPort.postMessage({ type: "ready" });

    // Listen for job assignments.
    // NOTE(review): the surrounding try/catch only covers this synchronous
    // setup; an error thrown inside the async message callback would surface
    // as an unhandled rejection instead of an "error" message — confirm the
    // parent listens for worker failures.
    parentPort.on("message", async message => {
      if (message.type === "job") {
        const result = await processJob(message.job);
        if (result) {
          results.push(result);
        }
        // Request next job
        parentPort.postMessage({ type: "jobComplete", result });
      } else if (message.type === "shutdown") {
        // Send final results and exit
        parentPort.postMessage({ type: "finished", results });
      }
    });
  } catch (error) {
    parentPort.postMessage({ type: "error", error: error.message });
  }
}

// Start the worker loop.
main();