commit a92c0a8ab09469c641aa5d6e4f9831ecd5d59f5c
parent 34719179f66fddee9250238d79425aea3b48979e
Author: Alex Franchuk <afranchuk@mozilla.com>
Date: Tue, 7 Oct 2025 13:48:13 +0000
Bug 1973820 p2 - Aggregate and print GTest failures r=jmaher
This aggregates test information across all processes and displays a
summary and test failures at the end of the log, which is convenient for
developers.
Differential Revision: https://phabricator.services.mozilla.com/D254996
Diffstat:
2 files changed, 269 insertions(+), 99 deletions(-)
diff --git a/python/mozbuild/mozbuild/mach_commands.py b/python/mozbuild/mozbuild/mach_commands.py
@@ -4,6 +4,7 @@
import argparse
import errno
+import functools
import itertools
import json
import logging
@@ -21,6 +22,7 @@ from os import path
from pathlib import Path
import mozpack.path as mozpath
+from gtest.reports import AggregatedGTestReport
from gtest.suites import get_gtest_suites, suite_filters
from mach.decorators import (
Command,
@@ -1158,116 +1160,150 @@ def gtest(
pass_thru=True,
)
- import functools
+ report = AggregatedGTestReport()
- from mozprocess import ProcessHandlerMixin
+ with report:
+ from mozprocess import ProcessHandlerMixin
- processes = []
+ processes = []
- def add_process(job_id, env, **kwargs):
- def log_line(line):
- # Prepend the job identifier to output
- command_context.log(
- logging.INFO,
- "GTest",
- {"job_id": job_id, "line": line.strip()},
- "[{job_id}] {line}",
+ def add_process(job_id, env, **kwargs):
+ def log_line(line):
+ # Prepend the job identifier to output
+ command_context.log(
+ logging.INFO,
+ "GTest",
+ {"job_id": job_id, "line": line.strip()},
+ "[{job_id}] {line}",
+ )
+
+ report.set_output_in_env(env, job_id)
+
+ proc = ProcessHandlerMixin(
+ [app_path, "-unittest"],
+ cwd=cwd,
+ universal_newlines=True,
+ env=env,
+ processOutputLine=log_line,
+ **kwargs,
)
+ processes.append(proc)
+ return proc
- proc = ProcessHandlerMixin(
- [app_path, "-unittest"],
- cwd=cwd,
- universal_newlines=True,
- env=env,
- processOutputLine=log_line,
- **kwargs,
- )
- processes.append(proc)
- return proc
+ if combine_suites:
+ # Use GTest sharding to create `jobs` processes
+ gtest_env["GTEST_TOTAL_SHARDS"] = str(jobs)
- if combine_suites:
- # Use GTest sharding to create `jobs` processes
- gtest_env["GTEST_TOTAL_SHARDS"] = str(jobs)
+ for i in range(0, jobs):
+ env = gtest_env.copy()
+ env["GTEST_SHARD_INDEX"] = str(i)
+ add_process(str(i), env).run()
+ else:
+ # Make one process per test suite
+ suites = get_gtest_suites(args, cwd, gtest_env)
+
+ from threading import Event, Lock
+
+ processes_to_run = []
+ all_processes_run = Event()
+ running_suites = set()
+ process_state_lock = Lock()
+
+ def run_next(finished_suite=None):
+ """
+ Run another test suite process.
+
+ If `finished_suite` is provided, it will be considered as finished.
+ This updates the `running_suites` set and will signal the
+ `all_processes_run` Event when there are no longer any test suites
+ to start (though some may still be running).
+
+ This may be safely called from different threads
+ (ProcessHandlerMixin callbacks occur from separate threads).
+ """
+ # The changes here must be synchronized, so acquire a lock for the
+ # duration of the function.
+ with process_state_lock:
+ if finished_suite is not None:
+ running_suites.remove(finished_suite)
+ if len(processes_to_run) > 0:
+ next_suite, proc = processes_to_run.pop()
+ proc.run()
+ running_suites.add(next_suite)
+ command_context.log(
+ logging.DEBUG,
+ "GTest",
+ {},
+ f"Starting {next_suite} tests. {len(processes_to_run)} suites remain.",
+ )
+ else:
+ all_processes_run.set()
+ if len(running_suites) > 0:
+ command_context.log(
+ logging.INFO,
+ "GTest",
+ {},
+ f"Currently running suites: {', '.join(running_suites)}",
+ )
+
+ for filt in suite_filters(suites):
+ proc = add_process(
+ filt.suite,
+ filt(gtest_env),
+ onFinish=functools.partial(run_next, filt.suite),
+ )
+ processes_to_run.append((filt.suite, proc))
- for i in range(0, jobs):
- env = gtest_env.copy()
- env["GTEST_SHARD_INDEX"] = str(i)
- add_process(str(i), env).run()
- else:
- # Make one process per test suite
- suites = get_gtest_suites(args, cwd, gtest_env)
-
- from threading import Event, Lock
-
- processes_to_run = []
- all_processes_run = Event()
- running_suites = set()
- process_state_lock = Lock()
-
- def run_next(finished_suite=None):
- """
- Run another test suite process.
-
- If `finished_suite` is provided, it will be considered as finished.
- This updates the `running_suites` set and will signal the
- `all_processes_run` Event when there are no longer any test suites
- to start (though some may still be running).
-
- This may be safely called from different threads
- (ProcessHandlerMixin callbacks occur from separate threads).
- """
- # The changes here must be synchronized, so acquire a lock for the
- # duration of the function.
- with process_state_lock:
- if finished_suite is not None:
- running_suites.remove(finished_suite)
- if len(processes_to_run) > 0:
- next_suite, proc = processes_to_run.pop()
- proc.run()
- running_suites.add(next_suite)
- command_context.log(
- logging.DEBUG,
- "GTest",
- {},
- f"Starting {next_suite} tests. {len(processes_to_run)} suites remain.",
- )
- else:
- all_processes_run.set()
- if len(running_suites) > 0:
- command_context.log(
- logging.INFO,
- "GTest",
- {},
- f"Currently running suites: {', '.join(running_suites)}",
- )
+ # Start a number of processes according to 'jobs'. As they finish,
+ # they'll each kick off another one.
+ for _ in range(jobs):
+ run_next()
- for filt in suite_filters(suites):
- proc = add_process(
- filt.suite,
- filt(gtest_env),
- onFinish=functools.partial(run_next, filt.suite),
- )
- processes_to_run.append((filt.suite, proc))
+ # Wait for all processes to have been started, then wait on completion.
+ all_processes_run.wait()
- # Start a number of processes according to 'jobs'. As they finish,
- # they'll each kick off another one.
- for _ in range(jobs):
- run_next()
+ # Wait on processes and return a non-zero exit code if any process does so.
+ exit_code = 0
+ for process in processes:
+ status = process.wait()
+ if status:
+ exit_code = status
- # Wait for all processes to have been started, then wait on completion.
- all_processes_run.wait()
+ # Clamp error code to 255 to prevent overflowing multiple of
+ # 256 into 0
+ if exit_code > 255:
+ exit_code = 255
- # Wait on processes and return a non-zero exit code if any process does so.
- exit_code = 0
- for process in processes:
- status = process.wait()
- if status:
- exit_code = status
-
- # Clamp error code to 255 to prevent overflowing multiple of
- # 256 into 0
- if exit_code > 255:
- exit_code = 255
+ # Show aggregated report information and any test errors.
+ command_context.log(
+ logging.INFO,
+ "GTest",
+ {
+ "tests": report["tests"] - report["disabled"],
+ "failures": report["failures"],
+ "disabled": report["disabled"],
+ "suites": len(report["testsuites"]),
+ },
+ "Ran {tests} test(s) from {suites} test suite(s) ({disabled} disabled), with {failures} failure(s).",
+ )
+
+ for suite in report["testsuites"]:
+ if suite["failures"] == 0:
+ continue
+ for test in suite["testsuite"]:
+ if "failures" not in test:
+ continue
+ full_name = f"{suite['name']}.{test['name']}"
+ command_context.log(
+ logging.ERROR,
+ "GTest",
+ {
+ "test": full_name,
+ "failure_count": len(test["failures"]),
+ "failures": "\n".join(e["failure"] for e in test["failures"]),
+ },
+ "{test} failed {failure_count} check(s):\n{failures}",
+ )
return exit_code
diff --git a/testing/gtest/reports.py b/testing/gtest/reports.py
@@ -0,0 +1,134 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import functools
+import itertools
+import json
+import os
+import tempfile
+from os import path
+
+_EMPTY_REPORT = {
+ "tests": 0,
+ "failures": 0,
+ "disabled": 0,
+ "errors": 0,
+ "testsuites": {},
+}
+
+
+def merge_gtest_reports(test_reports):
+ """
+ Logically merges json test reports matching [this
+ schema](https://google.github.io/googletest/advanced.html#generating-a-json-report).
+
+ It is assumed that each test will appear in at most one report (rather than
+ trying to search and merge each test).
+
+ Arguments:
+ * test_reports - an iterator of python-native data (likely loaded from GTest JSON files).
+ """
+ INTEGER_FIELDS = ["tests", "failures", "disabled", "errors"]
+ TESTSUITE_INTEGER_FIELDS = ["tests", "failures", "disabled"]
+
+ def merge_testsuite(target, suite):
+ for field in TESTSUITE_INTEGER_FIELDS:
+ if field in suite:
+ target[field] += suite[field]
+ # We assume that each test will appear in at most one report,
+ # so just extend the list of tests.
+ target["testsuite"].extend(suite["testsuite"])
+
+ def merge_one(current, report):
+ for field in INTEGER_FIELDS:
+ if field in report:
+ current[field] += report[field]
+ for suite in report["testsuites"]:
+ name = suite["name"]
+ if name in current["testsuites"]:
+ merge_testsuite(current["testsuites"][name], suite)
+ else:
+ current["testsuites"][name] = suite
+ for field in TESTSUITE_INTEGER_FIELDS:
+ current["testsuites"][name].setdefault(field, 0)
+ return current
+
+    merged = functools.reduce(merge_one, test_reports, {**_EMPTY_REPORT, "testsuites": {}})
+ # We had testsuites as a dict for fast lookup when merging, change
+ # it back to a list to match the schema.
+ merged["testsuites"] = list(merged["testsuites"].values())
+
+ return merged
+
+
+class AggregatedGTestReport(dict):
+ """
+ An aggregated gtest report (stored as a `dict`)
+
+ This should be used as a context manager to manage the lifetime of
+ temporary storage for reports. If no exception occurs, when the context
+ exits the reports will be merged into this dictionary. Thus, the context
+ must not be exited before the outputs are written (e.g., by gtest processes
+ completing).
+
+ When merging results, it is assumed that each test will appear in at most
+ one report (rather than trying to search and merge each test).
+ """
+
+ __slots__ = ["result_dir"]
+
+ def __init__(self):
+ self.result_dir = tempfile.TemporaryDirectory(ignore_cleanup_errors=True)
+ super().__init__()
+ self.reset()
+
+ def __enter__(self):
+ self.result_dir.__enter__()
+ return self
+
+ def __exit__(self, *exc_info):
+ # Only collect reports if no exception occurred
+ if exc_info[0] is None:
+ d = self.result_dir.name
+ result_files = filter(
+ lambda f: path.isfile(f), map(lambda f: path.join(d, f), os.listdir(d))
+ )
+
+ def json_from_file(file):
+ with open(file) as f:
+ return json.load(f)
+
+ self.update(
+ merge_gtest_reports(
+ itertools.chain([self], map(json_from_file, result_files))
+ )
+ )
+ self.result_dir.__exit__(*exc_info)
+
+ def reset(self):
+ """Clear all results."""
+ self.clear()
+ self.update(
+ {"tests": 0, "failures": 0, "disabled": 0, "errors": 0, "testsuites": []}
+ )
+
+ def gtest_output(self, job_id):
+ """
+ Create a gtest output string with the given job id (to differentiate
+ outputs).
+ """
+ # Replace `/` with `_` in job_id to prevent nested directories (job_id
+ # may be a suite name, which may have slashes for parameterized test
+ # suites).
+ return f"json:{self.result_dir.name}/{job_id.replace('/', '_')}.json"
+
+ def set_output_in_env(self, env, job_id):
+ """
+ Sets an environment variable mapping appropriate with the output for
+ the given job id.
+
+ Returns the env.
+ """
+ env["GTEST_OUTPUT"] = self.gtest_output(job_id)
+ return env