tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

mach_commands.py (42793B)


      1 # This Source Code Form is subject to the terms of the Mozilla Public
      2 # License, v. 2.0. If a copy of the MPL was not distributed with this
      3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
      4 
      5 import argparse
      6 import json
      7 import logging
      8 import os
      9 import subprocess
     10 import sys
     11 from datetime import date, timedelta
     12 from urllib.parse import urlparse
     13 
     14 import requests
     15 from mach.decorators import Command, CommandArgument, SubCommand
     16 from mozbuild.base import BuildEnvironmentNotFoundException, MozbuildObject
     17 from mozbuild.base import MachCommandConditions as conditions
     18 from mozbuild.nodeutil import find_node_executable
     19 from mozsystemmonitor.resourcemonitor import SystemResourceMonitor
     20 
# Error text printed by `mach test` when no suites or tests could be
# resolved from the command-line arguments.
UNKNOWN_TEST = """
I was unable to find tests from the given argument(s).

You should specify a test directory, filename, test suite name, or
abbreviation.

It's possible my little brain doesn't know about the type of test you are
trying to execute. If you suspect this, please request support by filing
a bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Testing&component=General.
""".strip()

# %-style template taking (flavor, subsuite-suffix); printed when resolved
# tests belong to a flavor with no registered mach command.
UNKNOWN_FLAVOR = """
I know you are trying to run a %s%s test. Unfortunately, I can't run those
tests yet. Sorry!
""".strip()

# Help text for the positional `what` argument of `mach test`; formatted
# with the comma-separated list of known suite names.
TEST_HELP = """
Test or tests to run. Tests can be specified by filename, directory, suite
name or suite alias.

The following test suites and aliases are supported: {}
""".strip()
     44 
     45 
     46 def get_test_parser():
     47    from mozlog.commandline import add_logging_group
     48    from moztest.resolve import TEST_SUITES
     49 
     50    parser = argparse.ArgumentParser()
     51    parser.add_argument(
     52        "what",
     53        default=None,
     54        nargs="*",
     55        help=TEST_HELP.format(", ".join(sorted(TEST_SUITES))),
     56    )
     57    parser.add_argument(
     58        "extra_args",
     59        default=None,
     60        nargs=argparse.REMAINDER,
     61        help="Extra arguments to pass to the underlying test command(s). "
     62        "If an underlying command doesn't recognize the argument, it "
     63        "will fail.",
     64    )
     65    parser.add_argument(
     66        "--debugger",
     67        default=None,
     68        action="store",
     69        nargs="?",
     70        help="Specify a debugger to use.",
     71    )
     72    parser.add_argument(
     73        "--auto",
     74        nargs="?",
     75        const="quick",
     76        default=False,
     77        choices=["extensive", "moderate", "quick"],
     78        metavar="LEVEL",
     79        help="Automatically select tests based on local changes using BugBug. "
     80        "Optional confidence level: 'extensive' (more tests), 'moderate' or "
     81        "'quick' (fewer tests with highest confidence to be related). "
     82        "Default: quick",
     83    )
     84    add_logging_group(parser)
     85    return parser
     86 
     87 
# Suites that `mach addtest` knows how to generate template tests for.
ADD_TEST_SUPPORTED_SUITES = [
    "mochitest-chrome",
    "mochitest-plain",
    "mochitest-browser-chrome",
    "web-platform-tests",
    "web-platform-tests-reftest",
    "xpcshell",
]
# Document types (file extensions, without the dot) accepted by `mach addtest`.
ADD_TEST_SUPPORTED_DOCS = ["js", "html", "xhtml", "xul"]

# Short aliases accepted for `--suite`, mapped to canonical suite names.
SUITE_SYNONYMS = {
    "wpt": "web-platform-tests",
    "wpt-testharness": "web-platform-tests",
    "wpt-reftest": "web-platform-tests-reftest",
}

# Sentinel distinguishing "--editor not passed at all" from "--editor passed
# without a value" (argparse supplies None for the latter).
MISSING_ARG = object()
    105 
    106 
    107 def create_parser_addtest():
    108    import addtest
    109 
    110    parser = argparse.ArgumentParser()
    111    parser.add_argument(
    112        "--suite",
    113        choices=sorted(ADD_TEST_SUPPORTED_SUITES + list(SUITE_SYNONYMS.keys())),
    114        help="suite for the test. "
    115        "If you pass a `test` argument this will be determined "
    116        "based on the filename and the folder it is in",
    117    )
    118    parser.add_argument(
    119        "-o",
    120        "--overwrite",
    121        action="store_true",
    122        help="Overwrite an existing file if it exists.",
    123    )
    124    parser.add_argument(
    125        "--doc",
    126        choices=ADD_TEST_SUPPORTED_DOCS,
    127        help="Document type for the test (if applicable)."
    128        "If you pass a `test` argument this will be determined "
    129        "based on the filename.",
    130    )
    131    parser.add_argument(
    132        "-e",
    133        "--editor",
    134        action="store",
    135        nargs="?",
    136        default=MISSING_ARG,
    137        help="Open the created file(s) in an editor; if a "
    138        "binary is supplied it will be used otherwise the default editor for "
    139        "your environment will be opened",
    140    )
    141 
    142    for base_suite in addtest.TEST_CREATORS:
    143        cls = addtest.TEST_CREATORS[base_suite]
    144        if hasattr(cls, "get_parser"):
    145            group = parser.add_argument_group(base_suite)
    146            cls.get_parser(group)
    147 
    148    parser.add_argument("test", nargs="?", help=("Test to create."))
    149    return parser
    150 
    151 
    152 @Command(
    153    "addtest",
    154    category="testing",
    155    description="Generate tests based on templates",
    156    parser=create_parser_addtest,
    157 )
    158 def addtest(
    159    command_context,
    160    suite=None,
    161    test=None,
    162    doc=None,
    163    overwrite=False,
    164    editor=MISSING_ARG,
    165    **kwargs,
    166 ):
    167 
    168    import addtest
    169    from moztest.resolve import TEST_SUITES
    170 
    171    if not suite and not test:
    172        return create_parser_addtest().parse_args(["--help"])
    173 
    174    if suite in SUITE_SYNONYMS:
    175        suite = SUITE_SYNONYMS[suite]
    176 
    177    if test:
    178        if not overwrite and os.path.isfile(os.path.abspath(test)):
    179            print("Error: can't generate a test that already exists:", test)
    180            return 1
    181 
    182        abs_test = os.path.abspath(test)
    183        if doc is None:
    184            doc = guess_doc(abs_test)
    185        if suite is None:
    186            guessed_suite, err = guess_suite(abs_test)
    187            if err:
    188                print(err)
    189                return 1
    190            suite = guessed_suite
    191 
    192    else:
    193        test = None
    194        if doc is None:
    195            doc = "html"
    196 
    197    if not suite:
    198        print(
    199            "We couldn't automatically determine a suite. "
    200            f"Please specify `--suite` with one of the following options:\n{ADD_TEST_SUPPORTED_SUITES}\n"
    201            "If you'd like to add support to a new suite, please file a bug "
    202            "blocking https://bugzilla.mozilla.org/show_bug.cgi?id=1540285."
    203        )
    204        return 1
    205 
    206    if doc not in ADD_TEST_SUPPORTED_DOCS:
    207        print(
    208            "Error: invalid `doc`. Either pass in a test with a valid extension"
    209            f"({ADD_TEST_SUPPORTED_DOCS}) or pass in the `doc` argument"
    210        )
    211        return 1
    212 
    213    creator_cls = addtest.creator_for_suite(suite)
    214 
    215    if creator_cls is None:
    216        print(f"Sorry, `addtest` doesn't currently know how to add {suite}")
    217        return 1
    218 
    219    creator = creator_cls(command_context.topsrcdir, test, suite, doc, **kwargs)
    220 
    221    creator.check_args()
    222 
    223    paths = []
    224    added_tests = False
    225    for path, template in creator:
    226        if not template:
    227            continue
    228        added_tests = True
    229        if path:
    230            paths.append(path)
    231            print(f"Adding a test file at {path} (suite `{suite}`)")
    232 
    233            try:
    234                os.makedirs(os.path.dirname(path))
    235            except OSError:
    236                pass
    237 
    238            with open(path, "w", newline="\n") as f:
    239                f.write(template)
    240        else:
    241            # write to stdout if you passed only suite and doc and not a file path
    242            print(template)
    243 
    244    if not added_tests:
    245        return 1
    246 
    247    if test:
    248        creator.update_manifest()
    249 
    250        # Small hack, should really do this better
    251        if suite.startswith("wpt-"):
    252            suite = "web-platform-tests"
    253 
    254        mach_command = TEST_SUITES[suite]["mach_command"]
    255        print(
    256            "Please make sure to add the new test to your commit. "
    257            f"You can now run the test with:\n    ./mach {mach_command} {test}"
    258        )
    259 
    260    if editor is not MISSING_ARG:
    261        if editor is not None:
    262            pass
    263        elif "VISUAL" in os.environ:
    264            editor = os.environ["VISUAL"]
    265        elif "EDITOR" in os.environ:
    266            editor = os.environ["EDITOR"]
    267        else:
    268            print("Unable to determine editor; please specify a binary")
    269            editor = None
    270 
    271        proc = None
    272        if editor:
    273            proc = subprocess.Popen(f"{editor} {' '.join(paths)}", shell=True)
    274 
    275        if proc:
    276            proc.wait()
    277 
    278    return 0
    279 
    280 
    281 def guess_doc(abs_test):
    282    filename = os.path.basename(abs_test)
    283    return os.path.splitext(filename)[1].strip(".")
    284 
    285 
    286 def guess_suite(abs_test):
    287    # If you pass a abs_test, try to detect the type based on the name
    288    # and folder. This detection can be skipped if you pass the `type` arg.
    289    err = None
    290    guessed_suite = None
    291    parent = os.path.dirname(abs_test)
    292    filename = os.path.basename(abs_test)
    293 
    294    has_browser_ini = os.path.isfile(os.path.join(parent, "browser.ini"))
    295    has_browser_toml = os.path.isfile(os.path.join(parent, "browser.toml"))
    296    has_chrome_ini = os.path.isfile(os.path.join(parent, "chrome.ini"))
    297    has_chrome_toml = os.path.isfile(os.path.join(parent, "chrome.toml"))
    298    has_plain_ini = os.path.isfile(os.path.join(parent, "mochitest.ini"))
    299    has_plain_toml = os.path.isfile(os.path.join(parent, "mochitest.toml"))
    300    has_xpcshell_ini = os.path.isfile(os.path.join(parent, "xpcshell.ini"))
    301    has_xpcshell_toml = os.path.isfile(os.path.join(parent, "xpcshell.toml"))
    302 
    303    in_wpt_folder = abs_test.startswith(
    304        os.path.abspath(os.path.join("testing", "web-platform"))
    305    )
    306 
    307    if in_wpt_folder:
    308        guessed_suite = "web-platform-tests"
    309        if "/css/" in abs_test:
    310            guessed_suite = "web-platform-tests-reftest"
    311    elif (
    312        filename.startswith("test_")
    313        and (has_xpcshell_ini or has_xpcshell_toml)
    314        and guess_doc(abs_test) == "js"
    315    ):
    316        guessed_suite = "xpcshell"
    317    elif filename.startswith("browser_") and (has_browser_ini or has_browser_toml):
    318        guessed_suite = "mochitest-browser-chrome"
    319    elif filename.startswith("test_"):
    320        if (has_chrome_ini or has_chrome_toml) and (has_plain_ini or has_plain_toml):
    321            err = (
    322                "Error: directory contains both a chrome.toml and mochitest.toml. "
    323                "Please set --suite=mochitest-chrome or --suite=mochitest-plain."
    324            )
    325    elif has_chrome_ini or has_chrome_toml:
    326        guessed_suite = "mochitest-chrome"
    327    elif has_plain_ini or has_plain_toml:
    328        guessed_suite = "mochitest-plain"
    329    return guessed_suite, err
    330 
    331 
class MachTestRunner:
    """Adapter for mach test to simplify its import externally.

    Note: `test` intentionally takes `command_context` as its first
    positional argument (no `self`); it simply forwards to the module-level
    `test` mach command.
    """

    def test(command_context, what, extra_args, **log_args):
        """Delegate to the module-level `test` command unchanged."""
        return test(command_context, what, extra_args, **log_args)
    337 
    338 
    339 @Command(
    340    "test",
    341    category="testing",
    342    description="Run tests (detects the kind of test and runs it).",
    343    parser=get_test_parser,
    344 )
    345 def test(command_context, what, extra_args, **log_args):
    346    """Run tests from names or paths.
    347 
    348    mach test accepts arguments specifying which tests to run. Each argument
    349    can be:
    350 
    351    * The path to a test file
    352    * A directory containing tests
    353    * A test suite name
    354    * An alias to a test suite name (codes used on TreeHerder)
    355    * path to a test manifest
    356 
    357    When paths or directories are given, they are first resolved to test
    358    files known to the build system.
    359 
    360    If resolved tests belong to more than one test type/flavor/harness,
    361    the harness for each relevant type/flavor will be invoked. e.g. if
    362    you specify a directory with xpcshell and browser chrome mochitests,
    363    both harnesses will be invoked.
    364 
    365    Warning: `mach test` does not automatically re-build.
    366    Please remember to run `mach build` when necessary.
    367 
    368    EXAMPLES
    369 
    370    Run all test files in the devtools/client/shared/redux/middleware/xpcshell/
    371    directory:
    372 
    373    `./mach test devtools/client/shared/redux/middleware/xpcshell/`
    374 
    375    The below command prints a short summary of results instead of
    376    the default more verbose output.
    377    Do not forget the - (minus sign) after --log-grouped!
    378 
    379    `./mach test --log-grouped - devtools/client/shared/redux/middleware/xpcshell/`
    380 
    381    To learn more about arguments for each test type/flavor/harness, please run
    382    `./mach <test-harness> --help`. For example, `./mach mochitest --help`.
    383    """
    384    from mozlog.commandline import setup_logging
    385    from mozlog.handlers import ResourceHandler, StreamHandler
    386    from moztest.resolve import TEST_SUITES, TestResolver, get_suite_definition
    387 
    388    if not log_args.get("auto") and not what:
    389        print("Error: You must specify test paths or use --auto flag")
    390        return 1
    391 
    392    if log_args.get("auto"):
    393        from itertools import chain
    394 
    395        from gecko_taskgraph.util.bugbug import patch_schedules
    396        from mozversioncontrol.factory import get_specific_repository_object
    397 
    398        if what:
    399            print(
    400                "Note: when using --auto, any test paths specified will be combined with BugBug's recommendations."
    401            )
    402 
    403        selection_mode = log_args.get("auto")
    404 
    405        repo = get_specific_repository_object(".", "git")
    406        base_commit = repo.base_ref_as_commit()
    407        patch = "\n".join([
    408            repo.get_patches_after_ref(base_commit),
    409            repo.get_patch_for_uncommitted_changes(),
    410        ])
    411        if not patch.strip():
    412            print("No local changes detected; no tests to run.")
    413            return 1
    414 
    415        print(
    416            f"Querying BugBug for test recommendations... (based on changes after {base_commit[:8]})"
    417        )
    418        schedules = patch_schedules(base_commit, patch, selection_mode)
    419        if not schedules:
    420            print("BugBug did not recommend any tests for your changes.")
    421 
    422        if not schedules and not what:
    423            print("Consider specifying tests by path or suite name.")
    424            return 1
    425 
    426        test_paths = sorted(schedules.keys())
    427        print(f"BugBug recommended {len(test_paths)} test group(s):")
    428        for path in test_paths:
    429            print(f"  {path} (confidence: {schedules[path]:.2f})")
    430 
    431        what = set(chain(what, test_paths))
    432 
    433    resolver = command_context._spawn(TestResolver)
    434    run_suites, run_tests = resolver.resolve_metadata(what)
    435 
    436    if not run_suites and not run_tests:
    437        print(UNKNOWN_TEST)
    438        return 1
    439 
    440    if log_args.get("debugger", None):
    441        import mozdebug
    442 
    443        if not mozdebug.get_debugger_info(log_args.get("debugger")):
    444            sys.exit(1)
    445        extra_args_debugger_notation = "=".join([
    446            "--debugger",
    447            log_args.get("debugger"),
    448        ])
    449        if extra_args:
    450            extra_args.append(extra_args_debugger_notation)
    451        else:
    452            extra_args = [extra_args_debugger_notation]
    453 
    454    # Create shared logger
    455    format_args = {"level": command_context._mach_context.settings["test"]["level"]}
    456    if not run_suites and len(run_tests) == 1:
    457        format_args["verbose"] = True
    458        format_args["compact"] = False
    459 
    460    default_format = command_context._mach_context.settings["test"]["format"]
    461    log = setup_logging(
    462        "mach-test", log_args, {default_format: sys.stdout}, format_args
    463    )
    464    for handler in log.handlers:
    465        if isinstance(handler, StreamHandler):
    466            handler.formatter.inner.summary_on_shutdown = True
    467 
    468    log.add_handler(ResourceHandler(command_context))
    469 
    470    if log_args.get("custom_handler", None) is not None:
    471        log.add_handler(log_args.get("custom_handler"))
    472 
    473    status = None
    474    for suite_name in run_suites:
    475        suite = TEST_SUITES[suite_name]
    476        kwargs = suite["kwargs"]
    477        kwargs["log"] = log
    478        kwargs.setdefault("subsuite", None)
    479 
    480        if "mach_command" in suite:
    481            res = command_context._mach_context.commands.dispatch(
    482                suite["mach_command"],
    483                command_context._mach_context,
    484                argv=extra_args,
    485                **kwargs,
    486            )
    487            if res:
    488                status = res
    489 
    490    buckets = {}
    491    for test in run_tests:
    492        key = (test["flavor"], test.get("subsuite", ""))
    493        buckets.setdefault(key, []).append(test)
    494 
    495    for (flavor, subsuite), tests in sorted(buckets.items()):
    496        _, m = get_suite_definition(flavor, subsuite)
    497        if "mach_command" not in m:
    498            substr = f"-{subsuite}" if subsuite else ""
    499            print(UNKNOWN_FLAVOR % (flavor, substr))
    500            status = 1
    501            continue
    502 
    503        kwargs = dict(m["kwargs"])
    504        kwargs["log"] = log
    505        kwargs.setdefault("subsuite", None)
    506 
    507        res = command_context._mach_context.commands.dispatch(
    508            m["mach_command"],
    509            command_context._mach_context,
    510            argv=extra_args,
    511            test_objects=tests,
    512            **kwargs,
    513        )
    514        if res:
    515            status = res
    516 
    517    if not log.has_shutdown:
    518        log.shutdown()
    519    return status
    520 
    521 
    522 @Command(
    523    "cppunittest", category="testing", description="Run cpp unit tests (C++ tests)."
    524 )
    525 @CommandArgument(
    526    "test_files",
    527    nargs="*",
    528    metavar="N",
    529    help="Test to run. Can be specified as one or more files or "
    530    "directories, or omitted. If omitted, the entire test suite is "
    531    "executed.",
    532 )
    533 def run_cppunit_test(command_context, **params):
    534    from mozlog import commandline
    535 
    536    log = params.get("log")
    537    if not log:
    538        log = commandline.setup_logging("cppunittest", {}, {"tbpl": sys.stdout})
    539 
    540    # See if we have crash symbols
    541    symbols_path = os.path.join(command_context.distdir, "crashreporter-symbols")
    542    if not os.path.isdir(symbols_path):
    543        symbols_path = None
    544 
    545    # If no tests specified, run all tests in main manifest
    546    tests = params["test_files"]
    547    if not tests:
    548        tests = [os.path.join(command_context.distdir, "cppunittests")]
    549        manifest_path = os.path.join(
    550            command_context.topsrcdir, "testing", "cppunittest.toml"
    551        )
    552    else:
    553        manifest_path = None
    554 
    555    utility_path = command_context.bindir
    556 
    557    if conditions.is_android(command_context):
    558        from mozrunner.devices.android_device import (
    559            InstallIntent,
    560            verify_android_device,
    561        )
    562 
    563        verify_android_device(command_context, install=InstallIntent.NO)
    564        return run_android_test(
    565            command_context, tests, symbols_path, manifest_path, log
    566        )
    567 
    568    return run_desktop_test(
    569        command_context, tests, symbols_path, manifest_path, utility_path, log
    570    )
    571 
    572 
    573 def run_desktop_test(
    574    command_context, tests, symbols_path, manifest_path, utility_path, log
    575 ):
    576    import runcppunittests as cppunittests
    577    from mozlog import commandline
    578 
    579    parser = cppunittests.CPPUnittestOptions()
    580    commandline.add_logging_group(parser)
    581    options, args = parser.parse_args()
    582 
    583    options.symbols_path = symbols_path
    584    options.manifest_path = manifest_path
    585    options.utility_path = utility_path
    586    options.xre_path = command_context.bindir
    587 
    588    try:
    589        result = cppunittests.run_test_harness(options, tests)
    590    except Exception as e:
    591        log.error(f"Caught exception running cpp unit tests: {str(e)}")
    592        result = False
    593        raise
    594 
    595    return 0 if result else 1
    596 
    597 
    598 def run_android_test(command_context, tests, symbols_path, manifest_path, log):
    599    import remotecppunittests
    600    from mozlog import commandline
    601 
    602    parser = remotecppunittests.RemoteCPPUnittestOptions()
    603    commandline.add_logging_group(parser)
    604    options, args = parser.parse_args()
    605 
    606    if not options.adb_path:
    607        from mozrunner.devices.android_device import get_adb_path
    608 
    609        options.adb_path = get_adb_path(command_context)
    610    options.symbols_path = symbols_path
    611    options.manifest_path = manifest_path
    612    options.xre_path = command_context.bindir
    613    options.local_lib = command_context.bindir.replace("bin", "geckoview")
    614    for file in os.listdir(os.path.join(command_context.topobjdir, "dist")):
    615        if file.endswith(".apk") and file.startswith("geckoview"):
    616            options.local_apk = os.path.join(command_context.topobjdir, "dist", file)
    617            log.info("using APK: " + options.local_apk)
    618            break
    619 
    620    try:
    621        result = remotecppunittests.run_test_harness(options, tests)
    622    except Exception as e:
    623        log.error(f"Caught exception running cpp unit tests: {str(e)}")
    624        result = False
    625        raise
    626 
    627    return 0 if result else 1
    628 
    629 
    630 def executable_name(name):
    631    return name + ".exe" if sys.platform.startswith("win") else name
    632 
    633 
    634 @Command(
    635    "jstests",
    636    category="testing",
    637    description="Run SpiderMonkey JS tests in the JS shell.",
    638    ok_if_tests_disabled=True,
    639 )
    640 @CommandArgument("--shell", help="The shell to be used")
    641 @CommandArgument(
    642    "params",
    643    nargs=argparse.REMAINDER,
    644    help="Extra arguments to pass down to the test harness.",
    645 )
    646 def run_jstests(command_context, shell, params):
    647    command_context.virtualenv_manager.ensure()
    648    python = command_context.virtualenv_manager.python_path
    649 
    650    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    651    jstest_cmd = [
    652        python,
    653        os.path.join(command_context.topsrcdir, "js", "src", "tests", "jstests.py"),
    654        js,
    655    ] + params
    656 
    657    return subprocess.call(jstest_cmd)
    658 
    659 
    660 @Command(
    661    "jit-test",
    662    category="testing",
    663    description="Run SpiderMonkey jit-tests in the JS shell.",
    664    ok_if_tests_disabled=True,
    665 )
    666 @CommandArgument("--shell", help="The shell to be used")
    667 @CommandArgument(
    668    "--cgc",
    669    action="store_true",
    670    default=False,
    671    help="Run with the SM(cgc) job's env vars",
    672 )
    673 @CommandArgument(
    674    "params",
    675    nargs=argparse.REMAINDER,
    676    help="Extra arguments to pass down to the test harness.",
    677 )
    678 def run_jittests(command_context, shell, cgc, params):
    679    command_context.virtualenv_manager.ensure()
    680    python = command_context.virtualenv_manager.python_path
    681 
    682    js = shell or os.path.join(command_context.bindir, executable_name("js"))
    683    jittest_cmd = [
    684        python,
    685        os.path.join(command_context.topsrcdir, "js", "src", "jit-test", "jit_test.py"),
    686        js,
    687    ] + params
    688 
    689    env = os.environ.copy()
    690    if cgc:
    691        env["JS_GC_ZEAL"] = "IncrementalMultipleSlices"
    692 
    693    return subprocess.call(jittest_cmd, env=env)
    694 
    695 
    696 @Command("jsapi-tests", category="testing", description="Run SpiderMonkey JSAPI tests.")
    697 @CommandArgument(
    698    "--list",
    699    action="store_true",
    700    default=False,
    701    help="List all tests",
    702 )
    703 @CommandArgument(
    704    "--frontend-only",
    705    action="store_true",
    706    default=False,
    707    help="Run tests for frontend-only APIs, with light-weight entry point",
    708 )
    709 @CommandArgument(
    710    "test_name",
    711    nargs="?",
    712    metavar="N",
    713    help="Test to run. Can be a prefix or omitted. If "
    714    "omitted, the entire test suite is executed.",
    715 )
    716 def run_jsapitests(command_context, list=False, frontend_only=False, test_name=None):
    717    jsapi_tests_cmd = [
    718        os.path.join(command_context.bindir, executable_name("jsapi-tests"))
    719    ]
    720    if list:
    721        jsapi_tests_cmd.append("--list")
    722    if frontend_only:
    723        jsapi_tests_cmd.append("--frontend-only")
    724    if test_name:
    725        jsapi_tests_cmd.append(test_name)
    726 
    727    test_env = os.environ.copy()
    728    test_env["TOPSRCDIR"] = command_context.topsrcdir
    729 
    730    result = subprocess.call(jsapi_tests_cmd, env=test_env)
    731    if result != 0:
    732        print(f"jsapi-tests failed, exit code {result}")
    733    return result
    734 
    735 
    736 def run_check_js_msg(command_context):
    737    command_context.virtualenv_manager.ensure()
    738    python = command_context.virtualenv_manager.python_path
    739 
    740    check_cmd = [
    741        python,
    742        os.path.join(command_context.topsrcdir, "config", "check_js_msg_encoding.py"),
    743    ]
    744 
    745    return subprocess.call(check_cmd)
    746 
    747 
    748 def get_jsshell_parser():
    749    from jsshell.benchmark import get_parser
    750 
    751    return get_parser()
    752 
    753 
    754 @Command(
    755    "jsshell-bench",
    756    category="testing",
    757    parser=get_jsshell_parser,
    758    description="Run benchmarks in the SpiderMonkey JS shell.",
    759 )
    760 def run_jsshelltests(command_context, **kwargs):
    761    from jsshell import benchmark
    762 
    763    return benchmark.run(**kwargs)
    764 
    765 
@Command(
    "test-info", category="testing", description="Display historical test results."
)
def test_info(command_context):
    """Entry point for `mach test-info`.

    Intentionally empty: all functionality is implemented as subcommands
    (see the @SubCommand definitions below).
    """
    773 
    774 
    775 class TestInfoNodeRunner(MozbuildObject):
    776    """Run TestInfo node tests."""
    777 
    778    def run_node_cmd(self, monitor, days=1, revision=None, output_dir=None):
    779        """Run the TestInfo node command."""
    780 
    781        self.test_timings_dir = os.path.join(self.topsrcdir, "testing", "timings")
    782        test_runner_script = os.path.join(
    783            self.test_timings_dir, "fetch-xpcshell-data.js"
    784        )
    785 
    786        # Build the command to run
    787        node_binary, _ = find_node_executable()
    788        cmd = [node_binary, test_runner_script]
    789 
    790        if revision:
    791            cmd.extend(["--revision", revision])
    792        else:
    793            cmd.extend(["--days", str(days)])
    794 
    795        if output_dir:
    796            cmd.extend(["--output-dir", os.path.abspath(output_dir)])
    797 
    798        print(f"Running: {' '.join(cmd)}")
    799        print(f"Working directory: {self.test_timings_dir}")
    800 
    801        try:
    802            # Run the test runner and capture stdout line by line
    803            process = subprocess.Popen(
    804                cmd,
    805                cwd=self.test_timings_dir,
    806                stdout=subprocess.PIPE,
    807                stderr=subprocess.STDOUT,
    808                text=True,
    809                bufsize=1,
    810            )
    811 
    812            for line_ in process.stdout:
    813                line = line_.rstrip()
    814                # Print to console
    815                print(line)
    816 
    817                # Add as instant event marker to profile (skip empty lines)
    818                if line:
    819                    monitor.record_event(line)
    820 
    821            process.wait()
    822            return process.returncode
    823        except FileNotFoundError:
    824            print(
    825                "ERROR: Node.js not found. Please ensure Node.js is installed and in your PATH."
    826            )
    827            return 1
    828        except Exception as e:
    829            print(f"ERROR: Failed to run TestInfo node command: {e}")
    830            return 1
    831 
    832 
    833 @SubCommand(
    834    "test-info",
    835    "xpcshell-timings",
    836    description="Collect timing information for XPCShell test jobs.",
    837 )
    838 @CommandArgument(
    839    "--days",
    840    default=1,
    841    help="Number of days to download and aggregate, starting with yesterday",
    842 )
    843 @CommandArgument(
    844    "--revision",
    845    default="",
    846    help="revision to fetch data for ('mozilla-central:<revision id>', '<revision id>' for a try push or 'current' to take the revision from the environment)",
    847 )
    848 @CommandArgument("--output-dir", help="Path to report file.")
    849 def test_info_xpcshell_timings(command_context, days, output_dir, revision=None):
    850    # Start resource monitoring with 0.1s sampling rate
    851    monitor = SystemResourceMonitor(poll_interval=0.1)
    852    monitor.start()
    853 
    854    try:
    855        # node fetch-xpcshell-data.js --days 1
    856        runner = TestInfoNodeRunner.from_environment(
    857            cwd=os.getcwd(), detect_virtualenv_mozinfo=False
    858        )
    859 
    860        # Handle 'current' special value to use current build's revision
    861        if revision == "current":
    862            rev = os.environ.get("MOZ_SOURCE_CHANGESET", "")
    863            repo = os.environ.get("MOZ_SOURCE_REPO", "")
    864 
    865            if rev and repo:
    866                # Extract project name from repository URL
    867                # e.g., https://hg.mozilla.org/try -> try
    868                # e.g., https://hg.mozilla.org/mozilla-central -> mozilla-central
    869                parsed_url = urlparse(repo)
    870                project = os.path.basename(parsed_url.path)
    871                revision = f"{project}:{rev}"
    872        elif revision and ":" not in revision:
    873            # Bare revision ID without project prefix - assume it's a try push
    874            revision = f"try:{revision}"
    875 
    876        runner.run_node_cmd(
    877            monitor, days=days, revision=revision, output_dir=output_dir
    878        )
    879    finally:
    880        # Stop resource monitoring and save profile
    881        if output_dir:
    882            monitor.stop(upload_dir=output_dir)
    883            profile_path = os.path.join(output_dir, "profile_resource-usage.json")
    884        else:
    885            monitor.stop()
    886            # This is where ./mach resource-usage will find the profile.
    887            profile_path = command_context._get_state_filename(
    888                "profile_build_resources.json"
    889            )
    890        with open(profile_path, "w", encoding="utf-8", newline="\n") as fh:
    891            to_write = json.dumps(monitor.as_profile(), separators=(",", ":"))
    892            fh.write(to_write)
    893        print(f"Resource usage profile saved to: {profile_path}")
    894        if not output_dir:
    895            print("View it with: ./mach resource-usage")
    896 
    897 
    898 @SubCommand(
    899    "test-info",
    900    "tests",
    901    description="Display historical test result summary for named tests.",
    902 )
    903 @CommandArgument("test_names", nargs=argparse.REMAINDER, help="Test(s) of interest.")
    904 @CommandArgument(
    905    "--start",
    906    default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
    907    help="Start date (YYYY-MM-DD)",
    908 )
    909 @CommandArgument(
    910    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
    911 )
    912 @CommandArgument(
    913    "--show-info",
    914    action="store_true",
    915    help="Retrieve and display general test information.",
    916 )
    917 @CommandArgument(
    918    "--show-bugs",
    919    action="store_true",
    920    help="Retrieve and display related Bugzilla bugs.",
    921 )
    922 @CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
    923 def test_info_tests(
    924    command_context,
    925    test_names,
    926    start,
    927    end,
    928    show_info,
    929    show_bugs,
    930    verbose,
    931 ):
    932    import testinfo
    933 
    934    ti = testinfo.TestInfoTests(verbose)
    935    ti.report(
    936        test_names,
    937        start,
    938        end,
    939        show_info,
    940        show_bugs,
    941    )
    942 
    943 
    944 @SubCommand(
    945    "test-info",
    946    "report",
    947    description="Generate a json report of test manifests and/or tests "
    948    "categorized by Bugzilla component and optionally filtered "
    949    "by path, component, and/or manifest annotations.",
    950 )
    951 @CommandArgument(
    952    "--components",
    953    default=None,
    954    help="Comma-separated list of Bugzilla components."
    955    " eg. Testing::General,Core::WebVR",
    956 )
    957 @CommandArgument(
    958    "--flavor",
    959    help='Limit results to tests of the specified flavor (eg. "xpcshell").',
    960 )
    961 @CommandArgument(
    962    "--subsuite",
    963    help='Limit results to tests of the specified subsuite (eg. "devtools").',
    964 )
    965 @CommandArgument(
    966    "paths", nargs=argparse.REMAINDER, help="File system paths of interest."
    967 )
    968 @CommandArgument(
    969    "--show-manifests",
    970    action="store_true",
    971    help="Include test manifests in report.",
    972 )
    973 @CommandArgument(
    974    "--show-tests", action="store_true", help="Include individual tests in report."
    975 )
    976 @CommandArgument(
    977    "--show-summary", action="store_true", help="Include summary in report."
    978 )
    979 @CommandArgument(
    980    "--show-annotations",
    981    action="store_true",
    982    help="Include list of manifest annotation conditions in report.",
    983 )
    984 @CommandArgument(
    985    "--show-testruns",
    986    action="store_true",
    987    help="Include total number of runs the test has if there are failures.",
    988 )
    989 @CommandArgument(
    990    "--filter-values",
    991    help="Comma-separated list of value regular expressions to filter on; "
    992    "displayed tests contain all specified values.",
    993 )
    994 @CommandArgument(
    995    "--filter-keys",
    996    help="Comma-separated list of test keys to filter on, "
    997    'like "skip-if"; only these fields will be searched '
    998    "for filter-values.",
    999 )
   1000 @CommandArgument(
   1001    "--no-component-report",
   1002    action="store_false",
   1003    dest="show_components",
   1004    default=True,
   1005    help="Do not categorize by bugzilla component.",
   1006 )
   1007 @CommandArgument("--output-file", help="Path to report file.")
   1008 @CommandArgument("--runcounts-input-file", help="Optional path to report file.")
   1009 @CommandArgument(
   1010    "--config-matrix-output-file",
   1011    help="Path to report the config matrix for each manifest.",
   1012 )
   1013 @CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
   1014 @CommandArgument(
   1015    "--start",
   1016    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
   1017    help="Start date (YYYY-MM-DD)",
   1018 )
   1019 @CommandArgument(
   1020    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
   1021 )
   1022 def test_report(
   1023    command_context,
   1024    components,
   1025    flavor,
   1026    subsuite,
   1027    paths,
   1028    show_manifests,
   1029    show_tests,
   1030    show_summary,
   1031    show_annotations,
   1032    filter_values,
   1033    filter_keys,
   1034    show_components,
   1035    output_file,
   1036    verbose,
   1037    start,
   1038    end,
   1039    show_testruns,
   1040    runcounts_input_file,
   1041    config_matrix_output_file,
   1042 ):
   1043    import testinfo
   1044    from mozbuild import build_commands
   1045 
   1046    try:
   1047        command_context.config_environment
   1048    except BuildEnvironmentNotFoundException:
   1049        print("Looks like configure has not run yet, running it now...")
   1050        build_commands.configure(command_context)
   1051 
   1052    ti = testinfo.TestInfoReport(verbose)
   1053    ti.report(
   1054        components,
   1055        flavor,
   1056        subsuite,
   1057        paths,
   1058        show_manifests,
   1059        show_tests,
   1060        show_summary,
   1061        show_annotations,
   1062        filter_values,
   1063        filter_keys,
   1064        show_components,
   1065        output_file,
   1066        start,
   1067        end,
   1068        show_testruns,
   1069        runcounts_input_file,
   1070        config_matrix_output_file,
   1071    )
   1072 
   1073 
   1074 @SubCommand(
   1075    "test-info",
   1076    "report-diff",
   1077    description='Compare two reports generated by "test-info reports".',
   1078 )
   1079 @CommandArgument(
   1080    "--before",
   1081    default=None,
   1082    help="The first (earlier) report file; path to local file or url.",
   1083 )
   1084 @CommandArgument(
   1085    "--after", help="The second (later) report file; path to local file or url."
   1086 )
   1087 @CommandArgument(
   1088    "--output-file",
   1089    help="Path to report file to be written. If not specified, report"
   1090    "will be written to standard output.",
   1091 )
   1092 @CommandArgument("--verbose", action="store_true", help="Enable debug logging.")
   1093 def test_report_diff(command_context, before, after, output_file, verbose):
   1094    import testinfo
   1095 
   1096    ti = testinfo.TestInfoReport(verbose)
   1097    ti.report_diff(before, after, output_file)
   1098 
   1099 
   1100 @SubCommand(
   1101    "test-info",
   1102    "testrun-report",
   1103    description="Generate report of number of runs for each test group (manifest)",
   1104 )
   1105 @CommandArgument("--output-file", help="Path to report file.")
   1106 def test_info_testrun_report(command_context, output_file):
   1107    import json
   1108 
   1109    import testinfo
   1110 
   1111    ti = testinfo.TestInfoReport(verbose=True)
   1112    if os.environ.get("GECKO_HEAD_REPOSITORY", "") in [
   1113        "https://hg.mozilla.org/mozilla-central",
   1114        "https://hg.mozilla.org/try",
   1115    ]:
   1116        # keep the original format around as data store
   1117        runcounts = ti.get_runcounts()
   1118        if not output_file:
   1119            print(runcounts)
   1120            return
   1121 
   1122        output_file = os.path.abspath(output_file)
   1123        output_dir = os.path.dirname(output_file)
   1124        if not os.path.isdir(output_dir):
   1125            os.makedirs(output_dir)
   1126        with open(output_file, "w") as f:
   1127            json.dump(runcounts, f)
   1128 
   1129        # creating custom 1, 7, 30 day artifacts instead
   1130        for days in [1, 7, 30]:
   1131            optimized_data = ti.optimize_runcounts_data(runcounts, days)
   1132            new_output_file = output_file.replace(".json", f"-{days}days.json")
   1133            with open(new_output_file, "w") as f:
   1134                json.dump(optimized_data, f)
   1135 
   1136 
   1137 @SubCommand(
   1138    "test-info",
   1139    "failure-report",
   1140    description="Display failure line groupings and frequencies for "
   1141    "single tracking intermittent bugs.",
   1142 )
   1143 @CommandArgument(
   1144    "--start",
   1145    default=(date.today() - timedelta(30)).strftime("%Y-%m-%d"),
   1146    help="Start date (YYYY-MM-DD)",
   1147 )
   1148 @CommandArgument(
   1149    "--end", default=date.today().strftime("%Y-%m-%d"), help="End date (YYYY-MM-DD)"
   1150 )
   1151 @CommandArgument(
   1152    "--bugid",
   1153    default=None,
   1154    help="bugid for treeherder intermittent failures data query.",
   1155 )
   1156 def test_info_failures(
   1157    command_context,
   1158    start,
   1159    end,
   1160    bugid,
   1161 ):
   1162    # bugid comes in as a string, we need an int:
   1163    try:
   1164        bugid = int(bugid)
   1165    except ValueError:
   1166        bugid = None
   1167    if not bugid:
   1168        print("Please enter a valid bugid (i.e. '1760132')")
   1169        return
   1170 
   1171    # get bug info
   1172    url = f"https://bugzilla.mozilla.org/rest/bug?include_fields=summary,depends_on&id={bugid}"
   1173    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
   1174    if r.status_code != 200:
   1175        print(f"{r.status_code} error retrieving url: {url}")
   1176 
   1177    data = r.json()
   1178    if not data:
   1179        print(f"unable to get bugzilla information for {bugid}")
   1180        return
   1181 
   1182    summary = data["bugs"][0]["summary"]
   1183    parts = summary.split("|")
   1184    if not summary.endswith("single tracking bug") or len(parts) != 2:
   1185        print("this query only works with single tracking bugs")
   1186        return
   1187 
   1188    # get depends_on bugs:
   1189    buglist = [bugid]
   1190    if "depends_on" in data["bugs"][0]:
   1191        buglist.extend(data["bugs"][0]["depends_on"])
   1192 
   1193    testname = parts[0].strip().split(" ")[-1]
   1194 
   1195    # now query treeherder to get details about annotations
   1196    data = []
   1197    for b in buglist:
   1198        url = "https://treeherder.mozilla.org/api/failuresbybug/"
   1199        url += f"?startday={start}&endday={end}&tree=trunk&bug={b}"
   1200        r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
   1201        r.raise_for_status()
   1202 
   1203        bdata = r.json()
   1204        data.extend(bdata)
   1205 
   1206    if len(data) == 0:
   1207        print("no failures were found for given bugid, please ensure bug is")
   1208        print("accessible via: https://treeherder.mozilla.org/intermittent-failures")
   1209        return
   1210 
   1211    # query VCS to get current list of variants:
   1212    import yaml
   1213 
   1214    url = "https://hg.mozilla.org/mozilla-central/raw-file/default/taskcluster/test_configs/variants.yml"
   1215    r = requests.get(url, headers={"User-agent": "mach-test-info/1.0"})
   1216    variants = yaml.safe_load(r.text)
   1217 
   1218    print(
   1219        f"\nQuerying data for bug {buglist} annotated from {start} to {end} on trunk.\n\n"
   1220    )
   1221    jobs = {}
   1222    lines = {}
   1223    for failure in data:
   1224        # config = platform/buildtype
   1225        # testsuite (<suite>[-variant][-<chunk>])
   1226        # lines - group by patterns that contain test name
   1227        config = f"{failure['platform']}/{failure['build_type']}"
   1228        variant = ""
   1229        suite = ""
   1230        varpos = len(failure["test_suite"])
   1231        for v in variants.keys():
   1232            var = f"-{variants[v]['suffix']}"
   1233            if var in failure["test_suite"]:
   1234                if failure["test_suite"].find(var) < varpos:
   1235                    variant = var
   1236 
   1237        if variant:
   1238            suite = failure["test_suite"].split(variant)[0]
   1239 
   1240        parts = failure["test_suite"].split("-")
   1241        try:
   1242            int(parts[-1])
   1243            suite = "-".join(parts[:-1])
   1244        except ValueError:
   1245            pass  # if this works, then the last '-X' is a number :)
   1246 
   1247        if suite == "":
   1248            print(f"Error: failure to find variant in {failure['test_suite']}")
   1249 
   1250        job = f"{config}-{suite}{variant}"
   1251        if job not in jobs.keys():
   1252            jobs[job] = 0
   1253        jobs[job] += 1
   1254 
   1255        # lines - sum(hash) of all lines where we match testname
   1256        hvalue = 0
   1257        for line in failure["lines"]:
   1258            if len(line.split(testname)) <= 1:
   1259                continue
   1260            # strip off timestamp and mozharness status
   1261            parts = line.split("TEST-UNEXPECTED")
   1262            l = f"TEST-UNEXPECTED{parts[-1]}"
   1263 
   1264            # only keep 25 characters of the failure, often longer is random numbers
   1265            parts = l.split(testname)
   1266            l = parts[0] + testname + parts[1][:25]
   1267 
   1268            hvalue += hash(l)
   1269 
   1270        if not failure["lines"]:
   1271            hvalue = 1
   1272 
   1273        if not hvalue:
   1274            continue
   1275 
   1276        if hvalue not in lines.keys():
   1277            lines[hvalue] = {"lines": failure["lines"], "config": []}
   1278        lines[hvalue]["config"].append(job)
   1279 
   1280    for h in lines.keys():
   1281        print(f"{len(lines[h]['config'])} errors with:")
   1282        failure_lines = lines[h]["lines"]
   1283        if len(failure_lines) > 0:
   1284            for l in failure_lines:
   1285                print(l)
   1286        else:
   1287            print(
   1288                "... no failure lines recorded in"
   1289                " https://treeherder.mozilla.org/intermittent-failures ..."
   1290            )
   1291 
   1292        for job in jobs:
   1293            count = len([x for x in lines[h]["config"] if x == job])
   1294            if count > 0:
   1295                print(f"  {job}: {count}")
   1296        print("")
   1297 
   1298 
   1299 @Command(
   1300    "rusttests",
   1301    category="testing",
   1302    conditions=[conditions.is_non_artifact_build],
   1303    description="Run rust unit tests (via cargo test).",
   1304 )
   1305 def run_rusttests(command_context, **kwargs):
   1306    return command_context._mach_context.commands.dispatch(
   1307        "build",
   1308        command_context._mach_context,
   1309        what=["pre-export", "export", "recurse_rusttests"],
   1310    )
   1311 
   1312 
   1313 @Command(
   1314    "fluent-migration-test",
   1315    category="testing",
   1316    description="Test Fluent migration recipes.",
   1317 )
   1318 @CommandArgument("test_paths", nargs="*", metavar="N", help="Recipe paths to test.")
   1319 def run_migration_tests(command_context, test_paths=None, **kwargs):
   1320    if not test_paths:
   1321        test_paths = []
   1322    command_context.activate_virtualenv()
   1323    from test_fluent_migrations import fmt
   1324 
   1325    rv = 0
   1326    with_context = []
   1327    for to_test in test_paths:
   1328        try:
   1329            context = fmt.inspect_migration(to_test)
   1330            for issue in context["issues"]:
   1331                command_context.log(
   1332                    logging.ERROR,
   1333                    "fluent-migration-test",
   1334                    {
   1335                        "error": issue["msg"],
   1336                        "file": to_test,
   1337                    },
   1338                    "ERROR in {file}: {error}",
   1339                )
   1340            if context["issues"]:
   1341                continue
   1342            with_context.append({
   1343                "to_test": to_test,
   1344                "references": context["references"],
   1345            })
   1346        except Exception as e:
   1347            command_context.log(
   1348                logging.ERROR,
   1349                "fluent-migration-test",
   1350                {"error": str(e), "file": to_test},
   1351                "ERROR in {file}: {error}",
   1352            )
   1353            rv |= 1
   1354    obj_dir, repo_dir = fmt.prepare_directories(command_context)
   1355    for context in with_context:
   1356        rv |= fmt.test_migration(command_context, obj_dir, repo_dir, **context)
   1357    return rv
   1358 
   1359 
   1360 @Command(
   1361    "platform-diff",
   1362    category="testing",
   1363    description="Displays the difference in platforms used for the given task by using the output of the tgdiff artifact",
   1364 )
   1365 @CommandArgument("task_id", help="task_id to fetch the tgdiff from.")
   1366 @CommandArgument(
   1367    "-r",
   1368    "--replace",
   1369    default=None,
   1370    dest="replace",
   1371    help='Array of strings to replace from the old platforms to find matches in new platforms. Eg: ["1804=2404", "-qr"] will replace "1804" by "2404" and remove "-qr" before looking at new platforms.',
   1372 )
   1373 def platform_diff(
   1374    command_context,
   1375    task_id,
   1376    replace,
   1377 ):
   1378    from platform_diff import PlatformDiff
   1379 
   1380    PlatformDiff(command_context, task_id, replace).run()