tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

jstests.py (29837B)


      1 #!/usr/bin/env python
      2 # This Source Code Form is subject to the terms of the Mozilla Public
      3 # License, v. 2.0. If a copy of the MPL was not distributed with this
      4 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
      5 
      6 """
      7 The JS Shell Test Harness.
      8 
      9 See the adjacent README.txt for more details.
     10 """
     11 
     12 import math
     13 import os
     14 import platform
     15 import posixpath
     16 import re
     17 import shlex
     18 import sys
     19 import tempfile
     20 from contextlib import contextmanager
     21 from copy import copy
     22 from datetime import datetime
     23 from itertools import chain
     24 from os.path import abspath, dirname, isfile, realpath
     25 from subprocess import call, list2cmdline
     26 
     27 from lib.adaptor import xdr_annotate
     28 from lib.progressbar import ProgressBar
     29 from lib.results import ResultsSink, TestOutput
     30 from lib.tempfile import TemporaryDirectory
     31 from lib.tests import (
     32    RefTestCase,
     33    change_env,
     34    get_cpu_count,
     35    get_environment_overlay,
     36    get_jitflags,
     37 )
     38 
     39 if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
     40    from lib.tasks_unix import run_all_tests
     41 else:
     42    from lib.tasks_win import run_all_tests
     43 
     44 here = dirname(abspath(__file__))
     45 
     46 
     47 @contextmanager
     48 def changedir(dirname):
     49    pwd = os.getcwd()
     50    os.chdir(dirname)
     51    try:
     52        yield
     53    finally:
     54        os.chdir(pwd)
     55 
     56 
     57 class PathOptions:
     58    def __init__(self, location, requested_paths, excluded_paths):
     59        self.requested_paths = requested_paths
     60        self.excluded_files, self.excluded_dirs = PathOptions._split_files_and_dirs(
     61            location, excluded_paths
     62        )
     63 
     64    @staticmethod
     65    def _split_files_and_dirs(location, paths):
     66        """Split up a set of paths into files and directories"""
     67        files, dirs = set(), set()
     68        for path in paths:
     69            fullpath = os.path.join(location, path)
     70            if path.endswith("/"):
     71                dirs.add(path[:-1])
     72            elif os.path.isdir(fullpath):
     73                dirs.add(path)
     74            elif os.path.exists(fullpath):
     75                files.add(path)
     76 
     77        return files, dirs
     78 
     79    def should_run(self, filename):
     80        # If any tests are requested by name, skip tests that do not match.
     81        if self.requested_paths and not any(
     82            req in filename for req in self.requested_paths
     83        ):
     84            return False
     85 
     86        # Skip excluded tests.
     87        if filename in self.excluded_files:
     88            return False
     89 
     90        for dir in self.excluded_dirs:
     91            if filename.startswith(dir + "/"):
     92                return False
     93 
     94        return True
     95 
     96 
     97 def parse_args():
     98    """
     99    Parse command line arguments.
    100    Returns a tuple of: (options, js_shell, requested_paths, excluded_paths)
    101        options :object: The raw OptionParser output.
    102        js_shell :str: The absolute location of the shell to test with.
    103        requested_paths :set<str>: Test paths specially requested on the CLI.
    104        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
    105    """
    106    from argparse import ArgumentParser
    107 
    108    op = ArgumentParser(
    109        description="Run jstests JS shell tests",
    110        epilog="Shell output format: [ pass | fail | timeout | skip ] progress | time",
    111    )
    112    op.add_argument(
    113        "--xul-info",
    114        dest="xul_info_src",
    115        help="config data for xulRuntime (avoids search for config/autoconf.mk)",
    116    )
    117 
    118    harness_og = op.add_argument_group("Harness Controls", "Control how tests are run.")
    119    harness_og.add_argument(
    120        "-j",
    121        "--worker-count",
    122        type=int,
    123        default=max(1, get_cpu_count()),
    124        help="Number of tests to run in parallel (default %(default)s)",
    125    )
    126    harness_og.add_argument(
    127        "-t",
    128        "--timeout",
    129        type=float,
    130        default=150.0,
    131        help="Set maximum time a test is allows to run (in seconds).",
    132    )
    133    harness_og.add_argument(
    134        "--show-slow",
    135        action="store_true",
    136        help="Show tests taking longer than a minimum time (in seconds).",
    137    )
    138    harness_og.add_argument(
    139        "--slow-test-threshold",
    140        type=float,
    141        default=5.0,
    142        help="Time in seconds a test can take until it is"
    143        "considered slow (default %(default)s).",
    144    )
    145    harness_og.add_argument(
    146        "-a",
    147        "--args",
    148        dest="shell_args",
    149        default=[],
    150        action="append",
    151        help="Extra args to pass to the JS shell.",
    152    )
    153    harness_og.add_argument(
    154        "--feature-args",
    155        dest="feature_args",
    156        default=[],
    157        action="append",
    158        help="Extra args to pass to the JS shell even when feature-testing.",
    159    )
    160    harness_og.add_argument(
    161        "--jitflags",
    162        dest="jitflags",
    163        default="none",
    164        type=str,
    165        help="IonMonkey option combinations. One of all,"
    166        " debug, ion, and none (default %(default)s).",
    167    )
    168    harness_og.add_argument(
    169        "--tbpl",
    170        action="store_true",
    171        help="Runs each test in all configurations tbpl tests.",
    172    )
    173    harness_og.add_argument(
    174        "--tbpl-debug",
    175        action="store_true",
    176        help="Runs each test in some faster configurations tbpl tests.",
    177    )
    178    harness_og.add_argument(
    179        "-g", "--debug", action="store_true", help="Run a test in debugger."
    180    )
    181    harness_og.add_argument(
    182        "--debugger", default="gdb -q --args", help="Debugger command."
    183    )
    184    harness_og.add_argument(
    185        "-J", "--jorendb", action="store_true", help="Run under JS debugger."
    186    )
    187    harness_og.add_argument(
    188        "--passthrough",
    189        action="store_true",
    190        help="Run tests with stdin/stdout attached to caller.",
    191    )
    192    harness_og.add_argument(
    193        "--test-reflect-stringify",
    194        dest="test_reflect_stringify",
    195        help="instead of running tests, use them to test the "
    196        "Reflect.stringify code in specified file",
    197    )
    198    harness_og.add_argument(
    199        "--valgrind", action="store_true", help="Run tests in valgrind."
    200    )
    201    harness_og.add_argument(
    202        "--valgrind-args", default="", help="Extra args to pass to valgrind."
    203    )
    204    harness_og.add_argument(
    205        "--rr",
    206        action="store_true",
    207        help="Run tests under RR record-and-replay debugger.",
    208    )
    209    harness_og.add_argument(
    210        "-C",
    211        "--check-output",
    212        action="store_true",
    213        help="Run tests to check output for different jit-flags",
    214    )
    215    harness_og.add_argument(
    216        "--remote", action="store_true", help="Run tests on a remote device"
    217    )
    218    harness_og.add_argument(
    219        "--deviceIP",
    220        action="store",
    221        type=str,
    222        dest="device_ip",
    223        help="IP address of remote device to test",
    224    )
    225    harness_og.add_argument(
    226        "--devicePort",
    227        action="store",
    228        type=int,
    229        dest="device_port",
    230        default=20701,
    231        help="port of remote device to test",
    232    )
    233    harness_og.add_argument(
    234        "--deviceSerial",
    235        action="store",
    236        type=str,
    237        dest="device_serial",
    238        default=None,
    239        help="ADB device serial number of remote device to test",
    240    )
    241    harness_og.add_argument(
    242        "--remoteTestRoot",
    243        dest="remote_test_root",
    244        action="store",
    245        type=str,
    246        default="/data/local/tmp/test_root",
    247        help="The remote directory to use as test root (e.g. %(default)s)",
    248    )
    249    harness_og.add_argument(
    250        "--localLib",
    251        dest="local_lib",
    252        action="store",
    253        type=str,
    254        help="The location of libraries to push -- preferably stripped",
    255    )
    256    harness_og.add_argument(
    257        "--no-xdr",
    258        dest="use_xdr",
    259        action="store_false",
    260        help="Whether to disable caching of self-hosted parsed content in XDR format.",
    261    )
    262 
    263    input_og = op.add_argument_group("Inputs", "Change what tests are run.")
    264    input_og.add_argument(
    265        "-f",
    266        "--file",
    267        dest="test_file",
    268        action="append",
    269        help="Get tests from the given file.",
    270    )
    271    input_og.add_argument(
    272        "-x",
    273        "--exclude-file",
    274        action="append",
    275        help="Exclude tests from the given file.",
    276    )
    277    input_og.add_argument(
    278        "--wpt",
    279        dest="wpt",
    280        choices=["enabled", "disabled", "if-running-everything"],
    281        default="if-running-everything",
    282        help="Enable or disable shell web-platform-tests "
    283        "(default: enable if no test paths are specified).",
    284    )
    285    input_og.add_argument(
    286        "--include",
    287        action="append",
    288        dest="requested_paths",
    289        default=[],
    290        help="Include the given test file or directory.",
    291    )
    292    input_og.add_argument(
    293        "--exclude",
    294        action="append",
    295        dest="excluded_paths",
    296        default=[],
    297        help="Exclude the given test file or directory.",
    298    )
    299    input_og.add_argument(
    300        "-d",
    301        "--exclude-random",
    302        dest="random",
    303        action="store_false",
    304        help='Exclude tests marked as "random."',
    305    )
    306    input_og.add_argument(
    307        "--run-skipped", action="store_true", help='Run tests marked as "skip."'
    308    )
    309    input_og.add_argument(
    310        "--run-only-skipped",
    311        action="store_true",
    312        help='Run only tests marked as "skip."',
    313    )
    314    input_og.add_argument(
    315        "--run-slow-tests",
    316        action="store_true",
    317        help='Do not skip tests marked as "slow."',
    318    )
    319    input_og.add_argument(
    320        "--no-extensions",
    321        action="store_true",
    322        help="Run only tests conforming to the ECMAScript 5 standard.",
    323    )
    324    input_og.add_argument(
    325        "--repeat", type=int, default=1, help="Repeat tests the given number of times."
    326    )
    327 
    328    output_og = op.add_argument_group("Output", "Modify the harness and tests output.")
    329    output_og.add_argument(
    330        "-s",
    331        "--show-cmd",
    332        action="store_true",
    333        help="Show exact commandline used to run each test.",
    334    )
    335    output_og.add_argument(
    336        "-o",
    337        "--show-output",
    338        action="store_true",
    339        help="Print each test's output to the file given by --output-file.",
    340    )
    341    output_og.add_argument(
    342        "-F",
    343        "--failed-only",
    344        action="store_true",
    345        help="If a --show-* option is given, only print output for failed tests.",
    346    )
    347    output_og.add_argument(
    348        "--no-show-failed",
    349        action="store_true",
    350        help="Don't print output for failed tests (no-op with --show-output).",
    351    )
    352    output_og.add_argument(
    353        "-O",
    354        "--output-file",
    355        help="Write all output to the given file (default: stdout).",
    356    )
    357    output_og.add_argument(
    358        "--failure-file", help="Write all not-passed tests to the given file."
    359    )
    360    output_og.add_argument(
    361        "--no-progress",
    362        dest="hide_progress",
    363        action="store_true",
    364        help="Do not show the progress bar.",
    365    )
    366    output_og.add_argument(
    367        "--tinderbox",
    368        dest="format",
    369        action="store_const",
    370        const="automation",
    371        help="Use automation-parseable output format.",
    372    )
    373    output_og.add_argument(
    374        "--format",
    375        dest="format",
    376        default="none",
    377        choices=["automation", "none"],
    378        help="Output format. Either automation or none (default %(default)s).",
    379    )
    380    output_og.add_argument(
    381        "--log-wptreport",
    382        dest="wptreport",
    383        action="store",
    384        help="Path to write a Web Platform Tests report (wptreport)",
    385    )
    386    output_og.add_argument(
    387        "--this-chunk", type=int, default=1, help="The test chunk to run."
    388    )
    389    output_og.add_argument(
    390        "--total-chunks", type=int, default=1, help="The total number of test chunks."
    391    )
    392 
    393    special_og = op.add_argument_group(
    394        "Special", "Special modes that do not run tests."
    395    )
    396    special_og.add_argument(
    397        "--make-manifests",
    398        metavar="BASE_TEST_PATH",
    399        help="Generate reftest manifest files.",
    400    )
    401 
    402    op.add_argument("--js-shell", metavar="JS_SHELL", help="JS shell to run tests with")
    403    op.add_argument(
    404        "-z", "--gc-zeal", help="GC zeal mode to use when running the shell"
    405    )
    406 
    407    options, args = op.parse_known_args()
    408 
    409    # Need a shell unless in a special mode.
    410    if not options.make_manifests:
    411        if not args:
    412            op.error("missing JS_SHELL argument")
    413        options.js_shell = os.path.abspath(args.pop(0))
    414 
    415    requested_paths = set(args)
    416 
    417    # Valgrind, gdb, and rr are mutually exclusive.
    418    if sum(map(bool, (options.valgrind, options.debug, options.rr))) > 1:
    419        op.error("--valgrind, --debug, and --rr are mutually exclusive.")
    420 
    421    # Fill the debugger field, as needed.
    422    if options.debug:
    423        if options.debugger == "lldb":
    424            debugger_prefix = ["lldb", "--"]
    425        else:
    426            debugger_prefix = options.debugger.split()
    427    else:
    428        debugger_prefix = []
    429 
    430    if options.valgrind:
    431        debugger_prefix = ["valgrind"] + options.valgrind_args.split()
    432        if os.uname()[0] == "Darwin":
    433            debugger_prefix.append("--dsymutil=yes")
    434        options.show_output = True
    435    if options.rr:
    436        debugger_prefix = ["rr", "record"]
    437 
    438    js_cmd_args = split_extra_shell_args(options.shell_args + options.feature_args)
    439    if options.jorendb:
    440        options.passthrough = True
    441        options.hide_progress = True
    442        options.worker_count = 1
    443        debugger_path = realpath(
    444            os.path.join(
    445                abspath(dirname(abspath(__file__))),
    446                "..",
    447                "..",
    448                "examples",
    449                "jorendb.js",
    450            )
    451        )
    452        js_cmd_args.extend(["-d", "-f", debugger_path, "--"])
    453    prefix = RefTestCase.build_js_cmd_prefix(
    454        options.js_shell, js_cmd_args, debugger_prefix
    455    )
    456 
    457    # If files with lists of tests to run were specified, add them to the
    458    # requested tests set.
    459    if options.test_file:
    460        for test_file in options.test_file:
    461            requested_paths |= set([
    462                line.strip() for line in open(test_file).readlines()
    463            ])
    464 
    465    excluded_paths = set(options.excluded_paths)
    466 
    467    # If files with lists of tests to exclude were specified, add them to the
    468    # excluded tests set.
    469    if options.exclude_file:
    470        for filename in options.exclude_file:
    471            with open(filename) as fp:
    472                for line in fp:
    473                    if line.startswith("#"):
    474                        continue
    475                    line = line.strip()
    476                    if not line:
    477                        continue
    478                    excluded_paths.add(line)
    479 
    480    # Handle output redirection, if requested and relevant.
    481    options.output_fp = sys.stdout
    482    if options.output_file:
    483        if not options.show_cmd:
    484            options.show_output = True
    485        try:
    486            options.output_fp = open(options.output_file, "w")
    487        except OSError as ex:
    488            raise SystemExit("Failed to open output file: " + str(ex))
    489 
    490    # Hide the progress bar if it will get in the way of other output.
    491    options.hide_progress = (
    492        options.format == "automation"
    493        or not ProgressBar.conservative_isatty()
    494        or options.hide_progress
    495    )
    496 
    497    return (options, prefix, requested_paths, excluded_paths)
    498 
    499 
    500 def load_wpt_tests(xul_tester, requested_paths, excluded_paths, update_manifest=True):
    501    """Return a list of `RefTestCase` objects for the jsshell testharness.js
    502    tests filtered by the given paths and debug-ness."""
    503    repo_root = abspath(os.path.join(here, "..", "..", ".."))
    504    wp = os.path.join(repo_root, "testing", "web-platform")
    505    wpt = os.path.join(wp, "tests")
    506 
    507    sys_paths = [
    508        "python/mozterm",
    509        "python/mozboot",
    510        "testing/mozbase/mozcrash",
    511        "testing/mozbase/mozdevice",
    512        "testing/mozbase/mozfile",
    513        "testing/mozbase/mozinfo",
    514        "testing/mozbase/mozleak",
    515        "testing/mozbase/mozlog",
    516        "testing/mozbase/mozprocess",
    517        "testing/mozbase/mozprofile",
    518        "testing/mozbase/mozrunner",
    519        "testing/mozbase/mozversion",
    520        "testing/web-platform/",
    521        "testing/web-platform/tests/tools",
    522        "testing/web-platform/tests/tools/third_party/html5lib",
    523        "testing/web-platform/tests/tools/third_party/webencodings",
    524        "testing/web-platform/tests/tools/wptrunner",
    525        "testing/web-platform/tests/tools/wptserve",
    526        "third_party/python/requests",
    527    ]
    528    abs_sys_paths = [os.path.join(repo_root, path) for path in sys_paths]
    529 
    530    failed = False
    531    for path in abs_sys_paths:
    532        if not os.path.isdir(path):
    533            failed = True
    534            print("Could not add '%s' to the path")
    535    if failed:
    536        return []
    537 
    538    sys.path[0:0] = abs_sys_paths
    539 
    540    import manifestupdate
    541    from wptrunner import products, testloader, wptcommandline, wptlogging, wpttest
    542 
    543    manifest_root = tempfile.gettempdir()
    544    (maybe_dist, maybe_bin) = os.path.split(os.path.dirname(xul_tester.js_bin))
    545    if maybe_bin == "bin":
    546        (maybe_root, maybe_dist) = os.path.split(maybe_dist)
    547        if maybe_dist == "dist":
    548            if os.path.exists(os.path.join(maybe_root, "_tests")):
    549                # Assume this is a gecko objdir.
    550                manifest_root = maybe_root
    551 
    552    logger = wptlogging.setup({}, {})
    553 
    554    test_manifests = manifestupdate.run(
    555        repo_root, manifest_root, logger, update=update_manifest
    556    )
    557 
    558    kwargs = vars(wptcommandline.create_parser().parse_args([]))
    559    kwargs.update({
    560        "config": os.path.join(
    561            manifest_root, "_tests", "web-platform", "wptrunner.local.ini"
    562        ),
    563        "gecko_e10s": False,
    564        "product": "firefox",
    565        "verify": False,
    566        "wasm": xul_tester.test("wasmIsSupported()"),
    567    })
    568    wptcommandline.set_from_config(kwargs)
    569 
    570    def filter_jsshell_tests(it):
    571        for item_type, path, tests in it:
    572            tests = set(item for item in tests if item.jsshell)
    573            if tests:
    574                yield item_type, path, tests
    575 
    576    run_info_extras = products.Product(kwargs["config"], "firefox").run_info_extras(
    577        logger, **kwargs
    578    )
    579    run_info = wpttest.get_run_info(
    580        kwargs["run_info"],
    581        "firefox",
    582        debug=xul_tester.test("isDebugBuild"),
    583        extras=run_info_extras,
    584    )
    585    release_or_beta = xul_tester.test("getBuildConfiguration('release_or_beta')")
    586    run_info["release_or_beta"] = release_or_beta
    587    run_info["nightly_build"] = not release_or_beta
    588    early_beta_or_earlier = xul_tester.test(
    589        "getBuildConfiguration('early_beta_or_earlier')"
    590    )
    591    run_info["early_beta_or_earlier"] = early_beta_or_earlier
    592 
    593    path_filter = testloader.TestFilter(
    594        test_manifests, include=requested_paths, exclude=excluded_paths
    595    )
    596    subsuites = testloader.load_subsuites(logger, run_info, None, set())
    597    loader = testloader.TestLoader(
    598        test_manifests,
    599        ["testharness"],
    600        run_info,
    601        subsuites=subsuites,
    602        manifest_filters=[path_filter, filter_jsshell_tests],
    603    )
    604 
    605    extra_helper_paths = [
    606        os.path.join(here, "web-platform-test-shims.js"),
    607        os.path.join(wpt, "resources", "testharness.js"),
    608        os.path.join(here, "testharnessreport.js"),
    609    ]
    610 
    611    pref_prefix = "javascript.options."
    612    recognized_prefs = set(["wasm_js_promise_integration"])
    613 
    614    def resolve(test_path, script):
    615        if script.startswith("/"):
    616            return os.path.join(wpt, script[1:])
    617 
    618        return os.path.join(wpt, os.path.dirname(test_path), script)
    619 
    620    tests = []
    621    for test in loader.tests[""]["testharness"]:
    622        test_path = os.path.relpath(test.path, wpt)
    623        scripts = [resolve(test_path, s) for s in test.scripts]
    624        extra_helper_paths_for_test = extra_helper_paths + scripts
    625 
    626        # We must create at least one test with the default options, along with
    627        # one test for each option given in a test-also annotation.
    628        variants = [None]
    629        flags = []
    630        for m in test.itermeta():
    631            # Search for prefs to enable that we recognize
    632            for pref in m.prefs:
    633                pref_value = m.prefs[pref]
    634                if not pref.startswith(pref_prefix):
    635                    continue
    636                short_pref = pref.replace(pref_prefix, "")
    637                if not short_pref in recognized_prefs:
    638                    continue
    639                flags.append("--setpref=" + short_pref + "=" + pref_value)
    640 
    641            if m.has_key("test-also"):  # NOQA: W601
    642                variants += m.get("test-also").split()
    643        for variant in variants:
    644            test_case = RefTestCase(
    645                wpt,
    646                test_path,
    647                extra_helper_paths=extra_helper_paths_for_test[:],
    648                wpt=test,
    649            )
    650            if variant:
    651                test_case.options.append(variant)
    652            for flag in flags:
    653                test_case.options.append(flag)
    654            tests.append(test_case)
    655    return tests
    656 
    657 
    658 def split_extra_shell_args(args):
    659    result = []
    660    for option in args:
    661        result.extend(shlex.split(option))
    662    return result
    663 
    664 
    665 def load_tests(options, requested_paths, excluded_paths):
    666    """
    667    Returns a tuple: (test_count, test_gen)
    668        test_count: [int] Number of tests that will be in test_gen
    669        test_gen: [iterable<Test>] Tests found that should be run.
    670    """
    671    from lib import manifest
    672 
    673    if options.js_shell is None:
    674        xul_tester = manifest.NullXULInfoTester()
    675    else:
    676        if options.xul_info_src is None:
    677            xul_info = manifest.XULInfo.create(options.js_shell)
    678        else:
    679            xul_abi, xul_os, xul_debug = options.xul_info_src.split(r":")
    680            xul_debug = xul_debug.lower() == "true"
    681            xul_info = manifest.XULInfo(xul_abi, xul_os, xul_debug)
    682        feature_args = split_extra_shell_args(options.feature_args)
    683        xul_tester = manifest.XULInfoTester(xul_info, options, feature_args)
    684 
    685    test_dir = dirname(abspath(__file__))
    686    path_options = PathOptions(test_dir, requested_paths, excluded_paths)
    687    test_count = manifest.count_tests(test_dir, path_options)
    688    test_gen = manifest.load_reftests(test_dir, path_options, xul_tester)
    689 
    690    # WPT tests are already run in the browser in their own harness.
    691    wpt_enabled = options.wpt == "enabled" or (
    692        options.wpt == "if-running-everything"
    693        and len(requested_paths) == 0
    694        and not options.make_manifests
    695    )
    696    if wpt_enabled:
    697        wpt_tests = load_wpt_tests(xul_tester, requested_paths, excluded_paths)
    698        test_count += len(wpt_tests)
    699        test_gen = chain(test_gen, wpt_tests)
    700 
    701    if options.test_reflect_stringify is not None:
    702 
    703        def trs_gen(tests):
    704            for test in tests:
    705                test.test_reflect_stringify = options.test_reflect_stringify
    706                # Even if the test is not normally expected to pass, we still
    707                # expect reflect-stringify to be able to handle it.
    708                test.expect = True
    709                test.random = False
    710                test.slow = False
    711                yield test
    712 
    713        test_gen = trs_gen(test_gen)
    714 
    715    if options.make_manifests:
    716        manifest.make_manifests(options.make_manifests, test_gen)
    717        sys.exit()
    718 
    719    # Create a new test list. Apply each TBPL configuration to every test.
    720    flags_list = None
    721    if options.tbpl:
    722        flags_list = get_jitflags("all")
    723    elif options.tbpl_debug:
    724        flags_list = get_jitflags("debug")
    725    else:
    726        flags_list = get_jitflags(options.jitflags, none=None)
    727 
    728    if flags_list:
    729 
    730        def flag_gen(tests):
    731            for test in tests:
    732                for jitflags in flags_list:
    733                    tmp_test = copy(test)
    734                    tmp_test.jitflags = copy(test.jitflags)
    735                    tmp_test.jitflags.extend(jitflags)
    736                    yield tmp_test
    737 
    738        test_count = test_count * len(flags_list)
    739        test_gen = flag_gen(test_gen)
    740 
    741    if options.test_file:
    742        paths = set()
    743        for test_file in options.test_file:
    744            paths |= set([line.strip() for line in open(test_file).readlines()])
    745        test_gen = (_ for _ in test_gen if _.path in paths)
    746 
    747    if options.no_extensions:
    748        pattern = os.sep + "extensions" + os.sep
    749        test_gen = (_ for _ in test_gen if pattern not in _.path)
    750 
    751    if not options.random:
    752        test_gen = (_ for _ in test_gen if not _.random)
    753 
    754    if options.run_only_skipped:
    755        options.run_skipped = True
    756        test_gen = (_ for _ in test_gen if not _.enable)
    757 
    758    if not options.run_slow_tests:
    759        test_gen = (_ for _ in test_gen if not _.slow)
    760 
    761    if options.repeat:
    762        test_gen = (test for test in test_gen for i in range(options.repeat))
    763        test_count *= options.repeat
    764 
    765    return test_count, test_gen
    766 
    767 
    768 def main():
    769    options, prefix, requested_paths, excluded_paths = parse_args()
    770    if options.js_shell is not None and not (
    771        isfile(options.js_shell) and os.access(options.js_shell, os.X_OK)
    772    ):
    773        if (
    774            platform.system() != "Windows"
    775            or isfile(options.js_shell)
    776            or not isfile(options.js_shell + ".exe")
    777            or not os.access(options.js_shell + ".exe", os.X_OK)
    778        ):
    779            print("Could not find executable shell: " + options.js_shell)
    780            return 1
    781 
    782    test_count, test_gen = load_tests(options, requested_paths, excluded_paths)
    783    test_environment = get_environment_overlay(options.js_shell, options.gc_zeal)
    784 
    785    if test_count == 0:
    786        print("no tests selected")
    787        return 1
    788 
    789    test_dir = dirname(abspath(__file__))
    790 
    791    if options.debug:
    792        if test_count > 1:
    793            print(
    794                "Multiple tests match command line arguments, debugger can only run one"
    795            )
    796            for tc in test_gen:
    797                print(f"    {tc.path}")
    798            return 2
    799 
    800        with changedir(test_dir), change_env(
    801            test_environment
    802        ), TemporaryDirectory() as tempdir:
    803            cmd = next(test_gen).get_command(prefix, tempdir)
    804            if options.show_cmd:
    805                print(list2cmdline(cmd))
    806            call(cmd)
    807        return 0
    808 
    809    # The test_gen generator is converted into a list in
    810    # run_all_tests. Go ahead and do it here so we can apply
    811    # chunking.
    812    #
    813    # If chunking is enabled, determine which tests are part of this chunk.
    814    # This code was adapted from testing/mochitest/runtestsremote.py.
    815    if options.total_chunks > 1:
    816        tests_per_chunk = math.ceil(test_count / float(options.total_chunks))
    817        start = int(round((options.this_chunk - 1) * tests_per_chunk))
    818        end = int(round(options.this_chunk * tests_per_chunk))
    819        test_gen = list(test_gen)[start:end]
    820 
    821    if options.remote:
    822        results = ResultsSink("jstests", options, test_count)
    823        try:
    824            from lib.remote import init_device, init_remote_dir
    825 
    826            device = init_device(options)
    827            tempdir = posixpath.join(options.remote_test_root, "tmp")
    828            jtd_tests = posixpath.join(options.remote_test_root, "tests", "tests")
    829            init_remote_dir(device, jtd_tests)
    830            device.push(test_dir, jtd_tests, timeout=600)
    831            device.chmod(jtd_tests, recursive=True)
    832            prefix[0] = options.js_shell
    833            if options.use_xdr:
    834                test_gen = xdr_annotate(test_gen, options)
    835            for test in test_gen:
    836                out = run_test_remote(test, device, prefix, tempdir, options)
    837                results.push(out)
    838            results.finish(True)
    839        except KeyboardInterrupt:
    840            results.finish(False)
    841 
    842        return 0 if results.all_passed() else 1
    843 
    844    with changedir(test_dir), change_env(
    845        test_environment
    846    ), TemporaryDirectory() as tempdir:
    847        results = ResultsSink("jstests", options, test_count)
    848        try:
    849            for out in run_all_tests(test_gen, prefix, tempdir, results.pb, options):
    850                results.push(out)
    851            results.finish(True)
    852        except KeyboardInterrupt:
    853            results.finish(False)
    854 
    855        return 0 if results.all_passed() else 1
    856 
    857    return 0
    858 
    859 
    860 def run_test_remote(test, device, prefix, tempdir, options):
    861    from mozdevice import ADBDevice, ADBProcessError
    862 
    863    cmd = test.get_command(prefix, tempdir)
    864    test_root_parent = os.path.dirname(test.root)
    865    jtd_tests = posixpath.join(options.remote_test_root, "tests")
    866    cmd = [_.replace(test_root_parent, jtd_tests) for _ in cmd]
    867 
    868    env = {"TZ": "PST8PDT", "LD_LIBRARY_PATH": os.path.dirname(prefix[0])}
    869 
    870    adb_cmd = ADBDevice._escape_command_line(cmd)
    871    start = datetime.now()
    872    try:
    873        # Allow ADBError or ADBTimeoutError to terminate the test run,
    874        # but handle ADBProcessError in order to support the use of
    875        # non-zero exit codes in the JavaScript shell tests.
    876        out = device.shell_output(
    877            adb_cmd, env=env, cwd=options.remote_test_root, timeout=int(options.timeout)
    878        )
    879        returncode = 0
    880    except ADBProcessError as e:
    881        # Treat ignorable intermittent adb communication errors as
    882        # skipped tests.
    883        out = str(e.adb_process.stdout)
    884        returncode = e.adb_process.exitcode
    885        re_ignore = re.compile(r"error: (closed|device .* not found)")
    886        if returncode == 1 and re_ignore.search(out):
    887            print(f"Skipping {test.path} due to ignorable adb error {out}")
    888            test.skip_if_cond = "true"
    889            returncode = test.SKIPPED_EXIT_STATUS
    890 
    891    elapsed = (datetime.now() - start).total_seconds()
    892 
    893    # We can't distinguish between stdout and stderr so we pass
    894    # the same buffer to both.
    895    return TestOutput(test, cmd, out, out, returncode, elapsed, False)
    896 
    897 
# Script entry point: propagate main()'s return value as the process
# exit status so CI can distinguish pass/fail/usage-error runs.
if __name__ == "__main__":
    sys.exit(main())