tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

jittests.py (30070B)


      1 #!/usr/bin/env python
      2 # This Source Code Form is subject to the terms of the Mozilla Public
      3 # License, v. 2.0. If a copy of the MPL was not distributed with this
      4 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
      5 
      6 
      7 # jit_test.py -- Python harness for JavaScript trace tests.
      8 
      9 import os
     10 import re
     11 import sys
     12 import traceback
     13 from collections import namedtuple
     14 from enum import Enum
     15 
     16 if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
     17    from .tasks_unix import run_all_tests
     18 else:
     19    from .tasks_win import run_all_tests
     20 
     21 from .progressbar import NullProgressBar, ProgressBar
     22 from .results import escape_cmdline
     23 from .structuredlog import TestLogger
     24 from .tempfile import TemporaryDirectory
     25 
# Directory layout, all derived from this file's own location.
TESTS_LIB_DIR = os.path.dirname(os.path.abspath(__file__))
# Two levels up from the lib dir (presumably js/src -- confirm against tree layout).
JS_DIR = os.path.dirname(os.path.dirname(TESTS_LIB_DIR))
TOP_SRC_DIR = os.path.dirname(os.path.dirname(JS_DIR))
TEST_DIR = os.path.join(JS_DIR, "jit-test", "tests")
# Trailing separator kept so include names can be appended directly (libdir + inc).
LIB_DIR = os.path.join(JS_DIR, "jit-test", "lib") + os.path.sep
MODULE_DIR = os.path.join(JS_DIR, "jit-test", "modules") + os.path.sep
# File name of the self-hosted XDR cache placed in the harness temp dir.
SHELL_XDR = "shell.xdr"
     33 
     34 
     35 class OutputStatus(Enum):
     36    OK = 1
     37    SKIPPED = 2
     38    FAILED = 3
     39 
     40    def __bool__(self):
     41        return self != OutputStatus.FAILED
     42 
     43 
     44 # Backported from Python 3.1 posixpath.py
     45 
     46 
     47 def _relpath(path, start=None):
     48    """Return a relative version of a path"""
     49 
     50    if not path:
     51        raise ValueError("no path specified")
     52 
     53    if start is None:
     54        start = os.curdir
     55 
     56    start_list = os.path.abspath(start).split(os.sep)
     57    path_list = os.path.abspath(path).split(os.sep)
     58 
     59    # Work out how much of the filepath is shared by start and path.
     60    i = len(os.path.commonprefix([start_list, path_list]))
     61 
     62    rel_list = [os.pardir] * (len(start_list) - i) + path_list[i:]
     63    if not rel_list:
     64        return os.curdir
     65    return os.path.join(*rel_list)
     66 
     67 
     68 # Mapping of Python chars to their javascript string representation.
     69 QUOTE_MAP = {
     70    "\\": "\\\\",
     71    "\b": "\\b",
     72    "\f": "\\f",
     73    "\n": "\\n",
     74    "\r": "\\r",
     75    "\t": "\\t",
     76    "\v": "\\v",
     77 }
     78 
     79 # Quote the string S, javascript style.
     80 
     81 
     82 def js_quote(quote, s):
     83    result = quote
     84    for c in s:
     85        if c == quote:
     86            result += "\\" + quote
     87        elif c in QUOTE_MAP:
     88            result += QUOTE_MAP[c]
     89        else:
     90            result += c
     91    result += quote
     92    return result
     93 
     94 
     95 os.path.relpath = _relpath
     96 
     97 
     98 def extend_condition(condition, value):
     99    if condition:
    100        condition += " || "
    101    condition += f"({value})"
    102    return condition
    103 
    104 
    105 class JitTest:
    106    VALGRIND_CMD = []
    107    paths = (d for d in os.environ["PATH"].split(os.pathsep))
    108    valgrinds = (os.path.join(d, "valgrind") for d in paths)
    109    if any(os.path.exists(p) for p in valgrinds):
    110        VALGRIND_CMD = [
    111            "valgrind",
    112            "-q",
    113            "--smc-check=all-non-file",
    114            "--error-exitcode=1",
    115            "--gen-suppressions=all",
    116            "--show-possibly-lost=no",
    117            "--leak-check=full",
    118        ]
    119        if os.uname()[0] == "Darwin":
    120            VALGRIND_CMD.append("--dsymutil=yes")
    121 
    122    del paths
    123    del valgrinds
    124 
    125    def __init__(self, path):
    126        # Absolute path of the test file.
    127        self.path = path
    128 
    129        # Path relative to the top mozilla/ directory.
    130        self.relpath_top = os.path.relpath(path, TOP_SRC_DIR)
    131 
    132        # Path relative to mozilla/js/src/jit-test/tests/.
    133        self.relpath_tests = os.path.relpath(path, TEST_DIR)
    134 
    135        # jit flags to enable
    136        self.jitflags = []
    137        # True means the test is slow-running
    138        self.slow = False
    139        # Heavy tests will never run alongside other heavy tests
    140        self.heavy = False
    141        # True means that OOM is not considered a failure
    142        self.allow_oom = False
    143        # True means CrashAtUnhandlableOOM is not considered a failure
    144        self.allow_unhandlable_oom = False
    145        # True means that hitting recursion the limits is not considered a failure.
    146        self.allow_overrecursed = False
    147        # True means run under valgrind
    148        self.valgrind = False
    149        # True means force Pacific time for the test
    150        self.tz_pacific = False
    151        # Additional files to include, in addition to prologue.js
    152        self.other_lib_includes = []
    153        self.other_script_includes = []
    154        # List of other configurations to test with.
    155        self.test_also = []
    156        # List of other configurations to test with all existing variants.
    157        self.test_join = []
    158        # Errors to expect and consider passing
    159        self.expect_error = ""
    160        # Exit status to expect from shell
    161        self.expect_status = 0
    162        # Exit status or error output.
    163        self.expect_crash = False
    164        self.is_module = False
    165        # Reflect.stringify implementation to test
    166        self.test_reflect_stringify = None
    167        # Use self-hosted XDR instead of parsing the source stored in the binary.
    168        self.selfhosted_xdr_path = None
    169        self.selfhosted_xdr_mode = "off"
    170 
    171        # Skip-if condition. We don't have a xulrunner, but we can ask the shell
    172        # directly.
    173        self.skip_if_cond = ""
    174        self.skip_variant_if_cond = {}
    175 
    176        # Expected by the test runner. Always true for jit-tests.
    177        self.enable = True
    178 
    179    def copy(self):
    180        t = JitTest(self.path)
    181        t.jitflags = self.jitflags[:]
    182        t.slow = self.slow
    183        t.heavy = self.heavy
    184        t.allow_oom = self.allow_oom
    185        t.allow_unhandlable_oom = self.allow_unhandlable_oom
    186        t.allow_overrecursed = self.allow_overrecursed
    187        t.valgrind = self.valgrind
    188        t.tz_pacific = self.tz_pacific
    189        t.other_lib_includes = self.other_lib_includes[:]
    190        t.other_script_includes = self.other_script_includes[:]
    191        t.test_also = self.test_also
    192        t.test_join = self.test_join
    193        t.expect_error = self.expect_error
    194        t.expect_status = self.expect_status
    195        t.expect_crash = self.expect_crash
    196        t.test_reflect_stringify = self.test_reflect_stringify
    197        t.selfhosted_xdr_path = self.selfhosted_xdr_path
    198        t.selfhosted_xdr_mode = self.selfhosted_xdr_mode
    199        t.enable = True
    200        t.is_module = self.is_module
    201        t.skip_if_cond = self.skip_if_cond
    202        t.skip_variant_if_cond = self.skip_variant_if_cond
    203        return t
    204 
    205    def copy_and_extend_jitflags(self, variant):
    206        t = self.copy()
    207        t.jitflags.extend(variant)
    208        for flags in variant:
    209            if flags in self.skip_variant_if_cond:
    210                t.skip_if_cond = extend_condition(
    211                    t.skip_if_cond, self.skip_variant_if_cond[flags]
    212                )
    213        return t
    214 
    215    def copy_variants(self, variants):
    216        # Append variants to be tested in addition to the current set of tests.
    217        variants = variants + self.test_also
    218 
    219        # For each existing variant, duplicates it for each list of options in
    220        # test_join.  This will multiply the number of variants by 2 for set of
    221        # options.
    222        for join_opts in self.test_join:
    223            variants = variants + [opts + join_opts for opts in variants]
    224 
    225        # For each list of jit flags, make a copy of the test.
    226        return [self.copy_and_extend_jitflags(v) for v in variants]
    227 
    228    COOKIE = b"|jit-test|"
    229 
    230    # We would use 500019 (5k19), but quit() only accepts values up to 127, due to fuzzers
    231    SKIPPED_EXIT_STATUS = 59
    232    Directives = {}
    233 
    234    @classmethod
    235    def find_directives(cls, file_name):
    236        meta = ""
    237        line = open(file_name, "rb").readline()
    238        i = line.find(cls.COOKIE)
    239        if i != -1:
    240            meta = ";" + line[i + len(cls.COOKIE) :].decode(errors="strict").strip("\n")
    241        return meta
    242 
    243    @classmethod
    244    def from_file(cls, path, options):
    245        test = cls(path)
    246 
    247        # If directives.txt exists in the test's directory then it may
    248        # contain metainformation that will be catenated with
    249        # whatever's in the test file.  The form of the directive in
    250        # the directive file is the same as in the test file.  Only
    251        # the first line is considered, just as for the test file.
    252 
    253        dir_meta = ""
    254        dir_name = os.path.dirname(path)
    255        if dir_name in cls.Directives:
    256            dir_meta = cls.Directives[dir_name]
    257        else:
    258            meta_file_name = os.path.join(dir_name, "directives.txt")
    259            if os.path.exists(meta_file_name):
    260                dir_meta = cls.find_directives(meta_file_name)
    261            cls.Directives[dir_name] = dir_meta
    262 
    263        filename, file_extension = os.path.splitext(path)
    264        meta = cls.find_directives(path)
    265 
    266        if meta != "" or dir_meta != "":
    267            meta = meta + dir_meta
    268            parts = meta.split(";")
    269            for part in parts:
    270                part = part.strip()
    271                if not part:
    272                    continue
    273                name, _, value = part.partition(":")
    274                if value:
    275                    value = value.strip()
    276                    if name == "error":
    277                        test.expect_error = value
    278                    elif name == "exitstatus":
    279                        try:
    280                            status = int(value, 0)
    281                            if status == test.SKIPPED_EXIT_STATUS:
    282                                print(
    283                                    "warning: jit-tests uses {} as a sentinel"
    284                                    " return value {}",
    285                                    test.SKIPPED_EXIT_STATUS,
    286                                    path,
    287                                )
    288                            else:
    289                                test.expect_status = status
    290                        except ValueError:
    291                            print(f"warning: couldn't parse exit status {value}")
    292                    elif name == "thread-count":
    293                        try:
    294                            test.jitflags.append(f"--thread-count={int(value, 0)}")
    295                        except ValueError:
    296                            print(f"warning: couldn't parse thread-count {value}")
    297                    elif name == "include":
    298                        test.other_lib_includes.append(value)
    299                    elif name == "local-include":
    300                        test.other_script_includes.append(value)
    301                    elif name == "skip-if":
    302                        test.skip_if_cond = extend_condition(test.skip_if_cond, value)
    303                    elif name == "skip-variant-if":
    304                        try:
    305                            [variant, condition] = value.split(",")
    306                            test.skip_variant_if_cond[variant] = extend_condition(
    307                                test.skip_if_cond, condition
    308                            )
    309                        except ValueError:
    310                            print("warning: couldn't parse skip-variant-if")
    311                    else:
    312                        print(
    313                            f"{path}: warning: unrecognized |jit-test| attribute {part}"
    314                        )
    315                elif name == "slow":
    316                    test.slow = True
    317                elif name == "heavy":
    318                    test.heavy = True
    319                elif name == "allow-oom":
    320                    test.allow_oom = True
    321                elif name == "allow-unhandlable-oom":
    322                    test.allow_unhandlable_oom = True
    323                elif name == "allow-overrecursed":
    324                    test.allow_overrecursed = True
    325                elif name == "valgrind":
    326                    test.valgrind = options.valgrind
    327                elif name == "tz-pacific":
    328                    test.tz_pacific = True
    329                elif name.startswith("test-also="):
    330                    test.test_also.append(re.split(r"\s+", name[len("test-also=") :]))
    331                elif name.startswith("test-join="):
    332                    test.test_join.append(re.split(r"\s+", name[len("test-join=") :]))
    333                elif name == "module":
    334                    test.is_module = True
    335                elif name == "crash":
    336                    # Crashes are only allowed in self-test, as it is
    337                    # intended to verify that our testing infrastructure
    338                    # works, and not meant as a way to accept temporary
    339                    # failing tests. These tests should either be fixed or
    340                    # skipped.
    341                    assert "self-test" in path, (
    342                        f"{path}: has an unexpected crash annotation."
    343                    )
    344                    test.expect_crash = True
    345                elif name.startswith("--"):
    346                    # // |jit-test| --ion-gvn=off; --no-sse4
    347                    test.jitflags.append(name)
    348                elif name.startswith("-P"):
    349                    prefAndValue = name.split()
    350                    assert len(prefAndValue) == 2, f"{name}: failed to parse preference"
    351                    # // |jit-test| -P pref(=value)?
    352                    test.jitflags.append("--setpref=" + prefAndValue[1])
    353                else:
    354                    print(f"{path}: warning: unrecognized |jit-test| attribute {part}")
    355 
    356        if options.valgrind_all:
    357            test.valgrind = True
    358 
    359        if options.test_reflect_stringify is not None:
    360            test.expect_error = ""
    361            test.expect_status = 0
    362 
    363        return test
    364 
    365    def command(self, prefix, libdir, moduledir, tempdir, remote_prefix=None):
    366        path = self.path
    367        if remote_prefix:
    368            path = self.path.replace(TEST_DIR, remote_prefix)
    369 
    370        scriptdir_var = os.path.dirname(path)
    371        if not scriptdir_var.endswith("/"):
    372            scriptdir_var += "/"
    373 
    374        # Note: The tempdir provided as argument is managed by the caller
    375        # should remain alive as long as the test harness. Therefore, the XDR
    376        # content of the self-hosted code would be accessible to all JS Shell
    377        # instances.
    378        self.selfhosted_xdr_path = os.path.join(tempdir, SHELL_XDR)
    379 
    380        # Platforms where subprocess immediately invokes exec do not care
    381        # whether we use double or single quotes. On windows and when using
    382        # a remote device, however, we have to be careful to use the quote
    383        # style that is the opposite of what the exec wrapper uses.
    384        if remote_prefix:
    385            quotechar = '"'
    386        else:
    387            quotechar = "'"
    388 
    389        # Don't merge the expressions: We want separate -e arguments to avoid
    390        # semicolons in the command line, bug 1351607.
    391        exprs = [
    392            f"const platform={js_quote(quotechar, sys.platform)}",
    393            f"const libdir={js_quote(quotechar, libdir)}",
    394            f"const scriptdir={js_quote(quotechar, scriptdir_var)}",
    395        ]
    396 
    397        # We may have specified '-a' or '-d' twice: once via --jitflags, once
    398        # via the "|jit-test|" line.  Remove dups because they are toggles.
    399        # Note: |dict.fromkeys(flags)| is similar to |set(flags)| but it
    400        # preserves order.
    401        cmd = prefix + []
    402        cmd += list(dict.fromkeys(self.jitflags))
    403        # Handle selfhosted XDR file.
    404        if self.selfhosted_xdr_mode != "off":
    405            cmd += [
    406                "--selfhosted-xdr-path",
    407                self.selfhosted_xdr_path,
    408                "--selfhosted-xdr-mode",
    409                self.selfhosted_xdr_mode,
    410            ]
    411        for expr in exprs:
    412            cmd += ["-e", expr]
    413        for inc in self.other_lib_includes:
    414            cmd += ["-f", libdir + inc]
    415        for inc in self.other_script_includes:
    416            cmd += ["-f", scriptdir_var + inc]
    417        if self.skip_if_cond:
    418            cmd += [
    419                "-e",
    420                f"if ({self.skip_if_cond}) quit({self.SKIPPED_EXIT_STATUS})",
    421            ]
    422        cmd += ["--module-load-path", moduledir]
    423        if self.is_module:
    424            cmd += ["--module", path]
    425        elif self.test_reflect_stringify is None:
    426            cmd += ["-f", path]
    427        else:
    428            cmd += ["--", self.test_reflect_stringify, "--check", path]
    429 
    430        if self.valgrind:
    431            cmd = self.VALGRIND_CMD + cmd
    432 
    433        if self.allow_unhandlable_oom or self.expect_crash:
    434            cmd += ["--suppress-minidump"]
    435 
    436        return cmd
    437 
    438    def get_command(self, prefix, tempdir):
    439        """Shim for the test runner."""
    440        return self.command(prefix, LIB_DIR, MODULE_DIR, tempdir)
    441 
    442 
    443 def find_tests(substring=None):
    444    ans = []
    445    for dirpath, dirnames, filenames in os.walk(TEST_DIR):
    446        dirnames.sort()
    447        filenames.sort()
    448        if dirpath == ".":
    449            continue
    450 
    451        for filename in filenames:
    452            if not filename.endswith(".js"):
    453                continue
    454            if filename in ("shell.js", "browser.js"):
    455                continue
    456            test = os.path.join(dirpath, filename)
    457            if substring is None or substring in os.path.relpath(test, TEST_DIR):
    458                ans.append(test)
    459    return ans
    460 
    461 
    462 def check_output(out, err, rc, timed_out, test, options):
    463    # Allow skipping to compose with other expected results
    464    if test.skip_if_cond:
    465        if rc == test.SKIPPED_EXIT_STATUS:
    466            return OutputStatus.SKIPPED
    467 
    468    if timed_out:
    469        relpath = os.path.normpath(test.relpath_tests).replace(os.sep, "/")
    470        if relpath in options.ignore_timeouts:
    471            return OutputStatus.OK
    472        return OutputStatus.FAILED
    473 
    474    if test.expect_error:
    475        # The shell exits with code 3 on uncaught exceptions.
    476        if rc != 3:
    477            return OutputStatus.FAILED
    478 
    479        return test.expect_error in err
    480 
    481    for line in out.split("\n"):
    482        if line.startswith("Trace stats check failed"):
    483            return OutputStatus.FAILED
    484 
    485    for line in err.split("\n"):
    486        if "Assertion failed:" in line:
    487            return OutputStatus.FAILED
    488 
    489    if test.expect_crash:
    490        # Python 3 on Windows interprets process exit codes as unsigned
    491        # integers, where Python 2 used to allow signed integers. Account for
    492        # each possibility here.
    493        if sys.platform == "win32" and rc in (3 - 2**31, 3 + 2**31):
    494            return OutputStatus.OK
    495 
    496        if sys.platform != "win32" and rc == -11:
    497            return OutputStatus.OK
    498 
    499        # When building with ASan enabled, ASan will convert the -11 returned
    500        # value to 1. As a work-around we look for the error output which
    501        # includes the crash reason.
    502        if rc == 1 and ("Hit MOZ_CRASH" in err or "Assertion failure:" in err):
    503            return OutputStatus.OK
    504 
    505        # When running jittests on Android, SEGV results in a return code of
    506        # 128 + 11 = 139. Due to a bug in tinybox, we have to check for 138 as
    507        # well.
    508        if rc in {139, 138}:
    509            return OutputStatus.OK
    510 
    511        # Crashing test should always crash as expected, otherwise this is an
    512        # error. The JS shell crash() function can be used to force the test
    513        # case to crash in unexpected configurations.
    514        return OutputStatus.FAILED
    515 
    516    if rc != test.expect_status:
    517        # Allow a non-zero exit code if we want to allow OOM, but only if we
    518        # actually got OOM.
    519        if (
    520            test.allow_oom
    521            and "out of memory" in err
    522            and "Assertion failure" not in err
    523            and "MOZ_CRASH" not in err
    524        ):
    525            return OutputStatus.OK
    526 
    527        # Allow a non-zero exit code if we want to allow unhandlable OOM, but
    528        # only if we actually got unhandlable OOM.
    529        if test.allow_unhandlable_oom and "MOZ_CRASH([unhandlable oom]" in err:
    530            return OutputStatus.OK
    531 
    532        # Allow a non-zero exit code if we want to all too-much-recursion and
    533        # the test actually over-recursed.
    534        if (
    535            test.allow_overrecursed
    536            and "too much recursion" in err
    537            and "Assertion failure" not in err
    538        ):
    539            return OutputStatus.OK
    540 
    541        # Allow a zero exit code if we are running under a sanitizer that
    542        # forces the exit status.
    543        if test.expect_status != 0 and options.unusable_error_status:
    544            return OutputStatus.OK
    545 
    546        return OutputStatus.FAILED
    547 
    548    return OutputStatus.OK
    549 
    550 
    551 def print_automation_format(ok, res, slog):
    552    # Output test failures in a parsable format suitable for automation, eg:
    553    # TEST-RESULT | filename.js | Failure description (code N, args "--foobar")
    554    #
    555    # Example:
    556    # TEST-PASS | foo/bar/baz.js | (code 0, args "--ion-eager")
    557    # TEST-UNEXPECTED-FAIL | foo/bar/baz.js | TypeError: or something (code -9, args "--no-ion")
    558    # INFO exit-status     : 3
    559    # INFO timed-out       : False
    560    # INFO stdout          > foo
    561    # INFO stdout          > bar
    562    # INFO stdout          > baz
    563    # INFO stderr         2> TypeError: or something
    564    # TEST-UNEXPECTED-FAIL | jit_test.py: Test execution interrupted by user
    565    result = "TEST-PASS" if ok else "TEST-UNEXPECTED-FAIL"
    566    message = "Success" if ok else res.describe_failure()
    567    jitflags = " ".join(res.test.jitflags)
    568    print(
    569        f'{result} | {res.test.relpath_top} | {message} (code {res.rc}, args "{jitflags}") [{res.dt:.1f} s]'
    570    )
    571 
    572    details = {
    573        "message": message,
    574        "extra": {
    575            "jitflags": jitflags,
    576        },
    577    }
    578    if res.extra:
    579        details["extra"].update(res.extra)
    580    slog.test(res.test.relpath_tests, "PASS" if ok else "FAIL", res.dt, **details)
    581 
    582    # For failed tests, print as much information as we have, to aid debugging.
    583    if ok:
    584        return
    585    print(f"INFO exit-status     : {res.rc}")
    586    print(f"INFO timed-out       : {res.timed_out}")
    587    warnings = []
    588    for line in res.out.splitlines():
    589        # See Bug 1868693
    590        if line.startswith("WARNING") and "unused DT entry" in line:
    591            warnings.append(line)
    592            continue
    593        print("INFO stdout          > " + line.strip())
    594    for line in res.err.splitlines():
    595        # See Bug 1868693
    596        if line.startswith("WARNING") and "unused DT entry" in line:
    597            warnings.append(line)
    598            continue
    599        print("INFO stderr         2> " + line.strip())
    600    for line in warnings:
    601        print("INFO (warn-stderr)  2> " + line.strip())
    602 
    603 
    604 def print_test_summary(num_tests, failures, complete, slow_tests, doing, options):
    605    def test_details(res):
    606        if options.show_failed:
    607            return escape_cmdline(res.cmd)
    608        return " ".join(res.test.jitflags + [res.test.relpath_tests])
    609 
    610    if failures:
    611        if options.write_failures:
    612            try:
    613                out = open(options.write_failures, "w")
    614                # Don't write duplicate entries when we are doing multiple
    615                # failures per job.
    616                written = set()
    617                for res in failures:
    618                    if res.test.path not in written:
    619                        out.write(os.path.relpath(res.test.path, TEST_DIR) + "\n")
    620                        if options.write_failure_output:
    621                            out.write(res.out)
    622                            out.write(res.err)
    623                            out.write("Exit code: " + str(res.rc) + "\n")
    624                        written.add(res.test.path)
    625                out.close()
    626            except OSError:
    627                sys.stderr.write(
    628                    "Exception thrown trying to write failure"
    629                    f" file '{options.write_failures}'\n"
    630                )
    631                traceback.print_exc()
    632                sys.stderr.write("---\n")
    633 
    634        print("FAILURES:")
    635        for res in failures:
    636            if not res.timed_out:
    637                print("    " + test_details(res))
    638 
    639        print("TIMEOUTS:")
    640        for res in failures:
    641            if res.timed_out:
    642                print("    " + test_details(res))
    643    else:
    644        print(
    645            "PASSED ALL"
    646            + ("" if complete else f" (partial run -- interrupted by user {doing})")
    647        )
    648 
    649    if options.format == "automation":
    650        num_failures = len(failures) if failures else 0
    651        print("Result summary:")
    652        print(f"Passed: {num_tests - num_failures:d}")
    653        print(f"Failed: {num_failures:d}")
    654 
    655    if num_tests != 0 and options.show_slow:
    656        threshold = options.slow_test_threshold
    657        fraction_fast = 1 - len(slow_tests) / num_tests
    658        print(f"{fraction_fast * 100:5.2f}% of tests ran in under {threshold}s")
    659 
    660        print(f"Slowest tests that took longer than {threshold}s:")
    661        slow_tests.sort(key=lambda res: res.dt, reverse=True)
    662        any = False
    663        for i in range(min(len(slow_tests), 20)):
    664            res = slow_tests[i]
    665            print(f"  {res.dt:6.2f} {test_details(res)}")
    666            any = True
    667        if not any:
    668            print("None")
    669 
    670    return not failures
    671 
    672 
    673 def create_progressbar(num_tests, options):
    674    if (
    675        not options.hide_progress
    676        and not options.show_cmd
    677        and ProgressBar.conservative_isatty()
    678    ):
    679        fmt = [
    680            {"value": "PASS", "color": "green"},
    681            {"value": "FAIL", "color": "red"},
    682            {"value": "TIMEOUT", "color": "blue"},
    683            {"value": "SKIP", "color": "brightgray"},
    684        ]
    685        return ProgressBar(num_tests, fmt)
    686    return NullProgressBar()
    687 
    688 
def process_test_results(results, num_tests, pb, options, slog):
    """Consume test results as they arrive, update the progress bar, and
    print the final summary.

    results is an iterable of per-test result objects; pb is the progress
    bar; slog the structured logger (used for format == "automation").
    Returns the boolean from print_test_summary (True when all passed).
    """
    failures = []
    timeouts = 0
    skipped = 0
    complete = False
    # Maps test path -> first observed stdout, for --check-output runs.
    output_dict = {}
    # Human-readable marker of how far we got, shown if interrupted.
    doing = "before starting"
    slow_tests = []

    # Nothing to consume: finish the bar and summarize immediately.
    if num_tests == 0:
        pb.finish(True)
        complete = True
        return print_test_summary(
            num_tests, failures, complete, slow_tests, doing, options
        )

    try:
        for i, res in enumerate(results):
            status = check_output(
                res.out, res.err, res.rc, res.timed_out, res.test, options
            )

            # Decide whether to echo the test's own output: passes only when
            # asked for and not restricted to failures; failures unless
            # explicitly suppressed.
            if status:
                show_output = options.show_output and not options.failed_only
            else:
                show_output = options.show_output or not options.no_show_failed

            if show_output:
                pb.beginline()
                sys.stdout.write(res.out)
                sys.stdout.write(res.err)
                sys.stdout.write(f"Exit code: {res.rc}\n")

            # Valgrind findings land on stderr; surface them even when the
            # test's output is otherwise hidden.
            if res.test.valgrind and not show_output:
                pb.beginline()
                sys.stdout.write(res.err)

            # With --check-output, a test run under several variants must
            # produce identical stdout each time.
            if options.check_output:
                if res.test.path in output_dict.keys():
                    if output_dict[res.test.path] != res.out:
                        pb.message(f"FAIL - OUTPUT DIFFERS {res.test.relpath_tests}")
                else:
                    output_dict[res.test.path] = res.out

            doing = f"after {res.test.relpath_tests}"
            if status == OutputStatus.SKIPPED:
                skipped += 1
            elif status == OutputStatus.FAILED:
                failures.append(res)
                if res.timed_out:
                    pb.message(f"TIMEOUT - {res.test.relpath_tests}")
                    timeouts += 1
                else:
                    pb.message(f"FAIL - {res.test.relpath_tests}")

            if options.format == "automation":
                print_automation_format(status, res, slog)

            n = i + 1
            pb.update(
                n,
                {
                    "PASS": n - len(failures),
                    "FAIL": len(failures),
                    "TIMEOUT": timeouts,
                    "SKIP": skipped,
                },
            )

            if res.dt > options.slow_test_threshold:
                slow_tests.append(res)
        # Only reached when the loop ran to the end without interruption.
        complete = True
    except KeyboardInterrupt:
        print(
            "TEST-UNEXPECTED-FAIL | jit_test.py"
            + " : Test execution interrupted by user"
        )

    pb.finish(True)
    return print_test_summary(num_tests, failures, complete, slow_tests, doing, options)
    769 
    770 
    771 def run_tests(tests, num_tests, prefix, options, remote=False):
    772    slog = None
    773    if options.format == "automation":
    774        slog = TestLogger("jittests")
    775        slog.suite_start()
    776 
    777    if remote:
    778        ok = run_tests_remote(tests, num_tests, prefix, options, slog)
    779    else:
    780        ok = run_tests_local(tests, num_tests, prefix, options, slog)
    781 
    782    if slog:
    783        slog.suite_end()
    784 
    785    return ok
    786 
    787 
    788 def run_tests_local(tests, num_tests, prefix, options, slog):
    789    # The jstests tasks runner requires the following options. The names are
    790    # taken from the jstests options processing code, which are frequently
    791    # subtly different from the options jit-tests expects. As such, we wrap
    792    # them here, as needed.
    793    AdaptorOptions = namedtuple(
    794        "AdaptorOptions",
    795        [
    796            "worker_count",
    797            "passthrough",
    798            "timeout",
    799            "output_fp",
    800            "hide_progress",
    801            "run_skipped",
    802            "show_cmd",
    803            "use_xdr",
    804        ],
    805    )
    806    shim_options = AdaptorOptions(
    807        options.max_jobs,
    808        False,
    809        options.timeout,
    810        sys.stdout,
    811        False,
    812        True,
    813        options.show_cmd,
    814        options.use_xdr,
    815    )
    816 
    817    with TemporaryDirectory() as tempdir:
    818        pb = create_progressbar(num_tests, options)
    819        gen = run_all_tests(tests, prefix, tempdir, pb, shim_options)
    820        ok = process_test_results(gen, num_tests, pb, options, slog)
    821    return ok
    822 
    823 
    824 def run_tests_remote(tests, num_tests, prefix, options, slog):
    825    # Setup device with everything needed to run our tests.
    826    from mozdevice import ADBError, ADBTimeoutError
    827 
    828    from .tasks_adb_remote import get_remote_results
    829 
    830    # Run all tests.
    831    pb = create_progressbar(num_tests, options)
    832    try:
    833        gen = get_remote_results(tests, prefix, pb, options)
    834        ok = process_test_results(gen, num_tests, pb, options, slog)
    835    except (ADBError, ADBTimeoutError):
    836        print("TEST-UNEXPECTED-FAIL | jit_test.py" + " : Device error during test")
    837        raise
    838    return ok
    839 
    840 
# This module is a library used by the jit_test.py driver; running it
# directly just points the user at the real entry point.
if __name__ == "__main__":
    print("Use ../jit-test/jit_test.py to run these tests.")