tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

jit_test.py (17899B)


      1 #!/usr/bin/env python
      2 # This Source Code Form is subject to the terms of the Mozilla Public
      3 # License, v. 2.0. If a copy of the MPL was not distributed with this
      4 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
      5 
      6 import math
      7 import os
      8 import platform
      9 import posixpath
     10 import shlex
     11 import subprocess
     12 import sys
     13 import traceback
     14 
# Alias of the builtin `input`; used by choose_item() below to read the
# interactive menu selection from stdin.
read_input = input

     17 
     18 def add_tests_dir_to_path():
     19    from os.path import dirname, exists, join, realpath
     20 
     21    js_src_dir = dirname(dirname(realpath(sys.argv[0])))
     22    assert exists(join(js_src_dir, "jsapi.h"))
     23    sys.path.insert(0, join(js_src_dir, "tests"))
     24 
     25 
# Must run before the `lib` imports below so the in-tree harness packages
# are importable.
add_tests_dir_to_path()

     28 from lib import jittests
     29 from lib.tempfile import TemporaryDirectory
     30 from lib.tests import (
     31    change_env,
     32    get_cpu_count,
     33    get_environment_overlay,
     34    get_jitflags,
     35    valid_jitflags,
     36 )
     37 
     38 
     39 def which(name):
     40    if name.find(os.path.sep) != -1:
     41        return os.path.abspath(name)
     42 
     43    for path in os.environ["PATH"].split(os.pathsep):
     44        full = os.path.join(path, name)
     45        if os.path.exists(full):
     46            return os.path.abspath(full)
     47 
     48    return name
     49 
     50 
     51 def choose_item(jobs, max_items):
     52    job_count = len(jobs)
     53 
     54    # Don't present a choice if there are too many tests
     55    if job_count > max_items:
     56        raise Exception("Too many jobs.")
     57 
     58    def display_job(job):
     59        flags = ""
     60        if len(job.jitflags) != 0:
     61            flags = "({})".format(" ".join(job.jitflags))
     62        return f"{job.path} {flags}"
     63 
     64    for i, job in enumerate(jobs, 1):
     65        print(f"{i}) {display_job(job)}")
     66 
     67    item = read_input("Which one:\n")
     68    try:
     69        item = int(item)
     70        if item > job_count or item < 1:
     71            raise Exception(f"Input isn't between 1 and {job_count}")
     72    except ValueError:
     73        raise Exception("Unrecognized input")
     74 
     75    return jobs[item - 1]
     76 
     77 
def main(argv):
    """Drive the jit-test suite: parse options, build the job list, run it.

    *argv* is the argument vector without the program name (sys.argv[1:]).
    Exits 0 when no tests match, 2 when tests fail, 1 when the shell binary
    given on the command line does not exist; the --one and --debugger paths
    replace this process via os.execvp instead of returning.
    """
    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    import argparse

    op = argparse.ArgumentParser(description="Run jit-test JS shell tests")
    op.add_argument(
        "-s",
        "--show-cmd",
        dest="show_cmd",
        action="store_true",
        help="show js shell command run",
    )
    op.add_argument(
        "-f",
        "--show-failed-cmd",
        dest="show_failed",
        action="store_true",
        help="show command lines of failed tests",
    )
    op.add_argument(
        "-o",
        "--show-output",
        dest="show_output",
        action="store_true",
        help="show output from js shell",
    )
    op.add_argument(
        "-F",
        "--failed-only",
        dest="failed_only",
        action="store_true",
        help="if --show-output is given, only print output for failed tests",
    )
    op.add_argument(
        "--no-show-failed",
        dest="no_show_failed",
        action="store_true",
        help="don't print output for failed tests (no-op with --show-output)",
    )
    op.add_argument(
        "-x",
        "--exclude",
        dest="exclude",
        default=[],
        action="append",
        help="exclude given test dir or path",
    )
    op.add_argument(
        "--exclude-from",
        dest="exclude_from",
        type=str,
        help="exclude each test dir or path in FILE",
    )
    op.add_argument(
        "--slow",
        dest="run_slow",
        action="store_true",
        help="also run tests marked as slow",
    )
    op.add_argument(
        "--no-slow",
        dest="run_slow",
        action="store_false",
        help="do not run tests marked as slow (the default)",
    )
    op.add_argument(
        "-t",
        "--timeout",
        dest="timeout",
        type=float,
        default=150.0,
        help="set test timeout in seconds",
    )
    op.add_argument(
        "--no-progress",
        dest="hide_progress",
        action="store_true",
        help="hide progress bar",
    )
    op.add_argument(
        "--tinderbox",
        dest="format",
        action="store_const",
        const="automation",
        help="Use automation-parseable output format",
    )
    op.add_argument(
        "--format",
        dest="format",
        default="none",
        choices=("automation", "none"),
        help="Output format (default %(default)s).",
    )
    op.add_argument(
        "--args",
        dest="shell_args",
        metavar="ARGS",
        default=[],
        action="append",
        help="extra args to pass to the JS shell",
    )
    op.add_argument(
        "--feature-args",
        dest="feature_args",
        metavar="ARGS",
        default=[],
        action="append",
        help="even more args to pass to the JS shell "
        "(for compatibility with jstests.py)",
    )
    op.add_argument(
        "-w",
        "--write-failures",
        dest="write_failures",
        metavar="FILE",
        help="Write a list of failed tests to [FILE]",
    )
    op.add_argument(
        "-C",
        "--check-output",
        action="store_true",
        dest="check_output",
        help="Run tests to check output for different jit-flags",
    )
    op.add_argument(
        "-r",
        "--read-tests",
        dest="read_tests",
        metavar="FILE",
        help="Run test files listed in [FILE]",
    )
    op.add_argument(
        "-R",
        "--retest",
        dest="retest",
        metavar="FILE",
        help="Retest using test list file [FILE]",
    )
    op.add_argument(
        "-g",
        "--debug",
        action="store_const",
        const="gdb",
        dest="debugger",
        help="Run a single test under the gdb debugger",
    )
    op.add_argument(
        "-G",
        "--debug-rr",
        action="store_const",
        const="rr",
        dest="debugger",
        help="Run a single test under the rr debugger",
    )
    op.add_argument(
        "--debugger", type=str, help="Run a single test under the specified debugger"
    )
    op.add_argument("--one", action="store_true", help="Run a single test only.")
    op.add_argument(
        "--valgrind",
        dest="valgrind",
        action="store_true",
        help="Enable the |valgrind| flag, if valgrind is in $PATH.",
    )
    op.add_argument(
        "--unusable-error-status",
        action="store_true",
        help="Ignore incorrect exit status on tests that should return nonzero.",
    )
    op.add_argument(
        "--valgrind-all",
        dest="valgrind_all",
        action="store_true",
        help="Run all tests with valgrind, if valgrind is in $PATH.",
    )
    op.add_argument(
        "--write-failure-output",
        dest="write_failure_output",
        action="store_true",
        help="With --write-failures=FILE, additionally write the"
        " output of failed tests to [FILE]",
    )
    op.add_argument(
        "--jitflags",
        dest="jitflags",
        default="none",
        choices=valid_jitflags(),
        help="IonMonkey option combinations (default %(default)s).",
    )
    op.add_argument(
        "--ion",
        dest="jitflags",
        action="store_const",
        const="ion",
        help="Run tests once with --ion-eager and once with"
        " --baseline-eager (equivalent to --jitflags=ion)",
    )
    op.add_argument(
        "--no-xdr",
        dest="use_xdr",
        action="store_false",
        help="Whether to disable caching of self-hosted parsed content in XDR format.",
    )
    op.add_argument(
        "--tbpl",
        dest="jitflags",
        action="store_const",
        const="all",
        help="Run tests with all IonMonkey option combinations"
        " (equivalent to --jitflags=all)",
    )
    op.add_argument(
        "-j",
        "--worker-count",
        dest="max_jobs",
        type=int,
        default=max(1, get_cpu_count()),
        help="Number of tests to run in parallel (default %(default)s).",
    )
    op.add_argument(
        "--remote", action="store_true", help="Run tests on a remote device"
    )
    op.add_argument(
        "--deviceIP",
        action="store",
        type=str,
        dest="device_ip",
        help="IP address of remote device to test",
    )
    op.add_argument(
        "--devicePort",
        action="store",
        type=int,
        dest="device_port",
        default=20701,
        help="port of remote device to test",
    )
    op.add_argument(
        "--deviceSerial",
        action="store",
        type=str,
        dest="device_serial",
        default=None,
        help="ADB device serial number of remote device to test",
    )
    op.add_argument(
        "--remoteTestRoot",
        dest="remote_test_root",
        action="store",
        type=str,
        default="/data/local/tmp/test_root",
        help="The remote directory to use as test root (e.g.  %(default)s)",
    )
    op.add_argument(
        "--localLib",
        dest="local_lib",
        action="store",
        type=str,
        help="The location of libraries to push -- preferably stripped",
    )
    op.add_argument(
        "--repeat", type=int, default=1, help="Repeat tests the given number of times."
    )
    op.add_argument("--this-chunk", type=int, default=1, help="The test chunk to run.")
    op.add_argument(
        "--total-chunks", type=int, default=1, help="The total number of test chunks."
    )
    op.add_argument(
        "--ignore-timeouts",
        dest="ignore_timeouts",
        metavar="FILE",
        help="Ignore timeouts of tests listed in [FILE]",
    )
    op.add_argument(
        "--retry-remote-timeouts",
        dest="timeout_retry",
        type=int,
        default=1,
        help="Number of time to retry timeout on remote devices",
    )
    op.add_argument(
        "--test-reflect-stringify",
        dest="test_reflect_stringify",
        help="instead of running tests, use them to test the "
        "Reflect.stringify code in specified file",
    )
    # --enable-webrender is ignored as it is not relevant for JIT
    # tests, but is required for harness compatibility.
    op.add_argument(
        "--enable-webrender",
        action="store_true",
        dest="enable_webrender",
        default=False,
        help=argparse.SUPPRESS,
    )
    op.add_argument("js_shell", metavar="JS_SHELL", help="JS shell to run tests with")
    op.add_argument(
        "-z", "--gc-zeal", help="GC zeal mode to use when running the shell"
    )
    op.add_argument(
        "--show-slow",
        action="store_true",
        help="Show tests taking longer than a minimum time (in seconds).",
    )
    op.add_argument(
        "--slow-test-threshold",
        type=float,
        default=5.0,
        help="Time in seconds a test can take until it is considered slow "
        "(default %(default)s).",
    )

    # parse_known_args: leftover (unrecognized) arguments become test paths.
    options, test_args = op.parse_known_args(argv)
    js_shell = which(options.js_shell)
    test_environment = get_environment_overlay(js_shell, options.gc_zeal)

    if not (os.path.isfile(js_shell) and os.access(js_shell, os.X_OK)):
        # On Windows, also accept `foo` when an executable `foo.exe` exists.
        if (
            platform.system() != "Windows"
            or os.path.isfile(js_shell)
            or not os.path.isfile(js_shell + ".exe")
            or not os.access(js_shell + ".exe", os.X_OK)
        ):
            op.error("shell is not executable: " + js_shell)

    # --retest reads the previous failure list and rewrites it on completion.
    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    # Build the test list from explicit paths and/or a list file; fall back
    # to the full suite when neither was given.
    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(jittests.TEST_DIR, line.strip("\n")))
            f.close()
        except OSError:
            # A missing list file is fine for --retest (nothing failed last
            # run); otherwise report the problem but keep going.
            if options.retest:
                read_all = True
            else:
                sys.stderr.write(
                    "Exception thrown trying to read test file"
                    f" '{options.read_tests}'\n"
                )
                traceback.print_exc()
                sys.stderr.write("---\n")

    if read_all:
        test_list = jittests.find_tests()

    # Merge --exclude-from file entries into the --exclude list, skipping
    # blank lines and '#' comments.
    if options.exclude_from:
        with open(options.exclude_from) as fh:
            for line in fh:
                line_exclude = line.strip()
                if not line_exclude.startswith("#") and len(line_exclude):
                    options.exclude.append(line_exclude)

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [test for test in test_list if test not in set(exclude_list)]

    if not test_list:
        print("No tests found matching command line arguments.", file=sys.stderr)
        sys.exit(0)

    test_list = [jittests.JitTest.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    if options.test_reflect_stringify is not None:
        for test in test_list:
            test.test_reflect_stringify = options.test_reflect_stringify

    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
    if options.total_chunks > 1:
        total_tests = len(test_list)
        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_list = test_list[start:end]

    if not test_list:
        print(
            "No tests found matching command line arguments after filtering.",
            file=sys.stderr,
        )
        sys.exit(0)

    # The full test list is ready. Now create copies for each JIT configuration.
    test_flags = get_jitflags(options.jitflags)

    test_list = [_ for test in test_list for _ in test.copy_variants(test_flags)]

    job_list = (test for test in test_list)
    job_count = len(test_list)

    if options.repeat:

        # Lazily yield every job `repeat` times: the first pass reuses the
        # original objects, later passes yield fresh copies.
        def repeat_copy(job_list_generator, repeat):
            job_list = list(job_list_generator)
            for i in range(repeat):
                for test in job_list:
                    if i == 0:
                        yield test
                    else:
                        yield test.copy()

        job_list = repeat_copy(job_list, options.repeat)
        job_count *= options.repeat

    # Replace the --ignore-timeouts file name with the set of paths it lists.
    if options.ignore_timeouts:
        read_all = False  # NOTE(review): read_all is not consulted after this point.
        try:
            with open(options.ignore_timeouts) as f:
                ignore = set()
                for line in f.readlines():
                    path = line.strip("\n")
                    ignore.add(path)
                options.ignore_timeouts = ignore
        except OSError:
            sys.exit("Error reading file: " + options.ignore_timeouts)
    else:
        options.ignore_timeouts = set()

    # Assemble the shell command prefix: shell path, extra args, and the
    # prologue script (remote runs load it from the device's test root).
    prefix = [js_shell] + split_extra_shell_args(
        options.shell_args + options.feature_args
    )
    prologue = os.path.join(jittests.LIB_DIR, "prologue.js")
    if options.remote:
        prologue = posixpath.join(options.remote_test_root, "lib", "prologue.js")

    prefix += ["-p", prologue]

    if options.one:
        # Run exactly one test (chosen interactively when several match) by
        # replacing this process with the shell invocation.
        try:
            jobs = list(job_list)
            tc = choose_item(jobs, max_items=50)
        except Exception as e:
            sys.exit(str(e))

        with change_env(test_environment):
            with TemporaryDirectory() as tempdir:
                cmd = tc.command(prefix, jittests.LIB_DIR, jittests.MODULE_DIR, tempdir)
                os.execvp(cmd[0], cmd)
        sys.exit()
    elif options.debugger:
        # Debuggers can only attach to a single test; ask which one if the
        # arguments matched several.
        if job_count > 1:
            print(
                "Multiple tests match command line arguments, debugger can only run one"
            )
            jobs = list(job_list)

            try:
                tc = choose_item(jobs, max_items=50)
            except Exception as e:
                sys.exit(str(e))
        else:
            tc = next(job_list)

        if options.debugger == "gdb":
            debug_cmd = ["gdb", "--args"]
        elif options.debugger == "lldb":
            debug_cmd = ["lldb", "--"]
        elif options.debugger == "rr":
            debug_cmd = ["rr", "record"]
        else:
            debug_cmd = options.debugger.split()

        with change_env(test_environment):
            with TemporaryDirectory() as tempdir:
                # rr first records the run in a child process, then replaces
                # this process with the replay; other debuggers exec directly.
                if options.debugger == "rr":
                    subprocess.call(
                        debug_cmd
                        + tc.command(
                            prefix, jittests.LIB_DIR, jittests.MODULE_DIR, tempdir
                        )
                    )
                    os.execvp("rr", ["rr", "replay"])
                else:
                    os.execvp(
                        debug_cmd[0],
                        debug_cmd
                        + tc.command(
                            prefix, jittests.LIB_DIR, jittests.MODULE_DIR, tempdir
                        ),
                    )
        sys.exit()

    # Normal (non-interactive) run: exit 2 on test failures; an OSError on a
    # missing shell binary becomes a clean exit 1, anything else re-raises.
    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests(job_list, job_count, prefix, options, remote=True)
        else:
            with change_env(test_environment):
                ok = jittests.run_tests(job_list, job_count, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print(
                f"JS shell argument: file does not exist: '{prefix[0]}'",
                file=sys.stderr,
            )
            sys.exit(1)
        else:
            raise
    597 
    598 
    599 def split_extra_shell_args(args):
    600    result = []
    601    for option in args:
    602        result.extend(shlex.split(option))
    603    return result
    604 
    605 
# Script entry point: forward all CLI arguments (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])