tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

manifest.py (20022B)


      1 # Library for JSTest manifests.
      2 #
      3 # This includes classes for representing and parsing JS manifests.
      4 
      5 import os
      6 import posixpath
      7 import re
      8 import sys
      9 from subprocess import PIPE, Popen
     10 
     11 from .remote import init_device
     12 from .tests import RefTestCase
     13 
     14 
     15 def split_path_into_dirs(path):
     16    dirs = [path]
     17 
     18    while True:
     19        path, tail = os.path.split(path)
     20        if not tail:
     21            break
     22        dirs.append(path)
     23    return dirs
     24 
     25 
     26 class XULInfo:
     27    def __init__(self, abi, os, isdebug):
     28        self.abi = abi
     29        self.os = os
     30        self.isdebug = isdebug
     31 
     32    def as_js(self):
     33        """Return JS that when executed sets up variables so that JS expression
     34        predicates on XUL build info evaluate properly."""
     35 
     36        return f"""
     37 var winWidget = {str(self.os == "WINNT").lower()};
     38 var gtkWidget = {str(self.os == "Linux").lower()};
     39 var cocoaWidget = {str(self.os == "Darwin").lower()};
     40 var is64Bit = {str("x86-" not in self.abi).lower()};
     41 var xulRuntime = {{ shell: true }};
     42 var release_or_beta = getBuildConfiguration('release_or_beta');
     43 var isDebugBuild={str(self.isdebug).lower()};
     44 var Android={str(self.os == "Android").lower()};
     45 """.replace("\n", "")
     46 
     47    @classmethod
     48    def create(cls, jsdir):
     49        """Create a XULInfo based on the current platform's characteristics."""
     50 
     51        # Our strategy is to find the autoconf.mk generated for the build and
     52        # read the values from there.
     53 
     54        # Find config/autoconf.mk.
     55        dirs = split_path_into_dirs(os.getcwd()) + split_path_into_dirs(jsdir)
     56 
     57        path = None
     58        for dir in dirs:
     59            _path = posixpath.join(dir, "config", "autoconf.mk")
     60            if os.path.isfile(_path):
     61                path = _path
     62                break
     63 
     64        if path is None:
     65            print(
     66                "Can't find config/autoconf.mk on a directory containing"
     67                f" the JS shell (searched from {jsdir})"
     68            )
     69            sys.exit(1)
     70 
     71        # Read the values.
     72        val_re = re.compile(r"(TARGET_XPCOM_ABI|OS_TARGET|MOZ_DEBUG)\s*=\s*(.*)")
     73        kw = {"isdebug": False}
     74        for line in open(path, encoding="utf-8"):
     75            m = val_re.match(line)
     76            if m:
     77                key, val = m.groups()
     78                val = val.rstrip()
     79                if key == "TARGET_XPCOM_ABI":
     80                    kw["abi"] = val
     81                if key == "OS_TARGET":
     82                    kw["os"] = val
     83                if key == "MOZ_DEBUG":
     84                    kw["isdebug"] = val == "1"
     85        return cls(**kw)
     86 
     87 
     88 class XULInfoTester:
     89    def __init__(self, xulinfo, options, js_args):
     90        self.js_prologue = xulinfo.as_js()
     91        self.js_bin = options.js_shell
     92        self.js_args = js_args
     93        # options here are the command line options
     94        self.options = options
     95        # Maps JS expr to evaluation result.
     96        self.cache = {}
     97 
     98        if not self.options.remote:
     99            return
    100        self.device = init_device(options)
    101        self.js_bin = posixpath.join(options.remote_test_root, "bin", "js")
    102 
    103    def test(self, cond, options=[]):
    104        if self.options.remote:
    105            return self._test_remote(cond, options=options)
    106        return self._test_local(cond, options=options)
    107 
    108    def _test_remote(self, cond, options=[]):
    109        from mozdevice import ADBDevice, ADBProcessError
    110 
    111        ans = self.cache.get(cond, None)
    112        if ans is not None:
    113            return ans
    114 
    115        env = {
    116            "LD_LIBRARY_PATH": posixpath.join(self.options.remote_test_root, "bin"),
    117        }
    118 
    119        cmd = (
    120            [self.js_bin]
    121            + self.js_args
    122            + options
    123            + [
    124                # run in safe configuration, since it is hard to debug
    125                # crashes when running code here. In particular, msan will
    126                # error out if the jit is active.
    127                "--no-baseline",
    128                "--no-blinterp",
    129                "-e",
    130                self.js_prologue,
    131                "-e",
    132                f"print(!!({cond}))",
    133            ]
    134        )
    135        cmd = ADBDevice._escape_command_line(cmd)
    136        try:
    137            # Allow ADBError or ADBTimeoutError to terminate the test run,
    138            # but handle ADBProcessError in order to support the use of
    139            # non-zero exit codes in the JavaScript shell tests.
    140            out = self.device.shell_output(
    141                cmd, env=env, cwd=self.options.remote_test_root, timeout=None
    142            )
    143            err = ""
    144        except ADBProcessError as e:
    145            out = ""
    146            err = str(e.adb_process.stdout)
    147 
    148        if out == "true":
    149            ans = True
    150        elif out == "false":
    151            ans = False
    152        else:
    153            raise Exception(
    154                f"Failed to test XUL condition {cond!r};"
    155                f" output was {out!r}, stderr was {err!r}"
    156            )
    157        self.cache[cond] = ans
    158        return ans
    159 
    160    def _test_local(self, cond, options=[]):
    161        """Test a XUL predicate condition against this local info."""
    162        ans = self.cache.get(cond, None)
    163        if ans is None:
    164            cmd = (
    165                [self.js_bin]
    166                + self.js_args
    167                + options
    168                + [
    169                    # run in safe configuration, since it is hard to debug
    170                    # crashes when running code here. In particular, msan will
    171                    # error out if the jit is active.
    172                    "--no-baseline",
    173                    "--no-blinterp",
    174                    "-e",
    175                    self.js_prologue,
    176                    "-e",
    177                    f"print(!!({cond}))",
    178                ]
    179            )
    180            p = Popen(
    181                cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True
    182            )
    183            out, err = p.communicate()
    184            if out in ("true\n", "true\r\n"):
    185                ans = True
    186            elif out in ("false\n", "false\r\n"):
    187                ans = False
    188            else:
    189                raise Exception(
    190                    f"Failed to test XUL condition {cond!r};"
    191                    f" output was {out!r}, stderr was {err!r}"
    192                )
    193            self.cache[cond] = ans
    194        return ans
    195 
    196 
    197 class NullXULInfoTester:
    198    """Can be used to parse manifests without a JS shell."""
    199 
    200    def test(self, cond, options=[]):
    201        return False
    202 
    203 
    204 def _parse_one(testcase, terms, xul_tester):
    205    pos = 0
    206    parts = terms.split()
    207    while pos < len(parts):
    208        if parts[pos] == "fails":
    209            testcase.expect = False
    210            pos += 1
    211        elif parts[pos] == "skip":
    212            testcase.expect = testcase.enable = False
    213            pos += 1
    214        elif parts[pos] == "random":
    215            testcase.random = True
    216            pos += 1
    217        elif parts[pos].startswith("shell-option("):
    218            # This directive adds an extra option to pass to the shell.
    219            option = parts[pos][len("shell-option(") : -1]
    220            testcase.options.append(option)
    221            pos += 1
    222        elif parts[pos].startswith("fails-if"):
    223            cond = parts[pos][len("fails-if(") : -1]
    224            if xul_tester.test(cond, testcase.options):
    225                testcase.expect = False
    226            pos += 1
    227        elif parts[pos].startswith("asserts-if"):
    228            # This directive means we may flunk some number of
    229            # NS_ASSERTIONs in the browser. For the shell, ignore it.
    230            pos += 1
    231        elif parts[pos].startswith("skip-if"):
    232            cond = parts[pos][len("skip-if(") : -1]
    233            if xul_tester.test(cond, testcase.options):
    234                testcase.expect = testcase.enable = False
    235            pos += 1
    236        elif parts[pos].startswith("ignore-flag"):
    237            flag = parts[pos][len("ignore-flag(") : -1]
    238            testcase.ignoredflags.append(flag)
    239            pos += 1
    240        elif parts[pos].startswith("random-if"):
    241            cond = parts[pos][len("random-if(") : -1]
    242            if xul_tester.test(cond, testcase.options):
    243                testcase.random = True
    244            pos += 1
    245        elif parts[pos] == "slow":
    246            testcase.slow = True
    247            pos += 1
    248        elif parts[pos].startswith("slow-if"):
    249            cond = parts[pos][len("slow-if(") : -1]
    250            if xul_tester.test(cond, testcase.options):
    251                testcase.slow = True
    252            pos += 1
    253        elif parts[pos] == "silentfail":
    254            # silentfails use tons of memory, and Darwin doesn't support ulimit.
    255            if xul_tester.test("cocoaWidget", testcase.options):
    256                testcase.expect = testcase.enable = False
    257            pos += 1
    258        elif parts[pos].startswith("error:"):
    259            # This directive allows to specify an error type.
    260            (_, _, errortype) = parts[pos].partition(":")
    261            testcase.error = errortype
    262            pos += 1
    263        elif parts[pos] == "module":
    264            # This directive marks the test as module code.
    265            testcase.is_module = True
    266            pos += 1
    267        elif parts[pos] == "test262-raw":
    268            testcase.is_test262_raw = True
    269            pos += 1
    270        elif parts[pos] == "async":
    271            # This directive marks the test as async.
    272            testcase.is_async = True
    273            pos += 1
    274        else:
    275            print(f'warning: invalid manifest line element "{parts[pos]}"')
    276            pos += 1
    277 
    278 
    279 def _build_manifest_script_entry(script_name, test):
    280    line = []
    281    properties = []
    282    if test.terms:
    283        # Remove jsreftest internal terms.
    284        terms = " ".join([
    285            term
    286            for term in test.terms.split()
    287            if not (
    288                term in {"module", "async", "test262-raw"}
    289                or term.startswith("error:")
    290                or term.startswith("ignore-flag(")
    291                or term.startswith("shell-option(")
    292            )
    293        ])
    294        if terms:
    295            line.append(terms)
    296    if test.error:
    297        properties.append("error=" + test.error)
    298    if test.is_module:
    299        properties.append("module")
    300    if test.is_async:
    301        properties.append("async")
    302    if test.is_test262_raw:
    303        properties.append("test262_raw")
    304    line.append("script")
    305    script = script_name
    306    if properties:
    307        script = ";".join([script] + properties)
    308    line.append(script)
    309    if test.comment:
    310        line.append("#")
    311        line.append(test.comment)
    312    return " ".join(line)
    313 
    314 
    315 def _map_prefixes_left(test_gen):
    316    """
    317    Splits tests into a dictionary keyed on the first component of the test
    318    path, aggregating tests with a common base path into a list.
    319    """
    320    byprefix = {}
    321    for t in test_gen:
    322        left, sep, remainder = t.path.partition(os.sep)
    323        if left not in byprefix:
    324            byprefix[left] = []
    325        if remainder:
    326            t.path = remainder
    327        byprefix[left].append(t)
    328    return byprefix
    329 
    330 
    331 def _emit_manifest_at(location, relative, test_gen, depth):
    332    """
    333    location  - str: absolute path where we want to write the manifest
    334    relative  - str: relative path from topmost manifest directory to current
    335    test_gen  - (str): generator of all test paths and directorys
    336    depth     - int: number of dirs we are below the topmost manifest dir
    337    """
    338    manifests = _map_prefixes_left(test_gen)
    339 
    340    filename = os.path.join(location, "jstests.list")
    341    manifest = []
    342    numTestFiles = 0
    343    for k, test_list in manifests.items():
    344        fullpath = os.path.join(location, k)
    345        if os.path.isdir(fullpath):
    346            manifest.append("include " + k + "/jstests.list")
    347            relpath = os.path.join(relative, k)
    348            _emit_manifest_at(fullpath, relpath, test_list, depth + 1)
    349        else:
    350            numTestFiles += 1
    351            assert len(test_list) == 1, test_list
    352            line = _build_manifest_script_entry(k, test_list[0])
    353            manifest.append(line)
    354 
    355    # Always present our manifest in sorted order.
    356    manifest.sort()
    357 
    358    # If we have tests, we have to set the url-prefix so reftest can find them.
    359    if numTestFiles > 0:
    360        manifest = [
    361            "url-prefix {}jsreftest.html?test={}/".format("../" * depth, relative)
    362        ] + manifest
    363 
    364    fp = open(filename, "w", encoding="utf-8", newline="\n")
    365    try:
    366        fp.write("\n".join(manifest) + "\n")
    367    finally:
    368        fp.close()
    369 
    370 
def make_manifests(location, test_gen):
   """Write the tree of jstests.list manifests rooted at |location| for all
   tests produced by |test_gen|."""
   _emit_manifest_at(location, "", test_gen, 0)
    373 
    374 
    375 def _find_all_js_files(location):
    376    for root, dirs, files in os.walk(location):
    377        root = root[len(location) + 1 :]
    378        for fn in files:
    379            if fn.endswith(".js"):
    380                yield root, fn
    381 
    382 
# The pattern for test header lines.
# NOTE: these patterns are compiled with re.VERBOSE below, so the "#" lines
# inside the raw strings are regex-source comments ignored by the engine;
# do not edit them expecting to change matching behavior.
TEST_HEADER_PATTERN = r"""
# Ignore any space before the tag.
\s*

# The reftest tag is enclosed in pipes.
\|(?P<tag>.*?)\|

# Ignore any space before the options.
\s*

# Accept some options.
(?P<options>.*?)

# Ignore space before the comments.
\s*

# Accept an optional comment starting with "--".
(?:
 # Unless "--" is directly preceded by "(".
 (?<!\()
 --

 # Ignore more space.
 \s*

 # The actual comment.
 (?P<comment>.*)
)?
"""


# Matches a header written in a single-line "// ..." comment.
TEST_HEADER_PATTERN_INLINE = re.compile(
   r"""
# Start a single line comment
//
"""
   + TEST_HEADER_PATTERN
   + r"""
# Match the end of line.
$
""",
   re.VERBOSE,
)
# Matches a header written in a "/* ... */" block comment.
TEST_HEADER_PATTERN_MULTI = re.compile(
   r"""
# Start a multi line comment
/\*
"""
   + TEST_HEADER_PATTERN
   + r"""
# Match the end of comment.
\*/
""",
   re.VERBOSE,
)
    439 
    440 
    441 def _append_terms_and_comment(testcase, terms, comment):
    442    if testcase.terms is None:
    443        testcase.terms = terms
    444    else:
    445        testcase.terms += " " + terms
    446 
    447    if testcase.comment is None:
    448        testcase.comment = comment
    449    elif comment:
    450        testcase.comment += "; " + comment
    451 
    452 
    453 def _parse_test_header(fullpath, testcase, xul_tester):
    454    """
    455    This looks a bit weird.  The reason is that it needs to be efficient, since
    456    it has to be done on every test
    457    """
    458    fp = open(fullpath, encoding="utf-8")
    459    try:
    460        buf = fp.read(512)
    461    finally:
    462        fp.close()
    463 
    464    # Bail early if we do not start with a single comment.
    465    if not buf.startswith("//"):
    466        return
    467 
    468    # Extract the token.
    469    buf, _, _ = buf.partition("\n")
    470    matches = TEST_HEADER_PATTERN_INLINE.match(buf)
    471 
    472    if not matches:
    473        matches = TEST_HEADER_PATTERN_MULTI.match(buf)
    474        if not matches:
    475            return
    476 
    477    testcase.tag = matches.group("tag")
    478    _append_terms_and_comment(
    479        testcase, matches.group("options"), matches.group("comment")
    480    )
    481    _parse_one(testcase, matches.group("options"), xul_tester)
    482 
    483 
    484 def _parse_external_manifest(filename, relpath):
    485    """
    486    Reads an external manifest file for test suites whose individual test cases
    487    can't be decorated with reftest comments.
    488    filename - str: name of the manifest file
    489    relpath - str: relative path of the directory containing the manifest
    490                   within the test suite
    491    """
    492    if not os.path.exists(filename):
    493        return []
    494 
    495    entries = []
    496 
    497    with open(filename, encoding="utf-8") as fp:
    498        manifest_re = re.compile(
    499            r"^\s*(?P<terms>.*)\s+(?P<type>include|script)\s+(?P<path>\S+)$"
    500        )
    501        include_re = re.compile(r"^\s*include\s+(?P<path>\S+)$")
    502        for line in fp:
    503            line, _, comment = line.partition("#")
    504            line = line.strip()
    505            if not line:
    506                continue
    507            matches = manifest_re.match(line)
    508            if not matches:
    509                matches = include_re.match(line)
    510                if not matches:
    511                    print(f"warning: unrecognized line in jstests.list: {line}")
    512                    continue
    513 
    514                include_file = matches.group("path")
    515                include_filename = os.path.join(os.path.dirname(filename), include_file)
    516                include_relpath = os.path.join(relpath, os.path.dirname(include_file))
    517                include_entries = _parse_external_manifest(
    518                    include_filename, include_relpath
    519                )
    520                entries.extend(include_entries)
    521                continue
    522 
    523            path = os.path.normpath(os.path.join(relpath, matches.group("path")))
    524            if matches.group("type") == "include":
    525                # The manifest spec wants a reference to another manifest here,
    526                # but we need just the directory. We do need the trailing
    527                # separator so we don't accidentally match other paths of which
    528                # this one is a prefix.
    529                assert path.endswith("jstests.list")
    530                path = path[: -len("jstests.list")]
    531 
    532            entries.append({
    533                "path": path,
    534                "terms": matches.group("terms"),
    535                "comment": comment.strip(),
    536            })
    537 
    538    # if one directory name is a prefix of another, we want the shorter one
    539    # first
    540    entries.sort(key=lambda x: x["path"])
    541    return entries
    542 
    543 
    544 def _apply_external_manifests(filename, testcase, entries, xul_tester):
    545    for entry in entries:
    546        if filename.startswith(entry["path"]):
    547            # The reftest spec would require combining the terms (failure types)
    548            # that may already be defined in the test case with the terms
    549            # specified in entry; for example, a skip overrides a random, which
    550            # overrides a fails. Since we don't necessarily know yet in which
    551            # environment the test cases will be run, we'd also have to
    552            # consider skip-if, random-if, and fails-if with as-yet unresolved
    553            # conditions.
    554            # At this point, we use external manifests only for test cases
    555            # that can't have their own failure type comments, so we simply
    556            # use the terms for the most specific path.
    557            _append_terms_and_comment(testcase, entry["terms"], entry["comment"])
    558            _parse_one(testcase, entry["terms"], xul_tester)
    559 
    560 
    561 def _is_test_file(path_from_root, basename, filename, path_options):
    562    # Any file whose basename matches something in this set is ignored.
    563    EXCLUDED = set((
    564        "browser.js",
    565        "shell.js",
    566        "template.js",
    567        "user.js",
    568        "js-test-driver-begin.js",
    569        "js-test-driver-end.js",
    570    ))
    571 
    572    # Skip js files in the root test directory.
    573    if not path_from_root:
    574        return False
    575 
    576    # Skip files that we know are not tests.
    577    if basename in EXCLUDED:
    578        return False
    579 
    580    if not path_options.should_run(filename):
    581        return False
    582 
    583    return True
    584 
    585 
    586 def count_tests(location, path_options):
    587    count = 0
    588    for root, basename in _find_all_js_files(location):
    589        filename = os.path.join(root, basename)
    590        if _is_test_file(root, basename, filename, path_options):
    591            count += 1
    592    return count
    593 
    594 
def load_reftests(location, path_options, xul_tester):
   """
   Locates all tests by walking the filesystem starting at |location|.
   Uses xul_tester to evaluate any test conditions in the test header.
   Failure type and comment for a test case can come from
   - an external manifest entry for the test case,
   - an external manifest entry for a containing directory,
   - most commonly: the header of the test case itself.

   Yields one RefTestCase per test file.
   """
   manifestFile = os.path.join(location, "jstests.list")
   externalManifestEntries = _parse_external_manifest(manifestFile, "")

   for root, basename in _find_all_js_files(location):
       # Get the full path and relative location of the file.
       filename = os.path.join(root, basename)
       if not _is_test_file(root, basename, filename, path_options):
           continue

       # Full path of the test file; used below to read its header.
       fullpath = os.path.join(location, filename)

       testcase = RefTestCase(location, filename)
       # Apply jstests.list terms first, then the file's own header terms.
       _apply_external_manifests(
           filename, testcase, externalManifestEntries, xul_tester
       )
       _parse_test_header(fullpath, testcase, xul_tester)
       yield testcase