tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

test.py (7952B)


      1 # This Source Code Form is subject to the terms of the Mozilla Public
      2 # License, v. 2.0. If a copy of the MPL was not distributed with this
      3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
      4 
      5 
      6 import gzip
      7 import json
      8 import logging
      9 import os
     10 
     11 from mozbuild.util import memoize
     12 from taskgraph.loader.transform import loader as transform_loader
     13 from taskgraph.util.copy import deepcopy
     14 from taskgraph.util.yaml import load_yaml
     15 
     16 from gecko_taskgraph import TEST_CONFIGS
     17 from gecko_taskgraph.util.chunking import resolver
     18 
logger = logging.getLogger(__name__)

# Relative directory the decision task exposes as its artifacts; `loader`
# writes tests-by-manifest.json.gz here.
ARTIFACTS_DIR = "artifacts"
     22 
     23 
     24 def loader(kind, path, config, params, loaded_tasks, write_artifacts):
     25    """
     26    Generate tasks implementing Gecko tests.
     27    """
     28 
     29    builds_by_platform = get_builds_by_platform(
     30        dep_kind="build", loaded_tasks=loaded_tasks
     31    )
     32    signed_builds_by_platform = get_builds_by_platform(
     33        dep_kind="build-signing", loaded_tasks=loaded_tasks
     34    )
     35 
     36    # get the test platforms for those build tasks
     37    test_platforms_cfg = load_yaml(TEST_CONFIGS, "test-platforms.yml")
     38    test_platforms = get_test_platforms(
     39        test_platforms_cfg, builds_by_platform, signed_builds_by_platform
     40    )
     41 
     42    # expand the test sets for each of those platforms
     43    test_sets_cfg = load_yaml(TEST_CONFIGS, "test-sets.yml")
     44    test_platforms = expand_tests(test_sets_cfg, test_platforms, kind)
     45 
     46    # load the test descriptions
     47    tests = transform_loader(kind, path, config, params, loaded_tasks, write_artifacts)
     48    test_descriptions = {t.pop("name"): t for t in tests}
     49 
     50    # generate all tests for all test platforms
     51    for test_platform_name, test_platform in test_platforms.items():
     52        for test_name in test_platform["test-names"]:
     53            test = deepcopy(test_descriptions[test_name])
     54            test["build-platform"] = test_platform["build-platform"]
     55            test["test-platform"] = test_platform_name
     56            test["build-label"] = test_platform["build-label"]
     57            if test_platform.get("build-signing-label", None):
     58                test["build-signing-label"] = test_platform["build-signing-label"]
     59 
     60            test["build-attributes"] = test_platform["build-attributes"]
     61            test["test-name"] = test_name
     62            if test_platform.get("shippable"):
     63                test.setdefault("attributes", {})["shippable"] = True
     64                test["attributes"]["shipping_product"] = test_platform[
     65                    "shipping_product"
     66                ]
     67 
     68            logger.debug(
     69                "Generating tasks for test {} on platform {}".format(
     70                    test_name, test["test-platform"]
     71                )
     72            )
     73            yield test
     74 
     75    # this file was previously written out in `decision.py` alongside most
     76    # other decision task artifacts. it was moved here to accommodate tasks
     77    # being generated in subprocesses, and the fact that the `resolver` that
     78    # has the data is only updated in the subprocess.
     79    # see https://bugzilla.mozilla.org/show_bug.cgi?id=1989038 for additional
     80    # details
     81    # we must only write this file once, to ensure it is never overridden
     82    # we only need `tests-by-manifest` for web-platform-tests, so we need to
     83    # write it out from whichever kind contains them
     84    if kind == "web-platform-tests" and write_artifacts:
     85        if not os.path.isdir(ARTIFACTS_DIR):
     86            os.mkdir(ARTIFACTS_DIR)
     87        path = os.path.join(ARTIFACTS_DIR, "tests-by-manifest.json.gz")
     88        with gzip.open(path, "wb") as f:
     89            f.write(json.dumps(resolver.tests_by_manifest).encode("utf-8"))
     90 
     91 
     92 def get_builds_by_platform(dep_kind, loaded_tasks):
     93    """Find the build tasks on which tests will depend, keyed by
     94    platform/type.  Returns a dictionary mapping build platform to task."""
     95    builds_by_platform = {}
     96    for task in loaded_tasks:
     97        if task.kind != dep_kind:
     98            continue
     99 
    100        build_platform = task.attributes.get("build_platform")
    101        build_type = task.attributes.get("build_type")
    102        if not build_platform or not build_type:
    103            continue
    104        platform = f"{build_platform}/{build_type}"
    105        if platform in builds_by_platform:
    106            raise Exception("multiple build jobs for " + platform)
    107        builds_by_platform[platform] = task
    108    return builds_by_platform
    109 
    110 
    111 def get_test_platforms(
    112    test_platforms_cfg, builds_by_platform, signed_builds_by_platform={}
    113 ):
    114    """Get the test platforms for which test tasks should be generated,
    115    based on the available build platforms.  Returns a dictionary mapping
    116    test platform to {test-set, build-platform, build-label}."""
    117    test_platforms = {}
    118    for test_platform, cfg in test_platforms_cfg.items():
    119        build_platform = cfg["build-platform"]
    120        if build_platform not in builds_by_platform:
    121            logger.warning(
    122                f"No build task with platform {build_platform}; ignoring test platform {test_platform}"
    123            )
    124            continue
    125        test_platforms[test_platform] = {
    126            "build-platform": build_platform,
    127            "build-label": builds_by_platform[build_platform].label,
    128            "build-attributes": builds_by_platform[build_platform].attributes,
    129        }
    130 
    131        if builds_by_platform[build_platform].attributes.get("shippable"):
    132            test_platforms[test_platform]["shippable"] = builds_by_platform[
    133                build_platform
    134            ].attributes["shippable"]
    135            test_platforms[test_platform]["shipping_product"] = builds_by_platform[
    136                build_platform
    137            ].attributes["shipping_product"]
    138 
    139        test_platforms[test_platform].update(cfg)
    140 
    141    return test_platforms
    142 
    143 
# Test-name prefixes owned by each explicitly listed test `kind`; used by
# `is_test_for_kind` to route test names to the right kind.  Names matching
# none of these prefixes fall through to the generic "test" kind.
PREFIX_BY_KIND = {
    "browsertime": {"browsertime"},
    "mochitest": {"mochitest"},
    "reftest": {"crashtest", "jsreftest", "reftest"},
    "web-platform-tests": {
        "web-platform-tests",
        "test-coverage-wpt",
        "test-verify-wpt",
    },
}
    154 
    155 
    156 @memoize
    157 def is_test_for_kind(test_name, kind):
    158    if kind == "test":
    159        # the test kind is special: we assume that it should contain any tests
    160        # that aren't included in an explicitly listed `kind`.
    161        # if/when the `test` kind goes away, this block should go away too
    162        for prefixes in PREFIX_BY_KIND.values():
    163            if any([test_name.startswith(prefix) for prefix in prefixes]):
    164                return False
    165        return True
    166    else:
    167        test_set_prefixes = PREFIX_BY_KIND[kind]
    168        return any([test_name.startswith(prefix) for prefix in test_set_prefixes])
    169 
    170 
    171 def expand_tests(test_sets_cfg, test_platforms, kind):
    172    """Expand the test sets in `test_platforms` out to sets of test names.
    173    Returns a dictionary like `get_test_platforms`, with an additional
    174    `test-names` key for each test platform, containing a set of test
    175    names."""
    176    rv = {}
    177    for test_platform, cfg in test_platforms.items():
    178        test_sets = cfg["test-sets"]
    179        if not set(test_sets) <= set(test_sets_cfg):
    180            raise Exception(
    181                "Test sets {} for test platform {} are not defined".format(
    182                    ", ".join(test_sets), test_platform
    183                )
    184            )
    185        test_names = set()
    186        for test_set in test_sets:
    187            for test_name in test_sets_cfg[test_set]:
    188                # test_sets contains groups of test suites that we commonly
    189                # run together. these tests are defined across more than one
    190                # `kind`, which means we may only have a subset of them when
    191                # this is called for any given kind. any tests that are
    192                # relevant to the given kind will be included; all others will
    193                # be skipped over.
    194                if is_test_for_kind(test_name, kind):
    195                    test_names.add(test_name)
    196        rv[test_platform] = cfg.copy()
    197        rv[test_platform]["test-names"] = test_names
    198    return rv