__init__.py (24159B)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
These transforms construct a task description to run the given test, based on a
test description. The implementation here is shared among all test kinds, but
contains specific support for how we run tests in Gecko (via mozharness,
invoked in particular ways).

This is a good place to translate a test-description option such as
`single-core: true` to the implementation of that option in a task description
(worker options, mozharness commandline, environment variables, etc.)

The test description should be fully formed by the time it reaches these
transforms, and these transforms should not embody any specific knowledge about
what should run where. this is the wrong place for special-casing platforms,
for example - use `all_tests.py` instead.
"""

import logging
from importlib import import_module

from mozbuild.schedules import INCLUSIVE_COMPONENTS
from taskgraph.transforms.base import TransformSequence
from taskgraph.util.schema import Schema, optionally_keyed_by, resolve_keyed_by
from voluptuous import Any, Exclusive, Optional, Required

from gecko_taskgraph.optimize.schema import OptimizationSchema
from gecko_taskgraph.transforms.job import job_description_schema
from gecko_taskgraph.transforms.job.run_task import run_task_schema
from gecko_taskgraph.transforms.test.other import get_mobile_project
from gecko_taskgraph.util.chunking import manifest_loaders

logger = logging.getLogger(__name__)
transforms = TransformSequence()


# Schema for a test description
#
# *****WARNING*****
#
# This is a great place for baffling cruft to accumulate, and that makes
# everyone move more slowly. Be considerate of your fellow hackers!
# See the warnings in taskcluster/docs/how-tos.rst
#
# *****WARNING*****
test_description_schema = Schema({
    # description of the suite, for the task metadata
    Required("description"): str,
    # test suite category and name
    Optional("suite"): Any(
        optionally_keyed_by("variant", str),
        {
            Optional("category"): str,
            Optional("name"): optionally_keyed_by("variant", str),
        },
    ),
    # base work directory used to set up the task.
    Optional("workdir"): optionally_keyed_by("test-platform", Any(str, "default")),
    # the name by which this test suite is addressed in try syntax; defaults to
    # the test-name. This will translate to the `unittest_try_name` or
    # `talos_try_name` attribute.
    Optional("try-name"): str,
    # additional tags to mark up this type of test
    Optional("tags"): {str: object},
    # the symbol, or group(symbol), under which this task should appear in
    # treeherder.
    Required("treeherder-symbol"): str,
    # the value to place in task.extra.treeherder.machine.platform; ideally
    # this is the same as build-platform, and that is the default, but in
    # practice it's not always a match.
    Optional("treeherder-machine-platform"): str,
    # attributes to appear in the resulting task (later transforms will add the
    # common attributes)
    Optional("attributes"): {str: object},
    # relative path (from config.path) to the file task was defined in
    Optional("task-from"): str,
    # The `run_on_projects` attribute, defaulting to "all". This dictates the
    # projects on which this task should be included in the target task set.
    # See the attributes documentation for details.
    #
    # Note that the special case 'built-projects', the default, uses the parent
    # build task's run-on-projects, meaning that tests run only on platforms
    # that are built.
    Optional("run-on-projects"): optionally_keyed_by(
        "app",
        "subtest",
        "test-platform",
        "test-name",
        "variant",
        Any([str], "built-projects"),
    ),
    # Whether tasks should run on only a specific type of repository.
    Optional("run-on-repo-type"): job_description_schema["run-on-repo-type"],
    # Whether tasks should run on specified Git branches.
    Optional("run-on-git-branches"): job_description_schema["run-on-git-branches"],
    # When set only run on projects where the build would already be running.
    # This ensures tasks where this is True won't be the cause of the build
    # running on a project it otherwise wouldn't have.
    Optional("built-projects-only"): bool,
    # the sheriffing tier for this task (default: set based on test platform)
    Optional("tier"): optionally_keyed_by(
        "test-platform", "variant", "app", "subtest", Any(int, "default")
    ),
    # number of chunks to create for this task. This can be keyed by test
    # platform by passing a dictionary in the `by-test-platform` key. If the
    # test platform is not found, the key 'default' will be tried.
    Required("chunks"): optionally_keyed_by(
        "test-platform", "variant", Any(int, "dynamic")
    ),
    # Timeout multiplier to apply to default test timeout values. Can be keyed
    # by test platform.
    Optional("timeoutfactor"): optionally_keyed_by("test-platform", Any(int, float)),
    # Custom 'test_manifest_loader' to use, overriding the one configured in the
    # parameters. When 'null', no test chunking will be performed. Can also
    # be used to disable "manifest scheduling".
    Optional("test-manifest-loader"): optionally_keyed_by(
        "test-platform", Any(None, *list(manifest_loaders))
    ),
    # the time (with unit) after which this task is deleted; default depends on
    # the branch (see below)
    Optional("expires-after"): str,
    # The different configurations that should be run against this task, defined
    # in the TEST_VARIANTS object in the variant.py transforms.
    Optional("variants"): [str],
    # Whether to run this task without any variants applied.
    Required("run-without-variant"): optionally_keyed_by("test-platform", bool),
    # The EC2 instance size to run these tests on.
    Required("instance-size"): optionally_keyed_by(
        "test-platform",
        "variant",
        Any(
            "default",
            "large-legacy",
            "large",
            "large-noscratch",
            "xlarge",
            "xlarge-noscratch",
            "highcpu",
        ),
    ),
    # type of virtualization or hardware required by test.
    Required("virtualization"): optionally_keyed_by(
        "test-platform", Any("virtual", "virtual-with-gpu", "hardware")
    ),
    # Whether the task requires loopback audio or video (whatever that may mean
    # on the platform)
    Required("loopback-audio"): bool,
    Required("loopback-video"): bool,
    # Whether the test can run using a software GL implementation on Linux
    # using the GL compositor. May not be used with "legacy" sized instances
    # due to poor LLVMPipe performance (bug 1296086). Defaults to true for
    # unit tests on linux platforms and false otherwise
    Optional("allow-software-gl-layers"): bool,
    # For tasks that will run in docker-worker, this is the
    # name of the docker image or in-tree docker image to run the task in. If
    # in-tree, then a dependency will be created automatically. This is
    # generally `desktop-test`, or an image that acts an awful lot like it.
    Required("docker-image"): optionally_keyed_by(
        "test-platform",
        Any(
            # a raw Docker image path (repo/image:tag)
            str,
            # an in-tree generated docker image (from `taskcluster/docker/<name>`)
            {"in-tree": str},
            # an indexed docker image
            {"indexed": str},
        ),
    ),
    # seconds of runtime after which the task will be killed. Like 'chunks',
    # this can be keyed by test platform, but also variant.
    Required("max-run-time"): optionally_keyed_by(
        "test-platform", "subtest", "variant", "app", int
    ),
    # the exit status code that indicates the task should be retried
    Optional("retry-exit-status"): [int],
    # Whether to perform a gecko checkout.
    Required("checkout"): bool,
    # Whether to perform a machine reboot after test is done
    Optional("reboot"): Any(False, "always", "on-exception", "on-failure"),
    # What to run
    Required("mozharness"): {
        # the mozharness script used to run this task
        Required("script"): optionally_keyed_by("test-platform", str),
        # the config files required for the task
        Required("config"): optionally_keyed_by("test-platform", [str]),
        # mochitest flavor for mochitest runs
        Optional("mochitest-flavor"): str,
        # any additional actions to pass to the mozharness command
        Optional("actions"): [str],
        # additional command-line options for mozharness, beyond those
        # automatically added
        Required("extra-options"): optionally_keyed_by(
            "test-platform", "variant", "subtest", "app", [str]
        ),
        # the artifact name (including path) to test on the build task; this is
        # generally set in a per-kind transformation
        Optional("build-artifact-name"): str,
        Optional("installer-url"): str,
        # If not false, tooltool downloads will be enabled via relengAPIProxy
        # for either just public files, or all files. Not supported on Windows
        Required("tooltool-downloads"): Any(
            False,
            "public",
            "internal",
        ),
        # Add --blob-upload-branch=<project> mozharness parameter
        Optional("include-blob-upload-branch"): bool,
        # The setting for --download-symbols (if omitted, the option will not
        # be passed to mozharness)
        Optional("download-symbols"): Any(True, "ondemand"),
        # If set, then MOZ_NODE_PATH=/usr/local/bin/node is included in the
        # environment. This is more than just a helpful path setting -- it
        # causes xpcshell tests to start additional servers, and runs
        # additional tests.
        Required("set-moz-node-path"): bool,
        # If true, include chunking information in the command even if the number
        # of chunks is 1
        Required("chunked"): optionally_keyed_by("test-platform", bool),
        Required("requires-signed-builds"): optionally_keyed_by(
            "test-platform", "variant", bool
        ),
    },
    # The set of test manifests to run.
    Optional("test-manifests"): Any(
        [str],
        {"active": [str], "skipped": [str]},
    ),
    # flag to determine if this is a confirm failure task
    Optional("confirm-failure"): bool,
    # The current chunk (if chunking is enabled).
    Optional("this-chunk"): int,
    # os user groups for test task workers; required scopes, will be
    # added automatically
    Optional("os-groups"): optionally_keyed_by("test-platform", [str]),
    Optional("run-as-administrator"): optionally_keyed_by("test-platform", bool),
    # -- values supplied by the task-generation infrastructure
    # the platform of the build this task is testing
    Required("build-platform"): str,
    # the label of the build task generating the materials to test
    Required("build-label"): str,
    # the label of the signing task generating the materials to test.
    # Signed builds are used in xpcshell tests on Windows, for instance.
    Optional("build-signing-label"): optionally_keyed_by("variant", str),
    # the build's attributes
    Required("build-attributes"): {str: object},
    # the platform on which the tests will run
    Required("test-platform"): str,
    # limit the test-platforms (as defined in test-platforms.yml)
    # that the test will run on
    Optional("limit-platforms"): optionally_keyed_by("app", "subtest", [str]),
    # the name of the test (the key in tests.yml)
    Required("test-name"): str,
    # the product name, defaults to firefox
    Optional("product"): str,
    # conditional files to determine when these tests should be run
    Exclusive("when", "optimization"): {
        Optional("files-changed"): [str],
    },
    # Optimization to perform on this task during the optimization phase.
    # Optimizations are defined in taskcluster/gecko_taskgraph/optimize.py.
    Exclusive("optimization", "optimization"): OptimizationSchema,
    # The SCHEDULES component for this task; this defaults to the suite
    # (not including the flavor) but can be overridden here.
    Exclusive("schedules-component", "optimization"): Any(
        str,
        [str],
    ),
    Optional("worker-type"): optionally_keyed_by(
        "test-platform",
        "variant",
        Any(str, None),
    ),
    Optional(
        "require-signed-extensions",
        description="Whether the build being tested requires extensions be signed.",
    ): optionally_keyed_by("release-type", "test-platform", bool),
    # The target name, specifying the build artifact to be tested.
    # If None or not specified, a transform sets the target based on OS:
    # target.dmg (Mac), target.apk (Android), target.tar.xz (Linux),
    # or target.zip (Windows).
    Optional("target"): optionally_keyed_by(
        "app",
        "test-platform",
        "variant",
        Any(
            str,
            None,
            {Required("index"): str, Required("name"): str},
            {Required("upstream-task"): str, Required("name"): str},
        ),
    ),
    # A list of artifacts to install from 'fetch' tasks. Validation deferred
    # to 'job' transforms.
    Optional("fetches"): object,
    # A list of extra dependencies
    Optional("dependencies"): object,
    # Raptor / browsertime specific keys, defer validation to 'raptor.py'
    # transform.
    Optional("raptor"): object,
    # Raptor / browsertime specific keys that need to be here since 'raptor' schema
    # is evaluated *before* test_description_schema
    Optional("app"): str,
    Optional("subtest"): str,
    # Define if a given task supports artifact builds or not, see bug 1695325.
    Optional("supports-artifact-builds"): bool,
    # Version of python used to run the task
    Optional("use-python"): job_description_schema["use-python"],
    # Fetch uv binary and add it to PATH
    Optional("use-uv"): bool,
    # Cache mounts / volumes to set up
    Optional("use-caches"): optionally_keyed_by(
        "test-platform", run_task_schema["use-caches"]
    ),
})


@transforms.add
def handle_keyed_by_mozharness(config, tasks):
    """Resolve a mozharness field if it is keyed by something"""
    fields = [
        "mozharness",
        "mozharness.chunked",
        "mozharness.config",
        "mozharness.script",
    ]
    for task in tasks:
        for field in fields:
            resolve_keyed_by(
                task,
                field,
                item_name=task["test-name"],
                enforce_single_match=False,
            )
        yield task


@transforms.add
def set_defaults(config, tasks):
    """Fill in default values for every optional key in the test description.

    Runs before schema validation so that the schema's Required keys are
    guaranteed to be present on each task.
    """
    for task in tasks:
        build_platform = task["build-platform"]
        if build_platform.startswith("android"):
            # all Android test tasks download internal objects from tooltool
            task["mozharness"]["tooltool-downloads"] = "internal"
            task["mozharness"]["actions"] = ["get-secrets"]

            # loopback-video is always true for Android, but false for other
            # platform phyla
            task["loopback-video"] = True
        task["mozharness"]["set-moz-node-path"] = True

        # software-gl-layers is only meaningful on linux unittests, where it defaults to True
        if task["test-platform"].startswith("linux") and task["suite"] not in [
            "talos",
            "raptor",
        ]:
            task.setdefault("allow-software-gl-layers", True)
        else:
            task["allow-software-gl-layers"] = False

        task.setdefault("try-name", task["test-name"])
        task.setdefault("os-groups", [])
        task.setdefault("run-as-administrator", False)
        task.setdefault("chunks", 1)
        task.setdefault("run-on-projects", "built-projects")
        task.setdefault("built-projects-only", False)
        task.setdefault("instance-size", "default")
        task.setdefault("max-run-time", 3600)
        task.setdefault("reboot", False)
        task.setdefault("virtualization", "virtual")
        task.setdefault("loopback-audio", False)
        task.setdefault("loopback-video", False)
        task.setdefault("limit-platforms", [])
        task.setdefault("docker-image", {"in-tree": "ubuntu1804-test"})
        task.setdefault("checkout", False)
        task.setdefault("require-signed-extensions", False)
        task.setdefault("run-without-variant", True)
        task.setdefault("variants", [])
        task.setdefault("supports-artifact-builds", True)
        task.setdefault("use-python", "system")
        task.setdefault("use-uv", True)
        task.setdefault("use-caches", ["checkout", "pip", "uv"])

        task["mozharness"].setdefault("extra-options", [])
        task["mozharness"].setdefault("requires-signed-builds", False)
        task["mozharness"].setdefault("tooltool-downloads", "public")
        task["mozharness"].setdefault("set-moz-node-path", False)
        task["mozharness"].setdefault("chunked", False)
        yield task


transforms.add_validate(test_description_schema)


@transforms.add
def run_variant_transforms(config, tasks):
    """Variant transforms are run as soon as possible to allow other transforms
    to key by variant."""
    for task in tasks:
        xforms = TransformSequence()
        mod = import_module("gecko_taskgraph.transforms.test.variant")
        xforms.add(mod.transforms)

        yield from xforms(config, [task])


@transforms.add
def resolve_keys(config, tasks):
    """Resolve `by-*` keyed values for a fixed set of top-level fields.

    Keys resolved here can be keyed by `release-type` or `variant` in
    addition to whatever each field's schema allows.
    """
    keys = (
        "require-signed-extensions",
        "run-without-variant",
        "suite",
        "suite.name",
        "test-manifest-loader",
        "timeoutfactor",
        "use-caches",
    )
    for task in tasks:
        for key in keys:
            resolve_keyed_by(
                task,
                key,
                item_name=task["test-name"],
                enforce_single_match=False,
                **{
                    "release-type": config.params["release_type"],
                    "variant": task["attributes"].get("unittest_variant"),
                },
            )
        yield task


@transforms.add
def run_remaining_transforms(config, tasks):
    """Runs other transform files next to this module."""
    # List of modules to load transforms from in order. The second element of
    # each pair is an optional predicate; when it returns False for a task,
    # that module's transforms are skipped for the task.
    transform_modules = (
        ("raptor", lambda t: t["suite"] == "raptor"),
        ("other", None),
        ("worker", None),
        ("confirm_failure", None),
        ("pernosco", lambda t: t["build-platform"].startswith("linux64")),
        ("os_integration", None),
        # These transforms should run last as there is never any difference in
        # configuration from one chunk to another (other than chunk number).
        ("chunk", None),
    )

    for task in tasks:
        xforms = TransformSequence()
        for name, filterfn in transform_modules:
            if filterfn and not filterfn(task):
                continue

            mod = import_module(f"gecko_taskgraph.transforms.test.{name}")
            xforms.add(mod.transforms)

        yield from xforms(config, [task])


@transforms.add
def define_tags(config, tasks):
    """Tag each task with its suite, platform and (if any) variant."""
    for task in tasks:
        tags = task.setdefault("tags", {})
        tags.setdefault("test-suite", task["suite"])
        tags.setdefault("test-platform", task["test-platform"])
        variant = task.get("attributes", {}).get("unittest_variant")
        if variant:
            tags.setdefault("test-variant", variant)

        yield task


@transforms.add
def make_job_description(config, tasks):
    """Convert *test* descriptions to *job* descriptions (input to
    gecko_taskgraph.transforms.job)"""

    for task in tasks:
        attributes = task.get("attributes", {})

        mobile = get_mobile_project(task)
        if mobile and (mobile not in task["test-name"]):
            label = "test-{}-{}-{}".format(
                task["test-platform"], mobile, task["test-name"]
            )
        else:
            label = "test-{}-{}".format(task["test-platform"], task["test-name"])

        try_name = task["try-name"]
        if attributes.get("unittest_variant"):
            suffix = task.pop("variant-suffix")
            label += suffix
            try_name += suffix

        if task["chunks"] > 1:
            label += "-{}".format(task["this-chunk"])

        if task.get("confirm-failure", False):
            label += "-cf"

        build_label = task["build-label"]

        if task["suite"] == "talos":
            attr_try_name = "talos_try_name"
        elif task["suite"] == "raptor":
            attr_try_name = "raptor_try_name"
        else:
            attr_try_name = "unittest_try_name"

        attr_build_platform, attr_build_type = task["build-platform"].split("/", 1)
        attributes.update({
            "build_platform": attr_build_platform,
            "build_type": attr_build_type,
            "test_platform": task["test-platform"],
            "test_chunk": str(task["this-chunk"]),
            "supports-artifact-builds": task["supports-artifact-builds"],
            attr_try_name: try_name,
        })

        if "test-manifests" in task:
            attributes["test_manifests"] = task["test-manifests"]

        jobdesc = {}
        name = "{}-{}".format(task["test-platform"], task["test-name"])
        jobdesc["name"] = name
        jobdesc["label"] = label
        jobdesc["description"] = task["description"]
        jobdesc["attributes"] = attributes
        jobdesc["dependencies"] = {"build": build_label}
        jobdesc["task-from"] = task["task-from"]

        if task["mozharness"]["requires-signed-builds"] is True:
            jobdesc["dependencies"]["build-signing"] = task["build-signing-label"]

        if "dependencies" in task:
            jobdesc["dependencies"].update(task["dependencies"])

        if "expires-after" in task:
            jobdesc["expires-after"] = task["expires-after"]

        jobdesc["routes"] = task.get("routes", [])
        jobdesc["run-on-repo-type"] = sorted(task["run-on-repo-type"])
        jobdesc["run-on-projects"] = sorted(task["run-on-projects"])
        jobdesc["scopes"] = []
        jobdesc["tags"] = task.get("tags", {})
        jobdesc["extra"] = {
            "chunks": {
                "current": task["this-chunk"],
                "total": task["chunks"],
            },
            "suite": attributes["unittest_suite"],
            "test-setting": task.pop("test-setting"),
        }
        jobdesc["treeherder"] = {
            "symbol": task["treeherder-symbol"],
            "kind": "test",
            "tier": task["tier"],
            "platform": task.get("treeherder-machine-platform", task["build-platform"]),
        }

        schedules = task.get("schedules-component", [])
        if task.get("when"):
            # This may still be used by comm-central.
            jobdesc["when"] = task["when"]
        elif "optimization" in task:
            jobdesc["optimization"] = task["optimization"]
        elif set(schedules) & set(INCLUSIVE_COMPONENTS):
            jobdesc["optimization"] = {"test-inclusive": schedules}
        else:
            jobdesc["optimization"] = {"test": schedules}

        run = jobdesc["run"] = {}
        run["using"] = "mozharness-test"
        run["test"] = task

        if "workdir" in task:
            run["workdir"] = task.pop("workdir")

        jobdesc["worker-type"] = task.pop("worker-type")

        if "worker" in task:
            jobdesc["worker"] = task.pop("worker")

        # Move 'fetches' from the test description onto the job description.
        # Popping here also removes it from run["test"] (same dict object),
        # so fetch validation happens only once, in the 'job' transforms.
        if task.get("fetches"):
            jobdesc["fetches"] = task.pop("fetches")

        yield jobdesc


def normpath(path):
    """Convert a forward-slash path to a Windows-style backslash path."""
    return path.replace("/", "\\")


def get_firefox_version():
    """Return the Firefox version string from browser/config/version.txt.

    Assumes the current working directory is the Gecko source root.
    """
    with open("browser/config/version.txt", encoding="utf-8") as f:
        return f.readline().strip()