raptor.py (18924B)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

"""Taskgraph transforms that expand Raptor performance-test definitions
into concrete tasks.

The ``transforms`` sequence fans a single test definition out per app and
per subtest, resolves ``keyed-by`` fields, and adjusts names, treeherder
symbols, mozharness options, and optimization strategies.  The
``task_transforms`` sequence applies late, task-level tweaks (scopes,
artifacts, lull scheduling, LambdaTest worker rewrites).

Transform order within each sequence is significant: later transforms
rely on keys (e.g. ``subtest``, ``chunk-number``, ``app``) that earlier
transforms insert or pop.
"""

from taskgraph.transforms.base import TransformSequence
from taskgraph.util.copy import deepcopy
from taskgraph.util.schema import Schema, optionally_keyed_by, resolve_keyed_by
from taskgraph.util.treeherder import join_symbol, split_symbol
from voluptuous import Extra, Optional, Required

from gecko_taskgraph.transforms.test import test_description_schema
from gecko_taskgraph.util.perftest import is_external_browser

transforms = TransformSequence()
task_transforms = TransformSequence()

raptor_description_schema = Schema({
    # Raptor specific configs.
    Optional("raptor"): {
        Optional("activity"): optionally_keyed_by("app", str),
        Optional("apps"): optionally_keyed_by("test-platform", "subtest", [str]),
        Optional("binary-path"): optionally_keyed_by("app", str),
        Optional("run-visual-metrics"): optionally_keyed_by(
            "app", "test-platform", bool
        ),
        Optional("subtests"): optionally_keyed_by("app", "test-platform", list),
        Optional("test"): str,
        Optional("test-url-param"): optionally_keyed_by(
            "subtest", "test-platform", str
        ),
        Optional("lull-schedule"): optionally_keyed_by("subtest", "test-platform", str),
        Optional("network-conditions"): optionally_keyed_by("subtest", list),
    },
    # Configs defined in the 'test_description_schema'.
    Optional("max-run-time"): optionally_keyed_by(
        "app", "subtest", "test-platform", test_description_schema["max-run-time"]
    ),
    Optional("run-on-projects"): optionally_keyed_by(
        "app",
        "test-name",
        "raptor.test",
        "subtest",
        "variant",
        test_description_schema["run-on-projects"],
    ),
    Optional("variants"): test_description_schema["variants"],
    Optional("target"): optionally_keyed_by("app", test_description_schema["target"]),
    Optional("tier"): optionally_keyed_by(
        "app", "raptor.test", "subtest", "variant", test_description_schema["tier"]
    ),
    Required("test-name"): test_description_schema["test-name"],
    Required("test-platform"): test_description_schema["test-platform"],
    Required("require-signed-extensions"): test_description_schema[
        "require-signed-extensions"
    ],
    Required("treeherder-symbol"): test_description_schema["treeherder-symbol"],
    # Any unrecognized keys will be validated against the test_description_schema.
    Extra: object,
})

transforms.add_validate(raptor_description_schema)


@transforms.add
def set_defaults(config, tests):
    """Ensure every test has a ``raptor`` dict with ``run-visual-metrics``
    defaulted to ``False``, so later transforms can pop it unconditionally."""
    for test in tests:
        test.setdefault("raptor", {}).setdefault("run-visual-metrics", False)
        yield test


@transforms.add
def split_apps(config, tests):
    """Fan a test out into one copy per entry in ``raptor.apps``.

    Each copy gets an ``app`` key, an app suffix on its test/try name and
    description, and (for apps listed in ``app_symbols``) an app suffix on
    its treeherder group.  Tests without ``raptor.apps`` pass through
    unchanged.
    """
    # Mapping of app name -> treeherder group suffix.
    app_symbols = {
        "chrome": "ChR",
        "chrome-m": "ChR",
        "fenix": "fenix",
        "refbrow": "refbrow",
        "safari": "Saf",
        "safari-tp": "STP",
        "custom-car": "CaR",
        "cstm-car-m": "CaR",
    }

    for test in tests:
        apps = test["raptor"].pop("apps", None)
        if not apps:
            yield test
            continue

        for app in apps:
            # Ignore variants for non-Firefox or non-mobile applications.
            if app not in [
                "firefox",
                "geckoview",
                "fenix",
                "chrome-m",
                "cstm-car-m",
            ] and test["attributes"].get("unittest_variant"):
                continue

            atest = deepcopy(test)
            suffix = f"-{app}"
            atest["app"] = app
            atest["description"] += f" on {app.capitalize()}"

            name = atest["test-name"] + suffix
            atest["test-name"] = name
            atest["try-name"] = name

            if app in app_symbols:
                group, symbol = split_symbol(atest["treeherder-symbol"])
                group += f"-{app_symbols[app]}"
                atest["treeherder-symbol"] = join_symbol(group, symbol)

            yield atest


@transforms.add
def handle_keyed_by_prereqs(config, tests):
    """
    Only resolve keys for prerequisite fields here, since these keyed-by
    options may themselves contain keyed-by fields (resolved later in
    ``handle_keyed_by``).
    """
    for test in tests:
        resolve_keyed_by(test, "raptor.subtests", item_name=test["test-name"])
        yield test


@transforms.add
def split_raptor_subtests(config, tests):
    """Create one test job per entry in ``raptor.subtests``.

    Each chunk gets ``chunk-number``, ``subtest``, and ``subtest-symbol``
    keys (consumed later by ``split_page_load_by_url``).  A subtest entry
    may be a plain string, or a ``[name, symbol]`` pair when the symbol
    differs from the name.
    """
    for test in tests:
        # For tests that have 'subtests' listed, we want to create a
        # separate test job for every subtest (i.e. split out each
        # page-load URL into its own job).
        subtests = test["raptor"].pop("subtests", None)
        if not subtests:
            # NOTE(review): subtest-less tests on macosx1400/macosx1500 are
            # silently dropped here — confirm this platform filter is still
            # intended.
            if all(
                p not in test["test-platform"] for p in ("macosx1400", "macosx1500")
            ):
                yield test
            continue

        for chunk_number, subtest in enumerate(subtests):
            # Create new test job
            chunked = deepcopy(test)
            chunked["chunk-number"] = 1 + chunk_number
            chunked["subtest"] = subtest
            chunked["subtest-symbol"] = subtest
            if isinstance(chunked["subtest"], list):
                # [name, symbol] pair: first element is the subtest name,
                # second the treeherder symbol.
                chunked["subtest"] = subtest[0]
                chunked["subtest-symbol"] = subtest[1]
            # Tier can be keyed by subtest, so resolve it per chunk;
            # variant-keyed resolution is deferred to handle_keyed_by.
            chunked = resolve_keyed_by(
                chunked, "tier", chunked["subtest"], defer=["variant"]
            )
            yield chunked


@transforms.add
def handle_keyed_by(config, tests):
    """Resolve the remaining keyed-by fields (deferring ``variant`` keys,
    which are resolved by the variant-splitting machinery)."""
    fields = [
        "raptor.test-url-param",
        "raptor.run-visual-metrics",
        "raptor.activity",
        "raptor.binary-path",
        "raptor.lull-schedule",
        "raptor.network-conditions",
        "limit-platforms",
        "fetches.fetch",
        "max-run-time",
        "run-on-projects",
        "target",
        "tier",
        "mozharness.extra-options",
    ]
    for test in tests:
        for field in fields:
            resolve_keyed_by(
                test, field, item_name=test["test-name"], defer=["variant"]
            )
        yield test


@transforms.add
def handle_network_conditions(config, tests):
    """Fan a subtest chunk out per ``(network_type, packet_loss_rate)``
    pair in ``raptor.network-conditions``.

    Each variant gets browsertime args for the condition, a condition
    suffix on its names/symbol, and drops its chunk/subtest keys so
    ``split_page_load_by_url`` passes it through.  The base (unconditioned)
    test is also yielded after the variants.
    """
    for test in tests:
        conditions = test["raptor"].pop("network-conditions", None)
        if not conditions:
            yield test
            continue

        for condition in conditions:
            new_test = deepcopy(test)
            network_type, packet_loss_rate = condition

            # Consume the chunk/subtest keys so split_page_load_by_url
            # does not rename this variant a second time.
            new_test.pop("chunk-number")
            subtest = new_test.pop("subtest")
            new_test["raptor"]["test"] = subtest

            group, _ = split_symbol(new_test["treeherder-symbol"])
            new_group = f"{group}-{network_type}"
            subtest_symbol = f"{new_test['subtest-symbol']}-{packet_loss_rate}"
            new_test["treeherder-symbol"] = join_symbol(new_group, subtest_symbol)

            mozharness = new_test.setdefault("mozharness", {})
            extra_options = mozharness.setdefault("extra-options", [])

            extra_options.extend([
                f"--browsertime-arg=network_type={network_type}",
                f"--browsertime-arg=pkt_loss_rate={packet_loss_rate}",
            ])

            new_test["test-name"] += f"-{subtest}-{network_type}-{packet_loss_rate}"
            new_test["try-name"] += f"-{subtest}-{network_type}-{packet_loss_rate}"
            # NOTE(review): the second fragment's leading space yields a
            # double space before the loss rate — confirm intended.
            new_test["description"] += (
                f" on {subtest} with {network_type} network type and "
                f" {packet_loss_rate} loss rate"
            )

            yield new_test

        yield test


@transforms.add
def split_page_load_by_url(config, tests):
    """Finish the per-subtest split: set ``raptor.test``, append the
    subtest to the names/description, and swap in the subtest's treeherder
    symbol.

    Symbols longer than 10 characters are rejected (except youtube-playback
    symbols, which contain "ytp").
    """
    for test in tests:
        # `chunk-number` and 'subtest' only exists when the task had a
        # definition for `subtests`
        chunk_number = test.pop("chunk-number", None)
        subtest = test.get(
            "subtest"
        )  # don't pop as some tasks need this value after splitting variants
        subtest_symbol = test.pop("subtest-symbol", None)

        if not chunk_number or not subtest:
            yield test
            continue

        if len(subtest_symbol) > 10 and "ytp" not in subtest_symbol:
            raise Exception(
                "Treeherder symbol %s is larger than 10 char! Please use a different symbol."
                % subtest_symbol
            )

        if test["test-name"].startswith("browsertime-"):
            test["raptor"]["test"] = subtest

            # Remove youtube-playback in the test name to avoid duplication
            test["test-name"] = test["test-name"].replace("youtube-playback-", "")
        else:
            # Use full test name if running on webextension
            test["raptor"]["test"] = "raptor-tp6-" + subtest + "-{}".format(test["app"])

        # Only run the subtest/single URL
        test["test-name"] += f"-{subtest}"
        test["try-name"] += f"-{subtest}"

        # Set treeherder symbol and description
        group, _ = split_symbol(test["treeherder-symbol"])
        test["treeherder-symbol"] = join_symbol(group, subtest_symbol)
        test["description"] += f" on {subtest}"

        yield test


@transforms.add
def modify_extra_options(config, tests):
    """Adjust mozharness extra-options for special test families:
    first-install (no conditioned profiles), widevine (youtube-settled
    profile), unity-webgl and chrome/custom-car jetstream (no extra
    profiler run).

    The pop-while-enumerating pattern below is safe only because every
    loop breaks immediately after mutating the list.
    """
    for test in tests:
        test_name = test.get("test-name", None)

        if "first-install" in test_name:
            # First-install tests should never use conditioned profiles
            extra_options = test.setdefault("mozharness", {}).setdefault(
                "extra-options", []
            )

            for i, opt in enumerate(extra_options):
                if "conditioned-profile" in opt:
                    # NOTE(review): `if i:` skips the removal when the
                    # matching option is at index 0; the jetstream branch
                    # below pops unconditionally — confirm this guard (and
                    # the two below) is intentional.
                    if i:
                        extra_options.pop(i)
                    break

        if "-widevine" in test_name:
            extra_options = test.setdefault("mozharness", {}).setdefault(
                "extra-options", []
            )
            for i, opt in enumerate(extra_options):
                if "--conditioned-profile=settled" in opt:
                    # NOTE(review): same index-0 skip as above — confirm.
                    if i:
                        extra_options[i] += "-youtube"
                    break

        if "unity-webgl" in test_name:
            # Disable the extra-profiler-run for unity-webgl tests.
            extra_options = test.setdefault("mozharness", {}).setdefault(
                "extra-options", []
            )
            for i, opt in enumerate(extra_options):
                if "extra-profiler-run" in opt:
                    # NOTE(review): same index-0 skip as above — confirm.
                    if i:
                        extra_options.pop(i)
                    break

        if "jetstream" in test_name and test.get("app", "") in ("chrome", "custom-car"):
            # Bug 1996836 - Disable jetstream 2/3 extra profile runs
            extra_options = test.setdefault("mozharness", {}).setdefault(
                "extra-options", []
            )
            for i, opt in enumerate(extra_options):
                if "extra-profiler-run" in opt:
                    extra_options.pop(i)
                    break

        yield test


@transforms.add
def add_extra_options(config, tests):
    """Build the bulk of the mozharness extra-options: device name on
    android hardware, visual-metrics flags, app/activity/binary-path/test
    selection, URL params, power testing, and the project flag."""
    for test in tests:
        mozharness = test.setdefault("mozharness", {})
        if test.get("app", "") == "chrome-m":
            # chrome-m APKs are fetched via internal tooltool.
            mozharness["tooltool-downloads"] = "internal"

        extra_options = mozharness.setdefault("extra-options", [])

        # Adding device name if we're on android
        test_platform = test["test-platform"]
        if test_platform.startswith("android-hw-a55"):
            extra_options.append("--device-name=a55")
        elif test_platform.startswith("android-hw-p5"):
            extra_options.append("--device-name=p5_aarch64")
        elif test_platform.startswith("android-hw-p6"):
            extra_options.append("--device-name=p6_aarch64")
        elif test_platform.startswith("android-hw-s24"):
            extra_options.append("--device-name=s24_aarch64")

        if test["raptor"].pop("run-visual-metrics", False):
            extra_options.append("--browsertime-video")
            extra_options.append("--browsertime-visualmetrics")
            test["attributes"]["run-visual-metrics"] = True

        if "app" in test:
            extra_options.append(
                "--app={}".format(test["app"])
            )  # don't pop as some tasks need this value after splitting variants

        if "activity" in test["raptor"]:
            extra_options.append("--activity={}".format(test["raptor"].pop("activity")))

        if "binary-path" in test["raptor"]:
            extra_options.append(
                "--binary-path={}".format(test["raptor"].pop("binary-path"))
            )

        if "test" in test["raptor"]:
            extra_options.append("--test={}".format(test["raptor"].pop("test")))

        if test["require-signed-extensions"]:
            extra_options.append("--is-release-build")

        if "test-url-param" in test["raptor"]:
            param = test["raptor"].pop("test-url-param")
            # NOTE(review): param is schema-typed as str, so `param == []`
            # can only be True for an unresolved keyed-by leftover — confirm.
            if not param == []:
                extra_options.append(
                    "--test-url-params={}".format(param.replace(" ", ""))
                )

        if (
            ("android-hw-p6" in test_platform or "android-hw-s24" in test_platform)
            and "speedometer-" not in test["test-name"]
            # Bug 1943674 resolve why --power-test causes permafails on certain mobile platforms and browsers
        ) or (
            "android-hw-a55" in test_platform
            and any(t in test["test-name"] for t in ("tp6", "speedometer3"))
            # Bug 1919024 remove tp6 and sp3 restrictions once benchmark parsing is done in the support scripts
        ):
            if "--power-test" not in extra_options:
                extra_options.append("--power-test")
        elif "windows" in test_platform and any(
            t in test["test-name"] for t in ("speedometer3", "tp6")
        ):
            extra_options.append("--power-test")

        extra_options.append("--project={}".format(config.params.get("project")))

        yield test


@transforms.add
def modify_mozharness_configs(config, tests):
    """Swap in external-browser mozharness configs (and an android target)
    for tests that run a non-Firefox browser.

    NOTE(review): unlike add_extra_options, this indexes test["app"]
    directly — a test that never went through split_apps would KeyError
    here; confirm every test reaching this transform has "app" set.
    """
    for test in tests:
        if not is_external_browser(test["app"]):
            yield test
            continue

        test_platform = test["test-platform"]
        mozharness = test.setdefault("mozharness", {})
        if "mac" in test_platform:
            mozharness["config"] = ["raptor/mac_external_browser_config.py"]
        elif "windows" in test_platform:
            mozharness["config"] = ["raptor/windows_external_browser_config.py"]
        elif "linux" in test_platform:
            mozharness["config"] = ["raptor/linux_external_browser_config.py"]
        elif "android" in test_platform:
            test["target"] = "target.tar.xz"
            mozharness["config"] = ["raptor/android_hw_external_browser_config.py"]

        yield test


@transforms.add
def handle_lull_schedule(config, tests):
    """Move a resolved ``raptor.lull-schedule`` value into the task
    attributes (attributes themselves cannot carry keyed-by settings)."""
    # Setup lull schedule attribute here since the attributes
    # can't have any keyed by settings
    for test in tests:
        if "lull-schedule" in test["raptor"]:
            lull_schedule = test["raptor"].pop("lull-schedule")
            if lull_schedule:
                test.setdefault("attributes", {})["lull-schedule"] = lull_schedule
        yield test


@transforms.add
def apply_raptor_device_optimization(config, tests):
    """Run A55 hardware tests only on backstop pushes to conserve the
    limited device pool."""
    # Bug 1919389
    # For now, only change the back stop optimization strategy for A55 devices
    for test in tests:
        if test["test-platform"].startswith("android-hw-a55"):
            test["optimization"] = {"skip-unless-backstop": None}
        yield test


@task_transforms.add
def add_scopes_and_proxy(config, tasks):
    """Enable the taskcluster proxy and grant access to the perftest-login
    secret on every raptor task."""
    for task in tasks:
        task.setdefault("worker", {})["taskcluster-proxy"] = True
        task.setdefault("scopes", []).append(
            "secrets:get:project/perftest/gecko/level-{level}/perftest-login"
        )
        yield task


@task_transforms.add
def setup_lull_schedule(config, tasks):
    """Relocate the ``lull-schedule`` attribute into ``extra`` so mozci
    can read it from the task definition."""
    for task in tasks:
        attrs = task.setdefault("attributes", {})
        if attrs.get("lull-schedule", None) is not None:
            # Move the lull schedule attribute into the extras
            # so that it can be accessible through mozci
            lull_schedule = attrs.pop("lull-schedule")
            task.setdefault("extra", {})["lull-schedule"] = lull_schedule
        yield task


@task_transforms.add
def setup_internal_artifacts(config, tasks):
    """Declare the ``perftest`` artifact directory; bitbar/lambda device
    pools use a different workspace path than other workers."""
    for task in tasks:
        if (
            task["worker"]["os"] == "linux-bitbar"
            or task["worker"]["os"] == "linux-lambda"
        ):
            task["worker"].setdefault("artifacts", []).append({
                "name": "perftest",
                "path": "workspace/build/perftest",
                "type": "directory",
            })
        else:
            task["worker"].setdefault("artifacts", []).append({
                "name": "perftest",
                "path": "build/perftest",
                "type": "directory",
            })
        yield task


@task_transforms.add
def select_tasks_to_lambda(config, tasks):
    """
    all motionmark tests
    unity-webgl test
    all non-power-testing youtube-playback tests
    all vpl (video-playback-latency) tests
    all pageload tests (ideally fenix/CaR/ChR)

    Rewrites matching android a55 bitbar tasks to run on the LambdaTest
    worker pool instead: worker-type, env, script path, and dropping
    conditioned-profile options.
    """
    tests_to_run_at_lambdatest = [
        "motionmark-1-3",
        "motionmark-htmlsuite-1-3",
        "unity-webgl",
        "video-playback-latency",
        "youtube-playback-av1-sfr",
        "youtube-playback-hfr",
        "youtube-playback-vp9-sfr",
        "tp6m",
    ]

    for task in tasks:
        if "android" in task["label"] and "a55" in task["label"]:
            if any([t in task["label"] for t in tests_to_run_at_lambdatest]):
                if task["worker-type"] == "t-bitbar-gw-perf-a55":
                    task["tags"]["os"] = "linux-lambda"
                    task["worker"]["os"] = "linux-lambda"
                    task["worker-type"] = "t-lambda-perf-a55"
                    task["worker"]["env"]["TASKCLUSTER_WORKER_TYPE"] = (
                        "t-lambda-perf-a55"
                    )
                    cmds = []
                    for cmd in task["worker"]["command"]:
                        # Bug 1981862 - issues with condprof setup @ lambdatest
                        cmds.append([
                            c.replace(
                                "/builds/taskcluster/script.py",
                                "/home/ltuser/taskcluster/script.py",
                            )
                            for c in cmd
                            if not c.startswith("--conditioned-profile")
                        ])
                    task["worker"]["command"] = cmds
                    task["worker"]["env"]["DISABLE_USB_POWER_METER_RESET"] = "1"
        yield task