desktop_unittest.py (64007B)
1 #!/usr/bin/env python 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 5 # You can obtain one at http://mozilla.org/MPL/2.0/. 6 7 import copy 8 import glob 9 import json 10 import multiprocessing 11 import os 12 import re 13 import shutil 14 import sys 15 from datetime import datetime, timedelta 16 17 # load modules from parent dir 18 here = os.path.abspath(os.path.dirname(__file__)) 19 sys.path.insert(1, os.path.dirname(here)) 20 21 import threading 22 23 from mozfile import load_source 24 from mozharness.base.errors import BaseErrorList 25 from mozharness.base.log import INFO, WARNING 26 from mozharness.base.script import PreScriptAction 27 from mozharness.base.vcs.vcsbase import MercurialScript 28 from mozharness.mozilla.automation import TBPL_EXCEPTION, TBPL_RETRY 29 from mozharness.mozilla.mozbase import MozbaseMixin 30 from mozharness.mozilla.structuredlog import StructuredOutputParser 31 from mozharness.mozilla.testing.codecoverage import ( 32 CodeCoverageMixin, 33 code_coverage_config_options, 34 ) 35 from mozharness.mozilla.testing.errors import HarnessErrorList 36 from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options 37 from mozharness.mozilla.testing.unittest import DesktopUnittestOutputParser 38 39 SUITE_CATEGORIES = [ 40 "gtest", 41 "cppunittest", 42 "jittest", 43 "mochitest", 44 "reftest", 45 "xpcshell", 46 ] 47 SUITE_DEFAULT_E10S = ["mochitest", "reftest"] 48 SUITE_NO_E10S = ["xpcshell"] 49 SUITE_REPEATABLE = ["mochitest", "reftest", "xpcshell"] 50 SUITE_INSTALL_EXTENSIONS = ["mochitest"] 51 52 53 # DesktopUnittest {{{1 54 class DesktopUnittest(TestingMixin, MercurialScript, MozbaseMixin, CodeCoverageMixin): 55 config_options = ( 56 [ 57 [ 58 [ 59 "--mochitest-suite", 60 ], 61 { 62 "action": "extend", 63 "dest": "specified_mochitest_suites", 64 "type": "string", 65 "help": "Specify which mochi suite to run. 
" 66 "Suites are defined in the config file.\n" 67 "Examples: 'all', 'plain1', 'plain5', 'chrome', or 'a11y'", 68 }, 69 ], 70 [ 71 [ 72 "--reftest-suite", 73 ], 74 { 75 "action": "extend", 76 "dest": "specified_reftest_suites", 77 "type": "string", 78 "help": "Specify which reftest suite to run. " 79 "Suites are defined in the config file.\n" 80 "Examples: 'all', 'crashplan', or 'jsreftest'", 81 }, 82 ], 83 [ 84 [ 85 "--xpcshell-suite", 86 ], 87 { 88 "action": "extend", 89 "dest": "specified_xpcshell_suites", 90 "type": "string", 91 "help": "Specify which xpcshell suite to run. " 92 "Suites are defined in the config file\n." 93 "Examples: 'xpcshell'", 94 }, 95 ], 96 [ 97 [ 98 "--cppunittest-suite", 99 ], 100 { 101 "action": "extend", 102 "dest": "specified_cppunittest_suites", 103 "type": "string", 104 "help": "Specify which cpp unittest suite to run. " 105 "Suites are defined in the config file\n." 106 "Examples: 'cppunittest'", 107 }, 108 ], 109 [ 110 [ 111 "--gtest-suite", 112 ], 113 { 114 "action": "extend", 115 "dest": "specified_gtest_suites", 116 "type": "string", 117 "help": "Specify which gtest suite to run. " 118 "Suites are defined in the config file\n." 119 "Examples: 'gtest'", 120 }, 121 ], 122 [ 123 [ 124 "--jittest-suite", 125 ], 126 { 127 "action": "extend", 128 "dest": "specified_jittest_suites", 129 "type": "string", 130 "help": "Specify which jit-test suite to run. " 131 "Suites are defined in the config file\n." 132 "Examples: 'jittest'", 133 }, 134 ], 135 [ 136 [ 137 "--run-all-suites", 138 ], 139 { 140 "action": "store_true", 141 "dest": "run_all_suites", 142 "default": False, 143 "help": "This will run all suites that are specified " 144 "in the config file. 
You do not need to specify " 145 "any other suites.\nBeware, this may take a while ;)", 146 }, 147 ], 148 [ 149 [ 150 "--disable-e10s", 151 ], 152 { 153 "action": "store_false", 154 "dest": "e10s", 155 "default": True, 156 "help": "Run tests without multiple processes (e10s).", 157 }, 158 ], 159 [ 160 [ 161 "--headless", 162 ], 163 { 164 "action": "store_true", 165 "dest": "headless", 166 "default": False, 167 "help": "Run tests in headless mode.", 168 }, 169 ], 170 [ 171 [ 172 "--no-random", 173 ], 174 { 175 "action": "store_true", 176 "dest": "no_random", 177 "default": False, 178 "help": "Run tests with no random intermittents and bisect in case of real failure.", # NOQA: E501 179 }, 180 ], 181 [ 182 ["--total-chunks"], 183 { 184 "action": "store", 185 "dest": "total_chunks", 186 "help": "Number of total chunks", 187 }, 188 ], 189 [ 190 ["--this-chunk"], 191 { 192 "action": "store", 193 "dest": "this_chunk", 194 "help": "Number of this chunk", 195 }, 196 ], 197 [ 198 ["--timeout-factor"], 199 { 200 "action": "store", 201 "dest": "timeout_factor", 202 "help": "Multiplier for test timeout values", 203 }, 204 ], 205 [ 206 ["--filter"], 207 { 208 "action": "store", 209 "dest": "filter", 210 "default": "", 211 "help": "Specify a regular expression (as could be passed " 212 "to the JS RegExp constructor) to test against URLs in " 213 "the manifest; only test items that have a matching test " 214 "URL will be run.", 215 }, 216 ], 217 [ 218 ["--allow-software-gl-layers"], 219 { 220 "action": "store_true", 221 "dest": "allow_software_gl_layers", 222 "default": False, 223 "help": "Permits a software GL implementation (such as LLVMPipe) to use " 224 "the GL compositor.", 225 }, 226 ], 227 [ 228 ["--enable-inc-origin-init"], 229 { 230 "action": "store_true", 231 "dest": "enable_inc_origin_init", 232 "default": False, 233 "help": "Enable the incremental origin initialization in Gecko.", 234 }, 235 ], 236 [ 237 ["--filter-set"], 238 { 239 "action": "store", 240 "dest": 
"filter_set", 241 "default": "", 242 "help": "Use a predefined filter.", 243 }, 244 ], 245 [ 246 ["--threads"], 247 { 248 "action": "store", 249 "dest": "threads", 250 "help": "Number of total chunks", 251 }, 252 ], 253 [ 254 ["--variant"], 255 { 256 "action": "store", 257 "dest": "variant", 258 "default": "", 259 "help": "specify a variant if mozharness needs to setup paths", 260 }, 261 ], 262 [ 263 ["--gpu-required"], 264 { 265 "action": "store_true", 266 "dest": "gpu_required", 267 "default": False, 268 "help": "Run additional verification on modified tests using gpu instances.", 269 }, 270 ], 271 [ 272 ["--setpref"], 273 { 274 "action": "append", 275 "metavar": "PREF=VALUE", 276 "dest": "extra_prefs", 277 "default": [], 278 "help": "Defines an extra user preference.", 279 }, 280 ], 281 [ 282 [ 283 "--repeat", 284 ], 285 { 286 "action": "store", 287 "type": "int", 288 "dest": "repeat", 289 "default": 0, 290 "help": "Repeat the tests the given number of times. Supported " 291 "by mochitest, reftest, crashtest, ignored otherwise.", 292 }, 293 ], 294 [ 295 ["--enable-xorigin-tests"], 296 { 297 "action": "store_true", 298 "dest": "enable_xorigin_tests", 299 "default": False, 300 "help": "Run tests in a cross origin iframe.", 301 }, 302 ], 303 [ 304 ["--enable-a11y-checks"], 305 { 306 "action": "store_true", 307 "default": False, 308 "dest": "a11y_checks", 309 "help": "Run tests with accessibility checks enabled.", 310 }, 311 ], 312 [ 313 ["--run-failures"], 314 { 315 "action": "store", 316 "default": "", 317 "type": "string", 318 "dest": "run_failures", 319 "help": "Run only failures matching keyword. 
" 320 "Examples: 'apple_silicon'", 321 }, 322 ], 323 [ 324 ["--crash-as-pass"], 325 { 326 "action": "store_true", 327 "default": False, 328 "dest": "crash_as_pass", 329 "help": "treat harness level crash as a pass", 330 }, 331 ], 332 [ 333 ["--timeout-as-pass"], 334 { 335 "action": "store_true", 336 "default": False, 337 "dest": "timeout_as_pass", 338 "help": "treat harness level timeout as a pass", 339 }, 340 ], 341 [ 342 ["--disable-fission"], 343 { 344 "action": "store_true", 345 "default": False, 346 "dest": "disable_fission", 347 "help": "do not run tests with fission enabled.", 348 }, 349 ], 350 [ 351 ["--conditioned-profile"], 352 { 353 "action": "store_true", 354 "default": False, 355 "dest": "conditioned_profile", 356 "help": "run tests with a conditioned profile", 357 }, 358 ], 359 [ 360 ["--tag"], 361 { 362 "action": "append", 363 "default": [], 364 "dest": "test_tags", 365 "help": "Filter out tests that don't have the given tag. Can be used multiple " 366 "times in which case the test must contain at least one of the given tags.", 367 }, 368 ], 369 [ 370 ["--use-http3-server"], 371 { 372 "action": "store_true", 373 "default": False, 374 "dest": "useHttp3Server", 375 "help": "Whether to use the Http3 server", 376 }, 377 ], 378 [ 379 ["--use-http2-server"], 380 { 381 "action": "store_true", 382 "default": False, 383 "dest": "useHttp2Server", 384 "help": "Whether to use the Http2 server", 385 }, 386 ], 387 [ 388 ["--mochitest-flavor"], 389 { 390 "action": "store", 391 "dest": "mochitest_flavor", 392 "help": "Specify which mochitest flavor to run." 393 "Examples: 'plain', 'browser'", 394 }, 395 ], 396 [ 397 ["--install-extension"], 398 { 399 "action": "append", 400 "default": [], 401 "dest": "install_extension", 402 "help": "Specify one or more extensions to install in the testing profile." 403 "This is currently only supported for mochitest tests, and is" 404 "ignored for other types. 
Paths are relative to the fetches" 405 "directory.", 406 }, 407 ], 408 ] 409 + copy.deepcopy(testing_config_options) 410 + copy.deepcopy(code_coverage_config_options) 411 ) 412 413 def __init__(self, require_config_file=True): 414 # abs_dirs defined already in BaseScript but is here to make pylint happy 415 self.abs_dirs = None 416 super().__init__( 417 config_options=self.config_options, 418 all_actions=[ 419 "clobber", 420 "download-and-extract", 421 "create-virtualenv", 422 "start-pulseaudio", 423 "unlock-keyring", 424 "install", 425 "stage-files", 426 "run-tests", 427 "uninstall", 428 ], 429 require_config_file=require_config_file, 430 config={"require_test_zip": True}, 431 ) 432 433 c = self.config 434 self.global_test_options = [] 435 self.installer_url = c.get("installer_url") 436 self.test_url = c.get("test_url") 437 self.test_packages_url = c.get("test_packages_url") 438 self.symbols_url = c.get("symbols_url") 439 # this is so mozinstall in install() doesn't bug out if we don't run 440 # the download_and_extract action 441 self.installer_path = c.get("installer_path") 442 self.binary_path = c.get("binary_path") 443 self.abs_app_dir = None 444 self.abs_res_dir = None 445 self.mochitest_flavor = c.get("mochitest_flavor", None) 446 447 # Construct an identifier to be used to identify Perfherder data 448 # for resource monitoring recording. This attempts to uniquely 449 # identify this test invocation configuration. 
        # Assemble the Perfherder resource-monitoring identifier from the
        # selected suites, the chunk number, and the e10s option.
        perfherder_parts = []
        perfherder_options = []
        suites = (
            ("specified_mochitest_suites", "mochitest"),
            ("specified_reftest_suites", "reftest"),
            ("specified_xpcshell_suites", "xpcshell"),
            ("specified_cppunittest_suites", "cppunit"),
            ("specified_gtest_suites", "gtest"),
            ("specified_jittest_suites", "jittest"),
        )
        for s, prefix in suites:
            if s in c:
                perfherder_parts.append(prefix)
                perfherder_parts.extend(c[s])

        if "this_chunk" in c:
            perfherder_parts.append(c["this_chunk"])

        if c["e10s"]:
            perfherder_options.append("e10s")

        self.resource_monitor_perfherder_id = (
            ".".join(perfherder_parts),
            perfherder_options,
        )

    # helper methods {{{2
    def _pre_config_lock(self, rw_config):
        """Validate the suite selection before the config is frozen.

        '--run-all-suites' is mutually exclusive with naming individual
        suites; abort with a fatal error if both were given.
        """
        super()._pre_config_lock(rw_config)
        c = self.config
        if not c.get("run_all_suites"):
            return  # configs are valid
        for category in SUITE_CATEGORIES:
            specific_suites = c.get("specified_%s_suites" % (category))
            if specific_suites:
                if specific_suites != "all":
                    self.fatal(
                        "Config options are not valid. Please ensure"
                        " that if the '--run-all-suites' flag was enabled,"
                        " then do not specify to run only specific suites "
                        "like:\n '--mochitest-suite browser-chrome'"
                    )

    def query_abs_dirs(self):
        """Return (and memoize in self.abs_dirs) the dict of absolute
        directories used by this script (work dir, per-suite test dirs,
        blob upload dir, virtualenv dir, ...)."""
        if self.abs_dirs:
            return self.abs_dirs
        abs_dirs = super().query_abs_dirs()

        c = self.config
        dirs = {}
        dirs["abs_work_dir"] = abs_dirs["abs_work_dir"]
        dirs["abs_app_install_dir"] = os.path.join(
            abs_dirs["abs_work_dir"], "application"
        )
        dirs["abs_test_install_dir"] = os.path.join(abs_dirs["abs_work_dir"], "tests")
        dirs["abs_test_extensions_dir"] = os.path.join(
            dirs["abs_test_install_dir"], "extensions"
        )
        dirs["abs_test_bin_dir"] = os.path.join(dirs["abs_test_install_dir"], "bin")
        dirs["abs_test_bin_plugins_dir"] = os.path.join(
            dirs["abs_test_bin_dir"], "plugins"
        )
        dirs["abs_test_bin_components_dir"] = os.path.join(
            dirs["abs_test_bin_dir"], "components"
        )
        dirs["abs_mochitest_dir"] = os.path.join(
            dirs["abs_test_install_dir"], "mochitest"
        )
        dirs["abs_reftest_dir"] = os.path.join(dirs["abs_test_install_dir"], "reftest")
        dirs["abs_xpcshell_dir"] = os.path.join(
            dirs["abs_test_install_dir"], "xpcshell"
        )
        dirs["abs_cppunittest_dir"] = os.path.join(
            dirs["abs_test_install_dir"], "cppunittest"
        )
        dirs["abs_gtest_dir"] = os.path.join(dirs["abs_test_install_dir"], "gtest")
        dirs["abs_blob_upload_dir"] = os.path.join(
            abs_dirs["abs_work_dir"], "blobber_upload_dir"
        )
        dirs["abs_jittest_dir"] = os.path.join(
            dirs["abs_test_install_dir"], "jit-test", "jit-test"
        )

        # virtualenv_path may be absolute or relative to the work dir
        if os.path.isabs(c["virtualenv_path"]):
            dirs["abs_virtualenv_dir"] = c["virtualenv_path"]
        else:
            dirs["abs_virtualenv_dir"] = os.path.join(
                abs_dirs["abs_work_dir"], c["virtualenv_path"]
            )
        abs_dirs.update(dirs)
        self.abs_dirs = abs_dirs

        return self.abs_dirs

    def query_abs_app_dir(self):
        """We can't set this in advance, because OSX install directories
        change depending on branding and opt/debug.
        """
        if self.abs_app_dir:
            return self.abs_app_dir
        if not self.binary_path:
            self.fatal("Can't determine abs_app_dir (binary_path not set!)")
        self.abs_app_dir = os.path.dirname(self.binary_path)
        return self.abs_app_dir

    def query_abs_res_dir(self):
        """The directory containing resources like plugins and extensions. On
        OSX this is Contents/Resources, on all other platforms its the same as
        the app dir.

        As with the app dir, we can't set this in advance, because OSX install
        directories change depending on branding and opt/debug.
        """
        if self.abs_res_dir:
            return self.abs_res_dir

        abs_app_dir = self.query_abs_app_dir()
        if self._is_darwin():
            res_subdir = self.config.get("mac_res_subdir", "Resources")
            self.abs_res_dir = os.path.join(os.path.dirname(abs_app_dir), res_subdir)
        else:
            self.abs_res_dir = abs_app_dir
        return self.abs_res_dir

    @PreScriptAction("create-virtualenv")
    def _pre_create_virtualenv(self, action):
        """Register the virtualenv modules and pip requirements files needed
        by the selected suites before the virtualenv is created."""
        dirs = self.query_abs_dirs()

        self.register_virtualenv_module(name="mock")
        self.register_virtualenv_module(name="simplejson")

        requirements_files = [
            os.path.join(
                dirs["abs_test_install_dir"], "config", "marionette_requirements.txt"
            )
        ]

        if self._query_specified_suites("mochitest", "mochitest-media") is not None:
            # mochitest-media is the only thing that needs this
            requirements_files.append(
                os.path.join(
                    dirs["abs_mochitest_dir"],
                    "websocketprocessbridge",
                    "websocketprocessbridge_requirements_3.txt",
                )
            )

        if (
            self._query_specified_suites("mochitest", "mochitest-browser-a11y")
            is not None
            and sys.platform == "win32"
        ):
            # Only Windows a11y browser tests need this.
            requirements_files.append(
                os.path.join(
                    dirs["abs_mochitest_dir"],
                    "browser",
                    "accessible",
                    "tests",
                    "browser",
                    "windows",
                    "a11y_setup_requirements.txt",
                )
            )

        for requirements_file in requirements_files:
            self.register_virtualenv_module(requirements=[requirements_file])

        # On Windows, point multiprocessing's worker spawner at the
        # virtualenv's interpreter if it exists.
        _python_interp = self.query_exe("python")
        if "win" in self.platform_name() and os.path.exists(_python_interp):
            multiprocessing.set_executable(_python_interp)

    def _query_symbols_url(self):
        """query the full symbols URL based upon binary URL"""
        # may break with name convention changes but is one less 'input' for script
        if self.symbols_url:
            return self.symbols_url

        # Use simple text substitution to determine the symbols_url from the
        # installer_url. This will not always work: For signed builds, the
        # installer_url is likely an artifact in a signing task, which may not
        # have a symbols artifact. It might be better to use the test target
        # preferentially, like query_prefixed_build_dir_url() does (for future
        # consideration, if this code proves troublesome).
        symbols_url = None
        self.info("finding symbols_url based upon self.installer_url")
        if self.installer_url:
            for ext in [".zip", ".dmg", ".tar.bz2", ".tar.xz"]:
                if ext in self.installer_url:
                    symbols_url = self.installer_url.replace(
                        ext, ".crashreporter-symbols.zip"
                    )
            if not symbols_url:
                self.fatal(
                    "self.installer_url was found but symbols_url could \
                not be determined"
                )
        else:
            self.fatal("self.installer_url was not found in self.config")
        self.info("setting symbols_url as %s" % (symbols_url))
        self.symbols_url = symbols_url
        return self.symbols_url

    def _get_mozharness_test_paths(self, suite_category, suite):
        """Return the list of test paths/groups for *suite* from the
        MOZHARNESS_TEST_PATHS / MOZHARNESS_CONFIRM_PATHS environment
        variables, or None if none apply to this suite."""
        # test_paths is the group name, confirm_paths can be the path+testname
        # test_paths will always be the group name, unrelated to if confirm_paths is set or not.
        test_paths = json.loads(os.environ.get("MOZHARNESS_TEST_PATHS", '""'))
        confirm_paths = json.loads(os.environ.get("MOZHARNESS_CONFIRM_PATHS", '""'))

        # strip a trailing "-coverage" so coverage variants map to the base suite
        if "-coverage" in suite:
            suite = suite[: suite.index("-coverage")]

        if not test_paths or suite not in test_paths:
            return None

        suite_test_paths = test_paths[suite]
        if confirm_paths and suite in confirm_paths and confirm_paths[suite]:
            suite_test_paths = confirm_paths[suite]

        # reftest paths are relative to the reftest tests directory
        if suite_category == "reftest":
            dirs = self.query_abs_dirs()
            suite_test_paths = [
                os.path.join(dirs["abs_reftest_dir"], "tests", p)
                for p in suite_test_paths
            ]

        return suite_test_paths

    def _query_abs_base_cmd(self, suite_category, suite):
        """Build the base command line (python runner + harness options) for
        the given suite, translating config flags into harness arguments.
        Requires self.binary_path to be set (fatal otherwise)."""
        if self.binary_path:
            c = self.config
            dirs = self.query_abs_dirs()
            run_file = c["run_file_names"][suite_category]
            base_cmd = [self.query_python_path("python"), "-u"]
            base_cmd.append(os.path.join(dirs["abs_%s_dir" % suite_category], run_file))
            abs_app_dir = self.query_abs_app_dir()
            abs_res_dir = self.query_abs_res_dir()

            raw_log_file, error_summary_file = self.get_indexed_logs(
                dirs["abs_blob_upload_dir"], suite
            )

            # values substituted into the per-suite option templates below
            str_format_values = {
                "binary_path": self.binary_path,
                "symbols_path": self._query_symbols_url(),
                "abs_work_dir": dirs["abs_work_dir"],
                "abs_app_dir": abs_app_dir,
                "abs_res_dir": abs_res_dir,
                "raw_log_file": raw_log_file,
                "error_summary_file": error_summary_file,
                "gtest_dir": os.path.join(dirs["abs_test_install_dir"], "gtest"),
            }

            if self.mochitest_flavor:
                str_format_values.update({"mochitest_flavor": self.mochitest_flavor})

            # TestingMixin._download_and_extract_symbols() will set
            # self.symbols_path when downloading/extracting.
            if self.symbols_path:
                str_format_values["symbols_path"] = self.symbols_path

            if suite_category not in SUITE_NO_E10S:
                if suite_category in SUITE_DEFAULT_E10S and not c["e10s"]:
                    base_cmd.append("--disable-e10s")
                elif suite_category not in SUITE_DEFAULT_E10S and c["e10s"]:
                    base_cmd.append("--e10s")
            if c.get("repeat"):
                if suite_category in SUITE_REPEATABLE:
                    base_cmd.extend(["--repeat=%s" % c.get("repeat")])
                else:
                    self.log(
                        f"--repeat not supported in {suite_category}",
                        level=WARNING,
                    )

            if suite_category in SUITE_INSTALL_EXTENSIONS and len(
                c.get("install_extension", [])
            ):
                fetches_dir = os.environ.get("MOZ_FETCHES_DIR", '""')
                base_cmd.extend([
                    f"--install-extension={os.path.join(fetches_dir, e)}"
                    for e in c["install_extension"]
                ])

            # do not add --disable fission if we don't have --disable-e10s
            if c["disable_fission"] and suite_category not in [
                "gtest",
                "cppunittest",
                "jittest",
            ]:
                base_cmd.append("--disable-fission")

            if c["useHttp3Server"]:
                base_cmd.append("--use-http3-server")
            elif c["useHttp2Server"]:
                base_cmd.append("--use-http2-server")

            if c["restartAfterFailure"]:
                base_cmd.append("--restart-after-failure")

            # Ignore chunking if we have user specified test paths
            if not (self.verify_enabled or self.per_test_coverage):
                test_paths = self._get_mozharness_test_paths(suite_category, suite)
                if test_paths or c["test_tags"]:
                    if test_paths:
                        base_cmd.extend(test_paths)
                    if c["test_tags"]:
                        # Exclude suites that don't support --tag to prevent
                        # errors caused by passing unknown argument.
                        # Note there's a similar list in chunking.py in
                        # DefaultLoader's get_manifest method. The lists should
                        # be kept in sync.
                        if suite_category not in [
                            "gtest",
                            "cppunittest",
                            "jittest",
                            "crashtest",
                            "crashtest-qr",
                            "jsreftest",
                            "reftest",
                            "reftest-qr",
                        ]:
                            base_cmd.extend([f"--tag={t}" for t in c["test_tags"]])
                        else:
                            # NOTE(review): this string is missing an f-prefix,
                            # so "{suite_category}" is emitted literally - confirm
                            # and fix upstream.
                            self.warning(
                                "--tag does not currently work with the "
                                "'{suite_category}' suite."
                            )
                elif c.get("total_chunks") and c.get("this_chunk"):
                    base_cmd.extend([
                        "--total-chunks",
                        c["total_chunks"],
                        "--this-chunk",
                        c["this_chunk"],
                    ])

            if c.get("timeout_factor"):
                base_cmd.extend(["--timeout-factor", c["timeout_factor"]])

            if c["no_random"]:
                if suite_category == "mochitest":
                    base_cmd.append("--bisect-chunk=default")
                else:
                    self.warning(
                        "--no-random does not currently work with suites other than "
                        "mochitest."
                    )

            if c.get("mochitest_flavor", None):
                base_cmd.append("--flavor={}".format(c["mochitest_flavor"]))

            if c["headless"]:
                base_cmd.append("--headless")

            if c["filter"]:
                if suite_category == "reftest":
                    base_cmd.append("--filter={}".format(c["filter"]))
                else:
                    self.warning(
                        "--filter does not currently work with suites other than "
                        "reftest."
                    )

            if c["enable_inc_origin_init"]:
                if suite_category == "gtest":
                    base_cmd.append("--enable-inc-origin-init")
                else:
                    self.warning(
                        "--enable-inc-origin-init does not currently work with "
                        "suites other than gtest."
                    )

            if c["filter_set"]:
                if suite_category == "gtest":
                    base_cmd.append("--filter-set={}".format(c["filter_set"]))
                else:
                    self.warning(
                        "--filter-set does not currently work with suites other then "
                        "gtest."
                    )

            if c.get("threads"):
                base_cmd.extend(["--threads", c["threads"]])

            if c["variant"]:
                base_cmd.append("--variant={}".format(c["variant"]))

            if c["enable_xorigin_tests"]:
                base_cmd.append("--enable-xorigin-tests")

            if suite_category not in ["cppunittest", "gtest", "jittest"]:
                # Enable stylo threads everywhere we can. Some tests don't
                # support --setpref, so ignore those.
                base_cmd.append("--setpref=layout.css.stylo-threads=4")

            if c["extra_prefs"]:
                base_cmd.extend([f"--setpref={p}" for p in c["extra_prefs"]])

            if c["a11y_checks"]:
                base_cmd.append("--enable-a11y-checks")

            if c["run_failures"]:
                base_cmd.extend(["--run-failures={}".format(c["run_failures"])])

            if c["timeout_as_pass"]:
                base_cmd.append("--timeout-as-pass")

            if c["crash_as_pass"]:
                base_cmd.append("--crash-as-pass")

            if c["conditioned_profile"]:
                base_cmd.append("--conditioned-profile")

            if suite_category not in c["suite_definitions"]:
                # NOTE(review): the "%s" placeholder is never interpolated;
                # should presumably be `... % suite_category` - confirm upstream.
                self.fatal("'%s' not defined in the config!")

            if suite in (
                "browser-chrome-coverage",
                "xpcshell-coverage",
                "mochitest-devtools-chrome-coverage",
                "plain-coverage",
            ):
                base_cmd.append("--jscov-dir-prefix=%s" % dirs["abs_blob_upload_dir"])

            # Expand the per-suite option templates with the values gathered
            # above; options that render to "...None" are dropped.
            options = c["suite_definitions"][suite_category]["options"]
            if options:
                for option in options:
                    option = option % str_format_values
                    if not option.endswith("None"):
                        base_cmd.append(option)
                if self.structured_output(
                    suite_category, self._query_try_flavor(suite_category, suite)
                ):
                    base_cmd.append("--log-raw=-")
                return base_cmd
            else:
                self.warning(
                    "Suite options for %s could not be determined."
                    "\nIf you meant to have options for this suite, "
                    "please make sure they are specified in your "
                    "config under %s_options" % (suite_category, suite_category)
                )

                return base_cmd
        else:
            self.fatal(
                "'binary_path' could not be determined.\n This should "
                "be like '/path/build/application/firefox/firefox'"
                "\nIf you are running this script without the 'install' "
                "action (where binary_path is set), please ensure you are"
                " either:\n(1) specifying it in the config file under "
                "binary_path\n(2) specifying it on command line with the"
                " '--binary-path' flag"
            )

    def _query_specified_suites(self, category, sub_category=None):
        """Checks if the provided suite does indeed exist.

        If at least one suite was given and if it does exist, return the suite
        as legitimate and line it up for execution.

        Otherwise, do not run any suites and return a fatal error.
        """
        c = self.config
        all_suites = c.get(f"all_{category}_suites", None)
        specified_suites = c.get(f"specified_{category}_suites", None)

        # Bug 1603842 - disallow selection of more than 1 suite at at time
        if specified_suites is None:
            # Path taken by test-verify
            return self.query_per_test_category_suites(category, all_suites)
        if specified_suites and len(specified_suites) > 1:
            self.fatal(
                """Selection of multiple suites is not permitted. \
                Please select at most 1 test suite."""
            )
            return

        # Normal path taken by most test suites as only one suite is specified
        suite = specified_suites[0]
        if suite not in all_suites:
            self.fatal("""Selected suite does not exist!""")

        # allow for fine grain suite selection
        ret_val = all_suites[suite]
        if sub_category in all_suites:
            if all_suites[sub_category] != ret_val:
                return None

        return {suite: ret_val}

    def _query_try_flavor(self, category, suite):
        """Map a suite name to its try flavor via the regex table below;
        returns None when no pattern matches."""
        flavors = {
            "mochitest": [
                ("plain.*", "mochitest"),
                ("browser-chrome.*", "browser-chrome"),
                ("mochitest-browser-a11y.*", "browser-a11y"),
                ("mochitest-browser-media.*", "browser-media"),
                ("mochitest-browser-translations.*", "browser-translations"),
                ("mochitest-devtools-chrome.*", "devtools-chrome"),
                ("chrome", "chrome"),
            ],
            "xpcshell": [("xpcshell", "xpcshell")],
            "reftest": [("reftest", "reftest"), ("crashtest", "crashtest")],
        }
        for suite_pattern, flavor in flavors.get(category, []):
            if re.compile(suite_pattern).match(suite):
                return flavor

    def structured_output(self, suite_category, flavor=None):
        """Return True unless the config's 'unstructured_flavors' entry marks
        this category/flavor as producing unstructured log output."""
        unstructured_flavors = self.config.get("unstructured_flavors")
        if not unstructured_flavors:
            return True
        if suite_category not in unstructured_flavors:
            return True
        if not unstructured_flavors.get(
            suite_category
        ) or flavor in unstructured_flavors.get(suite_category):
            return False
        return True

    def get_test_output_parser(
        self, suite_category, flavor=None, strict=False, **kwargs
    ):
        """Return the structured or unstructured output parser appropriate
        for this suite category/flavor."""
        if not self.structured_output(suite_category, flavor):
            return DesktopUnittestOutputParser(suite_category=suite_category, **kwargs)
        self.info("Structured output parser in use for %s." % suite_category)
        return StructuredOutputParser(
            suite_category=suite_category, strict=strict, **kwargs
        )

    # Actions {{{2

    # clobber defined in BaseScript, deletes mozharness/build if exists
    # preflight_download_and_extract is in TestingMixin.
    # create_virtualenv is in VirtualenvMixin.
    # preflight_install is in TestingMixin.
    # install is in TestingMixin.

    @PreScriptAction("download-and-extract")
    def _pre_download_and_extract(self, action):
        """Abort if --artifact try syntax is used with compiled-code tests"""
        dir = self.query_abs_dirs()["abs_blob_upload_dir"]
        self.mkdir_p(dir)

        if not self.try_message_has_flag("artifact"):
            return
        self.info("Artifact build requested in try syntax.")
        rejected = []
        compiled_code_suites = [
            "cppunit",
            "gtest",
            "jittest",
        ]
        for category in SUITE_CATEGORIES:
            suites = self._query_specified_suites(category) or []
            for suite in suites:
                if any([suite.startswith(c) for c in compiled_code_suites]):
                    rejected.append(suite)
                    break
        if rejected:
            self.record_status(TBPL_EXCEPTION)
            self.fatal(
                "There are specified suites that are incompatible with "
                "--artifact try syntax flag: {}".format(", ".join(rejected)),
                exit_code=self.return_code,
            )

    def download_and_extract(self):
        """
        download and extract test zip / download installer
        optimizes which subfolders to extract from tests archive
        """
        c = self.config

        extract_dirs = None

        if c.get("run_all_suites"):
            target_categories = SUITE_CATEGORIES
        else:
            target_categories = [
                cat
                for cat in SUITE_CATEGORIES
                if self._query_specified_suites(cat) is not None
            ]
        super().download_and_extract(
            extract_dirs=extract_dirs, suite_categories=target_categories
        )

    def unlock_keyring(self):
        """Restart and unlock gnome-keyring-daemon when the task environment
        requests it (NEED_GNOME_KEYRING=true)."""
        if os.environ.get("NEED_GNOME_KEYRING") == "true":
            self.log("replacing and unlocking gnome-keyring-daemon")
            import subprocess

            # empty password supplied on stdin via --unlock
            subprocess.run(
                [
                    "gnome-keyring-daemon",
                    "-r",
                    "-d",
                    "--unlock",
                    "--components=secrets",
                ],
                check=True,
                input=b"\n",
            )

    def start_pulseaudio(self):
        """Start pulseaudio and load the null-sink module when the task
        environment requests it (NEED_PULSEAUDIO=true); retry the task
        (exit code 4) if the x11 modules are missing."""
        command = []
        # Implies that underlying system is Linux.
        if os.environ.get("NEED_PULSEAUDIO") == "true":
            command.extend([
                "pulseaudio",
                "--daemonize",
                "--log-level=4",
                "--log-time=1",
                "-vvvvv",
                "--exit-idle-time=-1",
            ])

            # Only run the initialization for Debian.
            # Ubuntu appears to have an alternate method of starting pulseaudio.
            if self._is_debian():
                self._kill_named_proc("pulseaudio")
            self.run_command(command)

            # All Linux systems need module-null-sink to be loaded, otherwise
            # media tests fail.

            self.run_command("pactl load-module module-null-sink")
            modules = self.get_output_from_command("pactl list modules short")
            if not [l for l in modules.splitlines() if "module-x11" in l]:
                # gnome-session isn't running, missing logind and other system services
                # force the task to retry (return 4)
                self.return_code = 4
                self.fatal(
                    "Unable to start PulseAudio and load x11 modules",
                    exit_code=self.return_code,
                )

    def stage_files(self):
        """Dispatch to the per-category _stage_<category> helper for every
        category that has suites selected."""
        for category in SUITE_CATEGORIES:
            suites = self._query_specified_suites(category)
            stage = getattr(self, f"_stage_{category}", None)
            if suites and stage:
                stage(suites)

    def _stage_files(self, bin_name=None, fail_if_not_exists=True):
        """Copy test binaries/components/plugins/extensions from the test
        package into the installed application's directories."""
        dirs = self.query_abs_dirs()
        abs_app_dir = self.query_abs_app_dir()

        # For mac these directories are in Contents/Resources, on other
        # platforms abs_res_dir will point to abs_app_dir.
        abs_res_dir = self.query_abs_res_dir()
        abs_res_components_dir = os.path.join(abs_res_dir, "components")
        abs_res_plugins_dir = os.path.join(abs_res_dir, "plugins")
        abs_res_extensions_dir = os.path.join(abs_res_dir, "extensions")

        if bin_name:
            src = os.path.join(dirs["abs_test_bin_dir"], bin_name)
            if os.path.exists(src):
                self.info(
                    "copying %s to %s" % (src, os.path.join(abs_app_dir, bin_name))
                )
                shutil.copy2(src, os.path.join(abs_app_dir, bin_name))
            elif fail_if_not_exists:
                raise OSError("File %s not found" % src)
        self.copytree(
            dirs["abs_test_bin_components_dir"],
            abs_res_components_dir,
            overwrite="overwrite_if_exists",
        )
        self.mkdir_p(abs_res_plugins_dir)
        self.copytree(
            dirs["abs_test_bin_plugins_dir"],
            abs_res_plugins_dir,
            overwrite="overwrite_if_exists",
        )
        if os.path.isdir(dirs["abs_test_extensions_dir"]):
            self.mkdir_p(abs_res_extensions_dir)
            self.copytree(
                dirs["abs_test_extensions_dir"],
                abs_res_extensions_dir,
                overwrite="overwrite_if_exists",
            )

    def _stage_xpcshell(self, suites):
        """Stage the xpcshell (and optionally plugin-container / http3server)
        binaries next to the installed application."""
        if "WindowsApps" in self.binary_path:
            self.log(
                "Skipping stage xpcshell for MSIX tests because we cannot copy files into the installation directory."
            )
            return

        self._stage_files(self.config["xpcshell_name"])
        if "plugin_container_name" in self.config:
            self._stage_files(self.config["plugin_container_name"])
        # http3server isn't built for Windows tests or Linux asan/tsan
        # builds. Only stage if the `http3server_name` config is set and if
        # the file actually exists.
1148 if self.config.get("http3server_name"): 1149 self._stage_files(self.config["http3server_name"], fail_if_not_exists=False) 1150 1151 def _stage_cppunittest(self, suites): 1152 abs_res_dir = self.query_abs_res_dir() 1153 dirs = self.query_abs_dirs() 1154 abs_cppunittest_dir = dirs["abs_cppunittest_dir"] 1155 1156 # move manifest and js fils to resources dir, where tests expect them 1157 files = glob.glob(os.path.join(abs_cppunittest_dir, "*.js")) 1158 files.extend(glob.glob(os.path.join(abs_cppunittest_dir, "*.manifest"))) 1159 for f in files: 1160 self.move(f, abs_res_dir) 1161 1162 def _stage_gtest(self, suites): 1163 abs_res_dir = self.query_abs_res_dir() 1164 abs_app_dir = self.query_abs_app_dir() 1165 dirs = self.query_abs_dirs() 1166 abs_gtest_dir = dirs["abs_gtest_dir"] 1167 dirs["abs_test_bin_dir"] = os.path.join(dirs["abs_test_install_dir"], "bin") 1168 1169 files = glob.glob(os.path.join(dirs["abs_test_bin_plugins_dir"], "gmp-*")) 1170 files.append(os.path.join(abs_gtest_dir, "dependentlibs.list.gtest")) 1171 for f in files: 1172 self.move(f, abs_res_dir) 1173 1174 self.copytree( 1175 os.path.join(abs_gtest_dir, "gtest_bin"), os.path.join(abs_app_dir) 1176 ) 1177 1178 def _kill_proc_tree(self, pid): 1179 # Kill a process tree (including grandchildren) with signal.SIGTERM 1180 try: 1181 import signal 1182 1183 import psutil 1184 1185 if pid == os.getpid(): 1186 return (None, None) 1187 1188 parent = psutil.Process(pid) 1189 children = parent.children(recursive=True) 1190 children.append(parent) 1191 1192 for p in children: 1193 p.send_signal(signal.SIGTERM) 1194 1195 # allow for 60 seconds to kill procs 1196 timeout = 60 1197 gone, alive = psutil.wait_procs(children, timeout=timeout) 1198 for p in gone: 1199 self.info("psutil found pid %s dead" % p.pid) 1200 for p in alive: 1201 self.error("failed to kill pid %d after %d" % (p.pid, timeout)) 1202 1203 return (gone, alive) 1204 except Exception as e: 1205 self.error("Exception while trying to kill 
process tree: %s" % str(e)) 1206 1207 def _kill_named_proc(self, pname): 1208 try: 1209 import psutil 1210 except Exception as e: 1211 self.info( 1212 "Error importing psutil, not killing process %s: %s" % pname, str(e) 1213 ) 1214 return 1215 1216 for proc in psutil.process_iter(): 1217 try: 1218 if proc.name() == pname: 1219 procd = proc.as_dict(attrs=["pid", "ppid", "name", "username"]) 1220 self.info("in _kill_named_proc, killing %s" % procd) 1221 self._kill_proc_tree(proc.pid) 1222 except Exception as e: 1223 self.info("Warning: Unable to kill process %s: %s" % (pname, str(e))) 1224 # may not be able to access process info for all processes 1225 continue 1226 1227 def _remove_xen_clipboard(self): 1228 """ 1229 When running on a Windows 7 VM, we have XenDPriv.exe running which 1230 interferes with the clipboard, lets terminate this process and remove 1231 the binary so it doesn't restart 1232 """ 1233 if not self._is_windows(): 1234 return 1235 1236 self._kill_named_proc("XenDPriv.exe") 1237 xenpath = os.path.join( 1238 os.environ["ProgramFiles"], "Citrix", "XenTools", "XenDPriv.exe" 1239 ) 1240 try: 1241 if os.path.isfile(xenpath): 1242 os.remove(xenpath) 1243 except Exception as e: 1244 self.error("Error: Failure to remove file %s: %s" % (xenpath, str(e))) 1245 1246 def _report_system_info(self): 1247 """ 1248 Create the system-info.log artifact file, containing a variety of 1249 system information that might be useful in diagnosing test failures. 
1250 """ 1251 try: 1252 import psutil 1253 1254 path = os.path.join( 1255 self.query_abs_dirs()["abs_blob_upload_dir"], "system-info.log" 1256 ) 1257 with open(path, "w") as f: 1258 f.write("System info collected at %s\n\n" % datetime.now()) 1259 f.write("\nBoot time %s\n" % datetime.fromtimestamp(psutil.boot_time())) 1260 f.write("\nVirtual memory: %s\n" % str(psutil.virtual_memory())) 1261 f.write("\nDisk partitions: %s\n" % str(psutil.disk_partitions())) 1262 f.write("\nDisk usage (/): %s\n" % str(psutil.disk_usage(os.path.sep))) 1263 if not self._is_windows(): 1264 # bug 1417189: frequent errors querying users on Windows 1265 f.write("\nUsers: %s\n" % str(psutil.users())) 1266 f.write("\nNetwork connections:\n") 1267 try: 1268 for nc in psutil.net_connections(): 1269 f.write(" %s\n" % str(nc)) 1270 except Exception as e: 1271 f.write("Exception getting network info: %s\n" % e) 1272 f.write("\nProcesses:\n") 1273 try: 1274 for p in psutil.process_iter(): 1275 ctime = str(datetime.fromtimestamp(p.create_time())) 1276 try: 1277 cmdline = p.cmdline() 1278 except psutil.NoSuchProcess: 1279 cmdline = "" 1280 f.write( 1281 " PID %d %s %s created at %s [%s]\n" 1282 % (p.pid, p.name(), cmdline, ctime, p.status()) 1283 ) 1284 except Exception as e: 1285 f.write("Exception getting process info: %s\n" % e) 1286 except Exception: 1287 # psutil throws a variety of intermittent exceptions 1288 self.info("Unable to complete system-info.log: %s" % sys.exc_info()[0]) 1289 1290 # pull defined in VCSScript. 1291 # preflight_run_tests defined in TestingMixin. 
    def run_tests(self):
        """Main test action: clean up the VM, log system info, then run every
        suite category until one signals a stop (per-test time budget
        exceeded or a RETRY status)."""
        self._remove_xen_clipboard()
        self._report_system_info()
        # Wall-clock start; _run_category_suites compares against this to
        # enforce the per-test-mode time budget.
        self.start_time = datetime.now()
        for category in SUITE_CATEGORIES:
            # A False return means "stop everything", not just this category.
            if not self._run_category_suites(category):
                break

    def get_timeout_for_category(self, suite_category):
        """Return the output timeout used for run_command for this category.

        cppunittest gets a fixed larger budget; other categories read
        ``run_timeout`` from their suite definition, defaulting to 1000.
        (Units are presumably seconds, per run_command's output_timeout —
        TODO confirm.)
        """
        if suite_category == "cppunittest":
            return 2500
        return self.config["suite_definitions"][suite_category].get("run_timeout", 1000)

    def _run_category_suites(self, suite_category) -> bool:
        """run suite(s) to a specific category

        Builds the command line and environment for each suite in the
        category, optionally records the screen, runs the suite (possibly
        once per per-test argument set), and evaluates the output parser.

        Returns False to abort all remaining suites/categories (per-test
        time budget exceeded, too many per-test tests, or a RETRY status);
        True otherwise.
        """
        dirs = self.query_abs_dirs()
        suites = self._query_specified_suites(suite_category)
        abs_app_dir = self.query_abs_app_dir()
        abs_res_dir = self.query_abs_res_dir()

        # Budgets for per-test (test-verify / per-test-coverage) modes.
        max_per_test_time = timedelta(minutes=60)
        max_per_test_tests = 10
        if self.per_test_coverage:
            max_per_test_tests = 30
        executed_tests = 0
        executed_too_many_tests = False
        xpcshell_selftests = 0

        def do_gnome_video_recording(suite_name, upload_dir, ev):
            """Record the screen via the GNOME Shell Screencast D-Bus API
            until ``ev`` is set; output lands in MOZ_UPLOAD_DIR."""
            import os

            import dbus

            target_file = os.path.join(
                upload_dir,
                f"video_{suite_name}.webm",
            )

            self.info(f"Recording suite {suite_name} to {target_file}")

            session_bus = dbus.SessionBus()
            session_bus.call_blocking(
                "org.gnome.Shell.Screencast",
                "/org/gnome/Shell/Screencast",
                "org.gnome.Shell.Screencast",
                "Screencast",
                signature="sa{sv}",
                args=[
                    target_file,
                    {"draw-cursor": True, "framerate": 35},
                ],
            )

            # Block until the suite finishes, then stop the screencast.
            ev.wait()

            session_bus.call_blocking(
                "org.gnome.Shell.Screencast",
                "/org/gnome/Shell/Screencast",
                "org.gnome.Shell.Screencast",
                "StopScreencast",
                signature="",
                args=[],
            )

        def do_macos_video_recording(suite_name, upload_dir, ev):
            """Record the screen via /usr/sbin/screencapture until ``ev`` is
            set; 'p' on stdin stops the recording."""
            import os
            import subprocess

            target_file = os.path.join(
                upload_dir,
                f"video_{suite_name}.mov",
            )
            self.info(f"Recording suite {suite_name} to {target_file}")

            process = subprocess.Popen(
                ["/usr/sbin/screencapture", "-v", "-k", target_file],
                stdin=subprocess.PIPE,
            )
            ev.wait()
            process.stdin.write(b"p")
            process.stdin.flush()
            process.wait()

        if suites:
            self.info("#### Running %s suites" % suite_category)
            for suite in suites:
                if executed_too_many_tests and not self.per_test_coverage:
                    return False

                replace_dict = {
                    "abs_app_dir": abs_app_dir,
                    # Mac specific, but points to abs_app_dir on other
                    # platforms.
                    "abs_res_dir": abs_res_dir,
                    "binary_path": self.binary_path,
                    "install_dir": self.install_dir,
                }
                options_list = []
                env = {"TEST_SUITE": suite}
                # Suite config is either a dict (options/tests/env) or a
                # bare options list.
                if isinstance(suites[suite], dict):
                    options_list = suites[suite].get("options", [])
                    if (
                        self.verify_enabled
                        or self.per_test_coverage
                        or self._get_mozharness_test_paths(suite_category, suite)
                    ):
                        # Ignore tests list in modes where we are running specific tests.
                        tests_list = []
                    else:
                        tests_list = suites[suite].get("tests", [])
                    env = copy.deepcopy(suites[suite].get("env", {}))
                else:
                    options_list = suites[suite]
                    tests_list = []

                flavor = (
                    self.mochitest_flavor
                    if self.mochitest_flavor
                    else self._query_try_flavor(suite_category, suite)
                )
                if self.mochitest_flavor:
                    replace_dict.update({"mochitest_flavor": flavor})

                try_options, try_tests = self.try_args(flavor)

                suite_name = suite_category + "-" + suite
                tbpl_status, log_level = None, None
                error_list = BaseErrorList + HarnessErrorList
                parser = self.get_test_output_parser(
                    suite_category,
                    flavor=flavor,
                    config=self.config,
                    error_list=error_list,
                    log_obj=self.log_obj,
                )

                if suite_category == "reftest":
                    # Reftest ships its own structured-log formatter; load it
                    # from the extracted test package.
                    ref_formatter = load_source(
                        "ReftestFormatter",
                        os.path.abspath(
                            os.path.join(dirs["abs_reftest_dir"], "output.py")
                        ),
                    )
                    parser.formatter = ref_formatter.ReftestFormatter()

                if self.query_minidump_stackwalk():
                    env["MINIDUMP_STACKWALK"] = self.minidump_stackwalk_path
                if self.config["nodejs_path"]:
                    env["MOZ_NODE_PATH"] = self.config["nodejs_path"]
                env["MOZ_UPLOAD_DIR"] = self.query_abs_dirs()["abs_blob_upload_dir"]
                env["MINIDUMP_SAVE_PATH"] = self.query_abs_dirs()["abs_blob_upload_dir"]
                env["RUST_BACKTRACE"] = "full"
                if not os.path.isdir(env["MOZ_UPLOAD_DIR"]):
                    self.mkdir_p(env["MOZ_UPLOAD_DIR"])

                if self.config["allow_software_gl_layers"]:
                    env["MOZ_LAYERS_ALLOW_SOFTWARE_GL"] = "1"

                env = self.query_env(partial_env=env, log_level=INFO)
                cmd_timeout = self.get_timeout_for_category(suite_category)

                # summary is threaded through evaluate_parser across every
                # per-test run of this suite (aggregate results).
                summary = {}
                for per_test_args in self.query_args(suite):
                    # Make sure baseline code coverage tests are never
                    # skipped and that having them run has no influence
                    # on the max number of actual tests that are to be run.
                    is_baseline_test = (
                        "baselinecoverage" in per_test_args[-1]
                        if self.per_test_coverage
                        else False
                    )
                    if executed_too_many_tests and not is_baseline_test:
                        continue

                    if not is_baseline_test:
                        if (datetime.now() - self.start_time) > max_per_test_time:
                            # Running tests has run out of time. That is okay! Stop running
                            # them so that a task timeout is not triggered, and so that
                            # (partial) results are made available in a timely manner.
                            self.info(
                                "TinderboxPrint: Running tests took too long: Not all tests "
                                "were executed.<br/>"
                            )
                            # Signal per-test time exceeded, to break out of suites and
                            # suite categories loops also.
                            return False
                        if executed_tests >= max_per_test_tests:
                            # When changesets are merged between trees or many tests are
                            # otherwise updated at once, there probably is not enough time
                            # to run all tests, and attempting to do so may cause other
                            # problems, such as generating too much log output.
                            self.info(
                                "TinderboxPrint: Too many modified tests: Not all tests "
                                "were executed.<br/>"
                            )
                            executed_too_many_tests = True

                        executed_tests = executed_tests + 1

                    abs_base_cmd = self._query_abs_base_cmd(suite_category, suite)
                    cmd = abs_base_cmd[:]
                    cmd.extend(
                        self.query_options(
                            options_list, try_options, str_format_values=replace_dict
                        )
                    )
                    cmd.extend(
                        self.query_tests_args(
                            tests_list, try_tests, str_format_values=replace_dict
                        )
                    )

                    final_cmd = copy.copy(cmd)
                    final_cmd.extend(per_test_args)

                    # Run xpcshell self-tests only once per test-verify run or only in chunk 1.
                    if "--self-test" in final_cmd:
                        should_remove_selftest = False

                        # Remove self-test for test-verify runs after the first one
                        if self.verify_enabled or self.per_test_coverage:
                            xpcshell_selftests += 1
                            if xpcshell_selftests > 1:
                                should_remove_selftest = True

                        # Remove self-test for chunked runs when not in chunk 1
                        if (
                            self.config.get("this_chunk")
                            and int(self.config["this_chunk"]) != 1
                        ):
                            should_remove_selftest = True

                        if should_remove_selftest:
                            final_cmd.remove("--self-test")

                    final_env = copy.copy(env)

                    # Optional screen recording for this run; the event is
                    # set after run_command returns to stop the recorder.
                    finish_video = threading.Event()
                    video_recording_thread = None
                    if os.getenv("MOZ_RECORD_TEST"):
                        video_recording_target = None
                        if sys.platform == "linux":
                            video_recording_target = do_gnome_video_recording
                        elif sys.platform == "darwin":
                            video_recording_target = do_macos_video_recording

                        if video_recording_target:
                            video_recording_thread = threading.Thread(
                                target=video_recording_target,
                                args=(
                                    suite,
                                    env["MOZ_UPLOAD_DIR"],
                                    finish_video,
                                ),
                            )
                            self.info(f"Starting recording thread {suite}")
                            video_recording_thread.start()
                        else:
                            self.warning(
                                "Screen recording not implemented for this platform"
                            )

                    if self.per_test_coverage:
                        self.set_coverage_env(final_env)

                    return_code = self.run_command(
                        final_cmd,
                        cwd=dirs["abs_work_dir"],
                        output_timeout=cmd_timeout,
                        output_parser=parser,
                        env=final_env,
                    )

                    if self.per_test_coverage:
                        self.add_per_test_coverage_report(
                            final_env, suite, per_test_args[-1]
                        )

                    # mochitest, reftest, and xpcshell suites do not return
                    # appropriate return codes. Therefore, we must parse the output
                    # to determine what the tbpl_status and worst_log_level must
                    # be. We do this by:
                    # 1) checking to see if our mozharness script ran into any
                    #    errors itself with 'num_errors' <- OutputParser
                    # 2) if num_errors is 0 then we look in the subclassed 'parser'
                    #    findings for harness/suite errors <- DesktopUnittestOutputParser
                    # 3) checking to see if the return code is in success_codes

                    if video_recording_thread:
                        self.info(f"Stopping recording thread {suite}")
                        finish_video.set()
                        video_recording_thread.join()
                        self.info(f"Stopped recording thread {suite}")

                    # None means evaluate_parser falls back to its defaults
                    # for acceptable return codes — presumably; TODO confirm.
                    success_codes = None
                    tbpl_status, log_level, summary = parser.evaluate_parser(
                        return_code, success_codes, summary
                    )
                    parser.append_tinderboxprint_line(suite_name)

                    self.record_status(tbpl_status, level=log_level)
                    if len(per_test_args) > 0:
                        self.log_per_test_status(
                            per_test_args[-1], tbpl_status, log_level
                        )
                        if tbpl_status == TBPL_RETRY:
                            self.info("Per-test run abandoned due to RETRY status")
                            return False
                    else:
                        # report as INFO instead of log_level to avoid extra Treeherder lines
                        self.info(
                            "The %s suite: %s ran with return status: %s"
                            % (suite_category, suite, tbpl_status),
                        )

            if executed_too_many_tests:
                return False
        else:
            self.debug("There were no suites to run for %s" % suite_category)
        return True

    def uninstall(self):
        """Uninstall the application after testing (MSIX installs only)."""
        # Technically, we might miss this step if earlier steps fail badly.
        # If that becomes a big issue we should consider moving this to
        # something that is more likely to execute, such as
        # postflight_run_cmd_suites
        if "WindowsApps" in self.binary_path:
            self.uninstall_app(self.binary_path)
        else:
            self.log("Skipping uninstall for non-MSIX test")


# main {{{1
if __name__ == "__main__":
    desktop_unittest = DesktopUnittest()
    desktop_unittest.run_and_exit()