base.py (42961B)
1 # This Source Code Form is subject to the terms of the Mozilla Public 2 # License, v. 2.0. If a copy of the MPL was not distributed with this 3 # file, You can obtain one at http://mozilla.org/MPL/2.0/. 4 5 import json 6 import os 7 import random 8 import re 9 import socket 10 import sys 11 import time 12 import traceback 13 import unittest 14 from argparse import ArgumentParser 15 from collections import defaultdict 16 from copy import deepcopy 17 18 import mozdebug 19 import mozinfo 20 import moznetwork 21 import mozprofile 22 import mozversion 23 from manifestparser import TestManifest 24 from manifestparser.filters import tags 25 from marionette_driver.marionette import Marionette 26 from moztest.adapters.unit import StructuredTestResult, StructuredTestRunner 27 from moztest.results import TestResult, TestResultCollection, relevant_line 28 29 from . import serve 30 31 here = os.path.abspath(os.path.dirname(__file__)) 32 33 34 def update_mozinfo(path=None): 35 """Walk up directories to find mozinfo.json and update the info.""" 36 path = path or here 37 dirs = set() 38 while path != os.path.expanduser("~"): 39 if path in dirs: 40 break 41 dirs.add(path) 42 path = os.path.split(path)[0] 43 44 return mozinfo.find_and_update_from_json(*dirs) 45 46 47 class MarionetteTest(TestResult): 48 @property 49 def test_name(self): 50 if self.test_class is not None: 51 return "{}.py {}.{}".format( 52 self.test_class.split(".")[0], self.test_class, self.name 53 ) 54 else: 55 return self.name 56 57 58 class MarionetteTestResult(StructuredTestResult, TestResultCollection): 59 resultClass = MarionetteTest 60 61 def __init__(self, *args, **kwargs): 62 self.marionette = kwargs.pop("marionette") 63 TestResultCollection.__init__(self, "MarionetteTest") 64 self.passed = 0 65 self.testsRun = 0 66 self.result_modifiers = [] # used by mixins to modify the result 67 StructuredTestResult.__init__(self, *args, **kwargs) 68 69 @property 70 def skipped(self): 71 return [t for t in self if t.result == "SKIPPED"] 72 73 @skipped.setter 74 def skipped(self, value): 75 pass 76 77 @property 78 def expectedFailures(self): 79 return [t for t in self if t.result == "KNOWN-FAIL"] 80 81 @expectedFailures.setter 82 def expectedFailures(self, value): 83 pass 84 85 @property 86 def unexpectedSuccesses(self): 87 return [t for t in self if t.result == "UNEXPECTED-PASS"] 88 89 @unexpectedSuccesses.setter 90 def unexpectedSuccesses(self, value): 91 pass 92 93 @property 94 def tests_passed(self): 95 return [t for t in self if t.result == "PASS"] 96 97 @property 98 def errors(self): 99 return [t for t in self if t.result == "ERROR"] 100 101 @errors.setter 102 def errors(self, value): 103 pass 104 105 @property 106 def failures(self): 107 return [t for t in self if t.result == "UNEXPECTED-FAIL"] 108 109 @failures.setter 110 def failures(self, value): 111 pass 112 113 @property 114 def duration(self): 115 if self.stop_time: 116 return self.stop_time - self.start_time 117 else: 118 return 0 119 120 def add_test_result( 121 self, 122 test, 123 result_expected="PASS", 124 result_actual="PASS", 125 output="", 126 context=None, 127 **kwargs, 128 ): 129 def get_class(test): 130 return test.__class__.__module__ + "." + test.__class__.__name__ 131 132 name = str(test).split()[0] 133 test_class = get_class(test) 134 if hasattr(test, "jsFile"): 135 name = os.path.basename(test.jsFile) 136 test_class = None 137 138 t = self.resultClass( 139 name=name, 140 test_class=test_class, 141 time_start=test.start_time, 142 result_expected=result_expected, 143 context=context, 144 **kwargs, 145 ) 146 # call any registered result modifiers 147 for modifier in self.result_modifiers: 148 result_expected, result_actual, output, context = modifier( 149 t, result_expected, result_actual, output, context 150 ) 151 t.finish( 152 result_actual, 153 time_end=time.time() if test.start_time else 0, 154 reason=relevant_line(output), 155 output=output, 156 ) 157 self.append(t) 158 159 def addError(self, test, err): 160 self.add_test_result( 161 test, output=self._exc_info_to_string(err, test), result_actual="ERROR" 162 ) 163 super().addError(test, err) 164 165 def addFailure(self, test, err): 166 self.add_test_result( 167 test, 168 output=self._exc_info_to_string(err, test), 169 result_actual="UNEXPECTED-FAIL", 170 ) 171 super().addFailure(test, err) 172 173 def addSuccess(self, test): 174 self.passed += 1 175 self.add_test_result(test, result_actual="PASS") 176 super().addSuccess(test) 177 178 def addExpectedFailure(self, test, err): 179 """Called when an expected failure/error occured.""" 180 self.add_test_result( 181 test, output=self._exc_info_to_string(err, test), result_actual="KNOWN-FAIL" 182 ) 183 super().addExpectedFailure(test, err) 184 185 def addUnexpectedSuccess(self, test): 186 """Called when a test was expected to fail, but succeed.""" 187 self.add_test_result(test, result_actual="UNEXPECTED-PASS") 188 super().addUnexpectedSuccess(test) 189 190 def addSkip(self, test, reason): 191 self.add_test_result(test, output=reason, result_actual="SKIPPED") 192 super().addSkip(test, reason) 193 194 def getInfo(self, test): 195 return test.test_name 196 197 def getDescription(self, test): 198 doc_first_line = test.shortDescription() 199 if self.descriptions and doc_first_line: 200 return "\n".join((str(test), doc_first_line)) 201 else: 202 desc = str(test) 203 return desc 204 205 def printLogs(self, test): 206 for testcase in test._tests: 207 if hasattr(testcase, "loglines") and testcase.loglines: 208 # Don't dump loglines to the console if they only contain 209 # TEST-START and TEST-END. 210 skip_log = True 211 for line in testcase.loglines: 212 str_line = " ".join(line) 213 if "TEST-END" not in str_line and "TEST-START" not in str_line: 214 skip_log = False 215 break 216 if skip_log: 217 return 218 self.logger.info("START LOG:") 219 for line in testcase.loglines: 220 self.logger.info(" ".join(line).encode("ascii", "replace")) 221 self.logger.info("END LOG:") 222 223 def stopTest(self, *args, **kwargs): 224 unittest.TextTestResult.stopTest(self, *args, **kwargs) 225 if self.marionette.check_for_crash(): 226 # this tells unittest.TestSuite not to continue running tests 227 self.shouldStop = True 228 test = next((a for a in args if isinstance(a, unittest.TestCase)), None) 229 if test: 230 self.addError(test, sys.exc_info()) 231 232 233 class MarionetteTextTestRunner(StructuredTestRunner): 234 resultclass = MarionetteTestResult 235 236 def __init__(self, **kwargs): 237 self.marionette = kwargs.pop("marionette") 238 self.capabilities = kwargs.pop("capabilities") 239 240 StructuredTestRunner.__init__(self, **kwargs) 241 242 def _makeResult(self): 243 return self.resultclass( 244 self.stream, 245 self.descriptions, 246 self.verbosity, 247 marionette=self.marionette, 248 logger=self.logger, 249 result_callbacks=self.result_callbacks, 250 ) 251 252 def run(self, test): 253 result = super().run(test) 254 result.printLogs(test) 255 return result 256 257 258 class BaseMarionetteArguments(ArgumentParser): 259 def __init__(self, **kwargs): 260 ArgumentParser.__init__(self, **kwargs) 261 262 def dir_path(path): 263 path = os.path.abspath(os.path.expanduser(path)) 264 if not os.access(path, os.F_OK): 265 os.makedirs(path) 266 return path 267 268 self.argument_containers = [] 269 self.add_argument( 270 "tests", 271 nargs="*", 272 default=[], 273 help="Tests to run. " 274 "One or more paths to test files (Python or JS), " 275 "manifest files (.toml) or directories. " 276 "When a directory is specified, " 277 "all test files in the directory will be run.", 278 ) 279 self.add_argument( 280 "--binary", 281 help="path to gecko executable to launch before running the test", 282 ) 283 self.add_argument( 284 "--address", help="host:port of running Gecko instance to connect to" 285 ) 286 self.add_argument( 287 "--emulator", 288 action="store_true", 289 help="If no --address is given, then the harness will launch an " 290 "emulator. (See Remote options group.) " 291 "If --address is given, then the harness assumes you are " 292 "running an emulator already, and will launch gecko app " 293 "on that emulator.", 294 ) 295 self.add_argument( 296 "--app", help="application to use. see marionette_driver.geckoinstance" 297 ) 298 self.add_argument( 299 "--app-arg", 300 dest="app_args", 301 action="append", 302 default=[], 303 help="specify a command line argument to be passed onto the application", 304 ) 305 self.add_argument( 306 "--profile", 307 help="profile to use when launching the gecko process. If not passed, " 308 "then a profile will be constructed and used", 309 type=dir_path, 310 ) 311 self.add_argument( 312 "--setpref", 313 action="append", 314 metavar="PREF=VALUE", 315 dest="prefs_args", 316 help="set a browser preference; repeat for multiple preferences.", 317 ) 318 self.add_argument( 319 "--preferences", 320 action="append", 321 dest="prefs_files", 322 help="read preferences from a JSON or TOML file. For TOML, use " 323 "'file.toml:section' to specify a particular section.", 324 ) 325 self.add_argument( 326 "--addon", 327 action="append", 328 dest="addons", 329 help="addon to install; repeat for multiple addons.", 330 ) 331 self.add_argument( 332 "--repeat", type=int, help="number of times to repeat the test(s)" 333 ) 334 self.add_argument( 335 "--run-until-failure", 336 action="store_true", 337 help="Run tests repeatedly and stop on the first time a test fails. " 338 "Default cap is 30 runs, which can be overwritten " 339 "with the --repeat parameter.", 340 ) 341 self.add_argument( 342 "--testvars", 343 action="append", 344 help="path to a json file with any test data required", 345 ) 346 self.add_argument( 347 "--symbols-path", 348 help="absolute path to directory containing breakpad symbols, or the " 349 "url of a zip file containing symbols", 350 ) 351 self.add_argument( 352 "--socket-timeout", 353 type=float, 354 default=Marionette.DEFAULT_SOCKET_TIMEOUT, 355 help="Set the global timeout for marionette socket operations." 356 " Default: %(default)ss.", 357 ) 358 self.add_argument( 359 "--startup-timeout", 360 type=int, 361 default=Marionette.DEFAULT_STARTUP_TIMEOUT, 362 help="the max number of seconds to wait for a Marionette connection " 363 "after launching a binary. Default: %(default)ss.", 364 ) 365 self.add_argument( 366 "--shuffle", 367 action="store_true", 368 default=False, 369 help="run tests in a random order", 370 ) 371 self.add_argument( 372 "--shuffle-seed", 373 type=int, 374 default=random.randint(0, sys.maxsize), 375 help="Use given seed to shuffle tests", 376 ) 377 self.add_argument( 378 "--total-chunks", 379 type=int, 380 help="how many chunks to split the tests up into", 381 ) 382 self.add_argument("--this-chunk", type=int, help="which chunk to run") 383 self.add_argument( 384 "--server-root", 385 help="url to a webserver or path to a document root from which content " 386 "resources are served (default: {}).".format( 387 os.path.join(os.path.dirname(here), "www") 388 ), 389 ) 390 self.add_argument( 391 "--gecko-log", 392 help="Define the path to store log file. If the path is" 393 " a directory, the real log file will be created" 394 " given the format gecko-(timestamp).log. If it is" 395 " a file, if will be used directly. '-' may be passed" 396 " to write to stdout. Default: './gecko.log'", 397 ) 398 self.add_argument( 399 "--logger-name", 400 default="Marionette-based Tests", 401 help="Define the name to associate with the logger used", 402 ) 403 self.add_argument( 404 "--jsdebugger", 405 action="store_true", 406 default=False, 407 help="Enable the jsdebugger for marionette javascript.", 408 ) 409 self.add_argument( 410 "--pydebugger", 411 help="Enable python post-mortem debugger when a test fails." 412 " Pass in the debugger you want to use, eg pdb or ipdb.", 413 ) 414 self.add_argument( 415 "--debugger", 416 default=None, 417 help="Debugger binary to run tests in. Program name or path", 418 ) 419 self.add_argument( 420 "--debugger-args", 421 dest="debugger_args", 422 default=None, 423 help="Arguments to pass to the debugger", 424 ) 425 self.add_argument( 426 "--disable-fission", 427 action="store_true", 428 dest="disable_fission", 429 default=False, 430 help="Disable Fission (site isolation) in Gecko.", 431 ) 432 self.add_argument( 433 "-z", 434 "--headless", 435 action="store_true", 436 dest="headless", 437 default=bool(os.environ.get("MOZ_HEADLESS")), 438 help="Run tests in headless mode.", 439 ) 440 self.add_argument( 441 "--tag", 442 action="append", 443 dest="test_tags", 444 default=None, 445 help="Filter out tests that don't have the given tag. Can be " 446 "used multiple times in which case the test must contain " 447 "at least one of the given tags.", 448 ) 449 self.add_argument( 450 "--workspace", 451 action="store", 452 default=None, 453 help="Path to directory for Marionette output. " 454 "(Default: .) (Default profile dest: TMP)", 455 type=dir_path, 456 ) 457 self.add_argument( 458 "-v", 459 "--verbose", 460 action="count", 461 help="Increase verbosity to include debug messages with -v, " 462 "and trace messages with -vv.", 463 ) 464 self.register_argument_container(RemoteMarionetteArguments()) 465 466 def register_argument_container(self, container): 467 group = self.add_argument_group(container.name) 468 469 for cli, kwargs in container.args: 470 group.add_argument(*cli, **kwargs) 471 472 self.argument_containers.append(container) 473 474 def parse_known_args(self, args=None, namespace=None): 475 args, remainder = ArgumentParser.parse_known_args(self, args, namespace) 476 for container in self.argument_containers: 477 if hasattr(container, "parse_args_handler"): 478 container.parse_args_handler(args) 479 return (args, remainder) 480 481 def _get_preferences(self, prefs_files, prefs_args): 482 """Return user defined profile preferences as a dict.""" 483 # object that will hold the preferences 484 prefs = mozprofile.prefs.Preferences() 485 486 # add preferences files 487 if prefs_files: 488 for prefs_file in prefs_files: 489 prefs.add_file(prefs_file) 490 491 separator = "=" 492 cli_prefs = [] 493 if prefs_args: 494 misformatted = [] 495 for pref in prefs_args: 496 if separator not in pref: 497 misformatted.append(pref) 498 else: 499 cli_prefs.append(pref.split(separator, 1)) 500 if misformatted: 501 self._print_message( 502 "Warning: Ignoring preferences not in key{}value format: {}\n".format( 503 separator, ", ".join(misformatted) 504 ) 505 ) 506 # string preferences 507 prefs.add(cli_prefs, cast=True) 508 509 return dict(prefs()) 510 511 def verify_usage(self, args): 512 if not args.tests: 513 self.error( 514 "You must specify one or more test files, manifests, or directories." 515 ) 516 517 missing_tests = [path for path in args.tests if not os.path.exists(path)] 518 if missing_tests: 519 self.error( 520 "Test file(s) not found: " + " ".join([path for path in missing_tests]) 521 ) 522 523 if not args.address and not args.binary and not args.emulator: 524 self.error("You must specify --binary, or --address, or --emulator") 525 526 if args.repeat is not None and args.repeat < 0: 527 self.error("The value of --repeat has to be equal or greater than 0.") 528 529 if args.total_chunks is not None and args.this_chunk is None: 530 self.error("You must specify which chunk to run.") 531 532 if args.this_chunk is not None and args.total_chunks is None: 533 self.error("You must specify how many chunks to split the tests into.") 534 535 if args.total_chunks is not None: 536 if not 1 < args.total_chunks: 537 self.error("Total chunks must be greater than 1.") 538 if not 1 <= args.this_chunk <= args.total_chunks: 539 self.error(f"Chunk to run must be between 1 and {args.total_chunks}.") 540 541 if args.jsdebugger: 542 args.app_args.append("-jsdebugger") 543 args.socket_timeout = None 544 545 if args.debugger_args and not args.debugger: 546 self.error("--debugger-args requires --debugger") 547 548 if args.debugger: 549 # Valgrind and some debuggers may cause Gecko to start slowly. 550 # Make sure to wait long enough to connect. 551 args.startup_timeout = 900 552 args.socket_timeout = None 553 554 args.prefs = self._get_preferences(args.prefs_files, args.prefs_args) 555 556 for container in self.argument_containers: 557 if hasattr(container, "verify_usage_handler"): 558 container.verify_usage_handler(args) 559 560 return args 561 562 563 class RemoteMarionetteArguments: 564 name = "Remote (Emulator/Device)" 565 args = [ 566 [ 567 ["--emulator-binary"], 568 { 569 "help": "Path to emulator binary. By default mozrunner uses `which emulator`", 570 "dest": "emulator_bin", 571 }, 572 ], 573 [ 574 ["--adb"], 575 { 576 "help": "Path to the adb. By default mozrunner uses `which adb`", 577 "dest": "adb_path", 578 }, 579 ], 580 [ 581 ["--avd"], 582 { 583 "help": ( 584 "Name of an AVD available in your environment." 585 "See mozrunner.FennecEmulatorRunner" 586 ), 587 }, 588 ], 589 [ 590 ["--avd-home"], 591 { 592 "help": "Path to avd parent directory", 593 }, 594 ], 595 [ 596 ["--device"], 597 { 598 "help": ( 599 "Serial ID to connect to as seen in `adb devices`,e.g emulator-5444" 600 ), 601 "dest": "device_serial", 602 }, 603 ], 604 [ 605 ["--package"], 606 { 607 "help": "Name of Android package, e.g. org.mozilla.fennec", 608 "dest": "package_name", 609 }, 610 ], 611 ] 612 613 614 class Fixtures: 615 def where_is(self, uri, on="http"): 616 return serve.where_is(uri, on) 617 618 619 class BaseMarionetteTestRunner: 620 textrunnerclass = MarionetteTextTestRunner 621 driverclass = Marionette 622 623 def __init__( 624 self, 625 address=None, 626 app=None, 627 app_args=None, 628 debugger=None, 629 debugger_args=None, 630 binary=None, 631 profile=None, 632 logger=None, 633 logdir=None, 634 repeat=None, 635 run_until_failure=None, 636 testvars=None, 637 symbols_path=None, 638 shuffle=False, 639 shuffle_seed=random.randint(0, sys.maxsize), 640 this_chunk=1, 641 total_chunks=1, 642 server_root=None, 643 gecko_log=None, 644 result_callbacks=None, 645 prefs=None, 646 test_tags=None, 647 socket_timeout=None, 648 startup_timeout=None, 649 addons=None, 650 workspace=None, 651 verbose=0, 652 emulator=False, 653 headless=False, 654 disable_fission=False, 655 **kwargs, 656 ): 657 self._appName = None 658 self._capabilities = None 659 self._filename_pattern = None 660 self._version_info = {} 661 662 self.fixture_servers = {} 663 self.fixtures = Fixtures() 664 self.extra_kwargs = kwargs 665 self.test_kwargs = deepcopy(kwargs) 666 self.address = address 667 self.app = app 668 self.app_args = app_args or [] 669 self.debugger = debugger 670 self.debugger_args = debugger_args 671 self.bin = binary 672 self.emulator = emulator 673 self.profile = profile 674 self.addons = addons 675 self.logger = logger 676 self.marionette = None 677 self.logdir = logdir 678 self.repeat = repeat or 0 679 self.run_until_failure = run_until_failure or False 680 self.symbols_path = symbols_path 681 self.socket_timeout = socket_timeout 682 self.startup_timeout = startup_timeout 683 self.shuffle = shuffle 684 self.shuffle_seed = shuffle_seed 685 self.server_root = server_root 686 self.this_chunk = this_chunk 687 self.total_chunks = total_chunks 688 self.mixin_run_tests = [] 689 self.manifest_skipped_tests = [] 690 self.tests = [] 691 self.result_callbacks = result_callbacks or [] 692 self.prefs = prefs or {} 693 self.test_tags = test_tags 694 self.workspace = workspace 695 # If no workspace is set, default location for gecko.log is . 696 # and default location for profile is TMP 697 self.workspace_path = workspace or os.getcwd() 698 self.verbose = verbose 699 self.headless = headless 700 701 self.prefs.update({"fission.autostart": not disable_fission}) 702 703 # If no repeat has been set, default to 30 extra runs 704 if self.run_until_failure and repeat is None: 705 self.repeat = 30 706 707 def gather_debug(test, status): 708 # No screenshots and page source for skipped tests 709 if status == "SKIP": 710 return 711 712 rv = {} 713 marionette = test._marionette_weakref() 714 715 # In the event we're gathering debug without starting a session, 716 # skip marionette commands 717 if marionette.session is not None: 718 try: 719 with marionette.using_context(marionette.CONTEXT_CHROME): 720 rv["screenshot"] = marionette.screenshot() 721 with marionette.using_context(marionette.CONTEXT_CONTENT): 722 rv["source"] = marionette.page_source 723 except Exception as exc: 724 self.logger.warning(f"Failed to gather test failure debug: {exc}") 725 return rv 726 727 self.result_callbacks.append(gather_debug) 728 729 # testvars are set up in self.testvars property 730 self._testvars = None 731 self.testvars_paths = testvars 732 733 self.test_handlers = [] 734 735 self.reset_test_stats() 736 737 self.logger.info(f'Using workspace for temporary data: "{self.workspace_path}"') 738 739 if not gecko_log: 740 self.gecko_log = os.path.join(self.workspace_path or "", "gecko.log") 741 else: 742 self.gecko_log = gecko_log 743 744 self.results = [] 745 746 @property 747 def filename_pattern(self): 748 if self._filename_pattern is None: 749 self._filename_pattern = re.compile(r"^test(((_.+?)+?\.((py))))$") 750 751 return self._filename_pattern 752 753 @property 754 def testvars(self): 755 if self._testvars is not None: 756 return self._testvars 757 758 self._testvars = {} 759 760 def update(d, u): 761 """Update a dictionary that may contain nested dictionaries.""" 762 for k, v in u.items(): 763 o = d.get(k, {}) 764 if isinstance(v, dict) and isinstance(o, dict): 765 d[k] = update(d.get(k, {}), v) 766 else: 767 d[k] = v 768 return d 769 770 json_testvars = self._load_testvars() 771 for j in json_testvars: 772 self._testvars = update(self._testvars, j) 773 return self._testvars 774 775 def _load_testvars(self): 776 data = [] 777 if self.testvars_paths is not None: 778 for path in list(self.testvars_paths): 779 path = os.path.abspath(os.path.expanduser(path)) 780 if not os.path.exists(path): 781 raise OSError(f"--testvars file {path} does not exist") 782 try: 783 with open(path) as f: 784 data.append(json.loads(f.read())) 785 except ValueError as e: 786 msg = "JSON file ({0}) is not properly formatted: {1}" 787 raise ValueError( 788 msg.format(os.path.abspath(path), e) 789 ).with_traceback(sys.exc_info()[2]) 790 return data 791 792 @property 793 def capabilities(self): 794 if self._capabilities: 795 return self._capabilities 796 797 self.marionette.start_session() 798 self._capabilities = self.marionette.session_capabilities 799 self.marionette.delete_session() 800 return self._capabilities 801 802 @property 803 def appName(self): 804 if self._appName: 805 return self._appName 806 807 self._appName = self.capabilities.get("browserName") 808 return self._appName 809 810 @property 811 def bin(self): 812 return self._bin 813 814 @bin.setter 815 def bin(self, path): 816 """Set binary and reset parts of runner accordingly. 817 Intended use: to change binary between calls to run_tests 818 """ 819 self._bin = path 820 self.tests = [] 821 self.cleanup() 822 823 @property 824 def version_info(self): 825 if not self._version_info: 826 try: 827 # TODO: Get version_info in Fennec case 828 self._version_info = mozversion.get_version(binary=self.bin) 829 except Exception: 830 self.logger.warning( 831 f"Failed to retrieve version information for {self.bin}" 832 ) 833 return self._version_info 834 835 def reset_test_stats(self): 836 self.passed = 0 837 self.failed = 0 838 self.crashed = 0 839 self.unexpected_successes = 0 840 self.todo = 0 841 self.skipped = 0 842 self.failures = [] 843 844 def _build_kwargs(self): 845 if self.logdir and not os.access(self.logdir, os.F_OK): 846 os.mkdir(self.logdir) 847 848 kwargs = { 849 "socket_timeout": self.socket_timeout, 850 "prefs": self.prefs, 851 "startup_timeout": self.startup_timeout, 852 "verbose": self.verbose, 853 "symbols_path": self.symbols_path, 854 } 855 if self.bin or self.emulator: 856 debugger_info = None 857 if self.debugger: 858 debugger_info = mozdebug.get_debugger_info( 859 self.debugger, self.debugger_args 860 ) 861 kwargs.update({ 862 "host": "127.0.0.1", 863 "port": 2828, 864 "app": self.app, 865 "app_args": self.app_args, 866 "debugger_info": debugger_info, 867 "profile": self.profile, 868 "addons": self.addons, 869 "gecko_log": self.gecko_log, 870 # ensure Marionette class takes care of starting gecko instance 871 "bin": True, 872 }) 873 874 if self.bin: 875 kwargs.update({ 876 "bin": self.bin, 877 }) 878 879 if self.emulator: 880 kwargs.update({ 881 "avd_home": self.extra_kwargs.get("avd_home"), 882 "adb_path": self.extra_kwargs.get("adb_path"), 883 "emulator_binary": self.extra_kwargs.get("emulator_bin"), 884 "avd": self.extra_kwargs.get("avd"), 885 "package_name": self.extra_kwargs.get("package_name"), 886 }) 887 888 if self.address: 889 host, port = self.address.split(":") 890 kwargs.update({ 891 "host": host, 892 "port": int(port), 893 }) 894 if self.emulator: 895 kwargs.update({ 896 "connect_to_running_emulator": True, 897 }) 898 if not self.bin and not self.emulator: 899 try: 900 # Establish a socket connection so we can vertify the data come back 901 connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 902 connection.connect((host, int(port))) 903 connection.close() 904 except Exception as e: 905 exc_cls, _, tb = sys.exc_info() 906 msg = "Connection attempt to {0}:{1} failed with error: {2}" 907 raise exc_cls(msg.format(host, port, e)).with_traceback(tb) 908 if self.workspace: 909 kwargs["workspace"] = self.workspace_path 910 if self.headless: 911 kwargs["headless"] = True 912 913 return kwargs 914 915 def record_crash(self): 916 crash = True 917 try: 918 crash = self.marionette.check_for_crash() 919 self.crashed += int(crash) 920 except Exception: 921 traceback.print_exc() 922 return crash 923 924 def _initialize_test_run(self, tests): 925 assert len(tests) > 0 926 assert len(self.test_handlers) > 0 927 self.reset_test_stats() 928 929 def _add_tests(self, tests): 930 for test in tests: 931 self.add_test(test) 932 933 invalid_tests = [ 934 t["filepath"] 935 for t in self.tests 936 if not self._is_filename_valid(t["filepath"]) 937 ] 938 if invalid_tests: 939 raise Exception( 940 "Test file names must be of the form " 941 "'test_something.py'." 942 " Invalid test names:\n {}".format("\n ".join(invalid_tests)) 943 ) 944 945 def _is_filename_valid(self, filename): 946 filename = os.path.basename(filename) 947 return self.filename_pattern.match(filename) 948 949 def _fix_test_path(self, path): 950 """Normalize a logged test path from the test package.""" 951 test_path_prefixes = [ 952 f"tests{os.path.sep}", 953 ] 954 955 path = os.path.relpath(path) 956 for prefix in test_path_prefixes: 957 if path.startswith(prefix): 958 path = path[len(prefix) :] 959 break 960 path = path.replace("\\", "/") 961 962 return path 963 964 def _log_skipped_tests(self): 965 for test in self.manifest_skipped_tests: 966 rel_path = None 967 if os.path.exists(test["path"]): 968 rel_path = self._fix_test_path(test["path"]) 969 970 self.logger.test_start(rel_path) 971 self.logger.test_end(rel_path, "SKIP", message=test["disabled"]) 972 self.todo += 1 973 974 def run_tests(self, tests): 975 start_time = time.time() 976 self._initialize_test_run(tests) 977 978 if self.marionette is None: 979 self.marionette = self.driverclass(**self._build_kwargs()) 980 self.logger.info("Profile path is %s" % self.marionette.profile_path) 981 982 if len(self.fixture_servers) == 0 or any( 983 not server.is_alive for _, server in self.fixture_servers 984 ): 985 self.logger.info("Starting fixture servers") 986 self.fixture_servers = self.start_fixture_servers() 987 for url in serve.iter_url(self.fixture_servers): 988 self.logger.info("Fixture server listening on %s" % url) 989 990 # backwards compatibility 991 self.marionette.baseurl = serve.where_is("/") 992 993 self._add_tests(tests) 994 995 device_info = None 996 if self.marionette.instance and self.emulator: 997 try: 998 device_info = self.marionette.instance.runner.device.device.get_info() 999 except Exception: 1000 self.logger.warning("Could not get device info", exc_info=True) 1001 1002 tests_by_group = defaultdict(list) 1003 for test in self.tests: 1004 group = self._fix_test_path(test["group"]) 1005 filepath = self._fix_test_path(test["filepath"]) 1006 tests_by_group[group].append(filepath) 1007 1008 self.logger.suite_start( 1009 tests_by_group, 1010 name="marionette-test", 1011 version_info=self.version_info, 1012 device_info=device_info, 1013 ) 1014 1015 if self.shuffle: 1016 self.logger.info("Using shuffle seed: %d" % self.shuffle_seed) 1017 1018 self._log_skipped_tests() 1019 1020 interrupted = None 1021 try: 1022 repeat_index = 0 1023 while repeat_index <= self.repeat: 1024 if repeat_index > 0: 1025 self.logger.info(f"\nREPEAT {repeat_index}\n-------") 1026 self.run_test_sets() 1027 if self.run_until_failure and self.failed > 0: 1028 break 1029 1030 repeat_index += 1 1031 1032 except KeyboardInterrupt: 1033 # in case of KeyboardInterrupt during the test execution 1034 # we want to display current test results. 1035 # so we keep the exception to raise it later. 1036 interrupted = sys.exc_info() 1037 except Exception: 1038 # For any other exception we return immediately and have to 1039 # cleanup running processes 1040 self.cleanup() 1041 raise 1042 1043 try: 1044 self._print_summary(tests) 1045 self.record_crash() 1046 self.elapsedtime = time.time() - start_time 1047 1048 for run_tests in self.mixin_run_tests: 1049 run_tests(tests) 1050 1051 self.logger.suite_end() 1052 except Exception: 1053 # raise only the exception if we were not interrupted 1054 if not interrupted: 1055 raise 1056 finally: 1057 self.cleanup() 1058 1059 # reraise previous interruption now 1060 if interrupted: 1061 raise interrupted[1].with_traceback(interrupted[2]) 1062 1063 def _print_summary(self, tests): 1064 self.logger.info("\nSUMMARY\n-------") 1065 self.logger.info(f"passed: {self.passed}") 1066 if self.unexpected_successes == 0: 1067 self.logger.info(f"failed: {self.failed}") 1068 else: 1069 self.logger.info( 1070 f"failed: {self.failed} (unexpected sucesses: {self.unexpected_successes})" 1071 ) 1072 if self.skipped == 0: 1073 self.logger.info(f"todo: {self.todo}") 1074 else: 1075 self.logger.info(f"todo: {self.todo} (skipped: {self.skipped})") 1076 1077 if self.failed > 0: 1078 self.logger.info("\nFAILED TESTS\n-------") 1079 for failed_test in self.failures: 1080 self.logger.info(f"{failed_test[0]}") 1081 1082 def start_fixture_servers(self): 1083 root = self.server_root or os.path.join(os.path.dirname(here), "www") 1084 if self.appName == "fennec": 1085 return serve.start(root, host=moznetwork.get_ip()) 1086 else: 1087 return serve.start(root) 1088 1089 def add_test(self, test, expected="pass", group="default"): 1090 filepath = os.path.abspath(test) 1091 1092 if os.path.isdir(filepath): 1093 for root, dirs, files in os.walk(filepath): 1094 for filename in files: 1095 if filename.endswith(".toml"): 1096 msg_tmpl = ( 1097 "Ignoring manifest '{0}'; running all tests in '{1}'." 1098 " See --help for details." 1099 ) 1100 relpath = os.path.relpath( 1101 os.path.join(root, filename), filepath 1102 ) 1103 self.logger.warning(msg_tmpl.format(relpath, filepath)) 1104 elif self._is_filename_valid(filename): 1105 test_file = os.path.join(root, filename) 1106 self.add_test(test_file) 1107 return 1108 1109 file_ext = os.path.splitext(os.path.split(filepath)[-1])[1] 1110 1111 if file_ext == ".toml": 1112 group = filepath 1113 1114 manifest = TestManifest() 1115 manifest.read(filepath) 1116 1117 json_path = update_mozinfo(filepath) 1118 mozinfo.update({ 1119 "appname": self.appName, 1120 "manage_instance": self.marionette.instance is not None, 1121 "headless": self.headless, 1122 }) 1123 self.logger.info(f"mozinfo updated from: {json_path}") 1124 self.logger.info(f"mozinfo is: {mozinfo.info}") 1125 1126 filters = [] 1127 if self.test_tags: 1128 filters.append(tags(self.test_tags)) 1129 1130 manifest_tests = manifest.active_tests( 1131 exists=False, disabled=True, filters=filters, **mozinfo.info 1132 ) 1133 if len(manifest_tests) == 0: 1134 self.logger.error( 1135 "No tests to run using specified " 1136 f"combination of filters: {manifest.fmt_filters()}" 1137 ) 1138 1139 target_tests = [] 1140 for manifest_test in manifest_tests: 1141 if manifest_test.get("disabled"): 1142 self.manifest_skipped_tests.append(manifest_test) 1143 else: 1144 target_tests.append(manifest_test) 1145 1146 for i in target_tests: 1147 if not os.path.exists(i["path"]): 1148 raise OSError("test file: {} does not exist".format(i["path"])) 1149 1150 self.add_test(i["path"], i["expected"], group=group) 1151 return 1152 1153 self.tests.append({"filepath": filepath, "expected": expected, "group": group}) 1154 1155 def run_test(self, filepath, expected): 1156 testloader = unittest.TestLoader() 1157 suite = unittest.TestSuite() 1158 self.test_kwargs["expected"] = expected 1159 mod_name = os.path.splitext(os.path.split(filepath)[-1])[0] 1160 for handler in self.test_handlers: 1161 if handler.match(os.path.basename(filepath)): 1162 handler.add_tests_to_suite( 1163 mod_name, 1164 filepath, 1165 suite, 1166 testloader, 1167 self.marionette, 1168 self.fixtures, 1169 self.testvars, 1170 **self.test_kwargs, 1171 ) 1172 break 1173 1174 if suite.countTestCases(): 1175 runner = self.textrunnerclass( 1176 logger=self.logger, 1177 marionette=self.marionette, 1178 capabilities=self.capabilities, 1179 result_callbacks=self.result_callbacks, 1180 ) 1181 1182 results = runner.run(suite) 1183 self.results.append(results) 1184 1185 self.failed += len(results.failures) + len(results.errors) 1186 if hasattr(results, "skipped"): 1187 self.skipped += len(results.skipped) 1188 self.todo += len(results.skipped) 1189 self.passed += results.passed 1190 for failure in results.failures + results.errors: 1191 self.failures.append(( 1192 results.getInfo(failure), 1193 failure.output, 1194 "TEST-UNEXPECTED-FAIL", 1195 )) 1196 if hasattr(results, "unexpectedSuccesses"): 1197 self.failed += len(results.unexpectedSuccesses) 1198 self.unexpected_successes += len(results.unexpectedSuccesses) 1199 for failure in results.unexpectedSuccesses: 1200 self.failures.append(( 1201 results.getInfo(failure), 1202 failure.output, 1203 "TEST-UNEXPECTED-PASS", 1204 )) 1205 if hasattr(results, "expectedFailures"): 1206 self.todo += len(results.expectedFailures) 1207 1208 self.mixin_run_tests = [] 1209 for result in self.results: 1210 result.result_modifiers = [] 1211 1212 def run_test_set(self, tests): 1213 if self.shuffle: 1214 random.seed(self.shuffle_seed) 1215 random.shuffle(tests) 1216 1217 for test in tests: 1218 self.run_test(test["filepath"], test["expected"]) 1219 if self.record_crash(): 1220 break 1221 1222 def run_test_sets(self): 1223 if len(self.tests) < 1: 1224 raise Exception("There are no tests to run.") 1225 elif self.total_chunks is not None and self.total_chunks > len(self.tests): 1226 raise ValueError( 1227 f"Total number of chunks must be between 1 and {len(self.tests)}." 1228 ) 1229 if self.total_chunks is not None and self.total_chunks > 1: 1230 chunks = [[] for i in range(self.total_chunks)] 1231 for i, test in enumerate(self.tests): 1232 target_chunk = i % self.total_chunks 1233 chunks[target_chunk].append(test) 1234 1235 self.logger.info( 1236 f"Running chunk {self.this_chunk} of {self.total_chunks} ({len(chunks[self.this_chunk - 1])} tests selected from a " 1237 f"total of {len(self.tests)})" 1238 ) 1239 self.tests = chunks[self.this_chunk - 1] 1240 1241 self.run_test_set(self.tests) 1242 1243 def cleanup(self): 1244 for proc in serve.iter_proc(self.fixture_servers): 1245 proc.stop() 1246 proc.kill() 1247 self.fixture_servers = {} 1248 1249 if hasattr(self, "marionette") and self.marionette: 1250 if self.marionette.instance is not None: 1251 if self.marionette.instance.runner.is_running(): 1252 # Force a clean shutdown of the application process first if 1253 # it is still running. If that fails, kill the process. 1254 # Therefore a new session needs to be started. 1255 self.marionette.start_session() 1256 self.marionette.quit() 1257 1258 self.marionette.instance.close(clean=True) 1259 self.marionette.instance = None 1260 1261 self.marionette.cleanup() 1262 self.marionette = None 1263 1264 __del__ = cleanup