test_runner.py (58588B)
1 #!/usr/bin/env vpython3 2 # 3 # Copyright 2013 The Chromium Authors 4 # Use of this source code is governed by a BSD-style license that can be 5 # found in the LICENSE file. 6 7 """Runs all types of tests from one unified interface.""" 8 9 from __future__ import absolute_import 10 import argparse 11 import collections 12 import contextlib 13 import io 14 import itertools 15 import logging 16 import os 17 import re 18 import shlex 19 import shutil 20 import signal 21 import sys 22 import tempfile 23 import threading 24 import traceback 25 import unittest 26 27 # Import _strptime before threaded code. datetime.datetime.strptime is 28 # threadsafe except for the initial import of the _strptime module. 29 # See http://crbug.com/724524 and https://bugs.python.org/issue7980. 30 import _strptime # pylint: disable=unused-import 31 32 # pylint: disable=ungrouped-imports 33 from pylib.constants import host_paths 34 35 if host_paths.DEVIL_PATH not in sys.path: 36 sys.path.append(host_paths.DEVIL_PATH) 37 38 from devil import base_error 39 from devil.utils import reraiser_thread 40 from devil.utils import run_tests_helper 41 42 from pylib import constants 43 from pylib.base import base_test_result 44 from pylib.base import environment_factory 45 from pylib.base import output_manager 46 from pylib.base import output_manager_factory 47 from pylib.base import test_instance_factory 48 from pylib.base import test_run_factory 49 from pylib.results import json_results 50 from pylib.results import report_results 51 from pylib.results.presentation import test_results_presentation 52 from pylib.utils import local_utils 53 from pylib.utils import logdog_helper 54 from pylib.utils import logging_utils 55 from pylib.utils import test_filter 56 57 from py_utils import contextlib_ext 58 59 from lib.proto import exception_recorder 60 from lib.proto import measures 61 from lib.results import result_sink 62 63 _DEVIL_STATIC_CONFIG_FILE = os.path.abspath(os.path.join( 64 
host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'devil_config.json')) 65 66 _RERUN_FAILED_TESTS_FILE = 'rerun_failed_tests.filter' 67 68 69 def _RealPath(arg): 70 if arg.startswith('//'): 71 arg = os.path.abspath(os.path.join(host_paths.DIR_SOURCE_ROOT, 72 arg[2:].replace('/', os.sep))) 73 return os.path.realpath(arg) 74 75 76 def AddTestLauncherOptions(parser): 77 """Adds arguments mirroring //base/test/launcher. 78 79 Args: 80 parser: The parser to which arguments should be added. 81 Returns: 82 The given parser. 83 """ 84 parser.add_argument( 85 '--test-launcher-retry-limit', 86 '--test_launcher_retry_limit', 87 '--num_retries', '--num-retries', 88 '--isolated-script-test-launcher-retry-limit', 89 dest='num_retries', type=int, default=2, 90 help='Number of retries for a test before ' 91 'giving up (default: %(default)s).') 92 parser.add_argument( 93 '--test-launcher-summary-output', 94 '--json-results-file', 95 dest='json_results_file', type=os.path.realpath, 96 help='If set, will dump results in JSON form to the specified file. ' 97 'Note that this will also trigger saving per-test logcats to ' 98 'logdog.') 99 parser.add_argument( 100 '--test-launcher-shard-index', 101 type=int, default=os.environ.get('GTEST_SHARD_INDEX', 0), 102 help='Index of the external shard to run.') 103 parser.add_argument( 104 '--test-launcher-total-shards', 105 type=int, default=os.environ.get('GTEST_TOTAL_SHARDS', 1), 106 help='Total number of external shards.') 107 108 test_filter.AddFilterOptions(parser) 109 110 return parser 111 112 113 def AddCommandLineOptions(parser): 114 """Adds arguments to support passing command-line flags to the device.""" 115 parser.add_argument( 116 '--device-flags-file', 117 type=os.path.realpath, 118 help='The relative filepath to a file containing ' 119 'command-line flags to set on the device') 120 parser.add_argument( 121 '--use-apk-under-test-flags-file', 122 action='store_true', 123 help='Wether to use the flags file for the apk under test. 
If set, ' 124 "the filename will be looked up in the APK's PackageInfo.") 125 parser.add_argument('--variations-test-seed-path', 126 type=os.path.relpath, 127 default=None, 128 help='Path to variations seed file.') 129 parser.add_argument('--webview-variations-test-seed-path', 130 type=os.path.relpath, 131 default=None, 132 help='Path to variations seed file for WebView.') 133 134 parser.set_defaults(allow_unknown=True) 135 parser.set_defaults(command_line_flags=None) 136 137 138 def AddTracingOptions(parser): 139 # TODO(shenghuazhang): Move this into AddCommonOptions once it's supported 140 # for all test types. 141 parser.add_argument( 142 '--trace-output', 143 metavar='FILENAME', type=os.path.realpath, 144 help='Path to save test_runner trace json output to.') 145 146 parser.add_argument( 147 '--trace-all', 148 action='store_true', 149 help='Whether to trace all function calls.') 150 151 152 def AddCommonOptions(parser): 153 """Adds all common options to |parser|.""" 154 155 default_build_type = os.environ.get('BUILDTYPE', 'Debug') 156 157 debug_or_release_group = parser.add_mutually_exclusive_group() 158 debug_or_release_group.add_argument( 159 '--debug', 160 action='store_const', const='Debug', dest='build_type', 161 default=default_build_type, 162 help='If set, run test suites under out/Debug. ' 163 'Default is env var BUILDTYPE or Debug.') 164 debug_or_release_group.add_argument( 165 '--release', 166 action='store_const', const='Release', dest='build_type', 167 help='If set, run test suites under out/Release. ' 168 'Default is env var BUILDTYPE or Debug.') 169 170 parser.add_argument( 171 '--break-on-failure', '--break_on_failure', 172 dest='break_on_failure', action='store_true', 173 help='Whether to break on failure.') 174 175 # TODO(jbudorick): Remove this once everything has switched to platform 176 # mode. 
177 parser.add_argument( 178 '--enable-platform-mode', 179 action='store_true', 180 help='Run the test scripts in platform mode, which ' 181 'conceptually separates the test runner from the ' 182 '"device" (local or remote, real or emulated) on ' 183 'which the tests are running. [experimental]') 184 185 parser.add_argument( 186 '-e', '--environment', 187 default='local', choices=constants.VALID_ENVIRONMENTS, 188 help='Test environment to run in (default: %(default)s).') 189 190 parser.add_argument( 191 '--local-output', 192 action='store_true', 193 help='Whether to archive test output locally and generate ' 194 'a local results detail page.') 195 196 parser.add_argument('--list-tests', 197 action='store_true', 198 help='List available tests and exit.') 199 200 parser.add_argument('--wrapper-script-args', 201 help='A string of args that were passed to the wrapper ' 202 'script. This should probably not be edited by a ' 203 'user as it is passed by the wrapper itself.') 204 205 class FastLocalDevAction(argparse.Action): 206 def __call__(self, parser, namespace, values, option_string=None): 207 namespace.enable_concurrent_adb = True 208 namespace.enable_device_cache = True 209 namespace.extract_test_list_from_filter = True 210 namespace.local_output = True 211 namespace.num_retries = 0 212 namespace.skip_clear_data = True 213 namespace.use_persistent_shell = True 214 215 parser.add_argument( 216 '--fast-local-dev', 217 type=bool, 218 nargs=0, 219 action=FastLocalDevAction, 220 help='Alias for: --num-retries=0 --enable-device-cache ' 221 '--enable-concurrent-adb --skip-clear-data ' 222 '--extract-test-list-from-filter --use-persistent-shell --local-output') 223 224 # TODO(jbudorick): Remove this once downstream bots have switched to 225 # api.test_results. 
226 parser.add_argument( 227 '--flakiness-dashboard-server', 228 dest='flakiness_dashboard_server', 229 help=argparse.SUPPRESS) 230 parser.add_argument( 231 '--gs-results-bucket', 232 help='Google Storage bucket to upload results to.') 233 234 parser.add_argument( 235 '--output-directory', 236 dest='output_directory', type=os.path.realpath, 237 help='Path to the directory in which build files are' 238 ' located (must include build type). This will take' 239 ' precedence over --debug and --release') 240 parser.add_argument( 241 '-v', '--verbose', 242 dest='verbose_count', default=0, action='count', 243 help='Verbose level (multiple times for more)') 244 245 parser.add_argument( 246 '--repeat', '--gtest_repeat', '--gtest-repeat', 247 '--isolated-script-test-repeat', 248 dest='repeat', type=int, default=0, 249 help='Number of times to repeat the specified set of tests.') 250 251 # Not useful for junit tests. 252 parser.add_argument( 253 '--use-persistent-shell', 254 action='store_true', 255 help='Uses a persistent shell connection for the adb connection.') 256 257 parser.add_argument('--disable-test-server', 258 action='store_true', 259 help='Disables SpawnedTestServer which doesn' 260 't work with remote adb. ' 261 'WARNING: Will break tests which require the server.') 262 263 # This is currently only implemented for gtests and instrumentation tests. 264 parser.add_argument( 265 '--gtest_also_run_disabled_tests', '--gtest-also-run-disabled-tests', 266 '--isolated-script-test-also-run-disabled-tests', 267 dest='run_disabled', action='store_true', 268 help='Also run disabled tests if applicable.') 269 270 # These are currently only implemented for gtests. 
271 parser.add_argument('--isolated-script-test-output', 272 help='If present, store test results on this path.') 273 parser.add_argument('--isolated-script-test-perf-output', 274 help='If present, store chartjson results on this path.') 275 parser.add_argument('--timeout-scale', 276 type=float, 277 help='Factor by which timeouts should be scaled.') 278 279 AddTestLauncherOptions(parser) 280 281 282 def ProcessCommonOptions(args): 283 """Processes and handles all common options.""" 284 run_tests_helper.SetLogLevel(args.verbose_count, add_handler=False) 285 if args.verbose_count > 0: 286 handler = logging_utils.ColorStreamHandler() 287 else: 288 handler = logging.StreamHandler(sys.stdout) 289 handler.setFormatter(run_tests_helper.CustomFormatter()) 290 logging.getLogger().addHandler(handler) 291 292 constants.SetBuildType(args.build_type) 293 if args.output_directory: 294 constants.SetOutputDirectory(args.output_directory) 295 296 297 def AddDeviceOptions(parser): 298 """Adds device options to |parser|.""" 299 300 parser = parser.add_argument_group('device arguments') 301 302 parser.add_argument( 303 '--adb-path', 304 type=os.path.realpath, 305 help='Specify the absolute path of the adb binary that ' 306 'should be used.') 307 parser.add_argument( 308 '--use-local-devil-tools', 309 action='store_true', 310 help='Use locally built versions of tools used by devil_chromium.') 311 parser.add_argument('--denylist-file', 312 type=os.path.realpath, 313 help='Device denylist file.') 314 parser.add_argument( 315 '-d', '--device', nargs='+', 316 dest='test_devices', 317 help='Target device(s) for the test suite to run on.') 318 parser.add_argument( 319 '--enable-concurrent-adb', 320 action='store_true', 321 help='Run multiple adb commands at the same time, even ' 322 'for the same device.') 323 parser.add_argument( 324 '--enable-device-cache', 325 action='store_true', 326 help='Cache device state to disk between runs') 327 parser.add_argument('--list-data', 328 
action='store_true', 329 help='List files pushed to device and exit.') 330 parser.add_argument('--skip-clear-data', 331 action='store_true', 332 help='Do not wipe app data between tests. Use this to ' 333 'speed up local development and never on bots ' 334 '(increases flakiness)') 335 parser.add_argument( 336 '--recover-devices', 337 action='store_true', 338 help='Attempt to recover devices prior to the final retry. Warning: ' 339 'this will cause all devices to reboot.') 340 341 parser.add_argument( 342 '--upload-logcats-file', 343 action='store_true', 344 dest='upload_logcats_file', 345 help='Whether to upload logcat file to logdog.') 346 347 logcat_output_group = parser.add_mutually_exclusive_group() 348 logcat_output_group.add_argument( 349 '--logcat-output-dir', type=os.path.realpath, 350 help='If set, will dump logcats recorded during test run to directory. ' 351 'File names will be the device ids with timestamps.') 352 logcat_output_group.add_argument( 353 '--logcat-output-file', type=os.path.realpath, 354 help='If set, will merge logcats recorded during test run and dump them ' 355 'to the specified file.') 356 357 parser.add_argument( 358 '--force-main-user', 359 action='store_true', 360 help='Force the applicable adb commands to run with "--user" param set ' 361 'to the id of the main user on device. Only use when the main user is a ' 362 'secondary user, e.g. Android Automotive OS.') 363 364 parser.add_argument( 365 '--connect-over-network', 366 action='store_true', 367 help='Connect to devices over the network using "adb connect". Must ' 368 'specify device hostnames/IPs via "-d"/"--device" args.') 369 370 371 def AddEmulatorOptions(parser): 372 """Adds emulator-specific options to |parser|.""" 373 parser = parser.add_argument_group('emulator arguments') 374 375 parser.add_argument( 376 '--avd-config', 377 type=os.path.realpath, 378 help='Path to the avd config textpb. 
' 379 '(See //tools/android/avd/proto/ for message definition' 380 ' and existing textpb files.)') 381 parser.add_argument( 382 '--emulator-count', 383 type=int, 384 default=1, 385 help='Number of emulators to use.') 386 parser.add_argument( 387 '--emulator-window', 388 action='store_true', 389 default=False, 390 help='Enable graphical window display on the emulator.') 391 parser.add_argument( 392 '--emulator-debug-tags', 393 help='Comma-separated list of debug tags. This can be used to enable or ' 394 'disable debug messages from specific parts of the emulator, e.g. ' 395 'init,snapshot. See "emulator -help-debug-tags" ' 396 'for a full list of tags.') 397 parser.add_argument( 398 '--emulator-enable-network', 399 action='store_true', 400 help='Enable the network (WiFi and mobile data) on the emulator.') 401 402 403 def AddGTestOptions(parser): 404 """Adds gtest options to |parser|.""" 405 406 parser = parser.add_argument_group('gtest arguments') 407 408 parser.add_argument( 409 '--additional-apk', 410 action='append', dest='additional_apks', default=[], 411 type=_RealPath, 412 help='Additional apk that must be installed on ' 413 'the device when the tests are run.') 414 parser.add_argument( 415 '--app-data-file', 416 action='append', dest='app_data_files', 417 help='A file path relative to the app data directory ' 418 'that should be saved to the host.') 419 parser.add_argument( 420 '--app-data-file-dir', 421 help='Host directory to which app data files will be' 422 ' saved. Used with --app-data-file.') 423 parser.add_argument( 424 '--enable-xml-result-parsing', 425 action='store_true', help=argparse.SUPPRESS) 426 parser.add_argument( 427 '--executable-dist-dir', 428 type=os.path.realpath, 429 help="Path to executable's dist directory for native" 430 " (non-apk) tests.") 431 parser.add_argument( 432 '--deploy-mock-openxr-runtime', 433 action='store_true', 434 help=('Prepares the device by deploying a mock OpenXR runtime to use for ' 435 'testing. 
Note that this *may* override a runtime specialization ' 436 'already present on the device.')) 437 parser.add_argument('--extract-test-list-from-filter', 438 action='store_true', 439 help='When a test filter is specified, and the list of ' 440 'tests can be determined from it, skip querying the ' 441 'device for the list of all tests. Speeds up local ' 442 'development, but is not safe to use on bots (' 443 'http://crbug.com/549214') 444 parser.add_argument( 445 '--gs-test-artifacts-bucket', 446 help=('If present, test artifacts will be uploaded to this Google ' 447 'Storage bucket.')) 448 parser.add_argument( 449 '--render-test-output-dir', 450 help='If present, store rendering artifacts in this path.') 451 parser.add_argument( 452 '--runtime-deps-path', 453 dest='runtime_deps_path', type=os.path.realpath, 454 help='Runtime data dependency file from GN.') 455 parser.add_argument( 456 '-t', '--shard-timeout', 457 dest='shard_timeout', type=int, default=120, 458 help='Timeout to wait for each test (default: %(default)s).') 459 parser.add_argument( 460 '--store-tombstones', 461 dest='store_tombstones', action='store_true', 462 help='Add tombstones in results if crash.') 463 parser.add_argument( 464 '-s', '--suite', 465 dest='suite_name', nargs='+', metavar='SUITE_NAME', required=True, 466 help='Executable name of the test suite to run.') 467 parser.add_argument( 468 '--test-apk-incremental-install-json', 469 type=os.path.realpath, 470 help='Path to install json for the test apk.') 471 parser.add_argument('--test-launcher-batch-limit', 472 dest='test_launcher_batch_limit', 473 type=int, 474 help='The max number of tests to run in a shard. ' 475 'Ignores non-positive ints and those greater than ' 476 'MAX_SHARDS') 477 parser.add_argument( 478 '-w', '--wait-for-java-debugger', action='store_true', 479 help='Wait for java debugger to attach before running any application ' 480 'code. 
Also disables test timeouts and sets retries=0.') 481 parser.add_argument( 482 '--coverage-dir', 483 type=os.path.realpath, 484 help='Directory in which to place all generated coverage files.') 485 parser.add_argument( 486 '--use-existing-test-data', 487 action='store_true', 488 help='Do not push new files to the device, instead using existing APK ' 489 'and test data. Only use when running the same test for multiple ' 490 'iterations.') 491 # This is currently only implemented for gtests tests. 492 parser.add_argument('--gtest_also_run_pre_tests', 493 '--gtest-also-run-pre-tests', 494 dest='run_pre_tests', 495 action='store_true', 496 help='Also run PRE_ tests if applicable.') 497 498 499 def AddInstrumentationTestOptions(parser): 500 """Adds Instrumentation test options to |parser|.""" 501 502 parser = parser.add_argument_group('instrumentation arguments') 503 504 parser.add_argument('--additional-apex', 505 action='append', 506 dest='additional_apexs', 507 default=[], 508 type=_RealPath, 509 help='Additional apex that must be installed on ' 510 'the device when the tests are run') 511 parser.add_argument( 512 '--additional-apk', 513 action='append', dest='additional_apks', default=[], 514 type=_RealPath, 515 help='Additional apk that must be installed on ' 516 'the device when the tests are run') 517 parser.add_argument('--forced-queryable-additional-apk', 518 action='append', 519 dest='forced_queryable_additional_apks', 520 default=[], 521 type=_RealPath, 522 help='Configures an additional-apk to be forced ' 523 'to be queryable by other APKs.') 524 parser.add_argument('--instant-additional-apk', 525 action='append', 526 dest='instant_additional_apks', 527 default=[], 528 type=_RealPath, 529 help='Configures an additional-apk to be an instant APK') 530 parser.add_argument( 531 '-A', '--annotation', 532 dest='annotation_str', 533 help='Comma-separated list of annotations. Run only tests with any of ' 534 'the given annotations. 
An annotation can be either a key or a ' 535 'key-values pair. A test that has no annotation is considered ' 536 '"SmallTest".') 537 # TODO(jbudorick): Remove support for name-style APK specification once 538 # bots are no longer doing it. 539 parser.add_argument( 540 '--apk-under-test', 541 help='Path or name of the apk under test.') 542 parser.add_argument( 543 '--store-data-dependencies-in-temp', 544 action='store_true', 545 help='Store data dependencies in /data/local/tmp/chromium_tests_root') 546 parser.add_argument( 547 '--module', 548 action='append', 549 dest='modules', 550 help='Specify Android App Bundle modules to install in addition to the ' 551 'base module.') 552 parser.add_argument( 553 '--fake-module', 554 action='append', 555 dest='fake_modules', 556 help='Specify Android App Bundle modules to fake install in addition to ' 557 'the real modules.') 558 parser.add_argument( 559 '--additional-locale', 560 action='append', 561 dest='additional_locales', 562 help='Specify locales in addition to the device locale to install splits ' 563 'for when --apk-under-test is an Android App Bundle.') 564 parser.add_argument( 565 '--coverage-dir', 566 type=os.path.realpath, 567 help='Directory in which to place all generated ' 568 'Jacoco coverage files.') 569 parser.add_argument('--disable-dalvik-asserts', 570 dest='set_asserts', 571 action='store_false', 572 default=True, 573 help='Removes the dalvik.vm.enableassertions property') 574 parser.add_argument( 575 '--proguard-mapping-path', 576 help='.mapping file to use to Deobfuscate java stack traces in test ' 577 'output and logcat.') 578 parser.add_argument( 579 '-E', '--exclude-annotation', 580 dest='exclude_annotation_str', 581 help='Comma-separated list of annotations. 
Exclude tests with these ' 582 'annotations.') 583 parser.add_argument( 584 '--enable-breakpad-dump', 585 action='store_true', 586 help='Stores any breakpad dumps till the end of the test.') 587 parser.add_argument( 588 '--replace-system-package', 589 type=_RealPath, 590 default=None, 591 help='Use this apk to temporarily replace a system package with the same ' 592 'package name.') 593 parser.add_argument( 594 '--remove-system-package', 595 default=[], 596 action='append', 597 dest='system_packages_to_remove', 598 help='Specifies a system package to remove before testing if it exists ' 599 'on the system. WARNING: THIS WILL PERMANENTLY REMOVE THE SYSTEM APP. ' 600 'Unlike --replace-system-package, the app will not be restored after ' 601 'tests are finished.') 602 parser.add_argument( 603 '--use-voice-interaction-service', 604 help='This can be used to update the voice interaction service to be a ' 605 'custom one. This is useful for mocking assistants. eg: ' 606 'android.assist.service/.MainInteractionService') 607 parser.add_argument( 608 '--use-webview-provider', 609 type=_RealPath, default=None, 610 help='Use this apk as the webview provider during test. ' 611 'The original provider will be restored if possible, ' 612 "on Nougat the provider can't be determined and so " 613 'the system will choose the default provider.') 614 parser.add_argument( 615 '--webview-command-line-arg', 616 default=[], 617 action='append', 618 help="Specifies command line arguments to add to WebView's flag file") 619 parser.add_argument( 620 '--webview-process-mode', 621 choices=['single', 'multiple'], 622 help='Run WebView instrumentation tests only in the specified process ' 623 'mode. 
If not set, both single and multiple process modes will execute.') 624 parser.add_argument( 625 '--run-setup-command', 626 default=[], 627 action='append', 628 dest='run_setup_commands', 629 help='This can be used to run a custom shell command on the device as a ' 630 'setup step') 631 parser.add_argument( 632 '--run-teardown-command', 633 default=[], 634 action='append', 635 dest='run_teardown_commands', 636 help='This can be used to run a custom shell command on the device as a ' 637 'teardown step') 638 parser.add_argument( 639 '--runtime-deps-path', 640 dest='runtime_deps_path', type=os.path.realpath, 641 help='Runtime data dependency file from GN.') 642 parser.add_argument( 643 '--screenshot-directory', 644 dest='screenshot_dir', type=os.path.realpath, 645 help='Capture screenshots of test failures') 646 parser.add_argument( 647 '--store-tombstones', 648 action='store_true', dest='store_tombstones', 649 help='Add tombstones in results if crash.') 650 parser.add_argument( 651 '--strict-mode', 652 dest='strict_mode', default='testing', 653 help='StrictMode command-line flag set on the device, ' 654 'death/testing to kill the process, off to stop ' 655 'checking, flash to flash only. (default: %(default)s)') 656 parser.add_argument( 657 '--test-apk', 658 required=True, 659 help='Path or name of the apk containing the tests.') 660 parser.add_argument( 661 '--test-apk-as-instant', 662 action='store_true', 663 help='Install the test apk as an instant app. 
' 664 'Instant apps run in a more restrictive execution environment.') 665 parser.add_argument( 666 '--test-launcher-batch-limit', 667 dest='test_launcher_batch_limit', 668 type=int, 669 help=('Not actually used for instrumentation tests, but can be used as ' 670 'a proxy for determining if the current run is a retry without ' 671 'patch.')) 672 parser.add_argument( 673 '--is-unit-test', 674 action='store_true', 675 help=('Specify the test suite as composed of unit tests, blocking ' 676 'certain operations.')) 677 parser.add_argument( 678 '-w', '--wait-for-java-debugger', action='store_true', 679 help='Wait for java debugger to attach before running any application ' 680 'code. Also disables test timeouts and sets retries=0.') 681 parser.add_argument( 682 '--webview-rebaseline-mode', 683 action='store_true', 684 help=('Run WebView tests in rebaselining mode, updating on-device ' 685 'expectation files.')) 686 687 # WPR record mode. 688 parser.add_argument('--wpr-enable-record', 689 action='store_true', 690 default=False, 691 help='If true, WPR server runs in record mode.' 692 'otherwise, runs in replay mode.') 693 694 parser.add_argument( 695 '--approve-app-links', 696 help='Force enables Digital Asset Link verification for the provided ' 697 'package and domain, example usage: --approve-app-links ' 698 'com.android.package:www.example.com') 699 700 # These arguments are suppressed from the help text because they should 701 # only ever be specified by an intermediate script. 
702 parser.add_argument( 703 '--apk-under-test-incremental-install-json', 704 help=argparse.SUPPRESS) 705 parser.add_argument( 706 '--test-apk-incremental-install-json', 707 type=os.path.realpath, 708 help=argparse.SUPPRESS) 709 710 711 def AddSkiaGoldTestOptions(parser): 712 """Adds Skia Gold test options to |parser|.""" 713 parser = parser.add_argument_group("Skia Gold arguments") 714 parser.add_argument( 715 '--code-review-system', 716 help='A non-default code review system to pass to pass to Gold, if ' 717 'applicable') 718 parser.add_argument( 719 '--continuous-integration-system', 720 help='A non-default continuous integration system to pass to Gold, if ' 721 'applicable') 722 parser.add_argument( 723 '--git-revision', help='The git commit currently being tested.') 724 parser.add_argument( 725 '--gerrit-issue', 726 help='The Gerrit issue this test is being run on, if applicable.') 727 parser.add_argument( 728 '--gerrit-patchset', 729 help='The Gerrit patchset this test is being run on, if applicable.') 730 parser.add_argument( 731 '--buildbucket-id', 732 help='The Buildbucket build ID that this test was triggered from, if ' 733 'applicable.') 734 local_group = parser.add_mutually_exclusive_group() 735 local_group.add_argument( 736 '--local-pixel-tests', 737 action='store_true', 738 default=None, 739 help='Specifies to run the Skia Gold pixel tests in local mode. When run ' 740 'in local mode, uploading to Gold is disabled and traditional ' 741 'generated/golden/diff images are output instead of triage links. ' 742 'Running in local mode also implies --no-luci-auth. If both this ' 743 'and --no-local-pixel-tests are left unset, the test harness will ' 744 'attempt to detect whether it is running on a workstation or not ' 745 'and set the options accordingly.') 746 local_group.add_argument( 747 '--no-local-pixel-tests', 748 action='store_false', 749 dest='local_pixel_tests', 750 help='Specifies to run the Skia Gold pixel tests in non-local (bot) ' 751 'mode. 
When run in this mode, data is actually uploaded to Gold and ' 752 'triage links are generated. If both this and --local-pixel-tests ' 753 'are left unset, the test harness will attempt to detect whether ' 754 'it is running on a workstation or not and set the options ' 755 'accordingly.') 756 parser.add_argument( 757 '--no-luci-auth', 758 action='store_true', 759 default=False, 760 help="Don't use the serve account provided by LUCI for authentication " 761 'with Skia Gold, instead relying on gsutil to be pre-authenticated. ' 762 'Meant for testing locally instead of on the bots.') 763 parser.add_argument( 764 '--bypass-skia-gold-functionality', 765 action='store_true', 766 default=False, 767 help='Bypass all interaction with Skia Gold, effectively disabling the ' 768 'image comparison portion of any tests that use Gold. Only meant to be ' 769 'used in case a Gold outage occurs and cannot be fixed quickly.') 770 771 772 def AddHostsideTestOptions(parser): 773 """Adds hostside test options to |parser|.""" 774 775 parser = parser.add_argument_group('hostside arguments') 776 777 parser.add_argument( 778 '-s', '--test-suite', required=True, 779 help='Hostside test suite to run.') 780 parser.add_argument( 781 '--test-apk-as-instant', 782 action='store_true', 783 help='Install the test apk as an instant app. ' 784 'Instant apps run in a more restrictive execution environment.') 785 parser.add_argument( 786 '--additional-apk', 787 action='append', 788 dest='additional_apks', 789 default=[], 790 type=_RealPath, 791 help='Additional apk that must be installed on ' 792 'the device when the tests are run') 793 parser.add_argument( 794 '--use-webview-provider', 795 type=_RealPath, default=None, 796 help='Use this apk as the webview provider during test. 
          'The original provider will be restored if possible, '
          "on Nougat the provider can't be determined and so "
          'the system will choose the default provider.')
  parser.add_argument(
      '--tradefed-executable',
      type=_RealPath, default=None,
      help='Location of the cts-tradefed script')
  parser.add_argument(
      '--tradefed-aapt-path',
      type=_RealPath, default=None,
      help='Location of the directory containing aapt binary')
  parser.add_argument(
      '--tradefed-adb-path',
      type=_RealPath, default=None,
      help='Location of the directory containing adb binary')
  # The below arguments are not used, but allow us to pass the same arguments
  # from run_cts.py regardless of type of run (instrumentation/hostside)
  parser.add_argument(
      '--apk-under-test',
      help=argparse.SUPPRESS)
  parser.add_argument(
      '--use-apk-under-test-flags-file',
      action='store_true',
      help=argparse.SUPPRESS)
  parser.add_argument(
      '-E', '--exclude-annotation',
      dest='exclude_annotation_str',
      help=argparse.SUPPRESS)


def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""

  parser = parser.add_argument_group('junit arguments')

  parser.add_argument(
      '--coverage-on-the-fly',
      action='store_true',
      help='Generate coverage data by Jacoco on-the-fly instrumentation.')
  parser.add_argument(
      '--coverage-dir', type=os.path.realpath,
      help='Directory to store coverage info.')
  parser.add_argument(
      '--package-filter',
      help='Filters tests by package.')
  parser.add_argument(
      '--runner-filter',
      help='Filters tests by runner class. Must be fully qualified.')
  parser.add_argument('--json-config',
                      help='Runs only tests listed in this config.')
  parser.add_argument(
      '--shards',
      type=int,
      help='Number of shards to run junit tests in parallel on. Only 1 shard '
      'is supported when test-filter is specified. Values less than 1 will '
      'use auto select.')
  parser.add_argument('--shard-filter',
                      help='Comma separated list of shard indices to run.')
  parser.add_argument(
      '-s', '--test-suite', required=True,
      help='JUnit test suite to run.')
  # -w and --debug-socket both write to |debug_socket|, so they are mutually
  # exclusive by construction.
  debug_group = parser.add_mutually_exclusive_group()
  debug_group.add_argument(
      '-w', '--wait-for-java-debugger', action='store_const', const='8701',
      dest='debug_socket', help='Alias for --debug-socket=8701')
  debug_group.add_argument(
      '--debug-socket',
      help='Wait for java debugger to attach at specified socket address '
           'before running any application code. Also disables test timeouts '
           'and sets retries=0.')

  # These arguments are for Android Robolectric tests.
  parser.add_argument(
      '--robolectric-runtime-deps-dir',
      help='Path to runtime deps for Robolectric.')
  parser.add_argument('--native-libs-dir',
                      help='Path to search for native libraries.')
  parser.add_argument(
      '--resource-apk',
      required=True,
      help='Path to .ap_ containing binary resources for Robolectric.')
  parser.add_argument('--shadows-allowlist',
                      help='Path to Allowlist file for Shadows.')


def AddLinkerTestOptions(parser):
  """Adds linker test options to |parser|."""

  parser = parser.add_argument_group('linker arguments')

  parser.add_argument(
      '--test-apk',
      type=os.path.realpath,
      help='Path to the linker test APK.')


def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""

  parser = parser.add_argument_group('monkey arguments')

  parser.add_argument('--browser',
                      required=True,
                      choices=list(constants.PACKAGE_INFO.keys()),
                      metavar='BROWSER',
                      help='Browser under test.')
  parser.add_argument(
      '--category',
      nargs='*', dest='categories', default=[],
      help='A list of allowed categories. Monkey will only visit activities '
           'that are listed with one of the specified categories.')
  parser.add_argument(
      '--event-count',
      default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  parser.add_argument(
      '--seed',
      type=int,
      help='Seed value for pseudo-random generator. Same seed value generates '
           'the same sequence of events. Seed is randomized by default.')
  parser.add_argument(
      '--throttle',
      default=100, type=int,
      help='Delay between events (ms) (default: %(default)s). ')


def AddPythonTestOptions(parser):
  """Adds python test options to |parser|."""

  parser = parser.add_argument_group('python arguments')

  parser.add_argument('-s',
                      '--suite',
                      dest='suite_name',
                      metavar='SUITE_NAME',
                      choices=list(constants.PYTHON_UNIT_TEST_SUITES.keys()),
                      help='Name of the test suite to run.')


def _CreateClassToFileNameDict(test_apk):
  """Creates a dict mapping classes to file names from size-info apk.

  Args:
    test_apk: Path to the test apk; only its basename is used to locate the
        matching .jar.info file under <out>/size-info.

  Returns:
    A dict mapping fully-qualified class names to '//'-rooted source paths.
    Empty if the size-info file does not exist.
  """
  constants.CheckOutputDirectory()
  test_apk_size_info = os.path.join(constants.GetOutDirectory(), 'size-info',
                                    os.path.basename(test_apk) + '.jar.info')

  class_to_file_dict = {}
  # Some tests such as webview_cts_tests use a separately downloaded apk to run
  # tests. This means the apk may not have been built by the system and hence
  # no size info file exists.
  if not os.path.exists(test_apk_size_info):
    logging.debug('Apk size file not found. %s', test_apk_size_info)
    return class_to_file_dict

  with open(test_apk_size_info, 'r') as f:
    for line in f:
      # Each line is "<class>,<file>"; split only on the first comma since
      # file names may contain commas.
      file_class, file_name = line.rstrip().split(',', 1)
      # Only want files that are not prebuilt.
      if file_name.startswith('../../'):
        class_to_file_dict[file_class] = str(
            file_name.replace('../../', '//', 1))

  return class_to_file_dict


def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests."""
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']

  # Temporarily prepend the suite path so its modules are importable; the
  # finally block below undoes exactly this one insertion.
  sys.path = [suite_path] + sys.path
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    sys.path = sys.path[1:]


# Test types that run in platform mode even without --enable-platform-mode.
_DEFAULT_PLATFORM_MODE_TESTS = [
    'gtest', 'hostside', 'instrumentation', 'junit', 'linker', 'monkey'
]


def RunTestsCommand(args, result_sink_client=None):
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.
    result_sink_client: A ResultSinkClient object.

  Returns:
    Integer indicated exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)
  logging.info('command: %s', shlex.join(sys.argv))
  if args.enable_platform_mode or command in _DEFAULT_PLATFORM_MODE_TESTS:
    return RunTestsInPlatformMode(args, result_sink_client)

  if command == 'python':
    return _RunPythonTests(args)
  raise Exception('Unknown test type.')


def _SinkTestResult(test_result, test_file_name, result_sink_client):
  """Upload test result to result_sink.

  Args:
    test_result: A BaseTestResult object
    test_file_name: A string representing the file location of the test
    result_sink_client: A ResultSinkClient object

  Returns:
    N/A
  """
  # Some tests put in non utf-8 char as part of the test
  # which breaks uploads, so need to decode and re-encode.
  log_decoded = test_result.GetLog()
  if isinstance(log_decoded, bytes):
    log_decoded = log_decoded.decode('utf-8', 'replace')
  html_artifact = ''
  https_artifacts = []
  # Only https links are rendered as artifacts; anything else is logged and
  # dropped.
  for link_name, link_url in sorted(test_result.GetLinks().items()):
    if link_url.startswith('https:'):
      https_artifacts.append('<li><a target="_blank" href=%s>%s</a></li>' %
                             (link_url, link_name))
    else:
      logging.info('Skipping non-https link %r (%s) for test %s.', link_name,
                   link_url, test_result.GetName())
  if https_artifacts:
    html_artifact += '<ul>%s</ul>' % '\n'.join(https_artifacts)
  result_sink_client.Post(test_result.GetNameForResultSink(),
                          test_result.GetType(),
                          test_result.GetDuration(),
                          log_decoded,
                          test_file_name,
                          variant=test_result.GetVariantForResultSink(),
                          failure_reason=test_result.GetFailureReason(),
                          html_artifact=html_artifact)


_SUPPORTED_IN_PLATFORM_MODE = [
    # TODO(jbudorick): Add support for more test types.
    'gtest',
    'hostside',
    'instrumentation',
    'junit',
    'linker',
    'monkey',
]


def UploadTestScriptRecords(result_sink_client, exc_recorder, mm_recorder):
  '''Upload test script data, i.e. exceptions and metrics to ResultDB.

  Retries up to three times, progressively shedding payload (stacktraces
  first, then the exception records themselves) so that an oversized or
  failing upload never fails the test task.

  Args:
    result_sink_client: A ResultSinkClient object
    exc_recorder: The module to create and manage exception records.
    mm_recorder: The module to create and manage measure records.
  '''
  if not result_sink_client:
    return
  if not exc_recorder.size() and not mm_recorder.size():
    return

  try_count_max = 3
  for try_count in range(1, try_count_max + 1):
    logging.info('Uploading test script records to RDB. (TRY %d/%d)', try_count,
                 try_count_max)
    try:
      records = {}
      if exc_recorder.size():
        records[exc_recorder.EXCEPTION_OCCURRENCES_KEY] = exc_recorder.to_dict()
      if mm_recorder.size():
        records[mm_recorder.TEST_SCRIPT_METRICS_KEY] = mm_recorder.to_dict()
      result_sink_client.UpdateInvocationExtendedProperties(records)
      exc_recorder.clear()
      mm_recorder.clear()
      break
    except Exception as e:  # pylint: disable=W0703
      logging.error("Got error %s when uploading test script records.", e)
      # Upload can fail due to record size being too big.
      # In this case, let's try to reduce the size.
      if try_count == try_count_max - 2:
        # Clear all the stacktraces to reduce size.
        exc_recorder.clear_stacktrace()
      elif try_count == try_count_max - 1:
        # For the exception recorder, clear all the records and just report
        # the upload failure.
        exc_recorder.clear()
        exc_recorder.register(e)
      elif try_count == try_count_max:
        # Swallow all the records if the upload fails again and hit the max
        # try so that it won't fail the test task (and it shouldn't).
        exc_recorder.clear()
        mm_recorder.clear()
        logging.error("Hit max retry. Skip uploading test script records.")


def RunTestsInPlatformMode(args, result_sink_client=None):
  """Runs tests using the platform-mode environment/test_run machinery.

  Sets up SIGTERM handling, results collection/upload, and the environment,
  test instance and test run objects, then executes the requested number of
  repetitions.

  Args:
    args: argparse.Namespace object.
    result_sink_client: A ResultSinkClient object, or None.

  Returns:
    0 if every iteration passed, constants.ERROR_EXIT_CODE otherwise.
    Exits with constants.INFRA_EXIT_CODE on infra errors.
  """

  def infra_error(message):
    logging.fatal(message)
    sys.exit(constants.INFRA_EXIT_CODE)

  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    infra_error('%s is not yet supported in platform mode' % args.command)

  ### Set up sigterm handler.

  contexts_to_notify_on_sigterm = []
  def unexpected_sigterm(_signum, _frame):
    # Log a stack for every live thread to aid debugging, then notify the
    # registered contexts (env/test_run) before aborting with an infra error.
    msg = [
        'Received SIGTERM. Shutting down.',
    ]
    for live_thread in threading.enumerate():
      # pylint: disable=protected-access
      thread_stack = ''.join(traceback.format_stack(
          sys._current_frames()[live_thread.ident]))
      msg.extend([
          'Thread "%s" (ident: %s) is currently running:' % (
              live_thread.name, live_thread.ident),
          thread_stack])

    for context in contexts_to_notify_on_sigterm:
      context.ReceivedSigterm()

    infra_error('\n'.join(msg))

  signal.signal(signal.SIGTERM, unexpected_sigterm)

  ### Set up results handling.
  # TODO(jbudorick): Rewrite results handling.

  # all_raw_results is a list of lists of
  # base_test_result.TestRunResults objects. Each instance of
  # TestRunResults contains all test results produced by a single try,
  # while each list of TestRunResults contains all tries in a single
  # iteration.
  all_raw_results = []

  # all_iteration_results is a list of base_test_result.TestRunResults
  # objects. Each instance of TestRunResults contains the last test
  # result for each test run in that iteration.
  all_iteration_results = []

  global_results_tags = set()

  json_file = tempfile.NamedTemporaryFile(delete=False)
  json_file.close()

  @contextlib.contextmanager
  def json_finalizer():
    # On exit, move the temp JSON file to whichever output path was
    # requested, or delete it if none was.
    try:
      yield
    finally:
      if args.json_results_file and os.path.exists(json_file.name):
        shutil.move(json_file.name, args.json_results_file)
      elif args.isolated_script_test_output and os.path.exists(json_file.name):
        shutil.move(json_file.name, args.isolated_script_test_output)
      else:
        os.remove(json_file.name)

  @contextlib.contextmanager
  def json_writer():
    # Serializes all accumulated results to json_file on exit (even on
    # exception, in which case results are tagged UNRELIABLE_RESULTS), then
    # uploads individual results to result-sink if enabled.
    try:
      yield
    except Exception:
      global_results_tags.add('UNRELIABLE_RESULTS')
      raise
    finally:
      if args.isolated_script_test_output:
        interrupted = 'UNRELIABLE_RESULTS' in global_results_tags
        json_results.GenerateJsonTestResultFormatFile(all_raw_results,
                                                      interrupted,
                                                      json_file.name,
                                                      indent=2)
      else:
        json_results.GenerateJsonResultsFile(
            all_raw_results,
            json_file.name,
            global_tags=list(global_results_tags),
            indent=2)

      test_class_to_file_name_dict = {}
      # Test Location is only supported for instrumentation tests as it
      # requires the size-info file.
      # NOTE: test_instance is bound later in this function; this closure is
      # only entered after it exists (late binding).
      if test_instance.TestType() == 'instrumentation':
        test_class_to_file_name_dict = _CreateClassToFileNameDict(args.test_apk)

      if result_sink_client:
        for run in all_raw_results:
          for results in run:
            for r in results.GetAll():
              # Matches chrome.page_info.PageInfoViewTest#testChromePage
              match = re.search(r'^(.+\..+)#', r.GetName())
              test_file_name = test_class_to_file_name_dict.get(
                  match.group(1)) if match else None
              _SinkTestResult(r, test_file_name, result_sink_client)

  @contextlib.contextmanager
  def upload_logcats_file():
    # Best-effort upload of the unified logcat file to logdog on exit.
    try:
      yield
    finally:
      if not args.logcat_output_file:
        logging.critical('Cannot upload logcat file: no file specified.')
      elif not os.path.exists(args.logcat_output_file):
        logging.critical("Cannot upload logcat file: file doesn't exist.")
      else:
        with open(args.logcat_output_file) as src:
          dst = logdog_helper.open_text('unified_logcats')
          if dst:
            shutil.copyfileobj(src, dst)
            dst.close()
            logging.critical(
                'Logcat: %s', logdog_helper.get_viewer_url('unified_logcats'))

  # 'in' on an argparse.Namespace tests attribute presence, so this is a no-op
  # context for test types that don't define --upload-logcats-file.
  logcats_uploader = contextlib_ext.Optional(
      upload_logcats_file(),
      'upload_logcats_file' in args and args.upload_logcats_file)

  save_detailed_results = (args.local_output or not local_utils.IsOnSwarming()
                           ) and not args.isolated_script_test_output

  @contextlib.contextmanager
  def test_script_records_uploader():
    try:
      yield
    finally:
      UploadTestScriptRecords(result_sink_client, exception_recorder, measures)

  ### Set up test objects.

  out_manager = output_manager_factory.CreateOutputManager(args)
  env = environment_factory.CreateEnvironment(
      args, out_manager, infra_error)
  test_instance = test_instance_factory.CreateTestInstance(args, infra_error)
  test_run = test_run_factory.CreateTestRun(env, test_instance, infra_error)

  contexts_to_notify_on_sigterm.append(env)
  contexts_to_notify_on_sigterm.append(test_run)

  if args.list_tests:
    try:
      with out_manager, env, test_instance, test_run:
        test_names = test_run.GetTestsForListing()
        print('There are {} tests:'.format(len(test_names)))
        for n in test_names:
          print(n)
        return 0
    except NotImplementedError:
      sys.stderr.write('Test does not support --list-tests (type={}).\n'.format(
          args.command))
      return 1

  if getattr(args, 'list_data', False):
    with out_manager, env, test_instance, test_run:
      data_deps = test_run.GetDataDepsForListing()

      print('There are {} data files:'.format(len(data_deps)))
      for d in data_deps:
        print(d)
      return 0

  ### Run.
  with out_manager, json_finalizer():
    # |raw_logs_fh| is only used by Robolectric tests.
    raw_logs_fh = io.StringIO() if save_detailed_results else None

    with json_writer(), test_script_records_uploader(), logcats_uploader, \
         env, test_instance, test_run:

      repetitions = (range(args.repeat +
                           1) if args.repeat >= 0 else itertools.count())
      result_counts = collections.defaultdict(
          lambda: collections.defaultdict(int))
      iteration_count = 0
      for _ in repetitions:
        # raw_results will be populated with base_test_result.TestRunResults by
        # test_run.RunTests(). It is immediately added to all_raw_results so
        # that in the event of an exception, all_raw_results will already have
        # the up-to-date results and those can be written to disk.
        raw_results = []
        all_raw_results.append(raw_results)

        test_run.RunTests(raw_results, raw_logs_fh=raw_logs_fh)
        if not raw_results:
          all_raw_results.pop()
          continue

        # Iterate in reverse so that the FIRST result recorded for each test
        # wins within the iteration's summary.
        iteration_results = base_test_result.TestRunResults()
        for r in reversed(raw_results):
          iteration_results.AddTestRunResults(r)
        all_iteration_results.append(iteration_results)
        iteration_count += 1

        for r in iteration_results.GetAll():
          result_counts[r.GetName()][r.GetType()] += 1

        report_results.LogFull(
            results=iteration_results,
            test_type=test_instance.TestType(),
            test_package=test_run.TestPackage(),
            annotation=getattr(args, 'annotations', None),
            flakiness_server=getattr(args, 'flakiness_dashboard_server',
                                     None))

        failed_tests = (iteration_results.GetNotPass() -
                        iteration_results.GetSkip())
        if failed_tests:
          _LogRerunStatement(failed_tests, args.wrapper_script_args)

        if args.break_on_failure and not iteration_results.DidRunPass():
          break

      if iteration_count > 1:
        # display summary results
        # only display results for a test if at least one test did not pass
        all_pass = 0
        tot_tests = 0
        for test_name in result_counts:
          tot_tests += 1
          if any(result_counts[test_name][x] for x in (
              base_test_result.ResultType.FAIL,
              base_test_result.ResultType.CRASH,
              base_test_result.ResultType.TIMEOUT,
              base_test_result.ResultType.UNKNOWN)):
            logging.critical(
                '%s: %s',
                test_name,
                ', '.join('%s %s' % (str(result_counts[test_name][i]), i)
                          for i in base_test_result.ResultType.GetTypes()))
          else:
            all_pass += 1

        logging.critical('%s of %s tests passed in all %s runs',
                         str(all_pass),
                         str(tot_tests),
                         str(iteration_count))

    # Must run after json_writer has exited so json_file is fully written.
    if save_detailed_results:
      assert raw_logs_fh
      raw_logs_fh.seek(0)
      raw_logs = raw_logs_fh.read()
      if raw_logs:
        with out_manager.ArchivedTempfile(
            'raw_logs.txt', 'raw_logs',
            output_manager.Datatype.TEXT) as raw_logs_file:
          raw_logs_file.write(raw_logs)
        logging.critical('RAW LOGS: %s', raw_logs_file.Link())

      with out_manager.ArchivedTempfile(
          'test_results_presentation.html',
          'test_results_presentation',
          output_manager.Datatype.HTML) as results_detail_file:
        result_html_string, _, _ = test_results_presentation.result_details(
            json_path=json_file.name,
            test_name=args.command,
            cs_base_url='http://cs.chromium.org',
            local_output=True)
        results_detail_file.write(result_html_string)
        results_detail_file.flush()
      logging.critical('TEST RESULTS: %s', results_detail_file.Link())

      ui_screenshots = test_results_presentation.ui_screenshot_set(
          json_file.name)
      if ui_screenshots:
        with out_manager.ArchivedTempfile(
            'ui_screenshots.json',
            'ui_capture',
            output_manager.Datatype.JSON) as ui_screenshot_file:
          ui_screenshot_file.write(ui_screenshots)
        logging.critical('UI Screenshots: %s', ui_screenshot_file.Link())

  return (0 if all(r.DidRunPass() for r in all_iteration_results)
          else constants.ERROR_EXIT_CODE)


def _LogRerunStatement(failed_tests, wrapper_arg_str):
  """Logs a message that can rerun the failed tests.

  Logs a copy/pasteable message that filters tests so just the failing tests
  are run.

  Args:
    failed_tests: A set of test results that did not pass.
    wrapper_arg_str: A string of args that were passed to the called wrapper
        script.
  """
  rerun_arg_list = []
  try:
    constants.CheckOutputDirectory()
  # constants.CheckOutputDirectory throws bare exceptions.
  except:  # pylint: disable=bare-except
    logging.exception('Output directory not found. Unable to generate failing '
                      'test filter file.')
    return

  output_directory = constants.GetOutDirectory()
  if not os.path.exists(output_directory):
    logging.error('Output directory not found. Unable to generate failing '
                  'test filter file.')
    return

  test_filter_file = os.path.join(os.path.relpath(output_directory),
                                  _RERUN_FAILED_TESTS_FILE)
  arg_list = shlex.split(wrapper_arg_str) if wrapper_arg_str else sys.argv
  index = 0
  while index < len(arg_list):
    arg = arg_list[index]
    # Skip adding the filter=<file> and/or the filter arg as we're replacing
    # it with the new filter arg.
    # This covers --test-filter=, --test-launcher-filter-file=, --gtest-filter=,
    # --test-filter *Foobar.baz, -f *foobar, --package-filter <package>,
    # --runner-filter <runner>.
    if 'filter' in arg or arg == '-f':
      # '=' form consumes one token; the space-separated form consumes two.
      index += 1 if '=' in arg else 2
      continue

    rerun_arg_list.append(arg)
    index += 1

  failed_test_list = [str(t) for t in failed_tests]
  with open(test_filter_file, 'w') as fp:
    for t in failed_test_list:
      # Test result names can have # in them that don't match when applied as
      # a test name filter.
      fp.write('%s\n' % t.replace('#', '.'))

  rerun_arg_list.append('--test-launcher-filter-file=%s' % test_filter_file)
  msg = """
    %d Test(s) failed.
    Rerun failed tests with copy and pastable command:
        %s
    """
  logging.critical(msg, len(failed_tests), shlex.join(rerun_arg_list))


def DumpThreadStacks(_signal, _frame):
  """SIGUSR1 handler: logs the stack of every live thread."""
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)


def main():
  """Builds the per-test-type argument parser, validates args and runs tests."""
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(
      title='test types', dest='command')

  subp = command_parsers.add_parser(
      'gtest',
      help='googletest-based C++ tests')
  AddCommonOptions(subp)
  AddDeviceOptions(subp)
  AddEmulatorOptions(subp)
  AddGTestOptions(subp)
  AddTracingOptions(subp)
  AddCommandLineOptions(subp)

  subp = command_parsers.add_parser(
      'hostside',
      help='Webview CTS host-side tests')
  AddCommonOptions(subp)
  AddDeviceOptions(subp)
  AddEmulatorOptions(subp)
  AddHostsideTestOptions(subp)

  subp = command_parsers.add_parser(
      'instrumentation',
      help='InstrumentationTestCase-based Java tests')
  AddCommonOptions(subp)
  AddDeviceOptions(subp)
  AddEmulatorOptions(subp)
  AddInstrumentationTestOptions(subp)
  AddSkiaGoldTestOptions(subp)
  AddTracingOptions(subp)
  AddCommandLineOptions(subp)

  subp = command_parsers.add_parser(
      'junit',
      help='JUnit4-based Java tests')
  AddCommonOptions(subp)
  AddJUnitTestOptions(subp)

  subp = command_parsers.add_parser(
      'linker',
      help='linker tests')
  AddCommonOptions(subp)
  AddDeviceOptions(subp)
  AddEmulatorOptions(subp)
  AddLinkerTestOptions(subp)

  subp = command_parsers.add_parser(
      'monkey',
      help="tests based on Android's monkey command")
  AddCommonOptions(subp)
  AddDeviceOptions(subp)
  AddEmulatorOptions(subp)
  AddMonkeyTestOptions(subp)

  subp = command_parsers.add_parser(
      'python',
      help='python tests based on unittest.TestCase')
  AddCommonOptions(subp)
  AddPythonTestOptions(subp)

  args, unknown_args = parser.parse_known_args()

  if unknown_args:
    if getattr(args, 'allow_unknown', None):
      args.command_line_flags = unknown_args
    else:
      parser.error('unrecognized arguments: %s' % ' '.join(unknown_args))

  # --enable-concurrent-adb does not handle device reboots gracefully.
  if getattr(args, 'enable_concurrent_adb', None):
    if getattr(args, 'replace_system_package', None):
      logging.warning(
          'Ignoring --enable-concurrent-adb due to --replace-system-package')
      args.enable_concurrent_adb = False
    elif getattr(args, 'system_packages_to_remove', None):
      logging.warning(
          'Ignoring --enable-concurrent-adb due to --remove-system-package')
      args.enable_concurrent_adb = False
    elif getattr(args, 'use_webview_provider', None):
      logging.warning(
          'Ignoring --enable-concurrent-adb due to --use-webview-provider')
      args.enable_concurrent_adb = False

  if (getattr(args, 'coverage_on_the_fly', False)
      and not getattr(args, 'coverage_dir', '')):
    parser.error('--coverage-on-the-fly requires --coverage-dir')

  # Debugger sessions have no timeout, so retrying would only re-enter the
  # debugger.
  if (getattr(args, 'debug_socket', None)
      or getattr(args, 'wait_for_java_debugger', None)):
    args.num_retries = 0

  if (getattr(args, 'connect_over_network', False)
      and len(getattr(args, 'test_devices', [])) != 1):
    parser.error('Need to specify a single device (via "--device") when using '
                 '--connect-over-network.')

  # Result-sink may not exist in the environment if rdb stream is not enabled.
  result_sink_client = result_sink.TryInitClient()

  try:
    return RunTestsCommand(args, result_sink_client)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    return constants.ERROR_EXIT_CODE
  except Exception:  # pylint: disable=W0703
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE


if __name__ == '__main__':
  exit_code = main()
  if exit_code == constants.INFRA_EXIT_CODE:
    # This exit code is returned in case of missing, unreachable,
    # or otherwise not fit for purpose test devices.
    # When this happens, the graceful cleanup triggered by sys.exit()
    # hangs indefinitely (on swarming - until it hits 20min timeout).
    # Skip cleanup (other than flushing output streams) and exit forcefully
    # to avoid the hang.
    sys.stdout.flush()
    sys.stderr.flush()
    os._exit(exit_code)  # pylint: disable=protected-access
  else:
    sys.exit(exit_code)