tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

run_executable_test.py (12219B)


      1 #!/usr/bin/env vpython3
      2 # Copyright 2022 The Chromium Authors
      3 # Use of this source code is governed by a BSD-style license that can be
      4 # found in the LICENSE file.
      5 """Implements commands for standalone CFv2 test executables."""
      6 
      7 import argparse
      8 import logging
      9 import os
     10 import shutil
     11 import subprocess
     12 import sys
     13 
     14 from typing import List, Optional
     15 
     16 from common import get_component_uri, get_host_arch, \
     17                   register_common_args, register_device_args, \
     18                   register_log_args
     19 from compatible_utils import map_filter_file_to_package_file
     20 from ffx_integration import FfxTestRunner, run_symbolizer
     21 from test_runner import TestRunner
     22 
     23 # Fallback number of concurrent jobs passed to the test server spawner
     23 # when --test-launcher-jobs is not specified on the command line.
     23 DEFAULT_TEST_SERVER_CONCURRENCY = 4
     24 
     25 
     26 def _copy_custom_output_file(test_runner: FfxTestRunner, file: str,
     27                             dest: str) -> None:
     28    """Copy custom test output file from the device to the host."""
     29 
     30    artifact_dir = test_runner.get_custom_artifact_directory()
     31    if not artifact_dir:
     32        logging.error(
     33            'Failed to parse custom artifact directory from test summary '
     34            'output files. Not copying %s from the device', file)
     35        return
     36    shutil.copy(os.path.join(artifact_dir, file), dest)
     37 
     38 
     39 def _copy_coverage_files(test_runner: FfxTestRunner, dest: str) -> None:
     40    """Copy debug data file from the device to the host if it exists."""
     41 
     42    coverage_dir = test_runner.get_debug_data_directory()
     43    if not coverage_dir:
     44        logging.info(
     45            'Failed to parse coverage data directory from test summary '
     46            'output files. Not copying coverage files from the device.')
     47        return
     48    shutil.copytree(coverage_dir, dest, dirs_exist_ok=True)
     49 
     50 
# pylint: disable=too-many-instance-attributes
class ExecutableTestRunner(TestRunner):
    """Test runner for running standalone test executables."""

    def __init__(  # pylint: disable=too-many-arguments
            self, out_dir: str, test_args: List[str], test_name: str,
            target_id: Optional[str], code_coverage_dir: str,
            logs_dir: Optional[str], package_deps: List[str],
            test_realm: Optional[str]) -> None:
        """Initializes the runner.

        Args:
            out_dir: Build output directory, forwarded to TestRunner.
            test_args: Raw command-line arguments; parsed by _get_args.
            test_name: Name of the test package; also used to build the
                component URI that gets run.
            target_id: Target device identifier, or None.
            code_coverage_dir: Host directory for coverage output; a
                falsy value disables coverage copying in _postprocess.
            logs_dir: Directory for ffx logs, or None.
            package_deps: Paths to package dependencies, used later to
                locate ids.txt symbol files.
            test_realm: Optional realm to run the test in.
        """
        super().__init__(out_dir, test_args, [test_name], target_id,
                         package_deps)
        if not self._test_args:
            self._test_args = []
        self._test_name = test_name
        self._code_coverage_dir = code_coverage_dir
        # The following output locations are filled in by _get_args from
        # the parsed flags and consumed by _postprocess to copy results
        # back from the device.
        self._custom_artifact_directory = None
        self._isolated_script_test_output = None
        self._isolated_script_test_perf_output = None
        self._logs_dir = logs_dir
        self._test_launcher_summary_output = None
        # Set only when --enable-test-server is passed; stopped in
        # _postprocess.
        self._test_server = None
        self._test_realm = test_realm

    def _get_args(self) -> List[str]:
        """Parses self._test_args and builds the child process arguments.

        Launcher-level flags (shard indices, output paths, filter files,
        test server setup) are consumed here; any unrecognized arguments
        are passed through to the test process unchanged. Host output
        paths are rewritten to the component's /custom_artifacts
        directory and copied back to the host in _postprocess.

        Returns:
            The argument list to pass to the test component.
        """
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--isolated-script-test-output',
            help='If present, store test results on this path.')
        parser.add_argument('--isolated-script-test-perf-output',
                            help='If present, store chartjson results on this '
                            'path.')
        parser.add_argument(
            '--test-launcher-shard-index',
            type=int,
            default=os.environ.get('GTEST_SHARD_INDEX'),
            help='Index of this instance amongst swarming shards.')
        parser.add_argument(
            '--test-launcher-summary-output',
            help='Where the test launcher will output its json.')
        parser.add_argument(
            '--test-launcher-total-shards',
            type=int,
            default=os.environ.get('GTEST_TOTAL_SHARDS'),
            help='Total number of swarming shards of this suite.')
        parser.add_argument(
            '--test-launcher-filter-file',
            help='Filter file(s) passed to target test process. Use ";" to '
            'separate multiple filter files.')
        parser.add_argument('--test-launcher-jobs',
                            type=int,
                            help='Sets the number of parallel test jobs.')
        parser.add_argument('--enable-test-server',
                            action='store_true',
                            default=False,
                            help='Enable Chrome test server spawner.')
        parser.add_argument('--test-arg',
                            dest='test_args',
                            action='append',
                            help='Legacy flag to pass in arguments for '
                            'the test process. These arguments can now be '
                            'passed in without a preceding "--" flag.')
        args, child_args = parser.parse_known_args(self._test_args)
        # For each host output path, remember it for _postprocess and
        # point the child at the equivalent file under /custom_artifacts.
        if args.isolated_script_test_output:
            self._isolated_script_test_output = args.isolated_script_test_output
            child_args.append(
                '--isolated-script-test-output=/custom_artifacts/%s' %
                os.path.basename(self._isolated_script_test_output))
        if args.isolated_script_test_perf_output:
            self._isolated_script_test_perf_output = \
                args.isolated_script_test_perf_output
            child_args.append(
                '--isolated-script-test-perf-output=/custom_artifacts/%s' %
                os.path.basename(self._isolated_script_test_perf_output))
        if args.test_launcher_shard_index is not None:
            child_args.append('--test-launcher-shard-index=%d' %
                              args.test_launcher_shard_index)
        if args.test_launcher_total_shards is not None:
            child_args.append('--test-launcher-total-shards=%d' %
                              args.test_launcher_total_shards)
        if args.test_launcher_summary_output:
            self._test_launcher_summary_output = \
                args.test_launcher_summary_output
            child_args.append(
                '--test-launcher-summary-output=/custom_artifacts/%s' %
                os.path.basename(self._test_launcher_summary_output))
        if args.test_launcher_filter_file:
            # Rewrite each ';'-separated host filter-file path to its
            # in-package location.
            test_launcher_filter_files = map(
                map_filter_file_to_package_file,
                args.test_launcher_filter_file.split(';'))
            child_args.append('--test-launcher-filter-file=' +
                              ';'.join(test_launcher_filter_files))
        if args.test_launcher_jobs is not None:
            test_concurrency = args.test_launcher_jobs
        else:
            test_concurrency = DEFAULT_TEST_SERVER_CONCURRENCY
        if args.enable_test_server:
            # Repos other than chromium may not have chrome_test_server_spawner,
            # and they may not run server at all, so only import the test_server
            # when it's really necessary.

            # pylint: disable=import-outside-toplevel
            from test_server import setup_test_server
            # pylint: enable=import-outside-toplevel
            self._test_server, spawner_url_base = setup_test_server(
                self._target_id, test_concurrency)
            child_args.append('--remote-test-server-spawner-url-base=%s' %
                              spawner_url_base)
        if get_host_arch() == 'x64':
            # TODO(crbug.com/40202294) Remove once Vulkan is enabled by
            # default.
            child_args.append('--use-vulkan=native')
        else:
            # TODO(crbug.com/42050042, crbug.com/42050537) Remove swiftshader
            # once the vulkan is enabled by default.
            child_args.extend(
                ['--use-vulkan=swiftshader', '--ozone-platform=headless'])
        if args.test_args:
            child_args.extend(args.test_args)
        return child_args

    def _postprocess(self, test_runner: FfxTestRunner) -> None:
        """Stops the test server and copies requested outputs to the host.

        Args:
            test_runner: The still-open ffx session whose artifact
                directories are read.
        """
        if self._test_server:
            self._test_server.Stop()
        if self._test_launcher_summary_output:
            _copy_custom_output_file(
                test_runner,
                os.path.basename(self._test_launcher_summary_output),
                self._test_launcher_summary_output)
        if self._isolated_script_test_output:
            _copy_custom_output_file(
                test_runner,
                os.path.basename(self._isolated_script_test_output),
                self._isolated_script_test_output)
        if self._isolated_script_test_perf_output:
            _copy_custom_output_file(
                test_runner,
                os.path.basename(self._isolated_script_test_perf_output),
                self._isolated_script_test_perf_output)
        if self._code_coverage_dir:
            # NOTE(review): the destination is the basename of the
            # requested dir, i.e. a path relative to the current working
            # directory — confirm this matches callers' expectations.
            _copy_coverage_files(test_runner,
                                 os.path.basename(self._code_coverage_dir))

    def run_test(self) -> subprocess.Popen:
        """Runs the test component and symbolizes its output.

        Returns:
            The completed test process; its returncode reflects the
            overall test result.
        """
        test_args = self._get_args()
        with FfxTestRunner(self._logs_dir) as test_runner:
            test_proc = test_runner.run_test(
                get_component_uri(self._test_name), test_args, self._target_id,
                self._test_realm)

            # Each package dependency directory is expected to carry an
            # ids.txt with symbol information next to the package file.
            symbol_paths = []
            for pkg_path in self.package_deps.values():
                symbol_paths.append(
                    os.path.join(os.path.dirname(pkg_path), 'ids.txt'))
            # Symbolize output from test process and print to terminal.
            symbolizer_proc = run_symbolizer(symbol_paths, test_proc.stdout,
                                             sys.stdout)
            symbolizer_proc.communicate()

            if test_proc.wait() == 0:
                logging.info('Process exited normally with status code 0.')
            else:
                # The test runner returns an error status code if *any*
                # tests fail, so we should proceed anyway.
                logging.warning('Process exited with status code %d.',
                                test_proc.returncode)
            # Copy artifacts while the ffx session is still open.
            self._postprocess(test_runner)
        return test_proc
    218 
    219 
    220 def create_executable_test_runner(runner_args: argparse.Namespace,
    221                                  test_args: List[str]):
    222    """Helper for creating an ExecutableTestRunner."""
    223 
    224    return ExecutableTestRunner(runner_args.out_dir, test_args,
    225                                runner_args.test_type, runner_args.target_id,
    226                                runner_args.code_coverage_dir,
    227                                runner_args.logs_dir, runner_args.package_deps,
    228                                runner_args.test_realm)
    229 
    230 
    231 def register_executable_test_args(parser: argparse.ArgumentParser) -> None:
    232    """Register common arguments for ExecutableTestRunner."""
    233 
    234    test_args = parser.add_argument_group('test', 'arguments for test running')
    235    test_args.add_argument('--code-coverage-dir',
    236                           default=None,
    237                           help='Directory to place code coverage '
    238                           'information. Only relevant when the target was '
    239                           'built with |fuchsia_code_coverage| set to true.')
    240    test_args.add_argument('--test-name',
    241                           dest='test_type',
    242                           help='Name of the test package (e.g. '
    243                           'unit_tests).')
    244    test_args.add_argument(
    245        '--test-realm',
    246        default=None,
    247        help='The realm to run the test in. This field is optional and takes '
    248        'the form: /path/to/realm:test_collection. See '
    249        'https://fuchsia.dev/go/components/non-hermetic-tests')
    250    test_args.add_argument('--package-deps',
    251                           action='append',
    252                           help='A list of the full path of the dependencies '
    253                           'to retrieve the symbol ids. Keeping it empty to '
    254                           'automatically generates from package_metadata.')
    255 
    256 
    257 def main():
    258    """Stand-alone function for running executable tests."""
    259 
    260    parser = argparse.ArgumentParser()
    261    register_common_args(parser)
    262    register_device_args(parser)
    263    register_log_args(parser)
    264    register_executable_test_args(parser)
    265    runner_args, test_args = parser.parse_known_args()
    266    runner = create_executable_test_runner(runner_args, test_args)
    267    return runner.run_test().returncode
    268 
    269 
# Script entry point: exit with the test process's return code so that
# calling automation can detect test failures.
if __name__ == '__main__':
    sys.exit(main())