tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

test_marionette_runner.py (19302B)


      1 # This Source Code Form is subject to the terms of the Mozilla Public
      2 # License, v. 2.0. If a copy of the MPL was not distributed with this
      3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
      4 
      5 import os
      6 
      7 import manifestparser
      8 import mozinfo
      9 import mozunit
     10 import pytest
     11 
     12 from unittest.mock import Mock, patch, mock_open, sentinel, DEFAULT
     13 
     14 from marionette_harness.runtests import MarionetteTestRunner
     15 
     16 
     17 @pytest.fixture
     18 def runner(mach_parsed_kwargs):
     19    """
     20    MarionetteTestRunner instance initialized with default options.
     21    """
     22    return MarionetteTestRunner(**mach_parsed_kwargs)
     23 
     24 
     25 @pytest.fixture
     26 def mock_runner(runner, mock_marionette, monkeypatch):
     27    """
     28    MarionetteTestRunner instance with mocked-out
     29    self.marionette and other properties,
     30    to enable testing runner.run_tests().
     31    """
     32    runner.driverclass = Mock(return_value=mock_marionette)
     33    for attr in ["run_test", "_capabilities"]:
     34        setattr(runner, attr, Mock())
     35    runner._appName = "fake_app"
     36    monkeypatch.setattr("marionette_harness.runner.base.mozversion", Mock())
     37    return runner
     38 
     39 
     40 @pytest.fixture
     41 def build_kwargs_using(mach_parsed_kwargs):
     42    """Helper function for test_build_kwargs_* functions"""
     43 
     44    def kwarg_builder(new_items, return_socket=False):
     45        mach_parsed_kwargs.update(new_items)
     46        runner = MarionetteTestRunner(**mach_parsed_kwargs)
     47        with patch("marionette_harness.runner.base.socket") as socket:
     48            built_kwargs = runner._build_kwargs()
     49        if return_socket:
     50            return built_kwargs, socket
     51        return built_kwargs
     52 
     53    return kwarg_builder
     54 
     55 
     56 @pytest.fixture
     57 def expected_driver_args(runner):
     58    """Helper fixture for tests of _build_kwargs
     59    with binary/emulator.
     60    Provides a dictionary of certain arguments
     61    related to binary/emulator settings
     62    which we expect to be passed to the
     63    driverclass constructor. Expected values can
     64    be updated in tests as needed.
     65    Provides convenience methods for comparing the
     66    expected arguments to the argument dictionary
     67    created by _build_kwargs."""
     68 
     69    class ExpectedDict(dict):
     70        def assert_matches(self, actual):
     71            for k, v in self.items():
     72                assert actual[k] == v
     73 
     74        def assert_keys_not_in(self, actual):
     75            for k in self.keys():
     76                assert k not in actual
     77 
     78    expected = ExpectedDict(host=None, port=None, bin=None)
     79    for attr in ["app", "app_args", "profile", "addons", "gecko_log"]:
     80        expected[attr] = getattr(runner, attr)
     81    return expected
     82 
     83 
     84 class ManifestFixture:
     85    def __init__(
     86        self,
     87        name="mock_manifest",
     88        tests=[{"path": "test_something.py", "expected": "pass"}],
     89    ):
     90        self.filepath = "/path/to/fake/manifest.toml"
     91        self.n_disabled = len([t for t in tests if "disabled" in t])
     92        self.n_enabled = len(tests) - self.n_disabled
     93        mock_manifest = Mock(
     94            spec=manifestparser.TestManifest, active_tests=Mock(return_value=tests)
     95        )
     96        self.manifest_class = Mock(return_value=mock_manifest)
     97        self.__repr__ = lambda: "<ManifestFixture {}>".format(name)
     98 
     99 
    100 @pytest.fixture
    101 def manifest():
    102    return ManifestFixture()
    103 
    104 
    105 @pytest.fixture(params=["enabled", "disabled", "enabled_disabled", "empty"])
    106 def manifest_with_tests(request):
    107    """
    108    Fixture for the contents of mock_manifest, where a manifest
    109    can include enabled tests, disabled tests, both, or neither (empty)
    110    """
    111    included = []
    112    if "enabled" in request.param:
    113        included += [
    114            ("test_expected_pass.py", "pass"),
    115            ("test_expected_fail.py", "fail"),
    116        ]
    117    if "disabled" in request.param:
    118        included += [
    119            ("test_pass_disabled.py", "pass", "skip-if: true"),
    120            ("test_fail_disabled.py", "fail", "skip-if: true"),
    121        ]
    122    keys = ("path", "expected", "disabled")
    123    active_tests = [dict(list(zip(keys, values))) for values in included]
    124 
    125    return ManifestFixture(request.param, active_tests)
    126 
    127 
    128 def test_args_passed_to_driverclass(mock_runner):
    129    built_kwargs = {"arg1": "value1", "arg2": "value2"}
    130    mock_runner._build_kwargs = Mock(return_value=built_kwargs)
    131    with pytest.raises(IOError):
    132        mock_runner.run_tests(["fake_tests.toml"])
    133    assert mock_runner.driverclass.call_args[1] == built_kwargs
    134 
    135 
    136 def test_build_kwargs_basic_args(build_kwargs_using):
    137    """Test the functionality of runner._build_kwargs:
    138    make sure that basic arguments (those which should
    139    always be included, irrespective of the runner's settings)
    140    get passed to the call to runner.driverclass"""
    141 
    142    basic_args = [
    143        "socket_timeout",
    144        "prefs",
    145        "startup_timeout",
    146        "verbose",
    147        "symbols_path",
    148    ]
    149    args_dict = {a: getattr(sentinel, a) for a in basic_args}
    150    # Mock an update method to work with calls to MarionetteTestRunner()
    151    args_dict["prefs"].update = Mock(return_value={})
    152    built_kwargs = build_kwargs_using([(a, getattr(sentinel, a)) for a in basic_args])
    153    for arg in basic_args:
    154        assert built_kwargs[arg] is getattr(sentinel, arg)
    155 
    156 
    157 @pytest.mark.parametrize("workspace", ["path/to/workspace", None])
    158 def test_build_kwargs_with_workspace(build_kwargs_using, workspace):
    159    built_kwargs = build_kwargs_using({"workspace": workspace})
    160    if workspace:
    161        assert built_kwargs["workspace"] == workspace
    162    else:
    163        assert "workspace" not in built_kwargs
    164 
    165 
    166 @pytest.mark.parametrize("address", ["host:123", None])
    167 def test_build_kwargs_with_address(build_kwargs_using, address):
    168    built_kwargs, socket = build_kwargs_using(
    169        {"address": address, "binary": None, "emulator": None}, return_socket=True
    170    )
    171    assert "connect_to_running_emulator" not in built_kwargs
    172    if address is not None:
    173        host, port = address.split(":")
    174        assert built_kwargs["host"] == host and built_kwargs["port"] == int(port)
    175        socket.socket().connect.assert_called_with((host, int(port)))
    176        assert socket.socket().close.called
    177    else:
    178        assert not socket.socket.called
    179 
    180 
    181 @pytest.mark.parametrize("address", ["host:123", None])
    182 @pytest.mark.parametrize("binary", ["path/to/bin", None])
    183 def test_build_kwargs_with_binary_or_address(
    184    expected_driver_args, build_kwargs_using, binary, address
    185 ):
    186    built_kwargs = build_kwargs_using(
    187        {"binary": binary, "address": address, "emulator": None}
    188    )
    189    if binary:
    190        expected_driver_args["bin"] = binary
    191        if address:
    192            host, port = address.split(":")
    193            expected_driver_args.update({"host": host, "port": int(port)})
    194        else:
    195            expected_driver_args.update({"host": "127.0.0.1", "port": 2828})
    196        expected_driver_args.assert_matches(built_kwargs)
    197    elif address is None:
    198        expected_driver_args.assert_keys_not_in(built_kwargs)
    199 
    200 
@pytest.mark.parametrize("address", ["host:123", None])
@pytest.mark.parametrize("emulator", [True, False, None])
def test_build_kwargs_with_emulator_or_address(
    expected_driver_args, build_kwargs_using, emulator, address
):
    """_build_kwargs forwards emulator settings and the address endpoint.

    Covers all combinations of emulator (True/False/None) and address
    (given/None) with binary always None, so only emulator and address
    determine which driver kwargs are built.
    """
    # Sentinels let us assert identity pass-through of each emulator option.
    emulator_props = [
        (a, getattr(sentinel, a)) for a in ["avd_home", "adb_path", "emulator_bin"]
    ]
    built_kwargs = build_kwargs_using(
        [("emulator", emulator), ("address", address), ("binary", None)]
        + emulator_props
    )
    if emulator:
        expected_driver_args.update(emulator_props)
        # The runner renames emulator_bin -> emulator_binary for driverclass.
        expected_driver_args["emulator_binary"] = expected_driver_args.pop(
            "emulator_bin"
        )
        expected_driver_args["bin"] = True
        if address:
            # Emulator plus explicit address: attach to a running emulator.
            expected_driver_args["connect_to_running_emulator"] = True
            host, port = address.split(":")
            expected_driver_args.update({"host": host, "port": int(port)})
        else:
            # Default endpoint; must not try to attach to a running emulator.
            expected_driver_args.update({"host": "127.0.0.1", "port": 2828})
            assert "connect_to_running_emulator" not in built_kwargs
        expected_driver_args.assert_matches(built_kwargs)
    elif not address:
        # Neither emulator nor address: none of these kwargs should appear.
        expected_driver_args.assert_keys_not_in(built_kwargs)
    229 
    230 
    231 def test_parsing_testvars(mach_parsed_kwargs):
    232    mach_parsed_kwargs.pop("tests")
    233    testvars_json_loads = [
    234        {"wifi": {"ssid": "blah", "keyManagement": "WPA-PSK", "psk": "foo"}},
    235        {"wifi": {"PEAP": "bar"}, "device": {"stuff": "buzz"}},
    236    ]
    237    expected_dict = {
    238        "wifi": {
    239            "ssid": "blah",
    240            "keyManagement": "WPA-PSK",
    241            "psk": "foo",
    242            "PEAP": "bar",
    243        },
    244        "device": {"stuff": "buzz"},
    245    }
    246    with patch(
    247        "marionette_harness.runtests.MarionetteTestRunner._load_testvars",
    248        return_value=testvars_json_loads,
    249    ) as load:
    250        runner = MarionetteTestRunner(**mach_parsed_kwargs)
    251        assert runner.testvars == expected_dict
    252        assert load.call_count == 1
    253 
    254 
    255 def test_load_testvars_throws_expected_errors(mach_parsed_kwargs):
    256    mach_parsed_kwargs["testvars"] = ["some_bad_path.json"]
    257    runner = MarionetteTestRunner(**mach_parsed_kwargs)
    258    with pytest.raises(IOError) as io_exc:
    259        runner._load_testvars()
    260    assert "does not exist" in str(io_exc.value)
    261    with patch("os.path.exists", return_value=True):
    262        with patch(
    263            "marionette_harness.runner.base.open",
    264            mock_open(read_data="[not {valid JSON]"),
    265        ):
    266            with pytest.raises(Exception) as json_exc:
    267                runner._load_testvars()
    268    assert "not properly formatted" in str(json_exc.value)
    269 
    270 
    271 def _check_crash_counts(has_crashed, runner, mock_marionette):
    272    if has_crashed:
    273        assert mock_marionette.check_for_crash.call_count == 1
    274        assert runner.crashed == 1
    275    else:
    276        assert runner.crashed == 0
    277 
    278 
    279 @pytest.mark.parametrize("has_crashed", [True, False])
    280 def test_increment_crash_count_in_run_test_set(runner, has_crashed, mock_marionette):
    281    fake_tests = [{"filepath": i, "expected": "pass"} for i in "abc"]
    282 
    283    with patch.multiple(runner, run_test=DEFAULT, marionette=mock_marionette):
    284        runner.run_test_set(fake_tests)
    285        if not has_crashed:
    286            assert runner.marionette.check_for_crash.call_count == len(fake_tests)
    287        _check_crash_counts(has_crashed, runner, runner.marionette)
    288 
    289 
    290 @pytest.mark.parametrize("has_crashed", [True, False])
    291 def test_record_crash(runner, has_crashed, mock_marionette):
    292    with patch.object(runner, "marionette", mock_marionette):
    293        assert runner.record_crash() == has_crashed
    294        _check_crash_counts(has_crashed, runner, runner.marionette)
    295 
    296 
    297 def test_add_test_module(runner):
    298    tests = ["test_something.py", "testSomething.js", "bad_test.py"]
    299    assert len(runner.tests) == 0
    300    for test in tests:
    301        with patch("os.path.abspath", return_value=test) as abspath:
    302            runner.add_test(test)
    303        assert abspath.called
    304        expected = {"filepath": test, "expected": "pass", "group": "default"}
    305        assert expected in runner.tests
    306    # add_test doesn't validate module names; 'bad_test.py' gets through
    307    assert len(runner.tests) == 3
    308 
    309 
    310 def test_add_test_directory(runner):
    311    test_dir = "path/to/tests"
    312    dir_contents = [
    313        (test_dir, ("subdir",), ("test_a.py", "bad_test_a.py")),
    314        (test_dir + "/subdir", (), ("test_b.py", "bad_test_b.py")),
    315    ]
    316    tests = list(dir_contents[0][2] + dir_contents[1][2])
    317    assert len(runner.tests) == 0
    318    # Need to use side effect to make isdir return True for test_dir and False for tests
    319    with patch("os.path.isdir", side_effect=[True] + [False for t in tests]) as isdir:
    320        with patch("os.walk", return_value=dir_contents) as walk:
    321            runner.add_test(test_dir)
    322    assert isdir.called and walk.called
    323    for test in runner.tests:
    324        assert os.path.normpath(test_dir) in test["filepath"]
    325    assert len(runner.tests) == 2
    326 
    327 
@pytest.mark.parametrize("test_files_exist", [True, False])
def test_add_test_manifest(
    mock_runner, manifest_with_tests, monkeypatch, test_files_exist
):
    """add_test on a manifest activates enabled tests and records skips.

    When the manifest's test files are missing (and it has enabled
    tests), add_test must raise IOError; either way the manifest is
    read and its active_tests consulted.
    """
    monkeypatch.setattr(
        "marionette_harness.runner.base.TestManifest",
        manifest_with_tests.manifest_class,
    )
    # add_test needs a live marionette; use the mocked driver instance.
    mock_runner.marionette = mock_runner.driverclass()
    with patch(
        "marionette_harness.runner.base.os.path.exists", return_value=test_files_exist
    ):
        if test_files_exist or manifest_with_tests.n_enabled == 0:
            mock_runner.add_test(manifest_with_tests.filepath)
            assert len(mock_runner.tests) == manifest_with_tests.n_enabled
            assert (
                len(mock_runner.manifest_skipped_tests)
                == manifest_with_tests.n_disabled
            )
            # ManifestFixture names files test_expected_<result>.py, so the
            # recorded path must match the expected outcome.
            for test in mock_runner.tests:
                assert test["filepath"].endswith(test["expected"] + ".py")
        else:
            # Enabled tests whose files are missing -> IOError.
            with pytest.raises(IOError):
                mock_runner.add_test(manifest_with_tests.filepath)

    assert manifest_with_tests.manifest_class().read.called
    assert manifest_with_tests.manifest_class().active_tests.called
    355 
    356 
def get_kwargs_passed_to_manifest(mock_runner, manifest, monkeypatch, **kwargs):
    """Helper function for test_manifest_* tests.
    Returns the kwargs passed to the call to manifest.active_tests.

    Any extra keyword arguments are set as attributes on mock_runner
    before add_test runs, so tests can configure e.g. test_tags.
    """
    monkeypatch.setattr(
        "marionette_harness.runner.base.TestManifest", manifest.manifest_class
    )
    # Seed mozinfo so tests can verify its values are forwarded.
    monkeypatch.setitem(mozinfo.info, "mozinfo_key", "mozinfo_val")
    for attr in kwargs:
        setattr(mock_runner, attr, kwargs[attr])
    mock_runner.marionette = mock_runner.driverclass()
    with patch("marionette_harness.runner.base.os.path.exists", return_value=True):
        mock_runner.add_test(manifest.filepath)
    # Only the keyword arguments of the active_tests call are of interest.
    call_args, call_kwargs = manifest.manifest_class().active_tests.call_args
    return call_kwargs
    371 
    372 
    373 def test_manifest_basic_args(mock_runner, manifest, monkeypatch):
    374    kwargs = get_kwargs_passed_to_manifest(mock_runner, manifest, monkeypatch)
    375    assert kwargs["exists"] is False
    376    assert kwargs["disabled"] is True
    377    assert kwargs["appname"] == "fake_app"
    378    assert "mozinfo_key" in kwargs and kwargs["mozinfo_key"] == "mozinfo_val"
    379 
    380 
    381 @pytest.mark.parametrize("test_tags", (None, ["tag", "tag2"]))
    382 def test_manifest_with_test_tags(mock_runner, manifest, monkeypatch, test_tags):
    383    kwargs = get_kwargs_passed_to_manifest(
    384        mock_runner, manifest, monkeypatch, test_tags=test_tags
    385    )
    386    if test_tags is None:
    387        assert kwargs["filters"] == []
    388    else:
    389        assert len(kwargs["filters"]) == 1 and kwargs["filters"][0].tags == test_tags
    390 
    391 
    392 def test_cleanup_with_manifest(mock_runner, manifest_with_tests, monkeypatch):
    393    monkeypatch.setattr(
    394        "marionette_harness.runner.base.TestManifest",
    395        manifest_with_tests.manifest_class,
    396    )
    397    if manifest_with_tests.n_enabled > 0:
    398        context = patch(
    399            "marionette_harness.runner.base.os.path.exists", return_value=True
    400        )
    401    else:
    402        context = pytest.raises(Exception)
    403    with context:
    404        mock_runner.run_tests([manifest_with_tests.filepath])
    405    assert mock_runner.marionette is None
    406    assert mock_runner.fixture_servers == {}
    407 
    408 
    409 def test_reset_test_stats(mock_runner):
    410    def reset_successful(runner):
    411        stats = [
    412            "passed",
    413            "failed",
    414            "unexpected_successes",
    415            "todo",
    416            "skipped",
    417            "failures",
    418        ]
    419        return all([((s in vars(runner)) and (not vars(runner)[s])) for s in stats])
    420 
    421    assert reset_successful(mock_runner)
    422    mock_runner.passed = 1
    423    mock_runner.failed = 1
    424    mock_runner.failures.append(["TEST-UNEXPECTED-FAIL"])
    425    assert not reset_successful(mock_runner)
    426    mock_runner.run_tests(["test_fake_thing.py"])
    427    assert reset_successful(mock_runner)
    428 
    429 
def test_initialize_test_run(mock_runner):
    """run_tests resets test stats exactly once and asserts preconditions.

    A second call that trips a precondition (no tests, or no test
    handlers) must fail its assert before resetting stats again.
    """
    tests = ["test_fake_thing.py"]
    mock_runner.reset_test_stats = Mock()
    mock_runner.run_tests(tests)
    assert mock_runner.reset_test_stats.called
    # Empty test list trips the `len(tests)` precondition assert.
    with pytest.raises(AssertionError) as test_exc:
        mock_runner.run_tests([])
    assert "len(tests)" in str(test_exc.traceback[-1].statement)
    # Emptying test_handlers trips the `test_handlers` precondition assert.
    with pytest.raises(AssertionError) as hndl_exc:
        mock_runner.test_handlers = []
        mock_runner.run_tests(tests)
    assert "test_handlers" in str(hndl_exc.traceback[-1].statement)
    # Neither failing run may have reset the stats a second time.
    assert mock_runner.reset_test_stats.call_count == 1
    443 
    444 
    445 def test_add_tests(mock_runner):
    446    assert len(mock_runner.tests) == 0
    447    fake_tests = ["test_" + i + ".py" for i in "abc"]
    448    mock_runner.run_tests(fake_tests)
    449    assert len(mock_runner.tests) == 3
    450    for test_name, added_test in zip(fake_tests, mock_runner.tests):
    451        assert added_test["filepath"].endswith(test_name)
    452 
    453 
    454 def test_repeat(mock_runner):
    455    def update_result(test, expected):
    456        mock_runner.failed += 1
    457 
    458    fake_tests = ["test_1.py"]
    459    mock_runner.repeat = 4
    460    mock_runner.run_test = Mock(side_effect=update_result)
    461    mock_runner.run_tests(fake_tests)
    462 
    463    assert mock_runner.failed == 5
    464    assert mock_runner.passed == 0
    465    assert mock_runner.todo == 0
    466 
    467 
    468 def test_run_until_failure(mock_runner):
    469    def update_result(test, expected):
    470        mock_runner.failed += 1
    471 
    472    fake_tests = ["test_1.py"]
    473    mock_runner.run_until_failure = True
    474    mock_runner.repeat = 4
    475    mock_runner.run_test = Mock(side_effect=update_result)
    476    mock_runner.run_tests(fake_tests)
    477 
    478    assert mock_runner.failed == 1
    479    assert mock_runner.passed == 0
    480    assert mock_runner.todo == 0
    481 
    482 
    483 def test_catch_invalid_test_names(runner):
    484    good_tests = ["test_ok.py", "test_is_ok.py"]
    485    bad_tests = [
    486        "bad_test.py",
    487        "testbad.py",
    488        "_test_bad.py",
    489        "test_bad.notpy",
    490        "test_bad",
    491        "test.py",
    492        "test_.py",
    493    ]
    494    with pytest.raises(Exception) as exc:
    495        runner._add_tests(good_tests + bad_tests)
    496    msg = str(exc.value)
    497    assert "Test file names must be of the form" in msg
    498    for bad_name in bad_tests:
    499        assert bad_name in msg
    500    for good_name in good_tests:
    501        assert good_name not in msg
    502 
    503 
    504 @pytest.mark.parametrize("repeat", (None, 0, 42, -1))
    505 def test_option_repeat(mach_parsed_kwargs, repeat):
    506    if repeat is not None:
    507        mach_parsed_kwargs["repeat"] = repeat
    508    runner = MarionetteTestRunner(**mach_parsed_kwargs)
    509 
    510    if repeat is None:
    511        assert runner.repeat == 0
    512    else:
    513        assert runner.repeat == repeat
    514 
    515 
    516 @pytest.mark.parametrize("repeat", (None, 42))
    517 @pytest.mark.parametrize("run_until_failure", (None, True))
    518 def test_option_run_until_failure(mach_parsed_kwargs, repeat, run_until_failure):
    519    if run_until_failure is not None:
    520        mach_parsed_kwargs["run_until_failure"] = run_until_failure
    521    if repeat is not None:
    522        mach_parsed_kwargs["repeat"] = repeat
    523    runner = MarionetteTestRunner(**mach_parsed_kwargs)
    524 
    525    if run_until_failure is None:
    526        assert runner.run_until_failure is False
    527        if repeat is None:
    528            assert runner.repeat == 0
    529        else:
    530            assert runner.repeat == repeat
    531 
    532    else:
    533        assert runner.run_until_failure == run_until_failure
    534        if repeat is None:
    535            assert runner.repeat == 30
    536        else:
    537            assert runner.repeat == repeat
    538 
    539 
if __name__ == "__main__":
    # Run this module's tests through mozunit with TBPL-style logging and
    # the terminal reporter plugin disabled.
    mozunit.main("-p", "no:terminalreporter", "--log-tbpl=-", "--capture", "no")