tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

browsertime_tp6_bench.py (7685B)


      1 # This Source Code Form is subject to the terms of the Mozilla Public
      2 # License, v. 2.0. If a copy of the MPL was not distributed with this
      3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
      4 import copy
      5 import re
      6 
      7 import filters
      8 from base_python_support import BasePythonSupport
      9 from utils import bool_from_str
     10 
     11 DOMAIN_MATCHER = re.compile(r"(?:https?:\/\/)?(?:[^@\n]+@)?(?:www\.)?([^:\/\n]+)")
     12 VARIANCE_THRESHOLD = 0.2
     13 
     14 
     15 def extract_domain(link):
     16    match = DOMAIN_MATCHER.search(link)
     17    if match:
     18        return match.group(1)
     19    raise Exception(f"Could not find domain for {link}")
     20 
     21 
class TP6BenchSupport(BasePythonSupport):
    """Run every tp6 pageload test as one combined benchmark.

    ``setup_test`` rewrites the test definition so a single run cycles
    through all tp6 benchmark pages; ``handle_result`` accumulates the
    per-page paint timings and the overall ``totalTime``; the
    ``summarize_*`` hooks reshape the collected measurements into
    overall, geomean, and median suites for reporting.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # totalTime value from every result handled, in order.
        self._total_times = []
        # How many results (one per handle_result call) have been seen.
        self._sites_tested = 0
        # test_url -> parsed manifest entry; used in handle_result to
        # recover the test name for a page from its URL.
        self._test_pages = {}

    def setup_test(self, test, args):
        """Collect every tp6 benchmark page into this single test.

        Scans the browsertime test list for entries whose manifest is
        the tp6 one (tp6m for non-desktop apps) and that are flagged
        with ``benchmark_page``, then joins their URLs and
        playback-pageset manifests into comma-separated lists stored on
        ``test``.

        Raises an Exception when no matching manifests are found.
        """
        from cmdline import DESKTOP_APPS
        from manifest import get_browser_test_list
        from utils import transform_subtest

        all_tests = get_browser_test_list(args.app, args.run_local)

        # Non-desktop (mobile) apps use the tp6m manifest instead.
        manifest_to_find = "browsertime-tp6.toml"
        if args.app not in DESKTOP_APPS:
            manifest_to_find = "browsertime-tp6m.toml"

        test_urls = []
        playback_pageset_manifests = []
        for parsed_test in all_tests:
            if manifest_to_find in parsed_test["manifest"]:
                # Skip pages not explicitly opted into the benchmark.
                if not bool_from_str(parsed_test.get("benchmark_page", "false")):
                    continue
                test_url = parsed_test["test_url"]
                test_urls.append(test_url)
                # Only use the backup manifest if it is set.
                if parsed_test.get("playback_pageset_manifest_backup"):
                    playback_pageset_manifests.append(
                        transform_subtest(
                            parsed_test["playback_pageset_manifest_backup"],
                            parsed_test["name"],
                        )
                    )
                else:
                    playback_pageset_manifests.append(
                        transform_subtest(
                            parsed_test["playback_pageset_manifest"],
                            parsed_test["name"],
                        )
                    )
                self._test_pages[test_url] = parsed_test

        if len(playback_pageset_manifests) == 0:
            raise Exception("Could not find any manifests for testing.")

        test["test_url"] = ",".join(test_urls)
        test["playback_pageset_manifest"] = ",".join(playback_pageset_manifests)

    def handle_result(self, bt_result, raw_result, last_result=False, **kwargs):
        """Fold one raw browsertime result into ``bt_result``.

        Pulls the overall ``totalTime`` from any ``pageload-benchmark``
        entries in the result's extras, and per-page first/largest
        contentful paint values from each browser-script cycle, keyed
        by the page's test name — or by a "domain - title" fallback
        when the URL is not a known test page.  On the final result,
        also emits ``totalTimePerSite``.
        """
        measurements = {"totalTime": []}

        # Find new results to add
        for res in raw_result["extras"]:
            if "pageload-benchmark" in res:
                # Round to the nearest whole millisecond; missing
                # totalTime counts as 0.
                total_time = int(
                    round(res["pageload-benchmark"].get("totalTime", 0), 0)
                )
                measurements["totalTime"].append(total_time)
                self._total_times.append(total_time)

        # The name is resolved once from the first cycle and reused for
        # every cycle in this result.
        result_name = None
        for cycle in raw_result["browserScripts"]:
            if not result_name:
                # When the test name is unknown, we use the url TLD combined
                # with the page title to differentiate pages with similar domains, and
                # limit it to 35 characters
                page_url = cycle["pageinfo"].get("url", "")
                if self._test_pages.get(page_url, None) is not None:
                    result_name = self._test_pages[page_url]["name"]
                else:
                    page_name = extract_domain(page_url)
                    # Spaces are dropped after truncating to 35 chars.
                    page_title = (
                        cycle["pageinfo"].get("documentTitle", "")[:35].replace(" ", "")
                    )
                    result_name = f"{page_name} - {page_title}"

            fcp = cycle["timings"]["paintTiming"]["first-contentful-paint"]
            # LCP may be absent from the timings; only record it when present.
            lcp = (
                cycle["timings"]
                .get("largestContentfulPaint", {})
                .get("renderTime", None)
            )

            measurements.setdefault(f"{result_name} - fcp", []).append(fcp)

            if lcp is not None:
                measurements.setdefault(f"{result_name} - lcp", []).append(lcp)

        self._sites_tested += 1

        for measurement, values in measurements.items():
            bt_result["measurements"].setdefault(measurement, []).extend(values)
        if last_result:
            # Average each run's total time over the number of results
            # handled so far.
            bt_result["measurements"]["totalTimePerSite"] = [
                round(total_time / self._sites_tested, 2)
                for total_time in self._total_times
            ]

    def _build_subtest(self, measurement_name, replicates, test):
        """Build a subtest dict for ``measurement_name`` whose value is
        the geometric mean of ``replicates``.

        The unit comes from the test's ``subtest_unit`` when set,
        otherwise its ``unit``, defaulting to "ms".
        """
        unit = test.get("unit", "ms")
        if test.get("subtest_unit"):
            unit = test.get("subtest_unit")

        return {
            "name": measurement_name,
            "lowerIsBetter": test.get("lower_is_better", True),
            "alertThreshold": float(test.get("alert_threshold", 2.0)),
            "unit": unit,
            "replicates": replicates,
            "value": round(filters.geometric_mean(replicates), 3),
        }

    def summarize_test(self, test, suite, **kwargs):
        """Convert the test's accumulated measurements into suite
        subtests, sorted by name.  Measurements with no replicates are
        skipped.
        """
        suite["type"] = "pageload"
        if suite["subtests"] == {}:
            suite["subtests"] = []
        for measurement_name, replicates in test["measurements"].items():
            if not replicates:
                continue
            suite["subtests"].append(
                self._build_subtest(measurement_name, replicates, test)
            )
        suite["subtests"].sort(key=lambda subtest: subtest["name"])

    def _produce_suite_alts(self, suite_base, subtests, suite_name_prefix):
        """Return geomean- and median-aggregated copies of ``suite_base``
        restricted to ``subtests``.

        Each suite's top-level value aggregates all replicates pooled
        together, while every subtest's value aggregates its own
        replicates.
        """
        geomean_suite = copy.deepcopy(suite_base)
        geomean_suite["subtests"] = copy.deepcopy(subtests)
        median_suite = copy.deepcopy(geomean_suite)
        # Re-copied so the two suites never share subtest dicts.
        median_suite["subtests"] = copy.deepcopy(subtests)

        # Pool every replicate for the suite-level aggregate value.
        subtest_values = []
        for subtest in subtests:
            subtest_values.extend(subtest["replicates"])

        geomean_suite["name"] = suite_name_prefix + "-geomean"
        geomean_suite["value"] = filters.geometric_mean(subtest_values)
        for subtest in geomean_suite["subtests"]:
            subtest["value"] = filters.geometric_mean(subtest["replicates"])

        median_suite["name"] = suite_name_prefix + "-median"
        median_suite["value"] = filters.median(subtest_values)
        for subtest in median_suite["subtests"]:
            subtest["value"] = filters.median(subtest["replicates"])

        return [
            geomean_suite,
            median_suite,
        ]

    def summarize_suites(self, suites):
        """Replace the combined suite with overall, fcp-bench, and
        lcp-bench suites.

        NOTE(review): this reads only ``suites[0]`` and pops a single
        entry before extending, so it assumes ``suites`` holds exactly
        one combined suite — verify against the caller.
        """
        fcp_subtests = []
        lcp_subtests = []

        # Subtests are classified by the name suffix produced in
        # handle_result ("<page> - fcp" / "<page> - lcp").
        for suite in suites:
            for subtest in suite["subtests"]:
                if "- fcp" in subtest["name"]:
                    fcp_subtests.append(subtest)
                elif "- lcp" in subtest["name"]:
                    lcp_subtests.append(subtest)

        fcp_bench_suites = self._produce_suite_alts(
            suites[0], fcp_subtests, "fcp-bench"
        )

        lcp_bench_suites = self._produce_suite_alts(
            suites[0], lcp_subtests, "lcp-bench"
        )

        # The overall suite keeps only the totalTime* subtests.
        overall_suite = copy.deepcopy(suites[0])
        new_subtests = [
            subtest
            for subtest in suites[0]["subtests"]
            if subtest["name"].startswith("total")
        ]
        overall_suite["subtests"] = new_subtests

        new_suites = [
            overall_suite,
            *fcp_bench_suites,
            *lcp_bench_suites,
        ]
        suites.pop()
        suites.extend(new_suites)