tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

browsertime_pageload.py (11170B)


      1 # This Source Code Form is subject to the terms of the Mozilla Public
      2 # License, v. 2.0. If a copy of the MPL was not distributed with this
      3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
      4 import copy
      5 
      6 import filters
      7 from base_python_support import BasePythonSupport
      8 from logger.logger import RaptorLogger
      9 from results import (
     10    NON_FIREFOX_BROWSERS,
     11    NON_FIREFOX_BROWSERS_MOBILE,
     12    MissingResultsError,
     13 )
     14 
     15 LOG = RaptorLogger(component="perftest-support-class")
     16 
# Mapping from Raptor measurement names (left) to the field in a
# browsertime cycle's "timings" dict that holds the value (right).
# A plain string is a flat key; a list is a path of keys to walk to
# reach a nested value (see _get_raptor_val).
conversion = (
    ("fnbpaint", "firstPaint"),
    ("fcp", ["paintTiming", "first-contentful-paint"]),
    ("dcf", "timeToDomContentFlushed"),
    ("loadtime", "loadEventEnd"),
    ("largestContentfulPaint", ["largestContentfulPaint", "renderTime"]),
)
     24 
     25 
     26 def _get_raptor_val(mdict, mname, retval=False):
     27    # gets the measurement requested, returns the value
     28    # if one was found, or retval if it couldn't be found
     29    #
     30    # mname: either a path to follow (as a list) to get to
     31    #        a requested field value, or a string to check
     32    #        if mdict contains it. i.e.
     33    #        'first-contentful-paint'/'fcp' is found by searching
     34    #        in mdict['paintTiming'].
     35    # mdict: a dictionary to look through to find the mname
     36    #        value.
     37 
     38    if type(mname) is not list:
     39        if mname in mdict:
     40            return mdict[mname]
     41        return retval
     42    target = mname[-1]
     43    tmpdict = mdict
     44    for name in mname[:-1]:
     45        tmpdict = tmpdict.get(name, {})
     46    if target in tmpdict:
     47        return tmpdict[target]
     48 
     49    return retval
     50 
     51 
     52 class PageloadSupport(BasePythonSupport):
     53    def __init__(self, **kwargs):
     54        super().__init__(**kwargs)
     55        self.perfstats = False
     56        self.browsertime_visualmetrics = False
     57        self.accept_zero_vismet = False
     58        self.subtest_alert_on = ""
     59        self.app = None
     60        self.extra_summary_methods = []
     61        self.test_type = ""
     62        self.measure = None
     63        self.power_test = False
     64        self.failed_vismets = []
     65 
     66    def setup_test(self, next_test, args):
     67        self.perfstats = next_test.get("perfstats", False)
     68        self.browsertime_visualmetrics = args.browsertime_visualmetrics
     69        self.accept_zero_vismet = next_test.get("accept_zero_vismet", False)
     70        self.subtest_alert_on = next_test.get("alert_on", "")
     71        self.app = args.app
     72        self.extra_summary_methods = args.extra_summary_methods
     73        self.test_type = next_test.get("type", "")
     74        self.measure = next_test.get("measure", [])
     75        self.power_test = args.power_test
     76 
     77    def handle_result(self, bt_result, raw_result, last_result=False, **kwargs):
     78        # extracting values from browserScripts and statistics
     79        for bt, raptor in conversion:
     80            if self.measure is not None and bt not in self.measure:
     81                continue
     82            # chrome and safari we just measure fcp and loadtime; skip fnbpaint and dcf
     83            if (
     84                self.app
     85                and self.app.lower()
     86                in NON_FIREFOX_BROWSERS + NON_FIREFOX_BROWSERS_MOBILE
     87                and bt
     88                in (
     89                    "fnbpaint",
     90                    "dcf",
     91                )
     92            ):
     93                continue
     94 
     95            # FCP uses a different path to get the timing, so we need to do
     96            # some checks here
     97            if bt == "fcp" and not _get_raptor_val(
     98                raw_result["browserScripts"][0]["timings"],
     99                raptor,
    100            ):
    101                continue
    102 
    103            # XXX looping several times in the list, could do better
    104            for cycle in raw_result["browserScripts"]:
    105                if bt not in bt_result["measurements"]:
    106                    bt_result["measurements"][bt] = []
    107                val = _get_raptor_val(cycle["timings"], raptor)
    108                if not val:
    109                    raise MissingResultsError(
    110                        f"Browsertime cycle missing {raptor} measurement"
    111                    )
    112                bt_result["measurements"][bt].append(val)
    113 
    114            # let's add the browsertime statistics; we'll use those for overall values
    115            # instead of calculating our own based on the replicates
    116            bt_result["statistics"][bt] = _get_raptor_val(
    117                raw_result["statistics"]["timings"], raptor, retval={}
    118            )
    119 
    120        if self.perfstats:
    121            for cycle in raw_result["geckoPerfStats"]:
    122                for metric in cycle:
    123                    bt_result["measurements"].setdefault(
    124                        "perfstat-" + metric, []
    125                    ).append(cycle[metric])
    126 
    127        if self.browsertime_visualmetrics:
    128            for cycle in raw_result["visualMetrics"]:
    129                for metric in cycle:
    130                    if "progress" in metric.lower():
    131                        # Bug 1665750 - Determine if we should display progress
    132                        continue
    133 
    134                    if metric not in self.measure:
    135                        continue
    136 
    137                    val = cycle[metric]
    138                    if not self.accept_zero_vismet:
    139                        if val == 0:
    140                            self.failed_vismets.append(metric)
    141                            continue
    142 
    143                    bt_result["measurements"].setdefault(metric, []).append(val)
    144                    bt_result["statistics"][metric] = raw_result["statistics"][
    145                        "visualMetrics"
    146                    ][metric]
    147 
    148        power_vals = raw_result.get("android").get("power", {})
    149        if power_vals:
    150            bt_result["measurements"].setdefault("powerUsage", []).extend([
    151                round(vals["powerUsage"] * (1 * 10**-6), 2) for vals in power_vals
    152            ])
    153 
    154    def _process_measurements(self, suite, test, measurement_name, replicates):
    155        subtest = {}
    156        subtest["name"] = measurement_name
    157        subtest["lowerIsBetter"] = test["subtest_lower_is_better"]
    158        subtest["alertThreshold"] = float(test["alert_threshold"])
    159 
    160        unit = test["subtest_unit"]
    161        if measurement_name == "cpuTime":
    162            unit = "ms"
    163        elif measurement_name == "powerUsage":
    164            unit = "uWh"
    165        subtest["unit"] = unit
    166 
    167        # Add the alert window settings if needed here too in case
    168        # there is no summary value in the test
    169        for schema_name in (
    170            "minBackWindow",
    171            "maxBackWindow",
    172            "foreWindow",
    173        ):
    174            if suite.get(schema_name, None) is not None:
    175                subtest[schema_name] = suite[schema_name]
    176 
    177        # if 'alert_on' is set for this particular measurement, then we want to set
    178        # the flag in the perfherder output to turn on alerting for this subtest
    179        if self.subtest_alert_on is not None:
    180            if measurement_name in self.subtest_alert_on:
    181                LOG.info(
    182                    "turning on subtest alerting for measurement type: %s"
    183                    % measurement_name
    184                )
    185                subtest["shouldAlert"] = True
    186                if self.app in (
    187                    "chrome",
    188                    "chrome-m",
    189                    "custom-car",
    190                    "cstm-car-m",
    191                ):
    192                    subtest["shouldAlert"] = False
    193            else:
    194                # Explicitly set `shouldAlert` to False so that the measurement
    195                # is not alerted on. Otherwise Perfherder defaults to alerting.
    196                LOG.info(
    197                    "turning off subtest alerting for measurement type: %s"
    198                    % measurement_name
    199                )
    200                subtest["shouldAlert"] = False
    201 
    202        if self.power_test and measurement_name == "powerUsage":
    203            subtest["shouldAlert"] = True
    204 
    205        subtest["replicates"] = replicates
    206        return subtest
    207 
    208    def summarize_test(self, test, suite, **kwargs):
    209        for measurement_name, replicates in test["measurements"].items():
    210            new_subtest = self._process_measurements(
    211                suite, test, measurement_name, replicates
    212            )
    213            if measurement_name not in suite["subtests"]:
    214                suite["subtests"][measurement_name] = new_subtest
    215            else:
    216                suite["subtests"][measurement_name]["replicates"].extend(
    217                    new_subtest["replicates"]
    218                )
    219 
    220        # Handle chimeras here, by default the add_additional_metrics
    221        # parses for all the results together regardless of cold/warm
    222        cycle_type = "browser-cycle"
    223        if "warm" in suite["extraOptions"]:
    224            cycle_type = "page-cycle"
    225 
    226        self.add_additional_metrics(test, suite, cycle_type=cycle_type)
    227 
    228        # Don't alert on cpuTime metrics
    229        for measurement_name, measurement_info in suite["subtests"].items():
    230            if "cputime" in measurement_name.lower():
    231                measurement_info["shouldAlert"] = False
    232 
    233    def _process_geomean(self, subtest):
    234        data = subtest["replicates"]
    235        subtest["value"] = round(filters.geometric_mean(data), 1)
    236 
    237    def _process_alt_method(self, subtest, alternative_method):
    238        data = subtest["replicates"]
    239        if alternative_method == "median":
    240            subtest["value"] = filters.median(data)
    241 
    242    def _process(self, subtest, method="geomean"):
    243        if self.test_type == "power":
    244            subtest["value"] = filters.mean(subtest["replicates"])
    245        elif method == "geomean":
    246            self._process_geomean(subtest)
    247        else:
    248            self._process_alt_method(subtest, method)
    249        return subtest
    250 
    251    def summarize_suites(self, suites):
    252        for suite in suites:
    253            suite["subtests"] = [
    254                self._process(subtest)
    255                for subtest in suite["subtests"].values()
    256                if subtest["replicates"]
    257            ]
    258 
    259            # Duplicate for different summary values if needed
    260            if self.extra_summary_methods:
    261                new_subtests = []
    262                for subtest in suite["subtests"]:
    263                    try:
    264                        for alternative_method in self.extra_summary_methods:
    265                            new_subtest = copy.deepcopy(subtest)
    266                            new_subtest["name"] = (
    267                                f"{new_subtest['name']} ({alternative_method})"
    268                            )
    269                            self._process(new_subtest, alternative_method)
    270                            new_subtests.append(new_subtest)
    271                    except Exception as e:
    272                        # Ignore failures here
    273                        LOG.info(f"Failed to summarize with alternative methods: {e}")
    274                        pass
    275                suite["subtests"].extend(new_subtests)
    276 
    277            suite["subtests"].sort(key=lambda subtest: subtest["name"])
    278 
    279    def report_test_success(self):
    280        if len(self.failed_vismets) > 0:
    281            LOG.critical(
    282                "TEST-UNEXPECTED-FAIL | Some visual metrics have an erroneous value of 0."
    283            )
    284            LOG.info("Visual metric tests failed: %s" % str(self.failed_vismets))
    285            return False
    286        return True