tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

speedometer3.py (4503B)


      1 # This Source Code Form is subject to the terms of the Mozilla Public
      2 # License, v. 2.0. If a copy of the MPL was not distributed with this
      3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
      4 
      5 import filters
      6 from base_python_support import BasePythonSupport
      7 from logger.logger import RaptorLogger
      8 from utils import flatten
      9 
     10 LOG = RaptorLogger(component="raptor-speedometer3-support")
     11 
     12 
     13 class Speedometer3Support(BasePythonSupport):
     14    def handle_result(self, bt_result, raw_result, **kwargs):
     15        """Parse a result for the required results.
     16 
     17        See base_python_support.py for what's expected from this method.
     18        """
     19        for res in raw_result["extras"]:
     20            sp3_mean_score = round(res["s3"]["score"]["mean"], 3)
     21            flattened_metrics_s3_internal = flatten(res["s3_internal"], ())
     22 
     23            clean_flat_internal_metrics = {}
     24            for k, vals in flattened_metrics_s3_internal.items():
     25                if k in ("mean", "geomean"):
     26                    # Skip these for parity with what was being
     27                    # returned in the results.py/output.py
     28                    continue
     29                clean_flat_internal_metrics[k.replace("tests/", "")] = [
     30                    round(val, 3) for val in vals
     31                ]
     32 
     33            clean_flat_internal_metrics["score-internal"] = clean_flat_internal_metrics[
     34                "score"
     35            ]
     36            clean_flat_internal_metrics["score"] = [sp3_mean_score]
     37 
     38            for k, v in clean_flat_internal_metrics.items():
     39                bt_result["measurements"].setdefault(k, []).extend(v)
     40 
     41    def _build_subtest(self, measurement_name, replicates, test):
     42        unit = test.get("unit", "ms")
     43        if test.get("subtest_unit"):
     44            unit = test.get("subtest_unit")
     45 
     46        lower_is_better = test.get(
     47            "subtest_lower_is_better", test.get("lower_is_better", True)
     48        )
     49        if "score" in measurement_name:
     50            lower_is_better = False
     51            unit = "score"
     52 
     53        subtest = {
     54            "unit": unit,
     55            "alertThreshold": float(test.get("alert_threshold", 2.0)),
     56            "lowerIsBetter": lower_is_better,
     57            "name": measurement_name,
     58            "replicates": replicates,
     59            "shouldAlert": True,
     60            "value": round(filters.mean(replicates), 3),
     61        }
     62 
     63        if "score-internal" in measurement_name:
     64            subtest["shouldAlert"] = False
     65 
     66        return subtest
     67 
     68    def summarize_test(self, test, suite, **kwargs):
     69        """Summarize the measurements found in the test as a suite with subtests.
     70 
     71        See base_python_support.py for what's expected from this method.
     72        """
     73        suite["type"] = "benchmark"
     74        if suite["subtests"] == {}:
     75            suite["subtests"] = []
     76        for measurement_name, replicates in test["measurements"].items():
     77            if not replicates:
     78                continue
     79            if self.is_additional_metric(measurement_name):
     80                continue
     81            suite["subtests"].append(
     82                self._build_subtest(measurement_name, replicates, test)
     83            )
     84 
     85        self.add_additional_metrics(test, suite, **kwargs)
     86        suite["subtests"].sort(key=lambda subtest: subtest["name"])
     87 
     88        score = 0
     89        replicates = []
     90        for subtest in suite["subtests"]:
     91            if subtest["name"] == "score":
     92                score = subtest["value"]
     93                replicates = subtest.get("replicates", [])
     94                break
     95        suite["value"] = score
     96        suite["replicates"] = replicates
     97 
    def modify_command(self, cmd, test):
        """Modify the browsertime command for speedometer 3.

        Presently we need to modify the command to accommodate profiling
        on android devices by modifying the test url to lower the iteration
        counts.

        :param cmd: list of browsertime command-line arguments; mutated in place.
        :param test: dict of test settings; only "gecko_profile" is read here.
        """

        # Bug 1934266
        # For profiling on android + speedometer3 we set the iteration count to 5.
        # Otherwise the profiles are too large and use too much of the allocated
        # host machine memory. This is a useful temporary measure until we have
        # a more long term solution.
        if test.get("gecko_profile", False) and self.app in ("fenix", "geckoview"):
            LOG.info(
                "Modifying iterationCount to 5 for gecko profiling speedometer3 on android"
            )
            # The URL is the argument immediately after --browsertime.url;
            # append the lowered iteration count as an extra query parameter.
            # NOTE(review): cmd.index raises ValueError if the flag is absent —
            # presumably the harness always passes it; confirm against callers.
            btime_url_index = cmd.index("--browsertime.url")
            cmd[btime_url_index + 1] += "&iterationCount=5"