tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

jetstream3.py (3893B)


      1 # This Source Code Form is subject to the terms of the Mozilla Public
      2 # License, v. 2.0. If a copy of the MPL was not distributed with this
      3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
      4 
      5 import filters
      6 from base_python_support import BasePythonSupport
      7 
# Only the geometric measurement is returned as a score in the raw data.
# Otherwise the measurement is in `ms`.
# Measurement-name suffixes (last "-" segment) that denote time-based metrics.
TIME_METRICS = ["Runtime", "Startup", "First", "Worst", "Average"]
     11 
     12 
     13 class JetStreamSupport(BasePythonSupport):
     14    def handle_result(self, bt_result, raw_result, **kwargs):
     15        """Parse a result for the required results.
     16 
     17        See base_python_support.py for what's expected from this method.
     18        """
     19 
     20        score_tracker = {}
     21 
     22        for k, v in raw_result["extras"][0]["js3_res"]["tests"].items():
     23            score_tracker[k + "-" + "Geometric"] = v["metrics"]["Score"]["current"]
     24            for measure, metrics in v["tests"].items():
     25                score_tracker[k + "-" + measure] = metrics["metrics"]["Time"]["current"]
     26        geometric_measure = [v[0] for k, v in score_tracker.items() if "Geometric" in k]
     27        jetstream_overall_score = [round(filters.geometric_mean(geometric_measure), 3)]
     28 
     29        for k, v in score_tracker.items():
     30            bt_result["measurements"][k] = v
     31 
     32        bt_result["measurements"]["score"] = jetstream_overall_score
     33 
     34    def _build_subtest(self, measurement_name, replicates, test):
     35        unit = test.get("unit", "ms")
     36        lower_is_better = test.get("lower_is_better", "True")
     37        is_time_metric = measurement_name.split("-")[-1] in TIME_METRICS
     38        if test.get("subtest_unit") and is_time_metric:
     39            unit = test.get("subtest_unit")
     40            lower_is_better = test.get("subtest_lower_is_better")
     41 
     42        if "score" in measurement_name:
     43            lower_is_better = False
     44            unit = "score"
     45 
     46        subtest = {
     47            "unit": unit,
     48            # Bug 1968521 for the time being use 5% for jetstream 3 subtests.
     49            "alertThreshold": float(test.get("subtest_alert_threshold", 5.0)),
     50            "lowerIsBetter": lower_is_better,
     51            "name": measurement_name,
     52            "replicates": replicates,
     53            "shouldAlert": True,
     54            "value": round(filters.mean(replicates), 3),
     55        }
     56 
     57        # Overall score also appears in the subtests payload so just ensure it is 2%
     58        if measurement_name == "score":
     59            subtest["alertThreshold"] = float(test.get("alert_threshold", 2.0))
     60 
     61        return subtest
     62 
     63    def summarize_test(self, test, suite, **kwargs):
     64        """Summarize the measurements found in the test as a suite with subtests.
     65 
     66        See base_python_support.py for what's expected from this method.
     67        """
     68        suite["type"] = "benchmark"
     69        if suite["subtests"] == {}:
     70            suite["subtests"] = []
     71        for measurement_name, replicates in test["measurements"].items():
     72            if not replicates:
     73                continue
     74            if self.is_additional_metric(measurement_name):
     75                continue
     76            suite["subtests"].append(
     77                self._build_subtest(measurement_name, replicates, test)
     78            )
     79 
     80        self.add_additional_metrics(test, suite, **kwargs)
     81        suite["subtests"].sort(key=lambda subtest: subtest["name"])
     82 
     83        score = 0
     84        replicates = []
     85        for subtest in suite["subtests"]:
     86            if subtest["name"] == "score":
     87                score = subtest["value"]
     88                replicates = subtest.get("replicates", [])
     89                break
     90        suite["value"] = score
     91        suite["replicates"] = replicates
     92 
     93    def modify_command(self, cmd, test):
     94        """Modify the browsertime command to have the appropriate suite name in
     95        cases where we have multiple variants/versions
     96        """
     97 
     98        cmd += ["--browsertime.suite_name", test.get("suite_name")]