tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

benchmark.py (13538B)


      1 # This Source Code Form is subject to the terms of the Mozilla Public
      2 # License, v. 2.0. If a copy of the MPL was not distributed with this
      3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
      4 
      5 import six
      6 import json
      7 import os
      8 import re
      9 import shutil
     10 import sys
     11 import pathlib
     12 from abc import ABCMeta, abstractmethod, abstractproperty
     13 from argparse import ArgumentParser
     14 from collections import defaultdict
     15 
     16 from mozbuild.base import MozbuildObject, BuildEnvironmentNotFoundException
     17 from mozprocess import run_and_wait
     18 
# Directory containing this script; anchors build-environment discovery.
here = os.path.abspath(os.path.dirname(__file__))
# Build configuration object (provides bindir, substs, ...) for the tree
# this script lives in.
build = MozbuildObject.from_environment(cwd=here)
     21 
# Error message printed when no JS shell binary can be located.
JSSHELL_NOT_FOUND = """
Could not detect a JS shell. Either make sure you have a non-artifact build
with `ac_add_options --enable-js-shell` or specify it with `--binary`.
""".strip()
     26 
     27 
     28 @six.add_metaclass(ABCMeta)
     29 class Benchmark(object):
     30    lower_is_better = True
     31    should_alert = True
     32 
     33    def __init__(self, shell, args=None, shell_name=None):
     34        self.shell = shell
     35        self.args = args
     36        self.shell_name = shell_name
     37 
     38    @abstractproperty
     39    def unit(self):
     40        """Returns the unit of measurement of the benchmark."""
     41 
     42    @abstractproperty
     43    def name(self):
     44        """Returns the string name of the benchmark."""
     45 
     46    @abstractproperty
     47    def path(self):
     48        """Return the path to the benchmark relative to topsrcdir."""
     49 
     50    @abstractmethod
     51    def process_line(self, proc, line):
     52        """Process a line of stdout from the benchmark."""
     53 
     54    @abstractmethod
     55    def collect_results(self):
     56        """Build the result after the process has finished."""
     57 
     58    @property
     59    def command(self):
     60        """Returns the command to run as a list."""
     61        cmd = [self.shell]
     62        if self.args:
     63            cmd += self.args
     64        return cmd
     65 
     66    @property
     67    def version(self):
     68        if self._version:
     69            return self._version
     70 
     71        with open(os.path.join(self.path, "VERSION"), "r") as fh:
     72            self._version = fh.read().strip("\r\n\r\n \t")
     73        return self._version
     74 
     75    def reset(self):
     76        """Resets state between runs."""
     77        name = self.name
     78        if self.shell_name:
     79            name = "{}-{}".format(name, self.shell_name)
     80 
     81        self.perfherder_data = {
     82            "framework": {
     83                "name": "js-bench",
     84            },
     85            "suites": [
     86                {
     87                    "lowerIsBetter": self.lower_is_better,
     88                    "name": name,
     89                    "shouldAlert": self.should_alert,
     90                    "subtests": [],
     91                    "unit": self.unit,
     92                    "value": None,
     93                },
     94            ],
     95        }
     96        self.suite = self.perfherder_data["suites"][0]
     97 
     98    def _provision_benchmark_script(self):
     99        if os.path.isdir(self.path):
    100            return
    101 
    102        # Some benchmarks may have been downloaded from a fetch task, make
    103        # sure they get copied over.
    104        fetches_dir = os.environ.get("MOZ_FETCHES_DIR")
    105        if fetches_dir and os.path.isdir(fetches_dir):
    106            fetchdir = os.path.join(fetches_dir, self.name)
    107            if os.path.isdir(fetchdir):
    108                shutil.copytree(fetchdir, self.path)
    109 
    110    def run(self):
    111        self.reset()
    112 
    113        # Update the environment variables
    114        env = os.environ.copy()
    115 
    116        process_args = {
    117            "args": self.command,
    118            "cwd": self.path,
    119            "env": env,
    120            "output_line_handler": self.process_line,
    121        }
    122        proc = run_and_wait(**process_args)
    123        self.collect_results()
    124        return proc.returncode
    125 
    126 
    127 class RunOnceBenchmark(Benchmark):
    128    def collect_results(self):
    129        bench_total = 0
    130        # NOTE: for this benchmark we run the test once, so we have a single value array
    131        for bench, scores in self.scores.items():
    132            for score, values in scores.items():
    133                test_name = "{}-{}".format(self.name, score)
    134                # pylint --py3k W1619
    135                mean = sum(values) / len(values)
    136                self.suite["subtests"].append({"name": test_name, "value": mean})
    137                bench_total += int(sum(values))
    138        self.suite["value"] = bench_total
    139 
    140 
    141 class Ares6(Benchmark):
    142    name = "ares6"
    143    path = os.path.join("third_party", "webkit", "PerformanceTests", "ARES-6")
    144    unit = "ms"
    145 
    146    @property
    147    def command(self):
    148        cmd = super(Ares6, self).command
    149        return cmd + ["cli.js"]
    150 
    151    def reset(self):
    152        super(Ares6, self).reset()
    153 
    154        self.bench_name = None
    155        self.last_summary = None
    156        # Scores are of the form:
    157        # {<bench_name>: {<score_name>: [<values>]}}
    158        self.scores = defaultdict(lambda: defaultdict(list))
    159 
    160    def _try_find_score(self, score_name, line):
    161        m = re.search(score_name + r":\s*(\d+\.?\d*?) (\+-)?.+", line)
    162        if not m:
    163            return False
    164 
    165        score = m.group(1)
    166        self.scores[self.bench_name][score_name].append(float(score))
    167        return True
    168 
    169    def process_line(self, proc, line):
    170        line = line.strip("\n")
    171        print(line)
    172        m = re.search(r"Running... (.+) \(.+\)", line)
    173        if m:
    174            self.bench_name = m.group(1)
    175            return
    176 
    177        if self._try_find_score("firstIteration", line):
    178            return
    179 
    180        if self._try_find_score("averageWorstCase", line):
    181            return
    182 
    183        if self._try_find_score("steadyState", line):
    184            return
    185 
    186        m = re.search(r"summary:\s*(\d+\.?\d*?) (\+-)?.+", line)
    187        if m:
    188            self.last_summary = float(m.group(1))
    189 
    190    def collect_results(self):
    191        for bench, scores in self.scores.items():
    192            for score, values in scores.items():
    193                # pylint --py3k W1619
    194                mean = sum(values) / len(values)
    195                test_name = "{}-{}".format(bench, score)
    196                self.suite["subtests"].append({"name": test_name, "value": mean})
    197 
    198        if self.last_summary:
    199            self.suite["value"] = self.last_summary
    200 
    201 
    202 class SixSpeed(RunOnceBenchmark):
    203    name = "six-speed"
    204    path = os.path.join("third_party", "webkit", "PerformanceTests", "six-speed")
    205    unit = "ms"
    206 
    207    @property
    208    def command(self):
    209        cmd = super(SixSpeed, self).command
    210        return cmd + ["test.js"]
    211 
    212    def reset(self):
    213        super(SixSpeed, self).reset()
    214 
    215        # Scores are of the form:
    216        # {<bench_name>: {<score_name>: [<values>]}}
    217        self.scores = defaultdict(lambda: defaultdict(list))
    218 
    219    def process_line(self, proc, output):
    220        output = output.strip("\n")
    221        print(output)
    222        m = re.search(r"(.+): (\d+)", output)
    223        if not m:
    224            return
    225        subtest = m.group(1)
    226        score = m.group(2)
    227        if subtest not in self.scores[self.name]:
    228            self.scores[self.name][subtest] = []
    229        self.scores[self.name][subtest].append(int(score))
    230 
    231 
    232 class SunSpider(RunOnceBenchmark):
    233    name = "sunspider"
    234    path = os.path.join(
    235        "third_party", "webkit", "PerformanceTests", "SunSpider", "sunspider-0.9.1"
    236    )
    237    unit = "ms"
    238 
    239    @property
    240    def command(self):
    241        cmd = super(SunSpider, self).command
    242        return cmd + ["sunspider-standalone-driver.js"]
    243 
    244    def reset(self):
    245        super(SunSpider, self).reset()
    246 
    247        # Scores are of the form:
    248        # {<bench_name>: {<score_name>: [<values>]}}
    249        self.scores = defaultdict(lambda: defaultdict(list))
    250 
    251    def process_line(self, proc, output):
    252        output = output.strip("\n")
    253        print(output)
    254        m = re.search(r"(.+): (\d+)", output)
    255        if not m:
    256            return
    257        subtest = m.group(1)
    258        score = m.group(2)
    259        if subtest not in self.scores[self.name]:
    260            self.scores[self.name][subtest] = []
    261        self.scores[self.name][subtest].append(int(score))
    262 
    263 
    264 class WebToolingBenchmark(Benchmark):
    265    name = "web-tooling-benchmark"
    266    path = os.path.join(
    267        "third_party", "webkit", "PerformanceTests", "web-tooling-benchmark"
    268    )
    269    main_js = "cli.js"
    270    unit = "score"
    271    lower_is_better = False
    272    subtests_lower_is_better = False
    273 
    274    @property
    275    def command(self):
    276        cmd = super(WebToolingBenchmark, self).command
    277        return cmd + [self.main_js]
    278 
    279    def reset(self):
    280        super(WebToolingBenchmark, self).reset()
    281 
    282        # Scores are of the form:
    283        # {<bench_name>: {<score_name>: [<values>]}}
    284        self.scores = defaultdict(lambda: defaultdict(list))
    285 
    286    def process_line(self, proc, output):
    287        output = output.strip("\n")
    288        print(output)
    289        m = re.search(" +([a-zA-Z].+): +([.0-9]+) +runs/sec", output)
    290        if not m:
    291            return
    292        subtest = m.group(1)
    293        score = m.group(2)
    294        if subtest not in self.scores[self.name]:
    295            self.scores[self.name][subtest] = []
    296        self.scores[self.name][subtest].append(float(score))
    297 
    298    def collect_results(self):
    299        # NOTE: for this benchmark we run the test once, so we have a single value array
    300        bench_mean = None
    301        for bench, scores in self.scores.items():
    302            for score_name, values in scores.items():
    303                test_name = "{}-{}".format(self.name, score_name)
    304                # pylint --py3k W1619
    305                mean = sum(values) / len(values)
    306                self.suite["subtests"].append(
    307                    {
    308                        "lowerIsBetter": self.subtests_lower_is_better,
    309                        "name": test_name,
    310                        "value": mean,
    311                    }
    312                )
    313                if score_name == "mean":
    314                    bench_mean = mean
    315        self.suite["value"] = bench_mean
    316 
    317    def run(self):
    318        self._provision_benchmark_script()
    319        return super(WebToolingBenchmark, self).run()
    320 
    321 
    322 class Octane(RunOnceBenchmark):
    323    name = "octane"
    324    path = os.path.join("third_party", "webkit", "PerformanceTests", "octane")
    325    unit = "score"
    326    lower_is_better = False
    327 
    328    @property
    329    def command(self):
    330        cmd = super(Octane, self).command
    331        return cmd + ["run.js"]
    332 
    333    def reset(self):
    334        super(Octane, self).reset()
    335 
    336        # Scores are of the form:
    337        # {<bench_name>: {<score_name>: [<values>]}}
    338        self.scores = defaultdict(lambda: defaultdict(list))
    339 
    340    def process_line(self, proc, output):
    341        output = output.strip("\n")
    342        print(output)
    343        m = re.search(r"(.+): (\d+)", output)
    344        if not m:
    345            return
    346        subtest = m.group(1)
    347        score = m.group(2)
    348        if subtest.startswith("Score"):
    349            subtest = "score"
    350        if subtest not in self.scores[self.name]:
    351            self.scores[self.name][subtest] = []
    352        self.scores[self.name][subtest].append(int(score))
    353 
    354    def collect_results(self):
    355        bench_score = None
    356        # NOTE: for this benchmark we run the test once, so we have a single value array
    357        for bench, scores in self.scores.items():
    358            for score_name, values in scores.items():
    359                test_name = "{}-{}".format(self.name, score_name)
    360                # pylint --py3k W1619
    361                mean = sum(values) / len(values)
    362                self.suite["subtests"].append({"name": test_name, "value": mean})
    363                if score_name == "score":
    364                    bench_score = mean
    365        self.suite["value"] = bench_score
    366 
    367    def run(self):
    368        self._provision_benchmark_script()
    369        return super(Octane, self).run()
    370 
    371 
    372 all_benchmarks = {
    373    "ares6": Ares6,
    374    "six-speed": SixSpeed,
    375    "sunspider": SunSpider,
    376    "web-tooling-benchmark": WebToolingBenchmark,
    377    "octane": Octane,
    378 }
    379 
    380 
    381 def run(benchmark, binary=None, extra_args=None, perfherder=None):
    382    if not binary:
    383        try:
    384            binary = os.path.join(build.bindir, "js" + build.substs["BIN_SUFFIX"])
    385        except BuildEnvironmentNotFoundException:
    386            binary = None
    387 
    388        if not binary or not os.path.isfile(binary):
    389            print(JSSHELL_NOT_FOUND)
    390            return 1
    391 
    392    bench = all_benchmarks.get(benchmark)(
    393        binary, args=extra_args, shell_name=perfherder
    394    )
    395    res = bench.run()
    396 
    397    if perfherder:
    398        print("PERFHERDER_DATA: {}".format(json.dumps(bench.perfherder_data)))
    399        if "MOZ_AUTOMATION" in os.environ:
    400            fetches_dir = pathlib.Path(os.environ.get("MOZ_FETCHES_DIR"))
    401            upload_path = (
    402                fetches_dir.parent / "artifacts" / "perfherder-data-jsshell.json"
    403            )
    404            upload_path.parent.mkdir(parents=True, exist_ok=True)
    405            with upload_path.open("w", encoding="utf-8") as f:
    406                json.dump(bench.perfherder_data, f)
    407 
    408    return res
    409 
    410 
    411 def get_parser():
    412    parser = ArgumentParser()
    413    parser.add_argument(
    414        "benchmark",
    415        choices=list(all_benchmarks),
    416        help="The name of the benchmark to run.",
    417    )
    418    parser.add_argument(
    419        "-b", "--binary", default=None, help="Path to the JS shell binary to use."
    420    )
    421    parser.add_argument(
    422        "--arg",
    423        dest="extra_args",
    424        action="append",
    425        default=None,
    426        help="Extra arguments to pass to the JS shell.",
    427    )
    428    parser.add_argument(
    429        "--perfherder",
    430        default=None,
    431        help="Log PERFHERDER_DATA to stdout using the given suite name.",
    432    )
    433    return parser
    434 
    435 
    436 def cli(args=sys.argv[1:]):
    437    parser = get_parser()
    438    args = parser.parser_args(args)
    439    return run(**vars(args))
    440 
    441 
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    sys.exit(cli())