# motionmark-1-3.py (3549B)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import filters
from base_python_support import BasePythonSupport


class MotionMarkSupport(BasePythonSupport):
    def handle_result(self, bt_result, raw_result, **kwargs):
        """Parse a result for the required results.

        See base_python_support.py for what's expected from this method.
        """
        # Each entry in "extras" is one benchmark iteration; all iterations
        # share the same suite name and subtest set, so seed the per-subtest
        # accumulator from the first entry.
        iterations = [entry["mm_res"] for entry in raw_result["extras"]]
        suite_name = iterations[0]["suite_name"]
        subtest_scores = {name: [] for name in iterations[0]["results"][suite_name]}

        overall_scores = []
        for mm in iterations:
            overall_scores.append(round(mm["score"], 3))

            # Track the bootstrap-median complexity per subtest per iteration.
            for name, data in mm["results"][suite_name].items():
                subtest_scores[name].append(data["complexity"]["bootstrap"]["median"])

        for name, scores in subtest_scores.items():
            bt_result["measurements"][name] = scores

        bt_result["measurements"]["score"] = overall_scores

    def _build_subtest(self, measurement_name, replicates, test):
        # Subtest-specific unit wins over the test-level unit; default "ms".
        unit = test.get("subtest_unit") or test.get("unit", "ms")

        lower_is_better = test.get(
            "subtest_lower_is_better", test.get("lower_is_better", True)
        )
        # Scores are higher-is-better by definition, regardless of config.
        if "score" in measurement_name:
            lower_is_better = False
            unit = "score"

        return {
            "unit": unit,
            "alertThreshold": float(test.get("alert_threshold", 2.0)),
            "lowerIsBetter": lower_is_better,
            "name": measurement_name,
            "replicates": replicates,
            "value": round(filters.mean(replicates), 3),
        }

    def summarize_test(self, test, suite, **kwargs):
        """Summarize the measurements found in the test as a suite with subtests.

        See base_python_support.py for what's expected from this method.
        """
        suite["type"] = "benchmark"
        if suite["subtests"] == {}:
            suite["subtests"] = []
        # Build one subtest per non-empty measurement, skipping additional
        # metrics (those are handled by add_additional_metrics below).
        suite["subtests"].extend(
            self._build_subtest(name, reps, test)
            for name, reps in test["measurements"].items()
            if reps and not self.is_additional_metric(name)
        )

        self.add_additional_metrics(test, suite, **kwargs)
        suite["subtests"].sort(key=lambda entry: entry["name"])

        # The suite-level value mirrors the overall "score" subtest if present.
        score_entry = next(
            (entry for entry in suite["subtests"] if entry["name"] == "score"), None
        )
        if score_entry is None:
            suite["value"] = 0
            suite["replicates"] = []
        else:
            suite["value"] = score_entry["value"]
            suite["replicates"] = score_entry.get("replicates", [])

    def modify_command(self, cmd, test):
        """Modify the browsertime command to have the appropriate suite name.

        This is necessary to grab the correct CSS selector in the browsertime
        script, and later for parsing through the final benchmark data in the
        support python script (this file).

        Current options are `MotionMark` and `HTML suite`.
        """

        cmd.extend(["--browsertime.suite_name", test.get("suite_name")])