gc-test.py (5535B)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# Requires Python 3 (uses print(), bytes.decode(), dict views).

import json
import math
import os
import sys
from operator import itemgetter
from subprocess import PIPE, Popen


class Test:
    """A single GC benchmark: the path to a .js file plus its display name."""

    def __init__(self, path, name):
        self.path = path
        self.name = name

    @classmethod
    def from_file(cls, path, name, options):
        # `options` is accepted for interface parity with other test
        # harnesses but is currently unused.
        return cls(path, name)


def find_tests(dir, substring=None):
    """Return [path, filename] pairs for every .js file under `dir`.

    If `substring` is given, only tests whose path relative to `dir`
    contains it are included.
    """
    ans = []
    for dirpath, dirnames, filenames in os.walk(dir):
        if dirpath == ".":
            continue
        for filename in filenames:
            if not filename.endswith(".js"):
                continue
            test = os.path.join(dirpath, filename)
            if substring is None or substring in os.path.relpath(test, dir):
                ans.append([test, filename])
    return ans


def get_test_cmd(path):
    """Build the shell command for one test file (JS is set in __main__)."""
    return [JS, "-f", path]


def avg(seq):
    """Arithmetic mean of a non-empty sequence."""
    return sum(seq) / len(seq)


def stddev(seq, mean):
    """Population standard deviation of `seq` around a precomputed `mean`."""
    diffs = ((float(item) - mean) ** 2 for item in seq)
    return math.sqrt(sum(diffs) / len(seq))


def run_test(test):
    """Run one test with MOZ_GCTIMER=stderr and parse the GC timings.

    The shell's stderr is expected to be whitespace-separated floats in
    repeating (total, mark, sweep) triples.  Returns the 6-tuple
    (TMax, TAvg, MMax, MAvg, SMax, SAvg); exits with status 1 if no
    timing data was produced.
    """
    env = os.environ.copy()
    env["MOZ_GCTIMER"] = "stderr"
    cmd = get_test_cmd(test.path)
    total = []
    mark = []
    sweep = []
    # close_fds is not supported together with redirected handles on
    # win32 for older Pythons, so only enable it elsewhere.
    close_fds = sys.platform != "win32"
    p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=close_fds, env=env)
    out, err = p.communicate()
    out, err = out.decode(), err.decode()

    float_array = [float(_) for _ in err.split()]

    if len(float_array) == 0:
        print("Error: No data from application. Configured with --enable-gctimer?")
        sys.exit(1)

    # De-interleave the (total, mark, sweep) triples.
    for i, currItem in enumerate(float_array):
        if i % 3 == 0:
            total.append(currItem)
        elif i % 3 == 1:
            mark.append(currItem)
        else:
            sweep.append(currItem)

    return max(total), avg(total), max(mark), avg(mark), max(sweep), avg(sweep)


def run_tests(tests, test_dir):
    """Run every test, printing JSON-ish per-test lines as we go.

    Returns {name: {TMax, TAvg, MMax, MAvg, SMax, SAvg}} for all tests
    completed before any KeyboardInterrupt.
    """
    bench_map = {}

    try:
        for i, test in enumerate(tests):
            filename_str = '"%s"' % test.name
            TMax, TAvg, MMax, MAvg, SMax, SAvg = run_test(test)
            bench_map[test.name] = [TMax, TAvg, MMax, MAvg, SMax, SAvg]
            fmt = '%20s: {"TMax": %4.1f, "TAvg": %4.1f, "MMax": %4.1f, "MAvg": %4.1f, "SMax": %4.1f, "SAvg": %4.1f}'  # NOQA: E501
            if i != len(tests) - 1:
                fmt += ","
            # Bug fix: the final field used to print MAvg where SAvg belongs,
            # so the reported "SAvg" was actually the mark average.
            print(fmt % (filename_str, TMax, TAvg, MMax, MAvg, SMax, SAvg))
    except KeyboardInterrupt:
        print("fail")

    # Py3 fix: dicts have .items(), not the Python-2-only .iteritems().
    return {
        filename: dict(TMax=TMax, TAvg=TAvg, MMax=MMax, MAvg=MAvg, SMax=SMax, SAvg=SAvg)
        for filename, (TMax, TAvg, MMax, MAvg, SMax, SAvg) in bench_map.items()
    }


def compare(current, baseline):
    """Print per-test speedup/slowdown of `current` vs `baseline`.

    Both arguments are mappings of test name -> stats dict as produced by
    run_tests(); only TAvg drives the comparison.  Tests missing from the
    baseline are reported and skipped.
    """
    percent_speedups = []
    # Py3 fix: .items() instead of the Python-2-only .iteritems().
    for key, current_result in current.items():
        try:
            baseline_result = baseline[key]
        except KeyError:
            print(key, "missing from baseline")
            continue

        val_getter = itemgetter("TMax", "TAvg", "MMax", "MAvg", "SMax", "SAvg")
        BTMax, BTAvg, BMMax, BMAvg, BSMax, BSAvg = val_getter(baseline_result)
        CTMax, CTAvg, CMMax, CMAvg, CSMax, CSAvg = val_getter(current_result)

        if CTAvg <= BTAvg:
            speedup = (CTAvg / BTAvg - 1) * 100
            result = "faster: %6.2f < baseline %6.2f (%+6.2f%%)" % (
                CTAvg,
                BTAvg,
                speedup,
            )
            percent_speedups.append(speedup)
        else:
            slowdown = (CTAvg / BTAvg - 1) * 100
            result = "SLOWER: %6.2f > baseline %6.2f (%+6.2f%%) " % (
                CTAvg,
                BTAvg,
                slowdown,
            )
            percent_speedups.append(slowdown)
        print("%30s: %s" % (key, result))
    if percent_speedups:
        print("Average speedup: %.2f%%" % avg(percent_speedups))


if __name__ == "__main__":
    script_path = os.path.abspath(__file__)
    script_dir = os.path.dirname(script_path)
    test_dir = os.path.join(script_dir, "tests")

    from optparse import OptionParser

    op = OptionParser(usage="%prog [options] JS_SHELL [TESTS]")

    op.add_option(
        "-b",
        "--baseline",
        metavar="JSON_PATH",
        dest="baseline_path",
        help="json file with baseline values to compare against",
    )

    (OPTIONS, args) = op.parse_args()
    if len(args) < 1:
        op.error("missing JS_SHELL argument")
    # We need to make sure we are using backslashes on Windows.
    JS, test_args = os.path.normpath(args[0]), args[1:]

    test_list = []
    bench_map = {}

    test_list = find_tests(test_dir)

    if not test_list:
        # Py3 fix: `print >> sys.stderr` is Python-2-only syntax.
        print("No tests found matching command line arguments.", file=sys.stderr)
        sys.exit(0)

    test_list = [Test.from_file(tst, name, OPTIONS) for tst, name in test_list]

    try:
        print("{")
        bench_map = run_tests(test_list, test_dir)
        print("}")

    except OSError:
        if not os.path.exists(JS):
            # Py3 fix: `print >> sys.stderr` is Python-2-only syntax.
            print(
                "JS shell argument: file does not exist: '%s'" % JS,
                file=sys.stderr,
            )
            sys.exit(1)
        else:
            raise

    if OPTIONS.baseline_path:
        baseline_map = []
        with open(OPTIONS.baseline_path) as fh:
            baseline_map = json.load(fh)
        compare(current=bench_map, baseline=baseline_map)