compute-macrobenchmark-results.py (4617B)
"""Collect Android macrobenchmark results, print a Perfherder payload,
and emit a human-readable markdown comparison table.

Reads every benchmarkData JSON file from an input directory, extracts the
``timeToInitialDisplayMs`` metrics, and writes them out in the mozperftest
Perfherder suite format.
"""

import json
import pathlib
import sys
from collections import defaultdict
from os import environ, listdir
from os.path import isfile, join


def read_benchmark_data_from_directory(directory):
    """Read all benchmark files in *directory* into one results dict.

    Expected file shape: org.mozilla.fenix.benchmark-benchmarkData.json
    Returns a dict mapping benchmark name -> {median, minimum, maximum}.
    """
    benchmark_files = [
        file for file in listdir(directory) if isfile(join(directory, file))
    ]
    benchmark_results = {}
    for benchmark_file in benchmark_files:
        # Each file's benchmarks are merged into the shared results dict.
        read_benchmark_data(join(directory, benchmark_file), benchmark_results)

    return benchmark_results


def read_benchmark_data(file_path, results):
    """Read one benchmark JSON file and merge its metrics into *results*.

    For every entry under ``benchmarks``, stores the median/minimum/maximum
    of the ``timeToInitialDisplayMs`` metric keyed by the benchmark name.
    Mutates and returns *results*.
    """
    with open(file_path) as file:
        data = json.load(file)

    for benchmark in data["benchmarks"]:
        name = benchmark["name"]
        time_metrics = benchmark["metrics"]["timeToInitialDisplayMs"]
        results[name] = {
            "median": time_metrics["median"],
            "minimum": time_metrics["minimum"],
            "maximum": time_metrics["maximum"],
        }
    return results


def format_output_content(results):
    """Format *results* into the Perfherder (mozperftest) JSON structure.

    Each (benchmark, metric) pair becomes one subtest named
    ``<benchmark>.<metric>`` with unit milliseconds.
    """
    # Construct the subtests list: one entry per metric of each benchmark.
    subtests = []
    for result_name, metrics in results.items():
        for metric_name, value in metrics.items():
            subtests.append(
                {
                    "name": f"{result_name}.{metric_name}",
                    "lowerIsBetter": True,
                    "value": value,
                    "unit": "ms",
                }
            )

    # Base JSON structure expected by Perfherder ingestion.
    output_json = {
        "framework": {"name": "mozperftest"},
        "application": {"name": "fenix"},
        "suites": [
            {
                "name": "baseline-profile:fenix",
                "type": "coldstart",
                "unit": "ms",
                "extraOptions": [],
                "lowerIsBetter": True,
                "subtests": subtests,
            }
        ],
    }

    return output_json


def output_results(output_json, output_file_path):
    """Write *output_json* to a file and print it compactly to the console.

    In automation (MOZ_AUTOMATION set) the compact payload is also written
    to the MOZ_PERFHERDER_UPLOAD path so CI can pick it up.
    """
    # Perfherder scrapes this exact "PERFHERDER_DATA: <json>" log line,
    # so the payload must be a compact one-line string.
    compact_json = json.dumps(output_json)
    print(f"PERFHERDER_DATA: {compact_json}")

    # Guard on MOZ_PERFHERDER_UPLOAD too: pathlib.Path(None) would raise
    # TypeError if automation is set but the upload path is missing.
    if "MOZ_AUTOMATION" in environ and environ.get("MOZ_PERFHERDER_UPLOAD"):
        upload_path = pathlib.Path(environ["MOZ_PERFHERDER_UPLOAD"])
        upload_path.parent.mkdir(parents=True, exist_ok=True)
        with upload_path.open("w", encoding="utf-8") as f:
            f.write(compact_json)

    # Write the pretty-formatted JSON to the requested output file.
    with open(output_file_path, "w") as output_file:
        output_file.write(json.dumps(output_json, indent=3))
    print(f"Results have been written to {output_file_path}")


def _fmt_cell(value):
    """Format a table cell: 3 decimal places, or empty when missing."""
    return f"{value:.3f}" if value is not None else ""


def generate_markdown_table(results):
    """Build a markdown table comparing baseline-profile vs. no-profile runs.

    Benchmarks are paired by stripping the "PartialWithBaselineProfiles" /
    "None" suffixes from their names; the ``% diff`` column shows the
    relative improvement of the profiled median over the unprofiled one.
    """
    # Step 1: Organize the data, pairing profiled and unprofiled variants
    # under a common base name.
    table_data = defaultdict(lambda: {"median": None, "median None": None})

    for name, metrics in results.items():
        base_name = name.replace("PartialWithBaselineProfiles", "")
        if "None" in base_name:
            table_data[base_name.replace("None", "")]["median None"] = metrics["median"]
        else:
            table_data[base_name]["median"] = metrics["median"]

    # Step 2: Prepare markdown rows (header + alignment separator).
    headers = ["Benchmark", "median", "median None", "% diff"]
    lines = [
        f"| {' | '.join(headers)} |",
        f"|{':-' + '-:|:-'.join(['-' * len(h) for h in headers])}-:|",
    ]

    for benchmark, values in sorted(table_data.items()):
        median = values["median"]
        median_none = values["median None"]
        # Only compute % diff when both values exist and the denominator
        # is non-zero; otherwise leave the cell empty.
        if median is not None and median_none:
            percent_diff = round((median_none - median) / median_none * 100, 1)
        else:
            percent_diff = ""

        # Use _fmt_cell so an unpaired benchmark (one side None) renders
        # as an empty cell instead of crashing on None:.3f formatting.
        row = (
            f"| {benchmark} | {_fmt_cell(median)} | "
            f"{_fmt_cell(median_none)} | {percent_diff} |"
        )
        lines.append(row)

    return "\n".join(lines)


# Main script logic
if __name__ == "__main__":
    if len(sys.argv) < 3:
        print("Usage: python script.py <input_json_path> <output_file_path>")
    else:
        input_json_path = sys.argv[1]
        output_file_path = sys.argv[2]

        # Process the benchmark data
        results = read_benchmark_data_from_directory(input_json_path)
        print(generate_markdown_table(results))
        output_json = format_output_content(results)
        output_results(output_json, output_file_path)