config_status.py (8173B)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# Combined with build/autoconf/config.status.m4, ConfigStatus is an almost
# drop-in replacement for autoconf 2.13's config.status, with features
# borrowed from autoconf > 2.5, and additional features.

import logging
import os
import sys
import time
from argparse import ArgumentParser
from itertools import chain
from multiprocessing import Pool, get_start_method
from time import process_time

from mach.logging import LoggingManager

from mozbuild.backend import backends, get_backend_class
from mozbuild.backend.configenvironment import ConfigEnvironment
from mozbuild.base import MachCommandConditions
from mozbuild.frontend.emitter import TreeMetadataEmitter
from mozbuild.frontend.reader import BuildReader
from mozbuild.mozinfo import write_mozinfo
from mozbuild.util import FileAvoidWrite

# Module-wide logging manager; config_status() attaches terminal logging to it.
log_manager = LoggingManager()


ANDROID_IDE_ADVERTISEMENT = """
=============
ADVERTISEMENT

You are building GeckoView. After your build completes, you can open
the top source directory in Android Studio directly and build using Gradle.
See the documentation at

https://firefox-source-docs.mozilla.org/mobile/android/index.html#build-using-android-studio
=============
""".strip()


## Parallel backend setup
# Distributing each backend on different process is costly because we need to
# copy the definitions across each process. These definitions are read-only, so
# only copy them once when each process starts.
class BackendPool:
    """Run several build backends in parallel over one shared definitions list.

    The frontend definitions are materialized exactly once, installed in the
    parent process (which acts as one of the workers) and copied into each
    pool process a single time via the pool initializer, instead of being
    re-serialized for every task.
    """

    # Per-process copy of the frontend definitions, set by _init_worker in
    # the parent and in every pool process.
    per_process_definitions = None

    def __init__(self, definitions, *, processes=None):
        # Materialize the (possibly generator) definitions once; the same
        # list is shipped to each worker through the pool initializer.
        definitions = list(definitions)
        # The current process consumes one backend itself (see run()), so it
        # needs the definitions installed too.
        BackendPool._init_worker(definitions)
        self.pool = Pool(
            initializer=BackendPool._init_worker,
            initargs=(definitions,),
            processes=processes,
        )

    def run(self, backends):
        # We're trying to spawn a minimal number of new processes there, and
        # limit the number of times we serialize the task state. As a
        # consequence:
        # 1. we initialize each process with a copy of `definitions'
        # 2. instead of spawning as many processes as backend, we use current
        #    process to handle one of the backend and asynchronously run the
        #    others.
        async_tasks = self.pool.map_async(BackendPool._run_worker, backends[1:])
        BackendPool._run_worker(backends[0])
        async_tasks.wait()

    @staticmethod
    def _init_worker(state):
        # Pool initializer: stash the definitions on the class so
        # _run_worker can reach them without re-pickling per task.
        BackendPool.per_process_definitions = state

    @staticmethod
    def _run_worker(backend):
        return backend.consume(BackendPool.per_process_definitions)


def config_status(
    topobjdir=".",
    topsrcdir=".",
    defines=None,
    substs=None,
    source=None,
    mozconfig=None,
    args=None,
):
    """Main function, providing config.status functionality.

    Contrary to config.status, it doesn't use CONFIG_FILES or CONFIG_HEADERS
    variables.

    Without the -n option, this program acts as config.status and considers
    the current directory as the top object directory, even when config.status
    is in a different directory. It will, however, treat the directory
    containing config.status as the top object directory with the -n option.

    The options to this function are passed when creating the
    ConfigEnvironment. These lists, as well as the actual wrapper script
    around this function, are meant to be generated by configure.
    See build/autoconf/config.status.m4.

    :param args: command-line arguments to parse; defaults to sys.argv[1:],
        read at call time rather than captured once at import time (argparse
        itself treats None the same way, so this is backward compatible).
    :raises Exception: if CONFIG_FILES/CONFIG_HEADERS are set in the
        environment, or if topsrcdir is not an absolute path.
    """
    if args is None:
        args = sys.argv[1:]

    if "CONFIG_FILES" in os.environ:
        raise Exception("Using the CONFIG_FILES environment variable is not supported.")
    if "CONFIG_HEADERS" in os.environ:
        raise Exception(
            "Using the CONFIG_HEADERS environment variable is not supported."
        )

    if not os.path.isabs(topsrcdir):
        raise Exception(
            "topsrcdir must be defined as an absolute directory: %s" % topsrcdir
        )

    # BUILD_BACKENDS from configure wins; fall back to RecursiveMake.
    default_backends = (substs or {}).get("BUILD_BACKENDS", ["RecursiveMake"])

    parser = ArgumentParser()
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="store_true",
        help="display verbose output",
    )
    parser.add_argument(
        "-n",
        dest="not_topobjdir",
        action="store_true",
        help="do not consider current directory as top object directory",
    )
    parser.add_argument(
        "-d", "--diff", action="store_true", help="print diffs of changed files."
    )
    parser.add_argument(
        "-b",
        "--backend",
        nargs="+",
        choices=sorted(backends),
        default=default_backends,
        help="what backend to build (default: %s)." % " ".join(default_backends),
    )
    parser.add_argument(
        "--dry-run", action="store_true", help="do everything except writing files out."
    )
    options = parser.parse_args(args)

    # Without -n, the current directory is meant to be the top object directory
    if not options.not_topobjdir:
        topobjdir = os.path.realpath(".")

    env = ConfigEnvironment(
        topsrcdir,
        topobjdir,
        defines=defines,
        substs=substs,
        source=source,
        mozconfig=mozconfig,
    )

    with FileAvoidWrite(os.path.join(topobjdir, "mozinfo.json")) as f:
        write_mozinfo(f, env, os.environ)

    cpu_start = process_time()
    time_start = time.monotonic()

    # Make appropriate backend instances, defaulting to RecursiveMakeBackend,
    # or what is in BUILD_BACKENDS.
    selected_backends = [get_backend_class(b)(env) for b in options.backend]

    if options.dry_run:
        for b in selected_backends:
            b.dry_run = True

    reader = BuildReader(env)
    emitter = TreeMetadataEmitter(env)
    # This won't actually do anything because of the magic of generators.
    definitions = emitter.emit(reader.read_topsrcdir())

    log_level = logging.DEBUG if options.verbose else logging.INFO
    log_manager.add_terminal_logging(level=log_level)
    log_manager.enable_unstructured()

    print("Reticulating splines...", file=sys.stderr)

    # `definitions` objects are unfortunately not picklable, which is a
    # requirement for "spawn" method. It's fine under "fork" method. This
    # basically excludes Windows from our optimization, we can live with it.
    if len(selected_backends) > 1 and get_start_method() == "fork":
        # See https://github.com/python/cpython/commit/39889864c09741909da4ec489459d0197ea8f1fc
        # For why we cap the process count. There's also an overhead to setup
        # new processes, and not that many backends anyway.
        processes = min(len(selected_backends) - 1, 4)
        pool = BackendPool(definitions, processes=processes)
        pool.run(selected_backends)
    else:
        if len(selected_backends) > 1:
            # Several backends each iterate the definitions: materialize the
            # generator once so it isn't exhausted by the first consumer.
            definitions = list(definitions)

        for backend in selected_backends:
            backend.consume(definitions)

    execution_time = 0.0
    for obj in chain((reader, emitter), selected_backends):
        summary = obj.summary()
        print(summary, file=sys.stderr)
        execution_time += summary.execution_time
        if hasattr(obj, "gyp_summary"):
            summary = obj.gyp_summary()
            print(summary, file=sys.stderr)

    cpu_time = process_time() - cpu_start
    wall_time = time.monotonic() - time_start
    # Fall back to 1.0 when wall_time is zero: the f-string below formats
    # this with `:.0%`, so 1.0 renders as "100%" (a fallback of 100 would
    # have printed "10000%").
    efficiency = cpu_time / wall_time if wall_time else 1.0
    untracked = wall_time - execution_time

    print(
        f"Total wall time: {wall_time:.2f}s; CPU time: {cpu_time:.2f}s; Efficiency: "
        f"{efficiency:.0%}; Untracked: {untracked:.2f}s",
        file=sys.stderr,
    )

    if options.diff:
        for the_backend in selected_backends:
            for path, diff in sorted(the_backend.file_diffs.items()):
                print("\n".join(diff))

    # Advertise Android Studio if it is appropriate.
    if MachCommandConditions.is_android(env):
        print(ANDROID_IDE_ADVERTISEMENT)