IdleSchedulerParent.cpp (15518B)
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ 2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */ 3 /* This Source Code Form is subject to the terms of the Mozilla Public 4 * License, v. 2.0. If a copy of the MPL was not distributed with this 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 7 #include "mozilla/StaticPrefs_page_load.h" 8 #include "mozilla/StaticPrefs_javascript.h" 9 #include "mozilla/ipc/IdleSchedulerParent.h" 10 #include "mozilla/AppShutdown.h" 11 #include "mozilla/NeverDestroyed.h" 12 #include "mozilla/ipc/SharedMemoryHandle.h" 13 #include "mozilla/ipc/SharedMemoryMapping.h" 14 #include "nsSystemInfo.h" 15 #include "nsThreadUtils.h" 16 #include "nsITimer.h" 17 #include "nsIThread.h" 18 19 namespace mozilla::ipc { 20 21 // Shared memory for counting how many child processes are running 22 // tasks. This memory is shared across all the child processes. 23 // The [0] is used for counting all the processes and 24 // [childId] is for counting per process activity. 25 // This way the global activity can be checked in a fast way by just looking 26 // at [0] value. 27 // [1] is used for cpu count for child processes. 
// Lazily-constructed, process-lifetime holder for the shared activity-counter
// mapping. NeverDestroyed avoids a static destructor at shutdown; the mapping
// itself is explicitly dropped (assigned nullptr) when the last child dies.
static SharedMemoryMappingWithHandle& sActiveChildCounter() {
  static NeverDestroyed<SharedMemoryMappingWithHandle> mapping;
  return *mapping;
}

// Tracks which slots of the shared counter array have been handed out as
// per-child ids (slot 0 and 1 are reserved, see the comment above).
std::bitset<NS_IDLE_SCHEDULER_COUNTER_ARRAY_LENGHT>
    IdleSchedulerParent::sInUseChildCounters;
// Queue of actors that are waiting for idle time and/or GC permission.
MOZ_RUNINIT LinkedList<IdleSchedulerParent>
    IdleSchedulerParent::sIdleAndGCRequests;
int32_t IdleSchedulerParent::sMaxConcurrentIdleTasksInChildProcesses = 1;
uint32_t IdleSchedulerParent::sMaxConcurrentGCs = 1;
uint32_t IdleSchedulerParent::sActiveGCs = 0;
uint32_t IdleSchedulerParent::sChildProcessesRunningPrioritizedOperation = 0;
uint32_t IdleSchedulerParent::sChildProcessesAlive = 0;
// Strong reference (manually released) to the anti-starvation one-shot timer.
nsITimer* IdleSchedulerParent::sStarvationPreventer = nullptr;

// 0 until the async CPU-count query completes; treated as "1 core" meanwhile.
uint32_t IdleSchedulerParent::sNumCPUs = 0;
// Cached pref values; both start at 0 so the first constructor run always
// recomputes the idle-task limits (see below).
uint32_t IdleSchedulerParent::sPrefConcurrentGCsMax = 0;
uint32_t IdleSchedulerParent::sPrefConcurrentGCsCPUDivisor = 0;

IdleSchedulerParent::IdleSchedulerParent() {
  sChildProcessesAlive++;

  uint32_t max_gcs_pref =
      StaticPrefs::javascript_options_concurrent_multiprocess_gcs_max();
  uint32_t cpu_divisor_pref =
      StaticPrefs::javascript_options_concurrent_multiprocess_gcs_cpu_divisor();
  // A pref value of 0 means "use the default": unlimited GCs / divisor 4.
  if (!max_gcs_pref) {
    max_gcs_pref = UINT32_MAX;
  }
  if (!cpu_divisor_pref) {
    cpu_divisor_pref = 4;
  }

  if (!sNumCPUs) {
    // While waiting for the real logical core count behave as if there was
    // just one core.
    sNumCPUs = 1;

    // CollectProcessInfo can be an expensive call, so we dispatch it as a
    // background task and avoid doing so during shutdown.
    if (MOZ_LIKELY(!AppShutdown::IsInOrBeyond(ShutdownPhase::XPCOMShutdown))) {
      // Capture the current (scheduler) thread so the result can be bounced
      // back to it; sNumCPUs must only be written on that thread.
      nsCOMPtr<nsIThread> thread = do_GetCurrentThread();
      nsCOMPtr<nsIRunnable> runnable =
          NS_NewRunnableFunction("cpucount getter", [thread]() {
            ProcessInfo processInfo = {};
            if (NS_SUCCEEDED(CollectProcessInfo(processInfo))) {
              uint32_t num_cpus = processInfo.cpuCount;
              // We have a new cpu count; update the number of idle tasks.
              if (MOZ_LIKELY(!AppShutdown::IsInOrBeyond(
                      ShutdownPhase::XPCOMShutdownThreads))) {
                nsCOMPtr<nsIRunnable> runnable = NS_NewRunnableFunction(
                    "IdleSchedulerParent::CalculateNumIdleTasks", [num_cpus]() {
                      // We're setting this within this lambda because it's run
                      // on the correct thread and avoids a race.
                      sNumCPUs = num_cpus;

                      // This reads the sPrefConcurrentGCsMax and
                      // sPrefConcurrentGCsCPUDivisor values set below, it will
                      // run after the code that sets those.
                      CalculateNumIdleTasks();
                    });

                thread->Dispatch(runnable, NS_DISPATCH_NORMAL);
              }
            }
          });
      NS_DispatchBackgroundTask(runnable.forget(), NS_DISPATCH_EVENT_MAY_BLOCK);
    }
  }

  if (sPrefConcurrentGCsMax != max_gcs_pref ||
      sPrefConcurrentGCsCPUDivisor != cpu_divisor_pref) {
    // We execute this if these preferences have changed. We also want to make
    // sure it executes for the first IdleSchedulerParent, which it does because
    // sPrefConcurrentGCsMax and sPrefConcurrentGCsCPUDivisor are initially
    // zero.
    sPrefConcurrentGCsMax = max_gcs_pref;
    sPrefConcurrentGCsCPUDivisor = cpu_divisor_pref;

    CalculateNumIdleTasks();
  }
}

// Recomputes the concurrent idle-task and GC limits from the CPU count and
// prefs, publishes the idle-task limit to the shared memory (so children can
// read it), then re-runs the scheduler in case the new limits free up slots.
void IdleSchedulerParent::CalculateNumIdleTasks() {
  MOZ_ASSERT(sNumCPUs);
  MOZ_ASSERT(sPrefConcurrentGCsMax);
  MOZ_ASSERT(sPrefConcurrentGCsCPUDivisor);

  // On one and two processor (or hardware thread) systems this will
  // allow one concurrent idle task.
  sMaxConcurrentIdleTasksInChildProcesses = int32_t(std::max(sNumCPUs, 1u));
  sMaxConcurrentGCs = std::clamp(sNumCPUs / sPrefConcurrentGCsCPUDivisor, 1u,
                                 sPrefConcurrentGCsMax);

  if (sActiveChildCounter()) {
    sActiveChildCounter()
        .DataAsSpan<Atomic<int32_t>>()[NS_IDLE_SCHEDULER_INDEX_OF_CPU_COUNTER] =
        static_cast<int32_t>(sMaxConcurrentIdleTasksInChildProcesses);
  }
  IdleSchedulerParent::Schedule(nullptr);
}

IdleSchedulerParent::~IdleSchedulerParent() {
  // We can't know if an active process just crashed, so we just always expect
  // that is the case.
  if (mChildId) {
    // Free this child's counter slot and, if the child died while marked
    // active, take it out of the global activity count too.
    sInUseChildCounters[mChildId] = false;
    if (sActiveChildCounter()) {
      auto counters = sActiveChildCounter().DataAsSpan<Atomic<int32_t>>();
      if (counters[mChildId]) {
        --counters[NS_IDLE_SCHEDULER_INDEX_OF_ACTIVITY_COUNTER];
        counters[mChildId] = 0;
      }
    }
  }

  if (mRunningPrioritizedOperation) {
    --sChildProcessesRunningPrioritizedOperation;
  }

  if (mDoingGC) {
    // Give back our GC token.
    sActiveGCs--;
  }

  if (mRequestingGC) {
    // Answer the outstanding GC request with "no" so the resolver is not
    // dropped unresolved.
    mRequestingGC.value()(false);
    mRequestingGC = Nothing();
  }

  // Remove from the scheduler's queue.
  if (isInList()) {
    remove();
  }

  MOZ_ASSERT(sChildProcessesAlive > 0);
  sChildProcessesAlive--;
  if (sChildProcessesAlive == 0) {
    // Last child gone: release the shared memory and the starvation timer.
    MOZ_ASSERT(sIdleAndGCRequests.isEmpty());
    sActiveChildCounter() = nullptr;

    if (sStarvationPreventer) {
      sStarvationPreventer->Cancel();
      NS_RELEASE(sStarvationPreventer);
    }
  }

  // This actor's death may have freed an idle/GC slot for someone else.
  Schedule(nullptr);
}

// First message from a child that wants idle scheduling: lazily creates the
// cross-process counter shmem, assigns the child a counter slot, and resolves
// with (shmem handle, child id).
IPCResult IdleSchedulerParent::RecvInitForIdleUse(
    InitForIdleUseResolver&& aResolve) {
  // This must already be non-zero, if it is zero then the cleanup code for the
  // shared memory (initialised below) will never run. The invariant is that if
  // the shared memory is initialised, then this is non-zero.
  MOZ_ASSERT(sChildProcessesAlive > 0);

  MOZ_ASSERT(IsNotDoingIdleTask());

  // Create a shared memory object which is shared across all the relevant
  // processes.
  if (!sActiveChildCounter()) {
    size_t shmemSize = NS_IDLE_SCHEDULER_COUNTER_ARRAY_LENGHT * sizeof(int32_t);
    sActiveChildCounter() = shared_memory::Create(shmemSize).MapWithHandle();
    if (sActiveChildCounter()) {
      memset(sActiveChildCounter().Address(), 0, shmemSize);
      // Reserve the two bookkeeping slots so they are never handed out as
      // child ids.
      sInUseChildCounters[NS_IDLE_SCHEDULER_INDEX_OF_ACTIVITY_COUNTER] = true;
      sInUseChildCounters[NS_IDLE_SCHEDULER_INDEX_OF_CPU_COUNTER] = true;
      sActiveChildCounter().DataAsSpan<Atomic<int32_t>>()
          [NS_IDLE_SCHEDULER_INDEX_OF_CPU_COUNTER] =
          static_cast<int32_t>(sMaxConcurrentIdleTasksInChildProcesses);
    } else {
      sActiveChildCounter() = nullptr;
    }
  }
  MutableSharedMemoryHandle activeCounter =
      sActiveChildCounter() ? sActiveChildCounter().Handle().Clone() : nullptr;

  uint32_t unusedId = 0;
  for (uint32_t i = 0; i < NS_IDLE_SCHEDULER_COUNTER_ARRAY_LENGHT; ++i) {
    if (!sInUseChildCounters[i]) {
      sInUseChildCounters[i] = true;
      unusedId = i;
      break;
    }
  }

  // If there wasn't an empty item, we'll fallback to 0.
  mChildId = unusedId;

  aResolve(
      std::tuple<mozilla::Maybe<MutableSharedMemoryHandle>&&, const uint32_t&>(
          Some(std::move(activeCounter)), mChildId));
  return IPC_OK();
}

// Child asks for an idle-time slice of aBudget; queue it and try to schedule.
IPCResult IdleSchedulerParent::RecvRequestIdleTime(uint64_t aId,
                                                   TimeDuration aBudget) {
  MOZ_ASSERT(aBudget);
  MOZ_ASSERT(IsNotDoingIdleTask());

  mCurrentRequestId = aId;
  mRequestedIdleBudget = aBudget;

  // May already be in the list because of a pending GC request.
  if (!isInList()) {
    sIdleAndGCRequests.insertBack(this);
  }

  Schedule(this);
  return IPC_OK();
}

IPCResult IdleSchedulerParent::RecvIdleTimeUsed(uint64_t aId) {
  // The client can either signal that they've used the idle time or they're
  // canceling the request. We cannot use a separate cancel message because it
  // could arrive after the parent has granted the request.
  MOZ_ASSERT(IsWaitingForIdle() || IsDoingIdleTask());

  // The parent process will always know the ID of the current request (since
  // the IPC channel is reliable). The IDs are provided so that the client can
  // check them (it's possible for the client to race ahead of the server).
  MOZ_ASSERT(mCurrentRequestId == aId);

  // Stay in the list if a GC request is still outstanding.
  if (IsWaitingForIdle() && !mRequestingGC) {
    remove();
  }
  mRequestedIdleBudget = TimeDuration();
  Schedule(nullptr);
  return IPC_OK();
}

// Child hints that the parent should re-run scheduling (e.g. it went idle).
IPCResult IdleSchedulerParent::RecvSchedule() {
  Schedule(nullptr);
  return IPC_OK();
}

// Nested counter: only the 0 -> 1 transition changes the global count.
IPCResult IdleSchedulerParent::RecvRunningPrioritizedOperation() {
  ++mRunningPrioritizedOperation;
  if (mRunningPrioritizedOperation == 1) {
    ++sChildProcessesRunningPrioritizedOperation;
  }
  return IPC_OK();
}

IPCResult IdleSchedulerParent::RecvPrioritizedOperationDone() {
  MOZ_ASSERT(mRunningPrioritizedOperation);

  --mRunningPrioritizedOperation;
  if (mRunningPrioritizedOperation == 0) {
    --sChildProcessesRunningPrioritizedOperation;
    // Dropping priority may free idle slots for other children.
    Schedule(nullptr);
  }
  return IPC_OK();
}

// Child asks for permission to start a GC; the resolver is answered later by
// Schedule()/SendMayGC (true), RecvStartedGC (true), or the destructor (false).
IPCResult IdleSchedulerParent::RecvRequestGC(RequestGCResolver&& aResolver) {
  MOZ_ASSERT(!mDoingGC);
  MOZ_ASSERT(!mRequestingGC);

  mRequestingGC = Some(aResolver);
  if (!isInList()) {
    sIdleAndGCRequests.insertBack(this);
  }

  Schedule(nullptr);
  return IPC_OK();
}

// Child started a GC on its own (without waiting for permission).
IPCResult IdleSchedulerParent::RecvStartedGC() {
  if (mDoingGC) {
    return IPC_OK();
  }

  mDoingGC = true;
  sActiveGCs++;

  if (mRequestingGC) {
    // We have to respond to the request before dropping it, even though the
    // content process is already doing the GC.
    mRequestingGC.value()(true);
    mRequestingGC = Nothing();
    // Only leave the queue if no idle-time request is also pending.
    if (!IsWaitingForIdle()) {
      remove();
    }
  }

  return IPC_OK();
}

IPCResult IdleSchedulerParent::RecvDoneGC() {
  MOZ_ASSERT(mDoingGC);
  // Return the GC token and see if another child can start one.
  sActiveGCs--;
  mDoingGC = false;
  Schedule(nullptr);
  return IPC_OK();
}

// Number of child processes currently running tasks, read from slot 0 of the
// shared counter (0 if the shmem was never created).
int32_t IdleSchedulerParent::ActiveCount() {
  if (sActiveChildCounter()) {
    return sActiveChildCounter().DataAsSpan<Atomic<int32_t>>()
        [NS_IDLE_SCHEDULER_INDEX_OF_ACTIVITY_COUNTER];
  }
  return 0;
}

bool IdleSchedulerParent::HasSpareCycles(int32_t aActiveCount) {
  // We can run a new task if we have a spare core. If we're running a
  // prioritised operation we halve the number of regular spare cores.
  //
  // sMaxConcurrentIdleTasksInChildProcesses will always be >0 so on 1 and 2
  // core systems this will allow 1 idle tasks (0 if running a prioritized
  // operation).
  MOZ_ASSERT(sMaxConcurrentIdleTasksInChildProcesses > 0);
  return sChildProcessesRunningPrioritizedOperation
             ? sMaxConcurrentIdleTasksInChildProcesses / 2 > aActiveCount
             : sMaxConcurrentIdleTasksInChildProcesses > aActiveCount;
}

bool IdleSchedulerParent::HasSpareGCCycles() {
  return sMaxConcurrentGCs > sActiveGCs;
}

// Grants the pending idle request by sending the budget back to the child.
void IdleSchedulerParent::SendIdleTime() {
  // We would assert that IsWaitingForIdle() except after potentially removing
  // the task from it's list this will return false. Instead check
  // mRequestedIdleBudget.
  MOZ_ASSERT(mRequestedIdleBudget);
  (void)SendIdleTime(mCurrentRequestId, mRequestedIdleBudget);
}

// Grants the pending GC request (resolves with true) and takes a GC token.
void IdleSchedulerParent::SendMayGC() {
  MOZ_ASSERT(mRequestingGC);
  mRequestingGC.value()(true);
  mRequestingGC = Nothing();
  mDoingGC = true;
  sActiveGCs++;
}

// Core scheduling pass: optionally fast-tracks aRequester (if prioritized),
// then walks the request queue granting idle time and GC permission while
// spare slots remain.
void IdleSchedulerParent::Schedule(IdleSchedulerParent* aRequester) {
  // Tasks won't update the active count until after they receive their message
  // and start to run, so make a copy of it here and increment it for every task
  // we schedule. It will become an estimate of how many tasks will be active
  // shortly.
  int32_t activeCount = ActiveCount();

  if (aRequester && aRequester->mRunningPrioritizedOperation) {
    // Prioritised operations are requested only for idle time requests, so this
    // must be an idle time request.
    MOZ_ASSERT(aRequester->IsWaitingForIdle());

    // If the requester is prioritized, just let it run itself.
    if (aRequester->isInList() && !aRequester->mRequestingGC) {
      aRequester->remove();
    }
    aRequester->SendIdleTime();
    activeCount++;
  }

  RefPtr<IdleSchedulerParent> idleRequester = sIdleAndGCRequests.getFirst();

  bool has_spare_cycles = HasSpareCycles(activeCount);
  bool has_spare_gc_cycles = HasSpareGCCycles();

  while (idleRequester && (has_spare_cycles || has_spare_gc_cycles)) {
    // Get the next element before potentially removing the current one from the
    // list.
    RefPtr<IdleSchedulerParent> next = idleRequester->getNext();

    if (has_spare_cycles && idleRequester->IsWaitingForIdle()) {
      // We can run an idle task.
      activeCount++;
      // Keep it queued if it is also waiting for GC permission.
      if (!idleRequester->mRequestingGC) {
        idleRequester->remove();
      }
      idleRequester->SendIdleTime();
      has_spare_cycles = HasSpareCycles(activeCount);
    }

    if (has_spare_gc_cycles && idleRequester->mRequestingGC) {
      if (!idleRequester->IsWaitingForIdle()) {
        idleRequester->remove();
      }
      idleRequester->SendMayGC();
      has_spare_gc_cycles = HasSpareGCCycles();
    }

    idleRequester = next;
  }

  // Requests are still queued even though slots are free (children may simply
  // not be reporting activity); arm the anti-starvation timer.
  if (!sIdleAndGCRequests.isEmpty() && HasSpareCycles(activeCount)) {
    EnsureStarvationTimer();
  }
}

void IdleSchedulerParent::EnsureStarvationTimer() {
  // Even though idle runnables aren't really guaranteed to get run ever (which
  // is why most of them have the timer fallback), try to not let any child
  // process' idle handling to starve forever in case other processes are busy
  if (!sStarvationPreventer) {
    // Reuse StaticPrefs::page_load_deprioritization_period(), since that
    // is used on child side when deciding the minimum idle period.
    NS_NewTimerWithFuncCallback(
        &sStarvationPreventer, StarvationCallback, nullptr,
        StaticPrefs::page_load_deprioritization_period(),
        nsITimer::TYPE_ONE_SHOT_LOW_PRIORITY, "StarvationCallback"_ns);
  }
}

// One-shot timer callback: force-schedules the first idle-waiting child by
// temporarily treating it as prioritized, then releases the timer reference.
void IdleSchedulerParent::StarvationCallback(nsITimer* aTimer, void* aData) {
  RefPtr<IdleSchedulerParent> idleRequester = sIdleAndGCRequests.getFirst();
  while (idleRequester) {
    if (idleRequester->IsWaitingForIdle()) {
      // Treat the first process waiting for idle time as running prioritized
      // operation so that it gets run.
      ++idleRequester->mRunningPrioritizedOperation;
      ++sChildProcessesRunningPrioritizedOperation;
      Schedule(idleRequester);
      --idleRequester->mRunningPrioritizedOperation;
      --sChildProcessesRunningPrioritizedOperation;
      break;
    }

    idleRequester = idleRequester->getNext();
  }
  NS_RELEASE(sStarvationPreventer);
}

}  // namespace mozilla::ipc