HelperThreads.cpp (67990B)
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- 2 * vim: set ts=8 sts=2 et sw=2 tw=80: 3 * This Source Code Form is subject to the terms of the Mozilla Public 4 * License, v. 2.0. If a copy of the MPL was not distributed with this 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 7 #include "vm/HelperThreads.h" 8 9 #include "mozilla/ReverseIterator.h" // mozilla::Reversed(...) 10 #include "mozilla/ScopeExit.h" 11 #include "mozilla/Span.h" // mozilla::Span<TaggedScriptThingIndex> 12 13 #include <algorithm> 14 15 #include "frontend/CompilationStencil.h" // frontend::CompilationStencil 16 #include "gc/GC.h" 17 #include "gc/Zone.h" 18 #include "jit/BaselineCompileTask.h" 19 #include "jit/Ion.h" 20 #include "jit/IonCompileTask.h" 21 #include "jit/JitRuntime.h" 22 #include "jit/JitScript.h" 23 #include "js/CompileOptions.h" // JS::PrefableCompileOptions, JS::ReadOnlyCompileOptions 24 #include "js/experimental/CompileScript.h" // JS::ThreadStackQuotaForSize 25 #include "js/friend/StackLimits.h" // js::ReportOverRecursed 26 #include "js/HelperThreadAPI.h" 27 #include "js/Stack.h" 28 #include "js/UniquePtr.h" 29 #include "js/Utility.h" 30 #include "threading/CpuCount.h" 31 #include "vm/ErrorReporting.h" 32 #include "vm/HelperThreadState.h" 33 #include "vm/InternalThreadPool.h" 34 #include "vm/MutexIDs.h" 35 #include "wasm/WasmGenerator.h" 36 37 using namespace js; 38 39 using mozilla::TimeDuration; 40 41 static void CancelOffThreadWasmCompleteTier2GeneratorLocked( 42 AutoLockHelperThreadState& lock); 43 static void CancelOffThreadWasmPartialTier2CompileLocked( 44 AutoLockHelperThreadState& lock); 45 46 // This file is structured as follows: 47 // 48 // (1) Methods for GlobalHelperThreadState, and top level scheduling logic 49 // (2) Specifics for JS task classes 50 // (3) Specifics for wasm task classes 51 52 /////////////////////////////////////////////////////////////////////////// 53 // // 54 // GlobalHelperThreadState 
// methods and top-level scheduling logic                                //
//                                                                       //
///////////////////////////////////////////////////////////////////////////

namespace js {

// Mutex guarding all global helper-thread state. Taken via
// AutoLockHelperThreadState throughout this file.
MOZ_RUNINIT Mutex gHelperThreadLock(mutexid::GlobalHelperThreadState);
GlobalHelperThreadState* gHelperThreadState = nullptr;

}  // namespace js

// Allocate the singleton GlobalHelperThreadState. Returns false on OOM.
bool js::CreateHelperThreadsState() {
  MOZ_ASSERT(!gHelperThreadState);
  gHelperThreadState = js_new<GlobalHelperThreadState>();
  return gHelperThreadState;
}

// Tear down the singleton (idempotent: safe to call when never created).
void js::DestroyHelperThreadsState() {
  AutoLockHelperThreadState lock;

  if (!gHelperThreadState) {
    return;
  }

  gHelperThreadState->finish(lock);
  js_delete(gHelperThreadState);
  gHelperThreadState = nullptr;
}

bool js::EnsureHelperThreadsInitialized() {
  MOZ_ASSERT(gHelperThreadState);
  return gHelperThreadState->ensureInitialized();
}

static size_t ClampDefaultCPUCount(size_t cpuCount) {
  // It's extremely rare for SpiderMonkey to have more than a few cores worth
  // of work. At higher core counts, performance can even decrease due to NUMA
  // (and SpiderMonkey's lack of NUMA-awareness), contention, and general lack
  // of optimization for high core counts. So to avoid wasting thread stack
  // resources (and cluttering gdb and core dumps), clamp to 8 cores for now.
  return std::min<size_t>(cpuCount, 8);
}

static size_t ThreadCountForCPUCount(size_t cpuCount) {
  // We need at least two threads for tier-2 wasm compilations, because
  // there's a master task that holds a thread while other threads do the
  // compilation.
  return std::max<size_t>(cpuCount, 2);
}

// Testing hook: pretend the machine has |count| CPUs.
bool js::SetFakeCPUCount(size_t count) {
  HelperThreadState().setCpuCount(count);
  return true;
}

void GlobalHelperThreadState::setCpuCount(size_t count) {
  // This must be called before any threads have been initialized.
  AutoLockHelperThreadState lock;
  MOZ_ASSERT(!isInitialized(lock));

  // We can't do this if an external thread pool is in use.
  MOZ_ASSERT(!dispatchTaskCallback);

  cpuCount = count;
  threadCount = ThreadCountForCPUCount(count);
}

size_t js::GetHelperThreadCount() { return HelperThreadState().threadCount; }

size_t js::GetHelperThreadCPUCount() { return HelperThreadState().cpuCount; }

void JS::SetProfilingThreadCallbacks(
    JS::RegisterThreadCallback registerThread,
    JS::UnregisterThreadCallback unregisterThread) {
  HelperThreadState().registerThread = registerThread;
  HelperThreadState().unregisterThread = unregisterThread;
}

// Bug 1630189: Without MOZ_NEVER_INLINE, Windows PGO builds have a linking
// error for HelperThreadTaskCallback.
JS_PUBLIC_API MOZ_NEVER_INLINE void JS::SetHelperThreadTaskCallback(
    HelperThreadTaskCallback callback, size_t threadCount, size_t stackSize) {
  AutoLockHelperThreadState lock;
  HelperThreadState().setDispatchTaskCallback(callback, threadCount, stackSize,
                                              lock);
}

JS_PUBLIC_API MOZ_NEVER_INLINE const char* JS::GetHelperThreadTaskName(
    HelperThreadTask* task) {
  return task->getName();
}

// Install an embedder-provided thread pool callback. Must happen before
// initialization; after this the internal thread pool will not be used.
void GlobalHelperThreadState::setDispatchTaskCallback(
    JS::HelperThreadTaskCallback callback, size_t threadCount, size_t stackSize,
    const AutoLockHelperThreadState& lock) {
  MOZ_ASSERT(!isInitialized(lock));
  MOZ_ASSERT(!dispatchTaskCallback);
  MOZ_ASSERT(threadCount != 0);
  MOZ_ASSERT(stackSize >= 16 * 1024);

  dispatchTaskCallback = callback;
  this->threadCount = threadCount;
  this->stackQuota = JS::ThreadStackQuotaForSize(stackSize);
}

// Lazily initialize thread state; idempotent. Falls back to the internal
// thread pool when no external dispatch callback was installed.
bool GlobalHelperThreadState::ensureInitialized() {
  MOZ_ASSERT(CanUseExtraThreads());
  MOZ_ASSERT(this == &HelperThreadState());

  AutoLockHelperThreadState lock;

  if (isInitialized(lock)) {
    return true;
  }

  for (size_t& i : runningTaskCount) {
    i = 0;
  }

  useInternalThreadPool_ = !dispatchTaskCallback;
  if (useInternalThreadPool(lock)) {
    if (!InternalThreadPool::Initialize(threadCount, lock)) {
      return false;
    }
  }

  MOZ_ASSERT(dispatchTaskCallback);

  if (!ensureThreadCount(threadCount, lock)) {
    finishThreads(lock);
    return false;
  }

  MOZ_ASSERT(threadCount != 0);
  isInitialized_ = true;
  return true;
}

// Reserve bookkeeping capacity for |count| concurrently running tasks and,
// when using the internal pool, grow the pool to that many threads.
bool GlobalHelperThreadState::ensureThreadCount(
    size_t count, AutoLockHelperThreadState& lock) {
  if (!helperTasks_.reserve(count)) {
    return false;
  }

  if (useInternalThreadPool(lock)) {
    InternalThreadPool& pool = InternalThreadPool::Get();
    if (pool.threadCount(lock) < count) {
      if (!pool.ensureThreadCount(count, lock)) {
        return false;
      }

      threadCount = pool.threadCount(lock);
    }
  }

  return true;
}

GlobalHelperThreadState::GlobalHelperThreadState()
    : cpuCount(0),
      threadCount(0),
      totalCountRunningTasks(0),
      registerThread(nullptr),
      unregisterThread(nullptr),
      wasmCompleteTier2GeneratorsFinished_(0) {
  MOZ_ASSERT(!gHelperThreadState);

  cpuCount = ClampDefaultCPUCount(GetCPUCount());
  threadCount = ThreadCountForCPUCount(cpuCount);

  MOZ_ASSERT(cpuCount > 0, "GetCPUCount() seems broken");
}

// Drain all outstanding work and shut down the threads.
void GlobalHelperThreadState::finish(AutoLockHelperThreadState& lock) {
  if (!isInitialized(lock)) {
    return;
  }

  MOZ_ASSERT_IF(!JSRuntime::hasLiveRuntimes(), gcParallelMarkingThreads == 0);

  finishThreads(lock);

  // Make sure there are no Ion free tasks left. We check this here because,
  // unlike the other tasks, we don't explicitly block on this when
  // destroying a runtime.
  auto& freeList = ionFreeList(lock);
  while (!freeList.empty()) {
    UniquePtr<jit::IonFreeTask> task = std::move(freeList.back());
    freeList.popBack();
    jit::FreeIonCompileTasks(task->compileTasks());
  }
}

void GlobalHelperThreadState::finishThreads(AutoLockHelperThreadState& lock) {
  waitForAllTasksLocked(lock);
  terminating_ = true;

  if (InternalThreadPool::IsInitialized()) {
    InternalThreadPool::ShutDown(lock);
  }
}

#ifdef DEBUG
void GlobalHelperThreadState::assertIsLockedByCurrentThread() const {
  gHelperThreadLock.assertOwnedByCurrentThread();
}
#endif  // DEBUG

// If a thread is available, pick the highest-priority startable task, record
// it as running, and queue it for dispatch to the thread pool.
void GlobalHelperThreadState::dispatch(const AutoLockHelperThreadState& lock) {
  if (helperTasks_.length() >= threadCount) {
    return;
  }

  HelperThreadTask* task = findHighestPriorityTask(lock);
  if (!task) {
    return;
  }

#ifdef DEBUG
  MOZ_ASSERT(tasksPending_ < threadCount);
  tasksPending_++;
#endif

  // Add task to list of running tasks immediately.
  helperTasks(lock).infallibleEmplaceBack(task);
  runningTaskCount[task->threadType()]++;
  totalCountRunningTasks++;

  lock.queueTaskToDispatch(task);
}

// Block the calling (consumer) thread until a helper thread signals progress,
// or until |timeout| elapses.
void GlobalHelperThreadState::wait(
    AutoLockHelperThreadState& lock,
    TimeDuration timeout /* = TimeDuration::Forever() */) {
  MOZ_ASSERT(!lock.hasQueuedTasks());
  consumerWakeup.wait_for(lock, timeout);
}

void GlobalHelperThreadState::notifyAll(const AutoLockHelperThreadState&) {
  consumerWakeup.notify_all();
}

void GlobalHelperThreadState::notifyOne(const AutoLockHelperThreadState&) {
  consumerWakeup.notify_one();
}

bool GlobalHelperThreadState::hasActiveThreads(
    const AutoLockHelperThreadState& lock) {
  return !helperTasks(lock).empty();
}

void js::WaitForAllHelperThreads() { HelperThreadState().waitForAllTasks(); }

void js::WaitForAllHelperThreads(AutoLockHelperThreadState& lock) {
  HelperThreadState().waitForAllTasksLocked(lock);
}

void GlobalHelperThreadState::waitForAllTasks() {
  AutoLockHelperThreadState lock;
  waitForAllTasksLocked(lock);
}

// Wait for every queued and running task to complete. Long-lived wasm tier-2
// generator/compile tasks are cancelled first, since they would otherwise
// keep this loop from terminating.
void GlobalHelperThreadState::waitForAllTasksLocked(
    AutoLockHelperThreadState& lock) {
  CancelOffThreadWasmCompleteTier2GeneratorLocked(lock);
  CancelOffThreadWasmPartialTier2CompileLocked(lock);

  while (canStartTasks(lock) || hasActiveThreads(lock)) {
    wait(lock);
  }

  MOZ_ASSERT(tasksPending_ == 0);
  MOZ_ASSERT(gcParallelWorklist().isEmpty(lock));
  MOZ_ASSERT(ionWorklist(lock).empty());
  MOZ_ASSERT(wasmWorklist(lock, wasm::CompileState::EagerTier1).empty());
  MOZ_ASSERT(promiseHelperTasks(lock).empty());
  MOZ_ASSERT(compressionWorklist(lock).empty());
  MOZ_ASSERT(ionFreeList(lock).empty());
  MOZ_ASSERT(wasmWorklist(lock, wasm::CompileState::EagerTier2).empty());
  MOZ_ASSERT(wasmCompleteTier2GeneratorWorklist(lock).empty());
  MOZ_ASSERT(wasmPartialTier2CompileWorklist(lock).empty());
  MOZ_ASSERT(!tasksPending_);
  MOZ_ASSERT(!hasActiveThreads(lock));
}

// A task can be a "master" task, ie, it will block waiting for other worker
// threads that perform work on its behalf. If so it must not take the last
// available thread; there must always be at least one worker thread able to do
// the actual work. (Or the system may deadlock.)
//
// If a task is a master task it *must* pass isMaster=true here, or perform a
// similar calculation to avoid deadlock from starvation.
//
// isMaster should only be true if the thread calling checkTaskThreadLimit() is
// a helper thread.
//
// NOTE: Calling checkTaskThreadLimit() from a helper thread in the dynamic
// region after currentTask.emplace() and before currentTask.reset() may cause
// it to return a different result than if it is called outside that dynamic
// region, as the predicate inspects the values of the threads' currentTask
// members.

bool GlobalHelperThreadState::checkTaskThreadLimit(
    ThreadType threadType, size_t maxThreads, bool isMaster,
    const AutoLockHelperThreadState& lock) const {
  MOZ_ASSERT(maxThreads >= 1);
  MOZ_ASSERT(maxThreads <= threadCount);

  // Check thread limit for this task kind.
  size_t count = runningTaskCount[threadType];
  if (count >= maxThreads) {
    return false;
  }

  // Check overall idle thread count taking into account master threads. A
  // master thread must not use the last idle thread or it will deadlock itself.
  MOZ_ASSERT(threadCount >= totalCountRunningTasks);
  size_t idleCount = threadCount - totalCountRunningTasks;
  size_t idleRequired = isMaster ? 2 : 1;
  return idleCount >= idleRequired;
}

static inline bool IsHelperThreadSimulatingOOM(js::ThreadType threadType) {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
  return js::oom::simulator.targetThread() == threadType;
#else
  return false;
#endif
}

// Memory-reporter hook: account for this state object and everything hanging
// off the various worklists. Caller must hold the helper thread lock.
void GlobalHelperThreadState::addSizeOfIncludingThis(
    JS::GlobalStats* stats, const AutoLockHelperThreadState& lock) const {
#ifdef DEBUG
  assertIsLockedByCurrentThread();
#endif

  mozilla::MallocSizeOf mallocSizeOf = stats->mallocSizeOf_;
  JS::HelperThreadStats& htStats = stats->helperThread;

  htStats.stateData += mallocSizeOf(this);

  if (InternalThreadPool::IsInitialized()) {
    htStats.stateData +=
        InternalThreadPool::Get().sizeOfIncludingThis(mallocSizeOf, lock);
  }

  // Report memory used by various containers
  htStats.stateData +=
      ionWorklist_.sizeOfExcludingThis(mallocSizeOf) +
      ionFinishedList_.sizeOfExcludingThis(mallocSizeOf) +
      ionFreeList_.sizeOfExcludingThis(mallocSizeOf) +
      wasmWorklist_tier1_.sizeOfExcludingThis(mallocSizeOf) +
      wasmWorklist_tier2_.sizeOfExcludingThis(mallocSizeOf) +
      wasmCompleteTier2GeneratorWorklist_.sizeOfExcludingThis(mallocSizeOf) +
      wasmPartialTier2CompileWorklist_.sizeOfExcludingThis(mallocSizeOf) +
      promiseHelperTasks_.sizeOfExcludingThis(mallocSizeOf) +
      compressionWorklist_.sizeOfExcludingThis(mallocSizeOf) +
      compressionFinishedList_.sizeOfExcludingThis(mallocSizeOf) +
      gcParallelWorklist_.sizeOfExcludingThis(mallocSizeOf, lock) +
      helperTasks_.sizeOfExcludingThis(mallocSizeOf);

  // Report IonCompileTasks on wait lists
  for (auto task : ionWorklist_) {
    htStats.ionCompileTask += task->sizeOfExcludingThis(mallocSizeOf);
  }
  for (auto task : ionFinishedList_) {
    htStats.ionCompileTask += task->sizeOfExcludingThis(mallocSizeOf);
  }
  for (const auto& task : ionFreeList_) {
    for (auto* compileTask : task->compileTasks()) {
      htStats.ionCompileTask += compileTask->sizeOfExcludingThis(mallocSizeOf);
    }
  }

  // Report wasm::CompileTasks on wait lists
  for (auto task : wasmWorklist_tier1_) {
    htStats.wasmCompile += task->sizeOfExcludingThis(mallocSizeOf);
  }
  for (auto task : wasmWorklist_tier2_) {
    htStats.wasmCompile += task->sizeOfExcludingThis(mallocSizeOf);
  }

  // Report number of helper threads.
  MOZ_ASSERT(htStats.idleThreadCount == 0);
  MOZ_ASSERT(threadCount >= totalCountRunningTasks);
  htStats.activeThreadCount = totalCountRunningTasks;
  htStats.idleThreadCount = threadCount - totalCountRunningTasks;
}

// The max*Threads() accessors below cap concurrency per task kind; each
// returns 1 when OOM simulation targets that thread type so failures are
// deterministic.

size_t GlobalHelperThreadState::maxBaselineCompilationThreads() const {
  if (IsHelperThreadSimulatingOOM(js::THREAD_TYPE_BASELINE)) {
    return 1;
  }
  return threadCount;
}

size_t GlobalHelperThreadState::maxIonCompilationThreads() const {
  if (IsHelperThreadSimulatingOOM(js::THREAD_TYPE_ION)) {
    return 1;
  }
  return threadCount;
}

size_t GlobalHelperThreadState::maxIonFreeThreads() const {
  // IonFree tasks are low priority. Limit to one thread to help avoid jemalloc
  // lock contention.
  return 1;
}

size_t GlobalHelperThreadState::maxPromiseHelperThreads() const {
  if (IsHelperThreadSimulatingOOM(js::THREAD_TYPE_PROMISE_TASK)) {
    return 1;
  }
  return std::min(cpuCount, threadCount);
}

size_t GlobalHelperThreadState::maxDelazifyThreads() const {
  if (IsHelperThreadSimulatingOOM(js::THREAD_TYPE_DELAZIFY)) {
    return 1;
  }
  return std::min(cpuCount, threadCount);
}

size_t GlobalHelperThreadState::maxCompressionThreads() const {
  if (IsHelperThreadSimulatingOOM(js::THREAD_TYPE_COMPRESS)) {
    return 1;
  }

  // Compression is triggered on major GCs to compress ScriptSources. It is
  // considered low priority work.
  return 1;
}

size_t GlobalHelperThreadState::maxGCParallelThreads() const {
  if (IsHelperThreadSimulatingOOM(js::THREAD_TYPE_GCPARALLEL)) {
    return 1;
  }
  return threadCount;
}

size_t GlobalHelperThreadState::maxWasmCompilationThreads() const {
  if (IsHelperThreadSimulatingOOM(js::THREAD_TYPE_WASM_COMPILE_TIER1) ||
      IsHelperThreadSimulatingOOM(js::THREAD_TYPE_WASM_COMPILE_TIER2)) {
    return 1;
  }
  return std::min(cpuCount, threadCount);
}

size_t js::GetMaxWasmCompilationThreads() {
  return HelperThreadState().maxWasmCompilationThreads();
}

size_t GlobalHelperThreadState::maxWasmCompleteTier2GeneratorThreads() const {
  return MaxCompleteTier2GeneratorTasks;
}

size_t GlobalHelperThreadState::maxWasmPartialTier2CompileThreads() const {
  return MaxPartialTier2CompileTasks;
}

// GC tracing entry point: trace GC things reachable from queued, finished and
// in-flight compile tasks.
void GlobalHelperThreadState::trace(JSTracer* trc) {
  {
    AutoLockHelperThreadState lock;

#ifdef DEBUG
    // Since we hold the helper thread lock here we must disable GCMarker's
    // checking of the atom marking bitmap since that also relies on taking the
    // lock.
    GCMarker* marker = nullptr;
    if (trc->isMarkingTracer()) {
      marker = GCMarker::fromTracer(trc);
      marker->setCheckAtomMarking(false);
    }
    auto reenableAtomMarkingCheck = mozilla::MakeScopeExit([marker] {
      if (marker) {
        marker->setCheckAtomMarking(true);
      }
    });
#endif

    for (auto task : baselineWorklist(lock)) {
      task->trace(trc);
    }
    for (auto task : baselineFinishedList(lock)) {
      task->trace(trc);
    }

    // Queued Ion tasks have their LifoAlloc frozen read-only while off-thread;
    // temporarily unprotect so tracing may update pointers.
    for (auto task : ionWorklist(lock)) {
      task->alloc().lifoAlloc()->setReadWrite();
      task->trace(trc);
      task->alloc().lifoAlloc()->setReadOnly();
    }
    for (auto task : ionFinishedList(lock)) {
      task->trace(trc);
    }

    for (auto* helper : helperTasks(lock)) {
      if (helper->is<jit::IonCompileTask>()) {
        jit::IonCompileTask* ionCompileTask = helper->as<jit::IonCompileTask>();
        ionCompileTask->alloc().lifoAlloc()->setReadWrite();
        ionCompileTask->trace(trc);
      } else if (helper->is<jit::BaselineCompileTask>()) {
        helper->as<jit::BaselineCompileTask>()->trace(trc);
      }
    }
  }

  // The lazy link list is only accessed on the main thread, so trace it after
  // releasing the lock.
  JSRuntime* rt = trc->runtime();
  if (auto* jitRuntime = rt->jitRuntime()) {
    jit::IonCompileTask* task = jitRuntime->ionLazyLinkList(rt).getFirst();
    while (task) {
      task->trace(trc);
      task = task->getNext();
    }
  }
}

// Definition of helper thread tasks.
//
// Priority is determined by the order they're listed here.
// Ordered table of task selectors; earlier entries are higher priority.
// findHighestPriorityTask() walks this in order.
const GlobalHelperThreadState::Selector GlobalHelperThreadState::selectors[] = {
    &GlobalHelperThreadState::maybeGetGCParallelTask,
    &GlobalHelperThreadState::maybeGetBaselineCompileTask,
    &GlobalHelperThreadState::maybeGetIonCompileTask,
    &GlobalHelperThreadState::maybeGetWasmTier1CompileTask,
    &GlobalHelperThreadState::maybeGetPromiseHelperTask,
    &GlobalHelperThreadState::maybeGetFreeDelazifyTask,
    &GlobalHelperThreadState::maybeGetDelazifyTask,
    &GlobalHelperThreadState::maybeGetCompressionTask,
    &GlobalHelperThreadState::maybeGetLowPrioIonCompileTask,
    &GlobalHelperThreadState::maybeGetIonFreeTask,
    &GlobalHelperThreadState::maybeGetWasmPartialTier2CompileTask,
    &GlobalHelperThreadState::maybeGetWasmTier2CompileTask,
    &GlobalHelperThreadState::maybeGetWasmCompleteTier2GeneratorTask};

// True if any task kind has queued work that is currently allowed to start.
bool GlobalHelperThreadState::canStartTasks(
    const AutoLockHelperThreadState& lock) {
  return canStartGCParallelTask(lock) || canStartBaselineCompileTask(lock) ||
         canStartIonCompileTask(lock) || canStartWasmTier1CompileTask(lock) ||
         canStartPromiseHelperTask(lock) || canStartFreeDelazifyTask(lock) ||
         canStartDelazifyTask(lock) || canStartCompressionTask(lock) ||
         canStartIonFreeTask(lock) || canStartWasmTier2CompileTask(lock) ||
         canStartWasmCompleteTier2GeneratorTask(lock) ||
         canStartWasmPartialTier2CompileTask(lock);
}

// Entry point invoked by pool threads (internal or embedder-provided) to run
// one previously dispatched task, then try to dispatch the next one.
void JS::RunHelperThreadTask(HelperThreadTask* task) {
  MOZ_ASSERT(task);
  MOZ_ASSERT(CanUseExtraThreads());

  AutoLockHelperThreadState lock;

  if (!gHelperThreadState || HelperThreadState().isTerminating(lock)) {
    return;
  }

  HelperThreadState().runOneTask(task, lock);
  HelperThreadState().dispatch(lock);
}

// Run |task| and wake any consumers waiting on task completion.
void GlobalHelperThreadState::runOneTask(HelperThreadTask* task,
                                         AutoLockHelperThreadState& lock) {
#ifdef DEBUG
  MOZ_ASSERT(tasksPending_ > 0);
  tasksPending_--;
#endif

  runTaskLocked(task, lock);

  notifyAll(lock);
}

HelperThreadTask* GlobalHelperThreadState::findHighestPriorityTask(
    const AutoLockHelperThreadState& locked) {
  // Return the highest priority task that is ready to start, or nullptr.

  for (const auto& selector : selectors) {
    if (auto* task = (this->*(selector))(locked)) {
      return task;
    }
  }

  return nullptr;
}

#ifdef DEBUG
static bool VectorHasTask(
    const Vector<HelperThreadTask*, 0, SystemAllocPolicy>& tasks,
    HelperThreadTask* task) {
  for (HelperThreadTask* t : tasks) {
    if (t == task) {
      return true;
    }
  }

  return false;
}
#endif

// Execute |task| with the lock held on entry/exit, and remove it from the
// running-task bookkeeping when done. The task itself may drop the lock.
void GlobalHelperThreadState::runTaskLocked(HelperThreadTask* task,
                                            AutoLockHelperThreadState& locked) {
  ThreadType threadType = task->threadType();

  MOZ_ASSERT(VectorHasTask(helperTasks(locked), task));
  MOZ_ASSERT(totalCountRunningTasks != 0);
  MOZ_ASSERT(runningTaskCount[threadType] != 0);

  // Tag this thread for the OOM simulator while the task runs.
  js::oom::SetThreadType(threadType);

  {
    JS::AutoSuppressGCAnalysis nogc;
    task->runHelperThreadTask(locked);
  }

  js::oom::SetThreadType(js::THREAD_TYPE_NONE);

  helperTasks(locked).eraseIfEqual(task);
  totalCountRunningTasks--;
  runningTaskCount[threadType]--;
}

void AutoHelperTaskQueue::queueTaskToDispatch(
    JS::HelperThreadTask* task) const {
  // This is marked const because it doesn't release the mutex.

  task->onThreadPoolDispatch();

  AutoEnterOOMUnsafeRegion oomUnsafe;
  if (!tasksToDispatch.append(task)) {
    oomUnsafe.crash("AutoLockHelperThreadState::queueTaskToDispatch");
  }
}

// Hand all queued tasks to the dispatch callback; called after the helper
// thread lock has been released.
void AutoHelperTaskQueue::dispatchQueuedTasks() {
  // The hazard analysis can't tell that the callback doesn't GC.
  JS::AutoSuppressGCAnalysis nogc;

  for (size_t i = 0; i < tasksToDispatch.length(); i++) {
    HelperThreadState().dispatchTaskCallback(tasksToDispatch[i]);
  }
  tasksToDispatch.clear();
}

///////////////////////////////////////////////////////////////////////////
//                                                                       //
// JS task definitions                                                   //
//                                                                       //
///////////////////////////////////////////////////////////////////////////

//== IonCompileTask and CompilationSelector ===============================

bool GlobalHelperThreadState::canStartIonCompileTask(
    const AutoLockHelperThreadState& lock) {
  return !ionWorklist(lock).empty() &&
         checkTaskThreadLimit(THREAD_TYPE_ION, maxIonCompilationThreads(),
                              lock);
}

static bool IonCompileTaskHasHigherPriority(jit::IonCompileTask* first,
                                            jit::IonCompileTask* second) {
  // Return true if priority(first) > priority(second).
  //
  // This method can return whatever it wants, though it really ought to be a
  // total order. The ordering is allowed to race (change on the fly), however.

  // A higher warm-up counter indicates a higher priority.
  jit::JitScript* firstJitScript = first->script()->jitScript();
  jit::JitScript* secondJitScript = second->script()->jitScript();
  return firstJitScript->warmUpCount() / first->script()->length() >
         secondJitScript->warmUpCount() / second->script()->length();
}

// Remove and return the best pending Ion task. With |checkExecutionStatus|,
// only tasks whose main thread is currently running JS are considered.
// Returns nullptr if no eligible task exists.
jit::IonCompileTask* GlobalHelperThreadState::highestPriorityPendingIonCompile(
    const AutoLockHelperThreadState& lock, bool checkExecutionStatus) {
  auto& worklist = ionWorklist(lock);
  MOZ_ASSERT(!worklist.empty());

  // Get the highest priority IonCompileTask which has not started compilation
  // yet.
  size_t index = worklist.length();
  for (size_t i = 0; i < worklist.length(); i++) {
    if (checkExecutionStatus && !worklist[i]->isMainThreadRunningJS()) {
      continue;
    }
    if (i < index ||
        IonCompileTaskHasHigherPriority(worklist[i], worklist[index])) {
      index = i;
    }
  }

  if (index == worklist.length()) {
    return nullptr;
  }
  jit::IonCompileTask* task = worklist[index];
  worklist.erase(&worklist[index]);
  return task;
}

HelperThreadTask* GlobalHelperThreadState::maybeGetIonCompileTask(
    const AutoLockHelperThreadState& lock) {
  if (!canStartIonCompileTask(lock)) {
    return nullptr;
  }

  return highestPriorityPendingIonCompile(lock,
                                          /* checkExecutionStatus */ true);
}

HelperThreadTask* GlobalHelperThreadState::maybeGetLowPrioIonCompileTask(
    const AutoLockHelperThreadState& lock) {
  if (!canStartIonCompileTask(lock)) {
    return nullptr;
  }

  return highestPriorityPendingIonCompile(lock,
                                          /* checkExecutionStatus */ false);
}

// Queue an Ion compilation for off-thread execution. Returns false on OOM.
bool GlobalHelperThreadState::submitTask(
    jit::IonCompileTask* task, const AutoLockHelperThreadState& locked) {
  MOZ_ASSERT(isInitialized(locked));

  if (!ionWorklist(locked).append(task)) {
    return false;
  }

  // The build is moving off-thread. Freeze the LifoAlloc to prevent any
  // unwanted mutations.
  task->alloc().lifoAlloc()->setReadOnly();

  dispatch(locked);
  return true;
}

bool js::StartOffThreadIonCompile(jit::IonCompileTask* task,
                                  const AutoLockHelperThreadState& lock) {
  return HelperThreadState().submitTask(task, lock);
}

/*
 * Move an IonCompilationTask for which compilation has either finished, failed,
 * or been cancelled into the global finished compilation list. All off thread
 * compilations which are started must eventually be finished.
 */
void js::FinishOffThreadIonCompile(jit::IonCompileTask* task,
                                   const AutoLockHelperThreadState& lock) {
  AutoEnterOOMUnsafeRegion oomUnsafe;
  if (!HelperThreadState().ionFinishedList(lock).append(task)) {
    oomUnsafe.crash("FinishOffThreadIonCompile");
  }
  task->script()
      ->runtimeFromAnyThread()
      ->jitRuntime()
      ->numFinishedOffThreadTasksRef(lock)++;
}

// Map any CompilationSelector variant to its owning runtime.
static JSRuntime* GetSelectorRuntime(const CompilationSelector& selector) {
  struct Matcher {
    JSRuntime* operator()(JSScript* script) {
      return script->runtimeFromMainThread();
    }
    JSRuntime* operator()(Zone* zone) { return zone->runtimeFromMainThread(); }
    JSRuntime* operator()(ZonesInState zbs) { return zbs.runtime; }
    JSRuntime* operator()(JSRuntime* runtime) { return runtime; }
  };

  return selector.match(Matcher());
}

// True if |task| compiles a script covered by |selector|.
static bool IonCompileTaskMatches(const CompilationSelector& selector,
                                  jit::IonCompileTask* task) {
  struct TaskMatches {
    jit::IonCompileTask* task_;

    bool operator()(JSScript* script) { return script == task_->script(); }
    bool operator()(Zone* zone) {
      return zone == task_->script()->zoneFromAnyThread();
    }
    bool operator()(JSRuntime* runtime) {
      return runtime == task_->script()->runtimeFromAnyThread();
    }
    bool operator()(ZonesInState zbs) {
      return zbs.runtime == task_->script()->runtimeFromAnyThread() &&
             zbs.state == task_->script()->zoneFromAnyThread()->gcState();
    }
  };

  return selector.match(TaskMatches{task});
}

// If we're canceling Ion compilations for a zone/runtime, force a new
// IonFreeTask even if there are just a few tasks. This lets us free as much
// memory as possible.
849 static bool ShouldForceIonFreeTask(const CompilationSelector& selector) { 850 struct Matcher { 851 bool operator()(JSScript* script) { return false; } 852 bool operator()(Zone* zone) { return true; } 853 bool operator()(ZonesInState zbs) { return true; } 854 bool operator()(JSRuntime* runtime) { return true; } 855 }; 856 857 return selector.match(Matcher()); 858 } 859 860 void GlobalHelperThreadState::cancelOffThreadIonCompile( 861 const CompilationSelector& selector) { 862 jit::JitRuntime* jitRuntime = GetSelectorRuntime(selector)->jitRuntime(); 863 MOZ_ASSERT(jitRuntime); 864 865 AutoStartIonFreeTask freeTask(jitRuntime, ShouldForceIonFreeTask(selector)); 866 867 { 868 AutoLockHelperThreadState lock; 869 if (!isInitialized(lock)) { 870 return; 871 } 872 873 /* Cancel any pending entries for which processing hasn't started. */ 874 GlobalHelperThreadState::IonCompileTaskVector& worklist = ionWorklist(lock); 875 for (size_t i = 0; i < worklist.length(); i++) { 876 jit::IonCompileTask* task = worklist[i]; 877 if (IonCompileTaskMatches(selector, task)) { 878 // Once finished, tasks are added to a Linked list which is 879 // allocated with the IonCompileTask class. The IonCompileTask is 880 // allocated in the LifoAlloc so we need the LifoAlloc to be mutable. 881 worklist[i]->alloc().lifoAlloc()->setReadWrite(); 882 883 FinishOffThreadIonCompile(task, lock); 884 remove(worklist, &i); 885 } 886 } 887 888 /* Wait for in progress entries to finish up. 
*/ 889 bool cancelled; 890 do { 891 cancelled = false; 892 for (auto* helper : helperTasks(lock)) { 893 if (!helper->is<jit::IonCompileTask>()) { 894 continue; 895 } 896 897 jit::IonCompileTask* ionCompileTask = helper->as<jit::IonCompileTask>(); 898 if (IonCompileTaskMatches(selector, ionCompileTask)) { 899 ionCompileTask->alloc().lifoAlloc()->setReadWrite(); 900 ionCompileTask->mirGen().cancel(); 901 cancelled = true; 902 } 903 } 904 if (cancelled) { 905 wait(lock); 906 } 907 } while (cancelled); 908 909 /* Cancel code generation for any completed entries. */ 910 GlobalHelperThreadState::IonCompileTaskVector& finished = 911 ionFinishedList(lock); 912 for (size_t i = 0; i < finished.length(); i++) { 913 jit::IonCompileTask* task = finished[i]; 914 if (IonCompileTaskMatches(selector, task)) { 915 JSRuntime* rt = task->script()->runtimeFromAnyThread(); 916 jitRuntime->numFinishedOffThreadTasksRef(lock)--; 917 jit::FinishOffThreadTask(rt, freeTask, task); 918 remove(finished, &i); 919 } 920 } 921 } 922 923 /* Cancel lazy linking for pending tasks (attached to the ionScript). 
  */
  JSRuntime* runtime = GetSelectorRuntime(selector);
  jit::IonCompileTask* task = jitRuntime->ionLazyLinkList(runtime).getFirst();
  while (task) {
    // Capture the next link first: finishing |task| below may unlink it from
    // the lazy link list (NOTE(review): presumed from the traversal pattern —
    // confirm against jit::FinishOffThreadTask).
    jit::IonCompileTask* next = task->getNext();
    if (IonCompileTaskMatches(selector, task)) {
      jit::FinishOffThreadTask(runtime, freeTask, task);
    }
    task = next;
  }
}

// Returns whether the JIT data structures relevant to |selector| exist at
// all. If they don't, there cannot be any off-thread Ion compilations to
// cancel or inspect, and callers can return early without taking locks.
static bool JitDataStructuresExist(const CompilationSelector& selector) {
  struct Matcher {
    bool operator()(JSScript* script) { return !!script->zone()->jitZone(); }
    bool operator()(Zone* zone) { return !!zone->jitZone(); }
    bool operator()(ZonesInState zbs) { return zbs.runtime->hasJitRuntime(); }
    bool operator()(JSRuntime* runtime) { return runtime->hasJitRuntime(); }
  };

  return selector.match(Matcher());
}

void js::CancelOffThreadIonCompile(const CompilationSelector& selector) {
  if (!JitDataStructuresExist(selector)) {
    return;
  }

  // The portable baseline interpreter does not use off-thread Ion
  // compilation.
  if (jit::IsPortableBaselineInterpreterEnabled()) {
    return;
  }

  HelperThreadState().cancelOffThreadIonCompile(selector);
}

#ifdef DEBUG
// Debug-only: returns whether an off-thread Ion compilation for |zone| exists
// anywhere a task can live: the pending worklist, the currently-running
// helper tasks, the finished list, or the runtime's lazy link list.
bool GlobalHelperThreadState::hasOffThreadIonCompile(
    Zone* zone, AutoLockHelperThreadState& lock) {
  for (jit::IonCompileTask* task : ionWorklist(lock)) {
    if (task->script()->zoneFromAnyThread() == zone) {
      return true;
    }
  }

  for (auto* helper : helperTasks(lock)) {
    if (helper->is<jit::IonCompileTask>()) {
      JSScript* script = helper->as<jit::IonCompileTask>()->script();
      if (script->zoneFromAnyThread() == zone) {
        return true;
      }
    }
  }

  for (jit::IonCompileTask* task : ionFinishedList(lock)) {
    if (task->script()->zoneFromAnyThread() == zone) {
      return true;
    }
  }

  JSRuntime* rt = zone->runtimeFromMainThread();
  if (rt->hasJitRuntime()) {
    for (jit::IonCompileTask* task : rt->jitRuntime()->ionLazyLinkList(rt)) {
      if (task->script()->zone() == zone) {
        return true;
      }
    }
  }

  return false;
}

bool js::HasOffThreadIonCompile(Zone* zone) {
  if (jit::IsPortableBaselineInterpreterEnabled()) {
    return false;
  }

  AutoLockHelperThreadState lock;

  if (!HelperThreadState().isInitialized(lock)) {
    return false;
  }

  return HelperThreadState().hasOffThreadIonCompile(zone, lock);
}
#endif

//== IonFreeTask ==========================================================

bool GlobalHelperThreadState::canStartIonFreeTask(
    const AutoLockHelperThreadState& lock) {
  return !ionFreeList(lock).empty() &&
         checkTaskThreadLimit(THREAD_TYPE_ION_FREE, maxIonFreeThreads(), lock);
}

HelperThreadTask* GlobalHelperThreadState::maybeGetIonFreeTask(
    const AutoLockHelperThreadState& lock) {
  if (!canStartIonFreeTask(lock)) {
    return nullptr;
  }

  // Transfer ownership out of the list; the raw pointer is handed to the
  // helper thread machinery.
  UniquePtr<jit::IonFreeTask> task = std::move(ionFreeList(lock).back());
  ionFreeList(lock).popBack();
  return task.release();
}

void jit::JitRuntime::maybeStartIonFreeTask(bool force) {
  IonFreeCompileTasks& tasks = ionFreeTaskBatch_.ref();
  if (tasks.empty()) {
    return;
  }

  // Start an IonFreeTask if we have at least eight tasks. If |force| is true we
  // always start an IonFreeTask.
  if (!force) {
    constexpr size_t MinBatchSize = 8;
    static_assert(IonFreeCompileTasks::InlineLength >= MinBatchSize,
                  "Minimum batch size shouldn't require malloc");
    if (tasks.length() < MinBatchSize) {
      return;
    }
  }

  auto freeTask = js::MakeUnique<jit::IonFreeTask>(std::move(tasks));
  if (!freeTask) {
    // Free compilation data on the main thread instead.
    MOZ_ASSERT(!tasks.empty(), "shouldn't have moved tasks on OOM");
    jit::FreeIonCompileTasks(tasks);
    tasks.clearAndFree();
    return;
  }

  AutoLockHelperThreadState lock;
  if (!HelperThreadState().submitTask(std::move(freeTask), lock)) {
    // If submitTask OOMs, then freeTask hasn't been moved so we can still use
    // its task list.
    jit::FreeIonCompileTasks(freeTask->compileTasks());
  }

  tasks.clearAndFree();
}

bool GlobalHelperThreadState::submitTask(
    UniquePtr<jit::IonFreeTask>&& task,
    const AutoLockHelperThreadState& locked) {
  MOZ_ASSERT(isInitialized(locked));

  // On append failure |task| is not consumed; the caller handles the OOM.
  if (!ionFreeList(locked).append(std::move(task))) {
    return false;
  }

  dispatch(locked);
  return true;
}

bool js::AutoStartIonFreeTask::addIonCompileToFreeTaskBatch(
    jit::IonCompileTask* task) {
  return jitRuntime_->addIonCompileToFreeTaskBatch(task);
}

// On scope exit, kick off an IonFreeTask for any batched compile tasks.
js::AutoStartIonFreeTask::~AutoStartIonFreeTask() {
  jitRuntime_->maybeStartIonFreeTask(force_);
}

//== BaselineCompileTask ==================================================

bool GlobalHelperThreadState::canStartBaselineCompileTask(
    const AutoLockHelperThreadState& lock) {
  return !baselineWorklist(lock).empty() &&
         checkTaskThreadLimit(THREAD_TYPE_BASELINE,
                              maxBaselineCompilationThreads(), lock);
}

HelperThreadTask* GlobalHelperThreadState::maybeGetBaselineCompileTask(
    const AutoLockHelperThreadState& lock) {
  if (!canStartBaselineCompileTask(lock)) {
    return nullptr;
  }

  return baselineWorklist(lock).popCopy();
}

bool GlobalHelperThreadState::submitTask(
    jit::BaselineCompileTask* task, const AutoLockHelperThreadState& locked) {
  MOZ_ASSERT(isInitialized(locked));

  if (!baselineWorklist(locked).append(task)) {
    return false;
  }

  dispatch(locked);
  return true;
}

bool js::StartOffThreadBaselineCompile(jit::BaselineCompileTask* task,
                                       const AutoLockHelperThreadState& lock) {
  return HelperThreadState().submitTask(task, lock);
}

/*
 * Move a BaselineCompileTask for which compilation has either finished, failed,
 * or been cancelled into the global finished compilation list. All off thread
 * compilations which are started must eventually be finished.
 */
void js::FinishOffThreadBaselineCompile(jit::BaselineCompileTask* task,
                                        const AutoLockHelperThreadState& lock) {
  AutoEnterOOMUnsafeRegion oomUnsafe;
  if (!HelperThreadState().baselineFinishedList(lock).append(task)) {
    oomUnsafe.crash("FinishOffThreadBaselineCompile");
  }
  task->runtimeFromAnyThread()->jitRuntime()->numFinishedOffThreadTasksRef(
      lock)++;
}

// Returns whether |task| is selected by |selector| (a script, zone, runtime,
// or zones-in-a-GC-state filter).
static bool BaselineCompileTaskMatches(const CompilationSelector& selector,
                                       jit::BaselineCompileTask* task) {
  struct TaskMatches {
    jit::BaselineCompileTask* task_;

    bool operator()(JSScript* script) { return task_->scriptMatches(script); }
    bool operator()(Zone* zone) { return zone == task_->zoneFromAnyThread(); }
    bool operator()(JSRuntime* runtime) {
      return runtime == task_->runtimeFromAnyThread();
    }
    bool operator()(ZonesInState zbs) {
      return zbs.runtime == task_->runtimeFromAnyThread() &&
             zbs.state == task_->zoneFromAnyThread()->gcState();
    }
  };

  return selector.match(TaskMatches{task});
}

// Cancel every matching baseline compilation, in all three states it can be
// in: pending (worklist), running (wait for it to finish), and finished
// (discard its result instead of linking it).
void GlobalHelperThreadState::cancelOffThreadBaselineCompile(
    const CompilationSelector& selector) {
  jit::JitRuntime* jitRuntime = GetSelectorRuntime(selector)->jitRuntime();
  MOZ_ASSERT(jitRuntime);

  {
    AutoLockHelperThreadState lock;
    if (!isInitialized(lock)) {
      return;
    }

    /* Cancel any pending entries for which processing hasn't started. */
    GlobalHelperThreadState::BaselineCompileTaskVector& worklist =
        baselineWorklist(lock);
    for (size_t i = 0; i < worklist.length(); i++) {
      jit::BaselineCompileTask* task = worklist[i];
      if (BaselineCompileTaskMatches(selector, task)) {
        FinishOffThreadBaselineCompile(task, lock);
        // NOTE(review): remove() presumably fixes up |i| so the next
        // iteration doesn't skip an element — confirm in HelperThreadState.h.
        remove(worklist, &i);
      }
    }

    /* Wait for in progress entries to finish up. */
    while (true) {
      bool inProgress = false;
      for (auto* helper : helperTasks(lock)) {
        if (!helper->is<jit::BaselineCompileTask>()) {
          continue;
        }

        jit::BaselineCompileTask* task = helper->as<jit::BaselineCompileTask>();
        if (BaselineCompileTaskMatches(selector, task)) {
          inProgress = true;
          break;
        }
      }
      if (!inProgress) {
        break;
      }
      wait(lock);
    }

    /* Cancel linking for any completed entries. */
    GlobalHelperThreadState::BaselineCompileTaskVector& finished =
        baselineFinishedList(lock);
    for (size_t i = 0; i < finished.length(); i++) {
      jit::BaselineCompileTask* task = finished[i];
      if (BaselineCompileTaskMatches(selector, task)) {
        jitRuntime->numFinishedOffThreadTasksRef(lock)--;
        jit::BaselineCompileTask::FinishOffThreadTask(task);
        remove(finished, &i);
      }
    }
  }
}

void js::CancelOffThreadBaselineCompile(const CompilationSelector& selector) {
  if (!JitDataStructuresExist(selector)) {
    return;
  }

  if (jit::IsPortableBaselineInterpreterEnabled()) {
    return;
  }

  HelperThreadState().cancelOffThreadBaselineCompile(selector);
}

//== DelazifyTask =========================================================

bool GlobalHelperThreadState::canStartDelazifyTask(
    const AutoLockHelperThreadState& lock) {
  return !delazifyWorklist(lock).isEmpty() &&
         checkTaskThreadLimit(THREAD_TYPE_DELAZIFY, maxDelazifyThreads(),
                              /*isMaster=*/true, lock);
}
HelperThreadTask* GlobalHelperThreadState::maybeGetDelazifyTask(
    const AutoLockHelperThreadState& lock) {
  // NOTE: We want to span all available cores with delazification tasks, in
  // order to parse a maximum number of functions ahead of their executions.
  // Thus, as opposed to parse tasks, which have a higher priority, we are not
  // exclusively executing these tasks on parse threads.
  auto& worklist = delazifyWorklist(lock);
  if (worklist.isEmpty()) {
    return nullptr;
  }
  return worklist.popFirst();
}

void GlobalHelperThreadState::submitTask(
    DelazifyTask* task, const AutoLockHelperThreadState& locked) {
  delazifyWorklist(locked).insertBack(task);
  dispatch(locked);
}

// Create and schedule a DelazifyTask to eagerly delazify inner functions of
// |stencils| on a helper thread, unless the compile options, code coverage,
// or thread availability rule it out.
void js::StartOffThreadDelazification(
    JSContext* maybeCx, const JS::ReadOnlyCompileOptions& options,
    frontend::InitialStencilAndDelazifications* stencils) {
  // Skip delazify tasks if we parse everything on-demand or ahead.
  auto strategy = options.eagerDelazificationStrategy();
  if (strategy == JS::DelazificationOption::OnDemandOnly ||
      strategy == JS::DelazificationOption::ParseEverythingEagerly) {
    return;
  }

  // Skip delazify task if code coverage is enabled.
  if (maybeCx && maybeCx->realm()->collectCoverageForDebug()) {
    return;
  }

  if (!CanUseExtraThreads()) {
    return;
  }

  JSRuntime* maybeRuntime = maybeCx ? maybeCx->runtime() : nullptr;
  UniquePtr<DelazifyTask> task;
  task = DelazifyTask::Create(maybeRuntime, options, stencils);
  if (!task) {
    return;
  }

  // Schedule delazification task if there is any function to delazify.
  if (!task->done()) {
    AutoLockHelperThreadState lock;
    HelperThreadState().submitTask(task.release(), lock);
  }
}

// Factory: returns nullptr on allocation or initialization failure, in which
// case delazification simply happens on-demand instead.
UniquePtr<DelazifyTask> DelazifyTask::Create(
    JSRuntime* maybeRuntime, const JS::ReadOnlyCompileOptions& options,
    frontend::InitialStencilAndDelazifications* stencils) {
  UniquePtr<DelazifyTask> task;
  task.reset(js_new<DelazifyTask>(maybeRuntime, options.prefableOptions()));
  if (!task) {
    return nullptr;
  }

  if (!task->init(options, stencils)) {
    // In case of errors, skip this and delazify on-demand.
    return nullptr;
  }

  return task;
}

DelazifyTask::DelazifyTask(
    JSRuntime* maybeRuntime,
    const JS::PrefableCompileOptions& initialPrefableOptions)
    : maybeRuntime(maybeRuntime),
      delazificationCx(initialPrefableOptions, HelperThreadState().stackQuota) {
}

DelazifyTask::~DelazifyTask() {
  // The LinkedListElement destructor will remove us from any list we are part
  // of without synchronization, so ensure that doesn't happen.
  MOZ_DIAGNOSTIC_ASSERT(!isInList());
}

bool DelazifyTask::init(const JS::ReadOnlyCompileOptions& options,
                        frontend::InitialStencilAndDelazifications* stencils) {
  return delazificationCx.init(options, stencils);
}

size_t DelazifyTask::sizeOfExcludingThis(
    mozilla::MallocSizeOf mallocSizeOf) const {
  return delazificationCx.sizeOfExcludingThis(mallocSizeOf);
}

void DelazifyTask::runHelperThreadTask(AutoLockHelperThreadState& lock) {
  {
    AutoUnlockHelperThreadState unlock(lock);
    // NOTE: We do not report errors beyond this scope, as there is nowhere
    // to report these errors to. In the meantime, prevent the eager
    // delazification from running after any kind of errors.
    (void)runTask();
  }

  // If we should continue to delazify even more functions, then re-add this
  // task to the vector of delazification tasks. This might happen when the
  // DelazifyTask is interrupted by a higher priority task. (see
  // mozilla::TaskController & mozilla::Task)
  if (!delazificationCx.done()) {
    HelperThreadState().submitTask(this, lock);
  } else {
    // Done: schedule a FreeDelazifyTask to delete |this| off-thread. If that
    // allocation fails, the task is leaked (OOM path).
    UniquePtr<FreeDelazifyTask> freeTask(js_new<FreeDelazifyTask>(this));
    if (freeTask) {
      HelperThreadState().submitTask(std::move(freeTask), lock);
    }
  }
}

bool DelazifyTask::runTask() { return delazificationCx.delazify(); }

bool DelazifyTask::done() const { return delazificationCx.done(); }

// Remove and delete every not-yet-started delazify task that matches |rt| (or
// has no runtime) from the worklist.
void GlobalHelperThreadState::cancelPendingDelazifyTask(
    JSRuntime* rt, AutoLockHelperThreadState& lock) {
  auto& delazifyList = delazifyWorklist(lock);

  auto end = delazifyList.end();
  for (auto iter = delazifyList.begin(); iter != end;) {
    DelazifyTask* task = *iter;
    // Advance before removal so the iterator stays valid.
    ++iter;
    if (task->runtimeMatchesOrNoRuntime(rt)) {
      task->removeFrom(delazifyList);
      js_delete(task);
    }
  }
}

void GlobalHelperThreadState::waitUntilCancelledDelazifyTasks(
    JSRuntime* rt, AutoLockHelperThreadState& lock) {
  while (true) {
    cancelPendingDelazifyTask(rt, lock);

    // If running tasks are delazifying any functions, then we have to wait
    // until they complete to remove them from the pending list. DelazifyTasks
    // insert themselves back to be processed once more after delazifying a
    // function.
    bool inProgress = false;
    for (auto* helper : helperTasks(lock)) {
      if (helper->is<DelazifyTask>() &&
          helper->as<DelazifyTask>()->runtimeMatchesOrNoRuntime(rt)) {
        inProgress = true;
        break;
      }
    }
    if (!inProgress) {
      break;
    }

    wait(lock);
  }

  MOZ_ASSERT(!hasAnyDelazifyTask(rt, lock));
}

void GlobalHelperThreadState::waitUntilEmptyFreeDelazifyTaskVector(
    AutoLockHelperThreadState& lock) {
  while (true) {
    bool inProgress = false;
    if (!freeDelazifyTaskVector(lock).empty()) {
      inProgress = true;
    }

    // If running tasks are delazifying any functions, then we have to wait
    // until they complete to remove them from the pending list. DelazifyTasks
    // insert themselves back to be processed once more after delazifying a
    // function.
    for (auto* helper : helperTasks(lock)) {
      if (helper->is<FreeDelazifyTask>()) {
        inProgress = true;
        break;
      }
    }
    if (!inProgress) {
      break;
    }

    wait(lock);
  }
}

void js::CancelOffThreadDelazify(JSRuntime* runtime) {
  AutoLockHelperThreadState lock;

  if (!HelperThreadState().isInitialized(lock)) {
    return;
  }

  // Cancel all Delazify tasks from the given runtime, and wait if tasks from
  // the given runtime are being executed.
  HelperThreadState().waitUntilCancelledDelazifyTasks(runtime, lock);

  // Empty the free list of delazify tasks, in case one of the delazify tasks
  // ended and therefore did not return to the pending list of delazify tasks.
  HelperThreadState().waitUntilEmptyFreeDelazifyTaskVector(lock);
}

// Returns whether any delazify task matching |rt| (or with no runtime) is
// either pending on the worklist or currently running.
bool GlobalHelperThreadState::hasAnyDelazifyTask(
    JSRuntime* rt, AutoLockHelperThreadState& lock) {
  for (auto task : delazifyWorklist(lock)) {
    if (task->runtimeMatchesOrNoRuntime(rt)) {
      return true;
    }
  }

  for (auto* helper : helperTasks(lock)) {
    if (helper->is<DelazifyTask>() &&
        helper->as<DelazifyTask>()->runtimeMatchesOrNoRuntime(rt)) {
      return true;
    }
  }

  return false;
}

void js::WaitForAllDelazifyTasks(JSRuntime* rt) {
  AutoLockHelperThreadState lock;
  if (!HelperThreadState().isInitialized(lock)) {
    return;
  }

  while (true) {
    if (!HelperThreadState().hasAnyDelazifyTask(rt, lock)) {
      break;
    }

    HelperThreadState().wait(lock);
  }
}

//== FreeDelazifyTask =====================================================

bool GlobalHelperThreadState::canStartFreeDelazifyTask(
    const AutoLockHelperThreadState& lock) {
  return !freeDelazifyTaskVector(lock).empty() &&
         checkTaskThreadLimit(THREAD_TYPE_DELAZIFY_FREE, maxDelazifyThreads(),
                              /*isMaster=*/true, lock);
}

HelperThreadTask* GlobalHelperThreadState::maybeGetFreeDelazifyTask(
    const AutoLockHelperThreadState& lock) {
  auto& freeList = freeDelazifyTaskVector(lock);
  if (!freeList.empty()) {
    // Ownership passes to the helper thread machinery as a raw pointer.
    UniquePtr<FreeDelazifyTask> task = std::move(freeList.back());
    freeList.popBack();
    return task.release();
  }
  return nullptr;
}

bool GlobalHelperThreadState::submitTask(
    UniquePtr<FreeDelazifyTask> task, const AutoLockHelperThreadState& locked) {
  if (!freeDelazifyTaskVector(locked).append(std::move(task))) {
    return false;
  }
  dispatch(locked);
  return true;
}

// Delete the wrapped DelazifyTask with the lock released, then delete
// ourselves; FreeDelazifyTask is self-owning once running.
void FreeDelazifyTask::runHelperThreadTask(AutoLockHelperThreadState& locked) {
  {
    AutoUnlockHelperThreadState unlock(locked);
    js_delete(task);
    task = nullptr;
  }

  js_delete(this);
}

//== PromiseHelperTask ====================================================

bool GlobalHelperThreadState::canStartPromiseHelperTask(
    const AutoLockHelperThreadState& lock) {
  // PromiseHelperTasks can be wasm compilation tasks that in turn block on
  // wasm compilation so set isMaster = true.
  return !promiseHelperTasks(lock).empty() &&
         checkTaskThreadLimit(THREAD_TYPE_PROMISE_TASK,
                              maxPromiseHelperThreads(),
                              /*isMaster=*/true, lock);
}

HelperThreadTask* GlobalHelperThreadState::maybeGetPromiseHelperTask(
    const AutoLockHelperThreadState& lock) {
  if (!canStartPromiseHelperTask(lock)) {
    return nullptr;
  }

  return promiseHelperTasks(lock).popCopy();
}

bool GlobalHelperThreadState::submitTask(PromiseHelperTask* task) {
  AutoLockHelperThreadState lock;

  if (!promiseHelperTasks(lock).append(task)) {
    return false;
  }

  dispatch(lock);
  return true;
}

// Synchronous fallback used when no helper threads are available: run the
// task and resolve its promise on the calling thread.
void PromiseHelperTask::executeAndResolveAndDestroy(JSContext* cx) {
  execute();
  run(cx, JS::Dispatchable::NotShuttingDown);
}

void PromiseHelperTask::runHelperThreadTask(AutoLockHelperThreadState& lock) {
  {
    AutoUnlockHelperThreadState unlock(lock);
    execute();
  }

  // Don't release the lock between dispatching the resolve and destroy
  // operation (which may start immediately on another thread) and returning
  // from this method.

  dispatchResolveAndDestroy(lock);
}

bool js::StartOffThreadPromiseHelperTask(JSContext* cx,
                                         UniquePtr<PromiseHelperTask> task) {
  // Execute synchronously if there are no helper threads.
  if (!CanUseExtraThreads()) {
    task.release()->executeAndResolveAndDestroy(cx);
    return true;
  }

  if (!HelperThreadState().submitTask(task.get())) {
    ReportOutOfMemory(cx);
    return false;
  }

  // Submission succeeded: the helper thread now owns the task.
  (void)task.release();
  return true;
}

bool js::StartOffThreadPromiseHelperTask(PromiseHelperTask* task) {
  MOZ_ASSERT(CanUseExtraThreads());

  return HelperThreadState().submitTask(task);
}

//== SourceCompressionTask ================================================

bool GlobalHelperThreadState::canStartCompressionTask(
    const AutoLockHelperThreadState& lock) {
  return !compressionWorklist(lock).empty() &&
         checkTaskThreadLimit(THREAD_TYPE_COMPRESS, maxCompressionThreads(),
                              lock);
}

HelperThreadTask* GlobalHelperThreadState::maybeGetCompressionTask(
    const AutoLockHelperThreadState& lock) {
  if (!canStartCompressionTask(lock)) {
    return nullptr;
  }

  auto& worklist = compressionWorklist(lock);
  UniquePtr<SourceCompressionTask> task = std::move(worklist.back());
  worklist.popBack();
  return task.release();
}

bool GlobalHelperThreadState::submitTask(
    UniquePtr<SourceCompressionTask> task,
    const AutoLockHelperThreadState& locked) {
  if (!compressionWorklist(locked).append(std::move(task))) {
    return false;
  }

  dispatch(locked);
  return true;
}

void GlobalHelperThreadState::createAndSubmitCompressionTasks(
    ScheduleCompressionTask schedule, JSRuntime* rt) {
  // First create the SourceCompressionTasks and add them to a Vector.
  Vector<UniquePtr<SourceCompressionTask>, 8, SystemAllocPolicy> tasksToSubmit;

  // We use some simple heuristics to batch multiple script sources in a single
  // SourceCompressionTask, to reduce overhead for small script sources.
  //
  // MaxBatchLength is the maximum length (in characters) for a single batch.
  // If a single script source exceeds this length, it will get its own
  // SourceCompressionTask.
  //
  // The main downside of increasing the MaxBatchLength threshold is that a
  // large compression task could block a helper thread from taking on higher
  // priority work.
  static constexpr size_t MaxBatchLength = 300'000;
  SourceCompressionTask* currentBatch = nullptr;
  size_t currentBatchLength = 0;

  rt->pendingCompressions().eraseIf([&](const auto& entry) {
    MOZ_ASSERT(entry.source()->hasUncompressedSource());

    // If the script source has no other references then remove it from the
    // vector and don't compress it.
    if (entry.shouldCancel()) {
      return true;
    }

    // If we're starting tasks on a non-shrinking GC, we wait a few major GCs to
    // start compressing in order to avoid immediate compression.
    if (schedule == ScheduleCompressionTask::NonShrinkingGC &&
        rt->gc.majorGCCount() <= entry.majorGCNumber() + 3) {
      return false;
    }

    // Add this entry to the current batch if the total length doesn't exceed
    // MaxBatchLength.
    size_t length = entry.source()->length();
    if (currentBatch && currentBatchLength + length <= MaxBatchLength) {
      if (!currentBatch->addEntry(entry.source())) {
        return false;
      }
      currentBatchLength += length;
      return true;
    }

    // Heap allocate the task. It will be freed upon compression completing in
    // AttachFinishedCompressedSources. On OOM we leave the pending compression
    // in the vector.
    auto ownedTask = MakeUnique<SourceCompressionTask>(rt, entry.source());
    SourceCompressionTask* task = ownedTask.get();
    if (!ownedTask || !tasksToSubmit.append(std::move(ownedTask))) {
      return false;
    }
    // Heuristic: prefer the task with the smallest source length for batching.
    if (!currentBatch || length < currentBatchLength) {
      currentBatch = task;
      currentBatchLength = length;
    }
    return true;
  });
  // Release the vector's storage once it has been fully drained.
  if (rt->pendingCompressions().empty()) {
    rt->pendingCompressions().clearAndFree();
  }

  if (tasksToSubmit.empty()) {
    return;
  }

  AutoLockHelperThreadState lock;
  for (auto& task : tasksToSubmit) {
    // OOMing during appending results in the task not being scheduled and
    // deleted.
    (void)submitTask(std::move(task), lock);
  }
}

// Complete (attach compressed sources for) every finished compression task
// belonging to |runtime|, deleting the tasks as we go.
void js::AttachFinishedCompressions(JSRuntime* runtime,
                                    AutoLockHelperThreadState& lock) {
  auto& finished = HelperThreadState().compressionFinishedList(lock);
  for (size_t i = 0; i < finished.length(); i++) {
    if (finished[i]->runtimeMatches(runtime)) {
      UniquePtr<SourceCompressionTask> compressionTask(std::move(finished[i]));
      HelperThreadState().remove(finished, &i);
      compressionTask->complete();
    }
  }
}

void js::RunPendingSourceCompressions(JSRuntime* runtime) {
  if (!CanUseExtraThreads()) {
    return;
  }

  HelperThreadState().runPendingSourceCompressions(runtime);
}

// Synchronously drain all pending source compressions for |runtime|: submit
// them, wait for the worklist to empty and for running tasks to finish, then
// attach the results.
void GlobalHelperThreadState::runPendingSourceCompressions(JSRuntime* runtime) {
  createAndSubmitCompressionTasks(
      GlobalHelperThreadState::ScheduleCompressionTask::API, runtime);

  // Wait until all tasks have started compression.
  AutoLockHelperThreadState lock;
  while (!compressionWorklist(lock).empty()) {
    wait(lock);
  }

  // Wait for all in-process compression tasks to complete.
  waitForAllTasksLocked(lock);

  AttachFinishedCompressions(runtime, lock);
}

void js::StartOffThreadCompressionsOnGC(JSRuntime* runtime,
                                        bool isShrinkingGC) {
  auto schedule =
      isShrinkingGC
          ? GlobalHelperThreadState::ScheduleCompressionTask::ShrinkingGC
          : GlobalHelperThreadState::ScheduleCompressionTask::NonShrinkingGC;
  HelperThreadState().createAndSubmitCompressionTasks(schedule, runtime);
}

// Drop every task in |list| that belongs to |runtime|.
// NOTE(review): remove() presumably fixes up |i| so the next iteration
// doesn't skip an element — confirm in HelperThreadState.h.
template <typename T>
static void ClearCompressionTaskList(T& list, JSRuntime* runtime) {
  for (size_t i = 0; i < list.length(); i++) {
    if (list[i]->runtimeMatches(runtime)) {
      HelperThreadState().remove(list, &i);
    }
  }
}

void GlobalHelperThreadState::cancelOffThreadCompressions(
    JSRuntime* runtime, AutoLockHelperThreadState& lock) {
  // Cancel all pending compression tasks.
  runtime->pendingCompressions().clearAndFree();
  ClearCompressionTaskList(compressionWorklist(lock), runtime);

  // Cancel all in-process compression tasks and wait for them to join so we
  // clean up the finished tasks.
  while (true) {
    bool inProgress = false;
    for (auto* helper : helperTasks(lock)) {
      if (!helper->is<SourceCompressionTask>()) {
        continue;
      }

      if (helper->as<SourceCompressionTask>()->runtimeMatches(runtime)) {
        inProgress = true;
      }
    }

    if (!inProgress) {
      break;
    }

    wait(lock);
  }

  // Clean up finished tasks.
  ClearCompressionTaskList(compressionFinishedList(lock), runtime);
}

void js::CancelOffThreadCompressions(JSRuntime* runtime) {
  if (!CanUseExtraThreads()) {
    return;
  }

  AutoLockHelperThreadState lock;
  HelperThreadState().cancelOffThreadCompressions(runtime, lock);
}

//== GCParallelTask =======================================================

bool GlobalHelperThreadState::canStartGCParallelTask(
    const AutoLockHelperThreadState& lock) {
  return !gcParallelWorklist().isEmpty(lock) &&
         checkTaskThreadLimit(THREAD_TYPE_GCPARALLEL, maxGCParallelThreads(),
                              lock);
}

HelperThreadTask* GlobalHelperThreadState::maybeGetGCParallelTask(
    const AutoLockHelperThreadState& lock) {
  if (!canStartGCParallelTask(lock)) {
    return nullptr;
  }

  return gcParallelWorklist().popFirst(lock);
}

bool GlobalHelperThreadState::submitTask(
    GCParallelTask* task, const AutoLockHelperThreadState& locked) {
  gcParallelWorklist().insertBack(task, locked);
  dispatch(locked);
  return true;
}

///////////////////////////////////////////////////////////////////////////
//                                                                       //
// Wasm task definitions                                                 //
//                                                                       //
///////////////////////////////////////////////////////////////////////////

//== WasmTier1CompileTask =================================================

HelperThreadTask* GlobalHelperThreadState::maybeGetWasmTier1CompileTask(
    const AutoLockHelperThreadState& lock) {
  return maybeGetWasmCompile(lock, wasm::CompileState::EagerTier1);
}

bool GlobalHelperThreadState::canStartWasmTier1CompileTask(
    const AutoLockHelperThreadState& lock) {
  return canStartWasmCompile(lock, wasm::CompileState::EagerTier1);
}

//== WasmTier2CompileTask =================================================

HelperThreadTask*
GlobalHelperThreadState::maybeGetWasmTier2CompileTask( 1829 const AutoLockHelperThreadState& lock) { 1830 return maybeGetWasmCompile(lock, wasm::CompileState::EagerTier2); 1831 } 1832 1833 bool GlobalHelperThreadState::canStartWasmTier2CompileTask( 1834 const AutoLockHelperThreadState& lock) { 1835 return canStartWasmCompile(lock, wasm::CompileState::EagerTier2); 1836 } 1837 1838 //== WasmCompleteTier2GeneratorTask ======================================= 1839 1840 bool GlobalHelperThreadState::canStartWasmCompleteTier2GeneratorTask( 1841 const AutoLockHelperThreadState& lock) { 1842 return !wasmCompleteTier2GeneratorWorklist(lock).empty() && 1843 checkTaskThreadLimit(THREAD_TYPE_WASM_GENERATOR_COMPLETE_TIER2, 1844 maxWasmCompleteTier2GeneratorThreads(), 1845 /*isMaster=*/true, lock); 1846 } 1847 1848 HelperThreadTask* 1849 GlobalHelperThreadState::maybeGetWasmCompleteTier2GeneratorTask( 1850 const AutoLockHelperThreadState& lock) { 1851 if (!canStartWasmCompleteTier2GeneratorTask(lock)) { 1852 return nullptr; 1853 } 1854 1855 return wasmCompleteTier2GeneratorWorklist(lock).popCopy(); 1856 } 1857 1858 bool GlobalHelperThreadState::submitTask( 1859 wasm::UniqueCompleteTier2GeneratorTask task) { 1860 AutoLockHelperThreadState lock; 1861 1862 MOZ_ASSERT(isInitialized(lock)); 1863 1864 if (!wasmCompleteTier2GeneratorWorklist(lock).append(task.get())) { 1865 return false; 1866 } 1867 (void)task.release(); 1868 1869 dispatch(lock); 1870 return true; 1871 } 1872 1873 void js::StartOffThreadWasmCompleteTier2Generator( 1874 wasm::UniqueCompleteTier2GeneratorTask task) { 1875 (void)HelperThreadState().submitTask(std::move(task)); 1876 } 1877 1878 void GlobalHelperThreadState::cancelOffThreadWasmCompleteTier2Generator( 1879 AutoLockHelperThreadState& lock) { 1880 // Remove pending tasks from the tier2 generator worklist and cancel and 1881 // delete them. 
1882 { 1883 wasm::CompleteTier2GeneratorTaskPtrVector& worklist = 1884 wasmCompleteTier2GeneratorWorklist(lock); 1885 for (size_t i = 0; i < worklist.length(); i++) { 1886 wasm::CompleteTier2GeneratorTask* task = worklist[i]; 1887 remove(worklist, &i); 1888 js_delete(task); 1889 } 1890 } 1891 1892 // There is at most one running CompleteTier2Generator task and we assume that 1893 // below. 1894 static_assert(GlobalHelperThreadState::MaxCompleteTier2GeneratorTasks == 1, 1895 "code must be generalized"); 1896 1897 // If there is a running Tier2 generator task, shut it down in a predictable 1898 // way. The task will be deleted by the normal deletion logic. 1899 for (auto* helper : helperTasks(lock)) { 1900 if (helper->is<wasm::CompleteTier2GeneratorTask>()) { 1901 // Set a flag that causes compilation to shortcut itself. 1902 helper->as<wasm::CompleteTier2GeneratorTask>()->cancel(); 1903 1904 // Wait for the generator task to finish. This avoids a shutdown race 1905 // where the shutdown code is trying to shut down helper threads and the 1906 // ongoing tier2 compilation is trying to finish, which requires it to 1907 // have access to helper threads. 1908 uint32_t oldFinishedCount = wasmCompleteTier2GeneratorsFinished(lock); 1909 while (wasmCompleteTier2GeneratorsFinished(lock) == oldFinishedCount) { 1910 wait(lock); 1911 } 1912 1913 // At most one of these tasks. 
1914 break; 1915 } 1916 } 1917 } 1918 1919 static void CancelOffThreadWasmCompleteTier2GeneratorLocked( 1920 AutoLockHelperThreadState& lock) { 1921 if (!HelperThreadState().isInitialized(lock)) { 1922 return; 1923 } 1924 1925 HelperThreadState().cancelOffThreadWasmCompleteTier2Generator(lock); 1926 } 1927 1928 void js::CancelOffThreadWasmCompleteTier2Generator() { 1929 AutoLockHelperThreadState lock; 1930 CancelOffThreadWasmCompleteTier2GeneratorLocked(lock); 1931 } 1932 1933 //== WasmPartialTier2CompileTask ========================================== 1934 1935 bool GlobalHelperThreadState::canStartWasmPartialTier2CompileTask( 1936 const AutoLockHelperThreadState& lock) { 1937 size_t maxThreads = maxWasmPartialTier2CompileThreads(); 1938 // Avoid assertion failure in checkTaskThreadLimit(). 1939 if (maxThreads > threadCount) { 1940 maxThreads = threadCount; 1941 } 1942 return !wasmPartialTier2CompileWorklist(lock).empty() && 1943 checkTaskThreadLimit(THREAD_TYPE_WASM_COMPILE_PARTIAL_TIER2, 1944 maxThreads, /*isMaster=*/false, lock); 1945 } 1946 1947 HelperThreadTask* GlobalHelperThreadState::maybeGetWasmPartialTier2CompileTask( 1948 const AutoLockHelperThreadState& lock) { 1949 if (!canStartWasmPartialTier2CompileTask(lock)) { 1950 return nullptr; 1951 } 1952 1953 // Take the task at the start of the vector and slide the rest down. The 1954 // vector is almost always small (fewer than 50 items) and most of the time 1955 // has only one item, so this isn't a big expense. 
  wasm::PartialTier2CompileTaskPtrVector& worklist =
      wasmPartialTier2CompileWorklist(lock);
  MOZ_ASSERT(!worklist.empty());
  HelperThreadTask* task = worklist[0];
  // erase() shifts the remaining entries down, preserving FIFO order.
  worklist.erase(worklist.begin());
  return task;
}

// Queues a partial tier-2 compile task and dispatches helper-thread work.
// On success the worklist takes ownership of the raw pointer (the UniquePtr
// is released); on append failure (OOM) this returns false and the UniquePtr
// still owns — and will delete — the task.
bool GlobalHelperThreadState::submitTask(
    wasm::UniquePartialTier2CompileTask task) {
  AutoLockHelperThreadState lock;

  MOZ_ASSERT(isInitialized(lock));

  wasm::PartialTier2CompileTaskPtrVector& workList =
      wasmPartialTier2CompileWorklist(lock);

  // Put the new task at the end of the vector.
  // ::maybeGetWasmPartialTier2CompileTask pulls tasks from the front of the
  // vector, hence giving FIFO behaviour.
  if (!workList.append(task.get())) {
    return false;
  }
  // The worklist now owns the pointer; release the UniquePtr so the task is
  // not double-deleted.
  (void)task.release();

  dispatch(lock);
  return true;
}

// Fire-and-forget entry point: a failed submission (OOM) is deliberately
// ignored, in which case the task is dropped (freed by the UniquePtr).
void js::StartOffThreadWasmPartialTier2Compile(
    wasm::UniquePartialTier2CompileTask task) {
  (void)HelperThreadState().submitTask(std::move(task));
}

// Cancels all pending and in-flight partial tier-2 compile tasks, blocking
// until no such task remains in the running set.
void GlobalHelperThreadState::cancelOffThreadWasmPartialTier2Compile(
    AutoLockHelperThreadState& lock) {
  // Remove pending tasks from the partial tier2 compilation worklist and
  // cancel and delete them.
  wasm::PartialTier2CompileTaskPtrVector& worklist =
      wasmPartialTier2CompileWorklist(lock);
  for (size_t i = 0; i < worklist.length(); i++) {
    wasm::PartialTier2CompileTask* task = worklist[i];
    // remove() takes the index by pointer — it appears to back `i` up so no
    // entry is skipped; NOTE(review): confirm against
    // GlobalHelperThreadState::remove.
    remove(worklist, &i);
    js_delete(task);
  }

  // And remove running partial tier2 compilation tasks. They will be deleted
  // by the normal deletion logic (in
  // PartialTier2CompileTaskImpl::runHelperThreadTask).
  bool anyCancelled;
  do {
    anyCancelled = false;
    for (auto* helper : helperTasks(lock)) {
      if (!helper->is<wasm::PartialTier2CompileTask>()) {
        continue;
      }
      wasm::PartialTier2CompileTask* pt2CompileTask =
          helper->as<wasm::PartialTier2CompileTask>();
      // cancel() may be called more than once for a task that is slow to
      // retire; the loop only exits once no matching task is still running.
      pt2CompileTask->cancel();
      anyCancelled = true;
    }
    if (anyCancelled) {
      // Block until a helper thread signals, then re-scan the running set.
      wait(lock);
    }
  } while (anyCancelled);
}

// Cancels partial tier-2 compiles, but only if the helper-thread system has
// been initialized; otherwise this is a no-op.
static void CancelOffThreadWasmPartialTier2CompileLocked(
    AutoLockHelperThreadState& lock) {
  if (!HelperThreadState().isInitialized(lock)) {
    return;
  }

  HelperThreadState().cancelOffThreadWasmPartialTier2Compile(lock);
}

// Public entry point: acquires the helper-thread lock, then cancels all
// pending and running wasm partial tier-2 compilation work.
void js::CancelOffThreadWasmPartialTier2Compile() {
  AutoLockHelperThreadState lock;
  CancelOffThreadWasmPartialTier2CompileLocked(lock);
}

//== wasm task management =================================================

// Returns true if a wasm compile task for `state` may start now: the
// worklist must be non-empty and there must be thread budget for the tier.
bool GlobalHelperThreadState::canStartWasmCompile(
    const AutoLockHelperThreadState& lock, wasm::CompileState state) {
  if (wasmWorklist(lock, state).empty()) {
    return false;
  }

  // Parallel compilation and background compilation should be disabled on
  // unicore systems.

  MOZ_RELEASE_ASSERT(cpuCount > 1);

  // If CompleteTier2 is very backlogged we must give priority to it, since the
  // CompleteTier2 queue holds onto Tier1 tasks. Indeed if CompleteTier2 is
  // backlogged we will devote more resources to CompleteTier2 and not start
  // any Tier1 work at all.

  bool completeTier2oversubscribed =
      wasmCompleteTier2GeneratorWorklist(lock).length() > 20;

  // For Tier1 and Once compilation, honor the maximum allowed threads to
  // compile wasm jobs at once, to avoid oversaturating the machine.
  //
  // For CompleteTier2 compilation we need to allow other things to happen too,
  // so we do not allow all logical cores to be used for background work;
  // instead we wish to use a fraction of the physical cores. We can't
  // directly compute the physical cores from the logical cores, but 1/3 of the
  // logical cores is a safe estimate for the number of physical cores
  // available for background work.

  size_t physCoresAvailable = size_t(ceil(cpuCount / 3.0));

  size_t threads;
  ThreadType threadType;
  if (state == wasm::CompileState::EagerTier2) {
    if (completeTier2oversubscribed) {
      // Backlogged: let CompleteTier2 use the full compilation thread budget.
      threads = maxWasmCompilationThreads();
    } else {
      threads = physCoresAvailable;
    }
    threadType = THREAD_TYPE_WASM_COMPILE_TIER2;
  } else {
    if (completeTier2oversubscribed) {
      // Starve Tier1/Once entirely so CompleteTier2 can catch up.
      threads = 0;
    } else {
      threads = maxWasmCompilationThreads();
    }
    threadType = THREAD_TYPE_WASM_COMPILE_TIER1;
  }

  return threads != 0 && checkTaskThreadLimit(threadType, threads, lock);
}

// Dequeues the next wasm compile task for `state`, or returns nullptr if no
// task may be started right now.
HelperThreadTask* GlobalHelperThreadState::maybeGetWasmCompile(
    const AutoLockHelperThreadState& lock, wasm::CompileState state) {
  if (!canStartWasmCompile(lock, state)) {
    return nullptr;
  }

  return wasmWorklist(lock, state).popCopyFront();
}

// Erases every queued (not-yet-running) compile task belonging to
// `taskState`, matching by the identity (address) of the shared
// CompileTaskState, and returns how many tasks were removed.
size_t js::RemovePendingWasmCompileTasks(
    const wasm::CompileTaskState& taskState, wasm::CompileState state,
    const AutoLockHelperThreadState& lock) {
  wasm::CompileTaskPtrFifo& worklist =
      HelperThreadState().wasmWorklist(lock, state);
  return worklist.eraseIf([&taskState](wasm::CompileTask* task) {
    return &task->state == &taskState;
  });
}

// Appends `task` to the worklist for `state` and dispatches helper-thread
// work. Returns false if the push fails (OOM); in that case the caller
// retains ownership of `task`.
bool GlobalHelperThreadState::submitTask(wasm::CompileTask* task,
                                         wasm::CompileState state) {
  AutoLockHelperThreadState lock;
  if (!wasmWorklist(lock, state).pushBack(task)) {
    return false;
  }

  dispatch(lock);
  return true;
}

// Public entry point for starting an off-thread wasm compilation; thin
// wrapper over GlobalHelperThreadState::submitTask above.
bool js::StartOffThreadWasmCompile(wasm::CompileTask* task,
                                   wasm::CompileState state) {
  return HelperThreadState().submitTask(task, state);
}