Nursery.cpp (81850B)
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- 2 * vim: set ts=8 sw=2 et tw=80: 3 * 4 * This Source Code Form is subject to the terms of the Mozilla Public 5 * License, v. 2.0. If a copy of the MPL was not distributed with this file, 6 * You can obtain one at http://mozilla.org/MPL/2.0/. */ 7 8 #include "gc/Nursery-inl.h" 9 10 #include "mozilla/DebugOnly.h" 11 #include "mozilla/IntegerPrintfMacros.h" 12 #include "mozilla/Sprintf.h" 13 #include "mozilla/TimeStamp.h" 14 15 #include <algorithm> 16 #include <cmath> 17 #include <utility> 18 19 #include "builtin/MapObject.h" 20 #include "debugger/DebugAPI.h" 21 #include "gc/Allocator.h" 22 #include "gc/GCInternals.h" 23 #include "gc/GCLock.h" 24 #include "gc/GCParallelTask.h" 25 #include "gc/Memory.h" 26 #include "gc/PublicIterators.h" 27 #include "gc/Tenuring.h" 28 #include "jit/JitFrames.h" 29 #include "jit/JitZone.h" 30 #include "js/Printer.h" 31 #include "util/DifferentialTesting.h" 32 #include "util/GetPidProvider.h" // getpid() 33 #include "util/Poison.h" 34 #include "vm/JSONPrinter.h" 35 #include "vm/Logging.h" 36 #include "vm/Realm.h" 37 #include "vm/Time.h" 38 39 #include "gc/BufferAllocator-inl.h" 40 #include "gc/Heap-inl.h" 41 #include "gc/Marking-inl.h" 42 #include "gc/StableCellHasher-inl.h" 43 #include "gc/StoreBuffer-inl.h" 44 #include "vm/GeckoProfiler-inl.h" 45 46 using namespace js; 47 using namespace js::gc; 48 49 using mozilla::DebugOnly; 50 using mozilla::PodCopy; 51 using mozilla::TimeDuration; 52 using mozilla::TimeStamp; 53 54 namespace js { 55 56 static constexpr size_t NurseryChunkHeaderSize = 57 RoundUp(sizeof(ChunkBase), CellAlignBytes); 58 59 // The amount of space in a nursery chunk available to allocations. 
static constexpr size_t NurseryChunkUsableSize =
    ChunkSize - NurseryChunkHeaderSize;

// A chunk of nursery memory: a ChunkBase header followed by a cell-aligned
// data area filling the rest of the chunk.
struct NurseryChunk : public ChunkBase {
  alignas(CellAlignBytes) uint8_t data[NurseryChunkUsableSize];

  // Placement-construct a NurseryChunk over the memory of an existing
  // ArenaChunk.
  static NurseryChunk* fromChunk(ArenaChunk* chunk, ChunkKind kind,
                                 uint8_t index);

  explicit NurseryChunk(JSRuntime* runtime, ChunkKind kind, uint8_t chunkIndex)
      : ChunkBase(runtime, &runtime->gc.storeBuffer(), kind, chunkIndex) {}

  // Poison the byte range [start, end) of this chunk with |value|, updating
  // memory sanitizer state according to |checkKind|.
  void poisonRange(size_t start, size_t end, uint8_t value,
                   MemCheckKind checkKind);
  void poisonAfterEvict(size_t extent = ChunkSize);

  // Mark pages from startOffset to the end of the chunk as unused. The start
  // offset must be after the first page, which contains the chunk header and is
  // not marked as unused.
  void markPagesUnusedHard(size_t startOffset);

  // Mark pages from startOffset to endOffset as in use, undoing the effect of a
  // previous call to markPagesUnusedHard.
  [[nodiscard]] bool markPagesInUseHard(size_t startOffset, size_t endOffset);

  uintptr_t start() const { return uintptr_t(&data); }
  uintptr_t end() const { return uintptr_t(this) + ChunkSize; }
};
static_assert(sizeof(NurseryChunk) == ChunkSize,
              "Nursery chunk size must match Chunk size.");
static_assert(offsetof(NurseryChunk, data) == NurseryChunkHeaderSize);

// Parallel task that sweeps queued BufferAllocators after a minor collection.
class NurserySweepTask : public GCParallelTask {
  SlimLinkedList<BufferAllocator> allocatorsToSweep;

 public:
  explicit NurserySweepTask(gc::GCRuntime* gc)
      : GCParallelTask(gc, gcstats::PhaseKind::NONE) {}

  bool isEmpty(AutoLockHelperThreadState& lock) const {
    return allocatorsToSweep.isEmpty();
  }

  void queueAllocatorToSweep(BufferAllocator& allocator) {
    MOZ_ASSERT(isIdle());
    allocatorsToSweep.pushBack(&allocator);
  }

 private:
  void run(AutoLockHelperThreadState& lock) override;
};

// Parallel task that recycles whole nursery chunks and decommits unused page
// ranges at the end of chunks, off the main thread.
class NurseryDecommitTask : public GCParallelTask {
 public:
  explicit NurseryDecommitTask(gc::GCRuntime* gc);
  bool reserveSpaceForChunks(size_t nchunks);

  bool isEmpty(const AutoLockHelperThreadState& lock) const;

  void queueChunk(NurseryChunk* chunk, const AutoLockHelperThreadState& lock);
  void queueRange(size_t newCapacity, NurseryChunk* chunk,
                  const AutoLockHelperThreadState& lock);

 private:
  // A tail region of a chunk, from startOffset to the end of the chunk.
  struct Region {
    NurseryChunk* chunk;
    size_t startOffset;
  };

  using NurseryChunkVector = Vector<NurseryChunk*, 0, SystemAllocPolicy>;
  using RegionVector = Vector<Region, 2, SystemAllocPolicy>;

  void run(AutoLockHelperThreadState& lock) override;

  NurseryChunkVector& chunksToDecommit() { return chunksToDecommit_.ref(); }
  const NurseryChunkVector& chunksToDecommit() const {
    return chunksToDecommit_.ref();
  }
  RegionVector& regionsToDecommit() { return regionsToDecommit_.ref(); }
  const RegionVector& regionsToDecommit() const {
    return regionsToDecommit_.ref();
  }

  MainThreadOrGCTaskData<NurseryChunkVector> chunksToDecommit_;
  MainThreadOrGCTaskData<RegionVector> regionsToDecommit_;
};

}  // namespace js

inline void js::NurseryChunk::poisonRange(size_t start, size_t end,
                                          uint8_t value,
                                          MemCheckKind checkKind) {
  MOZ_ASSERT(start >= NurseryChunkHeaderSize);
  MOZ_ASSERT((start % gc::CellAlignBytes) == 0);
  MOZ_ASSERT((end % gc::CellAlignBytes) == 0);
  MOZ_ASSERT(end >= start);
  MOZ_ASSERT(end <= ChunkSize);

  auto* ptr = reinterpret_cast<uint8_t*>(this) + start;
  size_t size = end - start;

  // We can poison the same chunk more than once, so first make sure memory
  // sanitizers will let us poison it.
  MOZ_MAKE_MEM_UNDEFINED(ptr, size);
  Poison(ptr, value, size, checkKind);
}

inline void js::NurseryChunk::poisonAfterEvict(size_t extent) {
  poisonRange(NurseryChunkHeaderSize, extent, JS_SWEPT_NURSERY_PATTERN,
              MemCheckKind::MakeNoAccess);
}

inline void js::NurseryChunk::markPagesUnusedHard(size_t startOffset) {
  MOZ_ASSERT(startOffset >= NurseryChunkHeaderSize);  // Don't touch the header.
  MOZ_ASSERT(startOffset >= SystemPageSize());
  MOZ_ASSERT(startOffset <= ChunkSize);
  uintptr_t start = uintptr_t(this) + startOffset;
  size_t length = ChunkSize - startOffset;
  MarkPagesUnusedHard(reinterpret_cast<void*>(start), length);
}

inline bool js::NurseryChunk::markPagesInUseHard(size_t startOffset,
                                                 size_t endOffset) {
  MOZ_ASSERT(startOffset >= NurseryChunkHeaderSize);
  MOZ_ASSERT(startOffset >= SystemPageSize());
  MOZ_ASSERT(startOffset < endOffset);
  MOZ_ASSERT(endOffset <= ChunkSize);
  uintptr_t start = uintptr_t(this) + startOffset;
  size_t length = endOffset - startOffset;
  return MarkPagesInUseHard(reinterpret_cast<void*>(start), length);
}

// static
inline js::NurseryChunk* js::NurseryChunk::fromChunk(ArenaChunk* chunk,
                                                     ChunkKind kind,
                                                     uint8_t index) {
  return new (chunk) NurseryChunk(chunk->runtime, kind, index);
}

void js::NurserySweepTask::run(AutoLockHelperThreadState& lock) {
  // Take ownership of the queued list under the lock, then sweep each
  // allocator with the lock released.
  SlimLinkedList<BufferAllocator> allocators;
  std::swap(allocators, allocatorsToSweep);
  AutoUnlockHelperThreadState unlock(lock);

  while (!allocators.isEmpty()) {
    BufferAllocator* allocator = allocators.popFirst();
    allocator->sweepForMinorCollection();
  }
}
js::NurseryDecommitTask::NurseryDecommitTask(gc::GCRuntime* gc)
    : GCParallelTask(gc, gcstats::PhaseKind::NONE) {
  // This can occur outside GCs so doesn't have a stats phase.
  MOZ_ALWAYS_TRUE(regionsToDecommit().reserve(2));
}

bool js::NurseryDecommitTask::isEmpty(
    const AutoLockHelperThreadState& lock) const {
  return chunksToDecommit().empty() && regionsToDecommit().empty();
}

bool js::NurseryDecommitTask::reserveSpaceForChunks(size_t nchunks) {
  MOZ_ASSERT(isIdle());
  return chunksToDecommit().reserve(nchunks);
}

void js::NurseryDecommitTask::queueChunk(
    NurseryChunk* chunk, const AutoLockHelperThreadState& lock) {
  MOZ_ASSERT(isIdle(lock));
  // Space was reserved by reserveSpaceForChunks, so append cannot fail.
  MOZ_ALWAYS_TRUE(chunksToDecommit().append(chunk));
}

void js::NurseryDecommitTask::queueRange(
    size_t newCapacity, NurseryChunk* chunk,
    const AutoLockHelperThreadState& lock) {
  MOZ_ASSERT(isIdle(lock));
  MOZ_ASSERT(regionsToDecommit_.ref().length() < 2);
  MOZ_ASSERT(newCapacity < ChunkSize);
  MOZ_ASSERT(newCapacity % SystemPageSize() == 0);

  // The chunk tail from newCapacity to the end will be decommitted.
  regionsToDecommit().infallibleAppend(Region{chunk, newCapacity});
}

void js::NurseryDecommitTask::run(AutoLockHelperThreadState& lock) {
  while (!chunksToDecommit().empty()) {
    NurseryChunk* nurseryChunk = chunksToDecommit().popCopy();
    // Release the helper thread lock while doing the expensive work.
    AutoUnlockHelperThreadState unlock(lock);
    nurseryChunk->~NurseryChunk();
    ArenaChunk* tenuredChunk =
        ArenaChunk::init(nurseryChunk, gc, /* allMemoryCommitted = */ false);
    AutoLockGC lock(gc);
    gc->recycleChunk(tenuredChunk, lock);
  }

  while (!regionsToDecommit().empty()) {
    Region region = regionsToDecommit().popCopy();
    AutoUnlockHelperThreadState unlock(lock);
    region.chunk->markPagesUnusedHard(region.startOffset);
  }
}

js::Nursery::Nursery(GCRuntime* gc)
    : toSpace(ChunkKind::NurseryToSpace),
      fromSpace(ChunkKind::NurseryFromSpace),
      gc(gc),
      capacity_(0),
      enableProfiling_(false),
      semispaceEnabled_(gc::TuningDefaults::SemispaceNurseryEnabled),
      canAllocateStrings_(true),
      canAllocateBigInts_(true),
      reportDeduplications_(false),
      minorGCTriggerReason_(JS::GCReason::NO_REASON),
      prevPosition_(0),
      hasRecentGrowthData(false),
      smoothedTargetSize(0.0) {
  // Try to keep fields used by allocation fast path together at the start of
  // the nursery.
  static_assert(offsetof(Nursery, toSpace.position_) < TypicalCacheLineSize);
  static_assert(offsetof(Nursery, toSpace.currentEnd_) < TypicalCacheLineSize);

  // Environment overrides for nursery string/bigint allocation; any value
  // other than '1' disables the feature.
  const char* env = getenv("MOZ_NURSERY_STRINGS");
  if (env && *env) {
    canAllocateStrings_ = (*env == '1');
  }
  env = getenv("MOZ_NURSERY_BIGINTS");
  if (env && *env) {
    canAllocateBigInts_ = (*env == '1');
  }
}

// Print a help/error message and terminate the process.
static void PrintAndExit(const char* message) {
  fprintf(stderr, "%s", message);
  exit(0);
}

// Read an environment variable, printing |helpMessage| and exiting if its
// value is "help". Returns nullptr if the variable is unset.
static const char* GetEnvVar(const char* name, const char* helpMessage) {
  const char* value = getenv(name);
  if (!value) {
    return nullptr;
  }

  if (strcmp(value, "help") == 0) {
    PrintAndExit(helpMessage);
  }

  return value;
}

static bool GetBoolEnvVar(const char* name, const char* helpMessage) {
  const char* env = GetEnvVar(name, helpMessage);
  return env && bool(atoi(env));
}

// Parse a pretenuring report filter from the environment; prints the help
// message and exits if the filter doesn't parse.
static void ReadReportPretenureEnv(const char* name, const char* helpMessage,
                                   AllocSiteFilter* filter) {
  const char* env = GetEnvVar(name, helpMessage);
  if (!env) {
    return;
  }

  if (!AllocSiteFilter::readFromString(env, filter)) {
    PrintAndExit(helpMessage);
  }
}

bool js::Nursery::init(AutoLockGCBgAlloc& lock) {
  // Read environment-variable based configuration, then create the background
  // tasks, enable the store buffer and allocate the first chunk.
  ReadProfileEnv("JS_GC_PROFILE_NURSERY",
                 "Report minor GCs taking at least N microseconds.\n",
                 &enableProfiling_, &profileWorkers_, &profileThreshold_);

  reportDeduplications_ = GetBoolEnvVar(
      "JS_GC_REPORT_STATS",
      "JS_GC_REPORT_STATS=1\n"
      "\tAfter a minor GC, report how many strings were deduplicated.\n");

#ifdef JS_GC_ZEAL
  reportPromotion_ = GetBoolEnvVar(
      "JS_GC_REPORT_PROMOTE",
      "JS_GC_REPORT_PROMOTE=1\n"
      "\tAfter a minor GC, report what kinds of things were promoted.\n");
#endif

  ReadReportPretenureEnv(
      "JS_GC_REPORT_PRETENURE",
      "JS_GC_REPORT_PRETENURE=FILTER\n"
      "\tAfter a minor GC, report information about pretenuring, including\n"
      "\tallocation sites which match the filter specification. This is comma\n"
      "\tseparated list of one or more elements which can include:\n"
      "\t\tinteger N: report sites with at least N allocations\n"
      "\t\t'normal': report normal sites used for pretenuring\n"
      "\t\t'unknown': report catch-all sites for allocations without a\n"
      "\t\t specific site associated with them\n"
      "\t\t'optimized': report catch-all sites for allocations from\n"
      "\t\t optimized JIT code\n"
      "\t\t'missing': report automatically generated missing sites\n"
      "\t\t'object': report sites associated with JS objects\n"
      "\t\t'string': report sites associated with JS strings\n"
      "\t\t'bigint': report sites associated with JS big ints\n"
      "\t\t'longlived': report sites in the LongLived state (ignored for\n"
      "\t\t catch-all sites)\n"
      "\t\t'shortlived': report sites in the ShortLived state (ignored for\n"
      "\t\t catch-all sites)\n"
      "\tFilters of the same kind are combined with OR and of different kinds\n"
      "\twith AND. Prefixes of the keywords above are accepted.\n",
      &pretenuringReportFilter_);

  sweepTask = MakeUnique<NurserySweepTask>(gc);
  if (!sweepTask) {
    return false;
  }

  decommitTask = MakeUnique<NurseryDecommitTask>(gc);
  if (!decommitTask) {
    return false;
  }

  if (!gc->storeBuffer().enable()) {
    return false;
  }

  return initFirstChunk(lock);
}

js::Nursery::~Nursery() { disable(); }

void js::Nursery::enable() {
  MOZ_ASSERT(TlsContext.get()->generationalDisabled == 0);

  if (isEnabled()) {
    return;
  }

  MOZ_ASSERT(isEmpty());
  MOZ_ASSERT(!gc->isVerifyPreBarriersEnabled());

  {
    AutoLockGCBgAlloc lock(gc);
    if (!initFirstChunk(lock)) {
      // If we fail to allocate memory, the nursery will not be enabled.
      return;
    }
  }

#ifdef JS_GC_ZEAL
  if (gc->hasZealMode(ZealMode::GenerationalGC)) {
    enterZealMode();
  }
#endif

  updateAllZoneAllocFlags();

  // This should always succeed after the first time it's called.
  MOZ_ALWAYS_TRUE(gc->storeBuffer().enable());
}
bool js::Nursery::initFirstChunk(AutoLockGCBgAlloc& lock) {
  MOZ_ASSERT(!isEnabled());
  MOZ_ASSERT(toSpace.chunks_.length() == 0);
  MOZ_ASSERT(fromSpace.chunks_.length() == 0);

  setCapacity(minSpaceSize());

  size_t nchunks = toSpace.maxChunkCount_ + fromSpace.maxChunkCount_;
  if (!decommitTask->reserveSpaceForChunks(nchunks) ||
      !allocateNextChunk(lock)) {
    // On failure, leave the nursery with zero capacity and both spaces empty.
    setCapacity(0);
    MOZ_ASSERT(toSpace.isEmpty());
    MOZ_ASSERT(fromSpace.isEmpty());
    return false;
  }

  toSpace.moveToStartOfChunk(this, 0);
  toSpace.setStartToCurrentPosition();

  if (semispaceEnabled_) {
    fromSpace.moveToStartOfChunk(this, 0);
    fromSpace.setStartToCurrentPosition();
  }

  MOZ_ASSERT(toSpace.isEmpty());
  MOZ_ASSERT(fromSpace.isEmpty());

  poisonAndInitCurrentChunk();

  // Clear any information about previous collections.
  clearRecentGrowthData();

  tenureThreshold_ = 0;

#ifdef DEBUG
  toSpace.checkKind(ChunkKind::NurseryToSpace);
  fromSpace.checkKind(ChunkKind::NurseryFromSpace);
#endif

  return true;
}

// Number of chunks needed for a nursery space of |nbytes|. Sub-chunk
// capacities still require one chunk.
size_t RequiredChunkCount(size_t nbytes) {
  return nbytes <= ChunkSize ? 1 : nbytes / ChunkSize;
}

void js::Nursery::setCapacity(size_t newCapacity) {
  MOZ_ASSERT(newCapacity == roundSize(newCapacity));
  capacity_ = newCapacity;
  size_t count = RequiredChunkCount(newCapacity);
  toSpace.maxChunkCount_ = count;
  if (semispaceEnabled_) {
    fromSpace.maxChunkCount_ = count;
  }
}

void js::Nursery::disable() {
  MOZ_ASSERT(isEmpty());
  if (!isEnabled()) {
    return;
  }

  // Wait for any background tasks.
  sweepTask->join();
  decommitTask->join();

  // Free all chunks.
  freeChunksFrom(toSpace, 0);
  freeChunksFrom(fromSpace, 0);
  decommitTask->runFromMainThread();

  setCapacity(0);

  // We must reset currentEnd_ so that there is no space for anything in the
  // nursery. JIT'd code uses this even if the nursery is disabled.
  toSpace = Space(ChunkKind::NurseryToSpace);
  fromSpace = Space(ChunkKind::NurseryFromSpace);
  MOZ_ASSERT(toSpace.isEmpty());
  MOZ_ASSERT(fromSpace.isEmpty());

  gc->storeBuffer().disable();

  if (gc->wasInitialized()) {
    // This assumes there is an atoms zone.
    updateAllZoneAllocFlags();
  }
}

void js::Nursery::enableStrings() {
  MOZ_ASSERT(isEmpty());
  canAllocateStrings_ = true;
  updateAllZoneAllocFlags();
}

void js::Nursery::disableStrings() {
  MOZ_ASSERT(isEmpty());
  canAllocateStrings_ = false;
  updateAllZoneAllocFlags();
}

void js::Nursery::enableBigInts() {
  MOZ_ASSERT(isEmpty());
  canAllocateBigInts_ = true;
  updateAllZoneAllocFlags();
}

void js::Nursery::disableBigInts() {
  MOZ_ASSERT(isEmpty());
  canAllocateBigInts_ = false;
  updateAllZoneAllocFlags();
}
void js::Nursery::updateAllZoneAllocFlags() {
  // The alloc flags are not relevant for the atoms zone, and flushing
  // jit-related information can be problematic for the atoms zone.
  for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
    updateAllocFlagsForZone(zone);
  }
}

// Compute the nursery-allocation flags a zone should have, based on whether
// the nursery is enabled and on per-runtime/per-zone disable switches.
void js::Nursery::getAllocFlagsForZone(JS::Zone* zone, bool* allocObjectsOut,
                                       bool* allocStringsOut,
                                       bool* allocBigIntsOut,
                                       bool* allocGetterSettersOut) {
  *allocObjectsOut = isEnabled();
  *allocStringsOut =
      isEnabled() && canAllocateStrings() && !zone->nurseryStringsDisabled;
  *allocBigIntsOut =
      isEnabled() && canAllocateBigInts() && !zone->nurseryBigIntsDisabled;
  *allocGetterSettersOut = isEnabled();
}

void js::Nursery::setAllocFlagsForZone(JS::Zone* zone) {
  bool allocObjects;
  bool allocStrings;
  bool allocBigInts;
  bool allocGetterSetters;

  getAllocFlagsForZone(zone, &allocObjects, &allocStrings, &allocBigInts,
                       &allocGetterSetters);
  zone->setNurseryAllocFlags(allocObjects, allocStrings, allocBigInts,
                             allocGetterSetters);
}

void js::Nursery::updateAllocFlagsForZone(JS::Zone* zone) {
  bool allocObjects;
  bool allocStrings;
  bool allocBigInts;
  bool allocGetterSetters;

  getAllocFlagsForZone(zone, &allocObjects, &allocStrings, &allocBigInts,
                       &allocGetterSetters);

  // Only discard JIT code if a flag actually changed; the generated code
  // bakes in the current flags.
  if (allocObjects != zone->allocNurseryObjects() ||
      allocStrings != zone->allocNurseryStrings() ||
      allocBigInts != zone->allocNurseryBigInts() ||
      allocGetterSetters != zone->allocNurseryGetterSetters()) {
    CancelOffThreadIonCompile(zone);
    zone->setNurseryAllocFlags(allocObjects, allocStrings, allocBigInts,
                               allocGetterSetters);
    discardCodeAndSetJitFlagsForZone(zone);
  }
}

void js::Nursery::discardCodeAndSetJitFlagsForZone(JS::Zone* zone) {
  zone->forceDiscardJitCode(runtime()->gcContext());

  if (jit::JitZone* jitZone = zone->jitZone()) {
    jitZone->discardStubs();
    jitZone->setStringsCanBeInNursery(zone->allocNurseryStrings());
  }
}

void js::Nursery::setSemispaceEnabled(bool enabled) {
  if (semispaceEnabled() == enabled) {
    return;
  }

  // Evict and disable the nursery before flipping the mode, then re-enable.
  bool wasEnabled = isEnabled();
  if (wasEnabled) {
    if (!isEmpty()) {
      gc->minorGC(JS::GCReason::EVICT_NURSERY);
    }
    disable();
  }

  semispaceEnabled_ = enabled;

  if (wasEnabled) {
    enable();
  }
}

bool js::Nursery::isEmpty() const {
  MOZ_ASSERT(fromSpace.isEmpty());

  if (!isEnabled()) {
    return true;
  }

  if (!gc->hasZealMode(ZealMode::GenerationalGC)) {
    MOZ_ASSERT(startChunk() == 0);
    MOZ_ASSERT(startPosition() == chunk(0).start());
  }

  return toSpace.isEmpty();
}

bool js::Nursery::Space::isEmpty() const { return position_ == startPosition_; }

// With a semispace nursery each space gets half the configured size.
static size_t AdjustSizeForSemispace(size_t size, bool semispaceEnabled) {
  if (!semispaceEnabled) {
    return size;
  }

  return Nursery::roundSize(size / 2);
}

size_t js::Nursery::maxSpaceSize() const {
  return AdjustSizeForSemispace(tunables().gcMaxNurseryBytes(),
                                semispaceEnabled_);
}

size_t js::Nursery::minSpaceSize() const {
  return AdjustSizeForSemispace(tunables().gcMinNurseryBytes(),
                                semispaceEnabled_);
}

#ifdef JS_GC_ZEAL
void js::Nursery::enterZealMode() {
  if (!isEnabled()) {
    return;
  }

  MOZ_ASSERT(isEmpty());

  decommitTask->join();

  AutoEnterOOMUnsafeRegion oomUnsafe;

  if (isSubChunkMode()) {
    // Recommit the decommitted tail of the first chunk before growing.
    if (!chunk(0).markPagesInUseHard(capacity_, ChunkSize)) {
      oomUnsafe.crash("Out of memory trying to extend chunk for zeal mode");
    }
    chunk(0).poisonRange(capacity_, ChunkSize, JS_FRESH_NURSERY_PATTERN,
                         MemCheckKind::MakeUndefined);
  }

  setCapacity(maxSpaceSize());

  size_t nchunks = toSpace.maxChunkCount_ + fromSpace.maxChunkCount_;
  if (!decommitTask->reserveSpaceForChunks(nchunks)) {
    oomUnsafe.crash("Nursery::enterZealMode");
  }

  setCurrentEnd();
}
void js::Nursery::leaveZealMode() {
  if (!isEnabled()) {
    return;
  }

  MOZ_ASSERT(isEmpty());

  // Reset the nursery size.
  setCapacity(minSpaceSize());

  toSpace.moveToStartOfChunk(this, 0);
  toSpace.setStartToCurrentPosition();

  if (semispaceEnabled_) {
    fromSpace.moveToStartOfChunk(this, 0);
    fromSpace.setStartToCurrentPosition();
  }

  poisonAndInitCurrentChunk();
}
#endif  // JS_GC_ZEAL

// Slow-path cell allocation: retry once after handling the failure (e.g. by
// moving to the next chunk). Returns nullptr if a minor GC is needed.
void* js::Nursery::allocateCell(gc::AllocSite* site, size_t size,
                                JS::TraceKind kind) {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));

  void* ptr = tryAllocateCell(site, size, kind);
  if (MOZ_LIKELY(ptr)) {
    return ptr;
  }

  if (handleAllocationFailure() != JS::GCReason::NO_REASON) {
    return nullptr;
  }

  ptr = tryAllocateCell(site, size, kind);
  MOZ_ASSERT(ptr);
  return ptr;
}

inline void* js::Nursery::allocate(size_t size) {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));

  void* ptr = tryAllocate(size);
  if (MOZ_LIKELY(ptr)) {
    return ptr;
  }

  if (handleAllocationFailure() != JS::GCReason::NO_REASON) {
    return nullptr;
  }

  ptr = tryAllocate(size);
  MOZ_ASSERT(ptr);
  return ptr;
}

// Returns NO_REASON if the failure was handled and allocation can be retried,
// otherwise the reason a minor GC is required.
MOZ_NEVER_INLINE JS::GCReason Nursery::handleAllocationFailure() {
  if (minorGCRequested()) {
    // If a minor GC was requested then fail the allocation. The collection is
    // then run in GCRuntime::tryNewNurseryCell.
    return minorGCTriggerReason_;
  }

  if (!moveToNextChunk()) {
    return JS::GCReason::OUT_OF_NURSERY;
  }

  return JS::GCReason::NO_REASON;
}

bool Nursery::moveToNextChunk() {
  unsigned chunkno = currentChunk() + 1;
  MOZ_ASSERT(chunkno <= maxChunkCount());
  MOZ_ASSERT(chunkno <= allocatedChunkCount());
  if (chunkno == maxChunkCount()) {
    return false;
  }

  if (chunkno == allocatedChunkCount()) {
    // Allocate a new chunk, timing how long it takes.
    TimeStamp start = TimeStamp::Now();
    {
      AutoLockGCBgAlloc lock(gc);
      if (!allocateNextChunk(lock)) {
        return false;
      }
    }
    timeInChunkAlloc_ += TimeStamp::Now() - start;
    MOZ_ASSERT(chunkno < allocatedChunkCount());
  }

  moveToStartOfChunk(chunkno);
  poisonAndInitCurrentChunk();
  return true;
}

// Allocate a buffer in the nursery if small enough, falling back to malloc.
// The bool result is true if the buffer was malloced.
std::tuple<void*, bool> js::Nursery::allocNurseryOrMallocBuffer(
    Zone* zone, size_t nbytes, arena_id_t arenaId) {
  MOZ_ASSERT(nbytes > 0);
  MOZ_ASSERT(nbytes <= SIZE_MAX - gc::CellAlignBytes);
  nbytes = RoundUp(nbytes, gc::CellAlignBytes);

  if (nbytes <= MaxNurseryBufferSize) {
    void* buffer = allocate(nbytes);
    if (buffer) {
      return {buffer, false};
    }
  }

  void* buffer = zone->pod_arena_malloc<uint8_t>(arenaId, nbytes);
  return {buffer, bool(buffer)};
}

void* js::Nursery::allocateInternalBuffer(Zone* zone, size_t nbytes) {
  // Note: |zone| is currently unused here; the allocation comes straight from
  // the nursery.
  MOZ_ASSERT(nbytes > 0);
  MOZ_ASSERT(nbytes <= MaxNurseryBufferSize);
  MOZ_ASSERT(nbytes % CellAlignBytes == 0);
  return allocate(nbytes);
}

// Like allocNurseryOrMallocBuffer but without the malloc fallback.
void* js::Nursery::tryAllocateNurseryBuffer(JS::Zone* zone, size_t nbytes,
                                            arena_id_t arenaId) {
  MOZ_ASSERT(nbytes > 0);
  MOZ_ASSERT(nbytes <= SIZE_MAX - gc::CellAlignBytes);
  nbytes = RoundUp(nbytes, gc::CellAlignBytes);

  if (nbytes <= MaxNurseryBufferSize) {
    return allocate(nbytes);
  }

  return nullptr;
}
// Allocate a buffer owned by |owner|. Tenured owners always get malloced
// memory; nursery owners may get nursery memory, and any malloced buffer is
// registered so it can be freed or transferred at the next minor GC.
void* js::Nursery::allocNurseryOrMallocBuffer(Zone* zone, Cell* owner,
                                              size_t nbytes,
                                              arena_id_t arenaId) {
  MOZ_ASSERT(owner);
  MOZ_ASSERT(nbytes > 0);

  if (!IsInsideNursery(owner)) {
    return zone->pod_arena_malloc<uint8_t>(arenaId, nbytes);
  }

  auto [buffer, isMalloced] = allocNurseryOrMallocBuffer(zone, nbytes, arenaId);
  if (isMalloced && !registerMallocedBuffer(buffer, nbytes)) {
    js_free(buffer);
    return nullptr;
  }
  return buffer;
}

// As allocNurseryOrMallocBuffer but the returned memory is zeroed.
std::tuple<void*, bool> js::Nursery::allocateZeroedBuffer(Zone* zone,
                                                          size_t nbytes,
                                                          arena_id_t arena) {
  MOZ_ASSERT(nbytes > 0);

  if (nbytes <= MaxNurseryBufferSize) {
    void* buffer = allocate(nbytes);
    if (buffer) {
      memset(buffer, 0, nbytes);
      return {buffer, false};
    }
  }

  void* buffer = zone->pod_arena_calloc<uint8_t>(arena, nbytes);
  return {buffer, bool(buffer)};
}

void* js::Nursery::allocateZeroedBuffer(Cell* owner, size_t nbytes,
                                        arena_id_t arena) {
  MOZ_ASSERT(owner);
  MOZ_ASSERT(nbytes > 0);

  if (!IsInsideNursery(owner)) {
    return owner->asTenured().zone()->pod_arena_calloc<uint8_t>(arena, nbytes);
  }
  auto [buffer, isMalloced] =
      allocateZeroedBuffer(owner->nurseryZone(), nbytes, arena);
  if (isMalloced && !registerMallocedBuffer(buffer, nbytes)) {
    js_free(buffer);
    return nullptr;
  }
  return buffer;
}

// Reallocate a buffer owned by |cell|, handling all combinations of tenured /
// nursery owner and malloced / nursery-allocated buffer.
void* js::Nursery::reallocNurseryOrMallocBuffer(Zone* zone, Cell* cell,
                                                void* oldBuffer,
                                                size_t oldBytes,
                                                size_t newBytes,
                                                arena_id_t arena) {
  if (!IsInsideNursery(cell)) {
    MOZ_ASSERT(!isInside(oldBuffer));
    return zone->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
  }

  if (!isInside(oldBuffer)) {
    // Malloced buffer owned by a nursery cell: realloc and keep the
    // registration table and byte accounting in sync.
    MOZ_ASSERT(toSpace.mallocedBufferBytes >= oldBytes);
    void* newBuffer =
        zone->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
    if (newBuffer) {
      if (oldBuffer != newBuffer) {
        MOZ_ALWAYS_TRUE(
            toSpace.mallocedBuffers.rekeyAs(oldBuffer, newBuffer, newBuffer));
      }
      toSpace.mallocedBufferBytes -= oldBytes;
      toSpace.mallocedBufferBytes += newBytes;
    }
    return newBuffer;
  }

  // The nursery cannot make use of the returned slots data.
  if (newBytes < oldBytes) {
    return oldBuffer;
  }

  auto newBuffer =
      allocNurseryOrMallocBuffer(zone, cell, newBytes, js::MallocArena);
  if (newBuffer) {
    PodCopy((uint8_t*)newBuffer, (uint8_t*)oldBuffer, oldBytes);
  }
  return newBuffer;
}

void* js::Nursery::reallocateBuffer(Zone* zone, Cell* cell, void* oldBuffer,
                                    size_t oldBytes, size_t newBytes,
                                    size_t maxNurserySize) {
  if (!IsInsideNursery(cell)) {
    MOZ_ASSERT(IsBufferAlloc(oldBuffer));
    MOZ_ASSERT(!IsNurseryOwned(zone, oldBuffer));
    return ReallocBuffer(zone, oldBuffer, newBytes, false);
  }

  if (IsBufferAlloc(oldBuffer)) {
    MOZ_ASSERT(IsNurseryOwned(zone, oldBuffer));
    return ReallocBuffer(zone, oldBuffer, newBytes, true);
  }

  // The nursery cannot make use of the returned slots data.
  if (newBytes < oldBytes) {
    return oldBuffer;
  }

  auto newBuffer = allocateBuffer(zone, cell, newBytes, maxNurserySize);
  if (newBuffer) {
    PodCopy((uint8_t*)newBuffer, (uint8_t*)oldBuffer, oldBytes);
  }
  return newBuffer;
}
void Nursery::freeBuffer(JS::Zone* zone, gc::Cell* cell, void* buffer,
                         size_t bytes) {
  MOZ_ASSERT(IsBufferAlloc(buffer) || isInside(buffer));
  MOZ_ASSERT_IF(!IsInsideNursery(cell), IsBufferAlloc(buffer));
  MOZ_ASSERT_IF(!IsInsideNursery(cell), !IsNurseryOwned(zone, buffer));

  if (!IsBufferAlloc(buffer)) {
    // The nursery cannot make use of the returned space.
    return;
  }

  FreeBuffer(zone, buffer);
}

#ifdef DEBUG
/* static */
inline bool Nursery::checkForwardingPointerInsideNursery(void* ptr) {
  // If a zero-capacity elements header lands right at the end of a chunk then
  // elements data will appear to be in the next chunk. If we have a pointer to
  // the very start of a chunk, check the previous chunk.
  if ((uintptr_t(ptr) & ChunkMask) == 0) {
    return isInside(reinterpret_cast<uint8_t*>(ptr) - 1);
  }

  return isInside(ptr);
}
#endif

// Record that a nursery buffer at |oldData| moved to |newData|, for buffers
// too small to hold an inline BufferRelocationOverlay.
void Nursery::setIndirectForwardingPointer(void* oldData, void* newData) {
  MOZ_ASSERT(checkForwardingPointerInsideNursery(oldData));
  // |newData| may be either in the nursery or in the malloc heap.

  AutoEnterOOMUnsafeRegion oomUnsafe;
#ifdef DEBUG
  if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(oldData)) {
    MOZ_ASSERT(p->value() == newData);
  }
#endif
  if (!forwardedBuffers.put(oldData, newData)) {
    oomUnsafe.crash("Nursery::setForwardingPointer");
  }
}

#ifdef DEBUG
// Sanity check that |ptr| can be written to, by writing its current value
// back to itself.
static bool IsWriteableAddress(void* ptr) {
  auto* vPtr = reinterpret_cast<volatile uint64_t*>(ptr);
  *vPtr = *vPtr;
  return true;
}
#endif

void js::Nursery::forwardBufferPointer(uintptr_t* pSlotsElems) {
  // Read the current pointer value which may be one of:
  // - Non-nursery pointer
  // - Nursery-allocated buffer
  // - A BufferRelocationOverlay inside the nursery
  //
  // Note: The buffer has already been relocated. We are just patching stale
  // pointers now.
  auto* buffer = reinterpret_cast<void*>(*pSlotsElems);

  if (!isInside(buffer)) {
    return;
  }

  // The new location for this buffer is either stored inline with it or in
  // the forwardedBuffers table.
  if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(buffer)) {
    buffer = p->value();
    // It's not valid to assert IsWriteableAddress for indirect forwarding
    // pointers because the size of the allocation could be less than a word.
  } else {
    BufferRelocationOverlay* reloc =
        static_cast<BufferRelocationOverlay*>(buffer);
    buffer = *reloc;
    MOZ_ASSERT(IsWriteableAddress(buffer));
  }

  MOZ_ASSERT_IF(isInside(buffer), !inCollectedRegion(buffer));
  *pSlotsElems = reinterpret_cast<uintptr_t>(buffer);
}

// Fraction of the used nursery that was promoted in the last collection.
// *validForTenuring is set to false when the sample is too small to base
// tenuring decisions on.
inline double js::Nursery::calcPromotionRate(bool* validForTenuring) const {
  MOZ_ASSERT(validForTenuring);

  if (previousGC.nurseryUsedBytes == 0) {
    *validForTenuring = false;
    return 0.0;
  }

  double used = double(previousGC.nurseryUsedBytes);
  double capacity = double(previousGC.nurseryCapacity);
  double tenured = double(previousGC.tenuredBytes);

  // We should only use the promotion rate to make tenuring decisions if it's
  // likely to be valid. The criterion we use is that the nursery was at least
  // 90% full.
  *validForTenuring = used > capacity * 0.9;

  MOZ_ASSERT(tenured <= used);
  return tenured / used;
}
1038 json.beginObject(); 1039 json.property("status", "nursery empty"); 1040 json.endObject(); 1041 return; 1042 } 1043 1044 // The profiler data uses the term 'tenured' for compatibility with the 1045 // existing data format, although 'promoted' would be more accurate given 1046 // support for semispace nursery. 1047 1048 json.beginObject(); 1049 1050 json.property("status", "complete"); 1051 1052 json.property("reason", JS::ExplainGCReason(previousGC.reason)); 1053 json.property("bytes_tenured", previousGC.tenuredBytes); 1054 json.property("cells_tenured", previousGC.tenuredCells); 1055 json.property("strings_tenured", 1056 stats().getStat(gcstats::STAT_STRINGS_PROMOTED)); 1057 json.property("strings_deduplicated", 1058 stats().getStat(gcstats::STAT_STRINGS_DEDUPLICATED)); 1059 json.property("bigints_tenured", 1060 stats().getStat(gcstats::STAT_BIGINTS_PROMOTED)); 1061 json.property("bytes_used", previousGC.nurseryUsedBytes); 1062 json.property("cur_capacity", previousGC.nurseryCapacity); 1063 const size_t newCapacity = capacity(); 1064 if (newCapacity != previousGC.nurseryCapacity) { 1065 json.property("new_capacity", newCapacity); 1066 } 1067 if (previousGC.nurseryCommitted != previousGC.nurseryCapacity) { 1068 json.property("lazy_capacity", previousGC.nurseryCommitted); 1069 } 1070 if (!timeInChunkAlloc_.IsZero()) { 1071 json.property("chunk_alloc_us", timeInChunkAlloc_, json.MICROSECONDS); 1072 } 1073 1074 // This calculation includes the whole collection time, not just the time 1075 // spent promoting. 1076 double totalTime = profileDurations_[ProfileKey::Total].ToSeconds(); 1077 if (totalTime > 0.0) { 1078 double tenuredAllocRate = double(previousGC.tenuredBytes) / totalTime; 1079 json.property("tenured_allocation_rate", size_t(tenuredAllocRate)); 1080 } 1081 1082 // These counters only contain consistent data if the profiler is enabled, 1083 // and then there's no guarentee. 
1084 if (runtime()->geckoProfiler().enabled()) { 1085 json.property("cells_allocated_nursery", 1086 pretenuringNursery.totalAllocCount()); 1087 json.property("cells_allocated_tenured", 1088 stats().allocsSinceMinorGCTenured()); 1089 } 1090 1091 json.beginObjectProperty("phase_times"); 1092 1093 #define EXTRACT_NAME(name, text) #name, 1094 static const char* const names[] = { 1095 FOR_EACH_NURSERY_PROFILE_TIME(EXTRACT_NAME) 1096 #undef EXTRACT_NAME 1097 ""}; 1098 1099 size_t i = 0; 1100 for (auto time : profileDurations_) { 1101 json.property(names[i++], time, json.MICROSECONDS); 1102 } 1103 1104 json.endObject(); // timings value 1105 1106 json.endObject(); 1107 } 1108 1109 // The following macros define nursery GC profile metadata fields that are 1110 // printed before the timing information defined by 1111 // FOR_EACH_NURSERY_PROFILE_TIME. 1112 1113 #define FOR_EACH_NURSERY_PROFILE_COMMON_METADATA(_) \ 1114 _("PID", 7, "%7zu", pid) \ 1115 _("Runtime", 14, "0x%12p", runtime) 1116 1117 #define FOR_EACH_NURSERY_PROFILE_SLICE_METADATA(_) \ 1118 _("Timestamp", 10, "%10.6f", timestamp.ToSeconds()) \ 1119 _("Reason", 20, "%-20.20s", reasonStr) \ 1120 _("PRate", 6, "%5.1f%%", promotionRatePercent) \ 1121 _("OldKB", 6, "%6zu", oldSizeKB) \ 1122 _("NewKB", 6, "%6zu", newSizeKB) \ 1123 _("Dedup", 6, "%6zu", dedupCount) 1124 1125 #define FOR_EACH_NURSERY_PROFILE_METADATA(_) \ 1126 FOR_EACH_NURSERY_PROFILE_COMMON_METADATA(_) \ 1127 FOR_EACH_NURSERY_PROFILE_SLICE_METADATA(_) 1128 1129 void js::Nursery::printCollectionProfile(JS::GCReason reason, 1130 double promotionRate) { 1131 stats().maybePrintProfileHeaders(); 1132 1133 Sprinter sprinter; 1134 if (!sprinter.init()) { 1135 return; 1136 } 1137 sprinter.put(gcstats::MinorGCProfilePrefix); 1138 1139 size_t pid = getpid(); 1140 JSRuntime* runtime = gc->rt; 1141 TimeDuration timestamp = collectionStartTime() - stats().creationTime(); 1142 const char* reasonStr = ExplainGCReason(reason); 1143 double promotionRatePercent = 
      promotionRate * 100;
  size_t oldSizeKB = previousGC.nurseryCapacity / 1024;
  size_t newSizeKB = capacity() / 1024;
  size_t dedupCount = stats().getStat(gcstats::STAT_STRINGS_DEDUPLICATED);

  // Print each metadata field using the format string from the
  // FOR_EACH_NURSERY_PROFILE_METADATA table; the local variables above must
  // match the value names used in that table.
#define PRINT_FIELD_VALUE(_1, _2, format, value) \
  sprinter.printf(" " format, value);

  FOR_EACH_NURSERY_PROFILE_METADATA(PRINT_FIELD_VALUE)
#undef PRINT_FIELD_VALUE

  printProfileDurations(profileDurations_, sprinter);

  JS::UniqueChars str = sprinter.release();
  if (!str) {
    return;
  }
  fputs(str.get(), stats().profileFile());
}

// Print the header row naming each metadata column and each phase-time
// column, aligned with the per-slice rows printed above.
void js::Nursery::printProfileHeader() {
  Sprinter sprinter;
  if (!sprinter.init()) {
    return;
  }
  sprinter.put(gcstats::MinorGCProfilePrefix);

#define PRINT_FIELD_NAME(name, width, _1, _2) \
  sprinter.printf(" %-*s", width, name);

  FOR_EACH_NURSERY_PROFILE_METADATA(PRINT_FIELD_NAME)
#undef PRINT_FIELD_NAME

#define PRINT_PROFILE_NAME(_1, text) sprinter.printf(" %-6.6s", text);

  FOR_EACH_NURSERY_PROFILE_TIME(PRINT_PROFILE_NAME)
#undef PRINT_PROFILE_NAME

  sprinter.put("\n");

  JS::UniqueChars str = sprinter.release();
  if (!str) {
    return;
  }
  fputs(str.get(), stats().profileFile());
}

// static
void js::Nursery::printProfileDurations(const ProfileDurations& times,
                                        Sprinter& sprinter) {
  for (auto time : times) {
    double micros = time.ToMicroseconds();
    // Durations in [0.001us, 1us) are printed with fractional precision so
    // they don't all round down to zero; everything else is printed as a
    // whole number of microseconds.
    if (micros < 0.001 || micros >= 1.0) {
      sprinter.printf(" %6ld", std::lround(micros));
    } else {
      sprinter.printf(" %6.3f", micros);
    }
  }

  sprinter.put("\n");
}

// Compute the combined column width of all per-slice metadata fields,
// including the single-space padding between fields. Used to align the
// totals row with the per-slice rows.
static constexpr size_t NurserySliceMetadataFormatWidth() {
  size_t fieldCount = 0;
  size_t totalWidth = 0;

#define UPDATE_COUNT_AND_WIDTH(_1, width, _2, _3) \
  fieldCount++;                                   \
  totalWidth += width;

FOR_EACH_NURSERY_PROFILE_SLICE_METADATA(UPDATE_COUNT_AND_WIDTH) 1213 #undef UPDATE_COUNT_AND_WIDTH 1214 1215 // Add padding between fields. 1216 totalWidth += fieldCount - 1; 1217 1218 return totalWidth; 1219 } 1220 1221 void js::Nursery::printTotalProfileTimes() { 1222 if (!enableProfiling_) { 1223 return; 1224 } 1225 1226 Sprinter sprinter; 1227 if (!sprinter.init()) { 1228 return; 1229 } 1230 sprinter.put(gcstats::MinorGCProfilePrefix); 1231 1232 size_t pid = getpid(); 1233 JSRuntime* runtime = gc->rt; 1234 1235 char collections[32]; 1236 DebugOnly<int> r = SprintfLiteral( 1237 collections, "TOTALS: %7" PRIu64 " collections:", gc->minorGCCount()); 1238 MOZ_ASSERT(r > 0 && r < int(sizeof(collections))); 1239 1240 #define PRINT_FIELD_VALUE(_1, _2, format, value) \ 1241 sprinter.printf(" " format, value); 1242 1243 FOR_EACH_NURSERY_PROFILE_COMMON_METADATA(PRINT_FIELD_VALUE) 1244 #undef PRINT_FIELD_VALUE 1245 1246 // Use whole width of per-slice metadata to print total slices so the profile 1247 // totals that follow line up. 
  size_t width = NurserySliceMetadataFormatWidth();
  sprinter.printf(" %-*s", int(width), collections);

  printProfileDurations(totalDurations_, sprinter);

  JS::UniqueChars str = sprinter.release();
  if (!str) {
    return;
  }
  fputs(str.get(), stats().profileFile());
}

// Reset all per-collection phase durations to zero before a collection.
void js::Nursery::maybeClearProfileDurations() {
  for (auto& duration : profileDurations_) {
    duration = mozilla::TimeDuration::Zero();
  }
}

inline void js::Nursery::startProfile(ProfileKey key) {
  startTimes_[key] = TimeStamp::Now();
}

inline void js::Nursery::endProfile(ProfileKey key) {
  profileDurations_[key] = TimeStamp::Now() - startTimes_[key];
  totalDurations_[key] += profileDurations_[key];
}

inline TimeStamp js::Nursery::collectionStartTime() const {
  return startTimes_[ProfileKey::Total];
}

TimeStamp js::Nursery::lastCollectionEndTime() const {
  return previousGC.endTime;
}

// Whether an opportunistic (eager) minor collection is worthwhile, e.g.
// during idle time.
bool js::Nursery::wantEagerCollection() const {
  if (!isEnabled()) {
    return false;
  }

  if (isEmpty() && capacity() == minSpaceSize()) {
    return false;
  }

  if (minorGCRequested()) {
    return true;
  }

  if (freeSpaceIsBelowEagerThreshold()) {
    return true;
  }

  // If the nursery is not being collected often then it may be taking up more
  // space than necessary.
  return isUnderused();
}

inline bool js::Nursery::freeSpaceIsBelowEagerThreshold() const {
  // The threshold is specified in terms of free space so that it doesn't
  // depend on the size of the nursery.
  //
  // There are two thresholds, an absolute free bytes threshold and a free
  // space fraction threshold. Two thresholds are used so that we don't
  // collect too eagerly for small nurseries (or even all the time if nursery
  // size is less than the free bytes threshold) or too eagerly for large
  // nurseries (where a fractional threshold may leave a significant amount of
  // nursery unused).
  //
  // Since the aim is making this less eager we require both thresholds to be
  // met.

  size_t freeBytes = freeSpace();
  double freeFraction = double(freeBytes) / double(capacity());

  size_t bytesThreshold = tunables().nurseryEagerCollectionThresholdBytes();
  double fractionThreshold =
      tunables().nurseryEagerCollectionThresholdPercent();

  return freeBytes < bytesThreshold && freeFraction < fractionThreshold;
}

inline bool js::Nursery::isUnderused() const {
  // Differential testing requires deterministic behavior, and before the
  // first collection there is no end time to compare against.
  if (js::SupportDifferentialTesting() || !previousGC.endTime) {
    return false;
  }

  if (capacity() == minSpaceSize()) {
    return false;
  }

  // If the nursery is above its minimum size, collect it every so often if we
  // have idle time. This allows the nursery to shrink when it's not being
  // used. There are other heuristics we could use for this, but this is the
  // simplest.
1341 TimeDuration timeSinceLastCollection = 1342 TimeStamp::NowLoRes() - previousGC.endTime; 1343 return timeSinceLastCollection > tunables().nurseryEagerCollectionTimeout(); 1344 } 1345 1346 void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) { 1347 JSRuntime* rt = runtime(); 1348 MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC); 1349 1350 JS_LOG(gc, Info, "minor GC for reason %s", ExplainGCReason(reason)); 1351 1352 { 1353 AutoGCSession commitSession(gc, JS::HeapState::Idle); 1354 rt->commitPendingWrapperPreservations(); 1355 } 1356 1357 if (minorGCRequested()) { 1358 MOZ_ASSERT(position() == chunk(currentChunk()).end()); 1359 toSpace.position_ = prevPosition_; 1360 prevPosition_ = 0; 1361 minorGCTriggerReason_ = JS::GCReason::NO_REASON; 1362 rt->mainContextFromOwnThread()->clearPendingInterrupt( 1363 InterruptReason::MinorGC); 1364 } 1365 1366 if (!isEnabled() || isEmpty()) { 1367 // Our barriers are not always exact, and there may be entries in the 1368 // storebuffer even when the nursery is disabled or empty. It's not safe 1369 // to keep these entries as they may refer to tenured cells which may be 1370 // freed after this point. 
1371 gc->storeBuffer().clear(); 1372 1373 MOZ_ASSERT_IF(!semispaceEnabled_, !pretenuringNursery.hasAllocatedSites()); 1374 } 1375 1376 if (!isEnabled()) { 1377 return; 1378 } 1379 1380 AutoGCSession session(gc, JS::HeapState::MinorCollecting); 1381 1382 stats().beginNurseryCollection(); 1383 gcprobes::MinorGCStart(); 1384 1385 gc->callNurseryCollectionCallbacks( 1386 JS::GCNurseryProgress::GC_NURSERY_COLLECTION_START, reason); 1387 1388 maybeClearProfileDurations(); 1389 startProfile(ProfileKey::Total); 1390 1391 previousGC.reason = JS::GCReason::NO_REASON; 1392 previousGC.nurseryUsedBytes = usedSpace(); 1393 previousGC.nurseryCapacity = capacity(); 1394 previousGC.nurseryCommitted = totalCommitted(); 1395 previousGC.nurseryUsedChunkCount = currentChunk() + 1; 1396 previousGC.tenuredBytes = 0; 1397 previousGC.tenuredCells = 0; 1398 tenuredEverything = true; 1399 1400 // Wait for any previous buffer sweeping to finish. This happens even if the 1401 // nursery is empty because we track whether this has happened by checking the 1402 // minor GC number, which is incremented regardless. See the call to 1403 // joinSweepTask in GCRuntime::endSweepingSweepGroup. 1404 joinSweepTask(); 1405 1406 if (stats().bufferAllocStatsEnabled() && runtime()->isMainRuntime()) { 1407 stats().maybePrintProfileHeaders(); 1408 BufferAllocator::printStats(gc, gc->stats().creationTime(), false, 1409 gc->stats().profileFile()); 1410 } 1411 1412 // If it isn't empty, it will call doCollection, and possibly after that 1413 // isEmpty() will become true, so use another variable to keep track of the 1414 // old empty state. 
1415 bool wasEmpty = isEmpty(); 1416 if (!wasEmpty) { 1417 CollectionResult result = doCollection(session, options, reason); 1418 // Don't include chunk headers when calculating nursery space, since this 1419 // space does not represent data that can be tenured 1420 MOZ_ASSERT(result.tenuredBytes <= 1421 (previousGC.nurseryUsedBytes - 1422 (NurseryChunkHeaderSize * previousGC.nurseryUsedChunkCount))); 1423 1424 previousGC.reason = reason; 1425 previousGC.tenuredBytes = result.tenuredBytes; 1426 previousGC.tenuredCells = result.tenuredCells; 1427 previousGC.nurseryUsedChunkCount = currentChunk() + 1; 1428 } 1429 1430 // Resize the nursery. 1431 maybeResizeNursery(options, reason); 1432 1433 if (!semispaceEnabled()) { 1434 poisonAndInitCurrentChunk(); 1435 } 1436 1437 bool validPromotionRate; 1438 const double promotionRate = calcPromotionRate(&validPromotionRate); 1439 1440 startProfile(ProfileKey::Pretenure); 1441 size_t sitesPretenured = 0; 1442 sitesPretenured = 1443 doPretenuring(rt, reason, validPromotionRate, promotionRate); 1444 endProfile(ProfileKey::Pretenure); 1445 1446 previousGC.endTime = 1447 TimeStamp::Now(); // Must happen after maybeResizeNursery. 
1448 endProfile(ProfileKey::Total); 1449 gc->incMinorGcNumber(); 1450 1451 TimeDuration totalTime = profileDurations_[ProfileKey::Total]; 1452 sendTelemetry(reason, totalTime, wasEmpty, promotionRate, sitesPretenured); 1453 1454 gc->callNurseryCollectionCallbacks( 1455 JS::GCNurseryProgress::GC_NURSERY_COLLECTION_END, reason); 1456 1457 stats().endNurseryCollection(); 1458 gcprobes::MinorGCEnd(); 1459 1460 timeInChunkAlloc_ = mozilla::TimeDuration::Zero(); 1461 1462 js::StringStats prevStats = gc->stringStats; 1463 js::StringStats& currStats = gc->stringStats; 1464 currStats = js::StringStats(); 1465 for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) { 1466 currStats += zone->stringStats; 1467 zone->previousGCStringStats = zone->stringStats; 1468 } 1469 stats().setStat( 1470 gcstats::STAT_STRINGS_DEDUPLICATED, 1471 currStats.deduplicatedStrings - prevStats.deduplicatedStrings); 1472 if (ShouldPrintProfile(runtime(), enableProfiling_, profileWorkers_, 1473 profileThreshold_, totalTime)) { 1474 printCollectionProfile(reason, promotionRate); 1475 } 1476 1477 if (reportDeduplications_) { 1478 printDeduplicationData(prevStats, currStats); 1479 } 1480 } 1481 1482 void js::Nursery::sendTelemetry(JS::GCReason reason, TimeDuration totalTime, 1483 bool wasEmpty, double promotionRate, 1484 size_t sitesPretenured) { 1485 JSRuntime* rt = runtime(); 1486 rt->metrics().GC_MINOR_REASON(uint32_t(reason)); 1487 1488 // Long minor GCs are those that take more than 1ms. 
1489 bool wasLongMinorGC = totalTime.ToMilliseconds() > 1.0; 1490 if (wasLongMinorGC) { 1491 rt->metrics().GC_MINOR_REASON_LONG(uint32_t(reason)); 1492 } 1493 rt->metrics().GC_MINOR_US(totalTime); 1494 rt->metrics().GC_NURSERY_BYTES_2(totalCommitted()); 1495 1496 if (!wasEmpty) { 1497 rt->metrics().GC_PRETENURE_COUNT_2(sitesPretenured); 1498 rt->metrics().GC_NURSERY_PROMOTION_RATE(promotionRate * 100); 1499 } 1500 } 1501 1502 void js::Nursery::printDeduplicationData(js::StringStats& prev, 1503 js::StringStats& curr) { 1504 if (curr.deduplicatedStrings > prev.deduplicatedStrings) { 1505 fprintf(stderr, 1506 "pid %zu: deduplicated %" PRIi64 " strings, %" PRIu64 1507 " chars, %" PRIu64 " malloc bytes\n", 1508 size_t(getpid()), 1509 curr.deduplicatedStrings - prev.deduplicatedStrings, 1510 curr.deduplicatedChars - prev.deduplicatedChars, 1511 curr.deduplicatedBytes - prev.deduplicatedBytes); 1512 } 1513 } 1514 1515 js::Nursery::CollectionResult js::Nursery::doCollection(AutoGCSession& session, 1516 JS::GCOptions options, 1517 JS::GCReason reason) { 1518 JSRuntime* rt = runtime(); 1519 AutoSetThreadIsPerformingGC performingGC(rt->gcContext()); 1520 AutoStopVerifyingBarriers av(rt, false); 1521 AutoDisableProxyCheck disableStrictProxyChecking; 1522 mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion; 1523 1524 #ifdef JS_GC_ZEAL 1525 if (gc->hasZealMode(ZealMode::CheckHeapBeforeMinorGC)) { 1526 gc->checkHeapBeforeMinorGC(session); 1527 } 1528 #endif 1529 1530 // Swap nursery spaces. 
1531 swapSpaces(); 1532 MOZ_ASSERT(toSpace.isEmpty()); 1533 MOZ_ASSERT(toSpace.mallocedBuffers.empty()); 1534 if (semispaceEnabled_) { 1535 poisonAndInitCurrentChunk(); 1536 } 1537 1538 clearMapAndSetNurseryIterators(); 1539 1540 MOZ_ASSERT(sweepTask->isIdle()); 1541 { 1542 BufferAllocator::MaybeLock lock; 1543 for (ZonesIter zone(runtime(), WithAtoms); !zone.done(); zone.next()) { 1544 zone->bufferAllocator.startMinorCollection(lock); 1545 } 1546 } 1547 1548 // Move objects pointed to by roots from the nursery to the major heap. 1549 tenuredEverything = shouldTenureEverything(reason); 1550 TenuringTracer mover(rt, this, tenuredEverything); 1551 #ifdef JS_GC_ZEAL 1552 if (reportPromotion_) { 1553 mover.initPromotionReport(); 1554 } 1555 #endif 1556 1557 // Trace everything considered as a root by a minor GC. 1558 traceRoots(session, mover); 1559 1560 startProfile(ProfileKey::SweepCaches); 1561 gc->purgeRuntimeForMinorGC(); 1562 endProfile(ProfileKey::SweepCaches); 1563 1564 // Most of the work is done here. This loop iterates over objects that have 1565 // been moved to the major heap. If these objects have any outgoing pointers 1566 // to the nursery, then those nursery objects get moved as well, until no 1567 // objects are left to move. That is, we iterate to a fixed point. 1568 startProfile(ProfileKey::CollectToObjFP); 1569 mover.collectToObjectFixedPoint(); 1570 endProfile(ProfileKey::CollectToObjFP); 1571 1572 startProfile(ProfileKey::CollectToStrFP); 1573 mover.collectToStringFixedPoint(); 1574 endProfile(ProfileKey::CollectToStrFP); 1575 1576 #ifdef JS_GC_ZEAL 1577 if (reportPromotion_ && options != JS::GCOptions::Shutdown) { 1578 JSContext* cx = runtime()->mainContextFromOwnThread(); 1579 JS::AutoAssertNoGC nogc(cx); 1580 mover.printPromotionReport(cx, reason, nogc); 1581 } 1582 #endif 1583 1584 // Sweep to update any pointers to nursery objects that have now been 1585 // tenured. 
1586 startProfile(ProfileKey::Sweep); 1587 sweep(); 1588 endProfile(ProfileKey::Sweep); 1589 1590 // Update any slot or element pointers whose destination has been tenured. 1591 startProfile(ProfileKey::UpdateJitActivations); 1592 js::jit::UpdateJitActivationsForMinorGC(rt); 1593 forwardedBuffers.clearAndCompact(); 1594 endProfile(ProfileKey::UpdateJitActivations); 1595 1596 startProfile(ProfileKey::ObjectsTenuredCallback); 1597 gc->callObjectsTenuredCallback(); 1598 endProfile(ProfileKey::ObjectsTenuredCallback); 1599 1600 // Sweep malloced buffers. 1601 startProfile(ProfileKey::FreeMallocedBuffers); 1602 gc->queueBuffersForFreeAfterMinorGC(fromSpace.mallocedBuffers, 1603 stringBuffersToReleaseAfterMinorGC_); 1604 fromSpace.mallocedBufferBytes = 0; 1605 endProfile(ProfileKey::FreeMallocedBuffers); 1606 1607 startProfile(ProfileKey::ClearNursery); 1608 clear(); 1609 endProfile(ProfileKey::ClearNursery); 1610 1611 // Purge the StringToAtomCache. This has to happen at the end because the 1612 // cache is used when tenuring strings. 1613 startProfile(ProfileKey::PurgeStringToAtomCache); 1614 runtime()->caches().stringToAtomCache.purge(); 1615 endProfile(ProfileKey::PurgeStringToAtomCache); 1616 1617 #ifdef JS_GC_ZEAL 1618 // Make sure hashtables have been updated after the collection. 1619 startProfile(ProfileKey::CheckHashTables); 1620 if (gc->hasZealMode(ZealMode::CheckHashTablesOnMinorGC)) { 1621 runtime()->caches().checkEvalCacheAfterMinorGC(); 1622 gc->checkHashTablesAfterMovingGC(); 1623 } 1624 endProfile(ProfileKey::CheckHashTables); 1625 1626 // Check for missing post barriers. 1627 if (gc->hasZealMode(ZealMode::VerifierPost)) { 1628 gc->verifyPostBarriers(session); 1629 } 1630 #endif 1631 1632 if (semispaceEnabled_) { 1633 // On the next collection, tenure everything before |tenureThreshold_|. 1634 tenureThreshold_ = toSpace.offsetFromExclusiveAddress(position()); 1635 } else { 1636 // Swap nursery spaces back because we only use one. 
1637 swapSpaces(); 1638 MOZ_ASSERT(toSpace.isEmpty()); 1639 } 1640 1641 MOZ_ASSERT(fromSpace.isEmpty()); 1642 1643 if (semispaceEnabled_) { 1644 poisonAndInitCurrentChunk(); 1645 } 1646 1647 return {mover.getPromotedSize(), mover.getPromotedCells()}; 1648 } 1649 1650 void js::Nursery::swapSpaces() { 1651 std::swap(toSpace, fromSpace); 1652 toSpace.setKind(ChunkKind::NurseryToSpace); 1653 fromSpace.setKind(ChunkKind::NurseryFromSpace); 1654 } 1655 1656 void js::Nursery::traceRoots(AutoGCSession& session, TenuringTracer& mover) { 1657 { 1658 // Suppress the sampling profiler to prevent it observing moved functions. 1659 AutoSuppressProfilerSampling suppressProfiler( 1660 runtime()->mainContextFromOwnThread()); 1661 1662 // Trace the store buffer, which must happen first. 1663 1664 // Create an empty store buffer on the stack and swap it with the main store 1665 // buffer, clearing it. 1666 StoreBuffer sb(runtime()); 1667 { 1668 AutoEnterOOMUnsafeRegion oomUnsafe; 1669 if (!sb.enable()) { 1670 oomUnsafe.crash("Nursery::traceRoots"); 1671 } 1672 } 1673 std::swap(sb, gc->storeBuffer()); 1674 MOZ_ASSERT(gc->storeBuffer().isEnabled()); 1675 MOZ_ASSERT(gc->storeBuffer().isEmpty()); 1676 1677 startProfile(ProfileKey::TraceWholeCells); 1678 sb.traceWholeCells(mover); 1679 endProfile(ProfileKey::TraceWholeCells); 1680 1681 startProfile(ProfileKey::TraceValues); 1682 sb.traceValues(mover); 1683 endProfile(ProfileKey::TraceValues); 1684 1685 startProfile(ProfileKey::TraceWasmAnyRefs); 1686 sb.traceWasmAnyRefs(mover); 1687 endProfile(ProfileKey::TraceWasmAnyRefs); 1688 1689 startProfile(ProfileKey::TraceCells); 1690 sb.traceCells(mover); 1691 endProfile(ProfileKey::TraceCells); 1692 1693 startProfile(ProfileKey::TraceSlots); 1694 sb.traceSlots(mover); 1695 endProfile(ProfileKey::TraceSlots); 1696 1697 startProfile(ProfileKey::TraceGenericEntries); 1698 sb.traceGenericEntries(&mover); 1699 endProfile(ProfileKey::TraceGenericEntries); 1700 1701 
startProfile(ProfileKey::MarkRuntime); 1702 gc->traceRuntimeForMinorGC(&mover, session); 1703 endProfile(ProfileKey::MarkRuntime); 1704 } 1705 1706 startProfile(ProfileKey::MarkDebugger); 1707 { 1708 gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS); 1709 DebugAPI::traceAllForMovingGC(&mover); 1710 } 1711 endProfile(ProfileKey::MarkDebugger); 1712 1713 // This should happen after debugger marking as this also marks weak map 1714 // entries. 1715 startProfile(ProfileKey::TraceWeakMaps); 1716 traceWeakMaps(mover); 1717 endProfile(ProfileKey::TraceWeakMaps); 1718 } 1719 1720 bool js::Nursery::shouldTenureEverything(JS::GCReason reason) { 1721 if (!semispaceEnabled()) { 1722 return true; 1723 } 1724 1725 return reason == JS::GCReason::EVICT_NURSERY || 1726 reason == JS::GCReason::DISABLE_GENERATIONAL_GC; 1727 } 1728 1729 size_t js::Nursery::doPretenuring(JSRuntime* rt, JS::GCReason reason, 1730 bool validPromotionRate, 1731 double promotionRate) { 1732 size_t sitesPretenured = pretenuringNursery.doPretenuring( 1733 gc, reason, validPromotionRate, promotionRate, pretenuringReportFilter_); 1734 1735 size_t zonesWhereStringsDisabled = 0; 1736 size_t zonesWhereBigIntsDisabled = 0; 1737 1738 uint32_t numStringsPromoted = 0; 1739 uint32_t numBigIntsPromoted = 0; 1740 for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) { 1741 bool disableNurseryStrings = 1742 zone->allocNurseryStrings() && 1743 zone->unknownAllocSite(JS::TraceKind::String)->state() == 1744 AllocSite::State::LongLived; 1745 1746 bool disableNurseryBigInts = 1747 zone->allocNurseryBigInts() && 1748 zone->unknownAllocSite(JS::TraceKind::BigInt)->state() == 1749 AllocSite::State::LongLived; 1750 1751 if (disableNurseryStrings || disableNurseryBigInts) { 1752 if (disableNurseryStrings) { 1753 zone->nurseryStringsDisabled = true; 1754 zonesWhereStringsDisabled++; 1755 } 1756 if (disableNurseryBigInts) { 1757 zone->nurseryBigIntsDisabled = true; 1758 zonesWhereStringsDisabled++; 1759 } 1760 
updateAllocFlagsForZone(zone); 1761 } 1762 1763 numStringsPromoted += zone->nurseryPromotedCount(JS::TraceKind::String); 1764 numBigIntsPromoted += zone->nurseryPromotedCount(JS::TraceKind::BigInt); 1765 } 1766 1767 stats().setStat(gcstats::STAT_STRINGS_PROMOTED, numStringsPromoted); 1768 stats().setStat(gcstats::STAT_BIGINTS_PROMOTED, numBigIntsPromoted); 1769 1770 if (reportPretenuring() && zonesWhereStringsDisabled) { 1771 fprintf(stderr, 1772 "Pretenuring disabled nursery string allocation in %zu zones\n", 1773 zonesWhereStringsDisabled); 1774 } 1775 if (reportPretenuring() && zonesWhereBigIntsDisabled) { 1776 fprintf(stderr, 1777 "Pretenuring disabled nursery big int allocation in %zu zones\n", 1778 zonesWhereBigIntsDisabled); 1779 } 1780 1781 return sitesPretenured; 1782 } 1783 1784 bool js::Nursery::registerMallocedBuffer(void* buffer, size_t nbytes) { 1785 MOZ_ASSERT(buffer); 1786 MOZ_ASSERT(nbytes > 0); 1787 MOZ_ASSERT(!isInside(buffer)); 1788 1789 if (!toSpace.mallocedBuffers.putNew(buffer)) { 1790 return false; 1791 } 1792 1793 addMallocedBufferBytes(nbytes); 1794 return true; 1795 } 1796 1797 /* 1798 * Several things may need to happen when a nursery allocated cell with an 1799 * external buffer is promoted: 1800 * - the buffer may need to be moved if it is currently in the nursery 1801 * - the buffer may need to be removed from the list of buffers that will be 1802 * freed after nursery collection if it is malloced 1803 * - memory accounting for the buffer needs to be updated 1804 */ 1805 Nursery::WasBufferMoved 1806 js::Nursery::maybeMoveRawNurseryOrMallocBufferOnPromotion( 1807 void** bufferp, gc::Cell* owner, size_t bytesUsed, size_t bytesCapacity, 1808 MemoryUse use, arena_id_t arena) { 1809 MOZ_ASSERT(bytesUsed <= bytesCapacity); 1810 1811 void* buffer = *bufferp; 1812 if (!isInside(buffer)) { 1813 // This is a malloced buffer. Remove it from the nursery's previous list of 1814 // buffers so we don't free it. 
1815 removeMallocedBufferDuringMinorGC(buffer); 1816 trackMallocedBufferOnPromotion(buffer, owner, bytesCapacity, use); 1817 return BufferNotMoved; 1818 } 1819 1820 // Copy the nursery-allocated buffer into a new malloc allocation. 1821 1822 AutoEnterOOMUnsafeRegion oomUnsafe; 1823 Zone* zone = owner->zone(); 1824 void* movedBuffer = zone->pod_arena_malloc<uint8_t>(arena, bytesCapacity); 1825 if (!movedBuffer) { 1826 oomUnsafe.crash("Nursery::maybeMoveRawNurseryOrMallocBufferOnPromotion"); 1827 } 1828 1829 memcpy(movedBuffer, buffer, bytesUsed); 1830 1831 trackMallocedBufferOnPromotion(movedBuffer, owner, bytesCapacity, use); 1832 1833 *bufferp = movedBuffer; 1834 return BufferMoved; 1835 } 1836 1837 void js::Nursery::trackMallocedBufferOnPromotion(void* buffer, gc::Cell* owner, 1838 size_t nbytes, MemoryUse use) { 1839 if (owner->isTenured()) { 1840 // If we tenured the owner then account for the memory. 1841 AddCellMemory(owner, nbytes, use); 1842 return; 1843 } 1844 1845 // Otherwise add it to the nursery's new buffer list. 1846 AutoEnterOOMUnsafeRegion oomUnsafe; 1847 if (!registerMallocedBuffer(buffer, nbytes)) { 1848 oomUnsafe.crash("Nursery::trackMallocedBufferOnPromotion"); 1849 } 1850 } 1851 1852 Nursery::WasBufferMoved js::Nursery::maybeMoveRawBufferOnPromotion( 1853 void** bufferp, gc::Cell* owner, size_t nbytes) { 1854 bool nurseryOwned = IsInsideNursery(owner); 1855 1856 void* buffer = *bufferp; 1857 if (IsBufferAlloc(buffer)) { 1858 // This is an external buffer allocation owned by a nursery GC thing. 1859 Zone* zone = owner->zone(); 1860 MOZ_ASSERT(IsNurseryOwned(zone, buffer)); 1861 zone->bufferAllocator.markNurseryOwnedAlloc(buffer, nurseryOwned); 1862 return BufferNotMoved; 1863 } 1864 1865 // Copy the nursery-allocated buffer into a new allocation. 1866 1867 // todo: only necessary for copying inline elements data where we didn't 1868 // calculate this on allocation. 
1869 size_t dstBytes = GetGoodAllocSize(nbytes); 1870 1871 AutoEnterOOMUnsafeRegion oomUnsafe; 1872 void* movedBuffer = AllocBufferInGC(owner->zone(), dstBytes, nurseryOwned); 1873 if (!movedBuffer) { 1874 oomUnsafe.crash("Nursery::maybeMoveRawBufferOnPromotion"); 1875 } 1876 1877 memcpy(movedBuffer, buffer, nbytes); 1878 1879 *bufferp = movedBuffer; 1880 return BufferMoved; 1881 } 1882 1883 void js::Nursery::sweepBuffers() { 1884 for (ZonesIter zone(runtime(), WithAtoms); !zone.done(); zone.next()) { 1885 if (zone->bufferAllocator.startMinorSweeping()) { 1886 sweepTask->queueAllocatorToSweep(zone->bufferAllocator); 1887 } 1888 } 1889 1890 AutoLockHelperThreadState lock; 1891 if (!sweepTask->isEmpty(lock)) { 1892 sweepTask->startOrRunIfIdle(lock); 1893 } 1894 } 1895 1896 void Nursery::requestMinorGC(JS::GCReason reason) { 1897 JS::HeapState heapState = runtime()->heapState(); 1898 #ifdef DEBUG 1899 if (heapState == JS::HeapState::Idle || 1900 heapState == JS::HeapState::MinorCollecting) { 1901 MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime())); 1902 } else if (heapState == JS::HeapState::MajorCollecting) { 1903 // The GC runs sweeping tasks that may access the storebuffer in parallel 1904 // and these require taking the store buffer lock. 1905 MOZ_ASSERT(!CurrentThreadIsGCMarking()); 1906 runtime()->gc.assertCurrentThreadHasLockedStoreBuffer(); 1907 } else { 1908 MOZ_CRASH("Unexpected heap state"); 1909 } 1910 #endif 1911 1912 MOZ_ASSERT(reason != JS::GCReason::NO_REASON); 1913 MOZ_ASSERT(isEnabled()); 1914 1915 if (minorGCRequested()) { 1916 return; 1917 } 1918 1919 if (heapState == JS::HeapState::MinorCollecting) { 1920 // This can happen when we promote a lot of data to the second generation in 1921 // a semispace collection. This can trigger a GC due to the amount of store 1922 // buffer entries added. 1923 return; 1924 } 1925 1926 // Set position to end of chunk to block further allocation. 
  // Tail of the minor-GC trigger path (function head is above this chunk):
  // record the current allocation position, exhaust the current chunk so the
  // next allocation fails fast, and request a MinorGC interrupt on the main
  // context.
  MOZ_ASSERT(prevPosition_ == 0);
  prevPosition_ = position();
  toSpace.position_ = chunk(currentChunk()).end();

  minorGCTriggerReason_ = reason;
  runtime()->mainContextFromAnyThread()->requestInterrupt(
      InterruptReason::MinorGC);
}

// Number of nursery spaces in use: two (to-space and from-space) when
// semispace collection is enabled, otherwise one.
size_t SemispaceSizeFactor(bool semispaceEnabled) {
  return semispaceEnabled ? 2 : 1;
}

// Total reserved nursery capacity, counting both semispaces when enabled.
size_t js::Nursery::totalCapacity() const {
  return capacity() * SemispaceSizeFactor(semispaceEnabled_);
}

// Total committed nursery memory. Chunks are allocated lazily, so this is
// bounded by both the configured capacity and the chunks actually allocated.
size_t js::Nursery::totalCommitted() const {
  size_t size = std::min(capacity_, allocatedChunkCount() * gc::ChunkSize);
  return size * SemispaceSizeFactor(semispaceEnabled_);
}

// Memory-reporter accounting: sum the malloced buffers owned by to-space
// (from-space must have none outside of collection) plus each zone's nursery
// buffer-allocator usage.
size_t Nursery::sizeOfMallocedBuffers(
    mozilla::MallocSizeOf mallocSizeOf) const {
  MOZ_ASSERT(fromSpace.mallocedBuffers.empty());

  size_t total = 0;
  for (BufferSet::Range r = toSpace.mallocedBuffers.all(); !r.empty();
       r.popFront()) {
    total += mallocSizeOf(r.front());
  }
  total += toSpace.mallocedBuffers.shallowSizeOfExcludingThis(mallocSizeOf);

  for (AllZonesIter zone(runtime()); !zone.done(); zone.next()) {
    total += zone->bufferAllocator.getSizeOfNurseryBuffers();
  }

  return total;
}

void js::Nursery::sweepStringsWithBuffer() {
  // Add StringBuffers to stringBuffersToReleaseAfterMinorGC_. Strings we
  // tenured must have an additional refcount at this point.

  MOZ_ASSERT(stringBuffersToReleaseAfterMinorGC_.empty());

  // Shared sweep helper: returns the new (still nursery-allocated) string to
  // keep tracking, or nullptr if the entry should be dropped. Dropped entries
  // have their buffer queued for release (or released immediately on OOM).
  auto sweep = [&](JSLinearString* str,
                   mozilla::StringBuffer* buffer) -> JSLinearString* {
    MOZ_ASSERT(inCollectedRegion(str));

    if (!IsForwarded(str)) {
      // The string died in this minor GC.
      MOZ_ASSERT(str->hasStringBuffer() || str->isAtomRef());
      MOZ_ASSERT_IF(str->hasStringBuffer(), str->stringBuffer() == buffer);
      if (!stringBuffersToReleaseAfterMinorGC_.append(buffer)) {
        // Release on the main thread on OOM.
        buffer->Release();
      }
      return nullptr;
    }

    JSLinearString* dst = Forwarded(str);
    if (!IsInsideNursery(dst)) {
      // The string was tenured; the tenured copy holds its own reference.
      MOZ_ASSERT_IF(dst->hasStringBuffer() && dst->stringBuffer() == buffer,
                    buffer->RefCount() > 1);
      if (!stringBuffersToReleaseAfterMinorGC_.append(buffer)) {
        // Release on the main thread on OOM.
        buffer->Release();
      }
      return nullptr;
    }

    return dst;
  };

  stringBuffers_.mutableEraseIf([&](StringAndBuffer& entry) {
    if (JSLinearString* dst = sweep(entry.first, entry.second)) {
      entry.first = dst;
      // See comment in Nursery::addStringBuffer.
      if (!entry.second->HasMultipleReferences()) {
        addMallocedBufferBytes(entry.second->AllocationSize());
      }
      return false;
    }
    return true;
  });

  AutoEnterOOMUnsafeRegion oomUnsafe;

  // Rebuild the extensible-string-buffer map, rekeying surviving entries on
  // the forwarded string.
  ExtensibleStringBuffers buffers(std::move(extensibleStringBuffers_));
  MOZ_ASSERT(extensibleStringBuffers_.empty());

  for (ExtensibleStringBuffers::Enum e(buffers); !e.empty(); e.popFront()) {
    if (JSLinearString* dst = sweep(e.front().key(), e.front().value())) {
      if (!extensibleStringBuffers_.putNew(dst, e.front().value())) {
        oomUnsafe.crash("sweepStringsWithBuffer");
      }
      // Ensure mallocedBufferBytes includes the buffer size for
      // removeExtensibleStringBuffer.
      addMallocedBufferBytes(e.front().value()->AllocationSize());
    }
  }
}

void js::Nursery::sweep() {
  // It's important that the context's GCUse is not Finalizing at this point,
  // otherwise we will miscount memory attached to nursery objects with
  // CellAllocPolicy.
  AutoSetThreadIsSweeping setThreadSweeping(runtime()->gcContext());

  // Start sweeping buffers off-thread as soon as possible.
  sweepBuffers();

  MinorSweepingTracer trc(runtime());

  // Sweep unique IDs first before we sweep any tables that may be keyed based
  // on them.
  cellsWithUid_.mutableEraseIf([](Cell*& cell) {
    auto* obj = static_cast<JSObject*>(cell);
    if (!IsForwarded(obj)) {
      // Dead: drop the unique ID and the table entry.
      gc::RemoveUniqueId(obj);
      return true;
    }

    JSObject* dst = Forwarded(obj);
    gc::TransferUniqueId(dst, obj);

    if (!IsInsideNursery(dst)) {
      // Tenured objects no longer need tracking here.
      return true;
    }

    cell = dst;
    return false;
  });

  sweepStringsWithBuffer();

  for (ZonesIter zone(runtime(), SkipAtoms); !zone.done(); zone.next()) {
    zone->sweepAfterMinorGC(&trc);
  }

  sweepMapAndSetObjects();

  sweepWeakMaps();

  runtime()->caches().sweepAfterMinorGC(&trc);
}

// Poison the evacuated from-space after a collection so stale pointers crash.
void js::Nursery::clear() {
  fromSpace.clear(this);
  MOZ_ASSERT(fromSpace.isEmpty());
}

void js::Nursery::Space::clear(Nursery* nursery) {
  GCRuntime* gc = nursery->gc;

  // Poison the nursery contents so touching a freed object will crash.
  unsigned firstClearChunk;
  if (gc->hasZealMode(ZealMode::GenerationalGC) || nursery->semispaceEnabled_) {
    // Poison all the chunks used in this cycle.
    firstClearChunk = startChunk_;
  } else {
    // Poison from the second chunk onwards as the first one will be used
    // in the next cycle and poisoned in Nursery::collect();
    MOZ_ASSERT(startChunk_ == 0);
    firstClearChunk = 1;
  }
  for (unsigned i = firstClearChunk; i < currentChunk_; ++i) {
    chunks_[i]->poisonAfterEvict();
  }
  // Clear only the used part of the chunk because that's the part we touched,
  // but only if it's not going to be re-used immediately (>= firstClearChunk).
  if (currentChunk_ >= firstClearChunk) {
    size_t usedBytes = position_ - chunks_[currentChunk_]->start();
    chunks_[currentChunk_]->poisonAfterEvict(NurseryChunkHeaderSize +
                                             usedBytes);
  }

  // Reset the start chunk & position if we're not in this zeal mode, or we're
  // in it and close to the end of the nursery.
  MOZ_ASSERT(maxChunkCount_ > 0);
  if (!gc->hasZealMode(ZealMode::GenerationalGC) ||
      currentChunk_ + 1 == maxChunkCount_) {
    moveToStartOfChunk(nursery, 0);
  }

  // Set current start position for isEmpty checks.
  setStartToCurrentPosition();
}

void js::Nursery::moveToStartOfChunk(unsigned chunkno) {
  toSpace.moveToStartOfChunk(this, chunkno);
}

// Reset the bump-allocation cursor to the start of the given chunk.
void js::Nursery::Space::moveToStartOfChunk(Nursery* nursery,
                                            unsigned chunkno) {
  MOZ_ASSERT(chunkno < chunks_.length());

  currentChunk_ = chunkno;
  position_ = chunks_[chunkno]->start();
  setCurrentEnd(nursery);

  MOZ_ASSERT(position_ != 0);
  MOZ_ASSERT(currentEnd_ > position_);  // Check this cannot wrap.
}

// Poison the allocatable part of the current chunk and re-run the chunk
// header's constructor to mark it as to-space.
void js::Nursery::poisonAndInitCurrentChunk() {
  NurseryChunk& chunk = this->chunk(currentChunk());
  size_t start = position() - uintptr_t(&chunk);
  size_t end = isSubChunkMode() ? capacity_ : ChunkSize;
  chunk.poisonRange(start, end, JS_FRESH_NURSERY_PATTERN,
                    MemCheckKind::MakeUndefined);
  new (&chunk)
      NurseryChunk(runtime(), ChunkKind::NurseryToSpace, currentChunk());
}

void js::Nursery::setCurrentEnd() { toSpace.setCurrentEnd(this); }

// The allocation limit is the chunk end, or earlier in sub-chunk mode where
// capacity is less than a whole chunk.
void js::Nursery::Space::setCurrentEnd(Nursery* nursery) {
  currentEnd_ = uintptr_t(chunks_[currentChunk_]) +
                std::min(nursery->capacity(), ChunkSize);
}

bool js::Nursery::allocateNextChunk(AutoLockGCBgAlloc& lock) {
  // Allocate a new nursery chunk. If semispace collection is enabled, we have
  // to allocate one for both spaces.

  const unsigned priorCount = toSpace.chunks_.length();
  const unsigned newCount = priorCount + 1;

  MOZ_ASSERT(newCount <= maxChunkCount());
  MOZ_ASSERT(fromSpace.chunks_.length() ==
             (semispaceEnabled_ ? priorCount : 0));

  // Reserve vector space first so the appends below cannot fail.
  if (!toSpace.chunks_.reserve(newCount) ||
      (semispaceEnabled_ && !fromSpace.chunks_.reserve(newCount))) {
    return false;
  }

  ArenaChunk* toSpaceChunk = gc->getOrAllocChunk(StallAndRetry::No, lock);
  if (!toSpaceChunk) {
    return false;
  }

  ArenaChunk* fromSpaceChunk = nullptr;
  if (semispaceEnabled_ &&
      !(fromSpaceChunk = gc->getOrAllocChunk(StallAndRetry::No, lock))) {
    // Couldn't get the matching from-space chunk: return the first one rather
    // than leaking it.
    gc->recycleChunk(toSpaceChunk, lock);
    return false;
  }

  uint8_t index = toSpace.chunks_.length();
  NurseryChunk* nurseryChunk =
      NurseryChunk::fromChunk(toSpaceChunk, ChunkKind::NurseryToSpace, index);
  toSpace.chunks_.infallibleAppend(nurseryChunk);

  if (semispaceEnabled_) {
    MOZ_ASSERT(index == fromSpace.chunks_.length());
    nurseryChunk = NurseryChunk::fromChunk(fromSpaceChunk,
                                           ChunkKind::NurseryFromSpace, index);
    fromSpace.chunks_.infallibleAppend(nurseryChunk);
  }

  return true;
}

void js::Nursery::setStartToCurrentPosition() {
  toSpace.setStartToCurrentPosition();
}

void js::Nursery::Space::setStartToCurrentPosition() {
  startChunk_ = currentChunk_;
  startPosition_ = position_;
  MOZ_ASSERT(isEmpty());
}

// Grow or shrink the nursery after a collection based on the computed target
// size. No-op under the GenerationalGC zeal mode or when already at target.
void js::Nursery::maybeResizeNursery(JS::GCOptions options,
                                     JS::GCReason reason) {
#ifdef JS_GC_ZEAL
  // This zeal mode disables nursery resizing.
  if (gc->hasZealMode(ZealMode::GenerationalGC)) {
    return;
  }
#endif

  size_t newCapacity =
      std::clamp(targetSize(options, reason), minSpaceSize(), maxSpaceSize());

  MOZ_ASSERT(roundSize(newCapacity) == newCapacity);
  MOZ_ASSERT(newCapacity >= SystemPageSize());

  if (newCapacity == capacity()) {
    return;
  }

  // The decommit task touches chunk memory; it must not run concurrently with
  // a resize.
  decommitTask->join();

  if (newCapacity > capacity()) {
    growAllocableSpace(newCapacity);
  } else {
    MOZ_ASSERT(newCapacity < capacity());
    shrinkAllocableSpace(newCapacity);
  }

  AutoLockHelperThreadState lock;
  if (!decommitTask->isEmpty(lock)) {
    decommitTask->startOrRunIfIdle(lock);
  }

  // The size of the store buffers depends on the nursery size.
  gc->storeBuffer().updateSize();
}

// Clamp *value into [min, max]; returns true if clamping was applied.
static inline bool ClampDouble(double* value, double min, double max) {
  MOZ_ASSERT(!std::isnan(*value) && !std::isnan(min) && !std::isnan(max));
  MOZ_ASSERT(max >= min);

  if (*value <= min) {
    *value = min;
    return true;
  }

  if (*value >= max) {
    *value = max;
    return true;
  }

  return false;
}

size_t js::Nursery::targetSize(JS::GCOptions options, JS::GCReason reason) {
  // Shrink the nursery as much as possible if purging was requested or in low
  // memory situations.
  if (options == JS::GCOptions::Shrink || gc::IsOOMReason(reason) ||
      gc->systemHasLowMemory()) {
    clearRecentGrowthData();
    return 0;
  }

  // Don't resize the nursery during shutdown.
  if (options == JS::GCOptions::Shutdown) {
    clearRecentGrowthData();
    return capacity();
  }

  TimeStamp now = TimeStamp::Now();

  if (reason == JS::GCReason::PREPARE_FOR_PAGELOAD) {
    return roundSize(maxSpaceSize());
  }

  // If the nursery is completely unused then minimise it.
  if (hasRecentGrowthData && previousGC.nurseryUsedBytes == 0 &&
      now - lastCollectionEndTime() >
          tunables().nurseryEagerCollectionTimeout() &&
      !js::SupportDifferentialTesting()) {
    clearRecentGrowthData();
    return 0;
  }

  // Calculate the fraction of the nursery promoted out of its entire
  // capacity. This gives better results than using the promotion rate (based on
  // the amount of nursery used) in cases where we collect before the nursery is
  // full.
  double fractionPromoted =
      double(previousGC.tenuredBytes) / double(previousGC.nurseryCapacity);

  // Calculate the duty factor, the fraction of time spent collecting the
  // nursery.
  double dutyFactor = 0.0;
  TimeDuration collectorTime = now - collectionStartTime();
  if (hasRecentGrowthData && !js::SupportDifferentialTesting()) {
    TimeDuration totalTime = now - lastCollectionEndTime();
    dutyFactor = collectorTime.ToSeconds() / totalTime.ToSeconds();
  }

  // Calculate a growth factor to try to achieve target promotion rate and duty
  // factor goals.
  static const double PromotionGoal = 0.02;
  static const double DutyFactorGoal = 0.01;
  double promotionGrowth = fractionPromoted / PromotionGoal;
  double dutyGrowth = dutyFactor / DutyFactorGoal;
  double growthFactor = std::max(promotionGrowth, dutyGrowth);

#ifndef DEBUG
  // In optimized builds, decrease the growth factor to try to keep collections
  // shorter than a target maximum time. Don't do this during page load.
  //
  // Debug builds are so much slower and more unpredictable that doing this
  // would cause very different nursery behaviour to an equivalent release
  // build.
  double maxTimeGoalMS = tunables().nurseryMaxTimeGoalMS().ToMilliseconds();
  if (!gc->isInPageLoad() && maxTimeGoalMS != 0.0 &&
      !js::SupportDifferentialTesting()) {
    double timeGrowth = maxTimeGoalMS / collectorTime.ToMilliseconds();
    growthFactor = std::min(growthFactor, timeGrowth);
  }
#endif

  // Limit the range of the growth factor to prevent transient high promotion
  // rates from affecting the nursery size too far into the future.
  static const double GrowthRange = 2.0;
  bool wasClamped = ClampDouble(&growthFactor, 1.0 / GrowthRange, GrowthRange);

  // Calculate the target size based on data from this collection.
  double target = double(capacity()) * growthFactor;

  // Use exponential smoothing on the target size to take into account data from
  // recent previous collections.
  if (hasRecentGrowthData &&
      now - lastCollectionEndTime() < TimeDuration::FromMilliseconds(200) &&
      !js::SupportDifferentialTesting()) {
    // Pay more attention to large changes.
    double fraction = wasClamped ? 0.5 : 0.25;
    smoothedTargetSize =
        (1 - fraction) * smoothedTargetSize + fraction * target;
  } else {
    smoothedTargetSize = target;
  }
  hasRecentGrowthData = true;

  // Leave size untouched if we are close to the target.
  static const double GoalWidth = 1.5;
  growthFactor = smoothedTargetSize / double(capacity());
  if (growthFactor > (1.0 / GoalWidth) && growthFactor < GoalWidth) {
    return capacity();
  }

  return roundSize(size_t(smoothedTargetSize));
}

void js::Nursery::clearRecentGrowthData() {
  // Differential testing requires deterministic sizing, so growth data is
  // never recorded or cleared in that mode.
  if (js::SupportDifferentialTesting()) {
    return;
  }

  hasRecentGrowthData = false;
  smoothedTargetSize = 0.0;
}

// Round a requested size to chunk granularity (for sizes of at least one
// chunk) or page granularity (sub-chunk mode).
/* static */
size_t js::Nursery::roundSize(size_t size) {
  size_t step = size >= ChunkSize ? ChunkSize : SystemPageSize();
  return Round(size, step);
}

void js::Nursery::growAllocableSpace(size_t newCapacity) {
  MOZ_ASSERT_IF(!isSubChunkMode(), newCapacity > currentChunk() * ChunkSize);
  MOZ_ASSERT(newCapacity <= maxSpaceSize());
  MOZ_ASSERT(newCapacity > capacity());

  // Ensure the decommit task can queue all chunks; bail without resizing if
  // not (growing is best-effort).
  size_t nchunks =
      RequiredChunkCount(newCapacity) * SemispaceSizeFactor(semispaceEnabled_);
  if (!decommitTask->reserveSpaceForChunks(nchunks)) {
    return;
  }

  if (isSubChunkMode()) {
    if (!toSpace.commitSubChunkRegion(capacity(), newCapacity) ||
        (semispaceEnabled_ &&
         !fromSpace.commitSubChunkRegion(capacity(), newCapacity))) {
      return;
    }
  }

  setCapacity(newCapacity);

  toSpace.setCurrentEnd(this);
  if (semispaceEnabled_) {
    fromSpace.setCurrentEnd(this);
  }
}

// Re-commit pages of the first chunk when growing within sub-chunk mode.
// Returns false if the OS refuses the memory.
bool js::Nursery::Space::commitSubChunkRegion(size_t oldCapacity,
                                              size_t newCapacity) {
  MOZ_ASSERT(currentChunk_ == 0);
  MOZ_ASSERT(oldCapacity < ChunkSize);
  MOZ_ASSERT(newCapacity > oldCapacity);

  size_t newChunkEnd = std::min(newCapacity, ChunkSize);

  // The remainder of the chunk may have been decommitted.
  if (!chunks_[0]->markPagesInUseHard(oldCapacity, newChunkEnd)) {
    // The OS won't give us the memory we need, we can't grow.
    return false;
  }

  // The capacity has changed and since we were in sub-chunk mode we need to
  // update the poison values / asan information for the now-valid region of
  // this chunk.
  chunks_[0]->poisonRange(oldCapacity, newChunkEnd, JS_FRESH_NURSERY_PATTERN,
                          MemCheckKind::MakeUndefined);
  return true;
}

// Queue all chunks from firstFreeChunk onwards for decommit and drop them
// from the space.
void js::Nursery::freeChunksFrom(Space& space, const unsigned firstFreeChunk) {
  if (firstFreeChunk >= space.chunks_.length()) {
    return;
  }

  // The loop below may need to skip the first chunk, so we may use this so we
  // can modify it.
  unsigned firstChunkToDecommit = firstFreeChunk;

  if ((firstChunkToDecommit == 0) && isSubChunkMode()) {
    // Part of the first chunk may be hard-decommitted, un-decommit it so that
    // the GC's normal chunk-handling doesn't segfault.
    MOZ_ASSERT(space.currentChunk_ == 0);
    if (!space.chunks_[0]->markPagesInUseHard(capacity_, ChunkSize)) {
      // Free the chunk if we can't allocate its pages.
      UnmapPages(space.chunks_[0], ChunkSize);
      firstChunkToDecommit = 1;
    }
  }

  {
    AutoLockHelperThreadState lock;
    for (size_t i = firstChunkToDecommit; i < space.chunks_.length(); i++) {
      decommitTask->queueChunk(space.chunks_[i], lock);
    }
  }

  space.chunks_.shrinkTo(firstFreeChunk);
}

void js::Nursery::shrinkAllocableSpace(size_t newCapacity) {
  MOZ_ASSERT(!gc->hasZealMode(ZealMode::GenerationalGC));
  MOZ_ASSERT(newCapacity < capacity_);

  if (semispaceEnabled() && usedSpace() >= newCapacity) {
    // Can't shrink below what we've already used.
    return;
  }

  unsigned newCount = HowMany(newCapacity, ChunkSize);
  if (newCount < allocatedChunkCount()) {
    freeChunksFrom(toSpace, newCount);
    freeChunksFrom(fromSpace, newCount);
  }

  size_t oldCapacity = capacity_;
  setCapacity(newCapacity);

  toSpace.setCurrentEnd(this);
  if (semispaceEnabled_) {
    fromSpace.setCurrentEnd(this);
  }

  if (isSubChunkMode()) {
    toSpace.decommitSubChunkRegion(this, oldCapacity, newCapacity);
    if (semispaceEnabled_) {
      fromSpace.decommitSubChunkRegion(this, oldCapacity, newCapacity);
    }
  }
}

// Poison and queue for decommit the now-unused tail of the first chunk when
// shrinking within sub-chunk mode.
void js::Nursery::Space::decommitSubChunkRegion(Nursery* nursery,
                                                size_t oldCapacity,
                                                size_t newCapacity) {
  MOZ_ASSERT(currentChunk_ == 0);
  MOZ_ASSERT(newCapacity < ChunkSize);
  MOZ_ASSERT(newCapacity < oldCapacity);

  size_t oldChunkEnd = std::min(oldCapacity, ChunkSize);
  chunks_[0]->poisonRange(newCapacity, oldChunkEnd, JS_SWEPT_NURSERY_PATTERN,
                          MemCheckKind::MakeNoAccess);

  AutoLockHelperThreadState lock;
  nursery->decommitTask->queueRange(newCapacity, chunks_[0], lock);
}

js::Nursery::Space::Space(gc::ChunkKind kind) : kind(kind) {
  MOZ_ASSERT(kind == ChunkKind::NurseryFromSpace ||
             kind == ChunkKind::NurseryToSpace);
}

// Relabel this space (and every chunk header in it) as to-space or
// from-space, e.g. when the semispaces are swapped.
void js::Nursery::Space::setKind(ChunkKind newKind) {
#ifdef DEBUG
  MOZ_ASSERT(newKind == ChunkKind::NurseryFromSpace ||
             newKind == ChunkKind::NurseryToSpace);
  checkKind(kind);
#endif

  kind = newKind;
  for (NurseryChunk* chunk : chunks_) {
    chunk->kind = newKind;
  }

#ifdef DEBUG
  checkKind(newKind);
#endif
}

#ifdef DEBUG
// Verify the space's kind matches every chunk header's kind.
void js::Nursery::Space::checkKind(ChunkKind expected) const {
  MOZ_ASSERT(kind == expected);
  for (NurseryChunk* chunk : chunks_) {
    MOZ_ASSERT(chunk->getKind() == expected);
  }
}
#endif

#ifdef DEBUG
// Linear search for the index of the chunk at |chunkAddr|; crashes if absent.
size_t js::Nursery::Space::findChunkIndex(uintptr_t chunkAddr) const {
  for (size_t i = 0; i < chunks_.length(); i++) {
    if (uintptr_t(chunks_[i]) == chunkAddr) {
      return i;
    }
  }

  MOZ_CRASH("Nursery chunk not found");
}
#endif

gcstats::Statistics& js::Nursery::stats() const { return gc->stats(); }

MOZ_ALWAYS_INLINE const js::gc::GCSchedulingTunables& js::Nursery::tunables()
    const {
  return gc->tunables;
}

// Sub-chunk mode: the whole nursery fits in (part of) a single chunk.
bool js::Nursery::isSubChunkMode() const {
  return capacity() <= NurseryChunkUsableSize;
}

void js::Nursery::clearMapAndSetNurseryIterators() {
  // Clears the lists of nursery iterators for all Map/Set objects. These lists
  // are cleared at the start of minor GC and rebuilt when iterators are
  // promoted during minor GC.
  for (auto* map : mapsWithNurseryIterators_) {
    map->clearNurseryIteratorsBeforeMinorGC();
  }
  for (auto* set : setsWithNurseryIterators_) {
    set->clearNurseryIteratorsBeforeMinorGC();
  }
}

void js::Nursery::sweepMapAndSetObjects() {
  // This processes all Map and Set objects that are known to have associated
  // iterator objects that are nursery allocated.
  //
  // These objects may die and be finalized or if not their internal state and
  // memory tracking are updated.
  //
  // Finally the lists themselves are rebuilt so as to remove objects that are
  // no longer associated with nursery iterators (because all iterators died or
  // were promoted to the tenured heap).

  auto* gcx = runtime()->gcContext();

  AutoEnterOOMUnsafeRegion oomUnsafe;

  MapObjectVector maps;
  std::swap(mapsWithNurseryIterators_, maps);
  for (auto* mapobj : maps) {
    mapobj = MapObject::sweepAfterMinorGC(gcx, mapobj);
    if (mapobj) {
      if (!mapsWithNurseryIterators_.append(mapobj)) {
        oomUnsafe.crash("sweepAfterMinorGC");
      }
    }
  }

  SetObjectVector sets;
  std::swap(setsWithNurseryIterators_, sets);
  for (auto* setobj : sets) {
    setobj = SetObject::sweepAfterMinorGC(gcx, setobj);
    if (setobj) {
      if (!setsWithNurseryIterators_.append(setobj)) {
        oomUnsafe.crash("sweepAfterMinorGC");
      }
    }
  }
}

void Nursery::traceWeakMaps(TenuringTracer& trc) {
  MOZ_ASSERT(trc.weakMapAction() == JS::WeakMapTraceAction::TraceKeysAndValues);
  weakMapsWithNurseryEntries_.eraseIf(
      [&](WeakMapBase* wm) { return wm->traceNurseryEntriesOnMinorGC(&trc); });
}

void js::Nursery::sweepWeakMaps() {
  // This sweeps all weak maps that contain nursery keys to remove entries for
  // keys that have not survived. Nursery values in weak maps are always
  // promoted.

  // Don't update retained size for weak maps here.
  AutoSetThreadGCUse setUse(runtime()->gcContext(), GCUse::Unspecified);

  weakMapsWithNurseryEntries_.eraseIf(
      [&](WeakMapBase* wm) { return wm->sweepAfterMinorGC(); });
}

void js::Nursery::joinSweepTask() { sweepTask->join(); }

void js::Nursery::joinDecommitTask() { decommitTask->join(); }

#ifdef DEBUG
bool js::Nursery::sweepTaskIsIdle() { return sweepTask->isIdle(); }
#endif