tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Allocator.cpp (23375B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "gc/Allocator.h"
      8 
      9 #include "mozilla/OperatorNewExtensions.h"
     10 #include "mozilla/TimeStamp.h"
     11 
     12 #include "gc/GCInternals.h"
     13 #include "gc/GCLock.h"
     14 #include "gc/GCProbes.h"
     15 #include "gc/Nursery.h"
     16 #include "threading/CpuCount.h"
     17 #include "util/Poison.h"
     18 #include "vm/BigIntType.h"
     19 #include "vm/FrameIter.h"
     20 #include "vm/Runtime.h"
     21 #include "vm/StringType.h"
     22 
     23 #include "gc/ArenaList-inl.h"
     24 #include "gc/Heap-inl.h"
     25 #include "gc/PrivateIterators-inl.h"
     26 #include "vm/JSContext-inl.h"
     27 #include "vm/JSScript-inl.h"
     28 
     29 using mozilla::TimeStamp;
     30 
     31 using namespace js;
     32 using namespace js::gc;
     33 
     34 // Return a Heap value that can be compared numerically with an
     35 // allocation's requested heap to determine whether to allocate in the nursery
     36 // or the tenured heap.
     37 //
     38 // If nursery allocation is allowed this returns Heap::Tenured, meaning only
     39 // Heap::Tenured allocations will be tenured. If nursery allocation is not
     40 // allowed this returns Heap::Default, meaning all allocations are tenured.
     41 static Heap MinHeapToTenure(bool allowNurseryAlloc) {
     42  static_assert(Heap::Tenured > Heap::Default);
     43  return allowNurseryAlloc ? Heap::Tenured : Heap::Default;
     44 }
     45 
     46 void Zone::setNurseryAllocFlags(bool allocObjects, bool allocStrings,
     47                                bool allocBigInts, bool allocGetterSetters) {
     48  allocNurseryObjects_ = allocObjects;
     49  allocNurseryStrings_ = allocStrings;
     50  allocNurseryBigInts_ = allocBigInts;
     51  allocNurseryGetterSetters_ = allocGetterSetters;
     52 
     53  minObjectHeapToTenure_ = MinHeapToTenure(allocNurseryObjects());
     54  minStringHeapToTenure_ = MinHeapToTenure(allocNurseryStrings());
     55  minBigintHeapToTenure_ = MinHeapToTenure(allocNurseryBigInts());
     56  minGetterSetterHeapToTenure_ = MinHeapToTenure(allocNurseryGetterSetters());
     57 }
     58 
// Explicitly instantiate AllocNurseryOrTenuredCell for every trace kind that
// supports nursery allocation, in both NoGC and CanGC flavors.
#define INSTANTIATE_ALLOC_NURSERY_CELL(traceKind, allowGc)          \
  template void*                                                    \
  gc::CellAllocator::AllocNurseryOrTenuredCell<traceKind, allowGc>( \
      JSContext*, AllocKind, size_t, gc::Heap, AllocSite*);
INSTANTIATE_ALLOC_NURSERY_CELL(JS::TraceKind::Object, NoGC)
INSTANTIATE_ALLOC_NURSERY_CELL(JS::TraceKind::Object, CanGC)
INSTANTIATE_ALLOC_NURSERY_CELL(JS::TraceKind::String, NoGC)
INSTANTIATE_ALLOC_NURSERY_CELL(JS::TraceKind::String, CanGC)
INSTANTIATE_ALLOC_NURSERY_CELL(JS::TraceKind::BigInt, NoGC)
INSTANTIATE_ALLOC_NURSERY_CELL(JS::TraceKind::BigInt, CanGC)
INSTANTIATE_ALLOC_NURSERY_CELL(JS::TraceKind::GetterSetter, NoGC)
INSTANTIATE_ALLOC_NURSERY_CELL(JS::TraceKind::GetterSetter, CanGC)
#undef INSTANTIATE_ALLOC_NURSERY_CELL
     72 
// Attempt to allocate a new cell in the nursery. If there is not enough room in
// the nursery or there is an OOM, this method will return nullptr.
template <AllowGC allowGC>
/* static */
MOZ_NEVER_INLINE void* CellAllocator::RetryNurseryAlloc(JSContext* cx,
                                                        JS::TraceKind traceKind,
                                                        AllocKind allocKind,
                                                        size_t thingSize,
                                                        AllocSite* site) {
  MOZ_ASSERT(cx->isNurseryAllocAllowed());

  Zone* zone = site->zone();
  MOZ_ASSERT(!zone->isAtomsZone());
  MOZ_ASSERT(zone->allocKindInNursery(traceKind));

  // Give the nursery a chance to handle the failure without a collection
  // (NO_REASON means it did); in that case the retry must succeed.
  Nursery& nursery = cx->nursery();
  JS::GCReason reason = nursery.handleAllocationFailure();
  if (reason == JS::GCReason::NO_REASON) {
    void* ptr = nursery.tryAllocateCell(site, thingSize, traceKind);
    MOZ_ASSERT(ptr);
    return ptr;
  }

  // Our most common non-jit allocation path is NoGC; thus, if we fail the
  // alloc and cannot GC, we *must* return nullptr here so that the caller
  // will do a CanGC allocation to clear the nursery. Failing to do so will
  // cause all allocations on this path to land in Tenured, and we will not
  // get the benefit of the nursery.
  if constexpr (!allowGC) {
    return nullptr;
  }

  if (!cx->suppressGC) {
    cx->runtime()->gc.minorGC(reason);

    // Exceeding gcMaxBytes while tenuring can disable the Nursery.
    if (zone->allocKindInNursery(traceKind)) {
      void* ptr = cx->nursery().allocateCell(site, thingSize, traceKind);
      if (ptr) {
        return ptr;
      }
    }
  }

  // As a final fallback, allocate the cell in the tenured heap.
  return AllocTenuredCellForNurseryAlloc<allowGC>(cx, allocKind);
}

// Explicit instantiations for the NoGC and CanGC variants.
template void* CellAllocator::RetryNurseryAlloc<NoGC>(JSContext* cx,
                                                      JS::TraceKind traceKind,
                                                      AllocKind allocKind,
                                                      size_t thingSize,
                                                      AllocSite* site);
template void* CellAllocator::RetryNurseryAlloc<CanGC>(JSContext* cx,
                                                       JS::TraceKind traceKind,
                                                       AllocKind allocKind,
                                                       size_t thingSize,
                                                       AllocSite* site);
    131 
    132 static inline void MajorGCIfRequested(JSContext* cx) {
    133  // Invoking the interrupt callback can fail and we can't usefully
    134  // handle that here. Just check in case we need to collect instead.
    135  if (cx->hasPendingInterrupt(InterruptReason::MajorGC)) {
    136    cx->runtime()->gc.gcIfRequested();
    137  }
    138 }
    139 
// Tenured-heap fallback used when a nursery allocation could not be
// satisfied. With CanGC this may first run a pending major GC.
template <AllowGC allowGC>
MOZ_NEVER_INLINE void* gc::CellAllocator::AllocTenuredCellForNurseryAlloc(
    JSContext* cx, gc::AllocKind kind) {
  if constexpr (allowGC) {
    MajorGCIfRequested(cx);
  }

  return AllocTenuredCellUnchecked<allowGC>(cx->zone(), kind);
}
template void* gc::CellAllocator::AllocTenuredCellForNurseryAlloc<NoGC>(
    JSContext*, AllocKind);
template void* gc::CellAllocator::AllocTenuredCellForNurseryAlloc<CanGC>(
    JSContext*, AllocKind);
    153 
    154 #ifdef DEBUG
    155 static bool IsAtomsZoneKind(AllocKind kind) {
    156  return kind == AllocKind::ATOM || kind == AllocKind::FAT_INLINE_ATOM ||
    157         kind == AllocKind::SYMBOL;
    158 }
    159 #endif
    160 
// Allocate a cell of a kind that is never nursery-allocated directly in the
// tenured heap. Returns nullptr on (possibly simulated) failure; with CanGC
// a last-ditch GC may be attempted first (see AllocTenuredCellUnchecked).
template <AllowGC allowGC>
void* gc::CellAllocator::AllocTenuredCell(JSContext* cx, gc::AllocKind kind) {
  MOZ_ASSERT(!IsNurseryAllocable(kind));
  // Atoms-zone kinds (plus JIT code) may only be allocated in the atoms
  // zone, and vice versa.
  MOZ_ASSERT_IF(cx->zone()->isAtomsZone(),
                IsAtomsZoneKind(kind) || kind == AllocKind::JITCODE);
  MOZ_ASSERT_IF(!cx->zone()->isAtomsZone(), !IsAtomsZoneKind(kind));
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));

  if constexpr (allowGC) {
    PreAllocGCChecks(cx);
  }

  // Optionally fail here to simulate allocation failure.
  if (!CheckForSimulatedFailure(cx, allowGC)) {
    return nullptr;
  }

  if constexpr (allowGC) {
    MajorGCIfRequested(cx);
  }

  return AllocTenuredCellUnchecked<allowGC>(cx->zone(), kind);
}
template void* gc::CellAllocator::AllocTenuredCell<NoGC>(JSContext*, AllocKind);
template void* gc::CellAllocator::AllocTenuredCell<CanGC>(JSContext*,
                                                          AllocKind);
    186 
// Shared tail of all tenured allocation paths: allocate without GC checks,
// trying the zone's free lists first and refilling them on a miss.
template <AllowGC allowGC>
/* static */
void* CellAllocator::AllocTenuredCellUnchecked(JS::Zone* zone, AllocKind kind) {
  // Bump allocate in the arena's current free-list span.
  void* ptr = zone->arenas.freeLists().allocate(kind);
  if (MOZ_UNLIKELY(!ptr)) {
    // Get the next available free list and allocate out of it. This may acquire
    // a new arena, which will lock the chunk list. If there are no chunks
    // available it may also allocate new memory directly.
    ptr = GCRuntime::refillFreeList(zone, kind);

    if (MOZ_UNLIKELY(!ptr)) {
      // Refill failed. With CanGC, attempt a last-ditch GC and retry once.
      if constexpr (allowGC) {
        return RetryTenuredAlloc(zone, kind);
      }

      return nullptr;
    }
  }

#ifdef DEBUG
  CheckIncrementalZoneState(zone, ptr);
#endif

  gcprobes::TenuredAlloc(ptr, kind);

  // We count this regardless of the profiler's state, assuming that it costs
  // just as much to count it, as to check the profiler's state and decide not
  // to count it.
  zone->noteTenuredAlloc();

  return ptr;
}
template void* CellAllocator::AllocTenuredCellUnchecked<NoGC>(JS::Zone* zone,
                                                              AllocKind kind);
template void* CellAllocator::AllocTenuredCellUnchecked<CanGC>(JS::Zone* zone,
                                                               AllocKind kind);
    224 /* static */
    225 MOZ_NEVER_INLINE void* CellAllocator::RetryTenuredAlloc(JS::Zone* zone,
    226                                                        AllocKind kind) {
    227  JSRuntime* runtime = zone->runtimeFromMainThread();
    228  runtime->gc.attemptLastDitchGC();
    229 
    230  void* ptr = AllocTenuredCellUnchecked<NoGC>(zone, kind);
    231  if (!ptr) {
    232    ReportOutOfMemory(runtime->mainContextFromOwnThread());
    233    return nullptr;
    234  }
    235 
    236  return ptr;
    237 }
    238 
    239 void GCRuntime::attemptLastDitchGC() {
    240  // Either there was no memory available for a new chunk or the heap hit its
    241  // size limit. Try to perform an all-compartments, non-incremental, shrinking
    242  // GC and wait for it to finish.
    243 
    244  if (!lastLastDitchTime.IsNull() &&
    245      TimeStamp::Now() - lastLastDitchTime <= tunables.minLastDitchGCPeriod()) {
    246    return;
    247  }
    248 
    249  JS::PrepareForFullGC(rt->mainContextFromOwnThread());
    250  gc(JS::GCOptions::Shrink, JS::GCReason::LAST_DITCH);
    251  waitBackgroundAllocEnd();
    252  waitBackgroundFreeEnd();
    253 
    254  lastLastDitchTime = mozilla::TimeStamp::Now();
    255 }
    256 
#ifdef JS_GC_ZEAL

// When the generateMissingAllocSites tunable is enabled, try to replace the
// zone's catch-all 'unknown' alloc site with one tied to the current baseline
// script and pc, so the allocation is attributed to a specific site. Returns
// |site| unchanged whenever a specific site cannot be created.
/* static */
AllocSite* CellAllocator::MaybeGenerateMissingAllocSite(JSContext* cx,
                                                        JS::TraceKind traceKind,
                                                        AllocSite* site) {
  MOZ_ASSERT(site);

  if (!cx->runtime()->gc.tunables.generateMissingAllocSites()) {
    return site;
  }

  // Only the catch-all 'unknown' site needs replacing.
  if (!site->isUnknown()) {
    return site;
  }

  if (cx->inUnsafeCallWithABI) {
    return site;
  }

  // Attribute the allocation to the innermost frame, if it is a baseline
  // frame.
  FrameIter frame(cx);
  if (frame.done() || !frame.isBaseline()) {
    return site;
  }

  MOZ_ASSERT(site == cx->zone()->unknownAllocSite(traceKind));
  MOZ_ASSERT(frame.hasScript());

  JSScript* script = frame.script();
  if (cx->zone() != script->zone()) {
    return site;  // Skip cross-zone allocation.
  }

  // The pc offset must fit within AllocSite's encoding.
  uint32_t pcOffset = script->pcToOffset(frame.pc());
  if (!script->hasBaselineScript() || pcOffset > AllocSite::MaxValidPCOffset) {
    return site;
  }

  AllocSite* missingSite =
      GetOrCreateMissingAllocSite(cx, script, pcOffset, traceKind);
  if (!missingSite) {
    return site;
  }

  return missingSite;
}

#endif  // JS_GC_ZEAL
    305 
#ifdef DEBUG
// Verify the mark-bit invariant for a freshly allocated tenured cell: cells
// allocated while the zone is marking or sweeping must already be marked
// black (see Arena::arenaAllocatedDuringGC); otherwise they must be unmarked.
/* static */
void CellAllocator::CheckIncrementalZoneState(JS::Zone* zone, void* ptr) {
  MOZ_ASSERT(ptr);
  TenuredCell* cell = reinterpret_cast<TenuredCell*>(ptr);
  ArenaChunkBase* chunk = detail::GetCellChunkBase(cell);
  if (zone->isGCMarkingOrSweeping()) {
    MOZ_ASSERT(chunk->markBits.isMarkedBlack(cell));
  } else {
    MOZ_ASSERT(!chunk->markBits.isMarkedAny(cell));
  }
}
#endif
    319 
// Allocate a tenured cell while a GC is in progress. Failure here is not
// recoverable, so this crashes on OOM instead of returning nullptr.
void* js::gc::AllocateTenuredCellInGC(Zone* zone, AllocKind thingKind) {
  void* ptr = zone->arenas.allocateFromFreeList(thingKind);
  if (!ptr) {
    AutoEnterOOMUnsafeRegion oomUnsafe;
    ptr = GCRuntime::refillFreeListInGC(zone, thingKind);
    if (!ptr) {
      oomUnsafe.crash(ChunkSize, "Failed to allocate new chunk during GC");
    }
  }
  return ptr;
}
    331 
    332 // ///////////  Arena -> Thing Allocator  //////////////////////////////////////
    333 
// Start the background chunk-allocation task unless it is already running.
void GCRuntime::startBackgroundAllocTaskIfIdle() {
  AutoLockHelperThreadState lock;
  if (!allocTask.wasStarted(lock)) {
    // Join the previous invocation of the task. This will return immediately
    // if the thread has never been started.
    allocTask.joinWithLockHeld(lock);
    allocTask.startWithLockHeld(lock);
  }
}
    343 
// Refill the zone's empty free list for |thingKind| and allocate from it.
// Main-thread, outside-GC path: heap-size thresholds are checked and the
// allocation does not stall waiting for memory.
/* static */
void* GCRuntime::refillFreeList(JS::Zone* zone, AllocKind thingKind) {
  MOZ_ASSERT(zone->arenas.freeLists().isEmpty(thingKind));

  // It should not be possible to allocate on the main thread while we are
  // inside a GC.
  MOZ_ASSERT(!JS::RuntimeHeapIsCollecting(), "allocating while under GC");

  return zone->arenas.refillFreeListAndAllocate(
      thingKind, ShouldCheckThresholds::CheckThresholds, StallAndRetry::No);
}
    355 
// Variant of refillFreeList used while a GC is in progress: thresholds are
// ignored and the allocation may stall and retry, since it must succeed.
/* static */
void* GCRuntime::refillFreeListInGC(Zone* zone, AllocKind thingKind) {
  // Called when tenuring nursery cells and during compacting GC.
  MOZ_ASSERT_IF(!JS::RuntimeHeapIsMinorCollecting(),
                !zone->runtimeFromMainThread()->gc.isBackgroundSweeping());

  // Since this needs to succeed we pass StallAndRetry::Yes.
  return zone->arenas.refillFreeListAndAllocate(
      thingKind, ShouldCheckThresholds::DontCheckThresholds,
      StallAndRetry::Yes);
}
    367 
// Slow path of tenured allocation: find an arena with free cells for
// |thingKind| — from the main arena list, from freshly background-swept
// arenas, or by allocating a new arena from a chunk — install it as the free
// list and allocate from it. Returns nullptr if no memory could be obtained.
void* ArenaLists::refillFreeListAndAllocate(
    AllocKind thingKind, ShouldCheckThresholds checkThresholds,
    StallAndRetry stallAndRetry) {
  MOZ_ASSERT(freeLists().isEmpty(thingKind));

  GCRuntime* gc = &runtimeFromAnyThread()->gc;

retry_loop:
  Arena* arena = arenaList(thingKind).takeInitialNonFullArena();
  if (arena) {
    // Empty arenas should be immediately freed.
    MOZ_ASSERT(!arena->isEmpty());
    return freeLists().setArenaAndAllocate(arena, thingKind);
  }

  // If we have just finished background sweep then merge the swept arenas in
  // and retry.
  if (MOZ_UNLIKELY(concurrentUse(thingKind) ==
                   ConcurrentUse::BackgroundFinalizeFinished)) {
    ArenaList sweptArenas;
    {
      AutoLockGC lock(gc);
      sweptArenas = std::move(collectingArenaList(thingKind));
    }
    // Resetting concurrentUse here means this branch is taken at most once,
    // so the retry cannot loop forever.
    concurrentUse(thingKind) = ConcurrentUse::None;
    if (!sweptArenas.isEmpty()) {
      mergeSweptArenas(thingKind, sweptArenas);
      goto retry_loop;
    }
  }

  // Use the current chunk if set.
  ArenaChunk* chunk = gc->currentChunk_;
  MOZ_ASSERT_IF(chunk, gc->isCurrentChunk(chunk));

  if (!chunk) {
    // The chunk lists can be accessed by background sweeping and background
    // chunk allocation. Take the GC lock to synchronize access.
    AutoLockGCBgAlloc lock(gc);

    chunk = gc->pickChunk(stallAndRetry, lock);
    if (!chunk) {
      return nullptr;
    }

    gc->setCurrentChunk(chunk, lock);
  }

  MOZ_ASSERT(gc->isCurrentChunk(chunk));

  // Although our chunk should definitely have enough space for another arena,
  // there are other valid reasons why ArenaChunk::allocateArena() may fail.
  arena = gc->allocateArena(chunk, zone_, thingKind, checkThresholds);
  if (!arena) {
    return nullptr;
  }

  arena->init(gc, zone_, thingKind);

  // Link the new arena into the main arena list for this kind.
  ArenaList& al = arenaList(thingKind);
  MOZ_ASSERT(!al.hasNonFullArenas());
  al.pushBack(arena);

  return freeLists().setArenaAndAllocate(arena, thingKind);
}
    433 
// Make |arena| the free-list source for |kind| and bump-allocate the first
// cell from it. The arena is known to have free space, so this is infallible.
inline void* FreeLists::setArenaAndAllocate(Arena* arena, AllocKind kind) {
#ifdef DEBUG
  // The free list being replaced must not contain marked free cells.
  auto* old = freeLists_[kind];
  if (!old->isEmpty()) {
    old->getArena()->checkNoMarkedFreeCells();
  }
#endif

  FreeSpan* span = arena->getFirstFreeSpan();
  freeLists_[kind] = span;

  // Preserve the incremental-GC invariant that cells allocated during marking
  // or sweeping start out marked black.
  Zone* zone = arena->zone();
  if (MOZ_UNLIKELY(zone->isGCMarkingOrSweeping())) {
    arena->arenaAllocatedDuringGC();
  }

  TenuredCell* thing = span->allocate(Arena::thingSize(kind));
  MOZ_ASSERT(thing);  // This allocation is infallible.

  return thing;
}
    455 
void Arena::arenaAllocatedDuringGC() {
  // Ensure that anything allocated during the mark or sweep phases of an
  // incremental GC will be marked black by pre-marking all free cells in the
  // arena we are about to allocate from.

  MOZ_ASSERT(zone()->isGCMarkingOrSweeping());
  // Only free cells are visited; already-allocated cells keep whatever mark
  // state they have.
  for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
    MOZ_ASSERT(!cell->isMarkedAny());
    cell->markBlack();
  }
}
    467 
    468 // ///////////  ArenaChunk -> Arena Allocator  /////////////////////////////////
    469 
    470 bool GCRuntime::wantBackgroundAllocation(const AutoLockGC& lock) const {
    471  // To minimize memory waste, we do not want to run the background chunk
    472  // allocation if we already have some empty chunks or when the runtime has
    473  // a small heap size (and therefore likely has a small growth rate).
    474  return allocTask.enabled() &&
    475         emptyChunks(lock).count() < minEmptyChunkCount(lock) &&
    476         (fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
    477 }
    478 
// Allocate a new arena but don't initialize it.
// Returns nullptr when |checkThresholds| is set and the heap has reached its
// configured maximum size.
Arena* GCRuntime::allocateArena(ArenaChunk* chunk, Zone* zone,
                                AllocKind thingKind,
                                ShouldCheckThresholds checkThresholds) {
  MOZ_ASSERT(chunk->hasAvailableArenas());

  // Fail the allocation if we are over our heap size limits.
  if ((checkThresholds != ShouldCheckThresholds::DontCheckThresholds) &&
      (heapSize.bytes() >= tunables.gcMaxBytes())) {
    return nullptr;
  }

  Arena* arena = chunk->allocateArena(this, zone, thingKind);

  // Account the new arena against the zone's GC heap size.
  zone->gcHeapSize.addGCArena(heapSize);

  // Trigger an incremental slice if needed.
  if (checkThresholds != ShouldCheckThresholds::DontCheckThresholds) {
    maybeTriggerGCAfterAlloc(zone);
  }

  return arena;
}
    502 
// Take a free arena out of this chunk, committing a fresh page of arenas
// first if none are committed. The chunk must be the GC's current chunk and
// must have arenas available.
Arena* ArenaChunk::allocateArena(GCRuntime* gc, Zone* zone,
                                 AllocKind thingKind) {
  MOZ_ASSERT(info.isCurrentChunk);
  MOZ_ASSERT(hasAvailableArenas());

  if (info.numArenasFreeCommitted == 0) {
    commitOnePage(gc);
    MOZ_ASSERT(info.numArenasFreeCommitted == ArenasPerPage);
  }

  MOZ_ASSERT(info.numArenasFreeCommitted > 0);
  Arena* arena = fetchNextFreeArena(gc);

  // Update the current-chunk bookkeeping now that an arena has been taken.
  updateCurrentChunkAfterAlloc(gc);

  return arena;
}
    520 
// Commit one page's worth of decommitted arenas, making them available for
// allocation. Only called when no committed free arenas remain.
void ArenaChunk::commitOnePage(GCRuntime* gc) {
  MOZ_ASSERT(info.numArenasFreeCommitted == 0);
  MOZ_ASSERT(info.numArenasFree >= ArenasPerPage);

  // Pick the first decommitted page in the chunk.
  uint32_t pageIndex = decommittedPages.FindFirst();
  MOZ_ASSERT(pageIndex < PagesPerChunk);
  MOZ_ASSERT(decommittedPages[pageIndex]);

  if (DecommitEnabled()) {
    MarkPagesInUseSoft(pageAddress(pageIndex), PageSize);
  }

  decommittedPages[pageIndex] = false;

  // Mark every arena on the page as free and committed.
  for (size_t i = 0; i < ArenasPerPage; i++) {
    size_t arenaIndex = pageToArenaIndex(pageIndex) + i;
    MOZ_ASSERT(!freeCommittedArenas[arenaIndex]);
    freeCommittedArenas[arenaIndex] = true;
    ++info.numArenasFreeCommitted;
  }

  verify();
}
    544 
// Remove and return the first free committed arena in this chunk. Requires at
// least one committed free arena.
Arena* ArenaChunk::fetchNextFreeArena(GCRuntime* gc) {
  MOZ_ASSERT(info.numArenasFreeCommitted > 0);
  MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);

  size_t index = freeCommittedArenas.FindFirst();
  MOZ_ASSERT(index < ArenasPerChunk);

  MOZ_ASSERT(freeCommittedArenas[index]);
  freeCommittedArenas[index] = false;

  // NOTE(review): the numArenasFree* counters are not adjusted here — this
  // appears to happen in the caller's bookkeeping; confirm.
  return &arenas[index];
}
    557 
    558 // ///////////  System -> ArenaChunk Allocator  ////////////////////////////////
    559 
// Return a chunk ready for use, preferring a cached empty chunk and falling
// back to mapping a new one. May also kick off background chunk allocation.
// Returns nullptr only if new memory could not be mapped.
ArenaChunk* GCRuntime::getOrAllocChunk(StallAndRetry stallAndRetry,
                                       AutoLockGCBgAlloc& lock) {
  ArenaChunk* chunk;
  if (!emptyChunks(lock).empty()) {
    chunk = emptyChunks(lock).head();
    // Reinitialize ChunkBase; arenas are all free and may or may not be
    // committed.
    SetMemCheckKind(chunk, sizeof(ChunkBase), MemCheckKind::MakeUndefined);
    chunk->initBaseForArenaChunk(rt);
    MOZ_ASSERT(chunk->isEmpty());
    emptyChunks(lock).remove(chunk);
  } else {
    void* ptr = ArenaChunk::allocate(this, stallAndRetry);
    if (!ptr) {
      return nullptr;
    }

    chunk = ArenaChunk::init(ptr, this, /* allMemoryCommitted = */ true);
  }

  // Top up the chunk supply in the background if it is running low.
  if (wantBackgroundAllocation(lock)) {
    lock.tryToStartBackgroundAllocation();
  }

  MOZ_ASSERT(chunk);
  return chunk;
}
    587 
// Return an empty chunk to the empty-chunk pool for later reuse.
void GCRuntime::recycleChunk(ArenaChunk* chunk, const AutoLockGC& lock) {
#ifdef DEBUG
  MOZ_ASSERT(chunk->isEmpty());
  MOZ_ASSERT(!chunk->info.isCurrentChunk);
  chunk->verify();
#endif

  // Poison ChunkBase to catch use after free.
  AlwaysPoison(chunk, JS_FREED_CHUNK_PATTERN, sizeof(ChunkBase),
               MemCheckKind::MakeNoAccess);

  emptyChunks(lock).push(chunk);
}
    601 
    602 ArenaChunk* GCRuntime::pickChunk(StallAndRetry stallAndRetry,
    603                                 AutoLockGCBgAlloc& lock) {
    604  if (availableChunks(lock).count()) {
    605    ArenaChunk* chunk = availableChunks(lock).head();
    606    availableChunks(lock).remove(chunk);
    607    return chunk;
    608  }
    609 
    610  ArenaChunk* chunk = getOrAllocChunk(stallAndRetry, lock);
    611  if (!chunk) {
    612    return nullptr;
    613  }
    614 
    615 #ifdef DEBUG
    616  chunk->verify();
    617  MOZ_ASSERT(chunk->isEmpty());
    618 #endif
    619 
    620  return chunk;
    621 }
    622 
// Task that allocates chunks into |pool| off the main thread. Disabled when
// extra threads can't be used or fewer than two CPUs are available.
BackgroundAllocTask::BackgroundAllocTask(GCRuntime* gc, ChunkPool& pool)
    : GCParallelTask(gc, gcstats::PhaseKind::NONE),
      chunkPool_(pool),
      enabled_(CanUseExtraThreads() && GetCPUCount() >= 2) {
  // This can occur outside GCs so doesn't have a stats phase.
}
    629 
// Task body: keep mapping and initializing new chunks into the pool for as
// long as background allocation is wanted and the task isn't cancelled.
void BackgroundAllocTask::run(AutoLockHelperThreadState& lock) {
  AutoUnlockHelperThreadState unlock(lock);

  AutoLockGC gcLock(gc);
  while (!isCancelled() && gc->wantBackgroundAllocation(gcLock)) {
    ArenaChunk* chunk;
    {
      // Drop the GC lock around the slow work of mapping and initializing
      // the chunk.
      AutoUnlockGC unlock(gcLock);
      void* ptr = ArenaChunk::allocate(gc, StallAndRetry::No);
      if (!ptr) {
        break;
      }
      chunk = ArenaChunk::init(ptr, gc, /* allMemoryCommitted = */ true);
    }
    chunkPool_.ref().push(chunk);
  }
}
    647 
    648 /* static */
    649 void* ArenaChunk::allocate(GCRuntime* gc, StallAndRetry stallAndRetry) {
    650  void* chunk = MapAlignedPages(ChunkSize, ChunkSize, stallAndRetry);
    651  if (!chunk) {
    652    return nullptr;
    653  }
    654 
    655  gc->stats().count(gcstats::COUNT_NEW_CHUNK);
    656  return chunk;
    657 }
    658 
    659 static inline bool ShouldDecommitNewChunk(bool allMemoryCommitted,
    660                                          const GCSchedulingState& state) {
    661  if (!DecommitEnabled()) {
    662    return false;
    663  }
    664 
    665  return !allMemoryCommitted || !state.inHighFrequencyGCMode();
    666 }
    667 
// Initialize freshly mapped memory as an ArenaChunk. Depending on scheduling
// state, the arenas either stay committed or are decommitted until needed
// (see ShouldDecommitNewChunk).
ArenaChunk* ArenaChunk::init(void* ptr, GCRuntime* gc,
                             bool allMemoryCommitted) {
  /* The chunk may still have some regions marked as no-access. */
  MOZ_MAKE_MEM_UNDEFINED(ptr, ChunkSize);

  /*
   * Poison the chunk. Note that decommitAllArenas() below will mark the
   * arenas as inaccessible (for memory sanitizers).
   */
  Poison(ptr, JS_FRESH_TENURED_PATTERN, ChunkSize, MemCheckKind::MakeUndefined);

  ArenaChunk* chunk = new (mozilla::KnownNotNull, ptr) ArenaChunk(gc->rt);

  if (ShouldDecommitNewChunk(allMemoryCommitted, gc->schedulingState)) {
    // Decommit the arenas. We do this after poisoning so that if the OS does
    // not have to recycle the pages, we still get the benefit of poisoning.
    chunk->decommitAllArenas();
  } else {
    // Keep all arenas committed and ready for immediate use.
    chunk->initAsCommitted();
  }

  MOZ_ASSERT(chunk->isEmpty());
  chunk->verify();

  return chunk;
}
    694 
// Softly release the chunk's arena memory back to the OS and record all
// arenas as decommitted. The chunk must be empty.
void ArenaChunk::decommitAllArenas() {
  MOZ_ASSERT(isEmpty());
  MarkPagesUnusedSoft(&arenas[0], ArenasPerChunk * ArenaSize);
  initAsDecommitted();
}
    700 
// Reset the chunk's bookkeeping so all arenas are free with no pages
// committed.
void ArenaChunkBase::initAsDecommitted() {
  // Set the state of all arenas to free and decommitted. They might not
  // actually be decommitted, but in that case the re-commit operation is a
  // no-op so it doesn't matter.
  decommittedPages.SetAll();
  freeCommittedArenas.ResetAll();
  info.numArenasFree = ArenasPerChunk;
  info.numArenasFreeCommitted = 0;
}
    710 
// Reset the chunk's bookkeeping so all arenas are free with every page
// committed.
void ArenaChunkBase::initAsCommitted() {
  // Set the state of all arenas to free and committed. They might have
  // been decommitted, but in that case the re-commit operation is a
  // no-op so it doesn't matter.
  decommittedPages.ResetAll();
  freeCommittedArenas.SetAll();
  info.numArenasFree = ArenasPerChunk;
  info.numArenasFreeCommitted = ArenasPerChunk;
}