tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Heap.cpp (21658B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 /*
      8 * Tenured heap management.
      9 *
     10 * This file contains method definitions for the following classes for code that
     11 * is not specific to a particular phase of GC:
     12 *
     13 *  - Arena
     14 *  - ArenaList
     15 *  - FreeLists
     16 *  - ArenaLists
     17 *  - ArenaChunk
     18 *  - ChunkPool
     19 */
     20 
#include "gc/Heap-inl.h"

#include <utility>

#include "gc/GCLock.h"
#include "gc/Memory.h"
#include "jit/Assembler.h"
#include "threading/Thread.h"
#include "vm/BigIntType.h"
#include "vm/RegExpShared.h"
#include "vm/Scope.h"

#include "gc/ArenaList-inl.h"
#include "gc/PrivateIterators-inl.h"
     33 
     34 using namespace js;
     35 using namespace js::gc;
     36 
// Check that reserved bits of a Cell are compatible with our typical allocators
// since most derived classes will store a pointer in the first word.
// Each allocator checked below must align allocations such that the low
// CellFlagBitsReservedForGC bits of any stored pointer are zero.
static const size_t MinFirstWordAlignment = 1u << CellFlagBitsReservedForGC;
static_assert(js::detail::LIFO_ALLOC_ALIGN >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support LifoAlloc");
static_assert(CellAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support gc::Cell");
static_assert(js::jit::CodeAlignment >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support JIT code");
static_assert(js::gc::JSClassAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support JSClass pointers");
static_assert(js::ScopeDataAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support scope data pointers");
     50 
// For every AllocKind, verify at compile time that the sized type:
//  - can hold a FreeSpan (free-list bookkeeping lives inside free cells),
//  - is a multiple of CellAlignBytes so cells tile the arena correctly,
//  - meets the minimum cell size.
#define CHECK_THING_SIZE(allocKind, traceKind, type, sizedType, bgFinal,       \
                         nursery, compact)                                     \
  static_assert(sizeof(sizedType) >= sizeof(FreeSpan),                         \
                #sizedType " is smaller than FreeSpan");                       \
  static_assert(sizeof(sizedType) % CellAlignBytes == 0,                       \
                "Size of " #sizedType " is not a multiple of CellAlignBytes"); \
  static_assert(sizeof(sizedType) >= MinCellSize,                              \
                "Size of " #sizedType " is smaller than the minimum size");
FOR_EACH_ALLOCKIND(CHECK_THING_SIZE);
#undef CHECK_THING_SIZE
     61 
// Shared sentinel representing an empty free list, so empty lists need no
// null checks.
FreeSpan FreeLists::emptySentinel;
     63 
     64 template <typename T>
     65 struct ArenaLayout {
     66  static constexpr size_t thingSize() { return sizeof(T); }
     67  static constexpr size_t thingsPerArena() {
     68    return (ArenaSize - ArenaHeaderSize) / thingSize();
     69  }
     70  static constexpr size_t firstThingOffset() {
     71    return ArenaSize - thingSize() * thingsPerArena();
     72  }
     73 };
     74 
// Per-AllocKind lookup tables generated from each kind's sized type via
// ArenaLayout. Indexed by AllocKind; consistency is checked by
// Arena::checkLookupTables.

// Cell size in bytes for each kind.
const uint8_t Arena::ThingSizes[] = {
#define EXPAND_THING_SIZE(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::thingSize(),
    FOR_EACH_ALLOCKIND(EXPAND_THING_SIZE)
#undef EXPAND_THING_SIZE
};

// Offset of the first cell within an arena for each kind.
const uint8_t Arena::FirstThingOffsets[] = {
#define EXPAND_FIRST_THING_OFFSET(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::firstThingOffset(),
    FOR_EACH_ALLOCKIND(EXPAND_FIRST_THING_OFFSET)
#undef EXPAND_FIRST_THING_OFFSET
};

// Number of cells per arena for each kind.
const uint8_t Arena::ThingsPerArena[] = {
#define EXPAND_THINGS_PER_ARENA(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::thingsPerArena(),
    FOR_EACH_ALLOCKIND(EXPAND_THINGS_PER_ARENA)
#undef EXPAND_THINGS_PER_ARENA
};
     95 
// Whether this arena currently holds allocations: its page is committed, it
// is not on the chunk's free-committed set, and its allocKind field holds a
// valid kind. Safe to call on free arenas, whose allocKind may be stale.
bool Arena::allocated() const {
#if defined(DEBUG) && defined(MOZ_VALGRIND)
  // In debug builds, valgrind complains about the access to `allocKind` even
  // though it is legitimate, so temporarily disable reporting of addressing
  // errors in that range.  Note this doesn't change the state of the address
  // range, as tracked by valgrind, so subsequent checking against its state is
  // unaffected.  See bug 1932412.
  VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(&allocKind, sizeof(void*));
#endif

  size_t arenaIndex = ArenaChunk::arenaIndex(this);
  size_t pageIndex = ArenaChunk::arenaToPageIndex(arenaIndex);
  bool result = !chunk()->decommittedPages[pageIndex] &&
                !chunk()->freeCommittedArenas[arenaIndex] &&
                IsValidAllocKind(allocKind);
  // Allocated arenas must have a zone pointer that at least looks valid
  // (non-null and 8-byte aligned).
  MOZ_ASSERT_IF(result, zone_);
  MOZ_ASSERT_IF(result, (uintptr_t(zone_) & 7) == 0);

#if defined(DEBUG) && defined(MOZ_VALGRIND)
  // Reenable error reporting for the range we just said to ignore.
  VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(&allocKind, sizeof(void*));
#endif
  return result;
}
    120 
    121 void Arena::unmarkAll() {
    122  AtomicBitmapWord* arenaBits = chunk()->markBits.arenaBits(this);
    123  for (size_t i = 0; i < ArenaBitmapWords; i++) {
    124    arenaBits[i] = 0;
    125  }
    126 }
    127 
    128 void Arena::unmarkPreMarkedFreeCells() {
    129  for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
    130    MOZ_ASSERT(cell->isMarkedBlack());
    131    cell->unmark();
    132  }
    133 }
    134 
    135 #ifdef DEBUG
    136 
    137 void Arena::checkNoMarkedFreeCells() {
    138  for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
    139    MOZ_ASSERT(!cell->isMarkedAny());
    140  }
    141 }
    142 
    143 void Arena::checkAllCellsMarkedBlack() {
    144  for (ArenaCellIter cell(this); !cell.done(); cell.next()) {
    145    MOZ_ASSERT(cell->isMarkedBlack());
    146  }
    147 }
    148 
    149 #endif
    150 
    151 #if defined(DEBUG) || defined(JS_GC_ZEAL)
    152 void Arena::checkNoMarkedCells() {
    153  for (ArenaCellIter cell(this); !cell.done(); cell.next()) {
    154    MOZ_ASSERT(!cell->isMarkedAny());
    155  }
    156 }
    157 #endif
    158 
/* static */
void Arena::staticAsserts() {
  // Compile-time checks that the uint8_t lookup tables above cover every
  // AllocKind, and that hardcoded layout constants match the real structure.
  static_assert(size_t(AllocKind::LIMIT) <= 255,
                "All AllocKinds and AllocKind::LIMIT must fit in a uint8_t.");
  static_assert(std::size(ThingSizes) == AllocKindCount,
                "We haven't defined all thing sizes.");
  static_assert(std::size(FirstThingOffsets) == AllocKindCount,
                "We haven't defined all offsets.");
  static_assert(std::size(ThingsPerArena) == AllocKindCount,
                "We haven't defined all counts.");
  static_assert(ArenaZoneOffset == offsetof(Arena, zone_),
                "The hardcoded API zone offset must match the actual offset.");
  static_assert(sizeof(Arena) == ArenaSize,
                "ArenaSize must match the actual size of the Arena structure.");
  static_assert(
      offsetof(Arena, data) == ArenaHeaderSize,
      "ArenaHeaderSize must match the actual size of the header fields.");
}
    177 
    178 /* static */
    179 void Arena::checkLookupTables() {
    180 #ifdef DEBUG
    181  for (size_t i = 0; i < AllocKindCount; i++) {
    182    MOZ_ASSERT(
    183        FirstThingOffsets[i] + ThingsPerArena[i] * ThingSizes[i] == ArenaSize,
    184        "Inconsistent arena lookup table data");
    185  }
    186 #endif
    187 }
    188 
    189 #ifdef DEBUG
    190 void js::gc::ArenaList::dump() {
    191  fprintf(stderr, "ArenaList %p:\n", this);
    192  for (auto arena = iter(); !arena.done(); arena.next()) {
    193    fprintf(stderr, "  %p %zu", arena.get(), arena->countFreeCells());
    194    if (arena->isEmpty()) {
    195      fprintf(stderr, " (empty)");
    196    }
    197    if (arena->isFull()) {
    198      fprintf(stderr, " (full)");
    199    }
    200    fprintf(stderr, "\n");
    201  }
    202 }
    203 #endif
    204 
    205 AutoGatherSweptArenas::AutoGatherSweptArenas(JS::Zone* zone, AllocKind kind) {
    206  GCRuntime& gc = zone->runtimeFromMainThread()->gc;
    207  sortedList = gc.maybeGetForegroundFinalizedArenas(zone, kind);
    208  if (!sortedList) {
    209    return;
    210  }
    211 
    212  // Link individual sorted arena lists together for iteration, saving the
    213  // internal state so we can restore it later.
    214  linked = sortedList->convertToArenaList(bucketLastPointers);
    215 }
    216 
    217 AutoGatherSweptArenas::~AutoGatherSweptArenas() {
    218  if (!sortedList) {
    219    MOZ_ASSERT(linked.isEmpty());
    220    return;
    221  }
    222 
    223  sortedList->restoreFromArenaList(linked, bucketLastPointers);
    224 }
    225 
    226 FreeLists::FreeLists() {
    227  for (auto i : AllAllocKinds()) {
    228    freeLists_[i] = &emptySentinel;
    229  }
    230 }
    231 
    232 ArenaLists::ArenaLists(Zone* zone)
    233    : zone_(zone),
    234      gcCompactPropMapArenasToUpdate(nullptr),
    235      gcNormalPropMapArenasToUpdate(nullptr),
    236      savedEmptyArenas(nullptr) {
    237  for (auto i : AllAllocKinds()) {
    238    concurrentUse(i) = ConcurrentUse::None;
    239  }
    240 }
    241 
ArenaLists::~ArenaLists() {
  AutoLockGC lock(runtime());

  for (auto i : AllAllocKinds()) {
    /*
     * We can only call this during the shutdown after the last GC when
     * the background finalization is disabled.
     */
    MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
    runtime()->gc.releaseArenaList(arenaList(i), lock);
  }

  // Also release any empty arenas saved from the last sweep.
  runtime()->gc.releaseArenas(savedEmptyArenas, lock);
}
    256 
    257 void ArenaLists::moveArenasToCollectingLists() {
    258  checkEmptyFreeLists();
    259  for (AllocKind kind : AllAllocKinds()) {
    260    MOZ_ASSERT(collectingArenaList(kind).isEmpty());
    261    collectingArenaList(kind) = std::move(arenaList(kind));
    262    MOZ_ASSERT(arenaList(kind).isEmpty());
    263  }
    264 }
    265 
    266 void ArenaLists::mergeArenasFromCollectingLists() {
    267  for (AllocKind kind : AllAllocKinds()) {
    268    arenaList(kind).prepend(std::move(collectingArenaList(kind)));
    269    MOZ_ASSERT(collectingArenaList(kind).isEmpty());
    270  }
    271 }
    272 
    273 Arena* ArenaLists::takeSweptEmptyArenas() {
    274  Arena* arenas = savedEmptyArenas;
    275  savedEmptyArenas = nullptr;
    276  return arenas;
    277 }
    278 
    279 void ArenaLists::checkGCStateNotInUse() {
    280  // Called before and after collection to check the state is as expected.
    281 #ifdef DEBUG
    282  checkSweepStateNotInUse();
    283  for (auto i : AllAllocKinds()) {
    284    MOZ_ASSERT(collectingArenaList(i).isEmpty());
    285  }
    286 #endif
    287 }
    288 
    289 void ArenaLists::checkSweepStateNotInUse() {
    290 #ifdef DEBUG
    291  checkNoArenasToUpdate();
    292  MOZ_ASSERT(!savedEmptyArenas);
    293  for (auto i : AllAllocKinds()) {
    294    MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
    295  }
    296 #endif
    297 }
    298 
// Check that no prop-map arenas are queued for update by compacting GC.
void ArenaLists::checkNoArenasToUpdate() {
  MOZ_ASSERT(!gcCompactPropMapArenasToUpdate);
  MOZ_ASSERT(!gcNormalPropMapArenasToUpdate);
}
    303 
    304 void ArenaLists::checkNoArenasToUpdateForKind(AllocKind kind) {
    305 #ifdef DEBUG
    306  switch (kind) {
    307    case AllocKind::COMPACT_PROP_MAP:
    308      MOZ_ASSERT(!gcCompactPropMapArenasToUpdate);
    309      break;
    310    case AllocKind::NORMAL_PROP_MAP:
    311      MOZ_ASSERT(!gcNormalPropMapArenasToUpdate);
    312      break;
    313    default:
    314      break;
    315  }
    316 #endif
    317 }
    318 
    319 inline bool ArenaChunk::canDecommitPage(size_t pageIndex) const {
    320  if (decommittedPages[pageIndex]) {
    321    return false;
    322  }
    323 
    324  size_t arenaIndex = pageToArenaIndex(pageIndex);
    325  for (size_t i = 0; i < ArenasPerPage; i++) {
    326    if (!freeCommittedArenas[arenaIndex + i]) {
    327      return false;
    328    }
    329  }
    330 
    331  return true;
    332 }
    333 
// Decommit as many fully-free pages as possible, dropping the GC lock after
// each page so the main thread can take it. Stops early if `cancel` becomes
// true, if a decommit fails, or if this chunk becomes the current allocation
// chunk while the lock was released.
void ArenaChunk::decommitFreeArenas(GCRuntime* gc, const bool& cancel,
                                    AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());
  MOZ_ASSERT(!info.isCurrentChunk);

  for (size_t i = 0; i < PagesPerChunk; i++) {
    if (cancel) {
      break;
    }

    if (!canDecommitPage(i)) {
      continue;
    }

    // Stop on failure (including simulated OOM) rather than retrying.
    if (!decommitOneFreePage(gc, i, lock)) {
      break;
    }

    {
      // Give main thread a chance to take the lock.
      AutoUnlockGC unlock(lock);
      ThisThread::SleepMilliseconds(0);
    }

    // Re-check whether the chunk is being used for allocation after releasing
    // the lock.
    if (info.isCurrentChunk) {
      break;
    }
  }
}
    365 
// Return a freed arena to this chunk's free set, updating counts and chunk
// pool membership. If the chunk is currently used for lock-free allocation,
// the arena is instead parked in a pending set to be merged later.
void ArenaChunk::releaseArena(GCRuntime* gc, Arena* arena,
                              const AutoLockGC& lock) {
  if (info.isCurrentChunk) {
    // The main thread is allocating out of this chunk without holding the
    // lock. Don't touch any data structures it is using but add the arena to a
    // pending set. This will be merged back by mergePendingFreeArenas.
    auto& bitmap = gc->pendingFreeCommittedArenas.ref();
    MOZ_ASSERT(!bitmap[arenaIndex(arena)]);
    bitmap[arenaIndex(arena)] = true;
    return;
  }

  MOZ_ASSERT(!arena->allocated());
  MOZ_ASSERT(!freeCommittedArenas[arenaIndex(arena)]);

  // Mark the arena free-and-committed and update the chunk's counts, which
  // may move the chunk between the GC's chunk pools.
  freeCommittedArenas[arenaIndex(arena)] = true;
  updateFreeCountsAfterFree(gc, 1, true, lock);

  verify();
}
    386 
// Decommit one page of free arenas. Returns false (leaving state unchanged)
// if the OS call fails or on simulated OOM.
bool ArenaChunk::decommitOneFreePage(GCRuntime* gc, size_t pageIndex,
                                     const AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());
  MOZ_ASSERT(canDecommitPage(pageIndex));
  MOZ_ASSERT(info.numArenasFree >= info.numArenasFreeCommitted);
  MOZ_ASSERT(info.numArenasFreeCommitted >= ArenasPerPage);

  if (oom::ShouldFailWithOOM()) {
    return false;
  }

  if (!MarkPagesUnusedSoft(pageAddress(pageIndex), PageSize)) {
    return false;
  }

  // Mark the page as decommitted.
  decommittedPages[pageIndex] = true;
  // Its arenas are now decommitted rather than free-and-committed.
  for (size_t i = 0; i < ArenasPerPage; i++) {
    size_t arenaIndex = pageToArenaIndex(pageIndex) + i;
    MOZ_ASSERT(freeCommittedArenas[arenaIndex]);
    freeCommittedArenas[arenaIndex] = false;
  }
  info.numArenasFreeCommitted -= ArenasPerPage;

  verify();

  return true;
}
    415 
// Decommit all fully-free pages while holding the GC lock throughout, unlike
// decommitFreeArenas which drops the lock between pages. The first failure
// (including simulated OOM) aborts the loop.
void ArenaChunk::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());

  for (size_t i = 0; i < PagesPerChunk; i++) {
    if (!canDecommitPage(i)) {
      continue;
    }

    MOZ_ASSERT(!decommittedPages[i]);
    MOZ_ASSERT(info.numArenasFreeCommitted >= ArenasPerPage);

    // NOTE(review): this passes SystemPageSize() where decommitOneFreePage
    // passes PageSize for the same operation — confirm the two agree.
    if (js::oom::ShouldFailWithOOM() ||
        !MarkPagesUnusedSoft(pageAddress(i), SystemPageSize())) {
      break;
    }

    // Flip the page to decommitted and take its arenas out of the
    // free-committed set.
    decommittedPages[i] = true;
    for (size_t j = 0; j < ArenasPerPage; ++j) {
      size_t arenaIndex = pageToArenaIndex(i) + j;
      MOZ_ASSERT(freeCommittedArenas[arenaIndex]);
      freeCommittedArenas[arenaIndex] = false;
    }
    info.numArenasFreeCommitted -= ArenasPerPage;
  }

  verify();
}
    443 
// Update this chunk's free-arena counts after `numArenasAlloced` committed
// arenas were allocated from it, and move it between the GC's chunk pools
// (empty -> available, available -> full) as needed. Must not be called for
// the current allocation chunk (see updateCurrentChunkAfterAlloc).
void ArenaChunk::updateFreeCountsAfterAlloc(GCRuntime* gc,
                                            size_t numArenasAlloced,
                                            const AutoLockGC& lock) {
  MOZ_ASSERT(!info.isCurrentChunk);
  MOZ_ASSERT(numArenasAlloced > 0);

  // Record which pool we were on before the counts change.
  bool wasEmpty = isEmpty();

  MOZ_ASSERT(info.numArenasFree >= numArenasAlloced);
  MOZ_ASSERT(info.numArenasFreeCommitted >= numArenasAlloced);
  info.numArenasFreeCommitted -= numArenasAlloced;
  info.numArenasFree -= numArenasAlloced;

  if (MOZ_UNLIKELY(wasEmpty)) {
    gc->emptyChunks(lock).remove(this);
    gc->availableChunks(lock).push(this);
    return;
  }

  if (MOZ_UNLIKELY(isFull())) {
    gc->availableChunks(lock).remove(this);
    gc->fullChunks(lock).push(this);
    return;
  }

  // Otherwise the chunk was and remains on the available pool.
  MOZ_ASSERT(gc->availableChunks(lock).contains(this));
}
    471 
// Account for one arena allocated from the current chunk. Runs on the main
// thread without holding the GC lock; the lock is only taken if the chunk
// has filled up, in which case concurrently freed arenas are merged in and,
// if it is still full, the chunk stops being the current chunk.
void ArenaChunk::updateCurrentChunkAfterAlloc(GCRuntime* gc) {
  MOZ_ASSERT(info.isCurrentChunk);  // Can access without holding lock.
  MOZ_ASSERT(gc->isCurrentChunk(this));

  MOZ_ASSERT(info.numArenasFree >= 1);
  MOZ_ASSERT(info.numArenasFreeCommitted >= 1);
  info.numArenasFreeCommitted--;
  info.numArenasFree--;

  verify();

  if (MOZ_UNLIKELY(isFull())) {
    AutoLockGC lock(gc);
    // Arenas freed by other threads may make the chunk non-full again.
    mergePendingFreeArenas(gc, lock);
    if (isFull()) {
      gc->clearCurrentChunk(lock);
    }
  }
}
    491 
// Update this chunk's free-arena counts after `numArenasFreed` arenas were
// freed (`wasCommitted` says whether they count as free-committed too), and
// move it between the GC's chunk pools (full -> available, or recycle it if
// now empty). Must not be called for the current allocation chunk.
void ArenaChunk::updateFreeCountsAfterFree(GCRuntime* gc, size_t numArenasFreed,
                                           bool wasCommitted,
                                           const AutoLockGC& lock) {
  MOZ_ASSERT(!info.isCurrentChunk);
  MOZ_ASSERT(numArenasFreed > 0);
  MOZ_ASSERT(info.numArenasFree + numArenasFreed <= ArenasPerChunk);
  MOZ_ASSERT(info.numArenasFreeCommitted + numArenasFreed <= ArenasPerChunk);

  // Record which pool we were on before the counts change.
  bool wasFull = isFull();

  info.numArenasFree += numArenasFreed;
  if (wasCommitted) {
    info.numArenasFreeCommitted += numArenasFreed;
  }

  if (MOZ_UNLIKELY(wasFull)) {
    gc->fullChunks(lock).remove(this);
    gc->availableChunks(lock).push(this);
    return;
  }

  if (MOZ_UNLIKELY(isEmpty())) {
    gc->availableChunks(lock).remove(this);
    gc->recycleChunk(this, lock);
    return;
  }

  MOZ_ASSERT(gc->availableChunks(lock).contains(this));
}
    521 
// Install `chunk` as the chunk the main thread allocates from without taking
// the GC lock. There must be no current chunk already and no pending free
// arenas left over from a previous one.
void GCRuntime::setCurrentChunk(ArenaChunk* chunk, const AutoLockGC& lock) {
  MOZ_ASSERT(!currentChunk_);
  MOZ_ASSERT(pendingFreeCommittedArenas.ref().IsEmpty());
  MOZ_ASSERT(chunk);
  MOZ_ASSERT(!chunk->info.isCurrentChunk);

  currentChunk_ = chunk;
  chunk->info.isCurrentChunk = true;  // Lock needed here.
}
    531 
// Stop using the current chunk for lock-free allocation (no-op if there is
// none): merge in concurrently freed arenas, clear the current-chunk flag,
// and return the chunk to the pool matching its occupancy.
void GCRuntime::clearCurrentChunk(const AutoLockGC& lock) {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));

  ArenaChunk* chunk = currentChunk_;
  if (!chunk) {
    return;
  }

  // Fold in arenas other threads freed while this was the current chunk.
  chunk->mergePendingFreeArenas(this, lock);

  MOZ_ASSERT(chunk->info.isCurrentChunk);
  chunk->info.isCurrentChunk = false;  // Lock needed here.
  currentChunk_ = nullptr;

  if (chunk->isFull()) {
    fullChunks(lock).push(chunk);
    return;
  }

  if (chunk->isEmpty()) {
    emptyChunks(lock).push(chunk);
    return;
  }

  MOZ_ASSERT(chunk->hasAvailableArenas());
  availableChunks(lock).push(chunk);
}
    559 
// Merge arenas that other threads freed while this was the current chunk
// (recorded in the GC's pending bitmap by releaseArena) back into this
// chunk's free-committed set and counts.
void ArenaChunk::mergePendingFreeArenas(GCRuntime* gc, const AutoLockGC& lock) {
  MOZ_ASSERT(info.isCurrentChunk);

  auto& bitmap = gc->pendingFreeCommittedArenas.ref();
  if (bitmap.IsEmpty()) {
    return;
  }

  // No pending arena may already be marked free here.
  MOZ_ASSERT((freeCommittedArenas & bitmap).IsEmpty());
  size_t count = bitmap.Count();
  freeCommittedArenas |= bitmap;
  bitmap.ResetAll();

  info.numArenasFree += count;
  info.numArenasFreeCommitted += count;

  verify();
}
    578 
    579 ArenaChunk* ChunkPool::pop() {
    580  MOZ_ASSERT(bool(head_) == bool(count_));
    581  if (!count_) {
    582    return nullptr;
    583  }
    584  return remove(head_);
    585 }
    586 
    587 void ChunkPool::push(ArenaChunk* chunk) {
    588  MOZ_ASSERT(!chunk->info.next);
    589  MOZ_ASSERT(!chunk->info.prev);
    590 
    591  chunk->info.next = head_;
    592  if (head_) {
    593    head_->info.prev = chunk;
    594  }
    595  head_ = chunk;
    596  ++count_;
    597 }
    598 
    599 ArenaChunk* ChunkPool::remove(ArenaChunk* chunk) {
    600  MOZ_ASSERT(count_ > 0);
    601  MOZ_ASSERT(contains(chunk));
    602 
    603  if (head_ == chunk) {
    604    head_ = chunk->info.next;
    605  }
    606  if (chunk->info.prev) {
    607    chunk->info.prev->info.next = chunk->info.next;
    608  }
    609  if (chunk->info.next) {
    610    chunk->info.next->info.prev = chunk->info.prev;
    611  }
    612  chunk->info.next = chunk->info.prev = nullptr;
    613  --count_;
    614 
    615  return chunk;
    616 }
    617 
    618 // We could keep the chunk pool sorted, but that's likely to be more expensive.
    619 // This sort is nlogn, but keeping it sorted is likely to be m*n, with m being
    620 // the number of operations (likely higher than n).
    621 void ChunkPool::sort() {
    622  // Only sort if the list isn't already sorted.
    623  if (!isSorted()) {
    624    head_ = mergeSort(head(), count());
    625 
    626    // Fixup prev pointers.
    627    ArenaChunk* prev = nullptr;
    628    for (ArenaChunk* cur = head_; cur; cur = cur->info.next) {
    629      cur->info.prev = prev;
    630      prev = cur;
    631    }
    632  }
    633 
    634  MOZ_ASSERT(verify());
    635  MOZ_ASSERT(isSorted());
    636 }
    637 
    638 ArenaChunk* ChunkPool::mergeSort(ArenaChunk* list, size_t count) {
    639  MOZ_ASSERT(bool(list) == bool(count));
    640 
    641  if (count < 2) {
    642    return list;
    643  }
    644 
    645  size_t half = count / 2;
    646 
    647  // Split;
    648  ArenaChunk* front = list;
    649  ArenaChunk* back;
    650  {
    651    ArenaChunk* cur = list;
    652    for (size_t i = 0; i < half - 1; i++) {
    653      MOZ_ASSERT(cur);
    654      cur = cur->info.next;
    655    }
    656    back = cur->info.next;
    657    cur->info.next = nullptr;
    658  }
    659 
    660  front = mergeSort(front, half);
    661  back = mergeSort(back, count - half);
    662 
    663  // Merge
    664  list = nullptr;
    665  ArenaChunk** cur = &list;
    666  while (front || back) {
    667    if (!front) {
    668      *cur = back;
    669      break;
    670    }
    671    if (!back) {
    672      *cur = front;
    673      break;
    674    }
    675 
    676    // Note that the sort is stable due to the <= here. Nothing depends on
    677    // this but it could.
    678    if (front->info.numArenasFree <= back->info.numArenasFree) {
    679      *cur = front;
    680      front = front->info.next;
    681      cur = &(*cur)->info.next;
    682    } else {
    683      *cur = back;
    684      back = back->info.next;
    685      cur = &(*cur)->info.next;
    686    }
    687  }
    688 
    689  return list;
    690 }
    691 
    692 bool ChunkPool::isSorted() const {
    693  uint32_t last = 1;
    694  for (ArenaChunk* cursor = head_; cursor; cursor = cursor->info.next) {
    695    if (cursor->info.numArenasFree < last) {
    696      return false;
    697    }
    698    last = cursor->info.numArenasFree;
    699  }
    700  return true;
    701 }
    702 
    703 #ifdef DEBUG
    704 
    705 bool ChunkPool::contains(ArenaChunk* chunk) const {
    706  verify();
    707  for (ArenaChunk* cursor = head_; cursor; cursor = cursor->info.next) {
    708    if (cursor == chunk) {
    709      return true;
    710    }
    711  }
    712  return false;
    713 }
    714 
    715 bool ChunkPool::verify() const {
    716  MOZ_ASSERT(bool(head_) == bool(count_));
    717  uint32_t count = 0;
    718  for (ArenaChunk* cursor = head_; cursor;
    719       cursor = cursor->info.next, ++count) {
    720    MOZ_ASSERT_IF(cursor->info.prev, cursor->info.prev->info.next == cursor);
    721    MOZ_ASSERT_IF(cursor->info.next, cursor->info.next->info.prev == cursor);
    722  }
    723  MOZ_ASSERT(count_ == count);
    724  return true;
    725 }
    726 
    727 void ChunkPool::verifyChunks() const {
    728  for (ArenaChunk* chunk = head_; chunk; chunk = chunk->info.next) {
    729    chunk->verify();
    730    MOZ_ASSERT(!chunk->info.isCurrentChunk);
    731  }
    732 }
    733 
// Debug check of this chunk's internal consistency: layout alignment, free
// counts matching the bitmaps, and no arena both decommitted and
// free-committed.
void ArenaChunk::verify() const {
  // Check the mark bits for each arena are aligned to the cache line size.
  static_assert((offsetof(ArenaChunk, arenas) % ArenaSize) == 0);
  constexpr size_t CellBytesPerMarkByte = CellBytesPerMarkBit * 8;
  static_assert((ArenaSize % CellBytesPerMarkByte) == 0);
  constexpr size_t MarkBytesPerArena = ArenaSize / CellBytesPerMarkByte;
  static_assert((MarkBytesPerArena % TypicalCacheLineSize) == 0);
  static_assert((offsetof(ArenaChunk, markBits) % TypicalCacheLineSize) == 0);

  MOZ_ASSERT(info.numArenasFree <= ArenasPerChunk);
  MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);

  // The counts must agree with the decommitted-page and free-committed-arena
  // bitmaps.
  size_t decommittedCount = decommittedPages.Count() * ArenasPerPage;
  size_t freeCommittedCount = freeCommittedArenas.Count();
  size_t freeCount = freeCommittedCount + decommittedCount;

  MOZ_ASSERT(freeCount == info.numArenasFree);
  MOZ_ASSERT(freeCommittedCount == info.numArenasFreeCommitted);

  // An arena cannot be both on a decommitted page and free-committed.
  for (size_t i = 0; i < ArenasPerChunk; ++i) {
    MOZ_ASSERT(
        !(decommittedPages[arenaToPageIndex(i)] && freeCommittedArenas[i]));
  }
}
    758 
    759 #endif
    760 
// Advance the iterator to the next chunk in the pool; must not be done().
void ChunkPool::Iter::next() {
  MOZ_ASSERT(!done());
  current_ = current_->info.next;
}