tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

BufferAllocator.cpp (114224B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "gc/BufferAllocator-inl.h"
      8 
      9 #include "mozilla/ScopeExit.h"
     10 
     11 #ifdef XP_DARWIN
     12 #  include <mach/mach_init.h>
     13 #  include <mach/vm_map.h>
     14 #endif
     15 
     16 #include "gc/BufferAllocatorInternals.h"
     17 #include "gc/GCInternals.h"
     18 #include "gc/GCLock.h"
     19 #include "gc/PublicIterators.h"
     20 #include "gc/Zone.h"
     21 #include "js/HeapAPI.h"
     22 #include "util/Poison.h"
     23 
     24 #include "gc/Heap-inl.h"
     25 #include "gc/Marking-inl.h"
     26 
     27 using namespace js;
     28 using namespace js::gc;
     29 
     30 namespace js::gc {
     31 
// RAII guards for the buffer allocator mutex. Both overloads lock the same
// underlying mutex: the GC runtime's bufferAllocatorLock (see
// BufferAllocator::lock()).
BufferAllocator::AutoLock::AutoLock(GCRuntime* gc)
    : LockGuard(gc->bufferAllocatorLock) {}

BufferAllocator::AutoLock::AutoLock(BufferAllocator* allocator)
    : LockGuard(allocator->lock()) {}
     37 
// Diagnostic check that |ptr| lies in the low 47-bit address range, i.e. that
// bits 47 (zero-based) and above are all clear.
static void CheckHighBitsOfPointer(void* ptr) {
#ifdef JS_64BIT
  // Shifting out the low 47 bits must leave nothing behind.
  MOZ_DIAGNOSTIC_ASSERT((uintptr_t(ptr) >> 47) == 0);
#endif
}
     44 
// Move construction steals |other|'s lists and availability bits via swap.
// The destination must start out empty, and |other| is left empty.
BufferAllocator::FreeLists::FreeLists(FreeLists&& other) {
  MOZ_ASSERT(this != &other);
  assertEmpty();
  std::swap(lists, other.lists);
  std::swap(available, other.available);
  other.assertEmpty();
}

// Move assignment; same contract as the move constructor: the destination
// must be empty beforehand and |other| ends up empty.
BufferAllocator::FreeLists& BufferAllocator::FreeLists::operator=(
    FreeLists&& other) {
  MOZ_ASSERT(this != &other);
  assertEmpty();
  std::swap(lists, other.lists);
  std::swap(available, other.available);
  other.assertEmpty();
  return *this;
}
     62 
// Iterator over the per-size-class free lists.
BufferAllocator::FreeLists::FreeListIter
BufferAllocator::FreeLists::freeListIter() {
  return FreeListIter(*this);
}

// Iterator over every free region in every list.
BufferAllocator::FreeLists::FreeRegionIter
BufferAllocator::FreeLists::freeRegionIter() {
  return FreeRegionIter(*this);
}

// Returns whether at least one free region is available for |sizeClass|.
bool BufferAllocator::FreeLists::hasSizeClass(size_t sizeClass) const {
  MOZ_ASSERT(sizeClass <= MaxMediumAllocClass);
  return available[sizeClass];
}
     77 
     78 size_t BufferAllocator::FreeLists::getFirstAvailableSizeClass(
     79    size_t minSizeClass, size_t maxSizeClass) const {
     80  MOZ_ASSERT(maxSizeClass <= MaxMediumAllocClass);
     81 
     82  size_t result = available.FindNext(minSizeClass);
     83  MOZ_ASSERT(result >= minSizeClass);
     84  MOZ_ASSERT_IF(result != SIZE_MAX, !lists[result].isEmpty());
     85 
     86  if (result > maxSizeClass) {
     87    return SIZE_MAX;
     88  }
     89 
     90  return result;
     91 }
     92 
     93 size_t BufferAllocator::FreeLists::getLastAvailableSizeClass(
     94    size_t minSizeClass, size_t maxSizeClass) const {
     95  MOZ_ASSERT(maxSizeClass <= MaxMediumAllocClass);
     96 
     97  size_t result = available.FindPrev(maxSizeClass);
     98  MOZ_ASSERT(result <= maxSizeClass || result == SIZE_MAX);
     99  MOZ_ASSERT_IF(result != SIZE_MAX, !lists[result].isEmpty());
    100 
    101  if (result < minSizeClass) {
    102    return SIZE_MAX;
    103  }
    104 
    105  return result;
    106 }
    107 
// Return the head of the (non-empty) free list for |sizeClass|.
BufferAllocator::FreeRegion* BufferAllocator::FreeLists::getFirstRegion(
    size_t sizeClass) {
  MOZ_ASSERT(!lists[sizeClass].isEmpty());
  return lists[sizeClass].getFirst();
}

// Add |region| to the front of the list for |sizeClass| and mark that class
// available.
void BufferAllocator::FreeLists::pushFront(size_t sizeClass,
                                           FreeRegion* region) {
  MOZ_ASSERT(sizeClass < AllocSizeClasses);
  lists[sizeClass].pushFront(region);
  available[sizeClass] = true;
}

// As pushFront, but adds |region| to the back of the list.
void BufferAllocator::FreeLists::pushBack(size_t sizeClass,
                                          FreeRegion* region) {
  MOZ_ASSERT(sizeClass < AllocSizeClasses);
  lists[sizeClass].pushBack(region);
  available[sizeClass] = true;
}
    127 
    128 void BufferAllocator::FreeLists::append(FreeLists&& other) {
    129  for (size_t i = 0; i < AllocSizeClasses; i++) {
    130    if (!other.lists[i].isEmpty()) {
    131      lists[i].append(std::move(other.lists[i]));
    132      available[i] = true;
    133    }
    134  }
    135  other.available.ResetAll();
    136  other.assertEmpty();
    137 }
    138 
    139 void BufferAllocator::FreeLists::prepend(FreeLists&& other) {
    140  for (size_t i = 0; i < AllocSizeClasses; i++) {
    141    if (!other.lists[i].isEmpty()) {
    142      lists[i].prepend(std::move(other.lists[i]));
    143      available[i] = true;
    144    }
    145  }
    146  other.available.ResetAll();
    147  other.assertEmpty();
    148 }
    149 
// Unlink |region| from its list, clearing the availability bit if it was the
// last region of its class.
void BufferAllocator::FreeLists::remove(size_t sizeClass, FreeRegion* region) {
  MOZ_ASSERT(sizeClass < AllocSizeClasses);
  lists[sizeClass].remove(region);
  available[sizeClass] = !lists[sizeClass].isEmpty();
}

// Drop all free regions at once by re-constructing each list in place rather
// than unlinking elements one by one.
void BufferAllocator::FreeLists::clear() {
  for (auto freeList = freeListIter(); !freeList.done(); freeList.next()) {
    new (&freeList.get()) FreeList;  // clear() is less efficient.
  }
  available.ResetAll();
}
    162 
// Call |func(freeList, sizeClass, region)| for every free region. The next
// pointer is captured before each call, so |func| may remove the current
// region; the availability bit is recomputed after each list is visited.
template <typename Func>
void BufferAllocator::FreeLists::forEachRegion(Func&& func) {
  for (size_t i = 0; i <= MaxMediumAllocClass; i++) {
    FreeList& freeList = lists[i];
    FreeRegion* region = freeList.getFirst();
    while (region) {
      FreeRegion* next = region->getNext();
      func(freeList, i, region);
      region = next;
    }
    available[i] = !freeList.isEmpty();
  }
}
    176 
// Debug-only check that every list is empty and no availability bit is set.
inline void BufferAllocator::FreeLists::assertEmpty() const {
#ifdef DEBUG
  for (size_t i = 0; i < AllocSizeClasses; i++) {
    MOZ_ASSERT(lists[i].isEmpty());
  }
  MOZ_ASSERT(available.IsEmpty());
#endif
}

// Debug-only check that |region| is linked into the list for |sizeClass|.
inline void BufferAllocator::FreeLists::assertContains(
    size_t sizeClass, FreeRegion* region) const {
#ifdef DEBUG
  MOZ_ASSERT(available[sizeClass]);
  MOZ_ASSERT(lists[sizeClass].contains(region));
#endif
}

// Debug-only check that the availability bitmap matches the lists' states.
inline void BufferAllocator::FreeLists::checkAvailable() const {
#ifdef DEBUG
  for (size_t i = 0; i < AllocSizeClasses; i++) {
    MOZ_ASSERT(available[i] == !lists[i].isEmpty());
  }
#endif
}
    201 
// Iterator over the per-size-class chunk lists.
BufferAllocator::ChunkLists::ChunkListIter
BufferAllocator::ChunkLists::chunkListIter() {
  return ChunkListIter(*this);
}

// Iterator over every chunk in every list.
BufferAllocator::ChunkLists::ChunkIter
BufferAllocator::ChunkLists::chunkIter() {
  return ChunkIter(*this);
}
    211 
    212 size_t BufferAllocator::ChunkLists::getFirstAvailableSizeClass(
    213    size_t minSizeClass, size_t maxSizeClass) const {
    214  MOZ_ASSERT(maxSizeClass <= MaxMediumAllocClass);
    215 
    216  size_t result = available.FindNext(minSizeClass);
    217  MOZ_ASSERT(result >= minSizeClass);
    218  MOZ_ASSERT_IF(result != SIZE_MAX, !lists[result].isEmpty());
    219 
    220  if (result > maxSizeClass) {
    221    return SIZE_MAX;
    222  }
    223 
    224  return result;
    225 }
    226 
// Remove and return the first chunk in |sizeClass|'s (non-empty) list,
// clearing the availability bit if the list becomes empty.
BufferChunk* BufferAllocator::ChunkLists::popFirstChunk(size_t sizeClass) {
  MOZ_ASSERT(sizeClass < AllocSizeClasses);
  MOZ_ASSERT(!lists[sizeClass].isEmpty());
  BufferChunk* chunk = lists[sizeClass].popFirst();
  if (lists[sizeClass].isEmpty()) {
    available[sizeClass] = false;
  }
  return chunk;
}

// Unlink |chunk| and recompute the availability bit. NOTE(review): the bound
// here is |<= AllocSizeClasses| while popFirstChunk uses |<|; presumably the
// extra index is the FullChunkSizeClass bucket used for completely full
// chunks (see BufferChunk::sizeClassForAvailableLists) -- confirm against
// the ChunkLists declaration.
void BufferAllocator::ChunkLists::remove(size_t sizeClass, BufferChunk* chunk) {
  MOZ_ASSERT(sizeClass <= AllocSizeClasses);
  lists[sizeClass].remove(chunk);
  available[sizeClass] = !lists[sizeClass].isEmpty();
}
    242 
// Add |chunk| to the front of the list for |sizeClass|. The |<=| bound
// admits one index past the medium size classes; presumably the
// FullChunkSizeClass bucket -- see sizeClassForAvailableLists.
void BufferAllocator::ChunkLists::pushFront(size_t sizeClass,
                                            BufferChunk* chunk) {
  MOZ_ASSERT(sizeClass <= AllocSizeClasses);
  lists[sizeClass].pushFront(chunk);
  available[sizeClass] = true;
}

// Add |chunk| to the back of the list matching its current largest free
// region.
void BufferAllocator::ChunkLists::pushBack(BufferChunk* chunk) {
  MOZ_ASSERT(chunk->ownsFreeLists);
  pushBack(chunk->sizeClassForAvailableLists(), chunk);
}

// Add |chunk| to the back of the list for |sizeClass|, which must match the
// chunk's own notion of its size class.
void BufferAllocator::ChunkLists::pushBack(size_t sizeClass,
                                           BufferChunk* chunk) {
  MOZ_ASSERT(sizeClass <= AllocSizeClasses);
  MOZ_ASSERT(sizeClass == chunk->sizeClassForAvailableLists());
  lists[sizeClass].pushBack(chunk);
  available[sizeClass] = true;
}
    262 
// Move every chunk into a single combined list, leaving all per-class lists
// empty.
BufferAllocator::BufferChunkList
BufferAllocator::ChunkLists::extractAllChunks() {
  BufferChunkList result;
  for (auto list = chunkListIter(); !list.done(); list.next()) {
    result.append(std::move(list.get()));
  }
  available.ResetAll();
  return result;
}

// Returns whether no chunk is present in any size class.
inline bool BufferAllocator::ChunkLists::isEmpty() const {
  checkAvailable();
  return available.IsEmpty();
}

// Debug-only check that the availability bitmap matches the lists' states.
inline void BufferAllocator::ChunkLists::checkAvailable() const {
#ifdef DEBUG
  for (size_t i = 0; i < AllocSizeClasses; i++) {
    MOZ_ASSERT(available[i] == !lists[i].isEmpty());
  }
#endif
}
    285 
    286 }  // namespace js::gc
    287 
// Poison |bytes| of memory at |alloc| with |value|, informing the memory
// checker via |kind|. Outside early-beta builds the poisoned span is capped
// at 256 bytes to limit the cost in release.
MOZ_ALWAYS_INLINE void PoisonAlloc(void* alloc, uint8_t value, size_t bytes,
                                   MemCheckKind kind) {
#ifndef EARLY_BETA_OR_EARLIER
  // Limit poisoning in release builds.
  bytes = std::min(bytes, size_t(256));
#endif
  AlwaysPoison(alloc, value, bytes, kind);
}
    296 
// Mark or unmark [alloc, alloc + bytes) as a single allocation. Extents are
// tracked with one bit per granule in two bitmaps: a start bit at the first
// granule and an end bit one granule past the allocation. The end bit is
// omitted when the allocation runs to the end of the space.
template <typename D, size_t S, size_t G>
void AllocSpace<D, S, G>::setAllocated(void* alloc, size_t bytes,
                                       bool allocated) {
  size_t startBit = ptrToIndex(alloc);
  MOZ_ASSERT(bytes % GranularityBytes == 0);
  size_t endBit = startBit + bytes / GranularityBytes;
  MOZ_ASSERT(endBit <= MaxAllocCount);
  // The start bit must actually change state, and the start and end bits
  // must currently agree (both set or both clear).
  MOZ_ASSERT(allocStartBitmap.ref()[startBit] != allocated);
  MOZ_ASSERT_IF(endBit != MaxAllocCount, allocStartBitmap.ref()[startBit] ==
                                             allocEndBitmap.ref()[endBit]);
  // No other allocation may start or end inside this range.
  MOZ_ASSERT_IF(startBit + 1 < MaxAllocCount,
                allocStartBitmap.ref().FindNext(startBit + 1) >= endBit);
  MOZ_ASSERT(findEndBit(startBit) >= endBit);

  allocStartBitmap.ref()[startBit] = allocated;
  if (endBit != MaxAllocCount) {
    allocEndBitmap.ref()[endBit] = allocated;
  }
}
    316 
// Resize an existing allocation in place by moving its end bit from
// |oldBytes| past the start to |newBytes| past the start. The start bit is
// left untouched.
template <typename D, size_t S, size_t G>
void AllocSpace<D, S, G>::updateEndOffset(void* alloc, size_t oldBytes,
                                          size_t newBytes) {
  MOZ_ASSERT(isAllocated(alloc));
  MOZ_ASSERT(oldBytes % GranularityBytes == 0);
  MOZ_ASSERT(newBytes % GranularityBytes == 0);

  // Clear the old end bit. It is absent if the allocation ran to the end of
  // the space.
  size_t startBit = ptrToIndex(alloc);
  size_t oldEndBit = startBit + oldBytes / GranularityBytes;
  MOZ_ASSERT(oldEndBit <= MaxAllocCount);
  if (oldEndBit != MaxAllocCount) {
    MOZ_ASSERT(allocEndBitmap.ref()[oldEndBit]);
    allocEndBitmap.ref()[oldEndBit] = false;
  }

  // Set the new end bit, after checking that the new extent overlaps no
  // other allocation.
  size_t newEndBit = startBit + newBytes / GranularityBytes;
  MOZ_ASSERT(newEndBit <= MaxAllocCount);
  MOZ_ASSERT_IF(startBit + 1 < MaxAllocCount,
                allocStartBitmap.ref().FindNext(startBit + 1) >= newEndBit);
  MOZ_ASSERT(findEndBit(startBit) >= newEndBit);
  if (newEndBit != MaxAllocCount) {
    allocEndBitmap.ref()[newEndBit] = true;
  }
}
    341 
// Return the size in bytes of the allocation starting at |alloc|, derived
// from the distance between its start bit and its end bit.
template <typename D, size_t S, size_t G>
size_t AllocSpace<D, S, G>::allocBytes(const void* alloc) const {
  MOZ_ASSERT(isAllocated(alloc));

  size_t startBit = ptrToIndex(alloc);
  size_t endBit = findEndBit(startBit);
  MOZ_ASSERT(endBit > startBit);
  MOZ_ASSERT(endBit <= MaxAllocCount);

  return (endBit - startBit) * GranularityBytes;
}
    353 
// Set the mark bit for |alloc|, returning true if this call changed it from
// unmarked to marked.
template <typename D, size_t S, size_t G>
bool AllocSpace<D, S, G>::setMarked(void* alloc) {
  MOZ_ASSERT(isAllocated(alloc));
  size_t bit = ptrToIndex(alloc);

  // This is thread safe but can return false positives if another thread
  // also marked the same allocation at the same time (the read and write
  // below are not a single atomic operation).
  if (markBits.ref().getBit(bit)) {
    return false;
  }

  markBits.ref().setBit(bit, true);
  return true;
}
    368 
    369 template <typename D, size_t S, size_t G>
    370 size_t AllocSpace<D, S, G>::findNextAllocated(uintptr_t offset) const {
    371  size_t bit = offsetToIndex(offset);
    372  size_t next = allocStartBitmap.ref().FindNext(bit);
    373  if (next == SIZE_MAX) {
    374    return SizeBytes;
    375  }
    376 
    377  return next * GranularityBytes;
    378 }
    379 
    380 template <typename D, size_t S, size_t G>
    381 size_t AllocSpace<D, S, G>::findPrevAllocated(uintptr_t offset) const {
    382  size_t bit = offsetToIndex(offset);
    383  size_t prev = allocStartBitmap.ref().FindPrev(bit);
    384  if (prev == SIZE_MAX) {
    385    return SizeBytes;
    386  }
    387 
    388  return prev * GranularityBytes;
    389 }
    390 
template <typename D, size_t S, size_t G>
BufferAllocator::FreeRegion* AllocSpace<D, S, G>::findFollowingFreeRegion(
    uintptr_t startAddr) {
  // Find the free region that starts at |startAddr|, which is not allocated and
  // not at the end of the chunk. Always returns a region.

  uintptr_t offset = uintptr_t(startAddr) & AddressMask;
  MOZ_ASSERT(isValidOffset(offset));
  MOZ_ASSERT((offset % GranularityBytes) == 0);

  MOZ_ASSERT(!isAllocated(offset));  // Already marked as not allocated.
  // The free space extends to the next allocation, or the end of the space
  // (findNextAllocated returns SizeBytes in that case).
  offset = findNextAllocated(offset);
  MOZ_ASSERT(offset <= SizeBytes);

  // Free region metadata is stored at the end of the free space, so the
  // region ending where the next allocation begins is the one we want.
  auto* region = FreeRegion::fromEndAddr(startAddress() + offset);
  MOZ_ASSERT(region->startAddr == startAddr);

  return region;
}
    410 
template <typename D, size_t S, size_t G>
BufferAllocator::FreeRegion* AllocSpace<D, S, G>::findPrecedingFreeRegion(
    uintptr_t endAddr) {
  // Find the free region, if any, that ends at |endAddr|, which may be
  // allocated or at the start of the chunk.

  uintptr_t offset = uintptr_t(endAddr) & AddressMask;
  MOZ_ASSERT(isValidOffset(offset));
  MOZ_ASSERT((offset % GranularityBytes) == 0);

  if (offset == firstAllocOffset()) {
    return nullptr;  // Already at start of chunk.
  }

  MOZ_ASSERT(!isAllocated(offset));
  // Search backwards for the nearest allocation; findPrevAllocated returns
  // SizeBytes if there is none before |offset|.
  offset = findPrevAllocated(offset);

  if (offset != SizeBytes) {
    // Found a preceding allocation.
    const void* alloc = ptrFromOffset(offset);
    size_t bytes = allocBytes(alloc);
    MOZ_ASSERT(uintptr_t(alloc) + bytes <= endAddr);
    if (uintptr_t(alloc) + bytes == endAddr) {
      // No free space between preceding allocation and |endAddr|.
      return nullptr;
    }
  }

  // Free region metadata lives at the end of the free space, i.e. at
  // |endAddr|.
  auto* region = FreeRegion::fromEndAddr(endAddr);

#ifdef DEBUG
  // Check the region spans exactly the gap between the preceding allocation
  // (or the start of the space) and |endAddr|.
  region->check();
  if (offset != SizeBytes) {
    const void* alloc = ptrFromOffset(offset);
    size_t bytes = allocBytes(alloc);
    MOZ_ASSERT(region->startAddr == uintptr_t(alloc) + bytes);
  } else {
    MOZ_ASSERT(region->startAddr == startAddress() + firstAllocOffset());
  }
#endif

  return region;
}
    454 
// A chunk of buffer allocations belonging to |zone|. The zone pointer is
// only stored (and checked) in debug builds.
BufferChunk::BufferChunk(Zone* zone)
    : ChunkBase(zone->runtimeFromMainThread(), ChunkKind::Buffers) {
#ifdef DEBUG
  this->zone = zone;
  MOZ_ASSERT(decommittedPages.ref().IsEmpty());
#endif
}

// All allocations must have been released before the chunk is destroyed.
BufferChunk::~BufferChunk() {
#ifdef DEBUG
  MOZ_ASSERT(allocStartBitmap.ref().IsEmpty());
  MOZ_ASSERT(allocEndBitmap.ref().IsEmpty());
  MOZ_ASSERT(nurseryOwnedBitmap.ref().IsEmpty());
#endif
}
    470 
// Record whether the allocation at |alloc| is a small-buffer region. The bit
// is indexed at SmallRegionSize granularity, so |alloc| must be the region
// base address.
void BufferChunk::setSmallBufferRegion(void* alloc, bool smallAlloc) {
  MOZ_ASSERT(isAllocated(alloc));
  size_t bit = ptrToIndex<SmallRegionSize, SmallRegionSize>(alloc);
  smallRegionBitmap.ref().setBit(bit, smallAlloc);
}

// Test whether |alloc| lies inside a small-buffer region. Indexed at
// SmallAllocGranularity, so any valid small-alloc pointer within the region
// works here, not just the region base.
bool BufferChunk::isSmallBufferRegion(const void* alloc) const {
  // Allow any valid small alloc pointer within the region.
  size_t bit = ptrToIndex<SmallRegionSize, SmallAllocGranularity>(alloc);
  return smallRegionBitmap.ref().getBit(bit);
}
    482 
// Compute which ChunkLists bucket this chunk belongs in, based on the
// largest free region it currently contains.
size_t BufferChunk::sizeClassForAvailableLists() const {
  MOZ_ASSERT(ownsFreeLists);

  // To quickly find an available chunk we bin them by the size of their largest
  // free region. This allows us to select a chunk we know will be able to
  // satisfy a request.
  //
  // This prioritises allocating into chunks with large free regions first. It
  // might be better for memory use to allocate into chunks with less free space
  // first instead.
  size_t sizeClass =
      freeLists.ref().getLastAvailableSizeClass(0, MaxMediumAllocClass);

  // Use a special size class for completely full chunks.
  if (sizeClass == SIZE_MAX) {
    return BufferAllocator::FullChunkSizeClass;
  }

  return sizeClass;
}
    503 
// Flag recording whether this region contains any nursery-owned allocations.
void SmallBufferRegion::setHasNurseryOwnedAllocs(bool value) {
  hasNurseryOwnedAllocs_ = value;
}
bool SmallBufferRegion::hasNurseryOwnedAllocs() const {
  return hasNurseryOwnedAllocs_.ref();
}
    510 
// Per-zone buffer allocator. The swept-data members and the "sweeping
// finished" flags are constructed with the shared buffer allocator mutex
// (presumably lock-checked wrappers -- see lock()).
BufferAllocator::BufferAllocator(Zone* zone)
    : zone(zone),
      sweptMixedChunks(lock()),
      sweptTenuredChunks(lock()),
      sweptLargeTenuredAllocs(lock()),
      minorState(State::NotCollecting),
      majorState(State::NotCollecting),
      minorSweepingFinished(lock()),
      majorSweepingFinished(lock()) {}

// Destruction requires that no GC state is live and that every chunk list,
// free list and large-allocation list has already been emptied.
BufferAllocator::~BufferAllocator() {
#ifdef DEBUG
  checkGCStateNotInUse();
  MOZ_ASSERT(mixedChunks.ref().isEmpty());
  MOZ_ASSERT(tenuredChunks.ref().isEmpty());
  freeLists.ref().assertEmpty();
  MOZ_ASSERT(availableMixedChunks.ref().isEmpty());
  MOZ_ASSERT(availableTenuredChunks.ref().isEmpty());
  MOZ_ASSERT(largeNurseryAllocs.ref().isEmpty());
  MOZ_ASSERT(largeTenuredAllocs.ref().isEmpty());
#endif
}
    533 
// Returns whether the allocator holds no chunks or large allocations at all.
// Only valid to call when no minor or major collection is in progress.
bool BufferAllocator::isEmpty() const {
  MOZ_ASSERT(!zone->wasGCStarted() || zone->isGCFinished());
  MOZ_ASSERT(minorState == State::NotCollecting);
  MOZ_ASSERT(majorState == State::NotCollecting);
  return mixedChunks.ref().isEmpty() && availableMixedChunks.ref().isEmpty() &&
         tenuredChunks.ref().isEmpty() &&
         availableTenuredChunks.ref().isEmpty() &&
         largeNurseryAllocs.ref().isEmpty() &&
         largeTenuredAllocs.ref().isEmpty();
}

// All BufferAllocators share the GC runtime's single buffer allocator mutex.
Mutex& BufferAllocator::lock() const {
  return zone->runtimeFromAnyThread()->gc.bufferAllocatorLock;
}
    548 
    549 void* BufferAllocator::alloc(size_t bytes, bool nurseryOwned) {
    550  MOZ_ASSERT_IF(zone->isGCMarkingOrSweeping(), majorState == State::Marking);
    551 
    552  if (IsLargeAllocSize(bytes)) {
    553    return allocLarge(bytes, nurseryOwned, false);
    554  }
    555 
    556  if (IsSmallAllocSize(bytes)) {
    557    return allocSmall(bytes, nurseryOwned, false);
    558  }
    559 
    560  return allocMedium(bytes, nurseryOwned, false);
    561 }
    562 
// Allocate while a minor GC is in progress. Like alloc(), but nursery-owned
// results are immediately marked so they survive the current collection.
void* BufferAllocator::allocInGC(size_t bytes, bool nurseryOwned) {
  // Currently this is used during tenuring only.
  MOZ_ASSERT(minorState == State::Marking);

  MOZ_ASSERT_IF(zone->isGCMarkingOrSweeping(), majorState == State::Marking);

  void* result;
  if (IsLargeAllocSize(bytes)) {
    result = allocLarge(bytes, nurseryOwned, true);
  } else if (IsSmallAllocSize(bytes)) {
    result = allocSmall(bytes, nurseryOwned, true);
  } else {
    result = allocMedium(bytes, nurseryOwned, true);
  }

  if (!result) {
    return nullptr;
  }

  // Barrier to mark nursery-owned allocations that happen during collection. We
  // don't need to do this for tenured-owned allocations because we don't sweep
  // tenured-owned allocations that happened after the start of a major
  // collection.
  if (nurseryOwned) {
    markNurseryOwnedAlloc(result, true);
  }

  return result;
}
    592 
#ifdef DEBUG

// Debug-only accessors for a large buffer's zone, recovered from the
// BufferChunk that holds its metadata.
inline Zone* LargeBuffer::zone() {
  Zone* zone = zoneFromAnyThread();
  MOZ_ASSERT(CurrentThreadCanAccessZone(zone));
  return zone;
}

inline Zone* LargeBuffer::zoneFromAnyThread() {
  return BufferChunk::from(this)->zone;
}

#endif
    606 
#ifdef XP_DARWIN
// Copy |bytes| of page-aligned memory using the Mach vm_copy primitive,
// which the kernel may satisfy by remapping pages rather than touching every
// byte. Used by realloc() for chunk-sized-or-larger copies; crashes on
// failure.
static inline void VirtualCopyPages(void* dst, const void* src, size_t bytes) {
  MOZ_ASSERT((uintptr_t(dst) & PageMask) == 0);
  MOZ_ASSERT((uintptr_t(src) & PageMask) == 0);
  MOZ_ASSERT(bytes >= ChunkSize);

  kern_return_t r = vm_copy(mach_task_self(), vm_address_t(src),
                            vm_size_t(bytes), vm_address_t(dst));
  if (r != KERN_SUCCESS) {
    MOZ_CRASH("vm_copy() failed");
  }
}
#endif
    620 
void* BufferAllocator::realloc(void* alloc, size_t bytes, bool nurseryOwned) {
  // Reallocate a buffer. This has the same semantics as standard library
  // realloc: if |ptr| is null it creates a new allocation, and if it fails it
  // returns |nullptr| and the original |ptr| is still valid.

  if (!alloc) {
    return this->alloc(bytes, nurseryOwned);
  }

  MOZ_ASSERT(isNurseryOwned(alloc) == nurseryOwned);
  MOZ_ASSERT_IF(zone->isGCMarkingOrSweeping(), majorState == State::Marking);

  // Round the request up to a supported allocation size.
  bytes = GetGoodAllocSize(bytes);

  // First try to resize in place: large allocations can shrink (on some
  // platforms) and medium allocations can grow or shrink, provided the new
  // size stays in the same size category.
  size_t currentBytes;
  if (IsLargeAlloc(alloc)) {
    LargeBuffer* buffer = lookupLargeBuffer(alloc);
    currentBytes = buffer->allocBytes();

    // We can shrink large allocations (on some platforms).
    if (bytes < buffer->allocBytes() && IsLargeAllocSize(bytes)) {
      if (shrinkLarge(buffer, bytes)) {
        return alloc;
      }
    }
  } else if (IsMediumAlloc(alloc)) {
    BufferChunk* chunk = BufferChunk::from(alloc);
    MOZ_ASSERT(!chunk->isSmallBufferRegion(alloc));

    currentBytes = chunk->allocBytes(alloc);

    // We can grow or shrink medium allocations.
    if (bytes < currentBytes && !IsSmallAllocSize(bytes)) {
      if (shrinkMedium(alloc, bytes)) {
        return alloc;
      }
    }

    if (bytes > currentBytes && !IsLargeAllocSize(bytes)) {
      if (growMedium(alloc, bytes)) {
        return alloc;
      }
    }
  } else {
    // TODO: Grow and shrink small allocations.
    auto* region = SmallBufferRegion::from(alloc);
    currentBytes = region->allocBytes(alloc);
  }

  if (bytes == currentBytes) {
    return alloc;  // Already the requested (rounded) size.
  }

  // In-place resize wasn't possible: allocate new space, copy, and free the
  // original. The original is only freed once the copy cannot fail.
  void* newAlloc = this->alloc(bytes, nurseryOwned);
  if (!newAlloc) {
    return nullptr;
  }

  auto freeGuard = mozilla::MakeScopeExit([&]() { free(alloc); });

  size_t bytesToCopy = std::min(bytes, currentBytes);

#ifdef XP_DARWIN
  // On Mac, chunk-sized copies between large allocations can use the cheaper
  // page-level vm_copy (see VirtualCopyPages).
  if (bytesToCopy >= ChunkSize) {
    MOZ_ASSERT(IsLargeAlloc(alloc));
    MOZ_ASSERT(IsLargeAlloc(newAlloc));
    VirtualCopyPages(newAlloc, alloc, bytesToCopy);
    return newAlloc;
  }
#endif

  memcpy(newAlloc, alloc, bytesToCopy);
  return newAlloc;
}
    695 
    696 void BufferAllocator::free(void* alloc) {
    697  MOZ_ASSERT(alloc);
    698 
    699  if (IsLargeAlloc(alloc)) {
    700    freeLarge(alloc);
    701    return;
    702  }
    703 
    704  if (IsMediumAlloc(alloc)) {
    705    freeMedium(alloc);
    706    return;
    707  }
    708 
    709  // Can't free small allocations.
    710 }
    711 
/* static */
// Distinguish buffer allocations from GC things and direct nursery
// allocations by inspecting the kind of the chunk containing the pointer.
bool BufferAllocator::IsBufferAlloc(void* alloc) {
  // Precondition: |alloc| is a pointer to a buffer allocation, a GC thing or a
  // direct nursery allocation returned by Nursery::allocateBuffer.

  if (IsLargeAlloc(alloc)) {
    return true;
  }

  ChunkBase* chunk = detail::GetGCAddressChunkBase(alloc);
  return chunk->getKind() == ChunkKind::Buffers;
}
    724 
#ifdef DEBUG
// Debug-only check that |alloc| belongs to this allocator: large buffers are
// looked up in the large-alloc map (taking the lock when required), other
// allocations are checked against their chunk's zone.
bool BufferAllocator::hasAlloc(void* alloc) {
  MOZ_ASSERT(IsBufferAlloc(alloc));

  if (IsLargeAlloc(alloc)) {
    MaybeLock lock;
    if (needLockToAccessBufferMap()) {
      lock.emplace(this);
    }
    auto ptr = largeAllocMap.ref().readonlyThreadsafeLookup(alloc);
    return ptr.found();
  }

  BufferChunk* chunk = BufferChunk::from(alloc);
  return chunk->zone == zone;
}
#endif
    742 
    743 size_t BufferAllocator::getAllocSize(void* alloc) {
    744  if (IsLargeAlloc(alloc)) {
    745    LargeBuffer* buffer = lookupLargeBuffer(alloc);
    746    return buffer->allocBytes();
    747  }
    748 
    749  if (IsSmallAlloc(alloc)) {
    750    auto* region = SmallBufferRegion::from(alloc);
    751    return region->allocBytes(alloc);
    752  }
    753 
    754  MOZ_ASSERT(IsMediumAlloc(alloc));
    755  BufferChunk* chunk = BufferChunk::from(alloc);
    756  return chunk->allocBytes(alloc);
    757 }
    758 
    759 bool BufferAllocator::isNurseryOwned(void* alloc) {
    760  if (IsLargeAlloc(alloc)) {
    761    LargeBuffer* buffer = lookupLargeBuffer(alloc);
    762    return buffer->isNurseryOwned;
    763  }
    764 
    765  if (IsSmallAlloc(alloc)) {
    766    auto* region = SmallBufferRegion::from(alloc);
    767    return region->isNurseryOwned(alloc);
    768  }
    769 
    770  BufferChunk* chunk = BufferChunk::from(alloc);
    771  return chunk->isNurseryOwned(alloc);
    772 }
    773 
// Mark a nursery-owned allocation during a minor collection. When
// |nurseryOwned| is false the allocation's owner was tenured, so the
// allocation is converted to tenured-owned instead of being marked.
void BufferAllocator::markNurseryOwnedAlloc(void* alloc, bool nurseryOwned) {
  MOZ_ASSERT(alloc);
  MOZ_ASSERT(isNurseryOwned(alloc));
  MOZ_ASSERT(minorState == State::Marking);

  if (IsLargeAlloc(alloc)) {
    LargeBuffer* buffer = lookupLargeBuffer(alloc);
    MOZ_ASSERT(buffer->zone() == zone);
    markLargeNurseryOwnedBuffer(buffer, nurseryOwned);
    return;
  }

  if (IsSmallAlloc(alloc)) {
    markSmallNurseryOwnedBuffer(alloc, nurseryOwned);
    return;
  }

  MOZ_ASSERT(IsMediumAlloc(alloc));
  markMediumNurseryOwnedBuffer(alloc, nurseryOwned);
}
    794 
// Mark (or tenure, if |nurseryOwned| is false) a nursery-owned small
// allocation. Already-marked allocations are left unchanged.
void BufferAllocator::markSmallNurseryOwnedBuffer(void* alloc,
                                                  bool nurseryOwned) {
#ifdef DEBUG
  BufferChunk* chunk = BufferChunk::from(alloc);
  MOZ_ASSERT(chunk->zone == zone);
  MOZ_ASSERT(chunk->hasNurseryOwnedAllocs);
#endif

  auto* region = SmallBufferRegion::from(alloc);
  MOZ_ASSERT(region->hasNurseryOwnedAllocs());
  MOZ_ASSERT(region->isNurseryOwned(alloc));

  if (region->isMarked(alloc)) {
    MOZ_ASSERT(nurseryOwned);
    return;
  }

  if (!nurseryOwned) {
    region->setNurseryOwned(alloc, false);
    // If all nursery owned allocations in the region were tenured then
    // chunk->isNurseryOwned(region) will now be stale. It will be updated when
    // the region is swept.
    return;
  }

  region->setMarked(alloc);
}
    822 
// Mark (or tenure, if |nurseryOwned| is false) a nursery-owned medium
// allocation. The first visit also accounts the allocation's size to the
// heap.
void BufferAllocator::markMediumNurseryOwnedBuffer(void* alloc,
                                                   bool nurseryOwned) {
  BufferChunk* chunk = BufferChunk::from(alloc);
  MOZ_ASSERT(chunk->zone == zone);
  MOZ_ASSERT(chunk->hasNurseryOwnedAllocs);
  MOZ_ASSERT(chunk->isAllocated(alloc));
  MOZ_ASSERT(chunk->isNurseryOwned(alloc));

  if (chunk->isMarked(alloc)) {
    MOZ_ASSERT(nurseryOwned);
    return;
  }

  size_t size = chunk->allocBytes(alloc);
  increaseHeapSize(size, nurseryOwned, false, false);

  if (!nurseryOwned) {
    // Change the allocation to a tenured owned one. This prevents sweeping in a
    // minor collection.
    chunk->setNurseryOwned(alloc, false);
    return;
  }

  chunk->setMarked(alloc);
}
    848 
// Mark (or tenure, if |nurseryOwned| is false) a nursery-owned large
// allocation. Mark state lives on the LargeBuffer metadata, which is itself
// a small nursery-owned buffer; a surviving buffer is moved off the sweep
// list and onto the appropriate live list.
void BufferAllocator::markLargeNurseryOwnedBuffer(LargeBuffer* buffer,
                                                  bool nurseryOwned) {
  MOZ_ASSERT(buffer->isNurseryOwned);

  // The buffer metadata is held in a small buffer. Check whether it has already
  // been marked.
  auto* region = SmallBufferRegion::from(buffer);
  MOZ_ASSERT(region->isNurseryOwned(buffer));

  if (region->isMarked(buffer)) {
    MOZ_ASSERT(nurseryOwned);
    return;
  }

  // Mark the metadata allocation itself (tenuring it if |nurseryOwned| is
  // false).
  markSmallNurseryOwnedBuffer(buffer, nurseryOwned);

  // The buffer survives this minor GC, so it must not be swept.
  largeNurseryAllocsToSweep.ref().remove(buffer);

  size_t usableSize = buffer->allocBytes();
  increaseHeapSize(usableSize, nurseryOwned, false, false);

  if (!nurseryOwned) {
    buffer->isNurseryOwned = false;
    buffer->allocatedDuringCollection = majorState != State::NotCollecting;
    largeTenuredAllocs.ref().pushBack(buffer);
    return;
  }

  largeNurseryAllocs.ref().pushBack(buffer);
}
    879 
    880 bool BufferAllocator::isMarkedBlack(void* alloc) {
    881  if (IsLargeAlloc(alloc)) {
    882    // The buffer metadata is held in a small buffer.
    883    alloc = lookupLargeBuffer(alloc);
    884  } else if (!IsSmallAlloc(alloc)) {
    885    MOZ_ASSERT(IsMediumAlloc(alloc));
    886    BufferChunk* chunk = BufferChunk::from(alloc);
    887    return chunk->isMarked(alloc);
    888  }
    889 
    890  auto* region = SmallBufferRegion::from(alloc);
    891  return region->isMarked(alloc);
    892 }
    893 
// Trace an edge from |owner| to the buffer stored at |*bufferp|, dispatching
// to the size-specific tracing method. Only tenuring and marking tracers do
// any work here.
void BufferAllocator::traceEdge(JSTracer* trc, Cell* owner, void** bufferp,
                               const char* name) {
 // Buffers are conceptually part of the owning cell and are not reported to
 // the tracer.

 // TODO: This should be unified with the rest of the tracing system.

 MOZ_ASSERT(bufferp);

 void* buffer = *bufferp;
 MOZ_ASSERT(buffer);

 // Ignore marking edges for zones that aren't currently being marked.
 if (trc->isMarkingTracer() && !zone->isGCMarking()) {
   return;
 }

 MOZ_ASSERT_IF(trc->isTenuringTracer(),
               minorState.refNoCheck() == State::Marking);
 MOZ_ASSERT_IF(trc->isMarkingTracer(),
               majorState.refNoCheck() == State::Marking);

 if (!IsLargeAlloc(buffer) &&
     js::gc::detail::GetGCAddressChunkBase(buffer)->isNurseryChunk()) {
   // JSObject slots and elements can be allocated in the nursery and this is
   // handled separately.
   return;
 }

 MOZ_ASSERT(IsBufferAlloc(buffer));
 MOZ_ASSERT_IF(isNurseryOwned(buffer), owner);

 // Dispatch on the allocation's size class.
 if (IsLargeAlloc(buffer)) {
   traceLargeAlloc(trc, owner, bufferp, name);
   return;
 }

 if (IsSmallAlloc(buffer)) {
   traceSmallAlloc(trc, owner, bufferp, name);
   return;
 }

 traceMediumAlloc(trc, owner, bufferp, name);
}
    937 
    938 void BufferAllocator::traceSmallAlloc(JSTracer* trc, Cell* owner, void** allocp,
    939                                      const char* name) {
    940  void* alloc = *allocp;
    941  auto* region = SmallBufferRegion::from(alloc);
    942 
    943  if (trc->isTenuringTracer()) {
    944    if (region->isNurseryOwned(alloc)) {
    945      markSmallNurseryOwnedBuffer(alloc, !owner->isTenured());
    946    }
    947    return;
    948  }
    949 
    950  if (trc->isMarkingTracer()) {
    951    if (!region->isNurseryOwned(alloc)) {
    952      markSmallTenuredAlloc(alloc);
    953    }
    954    return;
    955  }
    956 }
    957 
    958 void BufferAllocator::traceMediumAlloc(JSTracer* trc, Cell* owner,
    959                                       void** allocp, const char* name) {
    960  void* alloc = *allocp;
    961  BufferChunk* chunk = BufferChunk::from(alloc);
    962 
    963  if (trc->isTenuringTracer()) {
    964    if (chunk->isNurseryOwned(alloc)) {
    965      markMediumNurseryOwnedBuffer(alloc, !owner->isTenured());
    966    }
    967    return;
    968  }
    969 
    970  if (trc->isMarkingTracer()) {
    971    if (!chunk->isNurseryOwned(alloc)) {
    972      markMediumTenuredAlloc(alloc);
    973    }
    974    return;
    975  }
    976 }
    977 
    978 void BufferAllocator::traceLargeAlloc(JSTracer* trc, Cell* owner, void** allocp,
    979                                      const char* name) {
    980  void* alloc = *allocp;
    981  LargeBuffer* buffer = lookupLargeBuffer(alloc);
    982 
    983  if (trc->isTenuringTracer()) {
    984    if (buffer->isNurseryOwned) {
    985      markLargeNurseryOwnedBuffer(buffer, !owner->isTenured());
    986    }
    987    return;
    988  }
    989 
    990  if (trc->isMarkingTracer()) {
    991    if (!buffer->isNurseryOwned) {
    992      markLargeTenuredBuffer(buffer);
    993    }
    994    return;
    995  }
    996 }
    997 
    998 bool BufferAllocator::markTenuredAlloc(void* alloc) {
    999  MOZ_ASSERT(alloc);
   1000  MOZ_ASSERT(!isNurseryOwned(alloc));
   1001 
   1002  if (IsLargeAlloc(alloc)) {
   1003    LargeBuffer* buffer = lookupLargeBuffer(alloc);
   1004    return markLargeTenuredBuffer(buffer);
   1005  }
   1006 
   1007  if (IsSmallAlloc(alloc)) {
   1008    return markSmallTenuredAlloc(alloc);
   1009  }
   1010 
   1011  return markMediumTenuredAlloc(alloc);
   1012 }
   1013 
   1014 bool BufferAllocator::markSmallTenuredAlloc(void* alloc) {
   1015  auto* chunk = BufferChunk::from(alloc);
   1016  if (chunk->allocatedDuringCollection) {
   1017    // Will not be swept, already counted as marked.
   1018    return false;
   1019  }
   1020 
   1021  auto* region = SmallBufferRegion::from(alloc);
   1022  MOZ_ASSERT(region->isAllocated(alloc));
   1023  return region->setMarked(alloc);
   1024 }
   1025 
   1026 bool BufferAllocator::markMediumTenuredAlloc(void* alloc) {
   1027  BufferChunk* chunk = BufferChunk::from(alloc);
   1028  MOZ_ASSERT(chunk->isAllocated(alloc));
   1029  if (chunk->allocatedDuringCollection) {
   1030    // Will not be swept, already counted as marked.
   1031    return false;
   1032  }
   1033 
   1034  return chunk->setMarked(alloc);
   1035 }
   1036 
// Prepare for a minor collection: swap the list of large nursery-owned
// buffers into the 'to sweep' list and enter the Marking state. Buffers
// marked during nursery tracing are moved back (see
// markLargeNurseryOwnedBuffer); anything left is freed by minor sweeping.
void BufferAllocator::startMinorCollection(MaybeLock& lock) {
 maybeMergeSweptData(lock);

#ifdef DEBUG
 MOZ_ASSERT(minorState == State::NotCollecting);
 if (majorState == State::NotCollecting) {
   GCRuntime* gc = &zone->runtimeFromMainThread()->gc;
   if (gc->hasZealMode(ZealMode::CheckHeapBeforeMinorGC)) {
     // This is too expensive to run on every minor GC.
     checkGCStateNotInUse(lock);
   }
 }
#endif

 // Large allocations that are marked when tracing the nursery will be moved
 // back to the main list.
 MOZ_ASSERT(largeNurseryAllocsToSweep.ref().isEmpty());
 std::swap(largeNurseryAllocs.ref(), largeNurseryAllocsToSweep.ref());

 minorState = State::Marking;
}
   1058 
// Transition from minor marking to minor sweeping. Returns false if there is
// nothing to sweep (the state is reset to NotCollecting), or true if the
// allocator entered the Sweeping state and sweepForMinorCollection should be
// run on a background thread.
bool BufferAllocator::startMinorSweeping() {
 // Called during minor GC. Operates on the active allocs/chunks lists. The 'to
 // sweep' lists do not contain nursery owned allocations.

#ifdef DEBUG
 MOZ_ASSERT(minorState == State::Marking);
 {
   AutoLock lock(this);
   MOZ_ASSERT(!minorSweepingFinished);
   MOZ_ASSERT(sweptMixedChunks.ref().isEmpty());
 }
 for (LargeBuffer* buffer : largeNurseryAllocs.ref()) {
   MOZ_ASSERT(buffer->isNurseryOwned);
 }
 for (LargeBuffer* buffer : largeNurseryAllocsToSweep.ref()) {
   MOZ_ASSERT(buffer->isNurseryOwned);
 }
#endif

 // Check whether there are any medium chunks containing nursery owned
 // allocations that need to be swept.
 if (mixedChunks.ref().isEmpty() && availableMixedChunks.ref().isEmpty() &&
     largeNurseryAllocsToSweep.ref().isEmpty()) {
   // Nothing more to do. Don't transition to sweeping state.
   minorState = State::NotCollecting;
   return false;
 }

#ifdef DEBUG
 for (BufferChunk* chunk : mixedChunks.ref()) {
   MOZ_ASSERT(!chunk->ownsFreeLists);
   chunk->freeLists.ref().assertEmpty();
 }
#endif

 // Move free regions in |tenuredChunks| out of |freeLists| and into their
 // respective chunk header. Discard free regions in |mixedChunks| which will
 // be rebuilt by sweeping.
 //
 // This is done for |tenuredChunks| too in order to reduce the number of free
 // regions we need to process here on the next minor GC.
 //
 // Some possibilities to make this more efficient are:
 //  - have separate free lists for nursery/tenured chunks
 //  - keep free regions at different ends of the free list depending on chunk
 //    kind
 freeLists.ref().forEachRegion(
     [](FreeList& list, size_t sizeClass, FreeRegion* region) {
       BufferChunk* chunk = BufferChunk::from(region);
       if (!chunk->hasNurseryOwnedAllocs) {
         list.remove(region);
         chunk->freeLists.ref().pushBack(sizeClass, region);
       }
     });
 freeLists.ref().clear();

 // Set the flag to indicate all tenured chunks now own their free regions.
 for (BufferChunk* chunk : tenuredChunks.ref()) {
   MOZ_ASSERT(!chunk->hasNurseryOwnedAllocs);
   chunk->ownsFreeLists = true;
 }

 // Move all mixed chunks to the list of chunks to sweep.
 mixedChunksToSweep.ref() = std::move(mixedChunks.ref());
 mixedChunksToSweep.ref().append(
     availableMixedChunks.ref().extractAllChunks());

 // Move all tenured chunks to |availableTenuredChunks|.
 while (BufferChunk* chunk = tenuredChunks.ref().popFirst()) {
   availableTenuredChunks.ref().pushBack(chunk);
 }

 minorState = State::Sweeping;

 return true;
}
   1135 
// Node in an intrusive singly-linked list of large allocations queued to be
// unmapped. Nodes are constructed in place inside the buffer memory itself
// (see PushLargeAllocToFree), so building the list requires no allocation.
struct LargeAllocToFree {
 size_t bytes;  // Size of the mapping to pass to UnmapPages.
 LargeAllocToFree* next = nullptr;

 explicit LargeAllocToFree(size_t bytes) : bytes(bytes) {}
};
   1142 
// Queue |buffer|'s memory for later unmapping by pushing it onto the list at
// |*listHead|. The list node is placement-new'd into the start of the
// buffer's own data, capturing the mapping size before the LargeBuffer
// metadata is unregistered by the caller.
static void PushLargeAllocToFree(LargeAllocToFree** listHead,
                                LargeBuffer* buffer) {
 auto* alloc = new (buffer->data()) LargeAllocToFree(buffer->bytes);
 alloc->next = *listHead;
 *listHead = alloc;
}
   1149 
   1150 static void FreeLargeAllocs(LargeAllocToFree* listHead) {
   1151  while (listHead) {
   1152    LargeAllocToFree* alloc = listHead;
   1153    LargeAllocToFree* next = alloc->next;
   1154    UnmapPages(alloc, alloc->bytes);
   1155    listHead = next;
   1156  }
   1157 }
   1158 
// Perform minor sweeping: free dead large nursery-owned buffers and sweep
// mixed chunks, publishing swept chunks to |sweptMixedChunks| for the main
// thread to merge. Signals completion via |minorSweepingFinished|.
void BufferAllocator::sweepForMinorCollection() {
 // Called on a background thread.

 MOZ_ASSERT(minorState.refNoCheck() == State::Sweeping);
 {
   AutoLock lock(this);
   MOZ_ASSERT(sweptMixedChunks.ref().isEmpty());
 }

 // Bug 1961749: Freeing large buffers can be slow so it might be worth
 // splitting sweeping into two phases so that all zones get their medium
 // buffers swept and made available for allocation before any large buffers
 // are freed.

 // Freeing large buffers may be slow, so leave that till the end. However
 // large buffer metadata is stored in small buffers so form a list of large
 // buffers to free before sweeping small buffers.
 LargeAllocToFree* largeAllocsToFree = nullptr;
 while (!largeNurseryAllocsToSweep.ref().isEmpty()) {
   LargeBuffer* buffer = largeNurseryAllocsToSweep.ref().popFirst();
   PushLargeAllocToFree(&largeAllocsToFree, buffer);
   MaybeLock lock(std::in_place, this);
   unregisterLarge(buffer, true, lock);
 }

 // Sweep each mixed chunk and publish surviving chunks under the lock.
 while (!mixedChunksToSweep.ref().isEmpty()) {
   BufferChunk* chunk = mixedChunksToSweep.ref().popFirst();
   if (sweepChunk(chunk, SweepKind::Nursery, false)) {
     {
       AutoLock lock(this);
       sweptMixedChunks.ref().pushBack(chunk);
     }

     // Signal to the main thread that swept data is available by setting this
     // relaxed atomic flag.
     hasMinorSweepDataToMerge = true;
   }
 }

 // Unmap large buffers.
 FreeLargeAllocs(largeAllocsToFree);

 // Signal to main thread to update minorState.
 {
   AutoLock lock(this);
   MOZ_ASSERT(!minorSweepingFinished);
   minorSweepingFinished = true;
   hasMinorSweepDataToMerge = true;
 }
}
   1209 
// Prepare this zone for major collection: move all tenured chunks and large
// tenured allocations onto the 'to sweep' lists, hand free regions back to
// their owning chunks, and enter the Marking state.
void BufferAllocator::startMajorCollection(MaybeLock& lock) {
 maybeMergeSweptData(lock);

#ifdef DEBUG
 MOZ_ASSERT(majorState == State::NotCollecting);
 checkGCStateNotInUse(lock);

 // Everything is tenured since we just evicted the nursery, or will be by the
 // time minor sweeping finishes.
 MOZ_ASSERT(mixedChunks.ref().isEmpty());
 MOZ_ASSERT(availableMixedChunks.ref().isEmpty());
 MOZ_ASSERT(largeNurseryAllocs.ref().isEmpty());
#endif

#ifdef DEBUG
 for (BufferChunk* chunk : tenuredChunks.ref()) {
   MOZ_ASSERT(!chunk->ownsFreeLists);
   chunk->freeLists.ref().assertEmpty();
 }
#endif

 largeTenuredAllocsToSweep.ref() = std::move(largeTenuredAllocs.ref());

 // Move free regions that need to be swept to the free lists in their
 // respective chunks.
 freeLists.ref().forEachRegion(
     [](FreeList& list, size_t sizeClass, FreeRegion* region) {
       BufferChunk* chunk = BufferChunk::from(region);
       MOZ_ASSERT(!chunk->hasNurseryOwnedAllocs);
       list.remove(region);
       chunk->freeLists.ref().pushBack(sizeClass, region);
     });

 // All tenured chunks now own their free regions.
 for (BufferChunk* chunk : tenuredChunks.ref()) {
   MOZ_ASSERT(!chunk->hasNurseryOwnedAllocs);
   chunk->ownsFreeLists = true;
 }

 tenuredChunksToSweep.ref() = std::move(tenuredChunks.ref());
 tenuredChunksToSweep.ref().append(
     availableTenuredChunks.ref().extractAllChunks());

 if (minorState == State::Sweeping) {
   // Ensure swept nursery chunks are moved to the tenuredChunks lists in
   // mergeSweptData.
   majorStartedWhileMinorSweeping = true;
 }

#ifdef DEBUG
 MOZ_ASSERT(tenuredChunks.ref().isEmpty());
 MOZ_ASSERT(availableTenuredChunks.ref().isEmpty());
 freeLists.ref().assertEmpty();
 MOZ_ASSERT(largeTenuredAllocs.ref().isEmpty());
#endif

 majorState = State::Marking;
}
   1267 
// Transition this zone from major marking to major sweeping. Any pending
// swept data is merged first so that sweeping starts from a consistent state.
void BufferAllocator::startMajorSweeping(MaybeLock& lock) {
 // Called when a zone transitions from marking to sweeping.

#ifdef DEBUG
 MOZ_ASSERT(majorState == State::Marking);
 MOZ_ASSERT(zone->isGCFinished());
 MOZ_ASSERT(!majorSweepingFinished.refNoCheck());
#endif

 maybeMergeSweptData(lock);
 MOZ_ASSERT(!majorStartedWhileMinorSweeping);

 majorState = State::Sweeping;
}
   1282 
// Perform major sweeping: free unmarked large tenured buffers, sweep tenured
// chunks (optionally decommitting free pages), and publish the results for
// the main thread to merge. Signals completion via |majorSweepingFinished|.
void BufferAllocator::sweepForMajorCollection(bool shouldDecommit) {
 // Called on a background thread.

 MOZ_ASSERT(majorState.refNoCheck() == State::Sweeping);

 // Sweep large allocs first since they rely on the mark bits of their
 // corresponding LargeBuffer structures which are stored small buffers.
 //
 // It's tempting to try and optimize this by moving the allocations between
 // lists when they are marked, in the same way as for nursery sweeping. This
 // would require synchronizing the list modification when marking in parallel,
 // so is probably not worth it.
 LargeAllocList sweptLargeAllocs;
 LargeAllocToFree* largeAllocsToFree = nullptr;
 while (!largeTenuredAllocsToSweep.ref().isEmpty()) {
   LargeBuffer* buffer = largeTenuredAllocsToSweep.ref().popFirst();
   if (isLargeTenuredMarked(buffer)) {
     sweptLargeAllocs.pushBack(buffer);
   } else {
     PushLargeAllocToFree(&largeAllocsToFree, buffer);
     MaybeLock lock(std::in_place, this);
     unregisterLarge(buffer, true, lock);
   }
 }

 while (!tenuredChunksToSweep.ref().isEmpty()) {
   BufferChunk* chunk = tenuredChunksToSweep.ref().popFirst();
   if (sweepChunk(chunk, SweepKind::Tenured, shouldDecommit)) {
     {
       AutoLock lock(this);
       sweptTenuredChunks.ref().pushBack(chunk);
     }

     // Signal to the main thread that swept data is available by setting this
     // relaxed atomic flag.
     // NOTE(review): despite its name this flag is also set here during major
     // sweeping; it appears to be reused as a generic "swept data available"
     // signal — confirm against the flag's consumers.
     hasMinorSweepDataToMerge = true;
   }
 }

 // Unmap large buffers.
 //
 // Bug 1961749: This could possibly run after signalling sweeping is finished
 // or concurrently with other sweeping.
 FreeLargeAllocs(largeAllocsToFree);

 AutoLock lock(this);
 sweptLargeTenuredAllocs.ref() = std::move(sweptLargeAllocs);

 // Signal to main thread to update majorState.
 MOZ_ASSERT(!majorSweepingFinished);
 majorSweepingFinished = true;
}
   1335 
// Finish a major collection for this zone, merging any outstanding swept
// data and unwinding an aborted collection if sweeping never started.
void BufferAllocator::finishMajorCollection(const AutoLock& lock) {
 // This can be called in any state:
 //
 //  - NotCollecting: after major sweeping has finished and the state has been
 //                   reset to NotCollecting in mergeSweptData.
 //
 //  - Marking:       if collection was aborted and startMajorSweeping was not
 //                   called.
 //
 //  - Sweeping:      if sweeping has finished and mergeSweptData has not been
 //                   called yet.

 MOZ_ASSERT_IF(majorState == State::Sweeping, majorSweepingFinished);

 if (minorState == State::Sweeping || majorState == State::Sweeping) {
   mergeSweptData(lock);
 }

 // Still in Marking here means the collection was aborted before sweeping.
 if (majorState == State::Marking) {
   abortMajorSweeping(lock);
 }

#ifdef DEBUG
 checkGCStateNotInUse(lock);
#endif
}
   1362 
// Undo the state changes made by startMajorCollection when a major
// collection is aborted before this zone was swept: clear mark bits and the
// allocatedDuringCollection flags, and restore the 'to sweep' lists to their
// active counterparts.
void BufferAllocator::abortMajorSweeping(const AutoLock& lock) {
 // We have aborted collection without sweeping this zone. Restore or rebuild
 // the original state.

#ifdef DEBUG
 MOZ_ASSERT(majorState == State::Marking);
 MOZ_ASSERT(sweptTenuredChunks.ref().isEmpty());
 for (auto chunk = availableTenuredChunks.ref().chunkIter(); !chunk.done();
      chunk.next()) {
   MOZ_ASSERT(chunk->allocatedDuringCollection);
 }
#endif

 clearAllocatedDuringCollectionState(lock);

 if (minorState == State::Sweeping) {
   // If we are minor sweeping then chunks with allocatedDuringCollection set
   // may be present in |mixedChunksToSweep|. Set a flag so these are cleared
   // when they are merged later.
   majorFinishedWhileMinorSweeping = true;
 }

 for (BufferChunk* chunk : tenuredChunksToSweep.ref()) {
   MOZ_ASSERT(chunk->ownsFreeLists);

   // Clear mark bits for chunks we didn't end up sweeping.
   clearChunkMarkBits(chunk);
 }

 // Return unswept chunks to the available list.
 while (BufferChunk* chunk = tenuredChunksToSweep.ref().popFirst()) {
   availableTenuredChunks.ref().pushBack(chunk);
 }

 largeTenuredAllocs.ref().prepend(std::move(largeTenuredAllocsToSweep.ref()));

 majorState = State::NotCollecting;
}
   1400 
// Clear the allocatedDuringCollection flag on every chunk and large tenured
// allocation, after a major collection finishes or is aborted.
void BufferAllocator::clearAllocatedDuringCollectionState(
   const AutoLock& lock) {
#ifdef DEBUG
 // This flag is not set for large nursery-owned allocations.
 for (LargeBuffer* buffer : largeNurseryAllocs.ref()) {
   MOZ_ASSERT(!buffer->allocatedDuringCollection);
 }
#endif

 ClearAllocatedDuringCollection(mixedChunks.ref());
 ClearAllocatedDuringCollection(availableMixedChunks.ref());
 ClearAllocatedDuringCollection(tenuredChunks.ref());
 ClearAllocatedDuringCollection(availableTenuredChunks.ref());
 ClearAllocatedDuringCollection(largeTenuredAllocs.ref());
}
   1416 
   1417 /* static */
   1418 void BufferAllocator::ClearAllocatedDuringCollection(ChunkLists& chunks) {
   1419  for (auto chunk = chunks.chunkIter(); !chunk.done(); chunk.next()) {
   1420    chunk->allocatedDuringCollection = false;
   1421  }
   1422 }
   1423 /* static */
   1424 void BufferAllocator::ClearAllocatedDuringCollection(BufferChunkList& list) {
   1425  for (auto* chunk : list) {
   1426    chunk->allocatedDuringCollection = false;
   1427  }
   1428 }
   1429 /* static */
   1430 void BufferAllocator::ClearAllocatedDuringCollection(LargeAllocList& list) {
   1431  for (auto* element : list) {
   1432    element->allocatedDuringCollection = false;
   1433  }
   1434 }
   1435 
   1436 void BufferAllocator::maybeMergeSweptData() {
   1437  if (minorState == State::Sweeping || majorState == State::Sweeping) {
   1438    mergeSweptData();
   1439  }
   1440 }
   1441 
// Merge background-swept data, taking the buffer allocator lock.
void BufferAllocator::mergeSweptData() {
 AutoLock lock(this);
 mergeSweptData(lock);
}
   1446 
   1447 void BufferAllocator::maybeMergeSweptData(MaybeLock& lock) {
   1448  if (minorState == State::Sweeping || majorState == State::Sweeping) {
   1449    if (lock.isNothing()) {
   1450      lock.emplace(this);
   1451    }
   1452    mergeSweptData(lock.ref());
   1453  }
   1454 }
   1455 
// Merge data produced by background sweeping back into the main allocator
// state, under the buffer allocator lock. Also completes the minor/major
// Sweeping -> NotCollecting transitions once the corresponding finished flag
// has been set by the sweeping thread.
void BufferAllocator::mergeSweptData(const AutoLock& lock) {
 MOZ_ASSERT(minorState == State::Sweeping || majorState == State::Sweeping);

 if (majorSweepingFinished) {
   clearAllocatedDuringCollectionState(lock);

   if (minorState == State::Sweeping) {
     majorFinishedWhileMinorSweeping = true;
   }
 }

 // Merge swept chunks that previously contained nursery owned allocations. If
 // semispace nursery collection is in use then these chunks may contain both
 // nursery and tenured-owned allocations, otherwise all allocations will be
 // tenured-owned.
 while (!sweptMixedChunks.ref().isEmpty()) {
   BufferChunk* chunk = sweptMixedChunks.ref().popLast();
   MOZ_ASSERT(chunk->ownsFreeLists);
   MOZ_ASSERT(chunk->hasNurseryOwnedAllocs);
   // Sweeping recomputed whether any nursery-owned allocations remain.
   chunk->hasNurseryOwnedAllocs = chunk->hasNurseryOwnedAllocsAfterSweep;

   MOZ_ASSERT_IF(
       majorState == State::NotCollecting && !majorFinishedWhileMinorSweeping,
       !chunk->allocatedDuringCollection);
   if (majorFinishedWhileMinorSweeping) {
     chunk->allocatedDuringCollection = false;
   }

   // Route the chunk to the appropriate list based on its contents and
   // whether a major collection started while this minor sweep was running.
   size_t sizeClass = chunk->sizeClassForAvailableLists();
   if (chunk->hasNurseryOwnedAllocs) {
     availableMixedChunks.ref().pushFront(sizeClass, chunk);
   } else if (majorStartedWhileMinorSweeping) {
     tenuredChunksToSweep.ref().pushFront(chunk);
   } else {
     availableTenuredChunks.ref().pushFront(sizeClass, chunk);
   }
 }

 // Merge swept chunks that did not contain nursery owned allocations.
#ifdef DEBUG
 for (BufferChunk* chunk : sweptTenuredChunks.ref()) {
   MOZ_ASSERT(!chunk->hasNurseryOwnedAllocs);
   MOZ_ASSERT(!chunk->hasNurseryOwnedAllocsAfterSweep);
   MOZ_ASSERT(!chunk->allocatedDuringCollection);
 }
#endif
 while (BufferChunk* chunk = sweptTenuredChunks.ref().popFirst()) {
   size_t sizeClass = chunk->sizeClassForAvailableLists();
   availableTenuredChunks.ref().pushFront(sizeClass, chunk);
 }

 largeTenuredAllocs.ref().prepend(std::move(sweptLargeTenuredAllocs.ref()));

 hasMinorSweepDataToMerge = false;

 // Complete the minor Sweeping -> NotCollecting transition if the background
 // thread has finished.
 if (minorSweepingFinished) {
   MOZ_ASSERT(minorState == State::Sweeping);
   minorState = State::NotCollecting;
   minorSweepingFinished = false;
   majorStartedWhileMinorSweeping = false;
   majorFinishedWhileMinorSweeping = false;

#ifdef DEBUG
   for (BufferChunk* chunk : mixedChunks.ref()) {
     verifyChunk(chunk, true);
   }
   for (BufferChunk* chunk : tenuredChunks.ref()) {
     verifyChunk(chunk, false);
   }
#endif
 }

 // Likewise for the major transition.
 if (majorSweepingFinished) {
   MOZ_ASSERT(majorState == State::Sweeping);
   majorState = State::NotCollecting;
   majorSweepingFinished = false;

   MOZ_ASSERT(tenuredChunksToSweep.ref().isEmpty());
 }
}
   1536 
// Clear all mark bits set by barrier verification. Only valid when no GC is
// in progress for this zone.
void BufferAllocator::clearMarkStateAfterBarrierVerification() {
 MOZ_ASSERT(!zone->wasGCStarted());

 maybeMergeSweptData();
 MOZ_ASSERT(minorState == State::NotCollecting);
 MOZ_ASSERT(majorState == State::NotCollecting);

 // Clear mark bits in the plain chunk lists...
 for (auto* chunks : {&mixedChunks.ref(), &tenuredChunks.ref()}) {
   for (auto* chunk : *chunks) {
     clearChunkMarkBits(chunk);
   }
 }

 // ...and in the size-classed available chunk lists.
 for (auto* chunks :
      {&availableMixedChunks.ref(), &availableTenuredChunks.ref()}) {
   for (auto chunk = chunks->chunkIter(); !chunk.done(); chunk.next()) {
     clearChunkMarkBits(chunk);
   }
 }

#ifdef DEBUG
 checkGCStateNotInUse();
#endif
}
   1561 
   1562 void BufferAllocator::clearChunkMarkBits(BufferChunk* chunk) {
   1563  chunk->markBits.ref().clear();
   1564  for (auto iter = chunk->smallRegionIter(); !iter.done(); iter.next()) {
   1565    SmallBufferRegion* region = iter.get();
   1566    region->markBits.ref().clear();
   1567  }
   1568 }
   1569 
// Return whether |ptr| points within any live buffer allocation in this
// allocator (interior pointers included). Used for debugging/diagnostics;
// data being swept on another thread is not examined.
bool BufferAllocator::isPointerWithinBuffer(void* ptr) {
 maybeMergeSweptData();

 MOZ_ASSERT(mixedChunksToSweep.ref().isEmpty());
 MOZ_ASSERT_IF(majorState != State::Marking,
               tenuredChunksToSweep.ref().isEmpty());

 // Search the plain chunk lists.
 for (const auto* chunks : {&mixedChunks.ref(), &tenuredChunks.ref(),
                            &tenuredChunksToSweep.ref()}) {
   for (auto* chunk : *chunks) {
     if (chunk->isPointerWithinAllocation(ptr)) {
       return true;
     }
   }
 }

 // Search the size-classed available chunk lists.
 for (auto* chunks :
      {&availableMixedChunks.ref(), &availableTenuredChunks.ref()}) {
   for (auto chunk = chunks->chunkIter(); !chunk.done(); chunk.next()) {
     if (chunk->isPointerWithinAllocation(ptr)) {
       return true;
     }
   }
 }

 // Note we cannot safely access data that is being swept on another thread.

 // Search the large allocation lists.
 for (const auto* allocs :
      {&largeNurseryAllocs.ref(), &largeTenuredAllocs.ref()}) {
   for (auto* alloc : *allocs) {
     if (alloc->isPointerWithinAllocation(ptr)) {
       return true;
     }
   }
 }

 return false;
}
   1608 
// Return whether |ptr| points within a live allocation in this chunk
// (interior pointers included).
bool BufferChunk::isPointerWithinAllocation(void* ptr) const {
 // Unsigned wraparound also rejects pointers below the chunk's start.
 uintptr_t offset = uintptr_t(ptr) - uintptr_t(this);
 if (offset >= ChunkSize || offset < FirstMediumAllocOffset) {
   return false;
 }

 // If the pointer falls inside a small buffer region, delegate to it.
 if (smallRegionBitmap.ref().getBit(offset / SmallRegionSize)) {
   auto* region = SmallBufferRegion::from(ptr);
   return region->isPointerWithinAllocation(ptr);
 }

 // Otherwise find the nearest medium allocation starting at or before |ptr|;
 // ChunkSize is the sentinel for "none found".
 uintptr_t allocOffset =
     findPrevAllocated(RoundDown(offset, MinMediumAllocSize));
 MOZ_ASSERT(allocOffset <= ChunkSize);
 if (allocOffset == ChunkSize) {
   return false;
 }

 // Check |ptr| falls within that allocation's extent.
 const void* alloc = ptrFromOffset(allocOffset);
 size_t size = allocBytes(alloc);
 return offset < allocOffset + size;
}
   1631 
// Return whether |ptr| points within a live small allocation in this region
// (interior pointers included). The pointer must already be known to fall
// inside the region.
bool SmallBufferRegion::isPointerWithinAllocation(void* ptr) const {
 uintptr_t offset = uintptr_t(ptr) - uintptr_t(this);
 MOZ_ASSERT(offset < SmallRegionSize);

 // Find the nearest allocation starting at or before |ptr|; SmallRegionSize
 // is the sentinel for "none found".
 uintptr_t allocOffset =
     findPrevAllocated(RoundDown(offset, SmallAllocGranularity));
 MOZ_ASSERT(allocOffset <= SmallRegionSize);
 if (allocOffset == SmallRegionSize) {
   return false;
 }

 // Check |ptr| falls within that allocation's extent.
 const void* alloc = ptrFromOffset(allocOffset);
 size_t size = allocBytes(alloc);
 return offset < allocOffset + size;
}
   1647 
   1648 bool LargeBuffer::isPointerWithinAllocation(void* ptr) const {
   1649  return uintptr_t(ptr) - uintptr_t(alloc) < bytes;
   1650 }
   1651 
   1652 #ifdef DEBUG
   1653 
// Debug-only: assert that no GC-related allocator state is in use, merging
// any pending swept data first.
void BufferAllocator::checkGCStateNotInUse() {
 maybeMergeSweptData();
 AutoLock lock(this);  // Some fields are protected by this lock.
 checkGCStateNotInUse(lock);
}
   1659 
// Debug-only: as above, but takes the lock only if the caller doesn't
// already hold one.
void BufferAllocator::checkGCStateNotInUse(MaybeLock& maybeLock) {
 if (maybeLock.isNothing()) {
   // Some fields are protected by this lock.
   maybeLock.emplace(this);
 }

 checkGCStateNotInUse(maybeLock.ref());
}
   1668 
// Debug-only: assert that all GC-related allocator state is quiescent. A
// minor sweep may still be in progress; in that case the swept lists are
// checked instead of being required to be empty.
void BufferAllocator::checkGCStateNotInUse(const AutoLock& lock) {
 MOZ_ASSERT(majorState == State::NotCollecting);
 bool isNurserySweeping = minorState == State::Sweeping;

 checkChunkListGCStateNotInUse(mixedChunks.ref(), true, false, false);
 checkChunkListGCStateNotInUse(tenuredChunks.ref(), false, false, false);
 checkChunkListsGCStateNotInUse(availableMixedChunks.ref(), true, false);
 checkChunkListsGCStateNotInUse(availableTenuredChunks.ref(), false, false);

 if (isNurserySweeping) {
   checkChunkListGCStateNotInUse(sweptMixedChunks.ref(), true,
                                 majorFinishedWhileMinorSweeping, true);
   checkChunkListGCStateNotInUse(sweptTenuredChunks.ref(), false, false, true);
 } else {
   // No sweep in progress: all transient lists and flags must be clear.
   MOZ_ASSERT(mixedChunksToSweep.ref().isEmpty());
   MOZ_ASSERT(largeNurseryAllocsToSweep.ref().isEmpty());

   MOZ_ASSERT(sweptMixedChunks.ref().isEmpty());
   MOZ_ASSERT(sweptTenuredChunks.ref().isEmpty());

   MOZ_ASSERT(!majorStartedWhileMinorSweeping);
   MOZ_ASSERT(!majorFinishedWhileMinorSweeping);
   MOZ_ASSERT(!hasMinorSweepDataToMerge);
   MOZ_ASSERT(!minorSweepingFinished);
   MOZ_ASSERT(!majorSweepingFinished);
 }

 MOZ_ASSERT(tenuredChunksToSweep.ref().isEmpty());

 checkAllocListGCStateNotInUse(largeNurseryAllocs.ref(), true);
 checkAllocListGCStateNotInUse(largeTenuredAllocs.ref(), false);

 MOZ_ASSERT(largeTenuredAllocsToSweep.ref().isEmpty());
 MOZ_ASSERT(sweptLargeTenuredAllocs.ref().isEmpty());
}
   1704 
// Debug-only: check every chunk in the size-classed available lists and
// verify each chunk is filed under its correct size class.
void BufferAllocator::checkChunkListsGCStateNotInUse(
   ChunkLists& chunkLists, bool hasNurseryOwnedAllocs,
   bool allowAllocatedDuringCollection) {
 for (auto chunk = chunkLists.chunkIter(); !chunk.done(); chunk.next()) {
   checkChunkGCStateNotInUse(chunk, allowAllocatedDuringCollection, true);
   verifyChunk(chunk, hasNurseryOwnedAllocs);

   MOZ_ASSERT(chunk->ownsFreeLists);
   size_t sizeClass = chunk.getSizeClass();

   // The chunk must be filed under the size class it would report, and a
   // non-full chunk must actually have a free region of that class.
   MOZ_ASSERT(chunk->sizeClassForAvailableLists() == sizeClass);
   MOZ_ASSERT_IF(sizeClass != FullChunkSizeClass,
                 chunk->freeLists.ref().hasSizeClass(sizeClass));
 }
}
   1720 
   1721 void BufferAllocator::checkChunkListGCStateNotInUse(
   1722    BufferChunkList& chunks, bool hasNurseryOwnedAllocs,
   1723    bool allowAllocatedDuringCollection, bool allowFreeLists) {
   1724  for (BufferChunk* chunk : chunks) {
   1725    checkChunkGCStateNotInUse(chunk, allowAllocatedDuringCollection,
   1726                              allowFreeLists);
   1727    verifyChunk(chunk, hasNurseryOwnedAllocs);
   1728  }
   1729 }
   1730 
// Debug check that |chunk| carries no leftover collection state: no mark bits
// set (either in the chunk itself or in any of its small buffer regions) and
// free list ownership consistent with |allowFreeLists|.
void BufferAllocator::checkChunkGCStateNotInUse(
    BufferChunk* chunk, bool allowAllocatedDuringCollection,
    bool allowFreeLists) {
  MOZ_ASSERT_IF(!allowAllocatedDuringCollection,
                !chunk->allocatedDuringCollection);
  MOZ_ASSERT(chunk->markBits.ref().isEmpty());
  for (auto iter = chunk->smallRegionIter(); !iter.done(); iter.next()) {
    SmallBufferRegion* region = iter.get();
    MOZ_ASSERT(region->markBits.ref().isEmpty());
  }
  MOZ_ASSERT(allowFreeLists == chunk->ownsFreeLists);
  if (!chunk->ownsFreeLists) {
    // If the chunk doesn't own its free lists then they were moved elsewhere
    // and the chunk's own lists must be empty.
    chunk->freeLists.ref().assertEmpty();
  }
}
   1746 
// Exhaustively verify |chunk|'s allocation metadata: walk all allocations in
// address order, check that each gap between them is recorded as a free
// region, check the allocated bitmap is consistent, and check the
// nursery-owned flag matches the chunk's contents.
void BufferAllocator::verifyChunk(BufferChunk* chunk,
                                  bool hasNurseryOwnedAllocs) {
  MOZ_ASSERT(chunk->hasNurseryOwnedAllocs == hasNurseryOwnedAllocs);

  static constexpr size_t StepBytes = MediumAllocGranularity;

  // Offset of the start of the current run of free space.
  size_t freeOffset = FirstMediumAllocOffset;

  // Count the free regions recorded in the chunk's own free lists so we can
  // compare against the number found by walking the chunk below.
  size_t freeListsFreeRegionCount = 0;
  if (chunk->ownsFreeLists) {
    chunk->freeLists.ref().checkAvailable();
    for (auto region = chunk->freeLists.ref().freeRegionIter(); !region.done();
         region.next()) {
      MOZ_ASSERT(BufferChunk::from(region) == chunk);
      freeListsFreeRegionCount++;
    }
  } else {
    MOZ_ASSERT(chunk->freeLists.ref().isEmpty());
  }

  size_t chunkFreeRegionCount = 0;
  for (auto iter = chunk->allocIter(); !iter.done(); iter.next()) {
    // Check any free region preceding this allocation.
    size_t offset = iter.getOffset();
    MOZ_ASSERT(offset >= FirstMediumAllocOffset);
    if (offset > freeOffset) {
      verifyFreeRegion(chunk, offset, offset - freeOffset,
                       chunkFreeRegionCount);
    }

    // Check this allocation.
    void* alloc = iter.get();
    MOZ_ASSERT_IF(chunk->isNurseryOwned(alloc), hasNurseryOwnedAllocs);
    size_t bytes = chunk->allocBytes(alloc);
    uintptr_t endOffset = offset + bytes;
    MOZ_ASSERT(endOffset <= ChunkSize);
    // Only the first granule of an allocation may be set in the allocated
    // bitmap; interior granules must be clear.
    for (size_t i = offset + StepBytes; i < endOffset; i += StepBytes) {
      MOZ_ASSERT(!chunk->isAllocated(i));
    }

    // Small buffer regions are themselves medium allocations; verify their
    // contents too.
    if (chunk->isSmallBufferRegion(alloc)) {
      auto* region = SmallBufferRegion::from(alloc);
      MOZ_ASSERT_IF(region->hasNurseryOwnedAllocs(), hasNurseryOwnedAllocs);
      verifySmallBufferRegion(region, chunkFreeRegionCount);
    }

    freeOffset = endOffset;
  }

  // Check any free region following the last allocation.
  if (freeOffset < ChunkSize) {
    verifyFreeRegion(chunk, ChunkSize, ChunkSize - freeOffset,
                     chunkFreeRegionCount);
  }

  MOZ_ASSERT_IF(chunk->ownsFreeLists,
                freeListsFreeRegionCount == chunkFreeRegionCount);
}
   1805 
   1806 void BufferAllocator::verifyFreeRegion(BufferChunk* chunk, uintptr_t endOffset,
   1807                                       size_t expectedSize,
   1808                                       size_t& freeRegionCount) {
   1809  MOZ_ASSERT(expectedSize >= MinFreeRegionSize);
   1810  auto* freeRegion = FreeRegion::fromEndOffset(chunk, endOffset);
   1811  MOZ_ASSERT(freeRegion->isInList());
   1812  MOZ_ASSERT(freeRegion->size() == expectedSize);
   1813  freeRegionCount++;
   1814 }
   1815 
// Verify a SmallBufferRegion's metadata the same way verifyChunk verifies a
// chunk: walk allocations in address order, check gaps are recorded as free
// regions, and check the nursery-owned summary flag is exact.
void BufferAllocator::verifySmallBufferRegion(SmallBufferRegion* region,
                                              size_t& freeRegionCount) {
  bool foundNurseryOwnedAllocs = false;

  static constexpr size_t StepBytes = SmallAllocGranularity;

  // Offset of the start of the current run of free space.
  size_t freeOffset = FirstSmallAllocOffset;

  for (auto iter = region->allocIter(); !iter.done(); iter.next()) {
    // Check any free region preceding this allocation.
    size_t offset = iter.getOffset();
    MOZ_ASSERT(offset >= FirstSmallAllocOffset);
    if (offset > freeOffset) {
      verifyFreeRegion(region, offset, offset - freeOffset, freeRegionCount);
    }

    // Check this allocation.
    void* alloc = iter.get();
    MOZ_ASSERT_IF(region->isNurseryOwned(alloc),
                  region->hasNurseryOwnedAllocs());
    size_t bytes = region->allocBytes(alloc);
    uintptr_t endOffset = offset + bytes;
    MOZ_ASSERT(endOffset <= SmallRegionSize);
    // Only the first granule of an allocation may be set in the allocated
    // bitmap; interior granules must be clear.
    for (size_t i = offset + StepBytes; i < endOffset; i += StepBytes) {
      MOZ_ASSERT(!region->isAllocated(i));
    }

    if (region->isNurseryOwned(alloc)) {
      foundNurseryOwnedAllocs = true;
    }

    freeOffset = endOffset;
  }

  // The region's flag must be exact, not merely conservative.
  MOZ_ASSERT(foundNurseryOwnedAllocs == region->hasNurseryOwnedAllocs());

  // Check any free region following the last allocation.
  if (freeOffset < SmallRegionSize) {
    verifyFreeRegion(region, SmallRegionSize, SmallRegionSize - freeOffset,
                     freeRegionCount);
  }
}
   1858 
   1859 void BufferAllocator::verifyFreeRegion(SmallBufferRegion* region,
   1860                                       uintptr_t endOffset, size_t expectedSize,
   1861                                       size_t& freeRegionCount) {
   1862  if (expectedSize < MinFreeRegionSize) {
   1863    return;
   1864  }
   1865 
   1866  auto* freeRegion = FreeRegion::fromEndOffset(region, endOffset);
   1867  MOZ_ASSERT(freeRegion->isInList());
   1868  MOZ_ASSERT(freeRegion->size() == expectedSize);
   1869  freeRegionCount++;
   1870 }
   1871 
   1872 void BufferAllocator::checkAllocListGCStateNotInUse(LargeAllocList& list,
   1873                                                    bool isNurseryOwned) {
   1874  for (LargeBuffer* buffer : list) {
   1875    MOZ_ASSERT(buffer->isNurseryOwned == isNurseryOwned);
   1876    MOZ_ASSERT_IF(!isNurseryOwned, !buffer->allocatedDuringCollection);
   1877  }
   1878 }
   1879 
   1880 #endif
   1881 
// Allocate a small buffer of at least |bytes| bytes (rounded up to the small
// allocation granularity) from within a SmallBufferRegion. Returns nullptr on
// failure. |inGC| is propagated to the slow path which may grow the heap.
void* BufferAllocator::allocSmall(size_t bytes, bool nurseryOwned, bool inGC) {
  MOZ_ASSERT(IsSmallAllocSize(bytes));

  // Round up to next available size.
  bytes = RoundUp(std::max(bytes, MinSmallAllocSize), SmallAllocGranularity);
  MOZ_ASSERT(bytes <= MaxSmallAllocSize);

  // Get size class from |bytes|.
  size_t sizeClass = SizeClassForSmallAlloc(bytes);

  // Fast path: bump-allocate from the current free lists; fall back to
  // refilling them (possibly growing the heap) on failure.
  void* alloc = bumpAlloc(bytes, sizeClass, MaxSmallAllocClass);
  if (MOZ_UNLIKELY(!alloc)) {
    alloc = retrySmallAlloc(bytes, sizeClass, inGC);
    if (!alloc) {
      return nullptr;
    }
  }

  // Record the allocation in the containing region's metadata.
  SmallBufferRegion* region = SmallBufferRegion::from(alloc);
  region->setAllocated(alloc, bytes, true);
  MOZ_ASSERT(region->allocBytes(alloc) == bytes);

  MOZ_ASSERT(!region->isNurseryOwned(alloc));
  region->setNurseryOwned(alloc, nurseryOwned);

  // Propagate the nursery-owned flag up to the region and its chunk.
  auto* chunk = BufferChunk::from(alloc);
  if (nurseryOwned && !region->hasNurseryOwnedAllocs()) {
    region->setHasNurseryOwnedAllocs(true);
    setChunkHasNurseryAllocs(chunk);
  }

  // Heap size updates are done for the small buffer region as a whole, not
  // individual allocations within it.

  MOZ_ASSERT(!region->isMarked(alloc));
  MOZ_ASSERT(IsSmallAlloc(alloc));

  return alloc;
}
   1921 
   1922 MOZ_NEVER_INLINE void* BufferAllocator::retrySmallAlloc(size_t bytes,
   1923                                                        size_t sizeClass,
   1924                                                        bool inGC) {
   1925  auto alloc = [&]() {
   1926    return bumpAlloc(bytes, sizeClass, MaxSmallAllocClass);
   1927  };
   1928  auto growHeap = [&]() { return allocNewSmallRegion(inGC); };
   1929 
   1930  return refillFreeListsAndRetryAlloc(sizeClass, MaxSmallAllocClass, alloc,
   1931                                      growHeap);
   1932 }
   1933 
// Grow the heap for small allocations by allocating a new SmallBufferRegion
// (itself an aligned medium allocation) and adding its free space to the
// allocator's free lists. Returns false on allocation failure.
bool BufferAllocator::allocNewSmallRegion(bool inGC) {
  void* ptr = allocMediumAligned(SmallRegionSize, inGC);
  if (!ptr) {
    return false;
  }

  auto* region = new (ptr) SmallBufferRegion;

  // Record in the chunk metadata that this medium allocation is a small
  // buffer region.
  BufferChunk* chunk = BufferChunk::from(region);
  chunk->setSmallBufferRegion(region, true);

  // The whole region past the header starts out as a single free region.
  uintptr_t freeStart = uintptr_t(region) + FirstSmallAllocOffset;
  uintptr_t freeEnd = uintptr_t(region) + SmallRegionSize;

  size_t sizeClass =
      SizeClassForFreeRegion(freeEnd - freeStart, SizeKind::Small);

  // The FreeRegion structure lives at the end of the free space it describes.
  ptr = reinterpret_cast<void*>(freeEnd - sizeof(FreeRegion));
  FreeRegion* freeRegion = new (ptr) FreeRegion(freeStart);
  MOZ_ASSERT(freeRegion->getEnd() == freeEnd);
  freeLists.ref().pushFront(sizeClass, freeRegion);
  return true;
}
   1957 
   1958 /* static */
   1959 bool BufferAllocator::IsSmallAlloc(void* alloc) {
   1960  MOZ_ASSERT(IsBufferAlloc(alloc));
   1961 
   1962  // Test for large buffers before calling this so we can assume |alloc| is
   1963  // inside a chunk.
   1964  MOZ_ASSERT(!IsLargeAlloc(alloc));
   1965 
   1966  BufferChunk* chunk = BufferChunk::from(alloc);
   1967  return chunk->isSmallBufferRegion(alloc);
   1968 }
   1969 
// Allocate a medium-sized buffer of at least |bytes| bytes (rounded up to the
// medium allocation granularity) directly from a BufferChunk. Returns nullptr
// on failure.
void* BufferAllocator::allocMedium(size_t bytes, bool nurseryOwned, bool inGC) {
  MOZ_ASSERT(!IsSmallAllocSize(bytes));
  MOZ_ASSERT(!IsLargeAllocSize(bytes));

  // Round up to next allowed size.
  bytes = RoundUp(bytes, MediumAllocGranularity);
  MOZ_ASSERT(bytes <= MaxMediumAllocSize);

  // Get size class from |bytes|.
  size_t sizeClass = SizeClassForMediumAlloc(bytes);

  // Fast path: bump-allocate from the current free lists; fall back to
  // refilling them (possibly growing the heap) on failure.
  void* alloc = bumpAlloc(bytes, sizeClass, MaxMediumAllocClass);
  if (MOZ_UNLIKELY(!alloc)) {
    alloc = retryMediumAlloc(bytes, sizeClass, inGC);
    if (!alloc) {
      return nullptr;
    }
  }

  setAllocated(alloc, bytes, nurseryOwned, inGC);
  return alloc;
}
   1992 
   1993 MOZ_NEVER_INLINE void* BufferAllocator::retryMediumAlloc(size_t bytes,
   1994                                                         size_t sizeClass,
   1995                                                         bool inGC) {
   1996  auto alloc = [&]() {
   1997    return bumpAlloc(bytes, sizeClass, MaxMediumAllocClass);
   1998  };
   1999  auto growHeap = [&]() { return allocNewChunk(inGC); };
   2000  return refillFreeListsAndRetryAlloc(sizeClass, MaxMediumAllocClass, alloc,
   2001                                      growHeap);
   2002 }
   2003 
   2004 template <typename Alloc, typename GrowHeap>
   2005 void* BufferAllocator::refillFreeListsAndRetryAlloc(size_t sizeClass,
   2006                                                    size_t maxSizeClass,
   2007                                                    Alloc&& alloc,
   2008                                                    GrowHeap&& growHeap) {
   2009  for (;;) {
   2010    RefillResult r = refillFreeLists(sizeClass, maxSizeClass, growHeap);
   2011    if (r == RefillResult::Fail) {
   2012      return nullptr;
   2013    }
   2014 
   2015    if (r == RefillResult::Retry) {
   2016      continue;
   2017    }
   2018 
   2019    void* ptr = alloc();
   2020    MOZ_ASSERT(ptr);
   2021    return ptr;
   2022  }
   2023 }
   2024 
// Try to make free regions for |sizeClass| (up to |maxSizeClass|) available
// in the allocator's free lists. Returns Success if the caller can now
// allocate, Retry if swept data was merged and the caller should try again,
// or Fail if the heap could not be grown.
template <typename GrowHeap>
BufferAllocator::RefillResult BufferAllocator::refillFreeLists(
    size_t sizeClass, size_t maxSizeClass, GrowHeap&& growHeap) {
  MOZ_ASSERT(sizeClass <= maxSizeClass);

  // Take chunks from the available lists and add their free regions to the
  // free lists.
  if (useAvailableChunk(sizeClass, maxSizeClass)) {
    return RefillResult::Success;
  }

  // If that fails try to merge swept data and retry, avoiding taking the lock
  // unless we know there is data to merge. This reduces context switches.
  if (hasMinorSweepDataToMerge) {
    mergeSweptData();
    return RefillResult::Retry;
  }

  // If all else fails try to grow the heap.
  if (growHeap()) {
    return RefillResult::Success;
  }

  return RefillResult::Fail;
}
   2050 
   2051 bool BufferAllocator::useAvailableChunk(size_t sizeClass, size_t maxSizeClass) {
   2052  return useAvailableChunk(sizeClass, maxSizeClass, availableMixedChunks.ref(),
   2053                           mixedChunks.ref()) ||
   2054         useAvailableChunk(sizeClass, maxSizeClass,
   2055                           availableTenuredChunks.ref(), tenuredChunks.ref());
   2056 }
   2057 
// Returns true if a free region of at least |sizeClass| was added to the main
// free lists.
bool BufferAllocator::useAvailableChunk(size_t sizeClass, size_t maxSizeClass,
                                        ChunkLists& src, BufferChunkList& dst) {
  // Move available chunks from available list |src| to current list |dst| (and
  // put their free regions into the |freeLists|) for size classes less than or
  // equal to |sizeClass| that are not currently represented in the free lists
  // and for which we have chunks in |src|.
  //
  // Chunks are moved from the available list to the free lists as needed to
  // limit the number of regions in the free lists, as these need to be iterated
  // on minor GC.
  //
  // This restriction on only moving regions less than or equal to the required
  // size class is to encourage filling up more used chunks before using less
  // used chunks, in the hope that less used chunks will become completely empty
  // and can be reclaimed.

  MOZ_ASSERT(freeLists.ref().getFirstAvailableSizeClass(
                 sizeClass, maxSizeClass) == SIZE_MAX);

  SizeClassBitSet sizeClasses = getChunkSizeClassesToMove(maxSizeClass, src);
  for (auto i = BitSetIter(sizeClasses); !i.done(); i.next()) {
    MOZ_ASSERT(i <= maxSizeClass);
    MOZ_ASSERT(!freeLists.ref().hasSizeClass(i));

    BufferChunk* chunk = src.popFirstChunk(i);
    MOZ_ASSERT(chunk->ownsFreeLists);
    MOZ_ASSERT(chunk->freeLists.ref().hasSizeClass(i));

    // Transfer ownership of the chunk's free regions to the allocator's main
    // free lists.
    dst.pushBack(chunk);
    freeLists.ref().append(std::move(chunk->freeLists.ref()));
    chunk->ownsFreeLists = false;
    chunk->freeLists.ref().assertEmpty();

    if (i >= sizeClass) {
      // We should now be able to allocate a block of the required size as we've
      // added free regions of size class |i| where |i >= sizeClass|.
      MOZ_ASSERT(freeLists.ref().getFirstAvailableSizeClass(
                     sizeClass, maxSizeClass) != SIZE_MAX);
      return true;
    }
  }

  MOZ_ASSERT(freeLists.ref().getFirstAvailableSizeClass(
                 sizeClass, maxSizeClass) == SIZE_MAX);
  return false;
}
   2104 
BufferAllocator::SizeClassBitSet BufferAllocator::getChunkSizeClassesToMove(
    size_t maxSizeClass, ChunkLists& src) const {
  // Make a bitmap of size classes up to |maxSizeClass| which are not present in
  // |freeLists| but which are present in available chunks |src|.
  //
  // The ChunkLists bitmap has an extra bit to represent full chunks compared to
  // the FreeLists bitmap. This prevents using the classes methods, but since
  // they both fit into a single word we can manipulate the storage directly.
  SizeClassBitSet result;
  auto& sizeClasses = result.Storage()[0];
  auto& srcAvailable = src.availableSizeClasses().Storage()[0];
  auto& freeAvailable = freeLists.ref().availableSizeClasses().Storage()[0];
  // BitMask(maxSizeClass + 1) keeps bits 0..maxSizeClass, masking out both
  // larger classes and the ChunkLists full-chunk bit.
  sizeClasses = srcAvailable & ~freeAvailable & BitMask(maxSizeClass + 1);
  return result;
}
   2120 
   2121 // Differentiate between small and medium size classes. Large allocations do not
   2122 // use size classes.
   2123 static bool IsMediumSizeClass(size_t sizeClass) {
   2124  MOZ_ASSERT(sizeClass < BufferAllocator::AllocSizeClasses);
   2125  return sizeClass >= MinMediumAllocClass;
   2126 }
   2127 
   2128 /* static */
   2129 BufferAllocator::SizeKind BufferAllocator::SizeClassKind(size_t sizeClass) {
   2130  return IsMediumSizeClass(sizeClass) ? SizeKind::Medium : SizeKind::Small;
   2131 }
   2132 
// Allocate |bytes| from the first suitable free region in the allocator's
// free lists. Returns nullptr if no free region with size class in
// [sizeClass, maxSizeClass] is available.
void* BufferAllocator::bumpAlloc(size_t bytes, size_t sizeClass,
                                 size_t maxSizeClass) {
  MOZ_ASSERT(SizeClassKind(sizeClass) == SizeClassKind(maxSizeClass));
  freeLists.ref().checkAvailable();

  // Find smallest suitable size class that has free regions.
  sizeClass =
      freeLists.ref().getFirstAvailableSizeClass(sizeClass, maxSizeClass);
  if (sizeClass == SIZE_MAX) {
    return nullptr;
  }

  FreeRegion* region = freeLists.ref().getFirstRegion(sizeClass);
  MOZ_ASSERT(region->size() >= bytes);

  // Take |bytes| from the start of the region, then move the region to a
  // different free list if its remaining size requires it.
  void* ptr = allocFromRegion(region, bytes, sizeClass);
  updateFreeListsAfterAlloc(&freeLists.ref(), region, sizeClass);

  DebugOnlyPoison(ptr, JS_ALLOCATED_BUFFER_PATTERN, bytes,
                  MemCheckKind::MakeUndefined);

  return ptr;
}
   2156 
   2157 #ifdef DEBUG
   2158 static size_t GranularityForSizeClass(size_t sizeClass) {
   2159  return IsMediumSizeClass(sizeClass) ? MediumAllocGranularity
   2160                                      : SmallAllocGranularity;
   2161 }
   2162 #endif  // DEBUG
   2163 
// Carve |bytes| off the start of free region |region|, recommitting its pages
// first if necessary. The caller is responsible for updating the free lists
// afterwards (see updateFreeListsAfterAlloc).
void* BufferAllocator::allocFromRegion(FreeRegion* region, size_t bytes,
                                       size_t sizeClass) {
  uintptr_t start = region->startAddr;
  MOZ_ASSERT(region->getEnd() > start);
  MOZ_ASSERT_IF(sizeClass != MaxMediumAllocClass,
                region->size() >= SizeClassBytes(sizeClass));
  MOZ_ASSERT_IF(sizeClass == MaxMediumAllocClass,
                region->size() >= MaxMediumAllocSize);
  MOZ_ASSERT(start % GranularityForSizeClass(sizeClass) == 0);
  MOZ_ASSERT(region->size() % GranularityForSizeClass(sizeClass) == 0);

  // Ensure whole region is committed.
  if (region->hasDecommittedPages) {
    recommitRegion(region);
  }

  // Allocate from start of region.
  void* ptr = reinterpret_cast<void*>(start);
  start += bytes;
  MOZ_ASSERT(region->getEnd() >= start);

  // Update region start.
  region->startAddr = start;

  return ptr;
}
   2190 
// Allocate a region of size |bytes| aligned to |bytes|. The maximum size is
// limited to 256KB. In practice this is only ever used to allocate
// SmallBufferRegions.
void* BufferAllocator::allocMediumAligned(size_t bytes, bool inGC) {
  MOZ_ASSERT(bytes >= MinMediumAllocSize);
  MOZ_ASSERT(bytes <= MaxAlignedAllocSize);
  MOZ_ASSERT(mozilla::IsPowerOfTwo(bytes));

  // Get size class from |bytes|.
  size_t sizeClass = SizeClassForMediumAlloc(bytes);

  // Fast path, with a fallback that refills the free lists or grows the heap.
  void* alloc = alignedAlloc(sizeClass);
  if (MOZ_UNLIKELY(!alloc)) {
    alloc = retryAlignedAlloc(sizeClass, inGC);
    if (!alloc) {
      return nullptr;
    }
  }

  // Aligned allocations are never nursery owned.
  setAllocated(alloc, bytes, false, inGC);

  return alloc;
}
   2214 
   2215 MOZ_NEVER_INLINE void* BufferAllocator::retryAlignedAlloc(size_t sizeClass,
   2216                                                          bool inGC) {
   2217  auto alloc = [&]() { return alignedAlloc(sizeClass); };
   2218  auto growHeap = [&]() { return allocNewChunk(inGC); };
   2219  return refillFreeListsAndRetryAlloc(sizeClass + 1, MaxMediumAllocClass, alloc,
   2220                                      growHeap);
   2221 }
   2222 
// Attempt an aligned allocation for |sizeClass| from the current free lists
// without growing the heap. Returns nullptr on failure.
void* BufferAllocator::alignedAlloc(size_t sizeClass) {
  freeLists.ref().checkAvailable();

  // Try the first free region for the smallest possible size class. This will
  // fail if that region is for the exact size class requested but the region is
  // not aligned.
  size_t allocClass = freeLists.ref().getFirstAvailableSizeClass(
      sizeClass, MaxMediumAllocClass);
  MOZ_ASSERT(allocClass >= sizeClass);
  if (allocClass == SIZE_MAX) {
    return nullptr;
  }

  FreeRegion* region = freeLists.ref().getFirstRegion(allocClass);
  void* ptr = alignedAllocFromRegion(region, sizeClass);
  if (ptr) {
    updateFreeListsAfterAlloc(&freeLists.ref(), region, allocClass);
    return ptr;
  }

  // If we couldn't allocate an aligned region, try a larger size class. This
  // only happens if we selected the size class equal to the requested size.
  MOZ_ASSERT(allocClass == sizeClass);
  allocClass = freeLists.ref().getFirstAvailableSizeClass(sizeClass + 1,
                                                          MaxMediumAllocClass);
  if (allocClass == SIZE_MAX) {
    return nullptr;
  }

  // A region of a strictly larger size class always contains an aligned range
  // of the requested size, so this allocation cannot fail.
  region = freeLists.ref().getFirstRegion(allocClass);
  ptr = alignedAllocFromRegion(region, sizeClass);
  MOZ_ASSERT(ptr);
  updateFreeListsAfterAlloc(&freeLists.ref(), region, allocClass);
  return ptr;
}
   2258 
// Attempt to allocate a size-class-sized block aligned to its own size from
// |region|. Returns nullptr if |region| contains no suitably aligned range of
// the required size.
void* BufferAllocator::alignedAllocFromRegion(FreeRegion* region,
                                              size_t sizeClass) {
  // Attempt to allocate an aligned region from |region|.

  uintptr_t start = region->startAddr;
  MOZ_ASSERT(region->getEnd() > start);
  MOZ_ASSERT(region->size() >= SizeClassBytes(sizeClass));
  MOZ_ASSERT((region->size() % MinMediumAllocSize) == 0);

  // Check whether the aligned range fits inside the region at all.
  size_t bytes = SizeClassBytes(sizeClass);
  size_t alignedStart = RoundUp(start, bytes);
  size_t end = alignedStart + bytes;
  if (end > region->getEnd()) {
    return nullptr;
  }

  // Align the start of the region, creating a new free region out of the space
  // at the start if necessary.
  if (alignedStart != start) {
    size_t alignBytes = alignedStart - start;
    void* prefix = allocFromRegion(region, alignBytes, sizeClass);
    MOZ_ASSERT(uintptr_t(prefix) == start);
    (void)prefix;
    MOZ_ASSERT(!region->hasDecommittedPages);
    addFreeRegion(&freeLists.ref(), start, alignBytes, SizeKind::Medium, false,
                  ListPosition::Back);
  }

  // Now the start is aligned we can use the normal allocation method.
  MOZ_ASSERT(region->startAddr % bytes == 0);
  return allocFromRegion(region, bytes, sizeClass);
}
   2291 
// Record a new medium allocation |alloc| of |bytes| bytes in its chunk's
// metadata and update heap size accounting.
void BufferAllocator::setAllocated(void* alloc, size_t bytes, bool nurseryOwned,
                                   bool inGC) {
  BufferChunk* chunk = BufferChunk::from(alloc);
  chunk->setAllocated(alloc, bytes, true);
  MOZ_ASSERT(chunk->allocBytes(alloc) == bytes);

  MOZ_ASSERT(!chunk->isNurseryOwned(alloc));
  chunk->setNurseryOwned(alloc, nurseryOwned);

  if (nurseryOwned) {
    setChunkHasNurseryAllocs(chunk);
  }

  MOZ_ASSERT(!chunk->isMarked(alloc));

  // Skip heap threshold checks for allocations made during GC.
  bool checkThresholds = !inGC;
  increaseHeapSize(bytes, nurseryOwned, checkThresholds, false);

  MOZ_ASSERT(!chunk->isSmallBufferRegion(alloc));
}
   2312 
   2313 void BufferAllocator::setChunkHasNurseryAllocs(BufferChunk* chunk) {
   2314  MOZ_ASSERT(!chunk->ownsFreeLists);
   2315 
   2316  if (chunk->hasNurseryOwnedAllocs) {
   2317    return;
   2318  }
   2319 
   2320  tenuredChunks.ref().remove(chunk);
   2321  mixedChunks.ref().pushBack(chunk);
   2322  chunk->hasNurseryOwnedAllocs = true;
   2323 }
   2324 
void BufferAllocator::updateFreeListsAfterAlloc(FreeLists* freeLists,
                                                FreeRegion* region,
                                                size_t sizeClass) {
  // Updates |freeLists| after an allocation from |region| which is currently in
  // the |sizeClass| free list. This may move the region to a different free
  // list.

  freeLists->assertContains(sizeClass, region);

  // If the region is still valid for further allocations of this size class
  // then there's nothing to do.
  size_t classBytes = SizeClassBytes(sizeClass);
  size_t newSize = region->size();
  MOZ_ASSERT(newSize % GranularityForSizeClass(sizeClass) == 0);
  if (newSize >= classBytes) {
    return;
  }

  // Remove region from this free list.
  freeLists->remove(sizeClass, region);

  // If the region is now empty then we're done.
  if (newSize == 0) {
    return;
  }

  // Otherwise region is now too small. Move it to the appropriate bucket for
  // its reduced size if possible.

  if (newSize < MinFreeRegionSize) {
    // We can't record a region this small. The free space will not be reused
    // until enough adjacent space becomes free.
    return;
  }

  // The new class must be of the same kind (small/medium) and no larger than
  // the old one.
  size_t newSizeClass =
      SizeClassForFreeRegion(newSize, SizeClassKind(sizeClass));
  MOZ_ASSERT_IF(newSizeClass != MaxMediumAllocClass,
                newSize >= SizeClassBytes(newSizeClass));
  MOZ_ASSERT(newSizeClass <= sizeClass);
  MOZ_ASSERT_IF(newSizeClass != MaxMediumAllocClass, newSizeClass < sizeClass);
  MOZ_ASSERT(SizeClassKind(newSizeClass) == SizeClassKind(sizeClass));
  freeLists->pushFront(newSizeClass, region);
}
   2369 
// Mark all decommitted pages covered by free region |region| as in use again
// and clear the corresponding bits in the chunk's decommitted page bitmap.
void BufferAllocator::recommitRegion(FreeRegion* region) {
  MOZ_ASSERT(region->hasDecommittedPages);
  MOZ_ASSERT(DecommitEnabled());

  // Only whole pages inside the region can have been decommitted, so round
  // the region bounds inwards to page boundaries.
  BufferChunk* chunk = BufferChunk::from(region);
  uintptr_t startAddr = RoundUp(region->startAddr, PageSize);
  uintptr_t endAddr = RoundDown(uintptr_t(region), PageSize);

  size_t startPage = (startAddr - uintptr_t(chunk)) / PageSize;
  size_t endPage = (endAddr - uintptr_t(chunk)) / PageSize;

  // If the start of the region does not lie on a page boundary the page it is
  // in should be committed as it must either contain the start of the chunk, a
  // FreeRegion or an allocation.
  MOZ_ASSERT_IF((region->startAddr % PageSize) != 0,
                !chunk->decommittedPages.ref()[startPage - 1]);

  // The end of the region should be committed as it holds FreeRegion |region|.
  MOZ_ASSERT(!chunk->decommittedPages.ref()[endPage]);

  MarkPagesInUseSoft(reinterpret_cast<void*>(startAddr), endAddr - startAddr);
  for (size_t i = startPage; i != endPage; i++) {
    chunk->decommittedPages.ref()[i] = false;
  }

  region->hasDecommittedPages = false;
}
   2397 
   2398 static inline StallAndRetry ShouldStallAndRetry(bool inGC) {
   2399  return inGC ? StallAndRetry::Yes : StallAndRetry::No;
   2400 }
   2401 
// Grow the heap by acquiring a chunk from the GC's chunk pool, initializing
// it as a BufferChunk and adding its free space to the allocator's free
// lists. Returns false if no chunk could be allocated.
bool BufferAllocator::allocNewChunk(bool inGC) {
  GCRuntime* gc = &zone->runtimeFromMainThread()->gc;
  AutoLockGCBgAlloc lock(gc);
  ArenaChunk* baseChunk = gc->getOrAllocChunk(ShouldStallAndRetry(inGC), lock);
  if (!baseChunk) {
    return false;
  }

  CheckHighBitsOfPointer(baseChunk);

  // Ensure all memory is initially committed.
  if (!baseChunk->decommittedPages.IsEmpty()) {
    MOZ_ASSERT(DecommitEnabled());
    MarkPagesInUseSoft(baseChunk, ChunkSize);
  }

  // Unpoison past the ChunkBase header.
  void* ptr = reinterpret_cast<void*>(uintptr_t(baseChunk) + sizeof(ChunkBase));
  size_t size = ChunkSize - sizeof(ChunkBase);
  SetMemCheckKind(ptr, size, MemCheckKind::MakeUndefined);

  BufferChunk* chunk = new (baseChunk) BufferChunk(zone);
  chunk->allocatedDuringCollection = majorState != State::NotCollecting;

  // New chunks contain no nursery-owned allocations yet.
  tenuredChunks.ref().pushBack(chunk);

  // The whole chunk past the header starts out as a single free region.
  uintptr_t freeStart = uintptr_t(chunk) + FirstMediumAllocOffset;
  uintptr_t freeEnd = uintptr_t(chunk) + ChunkSize;

  size_t sizeClass =
      SizeClassForFreeRegion(freeEnd - freeStart, SizeKind::Medium);
  MOZ_ASSERT(sizeClass > MaxSmallAllocClass);
  MOZ_ASSERT(sizeClass <= MaxMediumAllocClass);

  // The FreeRegion structure lives at the end of the free space it describes.
  ptr = reinterpret_cast<void*>(freeEnd - sizeof(FreeRegion));
  FreeRegion* region = new (ptr) FreeRegion(freeStart);
  MOZ_ASSERT(region->getEnd() == freeEnd);
  freeLists.ref().pushFront(sizeClass, region);

  return true;
}
   2443 
// Clear the chunk metadata for medium allocation |alloc| of |bytes| bytes:
// reset its nursery-owned bit and mark its space unallocated.
static void SetDeallocated(BufferChunk* chunk, void* alloc, size_t bytes) {
  MOZ_ASSERT(!chunk->isSmallBufferRegion(alloc));
  MOZ_ASSERT(chunk->allocBytes(alloc) == bytes);
  chunk->setNurseryOwned(alloc, false);
  chunk->setAllocated(alloc, bytes, false);
}
   2450 
// Clear the metadata for small allocation |alloc| of |bytes| bytes in its
// containing SmallBufferRegion: reset its nursery-owned bit and mark its
// space unallocated.
static void SetDeallocated(SmallBufferRegion* region, void* alloc,
                           size_t bytes) {
  MOZ_ASSERT(region->allocBytes(alloc) == bytes);
  region->setNurseryOwned(alloc, false);
  region->setAllocated(alloc, bytes, false);
}
   2457 
// Sweep one buffer chunk: free dead allocations (including small buffer
// regions that end up empty), rebuild the chunk's free lists from the
// resulting gaps and update heap size accounting.
//
// |sweepKind| selects whether nursery-owned or tenured allocations are swept;
// allocations of the other kind are treated as live. Returns false if the
// chunk became completely empty and was recycled, in which case |chunk| must
// not be used again.
bool BufferAllocator::sweepChunk(BufferChunk* chunk, SweepKind sweepKind,
                                 bool shouldDecommit) {
  // Find all regions of free space in |chunk| and add them to the swept free
  // lists.

  // TODO: It could be beneficial to allocate from most-full chunks first. This
  // could happen by sweeping all chunks and then sorting them by how much free
  // space they had and then adding their free regions to the free lists in that
  // order.

  MOZ_ASSERT_IF(sweepKind == SweepKind::Tenured,
                !chunk->allocatedDuringCollection);
  MOZ_ASSERT_IF(sweepKind == SweepKind::Tenured, chunk->ownsFreeLists);
  FreeLists& freeLists = chunk->freeLists.ref();

  // TODO: For tenured sweeping, check whether anything needs to be swept and
  // reuse the existing free regions rather than rebuilding these every time.
  freeLists.clear();
  chunk->ownsFreeLists = true;

  GCRuntime* gc = &zone->runtimeFromAnyThread()->gc;

  bool hasNurseryOwnedAllocs = false;

  // Chunk offset of the start of the current run of free space.
  size_t freeStart = FirstMediumAllocOffset;
  bool sweptAny = false;
  size_t tenuredBytesFreed = 0;

  // First sweep any small buffer regions. A region that becomes empty is
  // freed like a dead medium allocation.
  for (auto iter = chunk->smallRegionIter(); !iter.done(); iter.next()) {
    SmallBufferRegion* region = iter.get();
    MOZ_ASSERT(!chunk->isMarked(region));
    MOZ_ASSERT(chunk->allocBytes(region) == SmallRegionSize);

    if (!sweepSmallBufferRegion(chunk, region, sweepKind)) {
      // Region is now empty: free it.
      chunk->setSmallBufferRegion(region, false);
      SetDeallocated(chunk, region, SmallRegionSize);
      PoisonAlloc(region, JS_SWEPT_TENURED_PATTERN, sizeof(SmallBufferRegion),
                  MemCheckKind::MakeUndefined);
      tenuredBytesFreed += SmallRegionSize;
      sweptAny = true;
    } else if (region->hasNurseryOwnedAllocs()) {
      hasNurseryOwnedAllocs = true;
    }
  }

  // Then walk all remaining allocations in address order, accumulating runs
  // of free space between live allocations.
  for (auto iter = chunk->allocIter(); !iter.done(); iter.next()) {
    void* alloc = iter.get();

    size_t bytes = chunk->allocBytes(alloc);
    uintptr_t allocEnd = iter.getOffset() + bytes;

    bool nurseryOwned = chunk->isNurseryOwned(alloc);
    // Small buffer regions were handled above and are never swept here.
    bool canSweep = !chunk->isSmallBufferRegion(alloc) &&
                    CanSweepAlloc(nurseryOwned, sweepKind);

    bool shouldSweep = canSweep && !chunk->isMarked(alloc);
    if (shouldSweep) {
      // Dead. Update allocated bitmap, metadata and heap size accounting.
      if (!nurseryOwned) {
        tenuredBytesFreed += bytes;
      }
      SetDeallocated(chunk, alloc, bytes);
      PoisonAlloc(alloc, JS_SWEPT_TENURED_PATTERN, bytes,
                  MemCheckKind::MakeUndefined);
      sweptAny = true;
    } else {
      // Alive. Add any free space before this allocation.
      uintptr_t allocStart = iter.getOffset();
      if (freeStart != allocStart) {
        addSweptRegion(chunk, freeStart, allocStart, shouldDecommit, !sweptAny,
                       freeLists);
      }
      freeStart = allocEnd;
      if (canSweep) {
        chunk->setUnmarked(alloc);
      }
      if (nurseryOwned) {
        MOZ_ASSERT(sweepKind == SweepKind::Nursery);
        hasNurseryOwnedAllocs = true;
      }
    }
  }

  if (tenuredBytesFreed) {
    bool inMajorGC = sweepKind == SweepKind::Tenured;
    decreaseHeapSize(tenuredBytesFreed, false, inMajorGC);
  }

  if (freeStart == FirstMediumAllocOffset) {
    // Chunk is empty. Give it back to the system.
    bool allMemoryCommitted = chunk->decommittedPages.ref().IsEmpty();
    chunk->~BufferChunk();
    ArenaChunk* tenuredChunk = ArenaChunk::init(chunk, gc, allMemoryCommitted);
    AutoLockGC lock(gc);
    gc->recycleChunk(tenuredChunk, lock);
    return false;
  }

  // Add any free space from the last allocation to the end of the chunk.
  if (freeStart != ChunkSize) {
    addSweptRegion(chunk, freeStart, ChunkSize, shouldDecommit, !sweptAny,
                   freeLists);
  }

  chunk->hasNurseryOwnedAllocsAfterSweep = hasNurseryOwnedAllocs;

  return true;
}
   2567 
   2568 /* static */
   2569 bool BufferAllocator::CanSweepAlloc(bool nurseryOwned,
   2570                                    BufferAllocator::SweepKind sweepKind) {
   2571  static_assert(SweepKind::Nursery == SweepKind(uint8_t(true)));
   2572  static_assert(SweepKind::Tenured == SweepKind(uint8_t(false)));
   2573  SweepKind requiredKind = SweepKind(uint8_t(nurseryOwned));
   2574  return sweepKind == requiredKind;
   2575 }
   2576 
// Add the free space between chunk offsets |freeStart| and |freeEnd| to the
// swept free lists, optionally decommitting the whole pages it covers.
// |expectUnchanged| indicates no allocation was freed since this region was
// created, so a matching FreeRegion should already exist (checked in debug
// builds by addFreeRegion).
void BufferAllocator::addSweptRegion(BufferChunk* chunk, uintptr_t freeStart,
                                     uintptr_t freeEnd, bool shouldDecommit,
                                     bool expectUnchanged,
                                     FreeLists& freeLists) {
  // Add the region from |freeStart| to |freeEnd| to the appropriate swept free
  // list based on its size.

  MOZ_ASSERT(freeStart >= FirstMediumAllocOffset);
  MOZ_ASSERT(freeStart < freeEnd);
  MOZ_ASSERT(freeEnd <= ChunkSize);
  MOZ_ASSERT((freeStart % MediumAllocGranularity) == 0);
  MOZ_ASSERT((freeEnd % MediumAllocGranularity) == 0);
  MOZ_ASSERT_IF(shouldDecommit, DecommitEnabled());

  // Decommit pages if |shouldDecommit| was specified, but leave space for
  // the FreeRegion structure at the end.
  bool anyDecommitted = false;
  uintptr_t decommitStart = RoundUp(freeStart, PageSize);
  uintptr_t decommitEnd = RoundDown(freeEnd - sizeof(FreeRegion), PageSize);
  size_t endPage = decommitEnd / PageSize;
  if (shouldDecommit && decommitEnd > decommitStart) {
    void* ptr = reinterpret_cast<void*>(decommitStart + uintptr_t(chunk));
    MarkPagesUnusedSoft(ptr, decommitEnd - decommitStart);
    // Record the decommitted pages so they can be recommitted on use.
    size_t startPage = decommitStart / PageSize;
    for (size_t i = startPage; i != endPage; i++) {
      chunk->decommittedPages.ref()[i] = true;
    }
    anyDecommitted = true;
  } else {
    // Check for any previously decommitted pages.
    uintptr_t startPage = RoundDown(freeStart, PageSize) / PageSize;
    for (size_t i = startPage; i != endPage; i++) {
      if (chunk->decommittedPages.ref()[i]) {
        anyDecommitted = true;
      }
    }
  }

  // The last page must have previously been either a live allocation or a
  // FreeRegion, so it must already be committed.
  MOZ_ASSERT(!chunk->decommittedPages.ref()[endPage]);

  // Convert chunk offsets to addresses.
  freeStart += uintptr_t(chunk);
  freeEnd += uintptr_t(chunk);

  size_t bytes = freeEnd - freeStart;
  addFreeRegion(&freeLists, freeStart, bytes, SizeKind::Medium, anyDecommitted,
                ListPosition::Back, expectUnchanged);
}
   2626 
// Sweep the allocations inside a small buffer region, freeing dead ones and
// adding the resulting gaps to |chunk|'s free lists. |sweepKind| selects
// whether nursery-owned or tenured allocations are swept. Returns false if
// the region ends up completely empty (the caller then frees the region
// itself).
bool BufferAllocator::sweepSmallBufferRegion(BufferChunk* chunk,
                                             SmallBufferRegion* region,
                                             SweepKind sweepKind) {
  bool hasNurseryOwnedAllocs = false;

  FreeLists& freeLists = chunk->freeLists.ref();

  // Region offset of the start of the current run of free space.
  size_t freeStart = FirstSmallAllocOffset;
  bool sweptAny = false;

  for (auto iter = region->allocIter(); !iter.done(); iter.next()) {
    void* alloc = iter.get();

    size_t bytes = region->allocBytes(alloc);
    uintptr_t allocEnd = iter.getOffset() + bytes;

    bool nurseryOwned = region->isNurseryOwned(alloc);
    bool canSweep = CanSweepAlloc(nurseryOwned, sweepKind);

    bool shouldSweep = canSweep && !region->isMarked(alloc);
    if (shouldSweep) {
      // Dead. Update allocated bitmap, metadata and heap size accounting.
      SetDeallocated(region, alloc, bytes);
      PoisonAlloc(alloc, JS_SWEPT_TENURED_PATTERN, bytes,
                  MemCheckKind::MakeUndefined);
      sweptAny = true;
    } else {
      // Alive. Add any free space before this allocation.
      uintptr_t allocStart = iter.getOffset();
      if (freeStart != allocStart) {
        addSweptRegion(region, freeStart, allocStart, !sweptAny, freeLists);
      }
      freeStart = allocEnd;
      if (canSweep) {
        region->setUnmarked(alloc);
      }
      if (nurseryOwned) {
        MOZ_ASSERT(sweepKind == SweepKind::Nursery);
        hasNurseryOwnedAllocs = true;
      }
      // Reset so the next gap only counts as changed if an allocation after
      // this one is swept.
      sweptAny = false;
    }
  }

  if (freeStart == FirstSmallAllocOffset) {
    // Region is empty.
    return false;
  }

  // Add any free space from the last allocation to the end of the chunk.
  if (freeStart != SmallRegionSize) {
    addSweptRegion(region, freeStart, SmallRegionSize, !sweptAny, freeLists);
  }

  region->setHasNurseryOwnedAllocs(hasNurseryOwnedAllocs);

  return true;
}
   2685 
   2686 void BufferAllocator::addSweptRegion(SmallBufferRegion* region,
   2687                                     uintptr_t freeStart, uintptr_t freeEnd,
   2688                                     bool expectUnchanged,
   2689                                     FreeLists& freeLists) {
   2690  // Add the region from |freeStart| to |freeEnd| to the appropriate swept free
   2691  // list based on its size. Unused pages in small buffer regions are not
   2692  // decommitted.
   2693 
   2694  MOZ_ASSERT(freeStart >= FirstSmallAllocOffset);
   2695  MOZ_ASSERT(freeStart < freeEnd);
   2696  MOZ_ASSERT(freeEnd <= SmallRegionSize);
   2697  MOZ_ASSERT(freeStart % SmallAllocGranularity == 0);
   2698  MOZ_ASSERT(freeEnd % SmallAllocGranularity == 0);
   2699 
   2700  freeStart += uintptr_t(region);
   2701  freeEnd += uintptr_t(region);
   2702 
   2703  size_t bytes = freeEnd - freeStart;
   2704  addFreeRegion(&freeLists, freeStart, bytes, SizeKind::Small, false,
   2705                ListPosition::Back, expectUnchanged);
   2706 }
   2707 
// Free a medium sized allocation. This coalesces the free space with any
// neighboring free regions. Coalescing is necessary for resize to work
// properly. If the chunk is currently being swept the free is skipped; the
// sweep will reclaim the memory instead.
void BufferAllocator::freeMedium(void* alloc) {
  BufferChunk* chunk = BufferChunk::from(alloc);
  MOZ_ASSERT(chunk->zone == zone);

  size_t bytes = chunk->allocBytes(alloc);
  PoisonAlloc(alloc, JS_FREED_BUFFER_PATTERN, bytes,
              MemCheckKind::MakeUndefined);

  if (isSweepingChunk(chunk)) {
    return;  // We can't free if the chunk is currently being swept.
  }

  // Update heap size.
  bool updateRetained =
      majorState == State::Marking && !chunk->allocatedDuringCollection;
  decreaseHeapSize(bytes, chunk->isNurseryOwned(alloc), updateRetained);

  // TODO: Since the mark bits are atomic, it's probably OK to unmark even if
  // the chunk is currently being swept. If we get lucky the memory will be
  // freed sooner.
  chunk->setUnmarked(alloc);

  // Set region as not allocated and clear metadata.
  SetDeallocated(chunk, alloc, bytes);

  FreeLists* freeLists = getChunkFreeLists(chunk);

  uintptr_t startAddr = uintptr_t(alloc);
  uintptr_t endAddr = startAddr + bytes;

  // If the chunk is in one of the available lists we may need to move it.
  // Remember its current size class so we can tell whether it changed.
  ChunkLists* availableChunks = getChunkAvailableLists(chunk);
  size_t oldChunkSizeClass = SIZE_MAX;
  if (availableChunks) {
    oldChunkSizeClass = chunk->sizeClassForAvailableLists();
  }

  // First check whether there is a free region following the allocation.
  FreeRegion* region;
  uintptr_t endOffset = endAddr & ChunkMask;
  if (endOffset == 0 || chunk->isAllocated(endOffset)) {
    // The allocation abuts the end of the chunk or another allocation. Add the
    // allocation as a new free region.
    //
    // The new region is added to the front of relevant list so as to reuse
    // recently freed memory preferentially. This may reduce fragmentation. See
    // "The Memory Fragmentation Problem: Solved?"  by Johnstone et al.
    region = addFreeRegion(freeLists, startAddr, bytes, SizeKind::Medium, false,
                           ListPosition::Front);
    MOZ_ASSERT(region);  // Always succeeds for medium allocations.
  } else {
    // There is a free region following this allocation. Expand the existing
    // region down to cover the newly freed space.
    region = chunk->findFollowingFreeRegion(endAddr);
    MOZ_ASSERT(region->startAddr == endAddr);
    updateFreeRegionStart(freeLists, region, startAddr, SizeKind::Medium);
  }

  // Next check for any preceding free region and coalesce.
  FreeRegion* precRegion = chunk->findPrecedingFreeRegion(startAddr);
  if (precRegion) {
    // Unlink the preceding region from its free list (if tracked) and extend
    // |region| down over the space it covered.
    if (freeLists) {
      size_t sizeClass =
          SizeClassForFreeRegion(precRegion->size(), SizeKind::Medium);
      freeLists->remove(sizeClass, precRegion);
    }

    updateFreeRegionStart(freeLists, region, precRegion->startAddr,
                          SizeKind::Medium);
    if (precRegion->hasDecommittedPages) {
      region->hasDecommittedPages = true;
    }
  }

  if (availableChunks) {
    maybeUpdateAvailableLists(availableChunks, chunk, oldChunkSizeClass);
  }
}
   2790 
   2791 void BufferAllocator::maybeUpdateAvailableLists(ChunkLists* availableChunks,
   2792                                                BufferChunk* chunk,
   2793                                                size_t oldChunkSizeClass) {
   2794  // A realloc or free operation can change the amount of free space in an
   2795  // available chunk, so we may need to move it to a different list.
   2796  size_t newChunkSizeClass = chunk->sizeClassForAvailableLists();
   2797  if (newChunkSizeClass != oldChunkSizeClass) {
   2798    availableChunks->remove(oldChunkSizeClass, chunk);
   2799    availableChunks->pushBack(newChunkSizeClass, chunk);
   2800  }
   2801 }
   2802 
// Return whether |chunk| may currently be being swept, in which case its
// metadata and free lists must not be modified by the mutator. As a side
// effect this may merge already-completed minor sweep data, which can allow
// the caller's operation to proceed after all.
bool BufferAllocator::isSweepingChunk(BufferChunk* chunk) {
  if (minorState == State::Sweeping && chunk->hasNurseryOwnedAllocs) {
    // We are currently sweeping nursery owned allocations.

    // TODO: We could set a flag for nursery chunks allocated during minor
    // collection to allow operations on chunks that are not being swept here.

    if (!hasMinorSweepDataToMerge) {
#ifdef DEBUG
      {
        // Check the unlocked flag read above is consistent with the real
        // sweeping state.
        AutoLock lock(this);
        MOZ_ASSERT_IF(!hasMinorSweepDataToMerge, !minorSweepingFinished);
      }
#endif

      // Likely no data to merge so don't bother taking the lock.
      return true;
    }

    // Merge swept data and recheck.
    //
    // TODO: It would be good to know how often this helps and if it is
    // worthwhile.
    mergeSweptData();
    if (minorState == State::Sweeping && chunk->hasNurseryOwnedAllocs) {
      return true;
    }
  }

  if (majorState == State::Sweeping && !chunk->allocatedDuringCollection) {
    // We are currently sweeping tenured owned allocations.
    return true;
  }

  return false;
}
   2839 
// Create a FreeRegion describing |bytes| bytes of free space starting at
// |start| and, if |freeLists| is supplied, add it at |position| in the list
// for its size class. The FreeRegion structure itself is stored in the last
// bytes of the free space. Returns null if the space is too small to record.
BufferAllocator::FreeRegion* BufferAllocator::addFreeRegion(
    FreeLists* freeLists, uintptr_t start, size_t bytes, SizeKind kind,
    bool anyDecommitted, ListPosition position,
    bool expectUnchanged /* = false */) {
  static_assert(sizeof(FreeRegion) <= MinFreeRegionSize);
  if (bytes < MinFreeRegionSize) {
    // We can't record a region this small. The free space will not be reused
    // until enough adjacent space become free.
    return nullptr;
  }

  size_t sizeClass = SizeClassForFreeRegion(bytes, kind);
  MOZ_ASSERT_IF(sizeClass != MaxMediumAllocClass,
                bytes >= SizeClassBytes(sizeClass));

  MOZ_ASSERT(start % GranularityForSizeClass(sizeClass) == 0);
  MOZ_ASSERT(bytes % GranularityForSizeClass(sizeClass) == 0);

  uintptr_t end = start + bytes;
#ifdef DEBUG
  if (expectUnchanged) {
    // We didn't free any allocations so there should already be a FreeRegion
    // from |start| to |end|.
    auto* region = FreeRegion::fromEndAddr(end);
    MOZ_ASSERT(region->startAddr == start);
  }
#endif

  // Construct the FreeRegion in place at the end of the free space.
  void* ptr = reinterpret_cast<void*>(end - sizeof(FreeRegion));
  FreeRegion* region = new (ptr) FreeRegion(start, anyDecommitted);
  MOZ_ASSERT(region->getEnd() == end);

  if (freeLists) {
    if (position == ListPosition::Front) {
      freeLists->pushFront(sizeClass, region);
    } else {
      freeLists->pushBack(sizeClass, region);
    }
  }

  return region;
}
   2882 
   2883 void BufferAllocator::updateFreeRegionStart(FreeLists* freeLists,
   2884                                            FreeRegion* region,
   2885                                            uintptr_t newStart, SizeKind kind) {
   2886  MOZ_ASSERT((newStart & ~ChunkMask) == (uintptr_t(region) & ~ChunkMask));
   2887  MOZ_ASSERT(region->startAddr != newStart);
   2888 
   2889  // TODO: Support realloc for small regions.
   2890  MOZ_ASSERT(kind == SizeKind::Medium);
   2891 
   2892  size_t oldSize = region->size();
   2893  region->startAddr = newStart;
   2894 
   2895  if (!freeLists) {
   2896    return;
   2897  }
   2898 
   2899  size_t currentSizeClass = SizeClassForFreeRegion(oldSize, kind);
   2900  size_t newSizeClass = SizeClassForFreeRegion(region->size(), kind);
   2901  MOZ_ASSERT(SizeClassKind(newSizeClass) == SizeClassKind(currentSizeClass));
   2902  if (currentSizeClass != newSizeClass) {
   2903    freeLists->remove(currentSizeClass, region);
   2904    freeLists->pushFront(newSizeClass, region);
   2905  }
   2906 }
   2907 
// Try to grow a medium allocation in place to |newBytes| by consuming space
// from a free region immediately following it. Returns false if this was not
// possible; the caller must then allocate and copy. The allocation's address
// is unchanged on success.
bool BufferAllocator::growMedium(void* alloc, size_t newBytes) {
  MOZ_ASSERT(!IsSmallAllocSize(newBytes));
  MOZ_ASSERT(!IsLargeAllocSize(newBytes));
  newBytes = std::max(newBytes, MinMediumAllocSize);
  MOZ_ASSERT(newBytes == GetGoodAllocSize(newBytes));

  BufferChunk* chunk = BufferChunk::from(alloc);
  MOZ_ASSERT(chunk->zone == zone);

  if (isSweepingChunk(chunk)) {
    return false;  // We can't grow if the chunk is currently being swept.
  }

  size_t currentBytes = chunk->allocBytes(alloc);
  MOZ_ASSERT(newBytes > currentBytes);

  uintptr_t endOffset = (uintptr_t(alloc) & ChunkMask) + currentBytes;
  MOZ_ASSERT(endOffset <= ChunkSize);
  if (endOffset == ChunkSize) {
    return false;  // Can't extend because we're at the end of the chunk.
  }

  size_t endAddr = uintptr_t(chunk) + endOffset;
  if (chunk->isAllocated(endOffset)) {
    return false;  // Can't extend because we abut another allocation.
  }

  FreeRegion* region = chunk->findFollowingFreeRegion(endAddr);
  MOZ_ASSERT(region->startAddr == endAddr);

  size_t extraBytes = newBytes - currentBytes;
  if (region->size() < extraBytes) {
    return false;  // Can't extend because following free region is too small.
  }

  size_t sizeClass = SizeClassForFreeRegion(region->size(), SizeKind::Medium);

  // Take |extraBytes| from the start of the free region.
  allocFromRegion(region, extraBytes, sizeClass);

  // If the chunk is in one of the available lists we may need to move it if the
  // largest free region has shrunk too much.
  ChunkLists* availableChunks = getChunkAvailableLists(chunk);
  size_t oldChunkSizeClass = SIZE_MAX;
  if (availableChunks) {
    oldChunkSizeClass = chunk->sizeClassForAvailableLists();
  }

  FreeLists* freeLists = getChunkFreeLists(chunk);
  updateFreeListsAfterAlloc(freeLists, region, sizeClass);

  if (availableChunks) {
    maybeUpdateAvailableLists(availableChunks, chunk, oldChunkSizeClass);
  }

  // Record the allocation's new size.
  chunk->updateEndOffset(alloc, currentBytes, newBytes);
  MOZ_ASSERT(chunk->allocBytes(alloc) == newBytes);

  // Account for the extra space taken from the free region.
  bool updateRetained =
      majorState == State::Marking && !chunk->allocatedDuringCollection;
  increaseHeapSize(extraBytes, chunk->isNurseryOwned(alloc), true,
                   updateRetained);

  return true;
}
   2972 
// Shrink a medium allocation in place to |newBytes|, returning the freed tail
// to the free lists (coalescing with any following free region). Returns
// false if no change was made.
bool BufferAllocator::shrinkMedium(void* alloc, size_t newBytes) {
  MOZ_ASSERT(!IsSmallAllocSize(newBytes));
  MOZ_ASSERT(!IsLargeAllocSize(newBytes));
  newBytes = std::max(newBytes, MinMediumAllocSize);
  MOZ_ASSERT(newBytes == GetGoodAllocSize(newBytes));

  BufferChunk* chunk = BufferChunk::from(alloc);
  MOZ_ASSERT(chunk->zone == zone);

  if (isSweepingChunk(chunk)) {
    return false;  // We can't shrink if the chunk is currently being swept.
  }

  size_t currentBytes = chunk->allocBytes(alloc);
  if (newBytes == currentBytes) {
    // Requested size is the same after adjusting to a valid medium alloc size.
    return false;
  }

  MOZ_ASSERT(newBytes < currentBytes);
  size_t sizeChange = currentBytes - newBytes;

  // Update allocation size.
  chunk->updateEndOffset(alloc, currentBytes, newBytes);
  MOZ_ASSERT(chunk->allocBytes(alloc) == newBytes);
  bool updateRetained =
      majorState == State::Marking && !chunk->allocatedDuringCollection;
  decreaseHeapSize(sizeChange, chunk->isNurseryOwned(alloc), updateRetained);

  uintptr_t startOffset = uintptr_t(alloc) & ChunkMask;
  uintptr_t oldEndOffset = startOffset + currentBytes;
  uintptr_t newEndOffset = startOffset + newBytes;
  MOZ_ASSERT(oldEndOffset <= ChunkSize);

  // Poison freed memory.
  uintptr_t chunkAddr = uintptr_t(chunk);
  PoisonAlloc(reinterpret_cast<void*>(chunkAddr + newEndOffset),
              JS_SWEPT_TENURED_PATTERN, sizeChange,
              MemCheckKind::MakeUndefined);

  // If the chunk is in one of the available lists we may need to move it.
  // Remember its current size class so we can tell whether it changed.
  ChunkLists* availableChunks = getChunkAvailableLists(chunk);
  size_t oldChunkSizeClass = SIZE_MAX;
  if (availableChunks) {
    oldChunkSizeClass = chunk->sizeClassForAvailableLists();
  }

  FreeLists* freeLists = getChunkFreeLists(chunk);
  if (oldEndOffset == ChunkSize || chunk->isAllocated(oldEndOffset)) {
    // If we abut another allocation then add a new free region.
    uintptr_t freeStart = chunkAddr + newEndOffset;
    addFreeRegion(freeLists, freeStart, sizeChange, SizeKind::Medium, false,
                  ListPosition::Front);
  } else {
    // Otherwise find the following free region and extend it down.
    FreeRegion* region =
        chunk->findFollowingFreeRegion(chunkAddr + oldEndOffset);
    MOZ_ASSERT(region->startAddr == chunkAddr + oldEndOffset);
    updateFreeRegionStart(freeLists, region, chunkAddr + newEndOffset,
                          SizeKind::Medium);
  }

  if (availableChunks) {
    maybeUpdateAvailableLists(availableChunks, chunk, oldChunkSizeClass);
  }

  return true;
}
   3041 
   3042 BufferAllocator::FreeLists* BufferAllocator::getChunkFreeLists(
   3043    BufferChunk* chunk) {
   3044  MOZ_ASSERT_IF(majorState == State::Sweeping,
   3045                chunk->allocatedDuringCollection);
   3046  MOZ_ASSERT_IF(
   3047      majorState == State::Marking && !chunk->allocatedDuringCollection,
   3048      chunk->ownsFreeLists);
   3049 
   3050  if (chunk->ownsFreeLists) {
   3051    // The chunk is in one of the available lists.
   3052    return &chunk->freeLists.ref();
   3053  }
   3054 
   3055  return &freeLists.ref();
   3056 }
   3057 
   3058 BufferAllocator::ChunkLists* BufferAllocator::getChunkAvailableLists(
   3059    BufferChunk* chunk) {
   3060  MOZ_ASSERT_IF(majorState == State::Sweeping,
   3061                chunk->allocatedDuringCollection);
   3062 
   3063  if (!chunk->ownsFreeLists) {
   3064    return nullptr;  // Chunk is not in either available list.
   3065  }
   3066 
   3067  if (majorState == State::Marking && !chunk->allocatedDuringCollection) {
   3068    return nullptr;  // Chunk is waiting to be swept.
   3069  }
   3070 
   3071  if (chunk->hasNurseryOwnedAllocs) {
   3072    return &availableMixedChunks.ref();
   3073  }
   3074 
   3075  return &availableTenuredChunks.ref();
   3076 }
   3077 
   3078 /* static */
   3079 size_t BufferAllocator::SizeClassForSmallAlloc(size_t bytes) {
   3080  MOZ_ASSERT(bytes >= MinSmallAllocSize);
   3081  MOZ_ASSERT(bytes <= MaxSmallAllocSize);
   3082 
   3083  size_t log2Size = mozilla::CeilingLog2(bytes);
   3084  MOZ_ASSERT((size_t(1) << log2Size) >= bytes);
   3085  MOZ_ASSERT(MinSizeClassShift == mozilla::CeilingLog2(MinFreeRegionSize));
   3086  if (log2Size < MinSizeClassShift) {
   3087    return 0;
   3088  }
   3089 
   3090  size_t sizeClass = log2Size - MinSizeClassShift;
   3091  MOZ_ASSERT(sizeClass <= MaxSmallAllocClass);
   3092  return sizeClass;
   3093 }
   3094 
   3095 /* static */
   3096 size_t BufferAllocator::SizeClassForMediumAlloc(size_t bytes) {
   3097  MOZ_ASSERT(bytes >= MinMediumAllocSize);
   3098  MOZ_ASSERT(bytes <= MaxMediumAllocSize);
   3099 
   3100  size_t log2Size = mozilla::CeilingLog2(bytes);
   3101  MOZ_ASSERT((size_t(1) << log2Size) >= bytes);
   3102 
   3103  MOZ_ASSERT(log2Size >= MinMediumAllocShift);
   3104  size_t sizeClass = log2Size - MinMediumAllocShift + MinMediumAllocClass;
   3105 
   3106  MOZ_ASSERT(sizeClass >= MinMediumAllocClass);
   3107  MOZ_ASSERT(sizeClass < AllocSizeClasses);
   3108  return sizeClass;
   3109 }
   3110 
   3111 /* static */
   3112 size_t BufferAllocator::SizeClassForFreeRegion(size_t bytes, SizeKind kind) {
   3113  MOZ_ASSERT(bytes >= MinFreeRegionSize);
   3114  MOZ_ASSERT(bytes < ChunkSize);
   3115 
   3116  if (kind == SizeKind::Medium && bytes >= MaxMediumAllocSize) {
   3117    // Free regions large enough for MaxMediumAllocSize don't have to have
   3118    // enough space for that size rounded up to the next power of two, as is the
   3119    // case for smaller regions.
   3120    return MaxMediumAllocClass;
   3121  }
   3122 
   3123  size_t log2Size = mozilla::FloorLog2(bytes);
   3124  MOZ_ASSERT((size_t(1) << log2Size) <= bytes);
   3125  MOZ_ASSERT(log2Size >= MinSizeClassShift);
   3126  size_t sizeClass =
   3127      std::min(log2Size - MinSizeClassShift, AllocSizeClasses - 1);
   3128 
   3129  if (kind == SizeKind::Small) {
   3130    return std::min(sizeClass, MaxSmallAllocClass);
   3131  }
   3132 
   3133  sizeClass++;  // Medium size classes start after small ones.
   3134 
   3135  MOZ_ASSERT(sizeClass >= MinMediumAllocClass);
   3136  MOZ_ASSERT(sizeClass < AllocSizeClasses);
   3137  return sizeClass;
   3138 }
   3139 
   3140 /* static */
   3141 inline size_t BufferAllocator::SizeClassBytes(size_t sizeClass) {
   3142  MOZ_ASSERT(sizeClass < AllocSizeClasses);
   3143 
   3144  // The first medium size class is the same size as the last small size class.
   3145  if (sizeClass >= MinMediumAllocClass) {
   3146    sizeClass--;
   3147  }
   3148 
   3149  return 1 << (sizeClass + MinSizeClassShift);
   3150 }
   3151 
   3152 /* static */
   3153 bool BufferAllocator::IsMediumAlloc(void* alloc) {
   3154  MOZ_ASSERT(IsBufferAlloc(alloc));
   3155 
   3156  // Test for large buffers before calling this so we can assume |alloc| is
   3157  // inside a chunk.
   3158  MOZ_ASSERT(!IsLargeAlloc(alloc));
   3159 
   3160  BufferChunk* chunk = BufferChunk::from(alloc);
   3161  return !chunk->isSmallBufferRegion(alloc);
   3162 }
   3163 
   3164 bool BufferAllocator::needLockToAccessBufferMap() const {
   3165  MOZ_ASSERT(CurrentThreadCanAccessZone(zone) || CurrentThreadIsPerformingGC());
   3166  return minorState.refNoCheck() == State::Sweeping ||
   3167         majorState.refNoCheck() == State::Sweeping;
   3168 }
   3169 
// Convenience wrapper: look up the LargeBuffer for |alloc|, holding the
// buffer map lock (if required) only for the duration of the lookup.
LargeBuffer* BufferAllocator::lookupLargeBuffer(void* alloc) {
  MaybeLock lock;
  return lookupLargeBuffer(alloc, lock);
}
   3174 
   3175 LargeBuffer* BufferAllocator::lookupLargeBuffer(void* alloc, MaybeLock& lock) {
   3176  MOZ_ASSERT(lock.isNothing());
   3177  if (needLockToAccessBufferMap()) {
   3178    lock.emplace(this);
   3179  }
   3180 
   3181  auto ptr = largeAllocMap.ref().readonlyThreadsafeLookup(alloc);
   3182  MOZ_ASSERT(ptr);
   3183  LargeBuffer* buffer = ptr->value();
   3184  MOZ_ASSERT(buffer->data() == alloc);
   3185  MOZ_ASSERT(buffer->zoneFromAnyThread() == zone);
   3186  return buffer;
   3187 }
   3188 
   3189 void* BufferAllocator::allocLarge(size_t bytes, bool nurseryOwned, bool inGC) {
   3190  bytes = RoundUp(bytes, ChunkSize);
   3191  MOZ_ASSERT(bytes > MaxMediumAllocSize);
   3192  MOZ_ASSERT(bytes >= bytes);
   3193 
   3194  // Allocate a small buffer the size of a LargeBuffer to hold the metadata.
   3195  static_assert(sizeof(LargeBuffer) <= MaxSmallAllocSize);
   3196  void* bufferPtr = allocSmall(sizeof(LargeBuffer), nurseryOwned, inGC);
   3197  if (!bufferPtr) {
   3198    return nullptr;
   3199  }
   3200 
   3201  // Large allocations are aligned to the chunk size, even if they are smaller
   3202  // than a chunk. This allows us to tell large buffer allocations apart by
   3203  // looking at the pointer alignment.
   3204  void* alloc = MapAlignedPages(bytes, ChunkSize, ShouldStallAndRetry(inGC));
   3205  if (!alloc) {
   3206    return nullptr;
   3207  }
   3208  auto freeGuard = mozilla::MakeScopeExit([&]() { UnmapPages(alloc, bytes); });
   3209 
   3210  CheckHighBitsOfPointer(alloc);
   3211 
   3212  auto* buffer = new (bufferPtr) LargeBuffer(alloc, bytes, nurseryOwned);
   3213 
   3214  {
   3215    MaybeLock lock;
   3216    if (needLockToAccessBufferMap()) {
   3217      lock.emplace(this);
   3218    }
   3219    if (!largeAllocMap.ref().putNew(alloc, buffer)) {
   3220      return nullptr;
   3221    }
   3222  }
   3223 
   3224  freeGuard.release();
   3225 
   3226  if (nurseryOwned) {
   3227    largeNurseryAllocs.ref().pushBack(buffer);
   3228  } else {
   3229    buffer->allocatedDuringCollection = majorState != State::NotCollecting;
   3230    largeTenuredAllocs.ref().pushBack(buffer);
   3231  }
   3232 
   3233  // Update memory accounting and trigger an incremental slice if needed.
   3234  bool checkThresholds = !inGC;
   3235  increaseHeapSize(bytes, nurseryOwned, checkThresholds, false);
   3236 
   3237  MOZ_ASSERT(IsLargeAlloc(alloc));
   3238  return alloc;
   3239 }
   3240 
   3241 void BufferAllocator::increaseHeapSize(size_t bytes, bool nurseryOwned,
   3242                                       bool checkThresholds,
   3243                                       bool updateRetainedSize) {
   3244  // Update memory accounting and trigger an incremental slice if needed.
   3245  // TODO: This will eventually be attributed to gcHeapSize.
   3246  GCRuntime* gc = &zone->runtimeFromAnyThread()->gc;
   3247  if (nurseryOwned) {
   3248    gc->nursery().addMallocedBufferBytes(bytes);
   3249  } else {
   3250    zone->mallocHeapSize.addBytes(bytes, updateRetainedSize);
   3251    if (checkThresholds) {
   3252      gc->maybeTriggerGCAfterMalloc(zone);
   3253    }
   3254  }
   3255 }
   3256 
   3257 void BufferAllocator::decreaseHeapSize(size_t bytes, bool nurseryOwned,
   3258                                       bool updateRetainedSize) {
   3259  if (nurseryOwned) {
   3260    GCRuntime* gc = &zone->runtimeFromAnyThread()->gc;
   3261    gc->nursery().removeMallocedBufferBytes(bytes);
   3262  } else {
   3263    zone->mallocHeapSize.removeBytes(bytes, updateRetainedSize);
   3264  }
   3265 }
   3266 
   3267 /* static */
   3268 bool BufferAllocator::IsLargeAlloc(void* alloc) {
   3269  return (uintptr_t(alloc) & ChunkMask) == 0;
   3270 }
   3271 
// Mark a tenured large buffer by setting the mark bit on its LargeBuffer
// metadata, which lives in a SmallBufferRegion. Buffers allocated during the
// current collection don't need marking and return false immediately.
bool BufferAllocator::markLargeTenuredBuffer(LargeBuffer* buffer) {
  MOZ_ASSERT(!buffer->isNurseryOwned);

  if (buffer->allocatedDuringCollection) {
    return false;
  }

  // Bug 1961755: This method can return false positives. A fully atomic version
  // would be preferable in this case.
  auto* region = SmallBufferRegion::from(buffer);
  return region->setMarked(buffer);
}
   3284 
   3285 bool BufferAllocator::isLargeTenuredMarked(LargeBuffer* buffer) {
   3286  MOZ_ASSERT(!buffer->isNurseryOwned);
   3287  MOZ_ASSERT(buffer->zoneFromAnyThread() == zone);
   3288  MOZ_ASSERT(!buffer->isInList());
   3289 
   3290  auto* region = SmallBufferRegion::from(buffer);
   3291  return region->isMarked(buffer);
   3292 }
   3293 
// Explicitly free a large allocation: remove it from its owning list and from
// the large-allocation map, then unmap its pages. If the allocation is
// currently owned by an in-progress sweep, do nothing here — NOTE(review):
// presumably the sweep is then responsible for unmapping it; confirm against
// the sweeping code.
void BufferAllocator::freeLarge(void* alloc) {
  MaybeLock lock;
  LargeBuffer* buffer = lookupLargeBuffer(alloc, lock);
  MOZ_ASSERT(buffer->zone() == zone);

  // Poison the freed contents in instrumented builds to catch stale reads.
  DebugOnlyPoison(alloc, JS_FREED_BUFFER_PATTERN, buffer->allocBytes(),
                  MemCheckKind::MakeUndefined);

  if (!buffer->isNurseryOwned && majorState == State::Sweeping &&
      !buffer->allocatedDuringCollection) {
    return;  // Large allocations are currently being swept.
  }

  MOZ_ASSERT(buffer->isInList());

  // Remove from whichever list currently owns the buffer: tenured buffers
  // that predate an in-progress mark phase are on the to-sweep list.
  if (buffer->isNurseryOwned) {
    largeNurseryAllocs.ref().remove(buffer);
  } else if (majorState == State::Marking &&
             !buffer->allocatedDuringCollection) {
    largeTenuredAllocsToSweep.ref().remove(buffer);
  } else {
    largeTenuredAllocs.ref().remove(buffer);
  }

  unmapLarge(buffer, false, lock);
}
   3320 
// Shrink a large allocation in place to |newBytes| (rounded up to the chunk
// size) by unmapping the tail of the mapping. Returns false if shrinking is
// not possible (on Windows, or while the buffer is being swept), leaving the
// allocation unchanged.
bool BufferAllocator::shrinkLarge(LargeBuffer* buffer, size_t newBytes) {
  MOZ_ASSERT(IsLargeAllocSize(newBytes));
#ifdef XP_WIN
  // Can't unmap part of a region mapped with VirtualAlloc on Windows.
  //
  // It is possible to decommit the physical pages so we could do that and
  // track virtual size as well as committed size. This would also allow us to
  // grow the allocation again if necessary.
  return false;
#else
  MOZ_ASSERT(buffer->zone() == zone);

  if (!buffer->isNurseryOwned && majorState == State::Sweeping &&
      !buffer->allocatedDuringCollection) {
    return false;  // Large allocations are currently being swept.
  }

  MOZ_ASSERT(buffer->isInList());

  newBytes = RoundUp(newBytes, ChunkSize);
  size_t oldBytes = buffer->bytes;
  MOZ_ASSERT(oldBytes > newBytes);
  size_t shrinkBytes = oldBytes - newBytes;

  // Update memory accounting before changing the buffer's recorded size.
  decreaseHeapSize(shrinkBytes, buffer->isNurseryOwned, false);

  buffer->bytes = newBytes;

  // Unmap the now-unused tail of the mapping.
  void* endPtr = reinterpret_cast<void*>(uintptr_t(buffer->data()) + newBytes);
  UnmapPages(endPtr, shrinkBytes);

  return true;
#endif
}
   3355 
   3356 void BufferAllocator::unmapLarge(LargeBuffer* buffer, bool isSweeping,
   3357                                 MaybeLock& lock) {
   3358  unregisterLarge(buffer, isSweeping, lock);
   3359  UnmapPages(buffer->data(), buffer->bytes);
   3360 }
   3361 
// Remove |buffer| from the large-allocation map and update heap accounting.
// The pages themselves are not unmapped here (see unmapLarge). The lock, if
// held, is released once the map has been updated.
void BufferAllocator::unregisterLarge(LargeBuffer* buffer, bool isSweeping,
                                      MaybeLock& lock) {
  MOZ_ASSERT(buffer->zoneFromAnyThread() == zone);
  MOZ_ASSERT(!buffer->isInList());
  // The map requires the lock when swept off-thread or when concurrent access
  // is otherwise possible.
  MOZ_ASSERT_IF(isSweeping || needLockToAccessBufferMap(), lock.isSome());

#ifdef DEBUG
  auto ptr = largeAllocMap.ref().lookup(buffer->data());
  MOZ_ASSERT(ptr && ptr->value() == buffer);
#endif
  largeAllocMap.ref().remove(buffer->data());

  // Drop the lock now we've updated the map.
  lock.reset();

  // Skip accounting only for nursery-owned buffers freed during sweeping —
  // NOTE(review): presumably their bytes are accounted elsewhere on that
  // path; confirm against the sweeping code.
  if (!buffer->isNurseryOwned || !isSweeping) {
    decreaseHeapSize(buffer->bytes, buffer->isNurseryOwned, isSweeping);
  }
}
   3381 
   3382 #include "js/Printer.h"
   3383 #include "util/GetPidProvider.h"
   3384 
// Prefix identifying buffer-allocator stats lines in the log output.
static const char* const BufferAllocatorStatsPrefix = "BufAllc:";

// X-macro table of stats columns: (header name, column width, printf format,
// value expression). Drives both the header row (printStatsHeader) and the
// data rows (printStats); the value expressions reference locals declared in
// printStats.
#define FOR_EACH_BUFFER_STATS_FIELD(_)                 \
  _("PID", 7, "%7zu", pid)                             \
  _("Runtime", 14, "0x%12p", runtime)                  \
  _("Timestamp", 10, "%10.6f", timestamp.ToSeconds())  \
  _("Reason", 20, "%-20.20s", reason)                  \
  _("", 2, "%2s", "")                                  \
  _("TotalKB", 8, "%8zu", totalBytes / 1024)           \
  _("UsedKB", 8, "%8zu", stats.usedBytes / 1024)       \
  _("FreeKB", 8, "%8zu", stats.freeBytes / 1024)       \
  _("Zs", 3, "%3zu", zoneCount)                        \
  _("", 7, "%7s", "")                                  \
  _("MixSRs", 6, "%6zu", stats.mixedSmallRegions)      \
  _("TnrSRs", 6, "%6zu", stats.tenuredSmallRegions)    \
  _("MixCs", 6, "%6zu", stats.mixedChunks)             \
  _("TnrCs", 6, "%6zu", stats.tenuredChunks)           \
  _("AMixCs", 6, "%6zu", stats.availableMixedChunks)   \
  _("ATnrCs", 6, "%6zu", stats.availableTenuredChunks) \
  _("FreeRs", 6, "%6zu", stats.freeRegions)            \
  _("LNurAs", 6, "%6zu", stats.largeNurseryAllocs)     \
  _("LTnrAs", 6, "%6zu", stats.largeTenuredAllocs)
   3407 
/* static */
// Print the column-header row of the stats table to |file|. Returns silently
// if the sprinter cannot be initialized or releasing its buffer fails.
void BufferAllocator::printStatsHeader(FILE* file) {
  Sprinter sprinter;
  if (!sprinter.init()) {
    return;
  }
  sprinter.put(BufferAllocatorStatsPrefix);

// Emit each column name left-justified to its configured width.
#define PRINT_METADATA_NAME(name, width, _1, _2) \
  sprinter.printf(" %-*s", width, name);

  FOR_EACH_BUFFER_STATS_FIELD(PRINT_METADATA_NAME)
#undef PRINT_METADATA_NAME

  sprinter.put("\n");

  JS::UniqueChars str = sprinter.release();
  if (!str) {
    return;
  }
  fputs(str.get(), file);
}
   3430 
/* static */
// Print one data row of the stats table to |file|, aggregating stats over all
// zones. |creationTime| anchors the timestamp column; |isMajorGC| selects the
// reason string. Returns silently on sprinter failure.
void BufferAllocator::printStats(GCRuntime* gc, mozilla::TimeStamp creationTime,
                                 bool isMajorGC, FILE* file) {
  Sprinter sprinter;
  if (!sprinter.init()) {
    return;
  }
  sprinter.put(BufferAllocatorStatsPrefix);

  // These locals look unused but are consumed by the value expressions in
  // FOR_EACH_BUFFER_STATS_FIELD below.
  size_t pid = getpid();
  JSRuntime* runtime = gc->rt;
  mozilla::TimeDuration timestamp = mozilla::TimeStamp::Now() - creationTime;
  const char* reason = isMajorGC ? "post major slice" : "pre minor GC";

  // Accumulate stats across every zone's buffer allocator.
  size_t zoneCount = 0;
  Stats stats;
  for (AllZonesIter zone(gc); !zone.done(); zone.next()) {
    zoneCount++;
    zone->bufferAllocator.getStats(stats);
  }

  size_t totalBytes = stats.usedBytes + stats.freeBytes + stats.adminBytes;

#define PRINT_FIELD_VALUE(_1, _2, format, value) \
  sprinter.printf(" " format, value);

  FOR_EACH_BUFFER_STATS_FIELD(PRINT_FIELD_VALUE)
#undef PRINT_FIELD_VALUE

  sprinter.put("\n");

  JS::UniqueChars str = sprinter.release();
  if (!str) {
    return;
  }

  fputs(str.get(), file);
}
   3469 
   3470 size_t BufferAllocator::getSizeOfNurseryBuffers() {
   3471  maybeMergeSweptData();
   3472 
   3473  MOZ_ASSERT(minorState == State::NotCollecting);
   3474  MOZ_ASSERT(majorState == State::NotCollecting);
   3475 
   3476  size_t bytes = 0;
   3477 
   3478  for (BufferChunk* chunk : mixedChunks.ref()) {
   3479    for (auto alloc = chunk->allocIter(); !alloc.done(); alloc.next()) {
   3480      if (chunk->isNurseryOwned(alloc)) {
   3481        bytes += chunk->allocBytes(alloc);
   3482      }
   3483    }
   3484  }
   3485 
   3486  for (const LargeBuffer* buffer : largeNurseryAllocs.ref()) {
   3487    bytes += buffer->allocBytes();
   3488  }
   3489 
   3490  return bytes;
   3491 }
   3492 
   3493 void BufferAllocator::addSizeOfExcludingThis(size_t* usedBytesOut,
   3494                                             size_t* freeBytesOut,
   3495                                             size_t* adminBytesOut) {
   3496  maybeMergeSweptData();
   3497 
   3498  MOZ_ASSERT(minorState == State::NotCollecting);
   3499  MOZ_ASSERT(majorState == State::NotCollecting);
   3500 
   3501  Stats stats;
   3502  getStats(stats);
   3503 
   3504  *usedBytesOut += stats.usedBytes;
   3505  *freeBytesOut += stats.freeBytes;
   3506  *adminBytesOut += stats.adminBytes;
   3507 }
   3508 
   3509 static void GetChunkStats(BufferChunk* chunk, BufferAllocator::Stats& stats) {
   3510  stats.usedBytes += ChunkSize - FirstMediumAllocOffset;
   3511  stats.adminBytes += FirstMediumAllocOffset;
   3512  for (auto iter = chunk->smallRegionIter(); !iter.done(); iter.next()) {
   3513    SmallBufferRegion* region = iter.get();
   3514    if (region->hasNurseryOwnedAllocs()) {
   3515      stats.mixedSmallRegions++;
   3516    } else {
   3517      stats.tenuredSmallRegions++;
   3518    }
   3519    stats.adminBytes += FirstSmallAllocOffset;
   3520  }
   3521 }
   3522 
   3523 void BufferAllocator::getStats(Stats& stats) {
   3524  maybeMergeSweptData();
   3525 
   3526  MOZ_ASSERT(minorState == State::NotCollecting);
   3527 
   3528  for (BufferChunk* chunk : mixedChunks.ref()) {
   3529    stats.mixedChunks++;
   3530    GetChunkStats(chunk, stats);
   3531  }
   3532  for (auto chunk = availableMixedChunks.ref().chunkIter(); !chunk.done();
   3533       chunk.next()) {
   3534    stats.availableMixedChunks++;
   3535    GetChunkStats(chunk, stats);
   3536  }
   3537  for (BufferChunk* chunk : tenuredChunks.ref()) {
   3538    stats.tenuredChunks++;
   3539    GetChunkStats(chunk, stats);
   3540  }
   3541  for (auto chunk = availableTenuredChunks.ref().chunkIter(); !chunk.done();
   3542       chunk.next()) {
   3543    stats.availableTenuredChunks++;
   3544    GetChunkStats(chunk, stats);
   3545  }
   3546  for (const LargeBuffer* buffer : largeNurseryAllocs.ref()) {
   3547    stats.largeNurseryAllocs++;
   3548    stats.usedBytes += buffer->allocBytes();
   3549    stats.adminBytes += sizeof(LargeBuffer);
   3550  }
   3551  for (const LargeBuffer* buffer : largeTenuredAllocs.ref()) {
   3552    stats.largeTenuredAllocs++;
   3553    stats.usedBytes += buffer->allocBytes();
   3554    stats.adminBytes += sizeof(LargeBuffer);
   3555  }
   3556  for (auto region = freeLists.ref().freeRegionIter(); !region.done();
   3557       region.next()) {
   3558    stats.freeRegions++;
   3559    size_t size = region->size();
   3560    MOZ_ASSERT(stats.usedBytes >= size);
   3561    stats.usedBytes -= size;
   3562    stats.freeBytes += size;
   3563  }
   3564 }