tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

testGCAllocator.cpp (28919B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 */
      4 /* This Source Code Form is subject to the terms of the Mozilla Public
      5 * License, v. 2.0. If a copy of the MPL was not distributed with this
      6 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      7 
      8 #include "mozilla/MathAlgorithms.h"
      9 
     10 #include <cstdlib>
     11 
     12 #include "jsmath.h"
     13 
     14 #include "gc/Allocator.h"
     15 #include "gc/BufferAllocatorInternals.h"
     16 #include "gc/Memory.h"
     17 #include "gc/Nursery.h"
     18 #include "gc/Zone.h"
     19 #include "jsapi-tests/tests.h"
     20 #include "vm/PlainObject.h"
     21 
     22 #include "gc/BufferAllocator-inl.h"
     23 
     24 #if defined(XP_WIN)
     25 #  include "util/WindowsWrapper.h"
     26 #  include <psapi.h>
     27 #elif defined(__wasi__)
     28 // Nothing.
     29 #else
     30 #  include <algorithm>
     31 #  include <errno.h>
     32 #  include <sys/mman.h>
     33 #  include <sys/resource.h>
     34 #  include <sys/stat.h>
     35 #  include <sys/types.h>
     36 #  include <unistd.h>
     37 #endif
     38 
     39 #include "gc/BufferAllocator-inl.h"
     40 #include "gc/StoreBuffer-inl.h"
     41 #include "vm/JSContext-inl.h"
     42 #include "vm/JSObject-inl.h"
     43 
     44 using namespace js;
     45 using namespace js::gc;
     46 
     47 BEGIN_TEST(testGCAllocator) {
     48 #ifdef JS_64BIT
     49  // If we're using the scattershot allocator, this test does not apply.
     50  if (js::gc::UsingScattershotAllocator()) {
     51    return true;
     52  }
     53 #endif
     54 
     55  size_t PageSize = js::gc::SystemPageSize();
     56 
     57  /* Finish any ongoing background free activity. */
     58  js::gc::FinishGC(cx);
     59 
     60  bool growUp = false;
     61  CHECK(addressesGrowUp(&growUp));
     62 
     63  if (growUp) {
     64    return testGCAllocatorUp(PageSize);
     65  } else {
     66    return testGCAllocatorDown(PageSize);
     67  }
     68 }
     69 
// Sizes used to carve up the staging area in the position tests below.
static const size_t Chunk = 512 * 1024;         // One test "cell" of memory.
static const size_t Alignment = 2 * Chunk;      // Required allocation alignment.
static const int MaxTempChunks = 4096;          // Cap on space-filler mappings.
static const size_t StagingSize = 16 * Chunk;   // Contiguous test region size.
     74 
// Detect the OS allocation direction by mapping ChunksToTest chunks and
// counting how often consecutive mappings have increasing addresses.
// Sets |*resultOut| to true if addresses grow upward. Fails (returns false
// via CHECK) if fewer than ThresholdCount of the pairs agree, i.e. the
// direction is too inconsistent to trust.
bool addressesGrowUp(bool* resultOut) {
  /*
   * Try to detect whether the OS allocates memory in increasing or decreasing
   * address order by making several allocations and comparing the addresses.
   */

  static const unsigned ChunksToTest = 20;
  static const int ThresholdCount = 15;  // Out of ChunksToTest - 1 pairs.

  // Map all chunks before comparing, so each mapping is made with the
  // previous ones still in place.
  void* chunks[ChunksToTest];
  for (unsigned i = 0; i < ChunksToTest; i++) {
    chunks[i] = mapMemory(2 * Chunk);
    CHECK(chunks[i]);
  }

  int upCount = 0;
  int downCount = 0;

  for (unsigned i = 0; i < ChunksToTest - 1; i++) {
    if (chunks[i] < chunks[i + 1]) {
      upCount++;
    } else {
      downCount++;
    }
  }

  for (unsigned i = 0; i < ChunksToTest; i++) {
    unmapPages(chunks[i], 2 * Chunk);
  }

  /* Check results were mostly consistent. */
  CHECK(abs(upCount - downCount) >= ThresholdCount);

  *resultOut = upCount > downCount;

  return true;
}
    112 
    113 size_t offsetFromAligned(void* p) { return uintptr_t(p) % Alignment; }
    114 
// Selects which GC allocation path positionIsCorrect() exercises.
enum AllocType { UseNormalAllocator, UseLastDitchAllocator };
    116 
// Exercise aligned chunk allocation when OS addresses grow upward.
// Reserves an Alignment-aligned staging area, fills the address space below
// it with temporary chunks (so the OS is forced to allocate inside the
// staging area), then replays several pre-mapped layouts through
// positionIsCorrect() to verify where the GC allocator places a 2-chunk
// aligned allocation.
bool testGCAllocatorUp(const size_t PageSize) {
  // Over-allocate so that an aligned StagingSize region must fit inside.
  const size_t UnalignedSize = StagingSize + Alignment - PageSize;
  void* chunkPool[MaxTempChunks];
  // Allocate a contiguous chunk that we can partition for testing.
  void* stagingArea = mapMemory(UnalignedSize);
  if (!stagingArea) {
    return false;
  }
  // Ensure that the staging area is aligned.
  unmapPages(stagingArea, UnalignedSize);
  if (offsetFromAligned(stagingArea)) {
    const size_t Offset = offsetFromAligned(stagingArea);
    // Place the area at the lowest aligned address.
    stagingArea = (void*)(uintptr_t(stagingArea) + (Alignment - Offset));
  }
  // NOTE(review): the result of mapMemoryAt is ignored; if re-mapping fails
  // the position checks below fail rather than crash -- presumably intended.
  mapMemoryAt(stagingArea, StagingSize);
  // Make sure there are no available chunks below the staging area.
  int tempChunks;
  if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, false)) {
    return false;
  }
  // Unmap the staging area so we can set it up for testing.
  unmapPages(stagingArea, StagingSize);
  // Check that the first chunk is used if it is aligned.
  CHECK(positionIsCorrect("xxooxxx---------", stagingArea, chunkPool,
                          tempChunks));
  // Check that the first chunk is used if it can be aligned.
  CHECK(positionIsCorrect("x-ooxxx---------", stagingArea, chunkPool,
                          tempChunks));
  // Check that an aligned chunk after a single unalignable chunk is used.
  CHECK(positionIsCorrect("x--xooxxx-------", stagingArea, chunkPool,
                          tempChunks));
  // Check that we fall back to the slow path after two unalignable chunks.
  CHECK(positionIsCorrect("x--xx--xoo--xxx-", stagingArea, chunkPool,
                          tempChunks));
  // Check that we also fall back after an unalignable and an alignable chunk.
  CHECK(positionIsCorrect("x--xx---x-oo--x-", stagingArea, chunkPool,
                          tempChunks));
  // Check that the last ditch allocator works as expected.
  CHECK(positionIsCorrect("x--xx--xx-oox---", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));
  // Check that the last ditch allocator can deal with naturally aligned chunks.
  CHECK(positionIsCorrect("x--xx--xoo------", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));

  // Clean up.
  while (--tempChunks >= 0) {
    unmapPages(chunkPool[tempChunks], 2 * Chunk);
  }
  return true;
}
    168 
// Mirror image of testGCAllocatorUp for systems where OS addresses grow
// downward: the staging area is placed at the highest aligned address
// inside the over-allocated region, the space *above* it is filled, and the
// layout strings are reversed accordingly.
bool testGCAllocatorDown(const size_t PageSize) {
  // Over-allocate so that an aligned StagingSize region must fit inside.
  const size_t UnalignedSize = StagingSize + Alignment - PageSize;
  void* chunkPool[MaxTempChunks];
  // Allocate a contiguous chunk that we can partition for testing.
  void* stagingArea = mapMemory(UnalignedSize);
  if (!stagingArea) {
    return false;
  }
  // Ensure that the staging area is aligned.
  unmapPages(stagingArea, UnalignedSize);
  if (offsetFromAligned(stagingArea)) {
    void* stagingEnd = (void*)(uintptr_t(stagingArea) + UnalignedSize);
    const size_t Offset = offsetFromAligned(stagingEnd);
    // Place the area at the highest aligned address.
    stagingArea = (void*)(uintptr_t(stagingEnd) - Offset - StagingSize);
  }
  // NOTE(review): the result of mapMemoryAt is ignored; if re-mapping fails
  // the position checks below fail rather than crash -- presumably intended.
  mapMemoryAt(stagingArea, StagingSize);
  // Make sure there are no available chunks above the staging area.
  int tempChunks;
  if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, true)) {
    return false;
  }
  // Unmap the staging area so we can set it up for testing.
  unmapPages(stagingArea, StagingSize);
  // Check that the first chunk is used if it is aligned.
  CHECK(positionIsCorrect("---------xxxooxx", stagingArea, chunkPool,
                          tempChunks));
  // Check that the first chunk is used if it can be aligned.
  CHECK(positionIsCorrect("---------xxxoo-x", stagingArea, chunkPool,
                          tempChunks));
  // Check that an aligned chunk after a single unalignable chunk is used.
  CHECK(positionIsCorrect("-------xxxoox--x", stagingArea, chunkPool,
                          tempChunks));
  // Check that we fall back to the slow path after two unalignable chunks.
  CHECK(positionIsCorrect("-xxx--oox--xx--x", stagingArea, chunkPool,
                          tempChunks));
  // Check that we also fall back after an unalignable and an alignable chunk.
  CHECK(positionIsCorrect("-x--oo-x---xx--x", stagingArea, chunkPool,
                          tempChunks));
  // Check that the last ditch allocator works as expected.
  CHECK(positionIsCorrect("---xoo-xx--xx--x", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));
  // Check that the last ditch allocator can deal with naturally aligned chunks.
  CHECK(positionIsCorrect("------oox--xx--x", stagingArea, chunkPool,
                          tempChunks, UseLastDitchAllocator));

  // Clean up.
  while (--tempChunks >= 0) {
    unmapPages(chunkPool[tempChunks], 2 * Chunk);
  }
  return true;
}
    221 
// Map 2*Chunk-sized chunks until the OS starts handing out addresses on the
// far side of |stagingArea| (below it if addresses grow down, above it if
// they grow up), so later allocations are forced into the staging area.
// The mapped chunks are returned in |chunkPool| with their count in
// |tempChunks|; the caller unmaps them when done.
// Note: |(a < b) ^ addressesGrowDown| flips the comparison's sense per the
// growth direction ('<' binds tighter than '^'). Returns false and cleans up
// everything if the observed layout is inconsistent with the direction.
bool fillSpaceBeforeStagingArea(int& tempChunks, void* stagingArea,
                                void** chunkPool, bool addressesGrowDown) {
  // Make sure there are no available chunks before the staging area.
  tempChunks = 0;
  chunkPool[tempChunks++] = mapMemory(2 * Chunk);
  while (tempChunks < MaxTempChunks && chunkPool[tempChunks - 1] &&
         (chunkPool[tempChunks - 1] < stagingArea) ^ addressesGrowDown) {
    chunkPool[tempChunks++] = mapMemory(2 * Chunk);
    if (!chunkPool[tempChunks - 1]) {
      break;  // We already have our staging area, so OOM here is okay.
    }
    if ((chunkPool[tempChunks - 1] < chunkPool[tempChunks - 2]) ^
        addressesGrowDown) {
      break;  // The address growth direction is inconsistent!
    }
  }
  // OOM also means success in this case.
  if (!chunkPool[tempChunks - 1]) {
    --tempChunks;
    return true;
  }
  // Bail if we can't guarantee the right address space layout.
  if ((chunkPool[tempChunks - 1] < stagingArea) ^ addressesGrowDown ||
      (tempChunks > 1 &&
       (chunkPool[tempChunks - 1] < chunkPool[tempChunks - 2]) ^
           addressesGrowDown)) {
    while (--tempChunks >= 0) {
      unmapPages(chunkPool[tempChunks], 2 * Chunk);
    }
    unmapPages(stagingArea, StagingSize);
    return false;
  }
  return true;
}
    256 
// Set up the memory layout described by |str| inside the staging area at
// |base|, ask the GC allocator (normal or last-ditch, per |allocator|) for a
// 2-chunk aligned allocation, and verify it lands at the position marked 'o'.
// All mappings made here are undone before returning; on failure the
// caller's chunkPool is also released because CHECK returns immediately.
bool positionIsCorrect(const char* str, void* base, void** chunkPool,
                       int tempChunks,
                       AllocType allocator = UseNormalAllocator) {
  // str represents a region of memory, with each character representing a
  // region of Chunk bytes. str should contain only x, o and -, where
  // x = mapped by the test to set up the initial conditions,
  // o = mapped by the GC allocator, and
  // - = unmapped.
  // base should point to a region of contiguous free memory
  // large enough to hold strlen(str) chunks of Chunk bytes.
  int len = strlen(str);
  int i;
  // Find the index of the desired address.
  for (i = 0; i < len && str[i] != 'o'; ++i);
  void* desired = (void*)(uintptr_t(base) + i * Chunk);
  // Map the regions indicated by str.
  for (i = 0; i < len; ++i) {
    if (str[i] == 'x') {
      mapMemoryAt((void*)(uintptr_t(base) + i * Chunk), Chunk);
    }
  }
  // Allocate using the GC's allocator.
  void* result;
  if (allocator == UseNormalAllocator) {
    result = js::gc::MapAlignedPages(2 * Chunk, Alignment);
  } else {
    result = js::gc::TestMapAlignedPagesLastDitch(2 * Chunk, Alignment);
  }
  // Clean up the mapped regions.
  if (result) {
    js::gc::UnmapPages(result, 2 * Chunk);
  }
  // i == len here, so this walks every 'x' region in reverse.
  for (--i; i >= 0; --i) {
    if (str[i] == 'x') {
      unmapPages((void*)(uintptr_t(base) + i * Chunk), Chunk);
    }
  }
  // CHECK returns, so clean up on failure.
  if (result != desired) {
    while (--tempChunks >= 0) {
      unmapPages(chunkPool[tempChunks], 2 * Chunk);
    }
  }
  return result == desired;
}
    302 
    303 #if defined(XP_WIN)
    304 
// Windows: commit read/write pages at exactly |desired|; VirtualAlloc
// returns nullptr (rather than another address) if that range is taken.
void* mapMemoryAt(void* desired, size_t length) {
  return VirtualAlloc(desired, length, MEM_COMMIT | MEM_RESERVE,
                      PAGE_READWRITE);
}
    309 
// Windows: commit |length| bytes of read/write memory anywhere, or nullptr.
void* mapMemory(size_t length) {
  return VirtualAlloc(nullptr, length, MEM_COMMIT | MEM_RESERVE,
                      PAGE_READWRITE);
}
    314 
// Windows: release a whole VirtualAlloc region. |size| is unused because
// MEM_RELEASE requires a zero size and frees the entire original region.
void unmapPages(void* p, size_t size) {
  MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
}
    318 
    319 #elif defined(__wasi__)
    320 
    321 void* mapMemoryAt(void* desired, size_t length) { return nullptr; }
    322 
// WASI: emulate an anonymous mapping with page-aligned, zeroed heap memory.
void* mapMemory(size_t length) {
  void* addr = nullptr;
  if (int err = posix_memalign(&addr, js::gc::SystemPageSize(), length)) {
    MOZ_ASSERT(err == ENOMEM);  // The only failure we expect here.
  }
  MOZ_ASSERT(addr);
  // mmap'd anonymous memory is zero-filled; match that.
  memset(addr, 0, length);
  return addr;
}
    332 
// WASI: the "mapping" came from posix_memalign, so free() releases it.
void unmapPages(void* p, size_t size) { free(p); }
    334 
    335 #else
    336 
// POSIX: map read/write anonymous pages at exactly |desired|. mmap may
// succeed at a different address; that counts as failure here, so the stray
// mapping is unmapped and nullptr returned.
void* mapMemoryAt(void* desired, size_t length) {
  void* region = mmap(desired, length, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANON, -1, 0);
  if (region == MAP_FAILED) {
    return nullptr;
  }
  if (region != desired) {
    if (munmap(region, length)) {
      // munmap should only fail transiently due to memory pressure.
      MOZ_RELEASE_ASSERT(errno == ENOMEM);
    }
    return nullptr;
  }
  return region;
}
    351 
    352 void* mapMemory(size_t length) {
    353  int prot = PROT_READ | PROT_WRITE;
    354  int flags = MAP_PRIVATE | MAP_ANON;
    355  int fd = -1;
    356  off_t offset = 0;
    357  void* region = mmap(nullptr, length, prot, flags, fd, offset);
    358  if (region == MAP_FAILED) {
    359    return nullptr;
    360  }
    361  return region;
    362 }
    363 
// POSIX: unmap a region previously returned by mapMemory/mapMemoryAt.
void unmapPages(void* p, size_t size) {
  if (munmap(p, size)) {
    // munmap should only fail transiently due to memory pressure.
    MOZ_RELEASE_ASSERT(errno == ENOMEM);
  }
}
    369 
    370 #endif
    371 
    372 END_TEST(testGCAllocator)
    373 
    374 class AutoAddGCRootsTracer {
    375  JSContext* cx_;
    376  JSTraceDataOp traceOp_;
    377  void* data_;
    378 
    379 public:
    380  AutoAddGCRootsTracer(JSContext* cx, JSTraceDataOp traceOp, void* data)
    381      : cx_(cx), traceOp_(traceOp), data_(data) {
    382    JS_AddExtraGCRootsTracer(cx, traceOp, data);
    383  }
    384  ~AutoAddGCRootsTracer() { JS_RemoveExtraGCRootsTracer(cx_, traceOp_, data_); }
    385 };
    386 
// Request sizes exercised by the buffer allocator tests: powers of two,
// just-off-power-of-two values, and sizes straddling the small/medium/large
// allocation boundaries (see MaxMediumAllocSize entries).
static size_t SomeAllocSizes[] = {16,
                                  17,
                                  31,
                                  32,
                                  100,
                                  200,
                                  240,
                                  256,
                                  1000,
                                  3000,
                                  3968,
                                  4096,
                                  5000,
                                  16 * 1024,
                                  100 * 1024,
                                  255 * 1024,
                                  257 * 1024,
                                  600 * 1024,
                                  MaxMediumAllocSize,
                                  MaxMediumAllocSize + 1,
                                  1020 * 1024,
                                  1 * 1024 * 1024,
                                  3 * 1024 * 1024,
                                  10 * 1024 * 1024};
    411 
    412 static void WriteAllocData(void* alloc, size_t bytes) {
    413  auto* data = reinterpret_cast<uint32_t*>(alloc);
    414  size_t length = std::min(bytes / sizeof(uint32_t), size_t(4096));
    415  for (size_t i = 0; i < length; i++) {
    416    data[i] = i;
    417  }
    418 }
    419 
    420 static bool CheckAllocData(void* alloc, size_t bytes) {
    421  const auto* data = reinterpret_cast<uint32_t*>(alloc);
    422  size_t length = std::min(bytes / sizeof(uint32_t), size_t(4096));
    423  for (size_t i = 0; i < length; i++) {
    424    if (data[i] != i) {
    425      return false;
    426    }
    427  }
    428  return true;
    429 }
    430 
// Minimal native object that keeps a single buffer-allocator allocation
// alive: the buffer pointer is stored as a PrivateValue in reserved slot 0
// and traced (and possibly updated) by trace().
class BufferHolderObject : public NativeObject {
 public:
  static const JSClass class_;

  static BufferHolderObject* create(JSContext* cx);

  // Store |buffer| (may be nullptr to drop the reference) in slot 0.
  void setBuffer(void* buffer);

 private:
  static const JSClassOps classOps_;

  static void trace(JSTracer* trc, JSObject* obj);
};
    444 
// One reserved slot holds the buffer pointer; the only non-null class hook
// is the trace hook, which keeps the buffer alive across GCs.
const JSClass BufferHolderObject::class_ = {"BufferHolderObject",
                                            JSCLASS_HAS_RESERVED_SLOTS(1),
                                            &BufferHolderObject::classOps_};

const JSClassOps BufferHolderObject::classOps_ = {
    nullptr,                    // addProperty
    nullptr,                    // delProperty
    nullptr,                    // enumerate
    nullptr,                    // newEnumerate
    nullptr,                    // resolve
    nullptr,                    // mayResolve
    nullptr,                    // finalize
    nullptr,                    // call
    nullptr,                    // construct
    BufferHolderObject::trace,  // trace
};
    461 
/* static */
// Create a holder with no buffer attached (slot 0 is initialized to a null
// PrivateValue so trace() has something valid to read). Returns nullptr on
// allocation failure.
BufferHolderObject* BufferHolderObject::create(JSContext* cx) {
  NativeObject* obj = NewObjectWithGivenProto(cx, &class_, nullptr);
  if (!obj) {
    return nullptr;
  }

  BufferHolderObject* holder = &obj->as<BufferHolderObject>();
  holder->setBuffer(nullptr);
  return holder;
}
    473 
// Store the buffer pointer in reserved slot 0 as a PrivateValue.
void BufferHolderObject::setBuffer(void* buffer) {
  setFixedSlot(0, JS::PrivateValue(buffer));
}
    477 
/* static */
// Trace hook: mark the held buffer. TraceBufferEdge may relocate the buffer
// (e.g. during minor GC), in which case the slot is updated with the new
// address.
void BufferHolderObject::trace(JSTracer* trc, JSObject* obj) {
  NativeObject* holder = &obj->as<NativeObject>();
  void* buffer = holder->getFixedSlot(0).toPrivate();
  if (buffer) {
    TraceBufferEdge(trc, obj, &buffer, "BufferHolderObject buffer");
    if (buffer != holder->getFixedSlot(0).toPrivate()) {
      holder->setFixedSlot(0, JS::PrivateValue(buffer));
    }
  }
}
    489 
namespace js::gc {
// Classify a buffer allocation by size class for the tests:
// 0 = small, 1 = medium, 2 = large. Asserts if |alloc| is none of these.
// Defined in this namespace so it can use BufferAllocator internals.
size_t TestGetAllocSizeKind(void* alloc) {
  if (BufferAllocator::IsLargeAlloc(alloc)) {
    return 2;
  }
  if (BufferAllocator::IsMediumAlloc(alloc)) {
    return 1;
  }
  MOZ_RELEASE_ASSERT(BufferAllocator::IsSmallAlloc(alloc));
  return 0;
}
}  // namespace js::gc
    502 
// Exercise the public BufferAllocator API over a range of sizes: good-size
// rounding, allocation, size/ownership/kind predicates, data integrity
// across GC, and that heap accounting returns to its baseline after free.
BEGIN_TEST(testBufferAllocator_API) {
  AutoLeaveZeal leaveZeal(cx);

  Rooted<BufferHolderObject*> holder(cx, BufferHolderObject::create(cx));
  CHECK(holder);

  // Shrinking GC to establish a stable heap-size baseline.
  JS::NonIncrementalGC(cx, JS::GCOptions::Shrink, JS::GCReason::API);

  Zone* zone = cx->zone();
  size_t initialGCHeapSize = zone->gcHeapSize.bytes();
  size_t initialMallocHeapSize = zone->mallocHeapSize.bytes();

  for (size_t requestSize : SomeAllocSizes) {
    size_t goodSize = GetGoodAllocSize(requestSize);

    // Report how much each request is rounded up by.
    size_t wastage = goodSize - requestSize;
    double fraction = double(wastage) / double(goodSize);
    fprintf(stderr, "%8zu -> %8zu %7zu (%3.1f%%)\n", requestSize, goodSize,
            wastage, fraction * 100.0);

    // Good sizes never shrink a request, waste less than 100% for
    // non-trivial sizes, and are a fixed point of GetGoodAllocSize.
    CHECK(goodSize >= requestSize);
    if (requestSize > 64) {
      CHECK(goodSize < 2 * requestSize);
    }
    CHECK(GetGoodAllocSize(goodSize) == goodSize);

    // Check we don't waste space requesting 1MB aligned sizes.
    if (requestSize >= ChunkSize) {
      CHECK(goodSize == RoundUp(requestSize, ChunkSize));
    }

    for (bool nurseryOwned : {true, false}) {
      void* alloc = AllocBuffer(zone, requestSize, nurseryOwned);
      CHECK(alloc);

      CHECK(IsBufferAlloc(alloc));
      size_t actualSize = GetAllocSize(zone, alloc);
      CHECK(actualSize == GetGoodAllocSize(requestSize));

      CHECK(IsNurseryOwned(zone, alloc) == nurseryOwned);

      // The size-class predicate must agree with the size boundaries.
      size_t expectedKind;
      if (goodSize >= MinLargeAllocSize) {
        expectedKind = 2;
      } else if (goodSize >= MinMediumAllocSize) {
        expectedKind = 1;
      } else {
        expectedKind = 0;
      }
      CHECK(TestGetAllocSizeKind(alloc) == expectedKind);

      WriteAllocData(alloc, actualSize);
      CHECK(CheckAllocData(alloc, actualSize));

      CHECK(!IsBufferAllocMarkedBlack(zone, alloc));

      CHECK(cx->runtime()->gc.isPointerWithinBufferAlloc(alloc));
      // Interior pointers must also be recognized.
      void* ptr = reinterpret_cast<void*>(uintptr_t(alloc) + 8);
      CHECK(cx->runtime()->gc.isPointerWithinBufferAlloc(ptr));

      holder->setBuffer(alloc);
      if (nurseryOwned) {
        // Hack to force minor GC. We've marked our alloc 'nursery owned' even
        // though that isn't true.
        NewPlainObject(cx);
        // Hack to force marking our holder.
        cx->runtime()->gc.storeBuffer().putWholeCell(holder);
      }
      JS_GC(cx);

      // Post GC marking state depends on whether allocation is small or not.
      // Small allocations will remain marked whereas others will have their
      // mark state cleared.

      // The data must survive the GC while the holder references it.
      CHECK(CheckAllocData(alloc, actualSize));

      // Drop the reference and let the GC sweep the buffer.
      holder->setBuffer(nullptr);
      JS_GC(cx);

      // Heap accounting must return to its pre-allocation baseline.
      CHECK(zone->gcHeapSize.bytes() == initialGCHeapSize);
      CHECK(zone->mallocHeapSize.bytes() == initialMallocHeapSize);
    }
  }

  return true;
}
END_TEST(testBufferAllocator_API)
    590 
// Exercise ReallocBuffer: realloc from nullptr (plain alloc), realloc to the
// same size (must be in place), grow, shrink, then free — checking ownership,
// data preservation, and that heap accounting returns to its baseline.
BEGIN_TEST(testBufferAllocator_realloc) {
  AutoLeaveZeal leaveZeal(cx);

  Rooted<BufferHolderObject*> holder(cx, BufferHolderObject::create(cx));
  CHECK(holder);

  // Shrinking GC to establish a stable heap-size baseline.
  JS::NonIncrementalGC(cx, JS::GCOptions::Shrink, JS::GCReason::API);

  Zone* zone = cx->zone();
  size_t initialGCHeapSize = zone->gcHeapSize.bytes();
  size_t initialMallocHeapSize = zone->mallocHeapSize.bytes();

  for (bool nurseryOwned : {false, true}) {
    for (size_t requestSize : SomeAllocSizes) {
      // Small nursery-owned buffers would live in the nursery proper; skip.
      if (nurseryOwned && requestSize < Nursery::MaxNurseryBufferSize) {
        continue;
      }

      // Realloc nullptr.
      void* alloc = ReallocBuffer(zone, nullptr, requestSize, nurseryOwned);
      CHECK(alloc);
      CHECK(IsBufferAlloc(alloc));
      CHECK(IsNurseryOwned(zone, alloc) == nurseryOwned);
      size_t actualSize = GetAllocSize(zone, alloc);
      WriteAllocData(alloc, actualSize);
      holder->setBuffer(alloc);

      // Realloc to same size.
      void* prev = alloc;
      alloc = ReallocBuffer(zone, alloc, requestSize, nurseryOwned);
      CHECK(alloc);
      CHECK(alloc == prev);  // Same-size realloc must not move the buffer.
      CHECK(actualSize == GetAllocSize(zone, alloc));
      CHECK(IsNurseryOwned(zone, alloc) == nurseryOwned);
      CHECK(CheckAllocData(alloc, actualSize));

      // Grow. Data up to the old size must be preserved.
      size_t newSize = requestSize + requestSize / 2;
      alloc = ReallocBuffer(zone, alloc, newSize, nurseryOwned);
      CHECK(alloc);
      CHECK(IsNurseryOwned(zone, alloc) == nurseryOwned);
      CHECK(CheckAllocData(alloc, actualSize));

      // Shrink. Re-read the (now smaller) actual size before checking data.
      newSize = newSize / 2;
      alloc = ReallocBuffer(zone, alloc, newSize, nurseryOwned);
      CHECK(alloc);
      CHECK(IsNurseryOwned(zone, alloc) == nurseryOwned);
      actualSize = GetAllocSize(zone, alloc);
      CHECK(CheckAllocData(alloc, actualSize));

      // Free.
      holder->setBuffer(nullptr);
      FreeBuffer(zone, alloc);
    }

    NewPlainObject(cx);  // Force minor GC.
    JS_GC(cx);
  }

  // Heap accounting must return to its pre-test baseline.
  CHECK(zone->gcHeapSize.bytes() == initialGCHeapSize);
  CHECK(zone->mallocHeapSize.bytes() == initialMallocHeapSize);

  return true;
}
END_TEST(testBufferAllocator_realloc)
    657 
// Check which realloc transitions are performed in place (same pointer) per
// size class and platform, via the TestRealloc helper below.
BEGIN_TEST(testBufferAllocator_reallocInPlace) {
  AutoLeaveZeal leaveZeal(cx);

  Rooted<BufferHolderObject*> holder(cx, BufferHolderObject::create(cx));
  CHECK(holder);

  // Shrinking GC to establish a stable heap-size baseline.
  JS::NonIncrementalGC(cx, JS::GCOptions::Shrink, JS::GCReason::API);

  Zone* zone = cx->zone();
  size_t initialGCHeapSize = zone->gcHeapSize.bytes();
  size_t initialMallocHeapSize = zone->mallocHeapSize.bytes();

  // Check that we resize some buffers in place if the sizes allow.

  // Grow medium -> medium: supported if free space after allocation
  // We should be able to grow in place if it's the last thing allocated.
  // *** If this starts failing we may need to allocate a new zone ***
  size_t bytes = MinMediumAllocSize;
  CHECK(TestRealloc(bytes, bytes * 2, true));

  // Shrink medium -> medium: supported
  CHECK(TestRealloc(bytes * 2, bytes, true));

  // Grow large -> large: not supported
  bytes = MinLargeAllocSize;
  CHECK(TestRealloc(bytes, 2 * bytes, false));

  // Shrink large -> large: supported on non-Windows platforms
#ifdef XP_WIN
  CHECK(TestRealloc(2 * bytes, bytes, false));
#else
  CHECK(TestRealloc(2 * bytes, bytes, true));
#endif

  // Heap accounting must return to its pre-test baseline.
  JS_GC(cx);
  CHECK(zone->gcHeapSize.bytes() == initialGCHeapSize);
  CHECK(zone->mallocHeapSize.bytes() == initialMallocHeapSize);

  return true;
}
    698 
// Allocate |fromSize| bytes, realloc to |toSize|, and check whether the
// buffer moved. |expectedInPlace| is the expected outcome: true means the
// realloc must return the same pointer, false means it must move.
bool TestRealloc(size_t fromSize, size_t toSize, bool expectedInPlace) {
  fprintf(stderr, "TestRealloc %zu -> %zu %u\n", fromSize, toSize,
          unsigned(expectedInPlace));

  Zone* zone = cx->zone();
  void* alloc = AllocBuffer(zone, fromSize, false);
  CHECK(alloc);

  void* newAlloc = ReallocBuffer(zone, alloc, toSize, false);
  CHECK(newAlloc);

  if (expectedInPlace) {
    CHECK(newAlloc == alloc);
  } else {
    CHECK(newAlloc != alloc);
  }

  FreeBuffer(zone, newAlloc);
  return true;
}
END_TEST(testBufferAllocator_reallocInPlace)
    720 
namespace js::gc {
// Test shim for the aligned medium-allocation path; lives in this namespace
// to reach the zone's BufferAllocator. |false| = not nursery owned.
void* TestAllocAligned(Zone* zone, size_t bytes) {
  return zone->bufferAllocator.allocMediumAligned(bytes, false);
}
}  // namespace js::gc
    726 
// Check aligned medium allocations: for each power-of-two size in the medium
// range the result must be aligned to its own size and not rounded up.
BEGIN_TEST(testBufferAllocator_alignedAlloc) {
  AutoLeaveZeal leaveZeal(cx);

  Rooted<BufferHolderObject*> holder(cx, BufferHolderObject::create(cx));
  CHECK(holder);

  // Shrinking GC to establish a stable heap-size baseline.
  JS::NonIncrementalGC(cx, JS::GCOptions::Shrink, JS::GCReason::API);

  Zone* zone = cx->zone();
  size_t initialGCHeapSize = zone->gcHeapSize.bytes();
  size_t initialMallocHeapSize = zone->mallocHeapSize.bytes();

  for (size_t requestSize = MinMediumAllocSize;
       requestSize <= MaxAlignedAllocSize; requestSize *= 2) {
    void* alloc = TestAllocAligned(zone, requestSize);
    CHECK(alloc);
    // Aligned allocations are naturally aligned to their own size.
    CHECK((uintptr_t(alloc) % requestSize) == 0);

    CHECK(IsBufferAlloc(alloc));
    size_t actualSize = GetAllocSize(zone, alloc);
    CHECK(actualSize == requestSize);  // No rounding for power-of-two sizes.

    CHECK(!IsNurseryOwned(zone, alloc));
    FreeBuffer(zone, alloc);
  }

  // Heap accounting must return to its pre-test baseline.
  JS_GC(cx);
  CHECK(zone->gcHeapSize.bytes() == initialGCHeapSize);
  CHECK(zone->mallocHeapSize.bytes() == initialMallocHeapSize);

  return true;
}
END_TEST(testBufferAllocator_alignedAlloc)
    760 
BEGIN_TEST(testBufferAllocator_rooting) {
  // Exercise RootedBuffer API to hold tenured-owned buffers live before
  // attaching them to a GC thing.

  const size_t bytes = 12 * 1024;  // Large enough to affect memory accounting.

  Zone* zone = cx->zone();
  size_t initialMallocHeapSize = zone->mallocHeapSize.bytes();

  auto* buffer = static_cast<uint8_t*>(gc::AllocBuffer(zone, bytes, false));
  CHECK(buffer);

  // Transfer the only reference into the root; the raw pointer is cleared so
  // the RootedBuffer alone keeps the allocation alive.
  RootedBuffer<uint8_t> root(cx, buffer);
  buffer = nullptr;
  CHECK(root);
  CHECK(zone->mallocHeapSize.bytes() > initialMallocHeapSize);

  // Rooted buffer and its contents must survive a full GC.
  memset(root, 42, bytes);
  JS_GC(cx);
  CHECK(zone->mallocHeapSize.bytes() > initialMallocHeapSize);
  for (size_t i = 0; i < bytes; i++) {
    CHECK(root[i] == 42);
  }

  // Handle and MutableHandle views observe the same buffer.
  HandleBuffer<uint8_t> handle(root);
  CHECK(handle[0] == 42);

  MutableHandleBuffer<uint8_t> mutableHandle(&root);
  CHECK(mutableHandle[0] == 42);
  // Clearing through the mutable handle clears the root (and thus the
  // read-only handle that aliases it).
  mutableHandle.set(nullptr);
  CHECK(!root);
  CHECK(!handle);

  // With no remaining reference, GC sweeps the buffer and accounting
  // returns to baseline.
  JS_GC(cx);
  CHECK(zone->mallocHeapSize.bytes() == initialMallocHeapSize);

  return true;
}
END_TEST(testBufferAllocator_rooting)
    800 
    801 BEGIN_TEST(testBufferAllocator_predicatesOnOtherAllocs) {
    802  if (!cx->runtime()->gc.nursery().isEnabled()) {
    803    fprintf(stderr, "Skipping test as nursery is disabled.\n");
    804  }
    805 
    806  AutoLeaveZeal leaveZeal(cx);
    807 
    808  JS_GC(cx);
    809  auto [buffer, isMalloced] = cx->nursery().allocNurseryOrMallocBuffer(
    810      cx->zone(), 256, js::MallocArena);
    811  CHECK(buffer);
    812  CHECK(!isMalloced);
    813  CHECK(cx->nursery().isInside(buffer));
    814  CHECK(!IsBufferAlloc(buffer));
    815 
    816  RootedObject obj(cx, NewPlainObject(cx));
    817  CHECK(obj);
    818  CHECK(IsInsideNursery(obj));
    819  CHECK(!IsBufferAlloc(obj));
    820 
    821  JS_GC(cx);
    822  CHECK(!IsInsideNursery(obj));
    823  CHECK(!IsBufferAlloc(obj));
    824 
    825  return true;
    826 }
    827 END_TEST(testBufferAllocator_predicatesOnOtherAllocs)
    828 
    829 BEGIN_TEST(testBufferAllocator_stress) {
    830  AutoLeaveZeal leaveZeal(cx);
    831 
    832  unsigned seed = unsigned(GenerateRandomSeed());
    833  fprintf(stderr, "Random seed: 0x%x\n", seed);
    834  std::srand(seed);
    835 
    836  Rooted<PlainObject*> holder(
    837      cx, NewPlainObjectWithAllocKind(cx, gc::AllocKind::OBJECT2));
    838  CHECK(holder);
    839 
    840  JS::NonIncrementalGC(cx, JS::GCOptions::Shrink, JS::GCReason::API);
    841  Zone* zone = cx->zone();
    842 
    843  size_t initialGCHeapSize = zone->gcHeapSize.bytes();
    844  size_t initialMallocHeapSize = zone->mallocHeapSize.bytes();
    845 
    846  void* liveAllocs[MaxLiveAllocs];
    847  mozilla::PodZero(&liveAllocs);
    848 
    849  AutoGCParameter setMaxHeap(cx, JSGC_MAX_BYTES, uint32_t(-1));
    850  AutoGCParameter param1(cx, JSGC_INCREMENTAL_GC_ENABLED, true);
    851  AutoGCParameter param2(cx, JSGC_PER_ZONE_GC_ENABLED, true);
    852 
    853 #ifdef JS_GC_ZEAL
    854  JS::SetGCZeal(cx, 10, 50);
    855 #endif
    856 
    857  holder->initFixedSlot(0, JS::PrivateValue(&liveAllocs));
    858  AutoAddGCRootsTracer addTracer(cx, traceAllocs, &holder);
    859 
    860  for (size_t i = 0; i < Iterations; i++) {
    861    size_t index = std::rand() % MaxLiveAllocs;
    862    size_t bytes = randomSize();
    863 
    864    if (!liveAllocs[index]) {
    865      if ((std::rand() % 4) == 0 && bytes >= MinMediumAllocSize &&
    866          bytes <= ChunkSize / 4) {
    867        bytes = mozilla::RoundUpPow2(bytes);
    868        liveAllocs[index] = TestAllocAligned(zone, bytes);
    869      } else {
    870        liveAllocs[index] = AllocBuffer(zone, bytes, false);
    871      }
    872    } else {
    873      void* ptr = ReallocBuffer(zone, liveAllocs[index], bytes, false);
    874      if (ptr) {
    875        liveAllocs[index] = ptr;
    876      }
    877    }
    878 
    879    index = std::rand() % MaxLiveAllocs;
    880    if (liveAllocs[index]) {
    881      if (std::rand() % 1) {
    882        FreeBuffer(zone, liveAllocs[index]);
    883      }
    884      liveAllocs[index] = nullptr;
    885    }
    886 
    887    // Trigger zeal GCs.
    888    NewPlainObject(cx);
    889 
    890    if ((i % 500) == 0) {
    891      // Trigger extra minor GCs.
    892      cx->minorGC(JS::GCReason::API);
    893    }
    894  }
    895 
    896  mozilla::PodArrayZero(liveAllocs);
    897 
    898 #ifdef JS_GC_ZEAL
    899  JS::SetGCZeal(cx, 0, 100);
    900 #endif
    901 
    902  JS::PrepareForFullGC(cx);
    903  JS::NonIncrementalGC(cx, JS::GCOptions::Shrink, JS::GCReason::API);
    904 
    905  CHECK(zone->gcHeapSize.bytes() == initialGCHeapSize);
    906  CHECK(zone->mallocHeapSize.bytes() == initialMallocHeapSize);
    907 
    908  return true;
    909 }
    910 
// Stress-test parameters: loop count and maximum simultaneous allocations.
static constexpr size_t Iterations = 50000;
static constexpr size_t MaxLiveAllocs = 500;
    913 
// Return a random allocation size between 2^4 and 2^22 bytes, distributed
// uniformly in log2 space so small and large sizes are equally likely.
static size_t randomSize() {
  constexpr size_t Log2MinSize = 4;
  constexpr size_t Log2MaxSize = 22;  // Up to 4MB.

  double r = double(std::rand()) / double(RAND_MAX);
  double log2size = (Log2MaxSize - Log2MinSize) * r + Log2MinSize;
  MOZ_ASSERT(log2size <= Log2MaxSize);
  return size_t(std::pow(2.0, log2size));
}
    923 
// Extra GC roots tracer for the stress test: |data| is the rooted holder
// object whose slot 0 points at the liveAllocs array; trace every non-null
// entry (TraceBufferEdge may update the pointers in place if buffers move).
static void traceAllocs(JSTracer* trc, void* data) {
  auto& holder = *static_cast<Rooted<PlainObject*>*>(data);
  auto* liveAllocs = static_cast<void**>(holder->getFixedSlot(0).toPrivate());
  for (size_t i = 0; i < MaxLiveAllocs; i++) {
    void** bufferp = &liveAllocs[i];
    if (*bufferp) {
      TraceBufferEdge(trc, holder, bufferp, "test buffer");
    }
  }
}
END_TEST(testBufferAllocator_stress)