tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Memory.cpp (40175B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "gc/Memory.h"
      8 
      9 #include "mozilla/MathAlgorithms.h"
     10 #include "mozilla/RandomNum.h"
     11 #include "mozilla/TaggedAnonymousMemory.h"
     12 
     13 #include "jit/JitOptions.h"
     14 #include "js/HeapAPI.h"
     15 #include "js/Utility.h"
     16 #include "vm/JSContext.h"
     17 
     18 #ifdef MOZ_MEMORY
     19 #  include "mozmemory_stall.h"
     20 #endif
     21 
     22 #include "util/Memory.h"
     23 
     24 #ifdef XP_WIN
     25 
     26 #  include "util/WindowsWrapper.h"
     27 #  include <psapi.h>
     28 
     29 #else
     30 
     31 #  include <algorithm>
     32 #  include <errno.h>
     33 #  include <unistd.h>
     34 
     35 #  ifdef XP_LINUX
     36 // For SYS_gettid. Older glibc does not provide gettid() wrapper.
     37 #    include <sys/syscall.h>
     38 #  endif
     39 
     40 #  if !defined(__wasi__)
     41 #    include <sys/mman.h>
     42 #    include <sys/resource.h>
     43 #    include <sys/stat.h>
     44 #    include <sys/types.h>
     45 #  endif  // !defined(__wasi__)
     46 
     47 #endif  // !XP_WIN
     48 
#if defined(XP_WIN) && !defined(MOZ_MEMORY)
namespace mozilla {
// On Windows platforms, mozjemalloc provides MozVirtualAlloc, a version of
// VirtualAlloc that will sleep and retry on failure. This is a shim for when
// that function is not available: it forwards directly to VirtualAlloc, so
// there is no stall-and-retry behavior in non-MOZ_MEMORY builds.
MOZ_ALWAYS_INLINE void* MozVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize,
                                        DWORD flAllocationType,
                                        DWORD flProtect) {
  return VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
}
}  // namespace mozilla
#endif  // defined(XP_WIN) && !defined(MOZ_MEMORY)
     61 
namespace js::gc {

// The globals below are computed once by InitMemorySubsystem() and read-only
// afterwards (pageSize == 0 doubles as the "not yet initialized" marker).

/*
 * System allocation functions generally require the allocation size
 * to be an integer multiple of the page size of the running process.
 */
static size_t pageSize = 0;

/* The OS allocation granularity may not match the page size. */
static size_t allocGranularity = 0;

/* The number of bits used by addresses on this platform. */
static size_t numAddressBits = 0;

/*
 * An estimate of the number of bytes available for virtual memory.
 * Taken from RLIMIT_AS where available; otherwise stays SIZE_MAX.
 */
static size_t virtualMemoryLimit = size_t(-1);

/* Whether decommit is enabled. */
static bool decommitEnabled = false;

/* Whether DisableDecommit() has been called. */
static bool disableDecommitRequested = false;

/*
 * System allocation functions may hand out regions of memory in increasing or
 * decreasing order. This ordering is used as a hint during chunk alignment to
 * reduce the number of system calls. On systems with 48-bit addresses, our
 * workarounds to obtain 47-bit pointers cause addresses to be handed out in
 * increasing order.
 *
 * We do not use the growth direction on Windows, as constraints on VirtualAlloc
 * would make its application failure prone and complex. Tests indicate that
 * VirtualAlloc always hands out regions of memory in increasing order.
 */
#if defined(XP_DARWIN)
static mozilla::Atomic<int, mozilla::Relaxed> growthDirection(1);
#elif defined(XP_UNIX)
static mozilla::Atomic<int, mozilla::Relaxed> growthDirection(0);
#endif

/*
 * Data from OOM crashes shows there may be up to 24 chunk-sized but unusable
 * chunks available in low memory situations. These chunks may all need to be
 * used up before we gain access to remaining *alignable* chunk-sized regions,
 * so we use a generous limit of 32 unusable chunks to ensure we reach them.
 */
static const int MaxLastDitchAttempts = 32;

#ifdef JS_64BIT
/*
 * On some 64-bit platforms we can use a random, scattershot allocator that
 * tries addresses from the available range at random. If the address range
 * is large enough this will have a high chance of success and additionally
 * makes the memory layout of our process less predictable.
 *
 * However, not all 64-bit platforms have a very large address range. For
 * example, AArch64 on Linux defaults to using 39-bit addresses to limit the
 * number of translation tables used. On such configurations the scattershot
 * approach to allocation creates a conflict with our desire to reserve large
 * regions of memory for applications like WebAssembly: Small allocations may
 * inadvertently block off all available 4-6GiB regions, and conversely
 * reserving such regions may lower the success rate for smaller allocations to
 * unacceptable levels.
 *
 * So we make a compromise: Instead of using the scattershot on all 64-bit
 * platforms, we only use it on platforms that meet a minimum requirement for
 * the available address range. In addition we split the address range,
 * reserving the upper half for huge allocations and the lower half for smaller
 * allocations. We use a limit of 43 bits so that at least 42 bits are available
 * for huge allocations - this matches the 8TiB per process address space limit
 * that we're already subject to on Windows.
 */
static const size_t MinAddressBitsForRandomAlloc = 43;

/* The lower limit for huge allocations. This is fairly arbitrary. */
static const size_t HugeAllocationSize = 1024 * 1024 * 1024;

/* The minimum and maximum valid addresses that can be allocated into. */
static size_t minValidAddress = 0;
static size_t maxValidAddress = 0;

/* The upper limit for smaller allocations and the lower limit for huge ones. */
static size_t hugeSplit = 0;
#endif

/* Running totals to report to the profiler. */
mozilla::Atomic<size_t, mozilla::Relaxed> gMappedMemorySizeBytes;
mozilla::Atomic<uint64_t, mozilla::Relaxed> gMappedMemoryOperations;
    150 
/* Accessors for the values computed by InitMemorySubsystem(). */
size_t SystemPageSize() { return pageSize; }

size_t SystemAddressBits() { return numAddressBits; }

size_t VirtualMemoryLimit() { return virtualMemoryLimit; }
    156 
/*
 * Returns true when the random ("scattershot") allocator is in use. It
 * requires a 64-bit build with at least MinAddressBitsForRandomAlloc usable
 * address bits; 32-bit builds never use it.
 */
bool UsingScattershotAllocator() {
#ifdef JS_64BIT
  return numAddressBits >= MinAddressBitsForRandomAlloc;
#else
  return false;
#endif
}
    164 
/* Whether a new mapping should be committed (backed by storage) up front. */
enum class Commit : bool {
  No = false,
  Yes = true,
};

/*
 * Page protection flags, expressed in each platform's native type so they
 * can be passed straight through to VirtualAlloc / mmap.
 */
#ifdef XP_WIN
enum class PageAccess : DWORD {
  None = PAGE_NOACCESS,
  Read = PAGE_READONLY,
  ReadWrite = PAGE_READWRITE,
  Execute = PAGE_EXECUTE,
  ReadExecute = PAGE_EXECUTE_READ,
  ReadWriteExecute = PAGE_EXECUTE_READWRITE,
};
#elif defined(__wasi__)
// WASI has no page-protection concept; all values collapse to 0.
enum class PageAccess : int {
  None = 0,
  Read = 0,
  ReadWrite = 0,
  Execute = 0,
  ReadExecute = 0,
  ReadWriteExecute = 0,
};
#else
enum class PageAccess : int {
  None = PROT_NONE,
  Read = PROT_READ,
  ReadWrite = PROT_READ | PROT_WRITE,
  Execute = PROT_EXEC,
  ReadExecute = PROT_READ | PROT_EXEC,
  ReadWriteExecute = PROT_READ | PROT_WRITE | PROT_EXEC,
};
#endif
    198 
// Forward declarations for the aligned-allocation helpers defined later in
// this file.
template <bool AlwaysGetNew = true>
static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                            size_t length, size_t alignment);

#ifndef __wasi__
static void* MapAlignedPagesSlow(size_t length, size_t alignment);
#endif  // wasi
static void* MapAlignedPagesLastDitch(size_t length, size_t alignment,
                                      StallAndRetry stallAndRetry);

#ifdef JS_64BIT
static void* MapAlignedPagesRandom(size_t length, size_t alignment);
#endif
    212 
    213 void* TestMapAlignedPagesLastDitch(size_t length, size_t alignment) {
    214  void* region = MapAlignedPagesLastDitch(length, alignment, StallAndRetry::No);
    215  if (region) {
    216    RecordMemoryAlloc(length);
    217  }
    218  return region;
    219 }
    220 
/* Returns whether decommit is in use (computed by InitMemorySubsystem()). */
bool DecommitEnabled() { return decommitEnabled; }

/*
 * Requests that decommit be disabled. Must be called before
 * InitMemorySubsystem(), which is where the request takes effect.
 */
void DisableDecommit() {
  MOZ_RELEASE_ASSERT(
      pageSize == 0,
      "DisableDecommit should be called before InitMemorySubsystem");
  disableDecommitRequested = true;
}
    229 
    230 /* Returns the offset from the nearest aligned address at or below |region|. */
    231 static inline size_t OffsetFromAligned(void* region, size_t alignment) {
    232  return uintptr_t(region) % alignment;
    233 }
    234 
/*
 * Core platform mapping primitive. Maps |length| bytes of read/write memory,
 * at |desired| if given (some platforms treat it only as a hint — callers
 * must check the returned address). Returns nullptr on failure.
 * |commit| selects MEM_RESERVE vs MEM_RESERVE|MEM_COMMIT on Windows;
 * |retry| selects the stalling MozVirtualAlloc on Windows.
 */
template <Commit commit, StallAndRetry retry = StallAndRetry::No>
static inline void* MapInternal(void* desired, size_t length) {
  void* region = nullptr;
#ifdef XP_WIN
  DWORD flags =
      (commit == Commit::Yes ? MEM_RESERVE | MEM_COMMIT : MEM_RESERVE);
  if constexpr (retry == StallAndRetry::Yes) {
    // MozVirtualAlloc sleeps and retries on failure (see shim above).
    region = mozilla::MozVirtualAlloc(desired, length, flags,
                                      DWORD(PageAccess::ReadWrite));
  } else {
    region = VirtualAlloc(desired, length, flags, DWORD(PageAccess::ReadWrite));
  }
#elif defined(__wasi__)
  // WASI has no mmap; fall back to page-aligned heap memory, zeroed to match
  // the fresh-mapping behavior of the other platforms.
  if (int err = posix_memalign(&region, gc::SystemPageSize(), length)) {
    MOZ_RELEASE_ASSERT(err == ENOMEM);
    return nullptr;
  }
  if (region) {
    memset(region, 0, length);
  }
#else
  int flags = MAP_PRIVATE | MAP_ANON;
  // Tagged so the region shows up as "js-gc-heap" in memory reports.
  region = MozTaggedAnonymousMmap(desired, length, int(PageAccess::ReadWrite),
                                  flags, -1, 0, "js-gc-heap");
  if (region == MAP_FAILED) {
    return nullptr;
  }
#endif
  return region;
}
    265 
/*
 * Releases a mapping created by MapInternal(). |region| must be aligned to
 * the allocation granularity and |length| must be a whole number of pages.
 */
static inline void UnmapInternal(void* region, size_t length) {
  MOZ_ASSERT(region && OffsetFromAligned(region, allocGranularity) == 0);
  MOZ_ASSERT(length > 0 && length % pageSize == 0);

#ifdef XP_WIN
  // MEM_RELEASE frees the entire reservation; the size must be passed as 0.
  MOZ_RELEASE_ASSERT(VirtualFree(region, 0, MEM_RELEASE) != 0);
#elif defined(__wasi__)
  free(region);
#else
  // Only ENOMEM is tolerated from munmap; any other failure is a bug.
  if (munmap(region, length)) {
    MOZ_RELEASE_ASSERT(errno == ENOMEM);
  }
#endif
}
    280 
/* Maps |length| bytes at an address of the system's choosing. */
template <Commit commit = Commit::Yes, StallAndRetry retry = StallAndRetry::No>
static inline void* MapMemory(size_t length) {
  MOZ_ASSERT(length > 0);

  return MapInternal<commit, retry>(nullptr, length);
}
    287 
/*
 * Attempts to map memory at the given address, but allows the system
 * to return a different address that may still be suitable.
 * Returns nullptr on failure.
 */
template <Commit commit = Commit::Yes>
static inline void* MapMemoryAtFuzzy(void* desired, size_t length) {
  MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
  MOZ_ASSERT(length > 0);

  // Note that some platforms treat the requested address as a hint, so the
  // returned address might not match the requested address.
  return MapInternal<commit>(desired, length);
}
    301 
    302 /*
    303 * Attempts to map memory at the given address, returning nullptr if
    304 * the system returns any address other than the requested one.
    305 */
    306 template <Commit commit = Commit::Yes>
    307 static inline void* MapMemoryAt(void* desired, size_t length) {
    308  MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
    309  MOZ_ASSERT(length > 0);
    310 
    311  void* region = MapInternal<commit>(desired, length);
    312  if (!region) {
    313    return nullptr;
    314  }
    315 
    316  // On some platforms mmap treats the desired address as a hint, so
    317  // check that the address we got is the address we requested.
    318  if (region != desired) {
    319    UnmapInternal(region, length);
    320    return nullptr;
    321  }
    322  return region;
    323 }
    324 
    325 #ifdef JS_64BIT
    326 
/*
 * Returns a uniformly distributed random number in the inclusive range
 * [minNum, maxNum]. Uses rejection sampling to avoid the modulo bias of a
 * naive |random % rangeSize|.
 */
static inline uint64_t GetNumberInRange(uint64_t minNum, uint64_t maxNum) {
  const uint64_t MaxRand = UINT64_C(0xffffffffffffffff);
  // Work with a zero-based range; |maxNum| is now the range size minus one.
  maxNum -= minNum;
  // Number of raw 64-bit values that map onto each result value.
  uint64_t binSize = 1 + (MaxRand - maxNum) / (maxNum + 1);

  uint64_t rndNum;
  do {
    // RandomUint64() can fail to produce a value; retry until it succeeds.
    mozilla::Maybe<uint64_t> result;
    do {
      result = mozilla::RandomUint64();
    } while (!result);
    // Values falling past the last complete bin are rejected (the outer
    // loop retries) to keep the distribution uniform.
    rndNum = result.value() / binSize;
  } while (rndNum > maxNum);

  return minNum + rndNum;
}
    344 
#  ifndef XP_WIN
static inline uint64_t FindAddressLimitInner(size_t highBit, size_t tries);

/*
 * The address range available to applications depends on both hardware and
 * kernel configuration. For example, AArch64 on Linux uses addresses with
 * 39 significant bits by default, but can be configured to use addresses with
 * 48 significant bits by enabling a 4th translation table. Unfortunately,
 * there appears to be no standard way to query the limit at runtime
 * (Windows exposes this via GetSystemInfo()).
 *
 * This function tries to find the address limit by performing a binary search
 * on the index of the most significant set bit in the addresses it attempts to
 * allocate. As the requested address is often treated as a hint by the
 * operating system, we use the actual returned addresses to narrow the range.
 * We return the number of bits of an address that may be set.
 */
static size_t FindAddressLimit() {
  // Use 32 bits as a lower bound in case we keep getting nullptr.
  uint64_t low = 31;
  uint64_t highestSeen = (UINT64_C(1) << 32) - allocGranularity - 1;

  // Exclude 48-bit and 47-bit addresses first.
  // |low| always tracks FloorLog2 of the highest address actually returned.
  uint64_t high = 47;
  for (; high >= std::max(low, UINT64_C(46)); --high) {
    highestSeen = std::max(FindAddressLimitInner(high, 4), highestSeen);
    low = mozilla::FloorLog2(highestSeen);
  }
  // If those didn't work, perform a modified binary search.
  while (high - 1 > low) {
    uint64_t middle = low + (high - low) / 2;
    highestSeen = std::max(FindAddressLimitInner(middle, 4), highestSeen);
    low = mozilla::FloorLog2(highestSeen);
    if (highestSeen < (UINT64_C(1) << middle)) {
      high = middle;
    }
  }
  // We can be sure of the lower bound, but check the upper bound again,
  // with more tries this time since the result is final.
  do {
    high = low + 1;
    highestSeen = std::max(FindAddressLimitInner(high, 8), highestSeen);
    low = mozilla::FloorLog2(highestSeen);
  } while (low >= high);

  // `low` is the highest set bit, so `low + 1` is the number of bits.
  return low + 1;
}
    392 
/*
 * Makes up to |tries| attempts to map one granule at a random address whose
 * most significant set bit is |highBit|, and returns the highest address the
 * OS actually handed back (0 if every attempt failed). Each probe mapping is
 * released immediately.
 */
static inline uint64_t FindAddressLimitInner(size_t highBit, size_t tries) {
  const size_t length = allocGranularity;  // Used as both length and alignment.

  uint64_t highestSeen = 0;
  // Raw candidate range: [2^highBit, 2^(highBit+1) - length - 1].
  uint64_t startRaw = UINT64_C(1) << highBit;
  uint64_t endRaw = 2 * startRaw - length - 1;
  // Convert to multiples of |length| so each candidate address is aligned.
  uint64_t start = (startRaw + length - 1) / length;
  uint64_t end = (endRaw - (length - 1)) / length;
  for (size_t i = 0; i < tries; ++i) {
    uint64_t desired = length * GetNumberInRange(start, end);
    void* address = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
    uint64_t actual = uint64_t(address);
    if (address) {
      UnmapInternal(address, length);
    }
    if (actual > highestSeen) {
      highestSeen = actual;
      // Stop early once an address in the requested range has been observed.
      if (actual >= startRaw) {
        break;
      }
    }
  }
  return highestSeen;
}
    417 #  endif  // !defined(XP_WIN)
    418 
    419 #endif  // defined(JS_64BIT)
    420 
/*
 * One-time initialization of the module globals above (page size, allocation
 * granularity, address-range limits, decommit support). Idempotent: a second
 * call is a no-op because pageSize is already non-zero.
 */
void InitMemorySubsystem() {
  if (pageSize == 0) {
#ifdef XP_WIN
    SYSTEM_INFO sysinfo;
    GetSystemInfo(&sysinfo);
    pageSize = sysinfo.dwPageSize;
    allocGranularity = sysinfo.dwAllocationGranularity;
#else
    pageSize = size_t(sysconf(_SC_PAGESIZE));
    allocGranularity = pageSize;
#endif

    // Decommit is supported if the system page size is the same as the
    // compile time constant and has not been disabled.
    decommitEnabled = pageSize == PageSize && !disableDecommitRequested;

#ifdef JS_64BIT
#  ifdef XP_WIN
    minValidAddress = size_t(sysinfo.lpMinimumApplicationAddress);
    maxValidAddress = size_t(sysinfo.lpMaximumApplicationAddress);
    numAddressBits = mozilla::FloorLog2(maxValidAddress) + 1;
#  else
    // No standard way to determine these, so fall back to FindAddressLimit().
    numAddressBits = FindAddressLimit();
    minValidAddress = allocGranularity;
    maxValidAddress = (UINT64_C(1) << numAddressBits) - 1 - allocGranularity;
#  endif
    // Sanity check the address to ensure we don't use more than 47 bits.
    uint64_t maxJSAddress = UINT64_C(0x00007fffffffffff) - allocGranularity;
    if (maxValidAddress > maxJSAddress) {
      maxValidAddress = maxJSAddress;
      hugeSplit = UINT64_C(0x00003fffffffffff) - allocGranularity;
    } else {
      // Split the usable range in half: lower half for regular allocations,
      // upper half for huge ones (see MapAlignedPagesRandom).
      hugeSplit = (UINT64_C(1) << (numAddressBits - 1)) - 1 - allocGranularity;
    }
#else  // !defined(JS_64BIT)
    numAddressBits = 32;
#endif
#ifdef RLIMIT_AS
    // Use the process's address-space rlimit (if finite) as the virtual
    // memory estimate. Only needed when a JIT backend is present.
    if (jit::HasJitBackend()) {
      rlimit as_limit;
      if (getrlimit(RLIMIT_AS, &as_limit) == 0 &&
          as_limit.rlim_max != RLIM_INFINITY) {
        virtualMemoryLimit = as_limit.rlim_max;
      }
    }
#endif
  }

  // Nothing should have been mapped (and recorded) before initialization.
  MOZ_ASSERT(gMappedMemorySizeBytes == 0);
}
    472 
/*
 * (Effective only in FUZZING builds on Linux.) Eagerly maps the main
 * thread's full stack region up to |stackSize| so that later heap mmaps
 * cannot land inside it when virtual memory gets tight. A no-op on all
 * other configurations.
 */
void MapStack(size_t stackSize) {
  // Main thread only of the main runtime only. Note: these are insufficient
  // tests. In Firefox, for instance, the ProxyAutoConfig code starts up a
  // "main" JS runtime on a thread.
  MOZ_ASSERT(js::CurrentThreadIsMainThread());
  MOZ_ASSERT(MaybeGetJSContext()->runtime()->isMainRuntime());

#if defined(FUZZING) && defined(XP_LINUX)
  // Test whether we're *really* on the process's main thread: on Linux the
  // main thread's tid equals the pid.
  if (getpid() != (pid_t)syscall(SYS_gettid)) {
    return;
  }

  // Allocate the full maximum allowed stack region immediately, to prevent heap
  // mmaps from grabbing pages from within this region when virtual memory gets
  // tight.
  uintptr_t stackTop = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
  uintptr_t stackBase = stackTop - stackSize;
  size_t pageSize = js::gc::SystemPageSize();
  MOZ_ASSERT(pageSize > 0);

  // Back up a page: the stack pointer was grabbed up above (in stackTop),
  // but it may get decremented before and while calling mmap, and mmapping
  // the page containing sp and beyond (the live stack) will corrupt it.
  stackTop -= pageSize;

  stackBase = RoundDown(stackBase, pageSize);
  stackTop = RoundDown(stackTop, pageSize);

  // It is possible that deeper parts of the stack have already been reserved
  // (due to an initial stack reservation, or because something ran that used
  // more of the stack even though it has all been popped off now.) Make space
  // for a guard page so that the stack is allowed to grow. The rest of the
  // stack mapping will be clobbered by the mmap below.
  uintptr_t guardBase = stackBase - pageSize;
  if (munmap(reinterpret_cast<void*>(guardBase), pageSize) < 0) {
    MOZ_CRASH("unable to unmap guard page in unused portion of existing stack");
  }

  // MAP_FIXED clobbers any existing mapping in the range; MAP_NORESERVE
  // avoids charging the whole region against the commit limit up front.
  int flags =
      MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS | MAP_GROWSDOWN | MAP_NORESERVE;
#  ifdef MAP_STACK
  flags |= MAP_STACK;
#  endif

  void* result = mmap(reinterpret_cast<void*>(stackBase), stackTop - stackBase,
                      PROT_READ | PROT_WRITE, flags, -1, 0);
  if (result == MAP_FAILED) {
    MOZ_CRASH("mmap of stack failed");
  }
  if (result != reinterpret_cast<void*>(stackBase)) {
    MOZ_CRASH("(old kernel) interfering mapping exists");
  }
#endif
}
    528 
/*
 * Shutdown sanity check: every mapped byte recorded via RecordMemoryAlloc
 * should have been released again, leaving the running total at zero.
 */
void CheckMemorySubsystemOnShutDown() {
  MOZ_ASSERT(gMappedMemorySizeBytes == 0);
}
    532 
    533 #ifdef JS_64BIT
    534 /* The JS engine uses 47-bit pointers; all higher bits must be clear. */
    535 static inline bool IsInvalidRegion(void* region, size_t length) {
    536  const uint64_t invalidPointerMask = UINT64_C(0xffff800000000000);
    537  return (uintptr_t(region) + length - 1) & invalidPointerMask;
    538 }
    539 #endif
    540 
/*
 * Maps |length| bytes (a multiple of the page size) at an address aligned to
 * |alignment|. Alignments smaller than the allocation granularity are rounded
 * up to it. Returns nullptr on failure. Successful allocations are recorded
 * via RecordMemoryAlloc for the profiler totals.
 */
void* MapAlignedPages(size_t length, size_t alignment,
                      StallAndRetry stallAndRetry) {
  MOZ_RELEASE_ASSERT(length > 0 && alignment > 0);
  MOZ_RELEASE_ASSERT(length % pageSize == 0);
  // One of |alignment| and |allocGranularity| must divide the other.
  MOZ_RELEASE_ASSERT(std::max(alignment, allocGranularity) %
                         std::min(alignment, allocGranularity) ==
                     0);

  // Smaller alignments aren't supported by the allocation functions.
  if (alignment < allocGranularity) {
    alignment = allocGranularity;
  }

#ifdef __wasi__
  // WASI: posix_memalign is the only option; note this path does not record
  // the allocation.
  void* region = nullptr;
  if (int err = posix_memalign(&region, alignment, length)) {
    MOZ_ASSERT(err == ENOMEM);
    (void)err;
    return nullptr;
  }
  MOZ_ASSERT(region != nullptr);
  memset(region, 0, length);
  return region;
#else

#  ifdef JS_64BIT
  // Use the scattershot allocator if the address range is large enough.
  if (UsingScattershotAllocator()) {
    void* region = MapAlignedPagesRandom(length, alignment);

    MOZ_RELEASE_ASSERT(!IsInvalidRegion(region, length));
    MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);

    RecordMemoryAlloc(length);
    return region;
  }
#  endif

  // Try to allocate the region.
  void* region = MapMemory(length);
  if (!region) {
    return nullptr;
  }

  // If the returned address is aligned, we're done.
  if (OffsetFromAligned(region, alignment) == 0) {
    RecordMemoryAlloc(length);
    return region;
  }

  // Try to align the region. On success, TryToAlignChunk() returns
  // true and we can return the aligned region immediately.
  void* retainedRegion;
  if (TryToAlignChunk(&region, &retainedRegion, length, alignment)) {
    MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
    MOZ_ASSERT(!retainedRegion);
    RecordMemoryAlloc(length);
    return region;
  }

  // On failure, the unaligned region is retained unless we OOMed. We don't
  // use the retained region on this path (see the last ditch allocator).
  if (retainedRegion) {
    UnmapInternal(retainedRegion, length);
  }

  // If it fails to align the given region, TryToAlignChunk() returns the
  // next valid region that we might be able to align (unless we OOMed).
  if (region) {
    MOZ_ASSERT(OffsetFromAligned(region, alignment) != 0);
    UnmapInternal(region, length);
  }

  // Since we couldn't align the first region, fall back to allocating a
  // region large enough that we can definitely align it.
  region = MapAlignedPagesSlow(length, alignment);
  if (!region) {
    // If there wasn't enough contiguous address space left for that,
    // try to find an alignable region using the last ditch allocator.
    region = MapAlignedPagesLastDitch(length, alignment, stallAndRetry);
    if (!region) {
      return nullptr;
    }
  }

  // At this point we should have an aligned region.
  MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);

  RecordMemoryAlloc(length);
  return region;
#endif  // !__wasi__
}
    633 
    634 #ifdef JS_64BIT
    635 
    636 /*
    637 * This allocator takes advantage of the large address range on some 64-bit
    638 * platforms to allocate in a scattershot manner, choosing addresses at random
    639 * from the range. By controlling the range we can avoid returning addresses
    640 * that have more than 47 significant bits (as required by SpiderMonkey).
    641 * This approach also has some other advantages over the methods employed by
    642 * the other allocation functions in this file:
    643 * 1) Allocations are extremely likely to succeed on the first try.
    644 * 2) The randomness makes our memory layout becomes harder to predict.
    645 * 3) The low probability of reusing regions guards against use-after-free.
    646 *
    647 * The main downside is that detecting physical OOM situations becomes more
    648 * difficult; to guard against this, we occasionally try a regular allocation.
    649 * In addition, sprinkling small allocations throughout the full address range
    650 * might get in the way of large address space reservations such as those
    651 * employed by WebAssembly. To avoid this (or the opposite problem of such
    652 * reservations reducing the chance of success for smaller allocations) we
    653 * split the address range in half, with one half reserved for huge allocations
    654 * and the other for regular (usually chunk sized) allocations.
    655 */
    656 static void* MapAlignedPagesRandom(size_t length, size_t alignment) {
    657  uint64_t minNum, maxNum;
    658  if (length < HugeAllocationSize) {
    659    // Use the lower half of the range.
    660    minNum = (minValidAddress + alignment - 1) / alignment;
    661    maxNum = (hugeSplit - (length - 1)) / alignment;
    662  } else {
    663    // Use the upper half of the range.
    664    minNum = (hugeSplit + 1 + alignment - 1) / alignment;
    665    maxNum = (maxValidAddress - (length - 1)) / alignment;
    666  }
    667 
    668  // Try to allocate in random aligned locations.
    669  void* region = nullptr;
    670  for (size_t i = 1; i <= 1024; ++i) {
    671    if (i & 0xf) {
    672      uint64_t desired = alignment * GetNumberInRange(minNum, maxNum);
    673 #  if defined(__FreeBSD__)
    674      int flags = MAP_PRIVATE | MAP_ANON |
    675                  MAP_ALIGNED(mozilla::CeilingLog2Size(alignment));
    676      region = MozTaggedAnonymousMmap((void*)(uintptr_t)desired, length,
    677                                      int(PageAccess::ReadWrite), flags, -1, 0,
    678                                      "js-gc-heap");
    679 #  else
    680      region = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
    681 
    682 #  endif
    683      if (!region) {
    684        continue;
    685      }
    686    } else {
    687      // Check for OOM.
    688      region = MapMemory(length);
    689      if (!region) {
    690        return nullptr;
    691      }
    692    }
    693    if (IsInvalidRegion(region, length)) {
    694      UnmapInternal(region, length);
    695      continue;
    696    }
    697    if (OffsetFromAligned(region, alignment) == 0) {
    698      return region;
    699    }
    700    void* retainedRegion = nullptr;
    701    if (TryToAlignChunk<false>(&region, &retainedRegion, length, alignment)) {
    702      MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
    703      MOZ_ASSERT(!retainedRegion);
    704      return region;
    705    }
    706    MOZ_ASSERT(region && !retainedRegion);
    707    UnmapInternal(region, length);
    708  }
    709 
    710  if (numAddressBits < 48) {
    711    // Try the reliable fallback of overallocating.
    712    // Note: This will not respect the address space split.
    713    region = MapAlignedPagesSlow(length, alignment);
    714    if (region) {
    715      return region;
    716    }
    717  }
    718  if (length < HugeAllocationSize) {
    719    MOZ_CRASH("Couldn't allocate even after 1000 tries!");
    720  }
    721 
    722  return nullptr;
    723 }
    724 
    725 #endif  // defined(JS_64BIT)
    726 
#ifndef __wasi__
/*
 * Reliable aligned allocation: reserves |length + alignment - pageSize|
 * bytes — enough to guarantee an aligned |length|-byte sub-region — then
 * trims away the excess. Returns nullptr on OOM.
 */
static void* MapAlignedPagesSlow(size_t length, size_t alignment) {
  void* alignedRegion = nullptr;
  do {
    size_t reserveLength = length + alignment - pageSize;
#  ifdef XP_WIN
    // Don't commit the requested pages as we won't use the region directly.
    void* region = MapMemory<Commit::No>(reserveLength);
#  else
    void* region = MapMemory(reserveLength);
#  endif
    if (!region) {
      return nullptr;
    }
    alignedRegion =
        reinterpret_cast<void*>(AlignBytes(uintptr_t(region), alignment));
#  ifdef XP_WIN
    // Windows requires that map and unmap calls be matched, so deallocate
    // and immediately reallocate at the desired (aligned) address.
    UnmapInternal(region, reserveLength);
    alignedRegion = MapMemoryAt(alignedRegion, length);
#  else
    // munmap allows us to simply unmap the pages that don't interest us.
    // First the slack before the aligned address...
    if (alignedRegion != region) {
      UnmapInternal(region, uintptr_t(alignedRegion) - uintptr_t(region));
    }
    // ...then the slack after the aligned sub-region.
    void* regionEnd =
        reinterpret_cast<void*>(uintptr_t(region) + reserveLength);
    void* alignedEnd =
        reinterpret_cast<void*>(uintptr_t(alignedRegion) + length);
    if (alignedEnd != regionEnd) {
      UnmapInternal(alignedEnd, uintptr_t(regionEnd) - uintptr_t(alignedEnd));
    }
#  endif
    // On Windows we may have raced with another thread; if so, try again.
  } while (!alignedRegion);

  return alignedRegion;
}
#endif  // wasi
    767 
    768 /*
    769 * In a low memory or high fragmentation situation, alignable chunks of the
    770 * desired length may still be available, even if there are no more contiguous
    771 * free chunks that meet the |length + alignment - pageSize| requirement of
    772 * MapAlignedPagesSlow. In this case, try harder to find an alignable chunk
    773 * by temporarily holding onto the unaligned parts of each chunk until the
    774 * allocator gives us a chunk that either is, or can be aligned.
    775 */
static void* MapAlignedPagesLastDitch(size_t length, size_t alignment,
                                      StallAndRetry stallAndRetry) {
  // Unaligned chunks we hold onto temporarily so the allocator cannot hand
  // them back to us again; all released before returning.
  void* tempMaps[MaxLastDitchAttempts];
  int attempt = 0;
  void* region;

  // The stall policy is a template parameter of MapMemory, so dispatch on
  // the runtime value here.
  if (stallAndRetry == StallAndRetry::Yes) {
    region = MapMemory<Commit::Yes, StallAndRetry::Yes>(length);
  } else {
    region = MapMemory<Commit::Yes, StallAndRetry::No>(length);
  }

  // Lucky case: the allocator handed us an aligned chunk straight away.
  if (OffsetFromAligned(region, alignment) == 0) {
    return region;
  }
  for (; attempt < MaxLastDitchAttempts; ++attempt) {
    // Try to shift |region| onto an aligned address, parking the leftover
    // unaligned piece in tempMaps[attempt].
    if (TryToAlignChunk(&region, tempMaps + attempt, length, alignment)) {
      MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
      MOZ_ASSERT(!tempMaps[attempt]);
      break;  // Success!
    }
    if (!region || !tempMaps[attempt]) {
      break;  // We ran out of memory, so give up.
    }
  }
  // Still unaligned after all attempts: report failure to the caller.
  if (OffsetFromAligned(region, alignment)) {
    UnmapInternal(region, length);
    region = nullptr;
  }
  // Release every unaligned chunk we were holding onto.
  while (--attempt >= 0) {
    UnmapInternal(tempMaps[attempt], length);
  }
  return region;
}
    810 
    811 #ifdef XP_WIN
    812 
    813 /*
    814 * On Windows, map and unmap calls must be matched, so we deallocate the
    815 * unaligned chunk, then reallocate the unaligned part to block off the
    816 * old address and force the allocator to give us a new one.
    817 */
// On success, *aRegion is aligned and *aRetainedRegion is null. On failure,
// *aRegion and/or *aRetainedRegion may be non-null and the caller owns them.
// The bool template parameter is unused here; it mirrors the POSIX overload.
template <bool>
static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                            size_t length, size_t alignment) {
  void* region = *aRegion;
  MOZ_ASSERT(region && OffsetFromAligned(region, alignment) != 0);

  size_t retainedLength = 0;
  void* retainedRegion = nullptr;
  do {
    size_t offset = OffsetFromAligned(region, alignment);
    if (offset == 0) {
      // If the address is aligned, either we hit OOM or we're done.
      // (A null region counts as aligned here, so OOM exits the loop too.)
      break;
    }
    UnmapInternal(region, length);
    // Block off the start of the old region, up to the next aligned
    // address, so the allocator must hand us a different chunk next time.
    retainedLength = alignment - offset;
    retainedRegion = MapMemoryAt<Commit::No>(region, retainedLength);
    region = MapMemory(length);

    // If retainedRegion is null here, we raced with another thread.
  } while (!retainedRegion);

  bool result = OffsetFromAligned(region, alignment) == 0;
  if (result && retainedRegion) {
    // The placeholder has served its purpose; release it.
    UnmapInternal(retainedRegion, retainedLength);
    retainedRegion = nullptr;
  }

  *aRegion = region;
  *aRetainedRegion = retainedRegion;
  return region && result;
}
    850 
    851 #else  // !defined(XP_WIN)
    852 
    853 /*
    854 * mmap calls don't have to be matched with calls to munmap, so we can unmap
    855 * just the pages we don't need. However, as we don't know a priori if addresses
    856 * are handed out in increasing or decreasing order, we have to try both
    857 * directions (depending on the environment, one will always fail).
    858 */
// On success, *aRegion is aligned and *aRetainedRegion is null. Otherwise
// the caller owns whatever is left in both out-parameters. If AlwaysGetNew
// is set and the chunk cannot be aligned in place, the old chunk is retained
// and a fresh one is requested.
template <bool AlwaysGetNew>
static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                            size_t length, size_t alignment) {
  void* regionStart = *aRegion;
  MOZ_ASSERT(regionStart && OffsetFromAligned(regionStart, alignment) != 0);

  // growthDirection tallies which way the OS has been handing out addresses;
  // while its magnitude is small we are not yet confident in the direction.
  bool addressesGrowUpward = growthDirection > 0;
  bool directionUncertain = -8 < growthDirection && growthDirection <= 8;
  // Distance down to the previous aligned address and up to the next one.
  size_t offsetLower = OffsetFromAligned(regionStart, alignment);
  size_t offsetUpper = alignment - offsetLower;
  // Try at most both directions; the second pass runs only when uncertain.
  for (size_t i = 0; i < 2; ++i) {
    if (addressesGrowUpward) {
      // Extend the mapping past its end, then drop the unaligned prefix so
      // the chunk starts at the next aligned address.
      void* upperStart =
          reinterpret_cast<void*>(uintptr_t(regionStart) + offsetUpper);
      void* regionEnd =
          reinterpret_cast<void*>(uintptr_t(regionStart) + length);
      if (MapMemoryAt(regionEnd, offsetUpper)) {
        UnmapInternal(regionStart, offsetUpper);
        if (directionUncertain) {
          ++growthDirection;
        }
        regionStart = upperStart;
        break;
      }
    } else {
      // Extend the mapping below its start, then drop the excess past the
      // end of the newly aligned chunk.
      auto* lowerStart =
          reinterpret_cast<void*>(uintptr_t(regionStart) - offsetLower);
      auto* lowerEnd = reinterpret_cast<void*>(uintptr_t(lowerStart) + length);
      if (lowerStart && MapMemoryAt(lowerStart, offsetLower)) {
        UnmapInternal(lowerEnd, offsetLower);
        if (directionUncertain) {
          --growthDirection;
        }
        regionStart = lowerStart;
        break;
      }
    }
    // If we're confident in the growth direction, don't try the other.
    if (!directionUncertain) {
      break;
    }
    addressesGrowUpward = !addressesGrowUpward;
  }

  void* retainedRegion = nullptr;
  bool result = OffsetFromAligned(regionStart, alignment) == 0;
  if (AlwaysGetNew && !result) {
    // If our current chunk cannot be aligned, just get a new one.
    retainedRegion = regionStart;
    regionStart = MapMemory(length);
    // Our new region might happen to already be aligned.
    result = OffsetFromAligned(regionStart, alignment) == 0;
    if (result) {
      UnmapInternal(retainedRegion, length);
      retainedRegion = nullptr;
    }
  }

  *aRegion = regionStart;
  *aRetainedRegion = retainedRegion;
  return regionStart && result;
}
    921 
    922 #endif
    923 
    924 void UnmapPages(void* region, size_t length) {
    925  MOZ_RELEASE_ASSERT(region &&
    926                     OffsetFromAligned(region, allocGranularity) == 0);
    927  MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
    928 
    929  // ASan does not automatically unpoison memory, so we have to do this here.
    930  MOZ_MAKE_MEM_UNDEFINED(region, length);
    931 
    932  UnmapInternal(region, length);
    933 
    934 #ifndef __wasi__
    935  RecordMemoryFree(length);
    936 #endif
    937 }
    938 
    939 static void CheckDecommit(void* region, size_t length) {
    940  MOZ_RELEASE_ASSERT(region);
    941  MOZ_RELEASE_ASSERT(length > 0);
    942 
    943  // pageSize == ArenaSize doesn't necessarily hold, but this function is
    944  // used by the GC to decommit unused Arenas, so we don't want to assert
    945  // if pageSize > ArenaSize.
    946  MOZ_ASSERT(OffsetFromAligned(region, ArenaSize) == 0);
    947  MOZ_ASSERT(length % ArenaSize == 0);
    948 
    949  MOZ_RELEASE_ASSERT(OffsetFromAligned(region, pageSize) == 0);
    950  MOZ_RELEASE_ASSERT(length % pageSize == 0);
    951 }
    952 
    953 bool MarkPagesUnusedSoft(void* region, size_t length) {
    954  MOZ_ASSERT(DecommitEnabled());
    955  CheckDecommit(region, length);
    956 
    957  MOZ_MAKE_MEM_NOACCESS(region, length);
    958 
    959 #if defined(XP_WIN)
    960  return VirtualAlloc(region, length, MEM_RESET,
    961                      DWORD(PageAccess::ReadWrite)) == region;
    962 #elif defined(__wasi__)
    963  return 0;
    964 #else
    965  int status;
    966  do {
    967 #  if defined(XP_DARWIN)
    968    status = madvise(region, length, MADV_FREE_REUSABLE);
    969 #  elif defined(XP_SOLARIS)
    970    status = posix_madvise(region, length, POSIX_MADV_DONTNEED);
    971 #  else
    972    status = madvise(region, length, MADV_DONTNEED);
    973 #  endif
    974  } while (status == -1 && errno == EAGAIN);
    975  return status == 0;
    976 #endif
    977 }
    978 
    979 bool MarkPagesUnusedHard(void* region, size_t length) {
    980  CheckDecommit(region, length);
    981 
    982  MOZ_MAKE_MEM_NOACCESS(region, length);
    983 
    984  if (!DecommitEnabled()) {
    985    return true;
    986  }
    987 
    988 #if defined(XP_WIN)
    989  return VirtualFree(region, length, MEM_DECOMMIT);
    990 #else
    991  return MarkPagesUnusedSoft(region, length);
    992 #endif
    993 }
    994 
    995 void MarkPagesInUseSoft(void* region, size_t length) {
    996  MOZ_ASSERT(DecommitEnabled());
    997  CheckDecommit(region, length);
    998 
    999 #if defined(XP_DARWIN)
   1000  while (madvise(region, length, MADV_FREE_REUSE) == -1 && errno == EAGAIN) {
   1001  }
   1002 #endif
   1003 
   1004  MOZ_MAKE_MEM_UNDEFINED(region, length);
   1005 }
   1006 
   1007 bool MarkPagesInUseHard(void* region, size_t length) {
   1008  if (js::oom::ShouldFailWithOOM()) {
   1009    return false;
   1010  }
   1011 
   1012  CheckDecommit(region, length);
   1013 
   1014  MOZ_MAKE_MEM_UNDEFINED(region, length);
   1015 
   1016  if (!DecommitEnabled()) {
   1017    return true;
   1018  }
   1019 
   1020 #if defined(XP_WIN)
   1021  return VirtualAlloc(region, length, MEM_COMMIT,
   1022                      DWORD(PageAccess::ReadWrite)) == region;
   1023 #else
   1024  return true;
   1025 #endif
   1026 }
   1027 
   1028 size_t GetPageFaultCount() {
   1029 #ifdef XP_WIN
   1030  PROCESS_MEMORY_COUNTERS pmc;
   1031  if (GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)) == 0) {
   1032    return 0;
   1033  }
   1034  return pmc.PageFaultCount;
   1035 #elif defined(__wasi__)
   1036  return 0;
   1037 #else
   1038  struct rusage usage;
   1039  int err = getrusage(RUSAGE_SELF, &usage);
   1040  if (err) {
   1041    return 0;
   1042  }
   1043  return usage.ru_majflt;
   1044 #endif
   1045 }
   1046 
// Map the file range [offset, offset + length) from |fd| copy-on-write into
// memory and return a pointer to the requested bytes, or nullptr on failure.
// The returned pointer may be offset into the underlying OS mapping (which
// must start on an allocation-granularity boundary); free it with
// DeallocateMappedContent. Requires offset % alignment == 0 and that
// alignment and allocGranularity divide one another.
void* AllocateMappedContent(int fd, size_t offset, size_t length,
                            size_t alignment) {
#ifdef __wasi__
  MOZ_CRASH("Not yet supported for WASI");
#else
  if (length == 0 || alignment == 0 || offset % alignment != 0 ||
      std::max(alignment, allocGranularity) %
              std::min(alignment, allocGranularity) !=
          0) {
    return nullptr;
  }

  // Round the mapping start down to the allocation granularity and grow the
  // length by the same amount, so the OS mapping covers the requested range.
  size_t alignedOffset = offset - (offset % allocGranularity);
  size_t alignedLength = length + (offset % allocGranularity);

  // We preallocate the mapping using MapAlignedPages, which expects
  // the length parameter to be an integer multiple of the page size.
  size_t mappedLength = alignedLength;
  if (alignedLength % pageSize != 0) {
    mappedLength += pageSize - alignedLength % pageSize;
  }

#  ifdef XP_WIN
  HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));

  // This call will fail if the file does not exist.
  HANDLE hMap =
      CreateFileMappingW(hFile, nullptr, PAGE_READONLY, 0, 0, nullptr);
  if (!hMap) {
    return nullptr;
  }

  // MapViewOfFileEx takes the 64-bit file offset as two 32-bit halves.
  DWORD offsetH = uint32_t(uint64_t(alignedOffset) >> 32);
  DWORD offsetL = uint32_t(alignedOffset);

  uint8_t* map = nullptr;
  for (;;) {
    // The value of a pointer is technically only defined while the region
    // it points to is allocated, so explicitly treat this one as a number.
    uintptr_t region = uintptr_t(MapAlignedPages(mappedLength, alignment));
    if (region == 0) {
      break;
    }
    // Release the placeholder and immediately try to claim the same address
    // for the file view; another thread may steal it in between.
    UnmapInternal(reinterpret_cast<void*>(region), mappedLength);
    // If the offset or length are out of bounds, this call will fail.
    map = static_cast<uint8_t*>(
        MapViewOfFileEx(hMap, FILE_MAP_COPY, offsetH, offsetL, alignedLength,
                        reinterpret_cast<void*>(region)));
    if (map) {
      break;
    }

    RecordMemoryFree(mappedLength);

    // Retry if another thread mapped the address we were trying to use.
    if (GetLastError() != ERROR_INVALID_ADDRESS) {
      break;
    }
  }

  // This just decreases the file mapping object's internal reference count;
  // it won't actually be destroyed until we unmap the associated view.
  CloseHandle(hMap);

  if (!map) {
    return nullptr;
  }
#  else  // !defined(XP_WIN)
  // Sanity check the offset and length, as mmap does not do this for us.
  struct stat st;
  if (fstat(fd, &st) || offset >= uint64_t(st.st_size) ||
      length > uint64_t(st.st_size) - offset) {
    return nullptr;
  }

  void* region = MapAlignedPages(mappedLength, alignment);
  if (!region) {
    return nullptr;
  }

  // Calling mmap with MAP_FIXED will replace the previous mapping, allowing
  // us to reuse the region we obtained without racing with other threads.
  uint8_t* map =
      static_cast<uint8_t*>(mmap(region, alignedLength, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_FIXED, fd, alignedOffset));
  if (map == MAP_FAILED) {
    UnmapInternal(region, mappedLength);
    return nullptr;
  }
#  endif

#  ifdef DEBUG
  // Zero out data before and after the desired mapping to catch errors early.
  if (offset != alignedOffset) {
    memset(map, 0, offset - alignedOffset);
  }
  if (alignedLength % pageSize) {
    memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
  }
#  endif

  return map + (offset - alignedOffset);
#endif  // __wasi__
}
   1151 
// Release a mapping created by AllocateMappedContent. |region| is the
// (possibly offset) pointer that AllocateMappedContent returned and |length|
// is the originally requested length.
void DeallocateMappedContent(void* region, size_t length) {
#ifdef __wasi__
  MOZ_CRASH("Not yet supported for WASI");
#else
  if (!region) {
    return;
  }

  // Due to bug 1502562, the following assertion does not currently hold.
  // MOZ_RELEASE_ASSERT(length > 0);

  // Calculate the address originally returned by the system call.
  // This is needed because AllocateMappedContent returns a pointer
  // that might be offset from the mapping, as the beginning of a
  // mapping must be aligned with the allocation granularity.
  uintptr_t map = uintptr_t(region) - (uintptr_t(region) % allocGranularity);

  // Length of the mapping measured from |map|, mirroring the arithmetic in
  // AllocateMappedContent.
  size_t alignedLength = length + (uintptr_t(region) % allocGranularity);

  // Round up to whole pages for the accounting below, again mirroring
  // AllocateMappedContent.
  size_t mappedLength = alignedLength;
  if (alignedLength % pageSize != 0) {
    mappedLength += pageSize - alignedLength % pageSize;
  }

#  ifdef XP_WIN
  MOZ_RELEASE_ASSERT(UnmapViewOfFile(reinterpret_cast<void*>(map)) != 0);
#  else
  // munmap operates on whole pages, so passing alignedLength releases the
  // same pages that mappedLength covers.
  if (munmap(reinterpret_cast<void*>(map), alignedLength)) {
    MOZ_RELEASE_ASSERT(errno == ENOMEM);
  }
#  endif

  RecordMemoryFree(mappedLength);

#endif  // __wasi__
}
   1188 
   1189 static inline void ProtectMemory(void* region, size_t length, PageAccess prot) {
   1190  MOZ_RELEASE_ASSERT(region && OffsetFromAligned(region, pageSize) == 0);
   1191  MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
   1192 #ifdef XP_WIN
   1193  DWORD oldProtect;
   1194  MOZ_RELEASE_ASSERT(VirtualProtect(region, length, DWORD(prot), &oldProtect) !=
   1195                     0);
   1196 #elif defined(__wasi__)
   1197  /* nothing */
   1198 #else
   1199  MOZ_RELEASE_ASSERT(mprotect(region, length, int(prot)) == 0);
   1200 #endif
   1201 }
   1202 
// Make the given whole-page region completely inaccessible. See
// ProtectMemory for the alignment requirements.
void ProtectPages(void* region, size_t length) {
  ProtectMemory(region, length, PageAccess::None);
}
   1206 
// Make the given whole-page region read-only. See ProtectMemory for the
// alignment requirements.
void MakePagesReadOnly(void* region, size_t length) {
  ProtectMemory(region, length, PageAccess::Read);
}
   1210 
// Restore read/write access to a region previously protected with
// ProtectPages or MakePagesReadOnly.
void UnprotectPages(void* region, size_t length) {
  ProtectMemory(region, length, PageAccess::ReadWrite);
}
   1214 
   1215 void RecordMemoryAlloc(size_t bytes) {
   1216  MOZ_ASSERT(bytes);
   1217  MOZ_ASSERT((bytes % pageSize) == 0);
   1218 
   1219  gMappedMemorySizeBytes += bytes;
   1220  gMappedMemoryOperations++;
   1221 }
   1222 
   1223 void RecordMemoryFree(size_t bytes) {
   1224  MOZ_ASSERT(bytes);
   1225  MOZ_ASSERT((bytes % pageSize) == 0);
   1226  MOZ_ASSERT(bytes <= gMappedMemorySizeBytes);
   1227 
   1228  gMappedMemorySizeBytes -= bytes;
   1229  gMappedMemoryOperations++;
   1230 }
   1231 
   1232 JS_PUBLIC_API ProfilerMemoryCounts GetProfilerMemoryCounts() {
   1233  return {gc::gMappedMemorySizeBytes, gc::gMappedMemoryOperations};
   1234 }
   1235 
   1236 }  // namespace js::gc