tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Chunk.cpp (24823B)


      1 /* This Source Code Form is subject to the terms of the Mozilla Public
      2 * License, v. 2.0. If a copy of the MPL was not distributed with this
      3 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
      4 
      5 #include <cerrno>
      6 #include <cinttypes>
      7 #include <cstdio>
      8 
      9 #ifdef XP_WIN
     10 #  include <io.h>
     11 #  include <windows.h>
     12 #else
     13 #  include <sys/mman.h>
     14 #  include <unistd.h>
     15 #endif
     16 #ifdef XP_DARWIN
     17 #  include <libkern/OSAtomic.h>
     18 #  include <mach/mach_init.h>
     19 #  include <mach/vm_map.h>
     20 #endif
     21 
     22 #if defined(XP_WIN)
     23 #  include "mozmemory_stall.h"
     24 #endif
     25 #include "Mutex.h"
     26 #include "Chunk.h"
     27 #include "Extent.h"
     28 #include "Globals.h"
     29 #include "RedBlackTree.h"
     30 
     31 #include "mozilla/Assertions.h"
     32 #include "mozilla/HelperMacros.h"
     33 // Note: MozTaggedAnonymousMmap() could call an LD_PRELOADed mmap
     34 // instead of the one defined here; use only MozTagAnonymousMemory().
     35 #include "mozilla/TaggedAnonymousMemory.h"
     36 #include "mozilla/ThreadSafety.h"
     37 
     38 // For GetGeckoProcessType(), when it's used.
     39 #if defined(XP_WIN) && !defined(JS_STANDALONE)
     40 #  include "mozilla/ProcessType.h"
     41 #endif
     42 
     43 using namespace mozilla;
     44 
     45 // On Windows, delay crashing on OOM.
     46 #ifdef XP_WIN
     47 
// Implementation of VirtualAlloc wrapper (bug 1716727).
namespace MozAllocRetries {

// Maximum retry count on OOM.
constexpr size_t kMaxAttempts = 10;
// Minimum delay time between retries. (The actual delay time may be larger. See
// Microsoft's documentation for ::Sleep() for details.)
constexpr size_t kDelayMs = 50;

using StallSpecs = ::mozilla::StallSpecs;

// The most generous stall policy: kMaxAttempts retries of kDelayMs each.
static constexpr StallSpecs maxStall = {.maxAttempts = kMaxAttempts,
                                        .delayMs = kDelayMs};

// Select a stall/retry policy appropriate to the current process type.
static inline StallSpecs GetStallSpecs() {
#  if defined(JS_STANDALONE)
  // GetGeckoProcessType() isn't available in this configuration. (SpiderMonkey
  // on Windows mostly skips this in favor of directly calling ::VirtualAlloc(),
  // though, so it's probably not going to matter whether we stall here or not.)
  return maxStall;
#  else
  switch (GetGeckoProcessType()) {
    // For the main process, stall for the maximum permissible time period. (The
    // main process is the most important one to keep alive.)
    case GeckoProcessType::GeckoProcessType_Default:
      return maxStall;

    // For all other process types, stall for at most half as long.
    default:
      return {.maxAttempts = maxStall.maxAttempts / 2,
              .delayMs = maxStall.delayMs};
  }
#  endif
}

}  // namespace MozAllocRetries
     84 
     85 namespace mozilla {
     86 
// Public accessor exposing the allocator's stall/retry policy (defined in
// MozAllocRetries above) to the rest of the codebase.
StallSpecs GetAllocatorStallSpecs() {
  return ::MozAllocRetries::GetStallSpecs();
}
     90 
     91 // Drop-in wrapper around VirtualAlloc. When out of memory, may attempt to stall
     92 // and retry rather than returning immediately, in hopes that the page file is
     93 // about to be expanded by Windows.
     94 //
     95 // Ref:
     96 // https://docs.microsoft.com/en-us/troubleshoot/windows-client/performance/slow-page-file-growth-memory-allocation-errors
     97 // https://hacks.mozilla.org/2022/11/improving-firefox-stability-with-this-one-weird-trick/
void* MozVirtualAlloc(void* lpAddress, size_t dwSize, uint32_t flAllocationType,
                      uint32_t flProtect) {
  using namespace MozAllocRetries;

  // Snapshot the caller-visible error code so that a transient OOM we recover
  // from below is not leaked out through ::GetLastError().
  DWORD const lastError = ::GetLastError();

  // Returns true iff the thread's current last-error value indicates OOM.
  // (constexpr here applies to the closure object, not the call operator.)
  constexpr auto IsOOMError = [] {
    switch (::GetLastError()) {
      // This is the usual error result from VirtualAlloc for OOM.
      case ERROR_COMMITMENT_LIMIT:
      // Although rare, this has also been observed in low-memory situations.
      // (Presumably this means Windows can't allocate enough kernel-side space
      // for its own internal representation of the process's virtual address
      // space.)
      case ERROR_NOT_ENOUGH_MEMORY:
        return true;
    }
    return false;
  };

  {
    // First attempt, without any stalling.
    void* ptr = ::VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
    if (MOZ_LIKELY(ptr)) return ptr;

    // We can't do anything for errors other than OOM...
    if (!IsOOMError()) return nullptr;
    // ... or if this wasn't a request to commit memory in the first place.
    // (This function has no strategy for resolving MEM_RESERVE failures.)
    if (!(flAllocationType & MEM_COMMIT)) return nullptr;
  }

  // Retry as many times as desired (possibly zero).
  const StallSpecs stallSpecs = GetStallSpecs();

  const auto ret =
      stallSpecs.StallAndRetry(&::Sleep, [&]() -> std::optional<void*> {
        void* ptr =
            ::VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);

        if (ptr) {
          // The OOM status has been handled, and should not be reported to
          // telemetry.
          if (IsOOMError()) {
            ::SetLastError(lastError);
          }
          return ptr;
        }

        // Failure for some reason other than OOM.
        if (!IsOOMError()) {
          return nullptr;
        }

        // Still OOM: returning nullopt tells StallAndRetry to sleep and retry.
        return std::nullopt;
      });

  return ret.value_or(nullptr);
}
    156 
    157 }  // namespace mozilla
    158 
    159 #endif  // XP_WIN
    160 
    161 // Begin chunk management functions.
    162 
    163 // Some tools, such as /dev/dsp wrappers, LD_PRELOAD libraries that
    164 // happen to override mmap() and call dlsym() from their overridden
    165 // mmap(). The problem is that dlsym() calls malloc(), and this ends
    166 // up in a dead lock in jemalloc.
    167 // On these systems, we prefer to directly use the system call.
    168 // We do that for Linux systems and kfreebsd with GNU userland.
    169 // Note sanity checks are not done (alignment of offset, ...) because
    170 // the uses of mmap are pretty limited, in jemalloc.
    171 //
    172 // On Alpha, glibc has a bug that prevents syscall() to work for system
    173 // calls with 6 arguments.
    174 #if (defined(XP_LINUX) && !defined(__alpha__)) || \
    175    (defined(__FreeBSD_kernel__) && defined(__GLIBC__))
    176 #  include <sys/syscall.h>
    177 #  if defined(SYS_mmap) || defined(SYS_mmap2)
// Direct-syscall mmap replacement, bypassing any LD_PRELOAD-interposed libc
// mmap (see the comment block above). No argument sanity checks are done.
static inline void* _mmap(void* addr, size_t length, int prot, int flags,
                          int fd, off_t offset) {
// S390 only passes one argument to the mmap system call, which is a
// pointer to a structure containing the arguments.
#    ifdef __s390__
  struct {
    void* addr;
    size_t length;
    long prot;
    long flags;
    long fd;
    off_t offset;
  } args = {addr, length, prot, flags, fd, offset};
  return (void*)syscall(SYS_mmap, &args);
#    else
#      if defined(ANDROID) && defined(__aarch64__) && defined(SYS_mmap2)
  // Android NDK defines SYS_mmap2 for AArch64 despite it not supporting mmap2.
#        undef SYS_mmap2
#      endif
#      ifdef SYS_mmap2
  // mmap2 takes the offset in 4096-byte units, hence the >> 12.
  return (void*)syscall(SYS_mmap2, addr, length, prot, flags, fd, offset >> 12);
#      else
  return (void*)syscall(SYS_mmap, addr, length, prot, flags, fd, offset);
#      endif
#    endif
}
    204 #    define mmap _mmap
    205 #    define munmap(a, l) syscall(SYS_munmap, a, l)
    206 #  endif
    207 #endif
    208 
    209 #ifdef XP_WIN
    210 
    211 static void* pages_map(void* aAddr, size_t aSize, ShouldCommit should_commit) {
    212  switch (should_commit) {
    213    case ReserveAndCommit:
    214      return MozVirtualAlloc(aAddr, aSize, MEM_RESERVE | MEM_COMMIT,
    215                             PAGE_READWRITE);
    216    case ReserveOnly:
    217      // When not requesting committed memory we can skip MozVirtualAlloc and
    218      // its stall behaviour.
    219      return VirtualAlloc(aAddr, aSize, MEM_RESERVE, PAGE_NOACCESS);
    220  }
    221 }
    222 
    223 static void pages_unmap(void* aAddr, size_t aSize) {
    224  if (VirtualFree(aAddr, 0, MEM_RELEASE) == 0) {
    225    _malloc_message(_getprogname(), ": (malloc) Error in VirtualFree()\n");
    226  }
    227 }
    228 #else
    229 
    230 static void pages_unmap(void* aAddr, size_t aSize) {
    231  if (munmap(aAddr, aSize) == -1) {
    232    char buf[64];
    233 
    234    if (strerror_r(errno, buf, sizeof(buf)) == 0) {
    235      _malloc_message(_getprogname(), ": (malloc) Error in munmap(): ", buf,
    236                      "\n");
    237    }
    238  }
    239 }
    240 
// Map aSize bytes of anonymous memory, preferably at aAddr (without
// MAP_FIXED, so existing mappings are never clobbered). Returns nullptr on
// failure or when a requested placement couldn't be honored.
static void* pages_map(void* aAddr, size_t aSize, ShouldCommit should_commit) {
  void* ret;
#  if defined(__ia64__) || \
      (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
  // The JS engine assumes that all allocated pointers have their high 17 bits
  // clear, which ia64's mmap doesn't support directly. However, we can emulate
  // it by passing mmap an "addr" parameter with those bits clear. The mmap will
  // return that address, or the nearest available memory above that address,
  // providing a near-guarantee that those bits are clear. If they are not, we
  // return nullptr below to indicate out-of-memory.
  //
  // The addr is chosen as 0x0000070000000000, which still allows about 120TB of
  // virtual address space.
  //
  // See Bug 589735 for more information.
  bool check_placement = true;
  if (!aAddr) {
    aAddr = (void*)0x0000070000000000;
    check_placement = false;
  }
#  endif

#  if defined(__sparc__) && defined(__arch64__) && defined(__linux__)
  const uintptr_t start = 0x0000070000000000ULL;
  const uintptr_t end = 0x0000800000000000ULL;

  // Copied from js/src/gc/Memory.cpp and adapted for this source.
  // Scan the [start, end) window one chunk at a time until a mapping whose
  // high 17 bits are clear is obtained.
  uintptr_t hint;
  void* region = MAP_FAILED;
  for (hint = start; region == MAP_FAILED && hint + aSize <= end;
       hint += kChunkSize) {
    region = mmap((void*)hint, aSize,
                  should_commit ? PROT_READ | PROT_WRITE : PROT_NONE,
                  MAP_PRIVATE | MAP_ANON, -1, 0);
    if (region != MAP_FAILED) {
      if (((size_t)region + (aSize - 1)) & 0xffff800000000000) {
        // Mapping extends above the 47-bit boundary: discard it and retry
        // at the next hint.
        if (munmap(region, aSize)) {
          MOZ_ASSERT(errno == ENOMEM);
        }
        region = MAP_FAILED;
      }
    }
  }
  ret = region;
#  else
  // We don't use MAP_FIXED here, because it can cause the *replacement*
  // of existing mappings, and we only want to create new mappings.
  ret = mmap(
      aAddr, aSize,
      should_commit == ReserveAndCommit ? PROT_READ | PROT_WRITE : PROT_NONE,
      MAP_PRIVATE | MAP_ANON, -1, 0);
  MOZ_ASSERT(ret);
#  endif
  if (ret == MAP_FAILED) {
    ret = nullptr;
  }
  // NOTE: the else-if chain below is deliberately split across #ifdef; each
  // branch attaches to the `if (ret == MAP_FAILED)` above.
#  if defined(__ia64__) || \
      (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
  // If the allocated memory doesn't have its upper 17 bits clear, consider it
  // as out of memory.
  else if ((long long)ret & 0xffff800000000000) {
    munmap(ret, aSize);
    ret = nullptr;
  }
  // If the caller requested a specific memory location, verify that's what mmap
  // returned.
  else if (check_placement && ret != aAddr) {
#  else
  else if (aAddr && ret != aAddr) {
#  endif
    // We succeeded in mapping memory, but not in the right place.
    pages_unmap(ret, aSize);
    ret = nullptr;
  }
  if (ret) {
    // Label the mapping for tooling (e.g. /proc/<pid>/smaps naming).
    MozTagAnonymousMemory(ret, aSize, "jemalloc");
  }

#  if defined(__ia64__) || \
      (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
  MOZ_ASSERT(!ret || (!check_placement && ret) ||
             (check_placement && ret == aAddr));
#  else
  MOZ_ASSERT(!ret || (!aAddr && ret != aAddr) || (aAddr && ret == aAddr));
#  endif
  return ret;
}
    328 #endif
    329 
    330 // ***************************************************************************
    331 
// Decommit the physical pages backing [aAddr, aAddr+aSize), keeping the
// address range reserved. Subsequent access to the range faults.
void pages_decommit(void* aAddr, size_t aSize) {
#ifdef XP_WIN
  // The region starting at addr may have been allocated in multiple calls
  // to VirtualAlloc and recycled, so decommitting the entire region in one
  // go may not be valid. However, since we allocate at least a chunk at a
  // time, we may touch any region in chunksized increments.
  size_t pages_size = std::min(aSize, kChunkSize - GetChunkOffsetForPtr(aAddr));
  while (aSize > 0) {
    // This will cause Access Violation on read and write and thus act as a
    // guard page or region as well.
    if (!VirtualFree(aAddr, pages_size, MEM_DECOMMIT)) {
      MOZ_CRASH();
    }
    aAddr = (void*)((uintptr_t)aAddr + pages_size);
    aSize -= pages_size;
    // After the first (possibly partial) chunk, advance whole-chunk at a time.
    pages_size = std::min(aSize, kChunkSize);
  }
#else
  // On POSIX, re-mmap the range PROT_NONE over itself: MAP_FIXED atomically
  // replaces the old pages, discarding their contents and making access fault.
  if (mmap(aAddr, aSize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1,
           0) == MAP_FAILED) {
    // We'd like to report the OOM for our tooling, but we can't allocate
    // memory at this point, so avoid the use of printf.
    const char out_of_mappings[] =
        "[unhandlable oom] Failed to mmap, likely no more mappings "
        "available " __FILE__ " : " MOZ_STRINGIFY(__LINE__);
    if (errno == ENOMEM) {
#  ifndef ANDROID
      fputs(out_of_mappings, stderr);
      fflush(stderr);
#  endif
      MOZ_CRASH_ANNOTATE(out_of_mappings);
    }
    MOZ_REALLY_CRASH(__LINE__);
  }
  MozTagAnonymousMemory(aAddr, aSize, "jemalloc-decommitted");
#endif
}
    369 
    370 // Commit pages. Returns whether pages were committed.
// Commit pages. Returns whether pages were committed.
// Counterpart of pages_decommit: makes [aAddr, aAddr+aSize) read/write again.
[[nodiscard]] bool pages_commit(void* aAddr, size_t aSize) {
#ifdef XP_WIN
  // The region starting at addr may have been allocated in multiple calls
  // to VirtualAlloc and recycled, so committing the entire region in one
  // go may not be valid. However, since we allocate at least a chunk at a
  // time, we may touch any region in chunksized increments.
  size_t pages_size = std::min(aSize, kChunkSize - GetChunkOffsetForPtr(aAddr));
  while (aSize > 0) {
    // NOTE: increments committed before a failure stay committed.
    if (!MozVirtualAlloc(aAddr, pages_size, MEM_COMMIT, PAGE_READWRITE)) {
      return false;
    }
    aAddr = (void*)((uintptr_t)aAddr + pages_size);
    aSize -= pages_size;
    // After the first (possibly partial) chunk, advance whole-chunk at a time.
    pages_size = std::min(aSize, kChunkSize);
  }
#else
  // Re-mmap read/write over the range; MAP_FIXED atomically replaces the
  // PROT_NONE pages installed by pages_decommit with fresh zeroed pages.
  if (mmap(aAddr, aSize, PROT_READ | PROT_WRITE,
           MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == MAP_FAILED) {
    return false;
  }
  MozTagAnonymousMemory(aAddr, aSize, "jemalloc");
#endif
  return true;
}
    395 
    396 // Purge and release the pages in the chunk of length `length` at `addr` to
    397 // the OS.
    398 // Returns whether the pages are guaranteed to be full of zeroes when the
    399 // function returns.
    400 // The force_zero argument explicitly requests that the memory is guaranteed
    401 // to be full of zeroes when the function returns.
static bool pages_purge(void* addr, size_t length, bool force_zero) {
  // Decommitting hands the pages back to the OS; on recommit they come back
  // zero-filled (see pages_commit), so we can unconditionally report `true`
  // and force_zero needs no separate handling here.
  pages_decommit(addr, length);
  return true;
}
    406 
    407 // pages_trim, pages_mmap_aligned_slow and pages_mmap_aligned were
    408 // cherry-picked from upstream jemalloc 3.4.1 to fix Mozilla bug 956501.
    409 
// Given an over-sized mapping `addr` of `alloc_size` bytes, keep only the
// `size`-byte sub-region starting `leadsize` bytes in, releasing the excess
// before and after it. Returns the sub-region, or nullptr (Windows only)
// when the region could not be re-acquired.
static void* pages_trim(void* addr, size_t alloc_size, size_t leadsize,
                        size_t size, ShouldCommit should_commit) {
  void* ret = (void*)((uintptr_t)addr + leadsize);

  MOZ_ASSERT(alloc_size >= leadsize + size);
#ifdef XP_WIN
  {
    void* new_addr;

    // VirtualFree can only release whole allocations, so unmap everything
    // and re-map exactly the aligned sub-region. If the re-map doesn't land
    // at the expected address (e.g. the range was taken in the meantime),
    // give up and return nullptr.
    pages_unmap(addr, alloc_size);
    new_addr = pages_map(ret, size, should_commit);
    if (new_addr == ret) {
      return ret;
    }
    if (new_addr) {
      pages_unmap(new_addr, size);
    }
    return nullptr;
  }
#else
  {
    // munmap can unmap arbitrary page ranges, so simply trim both ends.
    size_t trailsize = alloc_size - leadsize - size;

    if (leadsize != 0) {
      pages_unmap(addr, leadsize);
    }
    if (trailsize != 0) {
      pages_unmap((void*)((uintptr_t)ret + size), trailsize);
    }
    return ret;
  }
#endif
}
    443 
    444 static void* pages_mmap_aligned_slow(size_t size, size_t alignment,
    445                                     ShouldCommit should_commit) {
    446  void *ret, *pages;
    447  size_t alloc_size, leadsize;
    448 
    449  alloc_size = size + alignment - gRealPageSize;
    450  // Beware size_t wrap-around.
    451  if (alloc_size < size) {
    452    return nullptr;
    453  }
    454  do {
    455    pages = pages_map(nullptr, alloc_size, should_commit);
    456    if (!pages) {
    457      return nullptr;
    458    }
    459    leadsize =
    460        ALIGNMENT_CEILING((uintptr_t)pages, alignment) - (uintptr_t)pages;
    461    ret = pages_trim(pages, alloc_size, leadsize, size, should_commit);
    462  } while (!ret);
    463 
    464  MOZ_ASSERT(ret);
    465  return ret;
    466 }
    467 
    468 void* pages_mmap_aligned(size_t size, size_t alignment,
    469                         ShouldCommit should_commit) {
    470  void* ret;
    471  size_t offset;
    472 
    473  // Ideally, there would be a way to specify alignment to mmap() (like
    474  // NetBSD has), but in the absence of such a feature, we have to work
    475  // hard to efficiently create aligned mappings. The reliable, but
    476  // slow method is to create a mapping that is over-sized, then trim the
    477  // excess. However, that always results in one or two calls to
    478  // pages_unmap().
    479  //
    480  // Optimistically try mapping precisely the right amount before falling
    481  // back to the slow method, with the expectation that the optimistic
    482  // approach works most of the time.
    483  ret = pages_map(nullptr, size, should_commit);
    484  if (!ret) {
    485    return nullptr;
    486  }
    487  offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
    488  if (offset != 0) {
    489    pages_unmap(ret, size);
    490    return pages_mmap_aligned_slow(size, alignment, should_commit);
    491  }
    492 
    493  MOZ_ASSERT(ret);
    494  return ret;
    495 }
    496 
// Radix tree mapping each live chunk's address to itself; chunk_alloc
// registers chunks here and chunk_dealloc removes them, so a lookup answers
// "is this pointer inside a chunk we allocated". Keyed on the address bits
// above the chunk size.
constinit AddressRadixTree<(sizeof(void*) << 3) - LOG2(kChunkSize)> gChunkRTree;

// Protects chunk-related data structures.
static Mutex chunks_mtx;

// Trees of chunks that were previously allocated (trees differ only in node
// ordering).  These are used when allocating chunks, in an attempt to re-use
// address space.  Depending on function, different tree orderings are needed,
// which is why there are two trees with the same contents.
static RedBlackTree<extent_node_t, ExtentTreeSzTrait> gChunksBySize
    MOZ_GUARDED_BY(chunks_mtx);
static RedBlackTree<extent_node_t, ExtentTreeTrait> gChunksByAddress
    MOZ_GUARDED_BY(chunks_mtx);

// The current amount of recycled bytes, updated atomically.
Atomic<size_t> gRecycledSize;
    513 
// One-time setup for chunk recycling: initializes the mutex that guards the
// chunk trees declared above.
void chunks_init() {
  // Initialize chunks data.
  chunks_mtx.Init();
}
    518 
#ifdef XP_WIN
// On Windows, calls to VirtualAlloc and VirtualFree must be matched, making it
// awkward to recycle allocations of varying sizes. Therefore we only allow
// recycling when the size equals the chunksize, unless deallocation is entirely
// disabled.
#  define CAN_RECYCLE(size) ((size) == kChunkSize)
#else
// On other platforms any chunk-multiple-sized region can be recycled.
#  define CAN_RECYCLE(size) true
#endif
    528 
    529 #ifdef MOZ_DEBUG
    530 void chunk_assert_zero(void* aPtr, size_t aSize) {
    531 // Only run this expensive check in a vigilant mode.
    532 #  ifdef MALLOC_DEBUG_VIGILANT
    533  size_t i;
    534  size_t* p = (size_t*)(uintptr_t)aPtr;
    535 
    536  for (i = 0; i < aSize / sizeof(size_t); i++) {
    537    MOZ_ASSERT(p[i] == 0);
    538  }
    539 #  endif
    540 }
    541 #endif
    542 
// Record the chunk [aChunk, aChunk+aSize) in the recycle trees, coalescing
// with adjacent recorded ranges when possible. The pages are purged first
// unless already known to be zeroed.
static void chunk_record(void* aChunk, size_t aSize, ChunkType aType) {
  extent_node_t key;

  if (aType != ZEROED_CHUNK) {
    if (pages_purge(aChunk, aSize, aType == HUGE_CHUNK)) {
      aType = ZEROED_CHUNK;
    }
  }

  // Allocate a node before acquiring chunks_mtx even though it might not
  // be needed, because TypedBaseAlloc::alloc() may cause a new base chunk to
  // be allocated, which could cause deadlock if chunks_mtx were already
  // held.
  UniqueBaseNode xnode(ExtentAlloc::alloc());
  // Use xprev to implement conditional deferred deallocation of prev.
  UniqueBaseNode xprev;

  // RAII deallocates xnode and xprev defined above after unlocking
  // in order to avoid potential dead-locks
  MutexAutoLock lock(chunks_mtx);
  key.mAddr = (void*)((uintptr_t)aChunk + aSize);
  extent_node_t* node = gChunksByAddress.SearchOrNext(&key);
  // Try to coalesce forward.
  if (node && node->mAddr == key.mAddr) {
    // Coalesce chunk with the following address range.  This does
    // not change the position within gChunksByAddress, so only
    // remove/insert from/into gChunksBySize.
    gChunksBySize.Remove(node);
    node->mAddr = aChunk;
    node->mSize += aSize;
    // A range built from mixed chunk types degrades to RECYCLED_CHUNK.
    if (node->mChunkType != aType) {
      node->mChunkType = RECYCLED_CHUNK;
    }
    gChunksBySize.Insert(node);
  } else {
    // Coalescing forward failed, so insert a new node.
    if (!xnode) {
      // TypedBaseAlloc::alloc() failed, which is an exceedingly
      // unlikely failure.  Leak chunk; its pages have
      // already been purged, so this is only a virtual
      // memory leak.
      return;
    }
    node = xnode.release();
    node->mAddr = aChunk;
    node->mSize = aSize;
    node->mChunkType = aType;
    gChunksByAddress.Insert(node);
    gChunksBySize.Insert(node);
  }

  // Try to coalesce backward.
  extent_node_t* prev = gChunksByAddress.Prev(node);
  if (prev && (void*)((uintptr_t)prev->mAddr + prev->mSize) == aChunk) {
    // Coalesce chunk with the previous address range.  This does
    // not change the position within gChunksByAddress, so only
    // remove/insert node from/into gChunksBySize.
    gChunksBySize.Remove(prev);
    gChunksByAddress.Remove(prev);

    gChunksBySize.Remove(node);
    node->mAddr = prev->mAddr;
    node->mSize += prev->mSize;
    if (node->mChunkType != prev->mChunkType) {
      node->mChunkType = RECYCLED_CHUNK;
    }
    gChunksBySize.Insert(node);

    // prev is now absorbed into node; free it after the lock drops (RAII).
    xprev.reset(prev);
  }

  gRecycledSize += aSize;
}
    616 
// Release a chunk: recycle it via chunk_record when permitted and under the
// recycle limit, otherwise unmap it back to the OS.
void chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType) {
  MOZ_ASSERT(aChunk);
  MOZ_ASSERT(GetChunkOffsetForPtr(aChunk) == 0);
  MOZ_ASSERT(aSize != 0);
  MOZ_ASSERT((aSize & kChunkSizeMask) == 0);

  // Forget the chunk in the pointer-lookup radix tree.
  gChunkRTree.Unset(aChunk);

  if (CAN_RECYCLE(aSize)) {
    size_t recycled_so_far = gRecycledSize;
    // In case some race condition put us above the limit.
    if (recycled_so_far < gRecycleLimit) {
      size_t recycle_remaining = gRecycleLimit - recycled_so_far;
      size_t to_recycle;
      if (aSize > recycle_remaining) {
#ifndef XP_WIN
        to_recycle = recycle_remaining;
        // Drop pages that would overflow the recycle limit
        pages_trim(aChunk, aSize, 0, to_recycle, ReserveAndCommit);
#else
        // On windows pages_trim unallocates and reallocates the whole
        // chunk, there's no point doing that during recycling so instead we
        // fail.
        pages_unmap(aChunk, aSize);
        return;
#endif
      } else {
        to_recycle = aSize;
      }
      chunk_record(aChunk, to_recycle, aType);
      return;
    }
  }

  // Not recyclable, or recycle cache full: return the memory to the OS.
  pages_unmap(aChunk, aSize);
}
    653 
// Try to satisfy an aSize/aAlignment chunk request from the recycle trees.
// Returns nullptr when no recorded range is large enough or on internal
// failure. On success the returned memory is committed and zeroed.
static void* chunk_recycle(size_t aSize, size_t aAlignment) {
  extent_node_t key;

  // Over-ask so that an aligned sub-range is guaranteed to fit.
  size_t alloc_size = aSize + aAlignment - kChunkSize;
  // Beware size_t wrap-around.
  if (alloc_size < aSize) {
    return nullptr;
  }
  key.mAddr = nullptr;
  key.mSize = alloc_size;
  chunks_mtx.Lock();
  extent_node_t* node = gChunksBySize.SearchOrNext(&key);
  if (!node) {
    chunks_mtx.Unlock();
    return nullptr;
  }
  // Carve an aligned aSize range out of the found node, possibly leaving
  // lead and/or trail remainders to re-insert.
  size_t leadsize = ALIGNMENT_CEILING((uintptr_t)node->mAddr, aAlignment) -
                    (uintptr_t)node->mAddr;
  MOZ_ASSERT(node->mSize >= leadsize + aSize);
  size_t trailsize = node->mSize - leadsize - aSize;
  void* ret = (void*)((uintptr_t)node->mAddr + leadsize);

  // All recycled chunks are zeroed (because they're purged) before being
  // recycled.
  MOZ_ASSERT(node->mChunkType == ZEROED_CHUNK);

  // Remove node from the tree.
  gChunksBySize.Remove(node);
  gChunksByAddress.Remove(node);
  if (leadsize != 0) {
    // Insert the leading space as a smaller chunk.
    node->mSize = leadsize;
    gChunksBySize.Insert(node);
    gChunksByAddress.Insert(node);
    node = nullptr;
  }
  if (trailsize != 0) {
    // Insert the trailing space as a smaller chunk.
    if (!node) {
      // An additional node is required, but
      // TypedBaseAlloc::alloc() can cause a new base chunk to be
      // allocated.  Drop chunks_mtx in order to avoid
      // deadlock, and if node allocation fails, deallocate
      // the result before returning an error.
      chunks_mtx.Unlock();
      node = ExtentAlloc::alloc();
      if (!node) {
        chunk_dealloc(ret, aSize, ZEROED_CHUNK);
        return nullptr;
      }
      chunks_mtx.Lock();
    }
    node->mAddr = (void*)((uintptr_t)(ret) + aSize);
    node->mSize = trailsize;
    node->mChunkType = ZEROED_CHUNK;
    gChunksBySize.Insert(node);
    gChunksByAddress.Insert(node);
    node = nullptr;
  }

  // Account for the bytes leaving the recycle pool.
  gRecycledSize -= aSize;

  chunks_mtx.Unlock();

  // If the spare node wasn't consumed above, return it to the allocator.
  if (node) {
    ExtentAlloc::dealloc(node);
  }
  // The range was purged (decommitted) when recorded; recommit before use.
  if (!pages_commit(ret, aSize)) {
    return nullptr;
  }

  return ret;
}
    727 
// Allocates `aSize` bytes of system memory aligned for `aAlignment`.
// `aBase` indicates whether the memory will be used for the base allocator
// (e.g. base_alloc); base allocations bypass the recycled-chunk cache and are
// not registered in the chunk radix tree.
    734 void* chunk_alloc(size_t aSize, size_t aAlignment, bool aBase) {
    735  void* ret = nullptr;
    736 
    737  MOZ_ASSERT(aSize != 0);
    738  MOZ_ASSERT((aSize & kChunkSizeMask) == 0);
    739  MOZ_ASSERT(aAlignment != 0);
    740  MOZ_ASSERT((aAlignment & kChunkSizeMask) == 0);
    741 
    742  // Base allocations can't be fulfilled by recycling because of
    743  // possible deadlock or infinite recursion.
    744  if (CAN_RECYCLE(aSize) && !aBase) {
    745    ret = chunk_recycle(aSize, aAlignment);
    746  }
    747  if (!ret) {
    748    ret = pages_mmap_aligned(aSize, aAlignment, ReserveAndCommit);
    749  }
    750  if (ret && !aBase) {
    751    if (!gChunkRTree.Set(ret, ret)) {
    752      chunk_dealloc(ret, aSize, UNKNOWN_CHUNK);
    753      return nullptr;
    754    }
    755  }
    756 
    757  MOZ_ASSERT(GetChunkOffsetForPtr(ret) == 0);
    758  return ret;
    759 }
    760 
// This would be all alone in an Extent.cpp file, instead put it here where
// it is used.
// Definition of ExtentAlloc's first-free pointer; starts out empty.
template <>
extent_node_t* ExtentAlloc::sFirstFree = nullptr;
    765 
// Construct a chunk owned by aArena. mDirtyRunHint starts at
// gChunkHeaderNumPages — presumably the first page past the chunk header;
// NOTE(review): confirm the hint's intended semantics against arena code.
arena_chunk_t::arena_chunk_t(arena_t* aArena)
    : mArena(aArena), mDirtyRunHint(gChunkHeaderNumPages) {}
    768 
// The chunk is empty iff the page-map entry for the first usable page (the
// one right after the header pages) has CHUNK_MAP_ALLOCATED clear and its
// size bits equal gMaxLargeClass — i.e. one unallocated run spanning the
// whole usable chunk.
bool arena_chunk_t::IsEmpty() {
  return (mPageMap[gChunkHeaderNumPages].bits &
          (~gPageSizeMask | CHUNK_MAP_ALLOCATED)) == gMaxLargeClass;
}