tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

mozjemalloc.cpp (182507B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 // Portions of this file were originally under the following license:
      8 //
      9 // Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
     10 // All rights reserved.
     11 // Copyright (C) 2007-2017 Mozilla Foundation.
     12 //
     13 // Redistribution and use in source and binary forms, with or without
     14 // modification, are permitted provided that the following conditions
     15 // are met:
     16 // 1. Redistributions of source code must retain the above copyright
     17 //    notice(s), this list of conditions and the following disclaimer as
     18 //    the first lines of this file unmodified other than the possible
     19 //    addition of one or more copyright notices.
     20 // 2. Redistributions in binary form must reproduce the above copyright
     21 //    notice(s), this list of conditions and the following disclaimer in
     22 //    the documentation and/or other materials provided with the
     23 //    distribution.
     24 //
     25 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
     26 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     27 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28 // PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
     29 // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30 // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31 // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
     32 // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     33 // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
     34 // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
     35 // EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     36 //
     37 // *****************************************************************************
     38 //
     39 // This allocator implementation is designed to provide scalable performance
     40 // for multi-threaded programs on multi-processor systems.  The following
     41 // features are included for this purpose:
     42 //
     43 //   + Multiple arenas are used if there are multiple CPUs, which reduces lock
     44 //     contention and cache sloshing.
     45 //
     46 //   + Cache line sharing between arenas is avoided for internal data
     47 //     structures.
     48 //
     49 //   + Memory is managed in chunks and runs (chunks can be split into runs),
     50 //     rather than as individual pages.  This provides a constant-time
     51 //     mechanism for associating allocations with particular arenas.
     52 //
     53 // Allocation requests are rounded up to the nearest size class, and no record
     54 // of the original request size is maintained.  Allocations are broken into
     55 // categories according to size class.  Assuming runtime defaults, the size
     56 // classes in each category are as follows (for x86, x86_64 and Apple Silicon):
     57 //
     58 //   |=========================================================|
     59 //   | Category | Subcategory    |     x86 |  x86_64 | Mac ARM |
     60 //   |---------------------------+---------+---------+---------|
     61 //   | Word size                 |  32 bit |  64 bit |  64 bit |
     62 //   | Page size                 |    4 Kb |    4 Kb |   16 Kb |
     63 //   |=========================================================|
     64 //   | Small    | Quantum-spaced |      16 |      16 |      16 |
     65 //   |          |                |      32 |      32 |      32 |
     66 //   |          |                |      48 |      48 |      48 |
     67 //   |          |                |     ... |     ... |     ... |
     68 //   |          |                |     480 |     480 |     480 |
     69 //   |          |                |     496 |     496 |     496 |
     70 //   |          |----------------+---------|---------|---------|
     71 //   |          | Quantum-wide-  |     512 |     512 |     512 |
     72 //   |          | spaced         |     768 |     768 |     768 |
     73 //   |          |                |     ... |     ... |     ... |
     74 //   |          |                |    3584 |    3584 |    3584 |
     75 //   |          |                |    3840 |    3840 |    3840 |
     76 //   |          |----------------+---------|---------|---------|
     77 //   |          | Sub-page       |       - |       - |    4096 |
     78 //   |          |                |       - |       - |    8 kB |
     79 //   |=========================================================|
     80 //   | Large                     |    4 kB |    4 kB |       - |
     81 //   |                           |    8 kB |    8 kB |       - |
     82 //   |                           |   12 kB |   12 kB |       - |
     83 //   |                           |   16 kB |   16 kB |   16 kB |
     84 //   |                           |     ... |     ... |       - |
     85 //   |                           |   32 kB |   32 kB |   32 kB |
     86 //   |                           |     ... |     ... |     ... |
     87 //   |                           | 1008 kB | 1008 kB | 1008 kB |
     88 //   |                           | 1012 kB | 1012 kB |       - |
     89 //   |                           | 1016 kB | 1016 kB |       - |
     90 //   |                           | 1020 kB | 1020 kB |       - |
     91 //   |=========================================================|
     92 //   | Huge                      |    1 MB |    1 MB |    1 MB |
     93 //   |                           |    2 MB |    2 MB |    2 MB |
     94 //   |                           |    3 MB |    3 MB |    3 MB |
     95 //   |                           |     ... |     ... |     ... |
     96 //   |=========================================================|
     97 //
     98 // Legend:
     99 //   n:    Size class exists for this platform.
    100 //   -:    This size class doesn't exist for this platform.
    101 //   ...:  Size classes follow a pattern here.
    102 //
    103 // A different mechanism is used for each category:
    104 //
    105 //   Small : Each size class is segregated into its own set of runs.  Each run
    106 //           maintains a bitmap of which regions are free/allocated.
    107 //
    108 //   Large : Each allocation is backed by a dedicated run.  Metadata are stored
    109 //           in the associated arena chunk header maps.
    110 //
    111 //   Huge : Each allocation is backed by a dedicated contiguous set of chunks.
    112 //          Metadata are stored in a separate red-black tree.
    113 //
    114 // *****************************************************************************
    115 
    116 #include "mozmemory_wrap.h"
    117 #include "mozjemalloc.h"
    118 #include "mozjemalloc_types.h"
    119 #include "mozjemalloc_profiling.h"
    120 
    121 #include <cstring>
    122 #include <cerrno>
    123 #include <chrono>
    124 #ifdef XP_WIN
    125 #  include <io.h>
    126 #  include <windows.h>
    127 #else
    128 #  include <sys/mman.h>
    129 #  include <unistd.h>
    130 #endif
    131 #ifdef XP_DARWIN
    132 #  include <libkern/OSAtomic.h>
    133 #  include <mach/mach_init.h>
    134 #  include <mach/vm_map.h>
    135 #endif
    136 
    137 #include "mozilla/Atomics.h"
    138 #include "mozilla/Alignment.h"
    139 #include "mozilla/Assertions.h"
    140 #include "mozilla/CheckedInt.h"
    141 #include "mozilla/DebugOnly.h"
    142 #include "mozilla/DoublyLinkedList.h"
    143 #include "mozilla/HelperMacros.h"
    144 #include "mozilla/Likely.h"
    145 #include "mozilla/Literals.h"
    146 #include "mozilla/MathAlgorithms.h"
    147 #include "mozilla/RandomNum.h"
    148 #include "mozilla/RefPtr.h"
    149 // Note: MozTaggedAnonymousMmap() could call an LD_PRELOADed mmap
    150 // instead of the one defined here; use only MozTagAnonymousMemory().
    151 #include "mozilla/TaggedAnonymousMemory.h"
    152 #include "mozilla/ThreadLocal.h"
    153 #include "mozilla/XorShift128PlusRNG.h"
    154 #include "mozilla/fallible.h"
    155 #include "RadixTree.h"
    156 #include "BaseAlloc.h"
    157 #include "Chunk.h"
    158 #include "Constants.h"
    159 #include "Extent.h"
    160 #include "Globals.h"
    161 #include "Mutex.h"
    162 #include "PHC.h"
    163 #include "RedBlackTree.h"
    164 #include "Utils.h"
    165 #include "Zero.h"
    166 
    167 #if defined(XP_WIN)
    168 #  include "mozmemory_stall.h"
    169 #endif
    170 
    171 using namespace mozilla;
    172 
#ifdef MOZJEMALLOC_PROFILING_CALLBACKS
// MallocProfilerCallbacks is refcounted so that one thread cannot destroy it
// while another thread accesses it.  This means that clearing this value or
// otherwise dropping a reference to it must not be done while holding an
// arena's lock.
constinit static RefPtr<MallocProfilerCallbacks> sCallbacks;
#endif

// ***************************************************************************
// MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
#if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
#  error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
#endif

// Set to true once the allocator has been initialized.
#if defined(_MSC_VER) && !defined(__clang__)
// MSVC may create a static initializer for an Atomic<bool>, which may actually
// run after `malloc_init` has been called once, which triggers multiple
// initializations.
// We work around the problem by not using an Atomic<bool> at all. There is a
// theoretical problem with using `malloc_initialized` non-atomically, but
// practically, this is only true if `malloc_init` is never called before
// threads are created.
static bool malloc_initialized;
#else
// We can rely on Relaxed here because this variable is only ever set when
// holding gInitLock. A thread that still sees it false while another sets it
// true will enter the same lock, synchronize with the former and check the
// flag again under the lock.
static Atomic<bool, MemoryOrdering::Relaxed> malloc_initialized;
#endif

// This lock must be held while bootstrapping us.
// NOTE(review): per the comment above, gInitLock serializes the writes to
// malloc_initialized; confirm malloc_init takes it (definition not visible
// in this chunk).
constinit StaticMutex gInitLock MOZ_UNANNOTATED;
    207 
    208 // ***************************************************************************
    209 // Statistics data structures.
    210 
// Per-arena allocation statistics.  Writes require the owning arena's lock
// (see arena_t::mStats, which is MOZ_GUARDED_BY(mLock)); some fields are read
// without the lock when slightly-inconsistent snapshots are acceptable.
struct arena_stats_t {
  // Number of bytes currently mapped.
  size_t mapped = 0;

  // Current number of committed pages (non madvised/decommitted)
  size_t committed = 0;

  // Per-size-category statistics: bytes currently allocated for small
  // (bin-backed) allocations.
  size_t allocated_small = 0;

  // Bytes currently allocated for large (dedicated-run) allocations.
  size_t allocated_large = 0;

  // The number of "memory operations" aka mallocs/frees.
  uint64_t operations = 0;
};
    226 
    227 // Describe size classes to which allocations are rounded up to.
    228 // TODO: add large and huge types when the arena allocation code
    229 // changes in a way that allows it to be beneficial.
    230 class SizeClass {
    231 public:
    232  enum ClassType {
    233    Quantum,
    234    QuantumWide,
    235    SubPage,
    236    Large,
    237  };
    238 
    239  explicit inline SizeClass(size_t aSize) {
    240    // We can skip an extra condition here if aSize > 0 and kQuantum >=
    241    // kMinQuantumClass.
    242    MOZ_ASSERT(aSize > 0);
    243    static_assert(kQuantum >= kMinQuantumClass);
    244 
    245    if (aSize <= kMaxQuantumClass) {
    246      mType = Quantum;
    247      mSize = QUANTUM_CEILING(aSize);
    248    } else if (aSize <= kMaxQuantumWideClass) {
    249      mType = QuantumWide;
    250      mSize = QUANTUM_WIDE_CEILING(aSize);
    251    } else if (aSize <= gMaxSubPageClass) {
    252      mType = SubPage;
    253      mSize = SUBPAGE_CEILING(aSize);
    254    } else if (aSize <= gMaxLargeClass) {
    255      mType = Large;
    256      mSize = PAGE_CEILING(aSize);
    257    } else {
    258      MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Invalid size");
    259    }
    260  }
    261 
    262  SizeClass& operator=(const SizeClass& aOther) = default;
    263 
    264  bool operator==(const SizeClass& aOther) { return aOther.mSize == mSize; }
    265 
    266  size_t Size() { return mSize; }
    267 
    268  ClassType Type() { return mType; }
    269 
    270  SizeClass Next() { return SizeClass(mSize + 1); }
    271 
    272 private:
    273  ClassType mType;
    274  size_t mSize;
    275 };
    276 
    277 // ***************************************************************************
    278 // Arena data structures.
    279 
    280 struct arena_bin_t;
    281 
// Accessor used by RedBlackTree traits to reach the intrusive tree node
// embedded in an arena_chunk_map_t.
struct ArenaChunkMapLink {
  static RedBlackTreeNode<arena_chunk_map_t>& GetTreeNode(
      arena_chunk_map_t* aThis) {
    return aThis->link;
  }
};
    288 
    289 struct ArenaAvailTreeTrait : public ArenaChunkMapLink {
    290  static inline Order Compare(arena_chunk_map_t* aNode,
    291                              arena_chunk_map_t* aOther) {
    292    size_t size1 = aNode->bits & ~gPageSizeMask;
    293    size_t size2 = aOther->bits & ~gPageSizeMask;
    294    Order ret = CompareInt(size1, size2);
    295    return (ret != Order::eEqual)
    296               ? ret
    297               : CompareAddr((aNode->bits & CHUNK_MAP_KEY) ? nullptr : aNode,
    298                             aOther);
    299  }
    300 };
    301 
    302 namespace mozilla {
    303 
    304 struct DirtyChunkListTrait {
    305  static DoublyLinkedListElement<arena_chunk_t>& Get(arena_chunk_t* aThis) {
    306    return aThis->mChunksDirtyElim;
    307  }
    308  static const DoublyLinkedListElement<arena_chunk_t>& Get(
    309      const arena_chunk_t* aThis) {
    310    return aThis->mChunksDirtyElim;
    311  }
    312 };
    313 
#ifdef MALLOC_DOUBLE_PURGE
// DoublyLinkedList trait exposing the madvised-chunk list element embedded in
// arena_chunk_t; used by arena_t::mChunksMAdvised.
struct MadvisedChunkListTrait {
  static DoublyLinkedListElement<arena_chunk_t>& Get(arena_chunk_t* aThis) {
    return aThis->mChunksMavisedElim;
  }
  static const DoublyLinkedListElement<arena_chunk_t>& Get(
      const arena_chunk_t* aThis) {
    return aThis->mChunksMavisedElim;
  }
};
#endif
    325 }  // namespace mozilla
    326 
// Disposition for purging an arena's dirty pages.
enum class purge_action_t {
  // Nothing to do.
  None,
  // Perform the purge immediately.
  PurgeNow,
  // Defer the purge — presumably by queuing the arena (see
  // arena_t::mPurgeListElem / gArenas.mOutstandingPurges); confirm at the
  // site that returns this value.
  Queue,
};
    332 
// Header for a run of pages used for small allocations.  The run is carved
// into equally-sized regions; the trailing bitmask tracks which regions are
// free.  Field order is #ifdef-dependent purely to avoid padding — see the
// comment below.
struct arena_run_t {
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
  // Sentinel used to detect corrupted/invalid run headers in diagnostics.
  uint32_t mMagic;
#  define ARENA_RUN_MAGIC 0x384adf93

  // On 64-bit platforms, having the arena_bin_t pointer following
  // the mMagic field means there's padding between both fields, making
  // the run header larger than necessary.
  // But when MOZ_DIAGNOSTIC_ASSERT_ENABLED is not set, starting the
  // header with this field followed by the arena_bin_t pointer yields
  // the same padding. We do want the mMagic field to appear first, so
  // depending whether MOZ_DIAGNOSTIC_ASSERT_ENABLED is set or not, we
  // move some field to avoid padding.

  // Number of free regions in run.
  unsigned mNumFree;
#endif

  // Used by arena_bin_t::mNonFullRuns.
  DoublyLinkedListElement<arena_run_t> mRunListElem;

  // Bin this run is associated with.
  arena_bin_t* mBin;

  // Index of first element that might have a free region.
  unsigned mRegionsMinElement;

#if !defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
  // Number of free regions in run.
  unsigned mNumFree;
#endif

  // Bitmask of in-use regions (0: in use, 1: free).
  unsigned mRegionsMask[];  // Dynamically sized.
};
    368 
    369 namespace mozilla {
    370 
    371 template <>
    372 struct GetDoublyLinkedListElement<arena_run_t> {
    373  static DoublyLinkedListElement<arena_run_t>& Get(arena_run_t* aThis) {
    374    return aThis->mRunListElem;
    375  }
    376  static const DoublyLinkedListElement<arena_run_t>& Get(
    377      const arena_run_t* aThis) {
    378    return aThis->mRunListElem;
    379  }
    380 };
    381 
    382 }  // namespace mozilla
    383 
// Book-keeping for one small-allocation size class within an arena: the set
// of runs for that class and precomputed per-run geometry.
struct arena_bin_t {
  // We use a LIFO ("last-in-first-out") policy to refill non-full runs.
  //
  // This has the following reasons:
  // 1. It is cheap, as all our non-full-runs' book-keeping is O(1), no
  //    tree-balancing or walking is needed.
  // 2. It also helps to increase the probability for CPU cache hits for the
  //    book-keeping and the reused slots themselves, as the same memory was
  //    most recently touched during free, especially when used from the same
  //    core (or via the same shared cache, depending on the architecture).
  DoublyLinkedList<arena_run_t> mNonFullRuns;

  // Bin's size class (the rounded-up allocation size served by this bin).
  size_t mSizeClass;

  // Total number of regions in a run for this bin's size class.
  uint32_t mRunNumRegions;

  // Number of elements in a run's mRegionsMask for this bin's size class.
  uint32_t mRunNumRegionsMask;

  // Offset of first region in a run for this bin's size class.
  uint32_t mRunFirstRegionOffset;

  // Current number of runs in this bin, full or otherwise.
  uint32_t mNumRuns = 0;

  // A constant for fast division by size class.  This value is 16 bits wide so
  // it is placed last.
  FastDivisor<uint16_t> mSizeDivisor;

  // Total number of pages in a run for this bin's size class.
  uint8_t mRunSizePages;

  // Amount of overhead runs are allowed to have (used when choosing run
  // sizes; the relaxed bound is the fallback).
  static constexpr double kRunOverhead = 1.6_percent;
  static constexpr double kRunRelaxedOverhead = 2.4_percent;

  // Initialize a bin for the given size class.
  // The generated run sizes, for a page size of 4 KiB, are:
  //   size|run       size|run       size|run       size|run
  //  class|size     class|size     class|size     class|size
  //     4   4 KiB      8   4 KiB     16   4 KiB     32   4 KiB
  //    48   4 KiB     64   4 KiB     80   4 KiB     96   4 KiB
  //   112   4 KiB    128   8 KiB    144   4 KiB    160   8 KiB
  //   176   4 KiB    192   4 KiB    208   8 KiB    224   4 KiB
  //   240   8 KiB    256  16 KiB    272   8 KiB    288   4 KiB
  //   304  12 KiB    320  12 KiB    336   4 KiB    352   8 KiB
  //   368   4 KiB    384   8 KiB    400  20 KiB    416  16 KiB
  //   432  12 KiB    448   4 KiB    464  16 KiB    480   8 KiB
  //   496  20 KiB    512  32 KiB    768  16 KiB   1024  64 KiB
  //  1280  24 KiB   1536  32 KiB   1792  16 KiB   2048 128 KiB
  //  2304  16 KiB   2560  48 KiB   2816  36 KiB   3072  64 KiB
  //  3328  36 KiB   3584  32 KiB   3840  64 KiB
  explicit arena_bin_t(SizeClass aSizeClass);
};
    440 
// We try to keep the above structure aligned with common cache lines sizes,
// often that's 64 bytes on x86 and ARM, we don't make assumptions for other
// architectures.
#if defined(__x86_64__) || defined(__aarch64__)
// On 64bit platforms this structure is often 48 bytes
// long, which means every other array element will be properly aligned.
static_assert(sizeof(arena_bin_t) == 48);
#elif defined(__x86__) || defined(__arm__)
// NOTE(review): __x86__ is not a macro GCC/Clang predefine for 32-bit x86
// (that is usually __i386__), so this branch may never fire there — confirm
// the build system defines __x86__, otherwise the 32-bit size is unchecked.
static_assert(sizeof(arena_bin_t) == 32);
#endif
    451 
    452 // We cannot instantiate
    453 // Atomic<std::chrono::time_point<std::chrono::steady_clock>>
    454 // so we explicitly force timestamps to be uint64_t in ns.
    455 uint64_t GetTimestampNS() {
    456  // On most if not all systems we care about the conversion to ns is a no-op,
    457  // so we prefer to keep the precision here for performance, but let's be
    458  // explicit about it.
    459  return std::chrono::floor<std::chrono::nanoseconds>(
    460             std::chrono::steady_clock::now())
    461      .time_since_epoch()
    462      .count();
    463 }
    464 
    465 enum PurgeCondition { PurgeIfThreshold, PurgeUnconditional };
    466 
    467 struct arena_t {
    468 #if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
    469 #  define ARENA_MAGIC 0x947d3d24
    470  uint32_t mMagic = ARENA_MAGIC;
    471 #endif
    472 
    473  // Linkage for the tree of arenas by id.
    474  // This just provides the memory to be used by the collection tree
    475  // and thus needs no arena_t::mLock.
    476  RedBlackTreeNode<arena_t> mLink;
    477 
    478  // Arena id, that we keep away from the beginning of the struct so that
    479  // free list pointers in TypedBaseAlloc<arena_t> don't overflow in it,
    480  // and it keeps the value it had after the destructor.
    481  arena_id_t mId = 0;
    482 
    483  // Operations on this arena require that lock be locked. The MaybeMutex
    484  // class will elude locking if the arena is accessed from a single thread
    485  // only (currently only the main thread can be used like this).
    486  // Can be acquired while holding gArenas.mLock, but must not be acquired or
    487  // held while holding or acquiring gArenas.mPurgeListLock.
    488  MaybeMutex mLock MOZ_UNANNOTATED;
    489 
    490  // The lock is required to write to fields of mStats, but it is not needed to
    491  // read them, so long as inconsistents reads are okay (fields might not make
    492  // sense together).
    493  arena_stats_t mStats MOZ_GUARDED_BY(mLock);
    494 
    495  // We can read the allocated counts from mStats without a lock:
    496  size_t AllocatedBytes() const MOZ_NO_THREAD_SAFETY_ANALYSIS {
    497    return mStats.allocated_small + mStats.allocated_large;
    498  }
    499 
    500  // We can read the operations field from mStats without a lock:
    501  uint64_t Operations() const MOZ_NO_THREAD_SAFETY_ANALYSIS {
    502    return mStats.operations;
    503  }
    504 
    505 private:
    506  // Queue of dirty-page-containing chunks this arena manages.  Generally it is
    507  // operated in FIFO order, chunks are purged from the beginning of the list
    508  // and newly-dirtied chunks are placed at the end.  We assume that this makes
    509  // finding larger runs of dirty pages easier, it probably doesn't affect the
    510  // chance that a new allocation has a page fault since that is controlled by
    511  // the order of mAvailRuns.
    512  DoublyLinkedList<arena_chunk_t, DirtyChunkListTrait> mChunksDirty
    513      MOZ_GUARDED_BY(mLock);
    514 
    515 #ifdef MALLOC_DOUBLE_PURGE
    516  // Head of a linked list of MADV_FREE'd-page-containing chunks this
    517  // arena manages.
    518  DoublyLinkedList<arena_chunk_t, MadvisedChunkListTrait> mChunksMAdvised
    519      MOZ_GUARDED_BY(mLock);
    520 #endif
    521 
    522  // In order to avoid rapid chunk allocation/deallocation when an arena
    523  // oscillates right on the cusp of needing a new chunk, cache the most
    524  // recently freed chunk.  The spare is left in the arena's chunk trees
    525  // until it is deleted.
    526  //
    527  // There is one spare chunk per arena, rather than one spare total, in
    528  // order to avoid interactions between multiple threads that could make
    529  // a single spare inadequate.
    530  arena_chunk_t* mSpare MOZ_GUARDED_BY(mLock) = nullptr;
    531 
    532  // A per-arena opt-in to randomize the offset of small allocations
    533  // Needs no lock, read-only.
    534  bool mRandomizeSmallAllocations;
    535 
    536  // A pseudorandom number generator. Initially null, it gets initialized
    537  // on first use to avoid recursive malloc initialization (e.g. on OSX
    538  // arc4random allocates memory).
    539  mozilla::non_crypto::XorShift128PlusRNG* mPRNG MOZ_GUARDED_BY(mLock) =
    540      nullptr;
    541  bool mIsPRNGInitializing MOZ_GUARDED_BY(mLock) = false;
    542 
    543 public:
    544  // Whether this is a private arena. Multiple public arenas are just a
    545  // performance optimization and not a safety feature.
    546  //
    547  // Since, for example, we don't want thread-local arenas to grow too much, we
    548  // use the default arena for bigger allocations. We use this member to allow
    549  // realloc() to switch out of our arena if needed (which is not allowed for
    550  // private arenas for security).
    551  // Needs no lock, read-only.
    552  bool mIsPrivate;
    553 
    554  // Current count of pages within unused runs that are potentially
    555  // dirty, and for which madvise(... MADV_FREE) has not been called.  By
    556  // tracking this, we can institute a limit on how much dirty unused
    557  // memory is mapped for each arena.
    558  size_t mNumDirty MOZ_GUARDED_BY(mLock) = 0;
    559 
    560  // Precalculated value for faster checks.
    561  size_t mMaxDirty MOZ_GUARDED_BY(mLock);
    562 
    563  // The current number of pages that are available without a system call (but
    564  // probably a page fault).
    565  size_t mNumMAdvised MOZ_GUARDED_BY(mLock) = 0;
    566  size_t mNumFresh MOZ_GUARDED_BY(mLock) = 0;
    567 
    568  // Maximum value allowed for mNumDirty.
    569  // Needs no lock, read-only.
    570  size_t mMaxDirtyBase;
    571 
    572  // Needs no lock, read-only.
    573  int32_t mMaxDirtyIncreaseOverride = 0;
    574  int32_t mMaxDirtyDecreaseOverride = 0;
    575 
    576  // The link to gArenas.mOutstandingPurges.
    577  // Note that this must only be accessed while holding gArenas.mPurgeListLock
    578  // (but not arena_t.mLock !) through gArenas.mOutstandingPurges.
    579  DoublyLinkedListElement<arena_t> mPurgeListElem;
    580 
    581  // A "significant reuse" is when a dirty page is used for a new allocation,
    582  // it has the CHUNK_MAP_DIRTY bit cleared and CHUNK_MAP_ALLOCATED set.
    583  //
    584  // Timestamp of the last time we saw a significant reuse (in ns).
    585  // Note that this variable is written very often from many threads and read
    586  // only sparsely on the main thread, but when we read it we need to see the
    587  // chronologically latest write asap (so we cannot use Relaxed).
    588  Atomic<uint64_t> mLastSignificantReuseNS;
    589 
    590 public:
    591  // A flag that indicates if arena will be Purge()'d.
    592  //
    593  // It is set either when a thread commits to adding it to mOutstandingPurges
    594  // or when imitating a Purge.  Cleared only by Purge when we know we are
    595  // completely done.  This is used to avoid accessing the list (and list lock)
    596  // on every call to ShouldStartPurge() and to avoid deleting arenas that
    597  // another thread is purging.
    598  bool mIsPurgePending MOZ_GUARDED_BY(mLock) = false;
    599 
    600  // A mirror of ArenaCollection::mIsDeferredPurgeEnabled, here only to
    601  // optimize memory reads in ShouldStartPurge().
    602  bool mIsDeferredPurgeEnabled MOZ_GUARDED_BY(mLock);
    603 
    604  // True if the arena is in the process of being destroyed, and needs to be
    605  // released after a concurrent purge completes.
    606  bool mMustDeleteAfterPurge MOZ_GUARDED_BY(mLock) = false;
    607 
    608  // mLabel describes the label for the firefox profiler.  It's stored in a
    609  // fixed size area including a null terminating byte.  The actual maximum
    610  // length of the string is one less than LABEL_MAX_CAPACITY;
    611  static constexpr size_t LABEL_MAX_CAPACITY = 128;
    612  char mLabel[LABEL_MAX_CAPACITY] = {};
    613 
    614 private:
    615  // Size/address-ordered tree of this arena's available runs.  This tree
    616  // is used for first-best-fit run allocation.
    617  RedBlackTree<arena_chunk_map_t, ArenaAvailTreeTrait> mRunsAvail
    618      MOZ_GUARDED_BY(mLock);
    619 
    620 public:
    621  // mBins is used to store rings of free regions of the following sizes,
    622  // assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
    623  //
    624  //  | mBins[i] | size |
    625  //  +----------+------+
    626  //  |       0  |    2 |
    627  //  |       1  |    4 |
    628  //  |       2  |    8 |
    629  //  +----------+------+
    630  //  |       3  |   16 |
    631  //  |       4  |   32 |
    632  //  |       5  |   48 |
    633  //  |       6  |   64 |
    634  //  |          :      :
    635  //  |          :      :
    636  //  |      33  |  496 |
    637  //  |      34  |  512 |
    638  //  +----------+------+
    639  //  |      35  |  768 |
    640  //  |      36  | 1024 |
    641  //  |          :      :
    642  //  |          :      :
    643  //  |      46  | 3584 |
    644  //  |      47  | 3840 |
    645  //  +----------+------+
    646  arena_bin_t mBins[] MOZ_GUARDED_BY(mLock);  // Dynamically sized.
    647 
    648  explicit arena_t(arena_params_t* aParams, bool aIsPrivate);
    649  ~arena_t();
    650 
    651  void ResetSmallAllocRandomization();
    652 
    653  void InitPRNG() MOZ_REQUIRES(mLock);
    654 
    655 private:
    656  void InitChunk(arena_chunk_t* aChunk, size_t aMinCommittedPages)
    657      MOZ_REQUIRES(mLock);
    658 
    659  // Remove the chunk from the arena.  This removes it from all the page counts.
    660  // It assumes its run has already been removed and lets the caller clear
    661  // mSpare as necessary.
    662  bool RemoveChunk(arena_chunk_t* aChunk) MOZ_REQUIRES(mLock);
    663 
    664  // This may return a chunk that should be destroyed with chunk_dealloc outside
    665  // of the arena lock.  It is not the same chunk as was passed in (since that
    666  // chunk now becomes mSpare).
    667  [[nodiscard]] arena_chunk_t* DemoteChunkToSpare(arena_chunk_t* aChunk)
    668      MOZ_REQUIRES(mLock);
    669 
    670  // Try to merge the run with its neighbours. Returns the new index of the run
    671  // (since it may have merged with an earlier one).
    672  size_t TryCoalesce(arena_chunk_t* aChunk, size_t run_ind, size_t run_pages,
    673                     size_t size) MOZ_REQUIRES(mLock);
    674 
    675  arena_run_t* AllocRun(size_t aSize, bool aLarge, bool aZero)
    676      MOZ_REQUIRES(mLock);
    677 
    678  arena_chunk_t* DallocRun(arena_run_t* aRun, bool aDirty) MOZ_REQUIRES(mLock);
    679 
    680  [[nodiscard]] bool SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge,
    681                              bool aZero) MOZ_REQUIRES(mLock);
    682 
    683  void TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize,
    684                   size_t aNewSize) MOZ_REQUIRES(mLock);
    685 
    686  void TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize,
    687                   size_t aNewSize, bool dirty) MOZ_REQUIRES(mLock);
    688 
    689  arena_run_t* GetNewEmptyBinRun(arena_bin_t* aBin) MOZ_REQUIRES(mLock);
    690 
    691  inline arena_run_t* GetNonFullBinRun(arena_bin_t* aBin) MOZ_REQUIRES(mLock);
    692 
    693  inline uint8_t FindFreeBitInMask(uint32_t aMask, uint32_t& aRng)
    694      MOZ_REQUIRES(mLock);
    695 
    696  inline void* ArenaRunRegAlloc(arena_run_t* aRun, arena_bin_t* aBin)
    697      MOZ_REQUIRES(mLock);
    698 
    699  inline void* MallocSmall(size_t aSize, bool aZero) MOZ_EXCLUDES(mLock);
    700 
    701  void* MallocLarge(size_t aSize, bool aZero) MOZ_EXCLUDES(mLock);
    702 
    703  void* MallocHuge(size_t aSize, bool aZero) MOZ_EXCLUDES(mLock);
    704 
    705  void* PallocLarge(size_t aAlignment, size_t aSize, size_t aAllocSize)
    706      MOZ_EXCLUDES(mLock);
    707 
    708  void* PallocHuge(size_t aSize, size_t aAlignment, bool aZero)
    709      MOZ_EXCLUDES(mLock);
    710 
    711  void RallocShrinkLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
    712                         size_t aOldSize) MOZ_EXCLUDES(mLock);
    713 
    714  bool RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
    715                       size_t aOldSize) MOZ_EXCLUDES(mLock);
    716 
    717  void* RallocSmallOrLarge(void* aPtr, size_t aSize, size_t aOldSize)
    718      MOZ_EXCLUDES(mLock);
    719 
    720  void* RallocHuge(void* aPtr, size_t aSize, size_t aOldSize)
    721      MOZ_EXCLUDES(mLock);
    722 
    723 public:
    724  inline void* Malloc(size_t aSize, bool aZero) MOZ_EXCLUDES(mLock);
    725 
    726  void* Palloc(size_t aAlignment, size_t aSize) MOZ_EXCLUDES(mLock);
    727 
    728  // This may return a chunk that should be destroyed with chunk_dealloc outside
    729  // of the arena lock.  It is not the same chunk as was passed in (since that
    730  // chunk now becomes mSpare).
    731  [[nodiscard]] inline arena_chunk_t* DallocSmall(arena_chunk_t* aChunk,
    732                                                  void* aPtr,
    733                                                  arena_chunk_map_t* aMapElm)
    734      MOZ_REQUIRES(mLock);
    735 
    736  [[nodiscard]] arena_chunk_t* DallocLarge(arena_chunk_t* aChunk, void* aPtr)
    737      MOZ_REQUIRES(mLock);
    738 
    739  void* Ralloc(void* aPtr, size_t aSize, size_t aOldSize) MOZ_EXCLUDES(mLock);
    740 
    741  void UpdateMaxDirty() MOZ_EXCLUDES(mLock);
    742 
    743 #ifdef MALLOC_DECOMMIT
    744  // During a commit operation (for aReqPages) we have the opportunity of
 // committing at most aRemPages additional pages.  How many should we commit
    746  // amortise system calls?
    747  size_t ExtraCommitPages(size_t aReqPages, size_t aRemainingPages)
    748      MOZ_REQUIRES(mLock);
    749 #endif
    750 
    751  // Purge some dirty pages.
    752  //
    753  // When this is called the caller has already tested ShouldStartPurge()
 // (possibly on another thread asynchronously) or is passing
    755  // PurgeUnconditional.  However because it's called without the lock it will
    756  // recheck ShouldContinuePurge() before doing any work.
    757  //
    758  // It may purge a number of runs within a single chunk before returning.  It
    759  // will return Continue if there's more work to do in other chunks
    760  // (ShouldContinuePurge()).
    761  //
 // To release more pages from other chunks, it's best to call Purge
    763  // in a loop, looping when it returns Continue.
    764  //
    765  // This must be called without the mLock held (it'll take the lock).
    766  //
    767  ArenaPurgeResult Purge(PurgeCondition aCond, PurgeStats& aStats)
    768      MOZ_EXCLUDES(mLock);
    769 
    770  // Run Purge() in a loop. If sCallback is non-null then collect statistics and
 // publish them through the callback.  aCaller should be used to identify the
    772  // caller in the profiling data.
    773  //
    774  // aCond         - when to stop purging
    775  // aCaller       - a string representing the caller, this is used for
    776  //                 profiling
    777  // aReuseGraceMS - Stop purging the arena if it was used within this many
    778  //                 milliseconds.  Or 0 to ignore recent reuse.
    779  // aKeepGoing    - Optional function to implement a time budget.
    780  //
    781  ArenaPurgeResult PurgeLoop(
    782      PurgeCondition aCond, const char* aCaller, uint32_t aReuseGraceMS = 0,
    783      Maybe<std::function<bool()>> aKeepGoing = Nothing()) MOZ_EXCLUDES(mLock);
    784 
 // Bookkeeping for purging the dirty pages of one chunk.  It records the span
 // of dirty pages found, the free run containing them, and the statistics to
 // update, so that the actual page release can happen without the arena lock.
 class PurgeInfo {
  private:
   // The dirty memory begins at mDirtyInd and is mDirtyLen pages long.
   // However it may have clean memory within it.
   size_t mDirtyInd = 0;
   size_t mDirtyLen = 0;

   // mDirtyNPages is the actual number of dirty pages within the span above.
   size_t mDirtyNPages = 0;

   // This is the run containing the dirty memory, the entire run is
   // unallocated.
   size_t mFreeRunInd = 0;
   size_t mFreeRunLen = 0;

  public:
   // The arena this purge operates on; its mLock guards most methods below.
   arena_t& mArena;

   // The chunk currently being purged.
   arena_chunk_t* mChunk = nullptr;

  private:
   // Statistics accumulator owned by the caller.
   PurgeStats& mPurgeStats;

  public:
   size_t FreeRunLenBytes() const { return mFreeRunLen << gPageSize2Pow; }

   // The last index of the free run.
   size_t FreeRunLastInd() const { return mFreeRunInd + mFreeRunLen - 1; }

   // Address of the first page of the dirty span within mChunk.
   void* DirtyPtr() const {
     return (void*)(uintptr_t(mChunk) + (mDirtyInd << gPageSize2Pow));
   }

   size_t DirtyLenBytes() const { return mDirtyLen << gPageSize2Pow; }

   // Purging memory is separated into 3 phases.
   //  * FindDirtyPages() which finds the dirty pages in a chunk and marks the
   //    run and chunk as busy while holding the lock.
   //  * Release the pages (without the lock)
   //  * UpdatePagesAndCounts() which marks the dirty pages as not-dirty and
   //    updates other counters (while holding the lock).
   //
   // FindDirtyPages() will return false if purging should not continue in
   // this chunk.  Either because it has no dirty pages or is dying.
   bool FindDirtyPages(bool aPurgedOnce) MOZ_REQUIRES(mArena.mLock);

   // This is used internally by FindDirtyPages to actually perform scanning
   // within a chunk's page tables.  It finds the first dirty page within the
   // chunk.
   bool ScanForFirstDirtyPage() MOZ_REQUIRES(mArena.mLock);

   // After ScanForFirstDirtyPage() returns true, this may be used to find the
   // last dirty page within the same run.
   bool ScanForLastDirtyPage() MOZ_REQUIRES(mArena.mLock);

   // Returns a pair, the first field indicates if there are more dirty pages
   // remaining in the current chunk. The second field if non-null points to a
   // chunk that must be released by the caller.
   std::pair<bool, arena_chunk_t*> UpdatePagesAndCounts()
       MOZ_REQUIRES(mArena.mLock);

   // FinishPurgingInChunk() is used whenever we decide to stop purging in a
   // chunk.  This could be because there are no more dirty pages, or the chunk
   // is dying, or we hit the arena-level threshold.
   void FinishPurgingInChunk(bool aAddToMAdvised, bool aAddToDirty)
       MOZ_REQUIRES(mArena.mLock);

   explicit PurgeInfo(arena_t& arena, arena_chunk_t* chunk, PurgeStats& stats)
       : mArena(arena), mChunk(chunk), mPurgeStats(stats) {}
 };
    855 
    856  void HardPurge();
    857 
    858  // Check mNumDirty against EffectiveMaxDirty and return the appropriate
    859  // action to be taken by MayDoOrQueuePurge (outside mLock's scope).
    860  //
    861  // None:     Nothing to do.
    862  // PurgeNow: Immediate synchronous purge.
    863  // Queue:    Add a new purge request.
    864  //
    865  // Note that in the case of deferred purge this function takes into account
    866  // mIsDeferredPurgeNeeded to avoid useless operations on the purge list
    867  // that would require gArenas.mPurgeListLock.
    868  inline purge_action_t ShouldStartPurge() MOZ_REQUIRES(mLock);
    869 
    870  // Take action according to ShouldStartPurge.
    871  inline void MayDoOrQueuePurge(purge_action_t aAction, const char* aCaller)
    872      MOZ_EXCLUDES(mLock);
    873 
    874  // Check the EffectiveHalfMaxDirty threshold to decide if we continue purge.
    875  // This threshold is lower than ShouldStartPurge to have some hysteresis.
    876  bool ShouldContinuePurge(PurgeCondition aCond) MOZ_REQUIRES(mLock) {
    877    return (mNumDirty > ((aCond == PurgeUnconditional) ? 0 : mMaxDirty >> 1));
    878  }
    879 
    880  // Update the last significant reuse timestamp.
    881  void NotifySignificantReuse() MOZ_EXCLUDES(mLock);
    882 
 // Main-thread-only arenas have their lock disabled, so use the lock state
 // as the indicator.
 bool IsMainThreadOnly() const { return !mLock.LockIsEnabled(); }
    884 
    885  void* operator new(size_t aCount) = delete;
    886 
    887  void* operator new(size_t aCount, const fallible_t&) noexcept;
    888 
    889  void operator delete(void*);
    890 };
    891 
namespace mozilla {

// Trait specialization so DoublyLinkedList<arena_t> (used for the list of
// outstanding purges) can locate the intrusive list element embedded in each
// arena (mPurgeListElem).
template <>
struct GetDoublyLinkedListElement<arena_t> {
 static DoublyLinkedListElement<arena_t>& Get(arena_t* aThis) {
   return aThis->mPurgeListElem;
 }
 static const DoublyLinkedListElement<arena_t>& Get(const arena_t* aThis) {
   return aThis->mPurgeListElem;
 }
};

}  // namespace mozilla
    905 
// Trait used by RedBlackTree to store arenas ordered by their id (mId).
struct ArenaTreeTrait {
 static RedBlackTreeNode<arena_t>& GetTreeNode(arena_t* aThis) {
   return aThis->mLink;
 }

 // Total order on arenas by ascending mId.
 static inline Order Compare(arena_t* aNode, arena_t* aOther) {
   MOZ_ASSERT(aNode);
   MOZ_ASSERT(aOther);
   return CompareInt(aNode->mId, aOther->mId);
 }
};
    917 
    918 // Bookkeeping for all the arenas used by the allocator.
    919 // Arenas are separated in two categories:
    920 // - "private" arenas, used through the moz_arena_* API
    921 // - all the other arenas: the default arena, and thread-local arenas,
    922 //   used by the standard API.
class ArenaCollection {
public:
 constexpr ArenaCollection() {}

 // One-time setup under gInitLock: initialize the locks and create the
 // default arena.  Returns false if that creation failed.
 bool Init() MOZ_REQUIRES(gInitLock) MOZ_EXCLUDES(mLock) {
   arena_params_t params;
   // The main arena allows more dirty pages than the default for other arenas.
   params.mMaxDirty = opt_dirty_max;
   params.mLabel = "Default";
   mDefaultArena =
       mLock.Init() ? CreateArena(/* aIsPrivate = */ false, &params) : nullptr;
   mPurgeListLock.Init();
   mIsDeferredPurgeEnabled = false;
   return bool(mDefaultArena);
 }

 // The requested arena must exist.
 inline arena_t* GetById(arena_id_t aArenaId, bool aIsPrivate)
     MOZ_EXCLUDES(mLock);

 arena_t* CreateArena(bool aIsPrivate, arena_params_t* aParams)
     MOZ_EXCLUDES(mLock);

 // Remove aArena from the collection and delete it, either immediately or
 // (if a purge is in flight) delegated to the purging thread.
 void DisposeArena(arena_t* aArena) MOZ_EXCLUDES(mLock) {
   // This will not call MayPurge but only unlink the element if it is queued.
   // It returns true if we successfully removed the item from the list,
   // meaning we have exclusive access to it and can delete it.
   bool delete_now = RemoveFromOutstandingPurges(aArena);

   {
     MutexAutoLock lock(mLock);
     Tree& tree =
#ifndef NON_RANDOM_ARENA_IDS
         aArena->IsMainThreadOnly() ? mMainThreadArenas :
#endif
                                    mPrivateArenas;

     MOZ_RELEASE_ASSERT(tree.Search(aArena), "Arena not in tree");
     tree.Remove(aArena);
     mNumOperationsDisposedArenas += aArena->Operations();
   }
   {
     MutexAutoLock lock(aArena->mLock);
     if (!aArena->mIsPurgePending) {
       // If no purge was pending then we have exclusive access to the
       // arena and must delete it.
       delete_now = true;
     } else if (!delete_now) {
       // The remaining possibility: we failed to remove the arena from
       // the list (because a purging thread already did so), so that thread
       // will be the last thread holding the arena and is now responsible for
       // deleting it.
       aArena->mMustDeleteAfterPurge = true;

       // Note that it's not possible to have checked the list of pending
       // purges BEFORE the arena was added to the list because that would
       // mean that an operation on the arena (free or realloc) was running
       // concurrently with deletion, which would be a memory error and the
       // assertions in the destructor help check for that.
     }
   }

   if (delete_now) {
     delete aArena;
   }
 }

 // Record the global max-dirty modifier and propagate it to every arena we
 // are allowed to touch from this thread.
 void SetDefaultMaxDirtyPageModifier(int32_t aModifier) {
   {
     MutexAutoLock lock(mLock);
     mDefaultMaxDirtyPageModifier = aModifier;
     for (auto* arena : iter()) {
       // We can only update max-dirty for main-thread-only arenas from the
       // main thread.
       if (!arena->IsMainThreadOnly() || IsOnMainThreadWeak()) {
         arena->UpdateMaxDirty();
       }
     }
   }
 }

 int32_t DefaultMaxDirtyPageModifier() { return mDefaultMaxDirtyPageModifier; }

 using Tree = RedBlackTree<arena_t, ArenaTreeTrait>;

 // Iterates over up to three arena trees in sequence, presenting them as a
 // single range (used by iter() below).
 class Iterator {
  public:
   explicit Iterator(Tree* aTree, Tree* aSecondTree,
                     Tree* aThirdTree = nullptr)
       : mFirstIterator(aTree),
         mSecondTree(aSecondTree),
         mThirdTree(aThirdTree) {}

   // Range-for style proxy; equality only compares the current arena.
   class Item {
    private:
     Iterator& mIter;
     arena_t* mArena;

    public:
     Item(Iterator& aIter, arena_t* aArena) : mIter(aIter), mArena(aArena) {}

     bool operator!=(const Item& aOther) const {
       return mArena != aOther.mArena;
     }

     arena_t* operator*() const { return mArena; }

     const Item& operator++() {
       mArena = mIter.Next();
       return *this;
     }
   };

   Item begin() {
     // If the first tree is empty calling Next() would access memory out of
     // bounds, so advance to the next non-empty tree (or last empty tree).
     MaybeNextTree();
     return Item(*this, mFirstIterator.Current());
   }

   Item end() { return Item(*this, nullptr); }

  private:
   Tree::Iterator mFirstIterator;
   Tree* mSecondTree;
   Tree* mThirdTree;

   // Move mFirstIterator on to the next non-empty tree, if any.
   void MaybeNextTree() {
     while (!mFirstIterator.NotDone() && mSecondTree) {
       mFirstIterator = mSecondTree->iter();
       mSecondTree = mThirdTree;
       mThirdTree = nullptr;
     }
   }

   arena_t* Next() {
     arena_t* arena = mFirstIterator.Next();
     if (arena) {
       return arena;
     }

     // Advance to the next tree if we can, if there's no next tree, or the
     // next tree is empty then Current() will return nullptr.
     MaybeNextTree();
     return mFirstIterator.Current();
   }

   friend Item;
 };

 // Iterate over all arenas in the collection (all trees).
 Iterator iter() MOZ_REQUIRES(mLock) {
#ifdef NON_RANDOM_ARENA_IDS
   return Iterator(&mArenas, &mPrivateArenas);
#else
   return Iterator(&mArenas, &mPrivateArenas, &mMainThreadArenas);
#endif
 }

 inline arena_t* GetDefault() { return mDefaultArena; }

 // Guards the collection of arenas. Must not be acquired while holding
 // a single arena's lock or mPurgeListLock.
 Mutex mLock MOZ_UNANNOTATED;

 // Guards only the list of outstanding purge requests. Can be acquired
 // while holding gArenas.mLock, but must not be acquired or held while
 // holding or acquiring a single arena's lock.
 Mutex mPurgeListLock;

 // We're running on the main thread which is set by a call to SetMainThread().
 bool IsOnMainThread() const {
   return mMainThreadId.isSome() &&
          ThreadIdEqual(mMainThreadId.value(), GetThreadId());
 }

 // We're running on the main thread or SetMainThread() has never been called.
 bool IsOnMainThreadWeak() const {
   return mMainThreadId.isNothing() || IsOnMainThread();
 }

 // After a fork set the new thread ID in the child.
 // This is done as the first thing after a fork, before mLock even re-inits.
 void ResetMainThread() MOZ_EXCLUDES(mLock) {
   // The post fork handler in the child can run from a MacOS worker thread,
   // so we can't set our main thread to it here.  Instead we have to clear it.
   mMainThreadId = Nothing();
 }

 void SetMainThread() MOZ_EXCLUDES(mLock) {
   MutexAutoLock lock(mLock);
   MOZ_ASSERT(mMainThreadId.isNothing());
   mMainThreadId = Some(GetThreadId());
 }

 // This requires the lock to get a consistent count across all the active
 // + disposed arenas.
 uint64_t OperationsDisposedArenas() MOZ_REQUIRES(mLock) {
   return mNumOperationsDisposedArenas;
 }

 // Enable or disable the lazy purge.
 //
 // Returns the former state of enablement.
 // This is a global setting for all arenas. Changing it may cause an
 // immediate purge for all arenas.
 bool SetDeferredPurge(bool aEnable) {
   MOZ_ASSERT(IsOnMainThreadWeak());

   bool ret = mIsDeferredPurgeEnabled;
   {
     MutexAutoLock lock(mLock);
     mIsDeferredPurgeEnabled = aEnable;
     for (auto* arena : iter()) {
       MaybeMutexAutoLock lock(arena->mLock);
       arena->mIsDeferredPurgeEnabled = aEnable;
     }
   }
   if (ret != aEnable) {
     MayPurgeAll(PurgeIfThreshold, __func__);
   }
   return ret;
 }

 bool IsDeferredPurgeEnabled() { return mIsDeferredPurgeEnabled; }

 // Set aside a new purge request for aArena.
 void AddToOutstandingPurges(arena_t* aArena) MOZ_EXCLUDES(mPurgeListLock);

 // Remove an unhandled purge request for aArena.  Returns true if the arena
 // was in the list.
 bool RemoveFromOutstandingPurges(arena_t* aArena)
     MOZ_EXCLUDES(mPurgeListLock);

 // Execute all outstanding purge requests, if any.
 void MayPurgeAll(PurgeCondition aCond, const char* aCaller);

 // Purge some dirty memory, based on purge requests.
 //
 // Returns a may_purge_now_result_t with the following meaning:
 // Done:       Purge has completed for all arenas.
 // NeedsMore:  There may be some arenas that needs to be purged now.
 // WantsLater: There is at least one arena that might want a purge later,
 //             according to aReuseGraceMS.
 //
 // Parameters:
 // aPeekOnly:     If true, check only if there is work to do without doing it.
 // aReuseGraceMS: The time to wait with purge after a
 //                significant re-use happened for an arena.
 // aKeepGoing:    If this returns false purging will cease.
 //
 // This could exit for 3 different reasons.
 //  - There are no more requests.
 //  - There are more requests but aKeepGoing() returned false.
 //  - One arena is completely purged.
 //
 may_purge_now_result_t MayPurgeSteps(
     bool aPeekOnly, uint32_t aReuseGraceMS,
     const Maybe<std::function<bool()>>& aKeepGoing);

private:
 // Bit set in an arena id to mark it as main-thread-only.
 const static arena_id_t MAIN_THREAD_ARENA_BIT = 0x1;

#ifndef NON_RANDOM_ARENA_IDS
 // Can be called with or without lock, depending on aTree.
 inline arena_t* GetByIdInternal(Tree& aTree, arena_id_t aArenaId);

 arena_id_t MakeRandArenaId(bool aIsMainThreadOnly) const MOZ_REQUIRES(mLock);
#endif
 static bool ArenaIdIsMainThreadOnly(arena_id_t aArenaId) {
   return aArenaId & MAIN_THREAD_ARENA_BIT;
 }

 arena_t* mDefaultArena = nullptr;
 arena_id_t mLastPublicArenaId MOZ_GUARDED_BY(mLock) = 0;

 // Accessing mArenas and mPrivateArenas can only be done while holding mLock.
 Tree mArenas MOZ_GUARDED_BY(mLock);
 Tree mPrivateArenas MOZ_GUARDED_BY(mLock);

#ifdef NON_RANDOM_ARENA_IDS
 // Arena ids are pseudo-obfuscated/deobfuscated based on these values randomly
 // initialized on first use.
 arena_id_t mArenaIdKey = 0;
 int8_t mArenaIdRotation = 0;
#else
 // Some accesses to mMainThreadArenas can (and should) elude
 // the lock, see GetById().
 Tree mMainThreadArenas MOZ_GUARDED_BY(mLock);
#endif

 // Set only rarely and then propagated on the same thread to all arenas via
 // UpdateMaxDirty(). But also read in ExtraCommitPages on arbitrary threads.
 // TODO: Could ExtraCommitPages use arena_t::mMaxDirty instead ?
 Atomic<int32_t> mDefaultMaxDirtyPageModifier;
 // This is never changed except for forking, and it does not need mLock.
 Maybe<ThreadId> mMainThreadId;

 // The number of operations that happened in arenas that have since been
 // destroyed.
 uint64_t mNumOperationsDisposedArenas = 0;

 // Linked list of outstanding purges. This list has no particular order.
 // It is ok for an arena to be in this list even if mIsPurgePending is false,
 // it will just cause an extra round of a (most likely no-op) purge.
 // It is not ok to not be in this list but have mIsPurgePending set to true,
 // as this would prevent any future purges for this arena (except for during
 // MayPurgeStep or Purge).
 DoublyLinkedList<arena_t> mOutstandingPurges MOZ_GUARDED_BY(mPurgeListLock);
 // Flag if we should defer purge to later. Only ever set when holding the
 // collection lock. Read only during arena_t ctor.
 Atomic<bool> mIsDeferredPurgeEnabled;
};
   1236 
// The global collection of all arenas (default, thread-local and private).
constinit static ArenaCollection gArenas;

// Protects huge allocation-related data structures.
static Mutex huge_mtx;

// Tree of chunks that are stand-alone huge allocations.
static RedBlackTree<extent_node_t, ExtentTreeTrait> huge
   MOZ_GUARDED_BY(huge_mtx);

// Huge allocation statistics.
static size_t huge_allocated MOZ_GUARDED_BY(huge_mtx);
static size_t huge_mapped MOZ_GUARDED_BY(huge_mtx);
static uint64_t huge_operations MOZ_GUARDED_BY(huge_mtx);
   1250 
   1251 // ******
   1252 // Arenas.
   1253 
   1254 // The arena associated with the current thread (per
   1255 // jemalloc_thread_local_arena) On OSX, __thread/thread_local circles back
   1256 // calling malloc to allocate storage on first access on each thread, which
   1257 // leads to an infinite loop, but pthread-based TLS somehow doesn't have this
   1258 // problem.
   1259 #if !defined(XP_DARWIN)
   1260 static MOZ_THREAD_LOCAL(arena_t*) thread_arena;
   1261 #else
   1262 static detail::ThreadLocal<arena_t*, detail::ThreadLocalKeyStorage>
   1263    thread_arena;
   1264 #endif
   1265 
   1266 // ***************************************************************************
   1267 // Begin forward declarations.
   1268 
   1269 static void huge_dalloc(void* aPtr, arena_t* aArena);
   1270 static bool malloc_init_hard();
   1271 
   1272 #ifndef XP_WIN
   1273 #  ifdef XP_DARWIN
   1274 #    define FORK_HOOK extern "C"
   1275 #  else
   1276 #    define FORK_HOOK static
   1277 #  endif
   1278 FORK_HOOK void _malloc_prefork(void);
   1279 FORK_HOOK void _malloc_postfork_parent(void);
   1280 FORK_HOOK void _malloc_postfork_child(void);
   1281 #  ifdef XP_DARWIN
   1282 FORK_HOOK void _malloc_postfork(void);
   1283 #  endif
   1284 #endif
   1285 
   1286 // End forward declarations.
   1287 // ***************************************************************************
   1288 
   1289 // FreeBSD's pthreads implementation calls malloc(3), so the malloc
   1290 // implementation has to take pains to avoid infinite recursion during
   1291 // initialization.
   1292 // Returns whether the allocator was successfully initialized.
   1293 static inline bool malloc_init() {
   1294  if (!malloc_initialized) {
   1295    return malloc_init_hard();
   1296  }
   1297  return true;
   1298 }
   1299 
   1300 #ifdef ANDROID
   1301 // Android's pthread.h does not declare pthread_atfork() until SDK 21.
   1302 extern "C" MOZ_EXPORT int pthread_atfork(void (*)(void), void (*)(void),
   1303                                         void (*)(void));
   1304 #endif
   1305 
   1306 // ***************************************************************************
   1307 // Begin Utility functions/macros.
   1308 
#ifdef MOZJEMALLOC_PROFILING_CALLBACKS
namespace mozilla {

// Install (or replace) the profiling callback object used by the allocator;
// stored in the global sCallbacks.
void jemalloc_set_profiler_callbacks(
   RefPtr<MallocProfilerCallbacks>&& aCallbacks) {
 sCallbacks = aCallbacks;
}

}  // namespace mozilla
#endif
   1319 
// Free-list head for base-allocated arena_t objects.
template <>
arena_t* TypedBaseAlloc<arena_t>::sFirstFree = nullptr;

// An arena_t is allocated with extra trailing space for its bin array.
template <>
size_t TypedBaseAlloc<arena_t>::size_of() {
 // Allocate enough space for trailing bins.
 return sizeof(arena_t) + (sizeof(arena_bin_t) * NUM_SMALL_CLASSES);
}
   1328 
   1329 // End Utility functions/macros.
   1330 // ***************************************************************************
   1331 // Begin arena.
   1332 
   1333 static inline arena_t* thread_local_arena(bool enabled) {
   1334  arena_t* arena;
   1335 
   1336  if (enabled) {
   1337    // The arena will essentially be leaked if this function is
   1338    // called with `false`, but it doesn't matter at the moment.
   1339    // because in practice nothing actually calls this function
   1340    // with `false`, except maybe at shutdown.
   1341    arena_params_t params;
   1342    params.mLabel = "Thread local";
   1343    arena = gArenas.CreateArena(/* aIsPrivate = */ false, &params);
   1344  } else {
   1345    arena = gArenas.GetDefault();
   1346  }
   1347  thread_arena.set(arena);
   1348  return arena;
   1349 }
   1350 
// Public entry point: pin (aEnabled) or unpin the calling thread to/from a
// thread-local arena.  Does nothing if the allocator failed to initialize.
inline void MozJemalloc::jemalloc_thread_local_arena(bool aEnabled) {
 if (malloc_init()) {
   thread_local_arena(aEnabled);
 }
}
   1356 
   1357 // Choose an arena based on a per-thread value.
   1358 static inline arena_t* choose_arena(size_t size) {
   1359  arena_t* ret = nullptr;
   1360 
   1361  // We can only use TLS if this is a PIC library, since for the static
   1362  // library version, libc's malloc is used by TLS allocation, which
   1363  // introduces a bootstrapping issue.
   1364 
   1365  if (size > kMaxQuantumClass) {
   1366    // Force the default arena for larger allocations.
   1367    ret = gArenas.GetDefault();
   1368  } else {
   1369    // Check TLS to see if our thread has requested a pinned arena.
   1370    ret = thread_arena.get();
   1371    // If ret is non-null, it must not be in the first page.
   1372    MOZ_DIAGNOSTIC_ASSERT_IF(ret, (size_t)ret >= gPageSize);
   1373    if (!ret) {
   1374      // Nothing in TLS. Pin this thread to the default arena.
   1375      ret = thread_local_arena(false);
   1376    }
   1377  }
   1378 
   1379  MOZ_DIAGNOSTIC_ASSERT(ret);
   1380  return ret;
   1381 }
   1382 
   1383 inline uint8_t arena_t::FindFreeBitInMask(uint32_t aMask, uint32_t& aRng) {
   1384  if (mPRNG != nullptr) {
   1385    if (aRng == UINT_MAX) {
   1386      aRng = mPRNG->next() % 32;
   1387    }
   1388    uint8_t bitIndex;
   1389    // RotateRight asserts when provided bad input.
   1390    aMask = aRng ? RotateRight(aMask, aRng)
   1391                 : aMask;  // Rotate the mask a random number of slots
   1392    bitIndex = CountTrailingZeroes32(aMask);
   1393    return (bitIndex + aRng) % 32;
   1394  }
   1395  return CountTrailingZeroes32(aMask);
   1396 }
   1397 
// Allocate one region from aRun (a run belonging to bin aBin): find a set
// bit in the run's free-region bitmask, clear it, and return the region's
// address.  The search starts at mRegionsMinElement, below which no free
// region exists.  Returns nullptr only in the (asserted) impossible case
// where no free bit is found.
inline void* arena_t::ArenaRunRegAlloc(arena_run_t* aRun, arena_bin_t* aBin) {
 void* ret;
 unsigned i, mask, bit, regind;
 // Sentinel: the per-call random rotation has not been drawn yet (see
 // FindFreeBitInMask).
 uint32_t rndPos = UINT_MAX;

 MOZ_DIAGNOSTIC_ASSERT(aRun->mMagic == ARENA_RUN_MAGIC);
 MOZ_ASSERT(aRun->mRegionsMinElement < aBin->mRunNumRegionsMask);

 // Move the first check outside the loop, so that aRun->mRegionsMinElement can
 // be updated unconditionally, without the possibility of updating it
 // multiple times.
 i = aRun->mRegionsMinElement;
 mask = aRun->mRegionsMask[i];
 if (mask != 0) {
   bit = FindFreeBitInMask(mask, rndPos);

   // Region index = (mask word index * 32) + bit position.
   regind = ((i << (LOG2(sizeof(int)) + 3)) + bit);
   MOZ_ASSERT(regind < aBin->mRunNumRegions);
   ret = (void*)(((uintptr_t)aRun) + aBin->mRunFirstRegionOffset +
                 (aBin->mSizeClass * regind));

   // Clear bit.
   mask ^= (1U << bit);
   aRun->mRegionsMask[i] = mask;

   return ret;
 }

 // The word at mRegionsMinElement was full; scan forward for a word with a
 // free bit.
 for (i++; i < aBin->mRunNumRegionsMask; i++) {
   mask = aRun->mRegionsMask[i];
   if (mask != 0) {
     bit = FindFreeBitInMask(mask, rndPos);

     regind = ((i << (LOG2(sizeof(int)) + 3)) + bit);
     MOZ_ASSERT(regind < aBin->mRunNumRegions);
     ret = (void*)(((uintptr_t)aRun) + aBin->mRunFirstRegionOffset +
                   (aBin->mSizeClass * regind));

     // Clear bit.
     mask ^= (1U << bit);
     aRun->mRegionsMask[i] = mask;

     // Make a note that nothing before this element
     // contains a free region.
     aRun->mRegionsMinElement = i;  // Low payoff: + (mask == 0);

     return ret;
   }
 }
 // Not reached.
 MOZ_DIAGNOSTIC_ASSERT(0);
 return nullptr;
}
   1451 
// Return the region at `ptr` (of size `size`, belonging to `bin`) to `run`'s
// free-region bitmask, i.e. set the region's bit again.  Release-asserts the
// bit was clear, which catches double frees.
static inline void arena_run_reg_dalloc(arena_run_t* run, arena_bin_t* bin,
                                       void* ptr, size_t size) {
 uint32_t diff, regind;
 unsigned elm, bit;

 MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);

 // Avoid doing division with a variable divisor if possible.  Using
 // actual division here can reduce allocator throughput by over 20%!
 diff =
     (uint32_t)((uintptr_t)ptr - (uintptr_t)run - bin->mRunFirstRegionOffset);

 MOZ_ASSERT(diff <=
            (static_cast<unsigned>(bin->mRunSizePages) << gPageSize2Pow));
 // mSizeDivisor is set up so this division can be strength-reduced by the
 // compiler; the assert below checks it agrees with the real region size.
 regind = diff / bin->mSizeDivisor;

 MOZ_DIAGNOSTIC_ASSERT(diff == regind * size);
 MOZ_DIAGNOSTIC_ASSERT(regind < bin->mRunNumRegions);

 // Mask word containing this region's bit; keep the allocation scan hint
 // (mRegionsMinElement) pointing at or before the first free region.
 elm = regind >> (LOG2(sizeof(int)) + 3);
 if (elm < run->mRegionsMinElement) {
   run->mRegionsMinElement = elm;
 }
 bit = regind - (elm << (LOG2(sizeof(int)) + 3));
 MOZ_RELEASE_ASSERT((run->mRegionsMask[elm] & (1U << bit)) == 0,
                    "Double-free?");
 run->mRegionsMask[elm] |= (1U << bit);
}
   1480 
// Carve `aSize` bytes (a whole number of pages) off the front of the free run
// `aRun`, marking those pages allocated.  Under MALLOC_DECOMMIT, decommitted
// pages in range are committed first (possibly with extra pages to amortise
// system calls).  The trailing remainder, if any, is re-inserted into
// mRunsAvail as a smaller free run.  Returns false only if committing pages
// failed, in which case the caller must not use the run.
bool arena_t::SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge,
                       bool aZero) {
  arena_chunk_t* chunk = GetChunkForPtr(aRun);
  size_t old_ndirty = chunk->mNumDirty;
  size_t run_ind =
      (unsigned)((uintptr_t(aRun) - uintptr_t(chunk)) >> gPageSize2Pow);
  // A free run's first map element stores the run size in its non-flag bits.
  size_t total_pages =
      (chunk->mPageMap[run_ind].bits & ~gPageSizeMask) >> gPageSize2Pow;
  size_t need_pages = (aSize >> gPageSize2Pow);
  MOZ_ASSERT(need_pages > 0);
  MOZ_ASSERT(need_pages <= total_pages);
  size_t rem_pages = total_pages - need_pages;

  MOZ_ASSERT((chunk->mPageMap[run_ind].bits & CHUNK_MAP_BUSY) == 0);

#ifdef MALLOC_DECOMMIT
  size_t i = 0;
  while (i < need_pages) {
    MOZ_ASSERT((chunk->mPageMap[run_ind + i].bits & CHUNK_MAP_BUSY) == 0);

    // Commit decommitted pages if necessary.  If a decommitted
    // page is encountered, commit all needed adjacent decommitted
    // pages in one operation, in order to reduce system call
    // overhead.
    if (chunk->mPageMap[run_ind + i].bits & CHUNK_MAP_DECOMMITTED) {
      // The start of the decommitted area is on a real page boundary.
      MOZ_ASSERT((run_ind + i) % gPagesPerRealPage == 0);

      // Advance i+j to just past the index of the last page
      // to commit.  Clear CHUNK_MAP_DECOMMITTED along the way.
      size_t j;
      for (j = 0; i + j < need_pages && (chunk->mPageMap[run_ind + i + j].bits &
                                         CHUNK_MAP_DECOMMITTED);
           j++) {
        // DECOMMITTED, MADVISED and FRESH are mutually exclusive.
        MOZ_ASSERT((chunk->mPageMap[run_ind + i + j].bits &
                    (CHUNK_MAP_FRESH | CHUNK_MAP_MADVISED)) == 0);
      }

      // Consider committing more pages to amortise calls to VirtualAlloc.
      // This only makes sense at the edge of our run hence the if condition
      // here.
      if (i + j == need_pages) {
        size_t extra_commit = ExtraCommitPages(j, rem_pages);
        // Round the end of the committed range up to a real page boundary.
        extra_commit =
            PAGES_PER_REAL_PAGE_CEILING(run_ind + i + j + extra_commit) -
            run_ind - i - j;
        for (; i + j < need_pages + extra_commit &&
               (chunk->mPageMap[run_ind + i + j].bits &
                CHUNK_MAP_MADVISED_OR_DECOMMITTED);
             j++) {
          MOZ_ASSERT((chunk->mPageMap[run_ind + i + j].bits &
                      (CHUNK_MAP_FRESH | CHUNK_MAP_MADVISED)) == 0);
        }
      }
      // The end of the decommitted area is on a real page boundary.
      MOZ_ASSERT((run_ind + i + j) % gPagesPerRealPage == 0);

      if (!pages_commit(
              (void*)(uintptr_t(chunk) + ((run_ind + i) << gPageSize2Pow)),
              j << gPageSize2Pow)) {
        return false;
      }

      // pages_commit zeroes pages, so mark them as such if it succeeded.
      // That's checked further below to avoid manually zeroing the pages.
      for (size_t k = 0; k < j; k++) {
        chunk->mPageMap[run_ind + i + k].bits =
            (chunk->mPageMap[run_ind + i + k].bits & ~CHUNK_MAP_DECOMMITTED) |
            CHUNK_MAP_ZEROED | CHUNK_MAP_FRESH;
      }

      mNumFresh += j;
      i += j;
    } else {
      i++;
    }
  }
#endif

  mRunsAvail.Remove(&chunk->mPageMap[run_ind]);

  // Keep track of trailing unused pages for later use.
  if (rem_pages > 0) {
    // Record the remainder's size in its first and last map elements and
    // make it available as a new, smaller free run.
    chunk->mPageMap[run_ind + need_pages].bits =
        (rem_pages << gPageSize2Pow) |
        (chunk->mPageMap[run_ind + need_pages].bits & gPageSizeMask);
    chunk->mPageMap[run_ind + total_pages - 1].bits =
        (rem_pages << gPageSize2Pow) |
        (chunk->mPageMap[run_ind + total_pages - 1].bits & gPageSizeMask);
    mRunsAvail.Insert(&chunk->mPageMap[run_ind + need_pages]);
  }

  // If the dirty-page scan hint pointed at this run, advance it past the
  // pages we just allocated.
  if (chunk->mDirtyRunHint == run_ind) {
    chunk->mDirtyRunHint = run_ind + need_pages;
  }

  for (size_t i = 0; i < need_pages; i++) {
    // Zero if necessary.
    if (aZero) {
      if ((chunk->mPageMap[run_ind + i].bits & CHUNK_MAP_ZEROED) == 0) {
        memset((void*)(uintptr_t(chunk) + ((run_ind + i) << gPageSize2Pow)), 0,
               gPageSize);
        // CHUNK_MAP_ZEROED is cleared below.
      }
    }

    // Update dirty page accounting.
    if (chunk->mPageMap[run_ind + i].bits & CHUNK_MAP_DIRTY) {
      chunk->mNumDirty--;
      mNumDirty--;
      // CHUNK_MAP_DIRTY is cleared below.
    } else if (chunk->mPageMap[run_ind + i].bits & CHUNK_MAP_MADVISED) {
      mStats.committed++;
      mNumMAdvised--;
    } else if (chunk->mPageMap[run_ind + i].bits & CHUNK_MAP_FRESH) {
      mStats.committed++;
      mNumFresh--;
    }

    // This bit has already been cleared
    MOZ_ASSERT(!(chunk->mPageMap[run_ind + i].bits & CHUNK_MAP_DECOMMITTED));

    // Initialize the chunk map.  This clears the dirty, zeroed and madvised
    // bits, decommitted is cleared above.
    if (aLarge) {
      chunk->mPageMap[run_ind + i].bits = CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
    } else {
      // Small runs store a pointer back to their arena_run_t in the map bits.
      chunk->mPageMap[run_ind + i].bits = size_t(aRun) | CHUNK_MAP_ALLOCATED;
    }
  }

  // Set the run size only in the first element for large runs.  This is
  // primarily a debugging aid, since the lack of size info for trailing
  // pages only matters if the application tries to operate on an
  // interior pointer.
  if (aLarge) {
    chunk->mPageMap[run_ind].bits |= aSize;
  }

  // If this allocation consumed the chunk's last dirty pages it no longer
  // needs purging; drop it from the dirty-chunk list (unless Purge() owns
  // it, indicated by mIsPurging).
  if (chunk->mNumDirty == 0 && old_ndirty > 0 && !chunk->mIsPurging &&
      mChunksDirty.ElementProbablyInList(chunk)) {
    mChunksDirty.remove(chunk);
  }
  return true;
}
   1627 
// Initialize the page map of the freshly mapped chunk `aChunk` and create its
// single, maximal free run.  Header pages are accounted as committed; one
// real page before and (at least) one after the usable area are decommitted
// as guards; and enough usable pages are committed (marked FRESH | ZEROED)
// for the caller's subsequent SplitRun() of `aMinCommittedPages` pages.
void arena_t::InitChunk(arena_chunk_t* aChunk, size_t aMinCommittedPages) {
  new (aChunk) arena_chunk_t(this);

  mStats.mapped += kChunkSize;

  // Setup the chunk's pages in two phases.  First we mark which pages are
  // committed & decommitted and perform the decommit.  Then we update the map
  // to create the runs.

  // Clear the bits for the real header pages.
  size_t i;
  for (i = 0; i < gChunkHeaderNumPages - gPagesPerRealPage; i++) {
    aChunk->mPageMap[i].bits = 0;
  }
  mStats.committed += gChunkHeaderNumPages - gPagesPerRealPage;

  // Decommit the last header page (=leading page) as a guard.
  MOZ_ASSERT(i % gPagesPerRealPage == 0);
  pages_decommit((void*)(uintptr_t(aChunk) + (i << gPageSize2Pow)),
                 gRealPageSize);
  for (; i < gChunkHeaderNumPages; i++) {
    aChunk->mPageMap[i].bits = CHUNK_MAP_DECOMMITTED;
  }

  // If MALLOC_DECOMMIT is enabled then commit only the pages we're about to
  // use.  Otherwise commit all of them.
#ifdef MALLOC_DECOMMIT
  // The number of usable pages in the chunk, in other words, the total number
  // of pages in the chunk, minus the number of pages in the chunk header
  // (including the guard page at the beginning of the chunk), and the number of
  // pages for the guard page at the end of the chunk.
  size_t chunk_usable_pages =
      gChunkNumPages - gChunkHeaderNumPages - gPagesPerRealPage;
  size_t n_fresh_pages = PAGES_PER_REAL_PAGE_CEILING(
      aMinCommittedPages +
      ExtraCommitPages(aMinCommittedPages,
                       chunk_usable_pages - aMinCommittedPages));
#else
  size_t n_fresh_pages =
      gChunkNumPages - gPagesPerRealPage - gChunkHeaderNumPages;
#endif

  // The committed pages are marked as Fresh.  Our caller, SplitRun will update
  // this when it uses them.
  for (size_t j = 0; j < n_fresh_pages; j++) {
    aChunk->mPageMap[i + j].bits = CHUNK_MAP_ZEROED | CHUNK_MAP_FRESH;
  }
  i += n_fresh_pages;
  mNumFresh += n_fresh_pages;

#ifndef MALLOC_DECOMMIT
  // If MALLOC_DECOMMIT isn't defined then all the pages are fresh and setup in
  // the loop above.
  MOZ_ASSERT(i == gChunkNumPages - gPagesPerRealPage);
#endif

  // If MALLOC_DECOMMIT is defined, then this will decommit the remainder of the
  // chunk plus the last page which is a guard page, if it is not defined it
  // will only decommit the guard page.
  MOZ_ASSERT(i % gPagesPerRealPage == 0);
  pages_decommit((void*)(uintptr_t(aChunk) + (i << gPageSize2Pow)),
                 (gChunkNumPages - i) << gPageSize2Pow);
  for (; i < gChunkNumPages; i++) {
    aChunk->mPageMap[i].bits = CHUNK_MAP_DECOMMITTED;
  }

  // aMinCommittedPages will create a valid run.
  MOZ_ASSERT(aMinCommittedPages > 0);
  MOZ_ASSERT(aMinCommittedPages <=
             gChunkNumPages - gChunkHeaderNumPages - gPagesPerRealPage);

  // Create the run.  The run size is stored in the first and last map
  // elements of the free run before it is made available for allocation.
  aChunk->mPageMap[gChunkHeaderNumPages].bits |= gMaxLargeClass;
  aChunk->mPageMap[gChunkNumPages - gPagesPerRealPage - 1].bits |=
      gMaxLargeClass;
  mRunsAvail.Insert(&aChunk->mPageMap[gChunkHeaderNumPages]);
}
   1705 
// Detach `aChunk` from this arena's bookkeeping (dirty/fresh/madvised
// counters, stats and intrusive lists) in preparation for releasing its
// memory.  Returns false when the chunk is currently being purged; in that
// case Purge() completes the removal later and the caller must not free the
// chunk yet.
bool arena_t::RemoveChunk(arena_chunk_t* aChunk) {
  aChunk->mDying = true;

  // If the chunk has busy pages that means that a Purge() is in progress.
  // We can't remove the chunk now, instead Purge() will do it.
  if (aChunk->mIsPurging) {
    return false;
  }

  if (aChunk->mNumDirty > 0) {
    MOZ_ASSERT(aChunk->mArena == this);
    if (mChunksDirty.ElementProbablyInList(aChunk)) {
      mChunksDirty.remove(aChunk);
    }
    // Dirty pages are committed, so releasing them uncommits them.
    mNumDirty -= aChunk->mNumDirty;
    mStats.committed -= aChunk->mNumDirty;
  }

  // Count the number of madvised/fresh pages and update the stats.
  size_t madvised = 0;
  size_t fresh = 0;
  for (size_t i = gChunkHeaderNumPages; i < gChunkNumPages - gPagesPerRealPage;
       i++) {
    // There must not be any pages that are not fresh, madvised, decommitted or
    // dirty.
    MOZ_ASSERT(aChunk->mPageMap[i].bits &
               (CHUNK_MAP_FRESH_MADVISED_OR_DECOMMITTED | CHUNK_MAP_DIRTY));
    MOZ_ASSERT((aChunk->mPageMap[i].bits & CHUNK_MAP_BUSY) == 0);

    if (aChunk->mPageMap[i].bits & CHUNK_MAP_MADVISED) {
      madvised++;
    } else if (aChunk->mPageMap[i].bits & CHUNK_MAP_FRESH) {
      fresh++;
    }
  }

  mNumMAdvised -= madvised;
  mNumFresh -= fresh;

#ifdef MALLOC_DOUBLE_PURGE
  if (mChunksMAdvised.ElementProbablyInList(aChunk)) {
    mChunksMAdvised.remove(aChunk);
  }
#endif

  mStats.mapped -= kChunkSize;
  // The committed header pages (minus the guard) go away with the chunk.
  mStats.committed -= gChunkHeaderNumPages - gPagesPerRealPage;

  return true;
}
   1756 
   1757 arena_chunk_t* arena_t::DemoteChunkToSpare(arena_chunk_t* aChunk) {
   1758  if (mSpare) {
   1759    if (!RemoveChunk(mSpare)) {
   1760      // If we can't remove the spare chunk now purge will finish removing it
   1761      // later.  Set it to null so that the return below will return null and
   1762      // our caller won't delete the chunk before Purge() is finished.
   1763      mSpare = nullptr;
   1764    }
   1765  }
   1766 
   1767  arena_chunk_t* chunk_dealloc = mSpare;
   1768  mSpare = aChunk;
   1769  return chunk_dealloc;
   1770 }
   1771 
// Allocate a run of `aSize` bytes (a page multiple, at most gMaxLargeClass).
// Tries, in order: the lowest best-fit free run in mRunsAvail, the spare
// chunk (if it isn't being purged), and finally a brand-new chunk.  Returns
// null if chunk allocation fails or SplitRun() cannot commit pages.
arena_run_t* arena_t::AllocRun(size_t aSize, bool aLarge, bool aZero) {
  arena_run_t* run;
  arena_chunk_map_t* mapelm;
  arena_chunk_map_t key;

  MOZ_ASSERT(aSize <= gMaxLargeClass);
  MOZ_ASSERT((aSize & gPageSizeMask) == 0);

  // Search the arena's chunks for the lowest best fit.
  key.bits = aSize | CHUNK_MAP_KEY;
  mapelm = mRunsAvail.SearchOrNext(&key);
  if (mapelm) {
    arena_chunk_t* chunk = GetChunkForPtr(mapelm);
    // The map element's offset within mPageMap gives the page index, and
    // hence the run's address within the chunk.
    size_t pageind = (uintptr_t(mapelm) - uintptr_t(chunk->mPageMap)) /
                     sizeof(arena_chunk_map_t);

    MOZ_ASSERT((chunk->mPageMap[pageind].bits & CHUNK_MAP_BUSY) == 0);
    run = (arena_run_t*)(uintptr_t(chunk) + (pageind << gPageSize2Pow));
  } else if (mSpare && !mSpare->mIsPurging) {
    // Use the spare.
    arena_chunk_t* chunk = mSpare;
    mSpare = nullptr;
    run = (arena_run_t*)(uintptr_t(chunk) +
                         (gChunkHeaderNumPages << gPageSize2Pow));
    // Insert the run into the tree of available runs.
    MOZ_ASSERT((chunk->mPageMap[gChunkHeaderNumPages].bits & CHUNK_MAP_BUSY) ==
               0);
    mRunsAvail.Insert(&chunk->mPageMap[gChunkHeaderNumPages]);
  } else {
    // No usable runs.  Create a new chunk from which to allocate
    // the run.
    arena_chunk_t* chunk =
        (arena_chunk_t*)chunk_alloc(kChunkSize, kChunkSize, false);
    if (!chunk) {
      return nullptr;
    }

    InitChunk(chunk, aSize >> gPageSize2Pow);
    // The new chunk's single free run starts right after the header pages.
    run = (arena_run_t*)(uintptr_t(chunk) +
                         (gChunkHeaderNumPages << gPageSize2Pow));
  }
  // Update page map.
  return SplitRun(run, aSize, aLarge, aZero) ? run : nullptr;
}
   1816 
   1817 void arena_t::UpdateMaxDirty() {
   1818  MaybeMutexAutoLock lock(mLock);
   1819  int32_t modifier = gArenas.DefaultMaxDirtyPageModifier();
   1820  if (modifier) {
   1821    int32_t arenaOverride =
   1822        modifier > 0 ? mMaxDirtyIncreaseOverride : mMaxDirtyDecreaseOverride;
   1823    if (arenaOverride) {
   1824      modifier = arenaOverride;
   1825    }
   1826  }
   1827 
   1828  mMaxDirty =
   1829      modifier >= 0 ? mMaxDirtyBase << modifier : mMaxDirtyBase >> -modifier;
   1830 }
   1831 
   1832 #ifdef MALLOC_DECOMMIT
   1833 
// Decide how many pages beyond `aReqPages` to commit, given `aRemainingPages`
// further free pages in the run.  This heuristic balances amortising commit
// system calls against keeping the page cache (dirty + fresh + madvised
// pages) within mMaxDirty.  Returns the number of extra pages to commit
// (0..aRemainingPages).
size_t arena_t::ExtraCommitPages(size_t aReqPages, size_t aRemainingPages) {
  const int32_t modifier = gArenas.DefaultMaxDirtyPageModifier();
  if (modifier < 0) {
    // A negative modifier means memory is tight; never over-commit.
    return 0;
  }

  // The maximum size of the page cache
  const size_t max_page_cache = mMaxDirty;

  // The current size of the page cache, note that we use mNumFresh +
  // mNumMAdvised here but Purge() does not.
  const size_t page_cache = mNumDirty + mNumFresh + mNumMAdvised;

  if (page_cache > max_page_cache) {
    // We're already exceeding our dirty page count even though we're trying
    // to allocate.  This can happen due to fragmentation.  Don't commit
    // excess memory since we're probably here due to a larger allocation and
    // small amounts of memory are certainly available in the page cache.
    return 0;
  }
  if (modifier > 0) {
    // If modifier is > 0 then we want to keep all the pages we can, but don't
    // exceed the size of the page cache.  The subtraction cannot underflow
    // because of the condition above.
    return std::min(aRemainingPages, max_page_cache - page_cache);
  }

  // The rest is arbitrary and involves some assumptions.  I've broken it down
  // into simple expressions to document them more clearly.

  // Assumption 1: a quarter of mMaxDirty is a sensible "minimum
  // target" for the dirty page cache.  Likewise 3 quarters is a sensible
  // "maximum target".  Note that for the maximum we avoid using the whole page
  // cache now so that a free that follows this allocation doesn't immediately
  // call Purge (churning memory).
  const size_t min = max_page_cache / 4;
  const size_t max = 3 * max_page_cache / 4;

  // Assumption 2: Committing 32 pages at a time is sufficient to amortise
  // VirtualAlloc costs.
  size_t amortisation_threshold = 32;

  // extra_pages is the number of additional pages needed to meet
  // amortisation_threshold.
  size_t extra_pages = aReqPages < amortisation_threshold
                           ? amortisation_threshold - aReqPages
                           : 0;

  // If committing extra_pages isn't enough to hit the minimum target then
  // increase it.
  if (page_cache + extra_pages < min) {
    extra_pages = min - page_cache;
  } else if (page_cache + extra_pages > max) {
    // If committing extra_pages would exceed our maximum target then it may
    // still be useful to allocate extra pages.  One of the reasons this can
    // happen could be fragmentation of the cache,

    // Therefore reduce the amortisation threshold so that we might allocate
    // some extra pages but avoid exceeding the dirty page cache.
    amortisation_threshold /= 2;
    extra_pages = std::min(aReqPages < amortisation_threshold
                               ? amortisation_threshold - aReqPages
                               : 0,
                           max_page_cache - page_cache);
  }

  // Cap extra_pages to aRemainingPages; the run cannot supply more.
  extra_pages = std::min(extra_pages, aRemainingPages);

  // Finally if committing a small number of additional pages now can prevent
  // a small commit later then try to commit a little more now, provided we
  // don't exceed max_page_cache.
  if ((aRemainingPages - extra_pages) < amortisation_threshold / 2 &&
      (page_cache + aRemainingPages) < max_page_cache) {
    return aRemainingPages;
  }

  return extra_pages;
}
   1914 #endif
   1915 
// Purge (decommit or madvise away) one span of dirty pages from one chunk of
// this arena.  Work is split into lock-protected phases so other threads can
// allocate/free in between:
//   1. Under the lock: pick a dirty chunk, set its mIsPurging flag, find a
//      span of dirty pages, mark the containing free run busy and pull it
//      out of mRunsAvail.
//   2. Without the lock: decommit or madvise the span.
//   3. Under the lock again: record the pages' final state, update counters,
//      and either loop for more dirty pages or restore the run and finish.
// Returns Dying if the arena must be deleted after purging, NotDone if more
// purging is worthwhile (the caller may call Purge() again), or
// ReachedThresholdOrBusy otherwise.
ArenaPurgeResult arena_t::Purge(PurgeCondition aCond, PurgeStats& aStats) {
  arena_chunk_t* chunk = nullptr;

  // The first critical section will find a chunk and mark dirty pages in it as
  // busy.
  {
    MaybeMutexAutoLock lock(mLock);

    if (mMustDeleteAfterPurge) {
      mIsPurgePending = false;
      return Dying;
    }

#ifdef MOZ_DEBUG
    size_t ndirty = 0;
    for (auto& chunk : mChunksDirty) {
      ndirty += chunk.mNumDirty;
    }
    // Not all dirty chunks are in mChunksDirty as some may not have enough
    // dirty pages for purging or might currently be being purged.
    MOZ_ASSERT(ndirty <= mNumDirty);
#endif

    if (!ShouldContinuePurge(aCond)) {
      mIsPurgePending = false;
      return ReachedThresholdOrBusy;
    }

    // Take a single chunk and attempt to purge some of its dirty pages.  The
    // loop below will purge memory from the chunk until either:
    //  * The dirty page count for the arena hits its target,
    //  * Another thread attempts to delete this chunk, or
    //  * The chunk has no more dirty pages.
    // In any of these cases the loop will break and Purge() will return, which
    // means it may return before the arena meets its dirty page count target,
    // the return value is used by the caller to call Purge() again where it
    // will take the next chunk with dirty pages.
    if (mSpare && mSpare->mNumDirty && !mSpare->mIsPurging &&
        mChunksDirty.ElementProbablyInList(mSpare)) {
      // If the spare chunk has dirty pages then try to purge these first.
      //
      // They're unlikely to be used in the near future because the spare chunk
      // is only used if there's no run in mRunsAvail suitable.  mRunsAvail
      // never contains runs from the spare chunk.
      chunk = mSpare;
      mChunksDirty.remove(chunk);
    } else {
      if (!mChunksDirty.isEmpty()) {
        chunk = mChunksDirty.popFront();
      }
    }
    if (!chunk) {
      // We have to clear the flag to preserve the invariant that if Purge()
      // returns anything other than NotDone then the flag is clear. If there's
      // more purging work to do in other chunks then either other calls to
      // Purge() (in other threads) will handle it or we rely on
      // ShouldStartPurge() returning true at some point in the future.
      mIsPurgePending = false;

      // There are chunks with dirty pages (because mNumDirty > 0 above) but
      // they're not in mChunksDirty, they might not have enough dirty pages.
      // Or maybe they're busy being purged by other threads.
      return ReachedThresholdOrBusy;
    }
    MOZ_ASSERT(chunk->mNumDirty > 0);

    // Mark the chunk as busy so it won't be deleted and remove it from
    // mChunksDirty so we're the only thread purging it.
    MOZ_ASSERT(!chunk->mIsPurging);
    chunk->mIsPurging = true;
    aStats.chunks++;
  }  // MaybeMutexAutoLock

  // True if we should continue purging memory from this arena.
  bool continue_purge_arena = true;

  // True if we should continue purging memory in this chunk.
  bool continue_purge_chunk = true;

  // True if at least one Purge operation has occured and therefore we need to
  // call FinishPurgingInChunk() before returning.
  bool purged_once = false;

  while (continue_purge_chunk && continue_purge_arena) {
    // This structure is used to communicate between the two PurgePhase
    // functions.
    PurgeInfo purge_info(*this, chunk, aStats);

    {
      // Phase 1: Find pages that need purging.
      MaybeMutexAutoLock lock(purge_info.mArena.mLock);
      MOZ_ASSERT(chunk->mIsPurging);

      if (purge_info.mArena.mMustDeleteAfterPurge) {
        chunk->mIsPurging = false;
        purge_info.mArena.mIsPurgePending = false;
        return Dying;
      }

      continue_purge_chunk = purge_info.FindDirtyPages(purged_once);
      continue_purge_arena = purge_info.mArena.ShouldContinuePurge(aCond);

      // The code below will exit returning false if these are both false, so
      // clear mIsDeferredPurgeNeeded while we still hold the lock.
      if (!continue_purge_chunk && !continue_purge_arena) {
        purge_info.mArena.mIsPurgePending = false;
      }
    }
    if (!continue_purge_chunk) {
      if (chunk->mDying) {
        // Phase one already unlinked the chunk from structures, we just need to
        // release the memory.
        chunk_dealloc((void*)chunk, kChunkSize, ARENA_CHUNK);
      }
      // There's nothing else to do here, our caller may execute Purge() again
      // if continue_purge_arena is true.
      return continue_purge_arena ? NotDone : ReachedThresholdOrBusy;
    }

    // Release the pages found by phase 1 without holding the arena lock;
    // decommit where supported, otherwise MADV_FREE (posix_madvise on
    // Solaris).
#ifdef MALLOC_DECOMMIT
    pages_decommit(purge_info.DirtyPtr(), purge_info.DirtyLenBytes());
#else
#  ifdef XP_SOLARIS
    posix_madvise(purge_info.DirtyPtr(), purge_info.DirtyLenBytes(), MADV_FREE);
#  else
    madvise(purge_info.DirtyPtr(), purge_info.DirtyLenBytes(), MADV_FREE);
#  endif
#endif

    arena_chunk_t* chunk_to_release = nullptr;
    bool is_dying;
    {
      // Phase 2: Mark the pages with their final state (madvised or
      // decommitted) and fix up any other bookkeeping.
      MaybeMutexAutoLock lock(purge_info.mArena.mLock);
      MOZ_ASSERT(chunk->mIsPurging);

      // We can't early exit if the arena is dying, we have to finish the purge
      // (which restores the state so the destructor will check it) and maybe
      // release the old spare arena.
      is_dying = purge_info.mArena.mMustDeleteAfterPurge;

      auto [cpc, ctr] = purge_info.UpdatePagesAndCounts();
      continue_purge_chunk = cpc;
      chunk_to_release = ctr;
      continue_purge_arena = purge_info.mArena.ShouldContinuePurge(aCond);

      if (!continue_purge_chunk || !continue_purge_arena) {
        // We're going to stop purging here so update the chunk's bookkeeping.
        purge_info.FinishPurgingInChunk(true, continue_purge_chunk);
        purge_info.mArena.mIsPurgePending = false;
      }
    }  // MaybeMutexAutoLock

    // Phase 2 can release the spare chunk (not always == chunk) so an extra
    // parameter is used to return that chunk.
    if (chunk_to_release) {
      chunk_dealloc((void*)chunk_to_release, kChunkSize, ARENA_CHUNK);
    }
    if (is_dying) {
      return Dying;
    }
    purged_once = true;
  }

  return continue_purge_arena ? NotDone : ReachedThresholdOrBusy;
}
   2083 
   2084 ArenaPurgeResult arena_t::PurgeLoop(PurgeCondition aCond, const char* aCaller,
   2085                                    uint32_t aReuseGraceMS,
   2086                                    Maybe<std::function<bool()>> aKeepGoing) {
   2087  PurgeStats purge_stats(mId, mLabel, aCaller);
   2088 
   2089 #ifdef MOZJEMALLOC_PROFILING_CALLBACKS
   2090  // We hold our own reference to callbacks for the duration of PurgeLoop to
   2091  // make sure it's not released during purging.
   2092  RefPtr<MallocProfilerCallbacks> callbacks = sCallbacks;
   2093  TimeStamp start;
   2094  if (callbacks) {
   2095    start = TimeStamp::Now();
   2096  }
   2097 #endif
   2098 
   2099  uint64_t reuseGraceNS = (uint64_t)aReuseGraceMS * 1000 * 1000;
   2100  uint64_t now = aReuseGraceMS ? 0 : GetTimestampNS();
   2101  ArenaPurgeResult pr;
   2102  do {
   2103    pr = Purge(aCond, purge_stats);
   2104    now = aReuseGraceMS ? 0 : GetTimestampNS();
   2105  } while (
   2106      pr == NotDone &&
   2107      (!aReuseGraceMS || (now - mLastSignificantReuseNS >= reuseGraceNS)) &&
   2108      (!aKeepGoing || (*aKeepGoing)()));
   2109 
   2110 #ifdef MOZJEMALLOC_PROFILING_CALLBACKS
   2111  if (callbacks) {
   2112    TimeStamp end = TimeStamp::Now();
   2113    // We can't hold an arena lock while committing profiler markers.
   2114    callbacks->OnPurge(start, end, purge_stats, pr);
   2115  }
   2116 #endif
   2117 
   2118  return pr;
   2119 }
   2120 
// Locate a span of dirty pages in mChunk to purge (Purge() phase 1, called
// with the arena lock held).  On success it fills in the mFreeRun*/mDirty*
// fields, clears the pages' dirty bits (updating the dirty counters), marks
// the containing free run busy and removes it from mRunsAvail, then returns
// true.  Returns false, after calling FinishPurgingInChunk(), when the chunk
// has nothing left to purge or is dying.
bool arena_t::PurgeInfo::FindDirtyPages(bool aPurgedOnce) {
  // It's possible that the previously dirty pages have now been
  // allocated or the chunk is dying.
  if (mChunk->mNumDirty == 0 || mChunk->mDying) {
    // Add the chunk to the mChunksMAdvised list if it's had at least one
    // madvise.
    FinishPurgingInChunk(aPurgedOnce, false);
    return false;
  }

  // This will locate a span of dirty pages within a single run (unallocated
  // runs never have unallocated neighbours).  The span of dirty pages may have
  // "holes" of clean never-allocated pages.  We don't know for sure the
  // trade-offs of purging those clean pages.  On one hand:
  //  * This reduces the number of system calls needed
  //  * This may cause less fragmentation in the kernel's structures, but not
  //    the CPU's page tables.
  //  * It's likely that the pages aren't committed by the OS anyway.
  // On the other hand:
  //  * Now accessing those pages will require either pages_commit() or a page
  //    fault to ensure they're available.
  do {
    if (!ScanForFirstDirtyPage()) {
      // No dirty pages remain anywhere in the chunk.
      FinishPurgingInChunk(aPurgedOnce, false);
      return false;
    }
  } while (!ScanForLastDirtyPage());

  // Sanity-check the located span: it lies within a free run, is non-empty,
  // and is aligned to real-page boundaries.
  MOZ_ASSERT(mFreeRunInd >= gChunkHeaderNumPages);
  MOZ_ASSERT(mFreeRunInd <= mDirtyInd);
  MOZ_ASSERT(mFreeRunLen > 0);
  MOZ_ASSERT(mDirtyInd != 0);
  MOZ_ASSERT(mDirtyLen != 0);
  MOZ_ASSERT(mDirtyLen <= mFreeRunLen);
  MOZ_ASSERT(mDirtyInd + mDirtyLen <= mFreeRunInd + mFreeRunLen);
  MOZ_ASSERT(mDirtyInd % gPagesPerRealPage == 0);
  MOZ_ASSERT(mDirtyLen % gPagesPerRealPage == 0);

  // Count the number of dirty pages and clear their bits.
  mDirtyNPages = 0;
  for (size_t i = 0; i < mDirtyLen; i++) {
    size_t& bits = mChunk->mPageMap[mDirtyInd + i].bits;
    if (bits & CHUNK_MAP_DIRTY) {
      mDirtyNPages++;
      bits ^= CHUNK_MAP_DIRTY;
    }
  }

  MOZ_ASSERT(mDirtyNPages > 0);
  MOZ_ASSERT(mDirtyNPages <= mChunk->mNumDirty);
  MOZ_ASSERT(mDirtyNPages <= mDirtyLen);

  mChunk->mNumDirty -= mDirtyNPages;
  mArena.mNumDirty -= mDirtyNPages;

  // Mark the run as busy so that another thread freeing memory won't try to
  // coalesce it.
  mChunk->mPageMap[mFreeRunInd].bits |= CHUNK_MAP_BUSY;
  mChunk->mPageMap[FreeRunLastInd()].bits |= CHUNK_MAP_BUSY;

  // Before we unlock ensure that no other thread can allocate from these
  // pages.  (The spare chunk's runs are never in mRunsAvail.)
  if (mArena.mSpare != mChunk) {
    mArena.mRunsAvail.Remove(&mChunk->mPageMap[mFreeRunInd]);
  }
  return true;
}
   2188 
// Look for the first dirty page and the run it belongs to.
//
// Starting from mChunk->mDirtyRunHint, walk the chunk's page map run by run,
// then page by page within each unallocated run.  On success fill in
// mFreeRunInd/mFreeRunLen (the containing free run) and mDirtyInd (the
// real-page-aligned index at or before the first purgable dirty page) and
// return true.  Return false when no alignable dirty page exists.
bool arena_t::PurgeInfo::ScanForFirstDirtyPage() {
  // Scan in two nested loops.  The outer loop iterates over runs, and the inner
  // loop iterates over pages within unallocated runs.
  size_t run_pages;
  for (size_t run_idx = mChunk->mDirtyRunHint;
       run_idx < gChunkNumPages - gPagesPerRealPage; run_idx += run_pages) {
    size_t run_bits = mChunk->mPageMap[run_idx].bits;
    // We must not find any busy pages because this chunk shouldn't be in
    // the dirty list.
    MOZ_ASSERT((run_bits & CHUNK_MAP_BUSY) == 0);

    // Determine the run's size, this is used in the loop iteration to move to
    // the next run.
    if (run_bits & CHUNK_MAP_LARGE || !(run_bits & CHUNK_MAP_ALLOCATED)) {
      // Large and unallocated runs record their byte size in the map entry's
      // high bits.
      size_t size = run_bits & ~gPageSizeMask;
      run_pages = size >> gPageSize2Pow;
    } else {
      // A small (bin) run; its size is fixed by the owning bin.
      arena_run_t* run =
          reinterpret_cast<arena_run_t*>(run_bits & ~gPageSizeMask);
      MOZ_ASSERT(run == reinterpret_cast<arena_run_t*>(
                            reinterpret_cast<uintptr_t>(mChunk) +
                            (run_idx << gPageSize2Pow)));
      run_pages = run->mBin->mRunSizePages;
    }
    MOZ_ASSERT(run_pages > 0);
    MOZ_ASSERT(run_idx + run_pages <= gChunkNumPages);

    if (run_bits & CHUNK_MAP_ALLOCATED) {
      // Allocated runs won't contain dirty pages.
      continue;
    }

    mFreeRunInd = run_idx;
    mFreeRunLen = run_pages;
    mDirtyInd = 0;
    // Scan for dirty pages.
    for (size_t page_idx = run_idx; page_idx < run_idx + run_pages;
         page_idx++) {
      size_t& page_bits = mChunk->mPageMap[page_idx].bits;
      // We must not find any busy pages because this chunk shouldn't be in
      // the dirty list.
      MOZ_ASSERT((page_bits & CHUNK_MAP_BUSY) == 0);

      // gPagesPerRealPage is a power of two, use a bitmask to check if page_idx
      // is a multiple.
      if ((page_idx & (gPagesPerRealPage - 1)) == 0) {
        // A system call can be aligned here.
        mDirtyInd = page_idx;
      }

      if (page_bits & CHUNK_MAP_DIRTY) {
        MOZ_ASSERT((page_bits & CHUNK_MAP_FRESH_MADVISED_OR_DECOMMITTED) == 0);
        MOZ_ASSERT(mChunk->mDirtyRunHint <= run_idx);
        // Remember this run so the next scan starts here rather than at the
        // beginning of the chunk.
        mChunk->mDirtyRunHint = run_idx;

        // mDirtyInd is only non-zero once we've passed a real-page-aligned
        // index within this run.
        if (mDirtyInd) {
          return true;
        }

        // This dirty page occurs before a page we can align on,
        // so it can't be purged.
        mPurgeStats.pages_unpurgable++;
      }
    }
  }

  return false;
}
   2258 
// Scan backwards from the end of the free run for the last dirty page that
// can be covered by a real-page-aligned purge, setting mDirtyLen so that
// [mDirtyInd, mDirtyInd + mDirtyLen) is aligned and covers it.  Returns true
// on success; returns false (after advancing the dirty run hint past this
// run so the next scan makes progress) when no such page exists.
//
// Note: i is unsigned and counts down; the loop terminates because the
// caller established mDirtyInd != 0 (asserted after ScanForFirstDirtyPage).
bool arena_t::PurgeInfo::ScanForLastDirtyPage() {
  mDirtyLen = 0;
  for (size_t i = FreeRunLastInd(); i >= mDirtyInd; i--) {
    size_t& bits = mChunk->mPageMap[i].bits;
    // We must not find any busy pages because this chunk shouldn't be in the
    // dirty list.
    MOZ_ASSERT(!(bits & CHUNK_MAP_BUSY));

    // gPagesPerRealPage is a power of two, use a bitmask to check if page_idx
    // is a multiple minus one.
    if ((i & (gPagesPerRealPage - 1)) == gPagesPerRealPage - 1) {
      // A system call can be aligned here.
      mDirtyLen = i - mDirtyInd + 1;
    }

    if (bits & CHUNK_MAP_DIRTY) {
      // mDirtyLen is only non-zero once we've passed an alignable endpoint.
      if (mDirtyLen) {
        return true;
      }

      // This dirty page occurs after a page we can align on,
      // so it can't be purged.
      mPurgeStats.pages_unpurgable++;
    }
  }

  // Advance the dirty page hint so that the next scan will make progress.
  mChunk->mDirtyRunHint = FreeRunLastInd() + 1;
  return false;
}
   2289 
// After purging the pages in [mDirtyInd, mDirtyInd + mDirtyLen) and
// re-acquiring the arena lock: update each page's map entry, adjust the
// arena's counters and stats, clear the CHUNK_MAP_BUSY markers, and try to
// coalesce the free run with its neighbours.  Returns
// {more-dirty-pages-remain-in-chunk, chunk-to-release}; the second member is
// non-null when the chunk must be released by the caller.
std::pair<bool, arena_chunk_t*> arena_t::PurgeInfo::UpdatePagesAndCounts() {
  size_t num_madvised = 0;
  size_t num_decommitted = 0;
  size_t num_fresh = 0;

  for (size_t i = 0; i < mDirtyLen; i++) {
    size_t& bits = mChunk->mPageMap[mDirtyInd + i].bits;

    // The page must not have the dirty bit set.
    MOZ_ASSERT((bits & CHUNK_MAP_DIRTY) == 0);

    // Count pages that were already madvised/decommitted or fresh before
    // this purge; they didn't contribute to committed memory, and the
    // counter adjustments below must account for that.
#ifdef MALLOC_DECOMMIT
    if (bits & CHUNK_MAP_DECOMMITTED) {
      num_decommitted++;
    }
#else
    if (bits & CHUNK_MAP_MADVISED) {
      num_madvised++;
    }
#endif
    else if (bits & CHUNK_MAP_FRESH) {
      num_fresh++;
    }

    // Clear these page status bits.
    bits &= ~CHUNK_MAP_FRESH_MADVISED_OR_DECOMMITTED;

    // Set the free_operation bit.
#ifdef MALLOC_DECOMMIT
    bits |= CHUNK_MAP_DECOMMITTED;
#else
    bits |= CHUNK_MAP_MADVISED;
#endif
  }

  // Remove the CHUNK_MAP_BUSY marks from the run.
#ifdef MOZ_DEBUG
  MOZ_ASSERT(mChunk->mPageMap[mFreeRunInd].bits & CHUNK_MAP_BUSY);
  MOZ_ASSERT(mChunk->mPageMap[FreeRunLastInd()].bits & CHUNK_MAP_BUSY);
#endif
  mChunk->mPageMap[mFreeRunInd].bits &= ~CHUNK_MAP_BUSY;
  mChunk->mPageMap[FreeRunLastInd()].bits &= ~CHUNK_MAP_BUSY;

#ifndef MALLOC_DECOMMIT
  // Every page that wasn't already madvised has just been madvised.
  mArena.mNumMAdvised += mDirtyLen - num_madvised;
#endif

  mArena.mNumFresh -= num_fresh;
  // Only pages that were actually committed before (neither madvised,
  // decommitted nor fresh) reduce the committed count.
  mArena.mStats.committed -=
      mDirtyLen - num_madvised - num_decommitted - num_fresh;
  mPurgeStats.pages_dirty += mDirtyNPages;
  mPurgeStats.pages_total += mDirtyLen;
  mPurgeStats.system_calls++;

  // Note that this code can't update the dirty run hint.  There may be other
  // dirty pages within the same run.

  if (mChunk->mDying) {
    // A dying chunk doesn't need to be coalesced, it will already have one
    // large run.
    MOZ_ASSERT(mFreeRunInd == gChunkHeaderNumPages &&
               mFreeRunLen ==
                   gChunkNumPages - gChunkHeaderNumPages - gPagesPerRealPage);

    return std::make_pair(false, mChunk);
  }

  bool was_empty = mChunk->IsEmpty();
  mFreeRunInd =
      mArena.TryCoalesce(mChunk, mFreeRunInd, mFreeRunLen, FreeRunLenBytes());

  arena_chunk_t* chunk_to_release = nullptr;
  if (!was_empty && mChunk->IsEmpty()) {
    // This now-empty chunk will become the spare chunk and the spare
    // chunk will be returned for deletion.
    chunk_to_release = mArena.DemoteChunkToSpare(mChunk);
  }

  if (mChunk != mArena.mSpare) {
    // Re-insert the (possibly coalesced) free run now that it's no longer
    // marked busy.
    mArena.mRunsAvail.Insert(&mChunk->mPageMap[mFreeRunInd]);
  }

  return std::make_pair(mChunk->mNumDirty != 0, chunk_to_release);
}
   2374 
// Final clean-up after purge activity in this chunk, run with the arena lock
// held: clears the in-progress flag, finishes deleting a chunk that died
// while the lock was dropped, and re-queues the chunk on the dirty and
// (under MALLOC_DOUBLE_PURGE) madvised lists as requested.
void arena_t::PurgeInfo::FinishPurgingInChunk(bool aAddToMAdvised,
                                              bool aAddToDirty) {
  // If there's no more purge activity for this chunk then finish up while
  // we still have the lock.
  MOZ_ASSERT(mChunk->mIsPurging);
  mChunk->mIsPurging = false;

  if (mChunk->mDying) {
    // Another thread tried to delete this chunk while we weren't holding
    // the lock.  Now it's our responsibility to finish deleting it.

    DebugOnly<bool> release_chunk = mArena.RemoveChunk(mChunk);
    // RemoveChunk() can't return false because mIsPurging was false
    // during the call.
    MOZ_ASSERT(release_chunk);
    return;
  }

  if (mChunk->mNumDirty != 0 && aAddToDirty) {
    // Put the semi-processed chunk on the front of the queue so that it is
    // the first chunk processed next time.
    mArena.mChunksDirty.pushFront(mChunk);
  }

#ifdef MALLOC_DOUBLE_PURGE
  if (aAddToMAdvised) {
    // The chunk might already be in the list, but this
    // makes sure it's at the front.
    if (mArena.mChunksMAdvised.ElementProbablyInList(mChunk)) {
      mArena.mChunksMAdvised.remove(mChunk);
    }
    mArena.mChunksMAdvised.pushFront(mChunk);
  }
#endif
}
   2410 
// Coalesce the unallocated run starting at run_ind with any adjacent
// unallocated, non-busy runs, removing the absorbed neighbours from
// mRunsAvail (the resulting larger run is inserted by the caller).  Returns
// the index of the first page of the coalesced run.
//
// run_pages and size make each-other redundant. But we use them both and the
// caller computes both so this function requires both and will assert if they
// are inconsistent.
size_t arena_t::TryCoalesce(arena_chunk_t* aChunk, size_t run_ind,
                            size_t run_pages, size_t size) {
  // The parameters are taken by value and updated in place as the run grows;
  // only the final index is reported back to the caller.
  MOZ_ASSERT(size == run_pages << gPageSize2Pow);

  // Try to coalesce forward.
  if (run_ind + run_pages < gChunkNumPages - gPagesPerRealPage &&
      (aChunk->mPageMap[run_ind + run_pages].bits &
       (CHUNK_MAP_ALLOCATED | CHUNK_MAP_BUSY)) == 0) {
    size_t nrun_size =
        aChunk->mPageMap[run_ind + run_pages].bits & ~gPageSizeMask;

    // Remove successor from tree of available runs; the coalesced run is
    // inserted later.
    mRunsAvail.Remove(&aChunk->mPageMap[run_ind + run_pages]);

    size += nrun_size;
    run_pages = size >> gPageSize2Pow;

    // The successor's size must be recorded identically at its last page.
    MOZ_DIAGNOSTIC_ASSERT((aChunk->mPageMap[run_ind + run_pages - 1].bits &
                           ~gPageSizeMask) == nrun_size);
    // Record the combined size at both ends of the new run.
    aChunk->mPageMap[run_ind].bits =
        size | (aChunk->mPageMap[run_ind].bits & gPageSizeMask);
    aChunk->mPageMap[run_ind + run_pages - 1].bits =
        size | (aChunk->mPageMap[run_ind + run_pages - 1].bits & gPageSizeMask);
  }

  // Try to coalesce backward.
  if (run_ind > gChunkHeaderNumPages &&
      (aChunk->mPageMap[run_ind - 1].bits &
       (CHUNK_MAP_ALLOCATED | CHUNK_MAP_BUSY)) == 0) {
    size_t prun_size = aChunk->mPageMap[run_ind - 1].bits & ~gPageSizeMask;

    run_ind -= prun_size >> gPageSize2Pow;

    // Remove predecessor from tree of available runs; the coalesced run is
    // inserted later.
    mRunsAvail.Remove(&aChunk->mPageMap[run_ind]);

    size += prun_size;
    run_pages = size >> gPageSize2Pow;

    // The predecessor's size must be recorded identically at its first page.
    MOZ_DIAGNOSTIC_ASSERT((aChunk->mPageMap[run_ind].bits & ~gPageSizeMask) ==
                          prun_size);
    aChunk->mPageMap[run_ind].bits =
        size | (aChunk->mPageMap[run_ind].bits & gPageSizeMask);
    aChunk->mPageMap[run_ind + run_pages - 1].bits =
        size | (aChunk->mPageMap[run_ind + run_pages - 1].bits & gPageSizeMask);
  }

  // If the dirty run hint points within the run then the new greater run
  // is the run with the lowest index containing dirty pages.  So update the
  // hint.
  if ((aChunk->mDirtyRunHint > run_ind) &&
      (aChunk->mDirtyRunHint < run_ind + run_pages)) {
    aChunk->mDirtyRunHint = run_ind;
  }

  return run_ind;
}
   2476 
// Return aRun to its chunk's free space: mark its pages unallocated (and
// dirty when aDirty is true), coalesce with neighbouring free runs, and
// demote the chunk to the spare slot if it becomes empty.  Returns a chunk
// the caller must release, or nullptr.
arena_chunk_t* arena_t::DallocRun(arena_run_t* aRun, bool aDirty) {
  arena_chunk_t* chunk = GetChunkForPtr(aRun);
  size_t run_ind =
      (size_t)((uintptr_t(aRun) - uintptr_t(chunk)) >> gPageSize2Pow);
  MOZ_DIAGNOSTIC_ASSERT(run_ind >= gChunkHeaderNumPages);
  MOZ_RELEASE_ASSERT(run_ind < gChunkNumPages - 1);

  // Determine the run's size in pages: large runs record their byte size in
  // the page map, small runs take it from their bin.
  size_t size, run_pages;
  if ((chunk->mPageMap[run_ind].bits & CHUNK_MAP_LARGE) != 0) {
    size = chunk->mPageMap[run_ind].bits & ~gPageSizeMask;
    run_pages = (size >> gPageSize2Pow);
  } else {
    run_pages = aRun->mBin->mRunSizePages;
    size = run_pages << gPageSize2Pow;
  }

  // Mark pages as unallocated in the chunk map, at the same time clear all the
  // page bits and size information, set the dirty bit if the pages are now
  // dirty.
  for (size_t i = 0; i < run_pages; i++) {
    size_t& bits = chunk->mPageMap[run_ind + i].bits;

    // No bits other than ALLOCATED or LARGE may be set.
    MOZ_DIAGNOSTIC_ASSERT(
        (bits & gPageSizeMask & ~(CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED)) == 0);
    bits = aDirty ? CHUNK_MAP_DIRTY : 0;
  }

  if (aDirty) {
    // One of the reasons we check mIsPurging here is so that we don't add a
    // chunk that's currently in the middle of purging to the list, which could
    // start a concurrent purge.
    if (!chunk->mIsPurging &&
        (chunk->mNumDirty == 0 || !mChunksDirty.ElementProbablyInList(chunk))) {
      mChunksDirty.pushBack(chunk);
    }
    chunk->mNumDirty += run_pages;
    mNumDirty += run_pages;
  }

  // Record the run's size at both ends so neighbours can find and coalesce
  // with it.
  chunk->mPageMap[run_ind].bits |= size;
  chunk->mPageMap[run_ind + run_pages - 1].bits |= size;

  run_ind = TryCoalesce(chunk, run_ind, run_pages, size);

  // Now that run_ind is finalised we can update the dirty run hint.
  if (aDirty && run_ind < chunk->mDirtyRunHint) {
    chunk->mDirtyRunHint = run_ind;
  }

  // Deallocate chunk if it is now completely unused.
  arena_chunk_t* chunk_dealloc = nullptr;
  if (chunk->IsEmpty()) {
    chunk_dealloc = DemoteChunkToSpare(chunk);
  } else {
    // Insert into tree of available runs, now that coalescing is complete.
    mRunsAvail.Insert(&chunk->mPageMap[run_ind]);
  }

  return chunk_dealloc;
}
   2538 
   2539 void arena_t::TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun,
   2540                          size_t aOldSize, size_t aNewSize) {
   2541  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> gPageSize2Pow;
   2542  size_t head_npages = (aOldSize - aNewSize) >> gPageSize2Pow;
   2543 
   2544  MOZ_ASSERT(aOldSize > aNewSize);
   2545 
   2546  // Update the chunk map so that arena_t::RunDalloc() can treat the
   2547  // leading run as separately allocated.
   2548  aChunk->mPageMap[pageind].bits =
   2549      (aOldSize - aNewSize) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
   2550  aChunk->mPageMap[pageind + head_npages].bits =
   2551      aNewSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
   2552 
   2553  DebugOnly<arena_chunk_t*> no_chunk = DallocRun(aRun, false);
   2554  // This will never release a chunk as there's still at least one allocated
   2555  // run.
   2556  MOZ_ASSERT(!no_chunk);
   2557 }
   2558 
   2559 void arena_t::TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun,
   2560                          size_t aOldSize, size_t aNewSize, bool aDirty) {
   2561  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> gPageSize2Pow;
   2562  size_t npages = aNewSize >> gPageSize2Pow;
   2563 
   2564  MOZ_ASSERT(aOldSize > aNewSize);
   2565 
   2566  // Update the chunk map so that arena_t::RunDalloc() can treat the
   2567  // trailing run as separately allocated.
   2568  aChunk->mPageMap[pageind].bits =
   2569      aNewSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
   2570  aChunk->mPageMap[pageind + npages].bits =
   2571      (aOldSize - aNewSize) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
   2572 
   2573  DebugOnly<arena_chunk_t*> no_chunk =
   2574      DallocRun((arena_run_t*)(uintptr_t(aRun) + aNewSize), aDirty);
   2575 
   2576  // This will never release a chunk as there's still at least one allocated
   2577  // run.
   2578  MOZ_ASSERT(!no_chunk);
   2579 }
   2580 
   2581 arena_run_t* arena_t::GetNewEmptyBinRun(arena_bin_t* aBin) {
   2582  arena_run_t* run;
   2583  unsigned i, remainder;
   2584 
   2585  // Allocate a new run.
   2586  run = AllocRun(static_cast<size_t>(aBin->mRunSizePages) << gPageSize2Pow,
   2587                 false, false);
   2588  if (!run) {
   2589    return nullptr;
   2590  }
   2591 
   2592  // Initialize run internals.
   2593  run->mBin = aBin;
   2594 
   2595  for (i = 0; i < aBin->mRunNumRegionsMask - 1; i++) {
   2596    run->mRegionsMask[i] = UINT_MAX;
   2597  }
   2598  remainder = aBin->mRunNumRegions & ((1U << (LOG2(sizeof(int)) + 3)) - 1);
   2599  if (remainder == 0) {
   2600    run->mRegionsMask[i] = UINT_MAX;
   2601  } else {
   2602    // The last element has spare bits that need to be unset.
   2603    run->mRegionsMask[i] =
   2604        (UINT_MAX >> ((1U << (LOG2(sizeof(int)) + 3)) - remainder));
   2605  }
   2606 
   2607  run->mRegionsMinElement = 0;
   2608 
   2609  run->mNumFree = aBin->mRunNumRegions;
   2610 #if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
   2611  run->mMagic = ARENA_RUN_MAGIC;
   2612 #endif
   2613 
   2614  // Make sure we continue to use this run for subsequent allocations.
   2615  new (&run->mRunListElem) DoublyLinkedListElement<arena_run_t>();
   2616  aBin->mNonFullRuns.pushFront(run);
   2617 
   2618  aBin->mNumRuns++;
   2619  return run;
   2620 }
   2621 
   2622 arena_run_t* arena_t::GetNonFullBinRun(arena_bin_t* aBin) {
   2623  auto mrf_head = aBin->mNonFullRuns.begin();
   2624  if (mrf_head) {
   2625    // Take the head and if we are going to fill it, remove it from our list.
   2626    arena_run_t* run = &(*mrf_head);
   2627    MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
   2628    if (run->mNumFree == 1) {
   2629      aBin->mNonFullRuns.remove(run);
   2630    }
   2631    return run;
   2632  }
   2633  return GetNewEmptyBinRun(aBin);
   2634 }
   2635 
// Compute the run geometry for this size class: the run size in pages
// (mRunSizePages), the number of regions per run (mRunNumRegions), the
// number of words in the free-region bitmask (mRunNumRegionsMask), and the
// offset of the first region (mRunFirstRegionOffset).  Starting from one
// page, larger run sizes are tried until the header overhead is an
// acceptable fraction of the run.
arena_bin_t::arena_bin_t(SizeClass aSizeClass) : mSizeClass(aSizeClass.Size()) {
  size_t try_run_size;
  unsigned try_nregs, try_mask_nelms, try_reg0_offset;
  // Size of the run header, excluding mRegionsMask.
  static const size_t kFixedHeaderSize = offsetof(arena_run_t, mRegionsMask);

  MOZ_ASSERT(aSizeClass.Size() <= gMaxBinClass);

  try_run_size = gPageSize;

  // Run size expansion loop.
  while (true) {
    try_nregs = ((try_run_size - kFixedHeaderSize) / mSizeClass) +
                1;  // Counter-act try_nregs-- in loop.

    // The do..while loop iteratively reduces the number of regions until
    // the run header and the regions no longer overlap.  A closed formula
    // would be quite messy, since there is an interdependency between the
    // header's mask length and the number of regions.
    do {
      try_nregs--;
      try_mask_nelms =
          (try_nregs >> (LOG2(sizeof(int)) + 3)) +
          ((try_nregs & ((1U << (LOG2(sizeof(int)) + 3)) - 1)) ? 1 : 0);
      try_reg0_offset = try_run_size - (try_nregs * mSizeClass);
    } while (kFixedHeaderSize + (sizeof(unsigned) * try_mask_nelms) >
             try_reg0_offset);

    // Try to keep the run overhead below kRunOverhead.
    if (Fraction(try_reg0_offset, try_run_size) <= kRunOverhead) {
      break;
    }

    // If the overhead is larger than the size class, it means the size class
    // is small and doesn't align very well with the header. It's desirable to
    // have smaller run sizes for them, so relax the overhead requirement.
    if (try_reg0_offset > mSizeClass) {
      if (Fraction(try_reg0_offset, try_run_size) <= kRunRelaxedOverhead) {
        break;
      }
    }

    // The run header includes one bit per region of the given size. For sizes
    // small enough, the number of regions is large enough that growing the run
    // size barely moves the needle for the overhead because of all those bits.
    // For example, for a size of 8 bytes, adding 4KiB to the run size adds
    // close to 512 bits to the header, which is 64 bytes.
    // With such overhead, there is no way to get to the wanted overhead above,
    // so we give up if the required size for mRegionsMask more than doubles the
    // size of the run header.
    if (try_mask_nelms * sizeof(unsigned) >= kFixedHeaderSize) {
      break;
    }

    // If next iteration is going to be larger than the largest possible large
    // size class, then we didn't find a setup where the overhead is small
    // enough, and we can't do better than the current settings, so just use
    // that.
    if (try_run_size + gPageSize > gMaxLargeClass) {
      break;
    }

    // Try more aggressive settings.
    try_run_size += gPageSize;
  }

  MOZ_ASSERT(kFixedHeaderSize + (sizeof(unsigned) * try_mask_nelms) <=
             try_reg0_offset);
  MOZ_ASSERT((try_mask_nelms << (LOG2(sizeof(int)) + 3)) >= try_nregs);

  // Our list management would break if mRunNumRegions == 1 and we should use
  // a large size class instead, anyways.
  MOZ_ASSERT(try_nregs > 1);

  // Copy final settings.
  MOZ_ASSERT((try_run_size >> gPageSize2Pow) <= UINT8_MAX);
  mRunSizePages = static_cast<uint8_t>(try_run_size >> gPageSize2Pow);
  mRunNumRegions = try_nregs;
  mRunNumRegionsMask = try_mask_nelms;
  mRunFirstRegionOffset = try_reg0_offset;
  mSizeDivisor = FastDivisor<uint16_t>(aSizeClass.Size(), try_run_size);
}
   2718 
// Re-seed (or enable/disable) small-allocation randomization according to
// the current opt_randomize_small setting.
void arena_t::ResetSmallAllocRandomization() {
  if (MOZ_UNLIKELY(opt_randomize_small)) {
    // InitPRNG() must be entered with mLock held; it drops and re-takes the
    // lock internally while gathering entropy.
    MaybeMutexAutoLock lock(mLock);
    InitPRNG();
  }
  mRandomizeSmallAllocations = opt_randomize_small;
}
   2726 
// Initialize (or re-seed) mPRNG.  Called with mLock held; the lock is
// dropped while gathering entropy because RandomUint64() may itself
// allocate.  mIsPRNGInitializing flags the unlocked window so other threads
// (and re-entrant allocations) don't start a second initialization.
void arena_t::InitPRNG() {
  // Both another thread could race and the code backing RandomUint64
  // (arc4random for example) may allocate memory while here, so we must
  // ensure to start the mPRNG initialization only once and to not hold
  // the lock while initializing.
  mIsPRNGInitializing = true;
  {
    mLock.Unlock();
    mozilla::Maybe<uint64_t> prngState1 = mozilla::RandomUint64();
    mozilla::Maybe<uint64_t> prngState2 = mozilla::RandomUint64();
    mLock.Lock();

    // If entropy was unavailable, fall back to zero seeds rather than fail.
    mozilla::non_crypto::XorShift128PlusRNG prng(prngState1.valueOr(0),
                                                 prngState2.valueOr(0));
    if (mPRNG) {
      // The PRNG object already exists (e.g. from a previous call); just
      // replace its state.
      *mPRNG = prng;
    } else {
      // Back the PRNG with memory from the separate base allocator.
      void* backing =
          sBaseAlloc.alloc(sizeof(mozilla::non_crypto::XorShift128PlusRNG));
      mPRNG = new (backing)
          mozilla::non_crypto::XorShift128PlusRNG(std::move(prng));
    }
  }
  mIsPRNGInitializing = false;
}
   2752 
// Allocate a small (bin-sized) region of aSize bytes, optionally zeroed.
// Returns nullptr on OOM.
void* arena_t::MallocSmall(size_t aSize, bool aZero) {
  void* ret;
  arena_bin_t* bin;
  arena_run_t* run;
  SizeClass sizeClass(aSize);
  aSize = sizeClass.Size();

  // Map the size class to its bin.  mBins is laid out as: quantum-spaced
  // classes, then quantum-wide-spaced classes, then power-of-two sub-page
  // classes.
  switch (sizeClass.Type()) {
    case SizeClass::Quantum:
      // Although we divide 2 things by kQuantum, the compiler will
      // reduce `kMinQuantumClass / kQuantum` to a single constant.
      bin = &mBins[(aSize / kQuantum) - (kMinQuantumClass / kQuantum)];
      break;
    case SizeClass::QuantumWide:
      bin = &mBins[kNumQuantumClasses + (aSize / kQuantumWide) -
                   (kMinQuantumWideClass / kQuantumWide)];
      break;
    case SizeClass::SubPage:
      bin = &mBins[kNumQuantumClasses + kNumQuantumWideClasses +
                   (FloorLog2(aSize) - LOG2(kMinSubPageClass))];
      break;
    default:
      MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected size class type");
  }
  MOZ_DIAGNOSTIC_ASSERT(aSize == bin->mSizeClass);

  size_t num_dirty_before, num_dirty_after;
  {
    MaybeMutexAutoLock lock(mLock);

#ifdef MOZ_DEBUG
    bool isInitializingThread(false);
#endif

    // Lazily initialize the PRNG used for small-allocation randomization.
    // The mIsPRNGInitializing check avoids starting a second initialization
    // while InitPRNG() has temporarily dropped the lock.
    if (MOZ_UNLIKELY(mRandomizeSmallAllocations && mPRNG == nullptr &&
                     !mIsPRNGInitializing)) {
#ifdef MOZ_DEBUG
      isInitializingThread = true;
#endif
      InitPRNG();
    }

    MOZ_ASSERT(!mRandomizeSmallAllocations || mPRNG ||
               (mIsPRNGInitializing && !isInitializingThread));

    // Sample mNumDirty around the run acquisition; a decrease means dirty
    // pages were reused, which is reported via NotifySignificantReuse()
    // outside the lock.
    num_dirty_before = mNumDirty;
    run = GetNonFullBinRun(bin);
    num_dirty_after = mNumDirty;
    if (MOZ_UNLIKELY(!run)) {
      return nullptr;
    }
    MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
    MOZ_DIAGNOSTIC_ASSERT(run->mNumFree > 0);
    ret = ArenaRunRegAlloc(run, bin);
    MOZ_DIAGNOSTIC_ASSERT(ret);
    run->mNumFree--;
    if (!ret) {
      return nullptr;
    }

    mStats.allocated_small += aSize;
    mStats.operations++;
  }
  // The remaining work doesn't need the arena lock.
  if (num_dirty_after < num_dirty_before) {
    NotifySignificantReuse();
  }
  if (!aZero) {
    ApplyZeroOrJunk(ret, aSize);
  } else {
    memset(ret, 0, aSize);
  }

  return ret;
}
   2827 
// Allocate a large (page-granular, run-backed) region of aSize bytes,
// optionally zeroed.  Returns nullptr on OOM.
void* arena_t::MallocLarge(size_t aSize, bool aZero) {
  void* ret;

  // Large allocation.
  aSize = PAGE_CEILING(aSize);

  size_t num_dirty_before, num_dirty_after;
  {
    MaybeMutexAutoLock lock(mLock);
    // Sample mNumDirty around the allocation; a decrease means dirty pages
    // were reused, reported outside the lock.
    num_dirty_before = mNumDirty;
    ret = AllocRun(aSize, true, aZero);
    num_dirty_after = mNumDirty;
    if (!ret) {
      return nullptr;
    }
    mStats.allocated_large += aSize;
    mStats.operations++;
  }
  if (num_dirty_after < num_dirty_before) {
    NotifySignificantReuse();
  }

  if (!aZero) {
    ApplyZeroOrJunk(ret, aSize);
  }

  return ret;
}
   2856 
   2857 void* arena_t::Malloc(size_t aSize, bool aZero) {
   2858  MOZ_DIAGNOSTIC_ASSERT(mMagic == ARENA_MAGIC);
   2859  MOZ_ASSERT(aSize != 0);
   2860 
   2861  if (aSize <= gMaxBinClass) {
   2862    return MallocSmall(aSize, aZero);
   2863  }
   2864  if (aSize <= gMaxLargeClass) {
   2865    return MallocLarge(aSize, aZero);
   2866  }
   2867  return MallocHuge(aSize, aZero);
   2868 }
   2869 
// Only handles large allocations that require more than page alignment.
//
// Allocates an over-sized run of aAllocSize bytes, then trims its head
// and/or tail so that the returned pointer is aAlignment-aligned and spans
// exactly aSize bytes.  Returns nullptr on OOM.
void* arena_t::PallocLarge(size_t aAlignment, size_t aSize, size_t aAllocSize) {
  void* ret;
  size_t offset;
  arena_chunk_t* chunk;

  MOZ_ASSERT((aSize & gPageSizeMask) == 0);
  MOZ_ASSERT((aAlignment & gPageSizeMask) == 0);

  size_t num_dirty_before, num_dirty_after;
  {
    MaybeMutexAutoLock lock(mLock);
    num_dirty_before = mNumDirty;
    ret = AllocRun(aAllocSize, true, false);
    if (!ret) {
      return nullptr;
    }

    chunk = GetChunkForPtr(ret);

    // How far past an alignment boundary the run begins.
    offset = uintptr_t(ret) & (aAlignment - 1);
    MOZ_ASSERT((offset & gPageSizeMask) == 0);
    MOZ_ASSERT(offset < aAllocSize);
    if (offset == 0) {
      // Already aligned: just trim the excess tail.
      TrimRunTail(chunk, (arena_run_t*)ret, aAllocSize, aSize, false);
    } else {
      size_t leadsize, trailsize;

      // Trim the leading pages up to the next alignment boundary.
      leadsize = aAlignment - offset;
      if (leadsize > 0) {
        TrimRunHead(chunk, (arena_run_t*)ret, aAllocSize,
                    aAllocSize - leadsize);
        ret = (void*)(uintptr_t(ret) + leadsize);
      }

      trailsize = aAllocSize - leadsize - aSize;
      if (trailsize != 0) {
        // Trim trailing space.
        MOZ_ASSERT(trailsize < aAllocSize);
        TrimRunTail(chunk, (arena_run_t*)ret, aSize + trailsize, aSize, false);
      }
    }
    num_dirty_after = mNumDirty;

    mStats.allocated_large += aSize;
    mStats.operations++;
  }
  if (num_dirty_after < num_dirty_before) {
    NotifySignificantReuse();
  }
  // Note that since Bug 1488780 we don't attempt to purge dirty memory on
  // this code path. In general there won't be dirty memory above the
  // threshold after an allocation, only after free.  The exception is if the
  // dirty page threshold has changed.

  ApplyZeroOrJunk(ret, aSize);
  return ret;
}
   2928 
// Allocate aSize bytes aligned to aAlignment (assumed to be a power of two:
// the code below masks with aAlignment - 1).  Dispatches to Malloc(),
// PallocLarge(), MallocHuge() or PallocHuge() depending on the size and
// alignment required.  Returns nullptr on overflow or OOM.
void* arena_t::Palloc(size_t aAlignment, size_t aSize) {
  void* ret;
  size_t ceil_size;

  // Round size up to the nearest multiple of alignment.
  //
  // This done, we can take advantage of the fact that for each small
  // size class, every object is aligned at the smallest power of two
  // that is non-zero in the base two representation of the size.  For
  // example:
  //
  //   Size |   Base 2 | Minimum alignment
  //   -----+----------+------------------
  //     96 |  1100000 |  32
  //    144 | 10100000 |  32
  //    192 | 11000000 |  64
  //
  // Depending on runtime settings, it is possible that arena_malloc()
  // will further round up to a power of two, but that never causes
  // correctness issues.
  ceil_size = ALIGNMENT_CEILING(aSize, aAlignment);

  // (ceil_size < aSize) protects against the combination of maximal
  // alignment and size greater than maximal alignment.
  if (ceil_size < aSize) {
    // size_t overflow.
    return nullptr;
  }

  if (ceil_size <= gPageSize ||
      (aAlignment <= gPageSize && ceil_size <= gMaxLargeClass)) {
    // Small and page-aligned large requests are naturally aligned by the
    // regular allocation paths.
    ret = Malloc(ceil_size, false);
  } else {
    size_t run_size;

    // We can't achieve sub-page alignment, so round up alignment
    // permanently; it makes later calculations simpler.
    aAlignment = PAGE_CEILING(aAlignment);
    ceil_size = PAGE_CEILING(aSize);

    // (ceil_size < aSize) protects against very large sizes within
    // pagesize of SIZE_T_MAX.
    //
    // (ceil_size + aAlignment < ceil_size) protects against the
    // combination of maximal alignment and ceil_size large enough
    // to cause overflow.  This is similar to the first overflow
    // check above, but it needs to be repeated due to the new
    // ceil_size value, which may now be *equal* to maximal
    // alignment, whereas before we only detected overflow if the
    // original size was *greater* than maximal alignment.
    if (ceil_size < aSize || ceil_size + aAlignment < ceil_size) {
      // size_t overflow.
      return nullptr;
    }

    // Calculate the size of the over-size run that arena_palloc()
    // would need to allocate in order to guarantee the alignment.
    if (ceil_size >= aAlignment) {
      run_size = ceil_size + aAlignment - gPageSize;
    } else {
      // It is possible that (aAlignment << 1) will cause
      // overflow, but it doesn't matter because we also
      // subtract pagesize, which in the case of overflow
      // leaves us with a very large run_size.  That causes
      // the first conditional below to fail, which means
      // that the bogus run_size value never gets used for
      // anything important.
      run_size = (aAlignment << 1) - gPageSize;
    }

    if (run_size <= gMaxLargeClass) {
      ret = PallocLarge(aAlignment, ceil_size, run_size);
    } else if (aAlignment <= kChunkSize) {
      // Huge allocations are chunk-aligned by construction.
      ret = MallocHuge(ceil_size, false);
    } else {
      ret = PallocHuge(ceil_size, aAlignment, false);
    }
  }

  MOZ_ASSERT((uintptr_t(ret) & (aAlignment - 1)) == 0);
  return ret;
}
   3011 
// Describes an allocation: its size plus the metadata structure that tracks
// it.  For small and large allocations that is the owning arena chunk; for
// huge allocations it is the extent node recorded in the global `huge` tree.
// Which union member is active is implied by mSize: <= gMaxLargeClass means
// mChunk, anything larger means mNode.
class AllocInfo {
 public:
  // Look up the allocation info for aPtr.  With Validate == true, pointers
  // that cannot belong to the allocator (allocator not initialized, chunk
  // unknown to gChunkRTree, address not found in the huge tree) yield an
  // invalid (size 0) AllocInfo instead of being dereferenced blindly.
  template <bool Validate = false>
  static inline AllocInfo Get(const void* aPtr) {
    // If the allocator is not initialized, the pointer can't belong to it.
    if (Validate && !malloc_initialized) {
      return AllocInfo();
    }

    auto chunk = GetChunkForPtr(aPtr);
    if (Validate) {
      if (!chunk || !gChunkRTree.Get(chunk)) {
        return AllocInfo();
      }
    }

    if (chunk != aPtr) {
      // aPtr is not chunk-aligned, so it is a small or large allocation
      // within an arena chunk.
      MOZ_DIAGNOSTIC_ASSERT(chunk->mArena->mMagic == ARENA_MAGIC);
      size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> gPageSize2Pow);
      return GetInChunk(aPtr, chunk, pageind);
    }

    extent_node_t key;

    // Huge allocation
    key.mAddr = chunk;
    MutexAutoLock lock(huge_mtx);
    extent_node_t* node = huge.Search(&key);
    if (Validate && !node) {
      return AllocInfo();
    }
    return AllocInfo(node->mSize, node);
  }

  // Get the allocation information for a pointer we know is within a chunk
  // (Small or large, not huge).
  static inline AllocInfo GetInChunk(const void* aPtr, arena_chunk_t* aChunk,
                                     size_t pageind) {
    size_t mapbits = aChunk->mPageMap[pageind].bits;
    MOZ_DIAGNOSTIC_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);

    size_t size;
    if ((mapbits & CHUNK_MAP_LARGE) == 0) {
      // Small allocation: the page-map bits point at the run, whose bin
      // records the (uniform) region size class.
      arena_run_t* run = (arena_run_t*)(mapbits & ~gPageSizeMask);
      MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
      size = run->mBin->mSizeClass;
    } else {
      // Large allocation: the size is stored directly in the page-map bits
      // of the allocation's first page.
      size = mapbits & ~gPageSizeMask;
      MOZ_DIAGNOSTIC_ASSERT(size != 0);
    }

    return AllocInfo(size, aChunk);
  }

  // Validate ptr before assuming that it points to an allocation.  Currently,
  // the following validation is performed:
  //
  // + Check that ptr is not nullptr.
  //
  // + Check that ptr lies within a mapped chunk.
  static inline AllocInfo GetValidated(const void* aPtr) {
    return Get<true>(aPtr);
  }

  // Invalid info (size 0); what the validating lookups return on failure.
  AllocInfo() : mSize(0), mChunk(nullptr) {}

  // Info for a small or large allocation owned by aChunk.
  explicit AllocInfo(size_t aSize, arena_chunk_t* aChunk)
      : mSize(aSize), mChunk(aChunk) {
    MOZ_ASSERT(mSize <= gMaxLargeClass);
  }

  // Info for a huge allocation tracked by aNode.
  explicit AllocInfo(size_t aSize, extent_node_t* aNode)
      : mSize(aSize), mNode(aNode) {
    MOZ_ASSERT(mSize > gMaxLargeClass);
  }

  size_t Size() { return mSize; }

  // Returns the arena owning this allocation.  For huge allocations this
  // release-asserts that the arena has not been disposed (see below).
  arena_t* Arena() {
    if (mSize <= gMaxLargeClass) {
      return mChunk->mArena;
    }
    // Best effort detection that we're not trying to access an already
    // disposed arena. In the case of a disposed arena, the memory location
    // pointed by mNode->mArena is either free (but still a valid memory
    // region, per TypedBaseAlloc<arena_t>), in which case its id was reset,
    // or has been reallocated for a new region, and its id is very likely
    // different (per randomness). In both cases, the id is unlikely to
    // match what it was for the disposed arena.
    MOZ_RELEASE_ASSERT(mNode->mArenaId == mNode->mArena->mId);
    return mNode->mArena;
  }

  bool IsValid() const { return !!mSize; }

 private:
  size_t mSize;
  union {
    // Pointer to the chunk associated with the allocation for small
    // and large allocations.
    arena_chunk_t* mChunk;

    // Pointer to the extent node for huge allocations.
    extent_node_t* mNode;
  };
};
   3118 
// Fill *aInfo with everything known about aPtr: whether it is a live or
// freed allocation (huge, large or small), its base address, usable size and
// owning arena id, or TagUnknown when the pointer does not belong to the
// allocator.  Unlike AllocInfo::Get, this never crashes on foreign pointers.
inline void MozJemalloc::jemalloc_ptr_info(const void* aPtr,
                                           jemalloc_ptr_info_t* aInfo) {
  arena_chunk_t* chunk = GetChunkForPtr(aPtr);

  // Is the pointer null, or within one chunk's size of null?
  // Alternatively, if the allocator is not initialized yet, the pointer
  // can't be known.
  if (!chunk || !malloc_initialized) {
    *aInfo = {TagUnknown, nullptr, 0, 0};
    return;
  }

  // Look for huge allocations before looking for |chunk| in gChunkRTree.
  // This is necessary because |chunk| won't be in gChunkRTree if it's
  // the second or subsequent chunk in a huge allocation.
  extent_node_t* node;
  extent_node_t key;
  {
    MutexAutoLock lock(huge_mtx);
    key.mAddr = const_cast<void*>(aPtr);
    // Cast to the bounds-trait tree so Search matches any address inside a
    // huge region, not just its exact start address.
    node =
        reinterpret_cast<RedBlackTree<extent_node_t, ExtentTreeBoundsTrait>*>(
            &huge)
            ->Search(&key);
    if (node) {
      *aInfo = {TagLiveAlloc, node->mAddr, node->mSize, node->mArena->mId};
      return;
    }
  }

  // It's not a huge allocation. Check if we have a known chunk.
  if (!gChunkRTree.Get(chunk)) {
    *aInfo = {TagUnknown, nullptr, 0, 0};
    return;
  }

  MOZ_DIAGNOSTIC_ASSERT(chunk->mArena->mMagic == ARENA_MAGIC);

  // Get the page number within the chunk.
  size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> gPageSize2Pow);
  if (pageind < gChunkHeaderNumPages) {
    // Within the chunk header.
    *aInfo = {TagUnknown, nullptr, 0, 0};
    return;
  }

  size_t mapbits = chunk->mPageMap[pageind].bits;

  if (!(mapbits & CHUNK_MAP_ALLOCATED)) {
    void* pageaddr = (void*)(uintptr_t(aPtr) & ~gPageSizeMask);
    *aInfo = {TagFreedPage, pageaddr, gPageSize, chunk->mArena->mId};
    return;
  }

  if (mapbits & CHUNK_MAP_LARGE) {
    // It's a large allocation. Only the first page of a large
    // allocation contains its size, so if the address is not in
    // the first page, scan back to find the allocation size.
    size_t size;
    while (true) {
      size = mapbits & ~gPageSizeMask;
      if (size != 0) {
        break;
      }

      // The following two return paths shouldn't occur in
      // practice unless there is heap corruption.
      pageind--;
      MOZ_DIAGNOSTIC_ASSERT(pageind >= gChunkHeaderNumPages);
      if (pageind < gChunkHeaderNumPages) {
        *aInfo = {TagUnknown, nullptr, 0, 0};
        return;
      }

      mapbits = chunk->mPageMap[pageind].bits;
      MOZ_DIAGNOSTIC_ASSERT(mapbits & CHUNK_MAP_LARGE);
      if (!(mapbits & CHUNK_MAP_LARGE)) {
        *aInfo = {TagUnknown, nullptr, 0, 0};
        return;
      }
    }

    void* addr = ((char*)chunk) + (pageind << gPageSize2Pow);
    *aInfo = {TagLiveAlloc, addr, size, chunk->mArena->mId};
    return;
  }

  // It must be a small allocation.
  auto run = (arena_run_t*)(mapbits & ~gPageSizeMask);
  MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);

  // The allocation size is stored in the run metadata.
  size_t size = run->mBin->mSizeClass;

  // Address of the first possible pointer in the run after its headers.
  uintptr_t reg0_addr = (uintptr_t)run + run->mBin->mRunFirstRegionOffset;
  if (aPtr < (void*)reg0_addr) {
    // In the run header.
    *aInfo = {TagUnknown, nullptr, 0, 0};
    return;
  }

  // Position in the run.
  unsigned regind = ((uintptr_t)aPtr - reg0_addr) / size;

  // Pointer to the allocation's base address.
  void* addr = (void*)(reg0_addr + regind * size);

  // Check if the allocation has been freed by testing the region's bit in
  // the run's free mask (bits are grouped into ints; elm selects the int,
  // bit the position within it).
  unsigned elm = regind >> (LOG2(sizeof(int)) + 3);
  unsigned bit = regind - (elm << (LOG2(sizeof(int)) + 3));
  PtrInfoTag tag =
      ((run->mRegionsMask[elm] & (1U << bit))) ? TagFreedAlloc : TagLiveAlloc;

  *aInfo = {tag, addr, size, chunk->mArena->mId};
}
   3235 
   3236 namespace Debug {
   3237 // Helper for debuggers. We don't want it to be inlined and optimized out.
   3238 MOZ_NEVER_INLINE jemalloc_ptr_info_t* jemalloc_ptr_info(const void* aPtr) {
   3239  static jemalloc_ptr_info_t info;
   3240  MozJemalloc::jemalloc_ptr_info(aPtr, &info);
   3241  return &info;
   3242 }
   3243 }  // namespace Debug
   3244 
// Free the small allocation aPtr whose page-map entry is aMapElm.  Called
// with the arena lock held (see arena_dalloc).  Returns a chunk that became
// entirely free and must be deallocated by the caller after the lock is
// released, or nullptr.  Note: aChunk is not used here (the run is recovered
// from aMapElm's bits), but the parameter mirrors DallocLarge's signature.
arena_chunk_t* arena_t::DallocSmall(arena_chunk_t* aChunk, void* aPtr,
                                    arena_chunk_map_t* aMapElm) {
  arena_run_t* run;
  arena_bin_t* bin;
  size_t size;

  // The page-map bits of a small allocation's page point at its run.
  run = (arena_run_t*)(aMapElm->bits & ~gPageSizeMask);
  MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
  bin = run->mBin;
  size = bin->mSizeClass;
  // The pointer must lie past the run header.
  MOZ_DIAGNOSTIC_ASSERT(uintptr_t(aPtr) >=
                        uintptr_t(run) + bin->mRunFirstRegionOffset);

  // Mark the region free in the run's bookkeeping.
  arena_run_reg_dalloc(run, bin, aPtr, size);
  run->mNumFree++;
  arena_chunk_t* dealloc_chunk = nullptr;

  if (run->mNumFree == bin->mRunNumRegions) {
    // This run is entirely freed, remove it from our bin.
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
    run->mMagic = 0;
#endif
    MOZ_ASSERT(bin->mNonFullRuns.ElementProbablyInList(run));
    bin->mNonFullRuns.remove(run);
    dealloc_chunk = DallocRun(run, true);
    bin->mNumRuns--;
  } else if (run->mNumFree == 1) {
    // This is first slot we freed from this run, start tracking.
    MOZ_ASSERT(!bin->mNonFullRuns.ElementProbablyInList(run));
    bin->mNonFullRuns.pushFront(run);
  }
  // else we just keep the run in mNonFullRuns where it is.
  // Note that we could move it to the head of the list here to get a strict
  // "most-recently-freed" order, but some of our benchmarks seem to be more
  // sensible to the increased overhead that this brings than to the order
  // supposedly slightly better for keeping CPU caches warm if we do.
  // In general we cannot foresee the future, so any order we choose might
  // perform different for different use cases and needs to be balanced with
  // the book-keeping overhead via measurements.

  mStats.allocated_small -= size;
  mStats.operations++;

  return dealloc_chunk;
}
   3290 
   3291 arena_chunk_t* arena_t::DallocLarge(arena_chunk_t* aChunk, void* aPtr) {
   3292  MOZ_DIAGNOSTIC_ASSERT((uintptr_t(aPtr) & gPageSizeMask) == 0);
   3293  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> gPageSize2Pow;
   3294  size_t size = aChunk->mPageMap[pageind].bits & ~gPageSizeMask;
   3295 
   3296  mStats.allocated_large -= size;
   3297  mStats.operations++;
   3298 
   3299  return DallocRun((arena_run_t*)aPtr, true);
   3300 }
   3301 
// Free a small or large (not huge) allocation.  aOffset is aPtr's offset
// within its chunk (non-zero, since offset 0 would mean a huge allocation;
// see idalloc).  If aArena is non-null, it must be the arena owning aPtr;
// this is release-asserted to catch cross-arena frees.
static inline void arena_dalloc(void* aPtr, size_t aOffset, arena_t* aArena) {
  MOZ_ASSERT(aPtr);
  MOZ_ASSERT(aOffset != 0);
  MOZ_ASSERT(GetChunkOffsetForPtr(aPtr) == aOffset);

  // Recover the chunk start, and from it the owning arena.
  auto chunk = (arena_chunk_t*)((uintptr_t)aPtr - aOffset);
  auto arena = chunk->mArena;
  MOZ_ASSERT(arena);
  MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
  MOZ_RELEASE_ASSERT(!aArena || arena == aArena);

  size_t pageind = aOffset >> gPageSize2Pow;
  // Poison (if enabled) before taking the lock; only needs the size.
  if (opt_poison) {
    AllocInfo info = AllocInfo::GetInChunk(aPtr, chunk, pageind);
    MOZ_ASSERT(info.IsValid());
    MaybePoison(aPtr, info.Size());
  }

  // Work to be done after the lock is released: deallocating a now-empty
  // chunk, and possibly purging dirty pages.
  arena_chunk_t* chunk_dealloc_delay = nullptr;
  purge_action_t purge_action;
  {
    MOZ_DIAGNOSTIC_ASSERT(arena->mLock.SafeOnThisThread());
    MaybeMutexAutoLock lock(arena->mLock);
    arena_chunk_map_t* mapelm = &chunk->mPageMap[pageind];
    MOZ_RELEASE_ASSERT(
        (mapelm->bits &
         (CHUNK_MAP_FRESH_MADVISED_OR_DECOMMITTED | CHUNK_MAP_ZEROED)) == 0,
        "Freeing in a page with bad bits.");
    MOZ_RELEASE_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0,
                       "Double-free?");
    if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
      // Small allocation.
      chunk_dealloc_delay = arena->DallocSmall(chunk, aPtr, mapelm);
    } else {
      // Large allocation.
      chunk_dealloc_delay = arena->DallocLarge(chunk, aPtr);
    }

    purge_action = arena->ShouldStartPurge();
  }

  // Chunk deallocation is deferred to outside the arena lock.
  if (chunk_dealloc_delay) {
    chunk_dealloc((void*)chunk_dealloc_delay, kChunkSize, ARENA_CHUNK);
  }

  arena->MayDoOrQueuePurge(purge_action, "arena_dalloc");
}
   3349 
   3350 static inline void idalloc(void* ptr, arena_t* aArena) {
   3351  size_t offset;
   3352 
   3353  MOZ_ASSERT(ptr);
   3354 
   3355  offset = GetChunkOffsetForPtr(ptr);
   3356  if (offset != 0) {
   3357    arena_dalloc(ptr, offset, aArena);
   3358  } else {
   3359    huge_dalloc(ptr, aArena);
   3360  }
   3361 }
   3362 
   3363 inline purge_action_t arena_t::ShouldStartPurge() {
   3364  if (mNumDirty > mMaxDirty) {
   3365    if (!mIsDeferredPurgeEnabled) {
   3366      return purge_action_t::PurgeNow;
   3367    }
   3368    if (mIsPurgePending) {
   3369      return purge_action_t::None;
   3370    }
   3371    mIsPurgePending = true;
   3372    return purge_action_t::Queue;
   3373  }
   3374  return purge_action_t::None;
   3375 }
   3376 
   3377 inline void arena_t::MayDoOrQueuePurge(purge_action_t aAction,
   3378                                       const char* aCaller) {
   3379  switch (aAction) {
   3380    case purge_action_t::Queue:
   3381      // Note that this thread committed earlier by setting
   3382      // mIsDeferredPurgePending to add us to the list. There is a low
   3383      // chance that in the meantime another thread ran Purge() and cleared
   3384      // the flag, but that is fine, we'll adjust our bookkeeping when calling
   3385      // ShouldStartPurge() or Purge() next time.
   3386      gArenas.AddToOutstandingPurges(this);
   3387      break;
   3388    case purge_action_t::PurgeNow: {
   3389      ArenaPurgeResult pr = PurgeLoop(PurgeIfThreshold, aCaller);
   3390      // Arenas cannot die here because the caller is still using the arena, if
   3391      // they did it'd be a use-after-free: the arena is destroyed but then used
   3392      // afterwards.
   3393      MOZ_RELEASE_ASSERT(pr != ArenaPurgeResult::Dying);
   3394      break;
   3395    }
   3396    case purge_action_t::None:
   3397      // do nothing.
   3398      break;
   3399  }
   3400 }
   3401 
// Record that dirty pages were just reused, postponing time-based purging.
inline void arena_t::NotifySignificantReuse() {
  // Note that there is a chance here for a race between threads calling
  // GetTimeStampNS in a different order than writing it to the Atomic,
  // resulting in mLastSignificantReuseNS going potentially backwards.
  // Our use case is not sensitive to small deviations, the worse that can
  // happen is a slightly earlier purge.
  mLastSignificantReuseNS = GetTimestampNS();
}
   3410 
   3411 void arena_t::RallocShrinkLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
   3412                                size_t aOldSize) {
   3413  MOZ_ASSERT(aSize < aOldSize);
   3414 
   3415  // Shrink the run, and make trailing pages available for other
   3416  // allocations.
   3417  purge_action_t purge_action;
   3418  {
   3419    MaybeMutexAutoLock lock(mLock);
   3420    TrimRunTail(aChunk, (arena_run_t*)aPtr, aOldSize, aSize, true);
   3421    mStats.allocated_large -= aOldSize - aSize;
   3422    mStats.operations++;
   3423 
   3424    purge_action = ShouldStartPurge();
   3425  }
   3426  MayDoOrQueuePurge(purge_action, "RallocShrinkLarge");
   3427 }
   3428 
// Try to grow the large allocation aPtr (in aChunk) from aOldSize to aSize
// in place by absorbing the run that immediately follows it.
// Returns whether reallocation was successful.
bool arena_t::RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
                              size_t aOldSize) {
  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> gPageSize2Pow;
  size_t npages = aOldSize >> gPageSize2Pow;

  // Dirty-page counts sampled inside the lock; compared afterwards to
  // detect that dirty pages were reused (only set on the success path).
  size_t num_dirty_before, num_dirty_after;
  {
    MaybeMutexAutoLock lock(mLock);
    MOZ_DIAGNOSTIC_ASSERT(aOldSize ==
                          (aChunk->mPageMap[pageind].bits & ~gPageSizeMask));

    // Try to extend the run.
    MOZ_ASSERT(aSize > aOldSize);
    // The page right past the allocation must exist, be neither allocated
    // nor busy, and head a free run large enough to cover the growth.
    if (pageind + npages < gChunkNumPages - 1 &&
        (aChunk->mPageMap[pageind + npages].bits &
         (CHUNK_MAP_ALLOCATED | CHUNK_MAP_BUSY)) == 0 &&
        (aChunk->mPageMap[pageind + npages].bits & ~gPageSizeMask) >=
            aSize - aOldSize) {
      num_dirty_before = mNumDirty;
      // The next run is available and sufficiently large.  Split the
      // following run, then merge the first part with the existing
      // allocation.
      if (!SplitRun((arena_run_t*)(uintptr_t(aChunk) +
                                   ((pageind + npages) << gPageSize2Pow)),
                    aSize - aOldSize, true, false)) {
        return false;
      }

      // Record the new size in the first page, and mark the former start
      // of the absorbed run as an interior page of this allocation.
      aChunk->mPageMap[pageind].bits =
          aSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
      aChunk->mPageMap[pageind + npages].bits =
          CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

      mStats.allocated_large += aSize - aOldSize;
      mStats.operations++;
      num_dirty_after = mNumDirty;
    } else {
      return false;
    }
  }
  // A drop in the dirty count means SplitRun reused dirty pages; note it
  // (outside the lock) to delay time-based purging.
  if (num_dirty_after < num_dirty_before) {
    NotifySignificantReuse();
  }
  return true;
}
   3475 
   3476 #ifdef XP_DARWIN
   3477 #  define VM_COPY_MIN kChunkSize
   3478 static inline void pages_copy(void* dest, const void* src, size_t n) {
   3479  MOZ_ASSERT((void*)((uintptr_t)dest & ~gPageSizeMask) == dest);
   3480  MOZ_ASSERT(n >= VM_COPY_MIN);
   3481  MOZ_ASSERT((void*)((uintptr_t)src & ~gPageSizeMask) == src);
   3482 
   3483  kern_return_t r = vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
   3484                            (vm_address_t)dest);
   3485  if (r != KERN_SUCCESS) {
   3486    MOZ_CRASH("vm_copy() failed");
   3487  }
   3488 }
   3489 #endif
   3490 
// Reallocate aPtr (a small or large allocation of aOldSize bytes) to aSize
// bytes.  Tries in-place strategies first (same size class, shrink, grow),
// and falls back to allocate-copy-free.  Returns the new pointer, or nullptr
// on OOM (in which case aPtr is left untouched).
void* arena_t::RallocSmallOrLarge(void* aPtr, size_t aSize, size_t aOldSize) {
  void* ret;
  size_t copysize;
  SizeClass sizeClass(aSize);

  // Try to avoid moving the allocation: if the new size rounds to the same
  // size class, the existing region already fits.
  if (aOldSize <= gMaxLargeClass && sizeClass.Size() == aOldSize) {
    if (aSize < aOldSize) {
      // Poison the now-unused tail of the region.
      MaybePoison((void*)(uintptr_t(aPtr) + aSize), aOldSize - aSize);
    }
    return aPtr;
  }
  // Large-to-large resizing can often be done in place by trimming or
  // extending the run.
  if (sizeClass.Type() == SizeClass::Large && aOldSize > gMaxBinClass &&
      aOldSize <= gMaxLargeClass) {
    arena_chunk_t* chunk = GetChunkForPtr(aPtr);
    if (sizeClass.Size() < aOldSize) {
      // Fill before shrinking in order to avoid a race.
      MaybePoison((void*)((uintptr_t)aPtr + aSize), aOldSize - aSize);
      RallocShrinkLarge(chunk, aPtr, sizeClass.Size(), aOldSize);
      return aPtr;
    }
    if (RallocGrowLarge(chunk, aPtr, sizeClass.Size(), aOldSize)) {
      // Only the newly-acquired tail needs zero/junk treatment.
      ApplyZeroOrJunk((void*)((uintptr_t)aPtr + aOldSize), aSize - aOldSize);
      return aPtr;
    }
  }

  // If we get here, then aSize and aOldSize are different enough that we
  // need to move the object or the run can't be expanded because the memory
  // after it is allocated or busy.  In that case, fall back to allocating new
  // space and copying. Allow non-private arenas to switch arenas.
  ret = (mIsPrivate ? this : choose_arena(aSize))->Malloc(aSize, false);
  if (!ret) {
    return nullptr;
  }

  // Junk/zero-filling were already done by arena_t::Malloc().
  copysize = (aSize < aOldSize) ? aSize : aOldSize;
#ifdef VM_COPY_MIN
  // On Darwin, large page-aligned copies go through the Mach vm_copy path.
  if (copysize >= VM_COPY_MIN) {
    pages_copy(ret, aPtr, copysize);
  } else
#endif
  {
    memcpy(ret, aPtr, copysize);
  }
  idalloc(aPtr, this);
  return ret;
}
   3540 
   3541 void* arena_t::Ralloc(void* aPtr, size_t aSize, size_t aOldSize) {
   3542  MOZ_DIAGNOSTIC_ASSERT(mMagic == ARENA_MAGIC);
   3543  MOZ_ASSERT(aPtr);
   3544  MOZ_ASSERT(aSize != 0);
   3545 
   3546  return (aSize <= gMaxLargeClass) ? RallocSmallOrLarge(aPtr, aSize, aOldSize)
   3547                                   : RallocHuge(aPtr, aSize, aOldSize);
   3548 }
   3549 
// arena_t objects are carved out of TypedBaseAlloc<arena_t> (the internal
// base allocator) rather than the regular heap.  Fallible: returns nullptr
// on OOM instead of throwing.
void* arena_t::operator new(size_t aCount, const fallible_t&) noexcept {
  MOZ_ASSERT(aCount == sizeof(arena_t));
  return TypedBaseAlloc<arena_t>::alloc();
}
   3554 
// Return an arena_t's storage to TypedBaseAlloc<arena_t>, matching the
// custom operator new above.
void arena_t::operator delete(void* aPtr) {
  TypedBaseAlloc<arena_t>::dealloc((arena_t*)aPtr);
}
   3558 
// Construct an arena.  aParams may be null (all defaults); aIsPrivate
// selects a private (per-client) vs public arena.  Initializes the lock,
// the dirty-page limit and one bin per small size class.
arena_t::arena_t(arena_params_t* aParams, bool aIsPrivate)
    : mRandomizeSmallAllocations(opt_randomize_small),
      mIsPrivate(aIsPrivate),
      // The default maximum amount of dirty pages allowed on arenas is a
      // fraction of opt_dirty_max.
      mMaxDirtyBase((aParams && aParams->mMaxDirty) ? aParams->mMaxDirty
                                                    : (opt_dirty_max / 8)),
      mLastSignificantReuseNS(GetTimestampNS()),
      mIsDeferredPurgeEnabled(gArenas.IsDeferredPurgeEnabled()) {
  MaybeMutex::DoLock doLock = MaybeMutex::MUST_LOCK;
  if (aParams) {
    // Per-arena override of the small-allocation randomization default.
    uint32_t randFlags = aParams->mFlags & ARENA_FLAG_RANDOMIZE_SMALL_MASK;
    switch (randFlags) {
      case ARENA_FLAG_RANDOMIZE_SMALL_ENABLED:
        mRandomizeSmallAllocations = true;
        break;
      case ARENA_FLAG_RANDOMIZE_SMALL_DISABLED:
        mRandomizeSmallAllocations = false;
        break;
      case ARENA_FLAG_RANDOMIZE_SMALL_DEFAULT:
      default:
        break;
    }

    uint32_t threadFlags = aParams->mFlags & ARENA_FLAG_THREAD_MASK;
    if (threadFlags == ARENA_FLAG_THREAD_MAIN_THREAD_ONLY) {
      // At the moment we require that any ARENA_FLAG_THREAD_MAIN_THREAD_ONLY
      // arenas are created and therefore always accessed by the main thread.
      // This is for two reasons:
      //  * it allows jemalloc_stats to read their statistics (we also require
      //    that jemalloc_stats is only used on the main thread).
      //  * Only main-thread or threadsafe arenas can be guanteed to be in a
      //    consistent state after a fork() from the main thread.  If fork()
      //    occurs off-thread then the new child process cannot use these arenas
      //    (new children should usually exec() or exit() since other data may
      //    also be inconsistent).
      MOZ_ASSERT(gArenas.IsOnMainThread());
      MOZ_ASSERT(aIsPrivate);
      doLock = MaybeMutex::AVOID_LOCK_UNSAFE;
    }

    mMaxDirtyIncreaseOverride = aParams->mMaxDirtyIncreaseOverride;
    mMaxDirtyDecreaseOverride = aParams->mMaxDirtyDecreaseOverride;

    if (aParams->mLabel) {
      // The string may be truncated so always place a null-byte in the last
      // position.
      strncpy(mLabel, aParams->mLabel, LABEL_MAX_CAPACITY - 1);
      mLabel[LABEL_MAX_CAPACITY - 1] = 0;

      // If the string was truncated, then replace its end with "..."
      if (strlen(aParams->mLabel) >= LABEL_MAX_CAPACITY) {
        for (int i = 0; i < 3; i++) {
          mLabel[LABEL_MAX_CAPACITY - 2 - i] = '.';
        }
      }
    }
  }

  MOZ_RELEASE_ASSERT(mLock.Init(doLock));

  UpdateMaxDirty();

  // Initialize bins.
  SizeClass sizeClass(1);

  // Construct one bin per small size class, walking the size classes in
  // increasing order until the largest bin class is reached.
  unsigned i;
  for (i = 0;; i++) {
    new (&mBins[i]) arena_bin_t(sizeClass);

    // SizeClass doesn't want sizes larger than gMaxBinClass for now.
    if (sizeClass.Size() == gMaxBinClass) {
      break;
    }
    sizeClass = sizeClass.Next();
  }
  MOZ_ASSERT(i == NUM_SMALL_CLASSES - 1);
}
   3637 
// Destroy an arena.  The arena must already be unregistered from the
// collection and fully empty; this is enforced with release asserts so a
// leaked allocation surfaces as a crash rather than a use-after-free later.
arena_t::~arena_t() {
  size_t i;
  MaybeMutexAutoLock lock(mLock);

  MOZ_RELEASE_ASSERT(!mLink.Left() && !mLink.Right(),
                     "Arena is still registered");
  MOZ_RELEASE_ASSERT(!mStats.allocated_small && !mStats.allocated_large,
                     "Arena is not empty");
  // Release the cached spare chunk, if any.
  if (mSpare) {
    chunk_dealloc(mSpare, kChunkSize, ARENA_CHUNK);
  }
  for (i = 0; i < NUM_SMALL_CLASSES; i++) {
    MOZ_RELEASE_ASSERT(mBins[i].mNonFullRuns.isEmpty(), "Bin is not empty");
  }
#ifdef MOZ_DEBUG
  {
    MutexAutoLock lock(huge_mtx);
    // This is an expensive check, so we only do it on debug builds.
    for (auto node : huge.iter()) {
      MOZ_RELEASE_ASSERT(node->mArenaId != mId, "Arena has huge allocations");
    }
  }
#endif
  // Reset the id so stale references to this arena can be detected (see
  // AllocInfo::Arena).
  mId = 0;
}
   3663 
// Create and register a new arena.  On OOM, falls back to returning the
// default arena rather than propagating failure.  Public arenas get simple
// incrementing ids; private arenas get hard-to-guess ids (obfuscated pointer
// or cryptographically random, depending on the build).
arena_t* ArenaCollection::CreateArena(bool aIsPrivate,
                                      arena_params_t* aParams) {
  arena_t* ret = new (fallible) arena_t(aParams, aIsPrivate);
  if (!ret) {
    // Only reached if there is an OOM error.

    // OOM here is quite inconvenient to propagate, since dealing with it
    // would require a check for failure in the fast path.  Instead, punt
    // by using the first arena.
    // In practice, this is an extremely unlikely failure.
    _malloc_message(_getprogname(), ": (malloc) Error initializing arena\n");

    return mDefaultArena;
  }

  MutexAutoLock lock(mLock);

  // For public arenas, it's fine to just use incrementing arena id
  if (!aIsPrivate) {
    ret->mId = mLastPublicArenaId++;
    mArenas.Insert(ret);
    return ret;
  }

#ifdef NON_RANDOM_ARENA_IDS
  // For private arenas, slightly obfuscate the id by XORing a key generated
  // once, and rotate the bits by an amount also generated once.
  if (mArenaIdKey == 0) {
    // Lazily generate the key and rotation on first use (under mLock).
    mozilla::Maybe<uint64_t> maybeRandom = mozilla::RandomUint64();
    MOZ_RELEASE_ASSERT(maybeRandom.isSome());
    mArenaIdKey = maybeRandom.value();
    maybeRandom = mozilla::RandomUint64();
    MOZ_RELEASE_ASSERT(maybeRandom.isSome());
    mArenaIdRotation = maybeRandom.value() & (sizeof(void*) * 8 - 1);
  }
  arena_id_t id = reinterpret_cast<arena_id_t>(ret) ^ mArenaIdKey;
  ret->mId =
      (id >> mArenaIdRotation) | (id << (sizeof(void*) * 8 - mArenaIdRotation));
  mPrivateArenas.Insert(ret);
  return ret;
#else
  // For private arenas, generate a cryptographically-secure random id for the
  // new arena. If an attacker manages to get control of the process, this
  // should make it more difficult for them to "guess" the ID of a memory
  // arena, stopping them from getting data they may want
  Tree& tree = (ret->IsMainThreadOnly()) ? mMainThreadArenas : mPrivateArenas;
  arena_id_t arena_id;
  do {
    arena_id = MakeRandArenaId(ret->IsMainThreadOnly());
    // Keep looping until we ensure that the random number we just generated
    // isn't already in use by another active arena
  } while (GetByIdInternal(tree, arena_id));

  ret->mId = arena_id;
  tree.Insert(ret);
  return ret;
#endif
}
   3722 
   3723 #ifndef NON_RANDOM_ARENA_IDS
   3724 arena_id_t ArenaCollection::MakeRandArenaId(bool aIsMainThreadOnly) const {
   3725  uint64_t rand;
   3726  do {
   3727    mozilla::Maybe<uint64_t> maybeRandomId = mozilla::RandomUint64();
   3728    MOZ_RELEASE_ASSERT(maybeRandomId.isSome());
   3729 
   3730    rand = maybeRandomId.value();
   3731 
   3732    // Set or clear the least significant bit depending on if this is a
   3733    // main-thread-only arena.  We use this in GetById.
   3734    if (aIsMainThreadOnly) {
   3735      rand = rand | MAIN_THREAD_ARENA_BIT;
   3736    } else {
   3737      rand = rand & ~MAIN_THREAD_ARENA_BIT;
   3738    }
   3739 
   3740    // Avoid 0 as an arena Id. We use 0 for disposed arenas.
   3741  } while (rand == 0);
   3742 
   3743  return arena_id_t(rand);
   3744 }
   3745 #endif
   3746 
   3747 // End arena.
   3748 // ***************************************************************************
   3749 // Begin general internal functions.
   3750 
// Initialize huge allocation data: the mutex guarding the huge tree and the
// global huge-allocation counters.  Runs during allocator init under
// gInitLock, before any concurrent access, hence the thread-safety-analysis
// suppression around the unlocked counter writes.
static void huge_init() MOZ_REQUIRES(gInitLock) {
  huge_mtx.Init();
  MOZ_PUSH_IGNORE_THREAD_SAFETY
  huge_allocated = 0;
  huge_mapped = 0;
  huge_operations = 0;
  MOZ_POP_THREAD_SAFETY
}
   3760 
// Allocate aSize bytes as a huge allocation with the default (chunk-size)
// alignment; thin wrapper over PallocHuge().
void* arena_t::MallocHuge(size_t aSize, bool aZero) {
  return PallocHuge(aSize, kChunkSize, aZero);
}
   3764 
// Allocate a huge region of at least aSize bytes, aligned to aAlignment.
// The usable part is rounded up to a whole (real) page; the rest of the
// chunk-aligned mapping is decommitted so it acts as a guard region.
// Returns nullptr on size overflow or mapping failure.
void* arena_t::PallocHuge(size_t aSize, size_t aAlignment, bool aZero) {
  void* ret;
  size_t csize;
  size_t psize;
  extent_node_t* node;

  // We're going to configure guard pages in the region between the
  // page-aligned size and the chunk-aligned size, so if those are the same
  // then we need to force that region into existence.
  csize = CHUNK_CEILING(aSize + gRealPageSize);
  if (csize < aSize) {
    // size is large enough to cause size_t wrap-around.
    return nullptr;
  }

  // Allocate an extent node with which to track the chunk.
  node = ExtentAlloc::alloc();
  if (!node) {
    return nullptr;
  }

  // Allocate one or more contiguous chunks for this request.
  ret = chunk_alloc(csize, aAlignment, false);
  if (!ret) {
    ExtentAlloc::dealloc(node);
    return nullptr;
  }
  psize = REAL_PAGE_CEILING(aSize);
  // The extra gRealPageSize added above guarantees psize is strictly smaller.
  MOZ_ASSERT(psize < csize);
#ifdef MOZ_DEBUG
  if (aZero) {
    chunk_assert_zero(ret, psize);
  }
#endif

  // Insert node into huge.
  node->mAddr = ret;
  node->mSize = psize;
  node->mArena = this;
  node->mArenaId = mId;

  {
    MutexAutoLock lock(huge_mtx);
    huge.Insert(node);

    // Although we allocated space for csize bytes, we indicate that we've
    // allocated only psize bytes.
    //
    // If DECOMMIT is defined, this is a reasonable thing to do, since
    // we'll explicitly decommit the bytes in excess of psize.
    //
    // If DECOMMIT is not defined, then we're relying on the OS to be lazy
    // about how it allocates physical pages to mappings.  If we never
    // touch the pages in excess of psize, the OS won't allocate a physical
    // page, and we won't use more than psize bytes of physical memory.
    //
    // A correct program will only touch memory in excess of how much it
    // requested if it first calls malloc_usable_size and finds out how
    // much space it has to play with.  But because we set node->mSize =
    // psize above, malloc_usable_size will return psize, not csize, and
    // the program will (hopefully) never touch bytes in excess of psize.
    // Thus those bytes won't take up space in physical memory, and we can
    // reasonably claim we never "allocated" them in the first place.
    huge_allocated += psize;
    huge_mapped += csize;
    huge_operations++;
  }

  // Decommit the tail of the mapping, between psize and csize.
  pages_decommit((void*)((uintptr_t)ret + psize), csize - psize);

  if (!aZero) {
    // Depending on the configured options this junk-fills or zeroes the
    // usable region.
    ApplyZeroOrJunk(ret, psize);
  }

  return ret;
}
   3841 
// Resize a huge allocation.  When the chunk footprint would not change, the
// allocation is resized in place (committing or decommitting pages as
// needed); otherwise a new huge region is allocated, the data copied, and
// the old region freed.  Returns nullptr on failure, in which case the
// original allocation is left untouched.
void* arena_t::RallocHuge(void* aPtr, size_t aSize, size_t aOldSize) {
  void* ret;
  size_t copysize;

  // Avoid moving the allocation if the size class would not change.
  if (aOldSize > gMaxLargeClass &&
      CHUNK_CEILING(aSize + gPageSize) == CHUNK_CEILING(aOldSize + gPageSize)) {
    size_t psize = REAL_PAGE_CEILING(aSize);
    if (aSize < aOldSize) {
      // Poison (depending on configuration) the bytes being released.
      MaybePoison((void*)((uintptr_t)aPtr + aSize), aOldSize - aSize);
    }
    if (psize < aOldSize) {
      extent_node_t key;

      // Shrinking: return the now-unused whole pages to the OS.
      pages_decommit((void*)((uintptr_t)aPtr + psize), aOldSize - psize);

      // Update recorded size.
      MutexAutoLock lock(huge_mtx);
      key.mAddr = const_cast<void*>(aPtr);
      extent_node_t* node = huge.Search(&key);
      MOZ_ASSERT(node);
      MOZ_ASSERT(node->mSize == aOldSize);
      MOZ_RELEASE_ASSERT(node->mArena == this);
      huge_allocated -= aOldSize - psize;
      huge_operations++;
      // No need to change huge_mapped, because we didn't (un)map anything.
      node->mSize = psize;
    } else if (psize > aOldSize) {
      // Growing: the pages between aOldSize and psize must be committed
      // before they can be handed back to the caller.
      if (!pages_commit((void*)((uintptr_t)aPtr + aOldSize),
                        psize - aOldSize)) {
        return nullptr;
      }

      // We need to update the recorded size if the size increased,
      // so malloc_usable_size doesn't return a value smaller than
      // what was requested via realloc().
      extent_node_t key;
      MutexAutoLock lock(huge_mtx);
      key.mAddr = const_cast<void*>(aPtr);
      extent_node_t* node = huge.Search(&key);
      MOZ_ASSERT(node);
      MOZ_ASSERT(node->mSize == aOldSize);
      MOZ_RELEASE_ASSERT(node->mArena == this);
      huge_allocated += psize - aOldSize;
      huge_operations++;
      // No need to change huge_mapped, because we didn't
      // (un)map anything.
      node->mSize = psize;
    }

    if (aSize > aOldSize) {
      // Junk-fill or zero the newly exposed bytes per the configured policy.
      ApplyZeroOrJunk((void*)((uintptr_t)aPtr + aOldSize), aSize - aOldSize);
    }
    return aPtr;
  }

  // If we get here, then aSize and aOldSize are different enough that we
  // need to use a different size class.  In that case, fall back to allocating
  // new space and copying. Allow non-private arenas to switch arenas.
  ret = (mIsPrivate ? this : choose_arena(aSize))->MallocHuge(aSize, false);
  if (!ret) {
    return nullptr;
  }

  copysize = (aSize < aOldSize) ? aSize : aOldSize;
#ifdef VM_COPY_MIN
  if (copysize >= VM_COPY_MIN) {
    // Copies at least this big go through the VM page-copy primitive.
    pages_copy(ret, aPtr, copysize);
  } else
#endif
  {
    memcpy(ret, aPtr, copysize);
  }
  idalloc(aPtr, this);
  return ret;
}
   3918 
// Free a huge allocation: remove its tracking node from the huge tree,
// update the global huge counters, and unmap the underlying chunk(s).
// aArena, when non-null, must be the arena the allocation belongs to.
static void huge_dalloc(void* aPtr, arena_t* aArena) {
  extent_node_t* node;
  size_t mapped = 0;
  {
    extent_node_t key;
    MutexAutoLock lock(huge_mtx);

    // Extract from tree of huge allocations.
    key.mAddr = aPtr;
    node = huge.Search(&key);
    MOZ_RELEASE_ASSERT(node, "Double-free?");
    MOZ_ASSERT(node->mAddr == aPtr);
    MOZ_RELEASE_ASSERT(!aArena || node->mArena == aArena);
    // See AllocInfo::Arena.
    MOZ_RELEASE_ASSERT(node->mArenaId == node->mArena->mId);
    huge.Remove(node);

    // mSize records the page-aligned usable size; the actual mapping was
    // rounded up to whole chunks including the guard region (see PallocHuge).
    mapped = CHUNK_CEILING(node->mSize + gRealPageSize);
    huge_allocated -= node->mSize;
    huge_mapped -= mapped;
    huge_operations++;
  }

  // Unmap chunk.
  chunk_dealloc(node->mAddr, mapped, HUGE_CHUNK);

  ExtentAlloc::dealloc(node);
}
   3947 
   3948 // Returns whether the allocator was successfully initialized.
   3949 static bool malloc_init_hard() {
   3950  unsigned i;
   3951  const char* opts;
   3952 
   3953  AutoLock<StaticMutex> lock(gInitLock);
   3954 
   3955  if (malloc_initialized) {
   3956    // Another thread initialized the allocator before this one
   3957    // acquired gInitLock.
   3958    return true;
   3959  }
   3960 
   3961  if (!thread_arena.init()) {
   3962    return true;
   3963  }
   3964 
   3965  // Get page size and number of CPUs
   3966  const size_t page_size = GetKernelPageSize();
   3967  // We assume that the page size is a power of 2.
   3968  MOZ_ASSERT(IsPowerOfTwo(page_size));
   3969 #ifdef MALLOC_STATIC_PAGESIZE
   3970  if (gRealPageSize % page_size) {
   3971    _malloc_message(
   3972        _getprogname(),
   3973        "Compile-time page size does not divide the runtime one.\n");
   3974    MOZ_CRASH();
   3975  }
   3976 #else
   3977  gPageSize = page_size;
   3978  gRealPageSize = page_size;
   3979 #endif
   3980 
   3981  // Get runtime configuration.
   3982  if ((opts = getenv("MALLOC_OPTIONS"))) {
   3983    for (i = 0; opts[i] != '\0'; i++) {
   3984      // All options are single letters, some take a *prefix* numeric argument.
   3985 
   3986      // Parse the argument.
   3987      unsigned prefix_arg = 0;
   3988      while (opts[i] >= '0' && opts[i] <= '9') {
   3989        prefix_arg *= 10;
   3990        prefix_arg += opts[i] - '0';
   3991        i++;
   3992      }
   3993 
   3994      switch (opts[i]) {
   3995        case 'f':
   3996          opt_dirty_max >>= prefix_arg ? prefix_arg : 1;
   3997          break;
   3998        case 'F':
   3999          prefix_arg = prefix_arg ? prefix_arg : 1;
   4000          if (opt_dirty_max == 0) {
   4001            opt_dirty_max = 1;
   4002            prefix_arg--;
   4003          }
   4004          opt_dirty_max <<= prefix_arg;
   4005          if (opt_dirty_max == 0) {
   4006            // If the shift above overflowed all the bits then clamp the result
   4007            // instead.  If we started with DIRTY_MAX_DEFAULT then this will
   4008            // always be a power of two so choose the maximum power of two that
   4009            // fits in a size_t.
   4010            opt_dirty_max = size_t(1) << (sizeof(size_t) * CHAR_BIT - 1);
   4011          }
   4012          break;
   4013 #ifdef MALLOC_RUNTIME_CONFIG
   4014        case 'j':
   4015          opt_junk = false;
   4016          break;
   4017        case 'J':
   4018          opt_junk = true;
   4019          break;
   4020        case 'q':
   4021          // The argument selects how much poisoning to do.
   4022          opt_poison = NONE;
   4023          break;
   4024        case 'Q':
   4025          if (opts[i + 1] == 'Q') {
   4026            // Maximum poisoning.
   4027            i++;
   4028            opt_poison = ALL;
   4029          } else {
   4030            opt_poison = SOME;
   4031            opt_poison_size = kCacheLineSize * prefix_arg;
   4032          }
   4033          break;
   4034        case 'z':
   4035          opt_zero = false;
   4036          break;
   4037        case 'Z':
   4038          opt_zero = true;
   4039          break;
   4040 #  ifndef MALLOC_STATIC_PAGESIZE
   4041        case 'P':
   4042          MOZ_ASSERT(gPageSize >= 1_KiB);
   4043          MOZ_ASSERT(gPageSize <= 64_KiB);
   4044          prefix_arg = prefix_arg ? prefix_arg : 1;
   4045          gPageSize <<= prefix_arg;
   4046          // We know that if the shift causes gPageSize to be zero then it's
   4047          // because it shifted all the bits off.  We didn't start with zero.
   4048          // Therefore if gPageSize is out of bounds we set it to 64KiB.
   4049          if (gPageSize < 1_KiB || gPageSize > 64_KiB) {
   4050            gPageSize = 64_KiB;
   4051          }
   4052          // We also limit gPageSize to be no larger than gRealPageSize, there's
   4053          // no reason to support this.
   4054          if (gPageSize > gRealPageSize) {
   4055            gPageSize = gRealPageSize;
   4056          }
   4057          break;
   4058        case 'p':
   4059          MOZ_ASSERT(gPageSize >= 1_KiB);
   4060          MOZ_ASSERT(gPageSize <= 64_KiB);
   4061          prefix_arg = prefix_arg ? prefix_arg : 1;
   4062          gPageSize >>= prefix_arg;
   4063          if (gPageSize < 1_KiB) {
   4064            gPageSize = 1_KiB;
   4065          }
   4066          break;
   4067 #  endif
   4068 #endif
   4069        case 'r':
   4070          opt_randomize_small = false;
   4071          break;
   4072        case 'R':
   4073          opt_randomize_small = true;
   4074          break;
   4075        default: {
   4076          char cbuf[2];
   4077 
   4078          cbuf[0] = opts[i];
   4079          cbuf[1] = '\0';
   4080          _malloc_message(_getprogname(),
   4081                          ": (malloc) Unsupported character "
   4082                          "in malloc options: '",
   4083                          cbuf, "'\n");
   4084        }
   4085      }
   4086    }
   4087  }
   4088 
   4089  MOZ_ASSERT(gPageSize <= gRealPageSize);
   4090 #ifndef MALLOC_STATIC_PAGESIZE
   4091  DefineGlobals();
   4092 #endif
   4093  gRecycledSize = 0;
   4094 
   4095  chunks_init();
   4096  huge_init();
   4097  sBaseAlloc.Init();
   4098 
   4099  // Initialize arenas collection here.
   4100  if (!gArenas.Init()) {
   4101    return false;
   4102  }
   4103 
   4104  // Assign the default arena to the initial thread.
   4105  thread_arena.set(gArenas.GetDefault());
   4106 
   4107  if (!gChunkRTree.Init()) {
   4108    return false;
   4109  }
   4110 
   4111  malloc_initialized = true;
   4112 
   4113  // Dummy call so that the function is not removed by dead-code elimination
   4114  Debug::jemalloc_ptr_info(nullptr);
   4115 
   4116 #if !defined(XP_WIN) && !defined(XP_DARWIN)
   4117  // Prevent potential deadlock on malloc locks after fork.
   4118  pthread_atfork(_malloc_prefork, _malloc_postfork_parent,
   4119                 _malloc_postfork_child);
   4120 #endif
   4121 
   4122 #ifdef MOZ_PHC
   4123  // PHC must be initialised after mozjemalloc.
   4124  phc_init();
   4125 #endif
   4126 
   4127  return true;
   4128 }
   4129 
   4130 // End general internal functions.
   4131 // ***************************************************************************
   4132 // Begin malloc(3)-compatible functions.
   4133 
// The BaseAllocator class is a helper class that implements the base allocator
// functions (malloc, calloc, realloc, free, memalign) for a given arena,
// or an appropriately chosen arena (per choose_arena()) when none is given.
struct BaseAllocator {
#define MALLOC_DECL(name, return_type, ...) \
  inline return_type name(__VA_ARGS__);

#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"

  // aArena may be null, in which case each operation picks an arena itself.
  explicit BaseAllocator(arena_t* aArena) : mArena(aArena) {}

 private:
  arena_t* mArena;
};
   4149 
// Define MozJemalloc's basic allocation entry points (per
// MALLOC_FUNCS_MALLOC_BASE) by forwarding each call to a BaseAllocator that
// is not bound to any particular arena.
#define MALLOC_DECL(name, return_type, ...)                  \
  inline return_type MozJemalloc::name(                      \
      ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) {              \
    BaseAllocator allocator(nullptr);                        \
    return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
  }
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"
   4158 
   4159 inline void* BaseAllocator::malloc(size_t aSize) {
   4160  void* ret;
   4161  arena_t* arena;
   4162 
   4163  if (!malloc_init()) {
   4164    ret = nullptr;
   4165    goto RETURN;
   4166  }
   4167 
   4168  if (aSize == 0) {
   4169    aSize = 1;
   4170  }
   4171  // If mArena is non-null, it must not be in the first page.
   4172  MOZ_DIAGNOSTIC_ASSERT_IF(mArena, (size_t)mArena >= gPageSize);
   4173  arena = mArena ? mArena : choose_arena(aSize);
   4174  ret = arena->Malloc(aSize, /* aZero = */ false);
   4175 
   4176 RETURN:
   4177  if (!ret) {
   4178    errno = ENOMEM;
   4179  }
   4180 
   4181  return ret;
   4182 }
   4183 
   4184 inline void* BaseAllocator::memalign(size_t aAlignment, size_t aSize) {
   4185  MOZ_ASSERT(((aAlignment - 1) & aAlignment) == 0);
   4186 
   4187  if (!malloc_init()) {
   4188    return nullptr;
   4189  }
   4190 
   4191  if (aSize == 0) {
   4192    aSize = 1;
   4193  }
   4194 
   4195  aAlignment = aAlignment < sizeof(void*) ? sizeof(void*) : aAlignment;
   4196  arena_t* arena = mArena ? mArena : choose_arena(aSize);
   4197  return arena->Palloc(aAlignment, aSize);
   4198 }
   4199 
   4200 inline void* BaseAllocator::calloc(size_t aNum, size_t aSize) {
   4201  void* ret;
   4202 
   4203  if (malloc_init()) {
   4204    CheckedInt<size_t> checkedSize = CheckedInt<size_t>(aNum) * aSize;
   4205    if (checkedSize.isValid()) {
   4206      size_t allocSize = checkedSize.value();
   4207      if (allocSize == 0) {
   4208        allocSize = 1;
   4209      }
   4210      arena_t* arena = mArena ? mArena : choose_arena(allocSize);
   4211      ret = arena->Malloc(allocSize, /* aZero = */ true);
   4212    } else {
   4213      ret = nullptr;
   4214    }
   4215  } else {
   4216    ret = nullptr;
   4217  }
   4218 
   4219  if (!ret) {
   4220    errno = ENOMEM;
   4221  }
   4222 
   4223  return ret;
   4224 }
   4225 
   4226 inline void* BaseAllocator::realloc(void* aPtr, size_t aSize) {
   4227  void* ret;
   4228 
   4229  if (aSize == 0) {
   4230    aSize = 1;
   4231  }
   4232 
   4233  if (aPtr) {
   4234    MOZ_RELEASE_ASSERT(malloc_initialized);
   4235 
   4236    auto info = AllocInfo::Get(aPtr);
   4237    auto arena = info.Arena();
   4238    MOZ_RELEASE_ASSERT(!mArena || arena == mArena);
   4239    ret = arena->Ralloc(aPtr, aSize, info.Size());
   4240  } else {
   4241    if (!malloc_init()) {
   4242      ret = nullptr;
   4243    } else {
   4244      arena_t* arena = mArena ? mArena : choose_arena(aSize);
   4245      ret = arena->Malloc(aSize, /* aZero = */ false);
   4246    }
   4247  }
   4248 
   4249  if (!ret) {
   4250    errno = ENOMEM;
   4251  }
   4252  return ret;
   4253 }
   4254 
   4255 inline void BaseAllocator::free(void* aPtr) {
   4256  size_t offset;
   4257 
   4258  // A version of idalloc that checks for nullptr pointer.
   4259  offset = GetChunkOffsetForPtr(aPtr);
   4260  if (offset != 0) {
   4261    MOZ_RELEASE_ASSERT(malloc_initialized);
   4262    arena_dalloc(aPtr, offset, mArena);
   4263  } else if (aPtr) {
   4264    MOZ_RELEASE_ASSERT(malloc_initialized);
   4265    huge_dalloc(aPtr, mArena);
   4266  }
   4267 }
   4268 
// POSIX-style aligned allocation; argument validation and errno handling are
// delegated to the shared AlignedAllocator helper wrapping memalign.
inline int MozJemalloc::posix_memalign(void** aMemPtr, size_t aAlignment,
                                       size_t aSize) {
  return AlignedAllocator<memalign>::posix_memalign(aMemPtr, aAlignment, aSize);
}
   4273 
// C11-style aligned allocation, delegated to the AlignedAllocator helper
// wrapping memalign.
inline void* MozJemalloc::aligned_alloc(size_t aAlignment, size_t aSize) {
  return AlignedAllocator<memalign>::aligned_alloc(aAlignment, aSize);
}
   4277 
// Page-aligned allocation, delegated to the AlignedAllocator helper wrapping
// memalign.
inline void* MozJemalloc::valloc(size_t aSize) {
  return AlignedAllocator<memalign>::valloc(aSize);
}
   4281 
   4282 // End malloc(3)-compatible functions.
   4283 // ***************************************************************************
   4284 // Begin non-standard functions.
   4285 
   4286 // This was added by Mozilla for use by SQLite.
   4287 inline size_t MozJemalloc::malloc_good_size(size_t aSize) {
   4288  if (aSize == 0) {
   4289    aSize = SizeClass(1).Size();
   4290  } else if (aSize <= gMaxLargeClass) {
   4291    // Small or large
   4292    aSize = SizeClass(aSize).Size();
   4293  } else {
   4294    // Huge.  We use PAGE_CEILING to get psize, instead of using
   4295    // CHUNK_CEILING to get csize.  This ensures that this
   4296    // malloc_usable_size(malloc(n)) always matches
   4297    // malloc_good_size(n).
   4298    aSize = PAGE_CEILING(aSize);
   4299  }
   4300  return aSize;
   4301 }
   4302 
// Return the usable size of the allocation at aPtr, validating the pointer
// rather than trusting it blindly (see AllocInfo::GetValidated).
inline size_t MozJemalloc::malloc_usable_size(usable_ptr_t aPtr) {
  return AllocInfo::GetValidated(aPtr).Size();
}
   4306 
   4307 inline void MozJemalloc::jemalloc_stats_internal(
   4308    jemalloc_stats_t* aStats, jemalloc_bin_stats_t* aBinStats) {
   4309  size_t non_arena_mapped, chunk_header_size;
   4310 
   4311  if (!aStats) {
   4312    return;
   4313  }
   4314  if (!malloc_init()) {
   4315    memset(aStats, 0, sizeof(*aStats));
   4316    return;
   4317  }
   4318  if (aBinStats) {
   4319    memset(aBinStats, 0, sizeof(jemalloc_bin_stats_t) * NUM_SMALL_CLASSES);
   4320  }
   4321 
   4322  // Gather runtime settings.
   4323  aStats->opt_junk = opt_junk;
   4324  aStats->opt_randomize_small = opt_randomize_small;
   4325  aStats->opt_zero = opt_zero;
   4326  aStats->quantum = kQuantum;
   4327  aStats->quantum_max = kMaxQuantumClass;
   4328  aStats->quantum_wide = kQuantumWide;
   4329  aStats->quantum_wide_max = kMaxQuantumWideClass;
   4330  aStats->subpage_max = gMaxSubPageClass;
   4331  aStats->large_max = gMaxLargeClass;
   4332  aStats->chunksize = kChunkSize;
   4333  aStats->page_size = gPageSize;
   4334  aStats->dirty_max = opt_dirty_max;
   4335 
   4336  // Gather current memory usage statistics.
   4337  aStats->narenas = 0;
   4338  aStats->mapped = 0;
   4339  aStats->allocated = 0;
   4340  aStats->waste = 0;
   4341  aStats->pages_dirty = 0;
   4342  aStats->pages_fresh = 0;
   4343  aStats->pages_madvised = 0;
   4344  aStats->bookkeeping = 0;
   4345  aStats->bin_unused = 0;
   4346 
   4347  non_arena_mapped = 0;
   4348 
   4349  // Get huge mapped/allocated.
   4350  {
   4351    MutexAutoLock lock(huge_mtx);
   4352    non_arena_mapped += huge_mapped;
   4353    aStats->allocated += huge_allocated;
   4354    aStats->num_operations += huge_operations;
   4355    MOZ_ASSERT(huge_mapped >= huge_allocated);
   4356  }
   4357 
   4358  // Get base mapped/allocated.
   4359  auto base_stats = sBaseAlloc.GetStats();
   4360  non_arena_mapped += base_stats.mMapped;
   4361  aStats->bookkeeping += base_stats.mCommitted;
   4362 
   4363  gArenas.mLock.Lock();
   4364 
   4365  // Stats can only read complete information if its run on the main thread.
   4366  MOZ_ASSERT(gArenas.IsOnMainThreadWeak());
   4367 
   4368  // Iterate over arenas.
   4369  for (auto arena : gArenas.iter()) {
   4370    // Cannot safely read stats for this arena and therefore stats would be
   4371    // incomplete.
   4372    MOZ_ASSERT(arena->mLock.SafeOnThisThread());
   4373 
   4374    size_t arena_mapped, arena_allocated, arena_committed, arena_dirty,
   4375        arena_fresh, arena_madvised, j, arena_unused, arena_headers;
   4376 
   4377    arena_headers = 0;
   4378    arena_unused = 0;
   4379 
   4380    {
   4381      MaybeMutexAutoLock lock(arena->mLock);
   4382 
   4383      arena_mapped = arena->mStats.mapped;
   4384 
   4385      // "committed" counts dirty and allocated memory.
   4386      arena_committed = arena->mStats.committed << gPageSize2Pow;
   4387 
   4388      arena_allocated =
   4389          arena->mStats.allocated_small + arena->mStats.allocated_large;
   4390 
   4391      arena_dirty = arena->mNumDirty << gPageSize2Pow;
   4392      arena_fresh = arena->mNumFresh << gPageSize2Pow;
   4393      arena_madvised = arena->mNumMAdvised << gPageSize2Pow;
   4394 
   4395      aStats->num_operations += arena->mStats.operations;
   4396 
   4397      for (j = 0; j < NUM_SMALL_CLASSES; j++) {
   4398        arena_bin_t* bin = &arena->mBins[j];
   4399        size_t bin_unused = 0;
   4400        size_t num_non_full_runs = 0;
   4401 
   4402        for (arena_run_t& run : bin->mNonFullRuns) {
   4403          MOZ_DIAGNOSTIC_ASSERT(run.mMagic == ARENA_RUN_MAGIC);
   4404          MOZ_RELEASE_ASSERT(run.mNumFree > 0 &&
   4405                             run.mNumFree < bin->mRunNumRegions);
   4406          MOZ_RELEASE_ASSERT(run.mBin == bin);
   4407          MOZ_RELEASE_ASSERT(bin->mNonFullRuns.ElementIsLinkedWell(&run));
   4408          arena_chunk_t* chunk = GetChunkForPtr(&run);
   4409          MOZ_RELEASE_ASSERT(chunk->mArena == arena);
   4410          bin_unused += run.mNumFree * bin->mSizeClass;
   4411          num_non_full_runs++;
   4412        }
   4413 
   4414        arena_unused += bin_unused;
   4415        arena_headers += bin->mNumRuns * bin->mRunFirstRegionOffset;
   4416        if (aBinStats) {
   4417          aBinStats[j].size = bin->mSizeClass;
   4418          aBinStats[j].num_non_full_runs += num_non_full_runs;
   4419          aBinStats[j].num_runs += bin->mNumRuns;
   4420          aBinStats[j].bytes_unused += bin_unused;
   4421          size_t bytes_per_run = static_cast<size_t>(bin->mRunSizePages)
   4422                                 << gPageSize2Pow;
   4423          aBinStats[j].bytes_total +=
   4424              bin->mNumRuns * (bytes_per_run - bin->mRunFirstRegionOffset);
   4425          aBinStats[j].bytes_per_run = bytes_per_run;
   4426          aBinStats[j].regions_per_run = bin->mRunNumRegions;
   4427        }
   4428      }
   4429    }
   4430 
   4431    MOZ_ASSERT(arena_mapped >= arena_committed);
   4432    MOZ_ASSERT(arena_committed >= arena_allocated + arena_dirty);
   4433 
   4434    aStats->mapped += arena_mapped;
   4435    aStats->allocated += arena_allocated;
   4436    aStats->pages_dirty += arena_dirty;
   4437    aStats->pages_fresh += arena_fresh;
   4438    aStats->pages_madvised += arena_madvised;
   4439    // "waste" is committed memory that is neither dirty nor
   4440    // allocated.  If you change this definition please update
   4441    // memory/replace/logalloc/replay/Replay.cpp's jemalloc_stats calculation of
   4442    // committed.
   4443    MOZ_ASSERT(arena_committed >=
   4444               (arena_allocated + arena_dirty + arena_unused + arena_headers));
   4445    aStats->waste += arena_committed - arena_allocated - arena_dirty -
   4446                     arena_unused - arena_headers;
   4447    aStats->bin_unused += arena_unused;
   4448    aStats->bookkeeping += arena_headers;
   4449    aStats->narenas++;
   4450  }
   4451  gArenas.mLock.Unlock();
   4452 
   4453  // Account for arena chunk headers in bookkeeping rather than waste.
   4454  chunk_header_size =
   4455      ((aStats->mapped / aStats->chunksize) * (gChunkHeaderNumPages - 1))
   4456      << gPageSize2Pow;
   4457 
   4458  aStats->mapped += non_arena_mapped;
   4459  aStats->bookkeeping += chunk_header_size;
   4460  aStats->waste -= chunk_header_size;
   4461 
   4462  MOZ_ASSERT(aStats->mapped >= aStats->allocated + aStats->waste +
   4463                                   aStats->pages_dirty + aStats->bookkeeping);
   4464 }
   4465 
// A lightweight variant of jemalloc_stats_internal: collects only the total
// allocated bytes and operation count, without locking individual arenas.
inline void MozJemalloc::jemalloc_stats_lite(jemalloc_stats_lite_t* aStats) {
  if (!aStats) {
    return;
  }
  if (!malloc_init()) {
    memset(aStats, 0, sizeof(*aStats));
    return;
  }

  aStats->allocated_bytes = 0;
  aStats->num_operations = 0;

  // Get huge mapped/allocated.
  {
    MutexAutoLock lock(huge_mtx);
    aStats->allocated_bytes += huge_allocated;
    aStats->num_operations += huge_operations;
    MOZ_ASSERT(huge_mapped >= huge_allocated);
  }

  {
    MutexAutoLock lock(gArenas.mLock);
    for (auto arena : gArenas.iter()) {
      // We don't need to lock the arena to access these fields.
      aStats->allocated_bytes += arena->AllocatedBytes();
      aStats->num_operations += arena->Operations();
    }
    // Operations performed in arenas that were since disposed still count.
    aStats->num_operations += gArenas.OperationsDisposedArenas();
  }
}
   4496 
// Number of small size classes, i.e. the length of the array callers must
// provide as aBinStats to jemalloc_stats_internal.
inline size_t MozJemalloc::jemalloc_stats_num_bins() {
  return NUM_SMALL_CLASSES;
}
   4500 
// Record the calling thread as the main thread in the arena collection.
// Must only be called after the allocator is initialized.
inline void MozJemalloc::jemalloc_set_main_thread() {
  MOZ_ASSERT(malloc_initialized);
  gArenas.SetMainThread();
}
   4505 
   4506 #ifdef MALLOC_DOUBLE_PURGE
   4507 
   4508 // Explicitly remove all of this chunk's MADV_FREE'd pages from memory.
   4509 static size_t hard_purge_chunk(arena_chunk_t* aChunk) {
   4510  size_t total_npages = 0;
   4511  // See similar logic in arena_t::Purge().
   4512  for (size_t i = gChunkHeaderNumPages; i < gChunkNumPages; i++) {
   4513    // Find all adjacent pages with CHUNK_MAP_MADVISED set.
   4514    size_t npages;
   4515    for (npages = 0; aChunk->mPageMap[i + npages].bits & CHUNK_MAP_MADVISED &&
   4516                     i + npages < gChunkNumPages;
   4517         npages++) {
   4518      // Turn off the page's CHUNK_MAP_MADVISED bit and turn on its
   4519      // CHUNK_MAP_FRESH bit.
   4520      MOZ_DIAGNOSTIC_ASSERT(!(aChunk->mPageMap[i + npages].bits &
   4521                              (CHUNK_MAP_FRESH | CHUNK_MAP_DECOMMITTED)));
   4522      aChunk->mPageMap[i + npages].bits ^=
   4523          (CHUNK_MAP_MADVISED | CHUNK_MAP_FRESH);
   4524    }
   4525 
   4526    // We could use mincore to find out which pages are actually
   4527    // present, but it's not clear that's better.
   4528    if (npages > 0) {
   4529      // i and npages should be aligned because they needed to be for the
   4530      // purge code that set CHUNK_MAP_MADVISED.
   4531      MOZ_ASSERT((i % gPagesPerRealPage) == 0);
   4532      MOZ_ASSERT((npages % gPagesPerRealPage) == 0);
   4533      pages_decommit(((char*)aChunk) + (i << gPageSize2Pow),
   4534                     npages << gPageSize2Pow);
   4535      (void)pages_commit(((char*)aChunk) + (i << gPageSize2Pow),
   4536                         npages << gPageSize2Pow);
   4537    }
   4538    total_npages += npages;
   4539    i += npages;
   4540  }
   4541 
   4542  return total_npages;
   4543 }
   4544 
   4545 // Explicitly remove all of this arena's MADV_FREE'd pages from memory.
   4546 void arena_t::HardPurge() {
   4547  MaybeMutexAutoLock lock(mLock);
   4548 
   4549  while (!mChunksMAdvised.isEmpty()) {
   4550    arena_chunk_t* chunk = mChunksMAdvised.popFront();
   4551    size_t npages = hard_purge_chunk(chunk);
   4552    mNumMAdvised -= npages;
   4553    mNumFresh += npages;
   4554  }
   4555 }
   4556 
   4557 inline void MozJemalloc::jemalloc_purge_freed_pages() {
   4558  if (malloc_initialized) {
   4559    MutexAutoLock lock(gArenas.mLock);
   4560    MOZ_ASSERT(gArenas.IsOnMainThreadWeak());
   4561    for (auto arena : gArenas.iter()) {
   4562      arena->HardPurge();
   4563    }
   4564  }
   4565 }
   4566 
   4567 #else  // !defined MALLOC_DOUBLE_PURGE
   4568 
inline void MozJemalloc::jemalloc_purge_freed_pages() {
  // Do nothing.  Without MALLOC_DOUBLE_PURGE no MADV_FREE'd pages are
  // tracked, so there is nothing to purge here.
}
   4572 
   4573 #endif  // defined MALLOC_DOUBLE_PURGE
   4574 
// Purge dirty pages in all arenas unconditionally, regardless of each
// arena's dirty-page threshold.
inline void MozJemalloc::jemalloc_free_dirty_pages(void) {
  if (malloc_initialized) {
    gArenas.MayPurgeAll(PurgeUnconditional, __func__);
  }
}
   4580 
// Purge dirty pages only in arenas that exceed their dirty-page threshold.
inline void MozJemalloc::jemalloc_free_excess_dirty_pages(void) {
  if (malloc_initialized) {
    gArenas.MayPurgeAll(PurgeIfThreshold, __func__);
  }
}
   4586 
#ifndef NON_RANDOM_ARENA_IDS
// Look up an arena by id in aTree; returns nullptr if absent.  The caller is
// responsible for whatever locking the tree requires.
inline arena_t* ArenaCollection::GetByIdInternal(Tree& aTree,
                                                 arena_id_t aArenaId) {
  // Use AlignedStorage2 to avoid running the arena_t constructor, while
  // we only need it as a placeholder for mId.
  mozilla::AlignedStorage2<arena_t> key;
  key.addr()->mId = aArenaId;
  return aTree.Search(key.addr());
}
#endif
   4597 
// Resolve an arena id back to its arena_t.  Release-asserts if the id does
// not resolve to a matching arena; returns nullptr only when the allocator
// is not yet initialized.
inline arena_t* ArenaCollection::GetById(arena_id_t aArenaId, bool aIsPrivate) {
  if (!malloc_initialized) {
    return nullptr;
  }

#ifdef NON_RANDOM_ARENA_IDS
  // This function is never called with aIsPrivate = false, let's make sure it
  // doesn't silently change while we're making that assumption below because
  // we can't resolve non-private arenas this way.
  MOZ_RELEASE_ASSERT(aIsPrivate);
  // This function is not expected to be called before at least one private
  // arena was created.
  // coverity[missing_lock]
  MOZ_RELEASE_ASSERT(mArenaIdKey);
  // Reverse the id obfuscation (bit rotation, then XOR with the secret key)
  // to recover the arena's address.
  arena_id_t id = (aArenaId << mArenaIdRotation) |
                  (aArenaId >> (sizeof(void*) * 8 - mArenaIdRotation));
  arena_t* result = reinterpret_cast<arena_t*>(id ^ mArenaIdKey);
#else
  Tree* tree = nullptr;
  if (aIsPrivate) {
    if (ArenaIdIsMainThreadOnly(aArenaId)) {
      // The main thread only arenas support lock free access, so it's desirable
      // to do GetById without taking mLock either.
      //
      // Races can occur between writers and writers, or between writers and
      // readers.  The only writer is the main thread and it will never race
      // against itself so we can elude the lock when the main thread is
      // reading.
      MOZ_ASSERT(IsOnMainThread());
      MOZ_PUSH_IGNORE_THREAD_SAFETY
      arena_t* result = GetByIdInternal(mMainThreadArenas, aArenaId);
      MOZ_POP_THREAD_SAFETY
      MOZ_RELEASE_ASSERT(result);
      return result;
    }
    tree = &mPrivateArenas;
  } else {
    tree = &mArenas;
  }

  MutexAutoLock lock(mLock);
  arena_t* result = GetByIdInternal(*tree, aArenaId);
#endif
  MOZ_RELEASE_ASSERT(result);
  MOZ_RELEASE_ASSERT(result->mId == aArenaId);
  return result;
}
   4645 
   4646 inline arena_id_t MozJemalloc::moz_create_arena_with_params(
   4647    arena_params_t* aParams) {
   4648  if (malloc_init()) {
   4649    arena_t* arena = gArenas.CreateArena(/* IsPrivate = */ true, aParams);
   4650    return arena->mId;
   4651  }
   4652  return 0;
   4653 }
   4654 
   4655 inline void MozJemalloc::moz_dispose_arena(arena_id_t aArenaId) {
   4656  arena_t* arena = gArenas.GetById(aArenaId, /* IsPrivate = */ true);
   4657  MOZ_RELEASE_ASSERT(arena);
   4658  gArenas.DisposeArena(arena);
   4659 }
   4660 
   4661 inline void MozJemalloc::moz_set_max_dirty_page_modifier(int32_t aModifier) {
   4662  if (malloc_init()) {
   4663    gArenas.SetDefaultMaxDirtyPageModifier(aModifier);
   4664  }
   4665 }
   4666 
inline void MozJemalloc::jemalloc_reset_small_alloc_randomization(
    bool aRandomizeSmall) {
  // When this process got forked by ForkServer then it inherited the existing
  // state of mozjemalloc. Specifically, parsing of MALLOC_OPTIONS has already
  // been done but it may not reflect anymore the current set of options after
  // the fork().
  //
  // Similar behavior is also present on Android where it is also required to
  // perform this step.
  //
  // Content process will have randomization on small malloc disabled via the
  // MALLOC_OPTIONS environment variable set by parent process, missing this
  // will lead to serious performance regressions because CPU prefetch will
  // break, cf bug 1912262.  However on forkserver-forked Content processes, the
  // environment is not yet reset when the postfork child handler is being
  // called.
  //
  // This API is here to allow those Content processes (spawned by ForkServer or
  // Android service) to notify jemalloc to turn off the randomization on small
  // allocations and perform the required reinitialization of already existing
  // arena's PRNG.  It is important to make sure that the PRNG state is properly
  // re-initialized otherwise child processes would share all the same state.

  {
    // Record the new option value under the init lock, then drop the lock
    // before touching the arenas.
    AutoLock<StaticMutex> lock(gInitLock);
    opt_randomize_small = aRandomizeSmall;
  }

  // Re-seed the PRNG of every existing arena (see the comment above about
  // children sharing state).
  MutexAutoLock lock(gArenas.mLock);
  for (auto* arena : gArenas.iter()) {
    // We can only initialize the PRNG for main-thread-only arenas from the main
    // thread.
    if (!arena->IsMainThreadOnly() || gArenas.IsOnMainThreadWeak()) {
      arena->ResetSmallAllocRandomization();
    }
  }
}
   4704 
   4705 inline bool MozJemalloc::moz_enable_deferred_purge(bool aEnabled) {
   4706  return gArenas.SetDeferredPurge(aEnabled);
   4707 }
   4708 
   4709 inline may_purge_now_result_t MozJemalloc::moz_may_purge_now(
   4710    bool aPeekOnly, uint32_t aReuseGraceMS,
   4711    const Maybe<std::function<bool()>>& aKeepGoing) {
   4712  return gArenas.MayPurgeSteps(aPeekOnly, aReuseGraceMS, aKeepGoing);
   4713 }
   4714 
   4715 inline void ArenaCollection::AddToOutstandingPurges(arena_t* aArena) {
   4716  MOZ_ASSERT(aArena);
   4717 
   4718  // We cannot trust the caller to know whether the element was already added
   4719  // from another thread given we have our own lock.
   4720  MutexAutoLock lock(mPurgeListLock);
   4721  if (!mOutstandingPurges.ElementProbablyInList(aArena)) {
   4722    mOutstandingPurges.pushBack(aArena);
   4723  }
   4724 }
   4725 
   4726 inline bool ArenaCollection::RemoveFromOutstandingPurges(arena_t* aArena) {
   4727  MOZ_ASSERT(aArena);
   4728 
   4729  // We cannot trust the caller to know whether the element was already removed
   4730  // from another thread given we have our own lock.
   4731  MutexAutoLock lock(mPurgeListLock);
   4732  if (mOutstandingPurges.ElementProbablyInList(aArena)) {
   4733    mOutstandingPurges.remove(aArena);
   4734    return true;
   4735  }
   4736  return false;
   4737 }
   4738 
may_purge_now_result_t ArenaCollection::MayPurgeSteps(
    bool aPeekOnly, uint32_t aReuseGraceMS,
    const Maybe<std::function<bool()>>& aKeepGoing) {
  // Perform (or, with aPeekOnly, merely check for) one step of deferred
  // purging: pick one queued arena whose last significant reuse is older than
  // the grace period and run its purge loop.
  //
  // This only works on the main thread because it may process main-thread-only
  // arenas.
  MOZ_ASSERT(IsOnMainThreadWeak());

  uint64_t now = GetTimestampNS();
  // Grace period in nanoseconds, to match mLastSignificantReuseNS.
  uint64_t reuseGraceNS = (uint64_t)aReuseGraceMS * 1000 * 1000;
  arena_t* found = nullptr;
  {
    MutexAutoLock lock(mPurgeListLock);
    if (mOutstandingPurges.isEmpty()) {
      // Nothing is queued at all.
      return may_purge_now_result_t::Done;
    }
    // Find the first arena that has been idle for at least the grace period.
    for (arena_t& arena : mOutstandingPurges) {
      if (now - arena.mLastSignificantReuseNS >= reuseGraceNS) {
        found = &arena;
        break;
      }
    }

    if (!found) {
      // Work is queued, but every queued arena saw recent reuse.
      return may_purge_now_result_t::WantsLater;
    }
    if (aPeekOnly) {
      return may_purge_now_result_t::NeedsMore;
    }

    // We need to avoid the invalid state where mIsDeferredPurgePending is set
    // but the arena is not in the list or about to be added. So remove the
    // arena from the list before calling Purge().
    mOutstandingPurges.remove(found);
  }

  // Run the purge outside mPurgeListLock.
  ArenaPurgeResult pr =
      found->PurgeLoop(PurgeIfThreshold, __func__, aReuseGraceMS, aKeepGoing);

  if (pr == ArenaPurgeResult::NotDone) {
    // If there's more work to do we re-insert the arena into the purge queue.
    // If the arena was busy we don't since the other thread that's purging it
    // will finish that work.

    // Note that after the above Purge() and taking the lock below there's a
    // chance another thread may be purging the arena and clear
    // mIsDeferredPurgePending.  Resulting in the state of being in the list
    // with that flag clear.  That's okay since the next time a purge occurs
    // (and one will because it's in the list) it'll clear the flag and the
    // state will be consistent again.
    MutexAutoLock lock(mPurgeListLock);
    if (!mOutstandingPurges.ElementProbablyInList(found)) {
      // Given we want to continue to purge this arena, push it to the front
      // to increase the probability to find it fast.
      mOutstandingPurges.pushFront(found);
    }
  } else if (pr == ArenaPurgeResult::Dying) {
    // The arena was disposed while queued; it was already removed from the
    // collection (see MayPurgeAll's note), so free it here.
    delete found;
  }

  // Even if there is no other arena that needs work, let the caller just call
  // us again and we will do the above checks then and return their result.
  // Note that in the current surrounding setting this may (rarely) cause a
  // new slice of our idle task runner if we are exceeding idle budget.
  return may_purge_now_result_t::NeedsMore;
}
   4804 
   4805 void ArenaCollection::MayPurgeAll(PurgeCondition aCond, const char* aCaller) {
   4806  MutexAutoLock lock(mLock);
   4807  for (auto* arena : iter()) {
   4808    // Arenas that are not IsMainThreadOnly can be purged from any thread.
   4809    // So we do what we can even if called from another thread.
   4810    if (!arena->IsMainThreadOnly() || IsOnMainThreadWeak()) {
   4811      RemoveFromOutstandingPurges(arena);
   4812      ArenaPurgeResult pr = arena->PurgeLoop(aCond, aCaller);
   4813 
   4814      // No arena can die here because we're holding the arena collection lock.
   4815      // Arenas are removed from the collection before setting their mDying
   4816      // flag.
   4817      MOZ_RELEASE_ASSERT(pr != ArenaPurgeResult::Dying);
   4818    }
   4819  }
   4820 }
   4821 
// Generate the moz_arena_* entry points: each one resolves the arena id
// (GetById release-asserts that the arena exists) and forwards the call to a
// BaseAllocator bound to that arena.
#define MALLOC_DECL(name, return_type, ...)                          \
  inline return_type MozJemalloc::moz_arena_##name(                  \
      arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) { \
    BaseAllocator allocator(                                         \
        gArenas.GetById(aArenaId, /* IsPrivate = */ true));          \
    return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__));         \
  }
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"
   4831 
   4832 // End non-standard functions.
   4833 // ***************************************************************************
   4834 #ifndef XP_WIN
   4835 // Begin library-private functions, used by threading libraries for protection
   4836 // of malloc during fork().  These functions are only called if the program is
   4837 // running in threaded mode, so there is no need to check whether the program
   4838 // is threaded here.
   4839 //
   4840 // Note that the only way to keep the main-thread-only arenas in a consistent
   4841 // state for the child is if fork is called from the main thread only.  Or the
   4842 // child must not use them, eg it should call exec().  We attempt to prevent the
   4843 // child for accessing these arenas by refusing to re-initialise them.
   4844 //
   4845 // This is only accessed in the fork handlers while gArenas.mLock is held.
// Set in _malloc_prefork(); _malloc_postfork_child() passes it to
// mLock.Reinit() so the child can re-create locks held by the forking thread.
static pthread_t gForkingThread;

#  ifdef XP_DARWIN
// This is only accessed in the fork handlers while gArenas.mLock is held.
// Set in _malloc_prefork(); lets _malloc_postfork() distinguish parent from
// child on Darwin, where a single postfork hook runs on both sides.
static pid_t gForkingProcess;
#  endif
   4852 
FORK_HOOK
void _malloc_prefork(void) MOZ_NO_THREAD_SAFETY_ANALYSIS {
  // Acquire all mutexes in a safe order.  _malloc_postfork_parent() releases
  // them in the exact reverse order.
  gArenas.mLock.Lock();
  // Record which thread is forking; the child uses this to reinitialize the
  // per-arena locks in _malloc_postfork_child().
  gForkingThread = pthread_self();
#  ifdef XP_DARWIN
  gForkingProcess = getpid();
#  endif

  for (auto arena : gArenas.iter()) {
    if (arena->mLock.LockIsEnabled()) {
      arena->mLock.Lock();
    }
  }

  gArenas.mPurgeListLock.Lock();

  sBaseAlloc.mMutex.Lock();

  huge_mtx.Lock();
}
   4874 
FORK_HOOK
void _malloc_postfork_parent(void) MOZ_NO_THREAD_SAFETY_ANALYSIS {
  // Release all mutexes, now that fork() has completed.  This is the exact
  // reverse of the acquisition order in _malloc_prefork().
  huge_mtx.Unlock();

  sBaseAlloc.mMutex.Unlock();

  gArenas.mPurgeListLock.Unlock();

  for (auto arena : gArenas.iter()) {
    if (arena->mLock.LockIsEnabled()) {
      arena->mLock.Unlock();
    }
  }

  gArenas.mLock.Unlock();
}
   4892 
FORK_HOOK
void _malloc_postfork_child(void) {
  // Do this before iterating over the arenas.
  gArenas.ResetMainThread();

  // Reinitialize all mutexes, now that fork() has completed.  The locks were
  // held across fork() by the forking thread, so in the child they are
  // re-created (Init) rather than unlocked.
  huge_mtx.Init();

  sBaseAlloc.mMutex.Init();

  gArenas.mPurgeListLock.Init();

  MOZ_PUSH_IGNORE_THREAD_SAFETY
  for (auto arena : gArenas.iter()) {
    // Reinit needs to know which thread held the lock across the fork.
    arena->mLock.Reinit(gForkingThread);
  }
  MOZ_POP_THREAD_SAFETY

  gArenas.mLock.Init();
}
   4913 
   4914 #  ifdef XP_DARWIN
   4915 FORK_HOOK
   4916 void _malloc_postfork(void) {
   4917  // On MacOS we need to check if this is running in the parent or child
   4918  // process.
   4919  bool is_in_parent = getpid() == gForkingProcess;
   4920  gForkingProcess = 0;
   4921  if (is_in_parent) {
   4922    _malloc_postfork_parent();
   4923  } else {
   4924    _malloc_postfork_child();
   4925  }
   4926 }
   4927 #  endif  // XP_DARWIN
   4928 #endif    // ! XP_WIN
   4929 
   4930 // End library-private functions.
   4931 // ***************************************************************************
   4932 #ifdef MOZ_REPLACE_MALLOC
   4933 // Windows doesn't come with weak imports as they are possible with
   4934 // LD_PRELOAD or DYLD_INSERT_LIBRARIES on Linux/OSX. On this platform,
   4935 // the replacement functions are defined as variable pointers to the
   4936 // function resolved with GetProcAddress() instead of weak definitions
   4937 // of functions. On Android, the same needs to happen as well, because
   4938 // the Android linker doesn't handle weak linking with non LD_PRELOADed
   4939 // libraries, but LD_PRELOADing is not very convenient on Android, with
   4940 // the zygote.
#  ifdef XP_DARWIN
#    define MOZ_REPLACE_WEAK __attribute__((weak_import))
#  elif defined(XP_WIN) || defined(ANDROID)
#    define MOZ_DYNAMIC_REPLACE_INIT
#    define replace_init replace_init_decl
#  elif defined(__GNUC__)
#    define MOZ_REPLACE_WEAK __attribute__((weak))
#  endif

#  include "replace_malloc.h"

// Each table below is brace-initialized with one CanonicalMalloc entry per
// function declared in malloc_decls.h.
#  define MALLOC_DECL(name, return_type, ...) CanonicalMalloc::name,

// The default malloc table, i.e. plain allocations. It never changes. It's
// used by init(), and not used after that.
static const malloc_table_t gDefaultMallocTable = {
#  include "malloc_decls.h"
};

// The malloc table installed by init(). It never changes from that point
// onward. It will be the same as gDefaultMallocTable if no replace-malloc tool
// is enabled at startup.
static malloc_table_t gOriginalMallocTable = {
#  include "malloc_decls.h"
};

// The malloc table installed by jemalloc_replace_dynamic(). (Read the
// comments above that function for more details.)
static malloc_table_t gDynamicMallocTable = {
#  include "malloc_decls.h"
};

// This briefly points to gDefaultMallocTable at startup. After that, it points
// to either gOriginalMallocTable or gDynamicMallocTable. It's atomic to avoid
// races when switching between tables.
static Atomic<malloc_table_t const*, mozilla::MemoryOrdering::Relaxed>
    gMallocTablePtr;
   4984 
   4985 #  ifdef XP_WIN
   4986 typedef HMODULE replace_malloc_handle_t;
   4987 
   4988 static replace_malloc_handle_t replace_malloc_handle() {
   4989  wchar_t replace_malloc_lib[1024];
   4990  if (GetEnvironmentVariableW(L"MOZ_REPLACE_MALLOC_LIB", replace_malloc_lib,
   4991                              std::size(replace_malloc_lib)) > 0) {
   4992    return LoadLibraryW(replace_malloc_lib);
   4993  }
   4994  return nullptr;
   4995 }
   4996 
   4997 #    define REPLACE_MALLOC_GET_INIT_FUNC(handle) \
   4998      (replace_init_impl_t*)GetProcAddress(handle, "replace_init")
   4999 
   5000 #  elif defined(ANDROID)
   5001 #    include <dlfcn.h>
   5002 
   5003 typedef void* replace_malloc_handle_t;
   5004 
   5005 static replace_malloc_handle_t replace_malloc_handle() {
   5006  const char* replace_malloc_lib = getenv("MOZ_REPLACE_MALLOC_LIB");
   5007  if (replace_malloc_lib && *replace_malloc_lib) {
   5008    return dlopen(replace_malloc_lib, RTLD_LAZY);
   5009  }
   5010  return nullptr;
   5011 }
   5012 
   5013 #    define REPLACE_MALLOC_GET_INIT_FUNC(handle) \
   5014      (replace_init_impl_t*)dlsym(handle, "replace_init")
   5015 
   5016 #  endif
   5017 
   5018 static void replace_malloc_init_funcs(malloc_table_t*);
   5019 
   5020 #  ifdef MOZ_REPLACE_MALLOC_STATIC
   5021 extern "C" void logalloc_init(malloc_table_t*, ReplaceMallocBridge**);
   5022 
   5023 extern "C" void dmd_init(malloc_table_t*, ReplaceMallocBridge**);
   5024 #  endif
   5025 
   5026 void phc_init(malloc_table_t*, ReplaceMallocBridge**);
   5027 
   5028 bool Equals(const malloc_table_t& aTable1, const malloc_table_t& aTable2) {
   5029  return memcmp(&aTable1, &aTable2, sizeof(malloc_table_t)) == 0;
   5030 }
   5031 
// Below is the malloc implementation overriding jemalloc and calling the
// replacement functions if they exist.
static ReplaceMallocBridge* gReplaceMallocBridge = nullptr;

// One-time setup: give a replace-malloc library (dynamic or statically
// linked) the chance to install itself, then publish the resulting table via
// gOriginalMallocTable / gMallocTablePtr.
static void init() {
  malloc_table_t tempTable = gDefaultMallocTable;

#  ifdef MOZ_DYNAMIC_REPLACE_INIT
  // Look for a dynamically loadable replace-malloc library and resolve its
  // replace_init entry point.
  replace_malloc_handle_t handle = replace_malloc_handle();
  if (handle) {
    replace_init = REPLACE_MALLOC_GET_INIT_FUNC(handle);
  }
#  endif

  // Set this *before* calling replace_init, otherwise if replace_init calls
  // malloc() we'll get an infinite loop.
  gMallocTablePtr = &gDefaultMallocTable;

  // Pass in the default allocator table so replace functions can copy and use
  // it for their allocations. The replace_init() function should modify the
  // table if it wants to be active, otherwise leave it unmodified.
  if (replace_init) {
    replace_init(&tempTable, &gReplaceMallocBridge);
  }
#  ifdef MOZ_REPLACE_MALLOC_STATIC
  // Statically linked tools only get a chance while the table is still
  // unmodified, so at most one replacement ends up active.
  if (Equals(tempTable, gDefaultMallocTable)) {
    logalloc_init(&tempTable, &gReplaceMallocBridge);
  }
#    ifdef MOZ_DMD
  if (Equals(tempTable, gDefaultMallocTable)) {
    dmd_init(&tempTable, &gReplaceMallocBridge);
  }
#    endif
#  endif
  if (!Equals(tempTable, gDefaultMallocTable)) {
    // Fill in derived defaults (aligned allocs, arena functions) for entries
    // the replacement left untouched.
    replace_malloc_init_funcs(&tempTable);
  }
  gOriginalMallocTable = tempTable;
  gMallocTablePtr = &gOriginalMallocTable;
}
   5071 
   5072 // WARNING WARNING WARNING: this function should be used with extreme care. It
   5073 // is not as general-purpose as it looks. It is currently used by
   5074 // tools/profiler/core/memory_hooks.cpp for counting allocations and probably
   5075 // should not be used for any other purpose.
   5076 //
   5077 // This function allows the original malloc table to be temporarily replaced by
   5078 // a different malloc table. Or, if the argument is nullptr, it switches back to
   5079 // the original malloc table.
   5080 //
   5081 // Limitations:
   5082 //
   5083 // - It is not threadsafe. If multiple threads pass it the same
   5084 //   `replace_init_func` at the same time, there will be data races writing to
   5085 //   the malloc_table_t within that function.
   5086 //
   5087 // - Only one replacement can be installed. No nesting is allowed.
   5088 //
   5089 // - The new malloc table must be able to free allocations made by the original
   5090 //   malloc table, and upon removal the original malloc table must be able to
   5091 //   free allocations made by the new malloc table. This means the new malloc
   5092 //   table can only do simple things like recording extra information, while
   5093 //   delegating actual allocation/free operations to the original malloc table.
   5094 //
MOZ_JEMALLOC_API void jemalloc_replace_dynamic(
    jemalloc_init_func replace_init_func) {
  // See the warning comment above: install (or, with nullptr, remove) a
  // temporary replacement malloc table on top of gOriginalMallocTable.
  if (replace_init_func) {
    // Start from the original table and let the new tool modify a copy.
    malloc_table_t tempTable = gOriginalMallocTable;
    (*replace_init_func)(&tempTable, &gReplaceMallocBridge);
    if (!Equals(tempTable, gOriginalMallocTable)) {
      replace_malloc_init_funcs(&tempTable);

      // Temporarily switch back to the original malloc table. In the
      // (supported) non-nested case, this is a no-op. But just in case this is
      // a (unsupported) nested call, it makes the overwriting of
      // gDynamicMallocTable less racy, because ongoing calls to malloc() and
      // friends won't go through gDynamicMallocTable.
      gMallocTablePtr = &gOriginalMallocTable;

      gDynamicMallocTable = tempTable;
      gMallocTablePtr = &gDynamicMallocTable;
      // We assume that dynamic replaces don't occur close enough for a
      // thread to still have old copies of the table pointer when the 2nd
      // replace occurs.
    }
  } else {
    // Switch back to the original malloc table.
    gMallocTablePtr = &gOriginalMallocTable;
  }
}
   5121 
// Generate the ReplaceMalloc entry points: each one lazily runs init() on
// first use, then forwards to whichever table gMallocTablePtr points at.
#  define MALLOC_DECL(name, return_type, ...)                           \
    inline return_type ReplaceMalloc::name(                             \
        ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) {                       \
      if (MOZ_UNLIKELY(!gMallocTablePtr)) {                             \
        init();                                                         \
      }                                                                 \
      return (*gMallocTablePtr).name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
    }
#  include "malloc_decls.h"
   5131 
   5132 MOZ_JEMALLOC_API struct ReplaceMallocBridge* get_bridge(void) {
   5133  if (MOZ_UNLIKELY(!gMallocTablePtr)) {
   5134    init();
   5135  }
   5136  return gReplaceMallocBridge;
   5137 }
   5138 
   5139 // posix_memalign, aligned_alloc, memalign and valloc all implement some kind
   5140 // of aligned memory allocation. For convenience, a replace-malloc library can
   5141 // skip defining replace_posix_memalign, replace_aligned_alloc and
   5142 // replace_valloc, and default implementations will be automatically derived
   5143 // from replace_memalign.
static void replace_malloc_init_funcs(malloc_table_t* table) {
  // If the tool replaced memalign but left posix_memalign/aligned_alloc/
  // valloc at their defaults, derive those from the replaced memalign.
  if (table->posix_memalign == CanonicalMalloc::posix_memalign &&
      table->memalign != CanonicalMalloc::memalign) {
    table->posix_memalign =
        AlignedAllocator<ReplaceMalloc::memalign>::posix_memalign;
  }
  if (table->aligned_alloc == CanonicalMalloc::aligned_alloc &&
      table->memalign != CanonicalMalloc::memalign) {
    table->aligned_alloc =
        AlignedAllocator<ReplaceMalloc::memalign>::aligned_alloc;
  }
  if (table->valloc == CanonicalMalloc::valloc &&
      table->memalign != CanonicalMalloc::memalign) {
    table->valloc = AlignedAllocator<ReplaceMalloc::memalign>::valloc;
  }
  // If the tool replaced malloc but not the arena management functions,
  // install the DummyArenaAllocator versions for them.
  if (table->moz_create_arena_with_params ==
          CanonicalMalloc::moz_create_arena_with_params &&
      table->malloc != CanonicalMalloc::malloc) {
#  define MALLOC_DECL(name, ...) \
    table->name = DummyArenaAllocator<ReplaceMalloc>::name;
#  define MALLOC_FUNCS MALLOC_FUNCS_ARENA_BASE
#  include "malloc_decls.h"
  }
  // Likewise for the per-arena allocation functions.
  if (table->moz_arena_malloc == CanonicalMalloc::moz_arena_malloc &&
      table->malloc != CanonicalMalloc::malloc) {
#  define MALLOC_DECL(name, ...) \
    table->name = DummyArenaAllocator<ReplaceMalloc>::name;
#  define MALLOC_FUNCS MALLOC_FUNCS_ARENA_ALLOC
#  include "malloc_decls.h"
  }
}
   5175 
   5176 #endif  // MOZ_REPLACE_MALLOC
   5177 // ***************************************************************************
   5178 // Definition of all the _impl functions
   5179 // GENERIC_MALLOC_DECL2_MINGW is only used for the MinGW build, and aliases
   5180 // the malloc funcs (e.g. malloc) to the je_ versions. It does not generate
   5181 // aliases for the other functions (jemalloc and arena functions).
   5182 //
   5183 // We do need aliases for the other mozglue.def-redirected functions though,
   5184 // these are done at the bottom of mozmemory_wrap.cpp
#define GENERIC_MALLOC_DECL2_MINGW(name, name_impl, return_type, ...) \
  return_type name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__))            \
      __attribute__((alias(MOZ_STRINGIFY(name_impl))));

// Emit a definition of name_impl that forwards to DefaultMalloc.
#define GENERIC_MALLOC_DECL2(attributes, name, name_impl, return_type, ...)  \
  return_type name_impl(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) attributes { \
    return DefaultMalloc::name(ARGS_HELPER(ARGS, ##__VA_ARGS__));            \
  }

#ifndef __MINGW32__
#  define GENERIC_MALLOC_DECL(attributes, name, return_type, ...)    \
    GENERIC_MALLOC_DECL2(attributes, name, name##_impl, return_type, \
                         ##__VA_ARGS__)
#else
// MinGW additionally emits a plain-name alias for the _impl symbol.
#  define GENERIC_MALLOC_DECL(attributes, name, return_type, ...)    \
    GENERIC_MALLOC_DECL2(attributes, name, name##_impl, return_type, \
                         ##__VA_ARGS__)                              \
    GENERIC_MALLOC_DECL2_MINGW(name, name##_impl, return_type, ##__VA_ARGS__)
#endif

// First pass: the malloc family, defined under name##_impl.
#define NOTHROW_MALLOC_DECL(...) \
  MOZ_MEMORY_API MACRO_CALL(GENERIC_MALLOC_DECL, (noexcept(true), __VA_ARGS__))
#define MALLOC_DECL(...) \
  MOZ_MEMORY_API MACRO_CALL(GENERIC_MALLOC_DECL, (, __VA_ARGS__))
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
#include "malloc_decls.h"

// Second pass: the jemalloc_* and arena functions keep their plain names.
#undef GENERIC_MALLOC_DECL
#define GENERIC_MALLOC_DECL(attributes, name, return_type, ...) \
  GENERIC_MALLOC_DECL2(attributes, name, name, return_type, ##__VA_ARGS__)

#define MALLOC_DECL(...) \
  MOZ_JEMALLOC_API MACRO_CALL(GENERIC_MALLOC_DECL, (, __VA_ARGS__))
#define MALLOC_FUNCS (MALLOC_FUNCS_JEMALLOC | MALLOC_FUNCS_ARENA)
#include "malloc_decls.h"
   5220 // ***************************************************************************
   5221 
   5222 #ifdef HAVE_DLFCN_H
   5223 #  include <dlfcn.h>
   5224 #endif
   5225 
   5226 #if defined(__GLIBC__) && !defined(__UCLIBC__)
   5227 // glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
   5228 // to inconsistently reference libc's malloc(3)-compatible functions
   5229 // (bug 493541).
   5230 //
   5231 // These definitions interpose hooks in glibc.  The functions are actually
   5232 // passed an extra argument for the caller return address, which will be
   5233 // ignored.
   5234 
extern "C" {
// Point glibc's legacy allocation hooks at our implementations (see the
// comment above about RTLD_DEEPBIND and bug 493541).
MOZ_EXPORT void (*__free_hook)(void*) = free_impl;
MOZ_EXPORT void* (*__malloc_hook)(size_t) = malloc_impl;
MOZ_EXPORT void* (*__realloc_hook)(void*, size_t) = realloc_impl;
MOZ_EXPORT void* (*__memalign_hook)(size_t, size_t) = memalign_impl;
}
   5241 
   5242 #elif defined(RTLD_DEEPBIND)
   5243 // XXX On systems that support RTLD_GROUP or DF_1_GROUP, do their
   5244 // implementations permit similar inconsistencies?  Should STV_SINGLETON
   5245 // visibility be used for interposition where available?
   5246 #  error \
   5247      "Interposing malloc is unsafe on this system without libc malloc hooks."
   5248 #endif
   5249 
   5250 #ifdef XP_WIN
   5251 MOZ_EXPORT void* _recalloc(void* aPtr, size_t aCount, size_t aSize) {
   5252  size_t oldsize = aPtr ? AllocInfo::Get(aPtr).Size() : 0;
   5253  CheckedInt<size_t> checkedSize = CheckedInt<size_t>(aCount) * aSize;
   5254 
   5255  if (!checkedSize.isValid()) {
   5256    return nullptr;
   5257  }
   5258 
   5259  size_t newsize = checkedSize.value();
   5260 
   5261  // In order for all trailing bytes to be zeroed, the caller needs to
   5262  // use calloc(), followed by recalloc().  However, the current calloc()
   5263  // implementation only zeros the bytes requested, so if recalloc() is
   5264  // to work 100% correctly, calloc() will need to change to zero
   5265  // trailing bytes.
   5266  aPtr = DefaultMalloc::realloc(aPtr, newsize);
   5267  if (aPtr && oldsize < newsize) {
   5268    memset((void*)((uintptr_t)aPtr + oldsize), 0, newsize - oldsize);
   5269  }
   5270 
   5271  return aPtr;
   5272 }
   5273 
   5274 // This impl of _expand doesn't ever actually expand or shrink blocks: it
   5275 // simply replies that you may continue using a shrunk block.
   5276 MOZ_EXPORT void* _expand(void* aPtr, size_t newsize) {
   5277  if (AllocInfo::Get(aPtr).Size() >= newsize) {
   5278    return aPtr;
   5279  }
   5280 
   5281  return nullptr;
   5282 }
   5283 
   5284 MOZ_EXPORT size_t _msize(void* aPtr) {
   5285  return DefaultMalloc::malloc_usable_size(aPtr);
   5286 }
   5287 #endif
   5288 
   5289 #ifdef MOZ_PHC
   5290 // Compile PHC and mozjemalloc together so that PHC can inline mozjemalloc.
   5291 #  include "PHC.cpp"
   5292 #endif