tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

LifoAlloc.h (42174B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef ds_LifoAlloc_h
      8 #define ds_LifoAlloc_h
      9 
     10 #include "mozilla/Attributes.h"
     11 #include "mozilla/CheckedArithmetic.h"
     12 #include "mozilla/MemoryChecking.h"
     13 #include "mozilla/MemoryReporting.h"
     14 
     15 #include <algorithm>
     16 #include <new>
     17 #include <stddef.h>  // size_t
     18 #include <type_traits>
     19 #include <utility>
     20 
     21 // [SMDOC] LifoAlloc bump allocator
     22 //
     23 // This file defines an allocator named LifoAlloc which is a Bump allocator,
     24 // which has the property of making fast allocation but is not able to reclaim
     25 // individual allocations.
     26 //
     27 // * Allocation principle
     28 //
     29 // In practice a LifoAlloc is implemented using a list of BumpChunks, which are
     30 // contiguous memory areas which are chained in a single linked list.
     31 //
     32 // When an allocation is performed, we check if there is space in the last
     33 // chunk. If there is we bump the pointer of the last chunk and return the
     34 // previous value of the pointer. Otherwise we allocate a new chunk which is
     35 // large enough and perform the allocation the same way.
     36 //
     37 // Each allocation is made with 2 main functions, called
     38 // BumpChunk::nextAllocBase and BumpChunk::nextAllocEnd. These functions are
     39 // made to avoid duplicating logic, such as allocating, checking if we can
     40 // allocate or reserving a given buffer space. They are made to align the
     41 // pointer for the next allocation (8-byte aligned), and also to reserve some
     42 // red-zones to improve reports of our security instrumentation. (see Security
     43 // features below)
     44 //
     45 // The Chunks sizes are following the heuristics implemented in NextSize
     46 // (LifoAlloc.cpp), which doubles the size until we reach 1 MB and then
     47 // continues with a smaller geometric series. This heuristic is meant to reduce
     48 // the number of allocations, such that we spend less time allocating/freeing
     49 // chunks of a few KB at a time.
     50 //
     51 // ** Oversize allocations
     52 //
     53 // When allocating with a LifoAlloc, we distinguish 2 different kinds of
     54 // allocations, the small allocations and the large allocations. The reason for
     55 // splitting in 2 sets is to avoid wasting memory.
     56 //
     57 // If you had a single linked list of chunks, then making oversized allocations
     58 // can cause chunks to contain a lot of wasted space as new chunks would have to
     59 // be allocated to fit these allocations, and the space of the previous chunk
     60 // would remain unused.
     61 //
     62 // Oversize allocation size can be disabled or customized with disableOversize
     63 // and setOversizeThreshold, which must be smaller than the default chunk size
     64 // with which the LifoAlloc was initialized.
     65 //
     66 // ** LifoAllocScope (mark & release)
     67 //
     68 // As the memory cannot be reclaimed except when the LifoAlloc structure is
     69 // deleted, the LifoAllocScope structure is used to create scopes, related to a
     70 // stacked task. When going out of a LifoAllocScope the memory associated to the
     71 // scope is marked as unused but not reclaimed. This implies that the memory
     72 // allocated for one task can be reused for a similar task later on. (see
     73 // Safety)
     74 //
     75 // LifoAllocScope is based on mark and release functions. The mark function is
     76 // used to recall the offsets at which a LifoAllocScope got created. The release
     77 // function takes the Mark as input and will flag all memory allocated after the
     78 // mark creation as unused.
     79 //
     80 // When releasing all the memory of BumpChunks, these are moved to a list of
     81 // unused chunks which will later be reused by new allocations.
     82 //
     83 // A bump chunk allocator normally has a single bump pointer, whereas we have
     84 // 2. (see Oversize allocations) By doing so, we lose the ordering of allocation
     85 // coming from a single linked list of allocation.
     86 //
     87 // However, we rely on the ordering of allocation with LifoAllocScope, i.e. when
     88 // mark and release functions are used. Thus the LifoAlloc::Mark is composed of
     89 // 2 marks, One for each singled linked list of allocations, to keep both lists
     90 // of allocations ordered.
     91 //
     92 // ** Infallible Allocator
     93 //
     94 // LifoAlloc can also be used as an infallible allocator. This requires the user
     95 // to periodically ensure that enough space has been reserved to satisfy the
     96 // upcoming set of allocations by calling LifoAlloc::ensureUnusedApproximate or
     97 // LifoAlloc::allocEnsureUnused functions. Between 2 calls of these functions,
     98 // functions such as allocInfallible can be used without checking against
     99 // nullptr, as long as there is a bounded number of such calls and that all
    100 // allocations including their red-zone fit in the reserved space.
    101 //
    102 // The infallible allocator mode can be toggled on as the default by calling
    103 // setAsInfallibleByDefault, in which case an AutoFallibleScope should be used
    104 // to make any large allocations. Failing to do so will raise an issue when
    105 // running the LifoAlloc with the OOM Simulator. (see Security features)
    106 //
    107 // * LifoAlloc::Enum Iterator
    108 //
    109 // A LifoAlloc is used for backing the store-buffer of the Garbage Collector
    110 // (GC). The store-buffer is appending data as it is being reported during
    111 // incremental GC. The LifoAlloc::Enum class is used for iterating over the set
    112 // of allocations made within the LifoAlloc.
    113 //
    114 // However, one must take extra care into having the proper associated types for
    115 // the data which are being written and read out of the LifoAlloc. The iterator
    116 // is reusing the same logic as the allocator in order to skip red-zones.
    117 //
    118 // At the moment, the iterator will cause a hard failure if any oversize
    119 // allocations are made.
    120 //
    121 // * Safety
    122 //
    123 // A LifoAlloc is neither thread-safe nor interrupt-safe. It should only be
    124 // manipulated in one thread of execution at a time. It can be transferred from
    125 // one thread to another but should not be used concurrently.
    126 //
    127 // When using LifoAllocScope, no pointer to the data allocated within a
    128 // LifoAllocScope should be stored in data allocated before the latest
    129 // LifoAllocScope. This kind of issue can hide in different forms, such as
    130 // appending to a Vector backed by a LifoAlloc, which can resize and move the
    131 // data below the LifoAllocScope. Thus causing a use-after-free once leaving a
    132 // LifoAllocScope.
    133 //
    134 // * Security features
    135 //
    136 // ** Single Linked List
    137 //
    138 // For sanity reasons this LifoAlloc implementation makes use of its own single
    139 // linked list implementation based on unique pointers (UniquePtr). The reason
    140 // for this is to ensure that a BumpChunk is owned only once, thus preventing
    141 // use-after-free issues.
    142 //
    143 // ** OOM Simulator
    144 //
    145 // The OOM simulator is controlled by the JS_OOM_BREAKPOINT macro, and used to
    146 // check any fallible allocation for potential OOM. Fallible functions are
    147 // instrumented with JS_OOM_POSSIBLY_FAIL(); function calls, and are expected to
    148 // return null on failures.
    149 //
    150 // Except for simulating OOMs, LifoAlloc is instrumented in DEBUG and OOM
    151 // Simulator builds to check for the correctness of the Infallible Allocator
    152 // state. When using a LifoAlloc as an infallible allocator, enough space should
    153 // always be reserved for the next allocations. Therefore, to check this
    154 // invariant LifoAlloc::newChunkWithCapacity checks that any new chunks are
    155 // allocated within a fallible scope, under AutoFallibleScope.
    156 //
    157 // ** Address Sanitizers & Valgrind
    158 //
    159 // When manipulating memory in a LifoAlloc, the memory remains contiguous and
    160 // therefore subject to potential buffer overflow/underflow. To check for these
    161 // memory corruptions, the macro LIFO_HAVE_MEM_CHECK is used to add red-zones
    162 // with LIFO_MAKE_MEM_NOACCESS and LIFO_MAKE_MEM_UNDEFINED.
    163 //
    164 // The red-zone is a minimum space left in between 2 allocations. Any access to
    165 // these red-zones should warn in both valgrind / ASan builds.
    166 //
    167 // The red-zone size is defined in BumpChunk::RedZoneSize and defaults to 0 if
    168 // no instrumentation is expected, and 16 otherwise.
    169 //
    170 // ** Magic Number
    171 //
    172 // A simple sanity check is present in all BumpChunk under the form of a
    173 // constant field which is never mutated. The BumpChunk::magic_ is initialized to
    174 // the "Lif" string. Any mutation of this value indicates a memory corruption.
    175 //
    176 // This magic number is enabled in all MOZ_DIAGNOSTIC_ASSERT_ENABLED builds,
    177 // which implies that all Nightly and dev-edition versions of
    178 // Firefox/SpiderMonkey contain this instrumentation.
    179 //
    180 // ** Memory protection
    181 //
    182 // LifoAlloc chunks are holding a lot of memory. When the memory is known to be
    183 // unused, unchanged for some period of time, such as moving from one thread to
    184 // another. Then the memory can be set as read-only with LifoAlloc::setReadOnly
    185 // and reset as read-write with LifoAlloc::setReadWrite.
    186 //
    187 // This code is guarded by LIFO_CHUNK_PROTECT and at the moment only enabled in
    188 // DEBUG builds in order to avoid the fragmentation of the TLB which might run
    189 // out-of-memory when calling mprotect.
    190 //
    191 
    192 #include "js/UniquePtr.h"
    193 #include "util/Memory.h"
    194 #include "util/Poison.h"
    195 
    196 namespace js {
    197 
    198 // Because the LifoAlloc just drops its contents on the floor, it should only be
    199 // used for types that are trivially destructible, or that are explicitly
    200 // allowed (either because they are ok with not having their destructors run, or
    201 // are manually destructed before the LifoAlloc is cleared.)
    202 
    203 template <typename T, typename = void>
    204 struct CanLifoAlloc : std::false_type {};
    205 
    206 // Pointers can be dropped.
    207 template <typename T>
    208 struct CanLifoAlloc<T*> : std::true_type {};
    209 
    210 // Use this to wrap a return type for a function that returns a T* stored within
    211 // a LifoAlloc. It will fail SFINAE if it should not be used.
    212 //
    213 // Example:
    214 //
    215 //   template <typename T> auto new_(int x, int y) -> js::lifo_alloc_pointer<T*>
    216 //   { ... }
    217 //
    218 // If a type has a nontrivial destructor but should still be allowed, allowlist
    219 // it with CanLifoAlloc<T>:
    220 //
    221 //   template <> struct CanLifoAlloc<MyType> : std::true_type {};
    222 //
    223 template <typename T>
    224 using lifo_alloc_pointer = typename std::enable_if<
    225    js::CanLifoAlloc<typename std::remove_pointer<T>::type>::value ||
    226        std::is_trivially_destructible_v<typename std::remove_pointer<T>::type>,
    227    T>::type;
    228 
    229 namespace detail {
    230 
    231 template <typename T, typename D>
    232 class SingleLinkedList;
    233 
    234 template <typename T, typename D = JS::DeletePolicy<T>>
    235 class SingleLinkedListElement {
    236  friend class SingleLinkedList<T, D>;
    237  js::UniquePtr<T, D> next_;
    238 
    239 public:
    240  SingleLinkedListElement() : next_(nullptr) {}
    241  ~SingleLinkedListElement() { MOZ_ASSERT(!next_); }
    242 
    243  T* next() const { return next_.get(); }
    244 };
    245 
    246 // Single linked list which is using UniquePtr to hold the next pointers.
    247 // UniquePtr are used to ensure that none of the elements are used
    248 // simultaneously in 2 different lists.
template <typename T, typename D = JS::DeletePolicy<T>>
class SingleLinkedList {
 private:
  // First element of the list. The chain of UniquePtr ensures that each
  // element is owned exactly once: by this list (head_) or by its
  // predecessor element (next_).
  UniquePtr<T, D> head_;

  // Weak pointer to the last element of the list; null iff the list is empty.
  T* last_;

  // Structural invariants: head_ and last_ are either both null (empty list)
  // or both set, and the last element never has a successor.
  void assertInvariants() {
    MOZ_ASSERT(bool(head_) == bool(last_));
    MOZ_ASSERT_IF(last_, !last_->next_);
  }

 public:
  SingleLinkedList() : head_(nullptr), last_(nullptr) { assertInvariants(); }

  // Move construction transfers ownership of every element and leaves
  // |other| empty.
  SingleLinkedList(SingleLinkedList&& other)
      : head_(std::move(other.head_)), last_(other.last_) {
    other.last_ = nullptr;
    assertInvariants();
    other.assertInvariants();
  }

  // The list must be emptied before destruction; a non-empty list would
  // destroy a chained element, which asserts in ~SingleLinkedListElement.
  ~SingleLinkedList() {
    MOZ_ASSERT(!head_);
    MOZ_ASSERT(!last_);
  }

  // Move the elements of the |other| list in the current one, and implicitly
  // remove all the elements of the current list.
  SingleLinkedList& operator=(SingleLinkedList&& other) {
    head_ = std::move(other.head_);
    last_ = other.last_;
    other.last_ = nullptr;
    assertInvariants();
    other.assertInvariants();
    return *this;
  }

  bool empty() const { return !last_; }

  // Iterates over the elements of the list, this is used to avoid raw
  // manipulation of pointers as much as possible.
  class Iterator {
    T* current_;

   public:
    explicit Iterator(T* current) : current_(current) {}

    T& operator*() const { return *current_; }
    T* operator->() const { return current_; }
    T* get() const { return current_; }

    const Iterator& operator++() {
      current_ = current_->next();
      return *this;
    }

    bool operator!=(const Iterator& other) const {
      return current_ != other.current_;
    }
    bool operator==(const Iterator& other) const {
      return current_ == other.current_;
    }
  };

  Iterator begin() const { return Iterator(head_.get()); }
  Iterator end() const { return Iterator(nullptr); }
  Iterator last() const { return Iterator(last_); }

  // Split the list in 2 single linked lists after the element given as
  // argument.  The front of the list remains in the current list, while the
  // back goes in the newly created linked list.
  //
  // This is used for example to extract one element from a list. (see
  // LifoAlloc::getOrCreateChunk)
  SingleLinkedList splitAfter(T* newLast) {
    MOZ_ASSERT(newLast);
    SingleLinkedList result;
    // If |newLast| is already the last element, the back list stays empty.
    if (newLast->next_) {
      result.head_ = std::move(newLast->next_);
      result.last_ = last_;
      last_ = newLast;
    }
    assertInvariants();
    result.assertInvariants();
    return result;
  }

  // Insert |elem| at the front of the list, taking ownership of it.
  void pushFront(UniquePtr<T, D>&& elem) {
    if (!last_) {
      last_ = elem.get();
    }
    elem->next_ = std::move(head_);
    head_ = std::move(elem);
    assertInvariants();
  }

  // Append |elem| at the back of the list, taking ownership of it.
  void append(UniquePtr<T, D>&& elem) {
    if (last_) {
      last_->next_ = std::move(elem);
      last_ = last_->next_.get();
    } else {
      head_ = std::move(elem);
      last_ = head_.get();
    }
    assertInvariants();
  }
  // Append every element of |list| at the back of this list; |list| is left
  // empty.
  void appendAll(SingleLinkedList&& list) {
    if (list.empty()) {
      return;
    }
    if (last_) {
      last_->next_ = std::move(list.head_);
    } else {
      head_ = std::move(list.head_);
    }
    last_ = list.last_;
    list.last_ = nullptr;
    assertInvariants();
    list.assertInvariants();
  }
  // Take all elements of |list|, discarding this list's current content.
  // NOTE(review): this looks like it assumes the receiving list is empty —
  // overwriting a non-empty head_ would destroy a chained element and assert
  // in ~SingleLinkedListElement; confirm against callers.
  void steal(SingleLinkedList&& list) {
    head_ = std::move(list.head_);
    last_ = list.last_;
    list.last_ = nullptr;
    assertInvariants();
    list.assertInvariants();
  }
  // Move all elements of |list| in front of the current elements of this
  // list.
  void prependAll(SingleLinkedList&& list) {
    list.appendAll(std::move(*this));
    steal(std::move(list));
  }
  // Detach and return the first element. The list must not be empty.
  UniquePtr<T, D> popFirst() {
    MOZ_ASSERT(head_);
    UniquePtr<T, D> result = std::move(head_);
    head_ = std::move(result->next_);
    if (!head_) {
      last_ = nullptr;
    }
    assertInvariants();
    return result;
  }
};
    395 
    396 static const size_t LIFO_ALLOC_ALIGN = 8;
    397 
    398 MOZ_ALWAYS_INLINE
    399 uint8_t* AlignPtr(uint8_t* orig) {
    400  static_assert(mozilla::IsPowerOfTwo(LIFO_ALLOC_ALIGN),
    401                "LIFO_ALLOC_ALIGN must be a power of two");
    402 
    403  uint8_t* result = (uint8_t*)AlignBytes(uintptr_t(orig), LIFO_ALLOC_ALIGN);
    404  MOZ_ASSERT(uintptr_t(result) % LIFO_ALLOC_ALIGN == 0);
    405  return result;
    406 }
    407 
// A BumpChunk represents a single memory allocation made with the system
// allocator. As the owner of the memory, it is responsible for the allocation
// and the deallocation.
//
// This structure is only move-able, but not copyable.
class BumpChunk : public SingleLinkedListElement<BumpChunk> {
 private:
  // Pointer to the last byte allocated in this chunk.
  uint8_t* bump_;
  // Pointer to the first byte after this chunk.
  uint8_t* const capacity_;

#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
  // Magic number used to check against poisoned values. 0x4c6966 is the
  // ASCII encoding of "Lif".
  const uintptr_t magic_ : 24;
  static constexpr uintptr_t magicNumber = uintptr_t(0x4c6966);
#endif

#if defined(DEBUG)
#  define LIFO_CHUNK_PROTECT 1
#endif

  // Poison the memory with memset, in order to catch errors due to
  // use-after-free, with JS_LIFO_UNDEFINED_PATTERN pattern, or to catch
  // use-before-init with JS_LIFO_UNINITIALIZED_PATTERN.
#if defined(DEBUG)
#  define LIFO_HAVE_MEM_CHECKS 1
#  define LIFO_MAKE_MEM_NOACCESS(addr, size)       \
    do {                                           \
      uint8_t* base = (addr);                      \
      size_t sz = (size);                          \
      MOZ_MAKE_MEM_UNDEFINED(base, sz);            \
      memset(base, JS_LIFO_UNDEFINED_PATTERN, sz); \
      MOZ_MAKE_MEM_NOACCESS(base, sz);             \
    } while (0)

#  define LIFO_MAKE_MEM_UNDEFINED(addr, size)          \
    do {                                               \
      uint8_t* base = (addr);                          \
      size_t sz = (size);                              \
      MOZ_MAKE_MEM_UNDEFINED(base, sz);                \
      memset(base, JS_LIFO_UNINITIALIZED_PATTERN, sz); \
      MOZ_MAKE_MEM_UNDEFINED(base, sz);                \
    } while (0)

#elif defined(MOZ_HAVE_MEM_CHECKS)
#  define LIFO_HAVE_MEM_CHECKS 1
#  define LIFO_MAKE_MEM_NOACCESS(addr, size) \
    MOZ_MAKE_MEM_NOACCESS((addr), (size))
#  define LIFO_MAKE_MEM_UNDEFINED(addr, size) \
    MOZ_MAKE_MEM_UNDEFINED((addr), (size))
#endif

#ifdef LIFO_HAVE_MEM_CHECKS
  // Red zone reserved after each allocation.
  static constexpr size_t RedZoneSize = 16;
#else
  static constexpr size_t RedZoneSize = 0;
#endif

  // Check structural invariants: the magic number is intact and the bump
  // pointer stays within [begin(), capacity_].
  void assertInvariants() {
    MOZ_DIAGNOSTIC_ASSERT(magic_ == magicNumber);
    MOZ_ASSERT(begin() <= end());
    MOZ_ASSERT(end() <= capacity_);
  }

  BumpChunk& operator=(const BumpChunk&) = delete;
  BumpChunk(const BumpChunk&) = delete;

  // |capacity| is the total size of the chunk, counted from base(), and thus
  // includes the space used by this header. (see newWithCapacity)
  explicit BumpChunk(uintptr_t capacity)
      : bump_(begin()),
        capacity_(base() + capacity)
#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
        ,
        magic_(magicNumber)
#endif
  {
    assertInvariants();
#if defined(LIFO_HAVE_MEM_CHECKS)
    // The memory is freshly allocated and marked as undefined by the
    // allocator of the BumpChunk. Instead, we mark this memory as
    // no-access, as it has not been allocated within the BumpChunk.
    LIFO_MAKE_MEM_NOACCESS(bump_, capacity_ - bump_);
#endif
  }

  // Cast |this| into a uint8_t* pointer.
  //
  // Warning: Are you sure you do not want to use begin() instead?
  const uint8_t* base() const { return reinterpret_cast<const uint8_t*>(this); }
  uint8_t* base() { return reinterpret_cast<uint8_t*>(this); }

  // Update the bump pointer to any value contained in this chunk, which is
  // above the private fields of this chunk.
  //
  // The memory is poisoned and flagged as no-access when the bump pointer is
  // moving backward, such as when memory is released, or when a Mark is used
  // to unwind previous allocations.
  //
  // The memory is flagged as undefined when the bump pointer is moving
  // forward.
  void setBump(uint8_t* newBump) {
    assertInvariants();
    MOZ_ASSERT(begin() <= newBump);
    MOZ_ASSERT(newBump <= capacity_);
#if defined(LIFO_HAVE_MEM_CHECKS)
    // Poison/Unpoison memory that we just free'd/allocated.
    if (bump_ > newBump) {
      LIFO_MAKE_MEM_NOACCESS(newBump, bump_ - newBump);
    } else if (newBump > bump_) {
      MOZ_ASSERT(newBump - RedZoneSize >= bump_);
      LIFO_MAKE_MEM_UNDEFINED(bump_, newBump - RedZoneSize - bump_);
      // The area [newBump - RedZoneSize .. newBump[ is already flagged as
      // no-access either with the previous if-branch or with the
      // BumpChunk constructor. No need to mark it twice.
    }
#endif
    bump_ = newBump;
  }

 public:
  // Releasing poisons the payload but does not run any destructor of the
  // allocated content.
  ~BumpChunk() { release(); }

  // Returns true if this chunk contains no allocated content.
  bool empty() const { return end() == begin(); }

  // Returns the size in bytes of the number of allocated space. This includes
  // the size consumed by the alignment of the allocations.
  size_t used() const { return end() - begin(); }

  // These are used for manipulating a chunk as if it was a vector of bytes,
  // and used for iterating over the content of the buffer (see
  // LifoAlloc::Enum)
  inline const uint8_t* begin() const;
  inline uint8_t* begin();
  uint8_t* end() const { return bump_; }

  // This function is the only way to allocate and construct a chunk. It
  // returns a UniquePtr to the newly allocated chunk.  The size given as
  // argument includes the space needed for the header of the chunk.
  static UniquePtr<BumpChunk> newWithCapacity(size_t size, arena_id_t arena);

  // Report allocation.
  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return mallocSizeOf(this);
  }

  // Report allocation size.
  size_t computedSizeOfIncludingThis() const { return capacity_ - base(); }

  // Opaque type used to carry a pointer to the current location of the bump_
  // pointer, within a BumpChunk.
  class Mark {
    // Chunk which owns the pointer.
    BumpChunk* chunk_;
    // Record of the bump_ pointer of the BumpChunk.
    uint8_t* bump_;

    friend class BumpChunk;
    Mark(BumpChunk* chunk, uint8_t* bump) : chunk_(chunk), bump_(bump) {}

   public:
    Mark() : chunk_(nullptr), bump_(nullptr) {}

    BumpChunk* markedChunk() const { return chunk_; }
  };

  // Return a mark to be able to unwind future allocations with the release
  // function. (see LifoAllocScope)
  Mark mark() { return Mark(this, end()); }

  // Check if a pointer is part of the allocated data of this chunk.
  bool contains(const void* ptr) const {
    // Note: We cannot check "ptr < end()" because the mark have a 0-size
    // length.
    return begin() <= ptr && ptr <= end();
  }

  // Check if a mark is part of the allocated data of this chunk.
  bool contains(Mark m) const {
    MOZ_ASSERT(m.chunk_ == this);
    return contains(m.bump_);
  }

  // Release the memory allocated in this chunk. This function does not call
  // any of the destructors.
  void release() { setBump(begin()); }

  // Release the memory allocated in this chunk since the corresponding mark
  // got created. This function does not call any of the destructors.
  void release(Mark m) {
    MOZ_RELEASE_ASSERT(contains(m));
    setBump(m.bump_);
  }

  // Given an amount, compute the total size of a chunk for it: reserved
  // space before |begin()|, space for |amount| bytes, and red-zone space
  // after those bytes that will ultimately end at |capacity_|.
  [[nodiscard]] static inline bool allocSizeWithRedZone(size_t amount,
                                                        size_t* size);

  // Given a bump chunk pointer, find the next base/end pointers. This is
  // useful for having consistent allocations, and iterating over known size
  // allocations.
  static uint8_t* nextAllocBase(uint8_t* e) { return detail::AlignPtr(e); }
  static uint8_t* nextAllocEnd(uint8_t* b, size_t n) {
    return b + n + RedZoneSize;
  }

  // Returns true, if the unused space is large enough for an allocation of
  // |n| bytes.
  bool canAlloc(size_t n) const {
    uint8_t* newBump = nextAllocEnd(nextAllocBase(end()), n);
    // bump_ <= newBump, is necessary to catch overflow.
    return bump_ <= newBump && newBump <= capacity_;
  }

  // Space remaining in the current chunk, measured from the next aligned
  // allocation base.
  size_t unused() const {
    uint8_t* aligned = nextAllocBase(end());
    if (aligned < capacity_) {
      return capacity_ - aligned;
    }
    return 0;
  }

  // Try to perform an allocation of size |n|, returns nullptr if not possible.
  MOZ_ALWAYS_INLINE
  void* tryAlloc(size_t n) {
    uint8_t* aligned = nextAllocBase(end());
    uint8_t* newBump = nextAllocEnd(aligned, n);

    if (newBump > capacity_) {
      return nullptr;
    }

    // Check for overflow.
    if (MOZ_UNLIKELY(newBump < bump_)) {
      return nullptr;
    }

    MOZ_ASSERT(canAlloc(n));  // Ensure consistency between "can" and "try".
    setBump(newBump);
    return aligned;
  }

#ifdef LIFO_CHUNK_PROTECT
  // Toggle the OS page protection of this chunk. (see "Memory protection" in
  // the file header comment; only enabled in DEBUG builds.)
  void setReadOnly();
  void setReadWrite();
#else
  void setReadOnly() const {}
  void setReadWrite() const {}
#endif
};
    662 
// Space reserved for the BumpChunk internal data, and the alignment of the
// first allocation content. This can be used to ensure there is enough space
// for the next allocation (see LifoAlloc::newChunkWithCapacity).
static constexpr size_t BumpChunkReservedSpace =
    AlignBytes(sizeof(BumpChunk), LIFO_ALLOC_ALIGN);

// Compute in |*size| the total chunk size needed to hold |amount| payload
// bytes, and return false iff the final addition overflowed size_t.
[[nodiscard]] /* static */ inline bool BumpChunk::allocSizeWithRedZone(
    size_t amount, size_t* size) {
  constexpr size_t SpaceBefore = BumpChunkReservedSpace;
  static_assert((SpaceBefore % LIFO_ALLOC_ALIGN) == 0,
                "reserved space presumed already aligned");

  constexpr size_t SpaceAfter = RedZoneSize;  // may be zero

  constexpr size_t SpaceBeforeAndAfter = SpaceBefore + SpaceAfter;
  static_assert(SpaceBeforeAndAfter >= SpaceBefore,
                "intermediate addition must not overflow");

  *size = SpaceBeforeAndAfter + amount;
  // A wrapped addition would yield a value smaller than the reserved space.
  return MOZ_LIKELY(*size >= SpaceBeforeAndAfter);
}

// First byte of usable payload memory, just after the reserved header space.
inline const uint8_t* BumpChunk::begin() const {
  return base() + BumpChunkReservedSpace;
}

inline uint8_t* BumpChunk::begin() { return base() + BumpChunkReservedSpace; }
    690 
    691 }  // namespace detail
    692 
// LIFO bump allocator: used for phase-oriented and fast LIFO allocations.
//
// Allocation is a pointer bump in the last chunk of |chunks_|; individual
// allocations cannot be freed, only whole ranges via mark()/release() or
// releaseAll()/freeAll().
//
// Note: We leave BumpChunks latent in the set of unused chunks after they've
// been released to avoid thrashing before a GC.
class LifoAlloc {
  using UniqueBumpChunk = js::UniquePtr<detail::BumpChunk>;
  using BumpChunkList = detail::SingleLinkedList<detail::BumpChunk>;

  // List of chunks containing allocated data of size smaller than the default
  // chunk size. In the common case, the last chunk of this list is always
  // used to perform the allocations. When the allocation cannot be performed,
  // we move a Chunk from the unused set to the list of used chunks.
  BumpChunkList chunks_;

  // List of chunks containing allocated data where each allocation is larger
  // than the oversize threshold. Each chunk contains exactly one allocation.
  // This reduces wasted space in the chunk list.
  //
  // Oversize chunks are allocated on demand and freed as soon as they are
  // released, instead of being pushed to the unused list.
  BumpChunkList oversize_;

  // Set of unused chunks, which can be reused for future allocations.
  BumpChunkList unused_;

  // Number of currently outstanding Mark tokens. cancelMark() decrements it,
  // and releaseAll() asserts it is zero.
  size_t markCount;
  // Capacity used for newly allocated chunks; exposed via defaultChunkSize().
  size_t defaultChunkSize_;
  // Allocations strictly larger than this go to their own chunk in
  // |oversize_| (see allocImpl); disableOversize() sets it to SIZE_MAX.
  size_t oversizeThreshold_;

  // Size of all chunks in chunks_, oversize_, unused_ lists.
  size_t curSize_;
  // High-water mark of curSize_, maintained by incrementCurSize().
  size_t peakSize_;

  // Size of all chunks containing small bump allocations. This heuristic is
  // used to compute growth rate while ignoring chunks such as oversized,
  // now-unused, or transferred (which followed their own growth patterns).
  size_t smallAllocsSize_;

  // Arena to use for the allocations from this LifoAlloc. This is typically
  // MallocArena for main-thread-focused LifoAllocs and BackgroundMallocArena
  // for background-thread-focused LifoAllocs.
  // If you are unsure at the time of authorship whether this LifoAlloc will be
  // mostly on or mostly off the main thread, just take a guess, and that
  // will be fine. There should be no serious consequences for getting this
  // wrong unless your system is very hot and makes heavy use of its LifoAlloc.
  // In that case, run both options through a try run of Speedometer 3 or
  // whatever is most current and pick whichever performs better.
  arena_id_t arena_;

#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
  // When true, alloc() may simulate OOM failures (see JS_OOM_POSSIBLY_FAIL).
  bool fallibleScope_;
#endif

  // Not copyable: chunk lists have unique ownership of their memory.
  void operator=(const LifoAlloc&) = delete;
  LifoAlloc(const LifoAlloc&) = delete;

  // Return a BumpChunk that can perform an allocation of at least size |n|.
  UniqueBumpChunk newChunkWithCapacity(size_t n, bool oversize);

  // Reuse or allocate a BumpChunk that can perform an allocation of at least
  // size |n|, if successful it is placed at the end the list of |chunks_|.
  UniqueBumpChunk getOrCreateChunk(size_t n);

  // (Re-)initialize bookkeeping state; also called from the constructor.
  void reset(size_t defaultChunkSize);

  // Append unused chunks to the end of this LifoAlloc.
  void appendUnused(BumpChunkList&& otherUnused) {
#ifdef DEBUG
    // Chunks moved onto the unused list must not hold live allocations.
    for (detail::BumpChunk& bc : otherUnused) {
      MOZ_ASSERT(bc.empty());
    }
#endif
    unused_.appendAll(std::move(otherUnused));
  }

  // Append used chunks to the end of this LifoAlloc. We act as if all the
  // chunks in |this| are used, even if they're not, so memory may be wasted.
  void appendUsed(BumpChunkList&& otherChunks) {
    chunks_.appendAll(std::move(otherChunks));
  }

  // Track the amount of space allocated in used and unused chunks.
  void incrementCurSize(size_t size) {
    curSize_ += size;
    if (curSize_ > peakSize_) {
      peakSize_ = curSize_;
    }
  }
  void decrementCurSize(size_t size) {
    MOZ_ASSERT(curSize_ >= size);
    curSize_ -= size;
    // The small-allocation heuristic must never exceed the total size.
    MOZ_ASSERT(curSize_ >= smallAllocsSize_);
  }

  // Out-of-line slow paths for allocImpl (defined elsewhere).
  void* allocImplColdPath(size_t n);
  void* allocImplOversize(size_t n);

  // Core allocation routine: bump the last chunk when possible, otherwise
  // fall back to the cold path (new/reused chunk) or the oversize path.
  MOZ_ALWAYS_INLINE
  void* allocImpl(size_t n) {
    void* result;
    // Give oversized allocations their own chunk instead of wasting space
    // due to fragmentation at the end of normal chunk.
    if (MOZ_UNLIKELY(n > oversizeThreshold_)) {
      return allocImplOversize(n);
    }
    if (MOZ_LIKELY(!chunks_.empty() &&
                   (result = chunks_.last()->tryAlloc(n)))) {
      return result;
    }
    return allocImplColdPath(n);
  }

  // Check for space in unused chunks or allocate a new unused chunk.
  [[nodiscard]] bool ensureUnusedApproximateColdPath(size_t n, size_t total);

 public:
  LifoAlloc(size_t defaultChunkSize, arena_id_t arena)
      : peakSize_(0),
        arena_(arena)
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
        ,
        fallibleScope_(true)
#endif
  {
    // NOTE(review): the remaining counters (markCount, curSize_,
    // oversizeThreshold_, ...) are expected to be initialized by reset(),
    // which is defined out of line.
    reset(defaultChunkSize);
  }

  // Set the threshold to allocate data in its own chunk outside the space for
  // small allocations.
  void disableOversize() { oversizeThreshold_ = SIZE_MAX; }
  void setOversizeThreshold(size_t oversizeThreshold) {
    MOZ_ASSERT(oversizeThreshold <= defaultChunkSize_);
    oversizeThreshold_ = oversizeThreshold;
  }

  // Steal allocated chunks from |other|.
  void steal(LifoAlloc* other);

  // Append all chunks from |other|. They are removed from |other|.
  void transferFrom(LifoAlloc* other);

  // Append unused chunks from |other|. They are removed from |other|.
  void transferUnusedFrom(LifoAlloc* other);

  ~LifoAlloc() { freeAll(); }

  size_t defaultChunkSize() const { return defaultChunkSize_; }

  // Frees all held memory.
  void freeAll();

  // Free everything, but only when nothing is marked and the total footprint
  // exceeds the HUGE_ALLOCATION threshold.
  void freeAllIfHugeAndUnused() {
    if (markCount == 0 && isHuge()) {
      freeAll();
    }
  }

  // Allocate |n| bytes; returns nullptr on failure (and, in debug builds,
  // on simulated OOM while in a fallible scope).
  MOZ_ALWAYS_INLINE
  void* alloc(size_t n) {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    // Only simulate OOMs when we are not using the LifoAlloc as an
    // infallible allocator.
    if (fallibleScope_) {
      JS_OOM_POSSIBLY_FAIL();
    }
#endif
    return allocImpl(n);
  }

  // Allocates |n| bytes if we can guarantee that we will have
  // |needed| unused bytes remaining. Returns nullptr if we can't.
  // This is useful for maintaining our ballast invariants while
  // attempting fallible allocations.
  MOZ_ALWAYS_INLINE
  void* allocEnsureUnused(size_t n, size_t needed) {
    JS_OOM_POSSIBLY_FAIL();
    MOZ_ASSERT(fallibleScope_);

    // Mark first so the allocation can be rolled back if the ballast
    // requirement cannot be satisfied afterwards.
    Mark m = mark();
    void* result = allocImpl(n);
    if (!ensureUnusedApproximate(needed)) {
      release(m);
      return nullptr;
    }
    cancelMark(m);
    return result;
  }

  // Allocate |n| bytes and placement-new a T inside them. |n| may exceed
  // sizeof(T) to reserve trailing space after the object.
  template <typename T, typename... Args>
  MOZ_ALWAYS_INLINE auto newWithSize(size_t n, Args&&... args)
      -> js::lifo_alloc_pointer<T*> {
    MOZ_ASSERT(n >= sizeof(T), "must request enough space to store a T");
    static_assert(alignof(T) <= detail::LIFO_ALLOC_ALIGN,
                  "LifoAlloc must provide enough alignment to store T");
    void* ptr = alloc(n);
    if (!ptr) {
      return nullptr;
    }

    return new (ptr) T(std::forward<Args>(args)...);
  }

  // Like alloc(), but crashes the process instead of returning nullptr.
  MOZ_ALWAYS_INLINE
  void* allocInfallible(size_t n) {
    AutoEnterOOMUnsafeRegion oomUnsafe;
    if (void* result = allocImpl(n)) {
      return result;
    }
    oomUnsafe.crash("LifoAlloc::allocInfallible");
    return nullptr;
  }

  // Ensures that enough space exists to satisfy N bytes worth of
  // allocation requests, not necessarily contiguous. Note that this does
  // not guarantee a successful single allocation of N bytes.
  [[nodiscard]] MOZ_ALWAYS_INLINE bool ensureUnusedApproximate(size_t n) {
    AutoFallibleScope fallibleAllocator(this);
    size_t total = 0;
    if (!chunks_.empty()) {
      total += chunks_.last()->unused();
      if (total >= n) {
        return true;
      }
    }

    return ensureUnusedApproximateColdPath(n, total);
  }

  // Disable OOM simulation for all subsequent allocations (debug builds);
  // use AutoFallibleScope to re-enable it for a bounded region.
  MOZ_ALWAYS_INLINE
  void setAsInfallibleByDefault() {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    fallibleScope_ = false;
#endif
  }

  // RAII guard that forces the fallible (OOM-simulating) mode for its
  // lifetime, restoring the previous mode on destruction.
  class MOZ_NON_TEMPORARY_CLASS AutoFallibleScope {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    LifoAlloc* lifoAlloc_;
    bool prevFallibleScope_;

   public:
    explicit AutoFallibleScope(LifoAlloc* lifoAlloc) {
      lifoAlloc_ = lifoAlloc;
      prevFallibleScope_ = lifoAlloc->fallibleScope_;
      lifoAlloc->fallibleScope_ = true;
    }

    ~AutoFallibleScope() { lifoAlloc_->fallibleScope_ = prevFallibleScope_; }
#else
   public:
    explicit AutoFallibleScope(LifoAlloc*) {}
#endif
  };

  // Allocate an array of |count| T. T must be trivial because neither
  // constructors nor destructors are run by the LifoAlloc.
  template <typename T>
  T* newArray(size_t count) {
    static_assert(std::is_trivial_v<T>,
                  "T must be trivially constructible so that constructors need "
                  "not be called");
    static_assert(std::is_trivially_destructible_v<T>,
                  "T must be trivially destructible so destructors don't need "
                  "to be called when the LifoAlloc is freed");
    return newArrayUninitialized<T>(count);
  }

  // Create an array with uninitialized elements of type |T|.
  // The caller is responsible for initialization.
  // Returns nullptr on size overflow or allocation failure.
  template <typename T>
  T* newArrayUninitialized(size_t count) {
    size_t bytes;
    if (MOZ_UNLIKELY(!CalculateAllocSize<T>(count, &bytes))) {
      return nullptr;
    }
    return static_cast<T*>(alloc(bytes));
  }

  // Opaque token capturing a position in both the small-allocation and
  // oversize chunk lists; obtained from mark(), consumed by release().
  class Mark {
    friend class LifoAlloc;
    detail::BumpChunk::Mark chunk;
    detail::BumpChunk::Mark oversize;
  };

  // Note: MOZ_NEVER_INLINE is a work around for a Clang 9 (PGO) miscompilation.
  // See bug 1583907.
  MOZ_NEVER_INLINE Mark mark();

  // Discard everything allocated since |mark| was taken.
  void release(Mark mark);

  // Drop a mark without releasing its allocations. The token itself is
  // unused; only the outstanding-mark counter is updated.
  void cancelMark(Mark mark) { markCount--; }

  // Release every allocation; small chunks are retained on the unused list
  // for reuse, oversize chunks are freed immediately.
  void releaseAll() {
    MOZ_ASSERT(!markCount);

    // When releasing all chunks, we can no longer determine which chunks were
    // transferred and which were not, so simply clear the heuristic to zero
    // right away.
    smallAllocsSize_ = 0;

    for (detail::BumpChunk& bc : chunks_) {
      bc.release();
    }
    unused_.appendAll(std::move(chunks_));

    // On release, we free any oversize allocations instead of keeping them
    // in unused chunks.
    while (!oversize_.empty()) {
      UniqueBumpChunk bc = oversize_.popFirst();
      decrementCurSize(bc->computedSizeOfIncludingThis());
    }
  }

  // Protect the content of the LifoAlloc chunks.
#ifdef LIFO_CHUNK_PROTECT
  void setReadOnly();
  void setReadWrite();
#else
  void setReadOnly() const {}
  void setReadWrite() const {}
#endif

  // Get the total "used" (occupied bytes) count for the arena chunks.
  // Note: only counts small-allocation chunks, not |oversize_|.
  size_t used() const {
    size_t accum = 0;
    for (const detail::BumpChunk& chunk : chunks_) {
      accum += chunk.used();
    }
    return accum;
  }

  // Return true if the LifoAlloc does not currently contain any allocations.
  bool isEmpty() const {
    bool empty = chunks_.empty() ||
                 (chunks_.begin() == chunks_.last() && chunks_.last()->empty());
    // Oversize chunks hold exactly one allocation, so a non-empty oversize
    // list implies a non-empty last chunk.
    MOZ_ASSERT_IF(!oversize_.empty(), !oversize_.last()->empty());
    return empty && oversize_.empty();
  }

  // Footprint (in bytes) above which freeAllIfHugeAndUnused() reclaims.
  static const unsigned HUGE_ALLOCATION = 50 * 1024 * 1024;
  bool isHuge() const { return curSize_ > HUGE_ALLOCATION; }

  // Return the number of bytes remaining to allocate in the current chunk.
  // e.g. How many bytes we can allocate before needing a new block.
  size_t availableInCurrentChunk() const {
    if (chunks_.empty()) {
      return 0;
    }
    return chunks_.last()->unused();
  }

  // Get the total size of the arena chunks (including unused space).
  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    size_t n = 0;
    for (const detail::BumpChunk& chunk : chunks_) {
      n += chunk.sizeOfIncludingThis(mallocSizeOf);
    }
    for (const detail::BumpChunk& chunk : oversize_) {
      n += chunk.sizeOfIncludingThis(mallocSizeOf);
    }
    for (const detail::BumpChunk& chunk : unused_) {
      n += chunk.sizeOfIncludingThis(mallocSizeOf);
    }
    return n;
  }

  // Like sizeOfExcludingThis(), but includes the size of the LifoAlloc itself.
  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
  }

  // Get the current size of the arena chunks (including unused space and
  // bookkeeping space).
  size_t computedSizeOfExcludingThis() const { return curSize_; }

  // Get the peak size of the arena chunks (including unused space and
  // bookkeeping space).
  size_t peakSizeOfExcludingThis() const { return peakSize_; }

  // Doesn't perform construction; useful for lazily-initialized POD types.
  template <typename T>
  MOZ_ALWAYS_INLINE T* pod_malloc() {
    return static_cast<T*>(alloc(sizeof(T)));
  }

  JS_DECLARE_NEW_METHODS(new_, alloc, MOZ_ALWAYS_INLINE)
  JS_DECLARE_NEW_METHODS(newInfallible, allocInfallible, MOZ_ALWAYS_INLINE)

#ifdef DEBUG
  // Debug-only: true if |ptr| lives inside any used (small or oversize)
  // chunk of this LifoAlloc.
  bool contains(const void* ptr) const {
    for (const detail::BumpChunk& chunk : chunks_) {
      if (chunk.contains(ptr)) {
        return true;
      }
    }
    for (const detail::BumpChunk& chunk : oversize_) {
      if (chunk.contains(ptr)) {
        return true;
      }
    }
    return false;
  }
#endif

  // Iterate over the data allocated in a LifoAlloc, and interpret it.
  class Enum {
    friend class LifoAlloc;
    friend class detail::BumpChunk;

    // Iterator over the list of bump chunks.
    BumpChunkList::Iterator chunkIt_;
    BumpChunkList::Iterator chunkEnd_;
    // Read head (must be within chunk_).
    uint8_t* head_;

    // If there is not enough room in the remaining block for |size|,
    // advance to the next block and update the position.
    uint8_t* seekBaseAndAdvanceBy(size_t size) {
      MOZ_ASSERT(!empty());

      uint8_t* aligned = detail::BumpChunk::nextAllocBase(head_);
      if (detail::BumpChunk::nextAllocEnd(aligned, size) > chunkIt_->end()) {
        ++chunkIt_;
        aligned = chunkIt_->begin();
        // The current code assumes that if we have a chunk, then we
        // have allocated something in it.
        MOZ_ASSERT(!chunkIt_->empty());
      }
      head_ = detail::BumpChunk::nextAllocEnd(aligned, size);
      MOZ_ASSERT(head_ <= chunkIt_->end());
      return aligned;
    }

   public:
    explicit Enum(LifoAlloc& alloc)
        : chunkIt_(alloc.chunks_.begin()),
          chunkEnd_(alloc.chunks_.end()),
          head_(nullptr) {
      // Enumeration does not support oversize chunks.
      MOZ_RELEASE_ASSERT(alloc.oversize_.empty());
      if (chunkIt_ != chunkEnd_) {
        head_ = chunkIt_->begin();
      }
    }

    // Return true if there are no more bytes to enumerate.
    bool empty() {
      return chunkIt_ == chunkEnd_ ||
             (chunkIt_->next() == chunkEnd_.get() && head_ >= chunkIt_->end());
    }

    // Move the read position forward by the size of one T.
    template <typename T>
    T* read(size_t size = sizeof(T)) {
      return reinterpret_cast<T*>(read(size));
    }

    // Return a pointer to the item at the current position. This returns a
    // pointer to the inline storage, not a copy, and moves the read-head by
    // the requested |size|.
    void* read(size_t size) { return seekBaseAndAdvanceBy(size); }
  };
};
   1153 
   1154 class MOZ_NON_TEMPORARY_CLASS LifoAllocScope {
   1155  LifoAlloc* lifoAlloc;
   1156  LifoAlloc::Mark mark;
   1157  LifoAlloc::AutoFallibleScope fallibleScope;
   1158 
   1159 public:
   1160  explicit LifoAllocScope(LifoAlloc* lifoAlloc)
   1161      : lifoAlloc(lifoAlloc),
   1162        mark(lifoAlloc->mark()),
   1163        fallibleScope(lifoAlloc) {}
   1164 
   1165  ~LifoAllocScope() {
   1166    lifoAlloc->release(mark);
   1167 
   1168    /*
   1169     * The parser can allocate enormous amounts of memory for large functions.
   1170     * Eagerly free the memory now (which otherwise won't be freed until the
   1171     * next GC) to avoid unnecessary OOMs.
   1172     */
   1173    lifoAlloc->freeAllIfHugeAndUnused();
   1174  }
   1175 
   1176  LifoAlloc& alloc() { return *lifoAlloc; }
   1177 };
   1178 
// Selects how LifoAllocPolicy reacts to OOM: Fallible returns nullptr
// (via LifoAlloc::alloc), Infallible crashes (via allocInfallible).
enum Fallibility { Fallible, Infallible };
   1180 
   1181 template <Fallibility fb>
   1182 class LifoAllocPolicy {
   1183  LifoAlloc& alloc_;
   1184 
   1185 public:
   1186  MOZ_IMPLICIT LifoAllocPolicy(LifoAlloc& alloc) : alloc_(alloc) {}
   1187  template <typename T>
   1188  T* maybe_pod_malloc(size_t numElems) {
   1189    size_t bytes;
   1190    if (MOZ_UNLIKELY(!CalculateAllocSize<T>(numElems, &bytes))) {
   1191      return nullptr;
   1192    }
   1193    void* p =
   1194        fb == Fallible ? alloc_.alloc(bytes) : alloc_.allocInfallible(bytes);
   1195    return static_cast<T*>(p);
   1196  }
   1197  template <typename T>
   1198  T* maybe_pod_calloc(size_t numElems) {
   1199    T* p = maybe_pod_malloc<T>(numElems);
   1200    if (MOZ_UNLIKELY(!p)) {
   1201      return nullptr;
   1202    }
   1203    memset(p, 0, numElems * sizeof(T));
   1204    return p;
   1205  }
   1206  template <typename T>
   1207  T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
   1208    T* n = maybe_pod_malloc<T>(newSize);
   1209    if (MOZ_UNLIKELY(!n)) {
   1210      return nullptr;
   1211    }
   1212    size_t oldLength;
   1213    [[maybe_unused]] bool nooverflow =
   1214        mozilla::SafeMul(oldSize, sizeof(T), &oldLength);
   1215    MOZ_ASSERT(nooverflow);
   1216    memcpy(n, p, std::min(oldLength, newSize * sizeof(T)));
   1217    return n;
   1218  }
   1219  template <typename T>
   1220  T* pod_malloc(size_t numElems) {
   1221    return maybe_pod_malloc<T>(numElems);
   1222  }
   1223  template <typename T>
   1224  T* pod_calloc(size_t numElems) {
   1225    return maybe_pod_calloc<T>(numElems);
   1226  }
   1227  template <typename T>
   1228  T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
   1229    return maybe_pod_realloc<T>(p, oldSize, newSize);
   1230  }
   1231  template <typename T>
   1232  void free_(T* p, size_t numElems) {}
   1233  void reportAllocOverflow() const {}
   1234  [[nodiscard]] bool checkSimulatedOOM() const {
   1235    return fb == Infallible || !js::oom::ShouldFailWithOOM();
   1236  }
   1237 };
   1238 
   1239 }  // namespace js
   1240 
   1241 #endif /* ds_LifoAlloc_h */