tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Vector.h (50667B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 /* A type/length-parametrized vector class. */
      8 
      9 #ifndef mozilla_Vector_h
     10 #define mozilla_Vector_h
     11 
     12 #include <new>  // for placement new
     13 #include <type_traits>
     14 #include <utility>
     15 
     16 #include "mozilla/AllocPolicy.h"
     17 #include "mozilla/ArrayUtils.h"  // for PointerRangeSize
     18 #include "mozilla/Assertions.h"
     19 #include "mozilla/Attributes.h"
     20 #include "mozilla/CheckedArithmetic.h"
     21 #include "mozilla/Likely.h"
     22 #include "mozilla/MathAlgorithms.h"
     23 #include "mozilla/MemoryReporting.h"
     24 #include "mozilla/OperatorNewExtensions.h"
     25 #include "mozilla/ReentrancyGuard.h"
     26 #include "mozilla/Span.h"
     27 
     28 namespace mozilla {
     29 
     30 template <typename T, size_t N, class AllocPolicy>
     31 class Vector;
     32 
     33 namespace detail {
     34 
     35 /*
     36 * Check that the given capacity wastes the minimal amount of space if
     37 * allocated on the heap. This means that aCapacity*EltSize is as close to a
     38 * power-of-two as possible. growStorageBy() is responsible for ensuring this.
     39 */
     40 template <size_t EltSize>
     41 static bool CapacityHasExcessSpace(size_t aCapacity) {
     42  size_t size = aCapacity * EltSize;
     43  return RoundUpPow2(size) - size >= EltSize;
     44 }
     45 
     46 /*
     47 * AllocPolicy can optionally provide a `computeGrowth<T>(size_t aOldElts,
     48 * size_t aIncr)` method that returns the new number of elements to allocate
     49 * when the current capacity is `aOldElts` and `aIncr` more are being
     50 * requested. If the AllocPolicy does not have such a method, a fallback
     51 * will be used that mostly will just round the new requested capacity up to
     52 * the next power of two, which results in doubling capacity for the most part.
     53 *
     54 * If the new size would overflow some limit, `computeGrowth` returns 0.
     55 *
     56 * A simpler way would be to make computeGrowth() part of the API for all
     57 * AllocPolicy classes, but this turns out to be rather complex because
     58 * mozalloc.h defines a very widely-used InfallibleAllocPolicy, and yet it
     59 * can only be compiled in limited contexts, eg within `extern "C"` and with
     60 * -std=c++11 rather than a later version. That makes the headers that are
     61 * necessary for the computation unavailable (eg mfbt/MathAlgorithms.h).
     62 */
     63 
     64 // Fallback version.
     65 template <size_t EltSize>
     66 inline size_t GrowEltsByDoubling(size_t aOldElts, size_t aIncr) {
     67  /*
     68   * When choosing a new capacity, its size in bytes should is as close to 2**N
     69   * bytes as possible.  2**N-sized requests are best because they are unlikely
     70   * to be rounded up by the allocator.  Asking for a 2**N number of elements
     71   * isn't as good, because if EltSize is not a power-of-two that would
     72   * result in a non-2**N request size.
     73   */
     74 
     75  if (aIncr == 1) {
     76    if (aOldElts == 0) {
     77      return 1;
     78    }
     79 
     80    /* This case occurs in ~15--20% of the calls to Vector::growStorageBy. */
     81 
     82    /*
     83     * Will aOldSize * 4 * sizeof(T) overflow?  This condition limits a
     84     * collection to 1GB of memory on a 32-bit system, which is a reasonable
     85     * limit.  It also ensures that
     86     *
     87     *   static_cast<char*>(end()) - static_cast<char*>(begin())
     88     *
     89     * for a Vector doesn't overflow ptrdiff_t (see bug 510319).
     90     */
     91    [[maybe_unused]] size_t tmp;
     92    if (MOZ_UNLIKELY(!mozilla::SafeMul(aOldElts, 4 * EltSize, &tmp))) {
     93      return 0;
     94    }
     95 
     96    /*
     97     * If we reach here, the existing capacity will have a size that is already
     98     * as close to 2^N as sizeof(T) will allow.  Just double the capacity, and
     99     * then there might be space for one more element.
    100     */
    101    size_t newElts = aOldElts * 2;
    102    if (CapacityHasExcessSpace<EltSize>(newElts)) {
    103      newElts += 1;
    104    }
    105    return newElts;
    106  }
    107 
    108  /* This case occurs in ~2% of the calls to Vector::growStorageBy. */
    109  size_t newMinCap = aOldElts + aIncr;
    110 
    111  /* Did aOldElts + aIncr overflow?  Will newMinCap * EltSize rounded up to the
    112   * next power of two overflow PTRDIFF_MAX? */
    113  [[maybe_unused]] size_t tmp;
    114  if (MOZ_UNLIKELY(newMinCap < aOldElts ||
    115                   !mozilla::SafeMul(newMinCap, 4 * EltSize, &tmp))) {
    116    return 0;
    117  }
    118 
    119  size_t newMinSize = newMinCap * EltSize;
    120  size_t newSize = RoundUpPow2(newMinSize);
    121  return newSize / EltSize;
    122 };
    123 
    124 // Fallback version.
    125 template <typename AP, size_t EltSize>
    126 static size_t ComputeGrowth(size_t aOldElts, size_t aIncr, int) {
    127  return GrowEltsByDoubling<EltSize>(aOldElts, aIncr);
    128 }
    129 
    130 // If the AllocPolicy provides its own computeGrowth<EltSize> implementation,
    131 // use that.
// Expression SFINAE: this overload participates in overload resolution only
// when `AP::computeGrowth<EltSize>(size_t, size_t)` is well-formed.  The
// comma operator discards the probing expression and leaves `bool` as the
// selector parameter's type.
template <typename AP, size_t EltSize>
static size_t ComputeGrowth(
    size_t aOldElts, size_t aIncr,
    decltype(std::declval<AP>().template computeGrowth<EltSize>(0, 0),
             bool()) aOverloadSelector) {
  // Delegate the growth computation to the AllocPolicy itself.
  size_t newElts = AP::template computeGrowth<EltSize>(aOldElts, aIncr);
  // The policy must honor the same overflow limits as the fallback
  // (see bug 510319): neither the count nor the byte size may exceed
  // PTRDIFF_MAX.
  MOZ_ASSERT(newElts <= PTRDIFF_MAX && newElts * EltSize <= PTRDIFF_MAX,
             "invalid Vector size (see bug 510319)");
  return newElts;
}
    142 
    143 /*
    144 * This template class provides a default implementation for vector operations
    145 * when the element type is not known to be a POD, as judged by IsPod.
    146 */
    147 template <typename T, size_t N, class AP, bool IsPod>
    148 struct VectorImpl {
    149  /*
    150   * Constructs an object in the uninitialized memory at *aDst with aArgs.
    151   */
    152  template <typename... Args>
    153  MOZ_NONNULL(1)
    154  static inline void new_(T* aDst, Args&&... aArgs) {
    155    new (KnownNotNull, aDst) T(std::forward<Args>(aArgs)...);
    156  }
    157 
    158  /* Destroys constructed objects in the range [aBegin, aEnd). */
    159  static inline void destroy(T* aBegin, T* aEnd) {
    160    MOZ_ASSERT(aBegin <= aEnd);
    161    for (T* p = aBegin; p < aEnd; ++p) {
    162      p->~T();
    163    }
    164  }
    165 
    166  /* Constructs objects in the uninitialized range [aBegin, aEnd). */
    167  static inline void initialize(T* aBegin, T* aEnd) {
    168    MOZ_ASSERT(aBegin <= aEnd);
    169    for (T* p = aBegin; p < aEnd; ++p) {
    170      new_(p);
    171    }
    172  }
    173 
    174  /*
    175   * Copy-constructs objects in the uninitialized range
    176   * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
    177   */
    178  template <typename U>
    179  static inline void copyConstruct(T* aDst, const U* aSrcStart,
    180                                   const U* aSrcEnd) {
    181    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    182    for (const U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
    183      new_(aDst, *p);
    184    }
    185  }
    186 
    187  /*
    188   * Move-constructs objects in the uninitialized range
    189   * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
    190   */
    191  template <typename U>
    192  static inline void moveConstruct(T* aDst, U* aSrcStart, U* aSrcEnd) {
    193    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    194    for (U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
    195      new_(aDst, std::move(*p));
    196    }
    197  }
    198 
    199  /*
    200   * Copy-constructs objects in the uninitialized range [aDst, aDst+aN) from
    201   * the same object aU.
    202   */
    203  template <typename U>
    204  static inline void copyConstructN(T* aDst, size_t aN, const U& aU) {
    205    for (T* end = aDst + aN; aDst < end; ++aDst) {
    206      new_(aDst, aU);
    207    }
    208  }
    209 
    210  /*
    211   * Grows the given buffer to have capacity aNewCap, preserving the objects
    212   * constructed in the range [begin, end) and updating aV. Assumes that (1)
    213   * aNewCap has not overflowed, and (2) multiplying aNewCap by sizeof(T) will
    214   * not overflow.
    215   */
    216  [[nodiscard]] static inline bool growTo(Vector<T, N, AP>& aV,
    217                                          size_t aNewCap) {
    218    MOZ_ASSERT(!aV.usingInlineStorage());
    219    MOZ_ASSERT(!CapacityHasExcessSpace<sizeof(T)>(aNewCap));
    220    T* newbuf = aV.template pod_malloc<T>(aNewCap);
    221    if (MOZ_UNLIKELY(!newbuf)) {
    222      return false;
    223    }
    224    T* dst = newbuf;
    225    T* src = aV.beginNoCheck();
    226    for (; src < aV.endNoCheck(); ++dst, ++src) {
    227      new_(dst, std::move(*src));
    228    }
    229    VectorImpl::destroy(aV.beginNoCheck(), aV.endNoCheck());
    230    aV.free_(aV.mBegin, aV.mTail.mCapacity);
    231    aV.mBegin = newbuf;
    232    /* aV.mLength is unchanged. */
    233    aV.mTail.mCapacity = aNewCap;
    234    return true;
    235  }
    236 };
    237 
    238 /*
    239 * This partial template specialization provides a default implementation for
    240 * vector operations when the element type is known to be a POD, as judged by
    241 * IsPod.
    242 */
    243 template <typename T, size_t N, class AP>
    244 struct VectorImpl<T, N, AP, true> {
    245  template <typename... Args>
    246  MOZ_NONNULL(1)
    247  static inline void new_(T* aDst, Args&&... aArgs) {
    248    // Explicitly construct a local object instead of using a temporary since
    249    // T(args...) will be treated like a C-style cast in the unary case and
    250    // allow unsafe conversions. Both forms should be equivalent to an
    251    // optimizing compiler.
    252    T temp(std::forward<Args>(aArgs)...);
    253    *aDst = temp;
    254  }
    255 
    256  static inline void destroy(T*, T*) {}
    257 
    258  static inline void initialize(T* aBegin, T* aEnd) {
    259    /*
    260     * You would think that memset would be a big win (or even break even)
    261     * when we know T is a POD. But currently it's not. This is probably
    262     * because |append| tends to be given small ranges and memset requires
    263     * a function call that doesn't get inlined.
    264     *
    265     * memset(aBegin, 0, sizeof(T) * (aEnd - aBegin));
    266     */
    267    MOZ_ASSERT(aBegin <= aEnd);
    268    for (T* p = aBegin; p < aEnd; ++p) {
    269      new_(p);
    270    }
    271  }
    272 
    273  template <typename U>
    274  static inline void copyConstruct(T* aDst, const U* aSrcStart,
    275                                   const U* aSrcEnd) {
    276    /*
    277     * See above memset comment. Also, notice that copyConstruct is
    278     * currently templated (T != U), so memcpy won't work without
    279     * requiring T == U.
    280     *
    281     * memcpy(aDst, aSrcStart, sizeof(T) * (aSrcEnd - aSrcStart));
    282     */
    283    MOZ_ASSERT(aSrcStart <= aSrcEnd);
    284    for (const U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
    285      new_(aDst, *p);
    286    }
    287  }
    288 
    289  template <typename U>
    290  static inline void moveConstruct(T* aDst, const U* aSrcStart,
    291                                   const U* aSrcEnd) {
    292    copyConstruct(aDst, aSrcStart, aSrcEnd);
    293  }
    294 
    295  static inline void copyConstructN(T* aDst, size_t aN, const T& aT) {
    296    for (T* end = aDst + aN; aDst < end; ++aDst) {
    297      new_(aDst, aT);
    298    }
    299  }
    300 
    301  [[nodiscard]] static inline bool growTo(Vector<T, N, AP>& aV,
    302                                          size_t aNewCap) {
    303    MOZ_ASSERT(!aV.usingInlineStorage());
    304    MOZ_ASSERT(!CapacityHasExcessSpace<sizeof(T)>(aNewCap));
    305    T* newbuf =
    306        aV.template pod_realloc<T>(aV.mBegin, aV.mTail.mCapacity, aNewCap);
    307    if (MOZ_UNLIKELY(!newbuf)) {
    308      return false;
    309    }
    310    aV.mBegin = newbuf;
    311    /* aV.mLength is unchanged. */
    312    aV.mTail.mCapacity = aNewCap;
    313    return true;
    314  }
    315 };
    316 
    317 // A struct for TestVector.cpp to access private internal fields.
    318 // DO NOT DEFINE IN YOUR OWN CODE.
    319 struct VectorTesting;
    320 
    321 }  // namespace detail
    322 
    323 /*
    324 * STL-like container providing a short-lived, dynamic buffer.  Vector calls the
    325 * constructors/destructors of all elements stored in its internal buffer, so
    326 * non-PODs may be safely used.  Additionally, Vector will store the first N
    327 * elements in-place before resorting to dynamic allocation.
    328 *
    329 * T requirements:
    330 *  - default and copy constructible, assignable, destructible
    331 *  - operations do not throw
    332 * MinInlineCapacity requirements:
    333 *  - any value, however, MinInlineCapacity is clamped to min/max values
    334 * AllocPolicy:
    335 *  - see "Allocation policies" in AllocPolicy.h (defaults to
    336 *    mozilla::MallocAllocPolicy)
    337 *
    338 * Vector is not reentrant: T member functions called during Vector member
    339 * functions must not call back into the same object!
    340 */
    341 template <typename T, size_t MinInlineCapacity = 0,
    342          class AllocPolicy = MallocAllocPolicy>
    343 class MOZ_NON_PARAM MOZ_GSL_OWNER Vector final : private AllocPolicy {
    344  /* utilities */
    345  static constexpr bool kElemIsPod =
    346      std::is_trivial_v<T> && std::is_standard_layout_v<T>;
    347  using Impl =
    348      detail::VectorImpl<T, MinInlineCapacity, AllocPolicy, kElemIsPod>;
    349  friend struct detail::VectorImpl<T, MinInlineCapacity, AllocPolicy,
    350                                   kElemIsPod>;
    351 
    352  friend struct detail::VectorTesting;
    353 
    354  [[nodiscard]] bool growStorageBy(size_t aIncr);
    355  [[nodiscard]] bool convertToHeapStorage(size_t aNewCap);
    356  [[nodiscard]] bool maybeCheckSimulatedOOM(size_t aRequestedSize);
    357 
    358  /* magic constants */
    359 
    360  /**
    361   * The maximum space allocated for inline element storage.
    362   *
    363   * We reduce space by what the AllocPolicy base class and prior Vector member
    364   * fields likely consume to attempt to play well with binary size classes.
    365   */
    366  static constexpr size_t kMaxInlineBytes =
    367      1024 -
    368      (sizeof(AllocPolicy) + sizeof(T*) + sizeof(size_t) + sizeof(size_t));
    369 
    370  /**
    371   * The number of T elements of inline capacity built into this Vector.  This
    372   * is usually |MinInlineCapacity|, but it may be less (or zero!) for large T.
    373   *
    374   * We use a partially-specialized template (not explicit specialization, which
    375   * is only allowed at namespace scope) to compute this value.  The benefit is
    376   * that |sizeof(T)| need not be computed, and |T| doesn't have to be fully
    377   * defined at the time |Vector<T>| appears, if no inline storage is requested.
    378   */
    379  template <size_t MinimumInlineCapacity, size_t Dummy>
    380  struct ComputeCapacity {
    381    static constexpr size_t value =
    382        std::min(MinimumInlineCapacity, kMaxInlineBytes / sizeof(T));
    383  };
    384 
    385  template <size_t Dummy>
    386  struct ComputeCapacity<0, Dummy> {
    387    static constexpr size_t value = 0;
    388  };
    389 
    390  /** The actual inline capacity in number of elements T.  This may be zero! */
    391  static constexpr size_t kInlineCapacity =
    392      ComputeCapacity<MinInlineCapacity, 0>::value;
    393 
    394  /* member data */
    395 
    396  /*
    397   * Pointer to the buffer, be it inline or heap-allocated. Only [mBegin,
    398   * mBegin + mLength) hold valid constructed T objects. The range [mBegin +
    399   * mLength, mBegin + mCapacity) holds uninitialized memory. The range
    400   * [mBegin + mLength, mBegin + mReserved) also holds uninitialized memory
    401   * previously allocated by a call to reserve().
    402   */
    403  T* mBegin;
    404 
    405  /* Number of elements in the vector. */
    406  size_t mLength;
    407 
    408  /*
    409   * Memory used to store capacity, reserved element count (debug builds only),
    410   * and inline storage.  The simple "answer" is:
    411   *
    412   *   size_t mCapacity;
    413   *   #ifdef DEBUG
    414   *   size_t mReserved;
    415   *   #endif
    416   *   alignas(T) unsigned char mBytes[kInlineCapacity * sizeof(T)];
    417   *
    418   * but there are complications.  First, C++ forbids zero-sized arrays that
    419   * might result.  Second, we don't want zero capacity to affect Vector's size
    420   * (even empty classes take up a byte, unless they're base classes).
    421   *
    422   * Yet again, we eliminate the zero-sized array using partial specialization.
    423   * And we eliminate potential size hit by putting capacity/reserved in one
    424   * struct, then putting the array (if any) in a derived struct.  If no array
    425   * is needed, the derived struct won't consume extra space.
    426   */
    427  struct CapacityAndReserved {
    428    explicit CapacityAndReserved(size_t aCapacity, size_t aReserved)
    429        : mCapacity(aCapacity)
    430 #ifdef DEBUG
    431          ,
    432          mReserved(aReserved)
    433 #endif
    434    {
    435    }
    436    CapacityAndReserved() = default;
    437 
    438    /* Max number of elements storable in the vector without resizing. */
    439    size_t mCapacity;
    440 
    441 #ifdef DEBUG
    442    /* Max elements of reserved or used space in this vector. */
    443    size_t mReserved;
    444 #endif
    445  };
    446 
// Silence warnings about this struct possibly being padded due to the
// alignas() in it -- there's nothing we can do to avoid it.
    449 #ifdef _MSC_VER
    450 #  pragma warning(push)
    451 #  pragma warning(disable : 4324)
    452 #endif  // _MSC_VER
    453 
    454  template <size_t Capacity, size_t Dummy>
    455  struct CRAndStorage : CapacityAndReserved {
    456    explicit CRAndStorage(size_t aCapacity, size_t aReserved)
    457        : CapacityAndReserved(aCapacity, aReserved) {}
    458    CRAndStorage() = default;
    459 
    460    alignas(T) unsigned char mBytes[Capacity * sizeof(T)];
    461 
    462    // GCC fails due to -Werror=strict-aliasing if |mBytes| is directly cast to
    463    // T*.  Indirecting through this function addresses the problem.
    464    void* data() { return mBytes; }
    465 
    466    T* storage() { return static_cast<T*>(data()); }
    467  };
    468 
    469  template <size_t Dummy>
    470  struct CRAndStorage<0, Dummy> : CapacityAndReserved {
    471    explicit CRAndStorage(size_t aCapacity, size_t aReserved)
    472        : CapacityAndReserved(aCapacity, aReserved) {}
    473    CRAndStorage() = default;
    474 
    475    T* storage() {
    476      // If this returns |nullptr|, functions like |Vector::begin()| would too,
    477      // breaking callers that pass a vector's elements as pointer/length to
    478      // code that bounds its operation by length but (even just as a sanity
    479      // check) always wants a non-null pointer.  Fake up an aligned, non-null
    480      // pointer to support these callers.
    481      return reinterpret_cast<T*>(sizeof(T));
    482    }
    483  };
    484 
    485  CRAndStorage<kInlineCapacity, 0> mTail;
    486 
    487 #ifdef _MSC_VER
    488 #  pragma warning(pop)
    489 #endif  // _MSC_VER
    490 
    491 #ifdef DEBUG
    492  friend class ReentrancyGuard;
    493  bool mEntered;
    494 #endif
    495 
    496  /* private accessors */
    497 
    498  bool usingInlineStorage() const {
    499    return mBegin == const_cast<Vector*>(this)->inlineStorage();
    500  }
    501 
    502  T* inlineStorage() { return mTail.storage(); }
    503 
    504  T* beginNoCheck() const { return mBegin; }
    505 
    506  T* endNoCheck() { return mBegin + mLength; }
    507 
    508  const T* endNoCheck() const { return mBegin + mLength; }
    509 
    510 #ifdef DEBUG
    511  /**
    512   * The amount of explicitly allocated space in this vector that is immediately
    513   * available to be filled by appending additional elements.  This value is
    514   * always greater than or equal to |length()| -- the vector's actual elements
    515   * are implicitly reserved.  This value is always less than or equal to
    516   * |capacity()|.  It may be explicitly increased using the |reserve()| method.
    517   */
    518  size_t reserved() const {
    519    MOZ_ASSERT(mLength <= mTail.mReserved);
    520    MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
    521    return mTail.mReserved;
    522  }
    523 #endif
    524 
    525  bool internalEnsureCapacity(size_t aNeeded);
    526 
    527  /* Append operations guaranteed to succeed due to pre-reserved space. */
    528  template <typename U>
    529  void internalAppend(U&& aU);
    530  template <typename U, size_t O, class BP>
    531  void internalAppendAll(const Vector<U, O, BP>& aU);
    532  void internalAppendN(const T& aT, size_t aN);
    533  template <typename U>
    534  void internalAppend(const U* aBegin, size_t aLength);
    535  template <typename U>
    536  void internalMoveAppend(U* aBegin, size_t aLength);
    537 
    538 public:
    539  static const size_t sMaxInlineStorage = MinInlineCapacity;
    540 
    541  using ElementType = T;
    542 
    543  explicit Vector(AllocPolicy);
    544  Vector() : Vector(AllocPolicy()) {}
    545 
    546  Vector(Vector&&);            /* Move constructor. */
    547  Vector& operator=(Vector&&); /* Move assignment. */
    548  ~Vector();
    549 
    550  /* accessors */
    551 
    552  const AllocPolicy& allocPolicy() const { return *this; }
    553 
    554  AllocPolicy& allocPolicy() { return *this; }
    555 
    556  enum { InlineLength = MinInlineCapacity };
    557 
    558  size_t length() const { return mLength; }
    559 
    560  bool empty() const { return mLength == 0; }
    561 
    562  size_t capacity() const { return mTail.mCapacity; }
    563 
    564  T* begin() {
    565    MOZ_ASSERT(!mEntered);
    566    return mBegin;
    567  }
    568 
    569  const T* begin() const {
    570    MOZ_ASSERT(!mEntered);
    571    return mBegin;
    572  }
    573 
    574  T* end() {
    575    MOZ_ASSERT(!mEntered);
    576    return mBegin + mLength;
    577  }
    578 
    579  const T* end() const {
    580    MOZ_ASSERT(!mEntered);
    581    return mBegin + mLength;
    582  }
    583 
    584  T& operator[](size_t aIndex) {
    585    MOZ_ASSERT(!mEntered);
    586    if (MOZ_UNLIKELY(aIndex >= mLength)) {
    587      mozilla::detail::InvalidArrayIndex_CRASH(aIndex, mLength);
    588    }
    589    return begin()[aIndex];
    590  }
    591 
    592  const T& operator[](size_t aIndex) const {
    593    MOZ_ASSERT(!mEntered);
    594    if (MOZ_UNLIKELY(aIndex >= mLength)) {
    595      mozilla::detail::InvalidArrayIndex_CRASH(aIndex, mLength);
    596    }
    597    return begin()[aIndex];
    598  }
    599 
    600  T& back() {
    601    MOZ_ASSERT(!mEntered);
    602    if (MOZ_UNLIKELY(empty())) {
    603      mozilla::detail::InvalidArrayIndex_CRASH(0, 0);
    604    }
    605    return *(end() - 1);
    606  }
    607 
    608  const T& back() const {
    609    MOZ_ASSERT(!mEntered);
    610    if (MOZ_UNLIKELY(empty())) {
    611      mozilla::detail::InvalidArrayIndex_CRASH(0, 0);
    612    }
    613    return *(end() - 1);
    614  }
    615 
    616  operator mozilla::Span<const T>() const {
    617    // Explicitly specify template argument here to avoid instantiating Span<T>
    618    // first and then implicitly converting to Span<const T>
    619    return mozilla::Span<const T>{mBegin, mLength};
    620  }
    621 
    622  operator mozilla::Span<T>() { return mozilla::Span{mBegin, mLength}; }
    623 
  /**
   * A mutable cursor over a span of the vector's elements, advancing from
   * aCur toward aEnd.  Holds raw pointers into the vector's buffer, so it
   * must not outlive changes to the vector's storage.
   */
  class Range {
    friend class Vector;
    T* mCur;  // next element to visit
    T* mEnd;  // one past the last element
    // Only Vector can construct a Range (see all()).
    Range(T* aCur, T* aEnd) : mCur(aCur), mEnd(aEnd) {
      MOZ_ASSERT(aCur <= aEnd);
    }

   public:
    /* True when the cursor has reached the end. */
    bool empty() const { return mCur == mEnd; }
    /* Number of elements remaining to visit. */
    size_t remain() const { return PointerRangeSize(mCur, mEnd); }
    /* The element at the cursor; the range must not be empty. */
    T& front() const {
      MOZ_ASSERT(!empty());
      return *mCur;
    }
    /* Advance past the current element; the range must not be empty. */
    void popFront() {
      MOZ_ASSERT(!empty());
      ++mCur;
    }
    /* Return a copy of the current element and advance past it. */
    T popCopyFront() {
      MOZ_ASSERT(!empty());
      return *mCur++;
    }
  };
    648 
  /**
   * Read-only counterpart of Range: a cursor over const elements, advancing
   * from aCur toward aEnd.  Holds raw pointers into the vector's buffer, so
   * it must not outlive changes to the vector's storage.
   */
  class ConstRange {
    friend class Vector;
    const T* mCur;  // next element to visit
    const T* mEnd;  // one past the last element
    // Only Vector can construct a ConstRange (see all() const).
    ConstRange(const T* aCur, const T* aEnd) : mCur(aCur), mEnd(aEnd) {
      MOZ_ASSERT(aCur <= aEnd);
    }

   public:
    /* True when the cursor has reached the end. */
    bool empty() const { return mCur == mEnd; }
    /* Number of elements remaining to visit. */
    size_t remain() const { return PointerRangeSize(mCur, mEnd); }
    /* The element at the cursor; the range must not be empty. */
    const T& front() const {
      MOZ_ASSERT(!empty());
      return *mCur;
    }
    /* Advance past the current element; the range must not be empty. */
    void popFront() {
      MOZ_ASSERT(!empty());
      ++mCur;
    }
    /* Return a copy of the current element and advance past it. */
    T popCopyFront() {
      MOZ_ASSERT(!empty());
      return *mCur++;
    }
  };
    673 
    674  Range all() { return Range(begin(), end()); }
    675  ConstRange all() const { return ConstRange(begin(), end()); }
    676 
    677  /* mutators */
    678 
    679  /**
    680   * Reverse the order of the elements in the vector in place.
    681   */
    682  void reverse();
    683 
    684  /**
    685   * Given that the vector is empty, grow the internal capacity to |aRequest|,
    686   * keeping the length 0.
    687   */
    688  [[nodiscard]] bool initCapacity(size_t aRequest);
    689 
    690  /**
    691   * Given that the vector is empty, grow the internal capacity and length to
    692   * |aRequest| leaving the elements' memory completely uninitialized (with all
    693   * the associated hazards and caveats). This avoids the usual allocation-size
    694   * rounding that happens in resize and overhead of initialization for elements
    695   * that are about to be overwritten.
    696   */
    697  [[nodiscard]] bool initLengthUninitialized(size_t aRequest);
    698 
    699  /**
    700   * If reserve(aRequest) succeeds and |aRequest >= length()|, then appending
    701   * |aRequest - length()| elements, in any sequence of append/appendAll calls,
    702   * is guaranteed to succeed.
    703   *
    704   * A request to reserve an amount less than the current length does not affect
    705   * reserved space.
    706   */
    707  [[nodiscard]] bool reserve(size_t aRequest);
    708 
    709  /**
    710   * Destroy elements in the range [end() - aIncr, end()). Does not deallocate
    711   * or unreserve storage for those elements.
    712   */
    713  void shrinkBy(size_t aIncr);
    714 
    715  /**
    716   * Destroy elements in the range [aNewLength, end()). Does not deallocate
    717   * or unreserve storage for those elements.
    718   */
    719  void shrinkTo(size_t aNewLength);
    720 
    721  /** Grow the vector by aIncr elements. */
    722  [[nodiscard]] bool growBy(size_t aIncr);
    723 
    724  /** Call shrinkBy or growBy based on whether newSize > length(). */
    725  [[nodiscard]] bool resize(size_t aNewLength);
    726 
    727  /**
    728   * Increase the length of the vector, but don't initialize the new elements
    729   * -- leave them as uninitialized memory.
    730   */
    731  [[nodiscard]] bool growByUninitialized(size_t aIncr);
    732  void infallibleGrowByUninitialized(size_t aIncr);
    733  [[nodiscard]] bool resizeUninitialized(size_t aNewLength);
    734 
    735  /** Shorthand for shrinkBy(length()). */
    736  void clear();
    737 
    738  /** Clears and releases any heap-allocated storage. */
    739  void clearAndFree();
    740 
    741  /**
    742   * Shrinks the storage to drop excess capacity if possible.
    743   *
    744   * The return value indicates whether the operation succeeded, otherwise, it
    745   * represents an OOM. The bool can be safely ignored unless you want to
    746   * provide the guarantee that `length() == capacity()`.
    747   *
    748   * For PODs, it calls the AllocPolicy's pod_realloc. For non-PODs, it moves
    749   * the elements into the new storage.
    750   */
    751  bool shrinkStorageToFit();
    752 
    753  /**
    754   * If true, appending |aNeeded| elements won't reallocate elements storage.
    755   * This *doesn't* mean that infallibleAppend may be used!  You still must
    756   * reserve the extra space, even if this method indicates that appends won't
    757   * need to reallocate elements storage.
    758   */
    759  bool canAppendWithoutRealloc(size_t aNeeded) const;
    760 
    761  /** Potentially fallible append operations. */
    762 
    763  /**
    764   * This can take either a T& or a T&&. Given a T&&, it moves |aU| into the
    765   * vector, instead of copying it. If it fails, |aU| is left unmoved. ("We are
    766   * not amused.")
    767   */
    768  template <typename U>
    769  [[nodiscard]] bool append(U&& aU);
    770 
    771  /**
    772   * Construct a T in-place as a new entry at the end of this vector.
    773   */
    774  template <typename... Args>
    775  [[nodiscard]] bool emplaceBack(Args&&... aArgs) {
    776    if (!growByUninitialized(1)) return false;
    777    Impl::new_(&back(), std::forward<Args>(aArgs)...);
    778    return true;
    779  }
    780 
    781  template <typename U, size_t O, class BP>
    782  [[nodiscard]] bool appendAll(const Vector<U, O, BP>& aU);
    783  template <typename U, size_t O, class BP>
    784  [[nodiscard]] bool appendAll(Vector<U, O, BP>&& aU);
    785  [[nodiscard]] bool appendN(const T& aT, size_t aN);
    786  template <typename U>
    787  [[nodiscard]] bool append(const U* aBegin, const U* aEnd);
    788  template <typename U>
    789  [[nodiscard]] bool append(const U* aBegin, size_t aLength);
    790  template <typename U>
    791  [[nodiscard]] bool moveAppend(U* aBegin, U* aEnd);
    792 
    793  /*
    794   * Guaranteed-infallible append operations for use upon vectors whose
    795   * memory has been pre-reserved.  Don't use this if you haven't reserved the
    796   * memory!
    797   */
    798  template <typename U>
    799  void infallibleAppend(U&& aU) {
    800    internalAppend(std::forward<U>(aU));
    801  }
    802  void infallibleAppendN(const T& aT, size_t aN) { internalAppendN(aT, aN); }
    803  template <typename U>
    804  void infallibleAppend(const U* aBegin, const U* aEnd) {
    805    internalAppend(aBegin, PointerRangeSize(aBegin, aEnd));
    806  }
    807  template <typename U>
    808  void infallibleAppend(const U* aBegin, size_t aLength) {
    809    internalAppend(aBegin, aLength);
    810  }
    811  template <typename... Args>
    812  void infallibleEmplaceBack(Args&&... aArgs) {
    813    infallibleGrowByUninitialized(1);
    814    Impl::new_(&back(), std::forward<Args>(aArgs)...);
    815  }
    816 
 /** Remove and destroy the last element.  Crashes if the vector is empty. */
 void popBack();

 /** Remove the last element and return a copy of it (T must be copyable). */
 T popCopy();

 /**
  * If elements are stored in-place, return nullptr and leave this vector
  * unmodified.
  *
  * Otherwise return this vector's elements buffer, and clear this vector as if
  * by clearAndFree(). The caller now owns the buffer and is responsible for
  * deallocating it consistent with this vector's AllocPolicy.
  *
  * N.B. Although a T*, only the range [0, length()) is constructed.
  */
 [[nodiscard]] T* extractRawBuffer();

 /**
  * If elements are stored in-place, allocate a new buffer, move this vector's
  * elements into it, and return that buffer.
  *
  * Otherwise return this vector's elements buffer. The caller now owns the
  * buffer and is responsible for deallocating it consistent with this vector's
  * AllocPolicy.
  *
  * This vector is cleared, as if by clearAndFree(), when this method
  * succeeds. This method fails and returns nullptr only if new elements buffer
  * allocation fails.
  *
  * N.B. Only the range [0, length()) of the returned buffer is constructed.
  * If any of these elements are uninitialized (as growByUninitialized
  * enables), behavior is undefined.
  */
 [[nodiscard]] T* extractOrCopyRawBuffer();

 /**
  * Transfer ownership of an array of objects into the vector.  The caller
  * must have allocated the array in accordance with this vector's
  * AllocPolicy.
  *
  * N.B. This call assumes that there are no uninitialized elements in the
  *      passed range [aP, aP + aLength). The range [aP + aLength, aP +
  *      aCapacity) must be allocated uninitialized memory.
  */
 void replaceRawBuffer(T* aP, size_t aLength, size_t aCapacity);

 /**
  * Transfer ownership of an array of objects into the vector.  The caller
  * must have allocated the array in accordance with this vector's
  * AllocPolicy.
  *
  * N.B. This call assumes that there are no uninitialized elements in the
  *      passed array.
  */
 void replaceRawBuffer(T* aP, size_t aLength);

 /**
  * Places |aVal| at position |aP|, shifting existing elements from |aP| onward
  * one position higher.  On success, |aP| should not be reused because it'll
  * be a dangling pointer if reallocation of the vector storage occurred; the
  * return value should be used instead.  On failure, nullptr is returned.
  *
  * Example usage:
  *
  *   if (!(p = vec.insert(p, val))) {
  *     <handle failure>
  *   }
  *   <keep working with p>
  *
  * This is inherently a linear-time operation.  Be careful!
  */
 template <typename U>
 [[nodiscard]] T* insert(T* aP, U&& aVal);

 /**
  * Removes the element |aT|, which must fall in the bounds [begin, end),
  * shifting existing elements from |aT + 1| onward one position lower.
  */
 void erase(T* aT);

 /**
  * Removes the elements [|aBegin|, |aEnd|), which must fall in the bounds
  * [begin, end), shifting existing elements from |aEnd| onward to aBegin's old
  * position.
  */
 void erase(T* aBegin, T* aEnd);

 /**
  * Removes all elements that satisfy the predicate, shifting existing elements
  * lower to fill erased gaps.
  */
 template <typename Pred>
 void eraseIf(Pred aPred);

 /**
  * Removes all elements that compare equal to |aU|, shifting existing elements
  * lower to fill erased gaps.
  */
 template <typename U>
 void eraseIfEqual(const U& aU);

 /**
  * Measure the size of the vector's heap-allocated storage.
  */
 size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;

 /**
  * Like sizeOfExcludingThis, but also measures the size of the vector
  * object (which must be heap-allocated) itself.
  */
 size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;

 /** Exchange the contents of this vector with |aOther| (only for N == 0). */
 void swap(Vector& aOther);

private:
 // Vector is movable but deliberately not copyable; use appendAll() to copy
 // elements from another vector.
 Vector(const Vector&) = delete;
 void operator=(const Vector&) = delete;
};
    934 
/*
 * This does the re-entrancy check plus several other sanity checks: the
 * in-line-storage capacity invariant, and the ordering
 * mLength <= reserved() <= mTail.mCapacity.
 */
#define MOZ_REENTRANCY_GUARD_ET_AL                                         \
 ReentrancyGuard g(*this);                                                \
 MOZ_ASSERT_IF(usingInlineStorage(), mTail.mCapacity == kInlineCapacity); \
 MOZ_ASSERT(reserved() <= mTail.mCapacity);                               \
 MOZ_ASSERT(mLength <= reserved());                                       \
 MOZ_ASSERT(mLength <= mTail.mCapacity)
    942 
    943 /* Vector Implementation */
    944 
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>::Vector(AP aAP)
    : AP(std::move(aAP)),
      mLength(0),
      mTail(kInlineCapacity, 0)
#ifdef DEBUG
      ,
      mEntered(false)
#endif
{
  // A fresh vector always starts out pointing at its in-line storage.
  mBegin = inlineStorage();
}
    957 
/* Move constructor. */
template <typename T, size_t N, class AllocPolicy>
MOZ_ALWAYS_INLINE Vector<T, N, AllocPolicy>::Vector(Vector&& aRhs)
    : AllocPolicy(std::move(aRhs))
#ifdef DEBUG
      ,
      mEntered(false)
#endif
{
  mLength = aRhs.mLength;
  mTail.mCapacity = aRhs.mTail.mCapacity;
#ifdef DEBUG
  mTail.mReserved = aRhs.mTail.mReserved;
#endif

  if (aRhs.usingInlineStorage()) {
    /* We can't move the buffer over in this case, so copy elements. */
    mBegin = inlineStorage();
    Impl::moveConstruct(mBegin, aRhs.beginNoCheck(), aRhs.endNoCheck());
    /*
     * Leave aRhs's mLength, mBegin, mCapacity, and mReserved as they are.
     * The elements in its in-line storage still need to be destroyed.
     */
  } else {
    /*
     * Take src's buffer, and turn src into an empty vector using
     * in-line storage.
     */
    mBegin = aRhs.mBegin;
    aRhs.mBegin = aRhs.inlineStorage();
    aRhs.mTail.mCapacity = kInlineCapacity;
    aRhs.mLength = 0;
#ifdef DEBUG
    aRhs.mTail.mReserved = 0;
#endif
  }
}

/* Move assignment. */
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>& Vector<T, N, AP>::operator=(Vector&& aRhs) {
  MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
  // Destroy-then-reconstruct in place, reusing the move constructor's logic
  // rather than duplicating it.
  this->~Vector();
  new (KnownNotNull, this) Vector(std::move(aRhs));
  return *this;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE Vector<T, N, AP>::~Vector() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  // Destroy the constructed elements, then release the heap buffer if any.
  Impl::destroy(beginNoCheck(), endNoCheck());
  if (!usingInlineStorage()) {
    this->free_(beginNoCheck(), mTail.mCapacity);
  }
}
   1013 
   1014 template <typename T, size_t N, class AP>
   1015 MOZ_ALWAYS_INLINE void Vector<T, N, AP>::reverse() {
   1016  MOZ_REENTRANCY_GUARD_ET_AL;
   1017  T* elems = mBegin;
   1018  size_t len = mLength;
   1019  size_t mid = len / 2;
   1020  for (size_t i = 0; i < mid; i++) {
   1021    std::swap(elems[i], elems[len - i - 1]);
   1022  }
   1023 }
   1024 
   1025 /*
   1026 * This function will create a new heap buffer with capacity aNewCap,
   1027 * move all elements in the inline buffer to this new buffer,
   1028 * and fail on OOM.
   1029 */
   1030 template <typename T, size_t N, class AP>
   1031 inline bool Vector<T, N, AP>::convertToHeapStorage(size_t aNewCap) {
   1032  MOZ_ASSERT(usingInlineStorage());
   1033 
   1034  /* Allocate buffer. */
   1035  MOZ_ASSERT(!detail::CapacityHasExcessSpace<sizeof(T)>(aNewCap));
   1036  T* newBuf = this->template pod_malloc<T>(aNewCap);
   1037  if (MOZ_UNLIKELY(!newBuf)) {
   1038    return false;
   1039  }
   1040 
   1041  /* Copy inline elements into heap buffer. */
   1042  Impl::moveConstruct(newBuf, beginNoCheck(), endNoCheck());
   1043  Impl::destroy(beginNoCheck(), endNoCheck());
   1044 
   1045  /* Switch in heap buffer. */
   1046  mBegin = newBuf;
   1047  /* mLength is unchanged. */
   1048  mTail.mCapacity = aNewCap;
   1049  return true;
   1050 }
   1051 
   1052 template <typename T, size_t N, class AP>
   1053 MOZ_NEVER_INLINE bool Vector<T, N, AP>::growStorageBy(size_t aIncr) {
   1054  MOZ_ASSERT(mLength + aIncr > mTail.mCapacity);
   1055 
   1056  size_t newCap;
   1057 
   1058  if (aIncr == 1 && usingInlineStorage()) {
   1059    /* This case occurs in ~70--80% of the calls to this function. */
   1060    constexpr size_t newSize = RoundUpPow2((kInlineCapacity + 1) * sizeof(T));
   1061    static_assert(newSize / sizeof(T) > 0,
   1062                  "overflow when exceeding inline Vector storage");
   1063    newCap = newSize / sizeof(T);
   1064  } else {
   1065    newCap = detail::ComputeGrowth<AP, sizeof(T)>(mLength, aIncr, true);
   1066    if (MOZ_UNLIKELY(newCap == 0)) {
   1067      this->reportAllocOverflow();
   1068      return false;
   1069    }
   1070  }
   1071 
   1072  if (usingInlineStorage()) {
   1073    return convertToHeapStorage(newCap);
   1074  }
   1075 
   1076  return Impl::growTo(*this, newCap);
   1077 }
   1078 
   1079 template <typename T, size_t N, class AP>
   1080 inline bool Vector<T, N, AP>::initCapacity(size_t aRequest) {
   1081  MOZ_ASSERT(empty());
   1082  MOZ_ASSERT(usingInlineStorage());
   1083  if (aRequest == 0) {
   1084    return true;
   1085  }
   1086  T* newbuf = this->template pod_malloc<T>(aRequest);
   1087  if (MOZ_UNLIKELY(!newbuf)) {
   1088    return false;
   1089  }
   1090  mBegin = newbuf;
   1091  mTail.mCapacity = aRequest;
   1092 #ifdef DEBUG
   1093  mTail.mReserved = aRequest;
   1094 #endif
   1095  return true;
   1096 }
   1097 
   1098 template <typename T, size_t N, class AP>
   1099 inline bool Vector<T, N, AP>::initLengthUninitialized(size_t aRequest) {
   1100  if (!initCapacity(aRequest)) {
   1101    return false;
   1102  }
   1103  infallibleGrowByUninitialized(aRequest);
   1104  return true;
   1105 }
   1106 
   1107 template <typename T, size_t N, class AP>
   1108 inline bool Vector<T, N, AP>::maybeCheckSimulatedOOM(size_t aRequestedSize) {
   1109  if (aRequestedSize <= N) {
   1110    return true;
   1111  }
   1112 
   1113 #ifdef DEBUG
   1114  if (aRequestedSize <= mTail.mReserved) {
   1115    return true;
   1116  }
   1117 #endif
   1118 
   1119  return allocPolicy().checkSimulatedOOM();
   1120 }
   1121 
template <typename T, size_t N, class AP>
inline bool Vector<T, N, AP>::reserve(size_t aRequest) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (aRequest > mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(aRequest - mLength))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(aRequest)) {
    return false;
  }
#ifdef DEBUG
  // Record the reservation so the infallibleAppend() family can assert that
  // callers stay within what was actually reserved.
  if (aRequest > mTail.mReserved) {
    mTail.mReserved = aRequest;
  }
  MOZ_ASSERT(mLength <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
#endif
  return true;
}
   1141 
   1142 template <typename T, size_t N, class AP>
   1143 inline void Vector<T, N, AP>::shrinkBy(size_t aIncr) {
   1144  MOZ_REENTRANCY_GUARD_ET_AL;
   1145  MOZ_ASSERT(aIncr <= mLength);
   1146  Impl::destroy(endNoCheck() - aIncr, endNoCheck());
   1147  mLength -= aIncr;
   1148 }
   1149 
   1150 template <typename T, size_t N, class AP>
   1151 MOZ_ALWAYS_INLINE void Vector<T, N, AP>::shrinkTo(size_t aNewLength) {
   1152  MOZ_ASSERT(aNewLength <= mLength);
   1153  shrinkBy(mLength - aNewLength);
   1154 }
   1155 
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::growBy(size_t aIncr) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  // Overflow-safe headroom check: mLength <= mTail.mCapacity is an invariant,
  // so the subtraction cannot underflow.
  if (aIncr > mTail.mCapacity - mLength) {
    if (MOZ_UNLIKELY(!growStorageBy(aIncr))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aIncr)) {
    return false;
  }
  MOZ_ASSERT(mLength + aIncr <= mTail.mCapacity);
  // Construct the new trailing elements via Impl::initialize.
  T* newend = endNoCheck() + aIncr;
  Impl::initialize(endNoCheck(), newend);
  mLength += aIncr;
#ifdef DEBUG
  if (mLength > mTail.mReserved) {
    mTail.mReserved = mLength;
  }
#endif
  return true;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::growByUninitialized(size_t aIncr) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  if (aIncr > mTail.mCapacity - mLength) {
    if (MOZ_UNLIKELY(!growStorageBy(aIncr))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + aIncr)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + aIncr > mTail.mReserved) {
    mTail.mReserved = mLength + aIncr;
  }
#endif
  // The caller is responsible for constructing the new elements.
  infallibleGrowByUninitialized(aIncr);
  return true;
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::infallibleGrowByUninitialized(
    size_t aIncr) {
  // Capacity must have been pre-reserved; only the length is bumped here.
  MOZ_ASSERT(mLength + aIncr <= reserved());
  mLength += aIncr;
}
   1203 
   1204 template <typename T, size_t N, class AP>
   1205 inline bool Vector<T, N, AP>::resize(size_t aNewLength) {
   1206  size_t curLength = mLength;
   1207  if (aNewLength > curLength) {
   1208    return growBy(aNewLength - curLength);
   1209  }
   1210  shrinkBy(curLength - aNewLength);
   1211  return true;
   1212 }
   1213 
   1214 template <typename T, size_t N, class AP>
   1215 MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::resizeUninitialized(
   1216    size_t aNewLength) {
   1217  size_t curLength = mLength;
   1218  if (aNewLength > curLength) {
   1219    return growByUninitialized(aNewLength - curLength);
   1220  }
   1221  shrinkBy(curLength - aNewLength);
   1222  return true;
   1223 }
   1224 
template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::clear() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  // Destroys all elements but keeps the current storage (and capacity).
  Impl::destroy(beginNoCheck(), endNoCheck());
  mLength = 0;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::clearAndFree() {
  clear();

  if (usingInlineStorage()) {
    return;
  }
  // Release the heap buffer and fall back to in-line storage.
  this->free_(beginNoCheck(), mTail.mCapacity);
  mBegin = inlineStorage();
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
}
   1246 
   1247 template <typename T, size_t N, class AP>
   1248 inline bool Vector<T, N, AP>::shrinkStorageToFit() {
   1249  MOZ_REENTRANCY_GUARD_ET_AL;
   1250 
   1251  const auto length = this->length();
   1252  if (usingInlineStorage() || length == capacity()) {
   1253    return true;
   1254  }
   1255 
   1256  if (!length) {
   1257    this->free_(beginNoCheck(), mTail.mCapacity);
   1258    mBegin = inlineStorage();
   1259    mTail.mCapacity = kInlineCapacity;
   1260 #ifdef DEBUG
   1261    mTail.mReserved = 0;
   1262 #endif
   1263    return true;
   1264  }
   1265 
   1266  T* newBuf;
   1267  size_t newCap;
   1268  if (length <= kInlineCapacity) {
   1269    newBuf = inlineStorage();
   1270    newCap = kInlineCapacity;
   1271  } else {
   1272    if (kElemIsPod) {
   1273      newBuf = this->template pod_realloc<T>(beginNoCheck(), mTail.mCapacity,
   1274                                             length);
   1275    } else {
   1276      newBuf = this->template pod_malloc<T>(length);
   1277    }
   1278    if (MOZ_UNLIKELY(!newBuf)) {
   1279      return false;
   1280    }
   1281    newCap = length;
   1282  }
   1283  if (!kElemIsPod || newBuf == inlineStorage()) {
   1284    Impl::moveConstruct(newBuf, beginNoCheck(), endNoCheck());
   1285    Impl::destroy(beginNoCheck(), endNoCheck());
   1286  }
   1287  if (!kElemIsPod) {
   1288    this->free_(beginNoCheck(), mTail.mCapacity);
   1289  }
   1290  mBegin = newBuf;
   1291  mTail.mCapacity = newCap;
   1292 #ifdef DEBUG
   1293  mTail.mReserved = length;
   1294 #endif
   1295  return true;
   1296 }
   1297 
   1298 template <typename T, size_t N, class AP>
   1299 inline bool Vector<T, N, AP>::canAppendWithoutRealloc(size_t aNeeded) const {
   1300  return mLength + aNeeded <= mTail.mCapacity;
   1301 }
   1302 
template <typename T, size_t N, class AP>
template <typename U, size_t O, class BP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppendAll(
    const Vector<U, O, BP>& aOther) {
  // Capacity must have been reserved by the caller (asserted downstream).
  internalAppend(aOther.begin(), aOther.length());
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppend(U&& aU) {
  // Infallible path: space for one more element must already be reserved.
  MOZ_ASSERT(mLength + 1 <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::new_(endNoCheck(), std::forward<U>(aU));
  ++mLength;
}
   1318 
   1319 template <typename T, size_t N, class AP>
   1320 MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendN(const T& aT, size_t aNeeded) {
   1321  MOZ_REENTRANCY_GUARD_ET_AL;
   1322  if (mLength + aNeeded > mTail.mCapacity) {
   1323    if (MOZ_UNLIKELY(!growStorageBy(aNeeded))) {
   1324      return false;
   1325    }
   1326  } else if (!maybeCheckSimulatedOOM(mLength + aNeeded)) {
   1327    return false;
   1328  }
   1329 #ifdef DEBUG
   1330  if (mLength + aNeeded > mTail.mReserved) {
   1331    mTail.mReserved = mLength + aNeeded;
   1332  }
   1333 #endif
   1334  internalAppendN(aT, aNeeded);
   1335  return true;
   1336 }
   1337 
template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppendN(const T& aT,
                                                         size_t aNeeded) {
  // Infallible path: capacity for aNeeded more copies must be pre-reserved.
  MOZ_ASSERT(mLength + aNeeded <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::copyConstructN(endNoCheck(), aNeeded, aT);
  mLength += aNeeded;
}

template <typename T, size_t N, class AP>
template <typename U>
inline T* Vector<T, N, AP>::insert(T* aP, U&& aVal) {
  MOZ_ASSERT(begin() <= aP);
  MOZ_ASSERT(aP <= end());
  // Remember the insertion index before any append, since append may
  // reallocate and invalidate aP.
  size_t pos = aP - begin();
  MOZ_ASSERT(pos <= mLength);
  size_t oldLength = mLength;
  if (pos == oldLength) {
    // Inserting at the end is just an append.
    if (!append(std::forward<U>(aVal))) {
      return nullptr;
    }
  } else {
    // Append-then-rotate: re-append the old last element, shift the tail up
    // one slot, then drop the new value into the vacated position.
    T oldBack = std::move(back());
    if (!append(std::move(oldBack))) {
      return nullptr;
    }
    for (size_t i = oldLength - 1; i > pos; --i) {
      (*this)[i] = std::move((*this)[i - 1]);
    }
    (*this)[pos] = std::forward<U>(aVal);
  }
  return begin() + pos;
}
   1371 
   1372 template <typename T, size_t N, class AP>
   1373 inline void Vector<T, N, AP>::erase(T* aIt) {
   1374  MOZ_ASSERT(begin() <= aIt);
   1375  MOZ_ASSERT(aIt < end());
   1376  while (aIt + 1 < end()) {
   1377    *aIt = std::move(*(aIt + 1));
   1378    ++aIt;
   1379  }
   1380  popBack();
   1381 }
   1382 
template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::erase(T* aBegin, T* aEnd) {
  MOZ_ASSERT(begin() <= aBegin);
  MOZ_ASSERT(aBegin <= aEnd);
  MOZ_ASSERT(aEnd <= end());
  // Shift the tail down over the erased range.  Both pointers advance in
  // lock-step, so after the loop |aEnd - aBegin| still equals the original
  // range length -- exactly the number of now-dead elements at the tail,
  // which shrinkBy() destroys.
  while (aEnd < end()) {
    *aBegin++ = std::move(*aEnd++);
  }
  shrinkBy(aEnd - aBegin);
}

template <typename T, size_t N, class AP>
template <typename Pred>
void Vector<T, N, AP>::eraseIf(Pred aPred) {
  // remove_if finds the first element to be erased, and then efficiently move-
  // assigns elements to effectively overwrite elements that satisfy the
  // predicate. It returns the new end pointer, after which there are only
  // moved-from elements ready to be destroyed, so we just need to shrink the
  // vector accordingly.
  // NOTE(review): std::remove_if lives in <algorithm>, which this header does
  // not include directly -- presumably provided transitively; verify.
  T* newEnd = std::remove_if(begin(), end(),
                             [&aPred](const T& aT) { return aPred(aT); });
  MOZ_ASSERT(newEnd <= end());
  shrinkBy(end() - newEnd);
}
   1407 
   1408 template <typename T, size_t N, class AP>
   1409 template <typename U>
   1410 void Vector<T, N, AP>::eraseIfEqual(const U& aU) {
   1411  return eraseIf([&aU](const T& aT) { return aT == aU; });
   1412 }
   1413 
   1414 template <typename T, size_t N, class AP>
   1415 MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::internalEnsureCapacity(
   1416    size_t aNeeded) {
   1417  if (mLength + aNeeded > mTail.mCapacity) {
   1418    if (MOZ_UNLIKELY(!growStorageBy(aNeeded))) {
   1419      return false;
   1420    }
   1421  } else if (!maybeCheckSimulatedOOM(mLength + aNeeded)) {
   1422    return false;
   1423  }
   1424 #ifdef DEBUG
   1425  if (mLength + aNeeded > mTail.mReserved) {
   1426    mTail.mReserved = mLength + aNeeded;
   1427  }
   1428 #endif
   1429  return true;
   1430 }
   1431 
   1432 template <typename T, size_t N, class AP>
   1433 template <typename U>
   1434 MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(const U* aInsBegin,
   1435                                                const U* aInsEnd) {
   1436  MOZ_REENTRANCY_GUARD_ET_AL;
   1437  const size_t needed = PointerRangeSize(aInsBegin, aInsEnd);
   1438  if (!internalEnsureCapacity(needed)) {
   1439    return false;
   1440  }
   1441  internalAppend(aInsBegin, needed);
   1442  return true;
   1443 }
   1444 
template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalAppend(const U* aInsBegin,
                                                        size_t aInsLength) {
  // Infallible path: capacity for aInsLength more elements must be reserved.
  MOZ_ASSERT(mLength + aInsLength <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::copyConstruct(endNoCheck(), aInsBegin, aInsBegin + aInsLength);
  mLength += aInsLength;
}
   1454 
   1455 template <typename T, size_t N, class AP>
   1456 template <typename U>
   1457 MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::moveAppend(U* aInsBegin, U* aInsEnd) {
   1458  MOZ_REENTRANCY_GUARD_ET_AL;
   1459  const size_t needed = PointerRangeSize(aInsBegin, aInsEnd);
   1460  if (!internalEnsureCapacity(needed)) {
   1461    return false;
   1462  }
   1463  internalMoveAppend(aInsBegin, needed);
   1464  return true;
   1465 }
   1466 
template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::internalMoveAppend(U* aInsBegin,
                                                            size_t aInsLength) {
  // Infallible path: capacity for aInsLength more elements must be reserved.
  MOZ_ASSERT(mLength + aInsLength <= mTail.mReserved);
  MOZ_ASSERT(mTail.mReserved <= mTail.mCapacity);
  Impl::moveConstruct(endNoCheck(), aInsBegin, aInsBegin + aInsLength);
  mLength += aInsLength;
}

template <typename T, size_t N, class AP>
template <typename U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(U&& aU) {
  MOZ_REENTRANCY_GUARD_ET_AL;
  // mLength == mCapacity cannot overflow, so the simple equality test
  // suffices for a single-element append.
  if (mLength == mTail.mCapacity) {
    if (MOZ_UNLIKELY(!growStorageBy(1))) {
      return false;
    }
  } else if (!maybeCheckSimulatedOOM(mLength + 1)) {
    return false;
  }
#ifdef DEBUG
  if (mLength + 1 > mTail.mReserved) {
    mTail.mReserved = mLength + 1;
  }
#endif
  internalAppend(std::forward<U>(aU));
  return true;
}

template <typename T, size_t N, class AP>
template <typename U, size_t O, class BP>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendAll(
    const Vector<U, O, BP>& aOther) {
  // Copy-append the other vector's whole element range.
  return append(aOther.begin(), aOther.length());
}
   1503 
   1504 template <typename T, size_t N, class AP>
   1505 template <typename U, size_t O, class BP>
   1506 MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::appendAll(Vector<U, O, BP>&& aOther) {
   1507  if (empty() && capacity() < aOther.length()) {
   1508    *this = std::move(aOther);
   1509    return true;
   1510  }
   1511 
   1512  if (moveAppend(aOther.begin(), aOther.end())) {
   1513    aOther.clearAndFree();
   1514    return true;
   1515  }
   1516 
   1517  return false;
   1518 }
   1519 
template <typename T, size_t N, class AP>
template <class U>
MOZ_ALWAYS_INLINE bool Vector<T, N, AP>::append(const U* aInsBegin,
                                                size_t aInsLength) {
  // Delegate to the (begin, end) overload.
  return append(aInsBegin, aInsBegin + aInsLength);
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE void Vector<T, N, AP>::popBack() {
  MOZ_REENTRANCY_GUARD_ET_AL;
  // Popping an empty vector is a deliberate crash, not undefined behavior.
  if (MOZ_UNLIKELY(empty())) {
    mozilla::detail::InvalidArrayIndex_CRASH(0, 0);
  }
  --mLength;
  endNoCheck()->~T();
}

template <typename T, size_t N, class AP>
MOZ_ALWAYS_INLINE T Vector<T, N, AP>::popCopy() {
  // Copies (not moves) the last element out before destroying it, so T must
  // be copy-constructible.
  T ret = back();
  popBack();
  return ret;
}
   1543 
template <typename T, size_t N, class AP>
inline T* Vector<T, N, AP>::extractRawBuffer() {
  MOZ_REENTRANCY_GUARD_ET_AL;

  // In-line elements cannot be handed out; the caller gets nullptr.
  if (usingInlineStorage()) {
    return nullptr;
  }

  // Transfer ownership of the heap buffer and reset to an empty in-line
  // vector.
  T* ret = mBegin;
  mBegin = inlineStorage();
  mLength = 0;
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
  return ret;
}

template <typename T, size_t N, class AP>
inline T* Vector<T, N, AP>::extractOrCopyRawBuffer() {
  // Heap storage can be handed over directly.
  if (T* ret = extractRawBuffer()) {
    return ret;
  }

  // In-line storage: allocate a fresh buffer and move the elements into it.
  MOZ_REENTRANCY_GUARD_ET_AL;

  T* copy = this->template pod_malloc<T>(mLength);
  if (!copy) {
    return nullptr;
  }

  Impl::moveConstruct(copy, beginNoCheck(), endNoCheck());
  Impl::destroy(beginNoCheck(), endNoCheck());
  mBegin = inlineStorage();
  mLength = 0;
  mTail.mCapacity = kInlineCapacity;
#ifdef DEBUG
  mTail.mReserved = 0;
#endif
  return copy;
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::replaceRawBuffer(T* aP, size_t aLength,
                                               size_t aCapacity) {
  MOZ_REENTRANCY_GUARD_ET_AL;

  /* Destroy what we have. */
  Impl::destroy(beginNoCheck(), endNoCheck());
  if (!usingInlineStorage()) {
    this->free_(beginNoCheck(), mTail.mCapacity);
  }

  /* Take in the new buffer. */
  if (aCapacity <= kInlineCapacity) {
    /*
     * We convert to inline storage if possible, even though aP might
     * otherwise be acceptable.  Maybe this behaviour should be
     * specifiable with an argument to this function.
     */
    mBegin = inlineStorage();
    mLength = aLength;
    mTail.mCapacity = kInlineCapacity;
    Impl::moveConstruct(mBegin, aP, aP + aLength);
    Impl::destroy(aP, aP + aLength);
    this->free_(aP, aCapacity);
  } else {
    mBegin = aP;
    mLength = aLength;
    mTail.mCapacity = aCapacity;
  }
#ifdef DEBUG
  mTail.mReserved = aCapacity;
#endif
}

template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::replaceRawBuffer(T* aP, size_t aLength) {
  // A fully-constructed buffer: length and capacity coincide.
  replaceRawBuffer(aP, aLength, aLength);
}
   1624 
   1625 template <typename T, size_t N, class AP>
   1626 inline size_t Vector<T, N, AP>::sizeOfExcludingThis(
   1627    MallocSizeOf aMallocSizeOf) const {
   1628  return usingInlineStorage() ? 0 : aMallocSizeOf(beginNoCheck());
   1629 }
   1630 
   1631 template <typename T, size_t N, class AP>
   1632 inline size_t Vector<T, N, AP>::sizeOfIncludingThis(
   1633    MallocSizeOf aMallocSizeOf) const {
   1634  return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf);
   1635 }
   1636 
template <typename T, size_t N, class AP>
inline void Vector<T, N, AP>::swap(Vector& aOther) {
  static_assert(N == 0,
                "still need to implement this for N != 0 (Bug 1987683)");

  // This only works when inline storage is always empty: with N == 0 there
  // are no elements to exchange in-line, so swapping reduces to exchanging
  // buffer pointers and bookkeeping.
  if (!usingInlineStorage() && aOther.usingInlineStorage()) {
    aOther.mBegin = mBegin;
    mBegin = inlineStorage();
  } else if (usingInlineStorage() && !aOther.usingInlineStorage()) {
    mBegin = aOther.mBegin;
    aOther.mBegin = aOther.inlineStorage();
  } else if (!usingInlineStorage() && !aOther.usingInlineStorage()) {
    std::swap(mBegin, aOther.mBegin);
  } else {
    // This case is a no-op, since we'd set both to use their inline storage.
  }

  std::swap(mLength, aOther.mLength);
  std::swap(mTail.mCapacity, aOther.mTail.mCapacity);
#ifdef DEBUG
  std::swap(mTail.mReserved, aOther.mTail.mReserved);
#endif
}
   1661 
   1662 }  // namespace mozilla
   1663 
   1664 #endif /* mozilla_Vector_h */