tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

HashTable.h (77340B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4  * License, v. 2.0. If a copy of the MPL was not distributed with this
      5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 //---------------------------------------------------------------------------
      8 // Overview
      9 //---------------------------------------------------------------------------
     10 //
     11 // This file defines HashMap<Key, Value> and HashSet<T>, hash tables that are
     12 // fast and have a nice API.
     13 //
     14 // Both hash tables have two optional template parameters.
     15 //
     16 // - HashPolicy. This defines the operations for hashing and matching keys. The
     17 //   default HashPolicy is appropriate when both of the following two
     18 //   conditions are true.
     19 //
     20 //   - The key type stored in the table (|Key| for |HashMap<Key, Value>|, |T|
     21 //     for |HashSet<T>|) is an integer, pointer, UniquePtr, float, or double.
     22 //
     23 //   - The type used for lookups (|Lookup|) is the same as the key type. This
     24 //     is usually the case, but not always.
     25 //
     26 //   There is also a |CStringHasher| policy for |char*| keys. If your keys
     27 //   don't match any of the above cases, you must provide your own hash policy;
     28 //   see the "Hash Policy" section below.
     29 //
     30 // - AllocPolicy. This defines how allocations are done by the table.
     31 //
     32 //   - |MallocAllocPolicy| is the default and is usually appropriate; note that
     33 //     operations (such as insertions) that might cause allocations are
     34 //     fallible and must be checked for OOM. These checks are enforced by the
     35 //     use of [[nodiscard]].
     36 //
     37 //   - |InfallibleAllocPolicy| is another possibility; it allows the
     38 //     abovementioned OOM checks to be done with MOZ_ALWAYS_TRUE().
     39 //
     40 //   Note that entry storage allocation is lazy, and not done until the first
     41 //   lookupForAdd(), put(), or putNew() is performed.
     42 //
     43 //  See AllocPolicy.h for more details.
     44 //
     45 // Documentation on how to use HashMap and HashSet, including examples, is
     46 // present within those classes. Search for "class HashMap" and "class
     47 // HashSet".
     48 //
     49 // Both HashMap and HashSet are implemented on top of a third class, HashTable.
     50 // You only need to look at HashTable if you want to understand the
     51 // implementation.
     52 //
     53 // How does mozilla::HashTable (this file) compare with PLDHashTable (and its
     54 // subclasses, such as nsTHashtable)?
     55 //
     56 // - mozilla::HashTable is a lot faster, largely because it uses templates
     57 //   throughout *and* inlines everything. PLDHashTable inlines operations much
     58 //   less aggressively, and also uses "virtual ops" for operations like hashing
     59 //   and matching entries that require function calls.
     60 //
     61 // - Correspondingly, mozilla::HashTable use is likely to increase executable
     62 //   size much more than PLDHashTable.
     63 //
     64 // - mozilla::HashTable has a nicer API, with a proper HashSet vs. HashMap
     65 //   distinction.
     66 //
     67 // - mozilla::HashTable requires more explicit OOM checking. As mentioned
     68 //   above, the use of |InfallibleAllocPolicy| can simplify things.
     69 //
     70 // - mozilla::HashTable has a default capacity on creation of 32 and a minimum
     71 //   capacity of 4. PLDHashTable has a default capacity on creation of 8 and a
     72 //   minimum capacity of 8.
     73 
     74 #ifndef mozilla_HashTable_h
     75 #define mozilla_HashTable_h
     76 
     77 #include <utility>
     78 #include <type_traits>
     79 
     80 #include "mozilla/AllocPolicy.h"
     81 #include "mozilla/Assertions.h"
     82 #include "mozilla/Attributes.h"
     83 #include "mozilla/Casting.h"
     84 #include "mozilla/EndianUtils.h"
     85 #include "mozilla/HashFunctions.h"
     86 #include "mozilla/MathAlgorithms.h"
     87 #include "mozilla/Maybe.h"
     88 #include "mozilla/MemoryChecking.h"
     89 #include "mozilla/MemoryReporting.h"
     90 #include "mozilla/Opaque.h"
     91 #include "mozilla/OperatorNewExtensions.h"
     92 #include "mozilla/ReentrancyGuard.h"
     93 #include "mozilla/UniquePtr.h"
     94 #include "mozilla/WrappingOperations.h"
     95 
     96 namespace mozilla {
     97 
     98 template <class, class = void>
     99 struct DefaultHasher;
    100 
    101 template <class, class>
    102 class HashMapEntry;
    103 
    104 namespace detail {
    105 
    106 template <typename T>
    107 class HashTableEntry;
    108 
    109 template <class T, class HashPolicy, class AllocPolicy>
    110 class HashTable;
    111 
    112 }  // namespace detail
    113 
    114 // The "generation" of a hash table is an opaque value indicating the state of
    115 // modification of the hash table through its lifetime.  If the generation of
    116 // a hash table compares equal at times T1 and T2, then lookups in the hash
    117 // table, pointers to (or into) hash table entries, etc. at time T1 are valid
    118 // at time T2.  If the generation compares unequal, these computations are all
    119 // invalid and must be performed again to be used.
    120 //
    121 // Generations are meaningfully comparable only with respect to a single hash
    122 // table.  It's always nonsensical to compare the generation of distinct hash
    123 // tables H1 and H2.
    124 using Generation = Opaque<uint64_t>;
    125 
    126 //---------------------------------------------------------------------------
    127 // HashMap
    128 //---------------------------------------------------------------------------
    129 
    130 // HashMap is a fast hash-based map from keys to values.
    131 //
    132 // Template parameter requirements:
    133 // - Key/Value: movable, destructible, assignable.
    134 // - HashPolicy: see the "Hash Policy" section below.
    135 // - AllocPolicy: see AllocPolicy.h.
    136 //
    137 // Note:
    138 // - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
    139 //   called by HashMap must not call back into the same HashMap object.
    140 //
    141 template <class Key, class Value, class HashPolicy = DefaultHasher<Key>,
    142          class AllocPolicy = MallocAllocPolicy>
    143 class MOZ_STANDALONE_DEBUG HashMap {
    144  // -- Implementation details -----------------------------------------------
    145 
    146  // HashMap is not copyable or assignable.
    147  HashMap(const HashMap& hm) = delete;
    148  HashMap& operator=(const HashMap& hm) = delete;
    149 
    150  using TableEntry = HashMapEntry<Key, Value>;
    151 
    152  struct MapHashPolicy : HashPolicy {
    153    using Base = HashPolicy;
    154    using KeyType = Key;
    155 
    156    static const Key& getKey(TableEntry& aEntry) { return aEntry.key(); }
    157 
    158    static void setKey(TableEntry& aEntry, Key& aKey) {
    159      HashPolicy::rekey(aEntry.mutableKey(), aKey);
    160    }
    161  };
    162 
    163  using Impl = detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy>;
    164  Impl mImpl;
    165 
    166  friend class Impl::Enum;
    167 
    168 public:
    169  using Lookup = typename HashPolicy::Lookup;
    170  using Entry = TableEntry;
    171 
    172  // -- Initialization -------------------------------------------------------
    173 
    174  explicit HashMap(AllocPolicy aAllocPolicy = AllocPolicy(),
    175                   uint32_t aLen = Impl::sDefaultLen)
    176      : mImpl(std::move(aAllocPolicy), aLen) {}
    177 
    178  explicit HashMap(uint32_t aLen) : mImpl(AllocPolicy(), aLen) {}
    179 
    180  // HashMap is movable.
    181  HashMap(HashMap&& aRhs) = default;
    182  HashMap& operator=(HashMap&& aRhs) = default;
    183 
    184  // Swap the contents of this hash map with another.
    185  void swap(HashMap& aOther) { mImpl.swap(aOther.mImpl); }
    186 
    187  // -- Status and sizing ----------------------------------------------------
    188 
    189  // The map's current generation.
    190  Generation generation() const { return mImpl.generation(); }
    191 
    192  // Is the map empty?
    193  bool empty() const { return mImpl.empty(); }
    194 
    195  // Number of keys/values in the map.
    196  uint32_t count() const { return mImpl.count(); }
    197 
    198  // Number of key/value slots in the map. Note: resize will happen well before
    199  // count() == capacity().
    200  uint32_t capacity() const { return mImpl.capacity(); }
    201 
    202  // The size of the map's entry storage, in bytes. If the keys/values contain
    203  // pointers to other heap blocks, you must iterate over the map and measure
    204  // them separately; hence the "shallow" prefix.
    205  size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
    206    return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
    207  }
    208  size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
    209    return aMallocSizeOf(this) +
    210           mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
    211  }
    212 
    213  // Attempt to minimize the capacity(). If the table is empty, this will free
    214  // the empty storage and upon regrowth it will be given the minimum capacity.
    215  void compact() { mImpl.compact(); }
    216 
    217  // Attempt to reserve enough space to fit at least |aLen| elements. This is
    218  // total capacity, including elements already present. Does nothing if the
    219  // map already has sufficient capacity.
    220  [[nodiscard]] bool reserve(uint32_t aLen) { return mImpl.reserve(aLen); }
    221 
    222  // -- Lookups --------------------------------------------------------------
    223 
    224  // Does the map contain a key/value matching |aLookup|?
    225  bool has(const Lookup& aLookup) const {
    226    return mImpl.lookup(aLookup).found();
    227  }
    228 
    229  // Return a Ptr indicating whether a key/value matching |aLookup| is
    230  // present in the map. E.g.:
    231  //
    232  //   using HM = HashMap<int,char>;
    233  //   HM h;
    234  //   if (HM::Ptr p = h.lookup(3)) {
    235  //     assert(p->key() == 3);
    236  //     char val = p->value();
    237  //   }
    238  //
    239  using Ptr = typename Impl::Ptr;
    240  MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const {
    241    return mImpl.lookup(aLookup);
    242  }
    243 
    244  // Like lookup(), but does not assert if two threads call it at the same
    245  // time. Only use this method when none of the threads will modify the map.
    246  MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const {
    247    return mImpl.readonlyThreadsafeLookup(aLookup);
    248  }
    249 
    250  // -- Insertions -----------------------------------------------------------
    251 
    252  // Overwrite existing value with |aValue|, or add it if not present. Returns
    253  // false on OOM.
    254  template <typename KeyInput, typename ValueInput>
    255  [[nodiscard]] bool put(KeyInput&& aKey, ValueInput&& aValue) {
    256    return put(aKey, std::forward<KeyInput>(aKey),
    257               std::forward<ValueInput>(aValue));
    258  }
    259 
    260  template <typename KeyInput, typename ValueInput>
    261  [[nodiscard]] bool put(const Lookup& aLookup, KeyInput&& aKey,
    262                         ValueInput&& aValue) {
    263    AddPtr p = lookupForAdd(aLookup);
    264    if (p) {
    265      p->value() = std::forward<ValueInput>(aValue);
    266      return true;
    267    }
    268    return add(p, std::forward<KeyInput>(aKey),
    269               std::forward<ValueInput>(aValue));
    270  }
    271 
    272  // Like put(), but slightly faster. Must only be used when the given key is
    273  // not already present. (In debug builds, assertions check this.)
    274  template <typename KeyInput, typename ValueInput>
    275  [[nodiscard]] bool putNew(KeyInput&& aKey, ValueInput&& aValue) {
    276    return mImpl.putNew(aKey, std::forward<KeyInput>(aKey),
    277                        std::forward<ValueInput>(aValue));
    278  }
    279 
    280  template <typename KeyInput, typename ValueInput>
    281  [[nodiscard]] bool putNew(const Lookup& aLookup, KeyInput&& aKey,
    282                            ValueInput&& aValue) {
    283    return mImpl.putNew(aLookup, std::forward<KeyInput>(aKey),
    284                        std::forward<ValueInput>(aValue));
    285  }
    286 
    287  // Like putNew(), but should only be used when the table is known to be big
    288  // enough for the insertion, and hashing cannot fail. Typically this is used
    289  // to populate an empty map with known-unique keys after reserving space with
    290  // reserve(), e.g.
    291  //
    292  //   using HM = HashMap<int,char>;
    293  //   HM h;
    294  //   if (!h.reserve(3)) {
    295  //     MOZ_CRASH("OOM");
    296  //   }
    297  //   h.putNewInfallible(1, 'a');    // unique key
    298  //   h.putNewInfallible(2, 'b');    // unique key
    299  //   h.putNewInfallible(3, 'c');    // unique key
    300  //
    301  template <typename KeyInput, typename ValueInput>
    302  void putNewInfallible(KeyInput&& aKey, ValueInput&& aValue) {
    303    mImpl.putNewInfallible(aKey, std::forward<KeyInput>(aKey),
    304                           std::forward<ValueInput>(aValue));
    305  }
    306 
    307  // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
    308  // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
    309  // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new key/value. E.g.:
    310  //
    311  //   using HM = HashMap<int,char>;
    312  //   HM h;
    313  //   HM::AddPtr p = h.lookupForAdd(3);
    314  //   if (!p) {
    315  //     if (!h.add(p, 3, 'a')) {
    316  //       return false;
    317  //     }
    318  //   }
    319  //   assert(p->key() == 3);
    320  //   char val = p->value();
    321  //
    322  // N.B. The caller must ensure that no mutating hash table operations occur
    323  // between a pair of lookupForAdd() and add() calls. To avoid looking up the
    324  // key a second time, the caller may use the more efficient relookupOrAdd()
    325  // method. This method reuses part of the hashing computation to more
    326  // efficiently insert the key if it has not been added. For example, a
    327  // mutation-handling version of the previous example:
    328  //
    329  //    HM::AddPtr p = h.lookupForAdd(3);
    330  //    if (!p) {
    331  //      call_that_may_mutate_h();
    332  //      if (!h.relookupOrAdd(p, 3, 'a')) {
    333  //        return false;
    334  //      }
    335  //    }
    336  //    assert(p->key() == 3);
    337  //    char val = p->value();
    338  //
    339  using AddPtr = typename Impl::AddPtr;
    340  MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) {
    341    return mImpl.lookupForAdd(aLookup);
    342  }
    343 
    344  // Add a key/value. Returns false on OOM.
    345  template <typename KeyInput, typename ValueInput>
    346  [[nodiscard]] bool add(AddPtr& aPtr, KeyInput&& aKey, ValueInput&& aValue) {
    347    return mImpl.add(aPtr, std::forward<KeyInput>(aKey),
    348                     std::forward<ValueInput>(aValue));
    349  }
    350 
    351  // See the comment above lookupForAdd() for details.
    352  template <typename KeyInput, typename ValueInput>
    353  [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, KeyInput&& aKey,
    354                                   ValueInput&& aValue) {
    355    return mImpl.relookupOrAdd(aPtr, aKey, std::forward<KeyInput>(aKey),
    356                               std::forward<ValueInput>(aValue));
    357  }
    358 
    359  // -- Removal --------------------------------------------------------------
    360 
    361  // Lookup and remove the key/value matching |aLookup|, if present.
    362  void remove(const Lookup& aLookup) {
    363    if (Ptr p = lookup(aLookup)) {
    364      remove(p);
    365    }
    366  }
    367 
    368  // Remove a previously found key/value (assuming aPtr.found()). The map must
    369  // not have been mutated in the interim.
    370  void remove(Ptr aPtr) { mImpl.remove(aPtr); }
    371 
    372  // Remove all keys/values without changing the capacity.
    373  void clear() { mImpl.clear(); }
    374 
    375  // Like clear() followed by compact().
    376  void clearAndCompact() { mImpl.clearAndCompact(); }
    377 
    378  // -- Rekeying -------------------------------------------------------------
    379 
    380  // Infallibly rekey one entry, if necessary. Requires that template
    381  // parameters Key and HashPolicy::Lookup are the same type.
    382  void rekeyIfMoved(const Key& aOldKey, const Key& aNewKey) {
    383    if (aOldKey != aNewKey) {
    384      rekeyAs(aOldKey, aNewKey, aNewKey);
    385    }
    386  }
    387 
    388  // Infallibly rekey one entry if present, and return whether that happened.
    389  bool rekeyAs(const Lookup& aOldLookup, const Lookup& aNewLookup,
    390               const Key& aNewKey) {
    391    if (Ptr p = lookup(aOldLookup)) {
    392      mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewKey);
    393      return true;
    394    }
    395    return false;
    396  }
    397 
    398  // -- Iteration ------------------------------------------------------------
    399 
    400  // |iter()| returns an Iterator:
    401  //
    402  //   HashMap<int, char> h;
    403  //   for (auto iter = h.iter(); !iter.done(); iter.next()) {
    404  //     char c = iter.get().value();
    405  //   }
    406  //
    407  using Iterator = typename Impl::Iterator;
    408  Iterator iter() const { return mImpl.iter(); }
    409 
    410  // |modIter()| returns a ModIterator:
    411  //
    412  //   HashMap<int, char> h;
    413  //   for (auto iter = h.modIter(); !iter.done(); iter.next()) {
    414  //     if (iter.get().value() == 'l') {
    415  //       iter.remove();
    416  //     }
    417  //   }
    418  //
    419  // Table resize may occur in ModIterator's destructor.
    420  using ModIterator = typename Impl::ModIterator;
    421  ModIterator modIter() { return mImpl.modIter(); }
    422 
    423  // These are similar to Iterator/ModIterator/iter(), but use different
    424  // terminology.
    425  using Range = typename Impl::Range;
    426  using Enum = typename Impl::Enum;
    427  Range all() const { return mImpl.all(); }
    428 
    429  // NOTE(review): byte offsets of the table's internal fields, computed via
    430  // offsetof through mImpl; presumably consumed by code that pokes at the
    431  // map's layout directly (e.g. a JIT) — confirm against callers.
    429  static size_t offsetOfHashShift() {
    430    return offsetof(HashMap, mImpl) + Impl::offsetOfHashShift();
    431  }
    432  static size_t offsetOfTable() {
    433    return offsetof(HashMap, mImpl) + Impl::offsetOfTable();
    434  }
    435  static size_t offsetOfEntryCount() {
    436    return offsetof(HashMap, mImpl) + Impl::offsetOfEntryCount();
    437  }
    438 };
    439 
    440 //---------------------------------------------------------------------------
    441 // HashSet
    442 //---------------------------------------------------------------------------
    443 
    444 // HashSet is a fast hash-based set of values.
    445 //
    446 // Template parameter requirements:
    447 // - T: movable, destructible, assignable.
    448 // - HashPolicy: see the "Hash Policy" section below.
    449 // - AllocPolicy: see AllocPolicy.h
    450 //
    451 // Note:
    452 // - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
    453 //   HashSet must not call back into the same HashSet object.
    454 //
    455 template <class T, class HashPolicy = DefaultHasher<T>,
    456          class AllocPolicy = MallocAllocPolicy>
    457 class HashSet {
    458  // -- Implementation details -----------------------------------------------
    459 
    460  // HashSet is not copyable or assignable.
    461  HashSet(const HashSet& hs) = delete;
    462  HashSet& operator=(const HashSet& hs) = delete;
    463 
    464  struct SetHashPolicy : HashPolicy {
    465    using Base = HashPolicy;
    466    using KeyType = T;
    467 
    468    static const KeyType& getKey(const T& aT) { return aT; }
    469 
    470    static void setKey(T& aT, KeyType& aKey) { HashPolicy::rekey(aT, aKey); }
    471  };
    472 
    473  using Impl = detail::HashTable<const T, SetHashPolicy, AllocPolicy>;
    474  Impl mImpl;
    475 
    476  friend class Impl::Enum;
    477 
    478 public:
    479  using Lookup = typename HashPolicy::Lookup;
    480  using Entry = T;
    481 
    482  // -- Initialization -------------------------------------------------------
    483 
    484  explicit HashSet(AllocPolicy aAllocPolicy = AllocPolicy(),
    485                   uint32_t aLen = Impl::sDefaultLen)
    486      : mImpl(std::move(aAllocPolicy), aLen) {}
    487 
    488  explicit HashSet(uint32_t aLen) : mImpl(AllocPolicy(), aLen) {}
    489 
    490  // HashSet is movable.
    491  HashSet(HashSet&& aRhs) = default;
    492  HashSet& operator=(HashSet&& aRhs) = default;
    493 
    494  // Swap the contents of this hash set with another.
    495  void swap(HashSet& aOther) { mImpl.swap(aOther.mImpl); }
    496 
    497  // -- Status and sizing ----------------------------------------------------
    498 
    499  // The set's current generation.
    500  Generation generation() const { return mImpl.generation(); }
    501 
    502  // Is the set empty?
    503  bool empty() const { return mImpl.empty(); }
    504 
    505  // Number of elements in the set.
    506  uint32_t count() const { return mImpl.count(); }
    507 
    508  // Number of element slots in the set. Note: resize will happen well before
    509  // count() == capacity().
    510  uint32_t capacity() const { return mImpl.capacity(); }
    511 
    512  // The size of the set's entry storage, in bytes. If the elements contain
    513  // pointers to other heap blocks, you must iterate over the set and measure
    514  // them separately; hence the "shallow" prefix.
    515  size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
    516    return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
    517  }
    518  size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
    519    return aMallocSizeOf(this) +
    520           mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
    521  }
    522 
    523  // Attempt to minimize the capacity(). If the table is empty, this will free
    524  // the empty storage and upon regrowth it will be given the minimum capacity.
    525  void compact() { mImpl.compact(); }
    526 
    527  // Attempt to reserve enough space to fit at least |aLen| elements. This is
    528  // total capacity, including elements already present. Does nothing if the
    529  // set already has sufficient capacity.
    530  [[nodiscard]] bool reserve(uint32_t aLen) { return mImpl.reserve(aLen); }
    531 
    532  // -- Lookups --------------------------------------------------------------
    533 
    534  // Does the set contain an element matching |aLookup|?
    535  bool has(const Lookup& aLookup) const {
    536    return mImpl.lookup(aLookup).found();
    537  }
    538 
    539  // Return a Ptr indicating whether an element matching |aLookup| is present
    540  // in the set. E.g.:
    541  //
    542  //   using HS = HashSet<int>;
    543  //   HS h;
    544  //   if (HS::Ptr p = h.lookup(3)) {
    545  //     assert(*p == 3);   // p acts like a pointer to int
    546  //   }
    547  //
    548  using Ptr = typename Impl::Ptr;
    549  MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const {
    550    return mImpl.lookup(aLookup);
    551  }
    552 
    553  // Like lookup(), but does not assert if two threads call it at the same
    554  // time. Only use this method when none of the threads will modify the set.
    555  MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const {
    556    return mImpl.readonlyThreadsafeLookup(aLookup);
    557  }
    558 
    559  // -- Insertions -----------------------------------------------------------
    560 
    561  // Add |aU| if it is not present already. Returns false on OOM.
    562  template <typename U>
    563  [[nodiscard]] bool put(U&& aU) {
    564    AddPtr p = lookupForAdd(aU);
    565    return p ? true : add(p, std::forward<U>(aU));
    566  }
    567 
    568  // Like put(), but slightly faster. Must only be used when the given element
    569  // is not already present. (In debug builds, assertions check this.)
    570  template <typename U>
    571  [[nodiscard]] bool putNew(U&& aU) {
    572    return mImpl.putNew(aU, std::forward<U>(aU));
    573  }
    574 
    575  // Like the other putNew(), but for when |Lookup| is different to |T|.
    576  template <typename U>
    577  [[nodiscard]] bool putNew(const Lookup& aLookup, U&& aU) {
    578    return mImpl.putNew(aLookup, std::forward<U>(aU));
    579  }
    580 
    581  // Like putNew(), but should only be used when the table is known to be big
    582  // enough for the insertion, and hashing cannot fail. Typically this is used
    583  // to populate an empty set with known-unique elements after reserving space
    584  // with reserve(), e.g.
    585  //
    586  //   using HS = HashSet<int>;
    587  //   HS h;
    588  //   if (!h.reserve(3)) {
    589  //     MOZ_CRASH("OOM");
    590  //   }
    591  //   h.putNewInfallible(1);     // unique element
    592  //   h.putNewInfallible(2);     // unique element
    593  //   h.putNewInfallible(3);     // unique element
    594  //
    595  template <typename U>
    596  void putNewInfallible(const Lookup& aLookup, U&& aU) {
    597    mImpl.putNewInfallible(aLookup, std::forward<U>(aU));
    598  }
    599 
    600  // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
    601  // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
    602  // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
    603  //
    604  //   using HS = HashSet<int>;
    605  //   HS h;
    606  //   HS::AddPtr p = h.lookupForAdd(3);
    607  //   if (!p) {
    608  //     if (!h.add(p, 3)) {
    609  //       return false;
    610  //     }
    611  //   }
    612  //   assert(*p == 3);   // p acts like a pointer to int
    613  //
    614  // N.B. The caller must ensure that no mutating hash table operations occur
    615  // between a pair of lookupForAdd() and add() calls. To avoid looking up the
    616  // key a second time, the caller may use the more efficient relookupOrAdd()
    617  // method. This method reuses part of the hashing computation to more
    618  // efficiently insert the key if it has not been added. For example, a
    619  // mutation-handling version of the previous example:
    620  //
    621  //    HS::AddPtr p = h.lookupForAdd(3);
    622  //    if (!p) {
    623  //      call_that_may_mutate_h();
    624  //      if (!h.relookupOrAdd(p, 3, 3)) {
    625  //        return false;
    626  //      }
    627  //    }
    628  //    assert(*p == 3);
    629  //
    630  // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
    631  // entry |t|, where the caller ensures match(l,t).
    632  using AddPtr = typename Impl::AddPtr;
    633  MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) {
    634    return mImpl.lookupForAdd(aLookup);
    635  }
    636 
    637  // Add an element. Returns false on OOM.
    638  template <typename U>
    639  [[nodiscard]] bool add(AddPtr& aPtr, U&& aU) {
    640    return mImpl.add(aPtr, std::forward<U>(aU));
    641  }
    642 
    643  // See the comment above lookupForAdd() for details.
    644  template <typename U>
    645  [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup,
    646                                   U&& aU) {
    647    return mImpl.relookupOrAdd(aPtr, aLookup, std::forward<U>(aU));
    648  }
    649 
    650  // -- Removal --------------------------------------------------------------
    651 
    652  // Lookup and remove the element matching |aLookup|, if present.
    653  void remove(const Lookup& aLookup) {
    654    if (Ptr p = lookup(aLookup)) {
    655      remove(p);
    656    }
    657  }
    658 
    659  // Remove a previously found element (assuming aPtr.found()). The set must
    660  // not have been mutated in the interim.
    661  void remove(Ptr aPtr) { mImpl.remove(aPtr); }
    662 
    663  // Remove all elements without changing the capacity.
    664  void clear() { mImpl.clear(); }
    665 
    666  // Like clear() followed by compact().
    667  void clearAndCompact() { mImpl.clearAndCompact(); }
    668 
    669  // -- Rekeying -------------------------------------------------------------
    670 
    671  // Infallibly rekey one entry, if present. Requires that template parameters
    672  // T and HashPolicy::Lookup are the same type.
    673  void rekeyIfMoved(const Lookup& aOldValue, const T& aNewValue) {
    674    if (aOldValue != aNewValue) {
    675      rekeyAs(aOldValue, aNewValue, aNewValue);
    676    }
    677  }
    678 
    679  // Infallibly rekey one entry if present, and return whether that happened.
    680  bool rekeyAs(const Lookup& aOldLookup, const Lookup& aNewLookup,
    681               const T& aNewValue) {
    682    if (Ptr p = lookup(aOldLookup)) {
    683      mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewValue);
    684      return true;
    685    }
    686    return false;
    687  }
    688 
    689  // Infallibly replace the current key at |aPtr| with an equivalent key.
    690  // Specifically, both HashPolicy::hash and HashPolicy::match must return
    691  // identical results for the new and old key when applied against all
    692  // possible matching values.
    693  void replaceKey(Ptr aPtr, const Lookup& aLookup, const T& aNewValue) {
    694    MOZ_ASSERT(aPtr.found());
    695    MOZ_ASSERT(*aPtr != aNewValue);
    696    MOZ_ASSERT(HashPolicy::match(*aPtr, aLookup));
    697    MOZ_ASSERT(HashPolicy::match(aNewValue, aLookup));
    698    const_cast<T&>(*aPtr) = aNewValue;
    699    MOZ_ASSERT(*lookup(aLookup) == aNewValue);
    700  }
    701  void replaceKey(Ptr aPtr, const T& aNewValue) {
    702    replaceKey(aPtr, aNewValue, aNewValue);
    703  }
    704 
    705  // -- Iteration ------------------------------------------------------------
    706 
    707  // |iter()| returns an Iterator:
    708  //
    709  //   HashSet<int> h;
    710  //   for (auto iter = h.iter(); !iter.done(); iter.next()) {
    711  //     int i = iter.get();
    712  //   }
    713  //
    714  using Iterator = typename Impl::Iterator;
    715  Iterator iter() const { return mImpl.iter(); }
    716 
    717  // |modIter()| returns a ModIterator:
    718  //
    719  //   HashSet<int> h;
    720  //   for (auto iter = h.modIter(); !iter.done(); iter.next()) {
    721  //     if (iter.get() == 42) {
    722  //       iter.remove();
    723  //     }
    724  //   }
    725  //
    726  // Table resize may occur in ModIterator's destructor.
    727  using ModIterator = typename Impl::ModIterator;
    728  ModIterator modIter() { return mImpl.modIter(); }
    729 
    730  // These are similar to Iterator/ModIterator/iter(), but use different
    731  // terminology.
    732  using Range = typename Impl::Range;
    733  using Enum = typename Impl::Enum;
    734  Range all() const { return mImpl.all(); }
    735 };
    736 
    737 //---------------------------------------------------------------------------
    738 // Hash Policy
    739 //---------------------------------------------------------------------------
    740 
    741 // A hash policy |HP| for a hash table with key-type |Key| must provide:
    742 //
    743 //  - a type |HP::Lookup| to use to lookup table entries;
    744 //
    745 //  - a static member function |HP::hash| that hashes lookup values:
    746 //
    747 //      static mozilla::HashNumber hash(const Lookup&);
    748 //
    749 //  - a static member function |HP::match| that tests equality of key and
    750 //    lookup values:
    751 //
    752 //      static bool match(const Key& aKey, const Lookup& aLookup);
    753 //
    754 //    |aKey| and |aLookup| can have different hash numbers, only when a
    755 //    collision happens with |prepareHash| operation, which is less frequent.
    756 //    Thus, |HP::match| shouldn't assume the hash equality in the comparison,
    757 //    even if the hash numbers are almost always same between them.
    758 //
    759 // Normally, Lookup = Key. In general, though, different values and types of
    760 // values can be used to lookup and store. If a Lookup value |l| is not equal
    761 // to the added Key value |k|, the user must ensure that |HP::match(k,l)| is
    762 // true. E.g.:
    763 //
    764 //   mozilla::HashSet<Key, HP>::AddPtr p = h.lookup(l);
    765 //   if (!p) {
    766 //     assert(HP::match(k, l));  // must hold
    767 //     h.add(p, k);
    768 //   }
    769 
    770 // A pointer hashing policy that uses HashGeneric() to create good hashes for
    771 // pointers. Note that we don't shift out the lowest k bits because we don't
    772 // want to assume anything about the alignment of the pointers.
    773 template <typename Key>
    774 struct PointerHasher {
    775  using Lookup = Key;
    776 
    777  static HashNumber hash(const Lookup& aLookup) { return HashGeneric(aLookup); }
    778 
    779  static bool match(const Key& aKey, const Lookup& aLookup) {
    780    return aKey == aLookup;
    781  }
    782 
    783  static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
    784 };
    785 
    786 // The default hash policy, which only works with integers.
    787 template <class Key, typename>
    788 struct DefaultHasher {
    789  using Lookup = Key;
    790 
    791  static HashNumber hash(const Lookup& aLookup) {
    792    // Just convert the integer to a HashNumber and use that as is. (This
    793    // discards the high 32-bits of 64-bit integers!) ScrambleHashCode() is
    794    // subsequently called on the value to improve the distribution.
    795    return aLookup;
    796  }
    797 
    798  static bool match(const Key& aKey, const Lookup& aLookup) {
    799    // Use builtin or overloaded operator==.
    800    return aKey == aLookup;
    801  }
    802 
    803  static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
    804 };
    805 
    806 // A DefaultHasher specialization for enums.
    807 template <class T>
    808 struct DefaultHasher<T, std::enable_if_t<std::is_enum_v<T>>> {
    809  using Key = T;
    810  using Lookup = Key;
    811 
    812  static HashNumber hash(const Lookup& aLookup) { return HashGeneric(aLookup); }
    813 
    814  static bool match(const Key& aKey, const Lookup& aLookup) {
    815    // Use builtin or overloaded operator==.
    816    return aKey == static_cast<Key>(aLookup);
    817  }
    818 
    819  static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
    820 };
    821 
    822 // A DefaultHasher specialization for pointers.
// Inherits hash(), match() and rekey() unchanged from PointerHasher<T*>.
template <class T>
struct DefaultHasher<T*> : PointerHasher<T*> {};
    825 
    826 // A DefaultHasher specialization for mozilla::UniquePtr.
    827 template <class T, class D>
    828 struct DefaultHasher<UniquePtr<T, D>> {
    829  using Key = UniquePtr<T, D>;
    830  using Lookup = Key;
    831  using PtrHasher = PointerHasher<T*>;
    832 
    833  static HashNumber hash(const Lookup& aLookup) {
    834    return PtrHasher::hash(aLookup.get());
    835  }
    836 
    837  static bool match(const Key& aKey, const Lookup& aLookup) {
    838    return PtrHasher::match(aKey.get(), aLookup.get());
    839  }
    840 
    841  static void rekey(UniquePtr<T, D>& aKey, UniquePtr<T, D>&& aNewKey) {
    842    aKey = std::move(aNewKey);
    843  }
    844 };
    845 
    846 // A DefaultHasher specialization for doubles.
    847 template <>
    848 struct DefaultHasher<double> {
    849  using Key = double;
    850  using Lookup = Key;
    851 
    852  static HashNumber hash(const Lookup& aLookup) {
    853    // Just xor the high bits with the low bits, and then treat the bits of the
    854    // result as a uint32_t.
    855    static_assert(sizeof(HashNumber) == 4,
    856                  "subsequent code assumes a four-byte hash");
    857    uint64_t u = BitwiseCast<uint64_t>(aLookup);
    858    return HashNumber(u ^ (u >> 32));
    859  }
    860 
    861  static bool match(const Key& aKey, const Lookup& aLookup) {
    862    return BitwiseCast<uint64_t>(aKey) == BitwiseCast<uint64_t>(aLookup);
    863  }
    864 };
    865 
    866 // A DefaultHasher specialization for floats.
    867 template <>
    868 struct DefaultHasher<float> {
    869  using Key = float;
    870  using Lookup = Key;
    871 
    872  static HashNumber hash(const Lookup& aLookup) {
    873    // Just use the value as if its bits form an integer. ScrambleHashCode() is
    874    // subsequently called on the value to improve the distribution.
    875    static_assert(sizeof(HashNumber) == 4,
    876                  "subsequent code assumes a four-byte hash");
    877    return HashNumber(BitwiseCast<uint32_t>(aLookup));
    878  }
    879 
    880  static bool match(const Key& aKey, const Lookup& aLookup) {
    881    return BitwiseCast<uint32_t>(aKey) == BitwiseCast<uint32_t>(aLookup);
    882  }
    883 };
    884 
    885 // A hash policy for C strings.
    886 struct CStringHasher {
    887  using Key = const char*;
    888  using Lookup = const char*;
    889 
    890  static HashNumber hash(const Lookup& aLookup) { return HashString(aLookup); }
    891 
    892  static bool match(const Key& aKey, const Lookup& aLookup) {
    893    return strcmp(aKey, aLookup) == 0;
    894  }
    895 };
    896 
    897 //---------------------------------------------------------------------------
    898 // Fallible Hashing Interface
    899 //---------------------------------------------------------------------------
    900 
    901 // Most of the time generating a hash code is infallible, but sometimes it is
    902 // necessary to generate hash codes on demand in a way that can fail. Specialize
    903 // this class for your own hash policy to provide fallible hashing.
    904 //
    905 // This is used by MovableCellHasher to handle the fact that generating a unique
    906 // ID for cell pointer may fail due to OOM.
    907 //
    908 // The default implementations of these methods delegate to the usual HashPolicy
    909 // implementation and always succeed.
template <typename HashPolicy>
struct FallibleHashMethods {
 // Return true if a hashcode is already available for its argument, and
 // sets |aHashOut|. Once this succeeds for a specific argument it
 // must continue to do so.
 //
 // Return false if a hashcode is not already available. This implies that any
 // lookup must fail, as the hash code would have to have been successfully
 // created on insertion.
 //
 // This default implementation simply computes the hash and always succeeds.
 template <typename Lookup>
 static bool maybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) {
   *aHashOut = HashPolicy::hash(aLookup);
   return true;
 }

 // Fallible method to ensure a hashcode exists for its argument and create one
 // if not. Sets |aHashOut| to the hashcode and returns true on success. Returns
 // false on error, e.g. out of memory.
 //
 // This default implementation simply computes the hash and always succeeds.
 template <typename Lookup>
 static bool ensureHash(Lookup&& aLookup, HashNumber* aHashOut) {
   *aHashOut = HashPolicy::hash(aLookup);
   return true;
 }
};
    934 
// Free-function entry points that forward to FallibleHashMethods specialized
// on |HashPolicy::Base|, so that a policy hierarchy shares one specialization.
template <typename HashPolicy, typename Lookup>
static bool MaybeGetHash(Lookup&& aLookup, HashNumber* aHashOut) {
 return FallibleHashMethods<typename HashPolicy::Base>::maybeGetHash(
     std::forward<Lookup>(aLookup), aHashOut);
}

template <typename HashPolicy, typename Lookup>
static bool EnsureHash(Lookup&& aLookup, HashNumber* aHashOut) {
 return FallibleHashMethods<typename HashPolicy::Base>::ensureHash(
     std::forward<Lookup>(aLookup), aHashOut);
}
    946 
    947 //---------------------------------------------------------------------------
    948 // Implementation Details (HashMapEntry, HashTableEntry, HashTable)
    949 //---------------------------------------------------------------------------
    950 
    951 // Both HashMap and HashSet are implemented by a single HashTable that is even
    952 // more heavily parameterized than the other two. This leaves HashTable gnarly
    953 // and extremely coupled to HashMap and HashSet; thus code should not use
    954 // HashTable directly.
    955 
// The key/value pair stored by HashMap. Movable but not copyable; field
// offsets are exposed for (e.g.) JIT consumers via offsetOfKey/offsetOfValue.
template <class Key, class Value>
class HashMapEntry {
 Key key_;
 Value value_;

 template <class, class, class>
 friend class detail::HashTable;
 template <class>
 friend class detail::HashTableEntry;
 template <class, class, class, class>
 friend class HashMap;

public:
 template <typename KeyInput, typename ValueInput>
 HashMapEntry(KeyInput&& aKey, ValueInput&& aValue)
     : key_(std::forward<KeyInput>(aKey)),
       value_(std::forward<ValueInput>(aValue)) {}

 HashMapEntry(HashMapEntry&& aRhs) = default;
 HashMapEntry& operator=(HashMapEntry&& aRhs) = default;

 using KeyType = Key;
 using ValueType = Value;

 const Key& key() const { return key_; }

 // Use this method with caution! If the key is changed such that its hash
 // value also changes, the map will be left in an invalid state.
 Key& mutableKey() { return key_; }

 const Value& value() const { return value_; }
 Value& value() { return value_; }

 static size_t offsetOfKey() { return offsetof(HashMapEntry, key_); }
 static size_t offsetOfValue() { return offsetof(HashMapEntry, value_); }

private:
 // Copying is forbidden; entries move when the table resizes.
 HashMapEntry(const HashMapEntry&) = delete;
 void operator=(const HashMapEntry&) = delete;
};
    996 
    997 namespace detail {
    998 
// Reserved cached-hash values: 0 marks a never-used slot, 1 marks a removed
// slot (tombstone). Live entries therefore always store a hash > 1, and bit 0
// of a live entry's stored hash doubles as the collision ("someone probed
// past here") bit.
static const HashNumber kHashTableFreeKey = 0;
static const HashNumber kHashTableRemovedKey = 1;
static const HashNumber kHashTableCollisionBit = 1;

template <class T, class HashPolicy, class AllocPolicy>
class HashTable;

template <typename T>
class EntrySlot;
   1008 
// Storage for one table element's value. The cached hash for this entry lives
// in a separate parallel array (see the layout discussion below), so this
// class holds only the (possibly uninitialized) T.
template <typename T>
class HashTableEntry {
private:
 using NonConstT = std::remove_const_t<T>;

 // Instead of having a hash table entry store that looks like this:
 //
 // +--------+--------+--------+--------+
 // | entry0 | entry1 |  ....  | entryN |
 // +--------+--------+--------+--------+
 //
 // where the entries contained their cached hash code, we're going to lay out
 // the entry store thusly:
 //
 // +-------+-------+-------+-------+--------+--------+--------+--------+
 // | hash0 | hash1 |  ...  | hashN | entry0 | entry1 |  ....  | entryN |
 // +-------+-------+-------+-------+--------+--------+--------+--------+
 //
 // with all the cached hashes prior to the actual entries themselves.
 //
 // We do this because implementing the first strategy requires us to make
 // HashTableEntry look roughly like:
 //
 // template <typename T>
 // class HashTableEntry {
 //   HashNumber mKeyHash;
 //   T mValue;
 // };
 //
 // The problem with this setup is that, depending on the layout of `T`, there
 // may be platform ABI-mandated padding between `mKeyHash` and the first
 // member of `T`. This ABI-mandated padding is wasted space, and can be
 // surprisingly common, e.g. when `T` is a single pointer on 64-bit platforms.
 // In such cases, we're throwing away a quarter of our entry store on padding,
 // which is undesirable.
 //
 // The second layout above, namely:
 //
 // +-------+-------+-------+-------+--------+--------+--------+--------+
 // | hash0 | hash1 |  ...  | hashN | entry0 | entry1 |  ....  | entryN |
 // +-------+-------+-------+-------+--------+--------+--------+--------+
 //
 // means there is no wasted space between the hashes themselves, and no wasted
 // space between the entries themselves.  However, we would also like there to
 // be no gap between the last hash and the first entry. The memory allocator
 // guarantees the alignment of the start of the hashes. The use of a
 // power-of-two capacity of at least 4 guarantees that the alignment of the
 // *end* of the hash array is no less than the alignment of the start.
 // Finally, the static_asserts here guarantee that the entries themselves
 // don't need to be any more aligned than the alignment of the entry store
 // itself.
 //
 // This assertion is safe for 32-bit builds because on both Windows and Linux
 // (including Android), the minimum alignment for allocations larger than 8
 // bytes is 8 bytes, and the actual data for entries in our entry store is
 // guaranteed to have that alignment as well, thanks to the power-of-two
 // number of cached hash values stored prior to the entry data.

 // The allocation policy must allocate a table with at least this much
 // alignment.
 static constexpr size_t kMinimumAlignment = 8;

 static_assert(alignof(HashNumber) <= kMinimumAlignment,
               "[N*2 hashes, N*2 T values] allocation's alignment must be "
               "enough to align each hash");
 static_assert(alignof(NonConstT) <= 2 * sizeof(HashNumber),
               "subsequent N*2 T values must not require more than an even "
               "number of HashNumbers provides");

 static const HashNumber sFreeKey = kHashTableFreeKey;
 static const HashNumber sRemovedKey = kHashTableRemovedKey;
 static const HashNumber sCollisionBit = kHashTableCollisionBit;

 // Raw, suitably aligned storage for the T; constructed/destroyed manually.
 alignas(NonConstT) unsigned char mValueData[sizeof(NonConstT)];

private:
 template <class, class, class>
 friend class HashTable;
 template <typename>
 friend class EntrySlot;

 // Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a
 // -Werror compile error) to reinterpret_cast<> |mValueData| to |T*|, even
 // through |void*|.  Placing the latter cast in these separate functions
 // breaks the chain such that affected GCC versions no longer warn/error.
 void* rawValuePtr() { return mValueData; }

 // Live entries have a cached hash strictly greater than the removed
 // sentinel (1).
 static bool isLiveHash(HashNumber hash) { return hash > sRemovedKey; }

 HashTableEntry(const HashTableEntry&) = delete;
 void operator=(const HashTableEntry&) = delete;

 NonConstT* valuePtr() { return reinterpret_cast<NonConstT*>(rawValuePtr()); }

 // Run T's destructor on the stored value and poison the bytes for MSan/ASan.
 void destroyStoredT() {
   NonConstT* ptr = valuePtr();
   ptr->~T();
   MOZ_MAKE_MEM_UNDEFINED(ptr, sizeof(*ptr));
 }

public:
 HashTableEntry() = default;

 // The destructor does not destroy the stored T: the owning table calls
 // destroy() only for live entries. It just poisons the storage.
 ~HashTableEntry() { MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this)); }

 void destroy() { destroyStoredT(); }

 // Exchange stored values with |aOther|. |aIsLive| is the caller's statement
 // about whether |aOther| currently holds a live value: if so the two values
 // are swapped; otherwise our value is moved into |aOther| and destroyed
 // here.
 void swap(HashTableEntry* aOther, bool aIsLive) {
   // This allows types to use Argument-Dependent-Lookup, and thus use a custom
   // std::swap, which is needed by types like JS::Heap and such.
   using std::swap;

   if (this == aOther) {
     return;
   }
   if (aIsLive) {
     swap(*valuePtr(), *aOther->valuePtr());
   } else {
     *aOther->valuePtr() = std::move(*valuePtr());
     destroy();
   }
 }

 T& get() { return *valuePtr(); }

 NonConstT& getMutable() { return *valuePtr(); }
};
   1136 
   1137 // A slot represents a cached hash value and its associated entry stored
   1138 // in the hash table. These two things are not stored in contiguous memory.
// Pairs a pointer to an entry with a pointer to its cached hash (which live
// in two separate parallel arrays). Most table operations manipulate slots.
template <class T>
class EntrySlot {
 using NonConstT = std::remove_const_t<T>;

 using Entry = HashTableEntry<T>;

 Entry* mEntry;
 HashNumber* mKeyHash;

 template <class, class, class>
 friend class HashTable;

 EntrySlot(Entry* aEntry, HashNumber* aKeyHash)
     : mEntry(aEntry), mKeyHash(aKeyHash) {}

public:
 // Live cached hashes are strictly greater than the removed sentinel (1).
 static bool isLiveHash(HashNumber hash) { return hash > Entry::sRemovedKey; }

 EntrySlot(const EntrySlot&) = default;
 EntrySlot(EntrySlot&& aOther) = default;

 EntrySlot& operator=(const EntrySlot&) = default;
 EntrySlot& operator=(EntrySlot&&) = default;

 bool operator==(const EntrySlot& aRhs) const { return mEntry == aRhs.mEntry; }

 bool operator<(const EntrySlot& aRhs) const { return mEntry < aRhs.mEntry; }

 // Advance both parallel pointers to the next slot.
 EntrySlot& operator++() {
   ++mEntry;
   ++mKeyHash;
   return *this;
 }

 void destroy() { mEntry->destroy(); }

 // Swap this slot's value and cached hash with |aOther|'s; the entry swap is
 // told whether |aOther| currently holds a live value.
 void swap(EntrySlot& aOther) {
   mEntry->swap(aOther.mEntry, aOther.isLive());
   std::swap(*mKeyHash, *aOther.mKeyHash);
 }

 T& get() const { return mEntry->get(); }

 NonConstT& getMutable() { return mEntry->getMutable(); }

 bool isFree() const { return *mKeyHash == Entry::sFreeKey; }

 // Destroy the value and mark the slot free (NOT a tombstone).
 void clearLive() {
   MOZ_ASSERT(isLive());
   *mKeyHash = Entry::sFreeKey;
   mEntry->destroyStoredT();
 }

 // Destroy the value if present, poison the entry, and mark the slot free.
 void clear() {
   if (isLive()) {
     mEntry->destroyStoredT();
   }
   MOZ_MAKE_MEM_UNDEFINED(mEntry, sizeof(*mEntry));
   *mKeyHash = Entry::sFreeKey;
 }

 bool isRemoved() const { return *mKeyHash == Entry::sRemovedKey; }

 // Destroy the value and leave a tombstone so probe chains stay intact.
 void removeLive() {
   MOZ_ASSERT(isLive());
   *mKeyHash = Entry::sRemovedKey;
   mEntry->destroyStoredT();
 }

 bool isLive() const { return isLiveHash(*mKeyHash); }

 // The collision bit (bit 0 of the cached hash) records that some probe
 // sequence passed over this live slot.
 void setCollision() {
   MOZ_ASSERT(isLive());
   *mKeyHash |= Entry::sCollisionBit;
 }
 void unsetCollision() { *mKeyHash &= ~Entry::sCollisionBit; }
 bool hasCollision() const { return *mKeyHash & Entry::sCollisionBit; }
 // Compare cached hashes with the collision bit masked off.
 bool matchHash(HashNumber hn) {
   return (*mKeyHash & ~Entry::sCollisionBit) == hn;
 }
 HashNumber getKeyHash() const { return *mKeyHash & ~Entry::sCollisionBit; }

 // Record |aHashNumber| and construct a T in place from |aArgs|.
 template <typename... Args>
 void setLive(HashNumber aHashNumber, Args&&... aArgs) {
   MOZ_ASSERT(!isLive());
   *mKeyHash = aHashNumber;
   new (KnownNotNull, mEntry->valuePtr()) T(std::forward<Args>(aArgs)...);
   MOZ_ASSERT(isLive());
 }

 Entry* toEntry() const { return mEntry; }
};
   1231 
   1232 template <class T, class HashPolicy, class AllocPolicy>
   1233 class MOZ_STANDALONE_DEBUG HashTable : private AllocPolicy {
   1234  friend class mozilla::ReentrancyGuard;
   1235 
   1236  using NonConstT = std::remove_const_t<T>;
   1237  using Key = typename HashPolicy::KeyType;
   1238  using Lookup = typename HashPolicy::Lookup;
   1239 
   1240 public:
   1241  using Entry = HashTableEntry<T>;
   1242  using Slot = EntrySlot<T>;
   1243 
   1244  template <typename F>
   1245  static void forEachSlot(char* aTable, uint32_t aCapacity, F&& f) {
   1246    auto hashes = reinterpret_cast<HashNumber*>(aTable);
   1247    auto entries = reinterpret_cast<Entry*>(&hashes[aCapacity]);
   1248    Slot slot(entries, hashes);
   1249    for (size_t i = 0; i < size_t(aCapacity); ++i) {
   1250      f(slot);
   1251      ++slot;
   1252    }
   1253  }
   1254 
 // A nullable pointer to a hash table element. A Ptr |p| can be tested
 // either explicitly |if (p.found()) p->...| or using boolean conversion
 // |if (p) p->...|. Ptr objects must not be used after any mutating hash
 // table operations unless |generation()| is tested.
 class Ptr {
   friend class HashTable;

   Slot mSlot;
#ifdef DEBUG
   // Debug-only bookkeeping to assert the Ptr isn't used across table
   // generations.
   const HashTable* mTable;
   Generation mGeneration;
#endif

  protected:
   Ptr(Slot aSlot, const HashTable& aTable)
       : mSlot(aSlot)
#ifdef DEBUG
         ,
         mTable(&aTable),
         mGeneration(aTable.generation())
#endif
   {
   }

   // This constructor is used only by AddPtr() within lookupForAdd().
   explicit Ptr(const HashTable& aTable)
       : mSlot(nullptr, nullptr)
#ifdef DEBUG
         ,
         mTable(&aTable),
         mGeneration(aTable.generation())
#endif
   {
   }

   // A Ptr with a null slot (default-constructed, or from an empty table)
   // is invalid.
   bool isValid() const { return !!mSlot.toEntry(); }

  public:
   Ptr()
       : mSlot(nullptr, nullptr)
#ifdef DEBUG
         ,
         mTable(nullptr),
         mGeneration(0)
#endif
   {
   }

   // True when this Ptr refers to a live entry in the table.
   bool found() const {
     if (!isValid()) {
       return false;
     }
#ifdef DEBUG
     MOZ_ASSERT(mGeneration == mTable->generation());
#endif
     return mSlot.isLive();
   }

   explicit operator bool() const { return found(); }

   bool operator==(const Ptr& aRhs) const {
     MOZ_ASSERT(found() && aRhs.found());
     return mSlot == aRhs.mSlot;
   }

   bool operator!=(const Ptr& aRhs) const {
#ifdef DEBUG
     MOZ_ASSERT(mGeneration == mTable->generation());
#endif
     return !(*this == aRhs);
   }

   T& operator*() const {
#ifdef DEBUG
     MOZ_ASSERT(found());
     MOZ_ASSERT(mGeneration == mTable->generation());
#endif
     return mSlot.get();
   }

   T* operator->() const {
#ifdef DEBUG
     MOZ_ASSERT(found());
     MOZ_ASSERT(mGeneration == mTable->generation());
#endif
     return &mSlot.get();
   }
 };
   1343 
 // A Ptr that can be used to add a key after a failed lookup.
 class AddPtr : public Ptr {
   friend class HashTable;

   // The already-computed hash for the lookup, reused by the subsequent add.
   HashNumber mKeyHash;
#ifdef DEBUG
   uint64_t mMutationCount;
#endif

   AddPtr(Slot aSlot, const HashTable& aTable, HashNumber aHashNumber)
       : Ptr(aSlot, aTable),
         mKeyHash(aHashNumber)
#ifdef DEBUG
         ,
         mMutationCount(aTable.mMutationCount)
#endif
   {
   }

   // This constructor is used when lookupForAdd() is performed on a table
   // lacking entry storage; it leaves mSlot null but initializes everything
   // else.
   AddPtr(const HashTable& aTable, HashNumber aHashNumber)
       : Ptr(aTable),
         mKeyHash(aHashNumber)
#ifdef DEBUG
         ,
         mMutationCount(aTable.mMutationCount)
#endif
   {
     MOZ_ASSERT(isLive());
   }

   // The AddPtr carries a usable hash only if it is above the reserved
   // free/removed sentinel values.
   bool isLive() const { return isLiveHash(mKeyHash); }

  public:
   // Default-constructed AddPtr carries hash 0 (the free sentinel): not live.
   AddPtr() : mKeyHash(0) {}
 };
   1382 
 // A hash table iterator that (mostly) doesn't allow table modifications.
 // As with Ptr/AddPtr, Iterator objects must not be used after any mutating
 // hash table operation unless the |generation()| is tested.
 class Iterator {
   // Advance |mCur| past free/removed slots to the next live entry (or mEnd).
   void moveToNextLiveEntry() {
     while (++mCur < mEnd && !mCur.isLive()) {
       continue;
     }
   }

  protected:
   friend class HashTable;

   explicit Iterator(const HashTable& aTable)
       : mCur(aTable.slotForIndex(0)),
         mEnd(aTable.slotForIndex(aTable.capacity()))
#ifdef DEBUG
         ,
         mTable(aTable),
         mMutationCount(aTable.mMutationCount),
         mGeneration(aTable.generation()),
         mValidEntry(true)
#endif
   {
     // Skip leading non-live slots so get() is immediately usable.
     if (!done() && !mCur.isLive()) {
       moveToNextLiveEntry();
     }
   }

   Slot mCur;
   Slot mEnd;
#ifdef DEBUG
   // Debug-only bookkeeping: assert no mutation/generation change occurred
   // while iterating, and that get() isn't called on a removed entry.
   const HashTable& mTable;
   uint64_t mMutationCount;
   Generation mGeneration;
   bool mValidEntry;
#endif

  public:
   bool done() const {
     MOZ_ASSERT(mGeneration == mTable.generation());
     MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
     return mCur == mEnd;
   }

   T& get() const {
     MOZ_ASSERT(!done());
     MOZ_ASSERT(mValidEntry);
     MOZ_ASSERT(mGeneration == mTable.generation());
     MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
     return mCur.get();
   }

   void next() {
     MOZ_ASSERT(!done());
     MOZ_ASSERT(mGeneration == mTable.generation());
     MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
     moveToNextLiveEntry();
#ifdef DEBUG
     mValidEntry = true;
#endif
   }
 };
   1446 
 // A hash table iterator that permits modification, removal and rekeying.
 // Since rehashing when elements were removed during enumeration would be
 // bad, it is postponed until the ModIterator is destructed. Since the
 // ModIterator's destructor touches the hash table, the user must ensure
 // that the hash table is still alive when the destructor runs.
 class ModIterator : public Iterator {
   friend class HashTable;

   HashTable& mTable;
   // Deferred-work flags consumed by the destructor.
   bool mRekeyed;
   bool mRemoved;

   // ModIterator is movable but not copyable.
   ModIterator(const ModIterator&) = delete;
   void operator=(const ModIterator&) = delete;

  protected:
   explicit ModIterator(HashTable& aTable)
       : Iterator(aTable), mTable(aTable), mRekeyed(false), mRemoved(false) {}

  public:
   // Move construction clears the source's flags so only one ModIterator's
   // destructor performs the deferred rehash/shrink.
   MOZ_IMPLICIT ModIterator(ModIterator&& aOther)
       : Iterator(aOther),
         mTable(aOther.mTable),
         mRekeyed(aOther.mRekeyed),
         mRemoved(aOther.mRemoved) {
     aOther.mRekeyed = false;
     aOther.mRemoved = false;
   }

   // Removes the current element from the table, leaving |get()| invalid until
   // the next call to |next()|.
   //
   // See the comments on ~ModIterator about table resizing after removing
   // entries.
   void remove() {
     mTable.remove(this->mCur);
     mRemoved = true;
#ifdef DEBUG
     this->mValidEntry = false;
     this->mMutationCount = mTable.mMutationCount;
#endif
   }

   NonConstT& getMutable() {
     MOZ_ASSERT(!this->done());
     MOZ_ASSERT(this->mValidEntry);
     MOZ_ASSERT(this->mGeneration == this->Iterator::mTable.generation());
     MOZ_ASSERT(this->mMutationCount == this->Iterator::mTable.mMutationCount);
     return this->mCur.getMutable();
   }

   // Removes the current element and re-inserts it into the table with
   // a new key at the new Lookup position.  |get()| is invalid after
   // this operation until the next call to |next()|.
   void rekey(const Lookup& l, const Key& k) {
     MOZ_ASSERT(&k != &HashPolicy::getKey(this->mCur.get()));
     Ptr p(this->mCur, mTable);
     mTable.rekeyWithoutRehash(p, l, k);
     mRekeyed = true;
#ifdef DEBUG
     this->mValidEntry = false;
     this->mMutationCount = mTable.mMutationCount;
#endif
   }

   // Common case where the new key also serves as its own lookup.
   void rekey(const Key& k) { rekey(k, k); }

   // This can rehash the table or resize it if entries were removed.
   //
   // This does not go as far as freeing the table if it is now empty, as that
   // can lead to repeatedly allocating and freeing the table when a small
   // number of entries are repeatedly added and removed. If callers require
   // memory to be minimised after removing entries they should call compact().
   ~ModIterator() {
     if (mRekeyed) {
       mTable.incrementGeneration();
       mTable.infallibleRehashIfOverloaded();
     }

     if (mRemoved) {
       mTable.shrinkToBestCapacity();
     }
   }
 };
   1532 
 // Range is similar to Iterator, but uses different terminology.
 // It is a thin wrapper mapping empty/front/popFront onto done/get/next.
 class Range {
   friend class HashTable;

   Iterator mIter;

  protected:
   explicit Range(const HashTable& table) : mIter(table) {}

  public:
   bool empty() const { return mIter.done(); }

   T& front() const { return mIter.get(); }

   void popFront() { return mIter.next(); }
 };
   1549 
 // Enum is similar to ModIterator, but uses different terminology.
 // It wraps a ModIterator, so deferred rehash/shrink happens in its
 // destructor too.
 class Enum {
   ModIterator mIter;

   // Enum is movable but not copyable.
   Enum(const Enum&) = delete;
   void operator=(const Enum&) = delete;

  public:
   // Constructed from the public HashMap/HashSet wrapper via its mImpl.
   template <class Map>
   explicit Enum(Map& map) : mIter(map.mImpl) {}

   MOZ_IMPLICIT Enum(Enum&& other) : mIter(std::move(other.mIter)) {}

   bool empty() const { return mIter.done(); }

   T& front() const { return mIter.get(); }

   void popFront() { return mIter.next(); }

   // See the comments on ~ModIterator about table resizing after removing
   // entries.
   void removeFront() { mIter.remove(); }

   NonConstT& mutableFront() { return mIter.getMutable(); }

   void rekeyFront(const Lookup& aLookup, const Key& aKey) {
     mIter.rekey(aLookup, aKey);
   }

   void rekeyFront(const Key& aKey) { mIter.rekey(aKey); }
 };
   1582 
  // HashTable is movable. Moves the alloc policy, then steals |aRhs|'s entry
  // storage via moveFrom(), leaving |aRhs| empty but valid.
  HashTable(HashTable&& aRhs) : AllocPolicy(std::move(aRhs)) { moveFrom(aRhs); }
  // Move assignment: destroys this table's existing entries and storage,
  // then takes over |aRhs|'s storage. Self-move is a caller error.
  HashTable& operator=(HashTable&& aRhs) {
    MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
    if (mTable) {
      destroyTable(*this, mTable, capacity());
    }
    AllocPolicy::operator=(std::move(aRhs));
    moveFrom(aRhs);
    return *this;
  }
   1594 
  // Exchange the entire contents (storage, counts, generation/shift word and
  // debug state) of two tables. Guarded against reentrant use of either table.
  void swap(HashTable& aOther) {
    ReentrancyGuard g1(*this);
    ReentrancyGuard g2(aOther);

    std::swap(mGenAndHashShift, aOther.mGenAndHashShift);
    std::swap(mTable, aOther.mTable);
    std::swap(mEntryCount, aOther.mEntryCount);
    std::swap(mRemovedCount, aOther.mRemovedCount);
#ifdef DEBUG
    std::swap(mMutationCount, aOther.mMutationCount);
    std::swap(mEntered, aOther.mEntered);
#endif
  }
   1608 
   1609 private:
  // Steal |aRhs|'s state. After copying the fields, |aRhs.mTable| is nulled
  // so clearAndCompact() resets it to a coherent empty state without touching
  // the storage this table now owns.
  void moveFrom(HashTable& aRhs) {
    mGenAndHashShift = aRhs.mGenAndHashShift;
    mTable = aRhs.mTable;
    mEntryCount = aRhs.mEntryCount;
    mRemovedCount = aRhs.mRemovedCount;
#ifdef DEBUG
    mMutationCount = aRhs.mMutationCount;
    mEntered = aRhs.mEntered;
#endif
    aRhs.mTable = nullptr;
    aRhs.clearAndCompact();
  }
   1622 
  // HashTable is not copyable or assignable; entry storage is uniquely owned.
  HashTable(const HashTable&) = delete;
  void operator=(const HashTable&) = delete;
   1626 
  // Capacity is a power of two no greater than 2^CAP_BITS.
  static const uint32_t CAP_BITS = 30;

 public:
  uint64_t mGenAndHashShift;  // entry storage generation number (56 bits)
                              // and multiplicative hash shift (8 bits)
  char* mTable;               // entry storage
  uint32_t mEntryCount;       // number of entries in mTable
  uint32_t mRemovedCount;     // removed entry sentinels in mTable

#ifdef DEBUG
  // Bumped on every mutation; AddPtrs record it so stale pointers assert.
  uint64_t mMutationCount;
  // Set while iterating/mutating; checked by ReentrancyGuard (see its uses).
  mutable bool mEntered;
#endif

  // The default initial capacity is 32 (enough to hold 16 elements), but it
  // can be as low as 4.
  static const uint32_t sDefaultLen = 16;
  static const uint32_t sMinCapacity = 4;
  // See the comments in HashTableEntry about this value.
  static_assert(sMinCapacity >= 4, "too-small sMinCapacity breaks assumptions");
  static const uint32_t sMaxInit = 1u << (CAP_BITS - 1);
  static const uint32_t sMaxCapacity = 1u << CAP_BITS;

  // Hash-table alpha is conceptually a fraction, but to avoid floating-point
  // math we implement it as a ratio of integers.
  static const uint8_t sAlphaDenominator = 4;
  static const uint8_t sMinAlphaNumerator = 1;  // min alpha: 1/4
  static const uint8_t sMaxAlphaNumerator = 3;  // max alpha: 3/4

  // Reserved hash values (never produced by prepareHash) and the bit used to
  // mark slots on a collision probe path.
  static const HashNumber sFreeKey = Entry::sFreeKey;
  static const HashNumber sRemovedKey = Entry::sRemovedKey;
  static const HashNumber sCollisionBit = Entry::sCollisionBit;

  // Layout of mGenAndHashShift: low 8 bits hold the hash shift, the rest the
  // generation count.
  static const uint64_t sHashShiftBits = 8;
  static const uint64_t sHashShiftMask = (1 << sHashShiftBits) - 1;
  static const uint64_t sGenerationShift = sHashShiftBits;
   1663 
  // Extract the multiplicative hash shift from the packed word.
  MOZ_ALWAYS_INLINE uint8_t hashShift() const {
    return uint8_t(mGenAndHashShift & sHashShiftMask);
  }
  // Extract the storage generation number from the packed word.
  MOZ_ALWAYS_INLINE uint64_t gen() const {
    return mGenAndHashShift >> sGenerationShift;
  }
   1670 
   1671 private:
  // Repack both fields of mGenAndHashShift at once.
  void setGenAndHashShift(uint64_t aGeneration, uint8_t aHashShift) {
    mGenAndHashShift = aGeneration << sGenerationShift | aHashShift;
  }
   1675 
   1676 public:
  // Bump the generation, invalidating outstanding Ptrs/AddPtrs.
  void incrementGeneration() { setGenAndHashShift(gen() + 1, hashShift()); }
  // Replace only the hash-shift bits, preserving the generation.
  void setHashShift(uint32_t aHashShift) {
    MOZ_ASSERT((aHashShift & sHashShiftMask) == aHashShift);
    mGenAndHashShift = (mGenAndHashShift & ~sHashShiftMask) | aHashShift;
  }
   1682 
   1683  static uint32_t bestCapacity(uint32_t aLen) {
   1684    static_assert(
   1685        (sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit,
   1686        "multiplication in numerator below could overflow");
   1687    static_assert(
   1688        sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator,
   1689        "numerator calculation below could potentially overflow");
   1690 
   1691    // Callers should ensure this is true.
   1692    MOZ_ASSERT(aLen <= sMaxInit);
   1693 
   1694    // Compute the smallest capacity allowing |aLen| elements to be
   1695    // inserted without rehashing: ceil(aLen / max-alpha).  (Ceiling
   1696    // integral division: <http://stackoverflow.com/a/2745086>.)
   1697    uint32_t capacity = (aLen * sAlphaDenominator + sMaxAlphaNumerator - 1) /
   1698                        sMaxAlphaNumerator;
   1699    capacity = (capacity < sMinCapacity) ? sMinCapacity : RoundUpPow2(capacity);
   1700 
   1701    MOZ_ASSERT(capacity >= aLen);
   1702    MOZ_ASSERT(capacity <= sMaxCapacity);
   1703 
   1704    return capacity;
   1705  }
   1706 
  // Compute the hash shift corresponding to the best capacity for |aLen|
  // elements. Crashes rather than silently misbehaving on absurd lengths.
  static uint32_t hashShiftForLength(uint32_t aLen) {
    // Reject all lengths whose initial computed capacity would exceed
    // sMaxCapacity. Round that maximum aLen down to the nearest power of two
    // for speedier code.
    if (MOZ_UNLIKELY(aLen > sMaxInit)) {
      MOZ_CRASH("initial length is too large");
    }

    return kHashNumberBits - mozilla::CeilingLog2(bestCapacity(aLen));
  }
   1717 
  // True if |aHash| is neither of the reserved free/removed sentinel values.
  static bool isLiveHash(HashNumber aHash) { return Entry::isLiveHash(aHash); }
   1719 
   1720  static HashNumber prepareHash(HashNumber aInputHash) {
   1721    HashNumber keyHash = ScrambleHashCode(aInputHash);
   1722 
   1723    // Avoid reserved hash codes.
   1724    if (!isLiveHash(keyHash)) {
   1725      keyHash -= (sRemovedKey + 1);
   1726    }
   1727    return keyHash & ~sCollisionBit;
   1728  }
   1729 
  // Whether allocation failures should be reported via the alloc policy.
  enum FailureBehavior { DontReportFailure = false, ReportFailure = true };

  // Fake a struct that we're going to alloc. See the comments in
  // HashTableEntry about how the table is laid out, and why it's safe.
  struct FakeSlot {
    unsigned char c[sizeof(HashNumber) + sizeof(typename Entry::NonConstT)];
  };
   1737 
  // Allocate entry storage for |aCapacity| slots and initialise every slot to
  // the free state. Returns null on allocation failure (reported or not per
  // |aReportFailure|).
  static char* createTable(AllocPolicy& aAllocPolicy, uint32_t aCapacity,
                           FailureBehavior aReportFailure = ReportFailure) {
    FakeSlot* fake =
        aReportFailure
            ? aAllocPolicy.template pod_malloc<FakeSlot>(aCapacity)
            : aAllocPolicy.template maybe_pod_malloc<FakeSlot>(aCapacity);

    MOZ_ASSERT((reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment) ==
               0);

    char* table = reinterpret_cast<char*>(fake);
    if (table) {
      // Mark every slot free and default-construct its Entry in place.
      forEachSlot(table, aCapacity, [&](Slot& slot) {
        *slot.mKeyHash = sFreeKey;
        new (KnownNotNull, slot.toEntry()) Entry();
      });
    }
    return table;
  }
   1757 
  // Destroy all live entries in |aOldTable| and release its storage.
  static void destroyTable(AllocPolicy& aAllocPolicy, char* aOldTable,
                           uint32_t aCapacity) {
    forEachSlot(aOldTable, aCapacity, [&](const Slot& slot) {
      if (slot.isLive()) {
        slot.toEntry()->destroyStoredT();
      }
    });
    freeTable(aAllocPolicy, aOldTable, aCapacity);
  }
   1767 
   1768  static void freeTable(AllocPolicy& aAllocPolicy, char* aOldTable,
   1769                        uint32_t aCapacity) {
   1770    FakeSlot* fake = reinterpret_cast<FakeSlot*>(aOldTable);
   1771    aAllocPolicy.free_(fake, aCapacity);
   1772  }
   1773 
   1774 public:
  // Construct an empty table sized for |aLen| elements. Entry storage is
  // allocated lazily, on the first add.
  HashTable(AllocPolicy aAllocPolicy, uint32_t aLen)
      : AllocPolicy(std::move(aAllocPolicy)),
        mGenAndHashShift(hashShiftForLength(aLen)),
        mTable(nullptr),
        mEntryCount(0),
        mRemovedCount(0)
#ifdef DEBUG
        ,
        mMutationCount(0),
        mEntered(false)
#endif
  {
  }
   1788 
  // Construct an empty table with the default initial length.
  explicit HashTable(AllocPolicy aAllocPolicy)
      : HashTable(aAllocPolicy, sDefaultLen) {}
   1791 
  // Destroy all live entries and free the storage, if any was ever allocated.
  ~HashTable() {
    if (mTable) {
      destroyTable(*this, mTable, capacity());
    }
  }
   1797 
   1798 private:
  // Primary hash: the top bits of the prepared hash select the first slot.
  HashNumber hash1(HashNumber aHash0) const { return aHash0 >> hashShift(); }
   1800 
  // Secondary hash used for double-hashing probe sequences.
  struct DoubleHash {
    HashNumber mHash2;     // step size; forced odd so it is coprime with the
                           // power-of-two capacity and visits every slot
    HashNumber mSizeMask;  // capacity - 1
  };

  DoubleHash hash2(HashNumber aCurKeyHash) const {
    uint32_t sizeLog2 = kHashNumberBits - hashShift();
    DoubleHash dh = {((aCurKeyHash << sizeLog2) >> hashShift()) | 1,
                     (HashNumber(1) << sizeLog2) - 1};
    return dh;
  }
   1812 
  // Advance a probe index by one step of the double-hash sequence.
  static HashNumber applyDoubleHash(HashNumber aHash1,
                                    const DoubleHash& aDoubleHash) {
    return WrappingSubtract(aHash1, aDoubleHash.mHash2) & aDoubleHash.mSizeMask;
  }
   1817 
  // Does |aEntry|'s key match |aLookup|, per the hash policy?
  static MOZ_ALWAYS_INLINE bool match(T& aEntry, const Lookup& aLookup) {
    return HashPolicy::match(HashPolicy::getKey(aEntry), aLookup);
  }
   1821 
  // Why a lookup is being performed; ForNonAdd lookups must not mutate.
  enum LookupReason { ForNonAdd, ForAdd };

  // Map a slot index to its (entry, hash) pair. Hashes are stored as a
  // contiguous array at the front of mTable, entries after them.
  Slot slotForIndex(HashNumber aIndex) const {
    auto hashes = reinterpret_cast<HashNumber*>(mTable);
    auto entries = reinterpret_cast<Entry*>(&hashes[capacity()]);
    return Slot(&entries[aIndex], &hashes[aIndex]);
  }
   1829 
  // Core probe loop: find the slot holding |aLookup|, or (for adds) the slot
  // where it should be inserted.
  //
  // Warning: in order for readonlyThreadsafeLookup() to be safe this
  // function must not modify the table in any way when Reason==ForNonAdd.
  template <LookupReason Reason>
  MOZ_ALWAYS_INLINE Slot lookup(const Lookup& aLookup,
                                HashNumber aKeyHash) const {
    MOZ_ASSERT(isLiveHash(aKeyHash));
    MOZ_ASSERT(!(aKeyHash & sCollisionBit));
    MOZ_ASSERT(mTable);

    // Compute the primary hash address.
    HashNumber h1 = hash1(aKeyHash);
    Slot slot = slotForIndex(h1);

    // Miss: return space for a new entry.
    if (slot.isFree()) {
      return slot;
    }

    // Hit: return entry.
    if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) {
      return slot;
    }

    // Collision: double hash.
    DoubleHash dh = hash2(aKeyHash);

    // Save the first removed entry pointer so we can recycle later.
    Maybe<Slot> firstRemoved;

    while (true) {
      if (Reason == ForAdd && !firstRemoved) {
        if (MOZ_UNLIKELY(slot.isRemoved())) {
          // Remember the first tombstone so an add can reuse it.
          firstRemoved.emplace(slot);
        } else {
          // Mark the slot as lying on some key's probe path.
          slot.setCollision();
        }
      }

      h1 = applyDoubleHash(h1, dh);

      slot = slotForIndex(h1);
      if (slot.isFree()) {
        // End of the probe chain: recycle a tombstone if we saw one.
        return firstRemoved.refOr(slot);
      }

      if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) {
        return slot;
      }
    }
  }
   1880 
  // This is a copy of lookup() hardcoded to the assumptions:
  //   1. the lookup is for an add;
  //   2. the key, whose |keyHash| has been passed, is not in the table.
  // Used when rehashing, where every inserted key is known to be absent.
  Slot findNonLiveSlot(HashNumber aKeyHash) {
    MOZ_ASSERT(!(aKeyHash & sCollisionBit));
    MOZ_ASSERT(mTable);

    // We assume 'aKeyHash' has already been distributed.

    // Compute the primary hash address.
    HashNumber h1 = hash1(aKeyHash);
    Slot slot = slotForIndex(h1);

    // Miss: return space for a new entry.
    if (!slot.isLive()) {
      return slot;
    }

    // Collision: double hash.
    DoubleHash dh = hash2(aKeyHash);

    while (true) {
      // Every live slot we pass over is on this key's probe path.
      slot.setCollision();

      h1 = applyDoubleHash(h1, dh);

      slot = slotForIndex(h1);
      if (!slot.isLive()) {
        return slot;
      }
    }
  }
   1913 
  // Outcome of a (possible) rehash/resize attempt.
  enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };

  // Allocate storage of |newCapacity| and rehash all live entries into it,
  // dropping tombstones. On failure the existing table is left untouched.
  RebuildStatus changeTableSize(
      uint32_t newCapacity, FailureBehavior aReportFailure = ReportFailure) {
    MOZ_ASSERT(IsPowerOfTwo(newCapacity));
    MOZ_ASSERT(!!mTable == !!capacity());

    // Look, but don't touch, until we succeed in getting new entry store.
    char* oldTable = mTable;
    uint32_t oldCapacity = capacity();
    uint32_t newLog2 = mozilla::CeilingLog2(newCapacity);

    if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
      if (aReportFailure) {
        this->reportAllocOverflow();
      }
      return RehashFailed;
    }

    char* newTable = createTable(*this, newCapacity, aReportFailure);
    if (!newTable) {
      return RehashFailed;
    }

    // We can't fail from here on, so update table parameters.
    mRemovedCount = 0;
    incrementGeneration();
    setHashShift(kHashNumberBits - newLog2);
    mTable = newTable;

    // Copy only live entries, leaving removed ones behind.
    forEachSlot(oldTable, oldCapacity, [&](Slot& slot) {
      if (slot.isLive()) {
        HashNumber hn = slot.getKeyHash();
        // Move the entry into its slot in the new table.
        findNonLiveSlot(hn).setLive(
            hn, std::move(const_cast<typename Entry::NonConstT&>(slot.get())));
      }

      slot.clear();
    });

    // All entries have been destroyed, no need to destroyTable.
    freeTable(*this, oldTable, oldCapacity);
    return Rehashed;
  }
   1959 
  // If the table is at or above max alpha (counting tombstones), rebuild it:
  // same capacity when tombstones dominate, doubled capacity otherwise.
  RebuildStatus rehashIfOverloaded(
      FailureBehavior aReportFailure = ReportFailure) {
    static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
                  "multiplication below could overflow");

    // Note: if capacity() is zero, this will always succeed, which is
    // what we want.
    bool overloaded = mEntryCount + mRemovedCount >=
                      capacity() * sMaxAlphaNumerator / sAlphaDenominator;

    if (!overloaded) {
      return NotOverloaded;
    }

    // Succeed if a quarter or more of all entries are removed. Note that this
    // always succeeds if capacity() == 0 (i.e. entry storage has not been
    // allocated), which is what we want, because it means changeTableSize()
    // will allocate the requested capacity rather than doubling it.
    bool manyRemoved = mRemovedCount >= (capacity() >> 2);
    uint32_t newCapacity = manyRemoved ? rawCapacity() : rawCapacity() * 2;
    return changeTableSize(newCapacity, aReportFailure);
  }
   1982 
  // Like rehashIfOverloaded(), but never fails: if allocating a new table is
  // impossible, rehash in place to at least purge tombstones.
  void infallibleRehashIfOverloaded() {
    if (rehashIfOverloaded(DontReportFailure) == RehashFailed) {
      rehashTableInPlace();
    }
  }
   1988 
  // Remove the entry in |aSlot|. A slot on some key's probe path must become
  // a tombstone (so later probes keep walking); otherwise it can be freed.
  void remove(Slot& aSlot) {
    MOZ_ASSERT(mTable);

    if (aSlot.hasCollision()) {
      aSlot.removeLive();
      mRemovedCount++;
    } else {
      aSlot.clearLive();
    }
    mEntryCount--;
#ifdef DEBUG
    mMutationCount++;
#endif
  }
   2003 
  // Halve the table if the load factor has dropped to min alpha or below.
  // Shrinking is best-effort; allocation failure is ignored.
  void shrinkIfUnderloaded() {
    static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
                  "multiplication below could overflow");
    bool underloaded =
        capacity() > sMinCapacity &&
        mEntryCount <= capacity() * sMinAlphaNumerator / sAlphaDenominator;

    if (underloaded) {
      (void)changeTableSize(capacity() / 2, DontReportFailure);
    }
  }
   2015 
  // This is identical to changeTableSize(currentSize), but without requiring
  // a second table.  We do this by recycling the collision bits to tell us if
  // the element is already inserted or still waiting to be inserted.  Since
  // already-inserted elements win any conflicts, we get the same table as we
  // would have gotten through random insertion order.
  void rehashTableInPlace() {
    mRemovedCount = 0;
    incrementGeneration();
    // Reset all collision bits; a set bit now means "already placed".
    forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.unsetCollision(); });
    for (uint32_t i = 0; i < capacity();) {
      Slot src = slotForIndex(i);

      // Skip empty/removed slots and entries that were already placed.
      if (!src.isLive() || src.hasCollision()) {
        ++i;
        continue;
      }

      HashNumber keyHash = src.getKeyHash();
      HashNumber h1 = hash1(keyHash);
      DoubleHash dh = hash2(keyHash);
      Slot tgt = slotForIndex(h1);
      while (true) {
        if (!tgt.hasCollision()) {
          // Swap the pending entry into place; whatever was displaced (if
          // live) is revisited because |i| is not advanced.
          src.swap(tgt);
          tgt.setCollision();
          break;
        }

        h1 = applyDoubleHash(h1, dh);
        tgt = slotForIndex(h1);
      }
    }

    // TODO: this algorithm leaves collision bits on *all* elements, even if
    // they are on no collision path. We have the option of setting the
    // collision bits correctly on a subsequent pass or skipping the rehash
    // unless we are totally filled with tombstones: benchmark to find out
    // which approach is best.
  }
   2055 
  // Prefer to use putNewInfallible; this function does not check
  // invariants.
  //
  // Insert a key known to be absent, assuming storage already exists.
  template <typename... Args>
  void putNewInfallibleInternal(HashNumber aKeyHash, Args&&... aArgs) {
    MOZ_ASSERT(mTable);

    Slot slot = findNonLiveSlot(aKeyHash);

    if (slot.isRemoved()) {
      // Recycling a tombstone: it sits on a probe path, so keep the
      // collision bit set.
      mRemovedCount--;
      aKeyHash |= sCollisionBit;
    }

    slot.setLive(aKeyHash, std::forward<Args>(aArgs)...);
    mEntryCount++;
#ifdef DEBUG
    mMutationCount++;
#endif
  }
   2075 
   2076 public:
  // Destroy all entries and tombstones, keeping the allocated storage.
  void clear() {
    forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.clear(); });
    mRemovedCount = 0;
    mEntryCount = 0;
#ifdef DEBUG
    mMutationCount++;
#endif
  }
   2085 
  // Minimise the memory used. If there are no entries the table is freed,
  // otherwise the table is resized to the smallest capacity that doesn't
  // overload the table and that is at least sMinCapacity entries.
  //
  // Since we shrink the table after every remove, you only need to call this if
  // you want to free the table when it's empty.
  void compact() {
    if (empty()) {
      // Free the entry storage.
      freeTable(*this, mTable, capacity());
      incrementGeneration();
      setHashShift(
          hashShiftForLength(0));  // gives minimum capacity on regrowth
      mTable = nullptr;
      mRemovedCount = 0;
      return;
    }

    shrinkToBestCapacity();
  }
   2106 
   2107  void shrinkToBestCapacity() {
   2108    uint32_t bestCapacity = this->bestCapacity(mEntryCount);
   2109    if (bestCapacity < capacity()) {
   2110      (void)changeTableSize(bestCapacity, DontReportFailure);
   2111    }
   2112  }
   2113 
  // Remove all entries, then free the (now empty) storage.
  void clearAndCompact() {
    clear();
    compact();
  }
   2118 
  // Grow storage (if needed) so that |aLen| elements can be added without
  // further allocation. Returns false on overflow or allocation failure.
  [[nodiscard]] bool reserve(uint32_t aLen) {
    if (aLen == 0) {
      return true;
    }

    if (MOZ_UNLIKELY(aLen > sMaxInit)) {
      this->reportAllocOverflow();
      return false;
    }

    uint32_t bestCapacity = this->bestCapacity(aLen);
    if (bestCapacity <= capacity()) {
      return true;  // Capacity is already sufficient.
    }

    RebuildStatus status = changeTableSize(bestCapacity, ReportFailure);
    MOZ_ASSERT(status != NotOverloaded);
    return status != RehashFailed;
  }
   2138 
  // Iteration entry points; see Iterator/ModIterator/Range above.
  Iterator iter() const { return Iterator(*this); }

  ModIterator modIter() { return ModIterator(*this); }

  Range all() const { return Range(*this); }

  bool empty() const { return mEntryCount == 0; }

  uint32_t count() const { return mEntryCount; }

  // Capacity implied by the hash shift, regardless of whether storage exists.
  uint32_t rawCapacity() const { return 1u << (kHashNumberBits - hashShift()); }

  // Actual allocated capacity: zero until storage is first allocated.
  uint32_t capacity() const { return mTable ? rawCapacity() : 0; }

  Generation generation() const { return Generation(gen()); }
   2154 
  // Memory-reporting helpers; "shallow" because entry contents (e.g. pointed-
  // to data owned by entries) are not measured.
  size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
    return aMallocSizeOf(mTable);
  }

  size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
    return aMallocSizeOf(this) + shallowSizeOfExcludingThis(aMallocSizeOf);
  }
   2162 
  // Lookup that performs no writes at all (it takes no ReentrancyGuard and
  // calls lookup<ForNonAdd>, which must not mutate the table).
  MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const {
    if (empty()) {
      return Ptr();
    }

    HashNumber inputHash;
    if (!MaybeGetHash<HashPolicy>(aLookup, &inputHash)) {
      return Ptr();
    }

    HashNumber keyHash = prepareHash(inputHash);
    return Ptr(lookup<ForNonAdd>(aLookup, keyHash), *this);
  }
   2176 
  // Plain lookup: same as readonlyThreadsafeLookup, plus reentrancy checking.
  MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const {
    ReentrancyGuard g(*this);
    return readonlyThreadsafeLookup(aLookup);
  }
   2181 
  // Lookup in preparation for an add(); the returned AddPtr caches the
  // prepared hash. When storage hasn't been allocated yet, an "invalid"
  // AddPtr carrying only the hash is returned and add() allocates.
  MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) {
    ReentrancyGuard g(*this);

    HashNumber inputHash;
    if (!EnsureHash<HashPolicy>(aLookup, &inputHash)) {
      return AddPtr();
    }

    HashNumber keyHash = prepareHash(inputHash);

    if (!mTable) {
      return AddPtr(*this, keyHash);
    }

    // Directly call the constructor in the return statement to avoid
    // excess copying when building with Visual Studio 2017.
    // See bug 1385181.
    return AddPtr(lookup<ForAdd>(aLookup, keyHash), *this, keyHash);
  }
   2201 
   2202  template <typename... Args>
   2203  [[nodiscard]] bool add(AddPtr& aPtr, Args&&... aArgs) {
   2204    ReentrancyGuard g(*this);
   2205    MOZ_ASSERT_IF(aPtr.isValid(), mTable);
   2206    MOZ_ASSERT_IF(aPtr.isValid(), aPtr.mTable == this);
   2207    MOZ_ASSERT(!aPtr.found());
   2208    MOZ_ASSERT(!(aPtr.mKeyHash & sCollisionBit));
   2209 
   2210    // Check for error from ensureHash() here.
   2211    if (!aPtr.isLive()) {
   2212      return false;
   2213    }
   2214 
   2215    MOZ_ASSERT(aPtr.mGeneration == generation());
   2216 #ifdef DEBUG
   2217    MOZ_ASSERT(aPtr.mMutationCount == mMutationCount);
   2218 #endif
   2219 
   2220    if (!aPtr.isValid()) {
   2221      MOZ_ASSERT(!mTable && mEntryCount == 0);
   2222      uint32_t newCapacity = rawCapacity();
   2223      RebuildStatus status = changeTableSize(newCapacity, ReportFailure);
   2224      MOZ_ASSERT(status != NotOverloaded);
   2225      if (status == RehashFailed) {
   2226        return false;
   2227      }
   2228      aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash);
   2229 
   2230    } else if (aPtr.mSlot.isRemoved()) {
   2231      // Changing an entry from removed to live does not affect whether we are
   2232      // overloaded and can be handled separately.
   2233      if (!this->checkSimulatedOOM()) {
   2234        return false;
   2235      }
   2236      mRemovedCount--;
   2237      aPtr.mKeyHash |= sCollisionBit;
   2238 
   2239    } else {
   2240      // Preserve the validity of |aPtr.mSlot|.
   2241      RebuildStatus status = rehashIfOverloaded();
   2242      if (status == RehashFailed) {
   2243        return false;
   2244      }
   2245      if (status == NotOverloaded && !this->checkSimulatedOOM()) {
   2246        return false;
   2247      }
   2248      if (status == Rehashed) {
   2249        aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash);
   2250      }
   2251    }
   2252 
   2253    aPtr.mSlot.setLive(aPtr.mKeyHash, std::forward<Args>(aArgs)...);
   2254    mEntryCount++;
   2255 #ifdef DEBUG
   2256    mMutationCount++;
   2257    aPtr.mGeneration = generation();
   2258    aPtr.mMutationCount = mMutationCount;
   2259 #endif
   2260    return true;
   2261  }
   2262 
  // Insert a key asserted (in debug builds) to be absent; cannot fail.
  //
  // Note: |aLookup| may reference pieces of arguments in |aArgs|, so this
  // function must take care not to use |aLookup| after moving |aArgs|.
  template <typename... Args>
  void putNewInfallible(const Lookup& aLookup, Args&&... aArgs) {
    MOZ_ASSERT(!lookup(aLookup).found());
    ReentrancyGuard g(*this);
    HashNumber keyHash = prepareHash(HashPolicy::hash(aLookup));
    putNewInfallibleInternal(keyHash, std::forward<Args>(aArgs)...);
  }
   2272 
  // Insert a key asserted (in debug builds) to be absent; returns false on
  // hash failure, simulated OOM, or a failed grow.
  //
  // Note: |aLookup| may alias arguments in |aArgs|, so this function must take
  // care not to use |aLookup| after moving |aArgs|.
  template <typename... Args>
  [[nodiscard]] bool putNew(const Lookup& aLookup, Args&&... aArgs) {
    MOZ_ASSERT(!lookup(aLookup).found());
    ReentrancyGuard g(*this);
    if (!this->checkSimulatedOOM()) {
      return false;
    }
    HashNumber inputHash;
    if (!EnsureHash<HashPolicy>(aLookup, &inputHash)) {
      return false;
    }
    HashNumber keyHash = prepareHash(inputHash);
    if (rehashIfOverloaded() == RehashFailed) {
      return false;
    }
    putNewInfallibleInternal(keyHash, std::forward<Args>(aArgs)...);
    return true;
  }
   2293 
  // Redo a lookup with a stale AddPtr (e.g. after other mutations) and add if
  // still absent.
  //
  // Note: |aLookup| may reference pieces of arguments in |aArgs|, so this
  // function must take care not to use |aLookup| after moving |aArgs|.
  template <typename... Args>
  [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup,
                                   Args&&... aArgs) {
    // Check for error from ensureHash() here.
    if (!aPtr.isLive()) {
      return false;
    }
#ifdef DEBUG
    aPtr.mGeneration = generation();
    aPtr.mMutationCount = mMutationCount;
#endif
    if (mTable) {
      ReentrancyGuard g(*this);
      // Check that aLookup has not been destroyed.
      MOZ_ASSERT(prepareHash(HashPolicy::hash(aLookup)) == aPtr.mKeyHash);
      aPtr.mSlot = lookup<ForAdd>(aLookup, aPtr.mKeyHash);
      if (aPtr.found()) {
        return true;
      }
    } else {
      // Clear aPtr so it's invalid; add() will allocate storage and redo the
      // lookup.
      aPtr.mSlot = Slot(nullptr, nullptr);
    }
    return add(aPtr, std::forward<Args>(aArgs)...);
  }
   2322 
  // Remove the entry a valid Ptr refers to, then shrink if underloaded.
  void remove(Ptr aPtr) {
    MOZ_ASSERT(mTable);
    ReentrancyGuard g(*this);
    MOZ_ASSERT(aPtr.found());
    MOZ_ASSERT(aPtr.mGeneration == generation());
    remove(aPtr.mSlot);
    shrinkIfUnderloaded();
  }
   2331 
  // Change an entry's key by moving the entry out, removing it, and
  // re-inserting it under the new key. Does not rehash afterwards (the
  // removal may leave a tombstone behind).
  void rekeyWithoutRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) {
    MOZ_ASSERT(mTable);
    ReentrancyGuard g(*this);
    MOZ_ASSERT(aPtr.found());
    MOZ_ASSERT(aPtr.mGeneration == generation());
    typename HashTableEntry<T>::NonConstT t(std::move(*aPtr));
    HashPolicy::setKey(t, const_cast<Key&>(aKey));
    remove(aPtr.mSlot);
    HashNumber keyHash = prepareHash(HashPolicy::hash(aLookup));
    putNewInfallibleInternal(keyHash, std::move(t));
  }
   2343 
  // Rekey an entry, then restore the load-factor invariant (infallibly).
  void rekeyAndMaybeRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) {
    rekeyWithoutRehash(aPtr, aLookup, aKey);
    infallibleRehashIfOverloaded();
  }
   2348 
  // Field offsets for code (e.g. JIT-generated) that accesses members
  // directly rather than through these accessors.
  static size_t offsetOfHashShift() {
    static_assert(sHashShiftBits == 8,
                  "callers assume hash shift is stored in a byte");
    // The hash shift is stored in the least significant bits of
    // mGenAndHashShift. On little-endian platforms, this is the
    // same offset as mGenAndHashShift itself. On big-endian platforms,
    // we have to add an additional offset to point to the last byte.
    // (Or we would if we had JIT support for any big-endian platforms.)
#if MOZ_BIG_ENDIAN()
    return offsetof(HashTable, mGenAndHashShift) + sizeof(mGenAndHashShift) -
           sizeof(uint8_t);
#else
    return offsetof(HashTable, mGenAndHashShift);
#endif
  }
  static size_t offsetOfTable() { return offsetof(HashTable, mTable); }
  static size_t offsetOfEntryCount() {
    return offsetof(HashTable, mEntryCount);
  }
   2368 };
   2369 
   2370 }  // namespace detail
   2371 }  // namespace mozilla
   2372 
   2373 #endif /* mozilla_HashTable_h */