tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

OrderedHashTableObject.h (52803B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef builtin_OrderedHashTableObject_h
      8 #define builtin_OrderedHashTableObject_h
      9 
     10 /*
     11 * [SMDOC] OrderedHashTableObject (JS Map/Set implementation)
     12 *
     13 * This file defines js::OrderedHashMapObject (js::MapObject base class) and
     14 * js::OrderedHashSetObject (js::SetObject base class).
     15 *
     16 * It also defines two templates, js::OrderedHashMapImpl and
     17 * js::OrderedHashSetImpl, that operate on these objects and implement the
     18 * ordered hash table algorithm. These templates are defined separately from
     19 * the JS object types because it lets us switch between different template
     20 * instantiations to enable or disable GC barriers.
     21 *
     22 * The implemented hash table algorithm is different from HashMap and HashSet in
     23 * MFBT:
     24 *
     25 *   - JS Map/Set iterators must visit entries in insertion order. The hashing
     26 *     is a pure performance optimization and does not affect iteration order.
     27 *
     28 *   - Iterator objects must remain valid even when entries are added, removed,
     29 *     or the table is resized.
     30 *
     31 * Implementation
     32 * ==============
     33 * The hash table contains two arrays: an array of data entries that stores all
     34 * entries (in insertion/enumeration order) and a separate array of hash buckets
     35 * that provides O(1) lookup performance instead of O(n).
     36 *
     37 * As an optimization, we allocate a single buffer that stores both of these
     38 * arrays (and the HashCodeScrambler). We allocate this buffer when the first
     39 * entry is added to the table.
     40 *
     41 * The capacity of the data array is based on the number of hash buckets and the
     42 * FillFactor constant (currently 8.0/3.0).
     43 *
     44 * For example, consider the following JS code:
     45 *
     46 *   var m = new Map();
     47 *   m.set(1, "a");
     48 *   m.set(2, "b");
     49 *   m.set(3, "c");
     50 *   m.set(4, "d");
     51 *   m.delete(2);
     52 *
     53 * After this, the Map object might look like this:
     54 *
     55 *   HashTableSlot
     56 *     |    +---------+---------+
     57 *     +--> | Data #3 | Data #2 |
     58 *          +-----+---+-----+---+
     59 *                |         |
     60 *                +---------|----------------------------+
     61 *                          +----------------+           |
     62 *                                           |           |
     63 *   DataSlot                                v           v
     64 *     |    +------------+------------+------------+------------+------------+
     65 *     |    | Data #0    | Data #1    | Data #2    | Data #3    | Data #4    |
     66 *     +--> | key: 1     | key: $empty| key: 3     | key: 4     | <free>     |
     67 *          | value: "a" | value: #   | value: "c" | value: "d" |            |
     68 *          | chain: null| chain: null| chain: #0  | chain: #1  |            |
     69 *          +------------+------------+------+-----+------+-----+------------+
     70 *                 ^            ^            |            |
     71 *                 |            |            |            |
     72 *                 |            +------------|------------+
     73 *                 +-------------------------+
     74 *
     75 *   LiveCountSlot = 3    (number of entries in the table)
     76 *   DataCapacitySlot = 5 (total capacity of the data array)
     77 *   DataLengthSlot = 4   (number of entries used, including deleted items)
     78 *   HashShiftSlot = 31   (2 hash buckets; see hashShiftToNumHashBuckets)
     79 *
     80 * In this case we have only two hash buckets and each bucket contains two Data
     81 * entries. Entries in the same bucket form a linked list using the |chain|
     82 * field. This implements separate chaining for hash collisions.
     83 *
     84 * When an entry is deleted from the table, we set its key to the
     85 * MagicValue(JS_HASH_KEY_EMPTY) tombstone Value, shown as $empty in this
     86 * diagram. Deleted entries are removed when the table is resized or compacted.
     87 *
     88 * Iterators
     89 * =========
     90 * Each hash table has a doubly linked list of iterators for it. This is used to
     91 * update active iterators when we remove an entry, when we compact the table,
     92 * or when we clear the table. See TableIteratorObject and IterOps.
     93 *
     94 * HashCodeScrambler
     95 * =================
     96 * Each table has a HashCodeScrambler, used to avoid revealing JSObject*
     97 * addresses. See bug 1312001 and HashValue in MapObject.cpp.
     98 *
     99 * Nursery GC Optimizations
    100 * ========================
    101 * Each table has a list of keys allocated in the nursery that's traced by the
    102 * OrderedHashTableRef store buffer entry. This avoids tracing all non-nursery
    103 * entries during a nursery GC.
    104 *
    105 * Similarly, each table has a separate list of nursery-allocated iterators.
    106 * This list is cleared at the start of a nursery GC and rebuilt when iterators
    107 * are promoted. See Nursery::clearMapAndSetNurseryIterators.
    108 */
    109 
    110 #include "mozilla/HashFunctions.h"
    111 #include "mozilla/Likely.h"
    112 #include "mozilla/Maybe.h"
    113 #include "mozilla/MemoryReporting.h"
    114 
    115 #include <memory>
    116 #include <tuple>
    117 #include <utility>
    118 
    119 #include "builtin/SelfHostingDefines.h"
    120 #include "gc/Barrier.h"
    121 #include "gc/Zone.h"
    122 #include "js/GCPolicyAPI.h"
    123 #include "js/HashTable.h"
    124 #include "vm/JSContext.h"
    125 #include "vm/Realm.h"
    126 
    127 class JSTracer;
    128 
    129 namespace js {
    130 
    131 class MapIteratorObject;
    132 class SetIteratorObject;
    133 
    134 namespace detail {
    135 
    136 template <class T, class Ops>
    137 class OrderedHashTableImpl;
    138 
// Base class for OrderedHashMapObject and OrderedHashSetObject.
class OrderedHashTableObject : public NativeObject {
  // Use a friend class to avoid exposing these slot definitions directly to
  // MapObject and SetObject.
  template <class T, class Ops>
  friend class OrderedHashTableImpl;

  // Reserved slots. All table state lives in these slots; see the [SMDOC]
  // comment at the top of this file for the overall layout.
  enum Slots {
    HashTableSlot,          // Data**: the hash bucket array.
    DataSlot,               // Data*: entry array; also the start of the
                            // single malloc'd buffer (nullptr if none yet).
    DataLengthSlot,         // uint32: constructed entries, incl. deleted ones.
    DataCapacitySlot,       // uint32: total capacity of the data array.
    LiveCountSlot,          // uint32: number of non-deleted entries.
    HashShiftSlot,          // uint32: multiplicative hash shift.
    TenuredIteratorsSlot,   // Head of the tenured active-iterator list.
    NurseryIteratorsSlot,   // Head of the nursery active-iterator list.
    HashCodeScramblerSlot,  // HashCodeScrambler*, stored inside the buffer.
    SlotCount
  };

  // Allocate/free the single buffer that holds the data array, the hash
  // buckets and the hash code scrambler.
  inline void* allocateCellBuffer(JSContext* cx, size_t numBytes);
  inline void freeCellBuffer(JSContext* cx, void* data, size_t numBytes);

 public:
  // Byte offsets of the reserved slots, for code that accesses them directly.
  static constexpr size_t offsetOfDataLength() {
    return getFixedSlotOffset(DataLengthSlot);
  }
  static constexpr size_t offsetOfData() {
    return getFixedSlotOffset(DataSlot);
  }
  static constexpr size_t offsetOfHashTable() {
    return getFixedSlotOffset(HashTableSlot);
  }
  static constexpr size_t offsetOfHashShift() {
    return getFixedSlotOffset(HashShiftSlot);
  }
  static constexpr size_t offsetOfLiveCount() {
    return getFixedSlotOffset(LiveCountSlot);
  }
  static constexpr size_t offsetOfHashCodeScrambler() {
    return getFixedSlotOffset(HashCodeScramblerSlot);
  }
};
    182 
    183 }  // namespace detail
    184 
    185 // Base class for MapIteratorObject and SetIteratorObject.
    186 //
    187 // Iterators remain valid for the lifetime of the OrderedHashTableObject, even
    188 // if entries are added or removed or if the table is resized. Hash table
    189 // objects have a doubly linked list of all active iterator objects and this
    190 // lets us update the iterators when needed.
    191 //
    192 // An "active" iterator object has a target object and must be part of its
    193 // linked list of iterators. When the iterator is finished, the target slot is
    194 // cleared and the iterator is removed from the linked list.
    195 //
    196 // Note: the iterator list stores pointers to iterators as PrivateValue instead
    197 // of ObjectValue because we want these links to be weak pointers: an iterator
    198 // must not keep all other iterators alive.
    199 class TableIteratorObject : public NativeObject {
    200  template <class T, class Ops>
    201  friend class detail::OrderedHashTableImpl;
    202 
    203 public:
    204  enum Slots {
    205    TargetSlot,
    206    KindSlot,
    207    IndexSlot,
    208    CountSlot,
    209    PrevPtrSlot,
    210    NextSlot,
    211    SlotCount
    212  };
    213  enum class Kind { Keys, Values, Entries };
    214 
    215  // Slot numbers and Kind values must match constants used in self-hosted code.
    216  static_assert(TargetSlot == ITERATOR_SLOT_TARGET);
    217  static_assert(KindSlot == MAP_SET_ITERATOR_SLOT_ITEM_KIND);
    218  static_assert(int32_t(Kind::Keys) == ITEM_KIND_KEY);
    219  static_assert(int32_t(Kind::Values) == ITEM_KIND_VALUE);
    220  static_assert(int32_t(Kind::Entries) == ITEM_KIND_KEY_AND_VALUE);
    221 
    222 private:
    223  // The index of the current entry within the table's data array.
    224  uint32_t getIndex() const {
    225    return getReservedSlot(IndexSlot).toPrivateUint32();
    226  }
    227  void setIndex(uint32_t i) {
    228    MOZ_ASSERT(isActive());
    229    setReservedSlotPrivateUint32Unbarriered(IndexSlot, i);
    230  }
    231 
    232  // The number of nonempty entries in the data array to the left of |index|.
    233  // This is used when the table is resized or compacted.
    234  uint32_t getCount() const {
    235    return getReservedSlot(CountSlot).toPrivateUint32();
    236  }
    237  void setCount(uint32_t i) {
    238    MOZ_ASSERT(isActive());
    239    setReservedSlotPrivateUint32Unbarriered(CountSlot, i);
    240  }
    241 
    242  // Links in the doubly-linked list of active iterator objects on the
    243  // OrderedHashTableObject.
    244  //
    245  // The PrevPtr slot points to either the previous iterator's NextSlot or to
    246  // the table's TenuredIteratorsSlot or NurseryIteratorsSlot if this is the
    247  // first iterator in the list.
    248  //
    249  // The Next slot points to the next iterator object, or is nullptr if this is
    250  // the last iterator in the list.
    251  //
    252  // Invariant: *getPrevPtr() == this.
    253  TableIteratorObject** getPrevPtr() const {
    254    MOZ_ASSERT(isActive());
    255    Value v = getReservedSlot(PrevPtrSlot);
    256    return static_cast<TableIteratorObject**>(v.toPrivate());
    257  }
    258  void setPrevPtr(TableIteratorObject** p) {
    259    MOZ_ASSERT(isActive());
    260    setReservedSlotPrivateUnbarriered(PrevPtrSlot, p);
    261  }
    262  TableIteratorObject* getNext() const {
    263    MOZ_ASSERT(isActive());
    264    Value v = getReservedSlot(NextSlot);
    265    return static_cast<TableIteratorObject*>(v.toPrivate());
    266  }
    267  TableIteratorObject** addressOfNext() {
    268    MOZ_ASSERT(isActive());
    269    return addressOfFixedSlotPrivatePtr<TableIteratorObject>(NextSlot);
    270  }
    271  void setNext(TableIteratorObject* p) {
    272    MOZ_ASSERT(isActive());
    273    setReservedSlotPrivateUnbarriered(NextSlot, p);
    274  }
    275 
    276  void link(TableIteratorObject** listp) {
    277    MOZ_ASSERT(isActive());
    278    TableIteratorObject* next = *listp;
    279    setPrevPtr(listp);
    280    setNext(next);
    281    *listp = this;
    282    if (next) {
    283      next->setPrevPtr(this->addressOfNext());
    284    }
    285  }
    286 
    287  void init(detail::OrderedHashTableObject* target, Kind kind,
    288            TableIteratorObject** listp) {
    289    initReservedSlot(TargetSlot, ObjectValue(*target));
    290    setReservedSlotPrivateUint32Unbarriered(KindSlot, uint32_t(kind));
    291    setReservedSlotPrivateUint32Unbarriered(IndexSlot, 0);
    292    setReservedSlotPrivateUint32Unbarriered(CountSlot, 0);
    293    link(listp);
    294  }
    295 
    296  void assertActiveIteratorFor(JSObject* target) {
    297    MOZ_ASSERT(&getReservedSlot(TargetSlot).toObject() == target);
    298    MOZ_ASSERT(*getPrevPtr() == this);
    299    MOZ_ASSERT(getNext() != this);
    300  }
    301 
    302 protected:
    303  bool isActive() const { return getReservedSlot(TargetSlot).isObject(); }
    304 
    305  void finish() {
    306    MOZ_ASSERT(isActive());
    307    unlink();
    308    // Mark iterator inactive.
    309    setReservedSlot(TargetSlot, UndefinedValue());
    310  }
    311  void unlink() {
    312    MOZ_ASSERT(isActive());
    313    TableIteratorObject** prevp = getPrevPtr();
    314    TableIteratorObject* next = getNext();
    315    *prevp = next;
    316    if (next) {
    317      next->setPrevPtr(prevp);
    318    }
    319  }
    320 
    321  // Update list pointers after a compacting GC moved this iterator in memory.
    322  // Note: this isn't used for nursery iterators because in that case we have to
    323  // rebuild the list with clearNurseryIterators and relinkNurseryIterator.
    324  void updateListAfterMove(TableIteratorObject* old) {
    325    MOZ_ASSERT(!IsInsideNursery(old));
    326    MOZ_ASSERT(isActive());
    327    MOZ_ASSERT(old != this);
    328 
    329    TableIteratorObject** prevp = getPrevPtr();
    330    MOZ_ASSERT(*prevp == old);
    331    *prevp = this;
    332 
    333    if (TableIteratorObject* next = getNext()) {
    334      MOZ_ASSERT(next->getPrevPtr() == old->addressOfNext());
    335      next->setPrevPtr(this->addressOfNext());
    336    }
    337  }
    338 
    339  Kind kind() const {
    340    uint32_t i = getReservedSlot(KindSlot).toPrivateUint32();
    341    MOZ_ASSERT(i == uint32_t(Kind::Keys) || i == uint32_t(Kind::Values) ||
    342               i == uint32_t(Kind::Entries));
    343    return Kind(i);
    344  }
    345 
    346 public:
    347  static constexpr size_t offsetOfTarget() {
    348    return getFixedSlotOffset(TargetSlot);
    349  }
    350  static constexpr size_t offsetOfIndex() {
    351    return getFixedSlotOffset(IndexSlot);
    352  }
    353  static constexpr size_t offsetOfCount() {
    354    return getFixedSlotOffset(CountSlot);
    355  }
    356  static constexpr size_t offsetOfPrevPtr() {
    357    return getFixedSlotOffset(PrevPtrSlot);
    358  }
    359  static constexpr size_t offsetOfNext() {
    360    return getFixedSlotOffset(NextSlot);
    361  }
    362 };
    363 
    364 namespace detail {
    365 
    366 /*
    367 * detail::OrderedHashTableImpl is the underlying code used to implement both
    368 * OrderedHashMapImpl and OrderedHashSetImpl. Programs should use one of those
    369 * two templates rather than OrderedHashTableImpl.
    370 */
    371 template <class T, class Ops>
    372 class MOZ_STACK_CLASS OrderedHashTableImpl {
    373 public:
    374  using Key = typename Ops::KeyType;
    375  using MutableKey = std::remove_const_t<Key>;
    376  using UnbarrieredKey = typename RemoveBarrier<MutableKey>::Type;
    377  using Lookup = typename Ops::Lookup;
    378  using HashCodeScrambler = mozilla::HashCodeScrambler;
    379  static constexpr size_t SlotCount = OrderedHashTableObject::SlotCount;
    380 
    381  // Note: use alignas(8) to simplify JIT code generation because
    382  // alignof(JS::Value) can be either 4 or 8 on 32-bit platforms.
    383  struct alignas(8) Data {
    384    T element;
    385    Data* chain;
    386 
    387    Data(const T& e, Data* c) : element(e), chain(c) {}
    388    Data(T&& e, Data* c) : element(std::move(e)), chain(c) {}
    389  };
    390 
 private:
  using Slots = OrderedHashTableObject::Slots;
  // The object whose reserved slots hold all table state; every accessor
  // below reads or writes these slots. (This impl class itself is a
  // stack-only view, per MOZ_STACK_CLASS above.)
  OrderedHashTableObject* const obj;
    394 
    395  // Whether we have allocated a buffer for this object. This buffer is
    396  // allocated when adding the first entry and it contains the data array, the
    397  // hash table buckets and the hash code scrambler.
    398  bool hasAllocatedBuffer() const {
    399    MOZ_ASSERT(hasInitializedSlots());
    400    return obj->getReservedSlot(Slots::DataSlot).toPrivate() != nullptr;
    401  }
    402 
    403  // Hash table. Has hashBuckets() elements.
    404  // Note: a single malloc buffer is used for the data and hashTable arrays and
    405  // the HashCodeScrambler. The pointer in DataSlot points to the start of this
    406  // buffer.
    407  Data** getHashTable() const {
    408    MOZ_ASSERT(hasAllocatedBuffer());
    409    Value v = obj->getReservedSlot(Slots::HashTableSlot);
    410    return static_cast<Data**>(v.toPrivate());
    411  }
    412  void setHashTable(Data** table) {
    413    obj->setReservedSlotPrivateUnbarriered(Slots::HashTableSlot, table);
    414  }
    415 
    416  // Array of Data objects. Elements data[0:dataLength] are constructed and the
    417  // total capacity is dataCapacity.
    418  //
    419  // maybeData returns nullptr if this object doesn't have a buffer yet.
    420  // getData asserts the object has a buffer.
    421  Data* maybeData() const {
    422    Value v = obj->getReservedSlot(Slots::DataSlot);
    423    return static_cast<Data*>(v.toPrivate());
    424  }
    425  Data* getData() const {
    426    MOZ_ASSERT(hasAllocatedBuffer());
    427    return maybeData();
    428  }
    429  void setData(Data* data) {
    430    obj->setReservedSlotPrivateUnbarriered(Slots::DataSlot, data);
    431  }
    432 
    433  // Number of constructed elements in the data array.
    434  uint32_t getDataLength() const {
    435    return obj->getReservedSlot(Slots::DataLengthSlot).toPrivateUint32();
    436  }
    437  void setDataLength(uint32_t length) {
    438    obj->setReservedSlotPrivateUint32Unbarriered(Slots::DataLengthSlot, length);
    439  }
    440 
    441  // Size of data array, in elements.
    442  uint32_t getDataCapacity() const {
    443    return obj->getReservedSlot(Slots::DataCapacitySlot).toPrivateUint32();
    444  }
    445  void setDataCapacity(uint32_t capacity) {
    446    obj->setReservedSlotPrivateUint32Unbarriered(Slots::DataCapacitySlot,
    447                                                 capacity);
    448  }
    449 
    450  // The number of elements in this table. This is different from dataLength
    451  // because the data array can contain empty/removed elements.
    452  uint32_t getLiveCount() const {
    453    return obj->getReservedSlot(Slots::LiveCountSlot).toPrivateUint32();
    454  }
    455  void setLiveCount(uint32_t liveCount) {
    456    obj->setReservedSlotPrivateUint32Unbarriered(Slots::LiveCountSlot,
    457                                                 liveCount);
    458  }
    459 
    460  // Multiplicative hash shift.
    461  uint32_t getHashShift() const {
    462    MOZ_ASSERT(hasAllocatedBuffer(),
    463               "hash shift is meaningless if there's no hash table");
    464    return obj->getReservedSlot(Slots::HashShiftSlot).toPrivateUint32();
    465  }
    466  void setHashShift(uint32_t hashShift) {
    467    obj->setReservedSlotPrivateUint32Unbarriered(Slots::HashShiftSlot,
    468                                                 hashShift);
    469  }
    470 
    471  // List of all active iterators on this table in the tenured heap. Populated
    472  // when iterators are created.
    473  TableIteratorObject* getTenuredIterators() const {
    474    Value v = obj->getReservedSlot(Slots::TenuredIteratorsSlot);
    475    return static_cast<TableIteratorObject*>(v.toPrivate());
    476  }
    477  void setTenuredIterators(TableIteratorObject* iter) {
    478    obj->setReservedSlotPrivateUnbarriered(Slots::TenuredIteratorsSlot, iter);
    479  }
    480  TableIteratorObject** addressOfTenuredIterators() const {
    481    static constexpr size_t slot = Slots::TenuredIteratorsSlot;
    482    return obj->addressOfFixedSlotPrivatePtr<TableIteratorObject>(slot);
    483  }
    484 
    485  // List of all active iterators on this table in the GC nursery.
    486  // Populated when iterators are created. This is cleared at the start
    487  // of minor GC and rebuilt when iterators are moved.
    488  TableIteratorObject* getNurseryIterators() const {
    489    Value v = obj->getReservedSlot(Slots::NurseryIteratorsSlot);
    490    return static_cast<TableIteratorObject*>(v.toPrivate());
    491  }
    492  void setNurseryIterators(TableIteratorObject* iter) {
    493    obj->setReservedSlotPrivateUnbarriered(Slots::NurseryIteratorsSlot, iter);
    494  }
    495  TableIteratorObject** addressOfNurseryIterators() const {
    496    static constexpr size_t slot = Slots::NurseryIteratorsSlot;
    497    return obj->addressOfFixedSlotPrivatePtr<TableIteratorObject>(slot);
    498  }
    499 
    500  // Returns a pointer to the list of tenured or nursery iterators depending on
    501  // where |iter| is allocated.
    502  TableIteratorObject** addressOfIterators(TableIteratorObject* iter) {
    503    return IsInsideNursery(iter) ? addressOfNurseryIterators()
    504                                 : addressOfTenuredIterators();
    505  }
    506 
    507  // Scrambler to not reveal pointer hash codes.
    508  const HashCodeScrambler* getHashCodeScrambler() const {
    509    MOZ_ASSERT(hasAllocatedBuffer());
    510    Value v = obj->getReservedSlot(Slots::HashCodeScramblerSlot);
    511    return static_cast<const HashCodeScrambler*>(v.toPrivate());
    512  }
    513  void setHashCodeScrambler(HashCodeScrambler* hcs) {
    514    obj->setReservedSlotPrivateUnbarriered(Slots::HashCodeScramblerSlot, hcs);
    515  }
    516 
  // Convert a hash shift to the corresponding number of hash buckets.
  static constexpr uint32_t hashShiftToNumHashBuckets(uint32_t hashShift) {
    return 1 << (js::kHashNumberBits - hashShift);
  }
  // Convert a bucket count to the data-array capacity that maintains the
  // dataCapacity == floor(hashBuckets * FillFactor) invariant below.
  static constexpr uint32_t numHashBucketsToDataCapacity(
      uint32_t numHashBuckets) {
    return uint32_t(numHashBuckets * FillFactor);
  }

  // Logarithm base 2 of the number of buckets in the hash table initially.
  static constexpr uint32_t InitialBucketsLog2 = 1;
  static constexpr uint32_t InitialBuckets = 1 << InitialBucketsLog2;
  static constexpr uint32_t InitialHashShift =
      js::kHashNumberBits - InitialBucketsLog2;

  // The maximum load factor (mean number of entries per bucket).
  // It is an invariant that
  //     dataCapacity == floor(hashBuckets * FillFactor).
  //
  // The fill factor should be between 2 and 4, and it should be chosen so that
  // the fill factor times sizeof(Data) is close to but <= a power of 2.
  // This fixed fill factor was chosen to make the size of the data
  // array, in bytes, close to a power of two when sizeof(T) is 16.
  static constexpr double FillFactor = 8.0 / 3.0;

  // The minimum permitted value of (liveCount / dataLength).
  // If that ratio drops below this value, we shrink the table.
  static constexpr double MinDataFill = 0.25;

  // Bounds on table size. Note: a lower hash shift results in a higher
  // capacity.
  static constexpr uint32_t MinHashShift = 8;
  static constexpr uint32_t MaxHashBuckets =
      hashShiftToNumHashBuckets(MinHashShift);  // 16777216
  static constexpr uint32_t MaxDataCapacity =
      numHashBucketsToDataCapacity(MaxHashBuckets);  // 44739242
    551 
    552  template <typename F>
    553  void forEachIterator(F&& f) {
    554    TableIteratorObject* next;
    555    for (TableIteratorObject* iter = getTenuredIterators(); iter; iter = next) {
    556      next = iter->getNext();
    557      f(iter);
    558    }
    559    for (TableIteratorObject* iter = getNurseryIterators(); iter; iter = next) {
    560      next = iter->getNext();
    561      f(iter);
    562    }
    563  }
    564 
    565  bool hasInitializedSlots() const {
    566    return !obj->getReservedSlot(Slots::DataSlot).isUndefined();
    567  }
    568 
    569  static constexpr size_t calcAllocSize(size_t dataCapacity, size_t buckets) {
    570    return dataCapacity * sizeof(Data) + sizeof(HashCodeScrambler) +
    571           buckets * sizeof(Data*);
    572  }
    573 
    574  // Allocate a single buffer that stores the data array followed by the hash
    575  // code scrambler and the hash table entries.
    576  using AllocationResult =
    577      std::tuple<Data*, Data**, HashCodeScrambler*, size_t>;
    578  AllocationResult allocateBuffer(JSContext* cx, uint32_t dataCapacity,
    579                                  uint32_t buckets) {
    580    MOZ_ASSERT(dataCapacity <= MaxDataCapacity);
    581    MOZ_ASSERT(buckets <= MaxHashBuckets);
    582 
    583    // Ensure the maximum buffer size doesn't exceed INT32_MAX. Don't change
    584    // this without auditing the buffer allocation code!
    585    static_assert(calcAllocSize(MaxDataCapacity, MaxHashBuckets) <= INT32_MAX);
    586 
    587    size_t numBytes = calcAllocSize(dataCapacity, buckets);
    588 
    589    void* buf = obj->allocateCellBuffer(cx, numBytes);
    590    if (!buf) {
    591      return {};
    592    }
    593 
    594    return getBufferParts(buf, numBytes, dataCapacity, buckets);
    595  }
    596 
    597  static AllocationResult getBufferParts(void* buf, size_t numBytes,
    598                                         uint32_t dataCapacity,
    599                                         uint32_t buckets) {
    600    static_assert(alignof(Data) % alignof(HashCodeScrambler) == 0,
    601                  "Hash code scrambler must be aligned properly");
    602    static_assert(alignof(HashCodeScrambler) % alignof(Data*) == 0,
    603                  "Hash table entries must be aligned properly");
    604 
    605    auto* data = static_cast<Data*>(buf);
    606    auto* hcs = reinterpret_cast<HashCodeScrambler*>(data + dataCapacity);
    607    auto** table = reinterpret_cast<Data**>(hcs + 1);
    608 
    609    MOZ_ASSERT(uintptr_t(table + buckets) == uintptr_t(buf) + numBytes);
    610 
    611    return {data, table, hcs, numBytes};
    612  }
    613 
    614  [[nodiscard]] bool initBuffer(JSContext* cx) {
    615    MOZ_ASSERT(!hasAllocatedBuffer());
    616    MOZ_ASSERT(getDataLength() == 0);
    617    MOZ_ASSERT(getLiveCount() == 0);
    618 
    619    constexpr uint32_t buckets = InitialBuckets;
    620    constexpr uint32_t capacity = uint32_t(buckets * FillFactor);
    621 
    622    auto [dataAlloc, tableAlloc, hcsAlloc, numBytes] =
    623        allocateBuffer(cx, capacity, buckets);
    624    if (!dataAlloc) {
    625      return false;
    626    }
    627 
    628    *hcsAlloc = cx->realm()->randomHashCodeScrambler();
    629 
    630    std::uninitialized_fill_n(tableAlloc, buckets, nullptr);
    631 
    632    setHashTable(tableAlloc);
    633    setData(dataAlloc);
    634    setDataCapacity(capacity);
    635    setHashShift(InitialHashShift);
    636    setHashCodeScrambler(hcsAlloc);
    637    MOZ_ASSERT(hashBuckets() == buckets);
    638    return true;
    639  }
    640 
    641  void updateHashTableForRekey(Data* entry, HashNumber oldHash,
    642                               HashNumber newHash) {
    643    uint32_t hashShift = getHashShift();
    644    oldHash >>= hashShift;
    645    newHash >>= hashShift;
    646 
    647    if (oldHash == newHash) {
    648      return;
    649    }
    650 
    651    // Remove this entry from its old hash chain. (If this crashes reading
    652    // nullptr, it would mean we did not find this entry on the hash chain where
    653    // we expected it. That probably means the key's hash code changed since it
    654    // was inserted, breaking the hash code invariant.)
    655    Data** hashTable = getHashTable();
    656    Data** ep = &hashTable[oldHash];
    657    while (*ep != entry) {
    658      ep = &(*ep)->chain;
    659    }
    660    *ep = entry->chain;
    661 
    662    // Add it to the new hash chain. We could just insert it at the beginning of
    663    // the chain. Instead, we do a bit of work to preserve the invariant that
    664    // hash chains always go in reverse insertion order (descending memory
    665    // order). No code currently depends on this invariant, so it's fine to kill
    666    // it if needed.
    667    ep = &hashTable[newHash];
    668    while (*ep && *ep > entry) {
    669      ep = &(*ep)->chain;
    670    }
    671    entry->chain = *ep;
    672    *ep = entry;
    673  }
    674 
    675 public:
    676  explicit OrderedHashTableImpl(OrderedHashTableObject* obj) : obj(obj) {}
    677 
    678  void initSlots() {
    679    MOZ_ASSERT(!hasInitializedSlots(), "init must be called at most once");
    680    setHashTable(nullptr);
    681    setData(nullptr);
    682    setDataLength(0);
    683    setDataCapacity(0);
    684    setLiveCount(0);
    685    setHashShift(0);
    686    setTenuredIterators(nullptr);
    687    setNurseryIterators(nullptr);
    688    setHashCodeScrambler(nullptr);
    689  }
    690 
  // Give the nursery a chance to move this table's buffer when |obj| is
  // promoted, then repair all internal |Data*| pointers if the buffer moved.
  void maybeMoveBufferOnPromotion(Nursery& nursery) {
    // Nothing to fix up if no buffer was ever allocated.
    if (!hasAllocatedBuffer()) {
      return;
    }

    Data* oldData = getData();
    uint32_t dataCapacity = getDataCapacity();
    uint32_t buckets = hashBuckets();

    size_t numBytes = calcAllocSize(dataCapacity, buckets);

    void* buf = oldData;
    Nursery::WasBufferMoved result =
        nursery.maybeMoveBufferOnPromotion(&buf, obj, numBytes);
    if (result == Nursery::BufferNotMoved) {
      return;
    }

    // The buffer was moved in memory. Update reserved slots and fix up the
    // |Data*| pointers for the hash table chains.
    // TODO(bug 1931492): consider storing indices instead of pointers to
    // simplify this.

    auto [data, table, hcs, numBytesUnused] =
        getBufferParts(buf, numBytes, dataCapacity, buckets);

    // Map a pointer into the old buffer to its entry index.
    auto entryIndex = [=](const Data* entry) {
      MOZ_ASSERT(entry >= oldData);
      MOZ_ASSERT(size_t(entry - oldData) < dataCapacity);
      return entry - oldData;
    };

    // Rebase the collision-chain pointers stored in the data entries...
    for (uint32_t i = 0, len = getDataLength(); i < len; i++) {
      if (const Data* chain = data[i].chain) {
        data[i].chain = data + entryIndex(chain);
      }
    }
    // ...and the bucket-head pointers in the hash table.
    for (uint32_t i = 0; i < buckets; i++) {
      if (const Data* chain = table[i]) {
        table[i] = data + entryIndex(chain);
      }
    }

    setData(data);
    setHashTable(table);
    setHashCodeScrambler(hcs);
  }
    738 
  // Memory-reporting hook: returns the size in bytes of the table's single
  // heap buffer (which holds the data array, hash table, and scrambler), not
  // counting the object itself. Note: |mallocSizeOf| is currently unused here
  // because the size comes from the GC's allocation metadata; the parameter
  // presumably exists for interface parity with other reporters — confirm
  // with callers before removing.
  size_t sizeOfExcludingObject(mozilla::MallocSizeOf mallocSizeOf) const {
    MOZ_ASSERT(obj->isTenured());  // Assumes data is not in the nursery.

    size_t size = 0;
    if (hasInitializedSlots() && hasAllocatedBuffer()) {
      // Note: this also includes the HashCodeScrambler and the hashTable array.
      size += gc::GetAllocSize(obj->zone(), getData());
    }
    return size;
  }
    749 
  /* Return the number of live (non-removed) elements in the table. */
  uint32_t count() const { return getLiveCount(); }

  /* True if any element matches l. */
  bool has(const Lookup& l) const { return lookup(l) != nullptr; }

  /* Return a pointer to the element, if any, that matches l, or nullptr.
   * The pointer points into the table's buffer and may be invalidated by any
   * subsequent mutation that triggers a rehash (put/remove/clear). */
  T* get(const Lookup& l) {
    Data* e = lookup(l);
    return e ? &e->element : nullptr;
  }
    761 
    762  /*
    763   * If the table already contains an entry that matches |element|,
    764   * replace that entry with |element|. Otherwise add a new entry.
    765   *
    766   * On success, return true, whether there was already a matching element or
    767   * not. On allocation failure, return false. If this returns false, it
    768   * means the element was not added to the table.
    769   */
    770  template <typename ElementInput>
    771  [[nodiscard]] bool put(JSContext* cx, ElementInput&& element) {
    772    HashNumber h;
    773    if (hasAllocatedBuffer()) {
    774      h = prepareHash(Ops::getKey(element));
    775      if (Data* e = lookup(Ops::getKey(element), h)) {
    776        e->element = std::forward<ElementInput>(element);
    777        return true;
    778      }
    779      if (getDataLength() == getDataCapacity() && !rehashOnFull(cx)) {
    780        return false;
    781      }
    782    } else {
    783      if (!initBuffer(cx)) {
    784        return false;
    785      }
    786      h = prepareHash(Ops::getKey(element));
    787    }
    788    auto [entry, chain] = addEntry(h);
    789    new (entry) Data(std::forward<ElementInput>(element), chain);
    790    return true;
    791  }
    792 
    793  /*
    794   * If the table already contains an entry that matches |element|,
    795   * return that entry. Otherwise add a new entry.
    796   *
    797   * On success, return a pointer to the element, whether there was already a
    798   * matching element or not. On allocation failure, return a nullptr. If this
    799   * returns a nullptr, it means the element was not added to the table.
    800   */
    801  template <typename ElementInput>
    802  [[nodiscard]] T* getOrAdd(JSContext* cx, ElementInput&& element) {
    803    HashNumber h;
    804    if (hasAllocatedBuffer()) {
    805      h = prepareHash(Ops::getKey(element));
    806      if (Data* e = lookup(Ops::getKey(element), h)) {
    807        return &e->element;
    808      }
    809      if (getDataLength() == getDataCapacity() && !rehashOnFull(cx)) {
    810        return nullptr;
    811      }
    812    } else {
    813      if (!initBuffer(cx)) {
    814        return nullptr;
    815      }
    816      h = prepareHash(Ops::getKey(element));
    817    }
    818    auto [entry, chain] = addEntry(h);
    819    new (entry) Data(std::forward<ElementInput>(element), chain);
    820    return &entry->element;
    821  }
    822 
    823  /*
    824   * If the table contains an element matching l, remove it and return true.
    825   * Otherwise return false.
    826   */
    827  bool remove(JSContext* cx, const Lookup& l) {
    828    // Note: This could be optimized so that removing the last entry,
    829    // data[dataLength - 1], decrements dataLength. LIFO use cases would
    830    // benefit.
    831 
    832    // If a matching entry exists, empty it.
    833    Data* e = lookup(l);
    834    if (e == nullptr) {
    835      return false;
    836    }
    837 
    838    MOZ_ASSERT(uint32_t(e - getData()) < getDataCapacity());
    839 
    840    uint32_t liveCount = getLiveCount();
    841    liveCount--;
    842    setLiveCount(liveCount);
    843    Ops::makeEmpty(&e->element);
    844 
    845    // Update active iterators.
    846    uint32_t pos = e - getData();
    847    forEachIterator(
    848        [this, pos](auto* iter) { IterOps::onRemove(obj, iter, pos); });
    849 
    850    // If many entries have been removed, try to shrink the table. Ignore OOM
    851    // because shrinking the table is an optimization and it's okay for it to
    852    // fail.
    853    if (hashBuckets() > InitialBuckets &&
    854        liveCount < getDataLength() * MinDataFill) {
    855      if (!rehash(cx, getHashShift() + 1)) {
    856        cx->recoverFromOutOfMemory();
    857      }
    858    }
    859 
    860    return true;
    861  }
    862 
    863  /*
    864   * Remove all entries.
    865   *
    866   * The effect on active iterators is the same as removing all entries; in
    867   * particular, those iterators are still active and will see any entries
    868   * added after a clear().
    869   */
    870  void clear(JSContext* cx) {
    871    if (getDataLength() != 0) {
    872      destroyData(getData(), getDataLength());
    873      setDataLength(0);
    874      setLiveCount(0);
    875 
    876      size_t buckets = hashBuckets();
    877      std::fill_n(getHashTable(), buckets, nullptr);
    878 
    879      forEachIterator([](auto* iter) { IterOps::onClear(iter); });
    880 
    881      // Try to shrink the table. Ignore OOM because shrinking the table is an
    882      // optimization and it's okay for it to fail.
    883      if (buckets > InitialBuckets) {
    884        if (!rehash(cx, InitialHashShift)) {
    885          cx->recoverFromOutOfMemory();
    886        }
    887      }
    888    }
    889 
    890    MOZ_ASSERT(getDataLength() == 0);
    891    MOZ_ASSERT(getLiveCount() == 0);
    892  }
    893 
  // Static helpers that keep active TableIteratorObjects consistent with the
  // table as entries are added, removed, or compacted. An iterator tracks two
  // numbers: |index|, its position in the data array, and |count|, the number
  // of non-empty entries it has already visited (to the left of |index|).
  class IterOps {
    friend class OrderedHashTableImpl;

    // Attach a fresh iterator to |table|, link it into the table's iterator
    // list, and position it on the first live entry.
    static void init(OrderedHashTableObject* table, TableIteratorObject* iter,
                     TableIteratorObject::Kind kind) {
      auto** listp = OrderedHashTableImpl(table).addressOfIterators(iter);
      iter->init(table, kind, listp);
      seek(table, iter);
    }

    // Advance the iterator's index past any empty (removed) entries so it
    // rests on a live entry or at dataLength.
    static void seek(OrderedHashTableObject* table, TableIteratorObject* iter) {
      iter->assertActiveIteratorFor(table);
      const Data* data = OrderedHashTableImpl(table).maybeData();
      uint32_t dataLength = OrderedHashTableImpl(table).getDataLength();
      uint32_t i = iter->getIndex();
      while (i < dataLength && Ops::isEmpty(Ops::getKey(data[i].element))) {
        i++;
      }
      iter->setIndex(i);
    }

    // The hash table calls this when an entry is removed.
    // j is the index of the removed entry.
    static void onRemove(OrderedHashTableObject* table,
                         TableIteratorObject* iter, uint32_t j) {
      iter->assertActiveIteratorFor(table);
      uint32_t i = iter->getIndex();
      if (j < i) {
        // An already-visited entry disappeared: one fewer live entry to the
        // left of |index|.
        iter->setCount(iter->getCount() - 1);
      }
      if (j == i) {
        // The entry the iterator is parked on was removed: skip ahead to the
        // next live entry.
        seek(table, iter);
      }
    }

    // The hash table calls this when the table is resized or compacted.
    // Since |count| is the number of nonempty entries to the left of |index|,
    // discarding the empty entries will not affect |count|, and it will make
    // |index| and |count| equal.
    static void onCompact(TableIteratorObject* iter) {
      iter->setIndex(iter->getCount());
    }

    // The hash table calls this when cleared.
    static void onClear(TableIteratorObject* iter) {
      iter->setIndex(0);
      iter->setCount(0);
    }

    // If the iterator reached the end of the data array, we're done: mark the
    // iterator inactive, remove it from the linked list, and return |true|.
    // Else, call |f| for the current entry, advance the iterator to the next
    // entry, and return |false|.
    template <typename F>
    static bool next(OrderedHashTableObject* obj, TableIteratorObject* iter,
                     F&& f) {
      iter->assertActiveIteratorFor(obj);

      OrderedHashTableImpl table(obj);
      uint32_t index = iter->getIndex();

      if (index >= table.getDataLength()) {
        iter->finish();
        return true;
      }

      f(iter->kind(), table.getData()[index].element);

      iter->setCount(iter->getCount() + 1);
      iter->setIndex(index + 1);
      seek(obj, iter);
      return false;
    }
  };
    968 
    969  // Calls |f| for each entry in the table. This function must not mutate the
    970  // table.
    971  template <typename F>
    972  [[nodiscard]] bool forEachEntry(F&& f) const {
    973    const Data* data = maybeData();
    974    uint32_t dataLength = getDataLength();
    975 #ifdef DEBUG
    976    uint32_t liveCount = getLiveCount();
    977 #endif
    978    for (uint32_t i = 0; i < dataLength; i++) {
    979      if (!Ops::isEmpty(Ops::getKey(data[i].element))) {
    980        if (!f(data[i].element)) {
    981          return false;
    982        }
    983      }
    984    }
    985    MOZ_ASSERT(maybeData() == data);
    986    MOZ_ASSERT(getDataLength() == dataLength);
    987    MOZ_ASSERT(getLiveCount() == liveCount);
    988    return true;
    989  }
    990 #ifdef DEBUG
    991  // Like forEachEntry, but infallible and the function is called at most
    992  // maxCount times. This is useful for debug assertions.
    993  template <typename F>
    994  void forEachEntryUpTo(size_t maxCount, F&& f) const {
    995    MOZ_ASSERT(maxCount > 0);
    996    const Data* data = maybeData();
    997    uint32_t dataLength = getDataLength();
    998    uint32_t liveCount = getLiveCount();
    999    size_t count = 0;
   1000    for (uint32_t i = 0; i < dataLength; i++) {
   1001      if (!Ops::isEmpty(Ops::getKey(data[i].element))) {
   1002        f(data[i].element);
   1003        count++;
   1004        if (count == maxCount) {
   1005          break;
   1006        }
   1007      }
   1008    }
   1009    MOZ_ASSERT(maybeData() == data);
   1010    MOZ_ASSERT(getDataLength() == dataLength);
   1011    MOZ_ASSERT(getLiveCount() == liveCount);
   1012  }
   1013 #endif
   1014 
  // GC tracing entry point: traces the buffer edge (which may relocate the
  // buffer under a moving GC) and then each live entry's element.
  void trace(JSTracer* trc) {
    Data* data = maybeData();
    if (data) {
      TraceBufferEdge(trc, obj, &data, "OrderedHashTable data");
      // If the tracer moved the buffer, store the new pointer back into the
      // reserved slot.
      if (data != maybeData()) {
        setData(data);
      }
    }

    // NOTE(review): this loop runs outside the |if (data)| guard; it relies
    // on dataLength being 0 whenever |data| is null — confirm that invariant
    // holds for all callers.
    uint32_t dataLength = getDataLength();
    for (uint32_t i = 0; i < dataLength; i++) {
      if (!Ops::isEmpty(Ops::getKey(data[i].element))) {
        Ops::trace(trc, this, i, data[i].element);
      }
    }
  }
   1031 
  // For use by the implementation of Ops::trace.
  // Traces |key|; if tracing moved the key (moving GC), the entry is rekeyed
  // because the key's hash code may have changed.
  void traceKey(JSTracer* trc, uint32_t index, const Key& key) {
    MOZ_ASSERT(index < getDataLength());
    // Trace a barrier-free copy so we can detect whether the GC moved it.
    UnbarrieredKey newKey = key;
    JS::GCPolicy<UnbarrieredKey>::trace(trc, &newKey,
                                        "OrderedHashTableObject key");
    if (newKey != key) {
      rekey(&getData()[index], newKey);
    }
  }
  // For use by the implementation of Ops::trace. Traces a map entry's value
  // in place (values do not affect hashing, so no fixup is needed).
  template <typename Value>
  void traceValue(JSTracer* trc, Value& value) {
    JS::GCPolicy<Value>::trace(trc, &value, "OrderedHashMapObject value");
  }
   1046 
  // Attach |iter| to this table and position it on the first live entry.
  void initIterator(TableIteratorObject* iter,
                    TableIteratorObject::Kind kind) const {
    IterOps::init(obj, iter, kind);
  }
  // Advance |iter|; see IterOps::next for the protocol (returns true when
  // iteration is finished).
  template <typename F>
  bool iteratorNext(TableIteratorObject* iter, F&& f) const {
    return IterOps::next(obj, iter, f);
  }

  // Drop the nursery-allocated iterator list (the list head is unlinked and
  // the slot cleared).
  void clearNurseryIterators() {
    if (TableIteratorObject* iter = getNurseryIterators()) {
      iter->setPrevPtr(nullptr);
    }
    setNurseryIterators(nullptr);
  }
  // Re-insert |iter| into this table's iterator list.
  void relinkNurseryIterator(TableIteratorObject* iter) {
    auto** listp = addressOfIterators(iter);
    iter->link(listp);
  }
   1066 
  // Called after the table object itself was moved from |old| to |obj|:
  // repoint the back-pointers of the iterator list heads at this object's
  // list-head slots.
  void updateIteratorsAfterMove(OrderedHashTableObject* old) {
    if (TableIteratorObject* iter = getTenuredIterators()) {
      MOZ_ASSERT(iter->getPrevPtr() ==
                 OrderedHashTableImpl(old).addressOfTenuredIterators());
      iter->setPrevPtr(addressOfTenuredIterators());
    }
    if (TableIteratorObject* iter = getNurseryIterators()) {
      MOZ_ASSERT(iter->getPrevPtr() ==
                 OrderedHashTableImpl(old).addressOfNurseryIterators());
      iter->setPrevPtr(addressOfNurseryIterators());
    }
  }

  // True if at least one nursery-allocated iterator is attached to the table.
  bool hasNurseryIterators() const { return getNurseryIterators(); }
   1081 
   1082  /*
   1083   * Change the value of the given key.
   1084   *
   1085   * This calls Ops::hash on both the current key and the new key.
   1086   * Ops::hash on the current key must return the same hash code as
   1087   * when the entry was added to the table.
   1088   */
   1089  void rekeyOneEntry(const Key& current, const Key& newKey, const T& element) {
   1090    if (current == newKey) {
   1091      return;
   1092    }
   1093 
   1094    HashNumber currentHash = prepareHash(current);
   1095    HashNumber newHash = prepareHash(newKey);
   1096 
   1097    Data* entry = lookup(current, currentHash);
   1098    MOZ_ASSERT(entry);
   1099    entry->element = element;
   1100 
   1101    updateHashTableForRekey(entry, currentHash, newHash);
   1102  }
   1103 
  // Layout constants used by JIT-generated code to access entries directly.
  static constexpr size_t offsetOfDataElement() {
    static_assert(offsetof(Data, element) == 0,
                  "TableIteratorLoadEntry and TableIteratorAdvance depend on "
                  "offsetof(Data, element) being 0");
    return offsetof(Data, element);
  }
  static constexpr size_t offsetOfDataChain() { return offsetof(Data, chain); }
  static constexpr size_t sizeofData() { return sizeof(Data); }
   1112 
   1113 #ifdef DEBUG
  // Debug-only: return the scrambled hash for |l|, or Nothing if it cannot be
  // computed yet.
  mozilla::Maybe<HashNumber> hash(const Lookup& l) const {
    // We can only compute the hash number if we have an allocated buffer
    // because the buffer contains the hash code scrambler.
    if (!hasAllocatedBuffer()) {
      return {};
    }
    return mozilla::Some(prepareHash(l));
  }
   1122 #endif
   1123 
   1124 private:
   1125  HashNumber prepareHash(const Lookup& l) const {
   1126    MOZ_ASSERT(hasAllocatedBuffer(),
   1127               "the hash code scrambler is allocated in the buffer");
   1128    const HashCodeScrambler& hcs = *getHashCodeScrambler();
   1129    return mozilla::ScrambleHashCode(Ops::hash(l, hcs));
   1130  }
   1131 
  /* The size of the hash table, in elements. Always a power of two, derived
   * from the stored hash shift. */
  uint32_t hashBuckets() const {
    return hashShiftToNumHashBuckets(getHashShift());
  }
   1136 
   1137  void destroyData(Data* data, uint32_t length) {
   1138    Data* end = data + length;
   1139    for (Data* p = data; p != end; p++) {
   1140      p->~Data();
   1141    }
   1142  }
   1143 
  // Destroy the first |length| entries of |data| and release the whole
  // buffer (sized for |capacity| entries and |hashBuckets| buckets) back to
  // the GC.
  void freeData(JSContext* cx, Data* data, uint32_t length, uint32_t capacity,
                uint32_t hashBuckets) {
    MOZ_ASSERT(data);
    MOZ_ASSERT(capacity > 0);

    destroyData(data, length);

    size_t numBytes = calcAllocSize(capacity, hashBuckets);

    obj->freeCellBuffer(cx, data, numBytes);
  }
   1155 
   1156  Data* lookup(const Lookup& l, HashNumber h) const {
   1157    MOZ_ASSERT(hasAllocatedBuffer());
   1158    Data** hashTable = getHashTable();
   1159    uint32_t hashShift = getHashShift();
   1160    for (Data* e = hashTable[h >> hashShift]; e; e = e->chain) {
   1161      if (Ops::match(Ops::getKey(e->element), l)) {
   1162        return e;
   1163      }
   1164    }
   1165    return nullptr;
   1166  }
   1167 
  // Convenience overload of lookup that computes the hash itself. Safe to
  // call before the buffer is allocated (returns nullptr then).
  Data* lookup(const Lookup& l) const {
    // Note: checking |getLiveCount() > 0| is a minor performance optimization
    // but this check is also required for correctness because it implies
    // |hasAllocatedBuffer()|.
    if (getLiveCount() == 0) {
      return nullptr;
    }
    return lookup(l, prepareHash(l));
  }
   1177 
  // Reserve the next slot in the data array and link it in as the new head of
  // the bucket for |hash|. Returns the (still uninitialized) entry and the
  // previous bucket head; the caller must placement-new a Data into the entry
  // with that chain pointer.
  std::tuple<Data*, Data*> addEntry(HashNumber hash) {
    uint32_t dataLength = getDataLength();
    MOZ_ASSERT(dataLength < getDataCapacity());

    Data* entry = &getData()[dataLength];
    setDataLength(dataLength + 1);
    setLiveCount(getLiveCount() + 1);

    Data** hashTable = getHashTable();
    hash >>= getHashShift();
    Data* chain = hashTable[hash];
    hashTable[hash] = entry;

    return std::make_tuple(entry, chain);
  }
   1193 
  /* This is called after rehashing the table. */
  void compacted() {
    // If we had any empty entries, compacting may have moved live entries
    // to the left within the data array. Notify all active iterators of
    // the change.
    forEachIterator([](auto* iter) { IterOps::onCompact(iter); });
  }
   1201 
  /* Compact the entries in the data array and rehash them. Reuses the
   * existing buffer: live entries are slid left over empty slots with a
   * read/write pointer pair, and the bucket chains are rebuilt from
   * scratch. */
  void rehashInPlace() {
    // Empty all buckets before relinking.
    Data** hashTable = getHashTable();
    std::fill_n(hashTable, hashBuckets(), nullptr);

    Data* const data = getData();
    uint32_t hashShift = getHashShift();
    Data* wp = data;  // write pointer: next slot for a live entry
    Data* end = data + getDataLength();
    for (Data* rp = data; rp != end; rp++) {
      if (!Ops::isEmpty(Ops::getKey(rp->element))) {
        HashNumber h = prepareHash(Ops::getKey(rp->element)) >> hashShift;
        if (rp != wp) {
          wp->element = std::move(rp->element);
        }
        wp->chain = hashTable[h];
        hashTable[h] = wp;
        wp++;
      }
    }
    MOZ_ASSERT(wp == data + getLiveCount());

    // Destroy the moved-from / empty tail entries.
    while (wp != end) {
      wp->~Data();
      wp++;
    }
    setDataLength(getLiveCount());
    compacted();
  }
   1231 
  // Called when the data array is full. Either grows the table (smaller hash
  // shift = more buckets/capacity) or, if enough entries are deleted,
  // compacts in place at the current size.
  [[nodiscard]] bool rehashOnFull(JSContext* cx) {
    MOZ_ASSERT(getDataLength() == getDataCapacity());

    // If the hashTable is more than 1/4 deleted data, simply rehash in
    // place to free up some space. Otherwise, grow the table.
    uint32_t newHashShift = getLiveCount() >= getDataCapacity() * 0.75
                                ? getHashShift() - 1
                                : getHashShift();
    return rehash(cx, newHashShift);
  }
   1242 
   1243  /*
   1244   * Grow, shrink, or compact both the hash table and data array.
   1245   *
   1246   * On success, this returns true, dataLength == liveCount, and there are no
   1247   * empty elements in data[0:dataLength]. On allocation failure, this
   1248   * leaves everything as it was and returns false.
   1249   */
   1250  [[nodiscard]] bool rehash(JSContext* cx, uint32_t newHashShift) {
   1251    // If the size of the table is not changing, rehash in place to avoid
   1252    // allocating memory.
   1253    if (newHashShift == getHashShift()) {
   1254      rehashInPlace();
   1255      return true;
   1256    }
   1257 
   1258    if (MOZ_UNLIKELY(newHashShift < MinHashShift)) {
   1259      ReportAllocationOverflow(cx);
   1260      return false;
   1261    }
   1262 
   1263    uint32_t newHashBuckets = hashShiftToNumHashBuckets(newHashShift);
   1264    uint32_t newCapacity = numHashBucketsToDataCapacity(newHashBuckets);
   1265 
   1266    auto [newData, newHashTable, newHcs, numBytes] =
   1267        allocateBuffer(cx, newCapacity, newHashBuckets);
   1268    if (!newData) {
   1269      return false;
   1270    }
   1271 
   1272    *newHcs = *getHashCodeScrambler();
   1273 
   1274    std::uninitialized_fill_n(newHashTable, newHashBuckets, nullptr);
   1275 
   1276    Data* const oldData = getData();
   1277    const uint32_t oldDataLength = getDataLength();
   1278 
   1279    Data* wp = newData;
   1280    Data* end = oldData + oldDataLength;
   1281    for (Data* p = oldData; p != end; p++) {
   1282      if (!Ops::isEmpty(Ops::getKey(p->element))) {
   1283        HashNumber h = prepareHash(Ops::getKey(p->element)) >> newHashShift;
   1284        new (wp) Data(std::move(p->element), newHashTable[h]);
   1285        newHashTable[h] = wp;
   1286        wp++;
   1287      }
   1288    }
   1289    MOZ_ASSERT(wp == newData + getLiveCount());
   1290 
   1291    freeData(cx, oldData, oldDataLength, getDataCapacity(), hashBuckets());
   1292 
   1293    setHashTable(newHashTable);
   1294    setData(newData);
   1295    setDataLength(getLiveCount());
   1296    setDataCapacity(newCapacity);
   1297    setHashShift(newHashShift);
   1298    setHashCodeScrambler(newHcs);
   1299    MOZ_ASSERT(hashBuckets() == newHashBuckets);
   1300 
   1301    compacted();
   1302    return true;
   1303  }
   1304 
  // Change the key of the given entry to |k| (bypassing barriers), then move
  // the entry to the hash bucket matching the new key.
  //
  // This calls Ops::hash on both the current key and the new key. Ops::hash on
  // the current key must return the same hash code as when the entry was added
  // to the table.
  void rekey(Data* entry, const UnbarrieredKey& k) {
    HashNumber oldHash = prepareHash(Ops::getKey(entry->element));
    HashNumber newHash = prepareHash(k);
    reinterpret_cast<UnbarrieredKey&>(Ops::getKeyRef(entry->element)) = k;
    updateHashTableForRekey(entry, oldHash, newHash);
  }
   1316 };
   1317 
   1318 }  // namespace detail
   1319 
// Base class of js::MapObject (see the SMDOC at the top of this file). Adds
// no state of its own; it only gives the Map flavor its own C++ type.
class OrderedHashMapObject : public detail::OrderedHashTableObject {};
   1321 
// Stack-only wrapper that implements Map semantics on top of
// detail::OrderedHashTableImpl, storing (key, value) Entry pairs. All
// operations forward to |impl|.
template <class Key, class Value, class OrderedHashPolicy>
class MOZ_STACK_CLASS OrderedHashMapImpl {
 public:
  class Entry {
    template <class, class>
    friend class detail::OrderedHashTableImpl;
    // The assignment operators are private: only the hash table itself may
    // overwrite an entry (the key is |const| for everyone else, hence the
    // const_casts here).
    void operator=(const Entry& rhs) {
      const_cast<Key&>(key) = rhs.key;
      value = rhs.value;
    }

    void operator=(Entry&& rhs) {
      MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
      const_cast<Key&>(key) = std::move(rhs.key);
      value = std::move(rhs.value);
    }

   public:
    Entry() = default;
    explicit Entry(const Key& k) : key(k) {}
    template <typename V>
    Entry(const Key& k, V&& v) : key(k), value(std::forward<V>(v)) {}
    Entry(Entry&& rhs) : key(std::move(rhs.key)), value(std::move(rhs.value)) {}

    const Key key{};
    Value value{};

    // Offsets used by JIT code to load key/value directly.
    static constexpr size_t offsetOfKey() { return offsetof(Entry, key); }
    static constexpr size_t offsetOfValue() { return offsetof(Entry, value); }
  };

 private:
  struct MapOps;
  using Impl = detail::OrderedHashTableImpl<Entry, MapOps>;

  // Adapter giving the generic table implementation access to the key inside
  // an Entry and to map-specific empty/trace behavior.
  struct MapOps : OrderedHashPolicy {
    using KeyType = Key;
    static void makeEmpty(Entry* e) {
      OrderedHashPolicy::makeEmpty(const_cast<Key*>(&e->key));

      // Clear the value. Destroying it is another possibility, but that
      // would complicate class Entry considerably.
      e->value = Value();
    }
    static const Key& getKey(const Entry& e) { return e.key; }
    static Key& getKeyRef(Entry& e) { return const_cast<Key&>(e.key); }
    static void trace(JSTracer* trc, Impl* table, uint32_t index,
                      Entry& entry) {
      table->traceKey(trc, index, entry.key);
      table->traceValue(trc, entry.value);
    }
  };

  Impl impl;

 public:
  using Lookup = typename Impl::Lookup;
  static constexpr size_t SlotCount = Impl::SlotCount;

  explicit OrderedHashMapImpl(OrderedHashMapObject* obj) : impl(obj) {}

  void initSlots() { impl.initSlots(); }
  uint32_t count() const { return impl.count(); }
  bool has(const Lookup& key) const { return impl.has(key); }
  template <typename F>
  [[nodiscard]] bool forEachEntry(F&& f) const {
    return impl.forEachEntry(f);
  }
#ifdef DEBUG
  template <typename F>
  void forEachEntryUpTo(size_t maxCount, F&& f) const {
    impl.forEachEntryUpTo(maxCount, f);
  }
#endif
  Entry* get(const Lookup& key) { return impl.get(key); }
  bool remove(JSContext* cx, const Lookup& key) { return impl.remove(cx, key); }
  void clear(JSContext* cx) { impl.clear(cx); }

  // Insert or overwrite the entry for |key|. Returns false only on OOM.
  template <typename K, typename V>
  [[nodiscard]] bool put(JSContext* cx, K&& key, V&& value) {
    return impl.put(cx, Entry(std::forward<K>(key), std::forward<V>(value)));
  }

  // Return the existing entry for |key|, or insert (key, value) and return
  // the new entry. Returns nullptr only on OOM.
  template <typename K, typename V>
  [[nodiscard]] Entry* getOrAdd(JSContext* cx, K&& key, V&& value) {
    return impl.getOrAdd(cx,
                         Entry(std::forward<K>(key), std::forward<V>(value)));
  }

#ifdef DEBUG
  mozilla::Maybe<HashNumber> hash(const Lookup& key) const {
    return impl.hash(key);
  }
#endif

  // If |current| is present, replace its key with getNewKey(current) (keeping
  // the value) and return the new key; otherwise return Nothing.
  template <typename GetNewKey>
  mozilla::Maybe<Key> rekeyOneEntry(Lookup& current, GetNewKey&& getNewKey) {
    // TODO: This is inefficient because we also look up the entry in
    // impl.rekeyOneEntry below.
    const Entry* e = get(current);
    if (!e) {
      return mozilla::Nothing();
    }

    Key newKey = getNewKey(current);
    impl.rekeyOneEntry(current, newKey, Entry(newKey, e->value));
    return mozilla::Some(newKey);
  }

  void initIterator(MapIteratorObject* iter,
                    TableIteratorObject::Kind kind) const {
    impl.initIterator(iter, kind);
  }
  template <typename F>
  bool iteratorNext(MapIteratorObject* iter, F&& f) const {
    return impl.iteratorNext(iter, f);
  }

  void clearNurseryIterators() { impl.clearNurseryIterators(); }
  void relinkNurseryIterator(MapIteratorObject* iter) {
    impl.relinkNurseryIterator(iter);
  }
  void updateIteratorsAfterMove(OrderedHashMapObject* old) {
    impl.updateIteratorsAfterMove(old);
  }
  bool hasNurseryIterators() const { return impl.hasNurseryIterators(); }

  void maybeMoveBufferOnPromotion(Nursery& nursery) {
    return impl.maybeMoveBufferOnPromotion(nursery);
  }

  void trace(JSTracer* trc) { impl.trace(trc); }

  // Layout constants forwarded for JIT code generation.
  static constexpr size_t offsetOfEntryKey() { return Entry::offsetOfKey(); }
  static constexpr size_t offsetOfImplDataElement() {
    return Impl::offsetOfDataElement();
  }
  static constexpr size_t offsetOfImplDataChain() {
    return Impl::offsetOfDataChain();
  }
  static constexpr size_t sizeofImplData() { return Impl::sizeofData(); }

  size_t sizeOfExcludingObject(mozilla::MallocSizeOf mallocSizeOf) const {
    return impl.sizeOfExcludingObject(mallocSizeOf);
  }
};
   1468 
// Base class of js::SetObject (see the SMDOC at the top of this file). Adds
// no state of its own; it only gives the Set flavor its own C++ type.
class OrderedHashSetObject : public detail::OrderedHashTableObject {};
   1470 
// Stack-only wrapper that implements Set semantics on top of
// detail::OrderedHashTableImpl: each element is its own key. All operations
// forward to |impl|.
template <class T, class OrderedHashPolicy>
class MOZ_STACK_CLASS OrderedHashSetImpl {
 private:
  struct SetOps;
  using Impl = detail::OrderedHashTableImpl<T, SetOps>;

  // Adapter for the generic table: for a set, the stored element is the key.
  struct SetOps : OrderedHashPolicy {
    using KeyType = const T;
    static const T& getKey(const T& v) { return v; }
    static T& getKeyRef(T& e) { return e; }
    static void trace(JSTracer* trc, Impl* table, uint32_t index, T& entry) {
      table->traceKey(trc, index, entry);
    }
  };

  Impl impl;

 public:
  using Lookup = typename Impl::Lookup;
  static constexpr size_t SlotCount = Impl::SlotCount;

  explicit OrderedHashSetImpl(OrderedHashSetObject* obj) : impl(obj) {}

  void initSlots() { impl.initSlots(); }
  uint32_t count() const { return impl.count(); }
  bool has(const Lookup& value) const { return impl.has(value); }
  template <typename F>
  [[nodiscard]] bool forEachEntry(F&& f) const {
    return impl.forEachEntry(f);
  }
#ifdef DEBUG
  template <typename F>
  void forEachEntryUpTo(size_t maxCount, F&& f) const {
    impl.forEachEntryUpTo(maxCount, f);
  }
#endif
  // Insert |value| (or overwrite the matching element). Returns false only
  // on OOM.
  template <typename Input>
  [[nodiscard]] bool put(JSContext* cx, Input&& value) {
    return impl.put(cx, std::forward<Input>(value));
  }
  bool remove(JSContext* cx, const Lookup& value) {
    return impl.remove(cx, value);
  }
  void clear(JSContext* cx) { impl.clear(cx); }

#ifdef DEBUG
  mozilla::Maybe<HashNumber> hash(const Lookup& value) const {
    return impl.hash(value);
  }
#endif

  // If |current| is present, replace it with getNewKey(current) and return
  // the new element; otherwise return Nothing.
  template <typename GetNewKey>
  mozilla::Maybe<T> rekeyOneEntry(Lookup& current, GetNewKey&& getNewKey) {
    // TODO: This is inefficient because we also look up the entry in
    // impl.rekeyOneEntry below.
    if (!has(current)) {
      return mozilla::Nothing();
    }

    T newKey = getNewKey(current);
    impl.rekeyOneEntry(current, newKey, newKey);
    return mozilla::Some(newKey);
  }

  void initIterator(SetIteratorObject* iter,
                    TableIteratorObject::Kind kind) const {
    impl.initIterator(iter, kind);
  }
  template <typename F>
  bool iteratorNext(SetIteratorObject* iter, F&& f) const {
    return impl.iteratorNext(iter, f);
  }

  void clearNurseryIterators() { impl.clearNurseryIterators(); }
  void relinkNurseryIterator(SetIteratorObject* iter) {
    impl.relinkNurseryIterator(iter);
  }
  void updateIteratorsAfterMove(OrderedHashSetObject* old) {
    impl.updateIteratorsAfterMove(old);
  }
  bool hasNurseryIterators() const { return impl.hasNurseryIterators(); }

  void maybeMoveBufferOnPromotion(Nursery& nursery) {
    return impl.maybeMoveBufferOnPromotion(nursery);
  }

  void trace(JSTracer* trc) { impl.trace(trc); }

  // Layout constants forwarded for JIT code generation. The element is the
  // key, so the key offset is 0.
  static constexpr size_t offsetOfEntryKey() { return 0; }
  static constexpr size_t offsetOfImplDataElement() {
    return Impl::offsetOfDataElement();
  }
  static constexpr size_t offsetOfImplDataChain() {
    return Impl::offsetOfDataChain();
  }
  static constexpr size_t sizeofImplData() { return Impl::sizeofData(); }

  size_t sizeOfExcludingObject(mozilla::MallocSizeOf mallocSizeOf) const {
    return impl.sizeOfExcludingObject(mallocSizeOf);
  }
};
   1572 
   1573 }  // namespace js
   1574 
   1575 #endif /* builtin_OrderedHashTableObject_h */