tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

commit 25de8c18648c6d6536b9b9379337560c78dffd30
parent 8c89daa76c67f0ecb039202c77b3e619052d660a
Author: Iain Ireland <iireland@mozilla.com>
Date:   Thu, 27 Nov 2025 21:39:55 +0000

Bug 2000328: Remove bitfield in mozilla::detail::HashTable r=jandem

We need to load the hash shift from JIT code, which requires a known offset. Explicit shifting is more reliable than bitfields.

Testing on godbolt (https://godbolt.org/z/srvaqfoPb), it looks like this will generally produce very slightly nicer code than before. Previously, the hashShift bitfield was being stored at the most-significant end of the word. To load `mGen`, we therefore had to mask off the hash shift with a large 64-bit constant. Now we can just do a shift, which is the same number of instructions but a smaller encoding.

Differential Revision: https://phabricator.services.mozilla.com/D273669

Diffstat:
M js/src/gdb/mozilla/ExecutableAllocator.py | 4 +++-
M mfbt/HashTable.h | 75 ++++++++++++++++++++++++++++++++++++++++++++-------------------------------
2 files changed, 47 insertions(+), 32 deletions(-)

diff --git a/js/src/gdb/mozilla/ExecutableAllocator.py b/js/src/gdb/mozilla/ExecutableAllocator.py @@ -58,7 +58,9 @@ class jsjitExecutableAllocator: self.table = allocator.value["m_pools"]["mImpl"]["mTable"] self.index = 0 kHashNumberBits = 32 - hashShift = allocator.value["m_pools"]["mImpl"]["mHashShift"] + hashShiftMask = 0xFF + genAndHashShift = allocator.value["m_pools"]["mImpl"]["mGenAndHashShift"] + hashShift = genAndHashShift & hashShiftMask self.capacity = 1 << (kHashNumberBits - hashShift) if self.table == 0: self.capacity = 0 diff --git a/mfbt/HashTable.h b/mfbt/HashTable.h @@ -1506,7 +1506,7 @@ class MOZ_STANDALONE_DEBUG HashTable : private AllocPolicy { // memory to be minimised after removing entries they should call compact(). ~ModIterator() { if (mRekeyed) { - mTable.mGen++; + mTable.incrementGeneration(); mTable.infallibleRehashIfOverloaded(); } @@ -1582,16 +1582,7 @@ class MOZ_STANDALONE_DEBUG HashTable : private AllocPolicy { ReentrancyGuard g1(*this); ReentrancyGuard g2(aOther); - // Manual swap of generation because it's a bitfield - uint64_t generation = mGen; - mGen = aOther.mGen; - aOther.mGen = generation; - - // Manual swap of hashShift because it's a bitfield - uint64_t hashShift = mHashShift; - mHashShift = aOther.mHashShift; - aOther.mHashShift = hashShift; - + std::swap(mGenAndHashShift, aOther.mGenAndHashShift); std::swap(mTable, aOther.mTable); std::swap(mEntryCount, aOther.mEntryCount); std::swap(mRemovedCount, aOther.mRemovedCount); @@ -1603,8 +1594,7 @@ class MOZ_STANDALONE_DEBUG HashTable : private AllocPolicy { private: void moveFrom(HashTable& aRhs) { - mGen = aRhs.mGen; - mHashShift = aRhs.mHashShift; + mGenAndHashShift = aRhs.mGenAndHashShift; mTable = aRhs.mTable; mEntryCount = aRhs.mEntryCount; mRemovedCount = aRhs.mRemovedCount; @@ -1623,11 +1613,11 @@ class MOZ_STANDALONE_DEBUG HashTable : private AllocPolicy { static const uint32_t CAP_BITS = 30; public: - uint64_t mGen : 56; // entry storage generation number - uint64_t 
mHashShift : 8; // multiplicative hash shift - char* mTable; // entry storage - uint32_t mEntryCount; // number of entries in mTable - uint32_t mRemovedCount; // removed entry sentinels in mTable + uint64_t mGenAndHashShift; // entry storage generation number (56 bits) + // and multiplicative hash shift (8 bits) + char* mTable; // entry storage + uint32_t mEntryCount; // number of entries in mTable + uint32_t mRemovedCount; // removed entry sentinels in mTable #ifdef DEBUG uint64_t mMutationCount; @@ -1653,6 +1643,29 @@ class MOZ_STANDALONE_DEBUG HashTable : private AllocPolicy { static const HashNumber sRemovedKey = Entry::sRemovedKey; static const HashNumber sCollisionBit = Entry::sCollisionBit; + static const uint64_t sHashShiftBits = 8; + static const uint64_t sHashShiftMask = (1 << sHashShiftBits) - 1; + static const uint64_t sGenerationShift = sHashShiftBits; + + MOZ_ALWAYS_INLINE uint8_t hashShift() const { + return uint8_t(mGenAndHashShift & sHashShiftMask); + } + MOZ_ALWAYS_INLINE uint64_t gen() const { + return mGenAndHashShift >> sGenerationShift; + } + + private: + void setGenAndHashShift(uint64_t aGeneration, uint8_t aHashShift) { + mGenAndHashShift = aGeneration << sGenerationShift | aHashShift; + } + + public: + void incrementGeneration() { setGenAndHashShift(gen() + 1, hashShift()); } + void setHashShift(uint32_t aHashShift) { + MOZ_ASSERT((aHashShift & sHashShiftMask) == aHashShift); + mGenAndHashShift = (mGenAndHashShift & ~sHashShiftMask) | aHashShift; + } + static uint32_t bestCapacity(uint32_t aLen) { static_assert( (sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit, @@ -1677,7 +1690,7 @@ class MOZ_STANDALONE_DEBUG HashTable : private AllocPolicy { return capacity; } - static uint32_t hashShift(uint32_t aLen) { + static uint32_t hashShiftForLength(uint32_t aLen) { // Reject all lengths whose initial computed capacity would exceed // sMaxCapacity. Round that maximum aLen down to the nearest power of two // for speedier code. 
@@ -1747,8 +1760,7 @@ class MOZ_STANDALONE_DEBUG HashTable : private AllocPolicy { public: HashTable(AllocPolicy aAllocPolicy, uint32_t aLen) : AllocPolicy(std::move(aAllocPolicy)), - mGen(0), - mHashShift(hashShift(aLen)), + mGenAndHashShift(hashShiftForLength(aLen)), mTable(nullptr), mEntryCount(0), mRemovedCount(0) @@ -1770,7 +1782,7 @@ class MOZ_STANDALONE_DEBUG HashTable : private AllocPolicy { } private: - HashNumber hash1(HashNumber aHash0) const { return aHash0 >> mHashShift; } + HashNumber hash1(HashNumber aHash0) const { return aHash0 >> hashShift(); } struct DoubleHash { HashNumber mHash2; @@ -1778,8 +1790,8 @@ class MOZ_STANDALONE_DEBUG HashTable : private AllocPolicy { }; DoubleHash hash2(HashNumber aCurKeyHash) const { - uint32_t sizeLog2 = kHashNumberBits - mHashShift; - DoubleHash dh = {((aCurKeyHash << sizeLog2) >> mHashShift) | 1, + uint32_t sizeLog2 = kHashNumberBits - hashShift(); + DoubleHash dh = {((aCurKeyHash << sizeLog2) >> hashShift()) | 1, (HashNumber(1) << sizeLog2) - 1}; return dh; } @@ -1910,9 +1922,9 @@ class MOZ_STANDALONE_DEBUG HashTable : private AllocPolicy { } // We can't fail from here on, so update table parameters. - mHashShift = kHashNumberBits - newLog2; mRemovedCount = 0; - mGen++; + incrementGeneration(); + setHashShift(kHashNumberBits - newLog2); mTable = newTable; // Copy only live entries, leaving removed ones behind. @@ -1994,7 +2006,7 @@ class MOZ_STANDALONE_DEBUG HashTable : private AllocPolicy { // would have gotten through random insertion order. void rehashTableInPlace() { mRemovedCount = 0; - mGen++; + incrementGeneration(); forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.unsetCollision(); }); for (uint32_t i = 0; i < capacity();) { Slot src = slotForIndex(i); @@ -2067,8 +2079,9 @@ class MOZ_STANDALONE_DEBUG HashTable : private AllocPolicy { if (empty()) { // Free the entry storage. 
freeTable(*this, mTable, capacity()); - mGen++; - mHashShift = hashShift(0); // gives minimum capacity on regrowth + incrementGeneration(); + setHashShift( + hashShiftForLength(0)); // gives minimum capacity on regrowth mTable = nullptr; mRemovedCount = 0; return; @@ -2119,11 +2132,11 @@ class MOZ_STANDALONE_DEBUG HashTable : private AllocPolicy { uint32_t count() const { return mEntryCount; } - uint32_t rawCapacity() const { return 1u << (kHashNumberBits - mHashShift); } + uint32_t rawCapacity() const { return 1u << (kHashNumberBits - hashShift()); } uint32_t capacity() const { return mTable ? rawCapacity() : 0; } - Generation generation() const { return Generation(mGen); } + Generation generation() const { return Generation(gen()); } size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { return aMallocSizeOf(mTable);