RadixTree.h (4965B)
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

#ifndef RADIX_TREE_H
#define RADIX_TREE_H

#include "mozilla/ThreadSafety.h"

#include "Constants.h"
#include "Utils.h"
#include "BaseAlloc.h"
#include "Mutex.h"

// ***************************************************************************
// Radix tree data structures.
//
// The number of bits passed to the template is the number of significant bits
// in an address to do a radix lookup with.
//
// An address is looked up by splitting it in kBitsPerLevel bit chunks, except
// the most significant bits, where the bit chunk is kBitsAtLevel1 which can be
// different if Bits is not a multiple of kBitsPerLevel.
//
// With e.g. sizeof(void*)=4, Bits=16 and kBitsPerLevel=8, an address is split
// like the following:
// 0x12345678 -> mRoot[0x12][0x34]
template <size_t Bits>
class AddressRadixTree {
  // Size of each radix tree node (as a power of 2).
  // This impacts tree depth.
#ifdef HAVE_64BIT_BUILD
  static const size_t kNodeSize = kCacheLineSize;
#else
  static const size_t kNodeSize = 16_KiB;
#endif
  // Number of key bits consumed per interior/leaf level: a node holds
  // kNodeSize / sizeof(void*) slots, hence log2 of that many bits.
  static const size_t kBitsPerLevel = LOG2(kNodeSize) - LOG2(sizeof(void*));
  // The first (most significant) level absorbs the remainder when Bits is not
  // a multiple of kBitsPerLevel, so the root node may be smaller than others.
  static const size_t kBitsAtLevel1 =
      (Bits % kBitsPerLevel) ? Bits % kBitsPerLevel : kBitsPerLevel;
  // Total number of levels in the tree (first level + full levels).
  static const size_t kHeight = (Bits + kBitsPerLevel - 1) / kBitsPerLevel;
  static_assert(kBitsAtLevel1 + (kHeight - 1) * kBitsPerLevel == Bits,
                "AddressRadixTree parameters don't work out");

  Mutex mLock MOZ_UNANNOTATED;
  // We guard only the single slot creations and assume read-only is safe
  // at any time.
  void** mRoot = nullptr;

 public:
  // Note that the constructor does not initialise the mutex.
  constexpr AddressRadixTree() {}

  // Initialises the mutex and allocates the root node. Returns false on
  // allocation failure (the tree is then unusable).
  bool Init() MOZ_REQUIRES(gInitLock) MOZ_EXCLUDES(mLock);

  // Looks up the value stored for aAddr; returns nullptr when no slot exists.
  // Reads without taking mLock (see the comment on mRoot above).
  inline void* Get(void* aAddr) MOZ_EXCLUDES(mLock);

  // Returns whether the value was properly set.
  inline bool Set(void* aAddr, void* aValue) MOZ_EXCLUDES(mLock);

  // Clears the slot for aAddr by storing nullptr. Interior nodes created on
  // the way to the slot are not reclaimed.
  inline bool Unset(void* aAddr) MOZ_EXCLUDES(mLock) {
    return Set(aAddr, nullptr);
  }

 private:
  // GetSlotInternal is agnostic wrt mLock and used directly only in DEBUG
  // code.
  inline void** GetSlotInternal(void* aAddr, bool aCreate);

  // Read-only lookup: never allocates, so no lock is required.
  inline void** GetSlotIfExists(void* aAddr) MOZ_EXCLUDES(mLock) {
    return GetSlotInternal(aAddr, false);
  }
  // Creating lookup: may allocate interior nodes, so the caller must hold
  // mLock to serialise concurrent slot creation.
  inline void** GetOrCreateSlot(void* aAddr) MOZ_REQUIRES(mLock) {
    return GetSlotInternal(aAddr, true);
  }
};

template <size_t Bits>
bool AddressRadixTree<Bits>::Init() {
  mLock.Init();
  // Root node has 2^kBitsAtLevel1 slots; calloc gives us null children.
  mRoot = (void**)sBaseAlloc.calloc(1 << kBitsAtLevel1, sizeof(void*));
  return mRoot;
}

// Walks the tree from the root to the leaf slot for aAddr. With aCreate,
// missing interior nodes are allocated (zero-filled) along the way; without
// it, a missing node aborts the walk. Returns a pointer to the leaf slot, or
// nullptr if the path is incomplete (or an allocation failed).
template <size_t Bits>
void** AddressRadixTree<Bits>::GetSlotInternal(void* aAddr, bool aCreate) {
  uintptr_t key = reinterpret_cast<uintptr_t>(aAddr);
  uintptr_t subkey;
  unsigned i, lshift, height, bits;
  void** node;
  void** child;

  // Descend through the interior levels; the last level is handled after the
  // loop because it holds values rather than child pointers.
  for (i = lshift = 0, height = kHeight, node = mRoot; i < height - 1;
       i++, lshift += bits, node = child) {
    // First level consumes kBitsAtLevel1 bits, all others kBitsPerLevel.
    bits = i ? kBitsPerLevel : kBitsAtLevel1;
    // Shift the already-consumed bits off the top, then shift down so that
    // only this level's `bits` bits remain: the index into `node`.
    subkey = (key << lshift) >> ((sizeof(void*) << 3) - bits);
    child = (void**)node[subkey];
    if (!child && aCreate) {
      child = (void**)sBaseAlloc.calloc(1 << kBitsPerLevel, sizeof(void*));
      if (child) {
        node[subkey] = child;
      }
    }
    if (!child) {
      // Path doesn't exist (and either aCreate is false or allocation
      // failed): there is no slot to return.
      return nullptr;
    }
  }

  // node is a leaf, so it contains values rather than node
  // pointers.
  bits = i ? kBitsPerLevel : kBitsAtLevel1;
  subkey = (key << lshift) >> ((sizeof(void*) << 3) - bits);
  return &node[subkey];
}

template <size_t Bits>
void* AddressRadixTree<Bits>::Get(void* aAddr) {
  void* ret = nullptr;

  // Deliberately unlocked read; slot creation is guarded by mLock but
  // existing slots are assumed safe to read concurrently (see mRoot).
  void** slot = GetSlotIfExists(aAddr);

  if (slot) {
    ret = *slot;
  }
#ifdef MOZ_DEBUG
  // Debug-only: re-do the lookup under the lock and check that the unlocked
  // read above did not observe a stale value.
  MutexAutoLock lock(mLock);

  // Suppose that it were possible for a jemalloc-allocated chunk to be
  // munmap()ped, followed by a different allocator in another thread re-using
  // overlapping virtual memory, all without invalidating the cached rtree
  // value. The result would be a false positive (the rtree would claim that
  // jemalloc owns memory that it had actually discarded). I don't think this
  // scenario is possible, but the following assertion is a prudent sanity
  // check.
  if (!slot) {
    // In case a slot has been created in the meantime.
    slot = GetSlotInternal(aAddr, false);
  }
  if (slot) {
    // The MutexAutoLock above should act as a memory barrier, forcing
    // the compiler to emit a new read instruction for *slot.
    MOZ_ASSERT(ret == *slot);
  } else {
    MOZ_ASSERT(ret == nullptr);
  }
#endif
  return ret;
}

// Stores aValue in the slot for aAddr, creating intermediate nodes as needed
// under mLock. Returns false only if a node allocation failed.
template <size_t Bits>
bool AddressRadixTree<Bits>::Set(void* aAddr, void* aValue) {
  MutexAutoLock lock(mLock);
  void** slot = GetOrCreateSlot(aAddr);
  if (slot) {
    *slot = aValue;
  }
  return slot;
}

#endif /* ! RADIX_TREE_H */