tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

BufferAllocatorInternals.h (16270B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 // Internal class definitions for the buffer allocator.
      8 
      9 #ifndef gc_BufferAllocatorInternals_h
     10 #define gc_BufferAllocatorInternals_h
     11 
     12 #include "mozilla/MathAlgorithms.h"
     13 
     14 #include "ds/SlimLinkedList.h"
     15 
     16 #include "gc/BufferAllocator.h"
     17 #include "gc/IteratorUtils.h"
     18 
     19 namespace js::gc {
     20 
// Smallest free region tracked by the allocator, derived from the smallest
// supported size class.
static constexpr size_t MinFreeRegionSize =
    1 << BufferAllocator::MinSizeClassShift;

// Size and alignment of the sub-regions that hold small buffer allocations
// (see SmallBufferRegion below).
static constexpr size_t SmallRegionShift = 14;  // 16 KB
static constexpr size_t SmallRegionSize = 1 << SmallRegionShift;
static constexpr uintptr_t SmallRegionMask = SmallRegionSize - 1;
// A small region must itself be a valid medium allocation size, since small
// regions are backed by medium allocations.
static_assert(SmallRegionSize >= MinMediumAllocSize);
static_assert(SmallRegionSize <= MaxMediumAllocSize);

// Size classes map to power of two sizes. The full range contains two
// consecutive sub-ranges [MinSmallAllocClass, MaxSmallAllocClass] and
// [MinMediumAllocClass, MaxMediumAllocClass]. MaxSmallAllocClass and
// MinMediumAllocClass are consecutive but both map to the same size, which is
// MinMediumAllocSize.
static constexpr size_t MinSmallAllocClass = 0;
static constexpr size_t MaxSmallAllocClass =
    BufferAllocator::SmallSizeClasses - 1;
static constexpr size_t MinMediumAllocClass = MaxSmallAllocClass + 1;
static constexpr size_t MaxMediumAllocClass =
    MinMediumAllocClass + BufferAllocator::MediumSizeClasses - 1;
static_assert(MaxMediumAllocClass == BufferAllocator::AllocSizeClasses - 1);

#ifdef DEBUG
// Magic check values used in debug builds. These are stored in LargeBuffer and
// FreeRegion metadata and verified by their check() methods.
static constexpr uint32_t LargeBufferCheckValue = 0xBFA110C2;
static constexpr uint32_t FreeRegionCheckValue = 0xBFA110C3;
#endif
     48 
     49 // Iterator that yields the indexes of set bits in a mozilla::BitSet.
     50 template <size_t N, typename Word = size_t>
     51 class BitSetIter {
     52  using BitSet = mozilla::BitSet<N, Word>;
     53  const BitSet& bitset;
     54  size_t bit = 0;
     55 
     56 public:
     57  explicit BitSetIter(const BitSet& bitset) : bitset(bitset) {
     58    MOZ_ASSERT(!done());
     59    if (!bitset[bit]) {
     60      next();
     61    }
     62  }
     63  bool done() const {
     64    MOZ_ASSERT(bit <= N || bit == SIZE_MAX);
     65    return bit >= N;
     66  }
     67  void next() {
     68    MOZ_ASSERT(!done());
     69    bit++;
     70    if (bit != N) {
     71      bit = bitset.FindNext(bit);
     72    }
     73  }
     74  size_t get() const {
     75    MOZ_ASSERT(!done());
     76    return bit;
     77  }
     78  operator size_t() const { return get(); }
     79 };
     80 
// Iterator that yields the indexes of set bits in an AtomicBitmap.
template <size_t N>
class js::gc::AtomicBitmap<N>::Iter {
  const AtomicBitmap& bitmap;
  size_t bit = 0;  // Current bit index; equals N once iteration has finished.

 public:
  // Starts at the first set bit, or ends immediately if no bit is set.
  explicit Iter(AtomicBitmap& bitmap) : bitmap(bitmap) {
    if (!bitmap.getBit(bit)) {
      next();
    }
  }

  bool done() const {
    MOZ_ASSERT(bit <= N);
    return bit == N;
  }

  // Advance to the next set bit, or to N if there are no more set bits.
  void next() {
    MOZ_ASSERT(!done());

    bit++;
    if (bit == N) {
      return;
    }

    // Scan one word at a time rather than testing individual bits.
    static constexpr size_t bitsPerWord = sizeof(Word) * CHAR_BIT;
    size_t wordIndex = bit / bitsPerWord;
    size_t bitIndex = bit % bitsPerWord;

    uintptr_t word = bitmap.getWord(wordIndex);
    // Mask word containing |bit|, clearing the bits below it.
    word &= (uintptr_t(-1) << bitIndex);
    while (word == 0) {
      // Current word exhausted: move to the next word, stopping at the end of
      // the bitmap.
      wordIndex++;
      if (wordIndex == WordCount) {
        bit = N;
        return;
      }
      word = bitmap.getWord(wordIndex);
    }

    // The lowest set bit in |word| is the next set bit overall.
    bitIndex = mozilla::CountTrailingZeroes(word);
    bit = wordIndex * bitsPerWord + bitIndex;
  }

  size_t get() const {
    MOZ_ASSERT(!done());
    return bit;
  }
};
    132 
    133 // Iterator that yields offsets and pointers into a block of memory
    134 // corresponding to the bits set in a BitSet.
    135 template <typename BitmapIter, size_t Granularity, typename T = void>
    136 class BitmapToBlockIter : public BitmapIter {
    137  uintptr_t baseAddr;
    138 
    139 public:
    140  template <typename S>
    141  BitmapToBlockIter(void* base, S&& arg)
    142      : BitmapIter(std::forward<S>(arg)), baseAddr(uintptr_t(base)) {}
    143  size_t getOffset() const { return BitmapIter::get() * Granularity; }
    144  T* get() const { return reinterpret_cast<T*>(baseAddr + getOffset()); }
    145  operator T*() const { return get(); }
    146  T* operator->() const { return get(); }
    147 };
    148 
    149 template <typename T>
    150 class LinkedListIter {
    151  T* element;
    152 
    153 public:
    154  explicit LinkedListIter(SlimLinkedList<T>& list) : element(list.getFirst()) {}
    155  bool done() const { return !element; }
    156  void next() {
    157    MOZ_ASSERT(!done());
    158    element = element->getNext();
    159  }
    160  T* get() const { return element; }
    161  operator T*() const { return get(); }
    162  T* operator->() const { return get(); }
    163 };
    164 
    165 class BufferAllocator::FreeLists::FreeListIter
    166    : public BitSetIter<AllocSizeClasses, uint32_t> {
    167  FreeLists& freeLists;
    168 
    169 public:
    170  explicit FreeListIter(FreeLists& freeLists)
    171      : BitSetIter(freeLists.available), freeLists(freeLists) {}
    172  FreeList& get() {
    173    size_t sizeClass = BitSetIter::get();
    174    return freeLists.lists[sizeClass];
    175  }
    176  operator FreeList&() { return get(); }
    177 };
    178 
// Iterator over every FreeRegion in a FreeLists structure: iterates the
// non-empty free lists (FreeListIter) and then each region within a list
// (LinkedListIter).
class BufferAllocator::FreeLists::FreeRegionIter
    : public NestedIterator<FreeListIter, LinkedListIter<FreeRegion>> {
 public:
  explicit FreeRegionIter(FreeLists& freeLists) : NestedIterator(freeLists) {}
};
    184 
    185 class BufferAllocator::ChunkLists::ChunkListIter
    186    : public BitSetIter<AllocSizeClasses + 1, uint32_t> {
    187  ChunkLists& chunkLists;
    188 
    189 public:
    190  explicit ChunkListIter(ChunkLists& chunkLists)
    191      : BitSetIter(chunkLists.available), chunkLists(chunkLists) {}
    192  BufferChunkList& get() { return chunkLists.lists[getSizeClass()]; }
    193  size_t getSizeClass() const { return BitSetIter::get(); }
    194  operator BufferChunkList&() { return get(); }
    195 };
    196 
// Iterator over every BufferChunk in a ChunkLists structure: iterates the
// non-empty chunk lists (ChunkListIter) and then each chunk within a list
// (LinkedListIter).
class BufferAllocator::ChunkLists::ChunkIter
    : public NestedIterator<ChunkListIter, LinkedListIter<BufferChunk>> {
 public:
  explicit ChunkIter(ChunkLists& chunkLists) : NestedIterator(chunkLists) {}
  // Size class of the list the current chunk came from.
  size_t getSizeClass() const { return iterA().getSizeClass(); }
};
    203 
// CRTP base class providing allocation bookkeeping for a power-of-two sized
// block of memory. |Derived| is the concrete block type (its instance is
// assumed to live at the start of the block and be SizeBytes-aligned; see
// ptrToIndex). Allocations are made at multiples of |Granularity| bytes and
// tracked in per-granule bitmaps.
template <typename Derived, size_t Size, size_t Granularity>
struct AllocSpace {
  static_assert(Size > Granularity);
  static_assert(mozilla::IsPowerOfTwo(Size));
  static_assert(mozilla::IsPowerOfTwo(Granularity));
  static constexpr size_t SizeBytes = Size;
  static constexpr size_t GranularityBytes = Granularity;

  // Mask extracting the offset of an address within a block.
  static constexpr uintptr_t AddressMask = SizeBytes - 1;
  // Maximum number of allocations (= number of granules, and of bitmap bits).
  static constexpr size_t MaxAllocCount = SizeBytes / GranularityBytes;

  using PerAllocBitmap = mozilla::BitSet<MaxAllocCount>;
  using AtomicPerAllocBitmap =
      mozilla::BitSet<MaxAllocCount, mozilla::Atomic<size_t, mozilla::Relaxed>>;

  // Mark bitmap: one bit minimum per allocation, no gray bits.
  MainThreadOrGCTaskData<AtomicBitmap<MaxAllocCount>> markBits;

  // Allocation start and end bitmaps: these have a bit set corresponding to the
  // start of the allocation and to the byte after the end of allocation (except
  // for the end of the chunk).
  MainThreadOrGCTaskData<PerAllocBitmap> allocStartBitmap;
  MainThreadOrGCTaskData<AtomicPerAllocBitmap> allocEndBitmap;

  // A bitmap indicating whether an allocation is owned by a nursery or a
  // tenured GC thing.
  MainThreadOrGCTaskData<AtomicPerAllocBitmap> nurseryOwnedBitmap;

  // Offset of the first granule usable for allocation: the Derived header
  // occupies the start of the block, rounded up to the granularity.
  static constexpr uintptr_t firstAllocOffset() {
    return RoundUp(sizeof(Derived), GranularityBytes);
  }

  void setAllocated(void* alloc, size_t bytes, bool allocated);
  void updateEndOffset(void* alloc, size_t oldBytes, size_t newBytes);

  // Whether an allocation starts at |alloc| (per the start bitmap).
  bool isAllocated(const void* alloc) const {
    size_t bit = ptrToIndex(alloc);
    return allocStartBitmap.ref()[bit];
  }

  // As above, but takes an offset within the block.
  bool isAllocated(uintptr_t offset) const {
    size_t bit = offsetToIndex(offset);
    return allocStartBitmap.ref()[bit];
  }

  size_t allocBytes(const void* alloc) const;

  void setNurseryOwned(void* alloc, bool nurseryOwned) {
    MOZ_ASSERT(isAllocated(alloc));
    size_t bit = ptrToIndex(alloc);
    nurseryOwnedBitmap.ref()[bit] = nurseryOwned;
  }

  bool isNurseryOwned(const void* alloc) const {
    MOZ_ASSERT(isAllocated(alloc));
    size_t bit = ptrToIndex(alloc);
    return nurseryOwnedBitmap.ref()[bit];
  }

  bool setMarked(void* alloc);

  void setUnmarked(void* alloc) {
    MOZ_ASSERT(isAllocated(alloc));
    size_t bit = ptrToIndex(alloc);
    markBits.ref().setBit(bit, false);
  }

  bool isMarked(const void* alloc) const {
    MOZ_ASSERT(isAllocated(alloc));
    size_t bit = ptrToIndex(alloc);
    return markBits.ref().getBit(bit);
  }

  // Find next/previous allocations from |offset|. Return SizeBytes on failure.
  size_t findNextAllocated(uintptr_t offset) const;
  size_t findPrevAllocated(uintptr_t offset) const;

  // Find next/previous free region from a start/end address.
  using FreeRegion = BufferAllocator::FreeRegion;
  FreeRegion* findFollowingFreeRegion(uintptr_t startAddr);
  FreeRegion* findPrecedingFreeRegion(uintptr_t endAddr);

 protected:
  // All bitmaps must start out empty (zero-initialized).
  AllocSpace() {
    MOZ_ASSERT(allocStartBitmap.ref().IsEmpty());
    MOZ_ASSERT(allocEndBitmap.ref().IsEmpty());
    MOZ_ASSERT(nurseryOwnedBitmap.ref().IsEmpty());
  }

  // The block starts at the address of the Derived instance itself.
  uintptr_t startAddress() const {
    return uintptr_t(static_cast<const Derived*>(this));
  }

  // Convert a pointer within this block to a bitmap index. |Divisor| and
  // |Align| default to the allocation granularity; other values let callers
  // index bitmaps of a different granularity.
  template <size_t Divisor = GranularityBytes, size_t Align = Divisor>
  size_t ptrToIndex(const void* alloc) const {
    // Check |alloc| lies within this (SizeBytes-aligned) block.
    MOZ_ASSERT((uintptr_t(alloc) & ~AddressMask) == startAddress());
    uintptr_t offset = uintptr_t(alloc) & AddressMask;
    return offsetToIndex<Divisor, Align>(offset);
  }

  template <size_t Divisor = GranularityBytes, size_t Align = Divisor>
  static size_t offsetToIndex(uintptr_t offset) {
    MOZ_ASSERT(isValidOffset(offset));
    MOZ_ASSERT(offset % Align == 0);
    return offset / Divisor;
  }

  const void* ptrFromOffset(uintptr_t offset) const {
    MOZ_ASSERT(isValidOffset(offset));
    MOZ_ASSERT(offset % GranularityBytes == 0);
    return reinterpret_cast<void*>(startAddress() + offset);
  }

  // Return the index of the first end-bitmap bit after |startIndex|, or
  // MaxAllocCount if there is none (e.g. for an allocation that extends to the
  // end of the block).
  size_t findEndBit(size_t startIndex) const {
    MOZ_ASSERT(startIndex < MaxAllocCount);
    if (startIndex + 1 == MaxAllocCount) {
      return MaxAllocCount;
    }
    size_t endIndex = allocEndBitmap.ref().FindNext(startIndex + 1);
    if (endIndex == SIZE_MAX) {
      return MaxAllocCount;
    }
    return endIndex;
  }

#ifdef DEBUG
  // An offset is valid if it lies in the allocatable part of the block, i.e.
  // after the Derived header and before the end.
  static bool isValidOffset(uintptr_t offset) {
    return offset >= firstAllocOffset() && offset < SizeBytes;
  }
#endif
};
    335 
// A chunk containing medium buffer allocations for a single zone. Unlike
// ArenaChunk, allocations from different zones do not share chunks.
struct BufferChunk
    : public ChunkBase,
      public SlimLinkedListElement<BufferChunk>,
      public AllocSpace<BufferChunk, ChunkSize, MediumAllocGranularity> {
#ifdef DEBUG
  // The zone this chunk's allocations belong to, for assertions only.
  MainThreadOrGCTaskData<Zone*> zone;
#endif

  MainThreadOrGCTaskData<bool> allocatedDuringCollection;
  // Whether any allocation in this chunk is nursery owned, and the expected
  // state of that flag once sweeping completes.
  MainThreadData<bool> hasNurseryOwnedAllocs;
  MainThreadOrGCTaskData<bool> hasNurseryOwnedAllocsAfterSweep;

  static constexpr size_t MaxAllocsPerChunk = MaxAllocCount;  // todo remove

  // One bit per OS page, set for pages that are currently decommitted.
  static constexpr size_t PagesPerChunk = ChunkSize / PageSize;
  using PerPageBitmap = mozilla::BitSet<PagesPerChunk, uint32_t>;
  MainThreadOrGCTaskData<PerPageBitmap> decommittedPages;

  // One bit per possible SmallBufferRegion within the chunk; see
  // setSmallBufferRegion / isSmallBufferRegion.
  static constexpr size_t SmallRegionsPerChunk = ChunkSize / SmallRegionSize;
  using SmallRegionBitmap = AtomicBitmap<SmallRegionsPerChunk>;
  MainThreadOrGCTaskData<SmallRegionBitmap> smallRegionBitmap;

  // Free regions in this chunk. When a chunk is swept its free regions are
  // stored here. When the chunk is being used for allocation these are moved to
  // BufferAllocator::freeLists. |ownsFreeLists| indicates whether this is in
  // use.
  MainThreadOrGCTaskData<BufferAllocator::FreeLists> freeLists;
  MainThreadOrGCTaskData<bool> ownsFreeLists;

  // Iterator over the medium allocations in this chunk, driven by the
  // allocation start bitmap.
  using AllocIter =
      BitmapToBlockIter<BitSetIter<MaxAllocCount>, MediumAllocGranularity>;
  AllocIter allocIter() { return {this, allocStartBitmap.ref()}; }

  // Iterator over the small buffer regions in this chunk.
  using SmallRegionIter = BitmapToBlockIter<SmallRegionBitmap::Iter,
                                            SmallRegionSize, SmallBufferRegion>;
  SmallRegionIter smallRegionIter() { return {this, smallRegionBitmap.ref()}; }

  // Get the BufferChunk containing an allocation, via the chunk base address.
  static const BufferChunk* from(const void* alloc) {
    return from(const_cast<void*>(alloc));
  }
  static BufferChunk* from(void* alloc) {
    ChunkBase* chunk = js::gc::detail::GetGCAddressChunkBase(alloc);
    MOZ_ASSERT(chunk->kind == ChunkKind::Buffers);
    return static_cast<BufferChunk*>(chunk);
  }

  explicit BufferChunk(Zone* zone);
  ~BufferChunk();

  void setSmallBufferRegion(void* alloc, bool smallAlloc);
  bool isSmallBufferRegion(const void* alloc) const;

  size_t sizeClassForAvailableLists() const;

  bool isPointerWithinAllocation(void* ptr) const;
};
    394 
// Offset of the first medium allocation in a BufferChunk (the chunk header is
// stored at the start of the chunk).
constexpr size_t FirstMediumAllocOffset = BufferChunk::firstAllocOffset();
    396 
// A sub-region backed by a medium allocation which contains small buffer
// allocations. Regions are SmallRegionSize bytes and SmallRegionSize-aligned,
// so the owning region of an allocation can be recovered by masking the
// address (see from()).
struct SmallBufferRegion : public AllocSpace<SmallBufferRegion, SmallRegionSize,
                                             SmallAllocGranularity> {
  MainThreadOrGCTaskData<bool> hasNurseryOwnedAllocs_;

  // Iterator over the small allocations in this region, driven by the
  // allocation start bitmap.
  using AllocIter =
      BitmapToBlockIter<BitSetIter<MaxAllocCount>, SmallAllocGranularity>;
  AllocIter allocIter() { return {this, allocStartBitmap.ref()}; }

  // Get the region containing a small allocation by masking off the
  // low address bits.
  static SmallBufferRegion* from(void* alloc) {
    uintptr_t addr = uintptr_t(alloc) & ~SmallRegionMask;
    auto* region = reinterpret_cast<SmallBufferRegion*>(addr);
#ifdef DEBUG
    // Check the result really is a live small buffer region in its chunk.
    BufferChunk* chunk = BufferChunk::from(region);
    MOZ_ASSERT(chunk->isAllocated(region));
    MOZ_ASSERT(chunk->isSmallBufferRegion(region));
#endif
    return region;
  }

  void setHasNurseryOwnedAllocs(bool value);
  bool hasNurseryOwnedAllocs() const;

  bool isPointerWithinAllocation(void* ptr) const;
};
    423 
// Offset of the first small allocation in a SmallBufferRegion (the region
// header is stored at the start of the region).
constexpr size_t FirstSmallAllocOffset = SmallBufferRegion::firstAllocOffset();
static_assert(FirstSmallAllocOffset < SmallRegionSize);
    426 
// Describes a free region in a buffer chunk. This structure is stored at the
// end of the region.
//
// Medium allocations are made in FreeRegions in increasing address order. The
// final allocation will contain the now empty and unused FreeRegion structure.
// FreeRegions are stored in buckets based on their size in FreeLists. Each
// bucket is a linked list of FreeRegions.
struct BufferAllocator::FreeRegion
    : public SlimLinkedListElement<BufferAllocator::FreeRegion> {
  // First address of the free region. The end is implied by the address of
  // this structure itself (see getEnd()).
  uintptr_t startAddr;
  // Whether any pages in this region have been decommitted.
  bool hasDecommittedPages;

#ifdef DEBUG
  uint32_t checkValue = FreeRegionCheckValue;
#endif

  explicit FreeRegion(uintptr_t startAddr, bool decommitted = false)
      : startAddr(startAddr), hasDecommittedPages(decommitted) {}

  // Recover the FreeRegion structure from the end offset of a free region
  // within a chunk or small buffer region.
  static FreeRegion* fromEndOffset(BufferChunk* chunk, uintptr_t endOffset) {
    MOZ_ASSERT(endOffset <= ChunkSize);
    return fromEndAddr(uintptr_t(chunk) + endOffset);
  }
  static FreeRegion* fromEndOffset(SmallBufferRegion* region,
                                   uintptr_t endOffset) {
    MOZ_ASSERT(endOffset <= SmallRegionSize);
    return fromEndAddr(uintptr_t(region) + endOffset);
  }
  // The structure occupies the last sizeof(FreeRegion) bytes of the region.
  static FreeRegion* fromEndAddr(uintptr_t endAddr) {
    MOZ_ASSERT(endAddr % SmallAllocGranularity == 0);
    auto* region = reinterpret_cast<FreeRegion*>(endAddr - sizeof(FreeRegion));
    region->check();
    return region;
  }

  // Debug-only sanity check that this points at a real FreeRegion.
  void check() const { MOZ_ASSERT(checkValue == FreeRegionCheckValue); }

  // End address of the region: just past this structure.
  uintptr_t getEnd() const { return uintptr_t(this + 1); }
  size_t size() const { return getEnd() - startAddr; }
};
    467 
// Metadata about a large buffer, stored externally.
struct LargeBuffer : public SlimLinkedListElement<LargeBuffer> {
  // The buffer itself; |bytes| is its size, always a multiple of ChunkSize.
  void* alloc;
  size_t bytes;
  bool isNurseryOwned;
  bool allocatedDuringCollection = false;

#ifdef DEBUG
  uint32_t checkValue = LargeBufferCheckValue;
#endif

  LargeBuffer(void* alloc, size_t bytes, bool nurseryOwned)
      : alloc(alloc), bytes(bytes), isNurseryOwned(nurseryOwned) {
    MOZ_ASSERT((bytes % ChunkSize) == 0);
  }

  // Debug-only sanity check that this points at a real LargeBuffer.
  void check() const { MOZ_ASSERT(checkValue == LargeBufferCheckValue); }

#ifdef DEBUG
  inline Zone* zone();
  inline Zone* zoneFromAnyThread();
#endif

  void* data() { return alloc; }
  size_t allocBytes() const { return bytes; }
  bool isPointerWithinAllocation(void* ptr) const;
};
    495 
    496 }  // namespace js::gc
    497 
    498 #endif  // gc_BufferAllocatorInternals_h