tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

BufferAllocator.h (27820B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 // GC-internal header file for the buffer allocator.
      8 
      9 #ifndef gc_BufferAllocator_h
     10 #define gc_BufferAllocator_h
     11 
     12 #include "mozilla/Array.h"
     13 #include "mozilla/Atomics.h"
     14 #include "mozilla/BitSet.h"
     15 #include "mozilla/HashTable.h"
     16 #include "mozilla/Maybe.h"
     17 #include "mozilla/TimeStamp.h"
     18 
     19 #include <cstdint>
     20 #include <stddef.h>
     21 
     22 #include "jstypes.h"  // JS_PUBLIC_API
     23 
     24 #include "ds/SlimLinkedList.h"
     25 #include "js/HeapAPI.h"
     26 #include "threading/LockGuard.h"
     27 #include "threading/Mutex.h"
     28 #include "threading/ProtectedData.h"
     29 
     30 class JS_PUBLIC_API JSTracer;
     31 
     32 namespace JS {
     33 class JS_PUBLIC_API Zone;
     34 }  // namespace JS
     35 
     36 namespace js {
     37 
     38 class GCMarker;
     39 class Nursery;
     40 
     41 namespace gc {
     42 
     43 struct BufferChunk;
     44 struct Cell;
     45 class GCRuntime;
     46 struct LargeBuffer;
     47 struct SmallBufferRegion;
     48 
     49 // BufferAllocator allocates dynamically sized blocks of memory which can be
     50 // reclaimed by the garbage collector and are associated with GC things.
     51 //
     52 // Although these blocks can be reclaimed by GC, explicit free and resize are
     53 // also supported. This is important for buffers that can grow or shrink.
     54 //
     55 // The allocator uses a different strategy depending on the size of the
     56 // allocation requested. There are three size ranges, divided as follows:
     57 //
     58 //   Size:            Kind:   Allocator implementation:
     59 //    16 B  -   4 KB  Small   Uses a free list allocator from 16KB regions
     60 //     4 KB - 512 KB  Medium  Uses a free list allocator from 1 MB chunks
     61 //     1 MB -         Large   Uses the OS page allocator (e.g. mmap)
     62 //
     63 // The smallest supported allocation size is 16 bytes. This is used for a
     64 // dynamic slots allocation with zero slots to set a unique ID on a native
     65 // object. This is the size of two JS::Values.
     66 //
     67 // These size ranges are chosen to be the same as those used by jemalloc
     68 // (although that uses a different kind of allocator for its equivalent of small
     69 // allocations).
     70 //
     71 // Supported operations
     72 // --------------------
     73 //
     74 //  - Allocate a buffer. Buffers are always owned by a GC cell, and the
     75 //    allocator tracks whether the owner is in the nursery or the tenured heap.
     76 //
     77 //  - Trace an edge to buffer. When the owning cell is traced it must trace the
     78 //    edge to the buffer. This will mark the buffer in a GC and prevent it from
     79 //    being swept. This does not trace the buffer contents.
     80 //
     81 //  - Free a buffer. This allows uniquely owned buffers to be freed and reused
     82 //    without waiting for the next GC. It is a hint only and is not supported
     83 //    for all buffer kinds.
     84 //
     85 //  - Reallocate a buffer. This allows uniquely owned buffers to be resized,
     86 //    possibly in-place. This is important for performance on some benchmarks.
     87 //
     88 // Integration with the rest of the GC
     89 // -----------------------------------
     90 //
     91 // The GC calls the allocator at several points during minor and major GC (at
     92 // the start, when sweeping starts and at the end). Allocations are swept on a
     93 // background thread and the memory used by unmarked allocations is reclaimed.
     94 //
     95 // The allocator tries hard to avoid locking, and where it is necessary tries to
     96 // minimize the time spent holding locks. A lock is required for the following
     97 // operations:
     98 //
     99 //  - allocating a new chunk
    100 //  - main thread operations on large buffers while sweeping off-thread
    101 //  - merging back swept data on the main thread
    102 //
    103 // No locks are required to allocate on the main thread, even when off-thread
    104 // sweeping is taking place. This is achieved by moving data to be swept to
    105 // separate containers from those used for allocation on the main thread.
    106 //
    107 // Small and medium allocations
    108 // ----------------------------
    109 //
    110 // These are allocated in their own zone-specific chunks using a segregated free
    111 // list strategy. This is the main part of the allocator.
    112 //
    113 // The requested allocation size is used to pick a size class, which is used to
     114 // find a free region of suitable size to satisfy that request. In general size
    115 // classes for allocations are calculated by rounding up to the next power of
    116 // two; size classes for free regions are calculated by rounding down to the
    117 // previous power of two. This means that free regions of a particular size
    118 // class are always large enough to satisfy an allocation of that size class.
    119 //
    120 // The allocation size class is used to index into an array giving a list of
    121 // free regions of that size class. The first region in the list is used and
    122 // its start address updated; it may also be moved to a different list if it is
    123 // now empty or too small to satisfy further allocations for this size class.
    124 //
    125 // Medium allocations are allocated out of chunks directly and small allocations
    126 // out of 16KB sub-regions (which are essentially medium allocations
    127 // themselves).
    128 //
    129 // Data about allocations is stored in a header in the chunk or region, using
    130 // bitmaps for boolean flags.
    131 //
    132 // Sweeping works by processing a list of chunks, scanning each one for
    133 // allocated but unmarked buffers and rebuilding the free region data for that
    134 // chunk. Sweeping happens separately for minor and major GC and only
    135 // nursery-owned or tenured-owned buffers are swept at one time. This means that
    136 // chunks containing nursery-owned allocations are swept twice during a major
    137 // GC, once to sweep nursery-owned allocations and once for tenured-owned
    138 // allocations. This is required because the sweeping happens at different
    139 // times.
    140 //
    141 // Chunks containing nursery-owned buffers are stored in a separate list to
    142 // chunks that only contain tenured-owned buffers to reduce the number of chunks
    143 // that need to be swept for minor GC. They are stored in |mixedChunks| and are
    144 // moved to |mixedChunksToSweep| at the start of minor GC. They are then swept
    145 // on a background thread and are placed in |sweptMixedChunks| if they are not
     146 // freed. From there they can be merged back into one of the main thread lists
    147 // (since they may no longer contain nursery-owned buffers).
    148 //
    149 // Chunks containing tenured-owned buffers are stored in |tenuredChunks| and are
    150 // moved to |tenuredChunksToSweep| at the start of major GC. They are
    151 // unavailable for allocation after this point and will be swept on a background
    152 // thread and placed in |sweptTenuredChunks| if they are not freed. From there
    153 // they will be merged back into |tenuredChunks|. This means that allocation
    154 // during an incremental GC will allocate a new chunk.
    155 //
    156 // Merging swept data requires taking a lock and so only happens when
    157 // necessary. This happens when a new chunk is needed or at various points
    158 // during GC. During sweeping no additional synchronization is required for
    159 // allocation.
    160 //
    161 // Since major GC requires doing a minor GC at the start and we don't want to
    162 // have to wait for minor GC sweeping to finish there is an optimization where
    163 // chunks containing nursery-owned buffers swept as part of minor GC at the
    164 // start of a major GC are moved directly from |sweptMixedChunks| to
    165 // |tenuredChunksToSweep| at the end of minor GC sweeping. This is controlled by
    166 // the |majorStartedWhileMinorSweeping| flag.
    167 //
    168 // Similarly, if a major GC finishes while minor GC is sweeping then rather than
    169 // waiting for it to finish we set the |majorFinishedWhileMinorSweeping| flag so
     170 // that we clear the |allocatedDuringCollection| flag for these chunks at the
     171 // end of minor sweeping.
    172 //
    173 // Free works by extending neighboring free regions to cover the freed
    174 // allocation or adding a new one if necessary. Free regions are found by
    175 // checking the allocated bitmap. Free is not supported if the containing chunk
    176 // is currently being swept off-thread. Free is only supported for medium sized
    177 // allocations.
    178 //
    179 // Reallocation works by resizing in place if possible. It is always possible to
    180 // shrink a medium allocation in place if the target size is still medium.
    181 // In-place growth is possible if there is enough free space following the
    182 // allocation. This is not supported if the containing chunk is currently being
    183 // swept off-thread.  If not possible, a new allocation is made and the contents
    184 // of the buffer copied. Resizing in place is only supported for medium sized
    185 // allocations.
    186 //
    187 // Large allocations
    188 // -----------------
    189 //
    190 // These are implemented using the OS page allocator. Allocations of this size
    191 // are relatively rare and not much attempt is made to optimize them. They are
    192 // chunk aligned which allows them to be distinguished from the other allocation
     193 // kinds by checking the low bits of the pointer.
    194 //
    195 // Shrinking large allocations in place is only supported on Posix-like systems.
    196 //
    197 // Naming conventions
    198 // ------------------
    199 //
    200 // The following conventions are used in the code:
    201 //  - alloc:  client pointer to allocated memory
    202 //  - buffer: pointer to per-allocation metadata
    203 //
    204 
     205 class BufferAllocator : public SlimLinkedListElement<BufferAllocator> {
     206 public:
     207  static constexpr size_t MinSmallAllocShift = 4;    // 16 B
     208  static constexpr size_t MinMediumAllocShift = 12;  //  4 KB
     209  static constexpr size_t MinLargeAllocShift = 20;   //  1 MB
     210 
     211  // TODO: Ideally this would equal MinSmallAllocShift but we're constrained by
     212  // the size of FreeRegion which won't fit into 16 bytes.
     213  static constexpr size_t MinSizeClassShift = 5;  // 32 B
     214  static_assert(MinSizeClassShift >= MinSmallAllocShift);
     215 
     216  static constexpr size_t SmallSizeClasses =
     217      MinMediumAllocShift - MinSizeClassShift + 1;
     218  static constexpr size_t MediumSizeClasses =
     219      MinLargeAllocShift - MinMediumAllocShift + 1;
     220  static constexpr size_t AllocSizeClasses =
     221      SmallSizeClasses + MediumSizeClasses;
     222 
     223  static constexpr size_t FullChunkSizeClass = AllocSizeClasses;
     224 
     225  // An RAII guard to lock and unlock the buffer allocator lock.
     226  class AutoLock : public LockGuard<Mutex> {
     227   public:
     228    explicit AutoLock(GCRuntime* gc);
     229    explicit AutoLock(BufferAllocator* allocator);
     230    friend class UnlockGuard<AutoLock>;
     231  };
     232 
     233  // A lock guard that is locked only when needed.
     234  using MaybeLock = mozilla::Maybe<AutoLock>;
     235 
     236 private:
     237  template <typename Derived, size_t SizeBytes, size_t GranularityBytes>
     238  friend struct AllocSpace;
     239 
     240  using BufferChunkList = SlimLinkedList<BufferChunk>;
     241 
     242  struct FreeRegion;
     243  using FreeList = SlimLinkedList<FreeRegion>;
     244 
     245  using SizeClassBitSet = mozilla::BitSet<AllocSizeClasses, uint32_t>;
     246 
     247  // Segregated free list: an array of free lists, one per size class.
     248  class FreeLists {
     249    using FreeListArray = mozilla::Array<FreeList, AllocSizeClasses>;
     250 
     251    FreeListArray lists;
     252    SizeClassBitSet available;
     253 
     254   public:
     255    class FreeListIter;
     256    class FreeRegionIter;
     257 
     258    FreeLists() = default;
     259 
     260    FreeLists(FreeLists&& other);
     261    FreeLists& operator=(FreeLists&& other);
     262 
     263    FreeListIter freeListIter();
     264    FreeRegionIter freeRegionIter();
     265 
     266    bool isEmpty() const { return available.IsEmpty(); }
     267 
     268    bool hasSizeClass(size_t sizeClass) const;
     269    const auto& availableSizeClasses() const { return available; }
     270 
     271    // Returns SIZE_MAX if none available.
     272    size_t getFirstAvailableSizeClass(size_t minSizeClass,
     273                                      size_t maxSizeClass) const;
     274    size_t getLastAvailableSizeClass(size_t minSizeClass,
     275                                     size_t maxSizeClass) const;
     276 
     277    FreeRegion* getFirstRegion(size_t sizeClass);
     278 
     279    void pushFront(size_t sizeClass, FreeRegion* region);
     280    void pushBack(size_t sizeClass, FreeRegion* region);
     281 
     282    void append(FreeLists&& other);
     283    void prepend(FreeLists&& other);
     284 
     285    void remove(size_t sizeClass, FreeRegion* region);
     286 
     287    void clear();
     288 
     289    template <typename Func>
     290    void forEachRegion(Func&& func);
     291 
     292    void assertEmpty() const;
     293    void assertContains(size_t sizeClass, FreeRegion* region) const;
     294    void checkAvailable() const;
     295  };
     296 
     297  class ChunkLists {
     298    using ChunkListArray =
     299        mozilla::Array<BufferChunkList, AllocSizeClasses + 1>;
     300    using AvailableBitSet = mozilla::BitSet<AllocSizeClasses + 1, uint32_t>;
     301 
     302    ChunkListArray lists;
     303    AvailableBitSet available;
     304 
     305   public:
     306    class ChunkListIter;
     307    class ChunkIter;
     308 
     309    ChunkLists() = default;
     310 
     311    ChunkLists(const ChunkLists& other) = delete;
     312    ChunkLists& operator=(const ChunkLists& other) = delete;
     313 
     314    ChunkListIter chunkListIter();
     315    ChunkIter chunkIter();
     316    const auto& availableSizeClasses() const { return available; }
     317 
     318    void pushFront(size_t sizeClass, BufferChunk* chunk);
     319    void pushBack(BufferChunk* chunk);
     320    void pushBack(size_t sizeClass, BufferChunk* chunk);
     321 
     322    // Returns SIZE_MAX if none available.
     323    size_t getFirstAvailableSizeClass(size_t minSizeClass,
     324                                      size_t maxSizeClass) const;
     325 
     326    BufferChunk* popFirstChunk(size_t sizeClass);
     327 
     328    void remove(size_t sizeClass, BufferChunk* chunk);
     329 
     330    BufferChunkList extractAllChunks();
     331 
     332    bool isEmpty() const;
     333    void checkAvailable() const;
     334  };
     335 
     336  using LargeAllocList = SlimLinkedList<LargeBuffer>;
     337 
     338  using LargeAllocMap =
     339      mozilla::HashMap<void*, LargeBuffer*, PointerHasher<void*>>;
     340 
     341  enum class State : uint8_t { NotCollecting, Marking, Sweeping };  // GC phase.
     342 
     343  enum class SizeKind : uint8_t { Small, Medium };  // Small or medium alloc.
     344 
     345  enum class SweepKind : uint8_t { Tenured = 0, Nursery };  // Owners swept.
     346 
     347  // The zone this allocator is associated with.
     348  MainThreadOrGCTaskData<JS::Zone*> zone;
     349 
     350  // Chunks containing medium and small buffers. They may contain both
     351  // nursery-owned and tenured-owned buffers.
     352  MainThreadData<BufferChunkList> mixedChunks;
     353 
     354  // Chunks containing only tenured-owned small and medium buffers.
     355  MainThreadData<BufferChunkList> tenuredChunks;
     356 
     357  // Free lists for the small and medium buffers in |mixedChunks| and
     358  // |tenuredChunks|. Used for allocation.
     359  MainThreadData<FreeLists> freeLists;
     360 
     361  // Chunks that may contain nursery-owned buffers waiting to be swept during a
     362  // minor GC. Populated from |mixedChunks|.
     363  MainThreadOrGCTaskData<BufferChunkList> mixedChunksToSweep;
     364 
     365  // Chunks that contain only tenured-owned buffers waiting to be swept during a
     366  // major GC. Populated from |tenuredChunks|.
     367  MainThreadOrGCTaskData<BufferChunkList> tenuredChunksToSweep;
     368 
     369  // Chunks that have been swept. Populated by a background thread.
     370  MutexData<BufferChunkList> sweptMixedChunks;
     371  MutexData<BufferChunkList> sweptTenuredChunks;
     372 
     373  // Chunks that have been swept and are available for allocation but have not
     374  // had their free regions merged into |freeLists|. Owned by the main thread.
     375  MainThreadData<ChunkLists> availableMixedChunks;
     376  MainThreadData<ChunkLists> availableTenuredChunks;
     377 
     378  // List of large nursery-owned buffers.
     379  MainThreadData<LargeAllocList> largeNurseryAllocs;
     380 
     381  // List of large tenured-owned buffers.
     382  MainThreadData<LargeAllocList> largeTenuredAllocs;
     383 
     384  // Map from allocation pointer to buffer metadata for large buffers.
     385  // Access requires holding the mutex during sweeping.
     386  MainThreadOrGCTaskData<LargeAllocMap> largeAllocMap;
     387 
     388  // Large buffers waiting to be swept.
     389  MainThreadOrGCTaskData<LargeAllocList> largeNurseryAllocsToSweep;
     390  MainThreadOrGCTaskData<LargeAllocList> largeTenuredAllocsToSweep;
     391 
     392  // Large buffers that have been swept.
     393  MutexData<LargeAllocList> sweptLargeTenuredAllocs;
     394 
     395  // Flag to indicate that data from minor sweeping is available to be
     396  // merged. This includes chunks in the |sweptMixedChunks| or
     397  // |sweptTenuredChunks| lists and the minorSweepingFinished flag.
     398  mozilla::Atomic<bool, mozilla::Relaxed> hasMinorSweepDataToMerge;
     399 
     400  // GC state for minor and major GC.
     401  MainThreadData<State> minorState;
     402  MainThreadData<State> majorState;
     403 
     404  // Flags to tell the main thread that sweeping has finished and the state
     405  // should be updated.
     406  MutexData<bool> minorSweepingFinished;
     407  MutexData<bool> majorSweepingFinished;
     408 
     409  // A major GC was started while a minor GC was still sweeping. Chunks swept
     410  // by the minor GC will be moved directly to the list of chunks to sweep for
     411  // the major GC. This happens for the minor GC at the start of every major GC.
     412  MainThreadData<bool> majorStartedWhileMinorSweeping;
     413 
     414  // A major GC finished while a minor GC was still sweeping. Some post major GC
     415  // cleanup will be deferred to the end of the minor sweeping.
     416  MainThreadData<bool> majorFinishedWhileMinorSweeping;
     417 
     418 public:
     419  explicit BufferAllocator(JS::Zone* zone);
     420  ~BufferAllocator();
     421 
     422  static inline size_t GetGoodAllocSize(size_t requiredBytes);
     423  static inline size_t GetGoodElementCount(size_t requiredElements,
     424                                           size_t elementSize);
     425  static inline size_t GetGoodPower2AllocSize(size_t requiredBytes);
     426  static inline size_t GetGoodPower2ElementCount(size_t requiredElements,
     427                                                 size_t elementSize);
     428  static bool IsBufferAlloc(void* alloc);
     429 
     430  void* alloc(size_t bytes, bool nurseryOwned);
     431  void* allocInGC(size_t bytes, bool nurseryOwned);
     432  void* realloc(void* alloc, size_t bytes, bool nurseryOwned);
     433  void free(void* alloc);
     434  size_t getAllocSize(void* alloc);
     435  bool isNurseryOwned(void* alloc);
     436 
     437  void startMinorCollection(MaybeLock& lock);
     438  bool startMinorSweeping();
     439  void sweepForMinorCollection();
     440 
     441  void startMajorCollection(MaybeLock& lock);
     442  void startMajorSweeping(MaybeLock& lock);
     443  void sweepForMajorCollection(bool shouldDecommit);
     444  void finishMajorCollection(const AutoLock& lock);
     445  void clearMarkStateAfterBarrierVerification();
     446  void clearChunkMarkBits(BufferChunk* chunk);
     447 
     448  bool isEmpty() const;
     449 
     450  void traceEdge(JSTracer* trc, Cell* owner, void** bufferp, const char* name);
     451  bool markTenuredAlloc(void* alloc);
     452  bool isMarkedBlack(void* alloc);
     453 
     454  // For debugging, used to implement GetMarkInfo. Returns false for allocations
     455  // being swept on another thread.
     456  bool isPointerWithinBuffer(void* ptr);
     457 
     458  Mutex& lock() const;
     459 
     460  size_t getSizeOfNurseryBuffers();
     461 
     462  void addSizeOfExcludingThis(size_t* usedBytesOut, size_t* freeBytesOut,
     463                              size_t* adminBytesOut);
     464 
     465  static void printStatsHeader(FILE* file);
     466  static void printStats(GCRuntime* gc, mozilla::TimeStamp creationTime,
     467                         bool isMajorGC, FILE* file);
     468 
     469  struct Stats {
     470    size_t usedBytes = 0;
     471    size_t freeBytes = 0;
     472    size_t adminBytes = 0;
     473    size_t mixedSmallRegions = 0;
     474    size_t tenuredSmallRegions = 0;
     475    size_t mixedChunks = 0;
     476    size_t tenuredChunks = 0;
     477    size_t availableMixedChunks = 0;
     478    size_t availableTenuredChunks = 0;
     479    size_t freeRegions = 0;
     480    size_t largeNurseryAllocs = 0;
     481    size_t largeTenuredAllocs = 0;
     482  };
     483  void getStats(Stats& stats);
     484 
     485 #ifdef DEBUG
     486  bool hasAlloc(void* alloc);
     487  void checkGCStateNotInUse();
     488  void checkGCStateNotInUse(MaybeLock& lock);
     489  void checkGCStateNotInUse(const AutoLock& lock);
     490 #endif
     491 
     492 private:
     493  void markNurseryOwnedAlloc(void* alloc, bool nurseryOwned);
     494  friend class js::Nursery;
     495 
     496  void maybeMergeSweptData();
     497  void maybeMergeSweptData(MaybeLock& lock);
     498  void mergeSweptData();
     499  void mergeSweptData(const AutoLock& lock);
     500  void abortMajorSweeping(const AutoLock& lock);
     501  void clearAllocatedDuringCollectionState(const AutoLock& lock);
     502 
     503  // Small allocation methods:
     504 
     505  static inline bool IsSmallAllocSize(size_t bytes);
     506  static bool IsSmallAlloc(void* alloc);
     507 
     508  void* allocSmall(size_t bytes, bool nurseryOwned, bool inGC);
     509  void* retrySmallAlloc(size_t requestedBytes, size_t sizeClass, bool inGC);
     510  bool allocNewSmallRegion(bool inGC);
     511  void traceSmallAlloc(JSTracer* trc, Cell* owner, void** allocp,
     512                       const char* name);
     513  void markSmallNurseryOwnedBuffer(void* alloc, bool nurseryOwned);
     514  bool markSmallTenuredAlloc(void* alloc);
     515 
     516  // Medium allocation methods:
     517 
     518  static bool IsMediumAlloc(void* alloc);
     519  static bool CanSweepAlloc(bool nurseryOwned,
     520                            BufferAllocator::SweepKind sweepKind);
     521 
     522  void* allocMedium(size_t bytes, bool nurseryOwned, bool inGC);
     523  void* retryMediumAlloc(size_t requestedBytes, size_t sizeClass, bool inGC);
     524  template <typename Alloc, typename GrowHeap>
     525  void* refillFreeListsAndRetryAlloc(size_t sizeClass, size_t maxSizeClass,
     526                                     Alloc&& alloc, GrowHeap&& growHeap);
     527  enum class RefillResult { Fail = 0, Success, Retry };
     528  template <typename GrowHeap>
     529  RefillResult refillFreeLists(size_t sizeClass, size_t maxSizeClass,
     530                               GrowHeap&& growHeap);
     531  bool useAvailableChunk(size_t sizeClass, size_t maxSizeClass);
     532  bool useAvailableChunk(size_t sizeClass, size_t maxSizeClass, ChunkLists& src,
     533                         BufferChunkList& dst);
     534  SizeClassBitSet getChunkSizeClassesToMove(size_t maxSizeClass,
     535                                            ChunkLists& src) const;
     536  void* bumpAlloc(size_t bytes, size_t sizeClass, size_t maxSizeClass);
     537  void* allocFromRegion(FreeRegion* region, size_t bytes, size_t sizeClass);
     538  void* allocMediumAligned(size_t bytes, bool inGC);
     539  void* retryAlignedAlloc(size_t sizeClass, bool inGC);
     540  void* alignedAlloc(size_t sizeClass);
     541  void* alignedAllocFromRegion(FreeRegion* region, size_t sizeClass);
     542  void updateFreeListsAfterAlloc(FreeLists* freeLists, FreeRegion* region,
     543                                 size_t sizeClass);
     544  void setAllocated(void* alloc, size_t bytes, bool nurseryOwned, bool inGC);
     545  void setChunkHasNurseryAllocs(BufferChunk* chunk);
     546  void recommitRegion(FreeRegion* region);
     547  bool allocNewChunk(bool inGC);
     548  bool sweepChunk(BufferChunk* chunk, SweepKind sweepKind, bool shouldDecommit);
     549  void addSweptRegion(BufferChunk* chunk, uintptr_t freeStart,
     550                      uintptr_t freeEnd, bool shouldDecommit,
     551                      bool expectUnchanged, FreeLists& freeLists);
     552  bool sweepSmallBufferRegion(BufferChunk* chunk, SmallBufferRegion* region,
     553                              SweepKind sweepKind);
     554  void addSweptRegion(SmallBufferRegion* region, uintptr_t freeStart,
     555                      uintptr_t freeEnd, bool expectUnchanged,
     556                      FreeLists& freeLists);
     557  void freeMedium(void* alloc);
     558  bool growMedium(void* alloc, size_t newBytes);
     559  bool shrinkMedium(void* alloc, size_t newBytes);
     560  enum class ListPosition { Front, Back };
     561  FreeRegion* addFreeRegion(FreeLists* freeLists, uintptr_t start,
     562                            uintptr_t bytes, SizeKind kind, bool anyDecommitted,
     563                            ListPosition position,
     564                            bool expectUnchanged = false);
     565  void updateFreeRegionStart(FreeLists* freeLists, FreeRegion* region,
     566                             uintptr_t newStart, SizeKind kind);
     567  FreeLists* getChunkFreeLists(BufferChunk* chunk);
     568  ChunkLists* getChunkAvailableLists(BufferChunk* chunk);
     569  void maybeUpdateAvailableLists(ChunkLists* availableChunks,
     570                                 BufferChunk* chunk, size_t oldChunkSizeClass);
     571  bool isSweepingChunk(BufferChunk* chunk);
     572  void traceMediumAlloc(JSTracer* trc, Cell* owner, void** allocp,
     573                        const char* name);
     574  bool isMediumBufferNurseryOwned(void* alloc) const;
     575  void markMediumNurseryOwnedBuffer(void* alloc, bool nurseryOwned);
     576  bool markMediumTenuredAlloc(void* alloc);
     577 
     578  // Determine whether a size class is for a small or medium allocation.
     579  static SizeKind SizeClassKind(size_t sizeClass);
     580 
     581  // Get the size class for an allocation. This rounds up to a class that is
     582  // large enough to hold the required size.
     583  static size_t SizeClassForSmallAlloc(size_t bytes);
     584  static size_t SizeClassForMediumAlloc(size_t bytes);
     585 
     586  // Get the maximum size class of allocations that can use a free region. This
     587  // rounds down to the largest class that can fit in this region.
     588  static size_t SizeClassForFreeRegion(size_t bytes, SizeKind kind);
     589 
     590  static size_t SizeClassBytes(size_t sizeClass);
     591  friend struct BufferChunk;
     592 
     593  static void ClearAllocatedDuringCollection(ChunkLists& chunks);
     594  static void ClearAllocatedDuringCollection(BufferChunkList& list);
     595  static void ClearAllocatedDuringCollection(LargeAllocList& list);
     596 
     597  // Large allocation methods:
     598 
     599  static inline bool IsLargeAllocSize(size_t bytes);
     600  static bool IsLargeAlloc(void* alloc);
     601 
     602  void* allocLarge(size_t bytes, bool nurseryOwned, bool inGC);
     603  bool isLargeTenuredMarked(LargeBuffer* buffer);
     604  void freeLarge(void* alloc);
     605  bool shrinkLarge(LargeBuffer* buffer, size_t newBytes);
     606  void unmapLarge(LargeBuffer* buffer, bool isSweeping, MaybeLock& lock);
     607  void unregisterLarge(LargeBuffer* buffer, bool isSweeping, MaybeLock& lock);
     608  void traceLargeAlloc(JSTracer* trc, Cell* owner, void** allocp,
     609                       const char* name);
     610  void markLargeNurseryOwnedBuffer(LargeBuffer* buffer, bool nurseryOwned);
     611  bool markLargeTenuredBuffer(LargeBuffer* buffer);
     612 
     613  // Lookup a large buffer by pointer in the map.
     614  LargeBuffer* lookupLargeBuffer(void* alloc);
     615  LargeBuffer* lookupLargeBuffer(void* alloc, MaybeLock& lock);
     616  bool needLockToAccessBufferMap() const;
     617 
     618  void increaseHeapSize(size_t bytes, bool nurseryOwned, bool checkThresholds,
     619                        bool updateRetainedSize);
     620  void decreaseHeapSize(size_t bytes, bool nurseryOwned,
     621                        bool updateRetainedSize);
     622 
     623  // Testing functions that are allowed access to internals.
     624  friend void* TestAllocAligned(JS::Zone* zone, size_t bytes);
     625  friend size_t TestGetAllocSizeKind(void* alloc);
     626 
     627 #ifdef DEBUG
     628  void checkChunkListsGCStateNotInUse(ChunkLists& chunkLists,
     629                                      bool hasNurseryOwnedAllocs,
     630                                      bool allowAllocatedDuringCollection);
     631  void checkChunkListGCStateNotInUse(BufferChunkList& chunks,
     632                                     bool hasNurseryOwnedAllocs,
     633                                     bool allowAllocatedDuringCollection,
     634                                     bool allowFreeLists);
     635  void checkChunkGCStateNotInUse(BufferChunk* chunk,
     636                                 bool allowAllocatedDuringCollection,
     637                                 bool allowFreeLists);
     638  void checkAllocListGCStateNotInUse(LargeAllocList& list, bool isNurseryOwned);
     639  void verifyChunk(BufferChunk* chunk, bool hasNurseryOwnedAllocs);
     640  void verifyFreeRegion(BufferChunk* chunk, uintptr_t endOffset,
     641                        size_t expectedSize, size_t& freeRegionCount);
     642  void verifySmallBufferRegion(SmallBufferRegion* region,
     643                               size_t& freeRegionCount);
     644  void verifyFreeRegion(SmallBufferRegion* chunk, uintptr_t endOffset,
     645                        size_t expectedSize, size_t& freeRegionCount);
     646 #endif
     647 };
    648 
     649 static constexpr size_t SmallAllocGranularityShift =
     650    BufferAllocator::MinSmallAllocShift;  // 16 B
     651 static constexpr size_t MediumAllocGranularityShift =
     652    BufferAllocator::MinMediumAllocShift;  // 4 KB
     653 
     654 static constexpr size_t SmallAllocGranularity = 1 << SmallAllocGranularityShift;
     655 static constexpr size_t MediumAllocGranularity = 1
     656                                                 << MediumAllocGranularityShift;
     657 
     658 static constexpr size_t MinSmallAllocSize =
     659    1 << BufferAllocator::MinSmallAllocShift;  // 16 B
     660 static constexpr size_t MinMediumAllocSize =
     661    1 << BufferAllocator::MinMediumAllocShift;  // 4 KB
     662 static constexpr size_t MinLargeAllocSize =
     663    1 << BufferAllocator::MinLargeAllocShift;  // 1 MB
     664 
     665 static constexpr size_t MinAllocSize = MinSmallAllocSize;  // 16 B
     666 
     667 static constexpr size_t MaxSmallAllocSize =
     668    MinMediumAllocSize - SmallAllocGranularity;  // 4 KB - 16 B
     669 static constexpr size_t MaxMediumAllocSize =
     670    MinLargeAllocSize - MediumAllocGranularity;  // 1 MB - 4 KB
     671 static constexpr size_t MaxAlignedAllocSize = MinLargeAllocSize / 4;  // 256 KB
    672 
    673 }  // namespace gc
    674 }  // namespace js
    675 
    676 #endif  // gc_BufferAllocator_h