tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

GCRuntime.h (54333B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef gc_GCRuntime_h
      8 #define gc_GCRuntime_h
      9 
     10 #include "mozilla/Atomics.h"
     11 #include "mozilla/EnumSet.h"
     12 #include "mozilla/Maybe.h"
     13 #include "mozilla/TimeStamp.h"
     14 
     15 #include "gc/ArenaList.h"
     16 #include "gc/AtomMarking.h"
     17 #include "gc/GCContext.h"
     18 #include "gc/GCMarker.h"
     19 #include "gc/GCParallelTask.h"
     20 #include "gc/IteratorUtils.h"
     21 #include "gc/Memory.h"
     22 #include "gc/Nursery.h"
     23 #include "gc/Scheduling.h"
     24 #include "gc/Statistics.h"
     25 #include "gc/StoreBuffer.h"
     26 #include "js/friend/PerformanceHint.h"
     27 #include "js/GCAnnotations.h"
     28 #include "js/UniquePtr.h"
     29 #include "vm/AtomsTable.h"
     30 
     31 namespace js {
     32 
     33 class AutoLockGC;
     34 class AutoLockGCBgAlloc;
     35 class AutoLockHelperThreadState;
     36 class FinalizationRegistryObject;
     37 class FinalizationRecordObject;
     38 class FinalizationQueueObject;
     39 class GlobalObject;
     40 class VerifyPreTracer;
     41 class WeakRefObject;
     42 
     43 namespace gc {
     44 
// Vector aliases used throughout the GC. BlackGrayEdgeVector holds tenured
// cells recorded during marking validation (presumably black->gray edge
// fix-up targets — confirm against MarkingValidator usage); ZoneVector is a
// small inline vector of zones.
using BlackGrayEdgeVector = Vector<TenuredCell*, 0, SystemAllocPolicy>;
using ZoneVector = Vector<JS::Zone*, 4, SystemAllocPolicy>;
     47 
     48 class AutoCallGCCallbacks;
     49 class AutoUpdateBarriersForSweeping;
     50 class AutoGCSession;
     51 class AutoHeapSession;
     52 class AutoTraceSession;
     53 class BufferAllocator;
     54 class MarkingValidator;
     55 struct MovingTracer;
     56 class ParallelMarkTask;
     57 enum class ShouldCheckThresholds;
     58 class SweepGroupsIter;
     59 
// Interface to a sweep action: one resumable unit of incremental sweeping.
// Concrete actions are composed into the sweep sequence by
// GCRuntime::initSweepActions().
struct SweepAction {
  // The arguments passed to each action.
  struct Args {
    GCRuntime* gc;
    JS::GCContext* gcx;
    JS::SliceBudget& budget;       // Limits how much work a run() call may do.
  };

  virtual ~SweepAction() = default;

  // Perform or resume the action, doing work until it completes or the slice
  // budget is exhausted; the return value reports which of the two happened.
  virtual IncrementalProgress run(Args& state) = 0;

  // Assert that the action has no work left (debug checking hook).
  virtual void assertFinished() const = 0;

  // Whether this action can be skipped entirely; defaults to never skipping.
  virtual bool shouldSkip() { return false; }
};
     74 
     75 class ChunkPool {
     76  ArenaChunk* head_;
     77  size_t count_;
     78 
     79 public:
     80  ChunkPool() : head_(nullptr), count_(0) {}
     81  ChunkPool(const ChunkPool& other) = delete;
     82  ChunkPool(ChunkPool&& other) { *this = std::move(other); }
     83 
     84  ~ChunkPool() {
     85    MOZ_ASSERT(!head_);
     86    MOZ_ASSERT(count_ == 0);
     87  }
     88 
     89  ChunkPool& operator=(const ChunkPool& other) = delete;
     90  ChunkPool& operator=(ChunkPool&& other) {
     91    head_ = other.head_;
     92    other.head_ = nullptr;
     93    count_ = other.count_;
     94    other.count_ = 0;
     95    return *this;
     96  }
     97 
     98  bool empty() const { return !head_; }
     99  size_t count() const { return count_; }
    100 
    101  ArenaChunk* head() {
    102    MOZ_ASSERT(head_);
    103    return head_;
    104  }
    105  ArenaChunk* pop();
    106  void push(ArenaChunk* chunk);
    107  ArenaChunk* remove(ArenaChunk* chunk);
    108 
    109  void sort();
    110 
    111 private:
    112  ArenaChunk* mergeSort(ArenaChunk* list, size_t count);
    113  bool isSorted() const;
    114 
    115 #ifdef DEBUG
    116 public:
    117  bool contains(ArenaChunk* chunk) const;
    118  bool verify() const;
    119  void verifyChunks() const;
    120 #endif
    121 
    122 public:
    123  // Pool mutation does not invalidate an Iter unless the mutation
    124  // is of the ArenaChunk currently being visited by the Iter.
    125  class Iter {
    126   public:
    127    explicit Iter(ChunkPool& pool) : current_(pool.head_) {}
    128    bool done() const { return !current_; }
    129    void next();
    130    ArenaChunk* get() const { return current_; }
    131    operator ArenaChunk*() const { return get(); }
    132    ArenaChunk* operator->() const { return get(); }
    133 
    134   private:
    135    ArenaChunk* current_;
    136  };
    137 };
    138 
// GC parallel task that runs marking work on a helper thread. The amount of
// work done per run is limited by the slice budget installed with
// setBudget().
class BackgroundMarkTask : public GCParallelTask {
 public:
  explicit BackgroundMarkTask(GCRuntime* gc);
  // Install the budget for the next run; set this before starting the task.
  void setBudget(const JS::SliceBudget& budget) { this->budget = budget; }
  void run(AutoLockHelperThreadState& lock) override;

 private:
  JS::SliceBudget budget;
};
    148 
// GC parallel task that performs unmarking for a recorded set of zones on a
// helper thread. Call initZones() to capture the zones before starting.
class BackgroundUnmarkTask : public GCParallelTask {
 public:
  explicit BackgroundUnmarkTask(GCRuntime* gc);
  void initZones();
  void run(AutoLockHelperThreadState& lock) override;

 private:
  void unmark();

  // Zones captured by initZones() and processed by run().
  ZoneVector zones;
};
    160 
// GC parallel task that performs background sweeping on a helper thread
// (GCRuntime::isBackgroundSweeping() reports whether it has started).
class BackgroundSweepTask : public GCParallelTask {
 public:
  explicit BackgroundSweepTask(GCRuntime* gc);
  void run(AutoLockHelperThreadState& lock) override;
};
    166 
// GC parallel task that frees queued memory (e.g. LifoAlloc blocks and
// nursery buffers queued via GCRuntime) on a helper thread.
class BackgroundFreeTask : public GCParallelTask {
 public:
  explicit BackgroundFreeTask(GCRuntime* gc);
  void run(AutoLockHelperThreadState& lock) override;
};
    172 
// Performs extra allocation off thread so that when memory is required on the
// main thread it will already be available and waiting.
class BackgroundAllocTask : public GCParallelTask {
  // Guarded by the GC lock.
  GCLockData<ChunkPool&> chunkPool_;

  // Whether background allocation is enabled; fixed at construction.
  const bool enabled_;

 public:
  BackgroundAllocTask(GCRuntime* gc, ChunkPool& pool);
  bool enabled() const { return enabled_; }

  void run(AutoLockHelperThreadState& lock) override;
};
    187 
// Search the provided chunks for free arenas and decommit them.
// Runs on a helper thread; see GCRuntime::isBackgroundDecommitting().
class BackgroundDecommitTask : public GCParallelTask {
 public:
  explicit BackgroundDecommitTask(GCRuntime* gc);
  void run(AutoLockHelperThreadState& lock) override;
};
    194 
    195 template <typename F>
    196 struct Callback {
    197  F op;
    198  void* data;
    199 
    200  Callback() : op(nullptr), data(nullptr) {}
    201  Callback(F op, void* data) : op(op), data(data) {}
    202 };
    203 
// Small inline vector of (callback, data) pairs; see Callback above.
template <typename F>
using CallbackVector = Vector<Callback<F>, 4, SystemAllocPolicy>;

// Maps a rooted Value's address to the name given when it was registered
// via GCRuntime::addRoot().
using RootedValueMap =
    HashMap<Value*, const char*, DefaultHasher<Value*>, SystemAllocPolicy>;

// A set of AllocKinds represented as a 64-bit mask.
using AllocKinds = mozilla::EnumSet<AllocKind, uint64_t>;
    211 
// A singly linked list of zones. The list is intrusive: only head/tail
// pointers are stored here, so the links live in the zones themselves.
class ZoneList {
  // Sentinel list terminator (presumably distinct from nullptr so that list
  // membership can be distinguished — confirm in the implementation).
  static Zone* const End;

  Zone* head;
  Zone* tail;

 public:
  ZoneList();
  ~ZoneList();

  bool isEmpty() const;
  Zone* front() const;

  void prepend(Zone* zone);
  void append(Zone* zone);
  // The list arguments are consumed: their contents are spliced in.
  void prependList(ZoneList&& other);
  void appendList(ZoneList&& other);
  Zone* removeFront();
  void clear();

 private:
  explicit ZoneList(Zone* singleZone);
  // Debug consistency check of the list structure.
  void check() const;

  // Not copyable: copying an intrusive list would alias the links.
  ZoneList(const ZoneList& other) = delete;
  ZoneList& operator=(const ZoneList& other) = delete;
};
    240 
// A weak cache that is due to be swept, paired with the zone it belongs to.
struct WeakCacheToSweep {
  JS::detail::WeakCacheBase* cache;
  JS::Zone* zone;
};
    245 
// Iterates over the weak caches of the zones in a sweep group, yielding each
// cache together with its zone as a WeakCacheToSweep.
class WeakCacheSweepIterator {
  using WeakCacheBase = JS::detail::WeakCacheBase;

  // Current position: the zone being visited and the cache within it.
  JS::Zone* sweepZone;
  WeakCacheBase* sweepCache;

 public:
  explicit WeakCacheSweepIterator(JS::Zone* sweepGroup);

  bool done() const;
  WeakCacheToSweep get() const;
  void next();

 private:
  // Advances the position to a valid element, presumably skipping exhausted
  // zones/caches — confirm against the implementation.
  void settle();
};
    262 
// Tracer used during sweeping. The CRTP base GenericTracerImpl dispatches
// each traced edge to the private onEdge() below (hence the friendship).
struct SweepingTracer final : public GenericTracerImpl<SweepingTracer> {
  explicit SweepingTracer(JSRuntime* rt);

 private:
  // Called for each edge of type T encountered while tracing.
  template <typename T>
  void onEdge(T** thingp, const char* name);
  friend class GenericTracerImpl<SweepingTracer>;
};
    271 
    272 class GCRuntime {
    273 public:
    274  explicit GCRuntime(JSRuntime* rt);
    275  [[nodiscard]] bool init(uint32_t maxbytes);
    276  bool wasInitialized() const { return initialized; }
    277  void finishRoots();
    278  void finish();
    279 
    280  Zone* atomsZone() {
    281    Zone* zone = zones()[0];
    282    MOZ_ASSERT(JS::shadow::Zone::from(zone)->isAtomsZone());
    283    return zone;
    284  }
    285  Zone* maybeSharedAtomsZone() { return sharedAtomsZone_; }
    286 
    287  [[nodiscard]] bool freezeSharedAtomsZone();
    288  void restoreSharedAtomsZone();
    289 
    290  JS::HeapState heapState() const { return heapState_; }
    291 
    292  bool hasZealMode(ZealMode mode) const;
    293  bool hasAnyZealModeOf(mozilla::EnumSet<ZealMode> mode) const;
    294  void clearZealMode(ZealMode mode);
    295  bool needZealousGC();
    296  bool zealModeControlsYieldPoint() const;
    297 
    298  [[nodiscard]] bool addRoot(Value* vp, const char* name);
    299  void removeRoot(Value* vp);
    300 
    301  [[nodiscard]] bool setParameter(JSContext* cx, JSGCParamKey key,
    302                                  uint32_t value);
    303  void resetParameter(JSContext* cx, JSGCParamKey key);
    304  uint32_t getParameter(JSGCParamKey key);
    305 
    306  void setPerformanceHint(PerformanceHint hint);
    307  bool isInPageLoad() const { return inPageLoadCount != 0; }
    308 
    309  [[nodiscard]] bool triggerGC(JS::GCReason reason);
    310  // Check whether to trigger a zone GC after allocating GC cells.
    311  void maybeTriggerGCAfterAlloc(Zone* zone);
    312  // Check whether to trigger a zone GC after malloc memory.
    313  void maybeTriggerGCAfterMalloc(Zone* zone);
    314  bool maybeTriggerGCAfterMalloc(Zone* zone, const HeapSize& heap,
    315                                 const HeapThreshold& threshold,
    316                                 JS::GCReason reason);
    317  // The return value indicates if we were able to do the GC.
    318  bool triggerZoneGC(Zone* zone, JS::GCReason reason, size_t usedBytes,
    319                     size_t thresholdBytes);
    320 
    321  void maybeGC();
    322 
    323  // Return whether we want to run a major GC. If eagerOk is true, include eager
    324  // triggers (eg EAGER_ALLOC_TRIGGER) in this determination, and schedule all
    325  // zones that exceed the eager thresholds.
    326  JS::GCReason wantMajorGC(bool eagerOk);
    327  bool checkEagerAllocTrigger(const HeapSize& size,
    328                              const HeapThreshold& threshold);
    329 
    330  // Do a minor GC if requested, followed by a major GC if requested. The return
    331  // value indicates whether a major GC was performed.
    332  bool gcIfRequested() { return gcIfRequestedImpl(false); }
    333 
    334  // Internal function to do a GC if previously requested. But if not and
    335  // eagerOk, do an eager GC for all Zones that have exceeded the eager
    336  // thresholds.
    337  //
    338  // Return whether a major GC was performed or started.
    339  bool gcIfRequestedImpl(bool eagerOk);
    340 
    341  void gc(JS::GCOptions options, JS::GCReason reason);
    342  void startGC(JS::GCOptions options, JS::GCReason reason,
    343               const JS::SliceBudget& budget);
    344  void gcSlice(JS::GCReason reason, const JS::SliceBudget& budget);
    345  void finishGC(JS::GCReason reason);
    346  void abortGC();
    347  void startDebugGC(JS::GCOptions options, const JS::SliceBudget& budget);
    348  void debugGCSlice(const JS::SliceBudget& budget);
    349 
    350  void runDebugGC();
    351  void notifyRootsRemoved();
    352 
    353  enum TraceOrMarkRuntime { TraceRuntime, MarkRuntime };
    354  void traceRuntime(JSTracer* trc, AutoHeapSession& session);
    355  void traceRuntimeForMinorGC(JSTracer* trc, AutoGCSession& session);
    356 
    357  void purgeRuntimeForMinorGC();
    358 
    359  void shrinkBuffers();
    360  void onOutOfMallocMemory();
    361  void onOutOfMallocMemory(const AutoLockGC& lock);
    362 
    363  Nursery& nursery() { return nursery_.ref(); }
    364  gc::StoreBuffer& storeBuffer() { return storeBuffer_.ref(); }
    365 
    366  void minorGC(JS::GCReason reason,
    367               gcstats::PhaseKind phase = gcstats::PhaseKind::MINOR_GC)
    368      JS_HAZ_GC_CALL;
    369  void evictNursery(JS::GCReason reason = JS::GCReason::EVICT_NURSERY) {
    370    minorGC(reason, gcstats::PhaseKind::EVICT_NURSERY);
    371  }
    372 
    373  void* addressOfNurseryPosition() {
    374    return nursery_.refNoCheck().addressOfPosition();
    375  }
    376 
    377  void* addressOfNurseryAllocatedSites() {
    378    return nursery_.refNoCheck().addressOfNurseryAllocatedSites();
    379  }
    380 
    381  const void* addressOfLastBufferedWholeCell() {
    382    return storeBuffer_.refNoCheck().addressOfLastBufferedWholeCell();
    383  }
    384 
    385 #ifdef JS_GC_ZEAL
    386  const uint32_t* addressOfZealModeBits() { return &zealModeBits.refNoCheck(); }
    387  void getZealBits(uint32_t* zealBits, uint32_t* frequency,
    388                   uint32_t* nextScheduled);
    389  void setZeal(uint8_t zeal, uint32_t frequency);
    390  void unsetZeal(uint8_t zeal);
    391  // Note that currently, different modes cannot have different frequencies.
    392  struct ZealSetting {
    393    uint8_t mode;
    394    uint32_t frequency;
    395  };
    396  using ZealSettings = js::Vector<ZealSetting, 0, SystemAllocPolicy>;
    397  bool parseZeal(const char* str, size_t len, ZealSettings* zeal,
    398                 bool* invalid);
    399  bool parseAndSetZeal(const char* str);
    400  void setNextScheduled(uint32_t count);
    401  void verifyPreBarriers();
    402  void maybeVerifyPreBarriers(bool always);
    403  void verifyPostBarriers();
    404  bool selectForMarking(JSObject* object);
    405  void clearSelectedForMarking();
    406  void setDeterministic(bool enable);
    407  void setMarkStackLimit(size_t limit, AutoLockGC& lock);
    408 #endif
    409 
    410  uint64_t nextCellUniqueId() {
    411    MOZ_ASSERT(nextCellUniqueId_ > 0);
    412    uint64_t uid = ++nextCellUniqueId_;
    413    return uid;
    414  }
    415 
    416  void setLowMemoryState(bool newState) { lowMemoryState = newState; }
    417  bool systemHasLowMemory() const { return lowMemoryState; }
    418 
    419 public:
    420  // Internal public interface
    421  ZoneVector& zones() { return zones_.ref(); }
    422  gcstats::Statistics& stats() { return stats_.ref(); }
    423  const gcstats::Statistics& stats() const { return stats_.ref(); }
    424  State state() const { return incrementalState; }
    425  bool isHeapCompacting() const { return state() == State::Compact; }
    426  bool isForegroundSweeping() const { return state() == State::Sweep; }
    427  bool isBackgroundSweeping() const { return sweepTask.wasStarted(); }
    428  bool isBackgroundMarking() const { return markTask.wasStarted(); }
    429  bool isBackgroundDecommitting() const { return decommitTask.wasStarted(); }
    430  void waitBackgroundSweepEnd();
    431  void waitBackgroundDecommitEnd();
    432  void waitBackgroundAllocEnd() { allocTask.cancelAndWait(); }
    433  void waitBackgroundFreeEnd();
    434  void waitForBackgroundTasks();
    435  bool isWaitingOnBackgroundTask() const;
    436 
    437  void lockGC() { lock.lock(); }
    438  void unlockGC() { lock.unlock(); }
    439 
    440  void lockStoreBuffer() { storeBufferLock.lock(); }
    441  void unlockStoreBuffer() { storeBufferLock.unlock(); }
    442 
    443 #ifdef DEBUG
    444  void assertCurrentThreadHasLockedGC() const {
    445    lock.assertOwnedByCurrentThread();
    446  }
    447  void assertCurrentThreadHasLockedStoreBuffer() const {
    448    storeBufferLock.assertOwnedByCurrentThread();
    449  }
    450 #endif  // DEBUG
    451 
    452  void setAlwaysPreserveCode() { alwaysPreserveCode = true; }
    453 
    454  void setIncrementalGCEnabled(bool enabled);
    455  void setNurseryEnabled(bool enabled);
    456 
    457  bool isIncrementalGCEnabled() const { return incrementalGCEnabled; }
    458  bool isPerZoneGCEnabled() const { return perZoneGCEnabled; }
    459  bool isCompactingGCEnabled() const;
    460  bool isParallelMarkingEnabled() const { return parallelMarkingEnabled; }
    461 
    462  bool isIncrementalGCInProgress() const {
    463    return state() != State::NotActive && !isVerifyPreBarriersEnabled();
    464  }
    465 
    466  bool hasForegroundWork() const;
    467 
    468  bool isShrinkingGC() const { return gcOptions() == JS::GCOptions::Shrink; }
    469 
    470  bool isShutdownGC() const { return gcOptions() == JS::GCOptions::Shutdown; }
    471 
    472 #ifdef DEBUG
    473  bool isShuttingDown() const { return hadShutdownGC; }
    474 #endif
    475 
    476  bool initSweepActions();
    477 
    478  void setGrayRootsTracer(JSGrayRootsTracer traceOp, void* data);
    479  [[nodiscard]] bool addBlackRootsTracer(JSTraceDataOp traceOp, void* data);
    480  void removeBlackRootsTracer(JSTraceDataOp traceOp, void* data);
    481  void clearBlackAndGrayRootTracers();
    482 
    483  void setGCCallback(JSGCCallback callback, void* data);
    484  void callGCCallback(JSGCStatus status, JS::GCReason reason) const;
    485  void setObjectsTenuredCallback(JSObjectsTenuredCallback callback, void* data);
    486  void callObjectsTenuredCallback();
    487  [[nodiscard]] bool addFinalizeCallback(JSFinalizeCallback callback,
    488                                         void* data);
    489  void removeFinalizeCallback(JSFinalizeCallback callback);
    490  void setHostCleanupFinalizationRegistryCallback(
    491      JSHostCleanupFinalizationRegistryCallback callback, void* data);
    492  void callHostCleanupFinalizationRegistryCallback(JSFunction* doCleanup,
    493                                                   JSObject* hostDefinedData);
    494  [[nodiscard]] bool addWeakPointerZonesCallback(
    495      JSWeakPointerZonesCallback callback, void* data);
    496  void removeWeakPointerZonesCallback(JSWeakPointerZonesCallback callback);
    497  [[nodiscard]] bool addWeakPointerCompartmentCallback(
    498      JSWeakPointerCompartmentCallback callback, void* data);
    499  void removeWeakPointerCompartmentCallback(
    500      JSWeakPointerCompartmentCallback callback);
    501  JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback);
    502  bool addNurseryCollectionCallback(JS::GCNurseryCollectionCallback callback,
    503                                    void* data);
    504  void removeNurseryCollectionCallback(JS::GCNurseryCollectionCallback callback,
    505                                       void* data);
    506  JS::DoCycleCollectionCallback setDoCycleCollectionCallback(
    507      JS::DoCycleCollectionCallback callback);
    508  void callNurseryCollectionCallbacks(JS::GCNurseryProgress progress,
    509                                      JS::GCReason reason);
    510 
    511  bool addFinalizationRegistry(JSContext* cx,
    512                               Handle<FinalizationRegistryObject*> registry);
    513  bool registerWithFinalizationRegistry(
    514      JSContext* cx, HandleValue target,
    515      Handle<FinalizationRecordObject*> record);
    516  void queueFinalizationRegistryForCleanup(FinalizationQueueObject* queue);
    517 
    518  void setFullCompartmentChecks(bool enable);
    519 
    520  // Get the main marking tracer.
    521  GCMarker& marker() { return *markers[0]; }
    522 
    523  JS::Zone* getCurrentSweepGroup() { return currentSweepGroup; }
    524  unsigned getCurrentSweepGroupIndex() {
    525    MOZ_ASSERT_IF(unsigned(state()) < unsigned(State::Sweep),
    526                  sweepGroupIndex == 0);
    527    return sweepGroupIndex;
    528  }
    529 
    530  uint64_t gcNumber() const { return number; }
    531  void incGcNumber() { ++number; }
    532 
    533  uint64_t minorGCCount() const { return minorGCNumber; }
    534  void incMinorGcNumber() { ++minorGCNumber; }
    535 
    536  uint64_t majorGCCount() const { return majorGCNumber; }
    537  void incMajorGcNumber() { ++majorGCNumber; }
    538 
    539  uint64_t gcSliceCount() const { return sliceNumber; }
    540  void incGcSliceNumber() { ++sliceNumber; }
    541 
    542  int64_t defaultSliceBudgetMS() const { return defaultTimeBudgetMS_; }
    543 
    544  bool isIncrementalGc() const { return isIncremental; }
    545  bool isFullGc() const { return isFull; }
    546  bool isCompactingGc() const { return isCompacting; }
    547  bool didCompactZones() const { return isCompacting && zonesCompacted; }
    548 
    549  bool areGrayBitsValid() const { return grayBitsValid; }
    550  void setGrayBitsInvalid();
    551 
    552  mozilla::TimeStamp lastGCStartTime() const { return lastGCStartTime_; }
    553  mozilla::TimeStamp lastGCEndTime() const { return lastGCEndTime_; }
    554 
    555  bool majorGCRequested() const {
    556    return majorGCTriggerReason != JS::GCReason::NO_REASON;
    557  }
    558 
    559  double computeHeapGrowthFactor(size_t lastBytes);
    560  size_t computeTriggerBytes(double growthFactor, size_t lastBytes);
    561 
    562  ChunkPool& fullChunks(const AutoLockGC& lock) { return fullChunks_.ref(); }
    563  ChunkPool& availableChunks(const AutoLockGC& lock) {
    564    return availableChunks_.ref();
    565  }
    566  ChunkPool& emptyChunks(const AutoLockGC& lock) { return emptyChunks_.ref(); }
    567  const ChunkPool& fullChunks(const AutoLockGC& lock) const {
    568    return fullChunks_.ref();
    569  }
    570  const ChunkPool& availableChunks(const AutoLockGC& lock) const {
    571    return availableChunks_.ref();
    572  }
    573  const ChunkPool& emptyChunks(const AutoLockGC& lock) const {
    574    return emptyChunks_.ref();
    575  }
    576  using NonEmptyChunksIter = ChainedIterator<ChunkPool::Iter, 2>;
    577  NonEmptyChunksIter allNonEmptyChunks(const AutoLockGC& lock) {
    578    clearCurrentChunk(lock);
    579    return NonEmptyChunksIter(availableChunks(lock), fullChunks(lock));
    580  }
    581  uint32_t minEmptyChunkCount(const AutoLockGC& lock) const {
    582    return minEmptyChunkCount_;
    583  }
    584  void setCurrentChunk(ArenaChunk* chunk, const AutoLockGC& lock);
    585  void clearCurrentChunk(const AutoLockGC& lock);
    586 #ifdef DEBUG
    587  bool isCurrentChunk(ArenaChunk* chunk) const {
    588    return chunk == currentChunk_;
    589  }
    590  void verifyAllChunks();
    591 #endif
    592 
    593  // Get or allocate a free chunk, removing it from the empty chunks pool.
    594  ArenaChunk* getOrAllocChunk(StallAndRetry stallAndRetry,
    595                              AutoLockGCBgAlloc& lock);
    596 
    597  void recycleChunk(ArenaChunk* chunk, const AutoLockGC& lock);
    598 
    599 #ifdef JS_GC_ZEAL
    600  void startVerifyPreBarriers();
    601  void endVerifyPreBarriers();
    602  void finishVerifier();
    603  bool isVerifyPreBarriersEnabled() const { return verifyPreData.refNoCheck(); }
    604  bool shouldYieldForZeal(ZealMode mode);
    605  void verifyPostBarriers(AutoHeapSession& session);
    606  void checkHeapBeforeMinorGC(AutoHeapSession& session);
    607 #else
    608  bool isVerifyPreBarriersEnabled() const { return false; }
    609 #endif
    610 
    611 #ifdef JSGC_HASH_TABLE_CHECKS
    612  void checkHashTablesAfterMovingGC();
    613 #endif
    614 
    615  // Crawl the heap to check whether an arbitary pointer is within a cell of
    616  // the given kind. (TraceKind::Null means to ignore the kind.)
    617  bool isPointerWithinTenuredCell(
    618      void* ptr, JS::TraceKind traceKind = JS::TraceKind::Null);
    619  // Crawl the heap to check whether an arbitary pointer is within a buffer.
    620  bool isPointerWithinBufferAlloc(void* ptr);
    621 
    622 #ifdef DEBUG
    623  bool hasZone(Zone* target);
    624 #endif
    625 
    626  // Queue memory memory to be freed on a background thread if possible.
    627  void queueUnusedLifoBlocksForFree(LifoAlloc* lifo);
    628  void queueAllLifoBlocksForFreeAfterMinorGC(LifoAlloc* lifo);
    629  void queueBuffersForFreeAfterMinorGC(
    630      Nursery::BufferSet& buffers, Nursery::StringBufferVector& stringBuffers);
    631 
    632  // Public here for ReleaseArenaLists and FinalizeTypedArenas.
    633  void releaseArena(Arena* arena, const AutoLockGC& lock);
    634  void releaseArenas(Arena* arena, const AutoLockGC& lock);
    635  void releaseArenaList(ArenaList& arenaList, const AutoLockGC& lock);
    636 
    637  Arena* releaseSomeEmptyArenas(Zone* zone, Arena* emptyArenas);
    638 
    639  // Allocator internals.
    640  static void* refillFreeListInGC(Zone* zone, AllocKind thingKind);
    641 
    642  // Delayed marking.
    643  void delayMarkingChildren(gc::Cell* cell, MarkColor color);
    644  bool hasDelayedMarking() const;
    645  void markAllDelayedChildren(ShouldReportMarkTime reportTime);
    646 
    647  // If we have yielded to the mutator while foreground finalizing arenas from
    648  // zone |zone| with kind |kind| then return a list of the arenas finalized so
    649  // far. These will have been removed from the main arena lists at this
    650  // point. Otherwise return nullptr.
    651  SortedArenaList* maybeGetForegroundFinalizedArenas(Zone* zone,
    652                                                     AllocKind kind);
    653 
    654  /*
    655   * Concurrent sweep infrastructure.
    656   */
    657  void startTask(GCParallelTask& task, AutoLockHelperThreadState& lock);
    658  void joinTask(GCParallelTask& task, AutoLockHelperThreadState& lock);
    659  void updateHelperThreadCount();
    660  size_t parallelWorkerCount() const;
    661  void maybeRequestGCAfterBackgroundTask(const AutoLockHelperThreadState& lock);
    662 
    663  // GC parallel task dispatch infrastructure.
    664  size_t getMaxParallelThreads() const;
    665  void dispatchOrQueueParallelTask(GCParallelTask* task,
    666                                   const AutoLockHelperThreadState& lock);
    667  void maybeDispatchParallelTasks(const AutoLockHelperThreadState& lock);
    668  void onParallelTaskEnd(bool wasDispatched,
    669                         const AutoLockHelperThreadState& lock);
    670 
    671  // Parallel marking.
    672  bool setParallelMarkingEnabled(bool enabled);
    673  bool initOrDisableParallelMarking();
    674  [[nodiscard]] bool updateMarkersVector();
    675  size_t markingWorkerCount() const;
    676 
    677  // WeakRefs
    678  bool registerWeakRef(JSContext* cx, HandleValue target,
    679                       Handle<WeakRefObject*> weakRef);
    680  void traceKeptObjects(JSTracer* trc);
    681 
    682  JS::GCReason lastStartReason() const { return initialReason; }
    683 
    684  void updateAllocationRates();
    685 
    686  // Allocator internals
    687  static void* refillFreeList(JS::Zone* zone, AllocKind thingKind);
    688  void attemptLastDitchGC();
    689 
    690  // Return whether |sym| is marked at least |color| in the atom marking state
    691  // for uncollected zones.
    692  bool isSymbolReferencedByUncollectedZone(JS::Symbol* sym, MarkColor color);
    693 
    694  // Test mark queue.
    695 #ifdef DEBUG
    696  const GCVector<HeapPtr<JS::Value>, 0, SystemAllocPolicy>& getTestMarkQueue()
    697      const;
    698  [[nodiscard]] bool appendTestMarkQueue(const JS::Value& value);
    699  void clearTestMarkQueue();
    700  size_t testMarkQueuePos() const;
    701  size_t testMarkQueueRemaining() const;
    702 #endif
    703 
    704 private:
    705  enum class IncrementalResult { Reset = 0, Abort, Ok };
    706 
    707  bool hasBuffersForBackgroundFree() const {
    708    return !lifoBlocksToFree.ref().isEmpty() ||
    709           !buffersToFreeAfterMinorGC.ref().empty() ||
    710           !stringBuffersToReleaseAfterMinorGC.ref().empty();
    711  }
    712 
    713  // Returns false on failure without raising an exception.
    714  [[nodiscard]] bool setParameter(JSGCParamKey key, uint32_t value,
    715                                  AutoLockGC& lock);
    716  void resetParameter(JSGCParamKey key, AutoLockGC& lock);
    717  uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock);
    718  // Returns false on failure without raising an exception.
    719  bool setThreadParameter(JSGCParamKey key, uint32_t value, AutoLockGC& lock);
    720  void resetThreadParameter(JSGCParamKey key, AutoLockGC& lock);
    721  void updateThreadDataStructures(AutoLockGC& lock);
    722 
    723  JS::GCOptions gcOptions() const { return maybeGcOptions.ref().ref(); }
    724 
    725  TriggerResult checkHeapThreshold(Zone* zone, const HeapSize& heapSize,
    726                                   const HeapThreshold& heapThreshold);
    727 
    728  void updateSchedulingStateOnGCStart();
    729  void updateSchedulingStateOnGCEnd(mozilla::TimeStamp currentTime);
    730  void updateAllGCStartThresholds();
    731 
    732  // For ArenaLists::allocateFromArena()
    733  friend class ArenaLists;
    734  ArenaChunk* pickChunk(StallAndRetry stallAndRetry, AutoLockGCBgAlloc& lock);
    735  Arena* allocateArena(ArenaChunk* chunk, Zone* zone, AllocKind kind,
    736                       ShouldCheckThresholds checkThresholds);
    737 
    738  /*
    739   * Return the list of chunks that can be released outside the GC lock.
    740   * Must be called either during the GC or with the GC lock taken.
    741   */
    742  friend class BackgroundDecommitTask;
    743  bool tooManyEmptyChunks(const AutoLockGC& lock);
    744  ChunkPool expireEmptyChunkPool(const AutoLockGC& lock);
    745  void freeEmptyChunks(const AutoLockGC& lock);
    746  void prepareToFreeChunk(ArenaChunkInfo& info);
    747  void setMinEmptyChunkCount(uint32_t value, const AutoLockGC& lock);
    748 
    749  friend class BackgroundAllocTask;
    750  bool wantBackgroundAllocation(const AutoLockGC& lock) const;
    751  void startBackgroundAllocTaskIfIdle();
    752 
    753  void requestMajorGC(JS::GCReason reason);
    754  JS::SliceBudget defaultBudget(JS::GCReason reason, int64_t millis);
    755  bool maybeIncreaseSliceBudget(JS::SliceBudget& budget,
    756                                mozilla::TimeStamp sliceStartTime,
    757                                mozilla::TimeStamp gcStartTime);
    758  bool maybeIncreaseSliceBudgetForLongCollections(
    759      JS::SliceBudget& budget, mozilla::TimeStamp sliceStartTime,
    760      mozilla::TimeStamp gcStartTime);
    761  bool maybeIncreaseSliceBudgetForUrgentCollections(JS::SliceBudget& budget);
    762  IncrementalResult budgetIncrementalGC(bool nonincrementalByAPI,
    763                                        JS::GCReason reason,
    764                                        JS::SliceBudget& budget);
    765  void checkZoneIsScheduled(Zone* zone, JS::GCReason reason,
    766                            const char* trigger);
    767  IncrementalResult resetIncrementalGC(GCAbortReason reason);
    768 
    769  // Assert if the system state is such that we should never
    770  // receive a request to do GC work.
    771  void checkCanCallAPI();
    772 
    773  // Check if the system state is such that GC has been suppressed
    774  // or otherwise delayed.
    775  [[nodiscard]] bool checkIfGCAllowedInCurrentState(JS::GCReason reason);
    776 
    777  gcstats::ZoneGCStats scanZonesBeforeGC();
    778 
    779  void setGCOptions(JS::GCOptions options);
    780 
    781  void collect(bool nonincrementalByAPI, const JS::SliceBudget& budget,
    782               JS::GCReason reason) JS_HAZ_GC_CALL;
    783 
    784  /*
    785   * Run one GC "cycle" (either a slice of incremental GC or an entire
    786   * non-incremental GC).
    787   *
    788   * Returns:
    789   *  * ResetIncremental if we "reset" an existing incremental GC, which would
    790   *    force us to run another cycle or
    791   *  * Ok otherwise.
    792   */
    793  [[nodiscard]] IncrementalResult gcCycle(bool nonincrementalByAPI,
    794                                          const JS::SliceBudget& budgetArg,
    795                                          JS::GCReason reason);
    796  bool shouldRepeatForDeadZone(JS::GCReason reason);
    797 
    798  void incrementalSlice(JS::SliceBudget& budget, JS::GCReason reason,
    799                        bool budgetWasIncreased);
    800 
    801  bool mightSweepInThisSlice(bool nonIncremental);
    802  void collectNurseryFromMajorGC(JS::GCReason reason);
    803  void collectNursery(JS::GCOptions options, JS::GCReason reason,
    804                      gcstats::PhaseKind phase);
    805 
    806  friend class AutoCallGCCallbacks;
    807  void maybeCallGCCallback(JSGCStatus status, JS::GCReason reason);
    808 
    809  void startCollection(JS::GCReason reason);
    810 
    811  void purgeRuntime();
    812  [[nodiscard]] bool beginPreparePhase(JS::GCReason reason,
    813                                       AutoGCSession& session);
    814  bool prepareZonesForCollection(JS::GCReason reason, bool* isFullOut);
    815  void unmarkWeakMaps();
    816  void endPreparePhase(JS::GCReason reason);
    817  void beginMarkPhase(AutoGCSession& session);
    818  bool shouldPreserveJITCode(JS::Realm* realm,
    819                             const mozilla::TimeStamp& currentTime,
    820                             JS::GCReason reason, bool canAllocateMoreCode,
    821                             bool isActiveCompartment);
    822  void maybeDiscardJitCodeForGC();
    823  void startBackgroundFreeAfterMinorGC();
    824  void relazifyFunctionsForShrinkingGC();
    825  void purgePropMapTablesForShrinkingGC();
    826  void purgeSourceURLsForShrinkingGC();
    827  void purgePendingWrapperPreservationBuffersForShrinkingGC();
    828  void traceRuntimeForMajorGC(JSTracer* trc, AutoGCSession& session);
    829  void traceRuntimeAtoms(JSTracer* trc);
    830  void traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark);
    831  void traceEmbeddingBlackRoots(JSTracer* trc);
    832  void traceEmbeddingGrayRoots(JSTracer* trc);
    833  IncrementalProgress traceEmbeddingGrayRoots(JSTracer* trc,
    834                                              JS::SliceBudget& budget);
    835  void checkNoRuntimeRoots(AutoGCSession& session);
    836  void maybeDoCycleCollection();
    837  void findDeadCompartments();
    838 
    839  friend class BackgroundMarkTask;
    840  enum ParallelMarking : bool {
    841    SingleThreadedMarking = false,
    842    AllowParallelMarking = true
    843  };
    844  IncrementalProgress markUntilBudgetExhausted(
    845      JS::SliceBudget& sliceBudget,
    846      ParallelMarking allowParallelMarking = SingleThreadedMarking,
    847      ShouldReportMarkTime reportTime = ReportMarkTime);
    848  bool canMarkInParallel() const;
    849  bool initParallelMarking();
    850  void finishParallelMarkers();
    851 
    852  bool reserveMarkingThreads(size_t count);
    853  void releaseMarkingThreads();
    854 
    855  bool hasMarkingWork(MarkColor color) const;
    856 
    857  void drainMarkStack();
    858 
    859 #ifdef DEBUG
    860  void assertNoMarkingWork() const;
    861 #else
    862  void assertNoMarkingWork() const {}
    863 #endif
    864 
    865  void markDelayedChildren(gc::Arena* arena, MarkColor color);
    866  void processDelayedMarkingList(gc::MarkColor color);
    867  void rebuildDelayedMarkingList();
    868  void appendToDelayedMarkingList(gc::Arena** listTail, gc::Arena* arena);
    869  void resetDelayedMarking();
    870  template <typename F>
    871  void forEachDelayedMarkingArena(F&& f);
    872 
    873  template <class ZoneIterT>
    874  IncrementalProgress markWeakReferences(JS::SliceBudget& budget);
    875  IncrementalProgress markWeakReferencesInCurrentGroup(JS::SliceBudget& budget);
    876  IncrementalProgress markGrayRoots(JS::SliceBudget& budget,
    877                                    gcstats::PhaseKind phase);
    878  void markBufferedGrayRoots(JS::Zone* zone);
    879  IncrementalProgress markAllWeakReferences();
    880  void markAllGrayReferences(gcstats::PhaseKind phase);
    881 
    882  // The mark queue is a testing-only feature for controlling mark ordering and
    883  // yield timing.
    884  enum MarkQueueProgress {
    885    QueueYielded,   // End this incremental GC slice, if possible
    886    QueueComplete,  // Done with the queue
    887    QueueSuspended  // Continue the GC without ending the slice
    888  };
    889  MarkQueueProgress processTestMarkQueue();
    890 
    891  // GC Sweeping. Implemented in Sweeping.cpp.
    892  void beginSweepPhase(JS::GCReason reason, AutoGCSession& session);
    893  void dropStringWrappers();
    894  void groupZonesForSweeping(JS::GCReason reason);
    895  [[nodiscard]] bool findSweepGroupEdges();
    896  [[nodiscard]] bool addEdgesForMarkQueue();
    897  void moveToNextSweepGroup();
    898  void resetGrayList(Compartment* comp);
    899  IncrementalProgress beginMarkingSweepGroup(JS::GCContext* gcx,
    900                                             JS::SliceBudget& budget);
    901  IncrementalProgress markGrayRootsInCurrentGroup(JS::GCContext* gcx,
    902                                                  JS::SliceBudget& budget);
    903  IncrementalProgress markGray(JS::GCContext* gcx, JS::SliceBudget& budget);
    904  IncrementalProgress endMarkingSweepGroup(JS::GCContext* gcx,
    905                                           JS::SliceBudget& budget);
    906  void markIncomingGrayCrossCompartmentPointers();
    907  IncrementalProgress beginSweepingSweepGroup(JS::GCContext* gcx,
    908                                              JS::SliceBudget& budget);
    909  void initBackgroundSweep(Zone* zone, JS::GCContext* gcx,
    910                           const AllocKinds& kinds);
    911  IncrementalProgress markDuringSweeping(JS::GCContext* gcx,
    912                                         JS::SliceBudget& budget);
    913  void updateAtomsBitmap();
    914  void sweepCCWrappers();
    915  void sweepRealmGlobals();
    916  void sweepEmbeddingWeakPointers(JS::GCContext* gcx);
    917  void sweepMisc();
    918  void sweepCompressionTasks();
    919  void sweepWeakMaps();
    920  void sweepUniqueIds();
    921  void sweepObjectsWithWeakPointers();
    922  void sweepDebuggerOnMainThread(JS::GCContext* gcx);
    923  void sweepJitDataOnMainThread(JS::GCContext* gcx);
    924  void sweepFinalizationObserversOnMainThread();
    925  void traceWeakFinalizationObserverEdges(JSTracer* trc, Zone* zone);
    926  void sweepWeakRefs();
    927  IncrementalProgress endSweepingSweepGroup(JS::GCContext* gcx,
    928                                            JS::SliceBudget& budget);
    929  IncrementalProgress performSweepActions(JS::SliceBudget& sliceBudget);
    930  void startSweepingAtomsTable();
    931  IncrementalProgress sweepAtomsTable(JS::GCContext* gcx,
    932                                      JS::SliceBudget& budget);
    933  IncrementalProgress sweepWeakCaches(JS::GCContext* gcx,
    934                                      JS::SliceBudget& budget);
    935  IncrementalProgress finalizeAllocKind(JS::GCContext* gcx,
    936                                        JS::SliceBudget& budget);
    937  IncrementalProgress sweepPropMapTree(JS::GCContext* gcx,
    938                                       JS::SliceBudget& budget);
    939  void endSweepPhase(bool destroyingRuntime);
    940  void queueZonesAndStartBackgroundSweep(ZoneList&& zones);
    941  void sweepFromBackgroundThread(AutoLockHelperThreadState& lock);
    942  void startBackgroundFree();
    943  void freeFromBackgroundThread(AutoLockHelperThreadState& lock);
    944  void sweepBackgroundThings(ZoneList& zones);
    945  void prepareForSweepSlice(JS::GCReason reason);
    946  void disableIncrementalBarriers();
    947  void enableIncrementalBarriers();
    948  void assertBackgroundSweepingFinished();
    949 #ifdef DEBUG
    950  bool zoneInCurrentSweepGroup(Zone* zone) const;
    951 #endif
    952 
    953  bool allCCVisibleZonesWereCollected();
    954  void sweepZones(JS::GCContext* gcx, bool destroyingRuntime);
    955  bool shouldDecommit() const;
    956  void startDecommit();
    957  void decommitEmptyChunks(const bool& cancel, AutoLockGC& lock);
    958  void decommitFreeArenas(const bool& cancel, AutoLockGC& lock);
    959  void decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock);
    960 
    961  // Compacting GC. Implemented in Compacting.cpp.
    962  bool shouldCompact();
    963  void beginCompactPhase();
    964  IncrementalProgress compactPhase(JS::GCReason reason,
    965                                   JS::SliceBudget& sliceBudget,
    966                                   AutoGCSession& session);
    967  void endCompactPhase();
    968  void sweepZoneAfterCompacting(MovingTracer* trc, Zone* zone);
    969  bool canRelocateZone(Zone* zone) const;
    970  [[nodiscard]] bool relocateArenas(Zone* zone, JS::GCReason reason,
    971                                    Arena*& relocatedListOut,
    972                                    JS::SliceBudget& sliceBudget);
    973  void updateCellPointers(Zone* zone, AllocKinds kinds);
    974  void updateAllCellPointers(MovingTracer* trc, Zone* zone);
    975  void updateZonePointersToRelocatedCells(Zone* zone);
    976  void updateRuntimePointersToRelocatedCells(AutoGCSession& session);
    977  void clearRelocatedArenas(Arena* arenaList, JS::GCReason reason);
    978  void clearRelocatedArenasWithoutUnlocking(Arena* arenaList,
    979                                            JS::GCReason reason,
    980                                            const AutoLockGC& lock);
    981  void releaseRelocatedArenas(Arena* arenaList);
    982  void releaseRelocatedArenasWithoutUnlocking(Arena* arenaList,
    983                                              const AutoLockGC& lock);
    984 #ifdef DEBUG
    985  void protectOrReleaseRelocatedArenas(Arena* arenaList, JS::GCReason reason);
    986  void protectAndHoldArenas(Arena* arenaList);
    987  void unprotectHeldRelocatedArenas(const AutoLockGC& lock);
    988  void releaseHeldRelocatedArenas();
    989  void releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC& lock);
    990 #endif
    991 
    992  IncrementalProgress waitForBackgroundTask(GCParallelTask& task,
    993                                            const JS::SliceBudget& budget,
    994                                            bool shouldPauseMutator);
    995 
    996  void cancelRequestedGCAfterBackgroundTask();
    997  void finishCollection(JS::GCReason reason);
    998  void maybeStopPretenuring();
    999  void checkGCStateNotInUse();
   1000  IncrementalProgress joinBackgroundMarkTask();
   1001 
   1002 #ifdef JS_GC_ZEAL
   1003  void computeNonIncrementalMarkingForValidation(AutoGCSession& session);
   1004  void validateIncrementalMarking();
   1005  void finishMarkingValidation();
   1006 #endif
   1007 
   1008 #ifdef DEBUG
   1009  void checkForCompartmentMismatches();
   1010 #endif
   1011 
   1012  void callFinalizeCallbacks(JS::GCContext* gcx, JSFinalizeStatus status) const;
   1013  void callWeakPointerZonesCallbacks(JSTracer* trc) const;
   1014  void callWeakPointerCompartmentCallbacks(JSTracer* trc,
   1015                                           JS::Compartment* comp) const;
   1016  void callDoCycleCollectionCallback(JSContext* cx);
   1017 
   1018 public:
   1019  JSRuntime* const rt;
   1020 
   1021  // Embedders can use this zone however they wish.
   1022  MainThreadData<JS::Zone*> systemZone;
   1023 
   1024  MainThreadData<JS::GCContext> mainThreadContext;
   1025 
   1026 private:
   1027  // For parent runtimes, a zone containing atoms that is shared by child
   1028  // runtimes.
   1029  MainThreadData<Zone*> sharedAtomsZone_;
   1030 
   1031  // All zones in the runtime. The first element is always the atoms zone.
   1032  MainThreadOrGCTaskData<ZoneVector> zones_;
   1033 
   1034  // Any activity affecting the heap.
   1035  MainThreadOrGCTaskData<JS::HeapState> heapState_;
   1036  friend class AutoHeapSession;
   1037  friend class JS::AutoEnterCycleCollection;
   1038 
   1039  UnprotectedData<gcstats::Statistics> stats_;
   1040 
   1041 public:
   1042  js::StringStats stringStats;
   1043 
   1044  Vector<UniquePtr<GCMarker>, 1, SystemAllocPolicy> markers;
   1045 
   1046  // Delayed marking support in case we OOM pushing work onto the mark stack.
   1047  MainThreadOrGCTaskData<js::gc::Arena*> delayedMarkingList;
   1048  MainThreadOrGCTaskData<bool> delayedMarkingWorkAdded;
   1049 #ifdef DEBUG
   1050  /* Count of arenas that are currently in the stack. */
   1051  MainThreadOrGCTaskData<size_t> markLaterArenas;
   1052 #endif
   1053 
   1054  SweepingTracer sweepingTracer;
   1055 
   1056  /* Track total GC heap size for this runtime. */
   1057  HeapSize heapSize;
   1058 
   1059  /* GC scheduling state and parameters. */
   1060  GCSchedulingTunables tunables;
   1061  GCSchedulingState schedulingState;
   1062  MainThreadData<bool> fullGCRequested;
   1063  // If an enterWeakMarking slice takes too long, suppress yielding during the
   1064  // next slice.
   1065  MainThreadData<bool> finishMarkingDuringSweeping;
   1066 
   1067  // Helper thread configuration.
   1068  MainThreadData<double> helperThreadRatio;
   1069  MainThreadData<size_t> maxHelperThreads;
   1070  MainThreadOrGCTaskData<size_t> helperThreadCount;
   1071  MainThreadData<size_t> maxMarkingThreads;
   1072  MainThreadData<size_t> markingThreadCount;
   1073 
   1074  // Per-runtime helper thread task queue. Can be accessed from helper threads
   1075  // in maybeDispatchParallelTasks().
   1076  HelperThreadLockData<size_t> maxParallelThreads;
   1077  HelperThreadLockData<size_t> dispatchedParallelTasks;
   1078  HelperThreadLockData<GCParallelTaskList> queuedParallelTasks;
   1079 
   1080  // State used for managing atom mark bitmaps in each zone.
   1081  AtomMarkingRuntime atomMarking;
   1082  MainThreadOrGCTaskData<UniquePtr<DenseBitmap>> atomsUsedByUncollectedZones;
   1083 
   1084  /*
   1085   * Pointer to a callback that, if set, will be used to create a
   1086   * budget for internally-triggered GCs.
   1087   */
   1088  MainThreadData<JS::CreateSliceBudgetCallback> createBudgetCallback;
   1089 
   1090 private:
   1091  // Arenas used for permanent things created at startup and shared by child
   1092  // runtimes.
   1093  MainThreadData<ArenaList> permanentAtoms;
   1094  MainThreadData<ArenaList> permanentWellKnownSymbols;
   1095 
   1096  // When chunks are empty, they reside in the emptyChunks pool and are
   1097  // re-used as needed or eventually expired if not re-used. The emptyChunks
   1098  // pool gets refilled from the background allocation task heuristically so
   1099  // that empty chunks should always be available for immediate allocation
   1100  // without syscalls.
   1101  GCLockData<ChunkPool> emptyChunks_;
   1102 
   1103  // Chunks which have had some, but not all, of their arenas allocated live
   1104  // in the available chunk lists. When all available arenas in a chunk have
   1105  // been allocated, the chunk is removed from the available list and moved
   1106  // to the fullChunks pool. During a GC, if all arenas are free, the chunk
   1107  // is moved back to the emptyChunks pool and scheduled for eventual
   1108  // release.
   1109  GCLockData<ChunkPool> availableChunks_;
   1110 
   1111  // When all arenas in a chunk are used, it is moved to the fullChunks pool
   1112  // so as to reduce the cost of operations on the available lists.
   1113  GCLockData<ChunkPool> fullChunks_;
   1114 
   1115  // The chunk currently being allocated from. If non-null this is at the head
   1116  // of the available chunks list and has isCurrentChunk set to true. Can be
   1117  // accessed without taking the GC lock.
   1118  MainThreadData<ArenaChunk*> currentChunk_;
   1119 
   1120  // Bitmap for arenas in the current chunk that have been freed by background
   1121  // sweeping but not yet merged into the chunk's freeCommittedArenas.
   1122  GCLockData<ChunkArenaBitmap> pendingFreeCommittedArenas;
   1123  friend class ArenaChunk;
   1124 
   1125  /*
   1126   * JSGC_MIN_EMPTY_CHUNK_COUNT
   1127   *
   1128   * Controls the number of empty chunks reserved for future allocation.
   1129   *
   1130   * They can be read off main thread by the background allocation task and the
   1131   * background decommit task.
   1132   */
   1133  GCLockData<uint32_t> minEmptyChunkCount_;
   1134 
   1135  MainThreadData<RootedValueMap> rootsHash;
   1136 
   1137  // An incrementing id used to assign unique ids to cells that require one.
   1138  MainThreadData<uint64_t> nextCellUniqueId_;
   1139 
   1140  MainThreadData<VerifyPreTracer*> verifyPreData;
   1141 
   1142  MainThreadData<mozilla::TimeStamp> lastGCStartTime_;
   1143  MainThreadData<mozilla::TimeStamp> lastGCEndTime_;
   1144 
   1145  WriteOnceData<bool> initialized;
   1146  MainThreadData<bool> incrementalGCEnabled;
   1147  MainThreadData<bool> perZoneGCEnabled;
   1148 
   1149  mozilla::Atomic<size_t, mozilla::ReleaseAcquire> numActiveZoneIters;
   1150 
   1151  /*
   1152   * The gray bits can become invalid if UnmarkGray overflows the stack. A
   1153   * full GC will reset this bit, since it fills in all the gray bits.
   1154   */
   1155  UnprotectedData<bool> grayBitsValid;
   1156 
   1157  mozilla::Atomic<JS::GCReason, mozilla::ReleaseAcquire> majorGCTriggerReason;
   1158 
   1159  /* Incremented at the start of every minor GC. */
   1160  MainThreadData<uint64_t> minorGCNumber;
   1161 
   1162  /* Incremented at the start of every major GC. */
   1163  MainThreadData<uint64_t> majorGCNumber;
   1164 
   1165  /* Incremented on every GC slice or minor collection. */
   1166  MainThreadData<uint64_t> number;
   1167 
   1168  /* Incremented on every GC slice. */
   1169  MainThreadData<uint64_t> sliceNumber;
   1170 
   1171  /*
   1172   * This runtime's current contribution to the global number of helper threads
   1173   * 'reserved' for parallel marking. Does not affect other uses of helper
   1174   * threads.
   1175   */
   1176  MainThreadData<size_t> reservedMarkingThreads;
   1177 
   1178  /* Whether the currently running GC can finish in multiple slices. */
   1179  MainThreadOrGCTaskData<bool> isIncremental;
   1180 
   1181  /* Whether all zones are being collected in first GC slice. */
   1182  MainThreadData<bool> isFull;
   1183 
   1184  /* Whether the heap will be compacted at the end of GC. */
   1185  MainThreadData<bool> isCompacting;
   1186 
   1187  /* Whether to use parallel marking. */
   1188  MainThreadData<ParallelMarking> useParallelMarking;
   1189 
   1190  /* The invocation kind of the current GC, set at the start of collection. */
   1191  MainThreadOrGCTaskData<mozilla::Maybe<JS::GCOptions>> maybeGcOptions;
   1192 
   1193  /* The initial GC reason, taken from the first slice. */
   1194  MainThreadData<JS::GCReason> initialReason;
   1195 
   1196  /*
   1197   * The current incremental GC phase. This is also used internally in
   1198   * non-incremental GC.
   1199   */
   1200  MainThreadOrGCTaskData<State> incrementalState;
   1201 
   1202  /* The incremental state at the start of this slice. */
   1203  MainThreadOrGCTaskData<State> initialState;
   1204 
   1205  /* Whether to pay attention the zeal settings in this incremental slice. */
   1206 #ifdef JS_GC_ZEAL
   1207  MainThreadData<bool> useZeal;
   1208 #else
   1209  const bool useZeal;
   1210 #endif
   1211 
   1212  /* Indicates that the last incremental slice exhausted the mark stack. */
   1213  MainThreadData<bool> lastMarkSlice;
   1214 
   1215  // Whether it's currently safe to yield to the mutator in an incremental GC.
   1216  MainThreadData<bool> safeToYield;
   1217 
   1218  // Whether to do any marking caused by barriers on a background thread during
   1219  // incremental sweeping, while also sweeping zones which have finished
   1220  // marking.
   1221  MainThreadData<bool> markOnBackgroundThreadDuringSweeping;
   1222 
   1223  // Whether any sweeping and decommitting will run on a separate GC helper
   1224  // thread.
   1225  MainThreadData<bool> useBackgroundThreads;
   1226 
   1227 #ifdef DEBUG
   1228  /* Shutdown has started. Further collections must be shutdown collections. */
   1229  MainThreadData<bool> hadShutdownGC;
   1230 #endif
   1231 
   1232  /* Singly linked list of zones to be swept in the background. */
   1233  HelperThreadLockData<ZoneList> backgroundSweepZones;
   1234 
   1235  /*
   1236   * Whether to trigger a GC slice after a background task is complete, so that
    1237   * the collector can continue or finish collecting. This is only used for the
   1238   * tasks that run concurrently with the mutator, which are background
   1239   * finalization and background decommit.
   1240   */
   1241  HelperThreadLockData<bool> requestSliceAfterBackgroundTask;
   1242 
   1243  /*
   1244   * Free LIFO blocks are transferred to these allocators before being freed on
   1245   * a background thread.
   1246   */
   1247  HelperThreadLockData<LifoAlloc> lifoBlocksToFree;
   1248  MainThreadData<LifoAlloc> lifoBlocksToFreeAfterFullMinorGC;
   1249  MainThreadData<LifoAlloc> lifoBlocksToFreeAfterNextMinorGC;
   1250  HelperThreadLockData<Nursery::BufferSet> buffersToFreeAfterMinorGC;
   1251  HelperThreadLockData<Nursery::StringBufferVector>
   1252      stringBuffersToReleaseAfterMinorGC;
   1253 
    1254  /* The number of the minor GC performed at the start of major GC. */
   1255  MainThreadData<uint64_t> initialMinorGCNumber;
   1256 
   1257  /* Index of current sweep group (for stats). */
   1258  MainThreadData<unsigned> sweepGroupIndex;
   1259 
   1260  /*
   1261   * Incremental sweep state.
   1262   */
   1263  MainThreadData<JS::Zone*> sweepGroups;
   1264  MainThreadOrGCTaskData<JS::Zone*> currentSweepGroup;
   1265  MainThreadData<UniquePtr<SweepAction>> sweepActions;
   1266  MainThreadOrGCTaskData<JS::Zone*> sweepZone;
   1267  MainThreadOrGCTaskData<AllocKind> sweepAllocKind;
   1268  MainThreadData<mozilla::Maybe<AtomsTable::SweepIterator>> maybeAtomsToSweep;
   1269  MainThreadOrGCTaskData<mozilla::Maybe<WeakCacheSweepIterator>>
   1270      weakCachesToSweep;
   1271  MainThreadData<bool> abortSweepAfterCurrentGroup;
   1272  MainThreadOrGCTaskData<IncrementalProgress> sweepMarkResult;
   1273  MainThreadData<bool> disableBarriersForSweeping;
   1274  friend class AutoUpdateBarriersForSweeping;
   1275 
   1276  /*
   1277   * During incremental foreground finalization, we may have a list of arenas of
   1278   * the current AllocKind and Zone whose contents have been finalized but which
   1279   * have not yet been merged back into the main arena lists.
   1280   */
   1281  MainThreadOrGCTaskData<JS::Zone*> foregroundFinalizedZone;
   1282  MainThreadOrGCTaskData<AllocKind> foregroundFinalizedAllocKind;
   1283  MainThreadData<mozilla::Maybe<SortedArenaList>> foregroundFinalizedArenas;
   1284 
   1285 #ifdef DEBUG
   1286  /*
   1287   * List of objects to mark at the beginning of a GC for testing purposes. May
   1288   * also contain string directives to change mark color or wait until different
   1289   * phases of the GC.
   1290   *
   1291   * This is a WeakCache because not everything in this list is guaranteed to
   1292   * end up marked (eg if you insert an object from an already-processed sweep
   1293   * group in the middle of an incremental GC). Also, the mark queue is not
   1294   * used during shutdown GCs. In either case, unmarked objects may need to be
   1295   * discarded.
   1296   */
   1297  JS::WeakCache<GCVector<HeapPtr<JS::Value>, 0, SystemAllocPolicy>>
   1298      testMarkQueue;
   1299 
   1300  /* Position within the test mark queue. */
   1301  size_t queuePos = 0;
   1302 
   1303  /* The test marking queue might want to be marking a particular color. */
   1304  mozilla::Maybe<js::gc::MarkColor> queueMarkColor;
   1305 #endif
   1306 
   1307  friend class SweepGroupsIter;
   1308 
   1309  /*
   1310   * Incremental compacting state.
   1311   */
   1312  MainThreadData<bool> startedCompacting;
   1313  MainThreadData<ZoneList> zonesToMaybeCompact;
   1314  MainThreadData<size_t> zonesCompacted;
   1315 #ifdef DEBUG
   1316  GCLockData<Arena*> relocatedArenasToRelease;
   1317 #endif
   1318 
   1319 #ifdef JS_GC_ZEAL
   1320  MainThreadData<MarkingValidator*> markingValidator;
   1321 #endif
   1322 
   1323  /*
   1324   * Default budget for incremental GC slice. See js/SliceBudget.h.
   1325   *
   1326   * JSGC_SLICE_TIME_BUDGET_MS
   1327   * pref: javascript.options.mem.gc_incremental_slice_ms,
   1328   */
   1329  MainThreadData<int64_t> defaultTimeBudgetMS_;
   1330 
   1331  /*
   1332   * Whether compacting GC is enabled globally.
   1333   *
   1334   * JSGC_COMPACTING_ENABLED
   1335   * pref: javascript.options.mem.gc_compacting
   1336   */
   1337  MainThreadData<bool> compactingEnabled;
   1338 
   1339  /*
   1340   * Whether generational GC is enabled globally.
   1341   *
   1342   * JSGC_NURSERY_ENABLED
   1343   * pref: javascript.options.mem.gc_generational
   1344   */
   1345  MainThreadData<bool> nurseryEnabled;
   1346 
   1347  /*
   1348   * Whether parallel marking is enabled globally.
   1349   *
   1350   * JSGC_PARALLEL_MARKING_ENABLED
   1351   * pref: javascript.options.mem.gc_parallel_marking
   1352   */
   1353  MainThreadData<bool> parallelMarkingEnabled;
   1354 
   1355  MainThreadData<bool> rootsRemoved;
   1356 
   1357  /*
   1358   * These options control the zealousness of the GC. At every allocation,
   1359   * nextScheduled is decremented. When it reaches zero we do a full GC.
   1360   *
   1361   * At this point, if zeal_ is one of the types that trigger periodic
   1362   * collection, then nextScheduled is reset to the value of zealFrequency.
   1363   * Otherwise, no additional GCs take place.
   1364   *
   1365   * You can control these values in several ways:
   1366   *   - Set the JS_GC_ZEAL environment variable
   1367   *   - Call gczeal() or schedulegc() from inside shell-executed JS code
   1368   *     (see the help for details)
   1369   *
   1370   * If gcZeal_ == 1 then we perform GCs in select places (during MaybeGC and
   1371   * whenever we are notified that GC roots have been removed). This option is
   1372   * mainly useful to embedders.
   1373   *
   1374   * We use zeal_ == 4 to enable write barrier verification. See the comment
   1375   * in gc/Verifier.cpp for more information about this.
   1376   *
   1377   * zeal_ values from 8 to 10 periodically run different types of
   1378   * incremental GC.
   1379   *
   1380   * zeal_ value 14 performs periodic shrinking collections.
   1381   */
   1382 #ifdef JS_GC_ZEAL
   1383  static_assert(size_t(ZealMode::Count) <= 32,
   1384                "Too many zeal modes to store in a uint32_t");
   1385  MainThreadData<uint32_t> zealModeBits;
   1386  MainThreadData<int> zealFrequency;
   1387  MainThreadData<int> nextScheduled;
   1388  MainThreadData<bool> deterministicOnly;
   1389  MainThreadData<int> zealSliceBudget;
   1390  MainThreadData<size_t> maybeMarkStackLimit;
   1391 
   1392  MainThreadData<PersistentRooted<GCVector<JSObject*, 0, SystemAllocPolicy>>>
   1393      selectedForMarking;
   1394 #endif
   1395 
   1396  MainThreadData<bool> fullCompartmentChecks;
   1397 
   1398  MainThreadData<uint32_t> gcCallbackDepth;
   1399 
   1400  MainThreadData<Callback<JSGCCallback>> gcCallback;
   1401  MainThreadData<Callback<JS::DoCycleCollectionCallback>>
   1402      gcDoCycleCollectionCallback;
   1403  MainThreadData<Callback<JSObjectsTenuredCallback>> tenuredCallback;
   1404  MainThreadData<CallbackVector<JSFinalizeCallback>> finalizeCallbacks;
   1405  MainThreadOrGCTaskData<Callback<JSHostCleanupFinalizationRegistryCallback>>
   1406      hostCleanupFinalizationRegistryCallback;
   1407  MainThreadData<CallbackVector<JSWeakPointerZonesCallback>>
   1408      updateWeakPointerZonesCallbacks;
   1409  MainThreadData<CallbackVector<JSWeakPointerCompartmentCallback>>
   1410      updateWeakPointerCompartmentCallbacks;
   1411  MainThreadData<CallbackVector<JS::GCNurseryCollectionCallback>>
   1412      nurseryCollectionCallbacks;
   1413 
   1414  /*
   1415   * The trace operations to trace embedding-specific GC roots. One is for
   1416   * tracing through black roots and the other is for tracing through gray
   1417   * roots. The black/gray distinction is only relevant to the cycle
   1418   * collector.
   1419   */
   1420  MainThreadData<CallbackVector<JSTraceDataOp>> blackRootTracers;
   1421  MainThreadOrGCTaskData<Callback<JSGrayRootsTracer>> grayRootTracer;
   1422 
   1423  /* Always preserve JIT code during GCs, for testing. */
   1424  MainThreadData<bool> alwaysPreserveCode;
   1425 
   1426  /* Count of the number of zones that are currently in page load. */
   1427  MainThreadData<size_t> inPageLoadCount;
   1428 
   1429  MainThreadData<bool> lowMemoryState;
   1430 
   1431  /*
   1432   * General purpose GC lock, used for synchronising operations on
   1433   * arenas and during parallel marking.
   1434   */
   1435  friend class js::AutoLockGC;
   1436  friend class js::AutoLockGCBgAlloc;
   1437  Mutex lock MOZ_UNANNOTATED;
   1438 
   1439  /*
   1440   * Lock used to synchronise access to the store buffer during parallel
   1441   * sweeping.
   1442   */
   1443  Mutex storeBufferLock MOZ_UNANNOTATED;
   1444 
   1445  /* Lock used to synchronise access to delayed marking state. */
   1446  Mutex delayedMarkingLock MOZ_UNANNOTATED;
   1447 
   1448  /*
   1449   * Lock used by buffer allocators to synchronise data passed back to the main
   1450   * thread by background sweeping.
   1451   */
   1452  Mutex bufferAllocatorLock MOZ_UNANNOTATED;
   1453  friend class BufferAllocator;
   1454  friend class AutoLock;
   1455 
   1456  friend class BackgroundSweepTask;
   1457  friend class BackgroundFreeTask;
   1458 
   1459  BackgroundAllocTask allocTask;
   1460  BackgroundUnmarkTask unmarkTask;
   1461  BackgroundMarkTask markTask;
   1462  BackgroundSweepTask sweepTask;
   1463  BackgroundFreeTask freeTask;
   1464  BackgroundDecommitTask decommitTask;
   1465 
   1466  MainThreadData<Nursery> nursery_;
   1467 
   1468  // The store buffer used to track tenured to nursery edges for generational
   1469  // GC. This is accessed off main thread when sweeping WeakCaches.
   1470  MainThreadOrGCTaskData<gc::StoreBuffer> storeBuffer_;
   1471 
   1472  mozilla::TimeStamp lastLastDitchTime;
   1473 
   1474  // The last time per-zone allocation rates were updated.
   1475  MainThreadData<mozilla::TimeStamp> lastAllocRateUpdateTime;
   1476 
   1477  // Total collector time since per-zone allocation rates were last updated.
   1478  MainThreadData<mozilla::TimeDuration> collectorTimeSinceAllocRateUpdate;
   1479 
   1480  friend class MarkingValidator;
   1481  friend class AutoEnterIteration;
   1482 };
   1483 
   1484 #ifndef JS_GC_ZEAL
   1485 inline bool GCRuntime::hasZealMode(ZealMode mode) const { return false; }
   1486 inline void GCRuntime::clearZealMode(ZealMode mode) {}
   1487 inline bool GCRuntime::needZealousGC() { return false; }
   1488 inline bool GCRuntime::zealModeControlsYieldPoint() const { return false; }
   1489 #endif
   1490 
   1491 /* Prevent compartments and zones from being collected during iteration. */
   1492 class MOZ_RAII AutoEnterIteration {
   1493  GCRuntime* gc;
   1494 
   1495 public:
   1496  explicit AutoEnterIteration(GCRuntime* gc_) : gc(gc_) {
   1497    ++gc->numActiveZoneIters;
   1498  }
   1499 
   1500  ~AutoEnterIteration() {
   1501    MOZ_ASSERT(gc->numActiveZoneIters);
   1502    --gc->numActiveZoneIters;
   1503  }
   1504 };
   1505 
/*
 * Whether the runtime should be treated as currently animating, presumably
 * judged by how recent |lastAnimationTime| is relative to |currentTime| —
 * NOTE(review): implementation not visible in this header; confirm in GC.cpp.
 */
bool IsCurrentlyAnimating(const mozilla::TimeStamp& lastAnimationTime,
                         const mozilla::TimeStamp& currentTime);
   1508 
   1509 } /* namespace gc */
   1510 } /* namespace js */
   1511 
   1512 #endif