tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Scheduling.cpp (30712B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "gc/Scheduling.h"
      8 
      9 #include "mozilla/CheckedInt.h"
     10 #include "mozilla/ScopeExit.h"
     11 #include "mozilla/TimeStamp.h"
     12 
     13 #include <algorithm>
     14 #include <cmath>
     15 
     16 #include "gc/Memory.h"
     17 #include "gc/Nursery.h"
     18 #include "gc/RelocationOverlay.h"
     19 #include "gc/ZoneAllocator.h"
     20 #include "util/DifferentialTesting.h"
     21 #include "vm/MutexIDs.h"
     22 
     23 using namespace js;
     24 using namespace js::gc;
     25 
     26 using mozilla::CheckedInt;
     27 using mozilla::Maybe;
     28 using mozilla::Nothing;
     29 using mozilla::Some;
     30 using mozilla::TimeDuration;
     31 
/*
 * We may start to collect a zone before its trigger threshold is reached if
 * GCRuntime::maybeGC() is called for that zone or we start collecting other
 * zones. These eager threshold factors are not configurable.
 */
static constexpr double HighFrequencyEagerAllocTriggerFactor = 0.85;
static constexpr double LowFrequencyEagerAllocTriggerFactor = 0.9;

/*
 * Don't allow heap growth factors to be set so low that eager collections could
 * reduce the trigger threshold.
 */
// Taking the reciprocal of the smaller eager factor yields the larger lower
// bound, so growth * eagerFactor >= 1 holds for both eager factors.
static constexpr double MinHeapGrowthFactor =
    1.0f / std::min(HighFrequencyEagerAllocTriggerFactor,
                    LowFrequencyEagerAllocTriggerFactor);

// Limit various parameters to reasonable levels to catch errors.
static constexpr double MaxHeapGrowthFactor = 100;
static constexpr size_t MaxNurseryBytesParam = 128 * 1024 * 1024;
namespace {

// Helper classes to marshal GC parameter values to/from uint32_t.
//
// Each converter provides toUint32 (internal value -> wire encoding) and
// fromUint32 (wire encoding -> internal value, or Nothing() if the encoding
// is not representable). They are referenced by name from the
// FOR_EACH_GC_TUNABLE macro table.

// Generic arithmetic conversion: asserts the value is representable in
// uint32_t on output and uses an unchecked explicit cast on input.
template <typename T>
struct ConvertGeneric {
  static uint32_t toUint32(T value) {
    static_assert(std::is_arithmetic_v<T>);
    // Negative values have no uint32_t encoding.
    if constexpr (std::is_signed_v<T>) {
      MOZ_ASSERT(value >= 0);
    }
    // Only check the upper bound for types wide enough to exceed it; bool is
    // excluded from the numeric_limits comparison.
    if constexpr (!std::is_same_v<T, bool> &&
                  std::numeric_limits<T>::max() >
                      std::numeric_limits<uint32_t>::max()) {
      MOZ_ASSERT(value <= UINT32_MAX);
    }
    return uint32_t(value);
  }
  static Maybe<T> fromUint32(uint32_t param) {
    // Currently we use explicit conversion and don't range check.
    return Some(T(param));
  }
};

using ConvertBool = ConvertGeneric<bool>;
using ConvertSize = ConvertGeneric<size_t>;
using ConvertDouble = ConvertGeneric<double>;

// Fixed-point encoding: a double is transported as an integer percentage.
struct ConvertTimes100 {
  static uint32_t toUint32(double value) { return uint32_t(value * 100.0); }
  static Maybe<double> fromUint32(uint32_t param) {
    return Some(double(param) / 100.0);
  }
};

// Nursery sizes are rounded to a valid nursery size on input; output encoding
// is inherited from ConvertSize.
struct ConvertNurseryBytes : ConvertSize {
  static Maybe<size_t> fromUint32(uint32_t param) {
    return Some(Nursery::roundSize(param));
  }
};

// Byte sizes transported in kilobytes.
struct ConvertKB {
  static uint32_t toUint32(size_t value) { return value / 1024; }
  static Maybe<size_t> fromUint32(uint32_t param) {
    // Parameters which represent heap sizes in bytes are restricted to values
    // which can be represented on 32 bit platforms.
    CheckedInt<uint32_t> size = CheckedInt<uint32_t>(param) * 1024;
    return size.isValid() ? Some(size_t(size.value())) : Nothing();
  }
};

// Byte sizes transported in megabytes.
struct ConvertMB {
  static uint32_t toUint32(size_t value) { return value / (1024 * 1024); }
  static Maybe<size_t> fromUint32(uint32_t param) {
    // Parameters which represent heap sizes in bytes are restricted to values
    // which can be represented on 32 bit platforms.
    CheckedInt<uint32_t> size = CheckedInt<uint32_t>(param) * 1024 * 1024;
    return size.isValid() ? Some(size_t(size.value())) : Nothing();
  }
};

// Durations transported in milliseconds.
struct ConvertMillis {
  static uint32_t toUint32(TimeDuration value) {
    return uint32_t(value.ToMilliseconds());
  }
  static Maybe<TimeDuration> fromUint32(uint32_t param) {
    return Some(TimeDuration::FromMilliseconds(param));
  }
};

// Durations transported in seconds.
struct ConvertSeconds {
  static uint32_t toUint32(TimeDuration value) {
    return uint32_t(value.ToSeconds());
  }
  static Maybe<TimeDuration> fromUint32(uint32_t param) {
    return Some(TimeDuration::FromSeconds(param));
  }
};

}  // anonymous namespace
    132 
    133 // Helper functions to check GC parameter values
    134 
    135 template <typename T>
    136 static bool NoCheck(T value) {
    137  return true;
    138 }
    139 
    140 template <typename T>
    141 static bool CheckNonZero(T value) {
    142  return value != 0;
    143 }
    144 
    145 static bool CheckNurserySize(size_t bytes) {
    146  return bytes >= SystemPageSize() && bytes <= MaxNurseryBytesParam;
    147 }
    148 
    149 static bool CheckHeapGrowth(double growth) {
    150  return growth >= MinHeapGrowthFactor && growth <= MaxHeapGrowthFactor;
    151 }
    152 
    153 static bool CheckIncrementalLimit(double factor) {
    154  return factor >= 1.0 && factor <= MaxHeapGrowthFactor;
    155 }
    156 
    157 static bool CheckNonZeroUnitRange(double value) {
    158  return value > 0.0 && value <= 100.0;
    159 }
    160 
// Initialize every tunable to its compile-time default from the
// FOR_EACH_GC_TUNABLE table and assert that each default passes its own
// validity check.
GCSchedulingTunables::GCSchedulingTunables() {
#define INIT_TUNABLE_FIELD(key, type, name, convert, check, default) \
  name##_ = default;                                                 \
  MOZ_ASSERT(check(name##_));
  FOR_EACH_GC_TUNABLE(INIT_TUNABLE_FIELD)
#undef INIT_TUNABLE_FIELD

  // The defaults must also satisfy the cross-parameter invariants.
  checkInvariants();
}
    170 
// Return the current value of the tunable identified by |key|, encoded as
// uint32_t by the parameter's converter. Crashes on an unknown key.
uint32_t GCSchedulingTunables::getParameter(JSGCParamKey key) {
  switch (key) {
#define GET_TUNABLE_FIELD(key, type, name, convert, check, default) \
  case key:                                                         \
    return convert::toUint32(name##_);
    FOR_EACH_GC_TUNABLE(GET_TUNABLE_FIELD)
#undef GET_TUNABLE_FIELD

    default:
      MOZ_CRASH("Unknown parameter key");
  }
}
    183 
// Set the tunable identified by |key| from its uint32_t encoding.
//
// Returns false (leaving all tunables unchanged) if the value fails
// conversion or validation. On success, a related parameter may be adjusted
// to restore a cross-parameter invariant. Crashes on an unknown key.
bool GCSchedulingTunables::setParameter(JSGCParamKey key, uint32_t value) {
  // Invariants are re-checked on every exit path, including early returns.
  auto guard = mozilla::MakeScopeExit([this] { checkInvariants(); });

  switch (key) {
#define SET_TUNABLE_FIELD(key, type, name, convert, check, default) \
  case key: {                                                       \
    Maybe<type> converted = convert::fromUint32(value);             \
    if (!converted || !check(converted.value())) {                  \
      return false;                                                 \
    }                                                               \
    name##_ = converted.value();                                    \
    break;                                                          \
  }
    FOR_EACH_GC_TUNABLE(SET_TUNABLE_FIELD)
#undef SET_TUNABLE_FIELD

    default:
      MOZ_CRASH("Unknown GC parameter.");
  }

  maintainInvariantsAfterUpdate(key);
  return true;
}
    207 
// Restore the tunable identified by |key| to its compile-time default,
// adjusting a related parameter if needed to keep invariants holding.
// Crashes on an unknown key.
void GCSchedulingTunables::resetParameter(JSGCParamKey key) {
  // Invariants are re-checked on every exit path.
  auto guard = mozilla::MakeScopeExit([this] { checkInvariants(); });

  switch (key) {
#define RESET_TUNABLE_FIELD(key, type, name, convert, check, default) \
  case key:                                                           \
    name##_ = default;                                                \
    MOZ_ASSERT(check(name##_));                                       \
    break;
    FOR_EACH_GC_TUNABLE(RESET_TUNABLE_FIELD)
#undef RESET_TUNABLE_FIELD

    default:
      MOZ_CRASH("Unknown GC parameter.");
  }

  maintainInvariantsAfterUpdate(key);
}
    226 
void GCSchedulingTunables::maintainInvariantsAfterUpdate(JSGCParamKey updated) {
  // Check whether a change to parameter |updated| has broken an invariant in
  // relation to another parameter. If it has, adjust that other parameter to
  // restore the invariant.
  //
  // The paired invariants maintained here are:
  //   gcMinNurseryBytes_ <= gcMaxNurseryBytes_
  //   smallHeapSizeMaxBytes_ < largeHeapSizeMinBytes_
  //   highFrequencyLargeHeapGrowth_ <= highFrequencySmallHeapGrowth_
  //   largeHeapIncrementalLimit_ <= smallHeapIncrementalLimit_
  // In each case the parameter that was NOT just set is the one moved, so the
  // caller's update always takes effect.
  switch (updated) {
    case JSGC_MIN_NURSERY_BYTES:
      if (gcMaxNurseryBytes_ < gcMinNurseryBytes_) {
        gcMaxNurseryBytes_ = gcMinNurseryBytes_;
      }
      break;
    case JSGC_MAX_NURSERY_BYTES:
      if (gcMinNurseryBytes_ > gcMaxNurseryBytes_) {
        gcMinNurseryBytes_ = gcMaxNurseryBytes_;
      }
      break;
    case JSGC_SMALL_HEAP_SIZE_MAX:
      if (smallHeapSizeMaxBytes_ >= largeHeapSizeMinBytes_) {
        largeHeapSizeMinBytes_ = smallHeapSizeMaxBytes_ + 1;
      }
      break;
    case JSGC_LARGE_HEAP_SIZE_MIN:
      if (largeHeapSizeMinBytes_ <= smallHeapSizeMaxBytes_) {
        smallHeapSizeMaxBytes_ = largeHeapSizeMinBytes_ - 1;
      }
      break;
    case JSGC_HIGH_FREQUENCY_SMALL_HEAP_GROWTH:
      if (highFrequencySmallHeapGrowth_ < highFrequencyLargeHeapGrowth_) {
        highFrequencyLargeHeapGrowth_ = highFrequencySmallHeapGrowth_;
      }
      break;
    case JSGC_HIGH_FREQUENCY_LARGE_HEAP_GROWTH:
      if (highFrequencyLargeHeapGrowth_ > highFrequencySmallHeapGrowth_) {
        highFrequencySmallHeapGrowth_ = highFrequencyLargeHeapGrowth_;
      }
      break;
    case JSGC_SMALL_HEAP_INCREMENTAL_LIMIT:
      if (smallHeapIncrementalLimit_ < largeHeapIncrementalLimit_) {
        largeHeapIncrementalLimit_ = smallHeapIncrementalLimit_;
      }
      break;
    case JSGC_LARGE_HEAP_INCREMENTAL_LIMIT:
      if (largeHeapIncrementalLimit_ > smallHeapIncrementalLimit_) {
        smallHeapIncrementalLimit_ = largeHeapIncrementalLimit_;
      }
      break;
    default:
      // Parameters with no cross-parameter invariant need no adjustment.
      break;
  }
}
    276 
    277 void GCSchedulingTunables::checkInvariants() {
    278  MOZ_ASSERT(gcMinNurseryBytes_ == Nursery::roundSize(gcMinNurseryBytes_));
    279  MOZ_ASSERT(gcMaxNurseryBytes_ == Nursery::roundSize(gcMaxNurseryBytes_));
    280  MOZ_ASSERT(gcMinNurseryBytes_ <= gcMaxNurseryBytes_);
    281  MOZ_ASSERT(gcMinNurseryBytes_ >= SystemPageSize());
    282  MOZ_ASSERT(gcMaxNurseryBytes_ <= MaxNurseryBytesParam);
    283 
    284  MOZ_ASSERT(largeHeapSizeMinBytes_ > smallHeapSizeMaxBytes_);
    285 
    286  MOZ_ASSERT(lowFrequencyHeapGrowth_ >= MinHeapGrowthFactor);
    287  MOZ_ASSERT(lowFrequencyHeapGrowth_ <= MaxHeapGrowthFactor);
    288 
    289  MOZ_ASSERT(highFrequencySmallHeapGrowth_ >= MinHeapGrowthFactor);
    290  MOZ_ASSERT(highFrequencySmallHeapGrowth_ <= MaxHeapGrowthFactor);
    291  MOZ_ASSERT(highFrequencyLargeHeapGrowth_ <= highFrequencySmallHeapGrowth_);
    292  MOZ_ASSERT(highFrequencyLargeHeapGrowth_ >= MinHeapGrowthFactor);
    293  MOZ_ASSERT(highFrequencySmallHeapGrowth_ <= MaxHeapGrowthFactor);
    294 
    295  MOZ_ASSERT(smallHeapIncrementalLimit_ >= largeHeapIncrementalLimit_);
    296 }
    297 
    298 void GCSchedulingState::updateHighFrequencyModeOnGCStart(
    299    JS::GCOptions options, const mozilla::TimeStamp& lastGCTime,
    300    const mozilla::TimeStamp& currentTime,
    301    const GCSchedulingTunables& tunables) {
    302  // Set high frequency mode based on the time between collections.
    303  TimeDuration timeSinceLastGC = currentTime - lastGCTime;
    304  inHighFrequencyGCMode_ = !js::SupportDifferentialTesting() &&
    305                           options == JS::GCOptions::Normal &&
    306                           timeSinceLastGC < tunables.highFrequencyThreshold();
    307 }
    308 
    309 void GCSchedulingState::updateHighFrequencyModeOnSliceStart(
    310    JS::GCOptions options, JS::GCReason reason) {
    311  MOZ_ASSERT_IF(options != JS::GCOptions::Normal, !inHighFrequencyGCMode_);
    312 
    313  // Additionally set high frequency mode for reasons that indicate that slices
    314  // aren't being triggered often enough and the allocation rate is high.
    315  if (options == JS::GCOptions::Normal &&
    316      (reason == JS::GCReason::ALLOC_TRIGGER ||
    317       reason == JS::GCReason::TOO_MUCH_MALLOC)) {
    318    inHighFrequencyGCMode_ = true;
    319  }
    320 }
    321 
static constexpr size_t BytesPerMB = 1024 * 1024;
// Smoothing factors for the exponential moving averages below: the weight
// given to the newest sample (0.5 weights new and old equally).
static constexpr double CollectionRateSmoothingFactor = 0.5;
static constexpr double AllocationRateSmoothingFactor = 0.5;
    325 
    326 static double ExponentialMovingAverage(double prevAverage, double newData,
    327                                       double smoothingFactor) {
    328  MOZ_ASSERT(smoothingFactor > 0.0 && smoothingFactor <= 1.0);
    329  return smoothingFactor * newData + (1.0 - smoothingFactor) * prevAverage;
    330 }
    331 
// Update the smoothed estimate of this zone's collection rate in MB collected
// per second, based on the just-finished GC.
//
// |mainThreadGCTime| is the main thread time for the whole GC;
// |initialBytesForAllZones| is the total initial GC heap size across all
// collected zones (must be non-zero).
void js::ZoneAllocator::updateCollectionRate(
    mozilla::TimeDuration mainThreadGCTime, size_t initialBytesForAllZones) {
  MOZ_ASSERT(initialBytesForAllZones != 0);
  MOZ_ASSERT(gcHeapSize.initialBytes() <= initialBytesForAllZones);

  // Attribute a share of main thread GC time to this zone in proportion to
  // its share of the total initial heap size, plus the time measured for this
  // zone alone.
  double zoneFraction =
      double(gcHeapSize.initialBytes()) / double(initialBytesForAllZones);
  double zoneDuration = mainThreadGCTime.ToSeconds() * zoneFraction +
                        perZoneGCTime.ref().ToSeconds();
  double collectionRate =
      double(gcHeapSize.initialBytes()) / (zoneDuration * BytesPerMB);

  // Seed the average with the first sample; thereafter blend new samples in
  // with an exponential moving average.
  if (!smoothedCollectionRate.ref()) {
    smoothedCollectionRate = Some(collectionRate);
  } else {
    double prevRate = smoothedCollectionRate.ref().value();
    smoothedCollectionRate = Some(ExponentialMovingAverage(
        prevRate, collectionRate, CollectionRateSmoothingFactor));
  }
}
    352 
// Update the smoothed estimate of this zone's mutator allocation rate in MB
// allocated per second of mutator time since the previous collection.
void js::ZoneAllocator::updateAllocationRate(TimeDuration mutatorTime) {
  // To get the total size allocated since the last collection we have to
  // take account of how much memory got freed in the meantime.
  size_t freedBytes = gcHeapSize.freedBytes();

  size_t sizeIncludingFreedBytes = gcHeapSize.bytes() + freedBytes;

  MOZ_ASSERT(prevGCHeapSize <= sizeIncludingFreedBytes);
  size_t allocatedBytes = sizeIncludingFreedBytes - prevGCHeapSize;

  double allocationRate =
      double(allocatedBytes) / (mutatorTime.ToSeconds() * BytesPerMB);

  // Seed the average with the first sample; thereafter blend new samples in
  // with an exponential moving average.
  if (!smoothedAllocationRate.ref()) {
    smoothedAllocationRate = Some(allocationRate);
  } else {
    double prevRate = smoothedAllocationRate.ref().value();
    smoothedAllocationRate = Some(ExponentialMovingAverage(
        prevRate, allocationRate, AllocationRateSmoothingFactor));
  }

  // Reset the accounting baseline for the next collection interval.
  gcHeapSize.clearFreedBytes();
  prevGCHeapSize = gcHeapSize.bytes();
}
    377 
    378 // GC thresholds may exceed the range of size_t on 32-bit platforms, so these
    379 // are calculated using 64-bit integers and clamped.
    380 static inline size_t ToClampedSize(uint64_t bytes) {
    381  return std::min(bytes, uint64_t(SIZE_MAX));
    382 }
    383 
void HeapThreshold::setIncrementalLimitFromStartBytes(
    size_t retainedBytes, const GCSchedulingTunables& tunables) {
  // Calculate the incremental limit for a heap based on its size and start
  // threshold.
  //
  // This effectively classifies the heap size into small, medium or large, and
  // uses the small heap incremental limit parameter, the large heap
  // incremental limit parameter or an interpolation between them.
  //
  // The incremental limit is always set greater than the start threshold by at
  // least the maximum nursery size to reduce the chance that tenuring a full
  // nursery will send us straight into non-incremental collection.

  MOZ_ASSERT(tunables.smallHeapIncrementalLimit() >=
             tunables.largeHeapIncrementalLimit());

  // Interpolate the limit factor between the small- and large-heap settings
  // based on where |retainedBytes| falls between the two size thresholds.
  double factor = LinearInterpolate(double(retainedBytes),
                                    double(tunables.smallHeapSizeMaxBytes()),
                                    tunables.smallHeapIncrementalLimit(),
                                    double(tunables.largeHeapSizeMinBytes()),
                                    tunables.largeHeapIncrementalLimit());

  // Take the larger of the scaled start threshold and the start threshold
  // plus the maximum nursery size, computed in 64 bits to avoid overflow.
  uint64_t bytes =
      std::max(uint64_t(double(startBytes_) * factor),
               uint64_t(startBytes_) + tunables.gcMaxNurseryBytes());
  incrementalLimitBytes_ = ToClampedSize(bytes);
  MOZ_ASSERT(incrementalLimitBytes_ >= startBytes_);

  // Maintain the invariant that the slice threshold is always less than the
  // incremental limit when adjusting GC parameters.
  if (hasSliceThreshold() && sliceBytes() > incrementalLimitBytes()) {
    sliceBytes_ = incrementalLimitBytes();
  }
}
    418 
    419 size_t HeapThreshold::eagerAllocTrigger(bool highFrequencyGC) const {
    420  double eagerTriggerFactor = highFrequencyGC
    421                                  ? HighFrequencyEagerAllocTriggerFactor
    422                                  : LowFrequencyEagerAllocTriggerFactor;
    423  return size_t(eagerTriggerFactor * double(startBytes()));
    424 }
    425 
void HeapThreshold::setSliceThreshold(ZoneAllocator* zone,
                                      const HeapSize& heapSize,
                                      const GCSchedulingTunables& tunables,
                                      bool waitingOnBGTask) {
  // Set the allocation threshold at which to trigger the a GC slice in an
  // ongoing incremental collection. This is used to ensure progress in
  // allocation heavy code that may not return to the main event loop.
  //
  // The threshold is based on the JSGC_ZONE_ALLOC_DELAY_KB parameter, but this
  // is reduced to increase the slice frequency as we approach the incremental
  // limit, in the hope that we never reach it. If collector is waiting for a
  // background task to complete, don't trigger any slices until we reach the
  // urgent threshold.

  size_t bytesRemaining = incrementalBytesRemaining(heapSize);
  bool isUrgent = bytesRemaining < tunables.urgentThresholdBytes();

  size_t delayBeforeNextSlice = tunables.zoneAllocDelayBytes();
  if (isUrgent) {
    // Scale the delay down linearly with how close we are to the limit.
    double fractionRemaining =
        double(bytesRemaining) / double(tunables.urgentThresholdBytes());
    delayBeforeNextSlice =
        size_t(double(delayBeforeNextSlice) * fractionRemaining);
    MOZ_ASSERT(delayBeforeNextSlice <= tunables.zoneAllocDelayBytes());
  } else if (waitingOnBGTask) {
    // Defer the next slice until the heap reaches the urgent threshold.
    delayBeforeNextSlice = bytesRemaining - tunables.urgentThresholdBytes();
  }

  // Compute in 64 bits so the addition cannot overflow, and never set the
  // slice threshold past the incremental limit.
  sliceBytes_ = ToClampedSize(
      std::min(uint64_t(heapSize.bytes()) + uint64_t(delayBeforeNextSlice),
               uint64_t(incrementalLimitBytes_)));
}
    458 
    459 size_t HeapThreshold::incrementalBytesRemaining(
    460    const HeapSize& heapSize) const {
    461  if (heapSize.bytes() >= incrementalLimitBytes_) {
    462    return 0;
    463  }
    464 
    465  return incrementalLimitBytes_ - heapSize.bytes();
    466 }
    467 
/* static */
// Choose the heap growth factor for a zone whose post-GC size is |lastBytes|,
// taking the GC frequency into account.
double HeapThreshold::computeZoneHeapGrowthFactorForHeapSize(
    size_t lastBytes, const GCSchedulingTunables& tunables,
    const GCSchedulingState& state) {
  // For small zones, our collection heuristics do not matter much: favor
  // something simple in this case.
  if (lastBytes < 1 * 1024 * 1024) {
    return tunables.lowFrequencyHeapGrowth();
  }

  // The heap growth factor depends on the heap size after a GC and the GC
  // frequency. If GC's are not triggering in rapid succession, use a lower
  // threshold so that we will collect garbage sooner.
  if (!state.inHighFrequencyGCMode()) {
    return tunables.lowFrequencyHeapGrowth();
  }

  // For high frequency GCs we let the heap grow depending on whether we
  // classify the heap as small, medium or large. There are parameters for small
  // and large heap sizes and linear interpolation is used between them for
  // medium sized heaps.

  MOZ_ASSERT(tunables.smallHeapSizeMaxBytes() <=
             tunables.largeHeapSizeMinBytes());
  MOZ_ASSERT(tunables.highFrequencyLargeHeapGrowth() <=
             tunables.highFrequencySmallHeapGrowth());

  return LinearInterpolate(double(lastBytes),
                           double(tunables.smallHeapSizeMaxBytes()),
                           tunables.highFrequencySmallHeapGrowth(),
                           double(tunables.largeHeapSizeMinBytes()),
                           tunables.highFrequencyLargeHeapGrowth());
}
    501 
    502 /* static */
    503 size_t GCHeapThreshold::computeZoneTriggerBytes(
    504    double growthFactor, size_t lastBytes,
    505    const GCSchedulingTunables& tunables) {
    506  size_t base = std::max(lastBytes, tunables.gcZoneAllocThresholdBase());
    507  double trigger = double(base) * growthFactor;
    508  double triggerMax =
    509      double(tunables.gcMaxBytes()) / tunables.largeHeapIncrementalLimit();
    510  return ToClampedSize(uint64_t(std::min(triggerMax, trigger)));
    511 }
    512 
// Parameters for balanced heap limits computation. These correspond to the
// model described in the paper referenced from computeBalancedHeapLimit
// (https://arxiv.org/abs/2204.10455).

// The W0 parameter. How much memory can be traversed in the minimum collection
// time.
static constexpr double BalancedHeapBaseMB = 5.0;

// The minimum heap limit. Do not constrain the heap to any less than this size.
static constexpr double MinBalancedHeapLimitMB = 10.0;

// The minimum amount of additional space to allow beyond the retained size.
static constexpr double MinBalancedHeadroomMB = 3.0;

// The maximum factor by which to expand the heap beyond the retained size.
static constexpr double MaxHeapGrowth = 3.0;

// The default allocation rate in MB/s allocated by the mutator to use before we
// have an estimate. Used to set the heap limit for zones that have not yet been
// collected.
static constexpr double DefaultAllocationRate = 0.0;

// The s0 parameter. The default collection rate in MB/s to use before we have
// an estimate. Used to set the heap limit for zones that have not yet been
// collected.
static constexpr double DefaultCollectionRate = 200.0;
    537 
// Compute a heap limit in bytes that balances GC overhead against memory use,
// given the retained size and the zone's measured allocation and collection
// rates (MB/s). Requires balanced heap limits to be enabled.
double GCHeapThreshold::computeBalancedHeapLimit(
    size_t lastBytes, double allocationRate, double collectionRate,
    const GCSchedulingTunables& tunables) {
  MOZ_ASSERT(tunables.balancedHeapLimitsEnabled());

  // Optimal heap limits as described in https://arxiv.org/abs/2204.10455

  double W = double(lastBytes) / BytesPerMB;  // Retained size / MB.
  double W0 = BalancedHeapBaseMB;
  double d = tunables.heapGrowthFactor();  // Rearranged constant 'c'.
  double g = allocationRate;
  double s = collectionRate;
  // Headroom beyond the retained size, capped at MaxHeapGrowth times the
  // retained size.
  double f = d * sqrt((W + W0) * (g / s));
  double M = W + std::min(f, MaxHeapGrowth * W);
  // Apply the absolute and headroom floors.
  M = std::max({MinBalancedHeapLimitMB, W + MinBalancedHeadroomMB, M});

  return M * double(BytesPerMB);
}
    556 
    557 void GCHeapThreshold::updateStartThreshold(
    558    size_t lastBytes, mozilla::Maybe<double> allocationRate,
    559    mozilla::Maybe<double> collectionRate, const GCSchedulingTunables& tunables,
    560    const GCSchedulingState& state, bool isAtomsZone) {
    561  if (!tunables.balancedHeapLimitsEnabled()) {
    562    double growthFactor =
    563        computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);
    564 
    565    startBytes_ = computeZoneTriggerBytes(growthFactor, lastBytes, tunables);
    566  } else {
    567    double threshold = computeBalancedHeapLimit(
    568        lastBytes, allocationRate.valueOr(DefaultAllocationRate),
    569        collectionRate.valueOr(DefaultCollectionRate), tunables);
    570 
    571    double triggerMax =
    572        double(tunables.gcMaxBytes()) / tunables.largeHeapIncrementalLimit();
    573 
    574    startBytes_ = ToClampedSize(uint64_t(std::min(triggerMax, threshold)));
    575  }
    576 
    577  setIncrementalLimitFromStartBytes(lastBytes, tunables);
    578 }
    579 
    580 /* static */
    581 size_t MallocHeapThreshold::computeZoneTriggerBytes(double growthFactor,
    582                                                    size_t lastBytes,
    583                                                    size_t baseBytes) {
    584  return ToClampedSize(
    585      uint64_t(double(std::max(lastBytes, baseBytes)) * growthFactor));
    586 }
    587 
    588 void MallocHeapThreshold::updateStartThreshold(
    589    size_t lastBytes, const GCSchedulingTunables& tunables,
    590    const GCSchedulingState& state) {
    591  double growthFactor =
    592      computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);
    593 
    594  startBytes_ = computeZoneTriggerBytes(growthFactor, lastBytes,
    595                                        tunables.mallocThresholdBase());
    596 
    597  setIncrementalLimitFromStartBytes(lastBytes, tunables);
    598 }
    599 
    600 #ifdef DEBUG
    601 
// Return the name of a MemoryUse enumerator as a static string, for
// diagnostic output. Crashes on a value outside the enumeration.
static const char* MemoryUseName(MemoryUse use) {
  switch (use) {
#  define DEFINE_CASE(Name) \
    case MemoryUse::Name:   \
      return #Name;
    JS_FOR_EACH_MEMORY_USE(DEFINE_CASE)
#  undef DEFINE_CASE
  }

  MOZ_CRASH("Unknown memory use");
}
    613 
    614 MemoryTracker::MemoryTracker() : mutex(mutexid::MemoryTracker) {}
    615 
// Report (to stderr) and assert on any tracked memory associations that were
// never removed before the tracker is destroyed.
void MemoryTracker::checkEmptyOnDestroy() {
  bool ok = true;

  if (!gcMap.empty()) {
    ok = false;
    fprintf(stderr, "Missing calls to JS::RemoveAssociatedMemory:\n");
    for (auto r = gcMap.all(); !r.empty(); r.popFront()) {
      fprintf(stderr, "  %p 0x%zx %s\n", r.front().key().ptr(),
              r.front().value(), MemoryUseName(r.front().key().use()));
    }
  }

  if (!nonGCMap.empty()) {
    ok = false;
    fprintf(stderr, "Missing calls to Zone::decNonGCMemory:\n");
    for (auto r = nonGCMap.all(); !r.empty(); r.popFront()) {
      fprintf(stderr, "  %p 0x%zx\n", r.front().key().ptr(), r.front().value());
    }
  }

  // Assert after printing so all leaks are reported before crashing.
  MOZ_ASSERT(ok);
}
    638 
/* static */
inline bool MemoryTracker::isGCMemoryUse(MemoryUse use) {
  // Most memory uses are for memory associated with GC things but some are for
  // memory associated with non-GC thing pointers.
  return !isNonGCMemoryUse(use);
}
    645 
/* static */
// TrackedAllocPolicy is the only use tracked against a non-GC-thing pointer
// (in nonGCMap rather than gcMap).
inline bool MemoryTracker::isNonGCMemoryUse(MemoryUse use) {
  return use == MemoryUse::TrackedAllocPolicy;
}
    650 
    651 /* static */
    652 inline bool MemoryTracker::allowMultipleAssociations(MemoryUse use) {
    653  // For most uses only one association is possible for each GC thing. Allow a
    654  // one-to-many relationship only where necessary.
    655  return isNonGCMemoryUse(use) || use == MemoryUse::RegExpSharedBytecode ||
    656         use == MemoryUse::BreakpointSite || use == MemoryUse::Breakpoint ||
    657         use == MemoryUse::ICUObject;
    658 }
    659 
// Record |nbytes| of memory associated with tenured GC thing |cell| for the
// given use. Crashes if an association already exists and this use does not
// permit multiple associations; otherwise the sizes accumulate.
void MemoryTracker::trackGCMemory(Cell* cell, size_t nbytes, MemoryUse use) {
  MOZ_ASSERT(cell->isTenured());
  MOZ_ASSERT(isGCMemoryUse(use));

  LockGuard<Mutex> lock(mutex);

  Key<Cell> key{cell, use};
  AutoEnterOOMUnsafeRegion oomUnsafe;
  auto ptr = gcMap.lookupForAdd(key);
  if (ptr) {
    if (!allowMultipleAssociations(use)) {
      MOZ_CRASH_UNSAFE_PRINTF("Association already present: %p 0x%zx %s", cell,
                              nbytes, MemoryUseName(use));
    }
    // One-to-many use: fold the new bytes into the existing entry.
    ptr->value() += nbytes;
    return;
  }

  if (!gcMap.add(ptr, key, nbytes)) {
    oomUnsafe.crash("MemoryTracker::trackGCMemory");
  }
}
    682 
// Remove |nbytes| of memory previously associated with tenured GC thing
// |cell| for the given use. Crashes if no association exists, if a
// single-association use is removed with a different size than was recorded,
// or if more bytes are removed than remain.
void MemoryTracker::untrackGCMemory(Cell* cell, size_t nbytes, MemoryUse use) {
  MOZ_ASSERT(cell->isTenured());

  LockGuard<Mutex> lock(mutex);

  Key<Cell> key{cell, use};
  auto ptr = gcMap.lookup(key);
  if (!ptr) {
    MOZ_CRASH_UNSAFE_PRINTF("Association not found: %p 0x%zx %s", cell, nbytes,
                            MemoryUseName(use));
  }

  // Single-association uses must be removed with exactly the recorded size.
  if (!allowMultipleAssociations(use) && ptr->value() != nbytes) {
    MOZ_CRASH_UNSAFE_PRINTF(
        "Association for %p %s has different size: "
        "expected 0x%zx but got 0x%zx",
        cell, MemoryUseName(use), ptr->value(), nbytes);
  }

  if (nbytes > ptr->value()) {
    MOZ_CRASH_UNSAFE_PRINTF(
        "Association for %p %s size is too large: "
        "expected at most 0x%zx but got 0x%zx",
        cell, MemoryUseName(use), ptr->value(), nbytes);
  }

  ptr->value() -= nbytes;

  // Drop the entry entirely once all its bytes have been removed.
  if (ptr->value() == 0) {
    gcMap.remove(ptr);
  }
}
    715 
// Exchange the tracked associations of |a| and |b| for the given use, e.g.
// when two cells exchange their contents. Only tenured cells are tracked, so
// an association is only re-added for a cell that is tenured.
void MemoryTracker::swapGCMemory(Cell* a, Cell* b, MemoryUse use) {
  Key<Cell> ka{a, use};
  Key<Cell> kb{b, use};

  LockGuard<Mutex> lock(mutex);

  size_t sa = getAndRemoveEntry(ka, lock);
  size_t sb = getAndRemoveEntry(kb, lock);

  AutoEnterOOMUnsafeRegion oomUnsafe;

  if ((sa && b->isTenured() && !gcMap.put(kb, sa)) ||
      (sb && a->isTenured() && !gcMap.put(ka, sb))) {
    oomUnsafe.crash("MemoryTracker::swapGCMemory");
  }
}
    732 
    733 size_t MemoryTracker::getAndRemoveEntry(const Key<Cell>& key,
    734                                        LockGuard<Mutex>& lock) {
    735  auto ptr = gcMap.lookup(key);
    736  if (!ptr) {
    737    return 0;
    738  }
    739 
    740  size_t size = ptr->value();
    741  gcMap.remove(ptr);
    742  return size;
    743 }
    744 
    745 void MemoryTracker::registerNonGCMemory(void* mem, MemoryUse use) {
    746  LockGuard<Mutex> lock(mutex);
    747 
    748  Key<void> key{mem, use};
    749  auto ptr = nonGCMap.lookupForAdd(key);
    750  if (ptr) {
    751    MOZ_CRASH_UNSAFE_PRINTF("%s assocaition %p already registered",
    752                            MemoryUseName(use), mem);
    753  }
    754 
    755  AutoEnterOOMUnsafeRegion oomUnsafe;
    756  if (!nonGCMap.add(ptr, key, 0)) {
    757    oomUnsafe.crash("MemoryTracker::registerNonGCMemory");
    758  }
    759 }
    760 
    761 void MemoryTracker::unregisterNonGCMemory(void* mem, MemoryUse use) {
    762  LockGuard<Mutex> lock(mutex);
    763 
    764  Key<void> key{mem, use};
    765  auto ptr = nonGCMap.lookup(key);
    766  if (!ptr) {
    767    MOZ_CRASH_UNSAFE_PRINTF("%s association %p not found", MemoryUseName(use),
    768                            mem);
    769  }
    770 
    771  if (ptr->value() != 0) {
    772    MOZ_CRASH_UNSAFE_PRINTF(
    773        "%s association %p still has 0x%zx bytes associated",
    774        MemoryUseName(use), mem, ptr->value());
    775  }
    776 
    777  nonGCMap.remove(ptr);
    778 }
    779 
    780 void MemoryTracker::moveNonGCMemory(void* dst, void* src, MemoryUse use) {
    781  LockGuard<Mutex> lock(mutex);
    782 
    783  Key<void> srcKey{src, use};
    784  auto srcPtr = nonGCMap.lookup(srcKey);
    785  if (!srcPtr) {
    786    MOZ_CRASH_UNSAFE_PRINTF("%s association %p not found", MemoryUseName(use),
    787                            src);
    788  }
    789 
    790  size_t nbytes = srcPtr->value();
    791  nonGCMap.remove(srcPtr);
    792 
    793  Key<void> dstKey{dst, use};
    794  auto dstPtr = nonGCMap.lookupForAdd(dstKey);
    795  if (dstPtr) {
    796    MOZ_CRASH_UNSAFE_PRINTF("%s %p already registered", MemoryUseName(use),
    797                            dst);
    798  }
    799 
    800  AutoEnterOOMUnsafeRegion oomUnsafe;
    801  if (!nonGCMap.add(dstPtr, dstKey, nbytes)) {
    802    oomUnsafe.crash("MemoryTracker::moveNonGCMemory");
    803  }
    804 }
    805 
    806 void MemoryTracker::incNonGCMemory(void* mem, size_t nbytes, MemoryUse use) {
    807  MOZ_ASSERT(isNonGCMemoryUse(use));
    808 
    809  LockGuard<Mutex> lock(mutex);
    810 
    811  Key<void> key{mem, use};
    812  auto ptr = nonGCMap.lookup(key);
    813  if (!ptr) {
    814    MOZ_CRASH_UNSAFE_PRINTF("%s allocation %p not found", MemoryUseName(use),
    815                            mem);
    816  }
    817 
    818  ptr->value() += nbytes;
    819 }
    820 
    821 void MemoryTracker::decNonGCMemory(void* mem, size_t nbytes, MemoryUse use) {
    822  MOZ_ASSERT(isNonGCMemoryUse(use));
    823 
    824  LockGuard<Mutex> lock(mutex);
    825 
    826  Key<void> key{mem, use};
    827  auto ptr = nonGCMap.lookup(key);
    828  if (!ptr) {
    829    MOZ_CRASH_UNSAFE_PRINTF("%s allocation %p not found", MemoryUseName(use),
    830                            mem);
    831  }
    832 
    833  size_t& value = ptr->value();
    834  if (nbytes > value) {
    835    MOZ_CRASH_UNSAFE_PRINTF(
    836        "%s allocation %p is too large: "
    837        "expected at most 0x%zx but got 0x%zx bytes",
    838        MemoryUseName(use), mem, value, nbytes);
    839  }
    840 
    841  value -= nbytes;
    842 }
    843 
// After a moving GC, rewrite map keys whose cell was relocated so they point
// at the cell's new address. Assumes the caller holds whatever lock protects
// the map during GC -- TODO confirm; no lock is taken here.
void MemoryTracker::fixupAfterMovingGC() {
  // Update the table after we move GC things. We don't use StableCellHasher
  // because that would create a difference between debug and release builds.
  for (GCMap::Enum e(gcMap); !e.empty(); e.popFront()) {
    const auto& key = e.front().key();
    Cell* cell = key.ptr();
    // A forwarded cell has been moved; follow the relocation overlay to its
    // new location and rekey the entry in place (Enum::rekeyFront allows
    // mutation of the current key during enumeration).
    if (cell->isForwarded()) {
      cell = gc::RelocationOverlay::fromCell(cell)->forwardingAddress();
      e.rekeyFront(Key<Cell>{cell, key.use()});
    }
  }
}
    856 
// Pack a pointer and a MemoryUse into a single key. On 64-bit platforms the
// two bitfields (ptr_/use_) are expected to fit in 8 bytes; the asserts below
// verify the packed values round-trip without loss.
template <typename Ptr>
inline MemoryTracker::Key<Ptr>::Key(Ptr* ptr, MemoryUse use)
    : ptr_(uint64_t(ptr)), use_(uint64_t(use)) {
#  ifdef JS_64BIT
  static_assert(sizeof(Key) == 8,
                "MemoryTracker::Key should be packed into 8 bytes");
#  endif
  // Check the stored bitfields reproduce the original pointer and use.
  MOZ_ASSERT(this->ptr() == ptr);
  MOZ_ASSERT(this->use() == use);
}
    867 
// Recover the pointer stored in the packed ptr_ bitfield.
template <typename Ptr>
inline Ptr* MemoryTracker::Key<Ptr>::ptr() const {
  return reinterpret_cast<Ptr*>(ptr_);
}
// Recover the MemoryUse stored in the packed use_ bitfield.
template <typename Ptr>
inline MemoryUse MemoryTracker::Key<Ptr>::use() const {
  return static_cast<MemoryUse>(use_);
}
    876 
// Hash a key by combining the hashes of its pointer and its MemoryUse, so
// that the same pointer under different uses maps to distinct buckets.
template <typename Ptr>
inline HashNumber MemoryTracker::Hasher<Ptr>::hash(const Lookup& l) {
  return mozilla::HashGeneric(DefaultHasher<Ptr*>::hash(l.ptr()),
                              DefaultHasher<unsigned>::hash(unsigned(l.use())));
}
    882 
// Keys match only when both the pointer and the MemoryUse are equal.
template <typename Ptr>
inline bool MemoryTracker::Hasher<Ptr>::match(const KeyT& k, const Lookup& l) {
  return k.ptr() == l.ptr() && k.use() == l.use();
}
    887 
// Replace a stored key in place; used when a moving GC relocates a cell (see
// fixupAfterMovingGC).
template <typename Ptr>
inline void MemoryTracker::Hasher<Ptr>::rekey(KeyT& k, const KeyT& newKey) {
  k = newKey;
}
    892 
    893 #endif  // DEBUG