tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

clock.cc (26573B)


// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/time/clock.h"

#include "absl/base/attributes.h"
#include "absl/base/optimization.h"

#ifdef _WIN32
#include <windows.h>
#endif

#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstdint>
#include <ctime>
#include <limits>

#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/unscaledcycleclock.h"
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/base/thread_annotations.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
Time Now() {
  // TODO(bww): Get a timespec instead so we don't have to divide.
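  // Editor's note (assumption, not in the original): Duration's low word in
  // this version of Abseil counts quarter-nanosecond ticks (4 per nanosecond),
  // which is why the sub-second remainder below is scaled by 4.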
  int64_t n = absl::GetCurrentTimeNanos();
  if (n >= 0) {
    return time_internal::FromUnixDuration(
        time_internal::MakeDuration(n / 1000000000, n % 1000000000 * 4));
  }
  return time_internal::FromUnixDuration(absl::Nanoseconds(n));
}
ABSL_NAMESPACE_END
}  // namespace absl

// Decide whether to use the fast, cycle-counter-based GetCurrentTimeNanos()
// algorithm, or to get the time directly from the OS on every call.
// By default, the fast algorithm is disabled because in certain situations,
// for example if the OS enters a "sleep" mode, the cycle counter may produce
// incorrect values immediately upon waking.
// This can be chosen at compile time via
// -DABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS=[0|1]
#ifndef ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
#define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 0
#endif
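
// Editor's sketch (not part of the original file): opting in to the fast
// cycle-counter path is a compile-time decision, e.g.
//
//   g++ -DABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS=1 -c clock.cc
//
// or the equivalent copts entry in your build system. Leaving the macro
// unset keeps the safe default (0): every call reads the time from the OS.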

#if defined(__APPLE__) || defined(_WIN32)
#include "absl/time/internal/get_current_time_chrono.inc"
#else
#include "absl/time/internal/get_current_time_posix.inc"
#endif

// Allows override by test.
#ifndef GET_CURRENT_TIME_NANOS_FROM_SYSTEM
#define GET_CURRENT_TIME_NANOS_FROM_SYSTEM() \
  ::absl::time_internal::GetCurrentTimeNanosFromSystem()
#endif

#if !ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
namespace absl {
ABSL_NAMESPACE_BEGIN
int64_t GetCurrentTimeNanos() { return GET_CURRENT_TIME_NANOS_FROM_SYSTEM(); }
ABSL_NAMESPACE_END
}  // namespace absl
#else  // Use the cyclecounter-based implementation below.

// Allows override by test.
#ifndef GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW
#define GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW() \
  ::absl::time_internal::UnscaledCycleClockWrapperForGetCurrentTime::Now()
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace time_internal {

// On some processors, consecutive reads of the cycle counter may yield the
// same value (it is only weakly increasing). On x86-64 in debug mode, clear
// the least significant bits so that callers are discouraged from depending
// on a strictly-increasing Now() value.
#if !defined(NDEBUG) && defined(__x86_64__)
constexpr int64_t kCycleClockNowMask = ~int64_t{0xff};
#else
constexpr int64_t kCycleClockNowMask = ~int64_t{0};
#endif

// This is a friend wrapper around UnscaledCycleClock::Now()
// (needed to access UnscaledCycleClock).
class UnscaledCycleClockWrapperForGetCurrentTime {
 public:
  static int64_t Now() {
    return base_internal::UnscaledCycleClock::Now() & kCycleClockNowMask;
  }
};
}  // namespace time_internal

// uint64_t is used in this module to provide an extra bit in multiplications

// ---------------------------------------------------------------------
// An implementation of reader-writer locks that uses no atomic ops in the
// read case.  This is a generalization of Lamport's method for reading a
// multiword clock.  Increment a word on each write acquisition, using the
// low-order bit as a spinlock; the word is the high word of the "clock".
// Readers read the high word, then all other data, then the high word again,
// and repeat the read if the two reads of the high word yield different
// answers, or an odd value (either case suggests possible interference from
// a writer).
// Here we use a spinlock to ensure only one writer at a time, rather than
// spinning on the bottom bit of the word, to benefit from SpinLock
// spin-delay tuning.

// Acquire seqlock (*seq) and return the value to be written to unlock.
static inline uint64_t SeqAcquire(std::atomic<uint64_t> *seq) {
  uint64_t x = seq->fetch_add(1, std::memory_order_relaxed);

  // We put a release fence between update to *seq and writes to shared data.
  // Thus all stores to shared data are effectively release operations and
  // update to *seq above cannot be re-ordered past any of them.  Note that
  // this barrier is not for the fetch_add above.  A release barrier for the
  // fetch_add would be before it, not after.
  std::atomic_thread_fence(std::memory_order_release);

  return x + 2;   // original word plus 2
}

// Release seqlock (*seq) by writing x to it---a value previously returned by
// SeqAcquire.
static inline void SeqRelease(std::atomic<uint64_t> *seq, uint64_t x) {
  // The unlock store to *seq must have release ordering so that all
  // updates to shared data must finish before this store.
  seq->store(x, std::memory_order_release);  // release lock for readers
}
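
// Editor's sketch (not part of the original file): the reader-side protocol
// that pairs with SeqAcquire()/SeqRelease(). GetCurrentTimeNanos() below
// inlines this pattern across several fields; this hypothetical helper shows
// it for a single shared word.
static inline uint64_t SeqReadExample(const std::atomic<uint64_t> *seq,
                                      const std::atomic<uint64_t> *data) {
  uint64_t r0, r1, value;
  do {
    r0 = seq->load(std::memory_order_acquire);  // first read of the seqlock
    value = data->load(std::memory_order_relaxed);
    // Pairs with the release fence in SeqAcquire(), as described above.
    std::atomic_thread_fence(std::memory_order_acquire);
    r1 = seq->load(std::memory_order_relaxed);  // second read of the seqlock
    // Retry if a writer held the lock (odd value) or intervened (r0 != r1).
  } while (r0 != r1 || (r0 & 1) != 0);
  return value;
}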

// ---------------------------------------------------------------------

// "nsscaled" is a unit of time equal to a (2**kScale)th of a nanosecond.
enum { kScale = 30 };

// The minimum interval between samples of the time base.
// We pick enough time to amortize the cost of the sample,
// to get a reasonably accurate cycle counter rate reading,
// and not so much that calculations will overflow 64-bits.
static const uint64_t kMinNSBetweenSamples = 2000 << 20;

// We require that kMinNSBetweenSamples shifted by kScale
// have at least a bit left over for 64-bit calculations.
static_assert(((kMinNSBetweenSamples << (kScale + 1)) >> (kScale + 1)) ==
              kMinNSBetweenSamples,
              "cannot represent kMaxBetweenSamplesNSScaled");
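
// Editor's note (worked numbers, not in the original): kMinNSBetweenSamples
// is 2000 << 20 = 2000 * 2^20 = 2,097,152,000 ns, i.e. samples are taken at
// most roughly every 2.1 seconds. With kScale = 30, one nanosecond equals
// 2^30 nsscaled units, and kMinNSBetweenSamples << (kScale + 1) is about
// 4.5e18, which still fits in a uint64_t (max ~1.8e19), as the static_assert
// above verifies.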

// data from a sample of the kernel's time value
struct TimeSampleAtomic {
  std::atomic<uint64_t> raw_ns{0};              // raw kernel time
  std::atomic<uint64_t> base_ns{0};             // our estimate of time
  std::atomic<uint64_t> base_cycles{0};         // cycle counter reading
  std::atomic<uint64_t> nsscaled_per_cycle{0};  // cycle period
  // cycles before we'll sample again (a scaled reciprocal of the period,
  // to avoid a division on the fast path).
  std::atomic<uint64_t> min_cycles_per_sample{0};
};
// Same again, but with non-atomic types
struct TimeSample {
  uint64_t raw_ns = 0;                 // raw kernel time
  uint64_t base_ns = 0;                // our estimate of time
  uint64_t base_cycles = 0;            // cycle counter reading
  uint64_t nsscaled_per_cycle = 0;     // cycle period
  uint64_t min_cycles_per_sample = 0;  // approx cycles before next sample
};

struct ABSL_CACHELINE_ALIGNED TimeState {
  std::atomic<uint64_t> seq{0};
  TimeSampleAtomic last_sample;  // the last sample; under seq

  // The following counters are used only by the test code.
  int64_t stats_initializations{0};
  int64_t stats_reinitializations{0};
  int64_t stats_calibrations{0};
  int64_t stats_slow_paths{0};
  int64_t stats_fast_slow_paths{0};

  uint64_t last_now_cycles ABSL_GUARDED_BY(lock){0};

  // Used by GetCurrentTimeNanosFromKernel().
  // We try to read clock values at about the same time as the kernel clock.
  // This value gets adjusted up or down as an estimate of how long that
  // should take, so we can reject attempts that take unusually long.
  std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000};
  // Number of times in a row we've seen a kernel time call take substantially
  // less than approx_syscall_time_in_cycles.
  std::atomic<uint32_t> kernel_time_seen_smaller{0};

  // A reader-writer lock protecting the static locations below.
  // See SeqAcquire() and SeqRelease() above.
  absl::base_internal::SpinLock lock{absl::kConstInit,
                                     base_internal::SCHEDULE_KERNEL_ONLY};
};
ABSL_CONST_INIT static TimeState time_state;

// Return the time in ns as told by the kernel interface.  Place in *cycleclock
// the value of the cycleclock at about the time of the syscall.
// This call represents the time base that this module synchronizes to.
// Ensures that *cycleclock does not step back by up to (1 << 16) from
// last_cycleclock, to discard small backward counter steps.  (Larger steps are
// assumed to be complete resyncs, which shouldn't happen.  If they do, a full
// reinitialization of the outer algorithm should occur.)
static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock,
                                             uint64_t *cycleclock)
    ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
  uint64_t local_approx_syscall_time_in_cycles =  // local copy
      time_state.approx_syscall_time_in_cycles.load(std::memory_order_relaxed);

  int64_t current_time_nanos_from_system;
  uint64_t before_cycles;
  uint64_t after_cycles;
  uint64_t elapsed_cycles;
  int loops = 0;
  do {
    before_cycles =
        static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
    current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
    after_cycles =
        static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
    // elapsed_cycles is unsigned, so is large on overflow
    elapsed_cycles = after_cycles - before_cycles;
    if (elapsed_cycles >= local_approx_syscall_time_in_cycles &&
        ++loops == 20) {  // clock changed frequencies?  Back off.
      loops = 0;
      if (local_approx_syscall_time_in_cycles < 1000 * 1000) {
        local_approx_syscall_time_in_cycles =
            (local_approx_syscall_time_in_cycles + 1) << 1;
      }
      time_state.approx_syscall_time_in_cycles.store(
          local_approx_syscall_time_in_cycles, std::memory_order_relaxed);
    }
  } while (elapsed_cycles >= local_approx_syscall_time_in_cycles ||
           last_cycleclock - after_cycles < (static_cast<uint64_t>(1) << 16));

  // Adjust approx_syscall_time_in_cycles to be within a factor of 2
  // of the typical time to execute one iteration of the loop above.
  if ((local_approx_syscall_time_in_cycles >> 1) < elapsed_cycles) {
    // measured time is no smaller than half current approximation
    time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
  } else if (time_state.kernel_time_seen_smaller.fetch_add(
                 1, std::memory_order_relaxed) >= 3) {
    // smaller delays several times in a row; reduce approximation by 12.5%
    const uint64_t new_approximation =
        local_approx_syscall_time_in_cycles -
        (local_approx_syscall_time_in_cycles >> 3);
    time_state.approx_syscall_time_in_cycles.store(new_approximation,
                                                   std::memory_order_relaxed);
    time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
  }

  *cycleclock = after_cycles;
  return current_time_nanos_from_system;
}
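
// Editor's note (worked numbers, not in the original): the loop above starts
// from the initial guess of 10,000 cycles for approx_syscall_time_in_cycles.
// If 20 consecutive attempts each take at least that long, the bound roughly
// doubles via (x + 1) << 1 while it is below 1,000,000 cycles; if four kernel
// reads in a row finish in at most half the bound, it shrinks by 12.5% via
// x - (x >> 3). This keeps the accepted syscall window within about a factor
// of 2 of the typical cost.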

static int64_t GetCurrentTimeNanosSlowPath() ABSL_ATTRIBUTE_COLD;

// Read the contents of *atomic into *sample.
// Each field is read atomically, but to maintain atomicity between fields,
// the access must be done under a lock.
static void ReadTimeSampleAtomic(const struct TimeSampleAtomic *atomic,
                                 struct TimeSample *sample) {
  sample->base_ns = atomic->base_ns.load(std::memory_order_relaxed);
  sample->base_cycles = atomic->base_cycles.load(std::memory_order_relaxed);
  sample->nsscaled_per_cycle =
      atomic->nsscaled_per_cycle.load(std::memory_order_relaxed);
  sample->min_cycles_per_sample =
      atomic->min_cycles_per_sample.load(std::memory_order_relaxed);
  sample->raw_ns = atomic->raw_ns.load(std::memory_order_relaxed);
}

// Public routine.
// Algorithm:  We wish to compute real time from a cycle counter.  In normal
// operation, we construct a piecewise linear approximation to the kernel time
// source, using the cycle counter value.  The start of each line segment is at
// the same point as the end of the last, but may have a different slope (that
// is, a different idea of the cycle counter frequency).  Every couple of
// seconds, the kernel time source is sampled and compared with the current
// approximation.  A new slope is chosen that, if followed for another couple
// of seconds, will correct the error at the current position.  The information
// for a sample is in the "last_sample" struct.  The linear approximation is
//   estimated_time = last_sample.base_ns +
//     last_sample.ns_per_cycle * (counter_reading - last_sample.base_cycles)
// (ns_per_cycle is actually stored in different units and scaled, to avoid
// overflow).  The base_ns of the next linear approximation is the
// estimated_time using the last approximation; the base_cycles is the cycle
// counter value at that time; the ns_per_cycle is the number of ns per cycle
// measured since the last sample, but adjusted so that most of the difference
// between the estimated_time and the kernel time will be corrected by the
// estimated time to the next sample.  In normal operation, this algorithm
// relies on:
// - the cycle counter and kernel time rates not changing a lot in a few
//   seconds.
// - the client calling into the code often compared to a couple of seconds, so
//   the time to the next correction can be estimated.
// Any time ns_per_cycle is not known, a major error is detected, or the
// assumption about frequent calls is violated, the implementation returns the
// kernel time.  It records sufficient data that a linear approximation can
// resume a little later.

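// Editor's note (assumed numbers, not in the original): a worked instance of
// the fixed-point interpolation used on the fast path below. Suppose a 3 GHz
// cycle counter, so nsscaled_per_cycle ~= (1 << kScale) / 3 ~= 357,913,941.
// One second after the sample, delta_cycles = 3,000,000,000 and
//   (3,000,000,000 * 357,913,941) >> 30 ~= 999,999,999 ns,
// i.e. almost exactly one second of advance, with no division on the fast
// path.
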
int64_t GetCurrentTimeNanos() {
  // read the data from the "last_sample" struct (but don't need raw_ns yet)
  // The reads of "seq" and test of the values emulate a reader lock.
  uint64_t base_ns;
  uint64_t base_cycles;
  uint64_t nsscaled_per_cycle;
  uint64_t min_cycles_per_sample;
  uint64_t seq_read0;
  uint64_t seq_read1;

  // If we have enough information to interpolate, the value returned will be
  // derived from this cycleclock-derived time estimate.  On some platforms
  // (POWER) the function to retrieve this value has enough complexity to
  // contribute to register pressure - reading it early before initializing
  // the other pieces of the calculation minimizes spill/restore instructions,
  // minimizing icache cost.
  uint64_t now_cycles =
      static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());

  // Acquire pairs with the barrier in SeqRelease - if this load sees that
  // store, the shared-data reads necessarily see that SeqRelease's updates
  // to the same shared data.
  seq_read0 = time_state.seq.load(std::memory_order_acquire);

  base_ns = time_state.last_sample.base_ns.load(std::memory_order_relaxed);
  base_cycles =
      time_state.last_sample.base_cycles.load(std::memory_order_relaxed);
  nsscaled_per_cycle =
      time_state.last_sample.nsscaled_per_cycle.load(std::memory_order_relaxed);
  min_cycles_per_sample = time_state.last_sample.min_cycles_per_sample.load(
      std::memory_order_relaxed);

  // This acquire fence pairs with the release fence in SeqAcquire.  Since it
  // is sequenced between reads of shared data and seq_read1, the reads of
  // shared data are effectively acquiring.
  std::atomic_thread_fence(std::memory_order_acquire);

  // The shared-data reads are effectively acquire ordered, and the
  // shared-data writes are effectively release ordered. Therefore if our
  // shared-data reads see any of a particular update's shared-data writes,
  // seq_read1 is guaranteed to see that update's SeqAcquire.
  seq_read1 = time_state.seq.load(std::memory_order_relaxed);

  // Fast path.  Return if min_cycles_per_sample has not yet elapsed since the
  // last sample, and we read a consistent sample.  The fast path activates
  // only when min_cycles_per_sample is non-zero, which happens when we get an
  // estimate for the cycle time.  The predicate will fail if now_cycles <
  // base_cycles, or if some other thread is in the slow path.
  //
  // Since we now read now_cycles before base_ns, it is possible for now_cycles
  // to be less than base_cycles (if we were interrupted between those loads and
  // last_sample was updated). This is harmless, because delta_cycles will wrap
  // and report a time much much bigger than min_cycles_per_sample. In that case
  // we will take the slow path.
  uint64_t delta_cycles;
  if (seq_read0 == seq_read1 && (seq_read0 & 1) == 0 &&
      (delta_cycles = now_cycles - base_cycles) < min_cycles_per_sample) {
    return static_cast<int64_t>(
        base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale));
  }
  return GetCurrentTimeNanosSlowPath();
}

// Return (a << kScale)/b.
// Zero is returned if b==0.   Scaling is performed internally to
// preserve precision without overflow.
static uint64_t SafeDivideAndScale(uint64_t a, uint64_t b) {
  // Find maximum safe_shift so that
  //  0 <= safe_shift <= kScale  and  (a << safe_shift) does not overflow.
  int safe_shift = kScale;
  while (((a << safe_shift) >> safe_shift) != a) {
    safe_shift--;
  }
  uint64_t scaled_b = b >> (kScale - safe_shift);
  uint64_t quotient = 0;
  if (scaled_b != 0) {
    quotient = (a << safe_shift) / scaled_b;
  }
  return quotient;
}
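
// Editor's note (assumed numbers, not in the original): for a hypothetical
// counter advancing 3,000,000,000 cycles per 1,000,000,000 ns (3 GHz),
//   SafeDivideAndScale(1000000000, 3000000000) ~= (1 << kScale) / 3
//                                              ~= 357,913,941 nsscaled/cycle.
// Shifting a up by as much as is safe (and b down by the remainder of kScale)
// preserves precision while avoiding 64-bit overflow.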

static uint64_t UpdateLastSample(
    uint64_t now_cycles, uint64_t now_ns, uint64_t delta_cycles,
    const struct TimeSample *sample) ABSL_ATTRIBUTE_COLD;

// The slow path of GetCurrentTimeNanos().  This is taken while gathering
// initial samples, when enough time has elapsed since the last sample, and if
// any other thread is writing to last_sample.
//
// Manually mark this 'noinline' to minimize stack frame size of the fast
// path.  Without this, sometimes a compiler may inline this big block of code
// into the fast path.  That causes lots of register spills and reloads that
// are unnecessary unless the slow path is taken.
//
// TODO(absl-team): Remove this attribute when our compiler is smart enough
// to do the right thing.
ABSL_ATTRIBUTE_NOINLINE
static int64_t GetCurrentTimeNanosSlowPath()
    ABSL_LOCKS_EXCLUDED(time_state.lock) {
  // Serialize access to slow-path.  Fast-path readers are not blocked yet, and
  // code below must not modify last_sample until the seqlock is acquired.
  time_state.lock.Lock();

  // Sample the kernel time base.  This is the definition of
  // "now" if we take the slow path.
  uint64_t now_cycles;
  uint64_t now_ns = static_cast<uint64_t>(
      GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles));
  time_state.last_now_cycles = now_cycles;

  uint64_t estimated_base_ns;

  // ----------
  // Read the "last_sample" values again; this time holding the write lock.
  struct TimeSample sample;
  ReadTimeSampleAtomic(&time_state.last_sample, &sample);

  // ----------
  // Try running the fast path again; another thread may have updated the
  // sample between our run of the fast path and the sample we just read.
  uint64_t delta_cycles = now_cycles - sample.base_cycles;
  if (delta_cycles < sample.min_cycles_per_sample) {
    // Another thread updated the sample.  This path does not take the seqlock
    // so that blocked readers can make progress without blocking new readers.
    estimated_base_ns = sample.base_ns +
        ((delta_cycles * sample.nsscaled_per_cycle) >> kScale);
    time_state.stats_fast_slow_paths++;
  } else {
    estimated_base_ns =
        UpdateLastSample(now_cycles, now_ns, delta_cycles, &sample);
  }

  time_state.lock.Unlock();

  return static_cast<int64_t>(estimated_base_ns);
}

// Main part of the algorithm.  Locks out readers, updates the approximation
// using the new sample from the kernel, and stores the result in last_sample
// for readers.  Returns the new estimated time.
static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
                                 uint64_t delta_cycles,
                                 const struct TimeSample *sample)
    ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
  uint64_t estimated_base_ns = now_ns;
  uint64_t lock_value =
      SeqAcquire(&time_state.seq);  // acquire seqlock to block readers

  // The 5s in the next if-statement limits the time for which we will trust
  // the cycle counter and our last sample to give a reasonable result.
  // Errors in the rate of the source clock can be multiplied by the ratio
  // between this limit and kMinNSBetweenSamples.
  if (sample->raw_ns == 0 ||  // no recent sample, or clock went backwards
      sample->raw_ns + static_cast<uint64_t>(5) * 1000 * 1000 * 1000 < now_ns ||
      now_ns < sample->raw_ns || now_cycles < sample->base_cycles) {
    // record this sample, and forget any previously known slope.
    time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
    time_state.last_sample.base_ns.store(estimated_base_ns,
                                         std::memory_order_relaxed);
    time_state.last_sample.base_cycles.store(now_cycles,
                                             std::memory_order_relaxed);
    time_state.last_sample.nsscaled_per_cycle.store(0,
                                                    std::memory_order_relaxed);
    time_state.last_sample.min_cycles_per_sample.store(
        0, std::memory_order_relaxed);
    time_state.stats_initializations++;
  } else if (sample->raw_ns + 500 * 1000 * 1000 < now_ns &&
             sample->base_cycles + 50 < now_cycles) {
    // Enough time has passed to compute the cycle time.
    if (sample->nsscaled_per_cycle != 0) {  // Have a cycle time estimate.
      // Compute time from counter reading, but avoiding overflow
      // delta_cycles may be larger than on the fast path.
      uint64_t estimated_scaled_ns;
      int s = -1;
      do {
        s++;
        estimated_scaled_ns = (delta_cycles >> s) * sample->nsscaled_per_cycle;
      } while (estimated_scaled_ns / sample->nsscaled_per_cycle !=
               (delta_cycles >> s));
      estimated_base_ns = sample->base_ns +
                          (estimated_scaled_ns >> (kScale - s));
    }

    // Compute the assumed cycle time kMinNSBetweenSamples ns into the future
    // assuming the cycle counter rate stays the same as the last interval.
    uint64_t ns = now_ns - sample->raw_ns;
    uint64_t measured_nsscaled_per_cycle = SafeDivideAndScale(ns, delta_cycles);

    uint64_t assumed_next_sample_delta_cycles =
        SafeDivideAndScale(kMinNSBetweenSamples, measured_nsscaled_per_cycle);

    // Estimate low by this much.
    int64_t diff_ns = static_cast<int64_t>(now_ns - estimated_base_ns);

    // We want to set nsscaled_per_cycle so that our estimate of the ns time
    // at the assumed cycle time is the assumed ns time.
    // That is, we want to set nsscaled_per_cycle so:
    //  kMinNSBetweenSamples + diff_ns  ==
    //  (assumed_next_sample_delta_cycles * nsscaled_per_cycle) >> kScale
    // But we wish to damp oscillations, so instead correct only most
    // of our current error, by solving:
    //  kMinNSBetweenSamples + diff_ns - (diff_ns / 16) ==
    //  (assumed_next_sample_delta_cycles * nsscaled_per_cycle) >> kScale
    ns = static_cast<uint64_t>(static_cast<int64_t>(kMinNSBetweenSamples) +
                               diff_ns - (diff_ns / 16));
    uint64_t new_nsscaled_per_cycle =
        SafeDivideAndScale(ns, assumed_next_sample_delta_cycles);
    if (new_nsscaled_per_cycle != 0 &&
        diff_ns < 100 * 1000 * 1000 && -diff_ns < 100 * 1000 * 1000) {
      // record the cycle time measurement
      time_state.last_sample.nsscaled_per_cycle.store(
          new_nsscaled_per_cycle, std::memory_order_relaxed);
      uint64_t new_min_cycles_per_sample =
          SafeDivideAndScale(kMinNSBetweenSamples, new_nsscaled_per_cycle);
      time_state.last_sample.min_cycles_per_sample.store(
          new_min_cycles_per_sample, std::memory_order_relaxed);
      time_state.stats_calibrations++;
    } else {  // something went wrong; forget the slope
      time_state.last_sample.nsscaled_per_cycle.store(
          0, std::memory_order_relaxed);
      time_state.last_sample.min_cycles_per_sample.store(
          0, std::memory_order_relaxed);
      estimated_base_ns = now_ns;
      time_state.stats_reinitializations++;
    }
    time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
    time_state.last_sample.base_ns.store(estimated_base_ns,
                                         std::memory_order_relaxed);
    time_state.last_sample.base_cycles.store(now_cycles,
                                             std::memory_order_relaxed);
  } else {
    // have a sample, but no slope; waiting for enough time for a calibration
    time_state.stats_slow_paths++;
  }

  SeqRelease(&time_state.seq, lock_value);  // release the readers

  return estimated_base_ns;
}
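
// Editor's note (worked numbers, not in the original): the damping above
// corrects 15/16 of the current error per sample interval. If the estimate
// trails the kernel by diff_ns = 16,000,000 ns, the new slope is chosen so
// the next ~2.1 s segment gains kMinNSBetweenSamples + 15,000,000 ns rather
// than the full 16,000,000 ns, trading a little residual error for stability
// of the estimated frequency.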
ABSL_NAMESPACE_END
}  // namespace absl
#endif  // ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {

// Returns the maximum duration that SleepOnce() can sleep for.
constexpr absl::Duration MaxSleep() {
#ifdef _WIN32
  // Windows Sleep() takes an unsigned long argument in milliseconds.
  return absl::Milliseconds(
      std::numeric_limits<unsigned long>::max());  // NOLINT(runtime/int)
#else
  return absl::Seconds(std::numeric_limits<time_t>::max());
#endif
}

// Sleeps for the given duration.
// REQUIRES: to_sleep <= MaxSleep().
void SleepOnce(absl::Duration to_sleep) {
#ifdef _WIN32
  Sleep(static_cast<DWORD>(to_sleep / absl::Milliseconds(1)));
#else
  struct timespec sleep_time = absl::ToTimespec(to_sleep);
  while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {
    // Ignore signals and wait for the full interval to elapse.
  }
#endif
}

}  // namespace
ABSL_NAMESPACE_END
}  // namespace absl

extern "C" {

ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(
    absl::Duration duration) {
  while (duration > absl::ZeroDuration()) {
    absl::Duration to_sleep = std::min(duration, absl::MaxSleep());
    absl::SleepOnce(to_sleep);
    duration -= to_sleep;
  }
}

}  // extern "C"
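
// Editor's note (illustrative, not part of the original file): callers do not
// normally invoke AbslInternalSleepFor() directly. absl::SleepFor(), declared
// in absl/time/clock.h, forwards to this weak symbol, which tests may
// override. A typical use:
//
//   #include "absl/time/clock.h"
//   #include "absl/time/time.h"
//
//   absl::SleepFor(absl::Milliseconds(250));
//
// The loop above splits very long durations into MaxSleep()-sized chunks so
// each underlying Sleep()/nanosleep() call stays within its argument's range,
// and the nanosleep() loop in SleepOnce() resumes after EINTR so signals do
// not shorten the sleep.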