tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

receive_time_calculator_unittest.cc (8854B)


      1 /*
      2 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
      3 *
      4 *  Use of this source code is governed by a BSD-style license
      5 *  that can be found in the LICENSE file in the root of the source
      6 *  tree. An additional intellectual property rights grant can be found
      7 *  in the file PATENTS.  All contributing project authors may
      8 *  be found in the AUTHORS file in the root of the source tree.
      9 */
     10 
     11 #include "call/receive_time_calculator.h"
     12 
     13 #include <algorithm>
     14 #include <cmath>
     15 #include <cstdint>
     16 #include <cstdlib>
     17 #include <optional>
     18 #include <vector>
     19 
     20 #include "api/field_trials.h"
     21 #include "rtc_base/random.h"
     22 #include "rtc_base/time_utils.h"
     23 #include "test/create_test_field_trials.h"
     24 #include "test/gtest.h"
     25 
     26 namespace webrtc {
     27 namespace test {
     28 namespace {
     29 
     30 class EmulatedClock {
     31 public:
     32  explicit EmulatedClock(int seed, float drift = 0.0f)
     33      : random_(seed), clock_us_(random_.Rand<uint32_t>()), drift_(drift) {}
     34  virtual ~EmulatedClock() = default;
     35  int64_t GetClockUs() const { return clock_us_; }
     36 
     37 protected:
     38  int64_t UpdateClock(int64_t time_us) {
     39    if (!last_query_us_)
     40      last_query_us_ = time_us;
     41    int64_t skip_us = time_us - *last_query_us_;
     42    accumulated_drift_us_ += skip_us * drift_;
     43    int64_t drift_correction_us = static_cast<int64_t>(accumulated_drift_us_);
     44    accumulated_drift_us_ -= drift_correction_us;
     45    clock_us_ += skip_us + drift_correction_us;
     46    last_query_us_ = time_us;
     47    return skip_us;
     48  }
     49  Random random_;
     50 
     51 private:
     52  int64_t clock_us_;
     53  std::optional<int64_t> last_query_us_;
     54  float drift_;
     55  float accumulated_drift_us_ = 0;
     56 };
     57 
     58 class EmulatedMonotoneousClock : public EmulatedClock {
     59 public:
     60  explicit EmulatedMonotoneousClock(int seed) : EmulatedClock(seed) {}
     61  ~EmulatedMonotoneousClock() override = default;
     62 
     63  int64_t Query(int64_t time_us) {
     64    int64_t skip_us = UpdateClock(time_us);
     65 
     66    // In a stall
     67    if (stall_recovery_time_us_ > 0) {
     68      if (GetClockUs() > stall_recovery_time_us_) {
     69        stall_recovery_time_us_ = 0;
     70        return GetClockUs();
     71      } else {
     72        return stall_recovery_time_us_;
     73      }
     74    }
     75 
     76    // Check if we enter a stall
     77    for (int k = 0; k < skip_us; ++k) {
     78      if (random_.Rand<double>() < kChanceOfStallPerUs) {
     79        int64_t stall_duration_us =
     80            static_cast<int64_t>(random_.Rand<float>() * kMaxStallDurationUs);
     81        stall_recovery_time_us_ = GetClockUs() + stall_duration_us;
     82        return stall_recovery_time_us_;
     83      }
     84    }
     85    return GetClockUs();
     86  }
     87 
     88  void ForceStallUs() {
     89    int64_t stall_duration_us =
     90        static_cast<int64_t>(random_.Rand<float>() * kMaxStallDurationUs);
     91    stall_recovery_time_us_ = GetClockUs() + stall_duration_us;
     92  }
     93 
     94  bool Stalled() const { return stall_recovery_time_us_ > 0; }
     95 
     96  int64_t GetRemainingStall(int64_t /* time_us */) const {
     97    return stall_recovery_time_us_ > 0 ? stall_recovery_time_us_ - GetClockUs()
     98                                       : 0;
     99  }
    100 
    101  const int64_t kMaxStallDurationUs = kNumMicrosecsPerSec;
    102 
    103 private:
    104  const float kChanceOfStallPerUs = 5e-6f;
    105  int64_t stall_recovery_time_us_ = 0;
    106 };
    107 
// Emulation of a clock that may jump ("reset") forwards or backwards — e.g. a
// system clock under NTP adjustment — layered on the drifting EmulatedClock.
// The whole trace is pregenerated at construction so Query() is a lookup.
class EmulatedNonMonotoneousClock : public EmulatedClock {
public:
 EmulatedNonMonotoneousClock(int seed, int64_t duration_us, float drift = 0)
     : EmulatedClock(seed, drift) {
   Pregenerate(duration_us);
 }
 ~EmulatedNonMonotoneousClock() override = default;

 // Precomputes clock readings (and any reset applied at each step) for
 // `duration_us` of reference time, sampled every kResolutionUs.
 void Pregenerate(int64_t duration_us) {
   // Start "eligible" so a reset may occur right at the beginning of the
   // trace; resets are then rate-limited by kMinTimeBetweenResetsUs.
   int64_t time_since_reset_us = kMinTimeBetweenResetsUs;
   int64_t clock_offset_us = 0;
   for (int64_t time_us = 0; time_us < duration_us; time_us += kResolutionUs) {
     int64_t skip_us = UpdateClock(time_us);
     time_since_reset_us += skip_us;
     int64_t reset_us = 0;
     if (time_since_reset_us >= kMinTimeBetweenResetsUs) {
       // One reset chance per elapsed microsecond of this step.
       for (int k = 0; k < skip_us; ++k) {
         if (random_.Rand<double>() < kChanceOfResetPerUs) {
           // Jump uniformly distributed in [-kMaxAbsResetUs, kMaxAbsResetUs].
           reset_us = static_cast<int64_t>(2 * random_.Rand<float>() *
                                           kMaxAbsResetUs) -
                      kMaxAbsResetUs;
           clock_offset_us += reset_us;
           time_since_reset_us = 0;
           break;
         }
       }
     }
     pregenerated_clock_.emplace_back(GetClockUs() + clock_offset_us);
     resets_us_.emplace_back(reset_us);
   }
 }

 // Returns the pregenerated reading nearest `time_us`, or -1 once the query
 // runs past the pregenerated trace. Also records the size of the last reset
 // (if any) that fell between the previous Query() and this one.
 int64_t Query(int64_t time_us) {
   // Round previous and current query times to trace indices and scan the
   // steps strictly after the last query up to (and including) the current.
   size_t ixStart =
       (last_reset_query_time_us_ + (kResolutionUs >> 1)) / kResolutionUs + 1;
   size_t ixEnd = (time_us + (kResolutionUs >> 1)) / kResolutionUs;
   if (ixEnd >= pregenerated_clock_.size())
     return -1;
   last_reset_size_us_ = 0;
   for (size_t ix = ixStart; ix <= ixEnd; ++ix) {
     if (resets_us_[ix] != 0) {
       // Keep the most recent nonzero reset within the scanned window.
       last_reset_size_us_ = resets_us_[ix];
     }
   }
   last_reset_query_time_us_ = time_us;
   return pregenerated_clock_[ixEnd];
 }

 // True when a reset occurred in the interval covered by the last Query().
 bool WasReset() const { return last_reset_size_us_ != 0; }
 bool WasNegativeReset() const { return last_reset_size_us_ < 0; }
 int64_t GetLastResetUs() const { return last_reset_size_us_; }

private:
 const float kChanceOfResetPerUs = 1e-6f;
 const int64_t kMaxAbsResetUs = kNumMicrosecsPerSec;
 const int64_t kMinTimeBetweenResetsUs = 3 * kNumMicrosecsPerSec;
 const int64_t kResolutionUs = kNumMicrosecsPerMillisec;
 int64_t last_reset_query_time_us_ = 0;
 int64_t last_reset_size_us_ = 0;
 std::vector<int64_t> pregenerated_clock_;
 std::vector<int64_t> resets_us_;
};
    170 
// Feeds ReceiveTimeCalculator a stream of packets timestamped by a stalling
// monotone clock and a resetting non-monotone clock (no drift), and checks
// that the reconciled receive time tracks true time within tolerances that
// account for monotone-clock noise and resets occurring during stalls.
TEST(ClockRepair, NoClockDrift) {
 FieldTrials field_trials = CreateTestFieldTrials();
 const int kSeeds = 10;
 const int kFirstSeed = 1;
 const int64_t kRuntimeUs = 10 * kNumMicrosecsPerSec;
 const float kDrift = 0.0f;
 const int64_t kMaxPacketInterarrivalUs = 50 * kNumMicrosecsPerMillisec;
 for (int seed = kFirstSeed; seed < kSeeds + kFirstSeed; ++seed) {
   EmulatedMonotoneousClock monotone_clock(seed);
   // Pregenerate slightly past kRuntimeUs so queries never run off the trace.
   EmulatedNonMonotoneousClock non_monotone_clock(
       seed + 1, kRuntimeUs + kNumMicrosecsPerSec, kDrift);
   ReceiveTimeCalculator reception_time_tracker(field_trials);
   int64_t corrected_clock_0 = 0;
   int64_t reset_during_stall_tol_us = 0;
   bool initial_clock_stall = true;
   int64_t accumulated_upper_bound_tolerance_us = 0;
   int64_t accumulated_lower_bound_tolerance_us = 0;
   Random random(1);
   // Begin in a stall to exercise the initial-clock-stall repair path.
   monotone_clock.ForceStallUs();
   int64_t last_time_us = 0;
   bool add_tolerance_on_next_packet = false;
   int64_t monotone_noise_us = 1000;

   // Packets arrive at random intervals in [0, kMaxPacketInterarrivalUs).
   for (int64_t time_us = 0; time_us < kRuntimeUs;
        time_us += static_cast<int64_t>(random.Rand<float>() *
                                        kMaxPacketInterarrivalUs)) {
     int64_t socket_time_us = non_monotone_clock.Query(time_us);
     // Monotone timestamp carries uniform noise in +/- monotone_noise_us.
     int64_t monotone_us = monotone_clock.Query(time_us) +
                           2 * random.Rand<float>() * monotone_noise_us -
                           monotone_noise_us;
     // System time is sampled when the stall (if any) has recovered.
     int64_t system_time_us = non_monotone_clock.Query(
         time_us + monotone_clock.GetRemainingStall(time_us));

     int64_t corrected_clock_us = reception_time_tracker.ReconcileReceiveTimes(
         socket_time_us, system_time_us, monotone_us);
     if (time_us == 0)
       corrected_clock_0 = corrected_clock_us;

     if (add_tolerance_on_next_packet)
       accumulated_lower_bound_tolerance_us -= (time_us - last_time_us);

     // Perfect repair cannot be achieved if non-monotone clock resets during
     // a monotone clock stall.
     add_tolerance_on_next_packet = false;
     if (monotone_clock.Stalled() && non_monotone_clock.WasReset()) {
       reset_during_stall_tol_us =
           std::max(reset_during_stall_tol_us, time_us - last_time_us);
       if (non_monotone_clock.WasNegativeReset()) {
         add_tolerance_on_next_packet = true;
       }
       if (initial_clock_stall && !non_monotone_clock.WasNegativeReset()) {
         // Positive resets during an initial clock stall cannot be repaired
         // and error will propagate through rest of trace.
         accumulated_upper_bound_tolerance_us +=
             std::abs(non_monotone_clock.GetLastResetUs());
       }
     } else {
       reset_during_stall_tol_us = 0;
       initial_clock_stall = false;
     }
     // Error of the repaired clock relative to ideal (first reading + true
     // elapsed time).
     int64_t err = corrected_clock_us - corrected_clock_0 - time_us;

     // Resets during stalls may lead to small errors temporarily.
     int64_t lower_tol_us = accumulated_lower_bound_tolerance_us -
                            reset_during_stall_tol_us - monotone_noise_us -
                            2 * kNumMicrosecsPerMillisec;
     EXPECT_GE(err, lower_tol_us);
     int64_t upper_tol_us = accumulated_upper_bound_tolerance_us +
                            monotone_noise_us + 2 * kNumMicrosecsPerMillisec;
     EXPECT_LE(err, upper_tol_us);

     last_time_us = time_us;
   }
 }
}
    246 }  // namespace
    247 }  // namespace test
    248 }  // namespace webrtc