tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

raw_hash_set_benchmark.cc (20194B)


      1 // Copyright 2018 The Abseil Authors.
      2 //
      3 // Licensed under the Apache License, Version 2.0 (the "License");
      4 // you may not use this file except in compliance with the License.
      5 // You may obtain a copy of the License at
      6 //
      7 //      https://www.apache.org/licenses/LICENSE-2.0
      8 //
      9 // Unless required by applicable law or agreed to in writing, software
     10 // distributed under the License is distributed on an "AS IS" BASIS,
     11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 // See the License for the specific language governing permissions and
     13 // limitations under the License.
     14 
#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <limits>
#include <numeric>
#include <random>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h"
#include "absl/container/internal/raw_hash_set.h"
#include "absl/random/random.h"
#include "absl/strings/str_format.h"
#include "benchmark/benchmark.h"
     35 
     36 namespace absl {
     37 ABSL_NAMESPACE_BEGIN
     38 namespace container_internal {
     39 
// Benchmark-only backdoor for reaching a raw_hash_set's slots_ member
// (presumably declared a friend by raw_hash_set — confirm in the header).
struct RawHashSetTestOnlyAccess {
  template <typename C>
  static auto GetSlots(const C& c) -> decltype(c.slots_) {
    return c.slots_;
  }
};
     46 
     47 namespace {
     48 
// Minimal raw_hash_set policy storing a bare int64_t per slot.  The stored
// value doubles as its own key: apply() passes the same x as key and value.
struct IntPolicy {
  using slot_type = int64_t;
  using key_type = int64_t;
  using init_type = int64_t;

  // Slots are trivially constructed, destroyed, and moved by assignment.
  static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
  static void destroy(void*, int64_t*) {}
  static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
    *new_slot = *old_slot;
  }

  static int64_t& element(slot_type* slot) { return *slot; }

  // Invokes f(key, value); for this policy both are the stored integer.
  template <class F>
  static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
    return std::forward<F>(f)(x, x);
  }

  // nullptr: no specialized hash-of-slot function; the table uses its
  // generic hashing path.
  template <class Hash>
  static constexpr HashSlotFn get_hash_slot_fn() {
    return nullptr;
  }
};
     72 
// raw_hash_set policy storing a std::pair<std::string, std::string>
// (key, value) per slot, constructed piecewise through an allocator.
class StringPolicy {
  // Unpacks the (tuple-of-key, value) pair produced by PairArgs() and calls
  // f(key_view, piecewise_construct, key_tuple, value).  Enabled only when
  // the key type converts to absl::string_view.
  template <class F, class K, class V,
            class = typename std::enable_if<
                std::is_convertible<const K&, absl::string_view>::value>::type>
  decltype(std::declval<F>()(
      std::declval<const absl::string_view&>(), std::piecewise_construct,
      std::declval<std::tuple<K>>(),
      std::declval<V>())) static apply_impl(F&& f,
                                            std::pair<std::tuple<K>, V> p) {
    const absl::string_view& key = std::get<0>(p.first);
    return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
                              std::move(p.second));
  }

 public:
  struct slot_type {
    // Tag type that disambiguates the forwarding constructor below from
    // copy/move construction.
    struct ctor {};

    template <class... Ts>
    slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}

    std::pair<std::string, std::string> pair;
  };

  using key_type = std::string;
  using init_type = std::pair<std::string, std::string>;

  // Constructs a slot in place via allocator_traits.
  template <class allocator_type, class... Args>
  static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
    std::allocator_traits<allocator_type>::construct(
        *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
  }

  template <class allocator_type>
  static void destroy(allocator_type* alloc, slot_type* slot) {
    std::allocator_traits<allocator_type>::destroy(*alloc, slot);
  }

  // Moves a slot to a new location: move-construct there, destroy the old.
  template <class allocator_type>
  static void transfer(allocator_type* alloc, slot_type* new_slot,
                       slot_type* old_slot) {
    construct(alloc, new_slot, std::move(old_slot->pair));
    destroy(alloc, old_slot);
  }

  static std::pair<std::string, std::string>& element(slot_type* slot) {
    return slot->pair;
  }

  // Normalizes arbitrary emplace-style arguments through PairArgs() and
  // dispatches to apply_impl above.
  template <class F, class... Args>
  static auto apply(F&& f, Args&&... args)
      -> decltype(apply_impl(std::forward<F>(f),
                             PairArgs(std::forward<Args>(args)...))) {
    return apply_impl(std::forward<F>(f),
                      PairArgs(std::forward<Args>(args)...));
  }

  // nullptr: no specialized hash-of-slot function; use the generic path.
  template <class Hash>
  static constexpr HashSlotFn get_hash_slot_fn() {
    return nullptr;
  }
};
    135 
// Transparent (heterogeneous-lookup) hash and equality functors so string
// tables can be probed with absl::string_view without materializing a
// std::string key.
struct StringHash : container_internal::hash_default_hash<absl::string_view> {
  using is_transparent = void;
};
struct StringEq : std::equal_to<absl::string_view> {
  using is_transparent = void;
};
    142 
// Benchmark table of std::string -> std::string built on StringPolicy.
struct StringTable
    : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
  using Base = typename StringTable::raw_hash_set;
  StringTable() {}
  using Base::Base;
};
    149 
// Benchmark table of int64_t built on IntPolicy.
struct IntTable
    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
                   std::equal_to<int64_t>, std::allocator<int64_t>> {
  using Base = typename IntTable::raw_hash_set;
  IntTable() {}
  using Base::Base;
};
    157 
    158 struct string_generator {
    159  template <class RNG>
    160  std::string operator()(RNG& rng) const {
    161    std::string res;
    162    res.resize(size);
    163    std::uniform_int_distribution<uint32_t> printable_ascii(0x20, 0x7E);
    164    std::generate(res.begin(), res.end(), [&] { return printable_ascii(rng); });
    165    return res;
    166  }
    167 
    168  size_t size;
    169 };
    170 
    171 // Model a cache in steady state.
    172 //
    173 // On a table of size N, keep deleting the LRU entry and add a random one.
    174 void BM_CacheInSteadyState(benchmark::State& state) {
    175  std::random_device rd;
    176  std::mt19937 rng(rd());
    177  string_generator gen{12};
    178  StringTable t;
    179  std::deque<std::string> keys;
    180  while (t.size() < state.range(0)) {
    181    auto x = t.emplace(gen(rng), gen(rng));
    182    if (x.second) keys.push_back(x.first->first);
    183  }
    184  ABSL_RAW_CHECK(state.range(0) >= 10, "");
    185  while (state.KeepRunning()) {
    186    // Some cache hits.
    187    std::deque<std::string>::const_iterator it;
    188    for (int i = 0; i != 90; ++i) {
    189      if (i % 10 == 0) it = keys.end();
    190      ::benchmark::DoNotOptimize(t.find(*--it));
    191    }
    192    // Some cache misses.
    193    for (int i = 0; i != 10; ++i) ::benchmark::DoNotOptimize(t.find(gen(rng)));
    194    ABSL_RAW_CHECK(t.erase(keys.front()), keys.front().c_str());
    195    keys.pop_front();
    196    while (true) {
    197      auto x = t.emplace(gen(rng), gen(rng));
    198      if (x.second) {
    199        keys.push_back(x.first->first);
    200        break;
    201      }
    202    }
    203  }
    204  state.SetItemsProcessed(state.iterations());
    205  state.SetLabel(absl::StrFormat("load_factor=%.2f", t.load_factor()));
    206 }
    207 
    208 template <typename Benchmark>
    209 void CacheInSteadyStateArgs(Benchmark* bm) {
    210  // The default.
    211  const float max_load_factor = 0.875;
    212  // When the cache is at the steady state, the probe sequence will equal
    213  // capacity if there is no reclamation of deleted slots. Pick a number large
    214  // enough to make the benchmark slow for that case.
    215  const size_t capacity = 1 << 10;
    216 
    217  // Check N data points to cover load factors in [0.4, 0.8).
    218  const size_t kNumPoints = 10;
    219  for (size_t i = 0; i != kNumPoints; ++i)
    220    bm->Arg(std::ceil(
    221        capacity * (max_load_factor + i * max_load_factor / kNumPoints) / 2));
    222 }
// Register BM_CacheInSteadyState over the load-factor sweep defined above.
BENCHMARK(BM_CacheInSteadyState)->Apply(CacheInSteadyStateArgs);
    224 
    225 void BM_EraseEmplace(benchmark::State& state) {
    226  IntTable t;
    227  int64_t size = state.range(0);
    228  for (int64_t i = 0; i < size; ++i) {
    229    t.emplace(i);
    230  }
    231  while (state.KeepRunningBatch(size)) {
    232    for (int64_t i = 0; i < size; ++i) {
    233      benchmark::DoNotOptimize(t);
    234      t.erase(i);
    235      t.emplace(i);
    236    }
    237  }
    238 }
    239 BENCHMARK(BM_EraseEmplace)->Arg(1)->Arg(2)->Arg(4)->Arg(8)->Arg(16)->Arg(100);
    240 
    241 void BM_EndComparison(benchmark::State& state) {
    242  StringTable t = {{"a", "a"}, {"b", "b"}};
    243  auto it = t.begin();
    244  for (auto i : state) {
    245    benchmark::DoNotOptimize(t);
    246    benchmark::DoNotOptimize(it);
    247    benchmark::DoNotOptimize(it != t.end());
    248  }
    249 }
    250 BENCHMARK(BM_EndComparison);
    251 
// Measures a full pass over a table built with an explicit
// (capacity, size) = (range(0), range(1)) pair.  The argument list below
// also covers empty and sparse tables, where iteration must skip over many
// vacant slots.
void BM_Iteration(benchmark::State& state) {
  std::random_device rd;
  std::mt19937 rng(rd());
  string_generator gen{12};
  StringTable t;

  size_t capacity = state.range(0);
  size_t size = state.range(1);
  // Reserve up front so capacity and size can be controlled independently.
  t.reserve(capacity);

  while (t.size() < size) {
    t.emplace(gen(rng), gen(rng));
  }

  for (auto i : state) {
    benchmark::DoNotOptimize(t);
    for (auto it = t.begin(); it != t.end(); ++it) {
      benchmark::DoNotOptimize(*it);
    }
  }
}

BENCHMARK(BM_Iteration)
    ->ArgPair(1, 1)
    ->ArgPair(2, 2)
    ->ArgPair(4, 4)
    ->ArgPair(7, 7)
    ->ArgPair(10, 10)
    ->ArgPair(15, 15)
    ->ArgPair(16, 16)
    ->ArgPair(54, 54)
    ->ArgPair(100, 100)
    ->ArgPair(400, 400)
    // empty
    ->ArgPair(0, 0)
    ->ArgPair(10, 0)
    ->ArgPair(100, 0)
    ->ArgPair(1000, 0)
    ->ArgPair(10000, 0)
    // sparse
    ->ArgPair(100, 1)
    ->ArgPair(1000, 10);
    294 
    295 void BM_CopyCtorSparseInt(benchmark::State& state) {
    296  std::random_device rd;
    297  std::mt19937 rng(rd());
    298  IntTable t;
    299  std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
    300 
    301  size_t size = state.range(0);
    302  t.reserve(size * 10);
    303  while (t.size() < size) {
    304    t.emplace(dist(rng));
    305  }
    306 
    307  for (auto i : state) {
    308    IntTable t2 = t;
    309    benchmark::DoNotOptimize(t2);
    310  }
    311 }
    312 BENCHMARK(BM_CopyCtorSparseInt)->Range(1, 4096);
    313 
    314 void BM_CopyCtorInt(benchmark::State& state) {
    315  std::random_device rd;
    316  std::mt19937 rng(rd());
    317  IntTable t;
    318  std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
    319 
    320  size_t size = state.range(0);
    321  while (t.size() < size) {
    322    t.emplace(dist(rng));
    323  }
    324 
    325  for (auto i : state) {
    326    IntTable t2 = t;
    327    benchmark::DoNotOptimize(t2);
    328  }
    329 }
    330 BENCHMARK(BM_CopyCtorInt)->Range(0, 4096);
    331 
    332 void BM_CopyCtorString(benchmark::State& state) {
    333  std::random_device rd;
    334  std::mt19937 rng(rd());
    335  StringTable t;
    336  std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
    337 
    338  size_t size = state.range(0);
    339  while (t.size() < size) {
    340    t.emplace(std::to_string(dist(rng)), std::to_string(dist(rng)));
    341  }
    342 
    343  for (auto i : state) {
    344    StringTable t2 = t;
    345    benchmark::DoNotOptimize(t2);
    346  }
    347 }
    348 BENCHMARK(BM_CopyCtorString)->Range(0, 4096);
    349 
    350 void BM_CopyAssign(benchmark::State& state) {
    351  std::random_device rd;
    352  std::mt19937 rng(rd());
    353  IntTable t;
    354  std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
    355  while (t.size() < state.range(0)) {
    356    t.emplace(dist(rng));
    357  }
    358 
    359  IntTable t2;
    360  for (auto _ : state) {
    361    t2 = t;
    362    benchmark::DoNotOptimize(t2);
    363  }
    364 }
    365 BENCHMARK(BM_CopyAssign)->Range(128, 4096);
    366 
    367 void BM_RangeCtor(benchmark::State& state) {
    368  std::random_device rd;
    369  std::mt19937 rng(rd());
    370  std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
    371  std::vector<int> values;
    372  const size_t desired_size = state.range(0);
    373  while (values.size() < desired_size) {
    374    values.emplace_back(dist(rng));
    375  }
    376 
    377  for (auto unused : state) {
    378    IntTable t{values.begin(), values.end()};
    379    benchmark::DoNotOptimize(t);
    380  }
    381 }
    382 BENCHMARK(BM_RangeCtor)->Range(128, 65536);
    383 
    384 void BM_NoOpReserveIntTable(benchmark::State& state) {
    385  IntTable t;
    386  t.reserve(100000);
    387  for (auto _ : state) {
    388    benchmark::DoNotOptimize(t);
    389    t.reserve(100000);
    390  }
    391 }
    392 BENCHMARK(BM_NoOpReserveIntTable);
    393 
    394 void BM_NoOpReserveStringTable(benchmark::State& state) {
    395  StringTable t;
    396  t.reserve(100000);
    397  for (auto _ : state) {
    398    benchmark::DoNotOptimize(t);
    399    t.reserve(100000);
    400  }
    401 }
    402 BENCHMARK(BM_NoOpReserveStringTable);
    403 
    404 void BM_ReserveIntTable(benchmark::State& state) {
    405  constexpr size_t kBatchSize = 1024;
    406  size_t reserve_size = static_cast<size_t>(state.range(0));
    407 
    408  std::vector<IntTable> tables;
    409  while (state.KeepRunningBatch(kBatchSize)) {
    410    state.PauseTiming();
    411    tables.clear();
    412    tables.resize(kBatchSize);
    413    state.ResumeTiming();
    414    for (auto& t : tables) {
    415      benchmark::DoNotOptimize(t);
    416      t.reserve(reserve_size);
    417      benchmark::DoNotOptimize(t);
    418    }
    419  }
    420 }
    421 BENCHMARK(BM_ReserveIntTable)
    422    ->Arg(1)
    423    ->Arg(2)
    424    ->Arg(4)
    425    ->Arg(8)
    426    ->Arg(16)
    427    ->Arg(32)
    428    ->Arg(64)
    429    ->Arg(128)
    430    ->Arg(256)
    431    ->Arg(512);
    432 
    433 void BM_ReserveStringTable(benchmark::State& state) {
    434  constexpr size_t kBatchSize = 1024;
    435  size_t reserve_size = static_cast<size_t>(state.range(0));
    436 
    437  std::vector<StringTable> tables;
    438  while (state.KeepRunningBatch(kBatchSize)) {
    439    state.PauseTiming();
    440    tables.clear();
    441    tables.resize(kBatchSize);
    442    state.ResumeTiming();
    443    for (auto& t : tables) {
    444      benchmark::DoNotOptimize(t);
    445      t.reserve(reserve_size);
    446      benchmark::DoNotOptimize(t);
    447    }
    448  }
    449 }
    450 BENCHMARK(BM_ReserveStringTable)
    451    ->Arg(1)
    452    ->Arg(2)
    453    ->Arg(4)
    454    ->Arg(8)
    455    ->Arg(16)
    456    ->Arg(32)
    457    ->Arg(64)
    458    ->Arg(128)
    459    ->Arg(256)
    460    ->Arg(512);
    461 
    462 // Like std::iota, except that ctrl_t doesn't support operator++.
    463 template <typename CtrlIter>
    464 void Iota(CtrlIter begin, CtrlIter end, int value) {
    465  for (; begin != end; ++begin, ++value) {
    466    *begin = static_cast<ctrl_t>(value);
    467  }
    468 }
    469 
    470 void BM_Group_Match(benchmark::State& state) {
    471  std::array<ctrl_t, Group::kWidth> group;
    472  Iota(group.begin(), group.end(), -4);
    473  Group g{group.data()};
    474  h2_t h = 1;
    475  for (auto _ : state) {
    476    ::benchmark::DoNotOptimize(h);
    477    ::benchmark::DoNotOptimize(g);
    478    ::benchmark::DoNotOptimize(g.Match(h));
    479  }
    480 }
    481 BENCHMARK(BM_Group_Match);
    482 
    483 void BM_GroupPortable_Match(benchmark::State& state) {
    484  std::array<ctrl_t, GroupPortableImpl::kWidth> group;
    485  Iota(group.begin(), group.end(), -4);
    486  GroupPortableImpl g{group.data()};
    487  h2_t h = 1;
    488  for (auto _ : state) {
    489    ::benchmark::DoNotOptimize(h);
    490    ::benchmark::DoNotOptimize(g);
    491    ::benchmark::DoNotOptimize(g.Match(h));
    492  }
    493 }
    494 BENCHMARK(BM_GroupPortable_Match);
    495 
    496 void BM_Group_MaskEmpty(benchmark::State& state) {
    497  std::array<ctrl_t, Group::kWidth> group;
    498  Iota(group.begin(), group.end(), -4);
    499  Group g{group.data()};
    500  for (auto _ : state) {
    501    ::benchmark::DoNotOptimize(g);
    502    ::benchmark::DoNotOptimize(g.MaskEmpty());
    503  }
    504 }
    505 BENCHMARK(BM_Group_MaskEmpty);
    506 
    507 void BM_Group_MaskEmptyOrDeleted(benchmark::State& state) {
    508  std::array<ctrl_t, Group::kWidth> group;
    509  Iota(group.begin(), group.end(), -4);
    510  Group g{group.data()};
    511  for (auto _ : state) {
    512    ::benchmark::DoNotOptimize(g);
    513    ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted());
    514  }
    515 }
    516 BENCHMARK(BM_Group_MaskEmptyOrDeleted);
    517 
    518 void BM_Group_MaskNonFull(benchmark::State& state) {
    519  std::array<ctrl_t, Group::kWidth> group;
    520  Iota(group.begin(), group.end(), -4);
    521  Group g{group.data()};
    522  for (auto _ : state) {
    523    ::benchmark::DoNotOptimize(g);
    524    ::benchmark::DoNotOptimize(g.MaskNonFull());
    525  }
    526 }
    527 BENCHMARK(BM_Group_MaskNonFull);
    528 
    529 void BM_Group_CountLeadingEmptyOrDeleted(benchmark::State& state) {
    530  std::array<ctrl_t, Group::kWidth> group;
    531  Iota(group.begin(), group.end(), -2);
    532  Group g{group.data()};
    533  for (auto _ : state) {
    534    ::benchmark::DoNotOptimize(g);
    535    ::benchmark::DoNotOptimize(g.CountLeadingEmptyOrDeleted());
    536  }
    537 }
    538 BENCHMARK(BM_Group_CountLeadingEmptyOrDeleted);
    539 
    540 void BM_Group_MatchFirstEmptyOrDeleted(benchmark::State& state) {
    541  std::array<ctrl_t, Group::kWidth> group;
    542  Iota(group.begin(), group.end(), -2);
    543  Group g{group.data()};
    544  for (auto _ : state) {
    545    ::benchmark::DoNotOptimize(g);
    546    ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted().LowestBitSet());
    547  }
    548 }
    549 BENCHMARK(BM_Group_MatchFirstEmptyOrDeleted);
    550 
    551 void BM_Group_MatchFirstNonFull(benchmark::State& state) {
    552  std::array<ctrl_t, Group::kWidth> group;
    553  Iota(group.begin(), group.end(), -2);
    554  Group g{group.data()};
    555  for (auto _ : state) {
    556    ::benchmark::DoNotOptimize(g);
    557    ::benchmark::DoNotOptimize(g.MaskNonFull().LowestBitSet());
    558  }
    559 }
    560 BENCHMARK(BM_Group_MatchFirstNonFull);
    561 
// Measures ConvertDeletedToEmptyAndFullToDeleted over a large control array
// filled with a repeating empty/full/deleted pattern.
void BM_DropDeletes(benchmark::State& state) {
  constexpr size_t capacity = (1 << 20) - 1;
  // capacity control bytes plus the sentinel plus a trailing group's worth.
  std::vector<ctrl_t> ctrl(capacity + 1 + Group::kWidth);
  ctrl[capacity] = ctrl_t::kSentinel;
  // Mixed pattern of empty, full (small H2 values), and deleted slots.
  std::vector<ctrl_t> pattern = {ctrl_t::kEmpty,   static_cast<ctrl_t>(2),
                                 ctrl_t::kDeleted, static_cast<ctrl_t>(2),
                                 ctrl_t::kEmpty,   static_cast<ctrl_t>(1),
                                 ctrl_t::kDeleted};
  for (size_t i = 0; i != capacity; ++i) {
    ctrl[i] = pattern[i % pattern.size()];
  }
  while (state.KeepRunning()) {
    // The conversion mutates the array, so work on a fresh copy each
    // iteration; the copy itself is excluded from the timed region.
    state.PauseTiming();
    std::vector<ctrl_t> ctrl_copy = ctrl;
    state.ResumeTiming();
    ConvertDeletedToEmptyAndFullToDeleted(ctrl_copy.data(), capacity);
    ::benchmark::DoNotOptimize(ctrl_copy[capacity]);
  }
}
BENCHMARK(BM_DropDeletes);
    582 
    583 void BM_Resize(benchmark::State& state) {
    584  // For now just measure a small cheap hash table since we
    585  // are mostly interested in the overhead of type-erasure
    586  // in resize().
    587  constexpr int kElements = 64;
    588  const int kCapacity = kElements * 2;
    589 
    590  IntTable table;
    591  for (int i = 0; i < kElements; i++) {
    592    table.insert(i);
    593  }
    594  for (auto unused : state) {
    595    table.rehash(0);
    596    table.rehash(kCapacity);
    597  }
    598 }
    599 BENCHMARK(BM_Resize);
    600 
// Measures EraseIf removing the `num_erased` smallest keys from tables of
// `num_elements` random keys.  kRepetitions independent tables are prepared
// and cycled through per batch; erased keys are re-inserted with timing
// paused so every batch starts from the same state.
void BM_EraseIf(benchmark::State& state) {
  int64_t num_elements = state.range(0);
  size_t num_erased = static_cast<size_t>(state.range(1));

  constexpr size_t kRepetitions = 64;

  absl::BitGen rng;

  std::vector<std::vector<int64_t>> keys(kRepetitions);
  std::vector<IntTable> tables;
  std::vector<int64_t> threshold;
  for (auto& k : keys) {
    tables.push_back(IntTable());
    auto& table = tables.back();
    for (int64_t i = 0; i < num_elements; i++) {
      // We use random keys to reduce noise.
      k.push_back(
          absl::Uniform<int64_t>(rng, 0, std::numeric_limits<int64_t>::max()));
      if (!table.insert(k.back()).second) {
        k.pop_back();
        --i;  // duplicated value, retrying
      }
    }
    // After sorting, k[num_erased] is the smallest key that must survive;
    // erasing everything strictly below it removes exactly num_erased keys.
    // If all keys are to be erased, use a threshold above every key.
    std::sort(k.begin(), k.end());
    threshold.push_back(static_cast<int64_t>(num_erased) < num_elements
                            ? k[num_erased]
                            : std::numeric_limits<int64_t>::max());
  }

  // Each batch accounts for one pass over all repetitions; max(.., 1)
  // avoids a zero-sized batch when num_elements is 0.
  while (state.KeepRunningBatch(static_cast<int64_t>(kRepetitions) *
                                std::max(num_elements, int64_t{1}))) {
    benchmark::DoNotOptimize(tables);
    for (size_t t_id = 0; t_id < kRepetitions; t_id++) {
      auto& table = tables[t_id];
      benchmark::DoNotOptimize(num_erased);
      auto pred = [t = threshold[t_id]](int64_t key) { return key < t; };
      benchmark::DoNotOptimize(pred);
      benchmark::DoNotOptimize(table);
      absl::container_internal::EraseIf(pred, &table);
    }
    // Restore the erased keys outside of the timed region.
    state.PauseTiming();
    for (size_t t_id = 0; t_id < kRepetitions; t_id++) {
      auto& k = keys[t_id];
      auto& table = tables[t_id];
      for (size_t i = 0; i < num_erased; i++) {
        table.insert(k[i]);
      }
    }
    state.ResumeTiming();
  }
}

BENCHMARK(BM_EraseIf)
    ->ArgNames({"num_elements", "num_erased"})
    ->ArgPair(10, 0)
    ->ArgPair(1000, 0)
    ->ArgPair(10, 5)
    ->ArgPair(1000, 500)
    ->ArgPair(10, 10)
    ->ArgPair(1000, 1000);
    661 
    662 }  // namespace
    663 }  // namespace container_internal
    664 ABSL_NAMESPACE_END
    665 }  // namespace absl
    666 
// These methods are here to make it easy to examine the assembly for targeted
// parts of the API.

// Codegen probe: a plain find() on an int table.
auto CodegenAbslRawHashSetInt64Find(absl::container_internal::IntTable* table,
                                    int64_t key) -> decltype(table->find(key)) {
  return table->find(key);
}
    673 
// Codegen probe: find() result compared against end() on an int table.
bool CodegenAbslRawHashSetInt64FindNeEnd(
    absl::container_internal::IntTable* table, int64_t key) {
  return table->find(key) != table->end();
}
    678 
// This is useful because the find isn't inlined but the iterator comparison is.
// Codegen probe: string-table find() result compared against end().
bool CodegenAbslRawHashSetStringFindNeEnd(
    absl::container_internal::StringTable* table, const std::string& key) {
  return table->find(key) != table->end();
}
    684 
// Codegen probe: insert() on an int table.
auto CodegenAbslRawHashSetInt64Insert(absl::container_internal::IntTable* table,
                                      int64_t key)
    -> decltype(table->insert(key)) {
  return table->insert(key);
}
    690 
// Codegen probe: contains() on an int table.
bool CodegenAbslRawHashSetInt64Contains(
    absl::container_internal::IntTable* table, int64_t key) {
  return table->contains(key);
}
    695 
// Codegen probe: full iteration over an int table.
void CodegenAbslRawHashSetInt64Iterate(
    absl::container_internal::IntTable* table) {
  for (auto x : *table) benchmark::DoNotOptimize(x);
}
    700 
// Takes the address of every Codegen* probe above so they are ODR-used and
// emitted by the compiler; DoNotOptimize keeps the tuple of pointers from
// being discarded.  The comma operator makes the initializer yield 1.
int odr =
    (::benchmark::DoNotOptimize(std::make_tuple(
         &CodegenAbslRawHashSetInt64Find, &CodegenAbslRawHashSetInt64FindNeEnd,
         &CodegenAbslRawHashSetStringFindNeEnd,
         &CodegenAbslRawHashSetInt64Insert, &CodegenAbslRawHashSetInt64Contains,
         &CodegenAbslRawHashSetInt64Iterate)),
     1);