tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

raw_hash_set_test.cc (134766B)


      1 // Copyright 2018 The Abseil Authors.
      2 //
      3 // Licensed under the Apache License, Version 2.0 (the "License");
      4 // you may not use this file except in compliance with the License.
      5 // You may obtain a copy of the License at
      6 //
      7 //      https://www.apache.org/licenses/LICENSE-2.0
      8 //
      9 // Unless required by applicable law or agreed to in writing, software
     10 // distributed under the License is distributed on an "AS IS" BASIS,
     11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 // See the License for the specific language governing permissions and
     13 // limitations under the License.
     14 
     15 #include "absl/container/internal/raw_hash_set.h"
     16 
     17 #include <sys/types.h>
     18 
     19 #include <algorithm>
     20 #include <array>
     21 #include <atomic>
     22 #include <bitset>
     23 #include <cmath>
     24 #include <cstddef>
     25 #include <cstdint>
     26 #include <cstring>
     27 #include <deque>
     28 #include <functional>
     29 #include <iostream>
     30 #include <iterator>
     31 #include <limits>
     32 #include <list>
     33 #include <map>
     34 #include <memory>
     35 #include <numeric>
     36 #include <ostream>
     37 #include <random>
     38 #include <set>
     39 #include <string>
     40 #include <tuple>
     41 #include <type_traits>
     42 #include <unordered_map>
     43 #include <unordered_set>
     44 #include <utility>
     45 #include <vector>
     46 
     47 #include "gmock/gmock.h"
     48 #include "gtest/gtest.h"
     49 #include "absl/base/attributes.h"
     50 #include "absl/base/config.h"
     51 #include "absl/base/internal/cycleclock.h"
     52 #include "absl/base/prefetch.h"
     53 #include "absl/container/flat_hash_map.h"
     54 #include "absl/container/flat_hash_set.h"
     55 #include "absl/container/internal/container_memory.h"
     56 #include "absl/container/internal/hash_function_defaults.h"
     57 #include "absl/container/internal/hash_policy_testing.h"
     58 // TODO(b/382423690): Separate tests that depend only on
     59 // hashtable_control_bytes.
     60 #include "absl/container/internal/hashtable_control_bytes.h"
     61 #include "absl/container/internal/hashtable_debug.h"
     62 #include "absl/container/internal/hashtablez_sampler.h"
     63 #include "absl/container/internal/test_allocator.h"
     64 #include "absl/container/internal/test_instance_tracker.h"
     65 #include "absl/container/node_hash_set.h"
     66 #include "absl/functional/function_ref.h"
     67 #include "absl/hash/hash.h"
     68 #include "absl/log/check.h"
     69 #include "absl/log/log.h"
     70 #include "absl/memory/memory.h"
     71 #include "absl/meta/type_traits.h"
     72 #include "absl/numeric/int128.h"
     73 #include "absl/strings/str_cat.h"
     74 #include "absl/strings/string_view.h"
     75 #include "absl/types/optional.h"
     76 
     77 namespace absl {
     78 ABSL_NAMESPACE_BEGIN
     79 namespace container_internal {
     80 
// Test-only friend of raw_hash_set: exposes internal state (common metadata,
// slot array, tombstone count) to the tests below without widening the
// container's public API.
struct RawHashSetTestOnlyAccess {
  // Returns the table's common metadata block (whatever c.common() yields).
  template <typename C>
  static auto GetCommon(const C& c) -> decltype(c.common()) {
    return c.common();
  }
  // Returns the table's slot array (whatever c.slot_array() yields).
  template <typename C>
  static auto GetSlots(const C& c) -> decltype(c.slot_array()) {
    return c.slot_array();
  }
  // Returns the tombstone count as reported by the table's common metadata.
  template <typename C>
  static size_t CountTombstones(const C& c) {
    return c.common().TombstonesCount();
  }
};
     95 
     96 namespace {
     97 
     98 using ::testing::ElementsAre;
     99 using ::testing::ElementsAreArray;
    100 using ::testing::Eq;
    101 using ::testing::Ge;
    102 using ::testing::Lt;
    103 using ::testing::Pair;
    104 using ::testing::UnorderedElementsAre;
    105 using ::testing::UnorderedElementsAreArray;
    106 
// Convenience function to static cast to ctrl_t.
// Lets tests write CtrlT(5) instead of static_cast<ctrl_t>(5) when building
// control-byte arrays by hand.
ctrl_t CtrlT(int i) { return static_cast<ctrl_t>(i); }
    109 
// Enables sampling with 1 percent sampling rate and
// resets the rate counter for the current thread.
void SetSamplingRateTo1Percent() {
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(100);  // Sample ~1% of tables.
  // Reset rate counter for the current thread so the new parameter takes
  // effect immediately rather than after the old countdown expires.
  TestOnlyRefreshSamplingStateForCurrentThread();
}
    118 
// Disables sampling and resets the rate counter for the current thread.
// The large sample parameter (1 << 16) is belt-and-braces on top of
// SetHashtablezEnabled(false).
void DisableSampling() {
  SetHashtablezEnabled(false);
  SetHashtablezSampleParameter(1 << 16);
  // Reset rate counter for the current thread.
  TestOnlyRefreshSamplingStateForCurrentThread();
}
    126 
// GetGrowthLeft reports the initialized growth budget, and marking a full
// slot as deleted does not change it.
TEST(GrowthInfoTest, GetGrowthLeft) {
  GrowthInfo gi;
  gi.InitGrowthLeftNoDeleted(5);
  EXPECT_EQ(gi.GetGrowthLeft(), 5);
  gi.OverwriteFullAsDeleted();
  EXPECT_EQ(gi.GetGrowthLeft(), 5);
}
    134 
// HasNoDeleted flips to false once any full slot is marked deleted, and is
// restored only by reinitialization.
TEST(GrowthInfoTest, HasNoDeleted) {
  GrowthInfo gi;
  gi.InitGrowthLeftNoDeleted(5);
  EXPECT_TRUE(gi.HasNoDeleted());
  gi.OverwriteFullAsDeleted();
  EXPECT_FALSE(gi.HasNoDeleted());
  // After reinitialization we have no deleted slots.
  gi.InitGrowthLeftNoDeleted(5);
  EXPECT_TRUE(gi.HasNoDeleted());
}
    145 
// HasNoDeletedAndGrowthLeft requires both conditions: it is false when a
// deletion has been recorded OR when the growth budget is zero.
TEST(GrowthInfoTest, HasNoDeletedAndGrowthLeft) {
  GrowthInfo gi;
  gi.InitGrowthLeftNoDeleted(5);
  EXPECT_TRUE(gi.HasNoDeletedAndGrowthLeft());
  gi.OverwriteFullAsDeleted();
  EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
  gi.InitGrowthLeftNoDeleted(0);
  EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
  gi.OverwriteFullAsDeleted();
  EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
  // After reinitialization we have no deleted slots.
  gi.InitGrowthLeftNoDeleted(5);
  EXPECT_TRUE(gi.HasNoDeletedAndGrowthLeft());
}
    160 
// HasNoGrowthLeftAndNoDeleted is true only when growth is exhausted and no
// deletion has been recorded; any deletion or freed slot makes it false.
TEST(GrowthInfoTest, HasNoGrowthLeftAndNoDeleted) {
  GrowthInfo gi;
  gi.InitGrowthLeftNoDeleted(1);
  EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
  gi.OverwriteEmptyAsFull();
  EXPECT_TRUE(gi.HasNoGrowthLeftAndNoDeleted());
  gi.OverwriteFullAsDeleted();
  EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
  gi.OverwriteFullAsEmpty();
  EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
  gi.InitGrowthLeftNoDeleted(0);
  EXPECT_TRUE(gi.HasNoGrowthLeftAndNoDeleted());
  gi.OverwriteFullAsEmpty();
  EXPECT_FALSE(gi.HasNoGrowthLeftAndNoDeleted());
}
    176 
// Freeing a full slot increments the growth budget; marking a full slot as
// deleted leaves the budget unchanged but sets the deleted flag.
TEST(GrowthInfoTest, OverwriteFullAsEmpty) {
  GrowthInfo gi;
  gi.InitGrowthLeftNoDeleted(5);
  gi.OverwriteFullAsEmpty();
  EXPECT_EQ(gi.GetGrowthLeft(), 6);
  gi.OverwriteFullAsDeleted();
  EXPECT_EQ(gi.GetGrowthLeft(), 6);
  gi.OverwriteFullAsEmpty();
  EXPECT_EQ(gi.GetGrowthLeft(), 7);
  EXPECT_FALSE(gi.HasNoDeleted());
}
    188 
// Filling an empty slot decrements the growth budget; a recorded deletion
// does not affect the budget but sets the deleted flag.
TEST(GrowthInfoTest, OverwriteEmptyAsFull) {
  GrowthInfo gi;
  gi.InitGrowthLeftNoDeleted(5);
  gi.OverwriteEmptyAsFull();
  EXPECT_EQ(gi.GetGrowthLeft(), 4);
  gi.OverwriteFullAsDeleted();
  EXPECT_EQ(gi.GetGrowthLeft(), 4);
  gi.OverwriteEmptyAsFull();
  EXPECT_EQ(gi.GetGrowthLeft(), 3);
  EXPECT_FALSE(gi.HasNoDeleted());
}
    200 
// OverwriteControlAsFull decrements the budget only when overwriting kEmpty,
// not kDeleted; overwriting a deleted slot does not clear the deleted flag.
TEST(GrowthInfoTest, OverwriteControlAsFull) {
  GrowthInfo gi;
  gi.InitGrowthLeftNoDeleted(5);
  gi.OverwriteControlAsFull(ctrl_t::kEmpty);
  EXPECT_EQ(gi.GetGrowthLeft(), 4);
  gi.OverwriteControlAsFull(ctrl_t::kDeleted);
  EXPECT_EQ(gi.GetGrowthLeft(), 4);
  gi.OverwriteFullAsDeleted();
  gi.OverwriteControlAsFull(ctrl_t::kDeleted);
  // We do not count number of deleted, so the bit sticks till the next rehash.
  EXPECT_FALSE(gi.HasNoDeletedAndGrowthLeft());
  EXPECT_FALSE(gi.HasNoDeleted());
}
    214 
// HasNoGrowthLeftAssumingMayHaveDeleted tracks only the numeric budget,
// ignoring the deleted flag entirely: it becomes true exactly when the
// budget reaches zero.
TEST(GrowthInfoTest, HasNoGrowthLeftAssumingMayHaveDeleted) {
  GrowthInfo gi;
  gi.InitGrowthLeftNoDeleted(1);
  gi.OverwriteFullAsDeleted();
  EXPECT_EQ(gi.GetGrowthLeft(), 1);
  EXPECT_FALSE(gi.HasNoGrowthLeftAssumingMayHaveDeleted());
  gi.OverwriteControlAsFull(ctrl_t::kDeleted);
  EXPECT_EQ(gi.GetGrowthLeft(), 1);
  EXPECT_FALSE(gi.HasNoGrowthLeftAssumingMayHaveDeleted());
  gi.OverwriteFullAsEmpty();
  EXPECT_EQ(gi.GetGrowthLeft(), 2);
  EXPECT_FALSE(gi.HasNoGrowthLeftAssumingMayHaveDeleted());
  gi.OverwriteEmptyAsFull();
  EXPECT_EQ(gi.GetGrowthLeft(), 1);
  EXPECT_FALSE(gi.HasNoGrowthLeftAssumingMayHaveDeleted());
  gi.OverwriteEmptyAsFull();
  EXPECT_EQ(gi.GetGrowthLeft(), 0);
  EXPECT_TRUE(gi.HasNoGrowthLeftAssumingMayHaveDeleted());
}
    234 
// OptimalMemcpySizeForSooSlotTransfer rounds the slot size up to a
// convenient copy width (1, 4, 8, 16, 24, ...) but never past the optional
// max_soo_slot_size cap.
// NOTE(review): the first check uses EXPECT_EQ while the rest use ASSERT_EQ;
// presumably intentional (ASSERT aborts the loops early) — confirm.
TEST(Util, OptimalMemcpySizeForSooSlotTransfer) {
  EXPECT_EQ(1, OptimalMemcpySizeForSooSlotTransfer(1));
  ASSERT_EQ(4, OptimalMemcpySizeForSooSlotTransfer(2));
  ASSERT_EQ(4, OptimalMemcpySizeForSooSlotTransfer(3));
  for (size_t slot_size = 4; slot_size <= 8; ++slot_size) {
    ASSERT_EQ(8, OptimalMemcpySizeForSooSlotTransfer(slot_size));
  }
  // If maximum amount of memory is 16, then we can copy up to 16 bytes.
  for (size_t slot_size = 9; slot_size <= 16; ++slot_size) {
    ASSERT_EQ(16,
              OptimalMemcpySizeForSooSlotTransfer(slot_size,
                                                  /*max_soo_slot_size=*/16));
    ASSERT_EQ(16,
              OptimalMemcpySizeForSooSlotTransfer(slot_size,
                                                  /*max_soo_slot_size=*/24));
  }
  // But we shouldn't try to copy more than maximum amount of memory.
  for (size_t slot_size = 9; slot_size <= 12; ++slot_size) {
    ASSERT_EQ(12, OptimalMemcpySizeForSooSlotTransfer(
                      slot_size, /*max_soo_slot_size=*/12));
  }
  for (size_t slot_size = 17; slot_size <= 24; ++slot_size) {
    ASSERT_EQ(24,
              OptimalMemcpySizeForSooSlotTransfer(slot_size,
                                                  /*max_soo_slot_size=*/24));
  }
  // We shouldn't copy more than maximum.
  for (size_t slot_size = 17; slot_size <= 20; ++slot_size) {
    ASSERT_EQ(20,
              OptimalMemcpySizeForSooSlotTransfer(slot_size,
                                                  /*max_soo_slot_size=*/20));
  }
}
    268 
// NormalizeCapacity rounds a requested capacity up to the next valid table
// capacity — per these expectations, values of the form 2^k - 1.
TEST(Util, NormalizeCapacity) {
  EXPECT_EQ(1, NormalizeCapacity(0));
  EXPECT_EQ(1, NormalizeCapacity(1));
  EXPECT_EQ(3, NormalizeCapacity(2));
  EXPECT_EQ(3, NormalizeCapacity(3));
  EXPECT_EQ(7, NormalizeCapacity(4));
  EXPECT_EQ(7, NormalizeCapacity(7));
  EXPECT_EQ(15, NormalizeCapacity(8));
  EXPECT_EQ(15, NormalizeCapacity(15));
  EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1));
  EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2));
}
    281 
    282 TEST(Util, GrowthAndCapacity) {
    283  // Verify that GrowthToCapacity gives the minimum capacity that has enough
    284  // growth.
    285  for (size_t growth = 0; growth < 10000; ++growth) {
    286    SCOPED_TRACE(growth);
    287    size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
    288    // The capacity is large enough for `growth`.
    289    EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
    290    // For (capacity+1) < kWidth, growth should equal capacity.
    291    if (capacity + 1 < Group::kWidth) {
    292      EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity));
    293    } else {
    294      EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity));
    295    }
    296    if (growth != 0 && capacity > 1) {
    297      // There is no smaller capacity that works.
    298      EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
    299    }
    300  }
    301 
    302  for (size_t capacity = Group::kWidth - 1; capacity < 10000;
    303       capacity = 2 * capacity + 1) {
    304    SCOPED_TRACE(capacity);
    305    size_t growth = CapacityToGrowth(capacity);
    306    EXPECT_THAT(growth, Lt(capacity));
    307    EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
    308    EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
    309  }
    310 }
    311 
    312 TEST(Util, probe_seq) {
    313  probe_seq<16> seq(0, 127);
    314  auto gen = [&]() {
    315    size_t res = seq.offset();
    316    seq.next();
    317    return res;
    318  };
    319  std::vector<size_t> offsets(8);
    320  std::generate_n(offsets.begin(), 8, gen);
    321  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
    322  seq = probe_seq<16>(128, 127);
    323  std::generate_n(offsets.begin(), 8, gen);
    324  EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
    325 }
    326 
// Basic BitMask behavior on 8-bit masks: boolean conversion and iteration
// over the indices of set bits, lowest first.
TEST(BitMask, Smoke) {
  EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
  EXPECT_TRUE((BitMask<uint8_t, 8>(5)));

  EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
  EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
  EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
  EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
}
    340 
// Reproduces the portable (non-SSE) Group::Match byte-parallel trick: bytes
// of `ctrl` equal to `hash` get their MSB set in `mask`.
TEST(BitMask, WithShift_MatchPortable) {
  // See the non-SSE version of Group for details on what this math is for.
  uint64_t ctrl = 0x1716151413121110;
  uint64_t hash = 0x12;
  constexpr uint64_t lsbs = 0x0101010101010101ULL;
  auto x = ctrl ^ (lsbs * hash);
  uint64_t mask = (x - lsbs) & ~x & kMsbs8Bytes;
  // Bytes 2 and 3 of ctrl (0x12, 0x13)... only byte 2 matches 0x12 exactly;
  // the resulting mask has MSBs set at byte positions 2 and 3 per the
  // carry-borrow trick, with the shifted BitMask reporting the lowest match.
  EXPECT_EQ(0x0000000080800000, mask);

  BitMask<uint64_t, 8, 3> b(mask);
  // With Shift=3 each byte maps to one index; lowest set byte is index 2.
  EXPECT_EQ(*b, 2);
}
    353 
// Shared fixtures for the WithShift_* tests below: a mask with the MSB set
// in bytes {1,3,4,5,7}, the same bytes with ALL bits set, and the expected
// byte indices when iterating either mask.
constexpr uint64_t kSome8BytesMask = /*  */ 0x8000808080008000ULL;
constexpr uint64_t kSome8BytesMaskAllOnes = 0xff00ffffff00ff00ULL;
constexpr auto kSome8BytesMaskBits = std::array<int, 5>{1, 3, 4, 5, 7};
    357 
    358 
// A fully-set shifted mask iterates all 8 byte indices; with
// NullifyBitsOnIteration even an all-ones word (not just MSBs) does too.
TEST(BitMask, WithShift_FullMask) {
  EXPECT_THAT((BitMask<uint64_t, 8, 3>(kMsbs8Bytes)),
              ElementsAre(0, 1, 2, 3, 4, 5, 6, 7));
  EXPECT_THAT(
      (BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(kMsbs8Bytes)),
      ElementsAre(0, 1, 2, 3, 4, 5, 6, 7));
  EXPECT_THAT(
      (BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(~uint64_t{0})),
      ElementsAre(0, 1, 2, 3, 4, 5, 6, 7));
}
    369 
// A zero mask yields an empty iteration in both shifted-mask variants.
TEST(BitMask, WithShift_EmptyMask) {
  EXPECT_THAT((BitMask<uint64_t, 8, 3>(0)), ElementsAre());
  EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(0)),
              ElementsAre());
}
    375 
// A partial mask yields exactly the set byte indices; the NullifyBits
// variant also handles bytes where every bit (not just the MSB) is set.
TEST(BitMask, WithShift_SomeMask) {
  EXPECT_THAT((BitMask<uint64_t, 8, 3>(kSome8BytesMask)),
              ElementsAreArray(kSome8BytesMaskBits));
  EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(
                  kSome8BytesMask)),
              ElementsAreArray(kSome8BytesMaskBits));
  EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(
                  kSome8BytesMaskAllOnes)),
              ElementsAreArray(kSome8BytesMaskBits));
}
    386 
// With NullifyBitsOnIteration, stray low bits inside already-set bytes must
// not change which byte indices are reported.
TEST(BitMask, WithShift_SomeMaskExtraBitsForNullify) {
  // Verify that adding extra bits into non zero bytes is fine.
  uint64_t extra_bits = 77;
  for (int i = 0; i < 100; ++i) {
    // Add extra bits, but keep zero bytes untouched (mask with AllOnes keeps
    // the extra bits confined to the non-zero bytes).
    uint64_t extra_mask = extra_bits & kSome8BytesMaskAllOnes;
    EXPECT_THAT((BitMask<uint64_t, 8, 3, /*NullifyBitsOnIteration=*/true>(
                    kSome8BytesMask | extra_mask)),
                ElementsAreArray(kSome8BytesMaskBits))
        << i << " " << extra_mask;
    // Pseudo-randomly vary the extra bits across iterations.
    extra_bits = (extra_bits + 1) * 3;
  }
}
    400 
// LeadingZeros/TrailingZeros count in units of indices (bits for 16-wide
// masks, bytes for shifted 8-wide masks), measured from each end.
TEST(BitMask, LeadingTrailing) {
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);

  EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);

  EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
  EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);

  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
  EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
}
    420 
// No h2 hash value (0..127) may ever match inside the canonical empty group.
TEST(Group, EmptyGroup) {
  for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
}
    424 
// Match(h) returns the indices of full slots whose control byte equals h,
// skipping kEmpty/kDeleted/kSentinel. Covered for both group widths.
TEST(Group, Match) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted,  CtrlT(3),
                      ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
                      CtrlT(7),       CtrlT(5), CtrlT(3),          CtrlT(1),
                      CtrlT(1),       CtrlT(1), CtrlT(1),          CtrlT(1)};
    EXPECT_THAT(Group{group}.Match(0), ElementsAre());
    EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
    EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
    EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
    EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {ctrl_t::kEmpty,    CtrlT(1), CtrlT(2),
                      ctrl_t::kDeleted,  CtrlT(2), CtrlT(1),
                      ctrl_t::kSentinel, CtrlT(1)};
    EXPECT_THAT(Group{group}.Match(0), ElementsAre());
    EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
    EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}
    447 
// MaskEmpty selects only kEmpty slots — not kDeleted or kSentinel — checked
// via the lowest/highest set positions for both group widths.
TEST(Group, MaskEmpty) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted,  CtrlT(3),
                      ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
                      CtrlT(7),       CtrlT(5), CtrlT(3),          CtrlT(1),
                      CtrlT(1),       CtrlT(1), CtrlT(1),          CtrlT(1)};
    EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
    EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 4);
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {ctrl_t::kEmpty,    CtrlT(1), CtrlT(2),
                      ctrl_t::kDeleted,  CtrlT(2), CtrlT(1),
                      ctrl_t::kSentinel, CtrlT(1)};
    EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
    EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 0);
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}
    466 
// MaskFull selects exactly the slots holding real hash values, excluding
// kEmpty, kDeleted and kSentinel.
TEST(Group, MaskFull) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {
        ctrl_t::kEmpty, CtrlT(1),          ctrl_t::kDeleted,  CtrlT(3),
        ctrl_t::kEmpty, CtrlT(5),          ctrl_t::kSentinel, CtrlT(7),
        CtrlT(7),       CtrlT(5),          ctrl_t::kDeleted,  CtrlT(1),
        CtrlT(1),       ctrl_t::kSentinel, ctrl_t::kEmpty,    CtrlT(1)};
    EXPECT_THAT(Group{group}.MaskFull(),
                ElementsAre(1, 3, 5, 7, 8, 9, 11, 12, 15));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {ctrl_t::kEmpty,    CtrlT(1), ctrl_t::kEmpty,
                      ctrl_t::kDeleted,  CtrlT(2), ctrl_t::kSentinel,
                      ctrl_t::kSentinel, CtrlT(1)};
    EXPECT_THAT(Group{group}.MaskFull(), ElementsAre(1, 4, 7));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}
    485 
// MaskNonFull is the exact complement of MaskFull over the same fixtures:
// kEmpty, kDeleted and kSentinel slots.
TEST(Group, MaskNonFull) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {
        ctrl_t::kEmpty, CtrlT(1),          ctrl_t::kDeleted,  CtrlT(3),
        ctrl_t::kEmpty, CtrlT(5),          ctrl_t::kSentinel, CtrlT(7),
        CtrlT(7),       CtrlT(5),          ctrl_t::kDeleted,  CtrlT(1),
        CtrlT(1),       ctrl_t::kSentinel, ctrl_t::kEmpty,    CtrlT(1)};
    EXPECT_THAT(Group{group}.MaskNonFull(),
                ElementsAre(0, 2, 4, 6, 10, 13, 14));
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {ctrl_t::kEmpty,    CtrlT(1), ctrl_t::kEmpty,
                      ctrl_t::kDeleted,  CtrlT(2), ctrl_t::kSentinel,
                      ctrl_t::kSentinel, CtrlT(1)};
    EXPECT_THAT(Group{group}.MaskNonFull(), ElementsAre(0, 2, 3, 5, 6));
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}
    504 
// MaskEmptyOrDeleted selects kEmpty and kDeleted slots but not kSentinel,
// checked via lowest/highest set positions.
TEST(Group, MaskEmptyOrDeleted) {
  if (Group::kWidth == 16) {
    ctrl_t group[] = {ctrl_t::kEmpty,   CtrlT(1), ctrl_t::kEmpty,    CtrlT(3),
                      ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
                      CtrlT(7),         CtrlT(5), CtrlT(3),          CtrlT(1),
                      CtrlT(1),         CtrlT(1), CtrlT(1),          CtrlT(1)};
    EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
    EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 4);
  } else if (Group::kWidth == 8) {
    ctrl_t group[] = {ctrl_t::kEmpty,    CtrlT(1), CtrlT(2),
                      ctrl_t::kDeleted,  CtrlT(2), CtrlT(1),
                      ctrl_t::kSentinel, CtrlT(1)};
    EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
    EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 3);
  } else {
    FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
  }
}
    523 
// ConvertDeletedToEmptyAndFullToDeleted prepares a table for in-place
// rehash: kDeleted -> kEmpty, full -> kDeleted, sentinel untouched. The
// fixture fills a 63-capacity control array with a repeating pattern and
// also populates the cloned bytes after the sentinel.
TEST(Batch, DropDeletes) {
  constexpr size_t kCapacity = 63;
  constexpr size_t kGroupWidth = container_internal::Group::kWidth;
  std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
  ctrl[kCapacity] = ctrl_t::kSentinel;
  std::vector<ctrl_t> pattern = {
      ctrl_t::kEmpty, CtrlT(2), ctrl_t::kDeleted, CtrlT(2),
      ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted};
  for (size_t i = 0; i != kCapacity; ++i) {
    ctrl[i] = pattern[i % pattern.size()];
    // Mirror the first kGroupWidth - 1 bytes past the sentinel — the cloned
    // region used for wrap-around group loads.
    if (i < kGroupWidth - 1)
      ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
  }
  ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
  ASSERT_EQ(ctrl[kCapacity], ctrl_t::kSentinel);
  for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) {
    // `i % (kCapacity + 1)` maps a cloned index back to its primary slot
    // before looking up the original pattern byte.
    ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
    if (i == kCapacity) expected = ctrl_t::kSentinel;
    if (expected == ctrl_t::kDeleted) expected = ctrl_t::kEmpty;
    if (IsFull(expected)) expected = ctrl_t::kDeleted;
    EXPECT_EQ(ctrl[i], expected)
        << i << " " << static_cast<int>(pattern[i % pattern.size()]);
  }
}
    548 
// CountLeadingEmptyOrDeleted returns the index of the first full (or
// sentinel) byte — i.e. the length of the empty/deleted prefix — for every
// combination of empty kind, full kind, and position.
TEST(Group, CountLeadingEmptyOrDeleted) {
  const std::vector<ctrl_t> empty_examples = {ctrl_t::kEmpty, ctrl_t::kDeleted};
  const std::vector<ctrl_t> full_examples = {
      CtrlT(0), CtrlT(1), CtrlT(2),   CtrlT(3),
      CtrlT(5), CtrlT(9), CtrlT(127), ctrl_t::kSentinel};

  for (ctrl_t empty : empty_examples) {
    // An entirely empty/deleted group counts the whole width.
    std::vector<ctrl_t> e(Group::kWidth, empty);
    EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
    for (ctrl_t full : full_examples) {
      // A single full byte at position i truncates the count to i.
      for (size_t i = 0; i != Group::kWidth; ++i) {
        std::vector<ctrl_t> f(Group::kWidth, empty);
        f[i] = full;
        EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
      }
      // With two full bytes, the earlier one wins.
      std::vector<ctrl_t> f(Group::kWidth, empty);
      f[Group::kWidth * 2 / 3] = full;
      f[Group::kWidth / 2] = full;
      EXPECT_EQ(Group::kWidth / 2,
                Group{f.data()}.CountLeadingEmptyOrDeleted());
    }
  }
}
    572 
// Minimal raw_hash_set policy that stores a T by value.
// kTransferable is surfaced through transfer()'s return type;
// kSoo is surfaced through soo_enabled().
template <class T, bool kTransferable = false, bool kSoo = false>
struct ValuePolicy {
  using slot_type = T;
  using key_type = T;
  using init_type = T;

  template <class Allocator, class... Args>
  static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
    absl::allocator_traits<Allocator>::construct(*alloc, slot,
                                                 std::forward<Args>(args)...);
  }

  template <class Allocator>
  static void destroy(Allocator* alloc, slot_type* slot) {
    absl::allocator_traits<Allocator>::destroy(*alloc, slot);
  }

  // Move-constructs *new_slot from *old_slot and destroys the old value.
  // NOTE(review): the integral_constant return type presumably signals to
  // raw_hash_set whether slots may be relocated by memcpy — confirm against
  // the container's policy traits.
  template <class Allocator>
  static std::integral_constant<bool, kTransferable> transfer(
      Allocator* alloc, slot_type* new_slot, slot_type* old_slot) {
    construct(alloc, new_slot, std::move(*old_slot));
    destroy(alloc, old_slot);
    return {};
  }

  static T& element(slot_type* slot) { return *slot; }

  template <class F, class... Args>
  static decltype(absl::container_internal::DecomposeValue(
      std::declval<F>(), std::declval<Args>()...))
  apply(F&& f, Args&&... args) {
    return absl::container_internal::DecomposeValue(
        std::forward<F>(f), std::forward<Args>(args)...);
  }

  // nullptr: no specialized hash-of-slot function for this policy.
  template <class Hash>
  static constexpr HashSlotFn get_hash_slot_fn() {
    return nullptr;
  }

  static constexpr bool soo_enabled() { return kSoo; }
};

using IntPolicy = ValuePolicy<int64_t>;
using Uint8Policy = ValuePolicy<uint8_t>;
    618 
// For testing SOO.
// A value occupying exactly N bytes (N must be a multiple of
// sizeof(int64_t)). Only vals_[0] carries the value; the remaining array
// elements are deliberately left uninitialized by the constructor, which is
// why operator* suppresses GCC's -Wmaybe-uninitialized.
template <int N>
class SizedValue {
 public:
  SizedValue(int64_t v) {  // NOLINT
    vals_[0] = v;
  }
  SizedValue() : SizedValue(0) {}
  SizedValue(const SizedValue&) = default;
  SizedValue& operator=(const SizedValue&) = default;

  int64_t operator*() const {
    // Suppress erroneous uninitialized memory errors on GCC.
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
    return vals_[0];
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
  }
  explicit operator int() const { return **this; }
  explicit operator int64_t() const { return **this; }

  template <typename H>
  friend H AbslHashValue(H h, SizedValue sv) {
    return H::combine(std::move(h), *sv);
  }
  bool operator==(const SizedValue& rhs) const { return **this == *rhs; }

 private:
  int64_t vals_[N / sizeof(int64_t)];
};
// Transferable policy over an N-byte SizedValue slot; kSoo toggles SOO.
template <int N, bool kSoo>
using SizedValuePolicy =
    ValuePolicy<SizedValue<N>, /*kTransferable=*/true, kSoo>;
    656 
    657 // Value is aligned as type T and contains N copies of it.
    658 template <typename T, int N>
    659 class AlignedValue {
    660 public:
    661  AlignedValue(int64_t v) {  // NOLINT
    662    for (int i = 0; i < N; ++i) {
    663      vals_[i] = v;
    664      if (sizeof(T) < sizeof(int64_t)) {
    665        v >>= (8 * sizeof(T));
    666      } else {
    667        v = 0;
    668      }
    669    }
    670  }
    671  AlignedValue() : AlignedValue(0) {}
    672  AlignedValue(const AlignedValue&) = default;
    673  AlignedValue& operator=(const AlignedValue&) = default;
    674 
    675  int64_t operator*() const {
    676    if (sizeof(T) == sizeof(int64_t)) {
    677      return vals_[0];
    678    }
    679    int64_t result = 0;
    680    for (int i = N - 1; i >= 0; --i) {
    681      result <<= (8 * sizeof(T));
    682      result += vals_[i];
    683    }
    684    return result;
    685  }
    686  explicit operator int() const { return **this; }
    687  explicit operator int64_t() const { return **this; }
    688 
    689  template <typename H>
    690  friend H AbslHashValue(H h, AlignedValue sv) {
    691    return H::combine(std::move(h), *sv);
    692  }
    693  bool operator==(const AlignedValue& rhs) const { return **this == *rhs; }
    694 
    695 private:
    696  T vals_[N];
    697 };
    698 
// raw_hash_set policy for a map-like table of string -> string. The slot
// wraps a std::pair; apply_impl exposes the first tuple element as a
// string_view key to the hasher/eq while forwarding piecewise-construct
// arguments through.
class StringPolicy {
  template <class F, class K, class V,
            class = typename std::enable_if<
                std::is_convertible<const K&, absl::string_view>::value>::type>
  decltype(std::declval<F>()(
      std::declval<const absl::string_view&>(), std::piecewise_construct,
      std::declval<std::tuple<K>>(),
      std::declval<V>())) static apply_impl(F&& f,
                                            std::pair<std::tuple<K>, V> p) {
    const absl::string_view& key = std::get<0>(p.first);
    return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
                              std::move(p.second));
  }

 public:
  // Slot holds the pair; the ctor tag disambiguates the forwarding
  // constructor from the copy/move constructors.
  struct slot_type {
    struct ctor {};

    template <class... Ts>
    explicit slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}

    std::pair<std::string, std::string> pair;
  };

  using key_type = std::string;
  using init_type = std::pair<std::string, std::string>;

  template <class allocator_type, class... Args>
  static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
    std::allocator_traits<allocator_type>::construct(
        *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
  }

  template <class allocator_type>
  static void destroy(allocator_type* alloc, slot_type* slot) {
    std::allocator_traits<allocator_type>::destroy(*alloc, slot);
  }

  // Move-constructs the new slot from the old pair, then destroys the old
  // slot. No return type: transfer is never reported as trivial here.
  template <class allocator_type>
  static void transfer(allocator_type* alloc, slot_type* new_slot,
                       slot_type* old_slot) {
    construct(alloc, new_slot, std::move(old_slot->pair));
    destroy(alloc, old_slot);
  }

  static std::pair<std::string, std::string>& element(slot_type* slot) {
    return slot->pair;
  }

  template <class F, class... Args>
  static auto apply(F&& f, Args&&... args)
      -> decltype(apply_impl(std::forward<F>(f),
                             PairArgs(std::forward<Args>(args)...))) {
    return apply_impl(std::forward<F>(f),
                      PairArgs(std::forward<Args>(args)...));
  }

  // nullptr: no specialized hash-of-slot function for this policy.
  template <class Hash>
  static constexpr HashSlotFn get_hash_slot_fn() {
    return nullptr;
  }
};
    761 
// Transparent hasher: lets string tables look up keys given as
// absl::string_view (or anything convertible) without building a temporary
// std::string.
struct StringHash : absl::Hash<absl::string_view> {
  using is_transparent = void;
};
// Transparent equality functor paired with StringHash for heterogeneous
// lookup.
struct StringEq : std::equal_to<absl::string_view> {
  using is_transparent = void;
};
    768 
// raw_hash_set instantiated as a string->string map with transparent
// hash/eq; inherits all constructors from the base.
struct StringTable
    : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
  using Base = typename StringTable::raw_hash_set;
  StringTable() = default;
  using Base::Base;
};
    775 
// Set of T values. kTransferable and kSoo are forwarded to ValuePolicy to
// toggle memcpy-able slot transfer and small-object optimization; Alloc
// selects the allocator type.
template <typename T, bool kTransferable = false, bool kSoo = false,
          class Alloc = std::allocator<T>>
struct ValueTable
    : raw_hash_set<ValuePolicy<T, kTransferable, kSoo>, hash_default_hash<T>,
                   std::equal_to<T>, Alloc> {
  using Base = typename ValueTable::raw_hash_set;
  using Base::Base;
};
    784 
// Common table instantiations used throughout the tests.
using IntTable = ValueTable<int64_t>;
using Uint8Table = ValueTable<uint8_t>;

// Variant whose slots may be transferred (moved) during rehashing.
using TransferableIntTable = ValueTable<int64_t, /*kTransferable=*/true>;
    789 
// Minimal user-defined allocator (distinct type from std::allocator) used to
// verify the table works with custom allocator types.
template <typename T>
struct CustomAlloc : std::allocator<T> {
  CustomAlloc() = default;

  // Rebinding conversion constructor required of allocators.
  template <typename U>
  explicit CustomAlloc(const CustomAlloc<U>& /*other*/) {}

  template <class U>
  struct rebind {
    using other = CustomAlloc<U>;
  };
};
    802 
// Int table built on CustomAlloc to exercise custom-allocator code paths.
struct CustomAllocIntTable
    : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
                   std::equal_to<int64_t>, CustomAlloc<int64_t>> {
  using Base = typename CustomAllocIntTable::raw_hash_set;
  using Base::Base;
};
    809 
// Allocator that (a) is larger than std::allocator so rebinding to a
// different value type changes the allocator object's size/layout, and
// (b) stores an id derived from the value type so allocate/deallocate can
// detect being called through an allocator of the wrong type.
template <typename T>
struct ChangingSizeAndTrackingTypeAlloc : std::allocator<T> {
  ChangingSizeAndTrackingTypeAlloc() = default;

  // Rebinding constructor; verifies the source allocator carries the id for
  // its own value type U.
  template <typename U>
  explicit ChangingSizeAndTrackingTypeAlloc(
      const ChangingSizeAndTrackingTypeAlloc<U>& other) {
    EXPECT_EQ(other.type_id,
              ChangingSizeAndTrackingTypeAlloc<U>::ComputeTypeId());
  }

  template <class U>
  struct rebind {
    using other = ChangingSizeAndTrackingTypeAlloc<U>;
  };

  T* allocate(size_t n) {
    EXPECT_EQ(type_id, ComputeTypeId());
    return std::allocator<T>::allocate(n);
  }

  void deallocate(T* p, std::size_t n) {
    EXPECT_EQ(type_id, ComputeTypeId());
    return std::allocator<T>::deallocate(p, n);
  }

  // Id that is stable per value type T.
  static size_t ComputeTypeId() { return absl::HashOf(typeid(T).name()); }

  // We add extra data to make the allocator size change (with T).
  // This also makes type_id positioned differently per T, so that the
  // assertions in the allocator can catch bugs more likely.
  char data_before[sizeof(T) * 3] = {0};
  size_t type_id = ComputeTypeId();
  char data_after[sizeof(T) * 5] = {0};
};
    845 
// Int table using the size-changing, type-tracking allocator above.
struct ChangingSizeAllocIntTable
    : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
                   std::equal_to<int64_t>,
                   ChangingSizeAndTrackingTypeAlloc<int64_t>> {
  using Base = typename ChangingSizeAllocIntTable::raw_hash_set;
  using Base::Base;
};
    853 
// uint8_t table with a minimum-alignment allocator, exercising the smallest
// possible slot alignment.
struct MinimumAlignmentUint8Table
    : raw_hash_set<Uint8Policy, hash_default_hash<uint8_t>,
                   std::equal_to<uint8_t>, MinimumAlignmentAlloc<uint8_t>> {
  using Base = typename MinimumAlignmentUint8Table::raw_hash_set;
  using Base::Base;
};
    860 
// Allows for freezing the allocator to expect no further allocations: once
// the test sets `*frozen` to true, any subsequent allocate() fails an
// EXPECT.
template <typename T>
struct FreezableAlloc : std::allocator<T> {
  explicit FreezableAlloc(bool* f) : frozen(f) {}

  // Rebinding constructor shares the same frozen flag.
  template <typename U>
  explicit FreezableAlloc(const FreezableAlloc<U>& other)
      : frozen(other.frozen) {}

  template <class U>
  struct rebind {
    using other = FreezableAlloc<U>;
  };

  T* allocate(size_t n) {
    EXPECT_FALSE(*frozen);
    return std::allocator<T>::allocate(n);
  }

  // Owned by the test, not by the allocator.
  bool* frozen;
};
    882 
// SOO table of N-byte values with a freezable allocator, used to assert that
// certain operations perform no heap allocation.
template <int N>
struct FreezableSizedValueSooTable
    : raw_hash_set<SizedValuePolicy<N, /*kSoo=*/true>,
                   container_internal::hash_default_hash<SizedValue<N>>,
                   std::equal_to<SizedValue<N>>,
                   FreezableAlloc<SizedValue<N>>> {
  using Base = typename FreezableSizedValueSooTable::raw_hash_set;
  using Base::Base;
};
    892 
// Pathological hash mapping every value to 0, forcing all elements onto a
// single probe sequence (maximal collisions).
struct BadFastHash {
  template <class T>
  size_t operator()(const T&) const {
    return 0;
  }
};
    899 
// Combines the all-collisions hash with the freezable allocator.
struct BadHashFreezableIntTable
    : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int64_t>,
                   FreezableAlloc<int64_t>> {
  using Base = typename BadHashFreezableIntTable::raw_hash_set;
  using Base::Base;
};
    906 
// Int table with the all-collisions hash; every element lands in the same
// probe sequence.
struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int64_t>,
                               std::allocator<int>> {
  using Base = typename BadTable::raw_hash_set;
  BadTable() = default;
  using Base::Base;
};
    913 
// Slot deliberately larger than the HeapOrSoo union so the table cannot use
// small-object optimization.
constexpr size_t kNonSooSize = sizeof(HeapOrSoo) + 8;
using NonSooIntTableSlotType = SizedValue<kNonSooSize>;
static_assert(sizeof(NonSooIntTableSlotType) >= kNonSooSize, "too small");
using NonSooIntTable = ValueTable<NonSooIntTableSlotType>;
// SOO-enabled tables with varying element width and transferability.
using SooInt32Table =
    ValueTable<int32_t, /*kTransferable=*/true, /*kSoo=*/true>;
using SooIntTable = ValueTable<int64_t, /*kTransferable=*/true, /*kSoo=*/true>;
using NonMemcpyableSooIntTable =
    ValueTable<int64_t, /*kTransferable=*/false, /*kSoo=*/true>;
// SOO tables with a stateful, size-changing allocator to exercise allocator
// rebind/propagation paths.
using MemcpyableSooIntCustomAllocTable =
    ValueTable<int64_t, /*kTransferable=*/true, /*kSoo=*/true,
               ChangingSizeAndTrackingTypeAlloc<int64_t>>;
using NonMemcpyableSooIntCustomAllocTable =
    ValueTable<int64_t, /*kTransferable=*/false, /*kSoo=*/true,
               ChangingSizeAndTrackingTypeAlloc<int64_t>>;
    929 
// The table must not spend storage on empty hasher/eq/allocator types: its
// size should equal the raw data layout (plus generation data when enabled),
// while a stateful hasher adds exactly its own size.
TEST(Table, EmptyFunctorOptimization) {
  static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
  static_assert(std::is_empty<std::allocator<int>>::value, "");

  // Mirror of the expected raw_hash_set data members.
  struct MockTable {
    size_t capacity;
    uint64_t size;
    void* ctrl;
    void* slots;
  };
  struct StatelessHash {
    size_t operator()(absl::string_view) const { return 0; }
  };
  struct StatefulHash : StatelessHash {
    uint64_t dummy;
  };

  // Extra per-table data present only when generations are enabled.
  struct GenerationData {
    size_t reserved_growth;
    size_t reservation_size;
    GenerationType* generation;
  };

// Ignore unreachable-code warning. Compiler thinks one branch of each ternary
// conditional is unreachable.
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunreachable-code"
#endif
  constexpr size_t mock_size = sizeof(MockTable);
  constexpr size_t generation_size =
      SwisstableGenerationsEnabled() ? sizeof(GenerationData) : 0;
#if defined(__clang__)
#pragma clang diagnostic pop
#endif

  EXPECT_EQ(
      mock_size + generation_size,
      sizeof(
          raw_hash_set<StringPolicy, StatelessHash,
                       std::equal_to<absl::string_view>, std::allocator<int>>));

  EXPECT_EQ(
      mock_size + sizeof(StatefulHash) + generation_size,
      sizeof(
          raw_hash_set<StringPolicy, StatefulHash,
                       std::equal_to<absl::string_view>, std::allocator<int>>));
}
    978 
// Typed fixture run against SOO and non-SOO tables, with and without
// memcpy-able transfer and with a custom allocator.
template <class TableType>
class SooTest : public testing::Test {};

using SooTableTypes =
    ::testing::Types<SooIntTable, NonSooIntTable, NonMemcpyableSooIntTable,
                     MemcpyableSooIntCustomAllocTable,
                     NonMemcpyableSooIntCustomAllocTable>;
TYPED_TEST_SUITE(SooTest, SooTableTypes);
    987 
    988 TYPED_TEST(SooTest, Empty) {
    989  TypeParam t;
    990  EXPECT_EQ(0, t.size());
    991  EXPECT_TRUE(t.empty());
    992 }
    993 
    994 TYPED_TEST(SooTest, LookupEmpty) {
    995  TypeParam t;
    996  auto it = t.find(0);
    997  EXPECT_TRUE(it == t.end());
    998 }
    999 
   1000 TYPED_TEST(SooTest, Insert1) {
   1001  TypeParam t;
   1002  EXPECT_TRUE(t.find(0) == t.end());
   1003  auto res = t.emplace(0);
   1004  EXPECT_TRUE(res.second);
   1005  EXPECT_THAT(*res.first, 0);
   1006  EXPECT_EQ(1, t.size());
   1007  EXPECT_THAT(*t.find(0), 0);
   1008 }
   1009 
   1010 TYPED_TEST(SooTest, Insert2) {
   1011  TypeParam t;
   1012  EXPECT_TRUE(t.find(0) == t.end());
   1013  auto res = t.emplace(0);
   1014  EXPECT_TRUE(res.second);
   1015  EXPECT_THAT(*res.first, 0);
   1016  EXPECT_EQ(1, t.size());
   1017  EXPECT_TRUE(t.find(1) == t.end());
   1018  res = t.emplace(1);
   1019  EXPECT_TRUE(res.second);
   1020  EXPECT_THAT(*res.first, 1);
   1021  EXPECT_EQ(2, t.size());
   1022  EXPECT_THAT(*t.find(0), 0);
   1023  EXPECT_THAT(*t.find(1), 1);
   1024 }
   1025 
   1026 TEST(Table, InsertCollision) {
   1027  BadTable t;
   1028  EXPECT_TRUE(t.find(1) == t.end());
   1029  auto res = t.emplace(1);
   1030  EXPECT_TRUE(res.second);
   1031  EXPECT_THAT(*res.first, 1);
   1032  EXPECT_EQ(1, t.size());
   1033 
   1034  EXPECT_TRUE(t.find(2) == t.end());
   1035  res = t.emplace(2);
   1036  EXPECT_THAT(*res.first, 2);
   1037  EXPECT_TRUE(res.second);
   1038  EXPECT_EQ(2, t.size());
   1039 
   1040  EXPECT_THAT(*t.find(1), 1);
   1041  EXPECT_THAT(*t.find(2), 2);
   1042 }
   1043 
// Test that we do not add an already-present element when the probe
// sequence must search through many groups containing deleted slots.
TEST(Table, InsertCollisionAndFindAfterDelete) {
  BadTable t;  // all elements go to the same group.
  // Have at least 2 groups with Group::kWidth collisions
  // plus some extra collisions in the last group.
  constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
  for (size_t i = 0; i < kNumInserts; ++i) {
    auto res = t.emplace(i);
    EXPECT_TRUE(res.second);
    EXPECT_THAT(*res.first, i);
    EXPECT_EQ(i + 1, t.size());
  }

  // Remove elements one by one and check
  // that we still can find all other elements.
  for (size_t i = 0; i < kNumInserts; ++i) {
    EXPECT_EQ(1, t.erase(i)) << i;
    for (size_t j = i + 1; j < kNumInserts; ++j) {
      EXPECT_THAT(*t.find(j), j);
      // Re-inserting a present element must fail even though the probe path
      // crosses deleted slots.
      auto res = t.emplace(j);
      EXPECT_FALSE(res.second) << i << " " << j;
      EXPECT_THAT(*res.first, j);
      EXPECT_EQ(kNumInserts - i - 1, t.size());
    }
  }
  EXPECT_TRUE(t.empty());
}
   1072 
   1073 TYPED_TEST(SooTest, EraseInSmallTables) {
   1074  for (int64_t size = 0; size < 64; ++size) {
   1075    TypeParam t;
   1076    for (int64_t i = 0; i < size; ++i) {
   1077      t.insert(i);
   1078    }
   1079    for (int64_t i = 0; i < size; ++i) {
   1080      t.erase(i);
   1081      EXPECT_EQ(t.size(), size - i - 1);
   1082      for (int64_t j = i + 1; j < size; ++j) {
   1083        EXPECT_THAT(*t.find(j), j);
   1084      }
   1085    }
   1086    EXPECT_TRUE(t.empty());
   1087  }
   1088 }
   1089 
// After reserve(), inserts within the reserved count (including duplicates)
// must neither grow the table nor move existing elements.
TYPED_TEST(SooTest, InsertWithinCapacity) {
  TypeParam t;
  t.reserve(10);
  const size_t original_capacity = t.capacity();
  // Address of the element with key `i`; stable only if no rehash happened.
  const auto addr = [&](int i) {
    return reinterpret_cast<uintptr_t>(&*t.find(i));
  };
  // Inserting an element does not change capacity.
  t.insert(0);
  EXPECT_THAT(t.capacity(), original_capacity);
  const uintptr_t original_addr_0 = addr(0);
  // Inserting another element does not rehash.
  t.insert(1);
  EXPECT_THAT(t.capacity(), original_capacity);
  EXPECT_THAT(addr(0), original_addr_0);
  // Inserting lots of duplicate elements does not rehash.
  for (int i = 0; i < 100; ++i) {
    t.insert(i % 10);
  }
  EXPECT_THAT(t.capacity(), original_capacity);
  EXPECT_THAT(addr(0), original_addr_0);
  // Inserting a range of duplicate elements does not rehash.
  std::vector<int> dup_range;
  for (int i = 0; i < 100; ++i) {
    dup_range.push_back(i % 10);
  }
  t.insert(dup_range.begin(), dup_range.end());
  EXPECT_THAT(t.capacity(), original_capacity);
  EXPECT_THAT(addr(0), original_addr_0);
}
   1120 
// Typed fixture for resize behavior of small tables, instantiated with a
// broad mix of element sizes, alignments, SOO/transferability settings, and
// allocator types.
template <class TableType>
class SmallTableResizeTest : public testing::Test {};

using SmallTableTypes = ::testing::Types<
    IntTable, TransferableIntTable, SooIntTable,
    // int8
    ValueTable<int8_t, /*kTransferable=*/true, /*kSoo=*/true>,
    ValueTable<int8_t, /*kTransferable=*/false, /*kSoo=*/true>,
    // int16
    ValueTable<int16_t, /*kTransferable=*/true, /*kSoo=*/true>,
    ValueTable<int16_t, /*kTransferable=*/false, /*kSoo=*/true>,
    // int128
    ValueTable<SizedValue<16>, /*kTransferable=*/true, /*kSoo=*/true>,
    ValueTable<SizedValue<16>, /*kTransferable=*/false, /*kSoo=*/true>,
    // int192
    ValueTable<SizedValue<24>, /*kTransferable=*/true, /*kSoo=*/true>,
    ValueTable<SizedValue<24>, /*kTransferable=*/false, /*kSoo=*/true>,
    // Special tables.
    MinimumAlignmentUint8Table, CustomAllocIntTable, ChangingSizeAllocIntTable,
    BadTable,
    // alignment 1, size 2.
    ValueTable<AlignedValue<uint8_t, 2>, /*kTransferable=*/true, /*kSoo=*/true>,
    ValueTable<AlignedValue<uint8_t, 2>, /*kTransferable=*/false,
               /*kSoo=*/true>,
    // alignment 1, size 7.
    ValueTable<AlignedValue<uint8_t, 7>, /*kTransferable=*/true, /*kSoo=*/true>,
    ValueTable<AlignedValue<uint8_t, 7>, /*kTransferable=*/false,
               /*kSoo=*/true>,
    // alignment 2, size 6.
    ValueTable<AlignedValue<uint16_t, 3>, /*kTransferable=*/true,
               /*kSoo=*/true>,
    ValueTable<AlignedValue<uint16_t, 3>, /*kTransferable=*/false,
               /*kSoo=*/true>,
    // alignment 2, size 10.
    ValueTable<AlignedValue<uint16_t, 5>, /*kTransferable=*/true,
               /*kSoo=*/true>,
    ValueTable<AlignedValue<uint16_t, 5>, /*kTransferable=*/false,
               /*kSoo=*/true>>;
TYPED_TEST_SUITE(SmallTableResizeTest, SmallTableTypes);
   1160 
   1161 TYPED_TEST(SmallTableResizeTest, InsertIntoSmallTable) {
   1162  TypeParam t;
   1163  for (int i = 0; i < 32; ++i) {
   1164    t.insert(i);
   1165    ASSERT_EQ(t.size(), i + 1);
   1166    for (int j = 0; j < i + 1; ++j) {
   1167      EXPECT_TRUE(t.find(j) != t.end());
   1168      EXPECT_EQ(*t.find(j), j);
   1169    }
   1170  }
   1171 }
   1172 
// Growing a small table via rehash() or reserve() preserves every element,
// for all (source_size, target_size) combinations.
TYPED_TEST(SmallTableResizeTest, ResizeGrowSmallTables) {
  for (size_t source_size = 0; source_size < 32; ++source_size) {
    for (size_t target_size = source_size; target_size < 32; ++target_size) {
      for (bool rehash : {false, true}) {
        SCOPED_TRACE(absl::StrCat("source_size: ", source_size,
                                  ", target_size: ", target_size,
                                  ", rehash: ", rehash));
        TypeParam t;
        for (size_t i = 0; i < source_size; ++i) {
          t.insert(static_cast<int>(i));
        }
        if (rehash) {
          t.rehash(target_size);
        } else {
          t.reserve(target_size);
        }
        for (size_t i = 0; i < source_size; ++i) {
          EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
          EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
        }
      }
    }
  }
}
   1197 
// Shrinking via rehash(target) after an over-large reserve() keeps all
// elements; rehash(0) must shrink back to the minimum capacity for the
// current size.
TYPED_TEST(SmallTableResizeTest, ResizeReduceSmallTables) {
  DisableSampling();
  for (size_t source_size = 0; source_size < 32; ++source_size) {
    for (size_t target_size = 0; target_size <= source_size; ++target_size) {
      TypeParam t;
      size_t inserted_count = std::min<size_t>(source_size, 5);
      for (size_t i = 0; i < inserted_count; ++i) {
        t.insert(static_cast<int>(i));
      }
      // Capacity before the reserve; the smallest capacity for this size.
      const size_t minimum_capacity = t.capacity();
      t.reserve(source_size);
      t.rehash(target_size);
      if (target_size == 0) {
        EXPECT_EQ(t.capacity(), minimum_capacity)
            << "rehash(0) must resize to the minimum capacity";
      }
      for (size_t i = 0; i < inserted_count; ++i) {
        EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
        EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
      }
    }
  }
}
   1221 
// lazy_emplace() runs the constructor callback only when the key is absent;
// a second call with the same key returns the existing element untouched.
TEST(Table, LazyEmplace) {
  StringTable t;
  bool called = false;
  auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
    called = true;
    f("abc", "ABC");
  });
  EXPECT_TRUE(called);
  EXPECT_THAT(*it, Pair("abc", "ABC"));
  called = false;
  it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
    called = true;
    f("abc", "DEF");
  });
  EXPECT_FALSE(called);
  EXPECT_THAT(*it, Pair("abc", "ABC"));
}
   1239 
   1240 TYPED_TEST(SooTest, ContainsEmpty) {
   1241  TypeParam t;
   1242 
   1243  EXPECT_FALSE(t.contains(0));
   1244 }
   1245 
   1246 TYPED_TEST(SooTest, Contains1) {
   1247  TypeParam t;
   1248 
   1249  EXPECT_TRUE(t.insert(0).second);
   1250  EXPECT_TRUE(t.contains(0));
   1251  EXPECT_FALSE(t.contains(1));
   1252 
   1253  EXPECT_EQ(1, t.erase(0));
   1254  EXPECT_FALSE(t.contains(0));
   1255 }
   1256 
   1257 TYPED_TEST(SooTest, Contains2) {
   1258  TypeParam t;
   1259 
   1260  EXPECT_TRUE(t.insert(0).second);
   1261  EXPECT_TRUE(t.contains(0));
   1262  EXPECT_FALSE(t.contains(1));
   1263 
   1264  t.clear();
   1265  EXPECT_FALSE(t.contains(0));
   1266 }
   1267 
// Global counters of DecomposeType special-member calls; reset by
// TestDecompose before each measurement block.
int decompose_constructed;
int decompose_copy_constructed;
int decompose_copy_assigned;
int decompose_move_constructed;
int decompose_move_assigned;
// Instrumented value type counting how many objects the table constructs,
// copies, and moves during heterogeneous insert/emplace.
struct DecomposeType {
  DecomposeType(int i = 0) : i(i) {  // NOLINT
    ++decompose_constructed;
  }

  // Constructs from the first character of a C string.
  explicit DecomposeType(const char* d) : DecomposeType(*d) {}

  DecomposeType(const DecomposeType& other) : i(other.i) {
    ++decompose_copy_constructed;
  }
  DecomposeType& operator=(const DecomposeType& other) {
    ++decompose_copy_assigned;
    i = other.i;
    return *this;
  }
  DecomposeType(DecomposeType&& other) : i(other.i) {
    ++decompose_move_constructed;
  }
  DecomposeType& operator=(DecomposeType&& other) {
    ++decompose_move_assigned;
    i = other.i;
    return *this;
  }

  int i;
};
   1299 
// Transparent hash with overloads for DecomposeType, int, and const char*,
// so lookups can avoid constructing a DecomposeType.
struct DecomposeHash {
  using is_transparent = void;
  size_t operator()(const DecomposeType& a) const { return a.i; }
  size_t operator()(int a) const { return a; }
  size_t operator()(const char* a) const { return *a; }
};
   1306 
// Transparent equality matching DecomposeHash's overload set.
struct DecomposeEq {
  using is_transparent = void;
  bool operator()(const DecomposeType& a, const DecomposeType& b) const {
    return a.i == b.i;
  }
  bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
  bool operator()(const DecomposeType& a, const char* b) const {
    return a.i == *b;
  }
};
   1317 
// Minimal policy storing DecomposeType directly in the slot. `apply` passes
// the caller's argument through unchanged (as both key and value), so
// hash/eq see the original argument type.
struct DecomposePolicy {
  using slot_type = DecomposeType;
  using key_type = DecomposeType;
  using init_type = DecomposeType;

  template <typename T>
  static void construct(void*, DecomposeType* slot, T&& v) {
    ::new (slot) DecomposeType(std::forward<T>(v));
  }
  static void destroy(void*, DecomposeType* slot) { slot->~DecomposeType(); }
  static DecomposeType& element(slot_type* slot) { return *slot; }

  template <class F, class T>
  static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
    return std::forward<F>(f)(x, x);
  }

  // No specialized slot-hashing function for this policy.
  template <class Hash>
  static constexpr HashSlotFn get_hash_slot_fn() {
    return nullptr;
  }
};
   1340 
// Verifies how many DecomposeType objects are constructed/copied/moved by
// the various insert/emplace overloads. `construct_three` is true when the
// given Hash/Eq cannot compare against "3" (const char*) directly, so those
// calls must construct a DecomposeType first.
template <typename Hash, typename Eq>
void TestDecompose(bool construct_three) {
  DecomposeType elem{0};
  const int one = 1;
  const char* three_p = "3";
  const auto& three = three_p;
  const int elem_vector_count = 256;
  std::vector<DecomposeType> elem_vector(elem_vector_count, DecomposeType{0});
  std::iota(elem_vector.begin(), elem_vector.end(), 0);

  using DecomposeSet =
      raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>>;
  DecomposeSet set1;

  decompose_constructed = 0;
  int expected_constructed = 0;
  EXPECT_EQ(expected_constructed, decompose_constructed);
  // Inserting an existing DecomposeType constructs nothing extra.
  set1.insert(elem);
  EXPECT_EQ(expected_constructed, decompose_constructed);
  // First insertion of a new key constructs exactly one element.
  set1.insert(1);
  EXPECT_EQ(++expected_constructed, decompose_constructed);
  set1.emplace("3");
  EXPECT_EQ(++expected_constructed, decompose_constructed);
  EXPECT_EQ(expected_constructed, decompose_constructed);

  // Below, each re-insertion of an existing key must construct nothing
  // except where construct_three requires a temporary for "3".
  {  // insert(T&&)
    set1.insert(1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(const T&)
    set1.insert(one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(hint, T&&)
    set1.insert(set1.begin(), 1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // insert(hint, const T&)
    set1.insert(set1.begin(), one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // emplace(...)
    set1.emplace(1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace("3");
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace(one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace(three);
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  {  // emplace_hint(...)
    set1.emplace_hint(set1.begin(), 1);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), "3");
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), one);
    EXPECT_EQ(expected_constructed, decompose_constructed);
    set1.emplace_hint(set1.begin(), three);
    expected_constructed += construct_three;
    EXPECT_EQ(expected_constructed, decompose_constructed);
  }

  decompose_copy_constructed = 0;
  decompose_copy_assigned = 0;
  decompose_move_constructed = 0;
  decompose_move_assigned = 0;
  int expected_copy_constructed = 0;
  int expected_move_constructed = 0;
  {  // raw_hash_set(first, last) with random-access iterators
    DecomposeSet set2(elem_vector.begin(), elem_vector.end());
    // Expect exactly one copy-constructor call for each element if no
    // rehashing is done.
    expected_copy_constructed += elem_vector_count;
    EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
    EXPECT_EQ(expected_move_constructed, decompose_move_constructed);
    EXPECT_EQ(0, decompose_move_assigned);
    EXPECT_EQ(0, decompose_copy_assigned);
  }

  {  // raw_hash_set(first, last) with forward iterators
    std::list<DecomposeType> elem_list(elem_vector.begin(), elem_vector.end());
    expected_copy_constructed = decompose_copy_constructed;
    DecomposeSet set2(elem_list.begin(), elem_list.end());
    // Expect exactly N elements copied into set, expect at most 2*N elements
    // moving internally for all resizing needed (for a growth factor of 2).
    expected_copy_constructed += elem_vector_count;
    EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
    expected_move_constructed += elem_vector_count;
    EXPECT_LT(expected_move_constructed, decompose_move_constructed);
    expected_move_constructed += elem_vector_count;
    EXPECT_GE(expected_move_constructed, decompose_move_constructed);
    EXPECT_EQ(0, decompose_move_assigned);
    EXPECT_EQ(0, decompose_copy_assigned);
    expected_copy_constructed = decompose_copy_constructed;
    expected_move_constructed = decompose_move_constructed;
  }

  {  // insert(first, last)
    DecomposeSet set2;
    set2.insert(elem_vector.begin(), elem_vector.end());
    // Expect exactly N elements copied into set, expect at most 2*N elements
    // moving internally for all resizing needed (for a growth factor of 2).
    const int expected_new_elements = elem_vector_count;
    const int expected_max_element_moves = 2 * elem_vector_count;
    expected_copy_constructed += expected_new_elements;
    EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
    expected_move_constructed += expected_max_element_moves;
    EXPECT_GE(expected_move_constructed, decompose_move_constructed);
    EXPECT_EQ(0, decompose_move_assigned);
    EXPECT_EQ(0, decompose_copy_assigned);
    expected_copy_constructed = decompose_copy_constructed;
    expected_move_constructed = decompose_move_constructed;
  }
}
   1464 
// Runs TestDecompose with combinations of hash/eq functors. The local
// variants overload int but not const char*, so "3" lookups require
// constructing a DecomposeType (construct_three == true).
TEST(Table, Decompose) {
  if (SwisstableGenerationsEnabled()) {
    GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
  }

  TestDecompose<DecomposeHash, DecomposeEq>(false);

  struct TransparentHashIntOverload {
    size_t operator()(const DecomposeType& a) const { return a.i; }
    size_t operator()(int a) const { return a; }
  };
  struct TransparentEqIntOverload {
    bool operator()(const DecomposeType& a, const DecomposeType& b) const {
      return a.i == b.i;
    }
    bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
  };
  TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
  TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
  TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
}
   1486 
   1487 // Returns the largest m such that a table with m elements has the same number
   1488 // of buckets as a table with n elements.
   1489 size_t MaxDensitySize(size_t n) {
   1490  IntTable t;
   1491  t.reserve(n);
   1492  for (size_t i = 0; i != n; ++i) t.emplace(i);
   1493  const size_t c = t.bucket_count();
   1494  while (c == t.bucket_count()) t.emplace(n++);
   1495  return t.size() - 1;
   1496 }
   1497 
   1498 struct Modulo1000Hash {
   1499  size_t operator()(int64_t x) const { return static_cast<size_t>(x) % 1000; }
   1500 };
   1501 
// Int table using the modulo-1000 hash, producing heavy collisions for keys
// that are multiples of 1000.
struct Modulo1000HashTable
    : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int64_t>,
                          std::allocator<int>> {};
   1505 
// Test that a rehash with no resize happens in case of many deleted slots.
TEST(Table, RehashWithNoResize) {
  if (SwisstableGenerationsEnabled()) {
    GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
  }

  Modulo1000HashTable t;
  // Adding the same length (and the same hash) strings
  // to have at least kMinFullGroups groups
  // with Group::kWidth collisions. Then fill up to MaxDensitySize;
  const size_t kMinFullGroups = 7;
  std::vector<int> keys;
  for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
    // Multiples of 1000 all collide under Modulo1000Hash.
    int k = i * 1000;
    t.emplace(k);
    keys.push_back(k);
  }
  const size_t capacity = t.capacity();

  // Remove elements from all groups except the first and the last one.
  // All elements removed from full groups will be marked as ctrl_t::kDeleted.
  const size_t erase_begin = Group::kWidth / 2;
  const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
  for (size_t i = erase_begin; i < erase_end; ++i) {
    EXPECT_EQ(1, t.erase(keys[i])) << i;
  }
  keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);

  auto last_key = keys.back();
  size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);

  // Make sure that we have to make a lot of probes for last key.
  ASSERT_GT(last_key_num_probes, kMinFullGroups);

  int x = 1;
  // Insert and erase one element, before the in-place rehash happens. The
  // rehash is detected by the probe count for last_key dropping.
  while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
    t.emplace(x);
    // Capacity must stay unchanged: any rehash must be in place.
    ASSERT_EQ(capacity, t.capacity());
    // All elements should be there.
    ASSERT_TRUE(t.find(x) != t.end()) << x;
    for (const auto& k : keys) {
      ASSERT_TRUE(t.find(k) != t.end()) << k;
    }
    t.erase(x);
    ++x;
  }
}
   1554 
   1555 TYPED_TEST(SooTest, InsertEraseStressTest) {
   1556  TypeParam t;
   1557  const size_t kMinElementCount = 50;
   1558  std::deque<int> keys;
   1559  size_t i = 0;
   1560  for (; i < MaxDensitySize(kMinElementCount); ++i) {
   1561    t.emplace(static_cast<int64_t>(i));
   1562    keys.push_back(i);
   1563  }
   1564  const size_t kNumIterations = 20000;
   1565  for (; i < kNumIterations; ++i) {
   1566    ASSERT_EQ(1, t.erase(keys.front()));
   1567    keys.pop_front();
   1568    t.emplace(static_cast<int64_t>(i));
   1569    keys.push_back(i);
   1570  }
   1571 }
   1572 
TEST(Table, InsertOverloads) {
 StringTable t;
 // These should all trigger the insert(init_type) overload.
 // Three braced spellings: both parts defaulted, key only, key and value.
 t.insert({{}, {}});
 t.insert({"ABC", {}});
 t.insert({"DEF", "!!!"});

 EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
                                     Pair("DEF", "!!!")));
}
   1583 
   1584 TYPED_TEST(SooTest, LargeTable) {
   1585  TypeParam t;
   1586  for (int64_t i = 0; i != 10000; ++i) {
   1587    t.emplace(i << 40);
   1588    ASSERT_EQ(t.size(), i + 1);
   1589  }
   1590  for (int64_t i = 0; i != 10000; ++i)
   1591    ASSERT_EQ(i << 40, static_cast<int64_t>(*t.find(i << 40)));
   1592 }
   1593 
   1594 // Timeout if copy is quadratic as it was in Rust.
   1595 TYPED_TEST(SooTest, EnsureNonQuadraticAsInRust) {
   1596  static const size_t kLargeSize = 1 << 15;
   1597 
   1598  TypeParam t;
   1599  for (size_t i = 0; i != kLargeSize; ++i) {
   1600    t.insert(i);
   1601  }
   1602 
   1603  // If this is quadratic, the test will timeout.
   1604  TypeParam t2;
   1605  for (const auto& entry : t) t2.insert(entry);
   1606 }
   1607 
TYPED_TEST(SooTest, ClearBug) {
 if (SwisstableGenerationsEnabled()) {
   GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
 }

 // Regression test: after clear() the capacity stays the same, and
 // re-inserting the same keys should land element 2 close to its old slot
 // (i.e. the backing array layout is reused).
 TypeParam t;
 constexpr size_t capacity = container_internal::Group::kWidth - 1;
 constexpr size_t max_size = capacity / 2 + 1;
 for (size_t i = 0; i < max_size; ++i) {
   t.insert(i);
 }
 ASSERT_EQ(capacity, t.capacity());
 intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
 t.clear();
 ASSERT_EQ(capacity, t.capacity());
 for (size_t i = 0; i < max_size; ++i) {
   t.insert(i);
 }
 ASSERT_EQ(capacity, t.capacity());
 intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
 // We are checking that original and second are close enough to each other
 // that they are probably still in the same group.  This is not strictly
 // guaranteed.
 EXPECT_LT(static_cast<size_t>(std::abs(original - second)),
           capacity * sizeof(typename TypeParam::value_type));
}
   1634 
   1635 TYPED_TEST(SooTest, Erase) {
   1636  TypeParam t;
   1637  EXPECT_TRUE(t.find(0) == t.end());
   1638  auto res = t.emplace(0);
   1639  EXPECT_TRUE(res.second);
   1640  EXPECT_EQ(1, t.size());
   1641  t.erase(res.first);
   1642  EXPECT_EQ(0, t.size());
   1643  EXPECT_TRUE(t.find(0) == t.end());
   1644 }
   1645 
   1646 TYPED_TEST(SooTest, EraseMaintainsValidIterator) {
   1647  TypeParam t;
   1648  const int kNumElements = 100;
   1649  for (int i = 0; i < kNumElements; i++) {
   1650    EXPECT_TRUE(t.emplace(i).second);
   1651  }
   1652  EXPECT_EQ(t.size(), kNumElements);
   1653 
   1654  int num_erase_calls = 0;
   1655  auto it = t.begin();
   1656  while (it != t.end()) {
   1657    t.erase(it++);
   1658    num_erase_calls++;
   1659  }
   1660 
   1661  EXPECT_TRUE(t.empty());
   1662  EXPECT_EQ(num_erase_calls, kNumElements);
   1663 }
   1664 
   1665 TYPED_TEST(SooTest, EraseBeginEnd) {
   1666  TypeParam t;
   1667  for (int i = 0; i < 10; ++i) t.insert(i);
   1668  EXPECT_EQ(t.size(), 10);
   1669  t.erase(t.begin(), t.end());
   1670  EXPECT_EQ(t.size(), 0);
   1671 }
   1672 
// Collect N bad keys by the following algorithm:
// 1. Create an empty table and reserve it to 2 * N.
// 2. Insert N random elements.
// 3. Take the first Group::kWidth - 1 elements into the bad_keys array.
// 4. Clear the table without resizing.
// 5. Repeat from step 2 until N keys are collected.
std::vector<int64_t> CollectBadMergeKeys(size_t N) {
 static constexpr int kGroupSize = Group::kWidth - 1;

 // Inserts [b, e) into *t and returns the first kGroupSize elements in the
 // table's iteration order.
 auto topk_range = [](size_t b, size_t e,
                      IntTable* t) -> std::vector<int64_t> {
   for (size_t i = b; i != e; ++i) {
     t->emplace(i);
   }
   std::vector<int64_t> res;
   res.reserve(kGroupSize);
   auto it = t->begin();
   for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
     res.push_back(*it);
   }
   return res;
 };

 std::vector<int64_t> bad_keys;
 bad_keys.reserve(N);
 IntTable t;
 t.reserve(N * 2);  // reserve up front so the capacity is stable below

 // Each round inserts a fresh range of N keys, records the first kGroupSize
 // of them, then clears via erase(begin, end), which keeps the backing
 // array (no resize).
 for (size_t b = 0; bad_keys.size() < N; b += N) {
   auto keys = topk_range(b, b + N, &t);
   bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
   t.erase(t.begin(), t.end());
   EXPECT_TRUE(t.empty());
 }
 return bad_keys;
}
   1709 
// Aggregated probing statistics collected over many trial tables.
struct ProbeStats {
 // Number of elements with specific probe length over all tested tables.
 std::vector<size_t> all_probes_histogram;
 // Ratios total_probe_length/size for every tested table.
 std::vector<double> single_table_ratios;

 // Average ratio total_probe_length/size over tables.
 double AvgRatio() const {
   return std::accumulate(single_table_ratios.begin(),
                          single_table_ratios.end(), 0.0) /
          single_table_ratios.size();
 }

 // Maximum ratio total_probe_length/size over tables.
 double MaxRatio() const {
   return *std::max_element(single_table_ratios.begin(),
                            single_table_ratios.end());
 }

 // Percentile ratio total_probe_length/size over tables.
 double PercentileRatio(double Percentile = 0.95) const {
   auto r = single_table_ratios;
   auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
   if (mid != r.end()) {
     // Partial selection: nth_element puts the percentile value at `mid`
     // without fully sorting the copy.
     std::nth_element(r.begin(), mid, r.end());
     return *mid;
   } else {
     return MaxRatio();
   }
 }

 // Maximum probe length over all elements and all tables.
 size_t MaxProbe() const { return all_probes_histogram.size(); }

 // Fraction of elements with specified probe length.
 std::vector<double> ProbeNormalizedHistogram() const {
   // Sum is accumulated as unsigned long long (0ull), then converted to
   // double so the per-bucket division below is floating point.
   double total_elements = std::accumulate(all_probes_histogram.begin(),
                                           all_probes_histogram.end(), 0ull);
   std::vector<double> res;
   for (size_t p : all_probes_histogram) {
     res.push_back(p / total_elements);
   }
   return res;
 }

 // Smallest probe length such that at least `Percentile` of all elements
 // have that probe length or less.
 size_t PercentileProbe(double Percentile = 0.99) const {
   size_t idx = 0;
   for (double p : ProbeNormalizedHistogram()) {
     if (Percentile > p) {
       Percentile -= p;
       ++idx;
     } else {
       return idx;
     }
   }
   return idx;
 }

 friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
   out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
       << ", PercentileRatio:" << s.PercentileRatio()
       << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
   for (double p : s.ProbeNormalizedHistogram()) {
     out << p << ",";
   }
   out << "]}";

   return out;
 }
};
   1780 
// Thresholds that a ProbeStats instance is checked against (see VerifyStats).
struct ExpectedStats {
 double avg_ratio;
 double max_ratio;
 // NOTE(review): "pecentile" is a typo for "percentile"; the names are kept
 // as-is because other blocks in this file reference these members.
 std::vector<std::pair<double, double>> pecentile_ratios;
 std::vector<std::pair<double, double>> pecentile_probes;

 friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
   out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
       << ", PercentileRatios: [";
   for (auto el : s.pecentile_ratios) {
     out << el.first << ":" << el.second << ", ";
   }
   out << "], PercentileProbes: [";
   for (auto el : s.pecentile_probes) {
     out << el.first << ":" << el.second << ", ";
   }
   out << "]}";

   return out;
 }
};
   1802 
   1803 void VerifyStats(size_t size, const ExpectedStats& exp,
   1804                 const ProbeStats& stats) {
   1805  EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
   1806  EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
   1807  for (auto pr : exp.pecentile_ratios) {
   1808    EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
   1809        << size << " " << pr.first << " " << stats;
   1810  }
   1811 
   1812  for (auto pr : exp.pecentile_probes) {
   1813    EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
   1814        << size << " " << pr.first << " " << stats;
   1815  }
   1816 }
   1817 
// Maps table size -> aggregated probe statistics collected for that size.
using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
   1819 
   1820 // Collect total ProbeStats on num_iters iterations of the following algorithm:
   1821 // 1. Create new table and reserve it to keys.size() * 2
   1822 // 2. Insert all keys xored with seed
   1823 // 3. Collect ProbeStats from final table.
   1824 ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
   1825    const std::vector<int64_t>& keys, size_t num_iters) {
   1826  const size_t reserve_size = keys.size() * 2;
   1827 
   1828  ProbeStats stats;
   1829 
   1830  int64_t seed = 0x71b1a19b907d6e33;
   1831  while (num_iters--) {
   1832    seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
   1833    IntTable t1;
   1834    t1.reserve(reserve_size);
   1835    for (const auto& key : keys) {
   1836      t1.emplace(key ^ seed);
   1837    }
   1838 
   1839    auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
   1840    stats.all_probes_histogram.resize(
   1841        std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
   1842    std::transform(probe_histogram.begin(), probe_histogram.end(),
   1843                   stats.all_probes_histogram.begin(),
   1844                   stats.all_probes_histogram.begin(), std::plus<size_t>());
   1845 
   1846    size_t total_probe_seq_length = 0;
   1847    for (size_t i = 0; i < probe_histogram.size(); ++i) {
   1848      total_probe_seq_length += i * probe_histogram[i];
   1849    }
   1850    stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
   1851                                        keys.size());
   1852    t1.erase(t1.begin(), t1.end());
   1853  }
   1854  return stats;
   1855 }
   1856 
// Expected probe-statistic thresholds for the XOR-seed benchmark, keyed on
// group width and build mode.
ExpectedStats XorSeedExpectedStats() {
 constexpr bool kRandomizesInserts =
#ifdef NDEBUG
     false;
#else   // NDEBUG
     true;
#endif  // NDEBUG

 // The effective load factor is larger in non-opt mode because we insert
 // elements out of order.
 // Each aggregate below is {avg_ratio, max_ratio, pecentile_ratios,
 // pecentile_probes} in ExpectedStats member order.
 switch (container_internal::Group::kWidth) {
   case 8:
     if (kRandomizesInserts) {
       return {0.05,
               1.0,
               {{0.95, 0.5}},
               {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
     } else {
       return {0.05,
               2.0,
               {{0.95, 0.1}},
               {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
     }
   case 16:
     if (kRandomizesInserts) {
       return {0.1,
               2.0,
               {{0.95, 0.1}},
               {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
     } else {
       return {0.05,
               1.0,
               {{0.95, 0.05}},
               {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
     }
 }
 LOG(FATAL) << "Unknown Group width";
 return {};
}
   1896 
   1897 // TODO(b/80415403): Figure out why this test is so flaky, esp. on MSVC
   1898 TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
   1899  ProbeStatsPerSize stats;
   1900  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
   1901  for (size_t size : sizes) {
   1902    stats[size] =
   1903        CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
   1904  }
   1905  auto expected = XorSeedExpectedStats();
   1906  for (size_t size : sizes) {
   1907    auto& stat = stats[size];
   1908    VerifyStats(size, expected, stat);
   1909    LOG(INFO) << size << " " << stat;
   1910  }
   1911 }
   1912 
   1913 // Collect total ProbeStats on num_iters iterations of the following algorithm:
   1914 // 1. Create new table
   1915 // 2. Select 10% of keys and insert 10 elements key * 17 + j * 13
   1916 // 3. Collect ProbeStats from final table
ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
   const std::vector<int64_t>& keys, size_t num_iters) {
 ProbeStats stats;

 std::random_device rd;
 std::mt19937 rng(rd());
 // Affine transform used to derive 10 related keys from each source key.
 auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
 std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
 while (num_iters--) {
   IntTable t1;
   // Pick a random 10% window of the keys (wrapping around) and insert 10
   // transformed variants of each.
   size_t num_keys = keys.size() / 10;
   size_t start = dist(rng);
   for (size_t i = 0; i != num_keys; ++i) {
     for (size_t j = 0; j != 10; ++j) {
       t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
     }
   }

   // Accumulate this table's probe histogram into the overall histogram.
   auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
   stats.all_probes_histogram.resize(
       std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
   std::transform(probe_histogram.begin(), probe_histogram.end(),
                  stats.all_probes_histogram.begin(),
                  stats.all_probes_histogram.begin(), std::plus<size_t>());

   // Total probe length = sum over i of (i * #elements with probe length i).
   size_t total_probe_seq_length = 0;
   for (size_t i = 0; i < probe_histogram.size(); ++i) {
     total_probe_seq_length += i * probe_histogram[i];
   }
   stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
                                       t1.size());
   t1.erase(t1.begin(), t1.end());
 }
 return stats;
}
   1952 
// Expected probe-statistic thresholds for the linear-transform benchmark,
// keyed on group width and build mode.
ExpectedStats LinearTransformExpectedStats() {
 constexpr bool kRandomizesInserts =
#ifdef NDEBUG
     false;
#else   // NDEBUG
     true;
#endif  // NDEBUG

 // The effective load factor is larger in non-opt mode because we insert
 // elements out of order.
 // Each aggregate below is {avg_ratio, max_ratio, pecentile_ratios,
 // pecentile_probes} in ExpectedStats member order.
 switch (container_internal::Group::kWidth) {
   case 8:
     if (kRandomizesInserts) {
       return {0.1,
               0.5,
               {{0.95, 0.3}},
               {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
     } else {
       return {0.4,
               0.6,
               {{0.95, 0.5}},
               {{0.95, 1}, {0.99, 14}, {0.999, 23}, {0.9999, 26}}};
     }
   case 16:
     if (kRandomizesInserts) {
       return {0.1,
               0.4,
               {{0.95, 0.3}},
               {{0.95, 1}, {0.99, 2}, {0.999, 9}, {0.9999, 15}}};
     } else {
       return {0.05,
               0.2,
               {{0.95, 0.1}},
               {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
     }
 }
 LOG(FATAL) << "Unknown Group width";
 return {};
}
   1992 
   1993 // TODO(b/80415403): Figure out why this test is so flaky.
   1994 TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
   1995  ProbeStatsPerSize stats;
   1996  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
   1997  for (size_t size : sizes) {
   1998    stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
   1999        CollectBadMergeKeys(size), 300);
   2000  }
   2001  auto expected = LinearTransformExpectedStats();
   2002  for (size_t size : sizes) {
   2003    auto& stat = stats[size];
   2004    VerifyStats(size, expected, stat);
   2005    LOG(INFO) << size << " " << stat;
   2006  }
   2007 }
   2008 
TEST(Table, EraseCollision) {
 // BadTable (defined earlier in this file) is presumed to force 1, 2 and 3
 // into one probe chain; the diagrams below show the chain after each step.
 BadTable t;

 // 1 2 3
 t.emplace(1);
 t.emplace(2);
 t.emplace(3);
 EXPECT_THAT(*t.find(1), 1);
 EXPECT_THAT(*t.find(2), 2);
 EXPECT_THAT(*t.find(3), 3);
 EXPECT_EQ(3, t.size());

 // 1 DELETED 3
 t.erase(t.find(2));
 EXPECT_THAT(*t.find(1), 1);
 EXPECT_TRUE(t.find(2) == t.end());
 EXPECT_THAT(*t.find(3), 3);
 EXPECT_EQ(2, t.size());

 // DELETED DELETED 3
 t.erase(t.find(1));
 EXPECT_TRUE(t.find(1) == t.end());
 EXPECT_TRUE(t.find(2) == t.end());
 EXPECT_THAT(*t.find(3), 3);
 EXPECT_EQ(1, t.size());

 // DELETED DELETED DELETED
 t.erase(t.find(3));
 EXPECT_TRUE(t.find(1) == t.end());
 EXPECT_TRUE(t.find(2) == t.end());
 EXPECT_TRUE(t.find(3) == t.end());
 EXPECT_EQ(0, t.size());
}
   2042 
TEST(Table, EraseInsertProbing) {
 // Verifies that slots freed by erase are reused by later inserts while all
 // surviving elements remain reachable through probing.
 BadTable t(100);

 // 1 2 3 4
 t.emplace(1);
 t.emplace(2);
 t.emplace(3);
 t.emplace(4);

 // 1 DELETED 3 DELETED
 t.erase(t.find(2));
 t.erase(t.find(4));

 // 1 10 3 11 12
 t.emplace(10);
 t.emplace(11);
 t.emplace(12);

 EXPECT_EQ(5, t.size());
 EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
}
   2064 
TEST(Table, GrowthInfoDeletedBit) {
 BadTable t;
 // A fresh table reports no deleted slots.
 EXPECT_TRUE(
     RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
 // Fill to the growth limit of a table larger than one group so that a
 // subsequent erase can leave a tombstone.
 int64_t init_count = static_cast<int64_t>(
     CapacityToGrowth(NormalizeCapacity(Group::kWidth + 1)));
 for (int64_t i = 0; i < init_count; ++i) {
   t.insert(i);
 }
 EXPECT_TRUE(
     RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
 // Erasing one element creates a tombstone and clears the "no deleted" bit.
 t.erase(0);
 EXPECT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 1);
 EXPECT_FALSE(
     RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
 // Rehashing drops tombstones and restores the bit.
 t.rehash(0);
 EXPECT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 0);
 EXPECT_TRUE(
     RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted());
}
   2085 
   2086 TYPED_TEST(SooTest, Clear) {
   2087  TypeParam t;
   2088  EXPECT_TRUE(t.find(0) == t.end());
   2089  t.clear();
   2090  EXPECT_TRUE(t.find(0) == t.end());
   2091  auto res = t.emplace(0);
   2092  EXPECT_TRUE(res.second);
   2093  EXPECT_EQ(1, t.size());
   2094  t.clear();
   2095  EXPECT_EQ(0, t.size());
   2096  EXPECT_TRUE(t.find(0) == t.end());
   2097 }
   2098 
   2099 TYPED_TEST(SooTest, Swap) {
   2100  TypeParam t;
   2101  EXPECT_TRUE(t.find(0) == t.end());
   2102  auto res = t.emplace(0);
   2103  EXPECT_TRUE(res.second);
   2104  EXPECT_EQ(1, t.size());
   2105  TypeParam u;
   2106  t.swap(u);
   2107  EXPECT_EQ(0, t.size());
   2108  EXPECT_EQ(1, u.size());
   2109  EXPECT_TRUE(t.find(0) == t.end());
   2110  EXPECT_THAT(*u.find(0), 0);
   2111 }
   2112 
   2113 TYPED_TEST(SooTest, Rehash) {
   2114  TypeParam t;
   2115  EXPECT_TRUE(t.find(0) == t.end());
   2116  t.emplace(0);
   2117  t.emplace(1);
   2118  EXPECT_EQ(2, t.size());
   2119  t.rehash(128);
   2120  EXPECT_EQ(2, t.size());
   2121  EXPECT_THAT(*t.find(0), 0);
   2122  EXPECT_THAT(*t.find(1), 1);
   2123 }
   2124 
   2125 TYPED_TEST(SooTest, RehashDoesNotRehashWhenNotNecessary) {
   2126  TypeParam t;
   2127  t.emplace(0);
   2128  t.emplace(1);
   2129  auto* p = &*t.find(0);
   2130  t.rehash(1);
   2131  EXPECT_EQ(p, &*t.find(0));
   2132 }
   2133 
// The following two tests use a non-SOO table because they test for 0 capacity.
TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
 NonSooIntTable t;
 // rehash(0) on a never-used table must leave it without a backing array.
 t.rehash(0);
 EXPECT_EQ(0, t.bucket_count());
}
   2140 
   2141 TEST(Table, RehashZeroDeallocatesEmptyTable) {
   2142  NonSooIntTable t;
   2143  t.emplace(0);
   2144  t.clear();
   2145  EXPECT_NE(0, t.bucket_count());
   2146  t.rehash(0);
   2147  EXPECT_EQ(0, t.bucket_count());
   2148 }
   2149 
   2150 TYPED_TEST(SooTest, RehashZeroForcesRehash) {
   2151  TypeParam t;
   2152  t.emplace(0);
   2153  t.emplace(1);
   2154  auto* p = &*t.find(0);
   2155  t.rehash(0);
   2156  EXPECT_NE(p, &*t.find(0));
   2157 }
   2158 
TEST(Table, ConstructFromInitList) {
 using P = std::pair<std::string, std::string>;
 struct Q {
   operator P() const { return {}; }  // NOLINT
 };
 // The table must be list-initializable from heterogeneous elements: an
 // exact value_type, a type convertible to it, and braced initializers.
 StringTable t = {P(), Q(), {}, {{}, {}}};
}
   2166 
   2167 TYPED_TEST(SooTest, CopyConstruct) {
   2168  TypeParam t;
   2169  t.emplace(0);
   2170  EXPECT_EQ(1, t.size());
   2171  {
   2172    TypeParam u(t);
   2173    EXPECT_EQ(1, u.size());
   2174    EXPECT_THAT(*u.find(0), 0);
   2175  }
   2176  {
   2177    TypeParam u{t};
   2178    EXPECT_EQ(1, u.size());
   2179    EXPECT_THAT(*u.find(0), 0);
   2180  }
   2181  {
   2182    TypeParam u = t;
   2183    EXPECT_EQ(1, u.size());
   2184    EXPECT_THAT(*u.find(0), 0);
   2185  }
   2186 }
   2187 
   2188 TYPED_TEST(SooTest, CopyAssignment) {
   2189  std::vector<size_t> sizes = {0, 1, 7, 25};
   2190  for (size_t source_size : sizes) {
   2191    for (size_t target_size : sizes) {
   2192      SCOPED_TRACE(absl::StrCat("source_size: ", source_size,
   2193                                " target_size: ", target_size));
   2194      TypeParam source;
   2195      std::vector<int> source_elements;
   2196      for (size_t i = 0; i < source_size; ++i) {
   2197        source.emplace(static_cast<int>(i) * 2);
   2198        source_elements.push_back(static_cast<int>(i) * 2);
   2199      }
   2200      TypeParam target;
   2201      for (size_t i = 0; i < target_size; ++i) {
   2202        target.emplace(static_cast<int>(i) * 3);
   2203      }
   2204      target = source;
   2205      ASSERT_EQ(target.size(), source_size);
   2206      ASSERT_THAT(target, UnorderedElementsAreArray(source_elements));
   2207    }
   2208  }
   2209 }
   2210 
   2211 TYPED_TEST(SooTest, CopyConstructWithSampling) {
   2212  SetSamplingRateTo1Percent();
   2213  for (int i = 0; i < 10000; ++i) {
   2214    TypeParam t;
   2215    t.emplace(0);
   2216    EXPECT_EQ(1, t.size());
   2217    {
   2218      TypeParam u(t);
   2219      EXPECT_EQ(1, u.size());
   2220      EXPECT_THAT(*u.find(0), 0);
   2221    }
   2222  }
   2223 }
   2224 
   2225 TYPED_TEST(SooTest, CopyDifferentSizes) {
   2226  TypeParam t;
   2227 
   2228  for (int i = 0; i < 100; ++i) {
   2229    t.emplace(i);
   2230    TypeParam c = t;
   2231    for (int j = 0; j <= i; ++j) {
   2232      ASSERT_TRUE(c.find(j) != c.end()) << "i=" << i << " j=" << j;
   2233    }
   2234    // Testing find miss to verify that table is not full.
   2235    ASSERT_TRUE(c.find(-1) == c.end());
   2236  }
   2237 }
   2238 
   2239 TYPED_TEST(SooTest, CopyDifferentCapacities) {
   2240  for (int cap = 1; cap < 100; cap = cap * 2 + 1) {
   2241    TypeParam t;
   2242    t.reserve(static_cast<size_t>(cap));
   2243    for (int i = 0; i <= cap; ++i) {
   2244      t.emplace(i);
   2245      if (i != cap && i % 5 != 0) {
   2246        continue;
   2247      }
   2248      TypeParam c = t;
   2249      for (int j = 0; j <= i; ++j) {
   2250        ASSERT_TRUE(c.find(j) != c.end())
   2251            << "cap=" << cap << " i=" << i << " j=" << j;
   2252      }
   2253      // Testing find miss to verify that table is not full.
   2254      ASSERT_TRUE(c.find(-1) == c.end());
   2255    }
   2256  }
   2257 }
   2258 
   2259 TEST(Table, CopyConstructWithAlloc) {
   2260  StringTable t;
   2261  t.emplace("a", "b");
   2262  EXPECT_EQ(1, t.size());
   2263  StringTable u(t, Alloc<std::pair<std::string, std::string>>());
   2264  EXPECT_EQ(1, u.size());
   2265  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
   2266 }
   2267 
// Int table using Alloc<int64_t> with a user-declared (defaulted) default
// constructor; presumably exercises allocators whose constructor is explicit
// (see Alloc's definition earlier in this file) — used by AllocWithExplicitCtor.
struct ExplicitAllocIntTable
   : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
                  std::equal_to<int64_t>, Alloc<int64_t>> {
 ExplicitAllocIntTable() = default;
};
   2273 
   2274 TEST(Table, AllocWithExplicitCtor) {
   2275  ExplicitAllocIntTable t;
   2276  EXPECT_EQ(0, t.size());
   2277 }
   2278 
   2279 TEST(Table, MoveConstruct) {
   2280  {
   2281    StringTable t;
   2282    t.emplace("a", "b");
   2283    EXPECT_EQ(1, t.size());
   2284 
   2285    StringTable u(std::move(t));
   2286    EXPECT_EQ(1, u.size());
   2287    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
   2288  }
   2289  {
   2290    StringTable t;
   2291    t.emplace("a", "b");
   2292    EXPECT_EQ(1, t.size());
   2293 
   2294    StringTable u{std::move(t)};
   2295    EXPECT_EQ(1, u.size());
   2296    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
   2297  }
   2298  {
   2299    StringTable t;
   2300    t.emplace("a", "b");
   2301    EXPECT_EQ(1, t.size());
   2302 
   2303    StringTable u = std::move(t);
   2304    EXPECT_EQ(1, u.size());
   2305    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
   2306  }
   2307 }
   2308 
   2309 TEST(Table, MoveConstructWithAlloc) {
   2310  StringTable t;
   2311  t.emplace("a", "b");
   2312  EXPECT_EQ(1, t.size());
   2313  StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
   2314  EXPECT_EQ(1, u.size());
   2315  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
   2316 }
   2317 
   2318 TEST(Table, CopyAssign) {
   2319  StringTable t;
   2320  t.emplace("a", "b");
   2321  EXPECT_EQ(1, t.size());
   2322  StringTable u;
   2323  u = t;
   2324  EXPECT_EQ(1, u.size());
   2325  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
   2326 }
   2327 
TEST(Table, CopySelfAssign) {
 StringTable t;
 t.emplace("a", "b");
 EXPECT_EQ(1, t.size());
 // `*&t` performs `t = t` while sidestepping compiler self-assignment
 // warnings; the element must survive.
 t = *&t;
 EXPECT_EQ(1, t.size());
 EXPECT_THAT(*t.find("a"), Pair("a", "b"));
}
   2336 
   2337 TEST(Table, MoveAssign) {
   2338  StringTable t;
   2339  t.emplace("a", "b");
   2340  EXPECT_EQ(1, t.size());
   2341  StringTable u;
   2342  u = std::move(t);
   2343  EXPECT_EQ(1, u.size());
   2344  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
   2345 }
   2346 
TEST(Table, MoveSelfAssign) {
 StringTable t;
 t.emplace("a", "b");
 EXPECT_EQ(1, t.size());
 // `std::move(*&t)` performs a self-move-assignment while sidestepping
 // compiler warnings.
 t = std::move(*&t);
 if (SwisstableGenerationsEnabled()) {
   // NOLINTNEXTLINE(bugprone-use-after-move)
   EXPECT_DEATH_IF_SUPPORTED(t.contains("a"), "self-move-assigned");
 }
 // As long as we don't crash, it's fine.
}
   2358 
   2359 TEST(Table, Equality) {
   2360  StringTable t;
   2361  std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
   2362                                                        {"aa", "bb"}};
   2363  t.insert(std::begin(v), std::end(v));
   2364  StringTable u = t;
   2365  EXPECT_EQ(u, t);
   2366 }
   2367 
   2368 TEST(Table, Equality2) {
   2369  StringTable t;
   2370  std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
   2371                                                         {"aa", "bb"}};
   2372  t.insert(std::begin(v1), std::end(v1));
   2373  StringTable u;
   2374  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
   2375                                                         {"aa", "aa"}};
   2376  u.insert(std::begin(v2), std::end(v2));
   2377  EXPECT_NE(u, t);
   2378 }
   2379 
   2380 TEST(Table, Equality3) {
   2381  StringTable t;
   2382  std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
   2383                                                         {"bb", "bb"}};
   2384  t.insert(std::begin(v1), std::end(v1));
   2385  StringTable u;
   2386  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
   2387                                                         {"aa", "aa"}};
   2388  u.insert(std::begin(v2), std::end(v2));
   2389  EXPECT_NE(u, t);
   2390 }
   2391 
TYPED_TEST(SooTest, NumDeletedRegression) {
 TypeParam t;
 t.emplace(0);
 t.erase(t.find(0));
 // construct over a deleted slot.
 t.emplace(0);
 // Regression check: clearing after reusing a deleted slot must not trip
 // internal accounting. The test passes by not crashing/asserting.
 t.clear();
}
   2400 
   2401 TYPED_TEST(SooTest, FindFullDeletedRegression) {
   2402  TypeParam t;
   2403  for (int i = 0; i < 1000; ++i) {
   2404    t.emplace(i);
   2405    t.erase(t.find(i));
   2406  }
   2407  EXPECT_EQ(0, t.size());
   2408 }
   2409 
TYPED_TEST(SooTest, ReplacingDeletedSlotDoesNotRehash) {
 // We need to disable hashtablez to avoid issues related to SOO and sampling.
 DisableSampling();

 size_t n;
 {
   // Compute n such that n is the maximum number of elements before rehash.
   TypeParam t;
   t.emplace(0);
   size_t c = t.bucket_count();
   for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
   --n;
 }
 // Fill a fresh table to exactly that threshold.
 TypeParam t;
 t.rehash(n);
 const size_t c = t.bucket_count();
 for (size_t i = 0; i != n; ++i) t.emplace(i);
 EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
 // Erase and re-insert a key: reusing the deleted slot must not trigger a
 // rehash, observable as an unchanged bucket_count.
 t.erase(0);
 t.emplace(0);
 EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
}
   2432 
TEST(Table, NoThrowMoveConstruct) {
 // The ASSERTs establish the preconditions (hasher, equality functor, and
 // allocator are nothrow copy constructible); the final EXPECT verifies the
 // table type itself is nothrow move constructible.
 ASSERT_TRUE(
     std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
 ASSERT_TRUE(std::is_nothrow_copy_constructible<
             std::equal_to<absl::string_view>>::value);
 ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
 EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
}
   2441 
TEST(Table, NoThrowMoveAssign) {
 // The ASSERTs establish the preconditions (functors and allocator are
 // nothrow move assignable, and the allocator is always-equal); the final
 // EXPECT verifies the table type itself is nothrow move assignable.
 ASSERT_TRUE(
     std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
 ASSERT_TRUE(
     std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
 ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
 ASSERT_TRUE(
     absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
 EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
}
   2452 
TEST(Table, NoThrowSwappable) {
 // The ASSERTs establish that all the table's pieces swap without throwing;
 // the final EXPECT verifies the table type itself is nothrow swappable.
 ASSERT_TRUE(
     container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
 ASSERT_TRUE(container_internal::IsNoThrowSwappable<
             std::equal_to<absl::string_view>>());
 ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
 EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
}
   2461 
// Without is_transparent, lookups must convert the query to the key type
// before hashing/comparing; with is_transparent, the query type is used
// directly. The double overloads in Hash/Eq call ADD_FAILURE() so the test
// fails if the non-transparent table ever uses them.
TEST(Table, HeterogeneousLookup) {
  struct Hash {
    size_t operator()(int64_t i) const { return i; }
    size_t operator()(double i) const {
      ADD_FAILURE();  // A non-transparent table must never hash a double.
      return i;
    }
  };
  struct Eq {
    bool operator()(int64_t a, int64_t b) const { return a == b; }
    bool operator()(double a, int64_t b) const {
      ADD_FAILURE();  // Same: mixed-type comparisons must not be used.
      return a == b;
    }
    bool operator()(int64_t a, double b) const {
      ADD_FAILURE();
      return a == b;
    }
    bool operator()(double a, double b) const {
      ADD_FAILURE();
      return a == b;
    }
  };

  // Transparent versions: the double overloads are legitimate here.
  struct THash {
    using is_transparent = void;
    size_t operator()(int64_t i) const { return i; }
    size_t operator()(double i) const { return i; }
  };
  struct TEq {
    using is_transparent = void;
    bool operator()(int64_t a, int64_t b) const { return a == b; }
    bool operator()(double a, int64_t b) const { return a == b; }
    bool operator()(int64_t a, double b) const { return a == b; }
    bool operator()(double a, double b) const { return a == b; }
  };

  raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
  // It will convert to int64_t before the query.
  EXPECT_EQ(1, *s.find(double{1.1}));

  raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
  // It will try to use the double, and fail to find the object.
  EXPECT_TRUE(ts.find(1.1) == ts.end());
}
   2507 
// Expression aliases that are well-formed only if the corresponding table
// operation accepts an int argument (i.e. a heterogeneous overload exists).
template <class Table>
using CallFind = decltype(std::declval<Table&>().find(17));

template <class Table>
using CallErase = decltype(std::declval<Table&>().erase(17));

template <class Table>
using CallExtract = decltype(std::declval<Table&>().extract(17));

template <class Table>
using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));

template <class Table>
using CallCount = decltype(std::declval<Table&>().count(17));

// VerifyResultOf<C, Table>::value is true iff the expression alias C<Table>
// compiles (detection-idiom via void_t partial specialization).
template <template <typename> class C, class Table, class = void>
struct VerifyResultOf : std::false_type {};

template <template <typename> class C, class Table>
struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {};
   2528 
// Heterogeneous (int-keyed) overloads must be SFINAE'd out on a string table
// with non-transparent functors, and present with transparent ones.
TEST(Table, HeterogeneousLookupOverloads) {
  using NonTransparentTable =
      raw_hash_set<StringPolicy, absl::Hash<absl::string_view>,
                   std::equal_to<absl::string_view>, std::allocator<int>>;

  EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));

  using TransparentTable =
      raw_hash_set<StringPolicy, hash_default_hash<absl::string_view>,
                   hash_default_eq<absl::string_view>, std::allocator<int>>;

  EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
}
   2550 
   2551 TEST(Iterator, IsDefaultConstructible) {
   2552  StringTable::iterator i;
   2553  EXPECT_TRUE(i == StringTable::iterator());
   2554 }
   2555 
   2556 TEST(ConstIterator, IsDefaultConstructible) {
   2557  StringTable::const_iterator i;
   2558  EXPECT_TRUE(i == StringTable::const_iterator());
   2559 }
   2560 
   2561 TEST(Iterator, ConvertsToConstIterator) {
   2562  StringTable::iterator i;
   2563  EXPECT_TRUE(i == StringTable::const_iterator());
   2564 }
   2565 
   2566 TEST(Iterator, Iterates) {
   2567  IntTable t;
   2568  for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second);
   2569  EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5));
   2570 }
   2571 
// merge() moves over elements whose keys are absent in the destination and
// leaves colliding elements ("0" here) behind in the source.
TEST(Table, Merge) {
  StringTable t1, t2;
  t1.emplace("0", "-0");
  t1.emplace("1", "-1");
  t2.emplace("0", "~0");
  t2.emplace("2", "~2");

  EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1")));
  EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2")));

  t1.merge(t2);
  // "2" moved into t1; the duplicate "0" stayed in t2 with its own value.
  EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"),
                                       Pair("2", "~2")));
  EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
}
   2587 
// Range construction/insertion must work when value_type is only
// emplace-constructible (explicit ctor) from the iterator's reference type.
TEST(Table, IteratorEmplaceConstructibleRequirement) {
  struct Value {
    // Explicit: Value is NOT implicitly convertible from string_view.
    explicit Value(absl::string_view view) : value(view) {}
    std::string value;

    bool operator==(const Value& other) const { return value == other.value; }
  };
  struct H {
    size_t operator()(const Value& v) const {
      return absl::Hash<std::string>{}(v.value);
    }
  };

  struct Table : raw_hash_set<ValuePolicy<Value>, H, std::equal_to<Value>,
                              std::allocator<Value>> {
    using Base = typename Table::raw_hash_set;
    using Base::Base;
  };

  std::string input[3]{"A", "B", "C"};

  // Range constructor with emplace-only value_type.
  Table t(std::begin(input), std::end(input));
  EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"}));

  // Range insert with emplace-only value_type.
  input[0] = "D";
  input[1] = "E";
  input[2] = "F";
  t.insert(std::begin(input), std::end(input));
  EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"},
                                      Value{"D"}, Value{"E"}, Value{"F"}));
}
   2619 
// A default-constructed node handle is empty (falsy) and exposes the
// table's allocator type.
TEST(Nodes, EmptyNodeType) {
  using node_type = StringTable::node_type;
  node_type n;
  EXPECT_FALSE(n);
  EXPECT_TRUE(n.empty());

  EXPECT_TRUE((std::is_same<node_type::allocator_type,
                            StringTable::allocator_type>::value));
}
   2629 
// Exercises the extract()/insert(node) round trip: successful transfer,
// extracting a missing key, inserting an empty node, and inserting a node
// whose key already exists.
TEST(Nodes, ExtractInsert) {
  // Long strings so the values are heap-allocated (no SSO) and a real
  // ownership transfer is observable.
  constexpr char k0[] = "Very long string zero.";
  constexpr char k1[] = "Very long string one.";
  constexpr char k2[] = "Very long string two.";
  StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
  EXPECT_THAT(t,
              UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));

  // Extract removes the element and yields a non-empty node.
  auto node = t.extract(k0);
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  EXPECT_TRUE(node);
  EXPECT_FALSE(node.empty());

  // Successful insert consumes the node.
  StringTable t2;
  StringTable::insert_return_type res = t2.insert(std::move(node));
  EXPECT_TRUE(res.inserted);
  EXPECT_THAT(*res.position, Pair(k0, ""));
  EXPECT_FALSE(res.node);
  EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));

  // Not there.
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  node = t.extract("Not there!");
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  EXPECT_FALSE(node);

  // Inserting nothing.
  res = t2.insert(std::move(node));
  EXPECT_FALSE(res.inserted);
  EXPECT_EQ(res.position, t2.end());
  EXPECT_FALSE(res.node);
  EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));

  t.emplace(k0, "1");
  node = t.extract(k0);

  // Insert duplicate: the insert fails and the node is returned to the
  // caller via res.node.
  res = t2.insert(std::move(node));
  EXPECT_FALSE(res.inserted);
  EXPECT_THAT(*res.position, Pair(k0, ""));
  EXPECT_TRUE(res.node);
  EXPECT_FALSE(node);  // NOLINT(bugprone-use-after-move)
}
   2673 
// Hinted insert of a node handle: on success the node is consumed; on a key
// collision the node is left intact so the caller can recover the element.
TYPED_TEST(SooTest, HintInsert) {
  TypeParam t = {1, 2, 3};
  auto node = t.extract(1);
  EXPECT_THAT(t, UnorderedElementsAre(2, 3));
  auto it = t.insert(t.begin(), std::move(node));
  EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
  EXPECT_EQ(*it, 1);
  EXPECT_FALSE(node);  // NOLINT(bugprone-use-after-move)

  node = t.extract(2);
  EXPECT_THAT(t, UnorderedElementsAre(1, 3));
  // reinsert 2 to make the next insert fail.
  t.insert(2);
  EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
  it = t.insert(t.begin(), std::move(node));
  EXPECT_EQ(*it, 2);
  // The node was not emptied by the insert call.
  EXPECT_TRUE(node);  // NOLINT(bugprone-use-after-move)
}
   2693 
   2694 template <typename T>
   2695 T MakeSimpleTable(size_t size, bool do_reserve) {
   2696  T t;
   2697  if (do_reserve) t.reserve(size);
   2698  while (t.size() < size) t.insert(t.size());
   2699  return t;
   2700 }
   2701 
   2702 template <typename T>
   2703 std::vector<int> OrderOfIteration(const T& t) {
   2704  std::vector<int> res;
   2705  for (auto i : t) res.push_back(static_cast<int>(i));
   2706  return res;
   2707 }
   2708 
   2709 // Generate irrelevant seeds to avoid being stuck in the same last bit
   2710 // in seed.
   2711 void GenerateIrrelevantSeeds(int cnt) {
   2712  for (int i = cnt % 17; i > 0; --i) {
   2713    NextSeedBaseNumber();
   2714  }
   2715 }
   2716 
// These IterationOrderChanges tests depend on non-deterministic behavior.
// We are injecting non-determinism to the table.
// We have to retry enough times to make sure that the seed changes in bits that
// matter for the iteration order.
TYPED_TEST(SooTest, IterationOrderChangesByInstance) {
  DisableSampling();  // We do not want test to pass only because of sampling.
  for (bool do_reserve : {false, true}) {
    for (size_t size : {2u, 6u, 12u, 20u}) {
      SCOPED_TRACE(absl::StrCat("size: ", size, " do_reserve: ", do_reserve));
      // Baseline order from one table instance.
      const auto reference_table = MakeSimpleTable<TypeParam>(size, do_reserve);
      const auto reference = OrderOfIteration(reference_table);

      // New instances should eventually iterate in a different order.
      bool found_difference = false;
      for (int i = 0; !found_difference && i < 500; ++i) {
        auto new_table = MakeSimpleTable<TypeParam>(size, do_reserve);
        found_difference = OrderOfIteration(new_table) != reference;
        GenerateIrrelevantSeeds(i);
      }
      if (!found_difference) {
        FAIL() << "Iteration order remained the same across many attempts.";
      }
    }
  }
}
   2741 
// Rehashing a table should (eventually, across retries) change its iteration
// order, both for a same-capacity rehash and for a growing rehash.
TYPED_TEST(SooTest, IterationOrderChangesOnRehash) {
  DisableSampling();  // We do not want test to pass only because of sampling.

  // We test different sizes with many small numbers, because small table
  // resize has a different codepath.
  // Note: iteration order for size() <= 1 is always the same.
  for (bool do_reserve : {false, true}) {
    for (size_t size : {2u, 3u, 6u, 7u, 12u, 15u, 20u, 50u}) {
      for (size_t rehash_size : {
               size_t{0},        // Force rehash is guaranteed.
               size * 10  // Rehash to the larger capacity is guaranteed.
           }) {
        SCOPED_TRACE(absl::StrCat("size: ", size, " rehash_size: ", rehash_size,
                                  " do_reserve: ", do_reserve));
        bool ok = false;
        auto t = MakeSimpleTable<TypeParam>(size, do_reserve);
        const size_t original_capacity = t.capacity();
        auto reference = OrderOfIteration(t);
        for (int i = 0; i < 500; ++i) {
          if (i > 0 && rehash_size != 0) {
            // Rehash back to original size.
            t.rehash(0);
            ASSERT_EQ(t.capacity(), original_capacity);
            // Re-capture the baseline: the shrink itself may reorder.
            reference = OrderOfIteration(t);
          }
          // Force rehash.
          t.rehash(rehash_size);
          auto trial = OrderOfIteration(t);
          if (trial != reference) {
            // We are done.
            ok = true;
            break;
          }
          GenerateIrrelevantSeeds(i);
        }
        EXPECT_TRUE(ok)
            << "Iteration order remained the same across many attempts " << size
            << "->" << rehash_size << ".";
      }
    }
  }
}
   2784 
// Verify that pointers are invalidated as soon as a second element is inserted.
// This prevents dependency on pointer stability on small tables.
TYPED_TEST(SooTest, UnstablePointers) {
  // We need to disable hashtablez to avoid issues related to SOO and sampling.
  DisableSampling();

  TypeParam table;

  // Returns the current address of element i as an integer for comparison.
  const auto addr = [&](int i) {
    return reinterpret_cast<uintptr_t>(&*table.find(i));
  };

  table.insert(0);
  const uintptr_t old_ptr = addr(0);

  // This causes a rehash.
  table.insert(1);

  // Element 0 must have moved to new storage.
  EXPECT_NE(old_ptr, addr(0));
}
   2805 
// Misusing iterators (erasing end(), advancing default-constructed or erased
// iterators) must trip the table's debug assertions.
TEST(TableDeathTest, InvalidIteratorAsserts) {
  if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
    GTEST_SKIP() << "Assertions not enabled.";

  NonSooIntTable t;
  // Extra simple "regexp" as regexp support is highly varied across platforms.
  EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
                            "erase.* called on end.. iterator.");
  typename NonSooIntTable::iterator iter;
  EXPECT_DEATH_IF_SUPPORTED(
      ++iter, "operator.* called on default-constructed iterator.");
  t.insert(0);
  iter = t.begin();
  t.erase(iter);
  // The expected message depends on whether generations can prove the erase
  // or asan has to guess at it.
  const char* const kErasedDeathMessage =
      SwisstableGenerationsEnabled()
          ? "operator.* called on invalid iterator.*was likely erased"
          : "operator.* called on invalid iterator.*might have been "
            "erased.*config=asan";
  EXPECT_DEATH_IF_SUPPORTED(++iter, kErasedDeathMessage);
}
   2827 
// Same as InvalidIteratorAsserts but for an SOO table, which cannot detect
// the erased-iterator case (see trailing comment).
TEST(TableDeathTest, InvalidIteratorAssertsSoo) {
  if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
    GTEST_SKIP() << "Assertions not enabled.";

  SooIntTable t;
  // Extra simple "regexp" as regexp support is highly varied across platforms.
  EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
                            "erase.* called on end.. iterator.");
  typename SooIntTable::iterator iter;
  EXPECT_DEATH_IF_SUPPORTED(
      ++iter, "operator.* called on default-constructed iterator.");

  // We can't detect the erased iterator case as invalid in SOO mode because
  // the control is static constant.
}
   2843 
// Invalid iterator use can trigger use-after-free in asan/hwasan,
// use-of-uninitialized-value in msan, or invalidated iterator assertions.
// This alternation covers all of those failure modes.
constexpr const char* kInvalidIteratorDeathMessage =
    "use-after-free|use-of-uninitialized-value|invalidated "
    "iterator|Invalid iterator|invalid iterator";

// MSVC doesn't support | in regex, so tests using the message above must be
// skipped there (see the kMsvc checks below).
#if defined(_MSC_VER)
constexpr bool kMsvc = true;
#else
constexpr bool kMsvc = false;
#endif
   2856 
// Comparing iterators that were invalidated by erase, or that belong to
// different tables, must trip the debug assertions.
TYPED_TEST(SooTest, IteratorInvalidAssertsEqualityOperator) {
  if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
    GTEST_SKIP() << "Assertions not enabled.";

  TypeParam t;
  t.insert(1);
  t.insert(2);
  t.insert(3);
  auto iter1 = t.begin();
  auto iter2 = std::next(iter1);
  ASSERT_NE(iter1, t.end());
  ASSERT_NE(iter2, t.end());
  t.erase(iter1);
  // Extra simple "regexp" as regexp support is highly varied across platforms.
  const char* const kErasedDeathMessage =
      SwisstableGenerationsEnabled()
          ? "Invalid iterator comparison.*was likely erased"
          : "Invalid iterator comparison.*might have been erased.*config=asan";
  // Both orderings of the comparison must assert, and so must comparing two
  // erased iterators.
  EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
  EXPECT_DEATH_IF_SUPPORTED(void(iter2 != iter1), kErasedDeathMessage);
  t.erase(iter2);
  EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);

  // Comparing iterators from two distinct (live) tables is also invalid.
  TypeParam t1, t2;
  t1.insert(0);
  t2.insert(0);
  iter1 = t1.begin();
  iter2 = t2.begin();
  const char* const kContainerDiffDeathMessage =
      SwisstableGenerationsEnabled()
          ? "Invalid iterator comparison.*iterators from different.* hashtables"
          : "Invalid iterator comparison.*may be from different "
            ".*containers.*config=asan";
  EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage);
  EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage);
}
   2893 
// Comparing an iterator that was invalidated by a rehash must assert.
TYPED_TEST(SooTest, IteratorInvalidAssertsEqualityOperatorRehash) {
  if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
    GTEST_SKIP() << "Assertions not enabled.";
  if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regex.";
#ifdef ABSL_HAVE_THREAD_SANITIZER
  GTEST_SKIP() << "ThreadSanitizer test runs fail on use-after-free even in "
                  "EXPECT_DEATH.";
#endif

  TypeParam t;
  t.insert(0);
  auto iter = t.begin();

  // Trigger a rehash in t.
  for (int i = 0; i < 10; ++i) t.insert(i);

  const char* const kRehashedDeathMessage =
      SwisstableGenerationsEnabled()
          ? kInvalidIteratorDeathMessage
          : "Invalid iterator comparison.*might have rehashed.*config=asan";
  EXPECT_DEATH_IF_SUPPORTED(void(iter == t.begin()), kRehashedDeathMessage);
}
   2916 
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Typed test suite exercising hashtablez sampling for both an SOO-capable
// table and a non-SOO one.
template <typename T>
class RawHashSamplerTest : public testing::Test {};

using RawHashSamplerTestTypes = ::testing::Types<
    // 32 bits to make sure that table is Soo for 32 bits platform as well.
    // 64 bits table is not SOO due to alignment.
    SooInt32Table,
    NonSooIntTable>;
TYPED_TEST_SUITE(RawHashSamplerTest, RawHashSamplerTestTypes);
   2927 
// End-to-end sampling test: creates a million tables with a deterministic
// mix of reserve/rehash/insert patterns and checks that ~1% get sampled and
// that the sampled metadata (checksums, reservations, sizes) is plausible.
TYPED_TEST(RawHashSamplerTest, Sample) {
  constexpr bool soo_enabled = std::is_same<SooInt32Table, TypeParam>::value;
  // Enable the feature even if the prod default is off.
  SetSamplingRateTo1Percent();

  ASSERT_EQ(TypeParam().capacity(), soo_enabled ? SooCapacity() : 0);

  auto& sampler = GlobalHashtablezSampler();
  size_t start_size = 0;

  // Reserve these utility tables, so that if they sampled, they'll be
  // preexisting.
  absl::flat_hash_set<const HashtablezInfo*> preexisting_info(10);
  absl::flat_hash_map<size_t, int> observed_checksums(10);
  absl::flat_hash_map<ssize_t, int> reservations(10);

  // Record every already-live sampled table so it can be excluded below.
  // Iterate's return value (dropped samples) is folded into the count too.
  start_size += sampler.Iterate([&](const HashtablezInfo& info) {
    preexisting_info.insert(&info);
    ++start_size;
  });

  std::vector<TypeParam> tables;
  for (int i = 0; i < 1000000; ++i) {
    tables.emplace_back();

    const bool do_reserve = (i % 10 > 5);
    const bool do_rehash = !do_reserve && (i % 10 > 0);

    if (do_reserve) {
      // Don't reserve on all tables.
      tables.back().reserve(10 * (i % 10));
    }

    // Two inserts: element 1 plus i % 5, giving 5 distinct content checksums.
    tables.back().insert(1);
    tables.back().insert(i % 5);

    if (do_rehash) {
      // Rehash some other tables.
      tables.back().rehash(10 * (i % 10));
    }
  }
  size_t end_size = 0;
  end_size += sampler.Iterate([&](const HashtablezInfo& info) {
    ++end_size;
    if (preexisting_info.contains(&info)) return;
    observed_checksums[info.hashes_bitwise_xor.load(
        std::memory_order_relaxed)]++;
    reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
    EXPECT_EQ(info.inline_element_size, sizeof(typename TypeParam::value_type));
    EXPECT_EQ(info.key_size, sizeof(typename TypeParam::key_type));
    EXPECT_EQ(info.value_size, sizeof(typename TypeParam::value_type));

    if (soo_enabled) {
      EXPECT_EQ(info.soo_capacity, SooCapacity());
    } else {
      EXPECT_EQ(info.soo_capacity, 0);
    }
  });

  // Expect that we sampled at the requested sampling rate of ~1%.
  EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
              0.01, 0.005);
  // One checksum per distinct i % 5 content, each ~20% of samples.
  EXPECT_EQ(observed_checksums.size(), 5);
  for (const auto& [_, count] : observed_checksums) {
    EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05);
  }

  // Ten distinct max_reserve values (10 * (i % 10)), each ~10% of samples.
  EXPECT_EQ(reservations.size(), 10);
  for (const auto& [reservation, count] : reservations) {
    EXPECT_GE(reservation, 0);
    EXPECT_LT(reservation, 100);

    EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.1, 0.05)
        << reservation;
  }
}
   3004 
// Applies `mutate_table` to a million fresh SOO tables, verifies ~1% were
// sampled, and returns the HashtablezInfo records of the newly sampled ones
// (preexisting samples are filtered out).
std::vector<const HashtablezInfo*> SampleSooMutation(
    absl::FunctionRef<void(SooInt32Table&)> mutate_table) {
  // Enable the feature even if the prod default is off.
  SetSamplingRateTo1Percent();

  auto& sampler = GlobalHashtablezSampler();
  size_t start_size = 0;
  // Reserve the table, so that if it sampled, it'll be preexisting.
  absl::flat_hash_set<const HashtablezInfo*> preexisting_info(10);
  start_size += sampler.Iterate([&](const HashtablezInfo& info) {
    preexisting_info.insert(&info);
    ++start_size;
  });

  std::vector<SooInt32Table> tables;
  for (int i = 0; i < 1000000; ++i) {
    tables.emplace_back();
    mutate_table(tables.back());
  }
  size_t end_size = 0;
  std::vector<const HashtablezInfo*> infos;
  end_size += sampler.Iterate([&](const HashtablezInfo& info) {
    ++end_size;
    if (preexisting_info.contains(&info)) return;
    infos.push_back(&info);
  });

  // Expect that we sampled at the requested sampling rate of ~1%.
  EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
              0.01, 0.005);
  return infos;
}
   3037 
// A single insert into an empty SOO table should be sampled with a
// just-grown capacity and no erases/probing recorded.
TEST(RawHashSamplerTest, SooTableInsertToEmpty) {
  if (SooInt32Table().capacity() != SooCapacity()) {
    CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
    GTEST_SKIP() << "not SOO on this platform";
  }
  std::vector<const HashtablezInfo*> infos =
      SampleSooMutation([](SooInt32Table& t) { t.insert(1); });

  for (const HashtablezInfo* info : infos) {
    ASSERT_EQ(info->inline_element_size,
              sizeof(typename SooInt32Table::value_type));
    ASSERT_EQ(info->soo_capacity, SooCapacity());
    // Sampling forces the table off SOO, so capacity is the next step up.
    ASSERT_EQ(info->capacity, NextCapacity(SooCapacity()));
    ASSERT_EQ(info->size, 1);
    ASSERT_EQ(info->max_reserve, 0);
    ASSERT_EQ(info->num_erases, 0);
    ASSERT_EQ(info->max_probe_length, 0);
    ASSERT_EQ(info->total_probe_length, 0);
  }
}
   3058 
// reserve() on an empty SOO table should be sampled with the reserved
// capacity recorded and size zero.
TEST(RawHashSamplerTest, SooTableReserveToEmpty) {
  if (SooInt32Table().capacity() != SooCapacity()) {
    CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
    GTEST_SKIP() << "not SOO on this platform";
  }
  std::vector<const HashtablezInfo*> infos =
      SampleSooMutation([](SooInt32Table& t) { t.reserve(100); });

  for (const HashtablezInfo* info : infos) {
    ASSERT_EQ(info->inline_element_size,
              sizeof(typename SooInt32Table::value_type));
    ASSERT_EQ(info->soo_capacity, SooCapacity());
    ASSERT_GE(info->capacity, 100);
    ASSERT_EQ(info->size, 0);
    ASSERT_EQ(info->max_reserve, 100);
    ASSERT_EQ(info->num_erases, 0);
    ASSERT_EQ(info->max_probe_length, 0);
    ASSERT_EQ(info->total_probe_length, 0);
  }
}
   3079 
// This tests that reserve on a full SOO table doesn't incorrectly result in new
// (over-)sampling.
TEST(RawHashSamplerTest, SooTableReserveToFullSoo) {
  if (SooInt32Table().capacity() != SooCapacity()) {
    CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
    GTEST_SKIP() << "not SOO on this platform";
  }
  std::vector<const HashtablezInfo*> infos =
      SampleSooMutation([](SooInt32Table& t) {
        t.insert(1);
        t.reserve(100);
      });

  for (const HashtablezInfo* info : infos) {
    ASSERT_EQ(info->inline_element_size,
              sizeof(typename SooInt32Table::value_type));
    ASSERT_EQ(info->soo_capacity, SooCapacity());
    ASSERT_GE(info->capacity, 100);
    ASSERT_EQ(info->size, 1);
    ASSERT_EQ(info->max_reserve, 100);
    ASSERT_EQ(info->num_erases, 0);
    ASSERT_EQ(info->max_probe_length, 0);
    ASSERT_EQ(info->total_probe_length, 0);
  }
}
   3105 
// This tests that rehash(0) on a sampled table with size that fits in SOO
// doesn't incorrectly result in losing sampling.
TEST(RawHashSamplerTest, SooTableRehashShrinkWhenSizeFitsInSoo) {
  if (SooInt32Table().capacity() != SooCapacity()) {
    CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
    GTEST_SKIP() << "not SOO on this platform";
  }
  std::vector<const HashtablezInfo*> infos =
      SampleSooMutation([](SooInt32Table& t) {
        t.reserve(100);
        t.insert(1);
        EXPECT_GE(t.capacity(), 100);
        t.rehash(0);
      });

  for (const HashtablezInfo* info : infos) {
    ASSERT_EQ(info->inline_element_size,
              sizeof(typename SooInt32Table::value_type));
    ASSERT_EQ(info->soo_capacity, SooCapacity());
    // Shrunk as far as a sampled table can go: the first non-SOO capacity.
    ASSERT_EQ(info->capacity, NextCapacity(SooCapacity()));
    ASSERT_EQ(info->size, 1);
    ASSERT_EQ(info->max_reserve, 100);
    ASSERT_EQ(info->num_erases, 0);
    ASSERT_EQ(info->max_probe_length, 0);
    ASSERT_EQ(info->total_probe_length, 0);
  }
}
#endif  // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
   3134 
// Tables using a custom (non-std) allocator must never be sampled, even at
// a 1% sampling rate: the observed rate should be ~0.
TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
  // Enable the feature even if the prod default is off.
  SetSamplingRateTo1Percent();

  auto& sampler = GlobalHashtablezSampler();
  size_t start_size = 0;
  start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });

  std::vector<CustomAllocIntTable> tables;
  for (int i = 0; i < 100000; ++i) {
    tables.emplace_back();
    tables.back().insert(1);
  }
  size_t end_size = 0;
  end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });

  EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
              0.00, 0.001);
}
   3154 
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
// Typed suite verifying asan poisoning of unused slots for both the plain
// and the trivially-transferable int table.
template <class TableType>
class SanitizerTest : public testing::Test {};

using SanitizerTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
TYPED_TEST_SUITE(SanitizerTest, SanitizerTableTypes);
   3161 
// All slots except the one occupied element must be asan-poisoned, across a
// range of reserved capacities.
TYPED_TEST(SanitizerTest, PoisoningUnused) {
  TypeParam t;
  for (size_t reserve_size = 2; reserve_size < 1024;
       reserve_size = reserve_size * 3 / 2) {
    t.reserve(reserve_size);
    // Insert something to force an allocation.
    int64_t& v = *t.insert(0).first;

    // Make sure there is something to test.
    ASSERT_GT(t.capacity(), 1);

    // Exactly the slot holding `v` is unpoisoned; every other slot is
    // poisoned.
    int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
    for (size_t i = 0; i < t.capacity(); ++i) {
      EXPECT_EQ(slots + i != &v, __asan_address_is_poisoned(slots + i)) << i;
    }
  }
}
   3179 
// TODO(b/289225379): poison inline space when empty SOO.
// Erasing an element must re-poison its slot.
TEST(Sanitizer, PoisoningOnErase) {
  NonSooIntTable t;
  auto& v = *t.insert(0).first;

  EXPECT_FALSE(__asan_address_is_poisoned(&v));
  t.erase(0);
  EXPECT_TRUE(__asan_address_is_poisoned(&v));
}
#endif  // ABSL_HAVE_ADDRESS_SANITIZER
   3190 
// Typed suite for tables whose value_type has alignof == 1 (uint8_t keys),
// with and without the minimum-alignment allocator.
template <typename T>
class AlignOneTest : public ::testing::Test {};
using AlignOneTestTypes =
    ::testing::Types<Uint8Table, MinimumAlignmentUint8Table>;
TYPED_TEST_SUITE(AlignOneTest, AlignOneTestTypes);
   3196 
   3197 TYPED_TEST(AlignOneTest, AlignOne) {
   3198  // We previously had a bug in which we were copying a control byte over the
   3199  // first slot when alignof(value_type) is 1. We test repeated
   3200  // insertions/erases and verify that the behavior is correct.
   3201  TypeParam t;
   3202  std::bitset<256> verifier;
   3203 
   3204  // Do repeated insertions/erases from the table.
   3205  for (int64_t i = 0; i < 10000; ++i) {
   3206    SCOPED_TRACE(i);
   3207    const uint8_t u = (i * -i) & 0xFF;
   3208    auto it = t.find(u);
   3209    if (it == t.end()) {
   3210      ASSERT_FALSE(verifier.test(u));
   3211      t.insert(u);
   3212      verifier.set(u);
   3213    } else {
   3214      ASSERT_TRUE(verifier.test(u));
   3215      t.erase(it);
   3216      verifier.reset(u);
   3217    }
   3218  }
   3219 
   3220  EXPECT_EQ(t.size(), verifier.count());
   3221  for (uint8_t u : t) {
   3222    ASSERT_TRUE(verifier.test(u));
   3223  }
   3224 }
   3225 
// With generations enabled, using an iterator after any insertion must be
// caught, either by a sanitizer or by the generation check.
TEST(Iterator, InvalidUseCrashesWithSanitizers) {
  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
  if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";

  NonSooIntTable t;
  // Start with 1 element so that `it` is never an end iterator.
  t.insert(-1);
  for (int i = 0; i < 10; ++i) {
    auto it = t.begin();
    t.insert(i);
    // Both dereference and comparison of the stale iterator must die.
    EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
    EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
                              kInvalidIteratorDeathMessage);
  }
}
   3241 
// Inserts that fit in reserved growth keep iterators valid; the first insert
// beyond the reservation must invalidate them (and can rehash).
TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
  if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";

  IntTable t;
  t.reserve(10);
  t.insert(0);
  auto it = t.begin();
  // Reserved growth can't rehash.
  for (int i = 1; i < 10; ++i) {
    t.insert(i);
    EXPECT_EQ(*it, 0);
  }
  // ptr will become invalidated on rehash.
  const int64_t* ptr = &*it;
  (void)ptr;

  // erase decreases size but does not decrease reserved growth so the next
  // insertion still invalidates iterators.
  t.erase(0);
  // The first insert after reserved growth is 0 is guaranteed to rehash when
  // generations are enabled.
  t.insert(10);
  EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
  EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
                            kInvalidIteratorDeathMessage);
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
  EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free");
#endif
}
   3272 
// Moving a table away must invalidate iterators and element pointers into the
// moved-from table.
TEST(Iterator, InvalidUseWithMoveCrashesWithSanitizers) {
  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
  if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";

  NonSooIntTable t1, t2;
  t1.insert(1);
  auto it = t1.begin();
  // ptr will become invalidated on rehash.
  const auto* ptr = &*it;
  (void)ptr;

  t2 = std::move(t1);
  EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
  EXPECT_DEATH_IF_SUPPORTED(void(it == t2.begin()),
                            kInvalidIteratorDeathMessage);
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
  EXPECT_DEATH_IF_SUPPORTED(std::cout << **ptr, "heap-use-after-free");
#endif
}
   3292 
   3293 TYPED_TEST(SooTest, ReservedGrowthUpdatesWhenTableDoesntGrow) {
   3294  TypeParam t;
   3295  for (int i = 0; i < 8; ++i) t.insert(i);
   3296  // Want to insert twice without invalidating iterators so reserve.
   3297  const size_t cap = t.capacity();
   3298  t.reserve(t.size() + 2);
   3299  // We want to be testing the case in which the reserve doesn't grow the table.
   3300  ASSERT_EQ(cap, t.capacity());
   3301  auto it = t.find(0);
   3302  t.insert(100);
   3303  t.insert(200);
   3304  // `it` shouldn't have been invalidated.
   3305  EXPECT_EQ(*it, 0);
   3306 }
   3307 
// Fixture for tests that use InstanceTracker to verify element lifetimes
// (no leaked or surviving instances after erase_if).
template <class TableType>
class InstanceTrackerTest : public testing::Test {};

using ::absl::test_internal::CopyableMovableInstance;
using ::absl::test_internal::InstanceTracker;

// Hashes only the tracked value so hashing matches operator== on value().
struct InstanceTrackerHash {
  size_t operator()(const CopyableMovableInstance& t) const {
    return absl::HashOf(t.value());
  }
};

using InstanceTrackerTableTypes = ::testing::Types<
    absl::node_hash_set<CopyableMovableInstance, InstanceTrackerHash>,
    absl::flat_hash_set<CopyableMovableInstance, InstanceTrackerHash>>;
TYPED_TEST_SUITE(InstanceTrackerTest, InstanceTrackerTableTypes);
   3324 
   3325 TYPED_TEST(InstanceTrackerTest, EraseIfAll) {
   3326  using Table = TypeParam;
   3327  InstanceTracker tracker;
   3328  for (int size = 0; size < 100; ++size) {
   3329    Table t;
   3330    for (int i = 0; i < size; ++i) {
   3331      t.emplace(i);
   3332    }
   3333    absl::erase_if(t, [](const auto&) { return true; });
   3334    ASSERT_EQ(t.size(), 0);
   3335  }
   3336  EXPECT_EQ(tracker.live_instances(), 0);
   3337 }
   3338 
   3339 TYPED_TEST(InstanceTrackerTest, EraseIfNone) {
   3340  using Table = TypeParam;
   3341  InstanceTracker tracker;
   3342  {
   3343    Table t;
   3344    for (size_t size = 0; size < 100; ++size) {
   3345      absl::erase_if(t, [](const auto&) { return false; });
   3346      ASSERT_EQ(t.size(), size);
   3347      t.emplace(size);
   3348    }
   3349  }
   3350  EXPECT_EQ(tracker.live_instances(), 0);
   3351 }
   3352 
   3353 TYPED_TEST(InstanceTrackerTest, EraseIfPartial) {
   3354  using Table = TypeParam;
   3355  InstanceTracker tracker;
   3356  for (int mod : {0, 1}) {
   3357    for (int size = 0; size < 100; ++size) {
   3358      SCOPED_TRACE(absl::StrCat(mod, " ", size));
   3359      Table t;
   3360      std::vector<CopyableMovableInstance> expected;
   3361      for (int i = 0; i < size; ++i) {
   3362        t.emplace(i);
   3363        if (i % 2 != mod) {
   3364          expected.emplace_back(i);
   3365        }
   3366      }
   3367      absl::erase_if(t, [mod](const auto& x) { return x.value() % 2 == mod; });
   3368      ASSERT_THAT(t, testing::UnorderedElementsAreArray(expected));
   3369    }
   3370  }
   3371  EXPECT_EQ(tracker.live_instances(), 0);
   3372 }
   3373 
   3374 TYPED_TEST(SooTest, EraseIfAll) {
   3375  auto pred = [](const auto&) { return true; };
   3376  for (int size = 0; size < 100; ++size) {
   3377    TypeParam t;
   3378    for (int i = 0; i < size; ++i) t.insert(i);
   3379    absl::container_internal::EraseIf(pred, &t);
   3380    ASSERT_EQ(t.size(), 0);
   3381  }
   3382 }
   3383 
   3384 TYPED_TEST(SooTest, EraseIfNone) {
   3385  auto pred = [](const auto&) { return false; };
   3386  TypeParam t;
   3387  for (size_t size = 0; size < 100; ++size) {
   3388    absl::container_internal::EraseIf(pred, &t);
   3389    ASSERT_EQ(t.size(), size);
   3390    t.insert(size);
   3391  }
   3392 }
   3393 
   3394 TYPED_TEST(SooTest, EraseIfPartial) {
   3395  for (int mod : {0, 1}) {
   3396    auto pred = [&](const auto& x) {
   3397      return static_cast<int64_t>(x) % 2 == mod;
   3398    };
   3399    for (int size = 0; size < 100; ++size) {
   3400      SCOPED_TRACE(absl::StrCat(mod, " ", size));
   3401      TypeParam t;
   3402      std::vector<int64_t> expected;
   3403      for (int i = 0; i < size; ++i) {
   3404        t.insert(i);
   3405        if (i % 2 != mod) {
   3406          expected.push_back(i);
   3407        }
   3408      }
   3409      absl::container_internal::EraseIf(pred, &t);
   3410      ASSERT_THAT(t, testing::UnorderedElementsAreArray(expected));
   3411    }
   3412  }
   3413 }
   3414 
   3415 TYPED_TEST(SooTest, ForEach) {
   3416  TypeParam t;
   3417  std::vector<int64_t> expected;
   3418  for (int size = 0; size < 100; ++size) {
   3419    SCOPED_TRACE(size);
   3420    {
   3421      SCOPED_TRACE("mutable iteration");
   3422      std::vector<int64_t> actual;
   3423      auto f = [&](auto& x) { actual.push_back(static_cast<int64_t>(x)); };
   3424      absl::container_internal::ForEach(f, &t);
   3425      ASSERT_THAT(actual, testing::UnorderedElementsAreArray(expected));
   3426    }
   3427    {
   3428      SCOPED_TRACE("const iteration");
   3429      std::vector<int64_t> actual;
   3430      auto f = [&](auto& x) {
   3431        static_assert(
   3432            std::is_const<std::remove_reference_t<decltype(x)>>::value,
   3433            "no mutable values should be passed to const ForEach");
   3434        actual.push_back(static_cast<int64_t>(x));
   3435      };
   3436      const auto& ct = t;
   3437      absl::container_internal::ForEach(f, &ct);
   3438      ASSERT_THAT(actual, testing::UnorderedElementsAreArray(expected));
   3439    }
   3440    t.insert(size);
   3441    expected.push_back(size);
   3442  }
   3443 }
   3444 
   3445 TEST(Table, ForEachMutate) {
   3446  StringTable t;
   3447  using ValueType = StringTable::value_type;
   3448  std::vector<ValueType> expected;
   3449  for (int size = 0; size < 100; ++size) {
   3450    SCOPED_TRACE(size);
   3451    std::vector<ValueType> actual;
   3452    auto f = [&](ValueType& x) {
   3453      actual.push_back(x);
   3454      x.second += "a";
   3455    };
   3456    absl::container_internal::ForEach(f, &t);
   3457    ASSERT_THAT(actual, testing::UnorderedElementsAreArray(expected));
   3458    for (ValueType& v : expected) {
   3459      v.second += "a";
   3460    }
   3461    ASSERT_THAT(t, testing::UnorderedElementsAreArray(expected));
   3462    t.emplace(std::to_string(size), std::to_string(size));
   3463    expected.emplace_back(std::to_string(size), std::to_string(size));
   3464  }
   3465 }
   3466 
// EraseIf must assert if the predicate reentrantly mutates the table.
TYPED_TEST(SooTest, EraseIfReentryDeath) {
  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";

  auto erase_if_with_removal_reentrance = [](size_t reserve_size) {
    TypeParam t;
    t.reserve(reserve_size);
    int64_t first_value = -1;
    t.insert(1024);
    t.insert(5078);
    auto pred = [&](const auto& x) {
      if (first_value == -1) {
        first_value = static_cast<int64_t>(x);
        return false;
      }
      // We erase on second call to `pred` to reduce the chance that assertion
      // will happen in IterateOverFullSlots.
      t.erase(first_value);
      return true;
    };
    absl::container_internal::EraseIf(pred, &t);
  };
  // Removal will likely happen in a different group.
  EXPECT_DEATH_IF_SUPPORTED(erase_if_with_removal_reentrance(1024 * 16),
                            "hash table was modified unexpectedly");
  // Removal will happen in the same group.
  EXPECT_DEATH_IF_SUPPORTED(
      erase_if_with_removal_reentrance(CapacityToGrowth(Group::kWidth - 1)),
      "hash table was modified unexpectedly");
}
   3496 
// This test is useful to test soo branch.
TYPED_TEST(SooTest, EraseIfReentrySingleElementDeath) {
  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";

  auto erase_if_with_removal_reentrance = []() {
    TypeParam t;
    t.insert(1024);
    auto pred = [&](const auto& x) {
      // We erase ourselves in order to confuse the erase_if.
      t.erase(static_cast<int64_t>(x));
      return false;
    };
    absl::container_internal::EraseIf(pred, &t);
  };
  // Even with a single (possibly inline/SOO) element, the reentrant erase
  // must be detected.
  EXPECT_DEATH_IF_SUPPORTED(erase_if_with_removal_reentrance(),
                            "hash table was modified unexpectedly");
}
   3514 
// erase(begin, end) must drop all tombstones and reset reserved growth
// in place: the freezable allocator aborts if any reallocation happens.
TEST(Table, EraseBeginEndResetsReservedGrowth) {
  bool frozen = false;
  BadHashFreezableIntTable t{FreezableAlloc<int64_t>(&frozen)};
  t.reserve(100);
  const size_t cap = t.capacity();
  frozen = true;  // no further allocs allowed

  for (int i = 0; i < 10; ++i) {
    // Create a long run (hash function returns constant).
    for (int j = 0; j < 100; ++j) t.insert(j);
    // Erase elements from the middle of the long run, which creates
    // tombstones.
    for (int j = 30; j < 60; ++j) t.erase(j);
    EXPECT_EQ(t.size(), 70);
    EXPECT_EQ(t.capacity(), cap);
    ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 30);

    t.erase(t.begin(), t.end());

    EXPECT_EQ(t.size(), 0);
    EXPECT_EQ(t.capacity(), cap);
    ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 0);
  }
}
   3539 
// After clear(), generation tracking must start fresh: a subsequent insert
// still invalidates iterators taken after the clear.
TEST(Table, GenerationInfoResetsOnClear) {
  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
  if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";

  NonSooIntTable t;
  for (int i = 0; i < 1000; ++i) t.insert(i);
  t.reserve(t.size() + 100);

  t.clear();

  t.insert(0);
  auto it = t.begin();
  t.insert(1);
  EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
}
   3555 
// Element pointers must be detected as dangling after an insert-triggered
// rehash (sanitizers report use-after-free on the old allocation).
TEST(Table, InvalidReferenceUseCrashesWithSanitizers) {
  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
#ifdef ABSL_HAVE_MEMORY_SANITIZER
  GTEST_SKIP() << "MSan fails to detect some of these rehashes.";
#endif

  NonSooIntTable t;
  t.insert(0);
  // Rehashing is guaranteed on every insertion while capacity is less than
  // RehashProbabilityConstant().
  int i = 0;
  while (t.capacity() <= RehashProbabilityConstant()) {
    // ptr will become invalidated on rehash.
    const auto* ptr = &*t.begin();
    t.insert(++i);
    EXPECT_DEATH_IF_SUPPORTED(std::cout << **ptr, "use-after-free") << i;
  }
}
   3574 
// Comparing iterators that belong to different tables must be diagnosed,
// with a message describing the kind of mismatch.
TEST(Iterator, InvalidComparisonDifferentTables) {
  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";

  NonSooIntTable t1, t2;
  NonSooIntTable::iterator default_constructed_iter;
  // We randomly use one of N empty generations for generations from empty
  // hashtables. In general, we won't always detect when iterators from
  // different empty hashtables are compared, but in this test case, we
  // should deterministically detect the error due to our randomness yielding
  // consecutive random generations.
  EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == t2.end()),
                            "Invalid iterator comparison.*empty hashtables");
  EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == default_constructed_iter),
                            "Invalid iterator comparison.*default-constructed");
  t1.insert(0);
  EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
                            "Invalid iterator comparison.*empty hashtable");
  EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == default_constructed_iter),
                            "Invalid iterator comparison.*default-constructed");
  t2.insert(0);
  EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
                            "Invalid iterator comparison.*end.. iterator");
  EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.begin()),
                            "Invalid iterator comparison.*non-end");
}
   3600 
// Table alias parameterized on the allocator, used to exercise allocator
// propagation via the shared TestAllocPropagation helper.
template <typename Alloc>
using RawHashSetAlloc = raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
                                     std::equal_to<int64_t>, Alloc>;

TEST(Table, AllocatorPropagation) { TestAllocPropagation<RawHashSetAlloc>(); }
   3606 
   3607 struct CountedHash {
   3608  size_t operator()(int64_t value) const {
   3609    ++count;
   3610    return static_cast<size_t>(value);
   3611  }
   3612  mutable int count = 0;
   3613 };
   3614 
// IntTable variant whose hash functor counts invocations; used below to
// verify operations hash no more often than necessary.
struct CountedHashIntTable
    : raw_hash_set<IntPolicy, CountedHash, std::equal_to<int>,
                   std::allocator<int>> {
  using Base = typename CountedHashIntTable::raw_hash_set;
  using Base::Base;
};
   3621 
TEST(Table, CountedHash) {
  // Verify that raw_hash_set does not compute redundant hashes.
#ifdef NDEBUG
  constexpr bool kExpectMinimumHashes = true;
#else
  constexpr bool kExpectMinimumHashes = false;
#endif
  if (!kExpectMinimumHashes) {
    GTEST_SKIP() << "Only run under NDEBUG: `assert` statements may cause "
                    "redundant hashing.";
  }

  using Table = CountedHashIntTable;
  auto HashCount = [](const Table& t) { return t.hash_function().count; };
  {
    // Construction alone must not hash anything.
    Table t;
    EXPECT_EQ(HashCount(t), 0);
  }
  {
    // insert and erase each hash exactly once.
    Table t;
    t.insert(1);
    EXPECT_EQ(HashCount(t), 1);
    t.erase(1);
    EXPECT_EQ(HashCount(t), 2);
  }
  {
    // extract and node re-insert each hash exactly once.
    Table t;
    t.insert(3);
    EXPECT_EQ(HashCount(t), 1);
    auto node = t.extract(3);
    EXPECT_EQ(HashCount(t), 2);
    t.insert(std::move(node));
    EXPECT_EQ(HashCount(t), 3);
  }
  {
    Table t;
    t.emplace(5);
    EXPECT_EQ(HashCount(t), 1);
  }
  {
    // Merging one element computes exactly one hash in the destination.
    Table src;
    src.insert(7);
    Table dst;
    dst.merge(src);
    EXPECT_EQ(HashCount(dst), 1);
  }
}
   3669 
   3670 // IterateOverFullSlots doesn't support SOO.
// IterateOverFullSlots doesn't support SOO.
TEST(Table, IterateOverFullSlotsEmpty) {
  NonSooIntTable t;
  using SlotType = NonSooIntTableSlotType;
  // The callback must never fire on an empty table, at any capacity.
  auto fail_if_any = [](const ctrl_t*, void* i) {
    FAIL() << "expected no slots " << **static_cast<SlotType*>(i);
  };
  container_internal::IterateOverFullSlots(
      RawHashSetTestOnlyAccess::GetCommon(t), sizeof(SlotType), fail_if_any);
  for (size_t i = 0; i < 256; ++i) {
    t.reserve(i);
    container_internal::IterateOverFullSlots(
        RawHashSetTestOnlyAccess::GetCommon(t), sizeof(SlotType), fail_if_any);
  }
}
   3685 
// IterateOverFullSlots must visit exactly the full slots, with matching
// control/slot offsets, at every size up to 128.
TEST(Table, IterateOverFullSlotsFull) {
  NonSooIntTable t;
  using SlotType = NonSooIntTableSlotType;

  std::vector<int64_t> expected_slots;
  for (int64_t idx = 0; idx < 128; ++idx) {
    t.insert(idx);
    expected_slots.push_back(idx);

    std::vector<int64_t> slots;
    container_internal::IterateOverFullSlots(
        RawHashSetTestOnlyAccess::GetCommon(t), sizeof(SlotType),
        [&t, &slots](const ctrl_t* ctrl, void* slot) {
          SlotType* i = static_cast<SlotType*>(slot);
          // The control byte and the slot must index the same position.
          ptrdiff_t ctrl_offset =
              ctrl - RawHashSetTestOnlyAccess::GetCommon(t).control();
          ptrdiff_t slot_offset = i - RawHashSetTestOnlyAccess::GetSlots(t);
          ASSERT_EQ(ctrl_offset, slot_offset);
          slots.push_back(**i);
        });
    EXPECT_THAT(slots, testing::UnorderedElementsAreArray(expected_slots));
  }
}
   3709 
// IterateOverFullSlots must assert if the callback erases other elements
// while the iteration is in progress.
TEST(Table, IterateOverFullSlotsDeathOnRemoval) {
  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";

  auto iterate_with_reentrant_removal = [](int64_t size,
                                           int64_t reserve_size = -1) {
    if (reserve_size == -1) reserve_size = size;
    for (int64_t idx = 0; idx < size; ++idx) {
      NonSooIntTable t;
      using SlotType = NonSooIntTableSlotType;
      t.reserve(static_cast<size_t>(reserve_size));
      for (int val = 0; val <= idx; ++val) {
        t.insert(val);
      }

      container_internal::IterateOverFullSlots(
          RawHashSetTestOnlyAccess::GetCommon(t), sizeof(SlotType),
          [&t](const ctrl_t*, void* slot) {
            int64_t value = **static_cast<SlotType*>(slot);
            // Erase the other element from 2*k and 2*k+1 pair.
            t.erase(value ^ 1);
          });
    }
  };

  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_removal(128),
                            "hash table was modified unexpectedly");
  // Removal will likely happen in a different group.
  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_removal(14, 1024 * 16),
                            "hash table was modified unexpectedly");
  // Removal will happen in the same group.
  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_removal(static_cast<int64_t>(
                                CapacityToGrowth(Group::kWidth - 1))),
                            "hash table was modified unexpectedly");
}
   3744 
// IterateOverFullSlots must assert if the callback inserts new elements
// while the iteration is in progress.
TEST(Table, IterateOverFullSlotsDeathOnInsert) {
  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";

  auto iterate_with_reentrant_insert = [](int64_t reserve_size,
                                          int64_t size_divisor = 2) {
    // Keep the table under-full so the reentrant insert itself cannot grow
    // the table before the check fires.
    int64_t size = reserve_size / size_divisor;
    for (int64_t idx = 1; idx <= size; ++idx) {
      NonSooIntTable t;
      using SlotType = NonSooIntTableSlotType;
      t.reserve(static_cast<size_t>(reserve_size));
      for (int val = 1; val <= idx; ++val) {
        t.insert(val);
      }

      container_internal::IterateOverFullSlots(
          RawHashSetTestOnlyAccess::GetCommon(t), sizeof(SlotType),
          [&t](const ctrl_t*, void* slot) {
            int64_t value = **static_cast<SlotType*>(slot);
            t.insert(-value);
          });
    }
  };

  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_insert(128),
                            "hash table was modified unexpectedly");
  // Insert will likely happen in a different group.
  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_insert(1024 * 16, 1024 * 2),
                            "hash table was modified unexpectedly");
  // Insert will happen in the same group.
  EXPECT_DEATH_IF_SUPPORTED(iterate_with_reentrant_insert(static_cast<int64_t>(
                                CapacityToGrowth(Group::kWidth - 1))),
                            "hash table was modified unexpectedly");
}
   3778 
// Typed-test fixture for SOO tables over two value sizes; the freezable
// allocator lets tests prove that small tables never allocate.
template <typename T>
class SooTable : public testing::Test {};
using FreezableSooTableTypes =
    ::testing::Types<FreezableSizedValueSooTable<8>,
                     FreezableSizedValueSooTable<16>>;
TYPED_TEST_SUITE(SooTable, FreezableSooTableTypes);
   3785 
// Basic insert/find/erase/clear on a table that stays in SOO mode the whole
// time: the allocator is frozen, so any allocation would abort.
TYPED_TEST(SooTable, Basic) {
  bool frozen = true;
  TypeParam t{FreezableAlloc<typename TypeParam::value_type>(&frozen)};
  if (t.capacity() != SooCapacity()) {
    // On some platforms SOO is unavailable; make sure that is expected.
    CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
    GTEST_SKIP() << "not SOO on this platform";
  }

  t.insert(0);
  EXPECT_EQ(t.capacity(), 1);
  auto it = t.find(0);
  EXPECT_EQ(it, t.begin());
  ASSERT_NE(it, t.end());
  EXPECT_EQ(*it, 0);
  EXPECT_EQ(++it, t.end());
  EXPECT_EQ(t.find(1), t.end());
  EXPECT_EQ(t.size(), 1);

  t.erase(0);
  EXPECT_EQ(t.size(), 0);
  t.insert(1);
  it = t.find(1);
  EXPECT_EQ(it, t.begin());
  ASSERT_NE(it, t.end());
  EXPECT_EQ(*it, 1);

  t.clear();
  EXPECT_EQ(t.size(), 0);
}
   3815 
// rehash(0) on a small unsampled table must shrink it back into SOO mode
// without losing the element.
TEST(Table, RehashToSooUnsampled) {
  SooIntTable t;
  if (t.capacity() != SooCapacity()) {
    CHECK_LT(sizeof(void*), 8) << "missing SOO coverage";
    GTEST_SKIP() << "not SOO on this platform";
  }

  // We disable hashtablez sampling for this test to ensure that the table isn't
  // sampled. When the table is sampled, it won't rehash down to SOO.
  DisableSampling();

  t.reserve(100);
  t.insert(0);
  EXPECT_EQ(*t.begin(), 0);

  t.rehash(0);  // Rehash back down to SOO table.

  EXPECT_EQ(t.capacity(), SooCapacity());
  EXPECT_EQ(t.size(), 1);
  EXPECT_EQ(*t.begin(), 0);
  EXPECT_EQ(t.find(0), t.begin());
  EXPECT_EQ(t.find(1), t.end());
}
   3839 
   3840 TEST(Table, ReserveToNonSoo) {
   3841  for (size_t reserve_capacity : {2u, 8u, 100000u}) {
   3842    SooIntTable t;
   3843    t.insert(0);
   3844 
   3845    t.reserve(reserve_capacity);
   3846 
   3847    EXPECT_EQ(t.find(0), t.begin());
   3848    EXPECT_EQ(t.size(), 1);
   3849    EXPECT_EQ(*t.begin(), 0);
   3850    EXPECT_EQ(t.find(1), t.end());
   3851  }
   3852 }
   3853 
// Type whose hash depends on v1 but whose equality depends on v2 —
// deliberately violating the hash/eq consistency requirement so the table's
// validation of that requirement can be exercised.
struct InconsistentHashEqType {
  InconsistentHashEqType(int v1, int v2) : v1(v1), v2(v2) {}
  template <typename H>
  friend H AbslHashValue(H h, InconsistentHashEqType t) {
    return H::combine(std::move(h), t.v1);
  }
  bool operator==(InconsistentHashEqType t) const { return v2 == t.v2; }
  int v1, v2;
};
   3863 
// Both find and insert must eventually detect hash/eq inconsistency.
TEST(Iterator, InconsistentHashEqFunctorsValidation) {
  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";

  ValueTable<InconsistentHashEqType> t;
  for (int i = 0; i < 10; ++i) t.insert({i, i});
  // We need to find/insert multiple times to guarantee that we get the
  // assertion because it's possible for the hash to collide with the inserted
  // element that has v2==0. In those cases, the new element won't be inserted.
  auto find_conflicting_elems = [&] {
    for (int i = 100; i < 20000; ++i) {
      EXPECT_EQ(t.find({i, 0}), t.end());
    }
  };
  EXPECT_DEATH_IF_SUPPORTED(find_conflicting_elems(),
                            "hash/eq functors are inconsistent.");
  auto insert_conflicting_elems = [&] {
    for (int i = 100; i < 20000; ++i) {
      EXPECT_EQ(t.insert({i, 0}).second, false);
    }
  };
  EXPECT_DEATH_IF_SUPPORTED(insert_conflicting_elems(),
                            "hash/eq functors are inconsistent.");
}
   3887 
// Element type that runs an arbitrary callback during construction, used to
// provoke reentrant table access from inside emplace.
struct ConstructCaller {
  explicit ConstructCaller(int v) : val(v) {}
  // The callback runs while the element is being constructed in its slot.
  ConstructCaller(int v, absl::FunctionRef<void()> func) : val(v) { func(); }
  template <typename H>
  friend H AbslHashValue(H h, const ConstructCaller& d) {
    return H::combine(std::move(h), d.val);
  }
  bool operator==(const ConstructCaller& c) const { return val == c.val; }

  int val;
};
   3899 
// Element type that runs an arbitrary callback from its destructor, used to
// provoke reentrant table access during erase/clear/destruction.
struct DestroyCaller {
  explicit DestroyCaller(int v) : val(v) {}
  DestroyCaller(int v, absl::FunctionRef<void()> func)
      : val(v), destroy_func(func) {}
  // Moving transfers the callback; the moved-from object is deactivated so
  // the callback fires at most once.
  DestroyCaller(DestroyCaller&& that)
      : val(that.val), destroy_func(std::move(that.destroy_func)) {
    that.Deactivate();
  }
  ~DestroyCaller() {
    if (destroy_func) (*destroy_func)();
  }
  // Disarms the destructor callback; tests call this to allow clean teardown.
  void Deactivate() { destroy_func = absl::nullopt; }

  template <typename H>
  friend H AbslHashValue(H h, const DestroyCaller& d) {
    return H::combine(std::move(h), d.val);
  }
  bool operator==(const DestroyCaller& d) const { return val == d.val; }

  int val;
  absl::optional<absl::FunctionRef<void()>> destroy_func;
};
   3922 
// Reentrant table access from element constructors/destructors must be
// detected in debug builds.
TEST(Table, ReentrantCallsFail) {
#ifdef NDEBUG
  GTEST_SKIP() << "Reentrant checks only enabled in debug mode.";
#else
  {
    // Erasing from within an element's constructor (during emplace).
    ValueTable<ConstructCaller> t;
    t.insert(ConstructCaller{0});
    auto erase_begin = [&] { t.erase(t.begin()); };
    EXPECT_DEATH_IF_SUPPORTED(t.emplace(1, erase_begin), "");
  }
  {
    // Looking up from within an element's destructor (during clear).
    ValueTable<DestroyCaller> t;
    t.insert(DestroyCaller{0});
    auto find_0 = [&] { t.find(DestroyCaller{0}); };
    t.insert(DestroyCaller{1, find_0});
    for (int i = 10; i < 20; ++i) t.insert(DestroyCaller{i});
    EXPECT_DEATH_IF_SUPPORTED(t.clear(), "");
    // Disarm remaining callbacks so the table can be destroyed cleanly.
    for (auto& elem : t) elem.Deactivate();
  }
  {
    // Inserting from within an element's destructor (during clear).
    ValueTable<DestroyCaller> t;
    t.insert(DestroyCaller{0});
    auto insert_1 = [&] { t.insert(DestroyCaller{1}); };
    t.insert(DestroyCaller{1, insert_1});
    for (int i = 10; i < 20; ++i) t.insert(DestroyCaller{i});
    EXPECT_DEATH_IF_SUPPORTED(t.clear(), "");
    for (auto& elem : t) elem.Deactivate();
  }
#endif
}
   3953 
   3954 // TODO(b/328794765): this check is very useful to run with ASAN in opt mode.
   3955 TEST(Table, DestroyedCallsFail) {
   3956 #ifdef NDEBUG
   3957  ASSERT_EQ(SwisstableAssertAccessToDestroyedTable(),
   3958            SwisstableGenerationsEnabled());
   3959 #else
   3960  ASSERT_TRUE(SwisstableAssertAccessToDestroyedTable());
   3961 #endif
   3962  if (!SwisstableAssertAccessToDestroyedTable()) {
   3963    GTEST_SKIP() << "Validation not enabled.";
   3964  }
   3965 #if !defined(__clang__) && defined(__GNUC__)
   3966  GTEST_SKIP() << "Flaky on GCC.";
   3967 #endif
   3968  absl::optional<IntTable> t;
   3969  t.emplace({1});
   3970  IntTable* t_ptr = &*t;
   3971  EXPECT_TRUE(t_ptr->contains(1));
   3972  t.reset();
   3973  std::string expected_death_message =
   3974 #if defined(ABSL_HAVE_MEMORY_SANITIZER)
   3975      "use-of-uninitialized-value";
   3976 #else
   3977      "destroyed hash table";
   3978 #endif
   3979  EXPECT_DEATH_IF_SUPPORTED(t_ptr->contains(1), expected_death_message);
   3980 }
   3981 
// Accessing the table from an element's destructor while the table itself is
// being destroyed must be diagnosed.
TEST(Table, DestroyedCallsFailDuringDestruction) {
  if (!SwisstableAssertAccessToDestroyedTable()) {
    GTEST_SKIP() << "Validation not enabled.";
  }
#if !defined(__clang__) && defined(__GNUC__)
  GTEST_SKIP() << "Flaky on GCC.";
#endif
  // When EXPECT_DEATH_IF_SUPPORTED is not executed, the code after it is not
  // executed as well.
  // We need to destruct the table correctly in such a case.
  // Must be defined before the table for correct destruction order.
  bool do_lookup = false;

  using Table = absl::flat_hash_map<int, std::shared_ptr<int>>;
  absl::optional<Table> t = Table();
  Table* t_ptr = &*t;
  // Custom shared_ptr deleter that (optionally) looks the key up in the
  // table that owns it — reentrant access when run during t.reset().
  auto destroy = [&](int* ptr) {
    if (do_lookup) {
      ASSERT_TRUE(t_ptr->contains(*ptr));
    }
    delete ptr;
  };
  t->insert({0, std::shared_ptr<int>(new int(0), destroy)});
  auto destroy_with_lookup = [&] {
    do_lookup = true;
    t.reset();
  };
  std::string expected_death_message =
#ifdef NDEBUG
      "destroyed hash table";
#else
      "Reentrant container access";
#endif
  EXPECT_DEATH_IF_SUPPORTED(destroy_with_lookup(), expected_death_message);
}
   4017 
// Operations on a moved-from table (other than assignment/clear/destroy)
// must be diagnosed when generations are enabled.
TEST(Table, MovedFromCallsFail) {
  if (!SwisstableGenerationsEnabled()) {
    GTEST_SKIP() << "Moved-from checks only enabled in sanitizer mode.";
    return;
  }

  {
    ABSL_ATTRIBUTE_UNUSED IntTable t1, t2, t3;
    t1.insert(1);
    t2 = std::move(t1);
    // NOLINTNEXTLINE(bugprone-use-after-move)
    EXPECT_DEATH_IF_SUPPORTED(t1.contains(1), "moved-from");
    // NOLINTNEXTLINE(bugprone-use-after-move)
    EXPECT_DEATH_IF_SUPPORTED(t1.swap(t3), "moved-from");
    // NOLINTNEXTLINE(bugprone-use-after-move)
    EXPECT_DEATH_IF_SUPPORTED(t1.merge(t3), "moved-from");
    // NOLINTNEXTLINE(bugprone-use-after-move)
    EXPECT_DEATH_IF_SUPPORTED(IntTable{t1}, "moved-from");
    // NOLINTNEXTLINE(bugprone-use-after-move)
    EXPECT_DEATH_IF_SUPPORTED(t1.begin(), "moved-from");
    // NOLINTNEXTLINE(bugprone-use-after-move)
    EXPECT_DEATH_IF_SUPPORTED(t1.end(), "moved-from");
    // NOLINTNEXTLINE(bugprone-use-after-move)
    EXPECT_DEATH_IF_SUPPORTED(t1.size(), "moved-from");
  }
  {
    ABSL_ATTRIBUTE_UNUSED IntTable t1;
    t1.insert(1);
    ABSL_ATTRIBUTE_UNUSED IntTable t2(std::move(t1));
    // NOLINTNEXTLINE(bugprone-use-after-move)
    EXPECT_DEATH_IF_SUPPORTED(t1.contains(1), "moved-from");
    t1.clear();  // Clearing a moved-from table is allowed.
  }
  {
    // Test that using a table (t3) that was moved-to from a moved-from table
    // (t1) fails.
    ABSL_ATTRIBUTE_UNUSED IntTable t1, t2, t3;
    t1.insert(1);
    t2 = std::move(t1);
    // NOLINTNEXTLINE(bugprone-use-after-move)
    t3 = std::move(t1);
    EXPECT_DEATH_IF_SUPPORTED(t3.contains(1), "moved-from");
  }
}
   4062 
   4063 TEST(HashtableSize, GenerateNewSeedDoesntChangeSize) {
   4064  size_t size = 1;
   4065  do {
   4066    HashtableSize hs(no_seed_empty_tag_t{});
   4067    hs.increment_size(size);
   4068    EXPECT_EQ(hs.size(), size);
   4069    hs.generate_new_seed();
   4070    EXPECT_EQ(hs.size(), size);
   4071    size = size * 2 + 1;
   4072  } while (size < MaxValidSizeFor1ByteSlot());
   4073 }
   4074 
TEST(Table, MaxValidSize) {
  IntTable t;
  // The container's max_size() must agree with the internal MaxValidSize
  // computation for its slot size.
  EXPECT_EQ(MaxValidSize(sizeof(IntTable::value_type)), t.max_size());
  if constexpr (sizeof(size_t) == 8) {
    // 64-bit platforms: check power-of-two slot sizes 2^0 through 2^34.
    for (size_t i = 0; i < 35; ++i) {
      SCOPED_TRACE(i);
      size_t slot_size = size_t{1} << i;
      size_t max_size = MaxValidSize(slot_size);
      // max_size must be the exact boundary: valid itself, invalid at +1.
      ASSERT_FALSE(IsAboveValidSize(max_size, slot_size));
      ASSERT_TRUE(IsAboveValidSize(max_size + 1, slot_size));
      ASSERT_LT(max_size, uint64_t{1} << 60);
      // For non gigantic slot sizes we expect max size to be at least 2^40.
      if (i <= 22) {
        ASSERT_FALSE(IsAboveValidSize(size_t{1} << 40, slot_size));
        ASSERT_GE(max_size, uint64_t{1} << 40);
      }
      // The capacity derived from max_size must fit in the size bit field...
      ASSERT_LT(NormalizeCapacity(GrowthToLowerboundCapacity(max_size)),
                uint64_t{1} << HashtableSize::kSizeBitCount);
      // ...and the total slot allocation must stay below 2^63 bytes
      // (computed in 128-bit arithmetic to avoid overflow).
      ASSERT_LT(absl::uint128(max_size) * slot_size, uint64_t{1} << 63);
    }
  }
  // 32-bit behavior, emulated via the kSizeOfSizeT template parameter so it
  // is covered regardless of the host platform.
  EXPECT_LT(MaxValidSize</*kSizeOfSizeT=*/4>(1), 1 << 30);
  EXPECT_LT(MaxValidSize</*kSizeOfSizeT=*/4>(2), 1 << 29);
  for (size_t i = 0; i < 29; ++i) {
    size_t slot_size = size_t{1} << i;
    size_t max_size = MaxValidSize</*kSizeOfSizeT=*/4>(slot_size);
    // Exact boundary, as in the 64-bit case above.
    ASSERT_FALSE(IsAboveValidSize</*kSizeOfSizeT=*/4>(max_size, slot_size));
    ASSERT_TRUE(IsAboveValidSize</*kSizeOfSizeT=*/4>(max_size + 1, slot_size));
    ASSERT_LT(max_size, 1 << 30);
    size_t max_capacity =
        NormalizeCapacity(GrowthToLowerboundCapacity(max_size));
    // Max capacity in bytes must stay under 2^31 yet exceed 2^29, i.e. the
    // limit is tight to within a factor of four.
    ASSERT_LT(max_capacity, (size_t{1} << 31) / slot_size);
    ASSERT_GT(max_capacity, (1 << 29) / slot_size);
    ASSERT_LT(max_capacity * slot_size, size_t{1} << 31);
  }
}
   4111 
   4112 TEST(Table, MaxSizeOverflow) {
   4113  size_t overflow = (std::numeric_limits<size_t>::max)();
   4114  EXPECT_DEATH_IF_SUPPORTED(IntTable t(overflow), "Hash table size overflow");
   4115  IntTable t;
   4116  EXPECT_DEATH_IF_SUPPORTED(t.reserve(overflow), "Hash table size overflow");
   4117  EXPECT_DEATH_IF_SUPPORTED(t.rehash(overflow), "Hash table size overflow");
   4118  size_t slightly_overflow = MaxValidSize(sizeof(IntTable::value_type)) + 1;
   4119  size_t slightly_overflow_capacity =
   4120      NextCapacity(NormalizeCapacity(slightly_overflow));
   4121  EXPECT_DEATH_IF_SUPPORTED(IntTable t2(slightly_overflow_capacity - 10),
   4122                            "Hash table size overflow");
   4123  EXPECT_DEATH_IF_SUPPORTED(t.reserve(slightly_overflow),
   4124                            "Hash table size overflow");
   4125  EXPECT_DEATH_IF_SUPPORTED(t.rehash(slightly_overflow),
   4126                            "Hash table size overflow");
   4127 }
   4128 
   4129 // TODO(b/397453582): Remove support for const hasher and ermove this test.
   4130 TEST(Table, ConstLambdaHash) {
   4131  int64_t multiplier = 17;
   4132  // Make sure that code compiles and work OK with non-empty hasher with const
   4133  // qualifier.
   4134  const auto hash = [multiplier](SizedValue<64> value) -> size_t {
   4135    return static_cast<size_t>(static_cast<int64_t>(value) * multiplier);
   4136  };
   4137  static_assert(!std::is_empty_v<decltype(hash)>);
   4138  absl::flat_hash_set<SizedValue<64>, decltype(hash)> t(0, hash);
   4139  t.insert(1);
   4140  EXPECT_EQ(t.size(), 1);
   4141  EXPECT_EQ(t.find(1), t.begin());
   4142  EXPECT_EQ(t.find(2), t.end());
   4143  t.insert(2);
   4144  EXPECT_EQ(t.size(), 2);
   4145  EXPECT_NE(t.find(1), t.end());
   4146  EXPECT_NE(t.find(2), t.end());
   4147  EXPECT_EQ(t.find(3), t.end());
   4148 }
   4149 
   4150 }  // namespace
   4151 }  // namespace container_internal
   4152 ABSL_NAMESPACE_END
   4153 }  // namespace absl