tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

MurmurHash3.cpp (8458B)


      1 //-----------------------------------------------------------------------------
      2 // MurmurHash3 was written by Austin Appleby, and is placed in the public
      3 // domain. The author hereby disclaims copyright to this source code.
      4 
      5 // Note - The x86 and x64 versions do _not_ produce the same results, as the
      6 // algorithms are optimized for their respective platforms. You can still
      7 // compile and run any of them on any platform, but your performance with the
      8 // non-native version will be less than optimal.
      9 
#include "MurmurHash3.h"

#include <cstring>  // memcpy: alignment-safe block loads
     11 
     12 namespace {
     13 
     14 //-----------------------------------------------------------------------------
     15 // Platform-specific functions and macros
     16 
     17 // Microsoft Visual Studio
     18 
     19 #if defined(_MSC_VER)
     20 
     21 #  define FORCE_INLINE __forceinline
     22 
     23 #  define ROTL32(x, y) _rotl(x, y)
     24 #  define ROTL64(x, y) _rotl64(x, y)
     25 
     26 #  define BIG_CONSTANT(x) (x)
     27 
     28 // Other compilers
     29 
     30 #else  // defined(_MSC_VER)
     31 
     32 // We can't do always_inline, becasue -Werror -Wattribute will trigger
     33 // a "might not be able to inline" warning.
     34 // #define	FORCE_INLINE __attribute__((always_inline))
     35 #  define FORCE_INLINE inline
     36 
     37 inline uint32_t rotl32(uint32_t x, int8_t r) {
     38  return (x << r) | (x >> (32 - r));
     39 }
     40 
     41 inline uint64_t rotl64(uint64_t x, int8_t r) {
     42  return (x << r) | (x >> (64 - r));
     43 }
     44 
     45 #  define ROTL32(x, y) rotl32(x, y)
     46 #  define ROTL64(x, y) rotl64(x, y)
     47 
     48 #  define BIG_CONSTANT(x) (x##LLU)
     49 
     50 #endif  // !defined(_MSC_VER)
     51 
     52 //-----------------------------------------------------------------------------
     53 // Block read - if your platform needs to do endian-swapping or can only
     54 // handle aligned reads, do the conversion here
     55 
     56 FORCE_INLINE uint32_t getblock(const uint32_t* p, int i) { return p[i]; }
     57 
     58 FORCE_INLINE uint64_t getblock(const uint64_t* p, int i) { return p[i]; }
     59 
     60 //-----------------------------------------------------------------------------
     61 // Finalization mix - force all bits of a hash block to avalanche
     62 
     63 FORCE_INLINE uint32_t fmix(uint32_t h) {
     64  h ^= h >> 16;
     65  h *= 0x85ebca6b;
     66  h ^= h >> 13;
     67  h *= 0xc2b2ae35;
     68  h ^= h >> 16;
     69 
     70  return h;
     71 }
     72 
     73 //----------
     74 
     75 FORCE_INLINE uint64_t fmix(uint64_t k) {
     76  k ^= k >> 33;
     77  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
     78  k ^= k >> 33;
     79  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
     80  k ^= k >> 33;
     81 
     82  return k;
     83 }
     84 
     85 }  // unnamed namespace
     86 
     87 //-----------------------------------------------------------------------------
     88 
     89 void MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* out) {
     90  const uint8_t* data = (const uint8_t*)key;
     91  const int nblocks = len / 4;
     92 
     93  uint32_t h1 = seed;
     94 
     95  const uint32_t c1 = 0xcc9e2d51;
     96  const uint32_t c2 = 0x1b873593;
     97 
     98  //----------
     99  // body
    100 
    101  const uint32_t* blocks = (const uint32_t*)(data + nblocks * 4);
    102 
    103  for (int i = -nblocks; i; i++) {
    104    uint32_t k1 = getblock(blocks, i);
    105 
    106    k1 *= c1;
    107    k1 = ROTL32(k1, 15);
    108    k1 *= c2;
    109 
    110    h1 ^= k1;
    111    h1 = ROTL32(h1, 13);
    112    h1 = h1 * 5 + 0xe6546b64;
    113  }
    114 
    115  //----------
    116  // tail
    117 
    118  const uint8_t* tail = (const uint8_t*)(data + nblocks * 4);
    119 
    120  uint32_t k1 = 0;
    121 
    122  switch (len & 3) {
    123    case 3:
    124      k1 ^= tail[2] << 16;
    125    case 2:
    126      k1 ^= tail[1] << 8;
    127    case 1:
    128      k1 ^= tail[0];
    129      k1 *= c1;
    130      k1 = ROTL32(k1, 15);
    131      k1 *= c2;
    132      h1 ^= k1;
    133  }
    134 
    135  //----------
    136  // finalization
    137 
    138  h1 ^= len;
    139 
    140  h1 = fmix(h1);
    141 
    142  *(uint32_t*)out = h1;
    143 }
    144 
    145 //-----------------------------------------------------------------------------
    146 
// MurmurHash3, 128-bit output, tuned for 32-bit x86: four interleaved
// 32-bit state lanes (h1..h4), each with its own round constant (c1..c4).
// Hashes `len` bytes at `key` with `seed` and writes 16 bytes through
// `out`.  As the file header notes, the output intentionally differs from
// the x64_128 variant's.
void MurmurHash3_x86_128(const void* key, const int len, uint32_t seed,
                         void* out) {
  const uint8_t* data = (const uint8_t*)key;
  const int nblocks = len / 16;  // whole 16-byte blocks in the input

  // All four lanes start from the same 32-bit seed.
  uint32_t h1 = seed;
  uint32_t h2 = seed;
  uint32_t h3 = seed;
  uint32_t h4 = seed;

  const uint32_t c1 = 0x239b961b;
  const uint32_t c2 = 0xab0e9789;
  const uint32_t c3 = 0x38b34ae5;
  const uint32_t c4 = 0xa1e38b93;

  //----------
  // body

  // `blocks` points just past the whole-block region; the loop indexes it
  // with negative i so the termination test is simply `i != 0`.
  const uint32_t* blocks = (const uint32_t*)(data + nblocks * 16);

  for (int i = -nblocks; i; i++) {
    // One 16-byte block read as four native-endian 32-bit words.
    uint32_t k1 = getblock(blocks, i * 4 + 0);
    uint32_t k2 = getblock(blocks, i * 4 + 1);
    uint32_t k3 = getblock(blocks, i * 4 + 2);
    uint32_t k4 = getblock(blocks, i * 4 + 3);

    // Per lane: scramble its word (mul/rotate/mul), xor into its state,
    // then rotate the state and fold in the NEXT lane so the four lanes
    // cross-pollinate every round.
    k1 *= c1;
    k1 = ROTL32(k1, 15);
    k1 *= c2;
    h1 ^= k1;

    h1 = ROTL32(h1, 19);
    h1 += h2;
    h1 = h1 * 5 + 0x561ccd1b;

    k2 *= c2;
    k2 = ROTL32(k2, 16);
    k2 *= c3;
    h2 ^= k2;

    h2 = ROTL32(h2, 17);
    h2 += h3;
    h2 = h2 * 5 + 0x0bcaa747;

    k3 *= c3;
    k3 = ROTL32(k3, 17);
    k3 *= c4;
    h3 ^= k3;

    h3 = ROTL32(h3, 15);
    h3 += h4;
    h3 = h3 * 5 + 0x96cd1c35;

    k4 *= c4;
    k4 = ROTL32(k4, 18);
    k4 *= c1;
    h4 ^= k4;

    h4 = ROTL32(h4, 13);
    h4 += h1;
    h4 = h4 * 5 + 0x32ac3b17;
  }

  //----------
  // tail

  // Mix in the 0-15 bytes that did not fill a whole block.  The switch
  // falls through deliberately: case N consumes byte N-1 and then drops
  // into the cases for the remaining lower bytes; byte j of each lane is
  // shifted into bit position 8*(j mod 4).
  const uint8_t* tail = (const uint8_t*)(data + nblocks * 16);

  uint32_t k1 = 0;
  uint32_t k2 = 0;
  uint32_t k3 = 0;
  uint32_t k4 = 0;

  switch (len & 15) {
    case 15:
      k4 ^= tail[14] << 16;
      // fall through
    case 14:
      k4 ^= tail[13] << 8;
      // fall through
    case 13:
      k4 ^= tail[12] << 0;
      k4 *= c4;
      k4 = ROTL32(k4, 18);
      k4 *= c1;
      h4 ^= k4;
      // fall through
    case 12:
      k3 ^= tail[11] << 24;
      // fall through
    case 11:
      k3 ^= tail[10] << 16;
      // fall through
    case 10:
      k3 ^= tail[9] << 8;
      // fall through
    case 9:
      k3 ^= tail[8] << 0;
      k3 *= c3;
      k3 = ROTL32(k3, 17);
      k3 *= c4;
      h3 ^= k3;
      // fall through
    case 8:
      k2 ^= tail[7] << 24;
      // fall through
    case 7:
      k2 ^= tail[6] << 16;
      // fall through
    case 6:
      k2 ^= tail[5] << 8;
      // fall through
    case 5:
      k2 ^= tail[4] << 0;
      k2 *= c2;
      k2 = ROTL32(k2, 16);
      k2 *= c3;
      h2 ^= k2;
      // fall through
    case 4:
      k1 ^= tail[3] << 24;
      // fall through
    case 3:
      k1 ^= tail[2] << 16;
      // fall through
    case 2:
      k1 ^= tail[1] << 8;
      // fall through
    case 1:
      k1 ^= tail[0] << 0;
      k1 *= c1;
      k1 = ROTL32(k1, 15);
      k1 *= c2;
      h1 ^= k1;
  }

  //----------
  // finalization

  // Fold the length into every lane, cross-mix the lanes, avalanche each
  // one with fmix, then cross-mix again so every output bit depends on
  // every input bit.
  h1 ^= len;
  h2 ^= len;
  h3 ^= len;
  h4 ^= len;

  h1 += h2;
  h1 += h3;
  h1 += h4;
  h2 += h1;
  h3 += h1;
  h4 += h1;

  h1 = fmix(h1);
  h2 = fmix(h2);
  h3 = fmix(h3);
  h4 = fmix(h4);

  h1 += h2;
  h1 += h3;
  h1 += h4;
  h2 += h1;
  h3 += h1;
  h4 += h1;

  ((uint32_t*)out)[0] = h1;
  ((uint32_t*)out)[1] = h2;
  ((uint32_t*)out)[2] = h3;
  ((uint32_t*)out)[3] = h4;
}
    304 
    305 //-----------------------------------------------------------------------------
    306 
// MurmurHash3, 128-bit output, tuned for x64: two 64-bit state lanes
// (h1, h2), consuming 16 input bytes per round.  Hashes `len` bytes at
// `key` with `seed` and writes 16 bytes through `out`.  As the file header
// notes, the output intentionally differs from the x86_128 variant's.
void MurmurHash3_x64_128(const void* key, const int len, const uint32_t seed,
                         void* out) {
  const uint8_t* data = (const uint8_t*)key;
  const int nblocks = len / 16;  // whole 16-byte blocks in the input

  // Both lanes start from the same zero-extended 32-bit seed.
  uint64_t h1 = seed;
  uint64_t h2 = seed;

  const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
  const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

  //----------
  // body

  // Unlike the x86 variants, this loop indexes forward from the start of
  // the data.
  const uint64_t* blocks = (const uint64_t*)(data);

  for (int i = 0; i < nblocks; i++) {
    // One 16-byte block read as two native-endian 64-bit words.
    uint64_t k1 = getblock(blocks, i * 2 + 0);
    uint64_t k2 = getblock(blocks, i * 2 + 1);

    // Scramble k1 (mul/rotate/mul), xor into h1, then stir h1 with h2.
    k1 *= c1;
    k1 = ROTL64(k1, 31);
    k1 *= c2;
    h1 ^= k1;

    h1 = ROTL64(h1, 27);
    h1 += h2;
    h1 = h1 * 5 + 0x52dce729;

    // Same pattern for the second lane, with the constants swapped.
    k2 *= c2;
    k2 = ROTL64(k2, 33);
    k2 *= c1;
    h2 ^= k2;

    h2 = ROTL64(h2, 31);
    h2 += h1;
    h2 = h2 * 5 + 0x38495ab5;
  }

  //----------
  // tail

  // Mix in the 0-15 leftover bytes.  Deliberate fall-through: bytes 8-14
  // accumulate into k2 and bytes 0-7 into k1, with byte j shifted into bit
  // position 8*(j mod 8).
  const uint8_t* tail = (const uint8_t*)(data + nblocks * 16);

  uint64_t k1 = 0;
  uint64_t k2 = 0;

  switch (len & 15) {
    case 15:
      k2 ^= uint64_t(tail[14]) << 48;
      // fall through
    case 14:
      k2 ^= uint64_t(tail[13]) << 40;
      // fall through
    case 13:
      k2 ^= uint64_t(tail[12]) << 32;
      // fall through
    case 12:
      k2 ^= uint64_t(tail[11]) << 24;
      // fall through
    case 11:
      k2 ^= uint64_t(tail[10]) << 16;
      // fall through
    case 10:
      k2 ^= uint64_t(tail[9]) << 8;
      // fall through
    case 9:
      k2 ^= uint64_t(tail[8]) << 0;
      k2 *= c2;
      k2 = ROTL64(k2, 33);
      k2 *= c1;
      h2 ^= k2;
      // fall through
    case 8:
      k1 ^= uint64_t(tail[7]) << 56;
      // fall through
    case 7:
      k1 ^= uint64_t(tail[6]) << 48;
      // fall through
    case 6:
      k1 ^= uint64_t(tail[5]) << 40;
      // fall through
    case 5:
      k1 ^= uint64_t(tail[4]) << 32;
      // fall through
    case 4:
      k1 ^= uint64_t(tail[3]) << 24;
      // fall through
    case 3:
      k1 ^= uint64_t(tail[2]) << 16;
      // fall through
    case 2:
      k1 ^= uint64_t(tail[1]) << 8;
      // fall through
    case 1:
      k1 ^= uint64_t(tail[0]) << 0;
      k1 *= c1;
      k1 = ROTL64(k1, 31);
      k1 *= c2;
      h1 ^= k1;
  }

  //----------
  // finalization

  // Fold the length into both lanes, cross-mix, avalanche each lane with
  // fmix, then cross-mix again.
  h1 ^= len;
  h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = fmix(h1);
  h2 = fmix(h2);

  h1 += h2;
  h2 += h1;

  ((uint64_t*)out)[0] = h1;
  ((uint64_t*)out)[1] = h2;
}
    414 
    415 //-----------------------------------------------------------------------------