/*
*  xxHash - Fast Hash algorithm
*  Copyright (C) 2012-2016, Yann Collet
*
*  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
*
*  Redistribution and use in source and binary forms, with or without
*  modification, are permitted provided that the following conditions are
*  met:
*
*  * Redistributions of source code must retain the above copyright
*  notice, this list of conditions and the following disclaimer.
*  * Redistributions in binary form must reproduce the above
*  copyright notice, this list of conditions and the following disclaimer
*  in the documentation and/or other materials provided with the
*  distribution.
*
*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
*  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
*  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
*  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
*  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
*  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
*  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
*  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*  You can contact the author at :
*  - xxHash homepage: http://www.xxhash.com
*  - xxHash source repository : https://github.com/Cyan4973/xxHash
*/


/* *************************************
*  Tuning parameters
***************************************/
/*!XXH_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is performed through `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : use a `__packed` union. This relies on a compiler extension (i.e., not portable).
 *            It is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't depend on a compiler extension, but violates the C standard.
 *            It can generate buggy code on targets which do not support unaligned memory accesses.
 *            In some circumstances, however, it's the only known way to get the best performance (e.g. GCC + ARMv6).
 *            See http://stackoverflow.com/a/32095106/646947 for details.
 * Prefer these methods in priority order (0 > 1 > 2).
 */
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
                        || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
                        || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define XXH_FORCE_MEMORY_ACCESS 2
#  elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
                    || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
                    || defined(__ARM_ARCH_7S__) ))
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif
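/* For instance (an illustrative build line, not a requirement), the access
 * method can be forced from the compiler command line, as noted above :
 *
 *    cc -O3 -DXXH_FORCE_MEMORY_ACCESS=2 -c xxhash.c
 *
 * would select direct access (method 2) regardless of the auto-detection
 * performed in the block above. */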

/*!XXH_ACCEPT_NULL_INPUT_POINTER :
 * If the input pointer is NULL, xxHash's default behavior is to dereference it, triggering a segfault.
 * When this macro is enabled, xxHash actively checks the input for a null pointer.
 * If it is NULL, the result is the same as for a zero-length input.
 */
#ifndef XXH_ACCEPT_NULL_INPUT_POINTER   /* can be defined externally */
#  define XXH_ACCEPT_NULL_INPUT_POINTER 0
#endif

/*!XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPUs.
 * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
 * Should endian-independence be of no importance for your application, you may set the #define below to 1,
 * to improve speed on big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
#  define XXH_FORCE_NATIVE_FORMAT 0
#endif

/*!XXH_FORCE_ALIGN_CHECK :
 * This is a minor performance trick, only useful with lots of very small keys.
 * It means : check for aligned/unaligned input.
 * The check costs one initial branch per hash;
 * set it to 0 when the input is guaranteed to be aligned,
 * or when alignment doesn't matter for performance.
 */
#ifndef XXH_FORCE_ALIGN_CHECK   /* can be defined externally */
#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif


/* *************************************
*  Includes & Memory related functions
***************************************/
/*! Modify the local functions below should you wish to use some other memory routines
*   for malloc(), free() */
#include <stdlib.h>
static void* XXH_malloc(size_t s) { return malloc(s); }
static void  XXH_free  (void* p)  { free(p); }
/*! and for memcpy() */
#include <string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }

#include <assert.h>   /* assert */

#define XXH_STATIC_LINKING_ONLY
#include "xxhash.h"


/* *************************************
*  Compiler Specific Options
***************************************/
#ifdef _MSC_VER    /* Visual Studio */
#  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
#  define FORCE_INLINE static __forceinline
#else
#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#    ifdef __GNUC__
#      define FORCE_INLINE static inline __attribute__((always_inline))
#    else
#      define FORCE_INLINE static inline
#    endif
#  else
#    define FORCE_INLINE static
#  endif /* __STDC_VERSION__ */
#endif


/* *************************************
*  Basic Types
***************************************/
#ifndef MEM_MODULE
# if !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint8_t  BYTE;
    typedef uint16_t U16;
    typedef uint32_t U32;
# else
    typedef unsigned char      BYTE;
    typedef unsigned short     U16;
    typedef unsigned int       U32;
# endif
#endif

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware. */
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* __packed instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; } __attribute__((packed)) unalign;
static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }

#else

/* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */
static U32 XXH_read32(const void* memPtr)
{
    U32 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_MEMORY_ACCESS */


/* ****************************************
*  Compiler-specific Functions and Macros
******************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
#if defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#else
static U32 XXH_swap32 (U32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
#endif


/* *************************************
*  Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;

/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
static int XXH_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
}
#  define XXH_CPU_LITTLE_ENDIAN   (XXH_isLittleEndian())
#endif


/* ***************************
*  Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
    else
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}

FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}

static U32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}


/* *************************************
*  Macros
***************************************/
#define XXH_STATIC_ASSERT(c)  do { enum { XXH_sa = 1/(int)(!!(c)) }; } while(0)   /* use after variable declarations */
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


/* *******************************************************************
*  32-bit hash functions
*********************************************************************/
static const U32 PRIME32_1 = 2654435761U;   /* 0b10011110001101110111100110110001 */
static const U32 PRIME32_2 = 2246822519U;   /* 0b10000101111010111100101001110111 */
static const U32 PRIME32_3 = 3266489917U;   /* 0b11000010101100101010111000111101 */
static const U32 PRIME32_4 =  668265263U;   /* 0b00100111110101001110101100101111 */
static const U32 PRIME32_5 =  374761393U;   /* 0b00010110010101100110011110110001 */

static U32 XXH32_round(U32 seed, U32 input)
{
    seed += input * PRIME32_2;
    seed  = XXH_rotl32(seed, 13);
    seed *= PRIME32_1;
    return seed;
}

/* mix all bits */
static U32 XXH32_avalanche(U32 h32)
{
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;
    return h32;
}

#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

static U32
XXH32_finalize(U32 h32, const void* ptr, size_t len,
               XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)ptr;

#define PROCESS1               \
    h32 += (*p++) * PRIME32_5; \
    h32 = XXH_rotl32(h32, 11) * PRIME32_1

#define PROCESS4                         \
    h32 += XXH_get32bits(p) * PRIME32_3; \
    p+=4;                                \
    h32 = XXH_rotl32(h32, 17) * PRIME32_4

    switch(len&15)   /* or switch(bEnd - p) */
    {
      case 12:      PROCESS4;
                    /* fallthrough */
      case 8:       PROCESS4;
                    /* fallthrough */
      case 4:       PROCESS4;
                    return XXH32_avalanche(h32);

      case 13:      PROCESS4;
                    /* fallthrough */
      case 9:       PROCESS4;
                    /* fallthrough */
      case 5:       PROCESS4;
                    PROCESS1;
                    return XXH32_avalanche(h32);

      case 14:      PROCESS4;
                    /* fallthrough */
      case 10:      PROCESS4;
                    /* fallthrough */
      case 6:       PROCESS4;
                    PROCESS1;
                    PROCESS1;
                    return XXH32_avalanche(h32);

      case 15:      PROCESS4;
                    /* fallthrough */
      case 11:      PROCESS4;
                    /* fallthrough */
      case 7:       PROCESS4;
                    /* fallthrough */
      case 3:       PROCESS1;
                    /* fallthrough */
      case 2:       PROCESS1;
                    /* fallthrough */
      case 1:       PROCESS1;
                    /* fallthrough */
      case 0:       return XXH32_avalanche(h32);
    }
    assert(0);
    return h32;   /* reaching this point is deemed impossible */
}


FORCE_INLINE U32
XXH32_endian_align(const void* input, size_t len, U32 seed,
                   XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;

#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16) {
        const BYTE* const limit = bEnd - 15;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
        } while (p < limit);

        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32  = seed + PRIME32_5;
    }

    h32 += (U32)len;

    return XXH32_finalize(h32, p, len&15, endian, align);
}


XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_state_t state;
    XXH32_reset(&state, seed);
    XXH32_update(&state, input, len);
    return XXH32_digest(&state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
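
/* Usage sketch (illustrative only, excluded from compilation; `buffer`, `size`
 * and the wrapper name are hypothetical, not part of this library) :
 * one-shot hashing of a memory block with XXH32(). xxhash.h is already
 * included above. */
#if 0
static unsigned int example_checksum32(const void* buffer, size_t size)
{
    unsigned int const seed = 0;        /* any fixed seed gives reproducible hashes */
    return XXH32(buffer, size, seed);   /* hash the whole buffer in one call */
}
#endif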


/*====== Hash streaming ======*/

XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}

XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state));
    state.v1 = seed + PRIME32_1 + PRIME32_2;
    state.v2 = seed + PRIME32_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME32_1;
    /* do not write into reserved, planned to be removed in a future version */
    memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
    return XXH_OK;
}


FORCE_INLINE XXH_errorcode
XXH32_update_endian(XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    if (input==NULL)
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
        return XXH_OK;
#else
        return XXH_ERROR;
#endif

    {   const BYTE* p = (const BYTE*)input;
        const BYTE* const bEnd = p + len;

        state->total_len_32 += (unsigned)len;
        state->large_len |= (len>=16) | (state->total_len_32>=16);

        if (state->memsize + len < 16) {   /* fill in tmp buffer */
            XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
            state->memsize += (unsigned)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* some data left from previous update */
            XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
            {   const U32* p32 = state->mem32;
                state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
                state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
                state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
                state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian));
            }
            p += 16-state->memsize;
            state->memsize = 0;
        }

        if (p <= bEnd-16) {
            const BYTE* const limit = bEnd - 16;
            U32 v1 = state->v1;
            U32 v2 = state->v2;
            U32 v3 = state->v3;
            U32 v4 = state->v4;

            do {
                v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
                v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
                v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
                v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
            } while (p<=limit);

            state->v1 = v1;
            state->v2 = v2;
            state->v3 = v3;
            state->v4 = v4;
        }

        if (p < bEnd) {
            XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }

    return XXH_OK;
}


XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}


FORCE_INLINE U32
XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
    U32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v1, 1)
            + XXH_rotl32(state->v2, 7)
            + XXH_rotl32(state->v3, 12)
            + XXH_rotl32(state->v4, 18);
    } else {
        h32 = state->v3 /* == seed */ + PRIME32_5;
    }

    h32 += state->total_len_32;

    return XXH32_finalize(h32, state->mem32, state->memsize, endian, XXH_aligned);
}


XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH32_digest_endian(state_in, XXH_bigEndian);
}
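
/* Usage sketch (illustrative only, excluded from compilation; `f` stands for a
 * hypothetical already-opened FILE*, and the wrapper name is invented) :
 * hashing a stream in chunks with the createState/reset/update/digest
 * sequence above. The digest equals the one-shot XXH32() of the same data. */
#if 0
#include <stdio.h>

static unsigned int example_hash_stream32(FILE* f)
{
    char buffer[4096];
    size_t n;
    unsigned int h;
    XXH32_state_t* const state = XXH32_createState();   /* allocate streaming state */
    if (state == NULL) return 0;                        /* allocation failure */
    XXH32_reset(state, 0);                              /* seed 0 */
    while ((n = fread(buffer, 1, sizeof(buffer), f)) > 0)
        XXH32_update(state, buffer, n);                 /* feed one chunk at a time */
    h = XXH32_digest(state);
    XXH32_freeState(state);
    return h;
}
#endif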


/*====== Canonical representation ======*/

/*! Default XXH result types are basic unsigned 32 and 64 bits.
 *  The canonical representation follows the human-readable write convention, aka big-endian (large digits first).
 *  These functions allow transformation of a hash result into and from its canonical format.
 *  This way, hash values can be written into a file or buffer, remaining comparable across different systems.
 */

XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}
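
/* Usage sketch (illustrative only, excluded from compilation; the function
 * name is invented) : converting a hash to its canonical big-endian form for
 * storage, then recovering it. `stored` is what would typically be written to
 * a file or network buffer. */
#if 0
static void example_canonical_roundtrip32(XXH32_hash_t h)
{
    XXH32_canonical_t stored;
    XXH32_hash_t recovered;
    XXH32_canonicalFromHash(&stored, h);            /* endian-independent byte layout */
    recovered = XXH32_hashFromCanonical(&stored);   /* recovered == h on any platform */
    (void)recovered;
}
#endif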


#ifndef XXH_NO_LONG_LONG

/* *******************************************************************
*  64-bit hash functions
*********************************************************************/

/*====== Memory access ======*/

#ifndef MEM_MODULE
# define MEM_MODULE
# if !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint64_t U64;
# else
    /* if compiler doesn't support unsigned long long, replace by another 64-bit type */
    typedef unsigned long long U64;
# endif
#endif


#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware. */
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* __packed instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;
static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }

#else

/* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */
static U64 XXH_read64(const void* memPtr)
{
    U64 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_MEMORY_ACCESS */

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap64 _byteswap_uint64
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap64 __builtin_bswap64
#else
static U64 XXH_swap64 (U64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x << 8)  & 0x000000ff00000000ULL) |
            ((x >> 8)  & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif

FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
    else
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}

FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}

static U64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}


/*====== xxh64 ======*/

static const U64 PRIME64_1 = 11400714785074694791ULL;   /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
static const U64 PRIME64_2 = 14029467366897019727ULL;   /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
static const U64 PRIME64_3 =  1609587929392839161ULL;   /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
static const U64 PRIME64_4 =  9650029242287828579ULL;   /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
static const U64 PRIME64_5 =  2870177450012600261ULL;   /* 0b0010011111010100111010110010111100010110010101100110011111000101 */

static U64 XXH64_round(U64 acc, U64 input)
{
    acc += input * PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= PRIME64_1;
    return acc;
}

static U64 XXH64_mergeRound(U64 acc, U64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * PRIME64_1 + PRIME64_4;
    return acc;
}

static U64 XXH64_avalanche(U64 h64)
{
    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;
    return h64;
}


#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

static U64
XXH64_finalize(U64 h64, const void* ptr, size_t len,
               XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)ptr;

#define PROCESS1_64            \
    h64 ^= (*p++) * PRIME64_5; \
    h64 = XXH_rotl64(h64, 11) * PRIME64_1

#define PROCESS4_64                                 \
    h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;     \
    p+=4;                                           \
    h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3

#define PROCESS8_64 do {                                 \
    U64 const k1 = XXH64_round(0, XXH_get64bits(p));     \
    p+=8;                                                \
    h64 ^= k1;                                           \
    h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;   \
} while (0)

    switch(len&31) {
      case 24: PROCESS8_64;
               /* fallthrough */
      case 16: PROCESS8_64;
               /* fallthrough */
      case  8: PROCESS8_64;
               return XXH64_avalanche(h64);

      case 28: PROCESS8_64;
               /* fallthrough */
      case 20: PROCESS8_64;
               /* fallthrough */
      case 12: PROCESS8_64;
               /* fallthrough */
      case  4: PROCESS4_64;
               return XXH64_avalanche(h64);

      case 25: PROCESS8_64;
               /* fallthrough */
      case 17: PROCESS8_64;
               /* fallthrough */
      case  9: PROCESS8_64;
               PROCESS1_64;
               return XXH64_avalanche(h64);

      case 29: PROCESS8_64;
               /* fallthrough */
      case 21: PROCESS8_64;
               /* fallthrough */
      case 13: PROCESS8_64;
               /* fallthrough */
      case  5: PROCESS4_64;
               PROCESS1_64;
               return XXH64_avalanche(h64);

      case 26: PROCESS8_64;
               /* fallthrough */
      case 18: PROCESS8_64;
               /* fallthrough */
      case 10: PROCESS8_64;
               PROCESS1_64;
               PROCESS1_64;
               return XXH64_avalanche(h64);

      case 30: PROCESS8_64;
               /* fallthrough */
      case 22: PROCESS8_64;
               /* fallthrough */
      case 14: PROCESS8_64;
               /* fallthrough */
      case  6: PROCESS4_64;
               PROCESS1_64;
               PROCESS1_64;
               return XXH64_avalanche(h64);

      case 27: PROCESS8_64;
               /* fallthrough */
      case 19: PROCESS8_64;
               /* fallthrough */
      case 11: PROCESS8_64;
               PROCESS1_64;
               PROCESS1_64;
               PROCESS1_64;
               return XXH64_avalanche(h64);

      case 31: PROCESS8_64;
               /* fallthrough */
      case 23: PROCESS8_64;
               /* fallthrough */
      case 15: PROCESS8_64;
               /* fallthrough */
      case  7: PROCESS4_64;
               /* fallthrough */
      case  3: PROCESS1_64;
               /* fallthrough */
      case  2: PROCESS1_64;
               /* fallthrough */
      case  1: PROCESS1_64;
               /* fallthrough */
      case  0: return XXH64_avalanche(h64);
    }

    /* impossible to reach */
    assert(0);
    return 0;   /* unreachable, but some compilers complain without it */
}

FORCE_INLINE U64
XXH64_endian_align(const void* input, size_t len, U64 seed,
                   XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U64 h64;

#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
        } while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64  = seed + PRIME64_5;
    }

    h64 += (U64) len;

    return XXH64_finalize(h64, p, len, endian, align);
}


XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_state_t state;
    XXH64_reset(&state, seed);
    XXH64_update(&state, input, len);
    return XXH64_digest(&state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {   /* Input is aligned, let's leverage the speed advantage */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
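
/* Usage sketch (illustrative only, excluded from compilation; `buffer` and
 * `size` are hypothetical caller-provided values) : the 64-bit one-shot API
 * mirrors XXH32(), returning a 64-bit hash for the same input. */
#if 0
static unsigned long long example_checksum64(const void* buffer, size_t size)
{
    return XXH64(buffer, size, 0);   /* one-shot 64-bit hash, seed 0 */
}
#endif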

/*====== Hash Streaming ======*/

XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}

XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state));
    state.v1 = seed + PRIME64_1 + PRIME64_2;
    state.v2 = seed + PRIME64_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME64_1;
    /* do not write into reserved, planned to be removed in a future version */
    memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
    return XXH_OK;
}

FORCE_INLINE XXH_errorcode
XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    if (input==NULL)
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
        return XXH_OK;
#else
        return XXH_ERROR;
#endif

    {   const BYTE* p = (const BYTE*)input;
        const BYTE* const bEnd = p + len;

        state->total_len += len;

        if (state->memsize + len < 32) {   /* fill in tmp buffer */
            XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
            state->memsize += (U32)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* tmp buffer is full */
            XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
            state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
            state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
            state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
            state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
            p += 32-state->memsize;
            state->memsize = 0;
        }

        if (p+32 <= bEnd) {
            const BYTE* const limit = bEnd - 32;
            U64 v1 = state->v1;
            U64 v2 = state->v2;
            U64 v3 = state->v3;
            U64 v4 = state->v4;

            do {
                v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
                v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
                v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
                v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
            } while (p<=limit);

            state->v1 = v1;
            state->v2 = v2;
            state->v3 = v3;
            state->v4 = v4;
        }

        if (p < bEnd) {
            XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}

FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
    U64 h64;

    if (state->total_len >= 32) {
        U64 const v1 = state->v1;
        U64 const v2 = state->v2;
        U64 const v3 = state->v3;
        U64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        h64 = state->v3 /*seed*/ + PRIME64_5;
    }

    h64 += (U64) state->total_len;

    return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian, XXH_aligned);
}

XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH64_digest_endian(state_in, XXH_bigEndian);
}


/*====== Canonical representation ======*/

XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}

#endif  /* XXH_NO_LONG_LONG */