/*
 * xxHash - Extremely Fast Hash algorithm
 * Header File
 * Copyright (C) 2012-2023 Yann Collet
 *
 * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other materials provided with the
 *      distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at:
 *   - xxHash homepage: https://www.xxhash.com
 *   - xxHash source repository: https://github.com/Cyan4973/xxHash
 */

/*!
 * @mainpage xxHash
 *
 * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed
 * limits.
 *
 * It is proposed in four flavors, in three families:
 * 1. @ref XXH32_family
 *   - Classic 32-bit hash function. Simple, compact, and runs on almost all
 *     32-bit and 64-bit systems.
 * 2. @ref XXH64_family
 *   - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most
 *     64-bit systems (but _not_ 32-bit systems).
 * 3. @ref XXH3_family
 *   - Modern 64-bit and 128-bit hash function family which features improved
 *     strength and performance across the board, especially on smaller data.
 *     It benefits greatly from SIMD and 64-bit without requiring it.
 *
 * Benchmarks
 * ---
 * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04.
 * The open source benchmark program is compiled with clang v10.0 using -O3 flag.
 *
 * | Hash Name            | ISA ext | Width | Large Data Speed | Small Data Velocity |
 * | -------------------- | ------- | ----: | ---------------: | ------------------: |
 * | XXH3_64bits()        | @b AVX2 |    64 |        59.4 GB/s |               133.1 |
 * | MeowHash             | AES-NI  |   128 |        58.2 GB/s |                52.5 |
 * | XXH3_128bits()       | @b AVX2 |   128 |        57.9 GB/s |               118.1 |
 * | CLHash               | PCLMUL  |    64 |        37.1 GB/s |                58.1 |
 * | XXH3_64bits()        | @b SSE2 |    64 |        31.5 GB/s |               133.1 |
 * | XXH3_128bits()       | @b SSE2 |   128 |        29.6 GB/s |               118.1 |
 * | RAM sequential read  |         |   N/A |        28.0 GB/s |                 N/A |
 * | ahash                | AES-NI  |    64 |        22.5 GB/s |               107.2 |
 * | City64               |         |    64 |        22.0 GB/s |                76.6 |
 * | T1ha2                |         |    64 |        22.0 GB/s |                99.0 |
 * | City128              |         |   128 |        21.7 GB/s |                57.7 |
 * | FarmHash             | AES-NI  |    64 |        21.3 GB/s |                71.9 |
 * | XXH64()              |         |    64 |        19.4 GB/s |                71.0 |
 * | SpookyHash           |         |    64 |        19.3 GB/s |                53.2 |
 * | Mum                  |         |    64 |        18.0 GB/s |                67.0 |
 * | CRC32C               | SSE4.2  |    32 |        13.0 GB/s |                57.9 |
 * | XXH32()              |         |    32 |         9.7 GB/s |                71.9 |
 * | City32               |         |    32 |         9.1 GB/s |                66.0 |
 * | Blake3*              | @b AVX2 |   256 |         4.4 GB/s |                 8.1 |
 * | Murmur3              |         |    32 |         3.9 GB/s |                56.1 |
 * | SipHash*             |         |    64 |         3.0 GB/s |                43.2 |
 * | Blake3*              | @b SSE2 |   256 |         2.4 GB/s |                 8.1 |
 * | HighwayHash          |         |    64 |         1.4 GB/s |                 6.0 |
 * | FNV64                |         |    64 |         1.2 GB/s |                62.7 |
 * | Blake2*              |         |   256 |         1.1 GB/s |                 5.1 |
 * | SHA1*                |         |   160 |         0.8 GB/s |                 5.6 |
 * | MD5*                 |         |   128 |         0.6 GB/s |                 7.8 |
 * @note
 *   - Hashes which require a specific ISA extension are noted. SSE2 is also noted,
 *     even though it is mandatory on x64.
 *   - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic
 *     by modern standards.
 *   - Small data velocity is a rough average of algorithm's efficiency for small
 *     data. For more accurate information, see the wiki.
 *   - More benchmarks and strength tests are found on the wiki:
 *     https://github.com/Cyan4973/xxHash/wiki
 *
 * Usage
 * ------
 * All xxHash variants use a similar API. Changing the algorithm is a trivial
 * substitution.
 *
 * @pre
 *    For functions which take an input and length parameter, the following
 *    requirements are assumed:
 *    - The range from [`input`, `input + length`) is valid, readable memory.
 *      - The only exception is if the `length` is `0`, `input` may be `NULL`.
 *    - For C++, the objects must have the *TriviallyCopyable* property, as the
 *      functions access bytes directly as if it was an array of `unsigned char`.
 *
 * @anchor single_shot_example
 * **Single Shot**
 *
 * These functions are stateless functions which hash a contiguous block of memory,
 * immediately returning the result. They are the easiest and usually the fastest
 * option.
 *
 * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
 *
 * @code{.c}
 *   #include <string.h>
 *   #include "xxhash.h"
 *
 *   // Example for a function which hashes a null terminated string with XXH32().
 *   XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
 *   {
 *       // NULL pointers are only valid if the length is zero
 *       size_t length = (string == NULL) ? 0 : strlen(string);
 *       return XXH32(string, length, seed);
 *   }
 * @endcode
 *
 *
 * @anchor streaming_example
 * **Streaming**
 *
 * These groups of functions allow incremental hashing of unknown size, even
 * more than what would fit in a size_t.
 *
 * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
 *
 * @code{.c}
 *   #include <stdio.h>
 *   #include <assert.h>
 *   #include "xxhash.h"
 *   // Example for a function which hashes a FILE incrementally with XXH3_64bits().
 *   XXH64_hash_t hashFile(FILE* f)
 *   {
 *       // Allocate a state struct. Do not just use malloc() or new.
 *       XXH3_state_t* state = XXH3_createState();
 *       assert(state != NULL && "Out of memory!");
 *       // Reset the state to start a new hashing session.
 *       XXH3_64bits_reset(state);
 *       char buffer[4096];
 *       size_t count;
 *       // Read the file in chunks
 *       while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
 *           // Run update() as many times as necessary to process the data
 *           XXH3_64bits_update(state, buffer, count);
 *       }
 *       // Retrieve the finalized hash. This will not change the state.
 *       XXH64_hash_t result = XXH3_64bits_digest(state);
 *       // Free the state. Do not use free().
 *       XXH3_freeState(state);
 *       return result;
 *   }
 * @endcode
 *
 * Streaming functions generate the xxHash value from an incremental input.
 * This method is slower than single-call functions, due to state management.
 * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
 *
 * An XXH state must first be allocated using `XXH*_createState()`.
 *
 * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
 *
 * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
 *
 * The function returns an error code, with 0 meaning OK, and any other value
 * meaning there is an error.
 *
 * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
 * This function returns the nn-bits hash as an int or long long.
 *
 * It's still possible to continue inserting input into the hash state after a
 * digest, and generate new hash values later on by invoking `XXH*_digest()`.
 *
 * When done, release the state using `XXH*_freeState()`.
 *
 *
 * @anchor canonical_representation_example
 * **Canonical Representation**
 *
 * The default return values from XXH functions are unsigned 32, 64 and 128 bit
 * integers.
 * This is the simplest and fastest format for further post-processing.
 *
 * However, this leaves open the question of what is the order on the byte level,
 * since little and big endian conventions will store the same number differently.
 *
 * The canonical representation settles this issue by mandating big-endian
 * convention, the same convention as human-readable numbers (large digits first).
 *
 * When writing hash values to storage, sending them over a network, or printing
 * them, it's highly recommended to use the canonical representation to ensure
 * portability across a wider range of systems, present and future.
 *
 * The following functions allow transformation of hash values to and from
 * canonical format.
 *
 * XXH32_canonicalFromHash(), XXH32_hashFromCanonical(),
 * XXH64_canonicalFromHash(), XXH64_hashFromCanonical(),
 * XXH128_canonicalFromHash(), XXH128_hashFromCanonical(),
 *
 * @code{.c}
 *   #include <stdio.h>
 *   #include "xxhash.h"
 *
 *   // Example for a function which prints XXH32_hash_t in human readable format
 *   void printXxh32(XXH32_hash_t hash)
 *   {
 *       XXH32_canonical_t cano;
 *       XXH32_canonicalFromHash(&cano, hash);
 *       size_t i;
 *       for(i = 0; i < sizeof(cano.digest); ++i) {
 *           printf("%02x", cano.digest[i]);
 *       }
 *       printf("\n");
 *   }
 *
 *   // Example for a function which converts XXH32_canonical_t to XXH32_hash_t
 *   XXH32_hash_t convertCanonicalToXxh32(XXH32_canonical_t cano)
 *   {
 *       XXH32_hash_t hash = XXH32_hashFromCanonical(&cano);
 *       return hash;
 *   }
 * @endcode
 *
 *
 * @file xxhash.h
 * xxHash prototypes and implementation
 */

#if defined (__cplusplus)
extern "C" {
#endif

/* ****************************
 *  INLINE mode
 ******************************/
/*!
 * @defgroup public Public API
 * Contains details on the public xxHash functions.
 * @{
 */
#ifdef XXH_DOXYGEN
/*!
 * @brief Gives access to internal state declaration, required for static allocation.
 *
 * Incompatible with dynamic linking, due to risks of ABI changes.
 *
 * Usage:
 * @code{.c}
 *     #define XXH_STATIC_LINKING_ONLY
 *     #include "xxhash.h"
 * @endcode
 */
#  define XXH_STATIC_LINKING_ONLY
/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */

/*!
 * @brief Gives access to internal definitions.
 *
 * Usage:
 * @code{.c}
 *     #define XXH_STATIC_LINKING_ONLY
 *     #define XXH_IMPLEMENTATION
 *     #include "xxhash.h"
 * @endcode
 */
#  define XXH_IMPLEMENTATION
/* Do not undef XXH_IMPLEMENTATION for Doxygen */

/*!
 * @brief Exposes the implementation and marks all functions as `inline`.
 *
 * Use these build macros to inline xxhash into the target unit.
 * Inlining improves performance on small inputs, especially when the length is
 * expressed as a compile-time constant:
 *
 *  https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
 *
 * It also keeps xxHash symbols private to the unit, so they are not exported.
 *
 * Usage:
 * @code{.c}
 *     #define XXH_INLINE_ALL
 *     #include "xxhash.h"
 * @endcode
 * Do not compile and link xxhash.o as a separate object, as it is not useful.
 */
#  define XXH_INLINE_ALL
#  undef XXH_INLINE_ALL
/*!
 * @brief Exposes the implementation without marking functions as inline.
 */
#  define XXH_PRIVATE_API
#  undef XXH_PRIVATE_API
/*!
 * @brief Emulate a namespace by transparently prefixing all symbols.
 *
 * If you want to include _and expose_ xxHash functions from within your own
 * library, but also want to avoid symbol collisions with other libraries which
 * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix
 * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE
 * (therefore, avoid empty or numeric values).
 *
 * Note that no change is required within the calling program as long as it
 * includes `xxhash.h`: Regular symbol names will be automatically translated
 * by this header.
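 *
 * For illustration, a minimal sketch; `MYLIB_` is an arbitrary example prefix,
 * and `data`/`size` are placeholders:
 * @code{.c}
 *   // Build every unit with: -DXXH_NAMESPACE=MYLIB_
 *   #include "xxhash.h"
 *
 *   // The linker-visible symbol is MYLIB_XXH64, yet callers still write:
 *   XXH64_hash_t h = XXH64(data, size, 0);
 * @endcode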
 */
#  define XXH_NAMESPACE /* YOUR NAME HERE */
#  undef XXH_NAMESPACE
#endif

#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
   && !defined(XXH_INLINE_ALL_31684351384)
   /* this section should be traversed only once */
#  define XXH_INLINE_ALL_31684351384
   /* give access to the advanced API, required to compile implementations */
#  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redef */
#  define XXH_STATIC_LINKING_ONLY
   /* make all functions private */
#  undef XXH_PUBLIC_API
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((__unused__))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
     /* note: this version may generate warnings for unused static functions */
#    define XXH_PUBLIC_API static
#  endif

   /*
    * This part deals with the special case where a unit wants to inline xxHash,
    * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
    * such as part of some previously included *.h header file.
    * Without further action, the new include would just be ignored,
    * and functions would effectively _not_ be inlined (silent failure).
    * The following macros solve this situation by prefixing all inlined names,
    * avoiding naming collision with previous inclusions.
    */
   /* Before that, we unconditionally #undef all symbols,
    * in case they were already defined with XXH_NAMESPACE.
    * They will then be redefined for XXH_INLINE_ALL
    */
#  undef XXH_versionNumber
   /* XXH32 */
#  undef XXH32
#  undef XXH32_createState
#  undef XXH32_freeState
#  undef XXH32_reset
#  undef XXH32_update
#  undef XXH32_digest
#  undef XXH32_copyState
#  undef XXH32_canonicalFromHash
#  undef XXH32_hashFromCanonical
   /* XXH64 */
#  undef XXH64
#  undef XXH64_createState
#  undef XXH64_freeState
#  undef XXH64_reset
#  undef XXH64_update
#  undef XXH64_digest
#  undef XXH64_copyState
#  undef XXH64_canonicalFromHash
#  undef XXH64_hashFromCanonical
   /* XXH3_64bits */
#  undef XXH3_64bits
#  undef XXH3_64bits_withSecret
#  undef XXH3_64bits_withSeed
#  undef XXH3_64bits_withSecretandSeed
#  undef XXH3_createState
#  undef XXH3_freeState
#  undef XXH3_copyState
#  undef XXH3_64bits_reset
#  undef XXH3_64bits_reset_withSeed
#  undef XXH3_64bits_reset_withSecret
#  undef XXH3_64bits_update
#  undef XXH3_64bits_digest
#  undef XXH3_generateSecret
   /* XXH3_128bits */
#  undef XXH128
#  undef XXH3_128bits
#  undef XXH3_128bits_withSeed
#  undef XXH3_128bits_withSecret
#  undef XXH3_128bits_reset
#  undef XXH3_128bits_reset_withSeed
#  undef XXH3_128bits_reset_withSecret
#  undef XXH3_128bits_reset_withSecretandSeed
#  undef XXH3_128bits_update
#  undef XXH3_128bits_digest
#  undef XXH128_isEqual
#  undef XXH128_cmp
#  undef XXH128_canonicalFromHash
#  undef XXH128_hashFromCanonical
   /* Finally, free the namespace itself */
#  undef XXH_NAMESPACE

   /* employ the namespace for XXH_INLINE_ALL */
#  define XXH_NAMESPACE XXH_INLINE_
   /*
    * Some identifiers (enums, type names) are not symbols,
    * but they must nonetheless be renamed to avoid redeclaration.
    * Alternative solution: do not redeclare them.
    * However, this requires some #ifdefs, and has a more dispersed impact.
    * Meanwhile, renaming can be achieved in a single place.
    */
#  define XXH_IPREF(Id)   XXH_NAMESPACE ## Id
#  define XXH_OK XXH_IPREF(XXH_OK)
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
#  define XXH32_canonical_t  XXH_IPREF(XXH32_canonical_t)
#  define XXH64_canonical_t  XXH_IPREF(XXH64_canonical_t)
#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
#  define XXH3_state_s  XXH_IPREF(XXH3_state_s)
#  define XXH3_state_t  XXH_IPREF(XXH3_state_t)
#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
   /* Ensure the header is parsed again, even if it was previously included */
#  undef XXHASH_H_5627135585666179
#  undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */

/* ****************************************************************
 *  Stable API
 *****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1

/*! @brief Marks a global symbol. */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
# if defined(_WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#  ifdef XXH_EXPORT
#   define XXH_PUBLIC_API __declspec(dllexport)
#  elif XXH_IMPORT
#   define XXH_PUBLIC_API __declspec(dllimport)
#  endif
# else
#  define XXH_PUBLIC_API   /* do nothing */
# endif
#endif

#ifdef XXH_NAMESPACE
# define XXH_CAT(A,B) A##B
# define XXH_NAME2(A,B) XXH_CAT(A,B)
# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
/* XXH3_128bits */
# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
# define XXH128_cmp     XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif


/* *************************************
 *  Compiler specifics
 ***************************************/

/* specific declaration modes for Windows */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
# if defined(_WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#  ifdef XXH_EXPORT
#   define XXH_PUBLIC_API __declspec(dllexport)
#  elif XXH_IMPORT
#   define XXH_PUBLIC_API __declspec(dllimport)
#  endif
# else
#  define XXH_PUBLIC_API   /* do nothing */
# endif
#endif

#if defined (__GNUC__)
# define XXH_CONSTF  __attribute__((__const__))
# define XXH_PUREF   __attribute__((__pure__))
# define XXH_MALLOCF __attribute__((__malloc__))
#else
# define XXH_CONSTF  /* disable */
# define XXH_PUREF
# define XXH_MALLOCF
#endif

/* *************************************
 *  Version
 ***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  3
/*! @brief Version number, encoded as two digits each */
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)

/*!
 * @brief Obtains the xxHash version.
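 *
 * For illustration, a hedged sketch comparing the header's version constant
 * against the linked library's version at runtime:
 * @code{.c}
 *   // XXH_VERSION_NUMBER comes from this header; XXH_versionNumber()
 *   // reports the version the linked library was built as.
 *   if (XXH_versionNumber() != XXH_VERSION_NUMBER) {
 *       fprintf(stderr, "xxHash version mismatch: header %u, library %u\n",
 *               (unsigned)XXH_VERSION_NUMBER, XXH_versionNumber());
 *   }
 * @endcode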
 *
 * This is mostly useful when xxHash is compiled as a shared library,
 * since the returned value comes from the library, as opposed to header file.
 *
 * @return @ref XXH_VERSION_NUMBER of the invoked library.
 */
XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);


/* ****************************
 *  Common basic types
 ******************************/
#include <stddef.h>   /* size_t */
/*!
 * @brief Exit code for the streaming API.
 */
typedef enum {
    XXH_OK = 0, /*!< OK */
    XXH_ERROR   /*!< Error */
} XXH_errorcode;


/*-**********************************************************************
 *  32-bit hash
 ************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
 * @brief An unsigned 32-bit integer.
 *
 * Not necessarily defined to `uint32_t` but functionally equivalent.
 */
typedef uint32_t XXH32_hash_t;

#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# ifdef _AIX
#   include <inttypes.h>
# else
#   include <stdint.h>
# endif
  typedef uint32_t XXH32_hash_t;

#else
#   include <limits.h>
#   if UINT_MAX == 0xFFFFFFFFUL
      typedef unsigned int XXH32_hash_t;
#   elif ULONG_MAX == 0xFFFFFFFFUL
      typedef unsigned long XXH32_hash_t;
#   else
#     error "unsupported platform: need a 32-bit type"
#   endif
#endif

/*!
 * @}
 *
 * @defgroup XXH32_family XXH32 family
 * @ingroup public
 * Contains functions used in the classic 32-bit xxHash algorithm.
 *
 * @note
 *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
 *   Note that the @ref XXH3_family provides competitive speed for both 32-bit
 *   and 64-bit systems, and offers true 64/128 bit hash results.
 *
 * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families
 * @see @ref XXH32_impl for implementation details
 * @{
 */

/*!
 * @brief Calculates the 32-bit hash of @p input using xxHash32.
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 32-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 32-bit xxHash32 value.
 *
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);

#ifndef XXH_NO_STREAM
/*!
 * @typedef struct XXH32_state_s XXH32_state_t
 * @brief The opaque state struct for the XXH32 streaming API.
 *
 * @see XXH32_state_s for details.
 * @see @ref streaming_example "Streaming Example"
 */
typedef struct XXH32_state_s XXH32_state_t;

/*!
 * @brief Allocates an @ref XXH32_state_t.
 *
 * @return An allocated pointer of @ref XXH32_state_t on success.
 * @return `NULL` on failure.
 *
 * @note Must be freed with XXH32_freeState().
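 *
 * A minimal lifecycle sketch (illustrative only; error handling elided):
 * @code{.c}
 *   XXH32_state_t* const s = XXH32_createState();
 *   if (s != NULL) {
 *       XXH32_reset(s, 0);
 *       XXH32_update(s, "abc", 3);
 *       XXH32_hash_t const h = XXH32_digest(s);  // h == XXH32("abc", 3, 0)
 *       XXH32_freeState(s);
 *   }
 * @endcode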
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void);
/*!
 * @brief Frees an @ref XXH32_state_t.
 *
 * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
 *
 * @return @ref XXH_OK.
 *
 * @note @p statePtr must be allocated with XXH32_createState().
 *
 * @see @ref streaming_example "Streaming Example"
 *
 */
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
/*!
 * @brief Copies one @ref XXH32_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

/*!
 * @brief Resets an @ref XXH32_state_t to begin a new hash.
 *
 * @param statePtr The state struct to reset.
 * @param seed The 32-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note This function resets and seeds a state. Call it before @ref XXH32_update().
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);

/*!
 * @brief Consumes a block of @p input to an @ref XXH32_state_t.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note Call this to incrementally consume blocks of data.
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);

/*!
 * @brief Returns the calculated hash value from an @ref XXH32_state_t.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return The calculated 32-bit xxHash32 value from that state.
 *
 * @note
 *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
#endif /* !XXH_NO_STREAM */

/*******   Canonical representation   *******/

/*!
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
 */
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;

/*!
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
 *
 * @param dst  The @ref XXH32_canonical_t pointer to be stored to.
 * @param hash The @ref XXH32_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
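 *
 * A short round-trip sketch (`hash` is a placeholder value; the byte array in
 * `cano.digest` is identical regardless of host endianness):
 * @code{.c}
 *   XXH32_canonical_t cano;
 *   XXH32_canonicalFromHash(&cano, hash);
 *   // cano.digest may be stored or transmitted as-is;
 *   // XXH32_hashFromCanonical(&cano) recovers the original hash.
 * @endcode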
 *
 * @see @ref canonical_representation_example "Canonical Representation Example"
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);

/*!
 * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
 *
 * @param src The @ref XXH32_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 *
 * @see @ref canonical_representation_example "Canonical Representation Example"
 */
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);


/*! @cond Doxygen ignores this part */
#ifdef __has_attribute
# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
# define XXH_HAS_ATTRIBUTE(x) 0
#endif
/*! @endcond */

/*! @cond Doxygen ignores this part */
/*
 * C23 __STDC_VERSION__ number hasn't been specified yet. For now
 * leave as `201711L` (C17 + 1).
 * TODO: Update to correct value when it's been specified.
 */
#define XXH_C23_VN 201711L
/*! @endcond */

/*! @cond Doxygen ignores this part */
/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define XXH_HAS_C_ATTRIBUTE(x) 0
#endif
/*! @endcond */

/*! @cond Doxygen ignores this part */
#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define XXH_HAS_CPP_ATTRIBUTE(x) 0
#endif
/*! @endcond */

/*! @cond Doxygen ignores this part */
/*
 * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
 * introduced in CPP17 and C23.
 * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
 * C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
 */
#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
#else
# define XXH_FALLTHROUGH /* fallthrough */
#endif
/*! @endcond */

/*! @cond Doxygen ignores this part */
/*
 * Define XXH_NOESCAPE for annotated pointers in public API.
 * https://clang.llvm.org/docs/AttributeReference.html#noescape
 * As of writing this, only supported by clang.
 */
#if XXH_HAS_ATTRIBUTE(noescape)
# define XXH_NOESCAPE __attribute__((__noescape__))
#else
# define XXH_NOESCAPE
#endif
/*! @endcond */


/*!
 * @}
 * @ingroup public
 * @{
 */

#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
 *  64-bit hash
 ************************************************************************/
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
/*!
 * @brief An unsigned 64-bit integer.
 *
 * Not necessarily defined to `uint64_t` but functionally equivalent.
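 *
 * A hedged printing sketch (assumes a C99 `<inttypes.h>`; `buf` and `bufSize`
 * are placeholders):
 * @code{.c}
 *   XXH64_hash_t const h = XXH64(buf, bufSize, 0);
 *   printf("%016" PRIx64 "\n", (uint64_t)h);
 * @endcode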
 */
typedef uint64_t XXH64_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# ifdef _AIX
#   include <inttypes.h>
# else
#   include <stdint.h>
# endif
  typedef uint64_t XXH64_hash_t;
#else
#  include <limits.h>
#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
     /* LP64 ABI says uint64_t is unsigned long */
     typedef unsigned long XXH64_hash_t;
#  else
     /* the following type must have a width of 64-bit */
     typedef unsigned long long XXH64_hash_t;
#  endif
#endif

/*!
 * @}
 *
 * @defgroup XXH64_family XXH64 family
 * @ingroup public
 * @{
 * Contains functions used in the classic 64-bit xxHash algorithm.
 *
 * @note
 *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
 *   and offers true 64/128 bit hash results.
 *   It provides better speed for systems with vector processing capabilities.
 */

/*!
 * @brief Calculates the 64-bit hash of @p input using xxHash64.
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 64-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit xxHash64 value.
 *
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);

/*******   Streaming   *******/
#ifndef XXH_NO_STREAM
/*!
 * @brief The opaque state struct for the XXH64 streaming API.
 *
 * @see XXH64_state_s for details.
 * @see @ref streaming_example "Streaming Example"
 */
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */

/*!
 * @brief Allocates an @ref XXH64_state_t.
 *
 * @return An allocated pointer of @ref XXH64_state_t on success.
 * @return `NULL` on failure.
 *
 * @note Must be freed with XXH64_freeState().
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void);

/*!
 * @brief Frees an @ref XXH64_state_t.
 *
 * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState().
 *
 * @return @ref XXH_OK.
 *
 * @note @p statePtr must be allocated with XXH64_createState().
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);

/*!
 * @brief Copies one @ref XXH64_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state);

/*!
 * @brief Resets an @ref XXH64_state_t to begin a new hash.
 *
 * @param statePtr The state struct to reset.
 * @param seed The 64-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note This function resets and seeds a state. Call it before @ref XXH64_update().
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed);

/*!
 * @brief Consumes a block of @p input to an @ref XXH64_state_t.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note Call this to incrementally consume blocks of data.
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);

/*!
 * @brief Returns the calculated hash value from an @ref XXH64_state_t.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return The calculated 64-bit xxHash64 value from that state.
 *
 * @note
 *   Calling XXH64_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr);
#endif /* !XXH_NO_STREAM */
/*******   Canonical representation   *******/

/*!
 * @brief Canonical (big endian) representation of @ref XXH64_hash_t.
 */
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;

/*!
 * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t.
 *
 * @param dst  The @ref XXH64_canonical_t pointer to be stored to.
 * @param hash The @ref XXH64_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 *
 * @see @ref canonical_representation_example "Canonical Representation Example"
 */
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash);

/*!
 * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t.
 *
 * @param src The @ref XXH64_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 *
 * @see @ref canonical_representation_example "Canonical Representation Example"
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src);

#ifndef XXH_NO_XXH3

/*!
 * @}
 * ************************************************************************
 * @defgroup XXH3_family XXH3 family
 * @ingroup public
 * @{
 *
 * XXH3 is a more recent hash algorithm featuring:
 *  - Improved speed for both small and large inputs
 *  - True 64-bit and 128-bit outputs
 *  - SIMD acceleration
 *  - Improved 32-bit viability
 *
 * Speed analysis methodology is explained here:
 *
 *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
 *
 * Compared to XXH64, expect XXH3 to run approximately
 * ~2x faster on large inputs and >3x faster on small ones,
 * exact differences vary depending on platform.
 *
 * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
 * but does not require it.
 * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3
 * at competitive speeds, even without vector support. Further details are
 * explained in the implementation.
 *
 * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD
 * implementations for many common platforms:
 *   - AVX512
 *   - AVX2
 *   - SSE2
 *   - ARM NEON
 *   - WebAssembly SIMD128
 *   - POWER8 VSX
 *   - s390x ZVector
 * This can be controlled via the @ref XXH_VECTOR macro, but it automatically
 * selects the best version according to predefined macros. For the x86 family, an
 * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c.
 *
 * XXH3 implementation is portable:
 * it has a generic C90 formulation that can be compiled on any platform,
 * all implementations generate exactly the same hash value on all platforms.
 * Starting from v0.8.0, it's also labelled "stable", meaning that
 * any future version will also generate the same hash value.
 *
 * XXH3 offers 2 variants, _64bits and _128bits.
 *
 * When only 64 bits are needed, prefer invoking the _64bits variant, as it
 * reduces the amount of mixing, resulting in faster speed on small inputs.
 * It's also generally simpler to manipulate a scalar return type than a struct.
 *
 * The API supports one-shot hashing, streaming mode, and custom secrets.
 */

/*!
 * @ingroup tuning
 * @brief Possible values for @ref XXH_VECTOR.
 *
 * Unless set explicitly, determined automatically.
 */
# define XXH_SCALAR 0 /*!< Portable scalar version */
# define XXH_SSE2   1 /*!< SSE2 for Pentium 4, Opteron, all x86_64. */
# define XXH_AVX2   2 /*!< AVX2 for Haswell and Bulldozer */
# define XXH_AVX512 3 /*!< AVX512 for Skylake and Icelake */
# define XXH_NEON   4 /*!< NEON for most ARMv7-A, all AArch64, and WASM SIMD128 */
# define XXH_VSX    5 /*!< VSX and ZVector for POWER8/z13 (64-bit) */
# define XXH_SVE    6 /*!< SVE for some ARMv8-A and ARMv9-A */
# define XXH_LSX    7 /*!< LSX (128-bit SIMD) for LoongArch64 */


/*-**********************************************************************
 *  XXH3 64-bit variant
 ************************************************************************/

/*!
 * @brief Calculates 64-bit unseeded variant of XXH3 hash of @p input.
 *
 * @param input  The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit XXH3 hash value.
 *
 * @note
 *   This is equivalent to @ref XXH3_64bits_withSeed() with a seed of `0`, however
 *   it may have slightly better performance due to constant propagation of the
 *   defaults.
 *
 * @see
 *   XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length);

/*!
 * @brief Calculates 64-bit seeded variant of XXH3 hash of @p input.
 *
 * @param input  The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed   The 64-bit seed to alter the hash result predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit XXH3 hash value.
 *
 * @note
 *   seed == 0 produces the same results as @ref XXH3_64bits().
 *
 * This variant generates a custom secret on the fly based on default secret
 * altered using the @p seed value.
 *
 * While this operation is decently fast, note that it's not completely free.
 *
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);

/*!
 * The bare minimum size for a custom secret.
 *
 * @see
 *   XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
 *   XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
 */
#define XXH3_SECRET_SIZE_MIN 136

/*!
 * @brief Calculates 64-bit variant of XXH3 with a custom "secret".
 *
 * @param data       The block of data to be hashed, at least @p len bytes in size.
 * @param len        The length of @p data, in bytes.
 * @param secret     The secret data.
 * @param secretSize The length of @p secret, in bytes.
 *
 * @return The calculated 64-bit XXH3 hash value.
 *
 * @pre
 *   The memory between @p data and @p data + @p len must be valid,
 *   readable, contiguous memory. However, if @p len is `0`, @p data may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
 * This makes it more difficult for an external actor to prepare an intentional collision.
 * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN).
 * However, the quality of the secret impacts the dispersion of the hash algorithm.
 * Therefore, the secret _must_ look like a bunch of random bytes.
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
 * Whenever in doubt about the "randomness" of the blob of bytes,
 * consider employing @ref XXH3_generateSecret() instead (see below).
 * It will generate a proper high entropy secret derived from the blob of bytes.
 * Another advantage of using XXH3_generateSecret() is that
 * it guarantees that all bits within the initial blob of bytes
 * will impact every bit of the output.
 * This is not necessarily the case when using the blob of bytes directly
 * because, when hashing _small_ inputs, only a portion of the secret is employed.
 *
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);


/*******   Streaming   *******/
#ifndef XXH_NO_STREAM
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 */

/*!
 * @brief The opaque state struct for the XXH3 streaming API.
 *
 * @see XXH3_state_s for details.
 * @see @ref streaming_example "Streaming Example"
 */
typedef struct XXH3_state_s XXH3_state_t;
XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);

/*!
 * @brief Copies one @ref XXH3_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);

/*!
 * @brief Resets an @ref XXH3_state_t to begin a new hash.
 *
 * @param statePtr The state struct to reset.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note
 *   - This function resets `statePtr` and generates a secret with default parameters.
 *   - Call this function before @ref XXH3_64bits_update().
 *   - Digest will be equivalent to `XXH3_64bits()`.
 *
 * @see @ref streaming_example "Streaming Example"
 *
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);

/*!
 * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
 *
 * @param statePtr The state struct to reset.
 * @param seed     The 64-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note
 *   - This function resets `statePtr` and generates a secret from `seed`.
 *   - Call this function before @ref XXH3_64bits_update().
 *   - Digest will be equivalent to `XXH3_64bits_withSeed()`.
 *
 * @see @ref streaming_example "Streaming Example"
 *
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);

/*!
 * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
 *
 * @param statePtr   The state struct to reset.
 * @param secret     The secret data.
 * @param secretSize The length of @p secret, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note
 *   `secret` is referenced, it _must outlive_ the hash streaming session.
 *
 * Similar to one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN,
 * and the quality of produced hash values depends on secret's entropy
 * (secret's content should look like a bunch of random bytes).
 * When in doubt about the randomness of a candidate `secret`,
 * consider employing `XXH3_generateSecret()` instead (see below).
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);

/*!
 * @brief Consumes a block of @p input to an @ref XXH3_state_t.
 *
 * @param statePtr The state struct to update.
 * @param input    The block of data to be hashed, at least @p length bytes in size.
 * @param length   The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note Call this to incrementally consume blocks of data.
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);

/*!
 * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return The calculated XXH3 64-bit hash value from that state.
 *
 * @note
 *   Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
#endif /* !XXH_NO_STREAM */

/* note : canonical representation of XXH3 is the same as XXH64
 * since they both produce XXH64_hash_t values */


/*-**********************************************************************
 *  XXH3 128-bit variant
 ************************************************************************/

/*!
 * @brief The return value from 128-bit hashes.
 *
 * Stored in little endian order, although the fields themselves are in native
 * endianness.
 */
typedef struct {
    XXH64_hash_t low64;   /*!< `value & 0xFFFFFFFFFFFFFFFF` */
    XXH64_hash_t high64;  /*!< `value >> 64` */
} XXH128_hash_t;

/*!
 * @brief Calculates 128-bit unseeded variant of XXH3 of @p data.
 *
 * @param data The block of data to be hashed, at least @p len bytes in size.
 * @param len  The length of @p data, in bytes.
 *
 * @return The calculated 128-bit variant of XXH3 value.
 *
 * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
 * for shorter inputs.
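 *
 * A small consumption sketch (`buf`/`bufSize` are placeholders; it shows how
 * the two 64-bit halves carry the 128-bit result):
 * @code{.c}
 *   XXH128_hash_t const h = XXH3_128bits(buf, bufSize);
 *   // Print as 32 hex digits, high half first.
 *   printf("%016llx%016llx\n",
 *          (unsigned long long)h.high64, (unsigned long long)h.low64);
 * @endcode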
 *
 * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of `0`, however
 * it may have slightly better performance due to constant propagation of the
 * defaults.
 *
 * @see XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len);
/*! @brief Calculates 128-bit seeded variant of XXH3 hash of @p data.
 *
 * @param data The block of data to be hashed, at least @p len bytes in size.
 * @param len  The length of @p data, in bytes.
 * @param seed The 64-bit seed to alter the hash result predictably.
 *
 * @return The calculated 128-bit variant of XXH3 value.
 *
 * @note
 *   seed == 0 produces the same results as @ref XXH3_128bits().
 *
 * This variant generates a custom secret on the fly based on default secret
 * altered using the @p seed value.
 *
 * While this operation is decently fast, note that it's not completely free.
 *
 * @see XXH3_128bits(), XXH3_128bits_withSecret(): other seeding variants
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
/*!
 * @brief Calculates 128-bit variant of XXH3 with a custom "secret".
 *
 * @param data       The block of data to be hashed, at least @p len bytes in size.
 * @param len        The length of @p data, in bytes.
 * @param secret     The secret data.
 * @param secretSize The length of @p secret, in bytes.
 *
 * @return The calculated 128-bit variant of XXH3 value.
 *
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
 * This makes it more difficult for an external actor to prepare an intentional collision.
 * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN).
 * However, the quality of the secret impacts the dispersion of the hash algorithm.
 * Therefore, the secret _must_ look like a bunch of random bytes.
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
 * Whenever in doubt about the "randomness" of the blob of bytes,
 * consider employing @ref XXH3_generateSecret() instead (see below).
 * It will generate a proper high entropy secret derived from the blob of bytes.
 * Another advantage of using XXH3_generateSecret() is that
 * it guarantees that all bits within the initial blob of bytes
 * will impact every bit of the output.
 * This is not necessarily the case when using the blob of bytes directly
 * because, when hashing _small_ inputs, only a portion of the secret is employed.
 *
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);

/*******   Streaming   *******/
#ifndef XXH_NO_STREAM
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
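 *
 * A minimal 128-bit streaming sketch (illustrative; `data`/`size` are
 * placeholders, error handling elided):
 *
 *   XXH3_state_t* const s = XXH3_createState();
 *   if (s != NULL) {
 *       XXH3_128bits_reset(s);
 *       XXH3_128bits_update(s, data, size);
 *       XXH128_hash_t const h = XXH3_128bits_digest(s);   // 128-bit result
 *       XXH3_freeState(s);
 *   }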
 *
 * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
 * Use already declared XXH3_createState() and XXH3_freeState().
 *
 * All reset and streaming functions have the same meaning as their 64-bit counterparts.
 */

/*!
 * @brief Resets an @ref XXH3_state_t to begin a new hash.
 *
 * @param statePtr The state struct to reset.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note
 *   - This function resets `statePtr` and generates a secret with default parameters.
 *   - Call it before @ref XXH3_128bits_update().
 *   - Digest will be equivalent to `XXH3_128bits()`.
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);

/*!
 * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
 *
 * @param statePtr The state struct to reset.
 * @param seed The 64-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note
 *   - This function resets `statePtr` and generates a secret from `seed`.
 *   - Call it before @ref XXH3_128bits_update().
 *   - Digest will be equivalent to `XXH3_128bits_withSeed()`.
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
/*!
 * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
 *
 * @param statePtr The state struct to reset.
 * @param secret The secret data.
 * @param secretSize The length of @p secret, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * `secret` is referenced, not copied: it _must outlive_ the hash streaming session.
 * Similar to the one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN,
 * and the quality of produced hash values depends on the secret's entropy
 * (the secret's content should look like a bunch of random bytes).
 * When in doubt about the randomness of a candidate `secret`,
 * consider employing `XXH3_generateSecret()` instead (see below).
 *
 * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);

/*!
 * @brief Consumes a block of @p input to an @ref XXH3_state_t.
 *
 * Call this to incrementally consume blocks of data.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
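 *
 * A sketch of a complete streaming session (error handling elided for
 * brevity; a real caller should check each return code, and the wrapper
 * function is hypothetical):
 * @code{.c}
 * #include "xxhash.h"
 *
 * XXH128_hash_t hash_two_parts(const void* part1, size_t len1,
 *                              const void* part2, size_t len2)
 * {
 *     XXH128_hash_t result = {0, 0};
 *     XXH3_state_t* const state = XXH3_createState();
 *     if (state == NULL) return result;
 *     XXH3_128bits_reset(state);
 *     XXH3_128bits_update(state, part1, len1);  // feed data incrementally
 *     XXH3_128bits_update(state, part2, len2);
 *     result = XXH3_128bits_digest(state);      // digest doesn't alter the state
 *     XXH3_freeState(state);
 *     return result;
 * }
 * @endcode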
 *
 */
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);

/*!
 * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return The calculated XXH3 128-bit hash value from that state.
 *
 * @note
 *   Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 *
 */
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
#endif /* !XXH_NO_STREAM */

/* The following helper functions make it possible to compare XXH128_hash_t values.
 * Since XXH128_hash_t is a structure, this capability is not offered by the language.
 * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */

/*!
 * @brief Check equality of two XXH128_hash_t values
 *
 * @param h1 The 128-bit hash value.
 * @param h2 Another 128-bit hash value.
 *
 * @return `1` if `h1` and `h2` are equal.
 * @return `0` if they are not.
 */
XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);

/*!
 * @brief Compares two @ref XXH128_hash_t
 *
 * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
 *
 * @param h128_1 Left-hand side value
 * @param h128_2 Right-hand side value
 *
 * @return >0 if @p h128_1  > @p h128_2
 * @return =0 if @p h128_1 == @p h128_2
 * @return <0 if @p h128_1  < @p h128_2
 */
XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2);


/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;


/*!
 * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t.
 *
 * @param dst The @ref XXH128_canonical_t pointer to be stored to.
 * @param hash The @ref XXH128_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 * @see @ref canonical_representation_example "Canonical Representation Example"
 */
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash);

/*!
 * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t.
 *
 * @param src The @ref XXH128_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 * @see @ref canonical_representation_example "Canonical Representation Example"
 */
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src);


#endif  /* !XXH_NO_XXH3 */
#endif  /* XXH_NO_LONG_LONG */

/*!
 * @}
 */
#endif /* XXHASH_H_5627135585666179 */



#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742
/* ****************************************************************************
 * This section contains declarations which are not guaranteed to remain stable.
 * They may change in future versions, becoming incompatible with a different
 * version of the library.
 * These declarations should only be used with static linking.
 * Never use them in association with dynamic linking!
 ***************************************************************************** */

/*
 * These definitions are only present to allow static allocation
 * of XXH states, on stack or in a struct, for example.
 * Never **ever** access their members directly.
 */

/*!
 * @internal
 * @brief Structure for XXH32 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH32_state_t.
 * Do not access the members of this struct directly.
 * @see XXH64_state_s, XXH3_state_s
 */
struct XXH32_state_s {
   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
   XXH32_hash_t acc[4];       /*!< Accumulator lanes */
   unsigned char buffer[16];  /*!< Internal buffer for partial reads. */
   XXH32_hash_t bufferedSize; /*!< Amount of data in @ref buffer */
   XXH32_hash_t reserved;     /*!< Reserved field. Do not read nor write to it. */
};   /* typedef'd to XXH32_state_t */


#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */

/*!
 * @internal
 * @brief Structure for XXH64 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH64_state_t.
 * Do not access the members of this struct directly.
 * @see XXH32_state_s, XXH3_state_s
 */
struct XXH64_state_s {
   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
   XXH64_hash_t acc[4];       /*!< Accumulator lanes */
   unsigned char buffer[32];  /*!< Internal buffer for partial reads. */
   XXH32_hash_t bufferedSize; /*!< Amount of data in @ref buffer */
   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyways */
   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it. */
};   /* typedef'd to XXH64_state_t */

#ifndef XXH_NO_XXH3

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
#  define XXH_ALIGN(n)      _Alignas(n)
#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
/* In C++ alignas() is a keyword */
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__GNUC__)
#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
#  define XXH_ALIGN(n)      __declspec(align(n))
#else
#  define XXH_ALIGN(n)   /* disabled */
#endif

/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
    && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
    && defined(__GNUC__)
#   define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
#   define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif

/*!
 * @brief The size of the internal XXH3 buffer.
 *
 * This is the optimal update size for incremental hashing.
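 *
 * For instance, a file-hashing loop (a sketch: `file` and `state` are assumed
 * to be an open `FILE*` and a reset `XXH3_state_t*`, and the 4x read size is
 * an arbitrary choice) could feed updates in multiples of this size:
 * @code{.c}
 * char buffer[4 * XXH3_INTERNALBUFFER_SIZE];
 * size_t count;
 * while ((count = fread(buffer, 1, sizeof(buffer), file)) != 0) {
 *     XXH3_64bits_update(state, buffer, count);
 * }
 * @endcode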
 *
 * @see XXH3_64bits_update(), XXH3_128bits_update().
 */
#define XXH3_INTERNALBUFFER_SIZE 256

/*!
 * @internal
 * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
 *
 * This is the size used in @ref XXH3_kSecret and the seeded functions.
 *
 * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
 */
#define XXH3_SECRET_DEFAULT_SIZE 192

/*!
 * @internal
 * @brief Structure for XXH3 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
 * Otherwise it is an opaque type.
 * Never use this definition in combination with a dynamic library.
 * This allows fields to safely be changed in the future.
 *
 * @note ** This structure has a strict alignment requirement of 64 bytes!! **
 * Do not allocate this with `malloc()` or `new`,
 * it will not be sufficiently aligned.
 * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
 *
 * Typedef'd to @ref XXH3_state_t.
 * Never access the members of this struct directly.
 *
 * @see XXH3_INITSTATE() for stack initialization.
 * @see XXH3_createState(), XXH3_freeState().
 * @see XXH32_state_s, XXH64_state_s
 */
struct XXH3_state_s {
   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
       /*!< The 8 accumulators. See @ref XXH32_state_s::acc and @ref XXH64_state_s::acc */
   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
       /*!< Used to store a custom secret generated from a seed. */
   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
       /*!< The internal buffer. @see XXH32_state_s::buffer */
   XXH32_hash_t bufferedSize;
       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::bufferedSize */
   XXH32_hash_t useSeed;
       /*!< Reserved field. Needed for padding on 64-bit. */
   size_t nbStripesSoFar;
       /*!< Number of stripes processed. */
   XXH64_hash_t totalLen;
       /*!< Total length hashed. 64-bit even on 32-bit targets. */
   size_t nbStripesPerBlock;
       /*!< Number of stripes per block. */
   size_t secretLimit;
       /*!< Size of @ref customSecret or @ref extSecret */
   XXH64_hash_t seed;
       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
   XXH64_hash_t reserved64;
       /*!< Reserved field. */
   const unsigned char* extSecret;
       /*!< Reference to an external secret for the _withSecret variants, NULL
        *   for other variants. */
   /* note: there may be some padding at the end due to alignment on 64 bytes */
}; /* typedef'd to XXH3_state_t */

#undef XXH_ALIGN_MEMBER

/*!
 * @brief Initializes a stack-allocated `XXH3_state_s`.
 *
 * When the @ref XXH3_state_t structure is merely emplaced on stack,
 * it should be initialized with XXH3_INITSTATE() or a memset()
 * in case its first reset uses XXH3_NNbits_reset_withSeed().
 * This init can be omitted if the first reset uses default or _withSecret mode.
 * This operation isn't necessary when the state is created with XXH3_createState().
 * Note that this doesn't prepare the state for a streaming operation,
 * it's still necessary to use XXH3_NNbits_reset*() afterwards.
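 *
 * A stack-allocation sketch (the seed value is arbitrary):
 * @code{.c}
 * XXH3_state_t state;       // on stack: contents start out undefined
 * XXH3_INITSTATE(&state);   // required before a first _withSeed() reset
 * XXH3_64bits_reset_withSeed(&state, 42);
 * @endcode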
 */
#define XXH3_INITSTATE(XXH3_state_ptr)                       \
    do {                                                     \
        XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
        tmp_xxh3_state_ptr->seed = 0;                        \
        tmp_xxh3_state_ptr->extSecret = NULL;                \
    } while(0)


/*!
 * @brief Calculates the 128-bit hash of @p data using XXH3.
 *
 * @param data The block of data to be hashed, at least @p len bytes in size.
 * @param len The length of @p data, in bytes.
 * @param seed The 64-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p data and @p data + @p len must be valid,
 *   readable, contiguous memory. However, if @p len is `0`, @p data may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 128-bit XXH3 value.
 *
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);


/* ===   Experimental API   === */
/* Symbols defined below must be considered tied to a specific library version. */

/*!
 * @brief Derive a high-entropy secret from any user-defined content, named customSeed.
 *
 * @param secretBuffer A writable buffer for derived high-entropy secret data.
 * @param secretSize Size of secretBuffer, in bytes. Must be >= XXH3_SECRET_SIZE_MIN.
 * @param customSeed A user-defined content.
 * @param customSeedSize Size of customSeed, in bytes.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * The generated secret can be used in combination with `*_withSecret()` functions.
 * The `_withSecret()` variants are useful to provide a higher level of protection
 * than a 64-bit seed, as it becomes much more difficult for an external actor to
 * guess how to impact the calculation logic.
 *
 * The function accepts as input a custom seed of any length and any content,
 * and derives from it a high-entropy secret of length @p secretSize into an
 * already allocated buffer @p secretBuffer.
 *
 * The generated secret can then be used with any `*_withSecret()` variant.
 * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
 * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
 * are part of this list. They all accept a `secret` parameter
 * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
 * _and_ feature very high entropy (consist of random-looking bytes).
 * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
 * be employed to ensure proper quality.
 *
 * @p customSeed can be anything. It can have any size, even small ones,
 * and its content can be anything, even "poor entropy" sources such as a bunch
 * of zeroes. The resulting `secret` will nonetheless provide all required qualities.
 *
 * @pre
 *   - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
 *   - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
 *
 * Example code:
 * @code{.c}
 * #include <stdio.h>
 * #include <stdlib.h>
 * #include <string.h>
 * #define XXH_STATIC_LINKING_ONLY // expose unstable API
 * #include "xxhash.h"
 * // Hashes argv[2] using the entropy from argv[1].
 * int main(int argc, char* argv[])
 * {
 *     char secret[XXH3_SECRET_SIZE_MIN];
 *     if (argc != 3) { return 1; }
 *     XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
 *     XXH64_hash_t h = XXH3_64bits_withSecret(
 *          argv[2], strlen(argv[2]),
 *          secret, sizeof(secret)
 *     );
 *     printf("%016llx\n", (unsigned long long) h);
 *     return 0;
 * }
 * @endcode
 */
XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);

/*!
 * @brief Generate the same secret as the _withSeed() variants.
 *
 * @param secretBuffer A writable buffer of @ref XXH3_SECRET_DEFAULT_SIZE bytes
 * @param seed The 64-bit seed to alter the hash result predictably.
 *
 * The generated secret can be used in combination with
 * `*_withSecret()` and `_withSecretandSeed()` variants.
 *
 * Example C++ `std::string` hash class:
 * @code{.cpp}
 * #include <string>
 * #define XXH_STATIC_LINKING_ONLY // expose unstable API
 * #include "xxhash.h"
 * // Slow, seeds each time
 * class HashSlow {
 *     XXH64_hash_t seed;
 * public:
 *     HashSlow(XXH64_hash_t s) : seed{s} {}
 *     size_t operator()(const std::string& x) const {
 *         return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
 *     }
 * };
 * // Fast, caches the seeded secret for future uses.
 * class HashFast {
 *     unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
 * public:
 *     HashFast(XXH64_hash_t s) {
 *         XXH3_generateSecret_fromSeed(secret, s);
 *     }
 *     size_t operator()(const std::string& x) const {
 *         return size_t{
 *             XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
 *         };
 *     }
 * };
 * @endcode
 */
XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);

/*!
 * @brief Maximum size of a "short" key in bytes.
 */
#define XXH3_MIDSIZE_MAX 240

/*!
 * @brief Calculates the 64-bit seeded variant of XXH3 hash of @p data.
 *
 * @param data The block of data to be hashed, at least @p len bytes in size.
 * @param len The length of @p data, in bytes.
 * @param secret The secret data.
 * @param secretSize The length of @p secret, in bytes.
 * @param seed The 64-bit seed to alter the hash result predictably.
 *
 * These variants generate hash values using either:
 * - @p seed for "short" keys (<= @ref XXH3_MIDSIZE_MAX = 240 bytes)
 * - @p secret for "large" keys (> @ref XXH3_MIDSIZE_MAX).
 *
 * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
 * `_withSeed()` has to generate the secret on the fly for "large" keys.
 * It's fast, but can be perceptible for "not so large" keys (< 1 KB).
 * `_withSecret()` has to generate the masks on the fly for "small" keys,
 * which requires more instructions than _withSeed() variants.
 * Therefore, the _withSecretandSeed() variant combines the best of both worlds.
 *
 * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
 * this variant produces *exactly* the same results as the `_withSeed()` variant,
 * hence offering only a pure speed benefit on "large" input,
 * by skipping the need to regenerate the secret for every large input.
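 *
 * A sketch of that pairing (the wrapper function is hypothetical, the seed
 * value arbitrary, and the static-local caching is not thread-safe; it is
 * shown for illustration only):
 * @code{.c}
 * XXH64_hash_t hash_with_cached_secret(const void* data, size_t len)
 * {
 *     static unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
 *     static int generated = 0;
 *     XXH64_hash_t const seed = 42;
 *     if (!generated) {
 *         XXH3_generateSecret_fromSeed(secret, seed);
 *         generated = 1;
 *     }
 *     // Same result as XXH3_64bits_withSeed(data, len, seed), but skips
 *     // regenerating the secret for inputs > XXH3_MIDSIZE_MAX.
 *     return XXH3_64bits_withSecretandSeed(data, len, secret, sizeof(secret), seed);
 * }
 * @endcode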
 *
 * Another usage scenario is to hash the secret to a 64-bit hash value,
 * for example with XXH3_64bits(), which then becomes the seed,
 * and then employ both the seed and the secret in _withSecretandSeed().
 * On top of speed, an added benefit is that each bit in the secret
 * has a 50% chance to swap each bit in the output, via its impact on the seed.
 *
 * This is not guaranteed when using the secret directly in "small data" scenarios,
 * because only portions of the secret are employed for small data.
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
                              XXH_NOESCAPE const void* secret, size_t secretSize,
                              XXH64_hash_t seed);

/*!
 * @brief Calculates the 128-bit seeded variant of XXH3 hash of @p input.
 *
 * @param input The memory segment to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param secret The secret used to alter the hash result predictably.
 * @param secretSize The length of @p secret, in bytes (must be >= XXH3_SECRET_SIZE_MIN)
 * @param seed64 The 64-bit seed to alter the hash result predictably.
 *
 * @return The calculated 128-bit variant of XXH3 value.
 *
 * @see XXH3_64bits_withSecretandSeed(): contract is the same.
 */
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length,
                               XXH_NOESCAPE const void* secret, size_t secretSize,
                               XXH64_hash_t seed64);

#ifndef XXH_NO_STREAM
/*!
 * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
 *
 * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
 * @param secret The secret data.
 * @param secretSize The length of @p secret, in bytes.
 * @param seed64 The 64-bit seed to alter the hash result predictably.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @see XXH3_64bits_withSecretandSeed(). Contract is identical.
 */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
                                    XXH_NOESCAPE const void* secret, size_t secretSize,
                                    XXH64_hash_t seed64);

/*!
 * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
 *
 * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
 * @param secret The secret data.
 * @param secretSize The length of @p secret, in bytes.
 * @param seed64 The 64-bit seed to alter the hash result predictably.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @see XXH3_64bits_withSecretandSeed(). Contract is identical.
 *
 * Note: there was a bug in an earlier version of this function (<= v0.8.2)
 * that would make it generate an incorrect hash value
 * when @p seed == 0 and @p length < XXH3_MIDSIZE_MAX
 * and @p secret is different from XXH3_generateSecret_fromSeed().
 * As stated in the contract, the correct hash result must be
 * the same as XXH3_128bits_withSeed() when @p length <= XXH3_MIDSIZE_MAX.
 * Results generated by this older version are wrong, hence not comparable.
 */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
                                     XXH_NOESCAPE const void* secret, size_t secretSize,
                                     XXH64_hash_t seed64);

#endif /* !XXH_NO_STREAM */

#endif  /* !XXH_NO_XXH3 */
#endif  /* XXH_NO_LONG_LONG */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  define XXH_IMPLEMENTATION
#endif

#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */


/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */


/*-**********************************************************************
 * xxHash implementation
 *-**********************************************************************
 * xxHash's implementation used to be hosted inside xxhash.c.
 *
 * However, inlining requires the implementation to be visible to the compiler,
 * hence be included alongside the header.
 * Previously, xxhash.c was simply #included whenever inlining was activated.
 * This construction created issues with a few build and install systems,
 * as it required xxhash.c to be stored in the /include directory.
 *
 * The xxHash implementation is now directly integrated within xxhash.h.
 * As a consequence, xxhash.c is no longer needed in /include.
 *
 * xxhash.c is still available and is still useful.
 * In a "normal" setup, when xxhash is not inlined,
 * xxhash.h only exposes the prototypes and public symbols,
 * while xxhash.c can be built into an object file xxhash.o
 * which can then be linked into the final binary.
 ************************************************************************/

#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
#  define XXH_IMPLEM_13a8737387

/* *************************************
*  Tuning parameters
***************************************/

/*!
 * @defgroup tuning Tuning parameters
 * @{
 *
 * Various macros to control xxHash's behavior.
 */
#ifdef XXH_DOXYGEN
/*!
 * @brief Define this to disable 64-bit code.
 *
 * Useful if only using the @ref XXH32_family and you have a strict C90 compiler.
 */
#  define XXH_NO_LONG_LONG
#  undef XXH_NO_LONG_LONG /* don't actually */
/*!
 * @brief Controls how unaligned memory is accessed.
 *
 * By default, access to unaligned memory is controlled by `memcpy()`, which is
 * safe and portable.
 *
 * Unfortunately, on some target/compiler combinations, the generated assembly
 * is sub-optimal.
 *
 * The below switch allows selection of a different access method
 * in the search for improved performance.
 *
 * @par Possible options:
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
 *  @par
 *    Use `memcpy()`. Safe and portable. Note that most modern compilers will
 *    eliminate the function call and treat it as an unaligned access.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
 *  @par
 *    Depends on compiler extensions and is therefore not portable.
 *    This method is safe _if_ your compiler supports it,
 *    and *generally* as fast or faster than `memcpy`.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
 *  @par
 *    Casts directly and dereferences. This method doesn't depend on the
 *    compiler, but it violates the C standard as it directly dereferences an
 *    unaligned pointer. It can generate buggy code on targets which do not
 *    support unaligned memory accesses, but in some circumstances, it's the
 *    only known way to get the most performance.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
 *  @par
 *    Also portable. This can generate the best code on old compilers which don't
 *    inline small `memcpy()` calls, and it might also be faster on big-endian
 *    systems which lack a native byteswap instruction. However, some compilers
 *    will emit literal byteshifts even if the target supports unaligned access.
 *
 *
 * @warning
 *   Methods 1 and 2 rely on implementation-defined behavior. Use these with
 *   care, as what works on one compiler/platform/optimization level may cause
 *   another to read garbage data or even crash.
 *
 * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
 *
 * Prefer these methods in priority order (0 > 3 > 1 > 2).
 */
#  define XXH_FORCE_MEMORY_ACCESS 0

/*!
 * @def XXH_SIZE_OPT
 * @brief Controls how much xxHash optimizes for size.
 *
 * xxHash, when compiled, tends to result in a rather large binary size. This
 * is mostly due to heavy usage of forced inlining and constant folding of the
 * @ref XXH3_family to increase performance.
 *
 * However, some developers prefer size over speed. This option can
 * significantly reduce the size of the generated code. When using the `-Os`
 * or `-Oz` options on GCC or Clang, this is defined to 1 by default,
 * otherwise it is defined to 0.
 *
 * Most of these size optimizations can be controlled manually.
 *
 * This is a number from 0-2.
 *  - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed
 *    comes first.
 *  - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more
 *    conservative and disables hacks that increase code size. It implies the
 *    options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0,
 *    and @ref XXH3_NEON_LANES == 8 if they are not already defined.
 *  - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible.
 *    Performance may cry. For example, the single shot functions just use the
 *    streaming API.
 */
#  define XXH_SIZE_OPT 0

/*!
 * @def XXH_FORCE_ALIGN_CHECK
 * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
 * and XXH64() only).
 *
 * This is an important performance trick for architectures without decent
 * unaligned memory access performance.
 *
 * It checks for input alignment, and when conditions are met, uses a "fast
 * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
 * faster_ read speed.
 *
 * The check costs one initial branch per hash, which is generally negligible,
 * but not zero.
 *
 * Moreover, it's not useful to generate an additional code path if memory
 * access uses the same instruction for both aligned and unaligned
 * addresses (e.g. x86 and aarch64).
 *
 * In these cases, the alignment check can be removed by setting this macro to 0.
 * Then the code will always use unaligned memory access.
 * The align check is automatically disabled on x86, x64, ARM64, and some ARM chips,
 * which are platforms known to offer good unaligned memory access performance.
 *
 * It is also disabled by default when @ref XXH_SIZE_OPT >= 1.
 *
 * This option does not affect XXH3 (only XXH32 and XXH64).
 */
#  define XXH_FORCE_ALIGN_CHECK 0

/*!
 * @def XXH_NO_INLINE_HINTS
 * @brief When non-zero, sets all functions to `static`.
 *
 * By default, xxHash tries to force the compiler to inline almost all internal
 * functions.
 *
 * This can usually improve performance due to reduced jumping and improved
 * constant folding, but significantly increases the size of the binary, which
 * might not be favorable.
 *
 * Additionally, sometimes the forced inlining can be detrimental to performance,
 * depending on the architecture.
 *
 * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
 * compiler full control on whether to inline or not.
 *
 * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if
 * @ref XXH_SIZE_OPT >= 1, this will automatically be defined.
 */
#  define XXH_NO_INLINE_HINTS 0

/*!
 * @def XXH3_INLINE_SECRET
 * @brief Determines whether to inline the XXH3 withSecret code.
 *
 * When the secret size is known, the compiler can improve the performance
 * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret().
 *
 * However, if the secret size is not known, it doesn't have any benefit. This
 * happens when xxHash is compiled into a global symbol. Therefore, if
 * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0.
 *
 * Additionally, this defaults to 0 on GCC 12+, which has an issue with function
 * pointers that are *sometimes* force-inlined on -Og, and it is impossible to
 * automatically detect this optimization level.
 */
#  define XXH3_INLINE_SECRET 0

/*!
 * @def XXH32_ENDJMP
 * @brief Whether to use a jump for `XXH32_finalize`.
 *
 * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
 * This is generally preferable for performance,
 * but depending on the exact architecture, a jmp may be preferable.
 *
 * This setting can only possibly make a difference for very small inputs.
 */
#  define XXH32_ENDJMP 0

/*!
 * @internal
 * @brief Redefines old internal names.
 *
 * For compatibility with code that uses xxHash's internals before the names
 * were changed to improve namespacing. There is no other reason to use this.
 */
#  define XXH_OLD_NAMES
#  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */

/*!
 * @def XXH_NO_STREAM
 * @brief Disables the streaming API.
 *
 * When xxHash is not inlined and the streaming functions are not used, disabling
 * the streaming functions can improve code size significantly, especially with
 * the @ref XXH3_family, which tends to make constant folded copies of itself.
 */
#  define XXH_NO_STREAM
#  undef XXH_NO_STREAM /* don't actually */
#endif /* XXH_DOXYGEN */
/*!
 * @}
 */

#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
/* prefer __packed__ structures (method 1) for GCC.
 * On ARM < v7 with unaligned access (e.g. Raspbian armhf), GCC still emits
 * byte shifting for method 1, so we keep memcpy, which (for some reason)
 * does unaligned loads there. */
#  if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif

#ifndef XXH_SIZE_OPT
/* default to 1 for -Os or -Oz */
#  if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
#    define XXH_SIZE_OPT 1
#  else
#    define XXH_SIZE_OPT 0
#  endif
#endif

#ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
/* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
#  if XXH_SIZE_OPT >= 1 || \
      defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
   || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64)    || defined(_M_ARM) /* visual */
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif

#ifndef XXH_NO_INLINE_HINTS
#  if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__)  /* -O0, -fno-inline */
#    define XXH_NO_INLINE_HINTS 1
#  else
#    define XXH_NO_INLINE_HINTS 0
#  endif
#endif

#ifndef XXH3_INLINE_SECRET
#  if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
     || !defined(XXH_INLINE_ALL)
#    define XXH3_INLINE_SECRET 0
#  else
#    define XXH3_INLINE_SECRET 1
#  endif
#endif

#ifndef XXH32_ENDJMP
/* generally preferable for performance */
#  define XXH32_ENDJMP 0
#endif

/*!
 * @defgroup impl Implementation
 * @{
 */


/* *************************************
*  Includes & Memory related functions
***************************************/
#if defined(XXH_NO_STREAM)
/* nothing */
#elif defined(XXH_NO_STDLIB)

/* When requesting to disable any mention of stdlib,
 * the library loses the ability to invoke malloc() / free().
 * In practice, it means that functions like `XXH*_createState()`
 * will always fail, and return NULL.
 * This flag is useful in situations where
 * xxhash.h is integrated into some kernel, embedded or limited environment
 * without access to dynamic allocation.
 */

static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
static void XXH_free(void* p) { (void)p; }

#else

/*
 * Modify the local functions below should you wish to use
 * different memory routines for malloc() and free()
 */
#include <stdlib.h>

/*!
 * @internal
 * @brief Modify this function to use a different routine than malloc().
 */
static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }

/*!
 * @internal
 * @brief Modify this function to use a different routine than free().
 */
static void XXH_free(void* p) { free(p); }

#endif /* XXH_NO_STDLIB */

#include <string.h>

/*!
 * @internal
 * @brief Modify this function to use a different routine than memcpy().
 */
static void* XXH_memcpy(void* dest, const void* src, size_t size)
{
    return memcpy(dest,src,size);
}

#include <limits.h>   /* ULLONG_MAX */


/* *************************************
*  Compiler Specific Options
***************************************/
#ifdef _MSC_VER /* Visual Studio warning fix */
#  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif

#if XXH_NO_INLINE_HINTS  /* disable inlining hints */
#  if defined(__GNUC__) || defined(__clang__)
#    define XXH_FORCE_INLINE static __attribute__((__unused__))
#  else
#    define XXH_FORCE_INLINE static
#  endif
#  define XXH_NO_INLINE static
/* enable inlining hints */
#elif defined(__GNUC__) || defined(__clang__)
#  define XXH_FORCE_INLINE static __inline__ __attribute__((__always_inline__, __unused__))
#  define XXH_NO_INLINE static __attribute__((__noinline__))
#elif defined(_MSC_VER)  /* Visual Studio */
#  define XXH_FORCE_INLINE static __forceinline
#  define XXH_NO_INLINE static __declspec(noinline)
#elif defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
#  define XXH_FORCE_INLINE static inline
#  define XXH_NO_INLINE static
#else
#  define XXH_FORCE_INLINE static
#  define XXH_NO_INLINE static
#endif

#if defined(XXH_INLINE_ALL)
#  define XXH_STATIC XXH_FORCE_INLINE
#else
#  define XXH_STATIC static
#endif

#if XXH3_INLINE_SECRET
#  define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
#else
#  define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
#endif

#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
#  define XXH_RESTRICT   /* disable */
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* >= C99 */
#  define XXH_RESTRICT   restrict
#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
   || (defined (__clang__)) \
   || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
   || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
/*
 * There are a LOT more compilers that recognize __restrict but this
 * covers the major ones.
 */
#  define XXH_RESTRICT   __restrict
#else
#  define XXH_RESTRICT   /* disable */
#endif

/* *************************************
*  Debug
***************************************/
/*!
 * @ingroup tuning
 * @def XXH_DEBUGLEVEL
 * @brief Sets the debugging level.
 *
 * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
 * compiler's command line options. The value must be a number.
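 *
 * For example, with a GCC/Clang-style driver (the flag spelling is the only
 * assumption here), compiling with `-DXXH_DEBUGLEVEL=1` enables the
 * `assert()`-based checks defined just below.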
 */
#ifndef XXH_DEBUGLEVEL
#  ifdef DEBUGLEVEL /* backwards compat */
#    define XXH_DEBUGLEVEL DEBUGLEVEL
#  else
#    define XXH_DEBUGLEVEL 0
#  endif
#endif

#if (XXH_DEBUGLEVEL>=1)
#  include <assert.h>   /* note: can still be disabled with NDEBUG */
#  define XXH_ASSERT(c)   assert(c)
#else
#  if defined(__INTEL_COMPILER)
#    define XXH_ASSERT(c)   XXH_ASSUME((unsigned char) (c))
#  else
#    define XXH_ASSERT(c)   XXH_ASSUME(c)
#  endif
#endif

/* note: use after variable declarations */
#ifndef XXH_STATIC_ASSERT
#  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)    /* C11 */
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
#  elif defined(__cplusplus) && (__cplusplus >= 201103L)            /* C++11 */
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
#  else
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
#  endif
#  define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
#endif

/*!
 * @internal
 * @def XXH_COMPILER_GUARD(var)
 * @brief Used to prevent unwanted optimizations for @p var.
 *
 * It uses an empty GCC inline assembly statement with a register constraint
 * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
 * on x86) and marks it as modified.
 *
 * This is used in a few places to avoid unwanted autovectorization (e.g.
 * XXH32_round()). All vectorization we want is explicit via intrinsics,
 * and _usually_ isn't wanted elsewhere.
 *
 * We also use it to prevent unwanted constant folding for AArch64 in
 * XXH3_initCustomSecret_scalar().
 */
#if defined(__GNUC__) || defined(__clang__)
#  define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
#else
#  define XXH_COMPILER_GUARD(var) ((void)0)
#endif

/* Specifically for NEON vectors, which use the "w" constraint on Clang. */
#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__)
#  define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var))
#else
#  define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0)
#endif

/* *************************************
*  Basic Types
***************************************/
#if !defined (__VMS) \
 && (defined (__cplusplus) \
 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  ifdef _AIX
#    include <inttypes.h>
#  else
#    include <stdint.h>
#  endif
   typedef uint8_t xxh_u8;
#else
   typedef unsigned char xxh_u8;
#endif
typedef XXH32_hash_t xxh_u32;

#ifdef XXH_OLD_NAMES
#  warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly"
#  define BYTE xxh_u8
#  define U8   xxh_u8
#  define U32  xxh_u32
#endif

/* ***   Memory access   *** */

/*!
 * @internal
 * @fn xxh_u32 XXH_read32(const void* ptr)
 * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit native endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readLE32(const void* ptr)
 * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit little endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readBE32(const void* ptr)
 * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit big endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
 * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
 * always @ref XXH_alignment::XXH_unaligned.
 *
 * @param ptr The pointer to read from.
 * @param align Whether @p ptr is aligned.
 * @pre
 *   If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
 *   aligned.
 * @return The 32-bit little endian integer from the bytes at @p ptr.
 */

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE32 and XXH_readBE32.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/*
 * Force direct memory access. Only works on CPUs which support unaligned memory
 * access in hardware.
 */
static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/*
 * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
 * documentation claimed that it only increased the alignment, but actually it
 * can decrease it on gcc, clang, and icc:
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
 * https://gcc.godbolt.org/z/xYez1j67Y.
 */
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; } __attribute__((__packed__)) unalign;
#endif
static xxh_u32 XXH_read32(const void* ptr)
{
    typedef __attribute__((__aligned__(1))) xxh_u32 xxh_unalign32;
    return *((const xxh_unalign32*)ptr);
}

#else

/*
 * Portable and safe solution. Generally efficient.
 * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
 */
static xxh_u32 XXH_read32(const void* memPtr)
{
    xxh_u32 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif /* XXH_FORCE_MEMORY_ACCESS */


/* ***   Endianness   *** */

/*!
 * @ingroup tuning
 * @def XXH_CPU_LITTLE_ENDIAN
 * @brief Whether the target is little endian.
 *
 * Defined to 1 if the target is little endian, or 0 if it is big endian.
 * It can be defined externally, for example on the compiler command line.
 *
 * If it is not defined,
 * a runtime check (which is usually constant folded) is used instead.
 *
 * @note
 *   This is not necessarily defined to an integer constant.
 *
 * @see XXH_isLittleEndian() for the runtime check.
 */
#ifndef XXH_CPU_LITTLE_ENDIAN
/*
 * Try to detect endianness automatically, to avoid the nonstandard behavior
 * in `XXH_isLittleEndian()`
 */
#  if defined(_WIN32) /* Windows is always little endian */ \
     || defined(__LITTLE_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 1
#  elif defined(__BIG_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 0
#  else
/*!
 * @internal
 * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
 *
 * Most compilers will constant fold this.
 */
static int XXH_isLittleEndian(void)
{
    /*
     * Portable and well-defined behavior.
     * Don't use static: it is detrimental to performance.
     */
    const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
    return one.c[0];
}
#   define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
#  endif
#endif




/* ****************************************
*  Compiler-specific Functions and Macros
******************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#ifdef __has_builtin
#  define XXH_HAS_BUILTIN(x) __has_builtin(x)
#else
#  define XXH_HAS_BUILTIN(x) 0
#endif



/*
 * C23 and future versions have standard "unreachable()".
 * Once it has been implemented reliably we can add it as an
 * additional case:
 *
 * ```
 * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN)
 * #  include <stddef.h>
 * #  ifdef unreachable
 * #    define XXH_UNREACHABLE() unreachable()
 * #  endif
 * #endif
 * ```
 *
 * Note C++23 also has std::unreachable() which can be detected
 * as follows:
 * ```
 * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
 * #  include <utility>
 * #  define XXH_UNREACHABLE() std::unreachable()
 * #endif
 * ```
 * NB: `__cpp_lib_unreachable` is defined in the `<version>` header.
 * We don't use that, as including `<utility>` in `extern "C"` blocks
 * doesn't work on GCC 12.
 */

#if XXH_HAS_BUILTIN(__builtin_unreachable)
#  define XXH_UNREACHABLE() __builtin_unreachable()

#elif defined(_MSC_VER)
#  define XXH_UNREACHABLE() __assume(0)

#else
#  define XXH_UNREACHABLE()
#endif

#if XXH_HAS_BUILTIN(__builtin_assume)
#  define XXH_ASSUME(c) __builtin_assume(c)
#else
#  define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
#endif

/*!
 * @internal
 * @def XXH_rotl32(x,r)
 * @brief 32-bit rotate left.
 *
 * @param x The 32-bit integer to be rotated.
 * @param r The number of bits to rotate.
 * @pre
 *   @p r > 0 && @p r < 32
 * @note
 *   @p x and @p r may be evaluated multiple times.
 * @return The rotated result.
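 *
 * For example, `XXH_rotl32(0x80000000U, 1) == 1`: the top bit wraps around
 * into bit 0.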
 */
#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
#  define XXH_rotl32 __builtin_rotateleft32
#  define XXH_rotl64 __builtin_rotateleft64
#elif XXH_HAS_BUILTIN(__builtin_stdc_rotate_left)
#  define XXH_rotl32 __builtin_stdc_rotate_left
#  define XXH_rotl64 __builtin_stdc_rotate_left
/* Note: although _rotl exists for MinGW (GCC under Windows), performance seems poor */
#elif defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif

/*!
 * @internal
 * @fn xxh_u32 XXH_swap32(xxh_u32 x)
 * @brief A 32-bit byteswap.
 *
 * @param x The 32-bit integer to byteswap.
 * @return @p x, byteswapped.
 */
#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#else
static xxh_u32 XXH_swap32 (xxh_u32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
#endif


/* ***************************
*  Memory reads
*****************************/

/*!
 * @internal
 * @brief Enum to indicate whether a pointer is aligned.
 */
typedef enum {
    XXH_aligned,  /*!< Aligned */
    XXH_unaligned /*!< Possibly unaligned */
} XXH_alignment;

/*
 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
 *
 * This is ideal for older compilers which don't inline memcpy.
 */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u32)bytePtr[1] << 8)
         | ((xxh_u32)bytePtr[2] << 16)
         | ((xxh_u32)bytePtr[3] << 24);
}

XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[3]
         | ((xxh_u32)bytePtr[2] << 8)
         | ((xxh_u32)bytePtr[1] << 16)
         | ((xxh_u32)bytePtr[0] << 24);
}

#else
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}

static xxh_u32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned) {
        return XXH_readLE32(ptr);
    } else {
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
    }
}


/* *************************************
*  Misc
***************************************/
/*! @ingroup public */
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


/* *******************************************************************
*  32-bit hash functions
*********************************************************************/
/*!
 * @}
 * @defgroup XXH32_impl XXH32 implementation
 * @ingroup impl
 *
 * Details on the XXH32 implementation.
 * @{
 */
 /* #define instead of static const, to be used as initializers */
#define XXH_PRIME32_1  0x9E3779B1U  /*!< 0b10011110001101110111100110110001 */
#define XXH_PRIME32_2  0x85EBCA77U  /*!< 0b10000101111010111100101001110111 */
#define XXH_PRIME32_3  0xC2B2AE3DU  /*!< 0b11000010101100101010111000111101 */
#define XXH_PRIME32_4  0x27D4EB2FU  /*!< 0b00100111110101001110101100101111 */
#define XXH_PRIME32_5  0x165667B1U  /*!< 0b00010110010101100110011110110001 */

#ifdef XXH_OLD_NAMES
#  define PRIME32_1 XXH_PRIME32_1
#  define PRIME32_2 XXH_PRIME32_2
#  define PRIME32_3 XXH_PRIME32_3
#  define PRIME32_4 XXH_PRIME32_4
#  define PRIME32_5 XXH_PRIME32_5
#endif

/*!
 * @internal
 * @brief Normal stripe processing routine.
 *
 * This shuffles the bits so that any bit from @p input impacts several bits in
 * @p acc.
 *
 * @param acc The accumulator lane.
 * @param input The stripe of input to mix.
 * @return The mixed accumulator lane.
 */
static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
{
    acc += input * XXH_PRIME32_2;
    acc  = XXH_rotl32(acc, 13);
    acc *= XXH_PRIME32_1;
#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
    /*
     * UGLY HACK:
     * A compiler fence is used to prevent GCC and Clang from
     * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
     * reason) without globally disabling SSE4.1.
     *
     * The reason we want to avoid vectorization is because despite working on
     * 4 integers at a time, there are multiple factors slowing XXH32 down on
     * SSE4:
     * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
     *   newer chips!) making it slightly slower to multiply four integers at
     *   once compared to four integers independently. Even on Sandy/Ivy Bridge,
     *   where pmulld was fastest, it is still not worth it to go into SSE
     *   just to multiply unless doing a long operation.
     *
     * - Four instructions are required to rotate,
     *      movdqa tmp, v  // not required with VEX encoding
     *      pslld tmp, 13  // tmp <<= 13
     *      psrld v, 19    // v >>= 19
     *      por v, tmp     // v |= tmp
     *   compared to one for scalar:
     *      roll v, 13     // reliably fast across the board
     *      shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
     *
     * - Instruction level parallelism is actually more beneficial here because
     *   the SIMD actually serializes this operation: While v1 is rotating, v2
     *   can load data, while v3 can multiply. SSE forces them to operate
     *   together.
     *
     * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing
     * the loop. NEON is only faster on the A53, and with the newer cores, it is less
     * than half the speed.
     *
     * Additionally, this is used on WASM SIMD128 because it JITs to the same
     * SIMD instructions and has the same issue.
     */
    XXH_COMPILER_GUARD(acc);
#endif
    return acc;
}

/*!
 * @internal
 * @brief Mixes all bits to finalize the hash.
 *
 * The final mix ensures that all input bits have a chance to impact any bit in
 * the output digest, resulting in an unbiased distribution.
2981 * 2982 * @param hash The hash to avalanche. 2983 * @return The avalanched hash. 2984 */ 2985 static xxh_u32 XXH32_avalanche(xxh_u32 hash) 2986 { 2987 hash ^= hash >> 15; 2988 hash *= XXH_PRIME32_2; 2989 hash ^= hash >> 13; 2990 hash *= XXH_PRIME32_3; 2991 hash ^= hash >> 16; 2992 return hash; 2993 } 2994 2995 #define XXH_get32bits(p) XXH_readLE32_align(p, align) 2996 2997 /*! 2998 * @internal 2999 * @brief Sets up the initial accumulator state for XXH32(). 3000 */ 3001 XXH_FORCE_INLINE void 3002 XXH32_initAccs(xxh_u32 *acc, xxh_u32 seed) 3003 { 3004 XXH_ASSERT(acc != NULL); 3005 acc[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2; 3006 acc[1] = seed + XXH_PRIME32_2; 3007 acc[2] = seed + 0; 3008 acc[3] = seed - XXH_PRIME32_1; 3009 } 3010 3011 /*! 3012 * @internal 3013 * @brief Consumes a block of data for XXH32(). 3014 * 3015 * @return the end input pointer. 3016 */ 3017 XXH_FORCE_INLINE const xxh_u8 * 3018 XXH32_consumeLong( 3019 xxh_u32 *XXH_RESTRICT acc, 3020 xxh_u8 const *XXH_RESTRICT input, 3021 size_t len, 3022 XXH_alignment align 3023 ) 3024 { 3025 const xxh_u8* const bEnd = input + len; 3026 const xxh_u8* const limit = bEnd - 15; 3027 XXH_ASSERT(acc != NULL); 3028 XXH_ASSERT(input != NULL); 3029 XXH_ASSERT(len >= 16); 3030 do { 3031 acc[0] = XXH32_round(acc[0], XXH_get32bits(input)); input += 4; 3032 acc[1] = XXH32_round(acc[1], XXH_get32bits(input)); input += 4; 3033 acc[2] = XXH32_round(acc[2], XXH_get32bits(input)); input += 4; 3034 acc[3] = XXH32_round(acc[3], XXH_get32bits(input)); input += 4; 3035 } while (input < limit); 3036 3037 return input; 3038 } 3039 3040 /*! 3041 * @internal 3042 * @brief Merges the accumulator lanes together for XXH32() 3043 */ 3044 XXH_FORCE_INLINE XXH_PUREF xxh_u32 3045 XXH32_mergeAccs(const xxh_u32 *acc) 3046 { 3047 XXH_ASSERT(acc != NULL); 3048 return XXH_rotl32(acc[0], 1) + XXH_rotl32(acc[1], 7) 3049 + XXH_rotl32(acc[2], 12) + XXH_rotl32(acc[3], 18); 3050 } 3051 3052 /*! 3053 * @internal 3054 * @brief Processes the last 0-15 bytes of @p ptr. 3055 * 3056 * There may be up to 15 bytes remaining to consume from the input. 3057 * This final stage will digest them to ensure that all input bytes are present 3058 * in the final mix. 3059 * 3060 * @param hash The hash to finalize. 3061 * @param ptr The pointer to the remaining input. 3062 * @param len The remaining length, modulo 16. 3063 * @param align Whether @p ptr is aligned. 3064 * @return The finalized hash. 3065 * @see XXH64_finalize(). 
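*
* For example, with 13 bytes remaining, both code paths below perform the
* same work: three 4-byte mixes (XXH_PROCESS4) followed by one single-byte
* mix (XXH_PROCESS1).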
3066 */ 3067 static XXH_PUREF xxh_u32 3068 XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) 3069 { 3070 #define XXH_PROCESS1 do { \ 3071 hash += (*ptr++) * XXH_PRIME32_5; \ 3072 hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \ 3073 } while (0) 3074 3075 #define XXH_PROCESS4 do { \ 3076 hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \ 3077 ptr += 4; \ 3078 hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \ 3079 } while (0) 3080 3081 if (ptr==NULL) XXH_ASSERT(len == 0); 3082 3083 /* Compact rerolled version; generally faster */ 3084 if (!XXH32_ENDJMP) { 3085 len &= 15; 3086 while (len >= 4) { 3087 XXH_PROCESS4; 3088 len -= 4; 3089 } 3090 while (len > 0) { 3091 XXH_PROCESS1; 3092 --len; 3093 } 3094 return XXH32_avalanche(hash); 3095 } else { 3096 switch(len&15) /* or switch(bEnd - p) */ { 3097 case 12: XXH_PROCESS4; 3098 XXH_FALLTHROUGH; /* fallthrough */ 3099 case 8: XXH_PROCESS4; 3100 XXH_FALLTHROUGH; /* fallthrough */ 3101 case 4: XXH_PROCESS4; 3102 return XXH32_avalanche(hash); 3103 3104 case 13: XXH_PROCESS4; 3105 XXH_FALLTHROUGH; /* fallthrough */ 3106 case 9: XXH_PROCESS4; 3107 XXH_FALLTHROUGH; /* fallthrough */ 3108 case 5: XXH_PROCESS4; 3109 XXH_PROCESS1; 3110 return XXH32_avalanche(hash); 3111 3112 case 14: XXH_PROCESS4; 3113 XXH_FALLTHROUGH; /* fallthrough */ 3114 case 10: XXH_PROCESS4; 3115 XXH_FALLTHROUGH; /* fallthrough */ 3116 case 6: XXH_PROCESS4; 3117 XXH_PROCESS1; 3118 XXH_PROCESS1; 3119 return XXH32_avalanche(hash); 3120 3121 case 15: XXH_PROCESS4; 3122 XXH_FALLTHROUGH; /* fallthrough */ 3123 case 11: XXH_PROCESS4; 3124 XXH_FALLTHROUGH; /* fallthrough */ 3125 case 7: XXH_PROCESS4; 3126 XXH_FALLTHROUGH; /* fallthrough */ 3127 case 3: XXH_PROCESS1; 3128 XXH_FALLTHROUGH; /* fallthrough */ 3129 case 2: XXH_PROCESS1; 3130 XXH_FALLTHROUGH; /* fallthrough */ 3131 case 1: XXH_PROCESS1; 3132 XXH_FALLTHROUGH; /* fallthrough */ 3133 case 0: return XXH32_avalanche(hash); 3134 } 3135 XXH_ASSERT(0); 3136 return hash; /* reaching this point is deemed impossible */ 3137 } 3138 } 3139 3140 #ifdef XXH_OLD_NAMES 3141 # define PROCESS1 XXH_PROCESS1 3142 # define PROCESS4 XXH_PROCESS4 3143 #else 3144 # undef XXH_PROCESS1 3145 # undef XXH_PROCESS4 3146 #endif 3147 3148 /*! 3149 * @internal 3150 * @brief The implementation for @ref XXH32(). 3151 * 3152 * @param input , len , seed Directly passed from @ref XXH32(). 3153 * @param align Whether @p input is aligned. 3154 * @return The calculated hash. 3155 */ 3156 XXH_FORCE_INLINE XXH_PUREF xxh_u32 3157 XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align) 3158 { 3159 xxh_u32 h32; 3160 3161 if (input==NULL) XXH_ASSERT(len == 0); 3162 3163 if (len>=16) { 3164 xxh_u32 acc[4]; 3165 XXH32_initAccs(acc, seed); 3166 3167 input = XXH32_consumeLong(acc, input, len, align); 3168 3169 h32 = XXH32_mergeAccs(acc); 3170 } else { 3171 h32 = seed + XXH_PRIME32_5; 3172 } 3173 3174 h32 += (xxh_u32)len; 3175 3176 return XXH32_finalize(h32, input, len&15, align); 3177 } 3178 3179 /*! 
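* As a usage sketch (the type and function names here are hypothetical):
* any trivially copyable object can be hashed by its bytes, though beware
* that padding bytes participate in the hash too.
* @code{.c}
* typedef struct { int x; int y; } point_t; // no padding on common ABIs
* XXH32_hash_t hash_point(const point_t* p, XXH32_hash_t seed)
* {
*     return XXH32(p, sizeof(*p), seed);
* }
* @endcode
*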
@ingroup XXH32_family */ 3180 XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed) 3181 { 3182 #if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 3183 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ 3184 XXH32_state_t state; 3185 XXH32_reset(&state, seed); 3186 XXH32_update(&state, (const xxh_u8*)input, len); 3187 return XXH32_digest(&state); 3188 #else 3189 if (XXH_FORCE_ALIGN_CHECK) { 3190 if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ 3191 return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); 3192 } } 3193 3194 return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); 3195 #endif 3196 } 3197 3198 3199 3200 /******* Hash streaming *******/ 3201 #ifndef XXH_NO_STREAM 3202 /*! @ingroup XXH32_family */ 3203 XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) 3204 { 3205 return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); 3206 } 3207 /*! @ingroup XXH32_family */ 3208 XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) 3209 { 3210 XXH_free(statePtr); 3211 return XXH_OK; 3212 } 3213 3214 /*! @ingroup XXH32_family */ 3215 XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) 3216 { 3217 XXH_memcpy(dstState, srcState, sizeof(*dstState)); 3218 } 3219 3220 /*! @ingroup XXH32_family */ 3221 XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed) 3222 { 3223 XXH_ASSERT(statePtr != NULL); 3224 memset(statePtr, 0, sizeof(*statePtr)); 3225 XXH32_initAccs(statePtr->acc, seed); 3226 return XXH_OK; 3227 } 3228 3229 3230 /*! @ingroup XXH32_family */ 3231 XXH_PUBLIC_API XXH_errorcode 3232 XXH32_update(XXH32_state_t* state, const void* input, size_t len) 3233 { 3234 if (input==NULL) { 3235 XXH_ASSERT(len == 0); 3236 return XXH_OK; 3237 } 3238 3239 state->total_len_32 += (XXH32_hash_t)len; 3240 state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16)); 3241 3242 XXH_ASSERT(state->bufferedSize < sizeof(state->buffer)); 3243 if (len < sizeof(state->buffer) - state->bufferedSize) { /* fill in tmp buffer */ 3244 XXH_memcpy(state->buffer + state->bufferedSize, input, len); 3245 state->bufferedSize += (XXH32_hash_t)len; 3246 return XXH_OK; 3247 } 3248 3249 { const xxh_u8* xinput = (const xxh_u8*)input; 3250 const xxh_u8* const bEnd = xinput + len; 3251 3252 if (state->bufferedSize) { /* non-empty buffer: complete first */ 3253 XXH_memcpy(state->buffer + state->bufferedSize, xinput, sizeof(state->buffer) - state->bufferedSize); 3254 xinput += sizeof(state->buffer) - state->bufferedSize; 3255 /* then process one round */ 3256 (void)XXH32_consumeLong(state->acc, state->buffer, sizeof(state->buffer), XXH_aligned); 3257 state->bufferedSize = 0; 3258 } 3259 3260 XXH_ASSERT(xinput <= bEnd); 3261 if ((size_t)(bEnd - xinput) >= sizeof(state->buffer)) { 3262 /* Process the remaining data */ 3263 xinput = XXH32_consumeLong(state->acc, xinput, (size_t)(bEnd - xinput), XXH_unaligned); 3264 } 3265 3266 if (xinput < bEnd) { 3267 /* Copy the leftover to the tmp buffer */ 3268 XXH_memcpy(state->buffer, xinput, (size_t)(bEnd-xinput)); 3269 state->bufferedSize = (unsigned)(bEnd-xinput); 3270 } 3271 } 3272 3273 return XXH_OK; 3274 } 3275 3276 3277 /*! 
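* A minimal streaming round trip, sketched without error handling
* (XXH32_createState() may return NULL); the result matches the
* equivalent single-shot call:
* @code{.c}
* XXH32_hash_t hash_hello_world(void)
* {
*     XXH32_state_t* const state = XXH32_createState();
*     XXH32_hash_t hash;
*     (void)XXH32_reset(state, 0);             // seed = 0
*     (void)XXH32_update(state, "hello ", 6);
*     (void)XXH32_update(state, "world", 5);
*     hash = XXH32_digest(state);              // == XXH32("hello world", 11, 0)
*     (void)XXH32_freeState(state);
*     return hash;
* }
* @endcode
*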
@ingroup XXH32_family */ 3278 XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state) 3279 { 3280 xxh_u32 h32; 3281 3282 if (state->large_len) { 3283 h32 = XXH32_mergeAccs(state->acc); 3284 } else { 3285 h32 = state->acc[2] /* == seed */ + XXH_PRIME32_5; 3286 } 3287 3288 h32 += state->total_len_32; 3289 3290 return XXH32_finalize(h32, state->buffer, state->bufferedSize, XXH_aligned); 3291 } 3292 #endif /* !XXH_NO_STREAM */ 3293 3294 /******* Canonical representation *******/ 3295 3296 /*! @ingroup XXH32_family */ 3297 XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) 3298 { 3299 XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); 3300 if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); 3301 XXH_memcpy(dst, &hash, sizeof(*dst)); 3302 } 3303 /*! @ingroup XXH32_family */ 3304 XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) 3305 { 3306 return XXH_readBE32(src); 3307 } 3308 3309 3310 #ifndef XXH_NO_LONG_LONG 3311 3312 /* ******************************************************************* 3313 * 64-bit hash functions 3314 *********************************************************************/ 3315 /*! 3316 * @} 3317 * @ingroup impl 3318 * @{ 3319 */ 3320 /******* Memory access *******/ 3321 3322 typedef XXH64_hash_t xxh_u64; 3323 3324 #ifdef XXH_OLD_NAMES 3325 # define U64 xxh_u64 3326 #endif 3327 3328 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) 3329 /* 3330 * Manual byteshift. Best for old compilers which don't inline memcpy. 3331 * We actually directly use XXH_readLE64 and XXH_readBE64. 3332 */ 3333 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) 3334 3335 /* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */ 3336 static xxh_u64 XXH_read64(const void* memPtr) 3337 { 3338 return *(const xxh_u64*) memPtr; 3339 } 3340 3341 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) 3342 3343 /* 3344 * __attribute__((aligned(1))) is supported by gcc and clang. Originally the 3345 * documentation claimed that it only increased the alignment, but actually it 3346 * can decrease it on gcc, clang, and icc: 3347 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, 3348 * https://gcc.godbolt.org/z/xYez1j67Y. 3349 */ 3350 #ifdef XXH_OLD_NAMES 3351 typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((__packed__)) unalign64; 3352 #endif 3353 static xxh_u64 XXH_read64(const void* ptr) 3354 { 3355 typedef __attribute__((__aligned__(1))) xxh_u64 xxh_unalign64; 3356 return *((const xxh_unalign64*)ptr); 3357 } 3358 3359 #else 3360 3361 /* 3362 * Portable and safe solution. Generally efficient.
3363 * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html 3364 */ 3365 static xxh_u64 XXH_read64(const void* memPtr) 3366 { 3367 xxh_u64 val; 3368 XXH_memcpy(&val, memPtr, sizeof(val)); 3369 return val; 3370 } 3371 3372 #endif /* XXH_FORCE_MEMORY_ACCESS */ 3373 3374 #if defined(_MSC_VER) /* Visual Studio */ 3375 # define XXH_swap64 _byteswap_uint64 3376 #elif XXH_GCC_VERSION >= 403 3377 # define XXH_swap64 __builtin_bswap64 3378 #else 3379 static xxh_u64 XXH_swap64(xxh_u64 x) 3380 { 3381 return ((x << 56) & 0xff00000000000000ULL) | 3382 ((x << 40) & 0x00ff000000000000ULL) | 3383 ((x << 24) & 0x0000ff0000000000ULL) | 3384 ((x << 8) & 0x000000ff00000000ULL) | 3385 ((x >> 8) & 0x00000000ff000000ULL) | 3386 ((x >> 24) & 0x0000000000ff0000ULL) | 3387 ((x >> 40) & 0x000000000000ff00ULL) | 3388 ((x >> 56) & 0x00000000000000ffULL); 3389 } 3390 #endif 3391 3392 3393 /* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */ 3394 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) 3395 3396 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr) 3397 { 3398 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; 3399 return bytePtr[0] 3400 | ((xxh_u64)bytePtr[1] << 8) 3401 | ((xxh_u64)bytePtr[2] << 16) 3402 | ((xxh_u64)bytePtr[3] << 24) 3403 | ((xxh_u64)bytePtr[4] << 32) 3404 | ((xxh_u64)bytePtr[5] << 40) 3405 | ((xxh_u64)bytePtr[6] << 48) 3406 | ((xxh_u64)bytePtr[7] << 56); 3407 } 3408 3409 XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr) 3410 { 3411 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; 3412 return bytePtr[7] 3413 | ((xxh_u64)bytePtr[6] << 8) 3414 | ((xxh_u64)bytePtr[5] << 16) 3415 | ((xxh_u64)bytePtr[4] << 24) 3416 | ((xxh_u64)bytePtr[3] << 32) 3417 | ((xxh_u64)bytePtr[2] << 40) 3418 | ((xxh_u64)bytePtr[1] << 48) 3419 | ((xxh_u64)bytePtr[0] << 56); 3420 } 3421 3422 #else 3423 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr) 3424 { 3425 return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); 3426 } 3427 3428 static xxh_u64 XXH_readBE64(const void* ptr) 3429 { 3430 return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); 3431 } 3432 #endif 3433 3434 XXH_FORCE_INLINE xxh_u64 3435 XXH_readLE64_align(const void* ptr, XXH_alignment align) 3436 { 3437 if (align==XXH_unaligned) 3438 return XXH_readLE64(ptr); 3439 else 3440 return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr); 3441 } 3442 3443 3444 /******* xxh64 *******/ 3445 /*! 3446 * @} 3447 * @defgroup XXH64_impl XXH64 implementation 3448 * @ingroup impl 3449 * 3450 * Details on the XXH64 implementation.
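*
* The structure mirrors XXH32: four 64-bit accumulator lanes consume
* 32-byte stripes, which are then merged and avalanched.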
3451 * @{ 3452 */ 3453 /* #define rather than static const, to be used as initializers */ 3454 #define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */ 3455 #define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */ 3456 #define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */ 3457 #define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */ 3458 #define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */ 3459 3460 #ifdef XXH_OLD_NAMES 3461 # define PRIME64_1 XXH_PRIME64_1 3462 # define PRIME64_2 XXH_PRIME64_2 3463 # define PRIME64_3 XXH_PRIME64_3 3464 # define PRIME64_4 XXH_PRIME64_4 3465 # define PRIME64_5 XXH_PRIME64_5 3466 #endif 3467 3468 /*! @copydoc XXH32_round */ 3469 static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input) 3470 { 3471 acc += input * XXH_PRIME64_2; 3472 acc = XXH_rotl64(acc, 31); 3473 acc *= XXH_PRIME64_1; 3474 #if (defined(__AVX512F__)) && !defined(XXH_ENABLE_AUTOVECTORIZE) 3475 /* 3476 * DISABLE AUTOVECTORIZATION: 3477 * A compiler fence is used to prevent GCC and Clang from 3478 * autovectorizing the XXH64 loop (pragmas and attributes don't work for some 3479 * reason) without globally disabling AVX512. 3480 * 3481 * Autovectorization of XXH64 tends to be detrimental, 3482 * though the exact outcome may change depending on the exact CPU and compiler version. 3483 * For information, it has been reported as detrimental for Skylake-X, 3484 * but possibly beneficial for Zen4. 3485 * 3486 * The default is to disable auto-vectorization, 3487 * but it can be enabled instead with the `XXH_ENABLE_AUTOVECTORIZE` build macro. 3488 */ 3489 XXH_COMPILER_GUARD(acc); 3490 #endif 3491 return acc; 3492 } 3493 3494 static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val) 3495 { 3496 val = XXH64_round(0, val); 3497 acc ^= val; 3498 acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4; 3499 return acc; 3500 } 3501 3502 /*! @copydoc XXH32_avalanche */ 3503 static xxh_u64 XXH64_avalanche(xxh_u64 hash) 3504 { 3505 hash ^= hash >> 33; 3506 hash *= XXH_PRIME64_2; 3507 hash ^= hash >> 29; 3508 hash *= XXH_PRIME64_3; 3509 hash ^= hash >> 32; 3510 return hash; 3511 } 3512 3513 3514 #define XXH_get64bits(p) XXH_readLE64_align(p, align) 3515 3516 /*! 3517 * @internal 3518 * @brief Sets up the initial accumulator state for XXH64(). 3519 */ 3520 XXH_FORCE_INLINE void 3521 XXH64_initAccs(xxh_u64 *acc, xxh_u64 seed) 3522 { 3523 XXH_ASSERT(acc != NULL); 3524 acc[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2; 3525 acc[1] = seed + XXH_PRIME64_2; 3526 acc[2] = seed + 0; 3527 acc[3] = seed - XXH_PRIME64_1; 3528 } 3529 3530 /*! 3531 * @internal 3532 * @brief Consumes a block of data for XXH64(). 3533 * 3534 * @return the end input pointer.
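*
* For example, len = 100 consumes three full 32-byte stripes (96 bytes)
* and returns input + 96; the 4 remaining bytes are left to
* XXH64_finalize().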
3535 */ 3536 XXH_FORCE_INLINE const xxh_u8 * 3537 XXH64_consumeLong( 3538 xxh_u64 *XXH_RESTRICT acc, 3539 xxh_u8 const *XXH_RESTRICT input, 3540 size_t len, 3541 XXH_alignment align 3542 ) 3543 { 3544 const xxh_u8* const bEnd = input + len; 3545 const xxh_u8* const limit = bEnd - 31; 3546 XXH_ASSERT(acc != NULL); 3547 XXH_ASSERT(input != NULL); 3548 XXH_ASSERT(len >= 32); 3549 do { 3550 /* reroll on 32-bit */ 3551 if (sizeof(void *) < sizeof(xxh_u64)) { 3552 size_t i; 3553 for (i = 0; i < 4; i++) { 3554 acc[i] = XXH64_round(acc[i], XXH_get64bits(input)); 3555 input += 8; 3556 } 3557 } else { 3558 acc[0] = XXH64_round(acc[0], XXH_get64bits(input)); input += 8; 3559 acc[1] = XXH64_round(acc[1], XXH_get64bits(input)); input += 8; 3560 acc[2] = XXH64_round(acc[2], XXH_get64bits(input)); input += 8; 3561 acc[3] = XXH64_round(acc[3], XXH_get64bits(input)); input += 8; 3562 } 3563 } while (input < limit); 3564 3565 return input; 3566 } 3567 3568 /*! 3569 * @internal 3570 * @brief Merges the accumulator lanes together for XXH64() 3571 */ 3572 XXH_FORCE_INLINE XXH_PUREF xxh_u64 3573 XXH64_mergeAccs(const xxh_u64 *acc) 3574 { 3575 XXH_ASSERT(acc != NULL); 3576 { 3577 xxh_u64 h64 = XXH_rotl64(acc[0], 1) + XXH_rotl64(acc[1], 7) 3578 + XXH_rotl64(acc[2], 12) + XXH_rotl64(acc[3], 18); 3579 /* reroll on 32-bit */ 3580 if (sizeof(void *) < sizeof(xxh_u64)) { 3581 size_t i; 3582 for (i = 0; i < 4; i++) { 3583 h64 = XXH64_mergeRound(h64, acc[i]); 3584 } 3585 } else { 3586 h64 = XXH64_mergeRound(h64, acc[0]); 3587 h64 = XXH64_mergeRound(h64, acc[1]); 3588 h64 = XXH64_mergeRound(h64, acc[2]); 3589 h64 = XXH64_mergeRound(h64, acc[3]); 3590 } 3591 return h64; 3592 } 3593 } 3594 3595 /*! 3596 * @internal 3597 * @brief Processes the last 0-31 bytes of @p ptr. 3598 * 3599 * There may be up to 31 bytes remaining to consume from the input. 3600 * This final stage will digest them to ensure that all input bytes are present 3601 * in the final mix. 3602 * 3603 * @param hash The hash to finalize. 3604 * @param ptr The pointer to the remaining input. 3605 * @param len The remaining length, modulo 32. 3606 * @param align Whether @p ptr is aligned. 3607 * @return The finalized hash 3608 * @see XXH32_finalize(). 3609 */ 3610 XXH_STATIC XXH_PUREF xxh_u64 3611 XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) 3612 { 3613 if (ptr==NULL) XXH_ASSERT(len == 0); 3614 len &= 31; 3615 while (len >= 8) { 3616 xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); 3617 ptr += 8; 3618 hash ^= k1; 3619 hash = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4; 3620 len -= 8; 3621 } 3622 if (len >= 4) { 3623 hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; 3624 ptr += 4; 3625 hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; 3626 len -= 4; 3627 } 3628 while (len > 0) { 3629 hash ^= (*ptr++) * XXH_PRIME64_5; 3630 hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1; 3631 --len; 3632 } 3633 return XXH64_avalanche(hash); 3634 } 3635 3636 #ifdef XXH_OLD_NAMES 3637 # define PROCESS1_64 XXH_PROCESS1_64 3638 # define PROCESS4_64 XXH_PROCESS4_64 3639 # define PROCESS8_64 XXH_PROCESS8_64 3640 #else 3641 # undef XXH_PROCESS1_64 3642 # undef XXH_PROCESS4_64 3643 # undef XXH_PROCESS8_64 3644 #endif 3645 3646 /*! 3647 * @internal 3648 * @brief The implementation for @ref XXH64(). 3649 * 3650 * @param input , len , seed Directly passed from @ref XXH64(). 3651 * @param align Whether @p input is aligned. 3652 * @return The calculated hash. 
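*
* For instance, a 40-byte input takes the long path: one 32-byte stripe
* through XXH64_consumeLong(), then the trailing 8 bytes through
* XXH64_finalize(). A 20-byte input skips straight to the finalizer.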
3653 */ 3654 XXH_FORCE_INLINE XXH_PUREF xxh_u64 3655 XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align) 3656 { 3657 xxh_u64 h64; 3658 if (input==NULL) XXH_ASSERT(len == 0); 3659 3660 if (len>=32) { /* Process a large block of data */ 3661 xxh_u64 acc[4]; 3662 XXH64_initAccs(acc, seed); 3663 3664 input = XXH64_consumeLong(acc, input, len, align); 3665 3666 h64 = XXH64_mergeAccs(acc); 3667 } else { 3668 h64 = seed + XXH_PRIME64_5; 3669 } 3670 3671 h64 += (xxh_u64) len; 3672 3673 return XXH64_finalize(h64, input, len, align); 3674 } 3675 3676 3677 /*! @ingroup XXH64_family */ 3678 XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) 3679 { 3680 #if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 3681 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ 3682 XXH64_state_t state; 3683 XXH64_reset(&state, seed); 3684 XXH64_update(&state, (const xxh_u8*)input, len); 3685 return XXH64_digest(&state); 3686 #else 3687 if (XXH_FORCE_ALIGN_CHECK) { 3688 if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ 3689 return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); 3690 } } 3691 3692 return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); 3693 3694 #endif 3695 } 3696 3697 /******* Hash Streaming *******/ 3698 #ifndef XXH_NO_STREAM 3699 /*! @ingroup XXH64_family*/ 3700 XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) 3701 { 3702 return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); 3703 } 3704 /*! @ingroup XXH64_family */ 3705 XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) 3706 { 3707 XXH_free(statePtr); 3708 return XXH_OK; 3709 } 3710 3711 /*! @ingroup XXH64_family */ 3712 XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState) 3713 { 3714 XXH_memcpy(dstState, srcState, sizeof(*dstState)); 3715 } 3716 3717 /*! @ingroup XXH64_family */ 3718 XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed) 3719 { 3720 XXH_ASSERT(statePtr != NULL); 3721 memset(statePtr, 0, sizeof(*statePtr)); 3722 XXH64_initAccs(statePtr->acc, seed); 3723 return XXH_OK; 3724 } 3725 3726 /*! 
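* Chunk boundaries do not affect the result: any split of the same bytes
* produces the same digest. A sketch, with error handling elided (buf, its
* assumed 100-byte length, and the helper name are hypothetical):
* @code{.c}
* XXH64_hash_t hash100(const unsigned char* buf, XXH64_hash_t seed)
* {
*     XXH64_state_t* const state = XXH64_createState();
*     XXH64_hash_t hash;
*     (void)XXH64_reset(state, seed);
*     (void)XXH64_update(state, buf, 40);      // any split works, e.g. 40+60
*     (void)XXH64_update(state, buf + 40, 60);
*     hash = XXH64_digest(state);              // == XXH64(buf, 100, seed)
*     (void)XXH64_freeState(state);
*     return hash;
* }
* @endcode
*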
@ingroup XXH64_family */ 3727 XXH_PUBLIC_API XXH_errorcode 3728 XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len) 3729 { 3730 if (input==NULL) { 3731 XXH_ASSERT(len == 0); 3732 return XXH_OK; 3733 } 3734 3735 state->total_len += len; 3736 3737 XXH_ASSERT(state->bufferedSize <= sizeof(state->buffer)); 3738 if (len < sizeof(state->buffer) - state->bufferedSize) { /* fill in tmp buffer */ 3739 XXH_memcpy(state->buffer + state->bufferedSize, input, len); 3740 state->bufferedSize += (XXH32_hash_t)len; 3741 return XXH_OK; 3742 } 3743 3744 { const xxh_u8* xinput = (const xxh_u8*)input; 3745 const xxh_u8* const bEnd = xinput + len; 3746 3747 if (state->bufferedSize) { /* non-empty buffer => complete first */ 3748 XXH_memcpy(state->buffer + state->bufferedSize, xinput, sizeof(state->buffer) - state->bufferedSize); 3749 xinput += sizeof(state->buffer) - state->bufferedSize; 3750 /* and process one round */ 3751 (void)XXH64_consumeLong(state->acc, state->buffer, sizeof(state->buffer), XXH_aligned); 3752 state->bufferedSize = 0; 3753 } 3754 3755 XXH_ASSERT(xinput <= bEnd); 3756 if ((size_t)(bEnd - xinput) >= sizeof(state->buffer)) { 3757 /* Process the remaining data */ 3758 xinput = XXH64_consumeLong(state->acc, xinput, (size_t)(bEnd - xinput), XXH_unaligned); 3759 } 3760 3761 if (xinput < bEnd) { 3762 /* Copy the leftover to the tmp buffer */ 3763 XXH_memcpy(state->buffer, xinput, (size_t)(bEnd-xinput)); 3764 state->bufferedSize = (unsigned)(bEnd-xinput); 3765 } 3766 } 3767 3768 return XXH_OK; 3769 } 3770 3771 3772 /*! @ingroup XXH64_family */ 3773 XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state) 3774 { 3775 xxh_u64 h64; 3776 3777 if (state->total_len >= 32) { 3778 h64 = XXH64_mergeAccs(state->acc); 3779 } else { 3780 h64 = state->acc[2] /*seed*/ + XXH_PRIME64_5; 3781 } 3782 3783 h64 += (xxh_u64) state->total_len; 3784 3785 return XXH64_finalize(h64, state->buffer, (size_t)state->total_len, XXH_aligned); 3786 } 3787 #endif /* !XXH_NO_STREAM */ 3788 3789 /******* Canonical representation *******/ 3790 3791 /*! @ingroup XXH64_family */ 3792 XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash) 3793 { 3794 XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); 3795 if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); 3796 XXH_memcpy(dst, &hash, sizeof(*dst)); 3797 } 3798 3799 /*! @ingroup XXH64_family */ 3800 XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src) 3801 { 3802 return XXH_readBE64(src); 3803 } 3804 3805 #ifndef XXH_NO_XXH3 3806 3807 /* ********************************************************************* 3808 * XXH3 3809 * New generation hash designed for speed on small keys and vectorization 3810 ************************************************************************ */ 3811 /*! 
3812 * @} 3813 * @defgroup XXH3_impl XXH3 implementation 3814 * @ingroup impl 3815 * @{ 3816 */ 3817 3818 /* === Compiler specifics === */ 3819 3820 3821 #if (defined(__GNUC__) && (__GNUC__ >= 3)) \ 3822 || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \ 3823 || defined(__clang__) 3824 # define XXH_likely(x) __builtin_expect(x, 1) 3825 # define XXH_unlikely(x) __builtin_expect(x, 0) 3826 #else 3827 # define XXH_likely(x) (x) 3828 # define XXH_unlikely(x) (x) 3829 #endif 3830 3831 #ifndef XXH_HAS_INCLUDE 3832 # ifdef __has_include 3833 /* 3834 * Not defined as XXH_HAS_INCLUDE(x) (function-like) because 3835 * this causes segfaults in Apple Clang 4.2 (on Mac OS X 10.7 Lion) 3836 */ 3837 # define XXH_HAS_INCLUDE __has_include 3838 # else 3839 # define XXH_HAS_INCLUDE(x) 0 3840 # endif 3841 #endif 3842 3843 #if defined(__GNUC__) || defined(__clang__) 3844 # if defined(__ARM_FEATURE_SVE) 3845 # include <arm_sve.h> 3846 # endif 3847 # if defined(__ARM_NEON__) || defined(__ARM_NEON) \ 3848 || (defined(_M_ARM) && _M_ARM >= 7) \ 3849 || defined(_M_ARM64) || defined(_M_ARM64EC) \ 3850 || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */ 3851 # define inline __inline__ /* circumvent a clang bug */ 3852 # include <arm_neon.h> 3853 # undef inline 3854 # elif defined(__AVX2__) 3855 # include <immintrin.h> 3856 # elif defined(__SSE2__) 3857 # include <emmintrin.h> 3858 # elif defined(__loongarch_sx) 3859 # include <lsxintrin.h> 3860 # endif 3861 #endif 3862 3863 #if defined(_MSC_VER) 3864 # include <intrin.h> 3865 #endif 3866 3867 /* 3868 * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while 3869 * remaining a true 64-bit/128-bit hash function. 3870 * 3871 * This is done by prioritizing a subset of 64-bit operations that can be 3872 * emulated without too many steps on the average 32-bit machine. 3873 * 3874 * For example, these two lines seem similar, and run equally fast on 64-bit: 3875 * 3876 * xxh_u64 x; 3877 * x ^= (x >> 47); // good 3878 * x ^= (x >> 13); // bad 3879 * 3880 * However, to a 32-bit machine, there is a major difference. 3881 * 3882 * x ^= (x >> 47) looks like this: 3883 * 3884 * x.lo ^= (x.hi >> (47 - 32)); 3885 * 3886 * while x ^= (x >> 13) looks like this: 3887 * 3888 * // note: funnel shifts are not usually cheap. 3889 * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13)); 3890 * x.hi ^= (x.hi >> 13); 3891 * 3892 * The first one is significantly faster than the second, simply because the 3893 * shift is larger than 32. This means: 3894 * - All the bits we need are in the upper 32 bits, so we can ignore the lower 3895 * 32 bits in the shift. 3896 * - The shift result will always fit in the lower 32 bits, and therefore, 3897 * we can ignore the upper 32 bits in the xor. 3898 * 3899 * Thanks to this optimization, XXH3 only requires these features to be efficient: 3900 * 3901 * - Usable unaligned access 3902 * - A 32-bit or 64-bit ALU 3903 * - If 32-bit, a decent ADC instruction 3904 * - A 32 or 64-bit multiply with a 64-bit result 3905 * - For the 128-bit variant, a decent byteswap helps short inputs. 3906 * 3907 * The first two are already required by XXH32, and almost all 32-bit and 64-bit 3908 * platforms which can run XXH32 can run XXH3 efficiently. 3909 * 3910 * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one 3911 * notable exception. 3912 * 3913 * First of all, Thumb-1 lacks support for the UMULL instruction which 3914 * performs the important long multiply. 
This means numerous __aeabi_lmul 3915 * calls. 3916 * 3917 * Second of all, the 8 functional registers are just not enough. 3918 * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need 3919 * Lo registers, and this shuffling results in thousands more MOVs than A32. 3920 * 3921 * A32 and T32 don't have this limitation. They can access all 14 registers, 3922 * do a 32->64 multiply with UMULL, and the flexible operand allowing free 3923 * shifts is helpful, too. 3924 * 3925 * Therefore, we do a quick sanity check. 3926 * 3927 * If compiling Thumb-1 for a target which supports ARM instructions, we will 3928 * emit a warning, as it is not a "sane" platform to compile for. 3929 * 3930 * Usually, if this happens, it is because of an accident and you probably need 3931 * to specify -march, as you likely meant to compile for a newer architecture. 3932 * 3933 * Credit: large sections of the vectorial and asm source code paths 3934 * have been contributed by @easyaspi314 3935 */ 3936 #if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM) 3937 # warning "XXH3 is highly inefficient without ARM or Thumb-2." 3938 #endif 3939 3940 /* ========================================== 3941 * Vectorization detection 3942 * ========================================== */ 3943 3944 #ifdef XXH_DOXYGEN 3945 /*! 3946 * @ingroup tuning 3947 * @brief Overrides the vectorization implementation chosen for XXH3. 3948 * 3949 * Can be defined to 0 to disable SIMD or any of the values mentioned in 3950 * @ref XXH_VECTOR_TYPE. 3951 * 3952 * If this is not defined, it uses predefined macros to determine the best 3953 * implementation. 3954 */ 3955 # define XXH_VECTOR XXH_SCALAR 3956 /*! 3957 * @ingroup tuning 3958 * @brief Selects the minimum alignment for XXH3's accumulators. 3959 * 3960 * When using SIMD, this should match the alignment required for said vector 3961 * type, so, for example, 32 for AVX2. 3962 * 3963 * Default: Auto detected. 3964 */ 3965 # define XXH_ACC_ALIGN 8 3966 #endif 3967 3968 /* Actual definition */ 3969 #ifndef XXH_DOXYGEN 3970 #endif 3971 3972 #ifndef XXH_VECTOR /* can be defined on command line */ 3973 # if defined(__ARM_FEATURE_SVE) 3974 # define XXH_VECTOR XXH_SVE 3975 # elif ( \ 3976 defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \ 3977 || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \ 3978 || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \ 3979 ) && ( \ 3980 defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \ 3981 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \ 3982 ) 3983 # define XXH_VECTOR XXH_NEON 3984 # elif defined(__AVX512F__) 3985 # define XXH_VECTOR XXH_AVX512 3986 # elif defined(__AVX2__) 3987 # define XXH_VECTOR XXH_AVX2 3988 # elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) 3989 # define XXH_VECTOR XXH_SSE2 3990 # elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \ 3991 || (defined(__s390x__) && defined(__VEC__)) \ 3992 && defined(__GNUC__) /* TODO: IBM XL */ 3993 # define XXH_VECTOR XXH_VSX 3994 # elif defined(__loongarch_sx) 3995 # define XXH_VECTOR XXH_LSX 3996 # else 3997 # define XXH_VECTOR XXH_SCALAR 3998 # endif 3999 #endif 4000 4001 /* __ARM_FEATURE_SVE is only supported by GCC & Clang. 
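 * Other compilers do not define it, so the check below demotes a manually
 * requested XXH_SVE to XXH_SCALAR with a warning instead of breaking the build.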
*/ 4002 #if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE) 4003 # ifdef _MSC_VER 4004 # pragma warning(once : 4606) 4005 # else 4006 # warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead." 4007 # endif 4008 # undef XXH_VECTOR 4009 # define XXH_VECTOR XXH_SCALAR 4010 #endif 4011 4012 /* 4013 * Controls the alignment of the accumulator, 4014 * for compatibility with aligned vector loads, which are usually faster. 4015 */ 4016 #ifndef XXH_ACC_ALIGN 4017 # if defined(XXH_X86DISPATCH) 4018 # define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */ 4019 # elif XXH_VECTOR == XXH_SCALAR /* scalar */ 4020 # define XXH_ACC_ALIGN 8 4021 # elif XXH_VECTOR == XXH_SSE2 /* sse2 */ 4022 # define XXH_ACC_ALIGN 16 4023 # elif XXH_VECTOR == XXH_AVX2 /* avx2 */ 4024 # define XXH_ACC_ALIGN 32 4025 # elif XXH_VECTOR == XXH_NEON /* neon */ 4026 # define XXH_ACC_ALIGN 16 4027 # elif XXH_VECTOR == XXH_VSX /* vsx */ 4028 # define XXH_ACC_ALIGN 16 4029 # elif XXH_VECTOR == XXH_AVX512 /* avx512 */ 4030 # define XXH_ACC_ALIGN 64 4031 # elif XXH_VECTOR == XXH_SVE /* sve */ 4032 # define XXH_ACC_ALIGN 64 4033 # elif XXH_VECTOR == XXH_LSX /* lsx */ 4034 # define XXH_ACC_ALIGN 64 4035 # endif 4036 #endif 4037 4038 #if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \ 4039 || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512 4040 # define XXH_SEC_ALIGN XXH_ACC_ALIGN 4041 #elif XXH_VECTOR == XXH_SVE 4042 # define XXH_SEC_ALIGN XXH_ACC_ALIGN 4043 #else 4044 # define XXH_SEC_ALIGN 8 4045 #endif 4046 4047 #if defined(__GNUC__) || defined(__clang__) 4048 # define XXH_ALIASING __attribute__((__may_alias__)) 4049 #else 4050 # define XXH_ALIASING /* nothing */ 4051 #endif 4052 4053 /* 4054 * UGLY HACK: 4055 * GCC usually generates the best code with -O3 for xxHash. 4056 * 4057 * However, when targeting AVX2, it is overzealous in its unrolling resulting 4058 * in code roughly 3/4 the speed of Clang. 4059 * 4060 * There are other issues, such as GCC splitting _mm256_loadu_si256 into 4061 * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which 4062 * only applies to Sandy and Ivy Bridge... which don't even support AVX2. 4063 * 4064 * That is why when compiling the AVX2 version, it is recommended to use either 4065 * -O2 -mavx2 -march=haswell 4066 * or 4067 * -O2 -mavx2 -mno-avx256-split-unaligned-load 4068 * for decent performance, or to use Clang instead. 4069 * 4070 * Fortunately, we can control the first one with a pragma that forces GCC into 4071 * -O2, but the other one we can't control without "failed to inline always 4072 * inline function due to target mismatch" warnings. 4073 */ 4074 #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ 4075 && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ 4076 && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */ 4077 # pragma GCC push_options 4078 # pragma GCC optimize("-O2") 4079 #endif 4080 4081 #if XXH_VECTOR == XXH_NEON 4082 4083 /* 4084 * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3 4085 * optimizes out the entire hashLong loop because of the aliasing violation. 4086 * 4087 * However, GCC is also inefficient at load-store optimization with vld1q/vst1q, 4088 * so the only option is to mark it as aliasing. 4089 */ 4090 typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING; 4091 4092 /*! 4093 * @internal 4094 * @brief `vld1q_u64` but faster and alignment-safe. 
4095 * 4096 * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only 4097 * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86). 4098 * 4099 * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it 4100 * prohibits load-store optimizations. Therefore, a direct dereference is used. 4101 * 4102 * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe 4103 * unaligned load. 4104 */ 4105 #if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) 4106 XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */ 4107 { 4108 return *(xxh_aliasing_uint64x2_t const *)ptr; 4109 } 4110 #else 4111 XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) 4112 { 4113 return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr)); 4114 } 4115 #endif 4116 4117 /*! 4118 * @internal 4119 * @brief `vmlal_u32` on low and high halves of a vector. 4120 * 4121 * This is a workaround for AArch64 GCC < 11 which implemented arm_neon.h with 4122 * inline assembly and were therefore incapable of merging the `vget_{low, high}_u32` 4123 * with `vmlal_u32`. 4124 */ 4125 #if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11 4126 XXH_FORCE_INLINE uint64x2_t 4127 XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) 4128 { 4129 /* Inline assembly is the only way */ 4130 __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs)); 4131 return acc; 4132 } 4133 XXH_FORCE_INLINE uint64x2_t 4134 XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) 4135 { 4136 /* This intrinsic works as expected */ 4137 return vmlal_high_u32(acc, lhs, rhs); 4138 } 4139 #else 4140 /* Portable intrinsic versions */ 4141 XXH_FORCE_INLINE uint64x2_t 4142 XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) 4143 { 4144 return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs)); 4145 } 4146 /*! @copydoc XXH_vmlal_low_u32 4147 * Assume the compiler converts this to vmlal_high_u32 on aarch64 */ 4148 XXH_FORCE_INLINE uint64x2_t 4149 XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) 4150 { 4151 return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs)); 4152 } 4153 #endif 4154 4155 /*! 4156 * @ingroup tuning 4157 * @brief Controls the NEON to scalar ratio for XXH3 4158 * 4159 * This can be set to 2, 4, 6, or 8. 4160 * 4161 * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used. 4162 * 4163 * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those 4164 * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU 4165 * bandwidth. 4166 * 4167 * This is even more noticeable on the more advanced cores like the Cortex-A76 which 4168 * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once. 4169 * 4170 * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes 4171 * and 2 scalar lanes, which is chosen by default. 4172 * 4173 * This does not apply to Apple processors or 32-bit processors, which run better with 4174 * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes. 4175 * 4176 * This change benefits CPUs with large micro-op buffers without negatively affecting 4177 * most other CPUs: 4178 * 4179 * | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. 
| 4180 * |:----------------------|:--------------------|----------:|-----------:|------:| 4181 * | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% | 4182 * | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% | 4183 * | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% | 4184 * | Apple M1 | 4 NEON/8 micro-ops | 37.3 GB/s | 36.1 GB/s | ~-3% | 4185 * 4186 * It also seems to fix some bad codegen on GCC, making it almost as fast as Clang. 4187 * 4188 * When using WASM SIMD128, if this is set to 2 or 6, SIMDe will scalarize 2 of the lanes, 4189 * meaning it effectively becomes a worse version of 4. 4190 * 4191 * @see XXH3_accumulate_512_neon() 4192 */ 4193 # ifndef XXH3_NEON_LANES 4194 # if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \ 4195 && !defined(__APPLE__) && XXH_SIZE_OPT <= 0 4196 # define XXH3_NEON_LANES 6 4197 # else 4198 # define XXH3_NEON_LANES XXH_ACC_NB 4199 # endif 4200 # endif 4201 #endif /* XXH_VECTOR == XXH_NEON */ 4202 4203 /* 4204 * VSX and Z Vector helpers. 4205 * 4206 * This is very messy, and any pull requests to clean this up are welcome. 4207 * 4208 * There are a lot of problems with supporting VSX and s390x, due to 4209 * inconsistent intrinsics, spotty coverage, and multiple endiannesses. 4210 */ 4211 #if XXH_VECTOR == XXH_VSX 4212 /* Annoyingly, these headers _may_ define three macros: `bool`, `vector`, 4213 * and `pixel`. This is a problem for obvious reasons. 4214 * 4215 * These keywords are unnecessary; the spec literally says they are 4216 * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd 4217 * after including the header. 4218 * 4219 * We use pragma push_macro/pop_macro to keep the namespace clean. */ 4220 # pragma push_macro("bool") 4221 # pragma push_macro("vector") 4222 # pragma push_macro("pixel") 4223 /* silence potential macro redefined warnings */ 4224 # undef bool 4225 # undef vector 4226 # undef pixel 4227 4228 # if defined(__s390x__) 4229 # include <s390intrin.h> 4230 # else 4231 # include <altivec.h> 4232 # endif 4233 4234 /* Restore the original macro values, if applicable. */ 4235 # pragma pop_macro("pixel") 4236 # pragma pop_macro("vector") 4237 # pragma pop_macro("bool") 4238 4239 typedef __vector unsigned long long xxh_u64x2; 4240 typedef __vector unsigned char xxh_u8x16; 4241 typedef __vector unsigned xxh_u32x4; 4242 4243 /* 4244 * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue. 4245 */ 4246 typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING; 4247 4248 # ifndef XXH_VSX_BE 4249 # if defined(__BIG_ENDIAN__) \ 4250 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) 4251 # define XXH_VSX_BE 1 4252 # elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__ 4253 # warning "-maltivec=be is not recommended. Please use native endianness." 4254 # define XXH_VSX_BE 1 4255 # else 4256 # define XXH_VSX_BE 0 4257 # endif 4258 # endif /* !defined(XXH_VSX_BE) */ 4259 4260 # if XXH_VSX_BE 4261 # if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__)) 4262 # define XXH_vec_revb vec_revb 4263 # else 4264 /*! 4265 * A polyfill for POWER9's vec_revb().
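* It byteswaps each 64-bit lane: e.g. a lane holding 0x0001020304050607
* becomes 0x0706050403020100.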
4266 */ 4267 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) 4268 { 4269 xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 4270 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 }; 4271 return vec_perm(val, val, vByteSwap); 4272 } 4273 # endif 4274 # endif /* XXH_VSX_BE */ 4275 4276 /*! 4277 * Performs an unaligned vector load and byte swaps it on big endian. 4278 */ 4279 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) 4280 { 4281 xxh_u64x2 ret; 4282 XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2)); 4283 # if XXH_VSX_BE 4284 ret = XXH_vec_revb(ret); 4285 # endif 4286 return ret; 4287 } 4288 4289 /* 4290 * vec_mulo and vec_mule are very problematic intrinsics on PowerPC. 4291 * 4292 * These intrinsics weren't added until GCC 8, despite existing for a while, 4293 * and they are endian dependent. Also, their meanings swap depending on the version. 4294 */ 4295 # if defined(__s390x__) 4296 /* s390x is always big endian, no issue on this platform */ 4297 # define XXH_vec_mulo vec_mulo 4298 # define XXH_vec_mule vec_mule 4299 # elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__) 4300 /* Clang has a better way to control this: we can just use the builtin, which doesn't swap. */ 4301 /* The IBM XL Compiler (which defined __clang__) only implements the vec_* operations */ 4302 # define XXH_vec_mulo __builtin_altivec_vmulouw 4303 # define XXH_vec_mule __builtin_altivec_vmuleuw 4304 # else 4305 /* gcc needs inline assembly */ 4306 /* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */ 4307 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b) 4308 { 4309 xxh_u64x2 result; 4310 __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); 4311 return result; 4312 } 4313 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) 4314 { 4315 xxh_u64x2 result; 4316 __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); 4317 return result; 4318 } 4319 # endif /* XXH_vec_mulo, XXH_vec_mule */ 4320 #endif /* XXH_VECTOR == XXH_VSX */ 4321 4322 #if XXH_VECTOR == XXH_SVE 4323 #define ACCRND(acc, offset) \ 4324 do { \ 4325 svuint64_t input_vec = svld1_u64(mask, xinput + offset); \ 4326 svuint64_t secret_vec = svld1_u64(mask, xsecret + offset); \ 4327 svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec); \ 4328 svuint64_t swapped = svtbl_u64(input_vec, kSwap); \ 4329 svuint64_t mixed_lo = svextw_u64_x(mask, mixed); \ 4330 svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32); \ 4331 svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \ 4332 acc = svadd_u64_x(mask, acc, mul); \ 4333 } while (0) 4334 #endif /* XXH_VECTOR == XXH_SVE */ 4335 4336 /* prefetch 4337 * can be disabled by declaring the XXH_NO_PREFETCH build macro */ 4338 #if defined(XXH_NO_PREFETCH) 4339 # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ 4340 #else 4341 # if XXH_SIZE_OPT >= 1 4342 # define XXH_PREFETCH(ptr) (void)(ptr) 4343 # elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */ 4344 # include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ 4345 # define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) 4346 # elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) 4347 # define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) 4348 # else 4349 # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ 4350 #
endif 4351 #endif /* XXH_NO_PREFETCH */ 4352 4353 4354 /* ========================================== 4355 * XXH3 default settings 4356 * ========================================== */ 4357 4358 #define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */ 4359 4360 #if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN) 4361 # error "default keyset is not large enough" 4362 #endif 4363 4364 /*! Pseudorandom secret taken directly from FARSH. */ 4365 XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { 4366 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, 4367 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, 4368 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, 4369 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c, 4370 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, 4371 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, 4372 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d, 4373 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, 4374 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, 4375 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, 4376 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, 4377 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, 4378 }; 4379 4380 static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */ 4381 static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */ 4382 4383 #ifdef XXH_OLD_NAMES 4384 # define kSecret XXH3_kSecret 4385 #endif 4386 4387 #ifdef XXH_DOXYGEN 4388 /*! 4389 * @brief Calculates a 32-bit to 64-bit long multiply. 4390 * 4391 * Implemented as a macro. 4392 * 4393 * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't 4394 * need to (but it shouldn't need to anyways, it is about 7 instructions to do 4395 * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we 4396 * use that instead of the normal method. 4397 * 4398 * If you are compiling for platforms like Thumb-1 and don't have a better option, 4399 * you may also want to write your own long multiply routine here. 4400 * 4401 * @param x, y Numbers to be multiplied 4402 * @return 64-bit product of the low 32 bits of @p x and @p y. 4403 */ 4404 XXH_FORCE_INLINE xxh_u64 4405 XXH_mult32to64(xxh_u64 x, xxh_u64 y) 4406 { 4407 return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); 4408 } 4409 #elif defined(_MSC_VER) && defined(_M_IX86) 4410 # define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y)) 4411 #else 4412 /* 4413 * Downcast + upcast is usually better than masking on older compilers like 4414 * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers. 4415 * 4416 * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands 4417 * and perform a full 64x64 multiply -- entirely redundant on 32-bit. 
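 *
 * Both forms compute the same value; for example,
 * XXH_mult32to64(0xFFFFFFFF, 0xFFFFFFFF) == 0xFFFFFFFE00000001ULL,
 * the full 64-bit product of the two low 32-bit halves.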
4418 */ 4419 # define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y)) 4420 #endif 4421 4422 /*! 4423 * @brief Calculates a 64->128-bit long multiply. 4424 * 4425 * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar 4426 * version. 4427 * 4428 * @param lhs , rhs The 64-bit integers to be multiplied 4429 * @return The 128-bit result represented in an @ref XXH128_hash_t. 4430 */ 4431 static XXH128_hash_t 4432 XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) 4433 { 4434 /* 4435 * GCC/Clang __uint128_t method. 4436 * 4437 * On most 64-bit targets, GCC and Clang define a __uint128_t type. 4438 * This is usually the best way as it usually uses a native long 64-bit 4439 * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64. 4440 * 4441 * Usually. 4442 * 4443 * Despite being a 32-bit platform, Clang (and emscripten) define this type 4444 * despite not having the arithmetic for it. This results in a laggy 4445 * compiler builtin call which calculates a full 128-bit multiply. 4446 * In that case it is best to use the portable one. 4447 * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 4448 */ 4449 #if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \ 4450 && defined(__SIZEOF_INT128__) \ 4451 || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) 4452 4453 __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs; 4454 XXH128_hash_t r128; 4455 r128.low64 = (xxh_u64)(product); 4456 r128.high64 = (xxh_u64)(product >> 64); 4457 return r128; 4458 4459 /* 4460 * MSVC for x64's _umul128 method. 4461 * 4462 * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct); 4463 * 4464 * This compiles to single operand MUL on x64. 4465 */ 4466 #elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC) 4467 4468 #ifndef _MSC_VER 4469 # pragma intrinsic(_umul128) 4470 #endif 4471 xxh_u64 product_high; 4472 xxh_u64 const product_low = _umul128(lhs, rhs, &product_high); 4473 XXH128_hash_t r128; 4474 r128.low64 = product_low; 4475 r128.high64 = product_high; 4476 return r128; 4477 4478 /* 4479 * MSVC for ARM64's __umulh method. 4480 * 4481 * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method. 4482 */ 4483 #elif defined(_M_ARM64) || defined(_M_ARM64EC) 4484 4485 #ifndef _MSC_VER 4486 # pragma intrinsic(__umulh) 4487 #endif 4488 XXH128_hash_t r128; 4489 r128.low64 = lhs * rhs; 4490 r128.high64 = __umulh(lhs, rhs); 4491 return r128; 4492 4493 #else 4494 /* 4495 * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. 4496 * 4497 * This is a fast and simple grade school multiply, which is shown below 4498 * with base 10 arithmetic instead of base 0x100000000. 4499 * 4500 * 9 3 // D2 lhs = 93 4501 * x 7 5 // D2 rhs = 75 4502 * ---------- 4503 * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15 4504 * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45 4505 * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21 4506 * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63 4507 * --------- 4508 * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27 4509 * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67 4510 * --------- 4511 * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975 4512 * 4513 * The reasons for adding the products like this are: 4514 * 1. It avoids manual carry tracking. Just like how 4515 * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX. 4516 * This avoids a lot of complexity. 4517 * 4518 * 2. 
It hints for, and on Clang, compiles to, the powerful UMAAL 4519 * instruction available in ARM's Digital Signal Processing extension 4520 * in 32-bit ARMv6 and later, which is shown below: 4521 * 4522 * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm) 4523 * { 4524 * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm; 4525 * *RdLo = (xxh_u32)(product & 0xFFFFFFFF); 4526 * *RdHi = (xxh_u32)(product >> 32); 4527 * } 4528 * 4529 * This instruction was designed for efficient long multiplication, and 4530 * allows this to be calculated in only 4 instructions at speeds 4531 * comparable to some 64-bit ALUs. 4532 * 4533 * 3. It isn't terrible on other platforms. Usually this will be a couple 4534 * of 32-bit ADD/ADCs. 4535 */ 4536 4537 /* First calculate all of the cross products. */ 4538 xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); 4539 xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); 4540 xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); 4541 xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32); 4542 4543 /* Now add the products together. These will never overflow. */ 4544 xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; 4545 xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; 4546 xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); 4547 4548 XXH128_hash_t r128; 4549 r128.low64 = lower; 4550 r128.high64 = upper; 4551 return r128; 4552 #endif 4553 } 4554 4555 /*! 4556 * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it. 4557 * 4558 * The reason for the separate function is to prevent passing too many structs 4559 * around by value. This will hopefully inline the multiply, but we don't force it. 4560 * 4561 * @param lhs , rhs The 64-bit integers to multiply 4562 * @return The low 64 bits of the product XOR'd by the high 64 bits. 4563 * @see XXH_mult64to128() 4564 */ 4565 static xxh_u64 4566 XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) 4567 { 4568 XXH128_hash_t product = XXH_mult64to128(lhs, rhs); 4569 return product.low64 ^ product.high64; 4570 } 4571 4572 /*! Seems to produce slightly better code on GCC for some reason. */ 4573 XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) 4574 { 4575 XXH_ASSERT(0 <= shift && shift < 64); 4576 return v64 ^ (v64 >> shift); 4577 } 4578 4579 /* 4580 * This is a fast avalanche stage, 4581 * suitable when input bits are already partially mixed 4582 */ 4583 static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) 4584 { 4585 h64 = XXH_xorshift64(h64, 37); 4586 h64 *= PRIME_MX1; 4587 h64 = XXH_xorshift64(h64, 32); 4588 return h64; 4589 } 4590 4591 /* 4592 * This is a stronger avalanche, 4593 * inspired by Pelle Evensen's rrmxmx 4594 * preferable when input has not been previously mixed 4595 */ 4596 static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) 4597 { 4598 /* this mix is inspired by Pelle Evensen's rrmxmx */ 4599 h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24); 4600 h64 *= PRIME_MX2; 4601 h64 ^= (h64 >> 35) + len ; 4602 h64 *= PRIME_MX2; 4603 return XXH_xorshift64(h64, 28); 4604 } 4605 4606 4607 /* ========================================== 4608 * Short keys 4609 * ========================================== 4610 * One of the shortcomings of XXH32 and XXH64 was that their performance was 4611 * sub-optimal on short lengths. It used an iterative algorithm which strongly 4612 * favored lengths that were a multiple of 4 or 8. 
4613 * 4614 * Instead of iterating over individual inputs, we use a set of single shot 4615 * functions which piece together a range of lengths and operate in constant time. 4616 * 4617 * Additionally, the number of multiplies has been significantly reduced. This 4618 * reduces latency, especially when emulating 64-bit multiplies on 32-bit. 4619 * 4620 * Depending on the platform, this may or may not be faster than XXH32, but it 4621 * is almost guaranteed to be faster than XXH64. 4622 */ 4623 4624 /* 4625 * At very short lengths, there isn't enough input to fully hide secrets, or use 4626 * the entire secret. 4627 * 4628 * There is also only a limited amount of mixing we can do before significantly 4629 * impacting performance. 4630 * 4631 * Therefore, we use different sections of the secret and always mix two secret 4632 * samples with an XOR. This should have no effect on performance on the 4633 * seedless or withSeed variants because everything _should_ be constant folded 4634 * by modern compilers. 4635 * 4636 * The XOR mixing hides individual parts of the secret and increases entropy. 4637 * 4638 * This adds an extra layer of strength for custom secrets. 4639 */ 4640 XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t 4641 XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) 4642 { 4643 XXH_ASSERT(input != NULL); 4644 XXH_ASSERT(1 <= len && len <= 3); 4645 XXH_ASSERT(secret != NULL); 4646 /* 4647 * len = 1: combined = { input[0], 0x01, input[0], input[0] } 4648 * len = 2: combined = { input[1], 0x02, input[0], input[1] } 4649 * len = 3: combined = { input[2], 0x03, input[0], input[1] } 4650 */ 4651 { xxh_u8 const c1 = input[0]; 4652 xxh_u8 const c2 = input[len >> 1]; 4653 xxh_u8 const c3 = input[len - 1]; 4654 xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) 4655 | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); 4656 xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; 4657 xxh_u64 const keyed = (xxh_u64)combined ^ bitflip; 4658 return XXH64_avalanche(keyed); 4659 } 4660 } 4661 4662 XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t 4663 XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) 4664 { 4665 XXH_ASSERT(input != NULL); 4666 XXH_ASSERT(secret != NULL); 4667 XXH_ASSERT(4 <= len && len <= 8); 4668 seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; 4669 { xxh_u32 const input1 = XXH_readLE32(input); 4670 xxh_u32 const input2 = XXH_readLE32(input + len - 4); 4671 xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed; 4672 xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32); 4673 xxh_u64 const keyed = input64 ^ bitflip; 4674 return XXH3_rrmxmx(keyed, len); 4675 } 4676 } 4677 4678 XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t 4679 XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) 4680 { 4681 XXH_ASSERT(input != NULL); 4682 XXH_ASSERT(secret != NULL); 4683 XXH_ASSERT(9 <= len && len <= 16); 4684 { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed; 4685 xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed; 4686 xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1; 4687 xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2; 4688 xxh_u64 const acc = len 4689 + XXH_swap64(input_lo) + input_hi 4690 + XXH3_mul128_fold64(input_lo, input_hi); 4691 return XXH3_avalanche(acc); 4692 } 4693 } 4694 4695 XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t 4696 
XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) 4697 { 4698 XXH_ASSERT(len <= 16); 4699 { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed); 4700 if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed); 4701 if (len) return XXH3_len_1to3_64b(input, len, secret, seed); 4702 return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64))); 4703 } 4704 } 4705 4706 /* 4707 * DISCLAIMER: There are known *seed-dependent* multicollisions here due to 4708 * multiplication by zero, affecting hashes of lengths 17 to 240. 4709 * 4710 * However, they are very unlikely. 4711 * 4712 * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all 4713 * unseeded non-cryptographic hashes, it does not attempt to defend itself 4714 * against specially crafted inputs, only random inputs. 4715 * 4716 * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes 4717 * cancelling out the secret is taken an arbitrary number of times (addressed 4718 * in XXH3_accumulate_512), this collision is very unlikely with random inputs 4719 * and/or proper seeding: 4720 * 4721 * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a 4722 * function that is only called up to 16 times per hash with up to 240 bytes of 4723 * input. 4724 * 4725 * This is not too bad for a non-cryptographic hash function, especially with 4726 * only 64 bit outputs. 4727 * 4728 * The 128-bit variant (which trades some speed for strength) is NOT affected 4729 * by this, although it is always a good idea to use a proper seed if you care 4730 * about strength. 4731 */ 4732 XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input, 4733 const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64) 4734 { 4735 #if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ 4736 && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \ 4737 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */ 4738 /* 4739 * UGLY HACK: 4740 * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in 4741 * slower code. 4742 * 4743 * By forcing seed64 into a register, we disrupt the cost model and 4744 * cause it to scalarize. See `XXH32_round()` 4745 * 4746 * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600, 4747 * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on 4748 * GCC 9.2, despite both emitting scalar code. 4749 * 4750 * GCC generates much better scalar code than Clang for the rest of XXH3, 4751 * which is why finding a more optimal codepath is an interest. 4752 */ 4753 XXH_COMPILER_GUARD(seed64); 4754 #endif 4755 { xxh_u64 const input_lo = XXH_readLE64(input); 4756 xxh_u64 const input_hi = XXH_readLE64(input+8); 4757 return XXH3_mul128_fold64( 4758 input_lo ^ (XXH_readLE64(secret) + seed64), 4759 input_hi ^ (XXH_readLE64(secret+8) - seed64) 4760 ); 4761 } 4762 } 4763 4764 /* For mid range keys, XXH3 uses a Mum-hash variant. */ 4765 XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t 4766 XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len, 4767 const xxh_u8* XXH_RESTRICT secret, size_t secretSize, 4768 XXH64_hash_t seed) 4769 { 4770 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; 4771 XXH_ASSERT(16 < len && len <= 128); 4772 4773 { xxh_u64 acc = len * XXH_PRIME64_1; 4774 #if XXH_SIZE_OPT >= 1 4775 /* Smaller and cleaner, but slightly slower. 
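 * Each iteration of the loop below mixes one 16-byte chunk from the front
 * of the input and its mirrored chunk from the back, walking toward the
 * middle, so every length in (16, 128] is fully covered.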
*/
4776 unsigned int i = (unsigned int)(len - 1) / 32;
4777 do {
4778 acc += XXH3_mix16B(input+16 * i, secret+32*i, seed);
4779 acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed);
4780 } while (i-- != 0);
4781 #else
4782 if (len > 32) {
4783 if (len > 64) {
4784 if (len > 96) {
4785 acc += XXH3_mix16B(input+48, secret+96, seed);
4786 acc += XXH3_mix16B(input+len-64, secret+112, seed);
4787 }
4788 acc += XXH3_mix16B(input+32, secret+64, seed);
4789 acc += XXH3_mix16B(input+len-48, secret+80, seed);
4790 }
4791 acc += XXH3_mix16B(input+16, secret+32, seed);
4792 acc += XXH3_mix16B(input+len-32, secret+48, seed);
4793 }
4794 acc += XXH3_mix16B(input+0, secret+0, seed);
4795 acc += XXH3_mix16B(input+len-16, secret+16, seed);
4796 #endif
4797 return XXH3_avalanche(acc);
4798 }
4799 }
4800
4801 XXH_NO_INLINE XXH_PUREF XXH64_hash_t
4802 XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
4803 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
4804 XXH64_hash_t seed)
4805 {
4806 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
4807 XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
4808
4809 #define XXH3_MIDSIZE_STARTOFFSET 3
4810 #define XXH3_MIDSIZE_LASTOFFSET 17
4811
4812 { xxh_u64 acc = len * XXH_PRIME64_1;
4813 xxh_u64 acc_end;
4814 unsigned int const nbRounds = (unsigned int)len / 16;
4815 unsigned int i;
4816 XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
4817 for (i=0; i<8; i++) {
4818 acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
4819 }
4820 /* last bytes */
4821 acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
4822 XXH_ASSERT(nbRounds >= 8);
4823 acc = XXH3_avalanche(acc);
4824 #if defined(__clang__) /* Clang */ \
4825 && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
4826 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
4827 /*
4828 * UGLY HACK:
4829 * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
4830 * Everywhere else, it uses scalar code.
4831 *
4832 * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
4833 * would still be slower than UMAAL (see XXH_mult64to128).
4834 *
4835 * Unfortunately, Clang doesn't handle the long multiplies properly and
4836 * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
4837 * scalarized into an ugly mess of VMOV.32 instructions.
4838 *
4839 * This mess is difficult to avoid without turning autovectorization
4840 * off completely, but the resulting slowdowns are usually relatively
4841 * minor and not worth fixing.
4842 *
4843 * This loop is the easiest to fix, as unlike XXH32, this pragma
4844 * _actually works_ because it is a loop vectorization instead of an
4845 * SLP vectorization.
4846 */
4847 #pragma clang loop vectorize(disable)
4848 #endif
4849 for (i=8; i < nbRounds; i++) {
4850 /*
4851 * Prevents clang from unrolling the acc loop and interleaving with this one.
4852 */ 4853 XXH_COMPILER_GUARD(acc); 4854 acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed); 4855 } 4856 return XXH3_avalanche(acc + acc_end); 4857 } 4858 } 4859 4860 4861 /* ======= Long Keys ======= */ 4862 4863 #define XXH_STRIPE_LEN 64 4864 #define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */ 4865 #define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64)) 4866 4867 #ifdef XXH_OLD_NAMES 4868 # define STRIPE_LEN XXH_STRIPE_LEN 4869 # define ACC_NB XXH_ACC_NB 4870 #endif 4871 4872 #ifndef XXH_PREFETCH_DIST 4873 # ifdef __clang__ 4874 # define XXH_PREFETCH_DIST 320 4875 # else 4876 # if (XXH_VECTOR == XXH_AVX512) 4877 # define XXH_PREFETCH_DIST 512 4878 # else 4879 # define XXH_PREFETCH_DIST 384 4880 # endif 4881 # endif /* __clang__ */ 4882 #endif /* XXH_PREFETCH_DIST */ 4883 4884 /* 4885 * These macros are to generate an XXH3_accumulate() function. 4886 * The two arguments select the name suffix and target attribute. 4887 * 4888 * The name of this symbol is XXH3_accumulate_<name>() and it calls 4889 * XXH3_accumulate_512_<name>(). 4890 * 4891 * It may be useful to hand implement this function if the compiler fails to 4892 * optimize the inline function. 4893 */ 4894 #define XXH3_ACCUMULATE_TEMPLATE(name) \ 4895 void \ 4896 XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc, \ 4897 const xxh_u8* XXH_RESTRICT input, \ 4898 const xxh_u8* XXH_RESTRICT secret, \ 4899 size_t nbStripes) \ 4900 { \ 4901 size_t n; \ 4902 for (n = 0; n < nbStripes; n++ ) { \ 4903 const xxh_u8* const in = input + n*XXH_STRIPE_LEN; \ 4904 XXH_PREFETCH(in + XXH_PREFETCH_DIST); \ 4905 XXH3_accumulate_512_##name( \ 4906 acc, \ 4907 in, \ 4908 secret + n*XXH_SECRET_CONSUME_RATE); \ 4909 } \ 4910 } 4911 4912 4913 XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64) 4914 { 4915 if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64); 4916 XXH_memcpy(dst, &v64, sizeof(v64)); 4917 } 4918 4919 /* Several intrinsic functions below are supposed to accept __int64 as argument, 4920 * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ . 4921 * However, several environments do not define __int64 type, 4922 * requiring a workaround. 4923 */ 4924 #if !defined (__VMS) \ 4925 && (defined (__cplusplus) \ 4926 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) 4927 typedef int64_t xxh_i64; 4928 #else 4929 /* the following type must have a width of 64-bit */ 4930 typedef long long xxh_i64; 4931 #endif 4932 4933 4934 /* 4935 * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized. 4936 * 4937 * It is a hardened version of UMAC, based off of FARSH's implementation. 4938 * 4939 * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD 4940 * implementations, and it is ridiculously fast. 4941 * 4942 * We harden it by mixing the original input to the accumulators as well as the product. 4943 * 4944 * This means that in the (relatively likely) case of a multiply by zero, the 4945 * original input is preserved. 4946 * 4947 * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve 4948 * cross-pollination, as otherwise the upper and lower halves would be 4949 * essentially independent. 4950 * 4951 * This doesn't matter on 64-bit hashes since they all get merged together in 4952 * the end, so we skip the extra step. 4953 * 4954 * Both XXH3_64bits and XXH3_128bits use this subroutine. 
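 *
 * As a mental model, each 64-bit lane of a stripe behaves like the scalar
 * sketch below, which mirrors XXH3_scalarRound() further down (`lane`,
 * `acc`, `input` and `secret` are illustrative names):
 *
 * @code{.c}
 * xxh_u64 const data = XXH_readLE64(input + 8*lane);
 * xxh_u64 const key  = data ^ XXH_readLE64(secret + 8*lane);
 * acc[lane ^ 1] += data;                             // preserve the raw input
 * acc[lane]     += (key & 0xFFFFFFFF) * (key >> 32); // 32x32->64 UMAC product
 * @endcode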
4955 */ 4956 4957 #if (XXH_VECTOR == XXH_AVX512) \ 4958 || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0) 4959 4960 #ifndef XXH_TARGET_AVX512 4961 # define XXH_TARGET_AVX512 /* disable attribute target */ 4962 #endif 4963 4964 XXH_FORCE_INLINE XXH_TARGET_AVX512 void 4965 XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc, 4966 const void* XXH_RESTRICT input, 4967 const void* XXH_RESTRICT secret) 4968 { 4969 __m512i* const xacc = (__m512i *) acc; 4970 XXH_ASSERT((((size_t)acc) & 63) == 0); 4971 XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); 4972 4973 { 4974 /* data_vec = input[0]; */ 4975 __m512i const data_vec = _mm512_loadu_si512 (input); 4976 /* key_vec = secret[0]; */ 4977 __m512i const key_vec = _mm512_loadu_si512 (secret); 4978 /* data_key = data_vec ^ key_vec; */ 4979 __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); 4980 /* data_key_lo = data_key >> 32; */ 4981 __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32); 4982 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ 4983 __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo); 4984 /* xacc[0] += swap(data_vec); */ 4985 __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2)); 4986 __m512i const sum = _mm512_add_epi64(*xacc, data_swap); 4987 /* xacc[0] += product; */ 4988 *xacc = _mm512_add_epi64(product, sum); 4989 } 4990 } 4991 XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512) 4992 4993 /* 4994 * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. 4995 * 4996 * Multiplication isn't perfect, as explained by Google in HighwayHash: 4997 * 4998 * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to 4999 * // varying degrees. In descending order of goodness, bytes 5000 * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32. 5001 * // As expected, the upper and lower bytes are much worse. 5002 * 5003 * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291 5004 * 5005 * Since our algorithm uses a pseudorandom secret to add some variance into the 5006 * mix, we don't need to (or want to) mix as often or as much as HighwayHash does. 5007 * 5008 * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid 5009 * extraction. 5010 * 5011 * Both XXH3_64bits and XXH3_128bits use this subroutine. 
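 *
 * In scalar terms, each lane of the scramble is roughly the following,
 * mirroring XXH3_scalarScrambleRound() further down (`lane` is illustrative):
 *
 * @code{.c}
 * acc[lane] ^= acc[lane] >> 47;               // xorshift: fold high bits down
 * acc[lane] ^= XXH_readLE64(secret + 8*lane); // mask with the secret
 * acc[lane] *= XXH_PRIME32_1;                 // scramble with a 32-bit prime
 * @endcode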
5012 */ 5013 5014 XXH_FORCE_INLINE XXH_TARGET_AVX512 void 5015 XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) 5016 { 5017 XXH_ASSERT((((size_t)acc) & 63) == 0); 5018 XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); 5019 { __m512i* const xacc = (__m512i*) acc; 5020 const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1); 5021 5022 /* xacc[0] ^= (xacc[0] >> 47) */ 5023 __m512i const acc_vec = *xacc; 5024 __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47); 5025 /* xacc[0] ^= secret; */ 5026 __m512i const key_vec = _mm512_loadu_si512 (secret); 5027 __m512i const data_key = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */); 5028 5029 /* xacc[0] *= XXH_PRIME32_1; */ 5030 __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32); 5031 __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32); 5032 __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32); 5033 *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32)); 5034 } 5035 } 5036 5037 XXH_FORCE_INLINE XXH_TARGET_AVX512 void 5038 XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) 5039 { 5040 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0); 5041 XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64); 5042 XXH_ASSERT(((size_t)customSecret & 63) == 0); 5043 (void)(&XXH_writeLE64); 5044 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); 5045 __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64); 5046 __m512i const seed = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos); 5047 5048 const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret); 5049 __m512i* const dest = ( __m512i*) customSecret; 5050 int i; 5051 XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */ 5052 XXH_ASSERT(((size_t)dest & 63) == 0); 5053 for (i=0; i < nbRounds; ++i) { 5054 dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed); 5055 } } 5056 } 5057 5058 #endif 5059 5060 #if (XXH_VECTOR == XXH_AVX2) \ 5061 || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0) 5062 5063 #ifndef XXH_TARGET_AVX2 5064 # define XXH_TARGET_AVX2 /* disable attribute target */ 5065 #endif 5066 5067 XXH_FORCE_INLINE XXH_TARGET_AVX2 void 5068 XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc, 5069 const void* XXH_RESTRICT input, 5070 const void* XXH_RESTRICT secret) 5071 { 5072 XXH_ASSERT((((size_t)acc) & 31) == 0); 5073 { __m256i* const xacc = (__m256i *) acc; 5074 /* Unaligned. This is mainly for pointer arithmetic, and because 5075 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ 5076 const __m256i* const xinput = (const __m256i *) input; 5077 /* Unaligned. This is mainly for pointer arithmetic, and because 5078 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. 
*/
5079 const __m256i* const xsecret = (const __m256i *) secret;
5080
5081 size_t i;
5082 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
5083 /* data_vec = xinput[i]; */
5084 __m256i const data_vec = _mm256_loadu_si256 (xinput+i);
5085 /* key_vec = xsecret[i]; */
5086 __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
5087 /* data_key = data_vec ^ key_vec; */
5088 __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
5089 /* data_key_lo = data_key >> 32; */
5090 __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32);
5091 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
5092 __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo);
5093 /* xacc[i] += swap(data_vec); */
5094 __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
5095 __m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
5096 /* xacc[i] += product; */
5097 xacc[i] = _mm256_add_epi64(product, sum);
5098 } }
5099 }
5100 XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2)
5101
5102 XXH_FORCE_INLINE XXH_TARGET_AVX2 void
5103 XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5104 {
5105 XXH_ASSERT((((size_t)acc) & 31) == 0);
5106 { __m256i* const xacc = (__m256i*) acc;
5107 /* Unaligned. This is mainly for pointer arithmetic, and because
5108 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
5109 const __m256i* const xsecret = (const __m256i *) secret;
5110 const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
5111
5112 size_t i;
5113 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
5114 /* xacc[i] ^= (xacc[i] >> 47) */
5115 __m256i const acc_vec = xacc[i];
5116 __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47);
5117 __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted);
5118 /* xacc[i] ^= xsecret[i]; */
5119 __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
5120 __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
5121
5122 /* xacc[i] *= XXH_PRIME32_1; */
5123 __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32);
5124 __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32);
5125 __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32);
5126 xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
5127 }
5128 }
5129 }
5130
5131 XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
5132 {
5133 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
5134 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
5135 XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
5136 (void)(&XXH_writeLE64);
5137 XXH_PREFETCH(customSecret);
5138 { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
5139
5140 const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret);
5141 __m256i* dest = ( __m256i*) customSecret;
5142
5143 # if defined(__GNUC__) || defined(__clang__)
5144 /*
5145 * On GCC & Clang, marking 'dest' as modified causes the compiler to:
5146 * - not extract the secret from SSE registers in the internal loop
5147 * - use fewer common registers, and avoid pushing these registers onto the stack
5148 */
5149 XXH_COMPILER_GUARD(dest);
5150 # endif
5151 XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
5152 XXH_ASSERT(((size_t)dest & 31) == 0);
5153
5154 /* GCC -O2 needs the loop unrolled manually */
5155 dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed);
5156 dest[1] =
_mm256_add_epi64(_mm256_load_si256(src+1), seed); 5157 dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed); 5158 dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed); 5159 dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed); 5160 dest[5] = _mm256_add_epi64(_mm256_load_si256(src+5), seed); 5161 } 5162 } 5163 5164 #endif 5165 5166 /* x86dispatch always generates SSE2 */ 5167 #if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH) 5168 5169 #ifndef XXH_TARGET_SSE2 5170 # define XXH_TARGET_SSE2 /* disable attribute target */ 5171 #endif 5172 5173 XXH_FORCE_INLINE XXH_TARGET_SSE2 void 5174 XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc, 5175 const void* XXH_RESTRICT input, 5176 const void* XXH_RESTRICT secret) 5177 { 5178 /* SSE2 is just a half-scale version of the AVX2 version. */ 5179 XXH_ASSERT((((size_t)acc) & 15) == 0); 5180 { __m128i* const xacc = (__m128i *) acc; 5181 /* Unaligned. This is mainly for pointer arithmetic, and because 5182 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ 5183 const __m128i* const xinput = (const __m128i *) input; 5184 /* Unaligned. This is mainly for pointer arithmetic, and because 5185 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ 5186 const __m128i* const xsecret = (const __m128i *) secret; 5187 5188 size_t i; 5189 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { 5190 /* data_vec = xinput[i]; */ 5191 __m128i const data_vec = _mm_loadu_si128 (xinput+i); 5192 /* key_vec = xsecret[i]; */ 5193 __m128i const key_vec = _mm_loadu_si128 (xsecret+i); 5194 /* data_key = data_vec ^ key_vec; */ 5195 __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); 5196 /* data_key_lo = data_key >> 32; */ 5197 __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); 5198 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ 5199 __m128i const product = _mm_mul_epu32 (data_key, data_key_lo); 5200 /* xacc[i] += swap(data_vec); */ 5201 __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); 5202 __m128i const sum = _mm_add_epi64(xacc[i], data_swap); 5203 /* xacc[i] += product; */ 5204 xacc[i] = _mm_add_epi64(product, sum); 5205 } } 5206 } 5207 XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2) 5208 5209 XXH_FORCE_INLINE XXH_TARGET_SSE2 void 5210 XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) 5211 { 5212 XXH_ASSERT((((size_t)acc) & 15) == 0); 5213 { __m128i* const xacc = (__m128i*) acc; 5214 /* Unaligned. This is mainly for pointer arithmetic, and because 5215 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. 
*/
5216 const __m128i* const xsecret = (const __m128i *) secret;
5217 const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
5218
5219 size_t i;
5220 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
5221 /* xacc[i] ^= (xacc[i] >> 47) */
5222 __m128i const acc_vec = xacc[i];
5223 __m128i const shifted = _mm_srli_epi64 (acc_vec, 47);
5224 __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted);
5225 /* xacc[i] ^= xsecret[i]; */
5226 __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
5227 __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
5228
5229 /* xacc[i] *= XXH_PRIME32_1; */
5230 __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
5231 __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32);
5232 __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32);
5233 xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
5234 }
5235 }
5236 }
5237
5238 XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
5239 {
5240 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
5241 (void)(&XXH_writeLE64);
5242 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
5243
5244 # if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
5245 /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
5246 XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
5247 __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
5248 # else
5249 __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
5250 # endif
5251 int i;
5252
5253 const void* const src16 = XXH3_kSecret;
5254 __m128i* dst16 = (__m128i*) customSecret;
5255 # if defined(__GNUC__) || defined(__clang__)
5256 /*
5257 * On GCC & Clang, marking 'dest' as modified causes the compiler to:
5258 * - not extract the secret from SSE registers in the internal loop
5259 * - use fewer common registers, and avoid pushing these registers onto the stack
5260 */
5261 XXH_COMPILER_GUARD(dst16);
5262 # endif
5263 XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
5264 XXH_ASSERT(((size_t)dst16 & 15) == 0);
5265
5266 for (i=0; i < nbRounds; ++i) {
5267 dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
5268 } }
5269 }
5270
5271 #endif
5272
5273 #if (XXH_VECTOR == XXH_NEON)
5274
5275 /* forward declarations for the scalar routines */
5276 XXH_FORCE_INLINE void
5277 XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
5278 void const* XXH_RESTRICT secret, size_t lane);
5279
5280 XXH_FORCE_INLINE void
5281 XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
5282 void const* XXH_RESTRICT secret, size_t lane);
5283
5284 /*!
5285 * @internal
5286 * @brief The bulk processing loop for NEON and WASM SIMD128.
5287 *
5288 * The NEON code path is actually partially scalar when running on AArch64. This
5289 * is to optimize the pipelining and can have up to 15% speedup depending on the
5290 * CPU, and it also mitigates some GCC codegen issues.
5291 *
5292 * @see XXH3_NEON_LANES for configuring this and details about this optimization.
5293 *
5294 * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit
5295 * integers instead of the other platforms which mask full 64-bit vectors,
5296 * so the setup is more complicated than just shifting right.
5297 *
5298 * Additionally, there is an optimization for 4 lanes at once noted below.
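 *
 * For instance, when XXH3_NEON_LANES is 6 (the usual default on AArch64),
 * a 64-byte stripe splits as: lanes 0-3 handled by the 4-lane NEON loop,
 * lanes 4-5 by the 2-lane NEON loop, and lanes 6-7 by the scalar rounds.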
5299 * 5300 * Since, as stated, the most optimal amount of lanes for Cortexes is 6, 5301 * there needs to be *three* versions of the accumulate operation used 5302 * for the remaining 2 lanes. 5303 * 5304 * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap 5305 * nearly perfectly. 5306 */ 5307 5308 XXH_FORCE_INLINE void 5309 XXH3_accumulate_512_neon( void* XXH_RESTRICT acc, 5310 const void* XXH_RESTRICT input, 5311 const void* XXH_RESTRICT secret) 5312 { 5313 XXH_ASSERT((((size_t)acc) & 15) == 0); 5314 XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0); 5315 { /* GCC for darwin arm64 does not like aliasing here */ 5316 xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc; 5317 /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */ 5318 uint8_t const* xinput = (const uint8_t *) input; 5319 uint8_t const* xsecret = (const uint8_t *) secret; 5320 5321 size_t i; 5322 #ifdef __wasm_simd128__ 5323 /* 5324 * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret 5325 * is constant propagated, which results in it converting it to this 5326 * inside the loop: 5327 * 5328 * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0) 5329 * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0) 5330 * ... 5331 * 5332 * This requires a full 32-bit address immediate (and therefore a 6 byte 5333 * instruction) as well as an add for each offset. 5334 * 5335 * Putting an asm guard prevents it from folding (at the cost of losing 5336 * the alignment hint), and uses the free offset in `v128.load` instead 5337 * of adding secret_offset each time which overall reduces code size by 5338 * about a kilobyte and improves performance. 5339 */ 5340 XXH_COMPILER_GUARD(xsecret); 5341 #endif 5342 /* Scalar lanes use the normal scalarRound routine */ 5343 for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { 5344 XXH3_scalarRound(acc, input, secret, i); 5345 } 5346 i = 0; 5347 /* 4 NEON lanes at a time. */ 5348 for (; i+1 < XXH3_NEON_LANES / 2; i+=2) { 5349 /* data_vec = xinput[i]; */ 5350 uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16)); 5351 uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i+1) * 16)); 5352 /* key_vec = xsecret[i]; */ 5353 uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16)); 5354 uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i+1) * 16)); 5355 /* data_swap = swap(data_vec) */ 5356 uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1); 5357 uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1); 5358 /* data_key = data_vec ^ key_vec; */ 5359 uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1); 5360 uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2); 5361 5362 /* 5363 * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a 5364 * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to 5365 * get one vector with the low 32 bits of each lane, and one vector 5366 * with the high 32 bits of each lane. 5367 * 5368 * The intrinsic returns a double vector because the original ARMv7-a 5369 * instruction modified both arguments in place. AArch64 and SIMD128 emit 5370 * two instructions from this intrinsic. 
5371 * 5372 * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ] 5373 * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ] 5374 */ 5375 uint32x4x2_t unzipped = vuzpq_u32( 5376 vreinterpretq_u32_u64(data_key_1), 5377 vreinterpretq_u32_u64(data_key_2) 5378 ); 5379 /* data_key_lo = data_key & 0xFFFFFFFF */ 5380 uint32x4_t data_key_lo = unzipped.val[0]; 5381 /* data_key_hi = data_key >> 32 */ 5382 uint32x4_t data_key_hi = unzipped.val[1]; 5383 /* 5384 * Then, we can split the vectors horizontally and multiply which, as for most 5385 * widening intrinsics, have a variant that works on both high half vectors 5386 * for free on AArch64. A similar instruction is available on SIMD128. 5387 * 5388 * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi 5389 */ 5390 uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi); 5391 uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi); 5392 /* 5393 * Clang reorders 5394 * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s 5395 * c += a; // add acc.2d, acc.2d, swap.2d 5396 * to 5397 * c += a; // add acc.2d, acc.2d, swap.2d 5398 * c += b * c; // umlal acc.2d, dkl.2s, dkh.2s 5399 * 5400 * While it would make sense in theory since the addition is faster, 5401 * for reasons likely related to umlal being limited to certain NEON 5402 * pipelines, this is worse. A compiler guard fixes this. 5403 */ 5404 XXH_COMPILER_GUARD_CLANG_NEON(sum_1); 5405 XXH_COMPILER_GUARD_CLANG_NEON(sum_2); 5406 /* xacc[i] = acc_vec + sum; */ 5407 xacc[i] = vaddq_u64(xacc[i], sum_1); 5408 xacc[i+1] = vaddq_u64(xacc[i+1], sum_2); 5409 } 5410 /* Operate on the remaining NEON lanes 2 at a time. */ 5411 for (; i < XXH3_NEON_LANES / 2; i++) { 5412 /* data_vec = xinput[i]; */ 5413 uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16)); 5414 /* key_vec = xsecret[i]; */ 5415 uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); 5416 /* acc_vec_2 = swap(data_vec) */ 5417 uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1); 5418 /* data_key = data_vec ^ key_vec; */ 5419 uint64x2_t data_key = veorq_u64(data_vec, key_vec); 5420 /* For two lanes, just use VMOVN and VSHRN. */ 5421 /* data_key_lo = data_key & 0xFFFFFFFF; */ 5422 uint32x2_t data_key_lo = vmovn_u64(data_key); 5423 /* data_key_hi = data_key >> 32; */ 5424 uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32); 5425 /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */ 5426 uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi); 5427 /* Same Clang workaround as before */ 5428 XXH_COMPILER_GUARD_CLANG_NEON(sum); 5429 /* xacc[i] = acc_vec + sum; */ 5430 xacc[i] = vaddq_u64 (xacc[i], sum); 5431 } 5432 } 5433 } 5434 XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon) 5435 5436 XXH_FORCE_INLINE void 5437 XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) 5438 { 5439 XXH_ASSERT((((size_t)acc) & 15) == 0); 5440 5441 { xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc; 5442 uint8_t const* xsecret = (uint8_t const*) secret; 5443 5444 size_t i; 5445 /* WASM uses operator overloads and doesn't need these. 
*/ 5446 #ifndef __wasm_simd128__ 5447 /* { prime32_1, prime32_1 } */ 5448 uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1); 5449 /* { 0, prime32_1, 0, prime32_1 } */ 5450 uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32)); 5451 #endif 5452 5453 /* AArch64 uses both scalar and neon at the same time */ 5454 for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { 5455 XXH3_scalarScrambleRound(acc, secret, i); 5456 } 5457 for (i=0; i < XXH3_NEON_LANES / 2; i++) { 5458 /* xacc[i] ^= (xacc[i] >> 47); */ 5459 uint64x2_t acc_vec = xacc[i]; 5460 uint64x2_t shifted = vshrq_n_u64(acc_vec, 47); 5461 uint64x2_t data_vec = veorq_u64(acc_vec, shifted); 5462 5463 /* xacc[i] ^= xsecret[i]; */ 5464 uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); 5465 uint64x2_t data_key = veorq_u64(data_vec, key_vec); 5466 /* xacc[i] *= XXH_PRIME32_1 */ 5467 #ifdef __wasm_simd128__ 5468 /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */ 5469 xacc[i] = data_key * XXH_PRIME32_1; 5470 #else 5471 /* 5472 * Expanded version with portable NEON intrinsics 5473 * 5474 * lo(x) * lo(y) + (hi(x) * lo(y) << 32) 5475 * 5476 * prod_hi = hi(data_key) * lo(prime) << 32 5477 * 5478 * Since we only need 32 bits of this multiply a trick can be used, reinterpreting the vector 5479 * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits 5480 * and avoid the shift. 5481 */ 5482 uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi); 5483 /* Extract low bits for vmlal_u32 */ 5484 uint32x2_t data_key_lo = vmovn_u64(data_key); 5485 /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */ 5486 xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo); 5487 #endif 5488 } 5489 } 5490 } 5491 #endif 5492 5493 #if (XXH_VECTOR == XXH_VSX) 5494 5495 XXH_FORCE_INLINE void 5496 XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, 5497 const void* XXH_RESTRICT input, 5498 const void* XXH_RESTRICT secret) 5499 { 5500 /* presumed aligned */ 5501 xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; 5502 xxh_u8 const* const xinput = (xxh_u8 const*) input; /* no alignment restriction */ 5503 xxh_u8 const* const xsecret = (xxh_u8 const*) secret; /* no alignment restriction */ 5504 xxh_u64x2 const v32 = { 32, 32 }; 5505 size_t i; 5506 for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { 5507 /* data_vec = xinput[i]; */ 5508 xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i); 5509 /* key_vec = xsecret[i]; */ 5510 xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); 5511 xxh_u64x2 const data_key = data_vec ^ key_vec; 5512 /* shuffled = (data_key << 32) | (data_key >> 32); */ 5513 xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); 5514 /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */ 5515 xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); 5516 /* acc_vec = xacc[i]; */ 5517 xxh_u64x2 acc_vec = xacc[i]; 5518 acc_vec += product; 5519 5520 /* swap high and low halves */ 5521 #ifdef __s390x__ 5522 acc_vec += vec_permi(data_vec, data_vec, 2); 5523 #else 5524 acc_vec += vec_xxpermdi(data_vec, data_vec, 2); 5525 #endif 5526 xacc[i] = acc_vec; 5527 } 5528 } 5529 XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx) 5530 5531 XXH_FORCE_INLINE void 5532 XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) 5533 { 5534 XXH_ASSERT((((size_t)acc) & 15) == 0); 5535 5536 { xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; 
5537 const xxh_u8* const xsecret = (const xxh_u8*) secret; 5538 /* constants */ 5539 xxh_u64x2 const v32 = { 32, 32 }; 5540 xxh_u64x2 const v47 = { 47, 47 }; 5541 xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 }; 5542 size_t i; 5543 for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { 5544 /* xacc[i] ^= (xacc[i] >> 47); */ 5545 xxh_u64x2 const acc_vec = xacc[i]; 5546 xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47); 5547 5548 /* xacc[i] ^= xsecret[i]; */ 5549 xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); 5550 xxh_u64x2 const data_key = data_vec ^ key_vec; 5551 5552 /* xacc[i] *= XXH_PRIME32_1 */ 5553 /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */ 5554 xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime); 5555 /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */ 5556 xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime); 5557 xacc[i] = prod_odd + (prod_even << v32); 5558 } } 5559 } 5560 5561 #endif 5562 5563 #if (XXH_VECTOR == XXH_SVE) 5564 5565 XXH_FORCE_INLINE void 5566 XXH3_accumulate_512_sve( void* XXH_RESTRICT acc, 5567 const void* XXH_RESTRICT input, 5568 const void* XXH_RESTRICT secret) 5569 { 5570 uint64_t *xacc = (uint64_t *)acc; 5571 const uint64_t *xinput = (const uint64_t *)(const void *)input; 5572 const uint64_t *xsecret = (const uint64_t *)(const void *)secret; 5573 svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); 5574 uint64_t element_count = svcntd(); 5575 if (element_count >= 8) { 5576 svbool_t mask = svptrue_pat_b64(SV_VL8); 5577 svuint64_t vacc = svld1_u64(mask, xacc); 5578 ACCRND(vacc, 0); 5579 svst1_u64(mask, xacc, vacc); 5580 } else if (element_count == 2) { /* sve128 */ 5581 svbool_t mask = svptrue_pat_b64(SV_VL2); 5582 svuint64_t acc0 = svld1_u64(mask, xacc + 0); 5583 svuint64_t acc1 = svld1_u64(mask, xacc + 2); 5584 svuint64_t acc2 = svld1_u64(mask, xacc + 4); 5585 svuint64_t acc3 = svld1_u64(mask, xacc + 6); 5586 ACCRND(acc0, 0); 5587 ACCRND(acc1, 2); 5588 ACCRND(acc2, 4); 5589 ACCRND(acc3, 6); 5590 svst1_u64(mask, xacc + 0, acc0); 5591 svst1_u64(mask, xacc + 2, acc1); 5592 svst1_u64(mask, xacc + 4, acc2); 5593 svst1_u64(mask, xacc + 6, acc3); 5594 } else { 5595 svbool_t mask = svptrue_pat_b64(SV_VL4); 5596 svuint64_t acc0 = svld1_u64(mask, xacc + 0); 5597 svuint64_t acc1 = svld1_u64(mask, xacc + 4); 5598 ACCRND(acc0, 0); 5599 ACCRND(acc1, 4); 5600 svst1_u64(mask, xacc + 0, acc0); 5601 svst1_u64(mask, xacc + 4, acc1); 5602 } 5603 } 5604 5605 XXH_FORCE_INLINE void 5606 XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc, 5607 const xxh_u8* XXH_RESTRICT input, 5608 const xxh_u8* XXH_RESTRICT secret, 5609 size_t nbStripes) 5610 { 5611 if (nbStripes != 0) { 5612 uint64_t *xacc = (uint64_t *)acc; 5613 const uint64_t *xinput = (const uint64_t *)(const void *)input; 5614 const uint64_t *xsecret = (const uint64_t *)(const void *)secret; 5615 svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); 5616 uint64_t element_count = svcntd(); 5617 if (element_count >= 8) { 5618 svbool_t mask = svptrue_pat_b64(SV_VL8); 5619 svuint64_t vacc = svld1_u64(mask, xacc + 0); 5620 do { 5621 /* svprfd(svbool_t, void *, enum svfprop); */ 5622 svprfd(mask, xinput + 128, SV_PLDL1STRM); 5623 ACCRND(vacc, 0); 5624 xinput += 8; 5625 xsecret += 1; 5626 nbStripes--; 5627 } while (nbStripes != 0); 5628 5629 svst1_u64(mask, xacc + 0, vacc); 5630 } else if (element_count == 2) { /* sve128 */ 5631 svbool_t mask = 
svptrue_pat_b64(SV_VL2);
5632 svuint64_t acc0 = svld1_u64(mask, xacc + 0);
5633 svuint64_t acc1 = svld1_u64(mask, xacc + 2);
5634 svuint64_t acc2 = svld1_u64(mask, xacc + 4);
5635 svuint64_t acc3 = svld1_u64(mask, xacc + 6);
5636 do {
5637 svprfd(mask, xinput + 128, SV_PLDL1STRM);
5638 ACCRND(acc0, 0);
5639 ACCRND(acc1, 2);
5640 ACCRND(acc2, 4);
5641 ACCRND(acc3, 6);
5642 xinput += 8;
5643 xsecret += 1;
5644 nbStripes--;
5645 } while (nbStripes != 0);
5646
5647 svst1_u64(mask, xacc + 0, acc0);
5648 svst1_u64(mask, xacc + 2, acc1);
5649 svst1_u64(mask, xacc + 4, acc2);
5650 svst1_u64(mask, xacc + 6, acc3);
5651 } else {
5652 svbool_t mask = svptrue_pat_b64(SV_VL4);
5653 svuint64_t acc0 = svld1_u64(mask, xacc + 0);
5654 svuint64_t acc1 = svld1_u64(mask, xacc + 4);
5655 do {
5656 svprfd(mask, xinput + 128, SV_PLDL1STRM);
5657 ACCRND(acc0, 0);
5658 ACCRND(acc1, 4);
5659 xinput += 8;
5660 xsecret += 1;
5661 nbStripes--;
5662 } while (nbStripes != 0);
5663
5664 svst1_u64(mask, xacc + 0, acc0);
5665 svst1_u64(mask, xacc + 4, acc1);
5666 }
5667 }
5668 }
5669
5670 #endif
5671
5672 #if (XXH_VECTOR == XXH_LSX)
5673 #define _LSX_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
5674
5675 XXH_FORCE_INLINE void
5676 XXH3_accumulate_512_lsx( void* XXH_RESTRICT acc,
5677 const void* XXH_RESTRICT input,
5678 const void* XXH_RESTRICT secret)
5679 {
5680 XXH_ASSERT((((size_t)acc) & 15) == 0);
5681 {
5682 __m128i* const xacc = (__m128i *) acc;
5683 const __m128i* const xinput = (const __m128i *) input;
5684 const __m128i* const xsecret = (const __m128i *) secret;
5685
5686 for (size_t i = 0; i < XXH_STRIPE_LEN / sizeof(__m128i); i++) {
5687 /* data_vec = xinput[i]; */
5688 __m128i const data_vec = __lsx_vld(xinput + i, 0);
5689 /* key_vec = xsecret[i]; */
5690 __m128i const key_vec = __lsx_vld(xsecret + i, 0);
5691 /* data_key = data_vec ^ key_vec; */
5692 __m128i const data_key = __lsx_vxor_v(data_vec, key_vec);
5693 /* data_key_lo = data_key >> 32; */
5694 __m128i const data_key_lo = __lsx_vsrli_d(data_key, 32);
5696 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
5697 __m128i const product = __lsx_vmulwev_d_wu(data_key, data_key_lo);
5698 /* xacc[i] += swap(data_vec); */
5699 __m128i const data_swap = __lsx_vshuf4i_w(data_vec, _LSX_SHUFFLE(1, 0, 3, 2));
5700 __m128i const sum = __lsx_vadd_d(xacc[i], data_swap);
5701 /* xacc[i] += product; */
5702 xacc[i] = __lsx_vadd_d(product, sum);
5703 }
5704 }
5705 }
5706 XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(lsx)
5707
5708 XXH_FORCE_INLINE void
5709 XXH3_scrambleAcc_lsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5710 {
5711 XXH_ASSERT((((size_t)acc) & 15) == 0);
5712 {
5713 __m128i* const xacc = (__m128i*) acc;
5714 const __m128i* const xsecret = (const __m128i *) secret;
5715 const __m128i prime32 = __lsx_vreplgr2vr_w((int)XXH_PRIME32_1);
5716
5717 for (size_t i = 0; i < XXH_STRIPE_LEN / sizeof(__m128i); i++) {
5718 /* xacc[i] ^= (xacc[i] >> 47) */
5719 __m128i const acc_vec = xacc[i];
5720 __m128i const shifted = __lsx_vsrli_d(acc_vec, 47);
5721 __m128i const data_vec = __lsx_vxor_v(acc_vec, shifted);
5722 /* xacc[i] ^= xsecret[i]; */
5723 __m128i const key_vec = __lsx_vld(xsecret + i, 0);
5724 __m128i const data_key = __lsx_vxor_v(data_vec, key_vec);
5725
5726 /* xacc[i] *= XXH_PRIME32_1; */
5727 __m128i const data_key_hi = __lsx_vsrli_d(data_key, 32);
5728 __m128i const prod_lo = __lsx_vmulwev_d_wu(data_key, prime32);
5729 __m128i const prod_hi =
__lsx_vmulwev_d_wu(data_key_hi, prime32); 5730 xacc[i] = __lsx_vadd_d(prod_lo, __lsx_vslli_d(prod_hi, 32)); 5731 } 5732 } 5733 } 5734 5735 #endif 5736 5737 /* scalar variants - universal */ 5738 5739 #if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__)) 5740 /* 5741 * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they 5742 * emit an excess mask and a full 64-bit multiply-add (MADD X-form). 5743 * 5744 * While this might not seem like much, as AArch64 is a 64-bit architecture, only 5745 * big Cortex designs have a full 64-bit multiplier. 5746 * 5747 * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit 5748 * multiplies expand to 2-3 multiplies in microcode. This has a major penalty 5749 * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline. 5750 * 5751 * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does 5752 * not have this penalty and does the mask automatically. 5753 */ 5754 XXH_FORCE_INLINE xxh_u64 5755 XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) 5756 { 5757 xxh_u64 ret; 5758 /* note: %x = 64-bit register, %w = 32-bit register */ 5759 __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc)); 5760 return ret; 5761 } 5762 #else 5763 XXH_FORCE_INLINE xxh_u64 5764 XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) 5765 { 5766 return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc; 5767 } 5768 #endif 5769 5770 /*! 5771 * @internal 5772 * @brief Scalar round for @ref XXH3_accumulate_512_scalar(). 5773 * 5774 * This is extracted to its own function because the NEON path uses a combination 5775 * of NEON and scalar. 5776 */ 5777 XXH_FORCE_INLINE void 5778 XXH3_scalarRound(void* XXH_RESTRICT acc, 5779 void const* XXH_RESTRICT input, 5780 void const* XXH_RESTRICT secret, 5781 size_t lane) 5782 { 5783 xxh_u64* xacc = (xxh_u64*) acc; 5784 xxh_u8 const* xinput = (xxh_u8 const*) input; 5785 xxh_u8 const* xsecret = (xxh_u8 const*) secret; 5786 XXH_ASSERT(lane < XXH_ACC_NB); 5787 XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0); 5788 { 5789 xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8); 5790 xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8); 5791 xacc[lane ^ 1] += data_val; /* swap adjacent lanes */ 5792 xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]); 5793 } 5794 } 5795 5796 /*! 5797 * @internal 5798 * @brief Processes a 64 byte block of data using the scalar path. 5799 */ 5800 XXH_FORCE_INLINE void 5801 XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc, 5802 const void* XXH_RESTRICT input, 5803 const void* XXH_RESTRICT secret) 5804 { 5805 size_t i; 5806 /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */ 5807 #if defined(__GNUC__) && !defined(__clang__) \ 5808 && (defined(__arm__) || defined(__thumb2__)) \ 5809 && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \ 5810 && XXH_SIZE_OPT <= 0 5811 # pragma GCC unroll 8 5812 #endif 5813 for (i=0; i < XXH_ACC_NB; i++) { 5814 XXH3_scalarRound(acc, input, secret, i); 5815 } 5816 } 5817 XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar) 5818 5819 /*! 5820 * @internal 5821 * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar(). 5822 * 5823 * This is extracted to its own function because the NEON path uses a combination 5824 * of NEON and scalar. 
5825 */
5826 XXH_FORCE_INLINE void
5827 XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
5828 void const* XXH_RESTRICT secret,
5829 size_t lane)
5830 {
5831 xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
5832 const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
5833 XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
5834 XXH_ASSERT(lane < XXH_ACC_NB);
5835 {
5836 xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
5837 xxh_u64 acc64 = xacc[lane];
5838 acc64 = XXH_xorshift64(acc64, 47);
5839 acc64 ^= key64;
5840 acc64 *= XXH_PRIME32_1;
5841 xacc[lane] = acc64;
5842 }
5843 }
5844
5845 /*!
5846 * @internal
5847 * @brief Scrambles the accumulators after a large chunk has been read
5848 */
5849 XXH_FORCE_INLINE void
5850 XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5851 {
5852 size_t i;
5853 for (i=0; i < XXH_ACC_NB; i++) {
5854 XXH3_scalarScrambleRound(acc, secret, i);
5855 }
5856 }
5857
5858 XXH_FORCE_INLINE void
5859 XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
5860 {
5861 /*
5862 * We need a separate pointer for the hack below,
5863 * which requires a non-const pointer.
5864 * Any decent compiler will optimize this out otherwise.
5865 */
5866 const xxh_u8* kSecretPtr = XXH3_kSecret;
5867 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
5868
5869 #if defined(__GNUC__) && defined(__aarch64__)
5870 /*
5871 * UGLY HACK:
5872 * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
5873 * placed sequentially, in order, at the top of the unrolled loop.
5874 *
5875 * While MOVK is great for generating constants (2 cycles for a 64-bit
5876 * constant compared to 4 cycles for LDR), it fights for bandwidth with
5877 * the arithmetic instructions.
5878 *
5879 * I L S
5880 * MOVK
5881 * MOVK
5882 * MOVK
5883 * MOVK
5884 * ADD
5885 * SUB STR
5886 * STR
5887 * By forcing loads from memory (as the asm line causes the compiler to assume
5888 * that kSecretPtr has been changed), the pipelines are used more
5889 * efficiently:
5890 * I L S
5891 * LDR
5892 * ADD LDR
5893 * SUB STR
5894 * STR
5895 *
5896 * See XXH3_NEON_LANES for details on the pipeline.
5897 *
5898 * XXH3_64bits_withSeed, len == 256, Snapdragon 835
5899 * without hack: 2654.4 MB/s
5900 * with hack: 3202.9 MB/s
5901 */
5902 XXH_COMPILER_GUARD(kSecretPtr);
5903 #endif
5904 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
5905 int i;
5906 for (i=0; i < nbRounds; i++) {
5907 /*
5908 * The asm hack causes the compiler to assume that kSecretPtr aliases with
5909 * customSecret, and on aarch64, this prevented LDP from merging two
5910 * loads together for free. Putting the loads together before the stores
5911 * properly generates LDP.
5912 */ 5913 xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64; 5914 xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64; 5915 XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo); 5916 XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi); 5917 } } 5918 } 5919 5920 5921 typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t); 5922 typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*); 5923 typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64); 5924 5925 5926 #if (XXH_VECTOR == XXH_AVX512) 5927 5928 #define XXH3_accumulate_512 XXH3_accumulate_512_avx512 5929 #define XXH3_accumulate XXH3_accumulate_avx512 5930 #define XXH3_scrambleAcc XXH3_scrambleAcc_avx512 5931 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512 5932 5933 #elif (XXH_VECTOR == XXH_AVX2) 5934 5935 #define XXH3_accumulate_512 XXH3_accumulate_512_avx2 5936 #define XXH3_accumulate XXH3_accumulate_avx2 5937 #define XXH3_scrambleAcc XXH3_scrambleAcc_avx2 5938 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2 5939 5940 #elif (XXH_VECTOR == XXH_SSE2) 5941 5942 #define XXH3_accumulate_512 XXH3_accumulate_512_sse2 5943 #define XXH3_accumulate XXH3_accumulate_sse2 5944 #define XXH3_scrambleAcc XXH3_scrambleAcc_sse2 5945 #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2 5946 5947 #elif (XXH_VECTOR == XXH_NEON) 5948 5949 #define XXH3_accumulate_512 XXH3_accumulate_512_neon 5950 #define XXH3_accumulate XXH3_accumulate_neon 5951 #define XXH3_scrambleAcc XXH3_scrambleAcc_neon 5952 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar 5953 5954 #elif (XXH_VECTOR == XXH_VSX) 5955 5956 #define XXH3_accumulate_512 XXH3_accumulate_512_vsx 5957 #define XXH3_accumulate XXH3_accumulate_vsx 5958 #define XXH3_scrambleAcc XXH3_scrambleAcc_vsx 5959 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar 5960 5961 #elif (XXH_VECTOR == XXH_SVE) 5962 #define XXH3_accumulate_512 XXH3_accumulate_512_sve 5963 #define XXH3_accumulate XXH3_accumulate_sve 5964 #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar 5965 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar 5966 5967 #elif (XXH_VECTOR == XXH_LSX) 5968 #define XXH3_accumulate_512 XXH3_accumulate_512_lsx 5969 #define XXH3_accumulate XXH3_accumulate_lsx 5970 #define XXH3_scrambleAcc XXH3_scrambleAcc_lsx 5971 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar 5972 5973 #else /* scalar */ 5974 5975 #define XXH3_accumulate_512 XXH3_accumulate_512_scalar 5976 #define XXH3_accumulate XXH3_accumulate_scalar 5977 #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar 5978 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar 5979 5980 #endif 5981 5982 #if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */ 5983 # undef XXH3_initCustomSecret 5984 # define XXH3_initCustomSecret XXH3_initCustomSecret_scalar 5985 #endif 5986 5987 XXH_FORCE_INLINE void 5988 XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc, 5989 const xxh_u8* XXH_RESTRICT input, size_t len, 5990 const xxh_u8* XXH_RESTRICT secret, size_t secretSize, 5991 XXH3_f_accumulate f_acc, 5992 XXH3_f_scrambleAcc f_scramble) 5993 { 5994 size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; 5995 size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock; 5996 size_t const nb_blocks = (len - 1) / block_len; 5997 5998 size_t n; 5999 6000 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); 6001 6002 for (n = 0; n < nb_blocks; n++) { 6003 f_acc(acc, input + n*block_len, secret, 
nbStripesPerBlock); 6004 f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN); 6005 } 6006 6007 /* last partial block */ 6008 XXH_ASSERT(len > XXH_STRIPE_LEN); 6009 { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN; 6010 XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); 6011 f_acc(acc, input + nb_blocks*block_len, secret, nbStripes); 6012 6013 /* last stripe */ 6014 { const xxh_u8* const p = input + len - XXH_STRIPE_LEN; 6015 #define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */ 6016 XXH3_accumulate_512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START); 6017 } } 6018 } 6019 6020 XXH_FORCE_INLINE xxh_u64 6021 XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret) 6022 { 6023 return XXH3_mul128_fold64( 6024 acc[0] ^ XXH_readLE64(secret), 6025 acc[1] ^ XXH_readLE64(secret+8) ); 6026 } 6027 6028 static XXH_PUREF XXH64_hash_t 6029 XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start) 6030 { 6031 xxh_u64 result64 = start; 6032 size_t i = 0; 6033 6034 for (i = 0; i < 4; i++) { 6035 result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i); 6036 #if defined(__clang__) /* Clang */ \ 6037 && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \ 6038 && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ 6039 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ 6040 /* 6041 * UGLY HACK: 6042 * Prevent autovectorization on Clang ARMv7-a. Exact same problem as 6043 * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b. 6044 * XXH3_64bits, len == 256, Snapdragon 835: 6045 * without hack: 2063.7 MB/s 6046 * with hack: 2560.7 MB/s 6047 */ 6048 XXH_COMPILER_GUARD(result64); 6049 #endif 6050 } 6051 6052 return XXH3_avalanche(result64); 6053 } 6054 6055 /* do not align on 8, so that the secret is different from the accumulator */ 6056 #define XXH_SECRET_MERGEACCS_START 11 6057 6058 static XXH_PUREF XXH64_hash_t 6059 XXH3_finalizeLong_64b(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 len) 6060 { 6061 return XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START, len * XXH_PRIME64_1); 6062 } 6063 6064 #define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \ 6065 XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 } 6066 6067 XXH_FORCE_INLINE XXH64_hash_t 6068 XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len, 6069 const void* XXH_RESTRICT secret, size_t secretSize, 6070 XXH3_f_accumulate f_acc, 6071 XXH3_f_scrambleAcc f_scramble) 6072 { 6073 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; 6074 6075 XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble); 6076 6077 /* converge into final hash */ 6078 XXH_STATIC_ASSERT(sizeof(acc) == 64); 6079 XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); 6080 return XXH3_finalizeLong_64b(acc, (const xxh_u8*)secret, (xxh_u64)len); 6081 } 6082 6083 /* 6084 * It's important for performance to transmit secret's size (when it's static) 6085 * so that the compiler can properly optimize the vectorized loop. 6086 * This makes a big performance difference for "medium" keys (<1 KB) when using AVX instruction set. 6087 * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE 6088 * breaks -Og, this is XXH_NO_INLINE. 
6089 */
6090 XXH3_WITH_SECRET_INLINE XXH64_hash_t
6091 XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
6092 XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
6093 {
6094 (void)seed64;
6095 return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
6096 }
6097
6098 /*
6099 * It's preferable for performance that XXH3_hashLong is not inlined,
6100 * as it results in a smaller function for small data, which is easier on the instruction cache.
6101 * Note that inside this no_inline function, we do inline the internal loop,
6102 * and provide a statically defined secret size to allow optimization of the vector loop.
6103 */
6104 XXH_NO_INLINE XXH_PUREF XXH64_hash_t
6105 XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
6106 XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
6107 {
6108 (void)seed64; (void)secret; (void)secretLen;
6109 return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc);
6110 }
6111
6112 /*
6113 * XXH3_hashLong_64b_withSeed():
6114 * Generate a custom key based on an alteration of the default XXH3_kSecret with the seed,
6115 * and then use this key for long mode hashing.
6116 *
6117 * This operation is decently fast but nonetheless costs a little bit of time.
6118 * Try to avoid it whenever possible (typically when seed==0).
6119 *
6120 * It's important for performance that XXH3_hashLong is not inlined. Not sure
6121 * why (uop cache maybe?), but the difference is large and easily measurable.
6122 */
6123 XXH_FORCE_INLINE XXH64_hash_t
6124 XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
6125 XXH64_hash_t seed,
6126 XXH3_f_accumulate f_acc,
6127 XXH3_f_scrambleAcc f_scramble,
6128 XXH3_f_initCustomSecret f_initSec)
6129 {
6130 #if XXH_SIZE_OPT <= 0
6131 if (seed == 0)
6132 return XXH3_hashLong_64b_internal(input, len,
6133 XXH3_kSecret, sizeof(XXH3_kSecret),
6134 f_acc, f_scramble);
6135 #endif
6136 { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
6137 f_initSec(secret, seed);
6138 return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
6139 f_acc, f_scramble);
6140 }
6141 }
6142
6143 /*
6144 * It's important for performance that XXH3_hashLong is not inlined.
6145 */
6146 XXH_NO_INLINE XXH64_hash_t
6147 XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len,
6148 XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
6149 {
6150 (void)secret; (void)secretLen;
6151 return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
6152 XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
6153 }
6154
6155
6156 typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
6157 XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
6158
6159 XXH_FORCE_INLINE XXH64_hash_t
6160 XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
6161 XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
6162 XXH3_hashLong64_f f_hashLong)
6163 {
6164 XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
6165 /*
6166 * If an action is to be taken when the `secretLen` condition is not respected,
6167 * it should be done here.
6168 * For now, it's a contract pre-condition.
6169 * Adding a check and a branch here would cost performance at every hash.
6170 * Also, note that the function signature doesn't offer room to return an error.
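 *
 * A caller that cannot guarantee the contract can check it up front, as in
 * this illustrative sketch:
 *
 * @code{.c}
 * if (secretLen < XXH3_SECRET_SIZE_MIN) {
 *     // handle the error here instead of risking undefined behavior
 * }
 * @endcode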
XXH_FORCE_INLINE XXH64_hash_t
XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
                     XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                     XXH3_hashLong64_f f_hashLong)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If an action is to be taken when the `secretLen` condition is not respected,
     * it should be done here.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash.
     * Also, note that the function signature doesn't offer room to return an error.
     */
    if (len <= 16)
        return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
}


/* === Public entry point === */

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length)
{
    return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize)
{
    return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed)
{
    return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
}

XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    if (length <= XXH3_MIDSIZE_MAX)
        return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize);
}
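/*
 * For illustration, one-shot usage of the entry points above; `buffer` and
 * `size` are assumed to satisfy the usual input/length pre-conditions:
 *
 * @code{.c}
 * XXH64_hash_t const h1 = XXH3_64bits(buffer, size);
 * XXH64_hash_t const h2 = XXH3_64bits_withSeed(buffer, size, (XXH64_hash_t)12345);
 * @endcode
 */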
6248 * 6249 * Even if the returned pointer is aligned, there will always be 6250 * at least one byte to store the offset to the original pointer. 6251 */ 6252 size_t offset = align - ((size_t)base & (align - 1)); /* base % align */ 6253 /* Add the offset for the now-aligned pointer */ 6254 xxh_u8* ptr = base + offset; 6255 6256 XXH_ASSERT((size_t)ptr % align == 0); 6257 6258 /* Store the offset immediately before the returned pointer. */ 6259 ptr[-1] = (xxh_u8)offset; 6260 return ptr; 6261 } 6262 return NULL; 6263 } 6264 } 6265 /* 6266 * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass 6267 * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout. 6268 */ 6269 static void XXH_alignedFree(void* p) 6270 { 6271 if (p != NULL) { 6272 xxh_u8* ptr = (xxh_u8*)p; 6273 /* Get the offset byte we added in XXH_malloc. */ 6274 xxh_u8 offset = ptr[-1]; 6275 /* Free the original malloc'd pointer */ 6276 xxh_u8* base = ptr - offset; 6277 XXH_free(base); 6278 } 6279 } 6280 /*! @ingroup XXH3_family */ 6281 /*! 6282 * @brief Allocate an @ref XXH3_state_t. 6283 * 6284 * @return An allocated pointer of @ref XXH3_state_t on success. 6285 * @return `NULL` on failure. 6286 * 6287 * @note Must be freed with XXH3_freeState(). 6288 * 6289 * @see @ref streaming_example "Streaming Example" 6290 */ 6291 XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) 6292 { 6293 XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64); 6294 if (state==NULL) return NULL; 6295 XXH3_INITSTATE(state); 6296 return state; 6297 } 6298 6299 /*! @ingroup XXH3_family */ 6300 /*! 6301 * @brief Frees an @ref XXH3_state_t. 6302 * 6303 * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState(). 6304 * 6305 * @return @ref XXH_OK. 6306 * 6307 * @note Must be allocated with XXH3_createState(). 6308 * 6309 * @see @ref streaming_example "Streaming Example" 6310 */ 6311 XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr) 6312 { 6313 XXH_alignedFree(statePtr); 6314 return XXH_OK; 6315 } 6316 6317 /*! @ingroup XXH3_family */ 6318 XXH_PUBLIC_API void 6319 XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state) 6320 { 6321 XXH_memcpy(dst_state, src_state, sizeof(*dst_state)); 6322 } 6323 6324 static void 6325 XXH3_reset_internal(XXH3_state_t* statePtr, 6326 XXH64_hash_t seed, 6327 const void* secret, size_t secretSize) 6328 { 6329 size_t const initStart = offsetof(XXH3_state_t, bufferedSize); 6330 size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart; 6331 XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart); 6332 XXH_ASSERT(statePtr != NULL); 6333 /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */ 6334 memset((char*)statePtr + initStart, 0, initLength); 6335 statePtr->acc[0] = XXH_PRIME32_3; 6336 statePtr->acc[1] = XXH_PRIME64_1; 6337 statePtr->acc[2] = XXH_PRIME64_2; 6338 statePtr->acc[3] = XXH_PRIME64_3; 6339 statePtr->acc[4] = XXH_PRIME64_4; 6340 statePtr->acc[5] = XXH_PRIME32_2; 6341 statePtr->acc[6] = XXH_PRIME64_5; 6342 statePtr->acc[7] = XXH_PRIME32_1; 6343 statePtr->seed = seed; 6344 statePtr->useSeed = (seed != 0); 6345 statePtr->extSecret = (const unsigned char*)secret; 6346 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); 6347 statePtr->secretLimit = secretSize - XXH_STRIPE_LEN; 6348 statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; 6349 } 6350 6351 /*! 

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, secret, secretSize);
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    return XXH_OK;
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (seed==0) return XXH3_64bits_reset(statePtr);
    if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
        XXH3_initCustomSecret(statePtr->customSecret, seed);
    XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    XXH3_reset_internal(statePtr, seed64, secret, secretSize);
    statePtr->useSeed = 1; /* always, even if seed64==0 */
    return XXH_OK;
}
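/*
 * For illustration, the reset functions above re-initialize a state without
 * allocating, so one state can serve many hashes. A minimal sketch, assuming
 * `inputs[i]`/`sizes[i]` describe n valid buffers:
 *
 * @code{.c}
 * XXH3_state_t* const state = XXH3_createState();
 * if (state != NULL) {
 *     size_t i;
 *     for (i = 0; i < n; i++) {
 *         if ( (XXH3_64bits_reset(state) == XXH_OK)
 *           && (XXH3_64bits_update(state, inputs[i], sizes[i]) == XXH_OK) ) {
 *             XXH64_hash_t const h = XXH3_64bits_digest(state);
 *             (void)h;  /* use the hash */
 *         }
 *     }
 *     XXH3_freeState(state);
 * }
 * @endcode
 */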
/*!
 * @internal
 * @brief Processes a large input for XXH3_update() and XXH3_digest_long().
 *
 * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block.
 *
 * @param acc                Pointer to the 8 accumulator lanes
 * @param nbStripesSoFarPtr  In/out pointer to the number of leftover stripes in the block
 * @param nbStripesPerBlock  Number of stripes in a block
 * @param input              Input pointer
 * @param nbStripes          Number of stripes to process
 * @param secret             Secret pointer
 * @param secretLimit        Offset of the last block in @p secret
 * @param f_acc              Pointer to an XXH3_accumulate implementation
 * @param f_scramble         Pointer to an XXH3_scrambleAcc implementation
 * @return                   Pointer past the end of @p input after processing
 */
XXH_FORCE_INLINE const xxh_u8 *
XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
                    XXH3_f_accumulate f_acc,
                    XXH3_f_scrambleAcc f_scramble)
{
    const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE;
    /* Process full blocks */
    if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) {
        /* Process the initial partial block... */
        size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr;

        do {
            /* Accumulate and scramble */
            f_acc(acc, input, initialSecret, nbStripesThisIter);
            f_scramble(acc, secret + secretLimit);
            input += nbStripesThisIter * XXH_STRIPE_LEN;
            nbStripes -= nbStripesThisIter;
            /* Then continue the loop with the full block size */
            nbStripesThisIter = nbStripesPerBlock;
            initialSecret = secret;
        } while (nbStripes >= nbStripesPerBlock);
        *nbStripesSoFarPtr = 0;
    }
    /* Process a partial block */
    if (nbStripes > 0) {
        f_acc(acc, input, initialSecret, nbStripes);
        input += nbStripes * XXH_STRIPE_LEN;
        *nbStripesSoFarPtr += nbStripes;
    }
    /* Return end pointer */
    return input;
}
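/*
 * For orientation, the stripe/block arithmetic above, worked with the values
 * defined earlier in this file (XXH_STRIPE_LEN == 64, XXH_SECRET_CONSUME_RATE == 8,
 * XXH_SECRET_DEFAULT_SIZE == 192):
 *
 * @code{.c}
 * size_t const secretSize        = 192;                    /* XXH_SECRET_DEFAULT_SIZE */
 * size_t const secretLimit       = secretSize - 64;        /* - XXH_STRIPE_LEN          ->  128 */
 * size_t const nbStripesPerBlock = secretLimit / 8;        /* / XXH_SECRET_CONSUME_RATE ->   16 */
 * size_t const bytesPerBlock     = nbStripesPerBlock * 64; /* 16 stripes                -> 1024 */
 * @endcode
 *
 * After each full block, f_scramble() refreshes the accumulators and the
 * secret offset wraps back to 0.
 */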
6497 */ 6498 if (state->bufferedSize) { 6499 size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize; 6500 XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); 6501 input += loadSize; 6502 XXH3_consumeStripes(acc, 6503 &state->nbStripesSoFar, state->nbStripesPerBlock, 6504 state->buffer, XXH3_INTERNALBUFFER_STRIPES, 6505 secret, state->secretLimit, 6506 f_acc, f_scramble); 6507 state->bufferedSize = 0; 6508 } 6509 XXH_ASSERT(input < bEnd); 6510 if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) { 6511 size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN; 6512 input = XXH3_consumeStripes(acc, 6513 &state->nbStripesSoFar, state->nbStripesPerBlock, 6514 input, nbStripes, 6515 secret, state->secretLimit, 6516 f_acc, f_scramble); 6517 XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); 6518 6519 } 6520 /* Some remaining input (always) : buffer it */ 6521 XXH_ASSERT(input < bEnd); 6522 XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE); 6523 XXH_ASSERT(state->bufferedSize == 0); 6524 XXH_memcpy(state->buffer, input, (size_t)(bEnd-input)); 6525 state->bufferedSize = (XXH32_hash_t)(bEnd-input); 6526 #if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 6527 /* save stack accumulators into state */ 6528 XXH_memcpy(state->acc, acc, sizeof(acc)); 6529 #endif 6530 } 6531 6532 return XXH_OK; 6533 } 6534 6535 /*! @ingroup XXH3_family */ 6536 XXH_PUBLIC_API XXH_errorcode 6537 XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) 6538 { 6539 return XXH3_update(state, (const xxh_u8*)input, len, 6540 XXH3_accumulate, XXH3_scrambleAcc); 6541 } 6542 6543 6544 XXH_FORCE_INLINE void 6545 XXH3_digest_long (XXH64_hash_t* acc, 6546 const XXH3_state_t* state, 6547 const unsigned char* secret) 6548 { 6549 xxh_u8 lastStripe[XXH_STRIPE_LEN]; 6550 const xxh_u8* lastStripePtr; 6551 6552 /* 6553 * Digest on a local copy. This way, the state remains unaltered, and it can 6554 * continue ingesting more input afterwards. 6555 */ 6556 XXH_memcpy(acc, state->acc, sizeof(state->acc)); 6557 if (state->bufferedSize >= XXH_STRIPE_LEN) { 6558 /* Consume remaining stripes then point to remaining data in buffer */ 6559 size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN; 6560 size_t nbStripesSoFar = state->nbStripesSoFar; 6561 XXH3_consumeStripes(acc, 6562 &nbStripesSoFar, state->nbStripesPerBlock, 6563 state->buffer, nbStripes, 6564 secret, state->secretLimit, 6565 XXH3_accumulate, XXH3_scrambleAcc); 6566 lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN; 6567 } else { /* bufferedSize < XXH_STRIPE_LEN */ 6568 /* Copy to temp buffer */ 6569 size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize; 6570 XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */ 6571 XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); 6572 XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); 6573 lastStripePtr = lastStripe; 6574 } 6575 /* Last stripe */ 6576 XXH3_accumulate_512(acc, 6577 lastStripePtr, 6578 secret + state->secretLimit - XXH_SECRET_LASTACC_START); 6579 } 6580 6581 /*! @ingroup XXH3_family */ 6582 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state) 6583 { 6584 const unsigned char* const secret = (state->extSecret == NULL) ? 
#endif /* !XXH_NO_STREAM */


/* ==========================================
 * XXH3 128 bits (a.k.a. XXH128)
 * ==========================================
 * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
 * even without counting the significantly larger output size.
 *
 * For example, extra steps are taken to avoid the seed-dependent collisions
 * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
 *
 * This strength naturally comes at the cost of some speed, especially on short
 * lengths. Note that longer hashes are about as fast as the 64-bit version,
 * due to it using only a slight modification of the 64-bit loop.
 *
 * XXH128 is also more oriented towards 64-bit machines. It is still extremely
 * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
 */

XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    /* A doubled version of 1to3_64b with different constants. */
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
     * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
     * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8 const c1 = input[0];
        xxh_u8 const c2 = input[len >> 1];
        xxh_u8 const c3 = input[len - 1];
        xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
                                | ((xxh_u32)c3 <<  0) | ((xxh_u32)len << 8);
        xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
        xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
        xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
        xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
        XXH128_hash_t h128;
        h128.low64  = XXH64_avalanche(keyed_lo);
        h128.high64 = XXH64_avalanche(keyed_hi);
        return h128;
    }
}

XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input_lo = XXH_readLE32(input);
        xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
        xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
        xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
        xxh_u64 const keyed = input_64 ^ bitflip;

        /* Shift len to the left to ensure it is even, this avoids even multiplies. */
        XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));

        m128.high64 += (m128.low64 << 1);
        m128.low64  ^= (m128.high64 >> 3);

        m128.low64   = XXH_xorshift64(m128.low64, 35);
        m128.low64  *= PRIME_MX2;
        m128.low64   = XXH_xorshift64(m128.low64, 28);
        m128.high64  = XXH3_avalanche(m128.high64);
        return m128;
    }
}

XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
        xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
        xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64       input_hi = XXH_readLE64(input + len - 8);
        XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
        /*
         * Put len in the middle of m128 to ensure that the length gets mixed to
         * both the low and high bits in the 128x64 multiply below.
         */
        m128.low64 += (xxh_u64)(len - 1) << 54;
        input_hi   ^= bitfliph;
        /*
         * Add the high 32 bits of input_hi to the high 32 bits of m128, then
         * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
         * the high 64 bits of m128.
         *
         * The best approach to this operation is different on 32-bit and 64-bit.
         */
        if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
            /*
             * 32-bit optimized version, which is more readable.
             *
             * On 32-bit, it removes an ADC and delays a dependency between the two
             * halves of m128.high64, but it generates an extra mask on 64-bit.
             */
            m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
        } else {
            /*
             * 64-bit optimized (albeit more confusing) version.
6707 * 6708 * Uses some properties of addition and multiplication to remove the mask: 6709 * 6710 * Let: 6711 * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF) 6712 * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000) 6713 * c = XXH_PRIME32_2 6714 * 6715 * a + (b * c) 6716 * Inverse Property: x + y - x == y 6717 * a + (b * (1 + c - 1)) 6718 * Distributive Property: x * (y + z) == (x * y) + (x * z) 6719 * a + (b * 1) + (b * (c - 1)) 6720 * Identity Property: x * 1 == x 6721 * a + b + (b * (c - 1)) 6722 * 6723 * Substitute a, b, and c: 6724 * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) 6725 * 6726 * Since input_hi.hi + input_hi.lo == input_hi, we get this: 6727 * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) 6728 */ 6729 m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1); 6730 } 6731 /* m128 ^= XXH_swap64(m128 >> 64); */ 6732 m128.low64 ^= XXH_swap64(m128.high64); 6733 6734 { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */ 6735 XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2); 6736 h128.high64 += m128.high64 * XXH_PRIME64_2; 6737 6738 h128.low64 = XXH3_avalanche(h128.low64); 6739 h128.high64 = XXH3_avalanche(h128.high64); 6740 return h128; 6741 } } 6742 } 6743 6744 /* 6745 * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN 6746 */ 6747 XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t 6748 XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) 6749 { 6750 XXH_ASSERT(len <= 16); 6751 { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed); 6752 if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed); 6753 if (len) return XXH3_len_1to3_128b(input, len, secret, seed); 6754 { XXH128_hash_t h128; 6755 xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72); 6756 xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88); 6757 h128.low64 = XXH64_avalanche(seed ^ bitflipl); 6758 h128.high64 = XXH64_avalanche( seed ^ bitfliph); 6759 return h128; 6760 } } 6761 } 6762 6763 /* 6764 * A bit slower than XXH3_mix16B, but handles multiply by zero better. 6765 */ 6766 XXH_FORCE_INLINE XXH128_hash_t 6767 XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2, 6768 const xxh_u8* secret, XXH64_hash_t seed) 6769 { 6770 acc.low64 += XXH3_mix16B (input_1, secret+0, seed); 6771 acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8); 6772 acc.high64 += XXH3_mix16B (input_2, secret+16, seed); 6773 acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8); 6774 return acc; 6775 } 6776 6777 6778 XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t 6779 XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, 6780 const xxh_u8* XXH_RESTRICT secret, size_t secretSize, 6781 XXH64_hash_t seed) 6782 { 6783 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; 6784 XXH_ASSERT(16 < len && len <= 128); 6785 6786 { XXH128_hash_t acc; 6787 acc.low64 = len * XXH_PRIME64_1; 6788 acc.high64 = 0; 6789 6790 #if XXH_SIZE_OPT >= 1 6791 { 6792 /* Smaller, but slightly slower. 
            unsigned int i = (unsigned int)(len - 1) / 32;
            do {
                acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
            } while (i-- != 0);
        }
#else
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
                }
                acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
            }
            acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
        }
        acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
#endif
        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
        }
    }
}

XXH_NO_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                       XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    {   XXH128_hash_t acc;
        unsigned i;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        /*
         * We set `i` to offset + 32. We do this so that the unchanged
         * `len` can be used as the upper bound. This reaches a sweet spot
         * where both x86 and aarch64 get simple address generation and
         * good codegen for the loop.
         */
        for (i = 32; i < 160; i += 32) {
            acc = XXH128_mix32B(acc,
                                input  + i - 32,
                                input  + i - 16,
                                secret + i - 32,
                                seed);
        }
        acc.low64 = XXH3_avalanche(acc.low64);
        acc.high64 = XXH3_avalanche(acc.high64);
        /*
         * NB: `i <= len` will duplicate the last 32 bytes if
         * len % 32 was zero. This is an unfortunate necessity to keep
         * the hash result stable.
6853 */ 6854 for (i=160; i <= len; i += 32) { 6855 acc = XXH128_mix32B(acc, 6856 input + i - 32, 6857 input + i - 16, 6858 secret + XXH3_MIDSIZE_STARTOFFSET + i - 160, 6859 seed); 6860 } 6861 /* last bytes */ 6862 acc = XXH128_mix32B(acc, 6863 input + len - 16, 6864 input + len - 32, 6865 secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16, 6866 (XXH64_hash_t)0 - seed); 6867 6868 { XXH128_hash_t h128; 6869 h128.low64 = acc.low64 + acc.high64; 6870 h128.high64 = (acc.low64 * XXH_PRIME64_1) 6871 + (acc.high64 * XXH_PRIME64_4) 6872 + ((len - seed) * XXH_PRIME64_2); 6873 h128.low64 = XXH3_avalanche(h128.low64); 6874 h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); 6875 return h128; 6876 } 6877 } 6878 } 6879 6880 static XXH_PUREF XXH128_hash_t 6881 XXH3_finalizeLong_128b(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, size_t secretSize, xxh_u64 len) 6882 { 6883 XXH128_hash_t h128; 6884 h128.low64 = XXH3_finalizeLong_64b(acc, secret, len); 6885 h128.high64 = XXH3_mergeAccs(acc, secret + secretSize 6886 - XXH_STRIPE_LEN - XXH_SECRET_MERGEACCS_START, 6887 ~(len * XXH_PRIME64_2)); 6888 return h128; 6889 } 6890 6891 XXH_FORCE_INLINE XXH128_hash_t 6892 XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len, 6893 const xxh_u8* XXH_RESTRICT secret, size_t secretSize, 6894 XXH3_f_accumulate f_acc, 6895 XXH3_f_scrambleAcc f_scramble) 6896 { 6897 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; 6898 6899 XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble); 6900 6901 /* converge into final hash */ 6902 XXH_STATIC_ASSERT(sizeof(acc) == 64); 6903 XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); 6904 return XXH3_finalizeLong_128b(acc, secret, secretSize, (xxh_u64)len); 6905 } 6906 6907 /* 6908 * It's important for performance that XXH3_hashLong() is not inlined. 6909 */ 6910 XXH_NO_INLINE XXH_PUREF XXH128_hash_t 6911 XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len, 6912 XXH64_hash_t seed64, 6913 const void* XXH_RESTRICT secret, size_t secretLen) 6914 { 6915 (void)seed64; (void)secret; (void)secretLen; 6916 return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), 6917 XXH3_accumulate, XXH3_scrambleAcc); 6918 } 6919 6920 /* 6921 * It's important for performance to pass @p secretLen (when it's static) 6922 * to the compiler, so that it can properly optimize the vectorized loop. 6923 * 6924 * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE 6925 * breaks -Og, this is XXH_NO_INLINE. 
6926 */ 6927 XXH3_WITH_SECRET_INLINE XXH128_hash_t 6928 XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len, 6929 XXH64_hash_t seed64, 6930 const void* XXH_RESTRICT secret, size_t secretLen) 6931 { 6932 (void)seed64; 6933 return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen, 6934 XXH3_accumulate, XXH3_scrambleAcc); 6935 } 6936 6937 XXH_FORCE_INLINE XXH128_hash_t 6938 XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len, 6939 XXH64_hash_t seed64, 6940 XXH3_f_accumulate f_acc, 6941 XXH3_f_scrambleAcc f_scramble, 6942 XXH3_f_initCustomSecret f_initSec) 6943 { 6944 if (seed64 == 0) 6945 return XXH3_hashLong_128b_internal(input, len, 6946 XXH3_kSecret, sizeof(XXH3_kSecret), 6947 f_acc, f_scramble); 6948 { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; 6949 f_initSec(secret, seed64); 6950 return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret), 6951 f_acc, f_scramble); 6952 } 6953 } 6954 6955 /* 6956 * It's important for performance that XXH3_hashLong is not inlined. 6957 */ 6958 XXH_NO_INLINE XXH128_hash_t 6959 XXH3_hashLong_128b_withSeed(const void* input, size_t len, 6960 XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen) 6961 { 6962 (void)secret; (void)secretLen; 6963 return XXH3_hashLong_128b_withSeed_internal(input, len, seed64, 6964 XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); 6965 } 6966 6967 typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t, 6968 XXH64_hash_t, const void* XXH_RESTRICT, size_t); 6969 6970 XXH_FORCE_INLINE XXH128_hash_t 6971 XXH3_128bits_internal(const void* input, size_t len, 6972 XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, 6973 XXH3_hashLong128_f f_hl128) 6974 { 6975 XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); 6976 /* 6977 * If an action is to be taken if `secret` conditions are not respected, 6978 * it should be done here. 6979 * For now, it's a contract pre-condition. 6980 * Adding a check and a branch here would cost performance at every hash. 6981 */ 6982 if (len <= 16) 6983 return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); 6984 if (len <= 128) 6985 return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); 6986 if (len <= XXH3_MIDSIZE_MAX) 6987 return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); 6988 return f_hl128(input, len, seed64, secret, secretLen); 6989 } 6990 6991 6992 /* === Public XXH128 API === */ 6993 6994 /*! @ingroup XXH3_family */ 6995 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len) 6996 { 6997 return XXH3_128bits_internal(input, len, 0, 6998 XXH3_kSecret, sizeof(XXH3_kSecret), 6999 XXH3_hashLong_128b_default); 7000 } 7001 7002 /*! @ingroup XXH3_family */ 7003 XXH_PUBLIC_API XXH128_hash_t 7004 XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize) 7005 { 7006 return XXH3_128bits_internal(input, len, 0, 7007 (const xxh_u8*)secret, secretSize, 7008 XXH3_hashLong_128b_withSecret); 7009 } 7010 7011 /*! @ingroup XXH3_family */ 7012 XXH_PUBLIC_API XXH128_hash_t 7013 XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) 7014 { 7015 return XXH3_128bits_internal(input, len, seed, 7016 XXH3_kSecret, sizeof(XXH3_kSecret), 7017 XXH3_hashLong_128b_withSeed); 7018 } 7019 7020 /*! 
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_withSeed(input, len, seed);
}


/* === XXH3 128-bit streaming === */
#ifndef XXH_NO_STREAM
/*
 * All initialization and update functions are identical to the 64-bit streaming variant.
 * The only difference is the finalization routine.
 */

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
{
    return XXH3_64bits_reset(statePtr);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
{
    return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSeed(statePtr, seed);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
{
    return XXH3_64bits_update(state, input, len);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
        return XXH3_finalizeLong_128b(acc, secret, state->secretLimit + XXH_STRIPE_LEN, (xxh_u64)state->totalLen);
    }
    /* len <= XXH3_MIDSIZE_MAX : short code */
    if (state->useSeed)
        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                   secret, state->secretLimit + XXH_STRIPE_LEN);
}
#endif /* !XXH_NO_STREAM */
/* 128-bit utility functions */

#include <string.h>   /* memcmp, memcpy */

/* return : 1 if equal, 0 if different */
/*! @ingroup XXH3_family */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
{
    /* note : XXH128_hash_t is compact, it has no padding byte */
    return !(memcmp(&h1, &h2, sizeof(h1)));
}
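/*
 * For illustration, XXH128_isEqual() pairs with XXH128_cmp() (below), whose
 * prototype matches stdlib's qsort(). A minimal sorting sketch:
 *
 * @code{.c}
 * #include <stdlib.h>  // qsort
 *
 * static void sort_hashes(XXH128_hash_t* hashes, size_t n)
 * {
 *     qsort(hashes, n, sizeof(hashes[0]), XXH128_cmp);
 * }
 * @endcode
 */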
/* This prototype is compatible with stdlib's qsort().
 * @return : >0 if *h128_1  > *h128_2
 *           <0 if *h128_1  < *h128_2
 *           =0 if *h128_1 == *h128_2  */
/*! @ingroup XXH3_family */
XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
{
    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
    /* note : bets that, in most cases, hash values are different */
    if (hcmp) return hcmp;
    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
}


/*====== Canonical representation ======*/
/*! @ingroup XXH3_family */
XXH_PUBLIC_API void
XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) {
        hash.high64 = XXH_swap64(hash.high64);
        hash.low64  = XXH_swap64(hash.low64);
    }
    XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
    XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
{
    XXH128_hash_t h;
    h.high64 = XXH_readBE64(src);
    h.low64  = XXH_readBE64(src->digest + 8);
    return h;
}



/* ==========================================
 * Secret generators
 * ==========================================
 */
#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))

XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
{
    XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
    XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
{
#if (XXH_DEBUGLEVEL >= 1)
    XXH_ASSERT(secretBuffer != NULL);
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
#else
    /* production mode, assert() are disabled */
    if (secretBuffer == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
#endif

    if (customSeedSize == 0) {
        customSeed = XXH3_kSecret;
        customSeedSize = XXH_SECRET_DEFAULT_SIZE;
    }
#if (XXH_DEBUGLEVEL >= 1)
    XXH_ASSERT(customSeed != NULL);
#else
    if (customSeed == NULL) return XXH_ERROR;
#endif

    /* Fill secretBuffer with a copy of customSeed - repeat as needed */
    {   size_t pos = 0;
        while (pos < secretSize) {
            size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
            memcpy((char*)secretBuffer + pos, customSeed, toCopy);
            pos += toCopy;
    }   }

    {   size_t const nbSeg16 = secretSize / 16;
        size_t n;
        XXH128_canonical_t scrambler;
        XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
        for (n=0; n<nbSeg16; n++) {
            XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
            XXH3_combine16((char*)secretBuffer + n*16, h128);
        }
        /* last segment */
        XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
    }
    return XXH_OK;
}
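/*
 * For illustration, deriving a custom secret from low-entropy seed material,
 * then hashing with it. The 192-byte size matches XXH_SECRET_DEFAULT_SIZE;
 * any size >= XXH3_SECRET_SIZE_MIN works. `buffer` and `size` are assumed valid:
 *
 * @code{.c}
 * unsigned char secret[192];
 * const char seedMaterial[] = "not a high-entropy string";
 * if (XXH3_generateSecret(secret, sizeof(secret),
 *                         seedMaterial, sizeof(seedMaterial)-1) == XXH_OK) {
 *     XXH64_hash_t const h = XXH3_64bits_withSecret(buffer, size, secret, sizeof(secret));
 *     (void)h;
 * }
 * @endcode
 */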
/*! @ingroup XXH3_family */
XXH_PUBLIC_API void
XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed)
{
    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
    XXH3_initCustomSecret(secret, seed);
    XXH_ASSERT(secretBuffer != NULL);
    memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
}



/* Pop our optimization override from above */
#if XXH_VECTOR == XXH_AVX2                     /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__)  /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
#  pragma GCC pop_options
#endif

#endif  /* XXH_NO_LONG_LONG */

#endif  /* XXH_NO_XXH3 */

/*!
 * @}
 */
#endif  /* XXH_IMPLEMENTATION */


#if defined (__cplusplus)
} /* extern "C" */
#endif