#include <ripple/beast/hash/impl/xxhash.h>
#if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || \
    defined(__x86_64__) || defined(_M_X64)
#define XXH_USE_UNALIGNED_ACCESS 1
#endif
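
// XXH_FORCE_NATIVE_FORMAT: xxHash normally emits its result in little-endian
// convention so the same input hashes identically on every platform. Setting
// this to 1 uses the CPU's native byte order instead, which is slightly
// faster on big-endian targets but gives up cross-platform output.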
#define XXH_FORCE_NATIVE_FORMAT 0
#ifdef _MSC_VER // Visual Studio
#pragma warning(disable : 4127) // disable: C4127: conditional expression is constant
#endif
#ifdef _MSC_VER // Visual Studio
#define FORCE_INLINE static __forceinline
#else
#ifdef __GNUC__
#define FORCE_INLINE static inline __attribute__((always_inline))
#else
#define FORCE_INLINE static inline
#endif
#endif
#include <string.h> // for memcpy()

static void*
XXH_memcpy(void* dest, const void* src, size_t size)
{
    return memcpy(dest, src, size);
}
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
#include <stdint.h>
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
#endif
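
// On targets without cheap unaligned loads, reads go through packed one-field
// structs (A32/A64 below) so the compiler emits alignment-safe accesses.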
#if defined(__GNUC__) && !defined(XXH_USE_UNALIGNED_ACCESS)
#define _PACKED __attribute__((packed))
#else
#define _PACKED
#endif

#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
#pragma pack(push, 1)
#endif

typedef struct _U32_S { U32 v; } _PACKED U32_S;
typedef struct _U64_S { U64 v; } _PACKED U64_S;

#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
#pragma pack(pop)
#endif

#define A32(x) (((U32_S*)(x))->v)
#define A64(x) (((U64_S*)(x))->v)
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#if defined(_MSC_VER)
#define XXH_rotl32(x, r) _rotl(x, r)
#define XXH_rotl64(x, r) _rotl64(x, r)
#else
#define XXH_rotl32(x, r) ((x << r) | (x >> (32 - r)))
#define XXH_rotl64(x, r) ((x << r) | (x >> (64 - r)))
#endif
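
// Byte-swap helpers: prefer compiler intrinsics where available, falling back
// to portable shift-and-mask implementations.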
#if defined(_MSC_VER) // Visual Studio
#define XXH_swap32 _byteswap_ulong
#define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
#define XXH_swap32 __builtin_bswap32
#define XXH_swap64 __builtin_bswap64
#else
static inline U32
XXH_swap32(U32 x)
{
    return ((x << 24) & 0xff000000) | ((x << 8) & 0x00ff0000) |
        ((x >> 8) & 0x0000ff00) | ((x >> 24) & 0x000000ff);
}
static inline U64
XXH_swap64(U64 x)
{
    return ((x << 56) & 0xff00000000000000ULL) |
        ((x << 40) & 0x00ff000000000000ULL) |
        ((x << 24) & 0x0000ff0000000000ULL) |
        ((x << 8) & 0x000000ff00000000ULL) |
        ((x >> 8) & 0x00000000ff000000ULL) |
        ((x >> 24) & 0x0000000000ff0000ULL) |
        ((x >> 40) & 0x000000000000ff00ULL) |
        ((x >> 56) & 0x00000000000000ffULL);
}
#endif
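
// The five 32-bit and five 64-bit odd primes xxHash multiplies by at each
// mixing step.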
#define PRIME32_1 2654435761U
#define PRIME32_2 2246822519U
#define PRIME32_3 3266489917U
#define PRIME32_4 668265263U
#define PRIME32_5 374761393U

#define PRIME64_1 11400714785074694791ULL
#define PRIME64_2 14029467366897019727ULL
#define PRIME64_3 1609587929392839161ULL
#define PRIME64_4 9650029242287828579ULL
#define PRIME64_5 2870177450012600261ULL
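
// Runtime endianness probe: reading the first byte of the integer 1 yields 1
// on little-endian CPUs and 0 on big-endian ones.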
#ifndef XXH_CPU_LITTLE_ENDIAN // It is possible to define XXH_CPU_LITTLE_ENDIAN externally, e.g. on the compiler command line
static const int one = 1;
#define XXH_CPU_LITTLE_ENDIAN (*(char*)(&one))
#endif
#define XXH_STATIC_ASSERT(c)                      \
    {                                             \
        enum { XXH_static_assert = 1 / (!!(c)) }; \
    } // use only *after* variable declarations
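
// XXH32 core. For inputs of 16 bytes or more, four independent accumulators
// each consume a 4-byte lane per 16-byte stripe; they are then folded
// together, the tail is mixed in, and a final avalanche spreads entropy
// across all output bits.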
FORCE_INLINE U32
XXH32_endian_align(
    const void* input,
    size_t len,
    U32 seed,
    XXH_endianess endian,
    XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p == NULL)
    {
        len = 0;
        bEnd = p = (const BYTE*)(size_t)16;
    }
#endif

    if (len >= 16)
    {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do
        {
            v1 += XXH_get32bits(p) * PRIME32_2;
            v1 = XXH_rotl32(v1, 13);
            v1 *= PRIME32_1;
            p += 4;
            v2 += XXH_get32bits(p) * PRIME32_2;
            v2 = XXH_rotl32(v2, 13);
            v2 *= PRIME32_1;
            p += 4;
            v3 += XXH_get32bits(p) * PRIME32_2;
            v3 = XXH_rotl32(v3, 13);
            v3 *= PRIME32_1;
            p += 4;
            v4 += XXH_get32bits(p) * PRIME32_2;
            v4 = XXH_rotl32(v4, 13);
            v4 *= PRIME32_1;
            p += 4;
        } while (p <= limit);

        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) +
            XXH_rotl32(v4, 18);
    }
    else
    {
        h32 = seed + PRIME32_5;
    }

    h32 += (U32)len;

    while (p + 4 <= bEnd)
    {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32 = XXH_rotl32(h32, 17) * PRIME32_4;
        p += 4;
    }

    while (p < bEnd)
    {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}
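
// Public one-shot XXH32: dispatches on pointer alignment and detected
// endianness so the core above can use direct loads whenever that is safe.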
unsigned int
XXH32(const void* input, size_t len, unsigned seed)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

#if !defined(XXH_USE_UNALIGNED_ACCESS)
    if ((((size_t)input) & 3) == 0) // input is 4-byte aligned: take the fast path
    {
        if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
            return XXH32_endian_align(
                input, len, seed, XXH_littleEndian, XXH_aligned);
        else
            return XXH32_endian_align(
                input, len, seed, XXH_bigEndian, XXH_aligned);
    }
#endif

    if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(
            input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(
            input, len, seed, XXH_bigEndian, XXH_unaligned);
}
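
// XXH64 core. Same shape as the 32-bit version, but with 32-byte stripes,
// 64-bit lanes, and an extra merge round folding v1..v4 into h64.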
FORCE_INLINE U64
XXH64_endian_align(
    const void* input,
    size_t len,
    U64 seed,
    XXH_endianess endian,
    XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p == NULL)
    {
        len = 0;
        bEnd = p = (const BYTE*)(size_t)32;
    }
#endif

    if (len >= 32)
    {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do
        {
            v1 += XXH_get64bits(p) * PRIME64_2;
            p += 8;
            v1 = XXH_rotl64(v1, 31);
            v1 *= PRIME64_1;
            v2 += XXH_get64bits(p) * PRIME64_2;
            p += 8;
            v2 = XXH_rotl64(v2, 31);
            v2 *= PRIME64_1;
            v3 += XXH_get64bits(p) * PRIME64_2;
            p += 8;
            v3 = XXH_rotl64(v3, 31);
            v3 *= PRIME64_1;
            v4 += XXH_get64bits(p) * PRIME64_2;
            p += 8;
            v4 = XXH_rotl64(v4, 31);
            v4 *= PRIME64_1;
        } while (p <= limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) +
            XXH_rotl64(v4, 18);

        v1 *= PRIME64_2;
        v1 = XXH_rotl64(v1, 31);
        v1 *= PRIME64_1;
        h64 ^= v1;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v2 *= PRIME64_2;
        v2 = XXH_rotl64(v2, 31);
        v2 *= PRIME64_1;
        h64 ^= v2;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v3 *= PRIME64_2;
        v3 = XXH_rotl64(v3, 31);
        v3 *= PRIME64_1;
        h64 ^= v3;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v4 *= PRIME64_2;
        v4 = XXH_rotl64(v4, 31);
        v4 *= PRIME64_1;
        h64 ^= v4;
        h64 = h64 * PRIME64_1 + PRIME64_4;
    }
    else
    {
        h64 = seed + PRIME64_5;
    }

    h64 += (U64)len;

    while (p + 8 <= bEnd)
    {
        U64 k1 = XXH_get64bits(p);
        k1 *= PRIME64_2;
        k1 = XXH_rotl64(k1, 31);
        k1 *= PRIME64_1;
        h64 ^= k1;
        h64 = XXH_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
        p += 8;
    }

    if (p + 4 <= bEnd)
    {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p += 4;
    }

    while (p < bEnd)
    {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}
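
// Public one-shot XXH64, mirroring XXH32. Minimal usage sketch (illustrative
// only; the zero seed is an arbitrary choice):
//
//     unsigned long long h = XXH64(buffer, bufferSize, 0);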
unsigned long long
XXH64(const void* input, size_t len, unsigned long long seed)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

#if !defined(XXH_USE_UNALIGNED_ACCESS)
    if ((((size_t)input) & 7) == 0) // input is 8-byte aligned: take the fast path
    {
        if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
            return XXH64_endian_align(
                input, len, seed, XXH_littleEndian, XXH_aligned);
        else
            return XXH64_endian_align(
                input, len, seed, XXH_bigEndian, XXH_aligned);
    }
#endif

    if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(
            input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(
            input, len, seed, XXH_bigEndian, XXH_unaligned);
}
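
// Streaming API: reset seeds the lane accumulators exactly as the one-shot
// paths do.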
XXH_errorcode
XXH32_reset(XXH32_state_t* state_in, unsigned seed)
{
    XXH_istate32_t* state = (XXH_istate32_t*)state_in;
    state->seed = seed;
    state->v1 = seed + PRIME32_1 + PRIME32_2;
    state->v2 = seed + PRIME32_2;
    state->v3 = seed + 0;
    state->v4 = seed - PRIME32_1;
    state->total_len = 0;
    state->memsize = 0;
    return XXH_OK;
}
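
// XXH64_reset: the 64-bit analogue.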
XXH_errorcode
XXH64_reset(XXH64_state_t* state_in, unsigned long long seed)
{
    XXH_istate64_t* state = (XXH_istate64_t*)state_in;
    state->seed = seed;
    state->v1 = seed + PRIME64_1 + PRIME64_2;
    state->v2 = seed + PRIME64_2;
    state->v3 = seed + 0;
    state->v4 = seed - PRIME64_1;
    state->total_len = 0;
    state->memsize = 0;
    return XXH_OK;
}
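
// XXH32_update: buffers partial stripes in state->mem32, completes a pending
// stripe with fresh input when possible, then runs the stripe loop over the
// rest and stashes any tail for the next call.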
FORCE_INLINE XXH_errorcode
XXH32_update_endian(
    XXH32_state_t* state_in,
    const void* input,
    size_t len,
    XXH_endianess endian)
{
    XXH_istate32_t* state = (XXH_istate32_t*)state_in;
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input == NULL)
        return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 16) // fill in tmp buffer
    {
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize) // some data left from a previous update
    {
        XXH_memcpy(
            (BYTE*)(state->mem32) + state->memsize, input, 16 - state->memsize);
        {
            const U32* p32 = state->mem32;
            state->v1 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v1 = XXH_rotl32(state->v1, 13);
            state->v1 *= PRIME32_1;
            p32++;
            state->v2 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v2 = XXH_rotl32(state->v2, 13);
            state->v2 *= PRIME32_1;
            p32++;
            state->v3 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v3 = XXH_rotl32(state->v3, 13);
            state->v3 *= PRIME32_1;
            p32++;
            state->v4 += XXH_readLE32(p32, endian) * PRIME32_2;
            state->v4 = XXH_rotl32(state->v4, 13);
            state->v4 *= PRIME32_1;
            p32++;
        }
        p += 16 - state->memsize;
        state->memsize = 0;
    }

    if (p <= bEnd - 16)
    {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = state->v1;
        U32 v2 = state->v2;
        U32 v3 = state->v3;
        U32 v4 = state->v4;

        do
        {
            v1 += XXH_readLE32((const U32*)p, endian) * PRIME32_2;
            v1 = XXH_rotl32(v1, 13);
            v1 *= PRIME32_1;
            p += 4;
            v2 += XXH_readLE32((const U32*)p, endian) * PRIME32_2;
            v2 = XXH_rotl32(v2, 13);
            v2 *= PRIME32_1;
            p += 4;
            v3 += XXH_readLE32((const U32*)p, endian) * PRIME32_2;
            v3 = XXH_rotl32(v3, 13);
            v3 *= PRIME32_1;
            p += 4;
            v4 += XXH_readLE32((const U32*)p, endian) * PRIME32_2;
            v4 = XXH_rotl32(v4, 13);
            v4 *= PRIME32_1;
            p += 4;
        } while (p <= limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) // stash the tail for the next call
    {
        XXH_memcpy(state->mem32, p, bEnd - p);
        state->memsize = (int)(bEnd - p);
    }

    return XXH_OK;
}
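
// XXH32_digest: folds the lane accumulators (or just the seed, for inputs
// shorter than one stripe), then mixes in whatever bytes remain buffered.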
FORCE_INLINE U32
XXH32_digest_endian(const XXH32_state_t* state_in, XXH_endianess endian)
{
    const XXH_istate32_t* state = (const XXH_istate32_t*)state_in;
    const BYTE* p = (const BYTE*)state->mem32;
    const BYTE* const bEnd = p + state->memsize;
    U32 h32;

    if (state->total_len >= 16)
    {
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) +
            XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    }
    else
    {
        h32 = state->seed + PRIME32_5;
    }

    h32 += (U32)state->total_len;

    while (p + 4 <= bEnd)
    {
        h32 += XXH_readLE32((const U32*)p, endian) * PRIME32_3;
        h32 = XXH_rotl32(h32, 17) * PRIME32_4;
        p += 4;
    }

    while (p < bEnd)
    {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}
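
// XXH64_update: the 64-bit analogue, working in 32-byte stripes buffered in
// state->mem64.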
FORCE_INLINE XXH_errorcode
XXH64_update_endian(
    XXH64_state_t* state_in,
    const void* input,
    size_t len,
    XXH_endianess endian)
{
    XXH_istate64_t* state = (XXH_istate64_t*)state_in;
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input == NULL)
        return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32) // fill in tmp buffer
    {
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize) // some data left from a previous update
    {
        XXH_memcpy(
            ((BYTE*)state->mem64) + state->memsize, input, 32 - state->memsize);
        {
            const U64* p64 = state->mem64;
            state->v1 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v1 = XXH_rotl64(state->v1, 31);
            state->v1 *= PRIME64_1;
            p64++;
            state->v2 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v2 = XXH_rotl64(state->v2, 31);
            state->v2 *= PRIME64_1;
            p64++;
            state->v3 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v3 = XXH_rotl64(state->v3, 31);
            state->v3 *= PRIME64_1;
            p64++;
            state->v4 += XXH_readLE64(p64, endian) * PRIME64_2;
            state->v4 = XXH_rotl64(state->v4, 31);
            state->v4 *= PRIME64_1;
            p64++;
        }
        p += 32 - state->memsize;
        state->memsize = 0;
    }

    if (p + 32 <= bEnd)
    {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        do
        {
            v1 += XXH_readLE64((const U64*)p, endian) * PRIME64_2;
            v1 = XXH_rotl64(v1, 31);
            v1 *= PRIME64_1;
            p += 8;
            v2 += XXH_readLE64((const U64*)p, endian) * PRIME64_2;
            v2 = XXH_rotl64(v2, 31);
            v2 *= PRIME64_1;
            p += 8;
            v3 += XXH_readLE64((const U64*)p, endian) * PRIME64_2;
            v3 = XXH_rotl64(v3, 31);
            v3 *= PRIME64_1;
            p += 8;
            v4 += XXH_readLE64((const U64*)p, endian) * PRIME64_2;
            v4 = XXH_rotl64(v4, 31);
            v4 *= PRIME64_1;
            p += 8;
        } while (p <= limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) // stash the tail for the next call
    {
        XXH_memcpy(state->mem64, p, bEnd - p);
        state->memsize = (int)(bEnd - p);
    }

    return XXH_OK;
}
FORCE_INLINE U64
XXH64_digest_endian(const XXH64_state_t* state_in, XXH_endianess endian)
{
    const XXH_istate64_t* state = (const XXH_istate64_t*)state_in;
    const BYTE* p = (const BYTE*)state->mem64;
    const BYTE* const bEnd = p + state->memsize;
    U64 h64;

    if (state->total_len >= 32)
    {
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) +
            XXH_rotl64(v4, 18);

        v1 *= PRIME64_2;
        v1 = XXH_rotl64(v1, 31);
        v1 *= PRIME64_1;
        h64 ^= v1;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v2 *= PRIME64_2;
        v2 = XXH_rotl64(v2, 31);
        v2 *= PRIME64_1;
        h64 ^= v2;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v3 *= PRIME64_2;
        v3 = XXH_rotl64(v3, 31);
        v3 *= PRIME64_1;
        h64 ^= v3;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v4 *= PRIME64_2;
        v4 = XXH_rotl64(v4, 31);
        v4 *= PRIME64_1;
        h64 ^= v4;
        h64 = h64 * PRIME64_1 + PRIME64_4;
    }
    else
    {
        h64 = state->seed + PRIME64_5;
    }

    h64 += (U64)state->total_len;

    while (p + 8 <= bEnd)
    {
        U64 k1 = XXH_readLE64((const U64*)p, endian);
        k1 *= PRIME64_2;
        k1 = XXH_rotl64(k1, 31);
        k1 *= PRIME64_1;
        h64 ^= k1;
        h64 = XXH_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
        p += 8;
    }

    if (p + 4 <= bEnd)
    {
        h64 ^= (U64)(XXH_readLE32((const U32*)p, endian)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p += 4;
    }

    while (p < bEnd)
    {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}
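
// Streaming usage sketch (illustrative only; error-code checks elided):
//
//     XXH64_state_t st;
//     XXH64_reset(&st, 0);
//     XXH64_update(&st, chunk1, len1);
//     XXH64_update(&st, chunk2, len2);
//     unsigned long long h = XXH64_digest(&st);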