rippled
xxhash.cpp
1 /*
2 xxHash - Fast Hash algorithm
3 Copyright (C) 2012-2014, Yann Collet.
4 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are
8 met:
9 
10 * Redistributions of source code must retain the above copyright
11 notice, this list of conditions and the following disclaimer.
12 * Redistributions in binary form must reproduce the above
13 copyright notice, this list of conditions and the following disclaimer
14 in the documentation and/or other materials provided with the
15 distribution.
16 
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 
29 You can contact the author at :
30 - xxHash source repository : http://code.google.com/p/xxhash/
31 - public discussion board : https://groups.google.com/forum/#!forum/lz4c
32 */
33 
34 #include <ripple/beast/hash/impl/xxhash.h>
35 
36 //**************************************
37 // Tuning parameters
38 //**************************************
39 // Unaligned memory access is automatically enabled for "common" CPU, such as
40 // x86. For others CPU, the compiler will be more cautious, and insert extra
41 // code to ensure aligned access is respected. If you know your target CPU
42 // supports unaligned memory access, you want to force this option manually to
43 // improve performance. You can also enable this parameter if you know your
44 // input data will always be aligned (boundaries of 4, for U32).
45 #if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || \
46  defined(__x86_64__) || defined(_M_X64)
47 #define XXH_USE_UNALIGNED_ACCESS 1
48 #endif
49 
50 // XXH_ACCEPT_NULL_INPUT_POINTER :
51 // If the input pointer is a null pointer, xxHash default behavior is to trigger
52 // a memory access error, since it is a bad pointer. When this option is
53 // enabled, xxHash output for null input pointers will be the same as a
54 // null-length input. This option has a very small performance cost (only
55 // measurable on small inputs). By default, this option is disabled. To enable
56 // it, uncomment below define : #define XXH_ACCEPT_NULL_INPUT_POINTER 1
57 
58 // XXH_FORCE_NATIVE_FORMAT :
59 // By default, xxHash library provides endian-independent Hash values, based on
60 // little-endian convention. Results are therefore identical for little-endian
61 // and big-endian CPU. This comes at a performance cost for big-endian CPU,
62 // since some swapping is required to emulate little-endian format. Should
63 // endian-independence be of no importance for your application, you may set the
64 // #define below to 1. It will improve speed for Big-endian CPU. This option has
65 // no impact on Little_Endian CPU.
66 #define XXH_FORCE_NATIVE_FORMAT 0
67 
68 //**************************************
69 // Compiler Specific Options
70 //**************************************
71 // Disable some Visual warning messages
72 #ifdef _MSC_VER // Visual Studio
73 #pragma warning( \
74  disable : 4127) // disable: C4127: conditional expression is constant
75 #endif
76 
77 #ifdef _MSC_VER // Visual Studio
78 #define FORCE_INLINE static __forceinline
79 #else
80 #ifdef __GNUC__
81 #define FORCE_INLINE static inline __attribute__((always_inline))
82 #else
83 #define FORCE_INLINE static inline
84 #endif
85 #endif
86 
87 //**************************************
88 // Includes & Memory related functions
89 //**************************************
90 //#include "xxhash.h"
91 // Modify the local functions below should you wish to use some other memory
92 // routines for malloc(), free()
93 #include <stdlib.h>
94 static void*
95 XXH_malloc(size_t s)
96 {
97  return malloc(s);
98 }
99 static void
100 XXH_free(void* p)
101 {
102  free(p);
103 }
104 // for memcpy()
105 #include <string.h>
106 static void*
107 XXH_memcpy(void* dest, const void* src, size_t size)
108 {
109  return memcpy(dest, src, size);
110 }
111 
112 //**************************************
113 // Basic Types
114 //**************************************
115 #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
116 #include <stdint.h>
117 typedef uint8_t BYTE;
118 typedef uint16_t U16;
119 typedef uint32_t U32;
120 typedef int32_t S32;
121 typedef uint64_t U64;
122 #else
123 typedef unsigned char BYTE;
124 typedef unsigned short U16;
125 typedef unsigned int U32;
126 typedef signed int S32;
127 typedef unsigned long long U64;
128 #endif
129 
130 #if defined(__GNUC__) && !defined(XXH_USE_UNALIGNED_ACCESS)
131 #define _PACKED __attribute__((packed))
132 #else
133 #define _PACKED
134 #endif
135 
136 #if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
137 #ifdef __IBMC__
138 #pragma pack(1)
139 #else
140 #pragma pack(push, 1)
141 #endif
142 #endif
143 
144 namespace beast {
145 namespace detail {
146 
147 typedef struct _U32_S
148 {
149  U32 v;
150 } _PACKED U32_S;
151 typedef struct _U64_S
152 {
153  U64 v;
154 } _PACKED U64_S;
155 
156 #if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
157 #pragma pack(pop)
158 #endif
159 
160 #define A32(x) (((U32_S*)(x))->v)
161 #define A64(x) (((U64_S*)(x))->v)
162 
163 //***************************************
164 // Compiler-specific Functions and Macros
165 //***************************************
166 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
167 
168 // Note : although _rotl exists for minGW (GCC under windows), performance seems
169 // poor
170 #if defined(_MSC_VER)
171 #define XXH_rotl32(x, r) _rotl(x, r)
172 #define XXH_rotl64(x, r) _rotl64(x, r)
173 #else
174 #define XXH_rotl32(x, r) ((x << r) | (x >> (32 - r)))
175 #define XXH_rotl64(x, r) ((x << r) | (x >> (64 - r)))
176 #endif
177 
178 #if defined(_MSC_VER) // Visual Studio
179 #define XXH_swap32 _byteswap_ulong
180 #define XXH_swap64 _byteswap_uint64
181 #elif GCC_VERSION >= 403
182 #define XXH_swap32 __builtin_bswap32
183 #define XXH_swap64 __builtin_bswap64
184 #else
185 static inline U32
186 XXH_swap32(U32 x)
187 {
188  return ((x << 24) & 0xff000000) | ((x << 8) & 0x00ff0000) |
189  ((x >> 8) & 0x0000ff00) | ((x >> 24) & 0x000000ff);
190 }
191 static inline U64
192 XXH_swap64(U64 x)
193 {
194  return ((x << 56) & 0xff00000000000000ULL) |
195  ((x << 40) & 0x00ff000000000000ULL) |
196  ((x << 24) & 0x0000ff0000000000ULL) |
197  ((x << 8) & 0x000000ff00000000ULL) |
198  ((x >> 8) & 0x00000000ff000000ULL) |
199  ((x >> 24) & 0x0000000000ff0000ULL) |
200  ((x >> 40) & 0x000000000000ff00ULL) |
201  ((x >> 56) & 0x00000000000000ffULL);
202 }
203 #endif
204 
205 //**************************************
206 // Constants
207 //**************************************
208 #define PRIME32_1 2654435761U
209 #define PRIME32_2 2246822519U
210 #define PRIME32_3 3266489917U
211 #define PRIME32_4 668265263U
212 #define PRIME32_5 374761393U
213 
214 #define PRIME64_1 11400714785074694791ULL
215 #define PRIME64_2 14029467366897019727ULL
216 #define PRIME64_3 1609587929392839161ULL
217 #define PRIME64_4 9650029242287828579ULL
218 #define PRIME64_5 2870177450012600261ULL
219 
220 //**************************************
221 // Architecture Macros
222 //**************************************
223 typedef enum { XXH_bigEndian = 0, XXH_littleEndian = 1 } XXH_endianess;
224 #ifndef XXH_CPU_LITTLE_ENDIAN // It is possible to define XXH_CPU_LITTLE_ENDIAN
225  // externally, for example using a compiler
226  // switch
227 static const int one = 1;
228 #define XXH_CPU_LITTLE_ENDIAN (*(char*)(&one))
229 #endif
230 
231 //**************************************
232 // Macros
233 //**************************************
234 #define XXH_STATIC_ASSERT(c) \
235  { \
236  enum { XXH_static_assert = 1 / (!!(c)) }; \
237  } // use only *after* variable declarations
238 
239 //****************************
240 // Memory reads
241 //****************************
243 
244 FORCE_INLINE U32
245 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
246 {
247  if (align == XXH_unaligned)
248  return endian == XXH_littleEndian ? A32(ptr) : XXH_swap32(A32(ptr));
249  else
250  return endian == XXH_littleEndian ? *(U32*)ptr : XXH_swap32(*(U32*)ptr);
251 }
252 
253 FORCE_INLINE U32
254 XXH_readLE32(const void* ptr, XXH_endianess endian)
255 {
256  return XXH_readLE32_align(ptr, endian, XXH_unaligned);
257 }
258 
259 FORCE_INLINE U64
260 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
261 {
262  if (align == XXH_unaligned)
263  return endian == XXH_littleEndian ? A64(ptr) : XXH_swap64(A64(ptr));
264  else
265  return endian == XXH_littleEndian ? *(U64*)ptr : XXH_swap64(*(U64*)ptr);
266 }
267 
268 FORCE_INLINE U64
269 XXH_readLE64(const void* ptr, XXH_endianess endian)
270 {
271  return XXH_readLE64_align(ptr, endian, XXH_unaligned);
272 }
273 
274 //****************************
275 // Simple Hash Functions
276 //****************************
277 FORCE_INLINE U32
279  const void* input,
280  size_t len,
281  U32 seed,
282  XXH_endianess endian,
283  XXH_alignment align)
284 {
285  const BYTE* p = (const BYTE*)input;
286  const BYTE* bEnd = p + len;
287  U32 h32;
288 #define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
289 
290 #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
291  if (p == NULL)
292  {
293  len = 0;
294  bEnd = p = (const BYTE*)(size_t)16;
295  }
296 #endif
297 
298  if (len >= 16)
299  {
300  const BYTE* const limit = bEnd - 16;
301  U32 v1 = seed + PRIME32_1 + PRIME32_2;
302  U32 v2 = seed + PRIME32_2;
303  U32 v3 = seed + 0;
304  U32 v4 = seed - PRIME32_1;
305 
306  do
307  {
308  v1 += XXH_get32bits(p) * PRIME32_2;
309  v1 = XXH_rotl32(v1, 13);
310  v1 *= PRIME32_1;
311  p += 4;
312  v2 += XXH_get32bits(p) * PRIME32_2;
313  v2 = XXH_rotl32(v2, 13);
314  v2 *= PRIME32_1;
315  p += 4;
316  v3 += XXH_get32bits(p) * PRIME32_2;
317  v3 = XXH_rotl32(v3, 13);
318  v3 *= PRIME32_1;
319  p += 4;
320  v4 += XXH_get32bits(p) * PRIME32_2;
321  v4 = XXH_rotl32(v4, 13);
322  v4 *= PRIME32_1;
323  p += 4;
324  } while (p <= limit);
325 
326  h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) +
327  XXH_rotl32(v4, 18);
328  }
329  else
330  {
331  h32 = seed + PRIME32_5;
332  }
333 
334  h32 += (U32)len;
335 
336  while (p + 4 <= bEnd)
337  {
338  h32 += XXH_get32bits(p) * PRIME32_3;
339  h32 = XXH_rotl32(h32, 17) * PRIME32_4;
340  p += 4;
341  }
342 
343  while (p < bEnd)
344  {
345  h32 += (*p) * PRIME32_5;
346  h32 = XXH_rotl32(h32, 11) * PRIME32_1;
347  p++;
348  }
349 
350  h32 ^= h32 >> 15;
351  h32 *= PRIME32_2;
352  h32 ^= h32 >> 13;
353  h32 *= PRIME32_3;
354  h32 ^= h32 >> 16;
355 
356  return h32;
357 }
358 
359 unsigned int
360 XXH32(const void* input, size_t len, unsigned seed)
361 {
362 #if 0
363  // Simple version, good for code maintenance, but unfortunately slow for small inputs
364  XXH32_state_t state;
365  XXH32_reset(&state, seed);
366  XXH32_update(&state, input, len);
367  return XXH32_digest(&state);
368 #else
369  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
370 
371 #if !defined(XXH_USE_UNALIGNED_ACCESS)
372  if ((((size_t)input) & 3) ==
373  0) // Input is aligned, let's leverage the speed advantage
374  {
375  if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
376  return XXH32_endian_align(
377  input, len, seed, XXH_littleEndian, XXH_aligned);
378  else
379  return XXH32_endian_align(
380  input, len, seed, XXH_bigEndian, XXH_aligned);
381  }
382 #endif
383 
384  if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
385  return XXH32_endian_align(
386  input, len, seed, XXH_littleEndian, XXH_unaligned);
387  else
388  return XXH32_endian_align(
389  input, len, seed, XXH_bigEndian, XXH_unaligned);
390 #endif
391 }
392 
393 FORCE_INLINE U64
395  const void* input,
396  size_t len,
397  U64 seed,
398  XXH_endianess endian,
399  XXH_alignment align)
400 {
401  const BYTE* p = (const BYTE*)input;
402  const BYTE* bEnd = p + len;
403  U64 h64;
404 #define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
405 
406 #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
407  if (p == NULL)
408  {
409  len = 0;
410  bEnd = p = (const BYTE*)(size_t)32;
411  }
412 #endif
413 
414  if (len >= 32)
415  {
416  const BYTE* const limit = bEnd - 32;
417  U64 v1 = seed + PRIME64_1 + PRIME64_2;
418  U64 v2 = seed + PRIME64_2;
419  U64 v3 = seed + 0;
420  U64 v4 = seed - PRIME64_1;
421 
422  do
423  {
424  v1 += XXH_get64bits(p) * PRIME64_2;
425  p += 8;
426  v1 = XXH_rotl64(v1, 31);
427  v1 *= PRIME64_1;
428  v2 += XXH_get64bits(p) * PRIME64_2;
429  p += 8;
430  v2 = XXH_rotl64(v2, 31);
431  v2 *= PRIME64_1;
432  v3 += XXH_get64bits(p) * PRIME64_2;
433  p += 8;
434  v3 = XXH_rotl64(v3, 31);
435  v3 *= PRIME64_1;
436  v4 += XXH_get64bits(p) * PRIME64_2;
437  p += 8;
438  v4 = XXH_rotl64(v4, 31);
439  v4 *= PRIME64_1;
440  } while (p <= limit);
441 
442  h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) +
443  XXH_rotl64(v4, 18);
444 
445  v1 *= PRIME64_2;
446  v1 = XXH_rotl64(v1, 31);
447  v1 *= PRIME64_1;
448  h64 ^= v1;
449  h64 = h64 * PRIME64_1 + PRIME64_4;
450 
451  v2 *= PRIME64_2;
452  v2 = XXH_rotl64(v2, 31);
453  v2 *= PRIME64_1;
454  h64 ^= v2;
455  h64 = h64 * PRIME64_1 + PRIME64_4;
456 
457  v3 *= PRIME64_2;
458  v3 = XXH_rotl64(v3, 31);
459  v3 *= PRIME64_1;
460  h64 ^= v3;
461  h64 = h64 * PRIME64_1 + PRIME64_4;
462 
463  v4 *= PRIME64_2;
464  v4 = XXH_rotl64(v4, 31);
465  v4 *= PRIME64_1;
466  h64 ^= v4;
467  h64 = h64 * PRIME64_1 + PRIME64_4;
468  }
469  else
470  {
471  h64 = seed + PRIME64_5;
472  }
473 
474  h64 += (U64)len;
475 
476  while (p + 8 <= bEnd)
477  {
478  U64 k1 = XXH_get64bits(p);
479  k1 *= PRIME64_2;
480  k1 = XXH_rotl64(k1, 31);
481  k1 *= PRIME64_1;
482  h64 ^= k1;
483  h64 = XXH_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
484  p += 8;
485  }
486 
487  if (p + 4 <= bEnd)
488  {
489  h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
490  h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
491  p += 4;
492  }
493 
494  while (p < bEnd)
495  {
496  h64 ^= (*p) * PRIME64_5;
497  h64 = XXH_rotl64(h64, 11) * PRIME64_1;
498  p++;
499  }
500 
501  h64 ^= h64 >> 33;
502  h64 *= PRIME64_2;
503  h64 ^= h64 >> 29;
504  h64 *= PRIME64_3;
505  h64 ^= h64 >> 32;
506 
507  return h64;
508 }
509 
510 unsigned long long
511 XXH64(const void* input, size_t len, unsigned long long seed)
512 {
513 #if 0
514  // Simple version, good for code maintenance, but unfortunately slow for small inputs
515  XXH64_state_t state;
516  XXH64_reset(&state, seed);
517  XXH64_update(&state, input, len);
518  return XXH64_digest(&state);
519 #else
520  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
521 
522 #if !defined(XXH_USE_UNALIGNED_ACCESS)
523  if ((((size_t)input) & 7) ==
524  0) // Input is aligned, let's leverage the speed advantage
525  {
526  if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
527  return XXH64_endian_align(
528  input, len, seed, XXH_littleEndian, XXH_aligned);
529  else
530  return XXH64_endian_align(
531  input, len, seed, XXH_bigEndian, XXH_aligned);
532  }
533 #endif
534 
535  if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
536  return XXH64_endian_align(
537  input, len, seed, XXH_littleEndian, XXH_unaligned);
538  else
539  return XXH64_endian_align(
540  input, len, seed, XXH_bigEndian, XXH_unaligned);
541 #endif
542 }
543 
544 /****************************************************
545  * Advanced Hash Functions
546  ****************************************************/
547 
548 /*** Allocation ***/
549 typedef struct
550 {
551  U64 total_len;
552  U32 seed;
553  U32 v1;
554  U32 v2;
555  U32 v3;
556  U32 v4;
557  U32 mem32[4]; /* defined as U32 for alignment */
558  U32 memsize;
560 
561 typedef struct
562 {
564  U64 seed;
565  U64 v1;
566  U64 v2;
567  U64 v3;
568  U64 v4;
569  U64 mem64[4]; /* defined as U64 for alignment */
570  U32 memsize;
572 
575 {
576  static_assert(
577  sizeof(XXH32_state_t) >= sizeof(XXH_istate32_t),
578  ""); // A compilation error here means XXH32_state_t is not large
579  // enough
580  return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
581 }
584 {
585  XXH_free(statePtr);
586  return XXH_OK;
587 };
588 
589 XXH64_state_t*
591 {
592  static_assert(
593  sizeof(XXH64_state_t) >= sizeof(XXH_istate64_t),
594  ""); // A compilation error here means XXH64_state_t is not large
595  // enough
596  return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
597 }
600 {
601  XXH_free(statePtr);
602  return XXH_OK;
603 };
604 
605 /*** Hash feed ***/
606 
608 XXH32_reset(XXH32_state_t* state_in, U32 seed)
609 {
610  XXH_istate32_t* state = (XXH_istate32_t*)state_in;
611  state->seed = seed;
612  state->v1 = seed + PRIME32_1 + PRIME32_2;
613  state->v2 = seed + PRIME32_2;
614  state->v3 = seed + 0;
615  state->v4 = seed - PRIME32_1;
616  state->total_len = 0;
617  state->memsize = 0;
618  return XXH_OK;
619 }
620 
622 XXH64_reset(XXH64_state_t* state_in, unsigned long long seed)
623 {
624  XXH_istate64_t* state = (XXH_istate64_t*)state_in;
625  state->seed = seed;
626  state->v1 = seed + PRIME64_1 + PRIME64_2;
627  state->v2 = seed + PRIME64_2;
628  state->v3 = seed + 0;
629  state->v4 = seed - PRIME64_1;
630  state->total_len = 0;
631  state->memsize = 0;
632  return XXH_OK;
633 }
634 
635 FORCE_INLINE XXH_errorcode
637  XXH32_state_t* state_in,
638  const void* input,
639  size_t len,
640  XXH_endianess endian)
641 {
642  XXH_istate32_t* state = (XXH_istate32_t*)state_in;
643  const BYTE* p = (const BYTE*)input;
644  const BYTE* const bEnd = p + len;
645 
646 #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
647  if (input == NULL)
648  return XXH_ERROR;
649 #endif
650 
651  state->total_len += len;
652 
653  if (state->memsize + len < 16) // fill in tmp buffer
654  {
655  XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
656  state->memsize += (U32)len;
657  return XXH_OK;
658  }
659 
660  if (state->memsize) // some data left from previous update
661  {
662  XXH_memcpy(
663  (BYTE*)(state->mem32) + state->memsize, input, 16 - state->memsize);
664  {
665  const U32* p32 = state->mem32;
666  state->v1 += XXH_readLE32(p32, endian) * PRIME32_2;
667  state->v1 = XXH_rotl32(state->v1, 13);
668  state->v1 *= PRIME32_1;
669  p32++;
670  state->v2 += XXH_readLE32(p32, endian) * PRIME32_2;
671  state->v2 = XXH_rotl32(state->v2, 13);
672  state->v2 *= PRIME32_1;
673  p32++;
674  state->v3 += XXH_readLE32(p32, endian) * PRIME32_2;
675  state->v3 = XXH_rotl32(state->v3, 13);
676  state->v3 *= PRIME32_1;
677  p32++;
678  state->v4 += XXH_readLE32(p32, endian) * PRIME32_2;
679  state->v4 = XXH_rotl32(state->v4, 13);
680  state->v4 *= PRIME32_1;
681  p32++;
682  }
683  p += 16 - state->memsize;
684  state->memsize = 0;
685  }
686 
687  if (p <= bEnd - 16)
688  {
689  const BYTE* const limit = bEnd - 16;
690  U32 v1 = state->v1;
691  U32 v2 = state->v2;
692  U32 v3 = state->v3;
693  U32 v4 = state->v4;
694 
695  do
696  {
697  v1 += XXH_readLE32(p, endian) * PRIME32_2;
698  v1 = XXH_rotl32(v1, 13);
699  v1 *= PRIME32_1;
700  p += 4;
701  v2 += XXH_readLE32(p, endian) * PRIME32_2;
702  v2 = XXH_rotl32(v2, 13);
703  v2 *= PRIME32_1;
704  p += 4;
705  v3 += XXH_readLE32(p, endian) * PRIME32_2;
706  v3 = XXH_rotl32(v3, 13);
707  v3 *= PRIME32_1;
708  p += 4;
709  v4 += XXH_readLE32(p, endian) * PRIME32_2;
710  v4 = XXH_rotl32(v4, 13);
711  v4 *= PRIME32_1;
712  p += 4;
713  } while (p <= limit);
714 
715  state->v1 = v1;
716  state->v2 = v2;
717  state->v3 = v3;
718  state->v4 = v4;
719  }
720 
721  if (p < bEnd)
722  {
723  XXH_memcpy(state->mem32, p, bEnd - p);
724  state->memsize = (int)(bEnd - p);
725  }
726 
727  return XXH_OK;
728 }
729 
731 XXH32_update(XXH32_state_t* state_in, const void* input, size_t len)
732 {
733  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
734 
735  if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
736  return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
737  else
738  return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
739 }
740 
741 FORCE_INLINE U32
743 {
744  XXH_istate32_t* state = (XXH_istate32_t*)state_in;
745  const BYTE* p = (const BYTE*)state->mem32;
746  BYTE* bEnd = (BYTE*)(state->mem32) + state->memsize;
747  U32 h32;
748 
749  if (state->total_len >= 16)
750  {
751  h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) +
752  XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
753  }
754  else
755  {
756  h32 = state->seed + PRIME32_5;
757  }
758 
759  h32 += (U32)state->total_len;
760 
761  while (p + 4 <= bEnd)
762  {
763  h32 += XXH_readLE32(p, endian) * PRIME32_3;
764  h32 = XXH_rotl32(h32, 17) * PRIME32_4;
765  p += 4;
766  }
767 
768  while (p < bEnd)
769  {
770  h32 += (*p) * PRIME32_5;
771  h32 = XXH_rotl32(h32, 11) * PRIME32_1;
772  p++;
773  }
774 
775  h32 ^= h32 >> 15;
776  h32 *= PRIME32_2;
777  h32 ^= h32 >> 13;
778  h32 *= PRIME32_3;
779  h32 ^= h32 >> 16;
780 
781  return h32;
782 }
783 
784 U32
785 XXH32_digest(const XXH32_state_t* state_in)
786 {
787  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
788 
789  if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
790  return XXH32_digest_endian(state_in, XXH_littleEndian);
791  else
792  return XXH32_digest_endian(state_in, XXH_bigEndian);
793 }
794 
795 FORCE_INLINE XXH_errorcode
797  XXH64_state_t* state_in,
798  const void* input,
799  size_t len,
800  XXH_endianess endian)
801 {
802  XXH_istate64_t* state = (XXH_istate64_t*)state_in;
803  const BYTE* p = (const BYTE*)input;
804  const BYTE* const bEnd = p + len;
805 
806 #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
807  if (input == NULL)
808  return XXH_ERROR;
809 #endif
810 
811  state->total_len += len;
812 
813  if (state->memsize + len < 32) // fill in tmp buffer
814  {
815  XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
816  state->memsize += (U32)len;
817  return XXH_OK;
818  }
819 
820  if (state->memsize) // some data left from previous update
821  {
822  XXH_memcpy(
823  ((BYTE*)state->mem64) + state->memsize, input, 32 - state->memsize);
824  {
825  const U64* p64 = state->mem64;
826  state->v1 += XXH_readLE64(p64, endian) * PRIME64_2;
827  state->v1 = XXH_rotl64(state->v1, 31);
828  state->v1 *= PRIME64_1;
829  p64++;
830  state->v2 += XXH_readLE64(p64, endian) * PRIME64_2;
831  state->v2 = XXH_rotl64(state->v2, 31);
832  state->v2 *= PRIME64_1;
833  p64++;
834  state->v3 += XXH_readLE64(p64, endian) * PRIME64_2;
835  state->v3 = XXH_rotl64(state->v3, 31);
836  state->v3 *= PRIME64_1;
837  p64++;
838  state->v4 += XXH_readLE64(p64, endian) * PRIME64_2;
839  state->v4 = XXH_rotl64(state->v4, 31);
840  state->v4 *= PRIME64_1;
841  p64++;
842  }
843  p += 32 - state->memsize;
844  state->memsize = 0;
845  }
846 
847  if (p + 32 <= bEnd)
848  {
849  const BYTE* const limit = bEnd - 32;
850  U64 v1 = state->v1;
851  U64 v2 = state->v2;
852  U64 v3 = state->v3;
853  U64 v4 = state->v4;
854 
855  do
856  {
857  v1 += XXH_readLE64(p, endian) * PRIME64_2;
858  v1 = XXH_rotl64(v1, 31);
859  v1 *= PRIME64_1;
860  p += 8;
861  v2 += XXH_readLE64(p, endian) * PRIME64_2;
862  v2 = XXH_rotl64(v2, 31);
863  v2 *= PRIME64_1;
864  p += 8;
865  v3 += XXH_readLE64(p, endian) * PRIME64_2;
866  v3 = XXH_rotl64(v3, 31);
867  v3 *= PRIME64_1;
868  p += 8;
869  v4 += XXH_readLE64(p, endian) * PRIME64_2;
870  v4 = XXH_rotl64(v4, 31);
871  v4 *= PRIME64_1;
872  p += 8;
873  } while (p <= limit);
874 
875  state->v1 = v1;
876  state->v2 = v2;
877  state->v3 = v3;
878  state->v4 = v4;
879  }
880 
881  if (p < bEnd)
882  {
883  XXH_memcpy(state->mem64, p, bEnd - p);
884  state->memsize = (int)(bEnd - p);
885  }
886 
887  return XXH_OK;
888 }
889 
891 XXH64_update(XXH64_state_t* state_in, const void* input, size_t len)
892 {
893  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
894 
895  if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
896  return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
897  else
898  return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
899 }
900 
901 FORCE_INLINE U64
903 {
904  XXH_istate64_t* state = (XXH_istate64_t*)state_in;
905  const BYTE* p = (const BYTE*)state->mem64;
906  BYTE* bEnd = (BYTE*)state->mem64 + state->memsize;
907  U64 h64;
908 
909  if (state->total_len >= 32)
910  {
911  U64 v1 = state->v1;
912  U64 v2 = state->v2;
913  U64 v3 = state->v3;
914  U64 v4 = state->v4;
915 
916  h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) +
917  XXH_rotl64(v4, 18);
918 
919  v1 *= PRIME64_2;
920  v1 = XXH_rotl64(v1, 31);
921  v1 *= PRIME64_1;
922  h64 ^= v1;
923  h64 = h64 * PRIME64_1 + PRIME64_4;
924 
925  v2 *= PRIME64_2;
926  v2 = XXH_rotl64(v2, 31);
927  v2 *= PRIME64_1;
928  h64 ^= v2;
929  h64 = h64 * PRIME64_1 + PRIME64_4;
930 
931  v3 *= PRIME64_2;
932  v3 = XXH_rotl64(v3, 31);
933  v3 *= PRIME64_1;
934  h64 ^= v3;
935  h64 = h64 * PRIME64_1 + PRIME64_4;
936 
937  v4 *= PRIME64_2;
938  v4 = XXH_rotl64(v4, 31);
939  v4 *= PRIME64_1;
940  h64 ^= v4;
941  h64 = h64 * PRIME64_1 + PRIME64_4;
942  }
943  else
944  {
945  h64 = state->seed + PRIME64_5;
946  }
947 
948  h64 += (U64)state->total_len;
949 
950  while (p + 8 <= bEnd)
951  {
952  U64 k1 = XXH_readLE64(p, endian);
953  k1 *= PRIME64_2;
954  k1 = XXH_rotl64(k1, 31);
955  k1 *= PRIME64_1;
956  h64 ^= k1;
957  h64 = XXH_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
958  p += 8;
959  }
960 
961  if (p + 4 <= bEnd)
962  {
963  h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
964  h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
965  p += 4;
966  }
967 
968  while (p < bEnd)
969  {
970  h64 ^= (*p) * PRIME64_5;
971  h64 = XXH_rotl64(h64, 11) * PRIME64_1;
972  p++;
973  }
974 
975  h64 ^= h64 >> 33;
976  h64 *= PRIME64_2;
977  h64 ^= h64 >> 29;
978  h64 *= PRIME64_3;
979  h64 ^= h64 >> 32;
980 
981  return h64;
982 }
983 
984 unsigned long long
985 XXH64_digest(const XXH64_state_t* state_in)
986 {
987  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
988 
989  if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
990  return XXH64_digest_endian(state_in, XXH_littleEndian);
991  else
992  return XXH64_digest_endian(state_in, XXH_bigEndian);
993 }
994 
995 } // namespace detail
996 } // namespace beast
beast::detail::one
static const int one
Definition: xxhash.cpp:227
beast::detail::XXH_istate32_t
Definition: xxhash.cpp:549
beast::detail::XXH32_freeState
XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr)
Definition: xxhash.cpp:583
beast::detail::XXH_istate64_t::memsize
U32 memsize
Definition: xxhash.cpp:570
beast::detail::_U64_S
Definition: xxhash.cpp:151
beast::detail::XXH_littleEndian
@ XXH_littleEndian
Definition: xxhash.cpp:223
beast::detail::XXH_readLE64_align
FORCE_INLINE U64 XXH_readLE64_align(const void *ptr, XXH_endianess endian, XXH_alignment align)
Definition: xxhash.cpp:260
beast::detail::XXH_istate32_t::v3
U32 v3
Definition: xxhash.cpp:557
beast::detail::XXH_istate32_t::v2
U32 v2
Definition: xxhash.cpp:556
beast::detail::XXH_errorcode
XXH_errorcode
Definition: xxhash.h:79
beast::detail::XXH_alignment
XXH_alignment
Definition: xxhash.cpp:242
beast::detail::XXH64_update
XXH_errorcode XXH64_update(XXH64_state_t *state_in, const void *input, size_t len)
Definition: xxhash.cpp:891
beast::detail::XXH32_digest
U32 XXH32_digest(const XXH32_state_t *state_in)
Definition: xxhash.cpp:785
beast::detail::XXH64_digest_endian
FORCE_INLINE U64 XXH64_digest_endian(const XXH64_state_t *state_in, XXH_endianess endian)
Definition: xxhash.cpp:902
beast::detail::XXH64_update_endian
FORCE_INLINE XXH_errorcode XXH64_update_endian(XXH64_state_t *state_in, const void *input, size_t len, XXH_endianess endian)
Definition: xxhash.cpp:796
beast::detail::XXH_endianess
XXH_endianess
Definition: xxhash.cpp:223
beast::detail::XXH_istate32_t::v1
U32 v1
Definition: xxhash.cpp:555
beast::detail::XXH_istate32_t::v4
U32 v4
Definition: xxhash.cpp:558
beast::detail::XXH_readLE32_align
FORCE_INLINE U32 XXH_readLE32_align(const void *ptr, XXH_endianess endian, XXH_alignment align)
Definition: xxhash.cpp:245
beast::detail::XXH_readLE32
FORCE_INLINE U32 XXH_readLE32(const void *ptr, XXH_endianess endian)
Definition: xxhash.cpp:254
beast::detail::XXH32_endian_align
FORCE_INLINE U32 XXH32_endian_align(const void *input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
Definition: xxhash.cpp:278
beast::detail::XXH64_reset
XXH_errorcode XXH64_reset(XXH64_state_t *state_in, unsigned long long seed)
Definition: xxhash.cpp:622
beast::detail::XXH_OK
@ XXH_OK
Definition: xxhash.h:83
beast::detail::XXH64_digest
unsigned long long XXH64_digest(const XXH64_state_t *state_in)
Definition: xxhash.cpp:985
beast::detail::XXH64_freeState
XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr)
Definition: xxhash.cpp:599
beast::detail::XXH32_update
XXH_errorcode XXH32_update(XXH32_state_t *state_in, const void *input, size_t len)
Definition: xxhash.cpp:731
beast::detail::XXH_istate32_t::total_len
U64 total_len
Definition: xxhash.cpp:553
beast::detail::XXH_aligned
@ XXH_aligned
Definition: xxhash.cpp:242
beast::detail::XXH_istate32_t::mem32
U32 mem32[4]
Definition: xxhash.cpp:559
beast::detail::XXH_ERROR
@ XXH_ERROR
Definition: xxhash.h:83
beast::detail::XXH64_endian_align
FORCE_INLINE U64 XXH64_endian_align(const void *input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
Definition: xxhash.cpp:394
beast::detail::XXH_unaligned
@ XXH_unaligned
Definition: xxhash.cpp:242
beast::detail::XXH_istate64_t::v2
U64 v2
Definition: xxhash.cpp:566
beast::detail::XXH_istate64_t::total_len
U64 total_len
Definition: xxhash.cpp:563
beast::detail::XXH_istate64_t::seed
U64 seed
Definition: xxhash.cpp:564
beast::detail::U64_S
struct beast::detail::_U64_S U64_S
beast::detail::XXH_istate64_t
Definition: xxhash.cpp:561
beast::detail::XXH32_reset
XXH_errorcode XXH32_reset(XXH32_state_t *state_in, U32 seed)
Definition: xxhash.cpp:608
beast::detail::U32_S
struct beast::detail::_U32_S U32_S
beast::detail::XXH32_state_t
Definition: xxhash.h:103
beast::detail::XXH_istate64_t::v4
U64 v4
Definition: xxhash.cpp:568
beast::detail::XXH_istate64_t::v3
U64 v3
Definition: xxhash.cpp:567
beast::detail::XXH_readLE64
FORCE_INLINE U64 XXH_readLE64(const void *ptr, XXH_endianess endian)
Definition: xxhash.cpp:269
beast::detail::XXH_istate32_t::memsize
U32 memsize
Definition: xxhash.cpp:560
beast::detail::XXH_istate64_t::mem64
U64 mem64[4]
Definition: xxhash.cpp:569
std::free
T free(T... args)
beast::detail::XXH_bigEndian
@ XXH_bigEndian
Definition: xxhash.cpp:223
beast::detail::XXH_istate32_t::seed
U32 seed
Definition: xxhash.cpp:554
beast::detail::_U32_S::v
U32 v
Definition: xxhash.cpp:149
std::memcpy
T memcpy(T... args)
std::malloc
T malloc(T... args)
beast::detail::XXH32
unsigned int XXH32(const void *input, size_t len, unsigned seed)
Definition: xxhash.cpp:360
beast::detail::_U32_S
Definition: xxhash.cpp:147
beast::detail::XXH64_createState
XXH64_state_t * XXH64_createState(void)
Definition: xxhash.cpp:590
beast::detail::XXH32_createState
XXH32_state_t * XXH32_createState(void)
Definition: xxhash.cpp:574
beast::detail::XXH_istate64_t::v1
U64 v1
Definition: xxhash.cpp:565
beast::detail::_U64_S::v
U64 v
Definition: xxhash.cpp:153
beast::detail::XXH32_digest_endian
FORCE_INLINE U32 XXH32_digest_endian(const XXH32_state_t *state_in, XXH_endianess endian)
Definition: xxhash.cpp:742
beast::detail::XXH64
unsigned long long XXH64(const void *input, size_t len, unsigned long long seed)
Definition: xxhash.cpp:511
beast::detail::XXH64_state_t
Definition: xxhash.h:107
beast::detail::XXH32_update_endian
FORCE_INLINE XXH_errorcode XXH32_update_endian(XXH32_state_t *state_in, const void *input, size_t len, XXH_endianess endian)
Definition: xxhash.cpp:636
beast
Definition: base_uint.h:677