rippled
xxhash.cpp
1 /*
2 xxHash - Fast Hash algorithm
3 Copyright (C) 2012-2014, Yann Collet.
4 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are
8 met:
9 
10 * Redistributions of source code must retain the above copyright
11 notice, this list of conditions and the following disclaimer.
12 * Redistributions in binary form must reproduce the above
13 copyright notice, this list of conditions and the following disclaimer
14 in the documentation and/or other materials provided with the
15 distribution.
16 
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 
29 You can contact the author at :
30 - xxHash source repository : http://code.google.com/p/xxhash/
31 - public discussion board : https://groups.google.com/forum/#!forum/lz4c
32 */
33 
34 #include <ripple/beast/hash/impl/xxhash.h>
35 
36 //**************************************
37 // Tuning parameters
38 //**************************************
39 // Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
40 // For other CPUs, the compiler will be more cautious and insert extra code to ensure aligned access is respected.
41 // If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance.
42 // You can also enable this parameter if you know your input data will always be aligned (on boundaries of 4 bytes, for U32).
43 #if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
44 # define XXH_USE_UNALIGNED_ACCESS 1
45 #endif
46 
47 // XXH_ACCEPT_NULL_INPUT_POINTER :
48 // If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
49 // When this option is enabled, xxHash's output for a null input pointer is the same as for a zero-length input.
50 // This option has a very small performance cost (only measurable on small inputs).
51 // By default, this option is disabled. To enable it, uncomment the define below:
52 // #define XXH_ACCEPT_NULL_INPUT_POINTER 1
53 
54 // XXH_FORCE_NATIVE_FORMAT :
55 // By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
56 // Results are therefore identical for little-endian and big-endian CPUs.
57 // This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
58 // Should endian-independence be of no importance for your application, you may set the #define below to 1.
59 // It will improve speed for big-endian CPUs.
60 // This option has no impact on little-endian CPUs.
61 #define XXH_FORCE_NATIVE_FORMAT 0
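// Illustrative note (assumption, not from the original sources): options checked with
// #ifdef, such as XXH_ACCEPT_NULL_INPUT_POINTER, can also be supplied by the build
// instead of editing this file, e.g. with a hypothetical compiler invocation like
//
//   g++ -DXXH_ACCEPT_NULL_INPUT_POINTER=1 -c xxhash.cpp
//
// XXH_FORCE_NATIVE_FORMAT, by contrast, is defined unconditionally above and has to be
// changed in place.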
62 
63 //**************************************
64 // Compiler Specific Options
65 //**************************************
66 // Disable some Visual warning messages
67 #ifdef _MSC_VER // Visual Studio
68 # pragma warning(disable : 4127) // disable: C4127: conditional expression is constant
69 #endif
70 
71 #ifdef _MSC_VER // Visual Studio
72 # define FORCE_INLINE static __forceinline
73 #else
74 # ifdef __GNUC__
75 # define FORCE_INLINE static inline __attribute__((always_inline))
76 # else
77 # define FORCE_INLINE static inline
78 # endif
79 #endif
80 
81 //**************************************
82 // Includes & Memory related functions
83 //**************************************
84 //#include "xxhash.h"
85 // Modify the local functions below should you wish to use some other memory routines
86 // for malloc(), free()
87 #include <stdlib.h>
88 static void* XXH_malloc(size_t s) { return malloc(s); }
89 static void XXH_free (void* p) { free(p); }
90 // for memcpy()
91 #include <string.h>
92 static void* XXH_memcpy(void* dest, const void* src, size_t size)
93 {
94  return memcpy(dest,src,size);
95 }
96 
97 
98 //**************************************
99 // Basic Types
100 //**************************************
101 #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
102 # include <stdint.h>
103 typedef uint8_t BYTE;
104 typedef uint16_t U16;
105 typedef uint32_t U32;
106 typedef int32_t S32;
107 typedef uint64_t U64;
108 #else
109 typedef unsigned char BYTE;
110 typedef unsigned short U16;
111 typedef unsigned int U32;
112 typedef signed int S32;
113 typedef unsigned long long U64;
114 #endif
115 
116 #if defined(__GNUC__) && !defined(XXH_USE_UNALIGNED_ACCESS)
117 # define _PACKED __attribute__ ((packed))
118 #else
119 # define _PACKED
120 #endif
121 
122 #if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
123 # ifdef __IBMC__
124 # pragma pack(1)
125 # else
126 # pragma pack(push, 1)
127 # endif
128 #endif
129 
130 namespace beast {
131 namespace detail {
132 
133 typedef struct _U32_S
134 {
135  U32 v;
136 } _PACKED U32_S;
137 typedef struct _U64_S
138 {
139  U64 v;
140 } _PACKED U64_S;
141 
142 #if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
143 # pragma pack(pop)
144 #endif
145 
146 #define A32(x) (((U32_S *)(x))->v)
147 #define A64(x) (((U64_S *)(x))->v)
148 
149 
150 //***************************************
151 // Compiler-specific Functions and Macros
152 //***************************************
153 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
154 
155 // Note: although _rotl exists for MinGW (GCC under Windows), its performance seems poor
156 #if defined(_MSC_VER)
157 # define XXH_rotl32(x,r) _rotl(x,r)
158 # define XXH_rotl64(x,r) _rotl64(x,r)
159 #else
160 # define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
161 # define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
162 #endif
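// Worked example (illustrative, not from the original sources): XXH_rotl32 is a left
// rotation, so the bit shifted out at the top re-enters at the bottom, e.g.
//
//   XXH_rotl32(0x80000001u, 1) == 0x00000003u   // bit 31 wraps around to bit 0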
163 
164 #if defined(_MSC_VER) // Visual Studio
165 # define XXH_swap32 _byteswap_ulong
166 # define XXH_swap64 _byteswap_uint64
167 #elif GCC_VERSION >= 403
168 # define XXH_swap32 __builtin_bswap32
169 # define XXH_swap64 __builtin_bswap64
170 #else
171 static inline U32 XXH_swap32 (U32 x)
172 {
173  return ((x << 24) & 0xff000000 ) |
174  ((x << 8) & 0x00ff0000 ) |
175  ((x >> 8) & 0x0000ff00 ) |
176  ((x >> 24) & 0x000000ff );
177 }
178 static inline U64 XXH_swap64 (U64 x)
179 {
180  return ((x << 56) & 0xff00000000000000ULL) |
181  ((x << 40) & 0x00ff000000000000ULL) |
182  ((x << 24) & 0x0000ff0000000000ULL) |
183  ((x << 8) & 0x000000ff00000000ULL) |
184  ((x >> 8) & 0x00000000ff000000ULL) |
185  ((x >> 24) & 0x0000000000ff0000ULL) |
186  ((x >> 40) & 0x000000000000ff00ULL) |
187  ((x >> 56) & 0x00000000000000ffULL);
188 }
189 #endif
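// Worked example (illustrative, not from the original sources): XXH_swap32 reverses the
// byte order of a 32-bit word, which is how reads on big-endian CPUs are converted to the
// little-endian convention used by xxHash, e.g.
//
//   XXH_swap32(0x01020304u) == 0x04030201u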
190 
191 
192 //**************************************
193 // Constants
194 //**************************************
195 #define PRIME32_1 2654435761U
196 #define PRIME32_2 2246822519U
197 #define PRIME32_3 3266489917U
198 #define PRIME32_4 668265263U
199 #define PRIME32_5 374761393U
200 
201 #define PRIME64_1 11400714785074694791ULL
202 #define PRIME64_2 14029467366897019727ULL
203 #define PRIME64_3 1609587929392839161ULL
204 #define PRIME64_4 9650029242287828579ULL
205 #define PRIME64_5 2870177450012600261ULL
206 
207 //**************************************
208 // Architecture Macros
209 //**************************************
210 typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
211 #ifndef XXH_CPU_LITTLE_ENDIAN // It is possible to define XXH_CPU_LITTLE_ENDIAN externally, for example using a compiler switch
212 static const int one = 1;
213 # define XXH_CPU_LITTLE_ENDIAN (*(char*)(&one))
214 #endif
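// Illustrative note (not from the original sources): the detection above inspects the
// first byte of the integer constant 'one'. A little-endian CPU stores the least
// significant byte first, so XXH_CPU_LITTLE_ENDIAN evaluates to 1; a big-endian CPU
// stores it last, so the expression evaluates to 0.
//
//   // bytes of one == 1 in memory:  little-endian: 01 00 00 00   big-endian: 00 00 00 01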
215 
216 
217 //**************************************
218 // Macros
219 //**************************************
220 #define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(!!(c)) }; } // use only *after* variable declarations
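// Illustrative use (assumption, not from the original sources): a violated condition makes
// the enum initializer divide by zero, turning the check into a compile-time error. For
// example, inside a function body one could write:
//
//   XXH_STATIC_ASSERT(sizeof(U64) == 8);   // compiles only if U64 really is 8 bytes wide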
221 
222 
223 //****************************
224 // Memory reads
225 //****************************
226 typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
227 
228 FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
229 {
230  if (align==XXH_unaligned)
231  return endian==XXH_littleEndian ? A32(ptr) : XXH_swap32(A32(ptr));
232  else
233  return endian==XXH_littleEndian ? *(U32*)ptr : XXH_swap32(*(U32*)ptr);
234 }
235 
236 FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
237 {
238  return XXH_readLE32_align(ptr, endian, XXH_unaligned);
239 }
240 
241 FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
242 {
243  if (align==XXH_unaligned)
244  return endian==XXH_littleEndian ? A64(ptr) : XXH_swap64(A64(ptr));
245  else
246  return endian==XXH_littleEndian ? *(U64*)ptr : XXH_swap64(*(U64*)ptr);
247 }
248 
249 FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
250 {
251  return XXH_readLE64_align(ptr, endian, XXH_unaligned);
252 }
253 
254 
255 //****************************
256 // Simple Hash Functions
257 //****************************
258 FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
259 {
260  const BYTE* p = (const BYTE*)input;
261  const BYTE* bEnd = p + len;
262  U32 h32;
263 #define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
264 
265 #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
266  if (p==NULL)
267  {
268  len=0;
269  bEnd=p=(const BYTE*)(size_t)16;
270  }
271 #endif
272 
273  if (len>=16)
274  {
275  const BYTE* const limit = bEnd - 16;
276  U32 v1 = seed + PRIME32_1 + PRIME32_2;
277  U32 v2 = seed + PRIME32_2;
278  U32 v3 = seed + 0;
279  U32 v4 = seed - PRIME32_1;
280 
281  do
282  {
283  v1 += XXH_get32bits(p) * PRIME32_2;
284  v1 = XXH_rotl32(v1, 13);
285  v1 *= PRIME32_1;
286  p+=4;
287  v2 += XXH_get32bits(p) * PRIME32_2;
288  v2 = XXH_rotl32(v2, 13);
289  v2 *= PRIME32_1;
290  p+=4;
291  v3 += XXH_get32bits(p) * PRIME32_2;
292  v3 = XXH_rotl32(v3, 13);
293  v3 *= PRIME32_1;
294  p+=4;
295  v4 += XXH_get32bits(p) * PRIME32_2;
296  v4 = XXH_rotl32(v4, 13);
297  v4 *= PRIME32_1;
298  p+=4;
299  }
300  while (p<=limit);
301 
302  h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
303  }
304  else
305  {
306  h32 = seed + PRIME32_5;
307  }
308 
309  h32 += (U32) len;
310 
311  while (p+4<=bEnd)
312  {
313  h32 += XXH_get32bits(p) * PRIME32_3;
314  h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
315  p+=4;
316  }
317 
318  while (p<bEnd)
319  {
320  h32 += (*p) * PRIME32_5;
321  h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
322  p++;
323  }
324 
325  h32 ^= h32 >> 15;
326  h32 *= PRIME32_2;
327  h32 ^= h32 >> 13;
328  h32 *= PRIME32_3;
329  h32 ^= h32 >> 16;
330 
331  return h32;
332 }
333 
334 
335 unsigned int XXH32 (const void* input, size_t len, unsigned seed)
336 {
337 #if 0
338  // Simple version, good for code maintenance, but unfortunately slow for small inputs
339  XXH32_state_t state;
340  XXH32_reset(&state, seed);
341  XXH32_update(&state, input, len);
342  return XXH32_digest(&state);
343 #else
344  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
345 
346 # if !defined(XXH_USE_UNALIGNED_ACCESS)
347  if ((((size_t)input) & 3) == 0) // Input is aligned, let's leverage the speed advantage
348  {
349  if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
350  return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
351  else
352  return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
353  }
354 # endif
355 
356  if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
357  return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
358  else
359  return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
360 #endif
361 }
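// Usage sketch (illustrative, not part of the original sources; buffer and seed below are
// hypothetical): hashing a short message in one shot with XXH32(). Equal input and seed
// always produce the same 32-bit value, independent of CPU endianness.
#if 0
    static const char msg[] = "hello, xxhash";
    unsigned int h32 = XXH32(msg, sizeof(msg) - 1, 0 /* seed */);
#endif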
362 
363 FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
364 {
365  const BYTE* p = (const BYTE*)input;
366  const BYTE* bEnd = p + len;
367  U64 h64;
368 #define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
369 
370 #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
371  if (p==NULL)
372  {
373  len=0;
374  bEnd=p=(const BYTE*)(size_t)32;
375  }
376 #endif
377 
378  if (len>=32)
379  {
380  const BYTE* const limit = bEnd - 32;
381  U64 v1 = seed + PRIME64_1 + PRIME64_2;
382  U64 v2 = seed + PRIME64_2;
383  U64 v3 = seed + 0;
384  U64 v4 = seed - PRIME64_1;
385 
386  do
387  {
388  v1 += XXH_get64bits(p) * PRIME64_2;
389  p+=8;
390  v1 = XXH_rotl64(v1, 31);
391  v1 *= PRIME64_1;
392  v2 += XXH_get64bits(p) * PRIME64_2;
393  p+=8;
394  v2 = XXH_rotl64(v2, 31);
395  v2 *= PRIME64_1;
396  v3 += XXH_get64bits(p) * PRIME64_2;
397  p+=8;
398  v3 = XXH_rotl64(v3, 31);
399  v3 *= PRIME64_1;
400  v4 += XXH_get64bits(p) * PRIME64_2;
401  p+=8;
402  v4 = XXH_rotl64(v4, 31);
403  v4 *= PRIME64_1;
404  }
405  while (p<=limit);
406 
407  h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
408 
409  v1 *= PRIME64_2;
410  v1 = XXH_rotl64(v1, 31);
411  v1 *= PRIME64_1;
412  h64 ^= v1;
413  h64 = h64 * PRIME64_1 + PRIME64_4;
414 
415  v2 *= PRIME64_2;
416  v2 = XXH_rotl64(v2, 31);
417  v2 *= PRIME64_1;
418  h64 ^= v2;
419  h64 = h64 * PRIME64_1 + PRIME64_4;
420 
421  v3 *= PRIME64_2;
422  v3 = XXH_rotl64(v3, 31);
423  v3 *= PRIME64_1;
424  h64 ^= v3;
425  h64 = h64 * PRIME64_1 + PRIME64_4;
426 
427  v4 *= PRIME64_2;
428  v4 = XXH_rotl64(v4, 31);
429  v4 *= PRIME64_1;
430  h64 ^= v4;
431  h64 = h64 * PRIME64_1 + PRIME64_4;
432  }
433  else
434  {
435  h64 = seed + PRIME64_5;
436  }
437 
438  h64 += (U64) len;
439 
440  while (p+8<=bEnd)
441  {
442  U64 k1 = XXH_get64bits(p);
443  k1 *= PRIME64_2;
444  k1 = XXH_rotl64(k1,31);
445  k1 *= PRIME64_1;
446  h64 ^= k1;
447  h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
448  p+=8;
449  }
450 
451  if (p+4<=bEnd)
452  {
453  h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
454  h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
455  p+=4;
456  }
457 
458  while (p<bEnd)
459  {
460  h64 ^= (*p) * PRIME64_5;
461  h64 = XXH_rotl64(h64, 11) * PRIME64_1;
462  p++;
463  }
464 
465  h64 ^= h64 >> 33;
466  h64 *= PRIME64_2;
467  h64 ^= h64 >> 29;
468  h64 *= PRIME64_3;
469  h64 ^= h64 >> 32;
470 
471  return h64;
472 }
473 
474 
475 unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
476 {
477 #if 0
478  // Simple version, good for code maintenance, but unfortunately slow for small inputs
479  XXH64_state_t state;
480  XXH64_reset(&state, seed);
481  XXH64_update(&state, input, len);
482  return XXH64_digest(&state);
483 #else
484  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
485 
486 # if !defined(XXH_USE_UNALIGNED_ACCESS)
487  if ((((size_t)input) & 7)==0) // Input is aligned, let's leverage the speed advantage
488  {
489  if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
490  return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
491  else
492  return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
493  }
494 # endif
495 
496  if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
497  return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
498  else
499  return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
500 #endif
501 }
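// Usage sketch (illustrative, not part of the original sources; input and seed are
// hypothetical): the 64-bit one-shot variant is used the same way and returns a 64-bit value.
#if 0
    static const char msg[] = "hello, xxhash";
    unsigned long long h64 = XXH64(msg, sizeof(msg) - 1, 2654435761ULL /* seed */);
#endif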
502 
503 /****************************************************
504  * Advanced Hash Functions
505 ****************************************************/
506 
507 /*** Allocation ***/
508 typedef struct
509 {
510  U64 total_len;
511  U32 seed;
512  U32 v1;
513  U32 v2;
514  U32 v3;
515  U32 v4;
516  U32 mem32[4]; /* defined as U32 for alignment */
517  U32 memsize;
518 } XXH_istate32_t;
519 
520 typedef struct
521 {
522  U64 total_len;
523  U64 seed;
524  U64 v1;
525  U64 v2;
526  U64 v3;
527  U64 v4;
528  U64 mem64[4]; /* defined as U64 for alignment */
529  U32 memsize;
530 } XXH_istate64_t;
531 
532 
533 XXH32_state_t* XXH32_createState (void)
534 {
535  static_assert(sizeof(XXH32_state_t) >= sizeof(XXH_istate32_t), ""); // A compilation error here means XXH32_state_t is not large enough
536  return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
537 }
538 XXH_errorcode XXH32_freeState (XXH32_state_t* statePtr)
539 {
540  XXH_free(statePtr);
541  return XXH_OK;
542 };
543 
544 XXH64_state_t* XXH64_createState (void)
545 {
546  static_assert(sizeof(XXH64_state_t) >= sizeof(XXH_istate64_t), ""); // A compilation error here means XXH64_state_t is not large enough
547  return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
548 }
549 XXH_errorcode XXH64_freeState (XXH64_state_t* statePtr)
550 {
551  XXH_free(statePtr);
552  return XXH_OK;
553 };
554 
555 
556 /*** Hash feed ***/
557 
558 XXH_errorcode XXH32_reset(XXH32_state_t* state_in, U32 seed)
559 {
560  XXH_istate32_t* state = (XXH_istate32_t*) state_in;
561  state->seed = seed;
562  state->v1 = seed + PRIME32_1 + PRIME32_2;
563  state->v2 = seed + PRIME32_2;
564  state->v3 = seed + 0;
565  state->v4 = seed - PRIME32_1;
566  state->total_len = 0;
567  state->memsize = 0;
568  return XXH_OK;
569 }
570 
571 XXH_errorcode XXH64_reset(XXH64_state_t* state_in, unsigned long long seed)
572 {
573  XXH_istate64_t* state = (XXH_istate64_t*) state_in;
574  state->seed = seed;
575  state->v1 = seed + PRIME64_1 + PRIME64_2;
576  state->v2 = seed + PRIME64_2;
577  state->v3 = seed + 0;
578  state->v4 = seed - PRIME64_1;
579  state->total_len = 0;
580  state->memsize = 0;
581  return XXH_OK;
582 }
583 
584 
585 FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state_in, const void* input, size_t len, XXH_endianess endian)
586 {
587  XXH_istate32_t* state = (XXH_istate32_t *) state_in;
588  const BYTE* p = (const BYTE*)input;
589  const BYTE* const bEnd = p + len;
590 
591 #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
592  if (input==NULL) return XXH_ERROR;
593 #endif
594 
595  state->total_len += len;
596 
597  if (state->memsize + len < 16) // fill in tmp buffer
598  {
599  XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
600  state->memsize += (U32)len;
601  return XXH_OK;
602  }
603 
604  if (state->memsize) // some data left from previous update
605  {
606  XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
607  {
608  const U32* p32 = state->mem32;
609  state->v1 += XXH_readLE32(p32, endian) * PRIME32_2;
610  state->v1 = XXH_rotl32(state->v1, 13);
611  state->v1 *= PRIME32_1;
612  p32++;
613  state->v2 += XXH_readLE32(p32, endian) * PRIME32_2;
614  state->v2 = XXH_rotl32(state->v2, 13);
615  state->v2 *= PRIME32_1;
616  p32++;
617  state->v3 += XXH_readLE32(p32, endian) * PRIME32_2;
618  state->v3 = XXH_rotl32(state->v3, 13);
619  state->v3 *= PRIME32_1;
620  p32++;
621  state->v4 += XXH_readLE32(p32, endian) * PRIME32_2;
622  state->v4 = XXH_rotl32(state->v4, 13);
623  state->v4 *= PRIME32_1;
624  p32++;
625  }
626  p += 16-state->memsize;
627  state->memsize = 0;
628  }
629 
630  if (p <= bEnd-16)
631  {
632  const BYTE* const limit = bEnd - 16;
633  U32 v1 = state->v1;
634  U32 v2 = state->v2;
635  U32 v3 = state->v3;
636  U32 v4 = state->v4;
637 
638  do
639  {
640  v1 += XXH_readLE32(p, endian) * PRIME32_2;
641  v1 = XXH_rotl32(v1, 13);
642  v1 *= PRIME32_1;
643  p+=4;
644  v2 += XXH_readLE32(p, endian) * PRIME32_2;
645  v2 = XXH_rotl32(v2, 13);
646  v2 *= PRIME32_1;
647  p+=4;
648  v3 += XXH_readLE32(p, endian) * PRIME32_2;
649  v3 = XXH_rotl32(v3, 13);
650  v3 *= PRIME32_1;
651  p+=4;
652  v4 += XXH_readLE32(p, endian) * PRIME32_2;
653  v4 = XXH_rotl32(v4, 13);
654  v4 *= PRIME32_1;
655  p+=4;
656  }
657  while (p<=limit);
658 
659  state->v1 = v1;
660  state->v2 = v2;
661  state->v3 = v3;
662  state->v4 = v4;
663  }
664 
665  if (p < bEnd)
666  {
667  XXH_memcpy(state->mem32, p, bEnd-p);
668  state->memsize = (int)(bEnd-p);
669  }
670 
671  return XXH_OK;
672 }
673 
674 XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
675 {
676  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
677 
678  if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
679  return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
680  else
681  return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
682 }
683 
684 
685 
686 FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state_in, XXH_endianess endian)
687 {
688  XXH_istate32_t* state = (XXH_istate32_t*) state_in;
689  const BYTE * p = (const BYTE*)state->mem32;
690  BYTE* bEnd = (BYTE*)(state->mem32) + state->memsize;
691  U32 h32;
692 
693  if (state->total_len >= 16)
694  {
695  h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
696  }
697  else
698  {
699  h32 = state->seed + PRIME32_5;
700  }
701 
702  h32 += (U32) state->total_len;
703 
704  while (p+4<=bEnd)
705  {
706  h32 += XXH_readLE32(p, endian) * PRIME32_3;
707  h32 = XXH_rotl32(h32, 17) * PRIME32_4;
708  p+=4;
709  }
710 
711  while (p<bEnd)
712  {
713  h32 += (*p) * PRIME32_5;
714  h32 = XXH_rotl32(h32, 11) * PRIME32_1;
715  p++;
716  }
717 
718  h32 ^= h32 >> 15;
719  h32 *= PRIME32_2;
720  h32 ^= h32 >> 13;
721  h32 *= PRIME32_3;
722  h32 ^= h32 >> 16;
723 
724  return h32;
725 }
726 
727 
728 U32 XXH32_digest (const XXH32_state_t* state_in)
729 {
730  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
731 
732  if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
733  return XXH32_digest_endian(state_in, XXH_littleEndian);
734  else
735  return XXH32_digest_endian(state_in, XXH_bigEndian);
736 }
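// Usage sketch (illustrative, not part of the original sources; the two fragments are
// hypothetical): hashing data incrementally with the streaming API above. The final digest
// equals what XXH32() would return for the concatenated input.
#if 0
    XXH32_state_t* st = XXH32_createState();
    XXH32_reset(st, 0 /* seed */);
    XXH32_update(st, "hello, ", 7);
    XXH32_update(st, "xxhash", 6);
    unsigned int h32 = XXH32_digest(st);
    XXH32_freeState(st);
#endif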
737 
738 
739 FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state_in, const void* input, size_t len, XXH_endianess endian)
740 {
741  XXH_istate64_t * state = (XXH_istate64_t *) state_in;
742  const BYTE* p = (const BYTE*)input;
743  const BYTE* const bEnd = p + len;
744 
745 #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
746  if (input==NULL) return XXH_ERROR;
747 #endif
748 
749  state->total_len += len;
750 
751  if (state->memsize + len < 32) // fill in tmp buffer
752  {
753  XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
754  state->memsize += (U32)len;
755  return XXH_OK;
756  }
757 
758  if (state->memsize) // some data left from previous update
759  {
760  XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
761  {
762  const U64* p64 = state->mem64;
763  state->v1 += XXH_readLE64(p64, endian) * PRIME64_2;
764  state->v1 = XXH_rotl64(state->v1, 31);
765  state->v1 *= PRIME64_1;
766  p64++;
767  state->v2 += XXH_readLE64(p64, endian) * PRIME64_2;
768  state->v2 = XXH_rotl64(state->v2, 31);
769  state->v2 *= PRIME64_1;
770  p64++;
771  state->v3 += XXH_readLE64(p64, endian) * PRIME64_2;
772  state->v3 = XXH_rotl64(state->v3, 31);
773  state->v3 *= PRIME64_1;
774  p64++;
775  state->v4 += XXH_readLE64(p64, endian) * PRIME64_2;
776  state->v4 = XXH_rotl64(state->v4, 31);
777  state->v4 *= PRIME64_1;
778  p64++;
779  }
780  p += 32-state->memsize;
781  state->memsize = 0;
782  }
783 
784  if (p+32 <= bEnd)
785  {
786  const BYTE* const limit = bEnd - 32;
787  U64 v1 = state->v1;
788  U64 v2 = state->v2;
789  U64 v3 = state->v3;
790  U64 v4 = state->v4;
791 
792  do
793  {
794  v1 += XXH_readLE64(p, endian) * PRIME64_2;
795  v1 = XXH_rotl64(v1, 31);
796  v1 *= PRIME64_1;
797  p+=8;
798  v2 += XXH_readLE64(p, endian) * PRIME64_2;
799  v2 = XXH_rotl64(v2, 31);
800  v2 *= PRIME64_1;
801  p+=8;
802  v3 += XXH_readLE64(p, endian) * PRIME64_2;
803  v3 = XXH_rotl64(v3, 31);
804  v3 *= PRIME64_1;
805  p+=8;
806  v4 += XXH_readLE64(p, endian) * PRIME64_2;
807  v4 = XXH_rotl64(v4, 31);
808  v4 *= PRIME64_1;
809  p+=8;
810  }
811  while (p<=limit);
812 
813  state->v1 = v1;
814  state->v2 = v2;
815  state->v3 = v3;
816  state->v4 = v4;
817  }
818 
819  if (p < bEnd)
820  {
821  XXH_memcpy(state->mem64, p, bEnd-p);
822  state->memsize = (int)(bEnd-p);
823  }
824 
825  return XXH_OK;
826 }
827 
828 XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
829 {
830  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
831 
832  if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
833  return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
834  else
835  return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
836 }
837 
838 
839 
840 FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state_in, XXH_endianess endian)
841 {
842  XXH_istate64_t * state = (XXH_istate64_t *) state_in;
843  const BYTE * p = (const BYTE*)state->mem64;
844  BYTE* bEnd = (BYTE*)state->mem64 + state->memsize;
845  U64 h64;
846 
847  if (state->total_len >= 32)
848  {
849  U64 v1 = state->v1;
850  U64 v2 = state->v2;
851  U64 v3 = state->v3;
852  U64 v4 = state->v4;
853 
854  h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
855 
856  v1 *= PRIME64_2;
857  v1 = XXH_rotl64(v1, 31);
858  v1 *= PRIME64_1;
859  h64 ^= v1;
860  h64 = h64*PRIME64_1 + PRIME64_4;
861 
862  v2 *= PRIME64_2;
863  v2 = XXH_rotl64(v2, 31);
864  v2 *= PRIME64_1;
865  h64 ^= v2;
866  h64 = h64*PRIME64_1 + PRIME64_4;
867 
868  v3 *= PRIME64_2;
869  v3 = XXH_rotl64(v3, 31);
870  v3 *= PRIME64_1;
871  h64 ^= v3;
872  h64 = h64*PRIME64_1 + PRIME64_4;
873 
874  v4 *= PRIME64_2;
875  v4 = XXH_rotl64(v4, 31);
876  v4 *= PRIME64_1;
877  h64 ^= v4;
878  h64 = h64*PRIME64_1 + PRIME64_4;
879  }
880  else
881  {
882  h64 = state->seed + PRIME64_5;
883  }
884 
885  h64 += (U64) state->total_len;
886 
887  while (p+8<=bEnd)
888  {
889  U64 k1 = XXH_readLE64(p, endian);
890  k1 *= PRIME64_2;
891  k1 = XXH_rotl64(k1,31);
892  k1 *= PRIME64_1;
893  h64 ^= k1;
894  h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
895  p+=8;
896  }
897 
898  if (p+4<=bEnd)
899  {
900  h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
901  h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
902  p+=4;
903  }
904 
905  while (p<bEnd)
906  {
907  h64 ^= (*p) * PRIME64_5;
908  h64 = XXH_rotl64(h64, 11) * PRIME64_1;
909  p++;
910  }
911 
912  h64 ^= h64 >> 33;
913  h64 *= PRIME64_2;
914  h64 ^= h64 >> 29;
915  h64 *= PRIME64_3;
916  h64 ^= h64 >> 32;
917 
918  return h64;
919 }
920 
921 
922 unsigned long long XXH64_digest (const XXH64_state_t* state_in)
923 {
924  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
925 
926  if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
927  return XXH64_digest_endian(state_in, XXH_littleEndian);
928  else
929  return XXH64_digest_endian(state_in, XXH_bigEndian);
930 }
931 
932 
933 } // detail
934 } // beast