Files
xahaud/lib/xxhash.c
JoelKatz df26c08a34 Squashed 'src/lz4/' changes from e25b51d..baf78e7
baf78e7 Merge pull request #111 from Cyan4973/dev
6f50184 Updated NEWS
acae59a Fixed : default sparse mode disabled on stdout, to support ` >>`  redirection scenario reported by Takayuki Matsuoka (#110)
91c1b9a Performance fix : big compression speed boost for clang (+30%)
0fb0392 Merge branch 'dev' of github.com:Cyan4973/lz4 into dev
bb22103 Merge pull request #107 from t-mat/issue/103pr2
7d72c0c Added LZF
e769a0e Combine unique .lz4 file info
ffff9ee Support iterative edit and testing
a8fdeb4 Add log output of succeeded decompression to test script
5151c30 Add log of same files and sha1 hash of unique files to versionstest
26065c3 Fixed  : LZ4IO exits too early when frame crc not present, reported by Yongwoon Cho (#106)
87e560e minor tests improvements
a9ff13a minor test refactor
58b5aad Fixed sparse issue with non seekable streams (#105)
60d657a removed "flush" argument to please Travis's python version
cdb136d cosmetic changes, 2nd try
c779c80 minor cosmetic changes for Takayuki's testVersions
26021db Merge branch 'dev' of github.com:Cyan4973/lz4 into dev
e3b5bf3 Merge pull request #104 from t-mat/issue/103pr2
fe11e0b Issue #103 : Add compatibility test between releases
f02c467 Added a few more interoperability tests (32bits vs 64 bits)
05c3f66 Updated a few comments
9607848 Fixed minor typo
45e1995 minor introduction update
7d182b8 Merge pull request #102 from Cyan4973/dev
fdd0029 minor parsing update
672bfde Updated comments
b4ef93a Fixed typo
efbebd2 Added : LZ4_compress_destSize()
1c3e633 Added compilation flag -Wcast-qual
05b0aa6 Updated readme
e05088d Updated lz4hc API
b4348a4 Fixed minor Visual warning
1171303 Updated streaming examples
1b17bf2 New lz4 API, using LZ4_compress_fast()
b495c91 Merge pull request #99 from eberge/dev
791512c Fixed bug 9318
2a974d7 refactored lz4hc
1e751a7 Install the lz4frame.h header in the cmake build
ad2dd6d moved lz4frame context types to incomplete typedef
c9cbb8f Increased aligment requirement for lz4frame context pointer
ef55dfb Modified lz4frame context typedef, to enforce stricter alignment condition
87a1c70 Fixed minor static analyzer warning
d6dc0a4 streaming API : Improved ring buffer management
bda38fd Merge pull request #98 from eberge/dev
a9a24e8 cmake support for AIX, HPUX, Solaris and Windows
9c6fb8b Added LZ4_compress_fast_extState()
b05d3d7 Frame content size disabled by default when using LZ4F_compressFrame(), to be in better coherence with the advanced API LZ4F_compress_update()
b805d58 Removed obsolete functions from lz4 cli
f11afaf Removed LZ4_compress() (obsolete) from lz4
72e6794 Updated LZ4F_freeDecompressionContext(), to provide stage hint as result
a01e10d Changed LZ4F compressionLevel from unsigned to signed, in anticipation for LZ4_compress_fast() integration.
cbcdd88 Fixed frame concatenation
e18aa90 Fixed frametest
c035b7a Restored make-lz4
197982e Fixed unfinished frame (issue #75)
409f816 Updated LZ4F_getFrameInfo() behavior, related to uncomplete frame header decoding attempts
47c3040 added --no-sparse test
9fd4f1f Sparse file support is now enabled by default
7644bee test error message in multiple files mode
bce2eeb Reclassified some notification messages as errors
9e92bee stronger arm tests
2ed9dcc fix minor "divide by zero" risk
633c1ca fixed minor leak
0ed2e71 Static analyzer generates error codes on bug suspicion
2cf8a19 minor header refactoring
634e4ee Merge pull request #96 from t-mat/improve-pr-95
e328d41 minor optimization for small files
13c6e16 Removed status notification in multiple-files mode
d153aaa Add LZ4F_OBSOLETE_ENUM() to describe obsolete enums
a430b85 Multiple files decompression refactoring
4e574e7 Updated lz4frame error names
d37926b Merge pull request #95 from t-mat/issue/90
240b554 Merge pull request #94 from t-mat/dev
3d46d4b Fix LZ4_DEPRECATED() in lz4hc.h
5f732e1 Merge pull request #91 from t-mat/dev
175890f Issue#90 : Change old enum names to new one
585bab8 Issue#90 : Change old enum to macro to maximize compatibility
081bcca Issue#90 : Add LZ4F_ prefix
e1283c7 Fix LZ4_DEPRECATED() for older/non-gcc/clang/MSVC compilers
d7298d2 Replace GCC_VERSION with LZ4_GCC_VERSION
9851583 Merge pull request #93 from drcrallen/descriptiveFrameErrors
b664a72 Revert "Revert "Add more descriptive frame errors""
3f4f623 Valgrind tests generate errors
83e350d Merge branch 'dev' of github.com:Cyan4973/lz4 into dev
066e9d3 Merge pull request #92 from Cyan4973/revert-89-descriptiveFrameErrors
5a66527 Revert "Add more descriptive frame errors"
0dc8308 Merge pull request #89 from drcrallen/descriptiveFrameErrors
05a46fc Changes LZ4F_compressBound() definition using NULL prefsPtr to cover worst case instead of default.
348f509 lz4io refactoring
eabc6d8 New valgrind test with multiple files
113b150 Fix leak issue with compression of multiple files
c64200d Improved performance when compressing a lot of small files
cc24124 minor compatibility fixes
ccba7a0 Merge pull request #86 from KyleJHarper/origin/r129/multiple_inputs_patch
d535214 Add more descriptive frame errors
bc28fc1 Merge pull request #87 from t-mat/fix-example2
fd77bad Replace obsolete functions
b036eaa Add snprintf macro for MSVC
7f2f1fc Added support for continuation of file compression and decompression if input files are missing.  Should more closely match gzip/bzip2/xz and so forth.  Also removed a debug print accidentally left in.
0169502 Added new LZ4IO_decompressMultipleFilenames to allow decompression of multiple files with the -m switch added in r128 (ref: google code issue 151).  Limitation: will only process files matching LZ4_EXTENSION macro, which for now seems reasonable.
da11725 new memory leak test for fullbench using multi-files
2c79887 Shortened tests durations
42e5bc4 Updated badges
2852b9e Fixed issue #84
8f49666 Fixed : minor coverity warning
8a61000 Fixed a few coverity warnings
138673d fixed minor g++ warning
cc8d617 Merge pull request #82 from t-mat/add-lz4-prefix
81fdd9d Fixed a few Valgrind warnings
ad86910 Add LZ4 prefix to deprecation macros
66b8a4a Fixed : minor Visual warnings
62ed153 Fixed : a few minor coverity warnings
9443f3d Extended obsolete warning messages to lz4hc
973e385 Implemented obsolete warning message
be9d248 Update lz4hc API : LZ4_compressHC_safe()
a07db74 Clarified lz4frame.h inline doc
8b8e5ef fixed minor sanitize warning
c22a0e1 Updated : fuzzer tests can be programmed for a timelength
a2864fd Fixed a few minor sanitize warnings
33134fb Added : sanitize test
f344fbd Fixed a few warnings from -fsanitize=undefined
2f8a4c3 New LZ4_compress_safe() API
1853622 fixed over-cautious visual warning
b41137f minor Makefile test refactoring
327cb04 minor memory leak fix and test
43e0535 fix g++ typecast
61d7416 updated doc
17f8614 added : memtest on fullbench
d38b0b6 Merge pull request #73 from funcodeio/dev
979a991 memcpy speed as reference
157a739 Merge pull request #74 from Cyan4973/fastMode
dd69902 Removed unused lines.
43eaf8f Merge pull request #72 from fzort/master
f72761f new tests for large files with content size support (#70)
90c0104 Added : progress indicator, in fast and decompression modes
1d3ab5d Cygwin has fileno, so there's no need to use _fileno.
78d2dfd fullbench : tests of _limitedOutput variants intentionnally provides less memory space than safe (LZ4_compressBound())
ef7cd83 Fixed issue 160, reported by Eric Berge
28e237e simplified LZ4_compress_limitedOutput()
89eee0d Removed make dist
0615eb4 Stricter tests : treat warnings as errors
76a03c1 simplified LZ4_compress_withState()
6625068 simplified LZ4_compress()
886b199 Modified files rights
7b5e945 Removed Visual 2013 solution, as AppVeyor automated mode only works with a single solution
117ab8c Added : Visual 2013 solution
08b24af Updated Visual 2012 solution : + 3 projects (fullbench, frametest, datagen)
a761546 Fix : minor warning under Visual
bf146ec Removed .suo & .user files from Visual solutions
7db6678 Restored proper credit
3bba55c Fixed : Windows compilation Added : Appveyor badge
160661c Merge pull request #69 from Cyan4973/dev
8437a0e Fixed : Visual compilation
7c26b03 Updated make dist
f174964 Added : Frame documentation in MarkDown format
880381c Removed HTML Frame Format documentation
5b9fb69 minor tweak
4783cb8 Updated readme
4c227a4 Added LZ4_compress_fast()
003af71 Merge pull request #67 from Cyan4973/dev
2a82619 fixed fullbench memory allocation error
6c69dc1 faster compression in 64 bits mode
44793b8 Updated documentation
b93f629 changed file name
eeb8bea Updated comments on LZ4F_getFrameInfo()
002ec60 restored lz4hc compression ratio
987e78c Merge pull request #66 from Cyan4973/dev
8cb06d5 lz4frame validates contentSize during decompression
d5da787 Changed struct member to contentSize
2d4fed5 Merge pull request #65 from Cyan4973/dev
ce71b07 converted to markdown friendly syntax
1ba37f3 Reference format doc
5780864 Fixed : Makefile
b009767 windows friendly make clean
27f7d06 minor beautifier (make clean)
b4755c7 Added : arm cross-compilation test
a357f43 Fixed cast-align warnings on 32-bits
4a9335b Added : doc authorship
e652285 Merge pull request #63 from t-mat/comment-on-example-directory
2af52a9 Add "Examples" subtree
679afea Add README.md as table of contents
19665c9 Add document for "Line by Line Text Compression" example
438fee9 Add document for "Double Buffer" example
a38166b Add document : "Streaming API Basics"
80e71c6 Updated man page : "--[no-]X"
5950f72 Updated tests
6b923d5 Updated long commands, with reverse "--no-" variants
d0f8d40 updated dist list
00c3208 Merge pull request #61 from Cyan4973/dev
7f436a1 lz4 cli supports frame content size
a28b147 removed useless man pages
7cf4e5c Updates tests & Man pages
f02adc7 new long commands
86715b2 Some more tests related to frame content size
7ee7256 frame content size support
7d87d43 Updated lz4io sparse file support (alignment properties)
b54d256 minor lz4frame optimisation (no more malloc() on using LZ4F_compressFrame() in fast mode)
da9402c minor lz4frame refactoring
859fe3b Updated LZ4 frame format documentation
8edb7f1 Added : Readme into lib directory, to explain what does each file
e7fb4d1 lz4 utility supports "pass-through" mode
2a02455 minor refactoring
3a68324 skippable frames support
93849d1 minor CMakeLists update
471eabe Merge pull request #60 from Cyberunner23/master
ef029a1 Removed checking of CMAKE_SYSTEM_PROCESSOR when adding -fPIC, breaks when that var is '64bit'.
c9a2b14 removed -s command from lz4c specific list of legacy commands
207aafd Added :  unlz4 symbolic link to "lz4 -d"
2b55752 changed "make install" default install directory to /usr/local
bbcfe21 Added : clang test
8a87769 Fix : static analyzer test on Travis
45b0642 scan-build tests
a18fb43 Merge pull request #58 from Cyan4973/sparseFile
248b761 windows sparse file support
d11ac40 Improved sparse files support
45a357f Improved sparse file support
e38c268 Fixed minor g++ warnings
12ab415 Preliminary support for sparse files
e3f33d2 Fixed minor warnings
74a6b14 Merge pull request #57 from alexDarcy/master
f2cc4be Updated Cmake configuration for non-gnu compiler
ceec6fa g++ compatibility
6b0c39b Updated datagen (can create sparse files)
e277511 Merge pull request #54 from t-mat/gc-issue/155
e1d9b59 Fixed : static library (x64 binary)
32a85fc NetBSD compatibility (#48)
488029e Updated : compress multiple files
046bd3a Merge pull request #52 from KyleJHarper/r128/multiple_inputs
dcdd628 Fix sentinel bit pattern
de5c930 Fix sentinel size miscalculation
eed7952 Add GNU coreutil's is_nul() method to isSparse()
b372f45 Add Neil's method to isSparse()
01a24af Improve isSparse()
4a5d92b Adjust coding style
97679fa Google Code Issue 155: lz4 cli should support sparse file https://code.google.com/p/lz4/issues/detail?id=155
fa27d23 Added support for multiple input files to act more like other compressors. For example: gzip file1 file2 file3. You can now do: lz4 [args] -m file1 file2 file3. Fixes 151.
67f3b41 Merge pull request #49 from t-mat/msvc-fseeki64
e68d1c9 restored lz4 hc compression ratio
41b6ed3 Replace fseek with _fseeki64 to avoid MSVC's 2GiB barrier
8f4e201 Fix : lz4frame.h within uninstaller
9fd92de Added : Visual project directory

git-subtree-dir: src/lz4
git-subtree-split: baf78e7e4dcbdf824a76f990ffeb573d113bbbdb
2015-05-29 15:03:27 -07:00

/*
xxHash - Fast Hash algorithm
Copyright (C) 2012-2015, Yann Collet
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- xxHash source repository : https://github.com/Cyan4973/xxHash
*/
/**************************************
* Tuning parameters
**************************************/
/* Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
* For other CPUs, the compiler will be more cautious and insert extra code to ensure aligned access is respected.
* If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance.
* You can also enable this parameter if you know your input data will always be aligned (on 4-byte boundaries, for U32).
*/
#if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
# define XXH_USE_UNALIGNED_ACCESS 1
#endif
/* XXH_ACCEPT_NULL_INPUT_POINTER :
* If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
* When this option is enabled, xxHash output for a null input pointer is the same as for a zero-length input.
* By default, this option is disabled. To enable it, uncomment the define below :
*/
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
/* XXH_FORCE_NATIVE_FORMAT :
* By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
* Results are therefore identical for little-endian and big-endian CPUs.
* This comes at a performance cost for big-endian CPUs, since some byte swapping is required to emulate the little-endian format.
* Should endian-independence be of no importance for your application, you may set the #define below to 1.
* It will improve speed on big-endian CPUs.
* This option has no impact on little-endian CPUs.
*/
#define XXH_FORCE_NATIVE_FORMAT 0
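/* Illustrative sketch (editor's addition, not part of the upstream source) :
* shows what the XXH_ACCEPT_NULL_INPUT_POINTER option documented above means in
* practice, assuming this translation unit was compiled with
* -DXXH_ACCEPT_NULL_INPUT_POINTER=1. The function name is hypothetical.
* Guarded by #if 0 so it never takes part in the build. */
#if 0
#include "xxhash.h"

static int xxh_null_input_sketch(void)
{
    /* With the option enabled, a NULL pointer hashes like a zero-length input
       instead of faulting, so both calls below are expected to agree. */
    unsigned hNull  = XXH32(NULL, 0, 12345);
    unsigned hEmpty = XXH32("",   0, 12345);
    return hNull == hEmpty;   /* expected: 1 */
}
#endif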
/**************************************
* Compiler Specific Options
***************************************/
#ifdef _MSC_VER /* Visual Studio */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# define FORCE_INLINE static __forceinline
#else
# if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
# define FORCE_INLINE static inline __attribute__((always_inline))
# else
# define FORCE_INLINE static inline
# endif
# else
# define FORCE_INLINE static
# endif /* __STDC_VERSION__ */
#endif
/**************************************
* Includes & Memory related functions
***************************************/
#include "xxhash.h"
/* Modify the local functions below should you wish to use some other memory routines */
/* for malloc(), free() */
#include <stdlib.h>
static void* XXH_malloc(size_t s) { return malloc(s); }
static void XXH_free (void* p) { free(p); }
/* for memcpy() */
#include <string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
/**************************************
* Basic Types
***************************************/
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# include <stdint.h>
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
#endif
static U32 XXH_read32(const void* memPtr)
{
U32 val32;
memcpy(&val32, memPtr, 4);
return val32;
}
static U64 XXH_read64(const void* memPtr)
{
U64 val64;
memcpy(&val64, memPtr, 8);
return val64;
}
/******************************************
* Compiler-specific Functions and Macros
******************************************/
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
/* Note : although _rotl exists for MinGW (GCC under Windows), performance seems poor */
#if defined(_MSC_VER)
# define XXH_rotl32(x,r) _rotl(x,r)
# define XXH_rotl64(x,r) _rotl64(x,r)
#else
# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#endif
#if defined(_MSC_VER) /* Visual Studio */
# define XXH_swap32 _byteswap_ulong
# define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
# define XXH_swap32 __builtin_bswap32
# define XXH_swap64 __builtin_bswap64
#else
static U32 XXH_swap32 (U32 x)
{
return ((x << 24) & 0xff000000 ) |
((x << 8) & 0x00ff0000 ) |
((x >> 8) & 0x0000ff00 ) |
((x >> 24) & 0x000000ff );
}
static U64 XXH_swap64 (U64 x)
{
return ((x << 56) & 0xff00000000000000ULL) |
((x << 40) & 0x00ff000000000000ULL) |
((x << 24) & 0x0000ff0000000000ULL) |
((x << 8) & 0x000000ff00000000ULL) |
((x >> 8) & 0x00000000ff000000ULL) |
((x >> 24) & 0x0000000000ff0000ULL) |
((x >> 40) & 0x000000000000ff00ULL) |
((x >> 56) & 0x00000000000000ffULL);
}
#endif
/***************************************
* Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
#ifndef XXH_CPU_LITTLE_ENDIAN /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example using a compiler switch */
static const int one = 1;
# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&one))
#endif
/*****************************
* Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
if (align==XXH_unaligned)
return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
else
return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}
FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}
FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
if (align==XXH_unaligned)
return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
else
return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}
FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}
/***************************************
* Macros
***************************************/
#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(!!(c)) }; } /* use only *after* variable declarations */
/***************************************
* Constants
***************************************/
#define PRIME32_1 2654435761U
#define PRIME32_2 2246822519U
#define PRIME32_3 3266489917U
#define PRIME32_4 668265263U
#define PRIME32_5 374761393U
#define PRIME64_1 11400714785074694791ULL
#define PRIME64_2 14029467366897019727ULL
#define PRIME64_3 1609587929392839161ULL
#define PRIME64_4 9650029242287828579ULL
#define PRIME64_5 2870177450012600261ULL
/*****************************
* Simple Hash Functions
*****************************/
FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
const BYTE* p = (const BYTE*)input;
const BYTE* bEnd = p + len;
U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (p==NULL)
{
len=0;
bEnd=p=(const BYTE*)(size_t)16;
}
#endif
if (len>=16)
{
const BYTE* const limit = bEnd - 16;
U32 v1 = seed + PRIME32_1 + PRIME32_2;
U32 v2 = seed + PRIME32_2;
U32 v3 = seed + 0;
U32 v4 = seed - PRIME32_1;
do
{
v1 += XXH_get32bits(p) * PRIME32_2;
v1 = XXH_rotl32(v1, 13);
v1 *= PRIME32_1;
p+=4;
v2 += XXH_get32bits(p) * PRIME32_2;
v2 = XXH_rotl32(v2, 13);
v2 *= PRIME32_1;
p+=4;
v3 += XXH_get32bits(p) * PRIME32_2;
v3 = XXH_rotl32(v3, 13);
v3 *= PRIME32_1;
p+=4;
v4 += XXH_get32bits(p) * PRIME32_2;
v4 = XXH_rotl32(v4, 13);
v4 *= PRIME32_1;
p+=4;
}
while (p<=limit);
h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
}
else
{
h32 = seed + PRIME32_5;
}
h32 += (U32) len;
while (p+4<=bEnd)
{
h32 += XXH_get32bits(p) * PRIME32_3;
h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
p+=4;
}
while (p<bEnd)
{
h32 += (*p) * PRIME32_5;
h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
p++;
}
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
h32 *= PRIME32_3;
h32 ^= h32 >> 16;
return h32;
}
unsigned XXH32 (const void* input, size_t len, unsigned seed)
{
#if 0
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
XXH32_state_t state;
XXH32_reset(&state, seed);
XXH32_update(&state, input, len);
return XXH32_digest(&state);
#else
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
# if !defined(XXH_USE_UNALIGNED_ACCESS)
if ((((size_t)input) & 3) == 0) /* Input is 4-byte aligned, leverage the speed benefit */
{
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
else
return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
}
# endif
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
else
return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
const BYTE* p = (const BYTE*)input;
const BYTE* bEnd = p + len;
U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (p==NULL)
{
len=0;
bEnd=p=(const BYTE*)(size_t)32;
}
#endif
if (len>=32)
{
const BYTE* const limit = bEnd - 32;
U64 v1 = seed + PRIME64_1 + PRIME64_2;
U64 v2 = seed + PRIME64_2;
U64 v3 = seed + 0;
U64 v4 = seed - PRIME64_1;
do
{
v1 += XXH_get64bits(p) * PRIME64_2;
p+=8;
v1 = XXH_rotl64(v1, 31);
v1 *= PRIME64_1;
v2 += XXH_get64bits(p) * PRIME64_2;
p+=8;
v2 = XXH_rotl64(v2, 31);
v2 *= PRIME64_1;
v3 += XXH_get64bits(p) * PRIME64_2;
p+=8;
v3 = XXH_rotl64(v3, 31);
v3 *= PRIME64_1;
v4 += XXH_get64bits(p) * PRIME64_2;
p+=8;
v4 = XXH_rotl64(v4, 31);
v4 *= PRIME64_1;
}
while (p<=limit);
h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
v1 *= PRIME64_2;
v1 = XXH_rotl64(v1, 31);
v1 *= PRIME64_1;
h64 ^= v1;
h64 = h64 * PRIME64_1 + PRIME64_4;
v2 *= PRIME64_2;
v2 = XXH_rotl64(v2, 31);
v2 *= PRIME64_1;
h64 ^= v2;
h64 = h64 * PRIME64_1 + PRIME64_4;
v3 *= PRIME64_2;
v3 = XXH_rotl64(v3, 31);
v3 *= PRIME64_1;
h64 ^= v3;
h64 = h64 * PRIME64_1 + PRIME64_4;
v4 *= PRIME64_2;
v4 = XXH_rotl64(v4, 31);
v4 *= PRIME64_1;
h64 ^= v4;
h64 = h64 * PRIME64_1 + PRIME64_4;
}
else
{
h64 = seed + PRIME64_5;
}
h64 += (U64) len;
while (p+8<=bEnd)
{
U64 k1 = XXH_get64bits(p);
k1 *= PRIME64_2;
k1 = XXH_rotl64(k1,31);
k1 *= PRIME64_1;
h64 ^= k1;
h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
p+=8;
}
if (p+4<=bEnd)
{
h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
p+=4;
}
while (p<bEnd)
{
h64 ^= (*p) * PRIME64_5;
h64 = XXH_rotl64(h64, 11) * PRIME64_1;
p++;
}
h64 ^= h64 >> 33;
h64 *= PRIME64_2;
h64 ^= h64 >> 29;
h64 *= PRIME64_3;
h64 ^= h64 >> 32;
return h64;
}
unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
XXH64_state_t state;
XXH64_reset(&state, seed);
XXH64_update(&state, input, len);
return XXH64_digest(&state);
#else
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
# if !defined(XXH_USE_UNALIGNED_ACCESS)
if ((((size_t)input) & 7)==0) /* Input is aligned, let's leverage the speed advantage */
{
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
else
return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
}
# endif
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
else
return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
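/* Illustrative usage sketch (editor's addition, not part of the upstream source) :
* the one-shot entry points above take a buffer, its length in bytes and a seed,
* and return the 32-bit / 64-bit digest directly. Guarded by #if 0; the function
* name is hypothetical. */
#if 0
#include <stdio.h>
#include <string.h>
#include "xxhash.h"

static void xxh_one_shot_sketch(void)
{
    const char* msg = "xxHash one-shot example";
    size_t      len = strlen(msg);

    unsigned           h32 = XXH32(msg, len, 0);   /* seed = 0 */
    unsigned long long h64 = XXH64(msg, len, 0);

    printf("XXH32 = %08x\n",    h32);
    printf("XXH64 = %016llx\n", h64);
}
#endif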
/****************************************************
* Advanced Hash Functions
****************************************************/
/*** Allocation ***/
typedef struct
{
U64 total_len;
U32 seed;
U32 v1;
U32 v2;
U32 v3;
U32 v4;
U32 mem32[4]; /* defined as U32 for alignment */
U32 memsize;
} XXH_istate32_t;
typedef struct
{
U64 total_len;
U64 seed;
U64 v1;
U64 v2;
U64 v3;
U64 v4;
U64 mem64[4]; /* defined as U64 for alignment */
U32 memsize;
} XXH_istate64_t;
XXH32_state_t* XXH32_createState(void)
{
XXH_STATIC_ASSERT(sizeof(XXH32_state_t) >= sizeof(XXH_istate32_t)); /* A compilation error here means XXH32_state_t is not large enough */
return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
XXH_free(statePtr);
return XXH_OK;
}
XXH64_state_t* XXH64_createState(void)
{
XXH_STATIC_ASSERT(sizeof(XXH64_state_t) >= sizeof(XXH_istate64_t)); /* A compilation error here means XXH64_state_t is not large enough */
return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
XXH_free(statePtr);
return XXH_OK;
}
/*** Hash feed ***/
XXH_errorcode XXH32_reset(XXH32_state_t* state_in, U32 seed)
{
XXH_istate32_t* state = (XXH_istate32_t*) state_in;
state->seed = seed;
state->v1 = seed + PRIME32_1 + PRIME32_2;
state->v2 = seed + PRIME32_2;
state->v3 = seed + 0;
state->v4 = seed - PRIME32_1;
state->total_len = 0;
state->memsize = 0;
return XXH_OK;
}
XXH_errorcode XXH64_reset(XXH64_state_t* state_in, unsigned long long seed)
{
XXH_istate64_t* state = (XXH_istate64_t*) state_in;
state->seed = seed;
state->v1 = seed + PRIME64_1 + PRIME64_2;
state->v2 = seed + PRIME64_2;
state->v3 = seed + 0;
state->v4 = seed - PRIME64_1;
state->total_len = 0;
state->memsize = 0;
return XXH_OK;
}
FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state_in, const void* input, size_t len, XXH_endianess endian)
{
XXH_istate32_t* state = (XXH_istate32_t *) state_in;
const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (input==NULL) return XXH_ERROR;
#endif
state->total_len += len;
if (state->memsize + len < 16) /* fill in tmp buffer */
{
XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
state->memsize += (U32)len;
return XXH_OK;
}
if (state->memsize) /* some data left from previous update */
{
XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
{
const U32* p32 = state->mem32;
state->v1 += XXH_readLE32(p32, endian) * PRIME32_2;
state->v1 = XXH_rotl32(state->v1, 13);
state->v1 *= PRIME32_1;
p32++;
state->v2 += XXH_readLE32(p32, endian) * PRIME32_2;
state->v2 = XXH_rotl32(state->v2, 13);
state->v2 *= PRIME32_1;
p32++;
state->v3 += XXH_readLE32(p32, endian) * PRIME32_2;
state->v3 = XXH_rotl32(state->v3, 13);
state->v3 *= PRIME32_1;
p32++;
state->v4 += XXH_readLE32(p32, endian) * PRIME32_2;
state->v4 = XXH_rotl32(state->v4, 13);
state->v4 *= PRIME32_1;
p32++;
}
p += 16-state->memsize;
state->memsize = 0;
}
if (p <= bEnd-16)
{
const BYTE* const limit = bEnd - 16;
U32 v1 = state->v1;
U32 v2 = state->v2;
U32 v3 = state->v3;
U32 v4 = state->v4;
do
{
v1 += XXH_readLE32(p, endian) * PRIME32_2;
v1 = XXH_rotl32(v1, 13);
v1 *= PRIME32_1;
p+=4;
v2 += XXH_readLE32(p, endian) * PRIME32_2;
v2 = XXH_rotl32(v2, 13);
v2 *= PRIME32_1;
p+=4;
v3 += XXH_readLE32(p, endian) * PRIME32_2;
v3 = XXH_rotl32(v3, 13);
v3 *= PRIME32_1;
p+=4;
v4 += XXH_readLE32(p, endian) * PRIME32_2;
v4 = XXH_rotl32(v4, 13);
v4 *= PRIME32_1;
p+=4;
}
while (p<=limit);
state->v1 = v1;
state->v2 = v2;
state->v3 = v3;
state->v4 = v4;
}
if (p < bEnd)
{
XXH_memcpy(state->mem32, p, bEnd-p);
state->memsize = (int)(bEnd-p);
}
return XXH_OK;
}
XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
else
return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}
FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state_in, XXH_endianess endian)
{
const XXH_istate32_t* state = (const XXH_istate32_t*) state_in;
const BYTE * p = (const BYTE*)state->mem32;
const BYTE* bEnd = (const BYTE*)(state->mem32) + state->memsize;
U32 h32;
if (state->total_len >= 16)
{
h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
}
else
{
h32 = state->seed + PRIME32_5;
}
h32 += (U32) state->total_len;
while (p+4<=bEnd)
{
h32 += XXH_readLE32(p, endian) * PRIME32_3;
h32 = XXH_rotl32(h32, 17) * PRIME32_4;
p+=4;
}
while (p<bEnd)
{
h32 += (*p) * PRIME32_5;
h32 = XXH_rotl32(h32, 11) * PRIME32_1;
p++;
}
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
h32 *= PRIME32_3;
h32 ^= h32 >> 16;
return h32;
}
U32 XXH32_digest (const XXH32_state_t* state_in)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_digest_endian(state_in, XXH_littleEndian);
else
return XXH32_digest_endian(state_in, XXH_bigEndian);
}
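/* Illustrative streaming sketch (editor's addition, not part of the upstream source) :
* feeds the same data through createState / reset / update / digest and checks it
* matches the one-shot XXH32() above. Guarded by #if 0; the function name is
* hypothetical. */
#if 0
#include <string.h>
#include "xxhash.h"

static int xxh32_streaming_sketch(void)
{
    const char* part1 = "hello, ";
    const char* part2 = "streaming world";
    char whole[64];
    unsigned hStream, hOneShot;

    XXH32_state_t* state = XXH32_createState();
    if (state == NULL) return 0;

    XXH32_reset(state, 0);                       /* seed = 0 */
    XXH32_update(state, part1, strlen(part1));   /* data may arrive in chunks */
    XXH32_update(state, part2, strlen(part2));
    hStream = XXH32_digest(state);
    XXH32_freeState(state);

    strcpy(whole, part1);
    strcat(whole, part2);
    hOneShot = XXH32(whole, strlen(whole), 0);

    return hStream == hOneShot;   /* expected: 1 */
}
#endif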
FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state_in, const void* input, size_t len, XXH_endianess endian)
{
XXH_istate64_t * state = (XXH_istate64_t *) state_in;
const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (input==NULL) return XXH_ERROR;
#endif
state->total_len += len;
if (state->memsize + len < 32) /* fill in tmp buffer */
{
XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
state->memsize += (U32)len;
return XXH_OK;
}
if (state->memsize) /* some data left from previous update */
{
XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
{
const U64* p64 = state->mem64;
state->v1 += XXH_readLE64(p64, endian) * PRIME64_2;
state->v1 = XXH_rotl64(state->v1, 31);
state->v1 *= PRIME64_1;
p64++;
state->v2 += XXH_readLE64(p64, endian) * PRIME64_2;
state->v2 = XXH_rotl64(state->v2, 31);
state->v2 *= PRIME64_1;
p64++;
state->v3 += XXH_readLE64(p64, endian) * PRIME64_2;
state->v3 = XXH_rotl64(state->v3, 31);
state->v3 *= PRIME64_1;
p64++;
state->v4 += XXH_readLE64(p64, endian) * PRIME64_2;
state->v4 = XXH_rotl64(state->v4, 31);
state->v4 *= PRIME64_1;
p64++;
}
p += 32-state->memsize;
state->memsize = 0;
}
if (p+32 <= bEnd)
{
const BYTE* const limit = bEnd - 32;
U64 v1 = state->v1;
U64 v2 = state->v2;
U64 v3 = state->v3;
U64 v4 = state->v4;
do
{
v1 += XXH_readLE64(p, endian) * PRIME64_2;
v1 = XXH_rotl64(v1, 31);
v1 *= PRIME64_1;
p+=8;
v2 += XXH_readLE64(p, endian) * PRIME64_2;
v2 = XXH_rotl64(v2, 31);
v2 *= PRIME64_1;
p+=8;
v3 += XXH_readLE64(p, endian) * PRIME64_2;
v3 = XXH_rotl64(v3, 31);
v3 *= PRIME64_1;
p+=8;
v4 += XXH_readLE64(p, endian) * PRIME64_2;
v4 = XXH_rotl64(v4, 31);
v4 *= PRIME64_1;
p+=8;
}
while (p<=limit);
state->v1 = v1;
state->v2 = v2;
state->v3 = v3;
state->v4 = v4;
}
if (p < bEnd)
{
XXH_memcpy(state->mem64, p, bEnd-p);
state->memsize = (int)(bEnd-p);
}
return XXH_OK;
}
XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
else
return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}
FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state_in, XXH_endianess endian)
{
const XXH_istate64_t * state = (const XXH_istate64_t *) state_in;
const BYTE * p = (const BYTE*)state->mem64;
const BYTE* bEnd = (const BYTE*)state->mem64 + state->memsize;
U64 h64;
if (state->total_len >= 32)
{
U64 v1 = state->v1;
U64 v2 = state->v2;
U64 v3 = state->v3;
U64 v4 = state->v4;
h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
v1 *= PRIME64_2;
v1 = XXH_rotl64(v1, 31);
v1 *= PRIME64_1;
h64 ^= v1;
h64 = h64*PRIME64_1 + PRIME64_4;
v2 *= PRIME64_2;
v2 = XXH_rotl64(v2, 31);
v2 *= PRIME64_1;
h64 ^= v2;
h64 = h64*PRIME64_1 + PRIME64_4;
v3 *= PRIME64_2;
v3 = XXH_rotl64(v3, 31);
v3 *= PRIME64_1;
h64 ^= v3;
h64 = h64*PRIME64_1 + PRIME64_4;
v4 *= PRIME64_2;
v4 = XXH_rotl64(v4, 31);
v4 *= PRIME64_1;
h64 ^= v4;
h64 = h64*PRIME64_1 + PRIME64_4;
}
else
{
h64 = state->seed + PRIME64_5;
}
h64 += (U64) state->total_len;
while (p+8<=bEnd)
{
U64 k1 = XXH_readLE64(p, endian);
k1 *= PRIME64_2;
k1 = XXH_rotl64(k1,31);
k1 *= PRIME64_1;
h64 ^= k1;
h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
p+=8;
}
if (p+4<=bEnd)
{
h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
p+=4;
}
while (p<bEnd)
{
h64 ^= (*p) * PRIME64_5;
h64 = XXH_rotl64(h64, 11) * PRIME64_1;
p++;
}
h64 ^= h64 >> 33;
h64 *= PRIME64_2;
h64 ^= h64 >> 29;
h64 *= PRIME64_3;
h64 ^= h64 >> 32;
return h64;
}
unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_digest_endian(state_in, XXH_littleEndian);
else
return XXH64_digest_endian(state_in, XXH_bigEndian);
}
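/* Illustrative sketch (editor's addition, not part of the upstream source) :
* exercises the internal buffering path of XXH64_update() above by feeding
* odd-sized chunks smaller than the 32-byte stripe, then checks the result
* against the one-shot XXH64(). Guarded by #if 0; names are hypothetical. */
#if 0
#include <string.h>
#include "xxhash.h"

static int xxh64_chunked_sketch(void)
{
    const char data[] = "0123456789abcdefghijklmnopqrstuvwxyz0123456789";
    size_t len = sizeof(data) - 1;
    size_t pos = 0;
    unsigned long long hStream, hOneShot;

    XXH64_state_t* state = XXH64_createState();
    if (state == NULL) return 0;

    XXH64_reset(state, 2015);
    while (pos < len)
    {
        size_t chunk = (len - pos < 7) ? (len - pos) : 7;   /* 7-byte pieces force buffering */
        XXH64_update(state, data + pos, chunk);
        pos += chunk;
    }
    hStream = XXH64_digest(state);
    XXH64_freeState(state);

    hOneShot = XXH64(data, len, 2015);
    return hStream == hOneShot;   /* expected: 1 */
}
#endif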