Integrate NuDB

Vinnie Falco
2016-09-28 09:34:21 -04:00
parent bd93ecbd6b
commit 3b639afac2
39 changed files with 529 additions and 8143 deletions

Builds/VisualStudio2015/RippleD.vcxproj — View File

@@ -87,7 +87,7 @@
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='debug.classic|x64'">
<ClCompile>
<PreprocessorDefinitions>HAVE_USLEEP=1;SOCI_CXX_C11=1;_WIN32_WINNT=0x6000;BOOST_NO_AUTO_PTR;DEBUG;DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER;NOMINMAX;OPENSSL_NO_SSL2;WIN32_CONSOLE;_CRTDBG_MAP_ALLOC;_CRT_SECURE_NO_WARNINGS;_DEBUG;_SCL_SECURE_NO_WARNINGS;_SILENCE_STDEXT_HASH_DEPRECATION_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\build\proto;..\..\src;..\..\src\beast;..\..\src\beast\extras;..\..\src\beast\include;..\..\src\protobuf\src;..\..\src\protobuf\vsprojects;..\..\src\soci\include;..\..\src\soci\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>..\..\build\proto;..\..\src;..\..\src\beast;..\..\src\beast\extras;..\..\src\beast\include;..\..\src\nudb\include;..\..\src\protobuf\src;..\..\src\protobuf\vsprojects;..\..\src\soci\include;..\..\src\soci\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4800;4244;4267;4503;4018</DisableSpecificWarnings>
<ExceptionHandling>Async</ExceptionHandling>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
@@ -124,7 +124,7 @@
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='debug|x64'">
<ClCompile>
<PreprocessorDefinitions>HAVE_USLEEP=1;SOCI_CXX_C11=1;_WIN32_WINNT=0x6000;BOOST_NO_AUTO_PTR;DEBUG;DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER;NOMINMAX;OPENSSL_NO_SSL2;WIN32_CONSOLE;_CRTDBG_MAP_ALLOC;_CRT_SECURE_NO_WARNINGS;_DEBUG;_SCL_SECURE_NO_WARNINGS;_SILENCE_STDEXT_HASH_DEPRECATION_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\build\proto;..\..\src;..\..\src\beast;..\..\src\beast\extras;..\..\src\beast\include;..\..\src\protobuf\src;..\..\src\protobuf\vsprojects;..\..\src\soci\include;..\..\src\soci\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>..\..\build\proto;..\..\src;..\..\src\beast;..\..\src\beast\extras;..\..\src\beast\include;..\..\src\nudb\include;..\..\src\protobuf\src;..\..\src\protobuf\vsprojects;..\..\src\soci\include;..\..\src\soci\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4800;4244;4267;4503;4018</DisableSpecificWarnings>
<ExceptionHandling>Async</ExceptionHandling>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
@@ -161,7 +161,7 @@
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='release.classic|x64'">
<ClCompile>
<PreprocessorDefinitions>HAVE_USLEEP=1;SOCI_CXX_C11=1;_WIN32_WINNT=0x6000;BOOST_NO_AUTO_PTR;DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER;NDEBUG;NOMINMAX;OPENSSL_NO_SSL2;WIN32_CONSOLE;_CRT_SECURE_NO_WARNINGS;_SCL_SECURE_NO_WARNINGS;_SILENCE_STDEXT_HASH_DEPRECATION_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\build\proto;..\..\src;..\..\src\beast;..\..\src\beast\extras;..\..\src\beast\include;..\..\src\protobuf\src;..\..\src\protobuf\vsprojects;..\..\src\soci\include;..\..\src\soci\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>..\..\build\proto;..\..\src;..\..\src\beast;..\..\src\beast\extras;..\..\src\beast\include;..\..\src\nudb\include;..\..\src\protobuf\src;..\..\src\protobuf\vsprojects;..\..\src\soci\include;..\..\src\soci\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4800;4244;4267;4503;4018</DisableSpecificWarnings>
<ExceptionHandling>Async</ExceptionHandling>
<FloatingPointModel>Precise</FloatingPointModel>
@@ -196,7 +196,7 @@
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='release|x64'">
<ClCompile>
<PreprocessorDefinitions>HAVE_USLEEP=1;SOCI_CXX_C11=1;_WIN32_WINNT=0x6000;BOOST_NO_AUTO_PTR;DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER;NDEBUG;NOMINMAX;OPENSSL_NO_SSL2;WIN32_CONSOLE;_CRT_SECURE_NO_WARNINGS;_SCL_SECURE_NO_WARNINGS;_SILENCE_STDEXT_HASH_DEPRECATION_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\..\build\proto;..\..\src;..\..\src\beast;..\..\src\beast\extras;..\..\src\beast\include;..\..\src\protobuf\src;..\..\src\protobuf\vsprojects;..\..\src\soci\include;..\..\src\soci\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>..\..\build\proto;..\..\src;..\..\src\beast;..\..\src\beast\extras;..\..\src\beast\include;..\..\src\nudb\include;..\..\src\protobuf\src;..\..\src\protobuf\vsprojects;..\..\src\soci\include;..\..\src\soci\src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<DisableSpecificWarnings>4800;4244;4267;4503;4018</DisableSpecificWarnings>
<ExceptionHandling>Async</ExceptionHandling>
<FloatingPointModel>Precise</FloatingPointModel>
@@ -509,6 +509,86 @@
</ClCompile>
<ClInclude Include="..\..\src\lz4\lib\xxhash.h">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\basic_store.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\concepts.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\create.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\arena.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\bucket.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\buffer.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\bulkio.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\cache.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\endian.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\field.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\format.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\gentex.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\mutex.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\pool.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\stream.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\xxhash.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\error.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\file.hpp">
</ClInclude>
<None Include="..\..\src\nudb\include\nudb\impl\basic_store.ipp">
</None>
<None Include="..\..\src\nudb\include\nudb\impl\create.ipp">
</None>
<None Include="..\..\src\nudb\include\nudb\impl\error.ipp">
</None>
<None Include="..\..\src\nudb\include\nudb\impl\posix_file.ipp">
</None>
<None Include="..\..\src\nudb\include\nudb\impl\recover.ipp">
</None>
<None Include="..\..\src\nudb\include\nudb\impl\rekey.ipp">
</None>
<None Include="..\..\src\nudb\include\nudb\impl\verify.ipp">
</None>
<None Include="..\..\src\nudb\include\nudb\impl\visit.ipp">
</None>
<None Include="..\..\src\nudb\include\nudb\impl\win32_file.ipp">
</None>
<ClInclude Include="..\..\src\nudb\include\nudb\native_file.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\nudb.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\posix_file.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\progress.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\recover.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\rekey.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\store.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\type_traits.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\verify.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\version.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\visit.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\win32_file.hpp">
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\xxhasher.hpp">
</ClInclude>
<ClCompile Include="..\..\src\protobuf\src\google\protobuf\compiler\importer.cc">
<ExcludedFromBuild>True</ExcludedFromBuild>
</ClCompile>
@@ -1642,56 +1722,6 @@
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\net\IPEndpoint.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\api.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\common.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\create.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\arena.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\bucket.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\buffer.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\bulkio.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\cache.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\field.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\format.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\gentex.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\pool.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\stream.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\file.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\identity.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\posix_file.h">
</ClInclude>
<None Include="..\..\src\ripple\beast\nudb\README.md">
</None>
<ClInclude Include="..\..\src\ripple\beast\nudb\recover.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\store.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\test\common.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\test\fail_file.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\verify.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\visit.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\win32_file.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\rfc2616.h">
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\type_name.h">
@@ -4440,22 +4470,6 @@
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='debug|x64'">True</ExcludedFromBuild>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='release|x64'">True</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="..\..\src\test\beast\beast_nudb_callgrind_test.cpp">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='debug|x64'">True</ExcludedFromBuild>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='release|x64'">True</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="..\..\src\test\beast\beast_nudb_recover_test.cpp">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='debug|x64'">True</ExcludedFromBuild>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='release|x64'">True</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="..\..\src\test\beast\beast_nudb_store_test.cpp">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='debug|x64'">True</ExcludedFromBuild>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='release|x64'">True</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="..\..\src\test\beast\beast_nudb_verify_test.cpp">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='debug|x64'">True</ExcludedFromBuild>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='release|x64'">True</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="..\..\src\test\beast\beast_PropertyStream_test.cpp">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='debug|x64'">True</ExcludedFromBuild>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='release|x64'">True</ExcludedFromBuild>

Builds/VisualStudio2015/RippleD.vcxproj.filters — View File

@@ -4,66 +4,63 @@
<Filter Include=".">
<UniqueIdentifier>{B4CFBB41-DA25-70B1-E486-4F869E226E12}</UniqueIdentifier>
</Filter>
<Filter Include="build">
<UniqueIdentifier>{65697F48-7FC6-2A4B-DB6C-56781F3990B5}</UniqueIdentifier>
<Filter Include="beast">
<UniqueIdentifier>{6DD9EDCB-63DB-D377-3F12-12825FA8437E}</UniqueIdentifier>
</Filter>
<Filter Include="build\proto">
<UniqueIdentifier>{5D2BC8F1-FF8F-3CCC-7292-795D59EB927A}</UniqueIdentifier>
<Filter Include="beast\core">
<UniqueIdentifier>{3CBEFECB-9A7C-B235-BA05-30D41AB8827D}</UniqueIdentifier>
</Filter>
<Filter Include="beast\core\detail">
<UniqueIdentifier>{79EFD023-52CA-3A59-E21D-E73328FA46EA}</UniqueIdentifier>
</Filter>
<Filter Include="beast\core\impl">
<UniqueIdentifier>{2A1F4F78-5521-936E-8CE2-BA798D578A14}</UniqueIdentifier>
</Filter>
<Filter Include="beast\http">
<UniqueIdentifier>{7138D215-DA65-98D5-EF7D-C9896685201E}</UniqueIdentifier>
</Filter>
<Filter Include="beast\http\detail">
<UniqueIdentifier>{3E84AA4C-CB48-99F0-EB35-5603FF633A51}</UniqueIdentifier>
</Filter>
<Filter Include="beast\http\impl">
<UniqueIdentifier>{932F732F-F09E-5C50-C8A1-D62342CCAA1F}</UniqueIdentifier>
</Filter>
<Filter Include="beast\unit_test">
<UniqueIdentifier>{2762284D-66E5-8B48-1F8E-67116DB1FC6B}</UniqueIdentifier>
</Filter>
<Filter Include="beast\unit_test\detail">
<UniqueIdentifier>{AC49CD8A-C2A7-FBEC-CA36-635A5303E73E}</UniqueIdentifier>
</Filter>
<Filter Include="beast\websocket">
<UniqueIdentifier>{D05C2DB7-CE99-9326-23E6-4C7828632E0E}</UniqueIdentifier>
</Filter>
<Filter Include="beast\websocket\detail">
<UniqueIdentifier>{FB0BEDED-4C83-F0AF-0450-3DB01A05BD0B}</UniqueIdentifier>
</Filter>
<Filter Include="beast\websocket\impl">
<UniqueIdentifier>{A7FC9CC0-AB8D-4252-CCB2-B67F7BE99CF5}</UniqueIdentifier>
</Filter>
<Filter Include="ed25519-donna">
<UniqueIdentifier>{9DEED977-2072-A182-5BD9-CEBF206E8C91}</UniqueIdentifier>
</Filter>
<Filter Include="extras">
<UniqueIdentifier>{477971A7-4031-9D7E-C153-2FC390C183F6}</UniqueIdentifier>
</Filter>
<Filter Include="extras\beast">
<UniqueIdentifier>{F8A3CFB1-5785-C83C-4338-5F27773F327D}</UniqueIdentifier>
</Filter>
<Filter Include="extras\beast\unit_test">
<UniqueIdentifier>{8F81A840-0F90-3664-FE3C-4EAF71864EA3}</UniqueIdentifier>
</Filter>
<Filter Include="extras\beast\unit_test\detail">
<UniqueIdentifier>{C4E6332D-1115-85EB-D574-A18E70A03124}</UniqueIdentifier>
</Filter>
<Filter Include="include">
<UniqueIdentifier>{BFB5FB0D-E563-77CD-68EA-A6E186D6240D}</UniqueIdentifier>
</Filter>
<Filter Include="include\beast">
<UniqueIdentifier>{0959FCA5-589F-980F-57AF-AB859F5A61AA}</UniqueIdentifier>
</Filter>
<Filter Include="include\beast\core">
<UniqueIdentifier>{0E36C1CA-6BDD-2413-4E69-64ED5F44F9D5}</UniqueIdentifier>
</Filter>
<Filter Include="include\beast\core\detail">
<UniqueIdentifier>{A37B2E28-7A35-B699-8B09-D51004D302FA}</UniqueIdentifier>
</Filter>
<Filter Include="include\beast\core\impl">
<UniqueIdentifier>{9EFA48D3-D5FA-A645-5DD5-ED6EBAF39EF6}</UniqueIdentifier>
</Filter>
<Filter Include="include\beast\http">
<UniqueIdentifier>{78230F53-2814-6C7D-1558-87709A2F6090}</UniqueIdentifier>
</Filter>
<Filter Include="include\beast\http\detail">
<UniqueIdentifier>{617AF364-CB0C-87ED-DA43-668BB1D6F6FC}</UniqueIdentifier>
</Filter>
<Filter Include="include\beast\http\impl">
<UniqueIdentifier>{DDE347C4-F990-B1A8-1D36-357D36685D1F}</UniqueIdentifier>
</Filter>
<Filter Include="include\beast\websocket">
<UniqueIdentifier>{A3314CD5-8DAB-E905-89CA-0DBAEF8A4FFF}</UniqueIdentifier>
</Filter>
<Filter Include="include\beast\websocket\detail">
<UniqueIdentifier>{5BB26F63-320D-F26F-D2F3-3466F745C6AD}</UniqueIdentifier>
</Filter>
<Filter Include="include\beast\websocket\impl">
<UniqueIdentifier>{2A5AEEC4-AF29-1B47-F2EE-8A478584EE5D}</UniqueIdentifier>
</Filter>
<Filter Include="lz4">
<UniqueIdentifier>{B211F8F1-22D2-47BA-C39E-F9846A844D11}</UniqueIdentifier>
</Filter>
<Filter Include="lz4\lib">
<UniqueIdentifier>{4F65E5BD-7EC4-113A-4603-B4625F16BC18}</UniqueIdentifier>
</Filter>
<Filter Include="nudb">
<UniqueIdentifier>{A5499F4E-D602-E9BA-FBB6-DCF5ED7C0D61}</UniqueIdentifier>
</Filter>
<Filter Include="nudb\detail">
<UniqueIdentifier>{B97D2E02-83E3-F589-90C1-1B3398703938}</UniqueIdentifier>
</Filter>
<Filter Include="nudb\impl">
<UniqueIdentifier>{F71FA1B4-4F94-A225-CFD2-C7A7B7966A9C}</UniqueIdentifier>
</Filter>
<Filter Include="proto">
<UniqueIdentifier>{133EFD69-2398-8391-8995-ADF0FDB69A27}</UniqueIdentifier>
</Filter>
<Filter Include="protobuf">
<UniqueIdentifier>{C535C933-C404-7C0F-2AB9-059F92DE0A80}</UniqueIdentifier>
</Filter>
@@ -184,15 +181,6 @@
<Filter Include="ripple\beast\net\impl">
<UniqueIdentifier>{38E875CA-FF93-DCC2-393B-1E2E128AD077}</UniqueIdentifier>
</Filter>
<Filter Include="ripple\beast\nudb">
<UniqueIdentifier>{92DAC974-9800-9B1C-B6A6-1E548AFC724D}</UniqueIdentifier>
</Filter>
<Filter Include="ripple\beast\nudb\detail">
<UniqueIdentifier>{6D4C9043-06DF-7F53-B6B5-3174296EEA1C}</UniqueIdentifier>
</Filter>
<Filter Include="ripple\beast\nudb\test">
<UniqueIdentifier>{DD118097-3896-F878-7160-62912C662834}</UniqueIdentifier>
</Filter>
<Filter Include="ripple\beast\unity">
<UniqueIdentifier>{D7812F07-CB10-3361-544B-EB66B18F2D7F}</UniqueIdentifier>
</Filter>
@@ -535,331 +523,331 @@
</ItemGroup>
<ItemGroup>
<ClCompile Include="..\..\build\proto\ripple.pb.cc">
<Filter>build\proto</Filter>
<Filter>proto</Filter>
</ClCompile>
<ClInclude Include="..\..\build\proto\ripple.pb.h">
<Filter>build\proto</Filter>
<Filter>proto</Filter>
</ClInclude>
<ClInclude Include="..\..\src\BeastConfig.h">
<Filter>.</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\extras\beast\unit_test\amount.hpp">
<Filter>extras\beast\unit_test</Filter>
<Filter>beast\unit_test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\extras\beast\unit_test\detail\const_container.hpp">
<Filter>extras\beast\unit_test\detail</Filter>
<Filter>beast\unit_test\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\extras\beast\unit_test\dstream.hpp">
<Filter>extras\beast\unit_test</Filter>
<Filter>beast\unit_test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\extras\beast\unit_test\global_suites.hpp">
<Filter>extras\beast\unit_test</Filter>
<Filter>beast\unit_test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\extras\beast\unit_test\match.hpp">
<Filter>extras\beast\unit_test</Filter>
<Filter>beast\unit_test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\extras\beast\unit_test\recorder.hpp">
<Filter>extras\beast\unit_test</Filter>
<Filter>beast\unit_test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\extras\beast\unit_test\reporter.hpp">
<Filter>extras\beast\unit_test</Filter>
<Filter>beast\unit_test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\extras\beast\unit_test\results.hpp">
<Filter>extras\beast\unit_test</Filter>
<Filter>beast\unit_test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\extras\beast\unit_test\runner.hpp">
<Filter>extras\beast\unit_test</Filter>
<Filter>beast\unit_test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\extras\beast\unit_test\suite.hpp">
<Filter>extras\beast\unit_test</Filter>
<Filter>beast\unit_test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\extras\beast\unit_test\suite_info.hpp">
<Filter>extras\beast\unit_test</Filter>
<Filter>beast\unit_test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\extras\beast\unit_test\suite_list.hpp">
<Filter>extras\beast\unit_test</Filter>
<Filter>beast\unit_test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\extras\beast\unit_test\thread.hpp">
<Filter>extras\beast\unit_test</Filter>
<Filter>beast\unit_test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\async_completion.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\basic_streambuf.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\bind_handler.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\buffer_cat.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\buffer_concepts.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\consuming_buffers.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\detail\base64.hpp">
<Filter>include\beast\core\detail</Filter>
<Filter>beast\core\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\detail\bind_handler.hpp">
<Filter>include\beast\core\detail</Filter>
<Filter>beast\core\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\detail\buffer_cat.hpp">
<Filter>include\beast\core\detail</Filter>
<Filter>beast\core\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\detail\buffer_concepts.hpp">
<Filter>include\beast\core\detail</Filter>
<Filter>beast\core\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\detail\ci_char_traits.hpp">
<Filter>include\beast\core\detail</Filter>
<Filter>beast\core\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\detail\empty_base_optimization.hpp">
<Filter>include\beast\core\detail</Filter>
<Filter>beast\core\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\detail\get_lowest_layer.hpp">
<Filter>include\beast\core\detail</Filter>
<Filter>beast\core\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\detail\integer_sequence.hpp">
<Filter>include\beast\core\detail</Filter>
<Filter>beast\core\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\detail\is_call_possible.hpp">
<Filter>include\beast\core\detail</Filter>
<Filter>beast\core\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\detail\sha1.hpp">
<Filter>include\beast\core\detail</Filter>
<Filter>beast\core\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\detail\stream_concepts.hpp">
<Filter>include\beast\core\detail</Filter>
<Filter>beast\core\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\detail\write_dynabuf.hpp">
<Filter>include\beast\core\detail</Filter>
<Filter>beast\core\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\dynabuf_readstream.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\error.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\handler_alloc.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\handler_concepts.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<None Include="..\..\src\beast\include\beast\core\impl\basic_streambuf.ipp">
<Filter>include\beast\core\impl</Filter>
<Filter>beast\core\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\core\impl\consuming_buffers.ipp">
<Filter>include\beast\core\impl</Filter>
<Filter>beast\core\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\core\impl\dynabuf_readstream.ipp">
<Filter>include\beast\core\impl</Filter>
<Filter>beast\core\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\core\impl\prepare_buffers.ipp">
<Filter>include\beast\core\impl</Filter>
<Filter>beast\core\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\core\impl\static_streambuf.ipp">
<Filter>include\beast\core\impl</Filter>
<Filter>beast\core\impl</Filter>
</None>
<ClInclude Include="..\..\src\beast\include\beast\core\placeholders.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\prepare_buffers.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\static_streambuf.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\static_string.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\streambuf.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\stream_concepts.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\to_string.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\core\write_dynabuf.hpp">
<Filter>include\beast\core</Filter>
<Filter>beast\core</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http.hpp">
<Filter>include\beast</Filter>
<Filter>beast</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\basic_dynabuf_body.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\basic_headers.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\basic_parser_v1.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\body_type.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\concepts.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\detail\basic_parser_v1.hpp">
<Filter>include\beast\http\detail</Filter>
<Filter>beast\http\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\detail\chunk_encode.hpp">
<Filter>include\beast\http\detail</Filter>
<Filter>beast\http\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\detail\has_content_length.hpp">
<Filter>include\beast\http\detail</Filter>
<Filter>beast\http\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\detail\rfc7230.hpp">
<Filter>include\beast\http\detail</Filter>
<Filter>beast\http\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\empty_body.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\headers.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<None Include="..\..\src\beast\include\beast\http\impl\basic_headers.ipp">
<Filter>include\beast\http\impl</Filter>
<Filter>beast\http\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\http\impl\basic_parser_v1.ipp">
<Filter>include\beast\http\impl</Filter>
<Filter>beast\http\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\http\impl\message_v1.ipp">
<Filter>include\beast\http\impl</Filter>
<Filter>beast\http\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\http\impl\read.ipp">
<Filter>include\beast\http\impl</Filter>
<Filter>beast\http\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\http\impl\rfc7230.ipp">
<Filter>include\beast\http\impl</Filter>
<Filter>beast\http\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\http\impl\write.ipp">
<Filter>include\beast\http\impl</Filter>
<Filter>beast\http\impl</Filter>
</None>
<ClInclude Include="..\..\src\beast\include\beast\http\message.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\message_v1.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\parser_v1.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\parse_error.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\read.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\reason.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\resume_context.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\rfc7230.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\streambuf_body.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\string_body.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\http\write.hpp">
<Filter>include\beast\http</Filter>
<Filter>beast\http</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket.hpp">
<Filter>include\beast</Filter>
<Filter>beast</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket\detail\decorator.hpp">
<Filter>include\beast\websocket\detail</Filter>
<Filter>beast\websocket\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket\detail\endian.hpp">
<Filter>include\beast\websocket\detail</Filter>
<Filter>beast\websocket\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket\detail\error.hpp">
<Filter>include\beast\websocket\detail</Filter>
<Filter>beast\websocket\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket\detail\frame.hpp">
<Filter>include\beast\websocket\detail</Filter>
<Filter>beast\websocket\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket\detail\hybi13.hpp">
<Filter>include\beast\websocket\detail</Filter>
<Filter>beast\websocket\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket\detail\invokable.hpp">
<Filter>include\beast\websocket\detail</Filter>
<Filter>beast\websocket\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket\detail\mask.hpp">
<Filter>include\beast\websocket\detail</Filter>
<Filter>beast\websocket\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket\detail\stream_base.hpp">
<Filter>include\beast\websocket\detail</Filter>
<Filter>beast\websocket\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket\detail\utf8_checker.hpp">
<Filter>include\beast\websocket\detail</Filter>
<Filter>beast\websocket\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket\error.hpp">
<Filter>include\beast\websocket</Filter>
<Filter>beast\websocket</Filter>
</ClInclude>
<None Include="..\..\src\beast\include\beast\websocket\impl\accept_op.ipp">
<Filter>include\beast\websocket\impl</Filter>
<Filter>beast\websocket\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\websocket\impl\close_op.ipp">
<Filter>include\beast\websocket\impl</Filter>
<Filter>beast\websocket\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\websocket\impl\error.ipp">
<Filter>include\beast\websocket\impl</Filter>
<Filter>beast\websocket\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\websocket\impl\handshake_op.ipp">
<Filter>include\beast\websocket\impl</Filter>
<Filter>beast\websocket\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\websocket\impl\ping_op.ipp">
<Filter>include\beast\websocket\impl</Filter>
<Filter>beast\websocket\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\websocket\impl\read_frame_op.ipp">
<Filter>include\beast\websocket\impl</Filter>
<Filter>beast\websocket\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\websocket\impl\read_op.ipp">
<Filter>include\beast\websocket\impl</Filter>
<Filter>beast\websocket\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\websocket\impl\response_op.ipp">
<Filter>include\beast\websocket\impl</Filter>
<Filter>beast\websocket\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\websocket\impl\ssl.ipp">
<Filter>include\beast\websocket\impl</Filter>
<Filter>beast\websocket\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\websocket\impl\stream.ipp">
<Filter>include\beast\websocket\impl</Filter>
<Filter>beast\websocket\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\websocket\impl\teardown.ipp">
<Filter>include\beast\websocket\impl</Filter>
<Filter>beast\websocket\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\websocket\impl\write_frame_op.ipp">
<Filter>include\beast\websocket\impl</Filter>
<Filter>beast\websocket\impl</Filter>
</None>
<None Include="..\..\src\beast\include\beast\websocket\impl\write_op.ipp">
<Filter>include\beast\websocket\impl</Filter>
<Filter>beast\websocket\impl</Filter>
</None>
<ClInclude Include="..\..\src\beast\include\beast\websocket\option.hpp">
<Filter>include\beast\websocket</Filter>
<Filter>beast\websocket</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket\rfc6455.hpp">
<Filter>include\beast\websocket</Filter>
<Filter>beast\websocket</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket\ssl.hpp">
<Filter>include\beast\websocket</Filter>
<Filter>beast\websocket</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket\stream.hpp">
<Filter>include\beast\websocket</Filter>
<Filter>beast\websocket</Filter>
</ClInclude>
<ClInclude Include="..\..\src\beast\include\beast\websocket\teardown.hpp">
<Filter>include\beast\websocket</Filter>
<Filter>beast\websocket</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ed25519-donna\curve25519-donna-32bit.h">
<Filter>ed25519-donna</Filter>
@@ -948,6 +936,126 @@
<ClInclude Include="..\..\src\lz4\lib\xxhash.h">
<Filter>lz4\lib</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\basic_store.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\concepts.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\create.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\arena.hpp">
<Filter>nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\bucket.hpp">
<Filter>nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\buffer.hpp">
<Filter>nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\bulkio.hpp">
<Filter>nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\cache.hpp">
<Filter>nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\endian.hpp">
<Filter>nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\field.hpp">
<Filter>nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\format.hpp">
<Filter>nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\gentex.hpp">
<Filter>nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\mutex.hpp">
<Filter>nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\pool.hpp">
<Filter>nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\stream.hpp">
<Filter>nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\detail\xxhash.hpp">
<Filter>nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\error.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\file.hpp">
<Filter>nudb</Filter>
</ClInclude>
<None Include="..\..\src\nudb\include\nudb\impl\basic_store.ipp">
<Filter>nudb\impl</Filter>
</None>
<None Include="..\..\src\nudb\include\nudb\impl\create.ipp">
<Filter>nudb\impl</Filter>
</None>
<None Include="..\..\src\nudb\include\nudb\impl\error.ipp">
<Filter>nudb\impl</Filter>
</None>
<None Include="..\..\src\nudb\include\nudb\impl\posix_file.ipp">
<Filter>nudb\impl</Filter>
</None>
<None Include="..\..\src\nudb\include\nudb\impl\recover.ipp">
<Filter>nudb\impl</Filter>
</None>
<None Include="..\..\src\nudb\include\nudb\impl\rekey.ipp">
<Filter>nudb\impl</Filter>
</None>
<None Include="..\..\src\nudb\include\nudb\impl\verify.ipp">
<Filter>nudb\impl</Filter>
</None>
<None Include="..\..\src\nudb\include\nudb\impl\visit.ipp">
<Filter>nudb\impl</Filter>
</None>
<None Include="..\..\src\nudb\include\nudb\impl\win32_file.ipp">
<Filter>nudb\impl</Filter>
</None>
<ClInclude Include="..\..\src\nudb\include\nudb\native_file.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\nudb.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\posix_file.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\progress.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\recover.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\rekey.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\store.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\type_traits.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\verify.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\version.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\visit.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\win32_file.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\nudb\include\nudb\xxhasher.hpp">
<Filter>nudb</Filter>
</ClInclude>
<ClCompile Include="..\..\src\protobuf\src\google\protobuf\compiler\importer.cc">
<Filter>protobuf\src\google\protobuf\compiler</Filter>
</ClCompile>
@@ -2226,81 +2334,6 @@
<ClInclude Include="..\..\src\ripple\beast\net\IPEndpoint.h">
<Filter>ripple\beast\net</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb.h">
<Filter>ripple\beast</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\api.h">
<Filter>ripple\beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\common.h">
<Filter>ripple\beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\create.h">
<Filter>ripple\beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\arena.h">
<Filter>ripple\beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\bucket.h">
<Filter>ripple\beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\buffer.h">
<Filter>ripple\beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\bulkio.h">
<Filter>ripple\beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\cache.h">
<Filter>ripple\beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\field.h">
<Filter>ripple\beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\format.h">
<Filter>ripple\beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\gentex.h">
<Filter>ripple\beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\pool.h">
<Filter>ripple\beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\detail\stream.h">
<Filter>ripple\beast\nudb\detail</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\file.h">
<Filter>ripple\beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\identity.h">
<Filter>ripple\beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\posix_file.h">
<Filter>ripple\beast\nudb</Filter>
</ClInclude>
<None Include="..\..\src\ripple\beast\nudb\README.md">
<Filter>ripple\beast\nudb</Filter>
</None>
<ClInclude Include="..\..\src\ripple\beast\nudb\recover.h">
<Filter>ripple\beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\store.h">
<Filter>ripple\beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\test\common.h">
<Filter>ripple\beast\nudb\test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\test\fail_file.h">
<Filter>ripple\beast\nudb\test</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\verify.h">
<Filter>ripple\beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\visit.h">
<Filter>ripple\beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\nudb\win32_file.h">
<Filter>ripple\beast\nudb</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple\beast\rfc2616.h">
<Filter>ripple\beast</Filter>
</ClInclude>
@@ -5214,18 +5247,6 @@
<ClCompile Include="..\..\src\test\beast\beast_Journal_test.cpp">
<Filter>test\beast</Filter>
</ClCompile>
<ClCompile Include="..\..\src\test\beast\beast_nudb_callgrind_test.cpp">
<Filter>test\beast</Filter>
</ClCompile>
<ClCompile Include="..\..\src\test\beast\beast_nudb_recover_test.cpp">
<Filter>test\beast</Filter>
</ClCompile>
<ClCompile Include="..\..\src\test\beast\beast_nudb_store_test.cpp">
<Filter>test\beast</Filter>
</ClCompile>
<ClCompile Include="..\..\src\test\beast\beast_nudb_verify_test.cpp">
<Filter>test\beast</Filter>
</ClCompile>
<ClCompile Include="..\..\src\test\beast\beast_PropertyStream_test.cpp">
<Filter>test\beast</Filter>
</ClCompile>

CMakeLists.txt — View File

@@ -278,7 +278,7 @@ endif()
############################################################
include_directories(src src/beast src/beast/include src/beast/extras
src/soci/src src/soci/include)
src/nudb/include src/soci/src src/soci/include)
if (coverage)
add_compile_options(-fprofile-arcs -ftest-coverage)
@@ -696,7 +696,7 @@ if (WIN32 OR is_xcode OR NOT unity)
set(non_unity_srcs ${core_srcs})
foreach(curdir
beast/clock beast/container beast/insight beast/net beast/nudb beast/utility
beast/clock beast/container beast/insight beast/net beast/utility
app basics crypto json ledger legacy net overlay peerfinder protocol rpc
shamap server test)
file(GLOB_RECURSE cursrcs src/ripple/${curdir}/*.cpp)

SConstruct — View File

@@ -771,6 +771,7 @@ base.Append(CPPPATH=[
os.path.join('src', 'beast'),
os.path.join('src', 'beast', 'include'),
os.path.join('src', 'beast', 'extras'),
os.path.join('src', 'nudb', 'include'),
os.path.join(build_dir, 'proto'),
os.path.join('src','soci','src'),
os.path.join('src','soci','include'),
@@ -908,7 +909,6 @@ def get_classic_sources(toolchain):
append_sources(result, *list_sources('src/ripple/beast/container', '.cpp'))
append_sources(result, *list_sources('src/ripple/beast/insight', '.cpp'))
append_sources(result, *list_sources('src/ripple/beast/net', '.cpp'))
append_sources(result, *list_sources('src/ripple/beast/nudb', '.cpp'))
append_sources(result, *list_sources('src/ripple/beast/utility', '.cpp'))
append_sources(result, *list_sources('src/ripple/app', '.cpp'))
append_sources(result, *list_sources('src/ripple/basics', '.cpp'))
@@ -1219,7 +1219,13 @@ for key, value in aliases.iteritems():
vcxproj = base.VSProject(
os.path.join('Builds', 'VisualStudio2015', 'RippleD'),
source = [],
VSPROJECT_ROOT_DIRS = ['src/beast', 'src/beast/include', 'src/beast/extras', 'src', '.'],
VSPROJECT_ROOT_DIRS = [
'build/',
'src/beast/extras',
'src/beast/include',
'src/nudb/include',
'src',
'.'],
VSPROJECT_CONFIGS = msvc_configs)
base.Alias('vcxproj', vcxproj)

src/ripple/beast/nudb.h (deleted) — View File

@@ -1,32 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_H_INCLUDED
#define BEAST_NUDB_H_INCLUDED
#include <ripple/beast/nudb/api.h>
#include <ripple/beast/nudb/create.h>
#include <ripple/beast/nudb/common.h>
#include <ripple/beast/nudb/file.h>
#include <ripple/beast/nudb/recover.h>
#include <ripple/beast/nudb/store.h>
#include <ripple/beast/nudb/verify.h>
#include <ripple/beast/nudb/visit.h>
#endif

src/ripple/beast/nudb/README.md (deleted) — View File

@@ -1,288 +0,0 @@
# NuDB: A Key/Value Store For Decentralized Systems
The new breed of decentralized systems such as Ripple or Bitcoin
that use embedded key/value databases place different demands on
these databases than is traditional. NuDB provides highly
optimized and concurrent atomic, durable, and isolated fetch and
insert operations to secondary storage, along with these features:
* Low memory footprint.
* Values are immutable.
* Value sizes from 1 to 2^48 bytes (281TB).
* All keys are the same size.
* Performance independent of growth.
* Optimized for concurrent fetch.
* Key file can be rebuilt if needed.
* Inserts are atomic and consistent.
* Data files may be efficiently iterated.
* Key and data files may be on different volumes.
* Hardened against algorithmic complexity attacks.
* Header-only, nothing to build or link.
Three files are used.
* The data file holds keys and values stored sequentially and size-prefixed.
* The key file holds a series of fixed-size bucket records forming an on-disk
hash table.
* The log file stores bookkeeping information used to restore consistency when
an external failure occurs.
In typical cases a fetch costs one I/O cycle to consult the key file, and if the
key is present, one I/O cycle to read the value.
## Usage
Callers must define these parameters when _creating_ a database:
* `KeySize`: The size of a key in bytes.
* `BlockSize`: The physical size of a key file record.
The ideal block size matches the sector size or block size of the
underlying physical media that holds the key file. Functions are
provided to return a best estimate of this value for a particular
device, but a default of 4096 should work for typical installations.
The implementation tries to fit as many entries as possible in a key
file record, to maximize the amount of useful work performed per I/O.
* `LoadFactor`: The desired fraction of bucket occupancy.
`LoadFactor` is chosen to make bucket overflows unlikely without
sacrificing bucket occupancy. A value of 0.50 seems to work well with
a good hash function.
Callers must also provide these parameters when a database is _opened:_
* `Appnum`: An application-defined integer constant which can be retrieved
later from the database [TODO].
* `AllocSize`: A significant multiple of the average data size.
Memory is recycled to improve performance, so NuDB needs `AllocSize` as a
hint about the average size of the data being inserted. For an average data
size of 1KB (one kilobyte), an `AllocSize` of sixteen megabytes (16MB) is
sufficient. If
the `AllocSize` is too low, the memory recycler will not make efficient use of
allocated blocks.
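To make these parameters concrete, here is a minimal sketch that creates and opens a store using the NuDB library this commit vendors under `src/nudb` (the `beast::nudb` wrapper documented here exposes the same parameters through its `api<>` struct, removed later in this diff). The file names and the `xxhasher` choice are illustrative, and the signatures are assumed to follow the library's documented example:

```cpp
#include <nudb/nudb.hpp>
#include <cstddef>
#include <cstdint>

int main()
{
    using key_type = std::uint32_t;   // KeySize = sizeof(key_type)
    nudb::error_code ec;
    nudb::create<nudb::xxhasher>(
        "db.dat", "db.key", "db.log", // data, key, and log files
        1,                            // Appnum
        nudb::make_salt(),            // Salt: a random seed
        sizeof(key_type),             // KeySize
        nudb::block_size("."),        // BlockSize: estimate for this volume
        0.5f,                         // LoadFactor
        ec);
    if(ec)
        return 1;
    nudb::store db;
    db.open("db.dat", "db.key", "db.log", ec);
    if(ec)
        return 1;
    db.close(ec);
}
```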
Two operations are defined: `fetch` and `insert`.
### `fetch`
The `fetch` operation retrieves a variable-length value given the
key. The caller supplies a factory used to provide a buffer for storing
the value. This interface allows custom memory allocation strategies.
### `insert`
`insert` adds a key/value pair to the store. Value data must contain at least
one byte. Duplicate keys are disallowed. Insertions are serialized, which means
[TODO].
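A sketch of both operations, assuming the `db` and `ec` from the creation sketch above; the fetch callback receives a pointer/size pair, standing in for the buffer factory described here:

```cpp
std::uint32_t key = 42;
char const value[] = "hello";

// insert: atomic; a duplicate key is reported through ec
db.insert(&key, value, sizeof(value), ec);

// fetch: the callback runs only if the key exists, and the
// buffer is valid only for the duration of the callback
db.fetch(&key,
    [](void const* buffer, std::size_t size)
    {
        // copy or inspect the value here
    }, ec);
```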
## Implementation
All insertions are buffered in memory, with inserted values becoming
immediately discoverable in subsequent or concurrent calls to fetch.
Periodically, buffered data is safely committed to disk files using
a separate dedicated thread associated with the database. This commit
process takes place at least once per second, or more often during
a detected surge in insertion activity. In the commit process the
key/value pairs receive the following treatment:
An insertion is performed by appending a value record to the data file.
The value record has some header information including the size of the
data and a copy of the key; the data file is iterable without the key
file. The value data follows the header. The data file is append-only
and immutable: once written, bytes are never changed.
Initially the hash table in the key file consists of a single bucket.
When insertions cause the load factor to be exceeded, the hash table grows
by one bucket by performing a "split". The split operation is the
[linear hashing algorithm](http://en.wikipedia.org/wiki/Linear_hashing)
as described by Litwin and Larson.
When a bucket is split, each key is rehashed, and either remains in the
original bucket or moves to a new bucket appended to the end of
the key file.
An insertion on a full bucket first triggers the "spill" algorithm.
First, a spill record is appended to the data file, containing header
information followed by the entire bucket record. Then the bucket's size is set
to zero and the offset of the spill record is stored in the bucket. At this
point the insertion may proceed normally, since the bucket is empty. Spilled
buckets in the data file are always full.
Because every bucket holds the offset of the next spill record in the
data file, the buckets form a linked list. In practice, careful
selection of capacity and load factor will keep the percentage of
buckets with one spill record to a minimum, with no bucket requiring
two spill records.
The implementation of fetch is straightforward: first the bucket in the
key file is checked, then each spill record in the linked list of
spill records is checked, until the key is found or there are no more
records. As almost all buckets have no spill records, the average
fetch requires one I/O (not including reading the value).
One complication in the scheme is when a split occurs on a bucket that
has one or more spill records. In this case, both the bucket being split
and the new bucket may overflow. This is handled by performing the
spill algorithm for each overflow that occurs. The new buckets may have
one or more spill records each, depending on the number of keys that
were originally present.
Because the data file is immutable, a bucket's original spill records
are no longer referenced after the bucket is split. These blocks of data
in the data file are unrecoverable wasted space. Correctly configured
databases can have a typical waste factor of 1%, which is acceptable.
These unused bytes can be removed by an off-line process that visits
each value in the data file and inserts it into a new database, then
deletes the old database and uses the new one instead.
## Recovery
To provide atomicity and consistency, a log file associated with the
database stores information used to roll back partial commits.
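Recovery normally happens implicitly the next time the store is opened; an explicit pass can also be requested. The call below is an assumption based on the `recover.hpp` header added by this commit and the library's later documented interface, not a signature confirmed by this diff:

```cpp
// Hypothetical explicit recovery pass over the three files.
nudb::error_code ec;
nudb::recover<nudb::xxhasher>("db.dat", "db.key", "db.log", ec);
```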
## Iteration
Each record in the data file is prefixed with a header identifying
whether it is a value record or a spill record, along with the size of
the record in bytes and a copy of the key if it's a value record, so values can
be iterated by incrementing a byte counter. A key file can be regenerated from
just the data file by iterating the values and performing the key
insertion algorithm.
## Concurrency
Locks are never held during disk reads and writes. Fetches are fully
concurrent, while inserts are serialized. Inserts fail on duplicate
keys, and are atomic: they either succeed immediately or fail.
After an insert, the key is immediately visible to subsequent fetches.
## Formats
All integer values are stored as big endian. The uint48_t format
consists of 6 bytes.
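As an illustration of this convention (our own sketch, not the library's code), a uint48 can be packed and unpacked like so:

```cpp
#include <cstdint>

// Encode the low 48 bits of v as 6 big-endian bytes.
inline void put_uint48(std::uint8_t* p, std::uint64_t v)
{
    p[0] = (v >> 40) & 0xff;
    p[1] = (v >> 32) & 0xff;
    p[2] = (v >> 24) & 0xff;
    p[3] = (v >> 16) & 0xff;
    p[4] = (v >>  8) & 0xff;
    p[5] =  v        & 0xff;
}

// Decode 6 big-endian bytes into a 48-bit value.
inline std::uint64_t get_uint48(std::uint8_t const* p)
{
    return (std::uint64_t(p[0]) << 40) | (std::uint64_t(p[1]) << 32) |
           (std::uint64_t(p[2]) << 24) | (std::uint64_t(p[3]) << 16) |
           (std::uint64_t(p[4]) <<  8) |  std::uint64_t(p[5]);
}
```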
### Key File
The Key File contains the Header followed by one or more
fixed-length Bucket Records.
#### Header (104 bytes)
char[8] Type The characters "nudb.key"
uint16 Version Holds the version number
uint64 UID Unique ID generated on creation
uint64 Appnum Application defined constant
uint16 KeySize Key size in bytes
uint64 Salt A random seed
uint64 Pepper The salt hashed
uint16 BlockSize Size of a file block in bytes
uint16 LoadFactor Target fraction in 65536ths
uint8[56] Reserved Zeroes
uint8[] Reserved Zero-pad to block size
`Type` identifies the file as belonging to nudb. `UID` is
generated randomly when the database is created, and this value
is stored in the data and log files as well - it's used
to determine if files belong to the same database. `Salt` is
generated when the database is created and helps prevent
complexity attacks; it is prepended to the key material
when computing a hash, or used to initialize the state of
the hash function. `Appnum` is an application defined constant
set when the database is created. It can be used for anything,
for example to distinguish between different data formats.
`Pepper` is computed by hashing `Salt` using a hash function
seeded with the salt. This is used to fingerprint the hash
function used. If a database is opened and the fingerprint
does not match the hash calculation performed using the template
argument provided when constructing the store, an exception
is thrown.
The header for the key file contains the File Header followed by
the information above. The Capacity is the number of keys per
bucket, and defines the size of a bucket record. The load factor
is the target fraction of bucket occupancy.
None of the information in the key file header or the data file
header may be changed after the database is created, including
the Appnum.
#### Bucket Record (fixed-length)
uint16 Count Number of keys in this bucket
uint48 Spill Offset of the next spill record or 0
BucketEntry[] Entries The bucket entries
#### Bucket Entry
uint48 Offset Offset in data file of the data
uint48 Size The size of the value in bytes
uint48 Hash The hash of the key
### Data File
The Data File contains the Header followed by zero or more
variable-length Value Records and Spill Records.
#### Header (92 bytes)
char[8] Type The characters "nudb.dat"
uint16 Version Holds the version number
uint64 UID Unique ID generated on creation
uint64 Appnum Application defined constant
uint16 KeySize Key size in bytes
uint8[64] (reserved) Zeroes
UID contains the same value as the UID in the corresponding key
file. This is placed in the data file so that key and data files
belonging to the same database can be identified.
#### Data Record (variable-length)
uint48 Size Size of the value in bytes
uint8[KeySize] Key The key.
uint8[Size] Data The value data.
#### Spill Record (fixed-length)
uint48 Zero All zero, identifies a spill record
uint16 Size Bytes in spill bucket (for skipping)
Bucket SpillBucket Bucket Record
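These two layouts are all sequential iteration needs: read a uint48, and a nonzero value is a value record's size, while zero marks a spill record to skip. A sketch based purely on the layout documented above (the header size, field order, and zero marker come from this section; `get_uint48` is the helper from the Formats sketch):

```cpp
#include <cstdint>
#include <fstream>
#include <vector>

std::uint64_t get_uint48(std::uint8_t const*); // from the Formats sketch

// Walk every record in a data file, per the layout in this section.
// keySize must match the KeySize the database was created with.
void walk_data_file(char const* path, std::size_t keySize)
{
    std::ifstream in(path, std::ios::binary);
    in.seekg(92); // skip the 92-byte data file header
    std::uint8_t sz[6];
    while(in.read(reinterpret_cast<char*>(sz), 6))
    {
        auto const size = get_uint48(sz);
        if(size != 0)
        {
            // Value record: the key, then `size` bytes of value data
            std::vector<std::uint8_t> key(keySize), data(size);
            in.read(reinterpret_cast<char*>(key.data()), keySize);
            in.read(reinterpret_cast<char*>(data.data()), size);
            // ...process, or insert into a fresh database to compact...
        }
        else
        {
            // Spill record: big-endian uint16 size, then the bucket
            std::uint8_t b[2];
            in.read(reinterpret_cast<char*>(b), 2);
            in.seekg((std::uint16_t(b[0]) << 8) | b[1], std::ios::cur);
        }
    }
}
```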
### Log File
The Log file contains the Header followed by zero or more fixed size
log records. Each log record contains a snapshot of a bucket. When a
database is not closed cleanly, the recovery process applies the log
records to the key file, overwriting data that may be only partially
updated with known good information. After the log records are applied,
the data and key files are truncated to the last known good size.
#### Header (62 bytes)
char[8] Type The characters "nudb.log"
uint16 Version Holds the version number
uint64 UID Unique ID generated on creation
uint64 Appnum Application defined constant
uint16 KeySize Key size in bytes
uint64 Salt A random seed.
uint64 Pepper The salt hashed
uint16 BlockSize Size of a file block in bytes
uint64 KeyFileSize Size of key file.
uint64 DataFileSize Size of data file.
#### Log Record
uint64 Index Bucket index (0-based)
Bucket Bucket Compact Bucket record
Compact buckets include only Count entries; they are not padded to
the block size. They are used primarily to minimize the volume of
writes to the log file.

View File

@@ -1,110 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_API_H_INCLUDED
#define BEAST_NUDB_API_H_INCLUDED
#include <ripple/beast/nudb/create.h>
#include <ripple/beast/nudb/identity.h>
#include <ripple/beast/nudb/store.h>
#include <ripple/beast/nudb/recover.h>
#include <ripple/beast/nudb/verify.h>
#include <ripple/beast/nudb/visit.h>
#include <cstdint>
#include <utility>
namespace beast {
namespace nudb {
// Convenience for consolidating template arguments
//
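// Hypothetical usage sketch (the hasher name and the
// argument values are illustrative only):
//
//   using db = api<my_hasher>;
//   db::create ("a.dat", "a.key", "a.log",
//       appnum, make_salt(), key_size,
//       block_size("."), 0.5f);
//   auto const info = db::verify ("a.dat", "a.key");
//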
template <
class Hasher,
class Codec = identity,
class File = native_file,
std::size_t BufferSize = 16 * 1024 * 1024
>
struct api
{
using hash_type = Hasher;
using codec_type = Codec;
using file_type = File;
using store = nudb::store<Hasher, Codec, File>;
static std::size_t const buffer_size = BufferSize;
template <class... Args>
static
bool
create (
path_type const& dat_path,
path_type const& key_path,
path_type const& log_path,
std::uint64_t appnum,
std::uint64_t salt,
std::size_t key_size,
std::size_t block_size,
float load_factor,
Args&&... args)
{
return nudb::create<Hasher, Codec, File>(
dat_path, key_path, log_path,
appnum, salt, key_size, block_size,
load_factor, std::forward<Args>(args)...);
}
template <class... Args>
static
bool
recover (
path_type const& dat_path,
path_type const& key_path,
path_type const& log_path,
Args&&... args)
{
return nudb::recover<Hasher, Codec, File>(
dat_path, key_path, log_path, BufferSize,
std::forward<Args>(args)...);
}
static
verify_info
verify (
path_type const& dat_path,
path_type const& key_path)
{
return nudb::verify<Hasher>(
dat_path, key_path, BufferSize);
}
template <class Function>
static
bool
visit(
path_type const& path,
Function&& f)
{
return nudb::visit<Codec>(
path, BufferSize, std::forward<Function>(f));
}
};
} // nudb
} // beast
#endif

View File

@@ -1,123 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_COMMON_H_INCLUDED
#define BEAST_NUDB_COMMON_H_INCLUDED
#include <stdexcept>
#include <string>
namespace beast {
namespace nudb {
// Commonly used types
enum class file_mode
{
scan, // read sequential
read, // read random
append, // read random, write append
write // read random, write random
};
using path_type = std::string;
// All exceptions thrown by nudb are
// derived from std::runtime_error
/** Thrown when a codec fails, e.g. corrupt data. */
struct codec_error : std::runtime_error
{
template <class String>
explicit
codec_error (String const& s)
: runtime_error(s)
{
}
};
/** Base class for all errors thrown by file classes. */
struct file_error : std::runtime_error
{
template <class String>
explicit
file_error (String const& s)
: runtime_error(s)
{
}
};
/** Thrown when file bytes read are less than requested. */
struct file_short_read_error : file_error
{
file_short_read_error()
: file_error (
"nudb: short read")
{
}
};
/** Thrown when file bytes written are less than requested. */
struct file_short_write_error : file_error
{
file_short_write_error()
: file_error (
"nudb: short write")
{
}
};
/** Thrown when end of istream reached while reading. */
struct short_read_error : std::runtime_error
{
short_read_error()
: std::runtime_error(
"nudb: short read")
{
}
};
/** Base class for all exceptions thrown by store. */
class store_error : public std::runtime_error
{
public:
template <class String>
explicit
store_error (String const& s)
: runtime_error(s)
{
}
};
/** Thrown when corruption in a file is detected. */
class store_corrupt_error : public store_error
{
public:
template <class String>
explicit
store_corrupt_error (String const& s)
: store_error(s)
{
}
};
} // nudb
} // beast
#endif

View File

@@ -1,164 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_CREATE_H_INCLUDED
#define BEAST_NUDB_CREATE_H_INCLUDED
#include <ripple/beast/nudb/file.h>
#include <ripple/beast/nudb/detail/bucket.h>
#include <ripple/beast/nudb/detail/format.h>
#include <algorithm>
#include <cstring>
#include <random>
#include <stdexcept>
#include <utility>
namespace beast {
namespace nudb {
namespace detail {
template <class = void>
std::uint64_t
make_uid()
{
std::random_device rng;
std::mt19937_64 gen {rng()};
std::uniform_int_distribution <std::uint64_t> dist;
return dist(gen);
}
}
/** Generate a random salt. */
template <class = void>
std::uint64_t
make_salt()
{
std::random_device rng;
std::mt19937_64 gen {rng()};
std::uniform_int_distribution <std::uint64_t> dist;
return dist(gen);
}
/** Returns the best guess at the volume's block size. */
inline
std::size_t
block_size (path_type const& /*path*/)
{
return 4096;
}
/** Create a new database.
Preconditions:
The files must not exist.
Throws:
std::domain_error if a parameter is out of range.
@param args Arguments passed to File constructors
@return `false` if any file could not be created.
*/
template <
class Hasher,
class Codec,
class File,
class... Args
>
bool
create (
path_type const& dat_path,
path_type const& key_path,
path_type const& log_path,
std::uint64_t appnum,
std::uint64_t salt,
std::size_t key_size,
std::size_t block_size,
float load_factor,
Args&&... args)
{
using namespace detail;
if (key_size < 1)
throw std::domain_error(
"invalid key size");
if (block_size > field<std::uint16_t>::max)
throw std::domain_error(
"nudb: block size too large");
if (load_factor <= 0.f)
throw std::domain_error(
"nudb: load factor too small");
if (load_factor >= 1.f)
throw std::domain_error(
"nudb: load factor too large");
auto const capacity =
bucket_capacity(block_size);
if (capacity < 1)
throw std::domain_error(
"nudb: block size too small");
File df(args...);
File kf(args...);
File lf(args...);
if (df.create(
file_mode::append, dat_path))
{
if (kf.create (
file_mode::append, key_path))
{
if (lf.create(
file_mode::append, log_path))
goto success;
File::erase (dat_path);
}
File::erase (key_path);
}
return false;
success:
dat_file_header dh;
dh.version = currentVersion;
dh.uid = make_uid();
dh.appnum = appnum;
dh.key_size = key_size;
key_file_header kh;
kh.version = currentVersion;
kh.uid = dh.uid;
kh.appnum = appnum;
kh.key_size = key_size;
kh.salt = salt;
kh.pepper = pepper<Hasher>(salt);
kh.block_size = block_size;
// VFALCO Should it be 65536?
// How do we set the min?
kh.load_factor = std::min<std::size_t>(
65536.0 * load_factor, 65535);
write (df, dh);
write (kf, kh);
buffer buf(block_size);
std::memset(buf.get(), 0, block_size);
bucket b (block_size, buf.get(), empty);
b.write (kf, block_size);
// VFALCO Leave log file empty?
df.sync();
kf.sync();
lf.sync();
return true;
}
} // nudb
} // beast
#endif

View File

@@ -1,246 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_DETAIL_ARENA_H_INCLUDED
#define BEAST_NUDB_DETAIL_ARENA_H_INCLUDED
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <stdexcept>
namespace beast {
namespace nudb {
namespace detail {
/* Custom memory manager that allocates in large blocks.
No limit is placed on the size of an allocation but
alloc_size should be tuned upon construction to be a
significant multiple of the average allocation size.
When the arena is cleared, allocated memory is placed
on a free list for re-use, avoiding future system calls.
*/
template <class = void>
class arena_t
{
private:
class element;
std::size_t alloc_size_;
element* used_ = nullptr;
element* free_ = nullptr;
public:
arena_t (arena_t const&) = delete;
arena_t& operator= (arena_t const&) = delete;
~arena_t();
explicit
arena_t (std::size_t alloc_size);
arena_t& operator= (arena_t&& other);
// Makes used blocks free
void
clear();
// deletes free blocks
void
shrink_to_fit();
std::uint8_t*
alloc (std::size_t n);
template <class U>
friend
void
swap (arena_t<U>& lhs, arena_t<U>& rhs);
private:
void
dealloc (element*& list);
};
//------------------------------------------------------------------------------
template <class _>
class arena_t<_>::element
{
private:
std::size_t const capacity_;
std::size_t used_ = 0;
public:
element* next = nullptr;
explicit
element (std::size_t alloc_size)
: capacity_ (
alloc_size - sizeof(*this))
{
}
void
clear()
{
used_ = 0;
}
std::size_t
remain() const
{
return capacity_ - used_;
}
std::size_t
capacity() const
{
return capacity_;
}
std::uint8_t*
alloc (std::size_t n);
};
template <class _>
std::uint8_t*
arena_t<_>::element::alloc (std::size_t n)
{
if (n > capacity_ - used_)
return nullptr;
auto const p = const_cast<std::uint8_t*>(
reinterpret_cast<std::uint8_t const*>(this + 1)
) + used_;
used_ += n;
return p;
}
//------------------------------------------------------------------------------
template <class _>
arena_t<_>::arena_t (std::size_t alloc_size)
: alloc_size_ (alloc_size)
{
if (alloc_size <= sizeof(element))
throw std::domain_error(
"arena: bad alloc size");
}
template <class _>
arena_t<_>::~arena_t()
{
dealloc (used_);
dealloc (free_);
}
template <class _>
arena_t<_>&
arena_t<_>::operator= (arena_t&& other)
{
dealloc (used_);
dealloc (free_);
alloc_size_ = other.alloc_size_;
used_ = other.used_;
free_ = other.free_;
other.used_ = nullptr;
other.free_ = nullptr;
return *this;
}
template <class _>
void
arena_t<_>::clear()
{
while (used_)
{
auto const e = used_;
used_ = used_->next;
e->clear();
e->next = free_;
free_ = e;
}
}
template <class _>
void
arena_t<_>::shrink_to_fit()
{
dealloc (free_);
}
template <class _>
std::uint8_t*
arena_t<_>::alloc (std::size_t n)
{
// Undefined behavior: Zero byte allocations
assert(n != 0);
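// Round n up to a multiple of 8 so returned pointers stay aligned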
n = 8 * ((n + 7) / 8);
if (used_ && used_->remain() >= n)
return used_->alloc(n);
if (free_ && free_->remain() >= n)
{
auto const e = free_;
free_ = free_->next;
e->next = used_;
used_ = e;
return used_->alloc(n);
}
std::size_t const size = std::max(
alloc_size_, sizeof(element) + n);
element* const e = reinterpret_cast<element*>(
new std::uint8_t[size]);
::new(e) element(size);
e->next = used_;
used_ = e;
return used_->alloc(n);
}
template <class _>
void
swap (arena_t<_>& lhs, arena_t<_>& rhs)
{
using std::swap;
swap(lhs.alloc_size_, rhs.alloc_size_);
swap(lhs.used_, rhs.used_);
swap(lhs.free_, rhs.free_);
}
template <class _>
void
arena_t<_>::dealloc (element*& list)
{
while (list)
{
auto const e = list;
list = list->next;
e->~element();
delete[] reinterpret_cast<std::uint8_t*>(e);
}
}
using arena = arena_t<>;
} // detail
} // nudb
} // beast
#endif

View File

@@ -1,468 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_DETAIL_BUCKET_H_INCLUDED
#define BEAST_NUDB_DETAIL_BUCKET_H_INCLUDED
#include <ripple/beast/nudb/common.h>
#include <ripple/beast/nudb/detail/bulkio.h>
#include <ripple/beast/nudb/detail/field.h>
#include <ripple/beast/nudb/detail/format.h>
#include <cstddef>
#include <cstdint>
#include <cstring>
namespace beast {
namespace nudb {
namespace detail {
// bucket calculations:
// Returns bucket index given hash, buckets, and modulus
//
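// Linear hashing: modulus is the smallest power of two not less
// than buckets (see ceil_pow2). When h % modulus falls past the
// last bucket, subtracting modulus / 2 maps it onto the bucket
// that has not yet been split this round.
//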
inline
std::size_t
bucket_index (std::size_t h,
std::size_t buckets, std::size_t modulus)
{
std::size_t n = h % modulus;
if (n >= buckets)
n -= modulus / 2;
return n;
}
//------------------------------------------------------------------------------
// Tag for constructing empty buckets
struct empty_t { };
static empty_t empty;
// Allows inspection and manipulation of bucket blobs in memory
template <class = void>
class bucket_t
{
private:
std::size_t block_size_; // Size of a key file block
std::size_t size_; // Current key count
std::size_t spill_; // Offset of next spill record or 0
std::uint8_t* p_; // Pointer to the bucket blob
public:
struct value_type
{
std::size_t offset;
std::size_t size;
std::size_t hash;
};
bucket_t (bucket_t const&) = default;
bucket_t& operator= (bucket_t const&) = default;
bucket_t (std::size_t block_size, void* p);
bucket_t (std::size_t block_size, void* p, empty_t);
std::size_t
block_size() const
{
return block_size_;
}
std::size_t
compact_size() const
{
return detail::bucket_size(size_);
}
bool
empty() const
{
return size_ == 0;
}
bool
full() const
{
return size_ >=
detail::bucket_capacity(block_size_);
}
std::size_t
size() const
{
return size_;
}
// Returns offset of next spill record or 0
//
std::size_t
spill() const
{
return spill_;
}
// Set offset of next spill record
//
void
spill (std::size_t offset);
// Clear contents of the bucket
//
void
clear();
// Returns the record for a key
// entry without bounds checking.
//
value_type const
at (std::size_t i) const;
value_type const
operator[] (std::size_t i) const
{
return at(i);
}
// Returns index of entry with prefix
// equal to or greater than the given prefix.
//
std::size_t
lower_bound (std::size_t h) const;
void
insert (std::size_t offset,
std::size_t size, std::size_t h);
// Erase an element by index
//
void
erase (std::size_t i);
// Read a full bucket from the
// file at the specified offset.
//
template <class File>
void
read (File& f, std::size_t offset);
// Read a compact bucket
//
template <class File>
void
read (bulk_reader<File>& r);
// Write a compact bucket to the stream.
// This only writes entries that are not empty.
//
void
write (ostream& os) const;
// Write a bucket to the file at the specified offset.
// The full block_size() bytes are written.
//
template <class File>
void
write (File& f, std::size_t offset) const;
private:
// Update size and spill in the blob
void
update();
};
//------------------------------------------------------------------------------
template <class _>
bucket_t<_>::bucket_t (
std::size_t block_size, void* p)
: block_size_ (block_size)
, p_ (reinterpret_cast<std::uint8_t*>(p))
{
// Bucket Record
istream is(p_, block_size);
detail::read<std::uint16_t>(is, size_); // Count
detail::read<uint48_t>(is, spill_); // Spill
}
template <class _>
bucket_t<_>::bucket_t (
std::size_t block_size, void* p, empty_t)
: block_size_ (block_size)
, size_ (0)
, spill_ (0)
, p_ (reinterpret_cast<std::uint8_t*>(p))
{
clear();
}
template <class _>
void
bucket_t<_>::spill (std::size_t offset)
{
spill_ = offset;
update();
}
template <class _>
void
bucket_t<_>::clear()
{
size_ = 0;
spill_ = 0;
std::memset(p_, 0, block_size_);
}
template <class _>
auto
bucket_t<_>::at (std::size_t i) const ->
value_type const
{
value_type result;
// Bucket Entry
std::size_t const w =
field<uint48_t>::size + // Offset
field<uint48_t>::size + // Size
field<hash_t>::size; // Prefix
// Bucket Record
detail::istream is(p_ +
field<std::uint16_t>::size + // Count
field<uint48_t>::size + // Spill
i * w, w);
// Bucket Entry
detail::read<uint48_t>(
is, result.offset); // Offset
detail::read<uint48_t>(
is, result.size); // Size
detail::read<hash_t>(
is, result.hash); // Hash
return result;
}
template <class _>
std::size_t
bucket_t<_>::lower_bound (
std::size_t h) const
{
// Bucket Entry
auto const w =
field<uint48_t>::size + // Offset
field<uint48_t>::size + // Size
field<hash_t>::size; // Hash
// Bucket Record
auto const p = p_ +
field<std::uint16_t>::size + // Count
field<uint48_t>::size + // Spill
// Bucket Entry
field<uint48_t>::size + // Offset
field<uint48_t>::size; // Size
std::size_t step;
std::size_t first = 0;
std::size_t count = size_;
while (count > 0)
{
step = count / 2;
auto const i = first + step;
std::size_t h1;
readp<hash_t>(p + i * w, h1);
if (h1 < h)
{
first = i + 1;
count -= step + 1;
}
else
{
count = step;
}
}
return first;
}
template <class _>
void
bucket_t<_>::insert (std::size_t offset,
std::size_t size, std::size_t h)
{
std::size_t i = lower_bound(h);
// Bucket Record
auto const p = p_ +
field<
std::uint16_t>::size + // Count
field<uint48_t>::size; // Spill
// Bucket Entry
std::size_t const w =
field<uint48_t>::size + // Offset
field<uint48_t>::size + // Size
field<hash_t>::size; // Hash
std::memmove (
p + (i + 1) * w,
p + i * w,
(size_ - i) * w);
size_++;
update();
// Bucket Entry
ostream os (p + i * w, w);
detail::write<uint48_t>(
os, offset); // Offset
detail::write<uint48_t>(
os, size); // Size
detail::write<hash_t>(
os, h); // Prefix
}
template <class _>
void
bucket_t<_>::erase (std::size_t i)
{
// Bucket Record
auto const p = p_ +
field<
std::uint16_t>::size + // Count
field<uint48_t>::size; // Spill
auto const w =
field<uint48_t>::size + // Offset
field<uint48_t>::size + // Size
field<hash_t>::size; // Hash
--size_;
if (i < size_)
std::memmove(
p + i * w,
p + (i + 1) * w,
(size_ - i) * w);
std::memset(p + size_ * w, 0, w);
update();
}
template <class _>
template <class File>
void
bucket_t<_>::read (File& f, std::size_t offset)
{
auto const cap = bucket_capacity (
block_size_);
// Excludes padding to block size
f.read (offset, p_, bucket_size(cap));
istream is(p_, block_size_);
detail::read<
std::uint16_t>(is, size_); // Count
detail::read<
uint48_t>(is, spill_); // Spill
if (size_ > cap)
throw store_corrupt_error(
"bad bucket size");
}
template <class _>
template <class File>
void
bucket_t<_>::read (bulk_reader<File>& r)
{
// Bucket Record (compact)
auto is = r.prepare(
detail::field<std::uint16_t>::size +
detail::field<uint48_t>::size);
detail::read<
std::uint16_t>(is, size_); // Count
detail::read<uint48_t>(
is, spill_); // Spill
update();
// Excludes empty bucket entries
auto const w = size_ * (
field<uint48_t>::size + // Offset
field<uint48_t>::size + // Size
field<hash_t>::size); // Hash
is = r.prepare (w);
std::memcpy(p_ +
field<
std::uint16_t>::size + // Count
field<uint48_t>::size, // Spill
is.data(w), w); // Entries
}
template <class _>
void
bucket_t<_>::write (ostream& os) const
{
// Does not pad up to the block size. This
// is called to write to the data file.
auto const size = compact_size();
// Bucket Record
std::memcpy (os.data(size), p_, size);
}
template <class _>
template <class File>
void
bucket_t<_>::write (File& f, std::size_t offset) const
{
// Includes zero pad up to the block
// size, to make the key file size always
// a multiple of the block size.
auto const size = compact_size();
std::memset (p_ + size, 0,
block_size_ - size);
// Bucket Record
f.write (offset, p_, block_size_);
}
template <class _>
void
bucket_t<_>::update()
{
// Bucket Record
ostream os(p_, block_size_);
detail::write<
std::uint16_t>(os, size_); // Count
detail::write<
uint48_t>(os, spill_); // Spill
}
using bucket = bucket_t<>;
// Spill bucket if full.
// The bucket is cleared after it spills.
//
template <class File>
void
maybe_spill(bucket& b, bulk_writer<File>& w)
{
if (b.full())
{
// Spill Record
auto const offset = w.offset();
auto os = w.prepare(
field<uint48_t>::size + // Zero
field<std::uint16_t>::size + // Size
b.compact_size());
write <uint48_t> (os, 0); // Zero
write <std::uint16_t> (
os, b.compact_size()); // Size
auto const spill =
offset + os.size();
b.write (os); // Bucket
// Update bucket
b.clear();
b.spill (spill);
}
}
} // detail
} // nudb
} // beast
#endif

View File

@@ -1,99 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_DETAIL_BUFFER_H_INCLUDED
#define BEAST_NUDB_DETAIL_BUFFER_H_INCLUDED
#include <cstddef>
#include <cstdint>
#include <memory>
namespace beast {
namespace nudb {
namespace detail {
// Simple growable memory buffer
class buffer
{
private:
std::size_t size_ = 0;
std::unique_ptr<std::uint8_t[]> buf_;
public:
~buffer() = default;
buffer() = default;
buffer (buffer const&) = delete;
buffer& operator= (buffer const&) = delete;
explicit
buffer (std::size_t n)
: size_ (n)
, buf_ (new std::uint8_t[n])
{
}
buffer (buffer&& other)
: size_ (other.size_)
, buf_ (std::move(other.buf_))
{
other.size_ = 0;
}
buffer& operator= (buffer&& other)
{
size_ = other.size_;
buf_ = std::move(other.buf_);
other.size_ = 0;
return *this;
}
std::size_t
size() const
{
return size_;
}
std::uint8_t*
get() const
{
return buf_.get();
}
void
reserve (std::size_t n)
{
if (size_ < n)
buf_.reset (new std::uint8_t[n]);
size_ = n;
}
// BufferFactory
void*
operator() (std::size_t n)
{
reserve(n);
return buf_.get();
}
};
} // detail
} // nudb
} // beast
#endif

View File

@@ -1,195 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_DETAIL_BULKIO_H_INCLUDED
#define BEAST_NUDB_DETAIL_BULKIO_H_INCLUDED
#include <ripple/beast/nudb/detail/buffer.h>
#include <ripple/beast/nudb/detail/stream.h>
#include <algorithm>
#include <cstddef>
#include <cstring>
namespace beast {
namespace nudb {
namespace detail {
// Scans a file in sequential large reads
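// prepare(needed) returns an istream over the next `needed` bytes,
// refilling the internal buffer from the file as necessary and
// throwing file_short_read_error if the file ends early.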
template <class File>
class bulk_reader
{
private:
File& f_;
buffer buf_;
std::size_t last_; // size of file
std::size_t offset_; // current position
std::size_t avail_; // bytes left to read in buf
std::size_t used_; // bytes consumed in buf
public:
bulk_reader (File& f, std::size_t offset,
std::size_t last, std::size_t buffer_size);
std::size_t
offset() const
{
return offset_ - avail_;
}
bool
eof() const
{
return offset() >= last_;
}
istream
prepare (std::size_t needed);
};
template <class File>
bulk_reader<File>::bulk_reader (File& f, std::size_t offset,
std::size_t last, std::size_t buffer_size)
: f_ (f)
, last_ (last)
, offset_ (offset)
, avail_ (0)
, used_ (0)
{
buf_.reserve (buffer_size);
}
template <class File>
istream
bulk_reader<File>::prepare (std::size_t needed)
{
if (needed > avail_)
{
if (offset_ + needed - avail_ > last_)
throw file_short_read_error();
if (needed > buf_.size())
{
buffer buf;
buf.reserve (needed);
std::memcpy (buf.get(),
buf_.get() + used_, avail_);
buf_ = std::move(buf);
}
else
{
std::memmove (buf_.get(),
buf_.get() + used_, avail_);
}
auto const n = std::min(
buf_.size() - avail_, last_ - offset_);
f_.read(offset_, buf_.get() + avail_, n);
offset_ += n;
avail_ += n;
used_ = 0;
}
istream is(buf_.get() + used_, needed);
used_ += needed;
avail_ -= needed;
return is;
}
//------------------------------------------------------------------------------
// Buffers file writes
// Caller must call flush manually at the end
template <class File>
class bulk_writer
{
private:
File& f_;
buffer buf_;
std::size_t offset_; // current position
std::size_t used_; // bytes written to buf
public:
bulk_writer (File& f, std::size_t offset,
std::size_t buffer_size);
ostream
prepare (std::size_t needed);
// Returns the number of bytes buffered
std::size_t
size()
{
return used_;
}
// Return current offset in file. This
// is advanced with each call to prepare.
std::size_t
offset() const
{
return offset_ + used_;
}
// flush cannot be called from the destructor
// since it can throw, so callers must do it manually.
void
flush();
};
template <class File>
bulk_writer<File>::bulk_writer (File& f,
std::size_t offset, std::size_t buffer_size)
: f_ (f)
, offset_ (offset)
, used_ (0)
{
buf_.reserve (buffer_size);
}
template <class File>
ostream
bulk_writer<File>::prepare (std::size_t needed)
{
if (used_ + needed > buf_.size())
flush();
if (needed > buf_.size())
buf_.reserve (needed);
ostream os (buf_.get() + used_, needed);
used_ += needed;
return os;
}
template <class File>
void
bulk_writer<File>::flush()
{
if (used_)
{
auto const offset = offset_;
auto const used = used_;
offset_ += used_;
used_ = 0;
f_.write (offset, buf_.get(), used);
}
}
} // detail
} // nudb
} // beast
#endif

View File

@@ -1,247 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_DETAIL_CACHE_H_INCLUDED
#define BEAST_NUDB_DETAIL_CACHE_H_INCLUDED
#include <ripple/beast/nudb/detail/arena.h>
#include <ripple/beast/nudb/detail/bucket.h>
#include <boost/iterator/transform_iterator.hpp>
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>
#include <unordered_map>
namespace beast {
namespace nudb {
namespace detail {
// Associative container storing
// bucket blobs keyed by bucket index.
template <class = void>
class cache_t
{
public:
using value_type = std::pair<
std::size_t, bucket>;
private:
enum
{
// The arena's alloc size will be this
// multiple of the block size.
factor = 64
};
using map_type = std::unordered_map <
std::size_t, void*>;
struct transform
{
using argument_type =
typename map_type::value_type;
using result_type = value_type;
cache_t* cache_;
transform()
: cache_ (nullptr)
{
}
explicit
transform (cache_t& cache)
: cache_ (&cache)
{
}
value_type
operator() (argument_type const& e) const
{
return std::make_pair(e.first,
bucket (cache_->block_size_,
e.second));
}
};
std::size_t key_size_;
std::size_t block_size_;
arena arena_;
map_type map_;
public:
using iterator = boost::transform_iterator<
transform, typename map_type::iterator,
value_type, value_type>;
cache_t (cache_t const&) = delete;
cache_t& operator= (cache_t const&) = delete;
cache_t();
explicit
cache_t (std::size_t key_size,
std::size_t block_size);
cache_t& operator= (cache_t&& other);
iterator
begin()
{
return iterator(map_.begin(),
transform(*this));
}
iterator
end()
{
return iterator(map_.end(),
transform(*this));
}
bool
empty() const
{
return map_.empty();
}
void
clear();
void
shrink_to_fit();
iterator
find (std::size_t n);
// Create an empty bucket
//
bucket
create (std::size_t n);
// Insert a copy of a bucket.
//
iterator
insert (std::size_t n, bucket const& b);
template <class U>
friend
void
swap (cache_t<U>& lhs, cache_t<U>& rhs);
};
// Constructs a cache that will never have inserts
template <class _>
cache_t<_>::cache_t()
: key_size_ (0)
, block_size_ (0)
, arena_ (32) // arbitrary small number
{
}
template <class _>
cache_t<_>::cache_t (std::size_t key_size,
std::size_t block_size)
: key_size_ (key_size)
, block_size_ (block_size)
, arena_ (block_size * factor)
{
}
template <class _>
cache_t<_>&
cache_t<_>::operator=(cache_t&& other)
{
key_size_ = other.key_size_;
block_size_ = other.block_size_;
arena_ = std::move(other.arena_);
map_ = std::move(other.map_);
return *this;
}
template <class _>
void
cache_t<_>::clear()
{
arena_.clear();
map_.clear();
}
template <class _>
void
cache_t<_>::shrink_to_fit()
{
arena_.shrink_to_fit();
}
template <class _>
auto
cache_t<_>::find (std::size_t n) ->
iterator
{
auto const iter = map_.find(n);
if (iter == map_.end())
return iterator (map_.end(),
transform(*this));
return iterator (iter,
transform(*this));
}
template <class _>
bucket
cache_t<_>::create (std::size_t n)
{
auto const p = arena_.alloc (block_size_);
map_.emplace (n, p);
return bucket (block_size_,
p, detail::empty);
}
template <class _>
auto
cache_t<_>::insert (std::size_t n,
bucket const& b) ->
iterator
{
void* const p = arena_.alloc(
b.block_size());
ostream os(p, b.block_size());
b.write(os);
auto const result = map_.emplace(n, p);
return iterator(result.first,
transform(*this));
}
template <class U>
void
swap (cache_t<U>& lhs, cache_t<U>& rhs)
{
using std::swap;
swap(lhs.key_size_, rhs.key_size_);
swap(lhs.block_size_, rhs.block_size_);
swap(lhs.arena_, rhs.arena_);
swap(lhs.map_, rhs.map_);
}
using cache = cache_t<>;
} // detail
} // nudb
} // beast
#endif

View File

@@ -1,274 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_DETAIL_FIELD_H_INCLUDED
#define BEAST_NUDB_DETAIL_FIELD_H_INCLUDED
#include <ripple/beast/nudb/detail/stream.h>
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <type_traits>
namespace beast {
namespace nudb {
namespace detail {
// A 24-bit integer
struct uint24_t;
// A 48-bit integer
struct uint48_t;
// These metafunctions describe the binary format of fields on disk
template <class T>
struct field;
template <>
struct field <std::uint8_t>
{
static std::size_t constexpr size = 1;
static std::size_t constexpr max = 0xff;
};
template <>
struct field <std::uint16_t>
{
static std::size_t constexpr size = 2;
static std::size_t constexpr max = 0xffff;
};
template <>
struct field <uint24_t>
{
static std::size_t constexpr size = 3;
static std::size_t constexpr max = 0xffffff;
};
template <>
struct field <std::uint32_t>
{
static std::size_t constexpr size = 4;
static std::size_t constexpr max = 0xffffffff;
};
template <>
struct field <uint48_t>
{
static std::size_t constexpr size = 6;
static std::size_t constexpr max = 0x0000ffffffffffff;
};
template <>
struct field <std::uint64_t>
{
static std::size_t constexpr size = 8;
static std::size_t constexpr max = 0xffffffffffffffff;
};
// read field from memory
template <class T, class U, std::enable_if_t<
std::is_same<T, std::uint8_t>::value>* = nullptr>
void
readp (void const* v, U& u)
{
std::uint8_t const* p =
reinterpret_cast<std::uint8_t const*>(v);
u = *p;
}
template <class T, class U, std::enable_if_t<
std::is_same<T, std::uint16_t>::value>* = nullptr>
void
readp (void const* v, U& u)
{
std::uint8_t const* p =
reinterpret_cast<std::uint8_t const*>(v);
T t;
t = T(*p++)<< 8;
t = T(*p ) | t;
u = t;
}
template <class T, class U, std::enable_if_t<
std::is_same<T, uint24_t>::value>* = nullptr>
void
readp (void const* v, U& u)
{
std::uint8_t const* p =
reinterpret_cast<std::uint8_t const*>(v);
std::uint32_t t;
t = std::uint32_t(*p++)<<16;
t = (std::uint32_t(*p++)<< 8) | t;
t = std::uint32_t(*p ) | t;
u = t;
}
template <class T, class U, std::enable_if_t<
std::is_same<T, std::uint32_t>::value>* = nullptr>
void
readp (void const* v, U& u)
{
std::uint8_t const* p =
reinterpret_cast<std::uint8_t const*>(v);
T t;
t = T(*p++)<<24;
t = (T(*p++)<<16) | t;
t = (T(*p++)<< 8) | t;
t = T(*p ) | t;
u = t;
}
template <class T, class U, std::enable_if_t<
std::is_same<T, uint48_t>::value>* = nullptr>
void
readp (void const* v, U& u)
{
std::uint8_t const* p =
reinterpret_cast<std::uint8_t const*>(v);
std::uint64_t t;
t = (std::uint64_t(*p++)<<40);
t = (std::uint64_t(*p++)<<32) | t;
t = (std::uint64_t(*p++)<<24) | t;
t = (std::uint64_t(*p++)<<16) | t;
t = (std::uint64_t(*p++)<< 8) | t;
t = std::uint64_t(*p ) | t;
u = t;
}
template <class T, class U, std::enable_if_t<
std::is_same<T, std::uint64_t>::value>* = nullptr>
void
readp (void const* v, U& u)
{
std::uint8_t const* p =
reinterpret_cast<std::uint8_t const*>(v);
T t;
t = T(*p++)<<56;
t = (T(*p++)<<48) | t;
t = (T(*p++)<<40) | t;
t = (T(*p++)<<32) | t;
t = (T(*p++)<<24) | t;
t = (T(*p++)<<16) | t;
t = (T(*p++)<< 8) | t;
t = T(*p ) | t;
u = t;
}
// read field from istream
template <class T, class U>
void
read (istream& is, U& u)
{
readp<T>(is.data(field<T>::size), u);
}
// write field to ostream
template <class T, class U, std::enable_if_t<
std::is_same<T, std::uint8_t>::value>* = nullptr>
void
write (ostream& os, U const& u)
{
std::uint8_t* p =
os.data(field<T>::size);
*p = u;
}
template <class T, class U, std::enable_if_t<
std::is_same<T, std::uint16_t>::value>* = nullptr>
void
write (ostream& os, U const& u)
{
T t = u;
std::uint8_t* p =
os.data(field<T>::size);
*p++ = (t>> 8)&0xff;
*p = t &0xff;
}
template <class T, class U, std::enable_if_t<
std::is_same<T, uint24_t>::value>* = nullptr>
void
write (ostream& os, U const& u)
{
T t = u;
std::uint8_t* p =
os.data(field<T>::size);
*p++ = (t>>16)&0xff;
*p++ = (t>> 8)&0xff;
*p = t &0xff;
}
template <class T, class U, std::enable_if_t<
std::is_same<T, std::uint32_t>::value>* = nullptr>
void
write (ostream& os, U const& u)
{
T t = u;
std::uint8_t* p =
os.data(field<T>::size);
*p++ = (t>>24)&0xff;
*p++ = (t>>16)&0xff;
*p++ = (t>> 8)&0xff;
*p = t &0xff;
}
template <class T, class U, std::enable_if_t<
std::is_same<T, uint48_t>::value>* = nullptr>
void
write (ostream& os, U const& u)
{
std::uint64_t const t = u;
std::uint8_t* p =
os.data(field<T>::size);
*p++ = (t>>40)&0xff;
*p++ = (t>>32)&0xff;
*p++ = (t>>24)&0xff;
*p++ = (t>>16)&0xff;
*p++ = (t>> 8)&0xff;
*p = t &0xff;
}
template <class T, class U, std::enable_if_t<
std::is_same<T, std::uint64_t>::value>* = nullptr>
void
write (ostream& os, U const& u)
{
T t = u;
std::uint8_t* p =
os.data(field<T>::size);
*p++ = (t>>56)&0xff;
*p++ = (t>>48)&0xff;
*p++ = (t>>40)&0xff;
*p++ = (t>>32)&0xff;
*p++ = (t>>24)&0xff;
*p++ = (t>>16)&0xff;
*p++ = (t>> 8)&0xff;
*p = t &0xff;
}
} // detail
} // nudb
} // beast
#endif

View File

@@ -1,581 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_DETAIL_FORMAT_H_INCLUDED
#define BEAST_NUDB_DETAIL_FORMAT_H_INCLUDED
#include <ripple/beast/nudb/common.h>
#include <ripple/beast/nudb/detail/buffer.h>
#include <ripple/beast/nudb/detail/field.h>
#include <ripple/beast/nudb/detail/stream.h>
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstring>
#include <memory>
#include <stdexcept>
#include <type_traits>
namespace beast {
namespace nudb {
namespace detail {
// Format of the nudb files:
static std::size_t constexpr currentVersion = 2;
struct dat_file_header
{
static std::size_t constexpr size =
8 + // Type
2 + // Version
8 + // UID
8 + // Appnum
2 + // KeySize
64; // (Reserved)
char type[8];
std::size_t version;
std::uint64_t uid;
std::uint64_t appnum;
std::size_t key_size;
};
struct key_file_header
{
static std::size_t constexpr size =
8 + // Type
2 + // Version
8 + // UID
8 + // Appnum
2 + // KeySize
8 + // Salt
8 + // Pepper
2 + // BlockSize
2 + // LoadFactor
56; // (Reserved)
char type[8];
std::size_t version;
std::uint64_t uid;
std::uint64_t appnum;
std::size_t key_size;
std::uint64_t salt;
std::uint64_t pepper;
std::size_t block_size;
std::size_t load_factor;
// Computed values
std::size_t capacity;
std::size_t bucket_size;
std::size_t buckets;
std::size_t modulus;
};
struct log_file_header
{
static std::size_t constexpr size =
8 + // Type
2 + // Version
8 + // UID
8 + // Appnum
2 + // KeySize
8 + // Salt
8 + // Pepper
2 + // BlockSize
8 + // KeyFileSize
8; // DataFileSize
char type[8];
std::size_t version;
std::uint64_t uid;
std::uint64_t appnum;
std::size_t key_size;
std::uint64_t salt;
std::uint64_t pepper;
std::size_t block_size;
std::size_t key_file_size;
std::size_t dat_file_size;
};
// Type used to store hashes in buckets.
// This can be smaller than the output
// of the hash function.
//
using hash_t = uint48_t;
static_assert(field<hash_t>::size <=
sizeof(std::size_t), "");
template <class T>
std::size_t
make_hash (std::size_t h);
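// The uint48_t specialization keeps the upper 48 bits
// of a 64-bit hash, discarding the low 16 bits.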
template<>
inline
std::size_t
make_hash<uint48_t>(std::size_t h)
{
return (h>>16)&0xffffffffffff;
}
// Returns the hash of a key given the salt.
// Note: The hash is expressed in hash_t units
//
template <class Hasher>
inline
std::size_t
hash (void const* key,
std::size_t key_size, std::size_t salt)
{
Hasher h (salt);
h (key, key_size);
return make_hash<hash_t>(static_cast<
typename Hasher::result_type>(h));
}
// Computes pepper from salt
//
template <class Hasher>
std::size_t
pepper (std::size_t salt)
{
Hasher h (salt);
h (&salt, sizeof(salt));
return static_cast<std::size_t>(h);
}
// Returns the actual size of a bucket.
// This can be smaller than the block size.
//
template <class = void>
std::size_t
bucket_size (std::size_t capacity)
{
// Bucket Record
return
field<std::uint16_t>::size + // Count
field<uint48_t>::size + // Spill
capacity * (
field<uint48_t>::size + // Offset
field<uint48_t>::size + // Size
field<hash_t>::size); // Hash
}
// Returns the number of entries that fit in a bucket
//
template <class = void>
std::size_t
bucket_capacity (std::size_t block_size)
{
// Bucket Record
auto const size =
field<std::uint16_t>::size + // Count
field<uint48_t>::size; // Spill
auto const entry_size =
field<uint48_t>::size + // Offset
field<uint48_t>::size + // Size
field<hash_t>::size; // Hash
if (block_size < key_file_header::size ||
block_size < size)
return 0;
return (block_size - size) / entry_size;
}
// Returns the number of bytes occupied by a value record
inline
std::size_t
value_size (std::size_t size,
std::size_t key_size)
{
// Data Record
return
field<uint48_t>::size + // Size
key_size + // Key
size; // Data
}
// Returns the closest power of 2 not less than x
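// Counts ceil(log2(x)) by probing six mask ranges from wide
// to narrow, then shifts 1 left by that count.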
template <class = void>
std::size_t
ceil_pow2 (unsigned long long x)
{
static const unsigned long long t[6] = {
0xFFFFFFFF00000000ull,
0x00000000FFFF0000ull,
0x000000000000FF00ull,
0x00000000000000F0ull,
0x000000000000000Cull,
0x0000000000000002ull
};
int y = (((x & (x - 1)) == 0) ? 0 : 1);
int j = 32;
int i;
for(i = 0; i < 6; i++) {
int k = (((x & t[i]) == 0) ? 0 : j);
y += k;
x >>= k;
j >>= 1;
}
return std::size_t(1)<<y;
}
//------------------------------------------------------------------------------
// Read data file header from stream
template <class = void>
void
read (istream& is, dat_file_header& dh)
{
read (is, dh.type, sizeof(dh.type));
read<std::uint16_t>(is, dh.version);
read<std::uint64_t>(is, dh.uid);
read<std::uint64_t>(is, dh.appnum);
read<std::uint16_t>(is, dh.key_size);
std::array <std::uint8_t, 64> reserved;
read (is,
reserved.data(), reserved.size());
}
// Read data file header from file
template <class File>
void
read (File& f, dat_file_header& dh)
{
std::array<std::uint8_t,
dat_file_header::size> buf;
try
{
f.read(0, buf.data(), buf.size());
}
catch (file_short_read_error const&)
{
throw store_corrupt_error(
"short data file header");
}
istream is(buf);
read (is, dh);
}
// Write data file header to stream
template <class = void>
void
write (ostream& os, dat_file_header const& dh)
{
write (os, "nudb.dat", 8);
write<std::uint16_t>(os, dh.version);
write<std::uint64_t>(os, dh.uid);
write<std::uint64_t>(os, dh.appnum);
write<std::uint16_t>(os, dh.key_size);
std::array <std::uint8_t, 64> reserved;
reserved.fill(0);
write (os,
reserved.data(), reserved.size());
}
// Write data file header to file
template <class File>
void
write (File& f, dat_file_header const& dh)
{
std::array <std::uint8_t,
dat_file_header::size> buf;
ostream os(buf);
write(os, dh);
f.write (0, buf.data(), buf.size());
}
// Read key file header from stream
template <class = void>
void
read (istream& is, std::size_t file_size,
key_file_header& kh)
{
read(is, kh.type, sizeof(kh.type));
read<std::uint16_t>(is, kh.version);
read<std::uint64_t>(is, kh.uid);
read<std::uint64_t>(is, kh.appnum);
read<std::uint16_t>(is, kh.key_size);
read<std::uint64_t>(is, kh.salt);
read<std::uint64_t>(is, kh.pepper);
read<std::uint16_t>(is, kh.block_size);
read<std::uint16_t>(is, kh.load_factor);
std::array <std::uint8_t, 56> reserved;
read (is,
reserved.data(), reserved.size());
// VFALCO These need to be checked to handle
// when the file size is too small
kh.capacity = bucket_capacity(kh.block_size);
kh.bucket_size = bucket_size(kh.capacity);
if (file_size > kh.block_size)
{
// VFALCO This should be handled elsewhere.
// we shouldn't put the computed fields
// in this header.
if (kh.block_size > 0)
kh.buckets = (file_size - kh.bucket_size)
/ kh.block_size;
else
// VFALCO Corruption or logic error
kh.buckets = 0;
}
else
{
kh.buckets = 0;
}
kh.modulus = ceil_pow2(kh.buckets);
}
// Read key file header from file
template <class File>
void
read (File& f, key_file_header& kh)
{
std::array <std::uint8_t,
key_file_header::size> buf;
try
{
f.read(0, buf.data(), buf.size());
}
catch (file_short_read_error const&)
{
throw store_corrupt_error(
"short key file header");
}
istream is(buf);
read (is, f.actual_size(), kh);
}
// Write key file header to stream
template <class = void>
void
write (ostream& os, key_file_header const& kh)
{
write (os, "nudb.key", 8);
write<std::uint16_t>(os, kh.version);
write<std::uint64_t>(os, kh.uid);
write<std::uint64_t>(os, kh.appnum);
write<std::uint16_t>(os, kh.key_size);
write<std::uint64_t>(os, kh.salt);
write<std::uint64_t>(os, kh.pepper);
write<std::uint16_t>(os, kh.block_size);
write<std::uint16_t>(os, kh.load_factor);
std::array <std::uint8_t, 56> reserved;
reserved.fill (0);
write (os,
reserved.data(), reserved.size());
}
// Write key file header to file
template <class File>
void
write (File& f, key_file_header const& kh)
{
buffer buf;
buf.reserve (kh.block_size);
if (kh.block_size < key_file_header::size)
throw std::logic_error(
"nudb: block size too small");
std::fill(buf.get(), buf.get() + buf.size(), 0);
ostream os (buf.get(), buf.size());
write (os, kh);
f.write (0, buf.get(), buf.size());
}
// Read log file header from stream
template <class = void>
void
read (istream& is, log_file_header& lh)
{
read (is, lh.type, sizeof(lh.type));
read<std::uint16_t>(is, lh.version);
read<std::uint64_t>(is, lh.uid);
read<std::uint64_t>(is, lh.appnum);
read<std::uint16_t>(is, lh.key_size);
read<std::uint64_t>(is, lh.salt);
read<std::uint64_t>(is, lh.pepper);
read<std::uint16_t>(is, lh.block_size);
read<std::uint64_t>(is, lh.key_file_size);
read<std::uint64_t>(is, lh.dat_file_size);
}
// Read log file header from file
template <class File>
void
read (File& f, log_file_header& lh)
{
std::array <std::uint8_t,
log_file_header::size> buf;
// Can throw file_short_read_error to callers
f.read (0, buf.data(), buf.size());
istream is(buf);
read (is, lh);
}
// Write log file header to stream
template <class = void>
void
write (ostream& os, log_file_header const& lh)
{
write (os, "nudb.log", 8);
write<std::uint16_t>(os, lh.version);
write<std::uint64_t>(os, lh.uid);
write<std::uint64_t>(os, lh.appnum);
write<std::uint16_t>(os, lh.key_size);
write<std::uint64_t>(os, lh.salt);
write<std::uint64_t>(os, lh.pepper);
write<std::uint16_t>(os, lh.block_size);
write<std::uint64_t>(os, lh.key_file_size);
write<std::uint64_t>(os, lh.dat_file_size);
}
// Write log file header to file
template <class File>
void
write (File& f, log_file_header const& lh)
{
std::array <std::uint8_t,
log_file_header::size> buf;
ostream os (buf);
write (os, lh);
f.write (0, buf.data(), buf.size());
}
template <class = void>
void
verify (dat_file_header const& dh)
{
std::string const type (dh.type, 8);
if (type != "nudb.dat")
throw store_corrupt_error (
"bad type in data file");
if (dh.version != currentVersion)
throw store_corrupt_error (
"bad version in data file");
if (dh.key_size < 1)
throw store_corrupt_error (
"bad key size in data file");
}
template <class Hasher>
void
verify (key_file_header const& kh)
{
std::string const type (kh.type, 8);
if (type != "nudb.key")
throw store_corrupt_error (
"bad type in key file");
if (kh.version != currentVersion)
throw store_corrupt_error (
"bad version in key file");
if (kh.key_size < 1)
throw store_corrupt_error (
"bad key size in key file");
if (kh.pepper != pepper<Hasher>(kh.salt))
throw store_corrupt_error(
"wrong hash function for key file");
if (kh.load_factor < 1)
throw store_corrupt_error (
"bad load factor in key file");
if (kh.capacity < 1)
throw store_corrupt_error (
"bad capacity in key file");
if (kh.buckets < 1)
throw store_corrupt_error (
"bad key file size");
}
template <class Hasher>
void
verify (log_file_header const& lh)
{
std::string const type (lh.type, 8);
if (type != "nudb.log")
throw store_corrupt_error (
"bad type in log file");
if (lh.version != currentVersion)
throw store_corrupt_error (
"bad version in log file");
if (lh.pepper != pepper<Hasher>(lh.salt))
throw store_corrupt_error(
"wrong hash function for log file");
if (lh.key_size < 1)
throw store_corrupt_error (
"bad key size in log file");
}
// Make sure key file and value file headers match
template <class Hasher>
void
verify (dat_file_header const& dh,
key_file_header const& kh)
{
verify<Hasher> (kh);
if (kh.uid != dh.uid)
throw store_corrupt_error(
"uid mismatch");
if (kh.appnum != dh.appnum)
throw store_corrupt_error(
"appnum mismatch");
if (kh.key_size != dh.key_size)
throw store_corrupt_error(
"key size mismatch");
}
template <class Hasher>
void
verify (key_file_header const& kh,
log_file_header const& lh)
{
verify<Hasher>(lh);
if (kh.uid != lh.uid)
throw store_corrupt_error (
"uid mismatch in log file");
if (kh.appnum != lh.appnum)
throw store_corrupt_error(
"appnum mismatch in log file");
if (kh.key_size != lh.key_size)
throw store_corrupt_error (
"key size mismatch in log file");
if (kh.salt != lh.salt)
throw store_corrupt_error (
"salt mismatch in log file");
if (kh.pepper != lh.pepper)
throw store_corrupt_error (
"pepper mismatch in log file");
if (kh.block_size != lh.block_size)
throw store_corrupt_error (
"block size mismatch in log file");
}
} // detail
} // nudb
} // beast
#endif

View File

@@ -1,274 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_DETAIL_GENTEX_H_INCLUDED
#define BEAST_NUDB_DETAIL_GENTEX_H_INCLUDED
#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <system_error>
namespace beast {
namespace nudb {
namespace detail {
// Generation counting mutex
//
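// A writer calls start() to open a new generation and finish()
// to block until all locks taken in earlier generations are
// released. Readers pair lock_gen() with unlock_gen(); locks
// from the current generation do not block finish().
//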
template <class = void>
class gentex_t
{
private:
std::mutex m_;
std::size_t gen_ = 0;
std::size_t cur_ = 0;
std::size_t prev_ = 0;
std::condition_variable cond_;
public:
gentex_t() = default;
gentex_t (gentex_t const&) = delete;
gentex_t& operator= (gentex_t const&) = delete;
void
start();
void
finish();
std::size_t
lock_gen();
void
unlock_gen (std::size_t gen);
};
template <class _>
void
gentex_t<_>::start()
{
std::lock_guard<
std::mutex> l(m_);
prev_ += cur_;
cur_ = 0;
++gen_;
}
template <class _>
void
gentex_t<_>::finish()
{
std::unique_lock<
std::mutex> l(m_);
while (prev_ > 0)
cond_.wait(l);
}
template <class _>
std::size_t
gentex_t<_>::lock_gen()
{
std::lock_guard<
std::mutex> l(m_);
++cur_;
return gen_;
}
template <class _>
void
gentex_t<_>::unlock_gen (
std::size_t gen)
{
std::lock_guard<
std::mutex> l(m_);
if (gen == gen_)
{
--cur_;
}
else
{
--prev_;
if (prev_ == 0)
cond_.notify_all();
}
}
using gentex = gentex_t<>;
//------------------------------------------------------------------------------
template <class GenerationLockable>
class genlock
{
private:
bool owned_ = false;
GenerationLockable* g_ = nullptr;
std::size_t gen_;
public:
using mutex_type = GenerationLockable;
genlock() = default;
genlock (genlock const&) = delete;
genlock& operator= (genlock const&) = delete;
genlock (genlock&& other);
genlock& operator= (genlock&& other);
explicit
genlock (mutex_type& g);
genlock (mutex_type& g, std::defer_lock_t);
~genlock();
mutex_type*
mutex() noexcept
{
return g_;
}
bool
owns_lock() const noexcept
{
return g_ && owned_;
}
explicit
operator bool() const noexcept
{
return owns_lock();
}
void
lock();
void
unlock();
mutex_type*
release() noexcept;
template <class U>
friend
void
swap (genlock<U>& lhs, genlock<U>& rhs) noexcept;
};
template <class G>
genlock<G>::genlock (genlock&& other)
: owned_ (other.owned_)
, g_ (other.g_)
{
other.owned_ = false;
other.g_ = nullptr;
}
template <class G>
genlock<G>&
genlock<G>::operator= (genlock&& other)
{
if (owns_lock())
unlock();
owned_ = other.owned_;
g_ = other.g_;
other.owned_ = false;
other.g_ = nullptr;
return *this;
}
template <class G>
genlock<G>::genlock (mutex_type& g)
: g_ (&g)
{
lock();
}
template <class G>
genlock<G>::genlock (
mutex_type& g, std::defer_lock_t)
: g_ (&g)
{
}
template <class G>
genlock<G>::~genlock()
{
if (owns_lock())
unlock();
}
template <class G>
void
genlock<G>::lock()
{
if (! g_)
throw std::system_error(std::make_error_code(
std::errc::operation_not_permitted),
"genlock: no associated mutex");
if (owned_)
throw std::system_error(std::make_error_code(
std::errc::resource_deadlock_would_occur),
"genlock: already owned");
gen_ = g_->lock_gen();
owned_ = true;
}
template <class G>
void
genlock<G>::unlock()
{
if (! g_)
throw std::system_error(std::make_error_code(
std::errc::operation_not_permitted),
"genlock: no associated mutex");
if (! owned_)
throw std::system_error(std::make_error_code(
std::errc::operation_not_permitted),
"genlock: not owned");
g_->unlock_gen (gen_);
owned_ = false;
}
template <class G>
auto
genlock<G>::release() noexcept ->
mutex_type*
{
mutex_type* const g = g_;
g_ = nullptr;
return g;
}
template <class G>
void
swap (genlock<G>& lhs, genlock<G>& rhs) noexcept
{
using namespace std;
swap (lhs.owned_, rhs.owned_);
swap (lhs.g_, rhs.g_);
}
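// Usage sketch (illustrative, not part of the original header):
// readers pin the current generation with a genlock while a
// committer uses start()/finish() to wait out older readers.
//
//     gentex gt;
//
//     void reader()
//     {
//         genlock<gentex> lk(gt);    // calls gt.lock_gen()
//         // ... read shared state for this generation ...
//     }                              // calls gt.unlock_gen(gen)
//
//     void committer()
//     {
//         gt.start();                // begin a new generation
//         // ... publish the new state ...
//         gt.finish();               // block until prior readers drain
//     }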
} // detail
} // nudb
} // beast
#endif


@@ -1,255 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_DETAIL_POOL_H_INCLUDED
#define BEAST_NUDB_DETAIL_POOL_H_INCLUDED
#include <ripple/beast/nudb/detail/arena.h>
#include <ripple/beast/nudb/detail/bucket.h>
#include <ripple/beast/nudb/detail/format.h>
#include <cstdint>
#include <cstring>
#include <memory>
#include <map>
#include <utility>
namespace beast {
namespace nudb {
namespace detail {
// Buffers key/value pairs in a map, associating
// them with a modifiable data file offset.
template <class = void>
class pool_t
{
public:
struct value_type;
class compare;
private:
using map_type = std::map<
value_type, std::size_t, compare>;
arena arena_;
std::size_t key_size_;
std::size_t data_size_ = 0;
map_type map_;
public:
using iterator =
typename map_type::iterator;
pool_t (pool_t const&) = delete;
pool_t& operator= (pool_t const&) = delete;
explicit
pool_t (std::size_t key_size,
std::size_t alloc_size);
pool_t& operator= (pool_t&& other);
iterator
begin()
{
return map_.begin();
}
iterator
end()
{
return map_.end();
}
    bool
    empty() const
    {
        return map_.empty();
    }
// Returns the number of elements in the pool
std::size_t
size() const
{
return map_.size();
}
// Returns the sum of data sizes in the pool
std::size_t
data_size() const
{
return data_size_;
}
void
clear();
void
shrink_to_fit();
iterator
find (void const* key);
// Insert a value
// @param h The hash of the key
void
insert (std::size_t h, void const* key,
void const* buffer, std::size_t size);
template <class U>
friend
void
swap (pool_t<U>& lhs, pool_t<U>& rhs);
};
template <class _>
struct pool_t<_>::value_type
{
std::size_t hash;
std::size_t size;
void const* key;
void const* data;
value_type (value_type const&) = default;
value_type& operator= (value_type const&) = default;
value_type (std::size_t hash_, std::size_t size_,
void const* key_, void const* data_)
: hash (hash_)
, size (size_)
, key (key_)
, data (data_)
{
}
};
template <class _>
class pool_t<_>::compare
{
private:
std::size_t key_size_;
public:
using result_type = bool;
using first_argument_type = value_type;
using second_argument_type = value_type;
compare (compare const&) = default;
compare& operator= (compare const&) = default;
compare (std::size_t key_size)
: key_size_ (key_size)
{
}
bool
operator()(value_type const& lhs,
value_type const& rhs) const
{
return std::memcmp(
lhs.key, rhs.key, key_size_) < 0;
}
};
//------------------------------------------------------------------------------
template <class _>
pool_t<_>::pool_t (std::size_t key_size,
std::size_t alloc_size)
: arena_ (alloc_size)
, key_size_ (key_size)
, map_ (compare(key_size))
{
}
template <class _>
pool_t<_>&
pool_t<_>::operator= (pool_t&& other)
{
arena_ = std::move(other.arena_);
key_size_ = other.key_size_;
data_size_ = other.data_size_;
map_ = std::move(other.map_);
return *this;
}
template <class _>
void
pool_t<_>::clear()
{
arena_.clear();
data_size_ = 0;
map_.clear();
}
template <class _>
void
pool_t<_>::shrink_to_fit()
{
arena_.shrink_to_fit();
}
template <class _>
auto
pool_t<_>::find (void const* key) ->
iterator
{
// VFALCO need is_transparent here
value_type tmp (0, 0, key, nullptr);
auto const iter = map_.find(tmp);
return iter;
}
template <class _>
void
pool_t<_>::insert (std::size_t h,
void const* key, void const* data,
std::size_t size)
{
auto const k = arena_.alloc(key_size_);
auto const d = arena_.alloc(size);
std::memcpy(k, key, key_size_);
std::memcpy(d, data, size);
auto const result = map_.emplace(
std::piecewise_construct,
std::make_tuple(h, size, k, d),
std::make_tuple(0));
(void)result.second;
// Must not already exist!
assert(result.second);
data_size_ += size;
}
template <class _>
void
swap (pool_t<_>& lhs, pool_t<_>& rhs)
{
using std::swap;
swap(lhs.arena_, rhs.arena_);
swap(lhs.key_size_, rhs.key_size_);
swap(lhs.data_size_, rhs.data_size_);
swap(lhs.map_, rhs.map_);
}
using pool = pool_t<>;
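// Usage sketch (illustrative; all names below are placeholders):
// buffer a value by key, then assign its data file offset during
// commit. The std::size_t mapped to each entry is that offset.
//
//     detail::pool p(key_size, arena_alloc_size);
//     p.insert(h, key, data, size);  // copies key and data into the arena
//     auto it = p.find(key);
//     if (it != p.end())
//         it->second = offset;       // set the modifiable file offset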
} // detail
} // nudb
} // beast
#endif


@@ -1,162 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_DETAIL_STREAM_H_INCLUDED
#define BEAST_NUDB_DETAIL_STREAM_H_INCLUDED
#include <ripple/beast/nudb/common.h>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
namespace beast {
namespace nudb {
namespace detail {
// Input stream from bytes
template <class = void>
class istream_t
{
private:
std::uint8_t const* buf_;
std::size_t size_ = 0;
public:
istream_t (istream_t const&) = default;
istream_t& operator= (istream_t const&) = default;
istream_t (void const* data, std::size_t size)
: buf_(reinterpret_cast<
std::uint8_t const*>(data))
, size_(size)
{
}
template <std::size_t N>
istream_t (std::array<std::uint8_t, N> const& a)
: buf_ (a.data())
, size_ (a.size())
{
}
std::uint8_t const*
data (std::size_t bytes);
std::uint8_t const*
operator()(std::size_t bytes)
{
return data(bytes);
}
};
template <class _>
std::uint8_t const*
istream_t<_>::data (std::size_t bytes)
{
if (size_ < bytes)
throw short_read_error();
auto const data = buf_;
buf_ = buf_ + bytes;
size_ -= bytes;
return data;
}
using istream = istream_t<>;
//------------------------------------------------------------------------------
// Output stream to bytes
template <class = void>
class ostream_t
{
private:
std::uint8_t* buf_;
std::size_t size_ = 0;
public:
ostream_t (ostream_t const&) = default;
ostream_t& operator= (ostream_t const&) = default;
    // Note: the size argument is unused; callers are responsible
    // for ensuring the buffer has sufficient capacity.
    ostream_t (void* data, std::size_t)
        : buf_ (reinterpret_cast<std::uint8_t*>(data))
    {
    }
template <std::size_t N>
ostream_t (std::array<std::uint8_t, N>& a)
: buf_ (a.data())
{
}
// Returns the number of bytes written
std::size_t
size() const
{
return size_;
}
std::uint8_t*
data (std::size_t bytes);
std::uint8_t*
operator()(std::size_t bytes)
{
return data(bytes);
}
};
template <class _>
std::uint8_t*
ostream_t<_>::data (std::size_t bytes)
{
auto const data = buf_;
buf_ = buf_ + bytes;
size_ += bytes;
return data;
}
using ostream = ostream_t<>;
//------------------------------------------------------------------------------
// read blob
inline
void
read (istream& is,
void* buffer, std::size_t bytes)
{
std::memcpy (buffer, is.data(bytes), bytes);
}
// write blob
inline
void
write (ostream& os,
void const* buffer, std::size_t bytes)
{
std::memcpy (os.data(bytes), buffer, bytes);
}
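// Usage sketch (illustrative): serialize into a fixed array with
// ostream, then parse it back with istream.
//
//     std::array<std::uint8_t, 8> a;
//     ostream os(a);
//     write(os, "nudb", 4);          // os.size() == 4
//
//     istream is(a);
//     char tag[4];
//     read(is, tag, sizeof(tag));    // throws short_read_error
//                                    // when the bytes run out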
} // detail
} // nudb
} // beast
#endif


@@ -1,40 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_FILE_H_INCLUDED
#define BEAST_NUDB_FILE_H_INCLUDED
#include <ripple/beast/nudb/posix_file.h>
#include <ripple/beast/nudb/win32_file.h>
#include <string>
namespace beast {
namespace nudb {
using native_file =
#ifdef _MSC_VER
win32_file;
#else
posix_file;
#endif
} // nudb
} // beast
#endif


@@ -1,64 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_IDENTITY_CODEC_H_INCLUDED
#define BEAST_NUDB_IDENTITY_CODEC_H_INCLUDED
#include <utility>
namespace beast {
namespace nudb {
/** Codec which maps input directly to output. */
class identity
{
public:
template <class... Args>
explicit
identity(Args&&... args)
{
}
char const*
name() const
{
return "none";
}
template <class BufferFactory>
std::pair<void const*, std::size_t>
compress (void const* in,
std::size_t in_size, BufferFactory&&) const
{
return std::make_pair(in, in_size);
}
template <class BufferFactory>
std::pair<void const*, std::size_t>
decompress (void const* in,
std::size_t in_size, BufferFactory&&) const
{
return std::make_pair(in, in_size);
}
};
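// Usage sketch (illustrative): a codec returns a pointer/size pair.
// The identity codec hands back its input unchanged and never
// invokes the BufferFactory; detail::buffer (from detail/buffer.h,
// not shown here) is the factory the store passes in practice.
//
//     identity codec;
//     char const in[] = "value";
//     detail::buffer buf;
//     auto out = codec.compress(in, sizeof(in), buf);
//     // out.first == in && out.second == sizeof(in)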
} // nudb
} // beast
#endif


@@ -1,376 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_DETAIL_POSIX_FILE_H_INCLUDED
#define BEAST_NUDB_DETAIL_POSIX_FILE_H_INCLUDED
#include <ripple/beast/nudb/common.h>
#include <cassert>
#include <cerrno>
#include <cstring>
#include <string>
#include <utility>
#ifndef BEAST_NUDB_POSIX_FILE
# ifdef _MSC_VER
# define BEAST_NUDB_POSIX_FILE 0
# else
# define BEAST_NUDB_POSIX_FILE 1
# endif
#endif
#if BEAST_NUDB_POSIX_FILE
# include <fcntl.h>
# include <sys/types.h>
# include <sys/uio.h>
# include <sys/stat.h>
# include <unistd.h>
#endif
namespace beast {
namespace nudb {
#if BEAST_NUDB_POSIX_FILE
namespace detail {
class file_posix_error : public file_error
{
public:
explicit
file_posix_error (char const* m,
int errnum = errno)
: file_error (std::string("nudb: ") + m +
", " + text(errnum))
{
}
explicit
file_posix_error (std::string const& m,
int errnum = errno)
: file_error (std::string("nudb: ") + m +
", " + text(errnum))
{
}
private:
static
std::string
text (int errnum)
{
return std::strerror(errnum);
}
};
//------------------------------------------------------------------------------
template <class = void>
class posix_file
{
private:
int fd_ = -1;
public:
posix_file() = default;
posix_file (posix_file const&) = delete;
posix_file& operator= (posix_file const&) = delete;
~posix_file();
posix_file (posix_file&&);
posix_file&
operator= (posix_file&& other);
bool
is_open() const
{
return fd_ != -1;
}
void
close();
bool
create (file_mode mode, path_type const& path);
bool
open (file_mode mode, path_type const& path);
static
bool
erase (path_type const& path);
std::size_t
actual_size() const;
void
read (std::size_t offset,
void* buffer, std::size_t bytes);
void
write (std::size_t offset,
void const* buffer, std::size_t bytes);
void
sync();
void
trunc (std::size_t length);
private:
static
std::pair<int, int>
flags (file_mode mode);
};
template <class _>
posix_file<_>::~posix_file()
{
close();
}
template <class _>
posix_file<_>::posix_file (posix_file &&other)
: fd_ (other.fd_)
{
other.fd_ = -1;
}
template <class _>
posix_file<_>&
posix_file<_>::operator= (posix_file&& other)
{
if (&other == this)
return *this;
close();
fd_ = other.fd_;
other.fd_ = -1;
return *this;
}
template <class _>
void
posix_file<_>::close()
{
if (fd_ != -1)
{
if (::close(fd_) != 0)
throw file_posix_error(
"close file");
fd_ = -1;
}
}
template <class _>
bool
posix_file<_>::create (file_mode mode,
path_type const& path)
{
auto const result = flags(mode);
assert(! is_open());
fd_ = ::open(path.c_str(), result.first);
if (fd_ != -1)
{
::close(fd_);
fd_ = -1;
return false;
}
int errnum = errno;
if (errnum != ENOENT)
throw file_posix_error(
"open file", errnum);
fd_ = ::open(path.c_str(),
result.first | O_CREAT, 0644);
if (fd_ == -1)
throw file_posix_error(
"create file");
#ifndef __APPLE__
if (::posix_fadvise(fd_, 0, 0, result.second) != 0)
throw file_posix_error(
"fadvise");
#endif
return true;
}
template <class _>
bool
posix_file<_>::open (file_mode mode,
path_type const& path)
{
assert(! is_open());
auto const result = flags(mode);
fd_ = ::open(path.c_str(), result.first);
if (fd_ == -1)
{
int errnum = errno;
if (errnum == ENOENT)
return false;
throw file_posix_error(
"open file", errnum);
}
#ifndef __APPLE__
if (::posix_fadvise(fd_, 0, 0, result.second) != 0)
throw file_posix_error(
"fadvise");
#endif
return true;
}
template <class _>
bool
posix_file<_>::erase (path_type const& path)
{
if (::unlink(path.c_str()) != 0)
{
int const ec = errno;
if (ec != ENOENT)
throw file_posix_error(
"unlink", ec);
return false;
}
return true;
}
template <class _>
std::size_t
posix_file<_>::actual_size() const
{
struct stat st;
if (::fstat(fd_, &st) != 0)
throw file_posix_error(
"fstat");
return st.st_size;
}
template <class _>
void
posix_file<_>::read (std::size_t offset,
void* buffer, std::size_t bytes)
{
while(bytes > 0)
{
auto const n = ::pread (
fd_, buffer, bytes, offset);
// VFALCO end of file should throw short_read
if (n == -1)
throw file_posix_error(
"pread");
if (n == 0)
throw file_short_read_error();
offset += n;
bytes -= n;
buffer = reinterpret_cast<
char*>(buffer) + n;
}
}
template <class _>
void
posix_file<_>::write (std::size_t offset,
void const* buffer, std::size_t bytes)
{
while(bytes > 0)
{
auto const n = ::pwrite (
fd_, buffer, bytes, offset);
if (n == -1)
throw file_posix_error(
"pwrite");
if (n == 0)
throw file_short_write_error();
offset += n;
bytes -= n;
buffer = reinterpret_cast<
char const*>(buffer) + n;
}
}
template <class _>
void
posix_file<_>::sync()
{
if (::fsync(fd_) != 0)
throw file_posix_error(
"fsync");
}
template <class _>
void
posix_file<_>::trunc (std::size_t length)
{
if (::ftruncate(fd_, length) != 0)
throw file_posix_error(
"ftruncate");
}
template <class _>
std::pair<int, int>
posix_file<_>::flags (file_mode mode)
{
std::pair<int, int> result;
switch(mode)
{
case file_mode::scan:
result.first =
O_RDONLY;
#ifndef __APPLE__
result.second =
POSIX_FADV_SEQUENTIAL;
#endif
break;
case file_mode::read:
result.first =
O_RDONLY;
#ifndef __APPLE__
result.second =
POSIX_FADV_RANDOM;
#endif
break;
case file_mode::append:
result.first =
O_RDWR |
O_APPEND;
#ifndef __APPLE__
result.second =
POSIX_FADV_RANDOM;
#endif
break;
case file_mode::write:
result.first =
O_RDWR;
#ifndef __APPLE__
result.second =
POSIX_FADV_NORMAL;
#endif
break;
}
return result;
}
} // detail
using posix_file = detail::posix_file<>;
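// Usage sketch (illustrative; the path is a placeholder, POSIX
// builds only): create a file, write a block at an explicit
// offset, then sync and close.
//
//     posix_file f;
//     if (f.create(file_mode::write, "/tmp/nudb.bin"))
//     {
//         char const block[4096] = {};
//         f.write(0, block, sizeof(block));
//         f.sync();
//     }
//     f.close();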
#endif
} // nudb
} // beast
#endif


@@ -1,157 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_RECOVER_H_INCLUDED
#define BEAST_NUDB_RECOVER_H_INCLUDED
#include <ripple/beast/nudb/common.h>
#include <ripple/beast/nudb/file.h>
#include <ripple/beast/nudb/detail/bucket.h>
#include <ripple/beast/nudb/detail/bulkio.h>
#include <ripple/beast/nudb/detail/format.h>
#include <algorithm>
#include <cstddef>
#include <string>
namespace beast {
namespace nudb {
/** Perform recovery on a database.
This implements the recovery algorithm by rolling back
any partially committed data.
*/
template <
class Hasher,
class Codec,
class File = native_file,
class... Args>
bool
recover (
path_type const& dat_path,
path_type const& key_path,
path_type const& log_path,
std::size_t read_size,
Args&&... args)
{
using namespace detail;
File df(args...);
File lf(args...);
File kf(args...);
if (! df.open (file_mode::append, dat_path))
return false;
if (! kf.open (file_mode::write, key_path))
return false;
if (! lf.open (file_mode::append, log_path))
return true;
dat_file_header dh;
key_file_header kh;
log_file_header lh;
try
{
read (kf, kh);
}
catch (file_short_read_error const&)
{
throw store_corrupt_error(
"short key file header");
}
// VFALCO should the number of buckets be based on the
// file size in the log record instead?
verify<Hasher>(kh);
try
{
read (df, dh);
}
catch (file_short_read_error const&)
{
throw store_corrupt_error(
"short data file header");
}
verify<Hasher>(dh, kh);
auto const lf_size = lf.actual_size();
if (lf_size == 0)
{
lf.close();
File::erase (log_path);
return true;
}
try
{
read (lf, lh);
verify<Hasher>(kh, lh);
auto const df_size = df.actual_size();
buffer buf(kh.block_size);
bucket b (kh.block_size, buf.get());
bulk_reader<File> r(lf, log_file_header::size,
lf_size, read_size);
while(! r.eof())
{
std::size_t n;
try
{
// Log Record
auto is = r.prepare(field<
std::uint64_t>::size);
read<std::uint64_t>(is, n); // Index
b.read(r); // Bucket
}
catch (store_corrupt_error const&)
{
throw store_corrupt_error(
"corrupt log record");
}
catch (file_short_read_error const&)
{
// This means that the log file never
// got fully synced. In which case, there
// were no changes made to the key file.
// So we can recover by just truncating.
break;
}
if (b.spill() &&
b.spill() + kh.bucket_size > df_size)
throw store_corrupt_error(
"bad spill in log record");
// VFALCO is this the right condition?
if (n > kh.buckets)
throw store_corrupt_error(
"bad index in log record");
b.write (kf, (n + 1) * kh.block_size);
}
kf.trunc(lh.key_file_size);
df.trunc(lh.dat_file_size);
kf.sync();
df.sync();
}
catch (file_short_read_error const&)
{
// key and data files should be consistent here
}
lf.trunc(0);
lf.sync();
lf.close();
File::erase (log_path);
return true;
}
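// Usage sketch (illustrative; paths are placeholders): roll back
// any partial commit before opening the files directly. Hasher and
// Codec must match those used when the database was created;
// xxhasher and identity are the pairing used by the tests later in
// this commit.
//
//     recover<xxhasher, identity>(
//         "db.dat", "db.key", "db.log",
//         16 * 1024 * 1024);         // bulk read size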
} // nudb
} // beast
#endif


@@ -1,942 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_STORE_H_INCLUDED
#define BEAST_NUDB_STORE_H_INCLUDED
#include <ripple/beast/nudb/common.h>
#include <ripple/beast/nudb/recover.h>
#include <ripple/beast/nudb/detail/bucket.h>
#include <ripple/beast/nudb/detail/buffer.h>
#include <ripple/beast/nudb/detail/bulkio.h>
#include <ripple/beast/nudb/detail/cache.h>
#include <ripple/beast/nudb/detail/format.h>
#include <ripple/beast/nudb/detail/gentex.h>
#include <ripple/beast/nudb/detail/pool.h>
#include <boost/thread/lock_types.hpp>
#include <boost/thread/shared_mutex.hpp>
#include <algorithm>
#include <array>
#include <atomic>
#include <cassert>
#include <chrono>
#include <cmath>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <exception>
#include <limits>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <string>
#include <thread>
#include <utility>
#if DOXYGEN
#include <ripple/beast/nudb/README.md>
#endif
namespace beast {
namespace nudb {
/*
TODO
- fingerprint / checksum on log records
- size field at end of data records
allows walking backwards
- timestamp every so often on data records
allows knowing the age of the data
*/
/** A simple key/value database
@tparam Hasher The hash function to use on key
@tparam Codec The codec to apply to value data
@tparam File The type of File object to use.
*/
template <class Hasher, class Codec, class File>
class store
{
public:
using hash_type = Hasher;
using codec_type = Codec;
using file_type = File;
private:
// requires 64-bit integers or better
    static_assert(sizeof(std::size_t) >= 8, "nudb requires a 64-bit std::size_t");
enum
{
// Size of bulk writes
bulk_write_size = 16 * 1024 * 1024,
// Size of bulk reads during recover
recover_read_size = 16 * 1024 * 1024
};
using clock_type =
std::chrono::steady_clock;
using shared_lock_type =
boost::shared_lock<boost::shared_mutex>;
using unique_lock_type =
boost::unique_lock<boost::shared_mutex>;
struct state
{
File df;
File kf;
File lf;
path_type dp;
path_type kp;
path_type lp;
detail::pool p0;
detail::pool p1;
detail::cache c0;
detail::cache c1;
Codec const codec;
detail::key_file_header const kh;
// pool commit high water mark
std::size_t pool_thresh = 1;
state (state const&) = delete;
state& operator= (state const&) = delete;
state (File&& df_, File&& kf_, File&& lf_,
path_type const& dp_, path_type const& kp_,
path_type const& lp_,
detail::key_file_header const& kh_,
std::size_t arena_alloc_size);
};
bool open_ = false;
// VFALCO Unfortunately boost::optional doesn't support
// move construction so we use unique_ptr instead.
std::unique_ptr <state> s_; // State of an open database
std::size_t frac_; // accumulates load
std::size_t thresh_; // split threshold
std::size_t buckets_; // number of buckets
std::size_t modulus_; // hash modulus
std::mutex u_; // serializes insert()
detail::gentex g_;
boost::shared_mutex m_;
std::thread thread_;
std::condition_variable_any cond_;
// These allow insert to block, preventing the pool
// from exceeding a limit. Currently the limit is
// baked in, and can only be reached during sustained
// insertions, such as while importing.
std::size_t commit_limit_ = 1UL * 1024 * 1024 * 1024;
std::condition_variable_any cond_limit_;
std::atomic<bool> epb_; // `true` when ep_ set
std::exception_ptr ep_;
public:
store() = default;
store (store const&) = delete;
store& operator= (store const&) = delete;
    /** Destroy the database.
        Files are closed, memory is freed, and data that has not
        been committed is discarded. To ensure that all inserted
        data is written, call close() before destroying the store.
        This destructor catches all exceptions thrown by callees;
        callers who want to observe those exceptions must call
        close() themselves.
        Throws:
            None
    */
~store();
/** Returns `true` if the database is open. */
bool
is_open() const
{
return open_;
}
path_type const&
dat_path() const
{
return s_->dp;
}
path_type const&
key_path() const
{
return s_->kp;
}
path_type const&
log_path() const
{
return s_->lp;
}
std::uint64_t
appnum() const
{
return s_->kh.appnum;
}
/** Close the database.
All data is committed before closing.
Throws:
store_error
*/
void
close();
/** Open a database.
@param args Arguments passed to File constructors
@return `true` if each file could be opened
*/
template <class... Args>
bool
open (
path_type const& dat_path,
path_type const& key_path,
path_type const& log_path,
std::size_t arena_alloc_size,
Args&&... args);
/** Fetch a value.
        If the key is found, the handler is called as:
            `handler(void const* data, std::size_t size)`
where data and size represent the value. If the
key is not found, the handler is not called.
@return `true` if a matching key was found.
*/
template <class Handler>
bool
fetch (void const* key, Handler&& handler);
/** Insert a value.
Returns:
`true` if the key was inserted,
`false` if the key already existed
*/
bool
insert (void const* key, void const* data,
std::size_t bytes);
private:
void
rethrow()
{
if (epb_.load())
std::rethrow_exception(ep_);
}
// Fetch key in loaded bucket b or its spills.
//
template <class Handler>
bool
fetch (std::size_t h, void const* key,
detail::bucket b, Handler&& handler);
    // Returns `true` if the key exists.
    // The lock is released after the first bucket is processed.
//
bool
exists (std::size_t h, void const* key,
shared_lock_type* lock, detail::bucket b);
void
split (detail::bucket& b1, detail::bucket& b2,
detail::bucket& tmp, std::size_t n1, std::size_t n2,
std::size_t buckets, std::size_t modulus,
detail::bulk_writer<File>& w);
detail::bucket
load (std::size_t n, detail::cache& c1,
detail::cache& c0, void* buf);
void
commit();
void
run();
};
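// Usage sketch (illustrative; types, paths, and the key/data/size
// names are placeholders): open a database, insert a value under a
// key_size-byte key, and fetch it back through a handler.
//
//     store<xxhasher, identity, native_file> db;
//     if (db.open("db.dat", "db.key", "db.log",
//             16 * 1024 * 1024))     // arena_alloc_size
//     {
//         db.insert(key, data, size);
//         db.fetch(key,
//             [](void const* d, std::size_t n)
//             {
//                 // ... consume n bytes of value at d ...
//             });
//         db.close();                // commits and rethrows errors
//     }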
//------------------------------------------------------------------------------
template <class Hasher, class Codec, class File>
store<Hasher, Codec, File>::state::state (
File&& df_, File&& kf_, File&& lf_,
path_type const& dp_, path_type const& kp_,
path_type const& lp_,
detail::key_file_header const& kh_,
std::size_t arena_alloc_size)
: df (std::move(df_))
, kf (std::move(kf_))
, lf (std::move(lf_))
, dp (dp_)
, kp (kp_)
, lp (lp_)
, p0 (kh_.key_size, arena_alloc_size)
, p1 (kh_.key_size, arena_alloc_size)
, c0 (kh_.key_size, kh_.block_size)
, c1 (kh_.key_size, kh_.block_size)
, kh (kh_)
{
}
//------------------------------------------------------------------------------
template <class Hasher, class Codec, class File>
store<Hasher, Codec, File>::~store()
{
try
{
close();
}
catch (...)
{
// If callers want to see the exceptions
// they have to call close manually.
}
}
template <class Hasher, class Codec, class File>
template <class... Args>
bool
store<Hasher, Codec, File>::open (
path_type const& dat_path,
path_type const& key_path,
path_type const& log_path,
std::size_t arena_alloc_size,
Args&&... args)
{
using namespace detail;
if (is_open())
throw std::logic_error("nudb: already open");
epb_.store(false);
recover<Hasher, Codec, File>(
dat_path, key_path, log_path,
recover_read_size,
args...);
File df(args...);
File kf(args...);
File lf(args...);
if (! df.open (file_mode::append, dat_path))
return false;
if (! kf.open (file_mode::write, key_path))
return false;
if (! lf.create (file_mode::append, log_path))
return false;
dat_file_header dh;
key_file_header kh;
read (df, dh);
read (kf, kh);
verify (dh);
verify<Hasher> (kh);
verify<Hasher> (dh, kh);
auto s = std::make_unique<state>(
std::move(df), std::move(kf), std::move(lf),
dat_path, key_path, log_path, kh,
arena_alloc_size);
thresh_ = std::max<std::size_t>(65536UL,
kh.load_factor * kh.capacity);
frac_ = thresh_ / 2;
buckets_ = kh.buckets;
modulus_ = ceil_pow2(kh.buckets);
// VFALCO TODO This could be better
if (buckets_ < 1)
throw store_corrupt_error (
"bad key file length");
s_ = std::move(s);
open_ = true;
thread_ = std::thread(
&store::run, this);
return true;
}
template <class Hasher, class Codec, class File>
void
store<Hasher, Codec, File>::close()
{
if (open_)
{
// Set this first otherwise a
// throw can cause another close().
open_ = false;
cond_.notify_all();
thread_.join();
rethrow();
s_->lf.close();
File::erase(s_->lp);
s_.reset();
}
}
template <class Hasher, class Codec, class File>
template <class Handler>
bool
store<Hasher, Codec, File>::fetch (
void const* key, Handler&& handler)
{
using namespace detail;
rethrow();
auto const h = hash<Hasher>(
key, s_->kh.key_size, s_->kh.salt);
shared_lock_type m (m_);
{
auto iter = s_->p1.find(key);
if (iter == s_->p1.end())
{
iter = s_->p0.find(key);
if (iter == s_->p0.end())
goto next;
}
buffer buf;
auto const result =
s_->codec.decompress(
iter->first.data,
iter->first.size, buf);
handler(result.first, result.second);
return true;
}
next:
auto const n = bucket_index(
h, buckets_, modulus_);
auto const iter = s_->c1.find(n);
if (iter != s_->c1.end())
return fetch(h, key,
iter->second, handler);
// VFALCO Audit for concurrency
genlock <gentex> g (g_);
m.unlock();
buffer buf (s_->kh.block_size);
// VFALCO Constructs with garbage here
bucket b (s_->kh.block_size,
buf.get());
b.read (s_->kf,
(n + 1) * b.block_size());
return fetch(h, key, b, handler);
}
template <class Hasher, class Codec, class File>
bool
store<Hasher, Codec, File>::insert (
void const* key, void const* data,
std::size_t size)
{
using namespace detail;
rethrow();
buffer buf;
// Data Record
if (size > field<uint48_t>::max)
throw std::logic_error(
"nudb: size too large");
auto const h = hash<Hasher>(
key, s_->kh.key_size, s_->kh.salt);
std::lock_guard<std::mutex> u (u_);
{
shared_lock_type m (m_);
if (s_->p1.find(key) != s_->p1.end())
return false;
if (s_->p0.find(key) != s_->p0.end())
return false;
auto const n = bucket_index(
h, buckets_, modulus_);
auto const iter = s_->c1.find(n);
if (iter != s_->c1.end())
{
if (exists(h, key, &m,
iter->second))
return false;
// m is now unlocked
}
else
{
// VFALCO Audit for concurrency
genlock <gentex> g (g_);
m.unlock();
buf.reserve(s_->kh.block_size);
bucket b (s_->kh.block_size,
buf.get());
b.read (s_->kf,
(n + 1) * s_->kh.block_size);
if (exists(h, key, nullptr, b))
return false;
}
}
auto const result =
s_->codec.compress(data, size, buf);
// Perform insert
unique_lock_type m (m_);
s_->p1.insert (h, key,
result.first, result.second);
// Did we go over the commit limit?
if (commit_limit_ > 0 &&
s_->p1.data_size() >= commit_limit_)
{
// Yes, start a new commit
cond_.notify_all();
// Wait for pool to shrink
cond_limit_.wait(m,
[this]() { return
s_->p1.data_size() <
commit_limit_; });
}
bool const notify =
s_->p1.data_size() >= s_->pool_thresh;
m.unlock();
if (notify)
cond_.notify_all();
return true;
}
template <class Hasher, class Codec, class File>
template <class Handler>
bool
store<Hasher, Codec, File>::fetch (
std::size_t h, void const* key,
detail::bucket b, Handler&& handler)
{
using namespace detail;
buffer buf0;
buffer buf1;
for(;;)
{
for (auto i = b.lower_bound(h);
i < b.size(); ++i)
{
auto const item = b[i];
if (item.hash != h)
break;
// Data Record
auto const len =
s_->kh.key_size + // Key
item.size; // Value
buf0.reserve(len);
s_->df.read(item.offset +
field<uint48_t>::size, // Size
buf0.get(), len);
if (std::memcmp(buf0.get(), key,
s_->kh.key_size) == 0)
{
auto const result =
s_->codec.decompress(
buf0.get() + s_->kh.key_size,
item.size, buf1);
handler(result.first, result.second);
return true;
}
}
auto const spill = b.spill();
if (! spill)
break;
buf1.reserve(s_->kh.block_size);
b = bucket(s_->kh.block_size,
buf1.get());
b.read(s_->df, spill);
}
return false;
}
template <class Hasher, class Codec, class File>
bool
store<Hasher, Codec, File>::exists (
std::size_t h, void const* key,
shared_lock_type* lock, detail::bucket b)
{
using namespace detail;
buffer buf(s_->kh.key_size +
s_->kh.block_size);
void* pk = buf.get();
void* pb = buf.get() + s_->kh.key_size;
for(;;)
{
for (auto i = b.lower_bound(h);
i < b.size(); ++i)
{
auto const item = b[i];
if (item.hash != h)
break;
// Data Record
s_->df.read(item.offset +
field<uint48_t>::size, // Size
pk, s_->kh.key_size); // Key
if (std::memcmp(pk, key,
s_->kh.key_size) == 0)
return true;
}
auto spill = b.spill();
if (lock && lock->owns_lock())
lock->unlock();
if (! spill)
break;
b = bucket(s_->kh.block_size, pb);
b.read(s_->df, spill);
}
return false;
}
// Split bucket b1, moving entries that rehash to n2 into b2.
// b1 must be loaded.
// tmp is used as a temporary scratch bucket.
// Spill records are written, but the new buckets are not.
//
template <class Hasher, class Codec, class File>
void
store<Hasher, Codec, File>::split (detail::bucket& b1,
detail::bucket& b2, detail::bucket& tmp,
std::size_t n1, std::size_t n2,
std::size_t buckets, std::size_t modulus,
detail::bulk_writer<File>& w)
{
using namespace detail;
// Trivial case: split empty bucket
if (b1.empty())
return;
// Split
for (std::size_t i = 0; i < b1.size();)
{
auto const e = b1[i];
auto const n = bucket_index(
e.hash, buckets, modulus);
assert(n==n1 || n==n2);
if (n == n2)
{
b2.insert (e.offset, e.size, e.hash);
b1.erase (i);
}
else
{
++i;
}
}
std::size_t spill = b1.spill();
if (spill)
{
b1.spill (0);
do
{
// If any part of the spill record is
// in the write buffer then flush first
// VFALCO Needs audit
if (spill + bucket_size(s_->kh.capacity) >
w.offset() - w.size())
w.flush();
tmp.read (s_->df, spill);
for (std::size_t i = 0; i < tmp.size(); ++i)
{
auto const e = tmp[i];
auto const n = bucket_index(
e.hash, buckets, modulus);
assert(n==n1 || n==n2);
if (n == n2)
{
maybe_spill(b2, w);
b2.insert(
e.offset, e.size, e.hash);
}
else
{
maybe_spill(b1, w);
b1.insert(
e.offset, e.size, e.hash);
}
}
spill = tmp.spill();
}
while (spill);
}
}
// Effects:
//
// Returns a bucket from caches or the key file
//
// If the bucket is found in c1, returns the
// bucket from c1.
// Else, if the bucket is found in c0, inserts the
// bucket into c1 and returns the bucket from c1.
// Else, reads the bucket from the key file, inserts
// the bucket into both c0 and c1, and returns
// the bucket from c1.
//
// Preconditions:
// buf points to a buffer of at least block_size() bytes
//
// Postconditions:
//    c0, c1, and the memory pointed to by buf may be modified
//
template <class Hasher, class Codec, class File>
detail::bucket
store<Hasher, Codec, File>::load (
std::size_t n, detail::cache& c1,
detail::cache& c0, void* buf)
{
using namespace detail;
auto iter = c1.find(n);
if (iter != c1.end())
return iter->second;
iter = c0.find(n);
if (iter != c0.end())
return c1.insert (n,
iter->second)->second;
bucket tmp (s_->kh.block_size, buf);
tmp.read (s_->kf, (n + 1) *
s_->kh.block_size);
c0.insert (n, tmp);
return c1.insert (n, tmp)->second;
}
// Commit the memory pool to disk, then sync.
//
// Effects:
//    Appends the buffered values in p0 to the data file, updates
//    the affected buckets (splitting when the load threshold is
//    crossed), logs the clean buckets for rollback, writes the
//    new buckets to the key file, and truncates the log file
//    after a final sync.
//
template <class Hasher, class Codec, class File>
void
store<Hasher, Codec, File>::commit()
{
using namespace detail;
buffer buf1 (s_->kh.block_size);
buffer buf2 (s_->kh.block_size);
bucket tmp (s_->kh.block_size, buf1.get());
// Empty cache put in place temporarily
// so we can reuse the memory from s_->c1
cache c1;
{
unique_lock_type m (m_);
if (s_->p1.empty())
return;
if (s_->p1.data_size() >= commit_limit_)
cond_limit_.notify_all();
swap (s_->c1, c1);
swap (s_->p0, s_->p1);
s_->pool_thresh = std::max(
s_->pool_thresh, s_->p0.data_size());
m.unlock();
}
// Prepare rollback information
// Log File Header
log_file_header lh;
lh.version = currentVersion; // Version
lh.uid = s_->kh.uid; // UID
lh.appnum = s_->kh.appnum; // Appnum
lh.key_size = s_->kh.key_size; // Key Size
lh.salt = s_->kh.salt; // Salt
lh.pepper = pepper<Hasher>(
lh.salt); // Pepper
lh.block_size =
s_->kh.block_size; // Block Size
lh.key_file_size =
s_->kf.actual_size(); // Key File Size
lh.dat_file_size =
s_->df.actual_size(); // Data File Size
write (s_->lf, lh);
s_->lf.sync();
// Append data and spills to data file
auto modulus = modulus_;
auto buckets = buckets_;
{
// Bulk write to avoid write amplification
bulk_writer<File> w (s_->df,
s_->df.actual_size(), bulk_write_size);
// Write inserted data to the data file
for (auto& e : s_->p0)
{
// VFALCO This could be UB since other
// threads are reading other data members
// of this object in memory
e.second = w.offset();
auto os = w.prepare (value_size(
e.first.size, s_->kh.key_size));
// Data Record
write <uint48_t> (os,
e.first.size); // Size
write (os, e.first.key,
s_->kh.key_size); // Key
write (os, e.first.data,
e.first.size); // Data
}
// Do inserts, splits, and build view
// of original and modified buckets
for (auto const e : s_->p0)
{
// VFALCO Should this be >= or > ?
if ((frac_ += 65536) >= thresh_)
{
// split
frac_ -= thresh_;
if (buckets == modulus)
modulus *= 2;
auto const n1 = buckets - (modulus / 2);
auto const n2 = buckets++;
auto b1 = load (n1, c1, s_->c0, buf2.get());
auto b2 = c1.create (n2);
// If split spills, the writer is
// flushed which can amplify writes.
split (b1, b2, tmp, n1, n2,
buckets, modulus, w);
}
// insert
auto const n = bucket_index(
e.first.hash, buckets, modulus);
auto b = load (n, c1, s_->c0, buf2.get());
// This can amplify writes if it spills.
maybe_spill(b, w);
b.insert (e.second,
e.first.size, e.first.hash);
}
w.flush();
}
// Give readers a view of the new buckets.
// This might be slightly better than the old
// view since there could be fewer spills.
{
unique_lock_type m (m_);
swap(c1, s_->c1);
s_->p0.clear();
buckets_ = buckets;
modulus_ = modulus;
g_.start();
}
// Write clean buckets to log file
// VFALCO Should the bulk_writer buffer size be tunable?
{
bulk_writer<File> w(s_->lf,
s_->lf.actual_size(), bulk_write_size);
for (auto const e : s_->c0)
{
// Log Record
auto os = w.prepare(
field<std::uint64_t>::size + // Index
e.second.compact_size()); // Bucket
// Log Record
write<std::uint64_t>(os, e.first); // Index
e.second.write(os); // Bucket
}
s_->c0.clear();
w.flush();
s_->lf.sync();
}
g_.finish();
// Write new buckets to key file
for (auto const e : s_->c1)
e.second.write (s_->kf,
(e.first + 1) * s_->kh.block_size);
// Finalize the commit
s_->df.sync();
s_->kf.sync();
s_->lf.trunc(0);
s_->lf.sync();
// Cache is no longer needed, all fetches will go straight
// to disk again. Do this after the sync, otherwise readers
// might get blocked longer due to the extra I/O.
// VFALCO is this correct?
{
unique_lock_type m (m_);
s_->c1.clear();
}
}
template <class Hasher, class Codec, class File>
void
store<Hasher, Codec, File>::run()
{
auto const pred =
[this]()
{
return
! open_ ||
s_->p1.data_size() >=
s_->pool_thresh ||
s_->p1.data_size() >=
commit_limit_;
};
try
{
while (open_)
{
for(;;)
{
using std::chrono::seconds;
unique_lock_type m (m_);
bool const timeout =
! cond_.wait_for (m,
seconds(1), pred);
if (! open_)
break;
m.unlock();
commit();
// Reclaim some memory if
// we get a spare moment.
if (timeout)
{
m.lock();
s_->pool_thresh =
std::max<std::size_t>(
1, s_->pool_thresh / 2);
s_->p1.shrink_to_fit();
s_->p0.shrink_to_fit();
s_->c1.shrink_to_fit();
s_->c0.shrink_to_fit();
m.unlock();
}
}
}
commit();
}
catch(...)
{
ep_ = std::current_exception(); // must come first
epb_.store(true);
}
}
} // nudb
} // beast
#endif


@@ -1,255 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_TESTS_COMMON_H_INCLUDED
#define BEAST_NUDB_TESTS_COMMON_H_INCLUDED
#include <ripple/beast/nudb.h>
#include <ripple/beast/nudb/identity.h>
#include <ripple/beast/nudb/test/fail_file.h>
#include <ripple/beast/hash/xxhasher.h>
#include <ripple/beast/xor_shift_engine.h>
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <iomanip>
#include <memory>
#include <random>
#include <string>
namespace beast {
namespace nudb {
namespace test {
using key_type = std::size_t;
// xxhasher is fast and produces good results
using test_api_base =
nudb::api<xxhasher, identity, native_file>;
struct test_api : test_api_base
{
using fail_store = nudb::store<
typename test_api_base::hash_type,
typename test_api_base::codec_type,
nudb::fail_file <typename test_api_base::file_type>>;
};
static std::size_t constexpr arena_alloc_size = 16 * 1024 * 1024;
static std::uint64_t constexpr appnum = 1337;
static std::uint64_t constexpr salt = 42;
//------------------------------------------------------------------------------
// Meets the requirements of Handler
class Storage
{
private:
std::size_t size_ = 0;
std::size_t capacity_ = 0;
std::unique_ptr<std::uint8_t[]> buf_;
public:
Storage() = default;
Storage (Storage const&) = delete;
Storage& operator= (Storage const&) = delete;
std::size_t
size() const
{
return size_;
}
std::uint8_t*
get() const
{
return buf_.get();
}
std::uint8_t*
reserve (std::size_t size)
{
if (capacity_ < size)
{
capacity_ = detail::ceil_pow2(size);
buf_.reset (
new std::uint8_t[capacity_]);
}
size_ = size;
return buf_.get();
}
std::uint8_t*
operator()(void const* data, std::size_t size)
{
reserve (size);
std::memcpy(buf_.get(), data, size);
return buf_.get();
}
};
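// Usage sketch (illustrative; db and key are placeholders):
// Storage can be passed directly to store::fetch as the Handler,
// copying the value into its internal buffer.
//
//     Storage s;
//     if (db.fetch(&key, s))
//     {
//         // s.get() now points at s.size() bytes of value data
//     }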
struct value_type
{
value_type() = default;
value_type (value_type const&) = default;
value_type& operator= (value_type const&) = default;
key_type key;
std::size_t size;
    std::uint8_t* data;
};
//------------------------------------------------------------------------------
template <class Generator>
static
void
rngcpy (void* buffer, std::size_t bytes,
Generator& g)
{
using result_type =
typename Generator::result_type;
while (bytes >= sizeof(result_type))
{
auto const v = g();
        std::memcpy(buffer, &v, sizeof(v));
buffer = reinterpret_cast<
std::uint8_t*>(buffer) + sizeof(v);
bytes -= sizeof(v);
}
if (bytes > 0)
{
auto const v = g();
        std::memcpy(buffer, &v, bytes);
}
}
//------------------------------------------------------------------------------
class Sequence
{
public:
using key_type = test::key_type;
private:
enum
{
minSize = 250,
maxSize = 1250
};
Storage s_;
beast::xor_shift_engine gen_;
std::uniform_int_distribution<std::uint32_t> d_size_;
public:
Sequence()
: d_size_ (minSize, maxSize)
{
}
// Returns the n-th key
key_type
key (std::size_t n)
{
gen_.seed(n+1);
key_type result;
rngcpy (&result, sizeof(result), gen_);
return result;
}
// Returns the n-th value
value_type
operator[] (std::size_t n)
{
gen_.seed(n+1);
value_type v;
rngcpy (&v.key, sizeof(v.key), gen_);
v.size = d_size_(gen_);
v.data = s_.reserve(v.size);
rngcpy (v.data, v.size, gen_);
return v;
}
};
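// Usage sketch (illustrative): Sequence maps an index to a
// deterministic key/value pair, so test runs are reproducible.
//
//     Sequence seq;
//     auto v = seq[0];               // v.key, v.data, v.size
//     // seq.key(0) == v.key, since both seed the generator with n+1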
template <class T>
static
std::string
num (T t)
{
std::string s = std::to_string(t);
std::reverse(s.begin(), s.end());
std::string s2;
s2.reserve(s.size() + (s.size()+2)/3);
int n = 0;
for (auto c : s)
{
if (n == 3)
{
n = 0;
s2.insert (s2.begin(), ',');
}
++n;
s2.insert(s2.begin(), c);
}
return s2;
}
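// Example: num(1234567) returns "1,234,567".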
inline
void
print(std::ostream& log,
beast::nudb::verify_info const& info)
{
log <<
"avg_fetch: " << std::fixed << std::setprecision(3) << info.avg_fetch << '\n' <<
"waste: " << std::fixed << std::setprecision(3) << info.waste * 100 << "%\n" <<
"overhead: " << std::fixed << std::setprecision(1) << info.overhead * 100 << "%\n" <<
"actual_load: " << std::fixed << std::setprecision(0) << info.actual_load * 100 << "%\n" <<
"version: " << num(info.version) << '\n' <<
"uid: " << std::showbase << std::hex << info.uid << '\n' <<
"appnum: " << info.appnum << '\n' <<
"key_size: " << num(info.key_size) << '\n' <<
"salt: " << std::showbase << std::hex << info.salt << '\n' <<
"pepper: " << std::showbase << std::hex << info.pepper << '\n' <<
"block_size: " << num(info.block_size) << '\n' <<
"bucket_size: " << num(info.bucket_size) << '\n' <<
"load_factor: " << std::fixed << std::setprecision(0) << info.load_factor * 100 << "%\n" <<
"capacity: " << num(info.capacity) << '\n' <<
"buckets: " << num(info.buckets) << '\n' <<
"key_count: " << num(info.key_count) << '\n' <<
"value_count: " << num(info.value_count) << '\n' <<
"value_bytes: " << num(info.value_bytes) << '\n' <<
"spill_count: " << num(info.spill_count) << '\n' <<
"spill_count_tot: " << num(info.spill_count_tot) << '\n' <<
"spill_bytes: " << num(info.spill_bytes) << '\n' <<
"spill_bytes_tot: " << num(info.spill_bytes_tot) << '\n' <<
"key_file_size: " << num(info.key_file_size) << '\n' <<
"dat_file_size: " << num(info.dat_file_size) << std::endl;
std::string s;
    for (std::size_t i = 0; i < info.hist.size(); ++i)
s += (i==0) ?
std::to_string(info.hist[i]) :
(", " + std::to_string(info.hist[i]));
log << "hist: " << s << std::endl;
}
} // test
} // nudb
} // beast
#endif


@@ -1,245 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_FAIL_FILE_H_INCLUDED
#define BEAST_NUDB_FAIL_FILE_H_INCLUDED
#include <ripple/beast/nudb/common.h>
#include <atomic>
#include <cstddef>
#include <string>
#include <utility>
namespace beast {
namespace nudb {
/** Thrown when a test failure mode occurs. */
struct fail_error : std::exception
{
char const*
what() const noexcept override
{
return "test failure";
}
};
/** Countdown to a test failure mode. */
class fail_counter
{
private:
std::size_t target_;
std::atomic<std::size_t> count_;
public:
fail_counter (fail_counter const&) = delete;
fail_counter& operator= (fail_counter const&) = delete;
explicit
fail_counter (std::size_t target = 0)
{
reset (target);
}
/** Reset the counter to fail at the nth step, or 0 for no failure. */
void
reset (std::size_t n = 0)
{
target_ = n;
count_.store(0);
}
bool
fail()
{
return target_ && (++count_ >= target_);
}
};
/** Wrapper to simulate file system failures. */
template <class File>
class fail_file
{
private:
File f_;
fail_counter* c_ = nullptr;
public:
fail_file() = default;
fail_file (fail_file const&) = delete;
fail_file& operator= (fail_file const&) = delete;
~fail_file() = default;
fail_file (fail_file&&);
fail_file&
operator= (fail_file&& other);
explicit
fail_file (fail_counter& c);
bool
is_open() const
{
return f_.is_open();
}
path_type const&
path() const
{
return f_.path();
}
std::size_t
actual_size() const
{
return f_.actual_size();
}
void
close()
{
f_.close();
}
bool
create (file_mode mode,
path_type const& path)
{
return f_.create(mode, path);
}
bool
open (file_mode mode,
path_type const& path)
{
return f_.open(mode, path);
}
static
void
erase (path_type const& path)
{
File::erase(path);
}
void
read (std::size_t offset,
void* buffer, std::size_t bytes)
{
f_.read(offset, buffer, bytes);
}
void
write (std::size_t offset,
void const* buffer, std::size_t bytes);
void
sync();
void
trunc (std::size_t length);
private:
bool
fail();
void
do_fail();
};
template <class File>
fail_file<File>::fail_file (fail_file&& other)
: f_ (std::move(other.f_))
, c_ (other.c_)
{
other.c_ = nullptr;
}
template <class File>
fail_file<File>&
fail_file<File>::operator= (fail_file&& other)
{
f_ = std::move(other.f_);
c_ = other.c_;
other.c_ = nullptr;
return *this;
}
template <class File>
fail_file<File>::fail_file (fail_counter& c)
: c_ (&c)
{
}
template <class File>
void
fail_file<File>::write (std::size_t offset,
void const* buffer, std::size_t bytes)
{
if (fail())
do_fail();
if (fail())
{
f_.write(offset, buffer, (bytes + 1) / 2);
do_fail();
}
f_.write(offset, buffer, bytes);
}
template <class File>
void
fail_file<File>::sync()
{
if (fail())
do_fail();
// We don't need a real sync for
// testing, it just slows things down.
//f_.sync();
}
template <class File>
void
fail_file<File>::trunc (std::size_t length)
{
if (fail())
do_fail();
f_.trunc(length);
}
template <class File>
bool
fail_file<File>::fail()
{
if (c_)
return c_->fail();
return false;
}
template <class File>
void
fail_file<File>::do_fail()
{
throw fail_error();
}
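// Usage sketch (illustrative; the path is a placeholder): drive a
// file through fail_file to exercise crash recovery. The nth
// write, sync, or trunc throws fail_error.
//
//     fail_counter c(5);             // fail on the 5th operation
//     fail_file<native_file> f(c);
//     f.create(file_mode::write, "test.dat");
//     // ... subsequent writes eventually throw fail_error ...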
} // nudb
} // beast
#endif


@@ -1,532 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_VERIFY_H_INCLUDED
#define BEAST_NUDB_VERIFY_H_INCLUDED
#include <ripple/beast/nudb/common.h>
#include <ripple/beast/nudb/file.h>
#include <ripple/beast/nudb/detail/bucket.h>
#include <ripple/beast/nudb/detail/bulkio.h>
#include <ripple/beast/nudb/detail/format.h>
#include <algorithm>
#include <cstddef>
#include <string>
namespace beast {
namespace nudb {
/** Reports database information during verify mode. */
struct verify_info
{
// Configured
std::size_t version = 0; // API version
std::size_t uid = 0; // UID
std::size_t appnum = 0; // Appnum
std::size_t key_size = 0; // Size of a key in bytes
std::size_t salt = 0; // Salt
std::size_t pepper = 0; // Pepper
std::size_t block_size = 0; // Block size in bytes
float load_factor = 0; // Target bucket fill fraction
// Calculated
std::size_t capacity = 0; // Max keys per bucket
std::size_t buckets = 0; // Number of buckets
std::size_t bucket_size = 0; // Size of bucket in bytes
// Measured
std::size_t key_file_size = 0; // Key file size in bytes
std::size_t dat_file_size = 0; // Data file size in bytes
std::size_t key_count = 0; // Keys in buckets and active spills
std::size_t value_count = 0; // Count of values in the data file
std::size_t value_bytes = 0; // Sum of value bytes in the data file
    std::size_t spill_count = 0;        // Number of spill records in use
std::size_t spill_count_tot = 0; // Number of spill records in data file
    std::size_t spill_bytes = 0;        // Bytes of spill records in use
std::size_t spill_bytes_tot = 0; // Sum of spill record bytes in data file
// Performance
float avg_fetch = 0; // average reads per fetch (excluding value)
    float waste = 0;                    // fraction of data file bytes wasted (0..1)
    float overhead = 0;                 // fraction of extra bytes per byte of value
float actual_load = 0; // actual bucket fill fraction
// number of buckets having n spills
std::array<std::size_t, 10> hist;
verify_info()
{
hist.fill(0);
}
};
/** Verify consistency of the key and data files.
Effects:
Opens the key and data files in read-only mode.
Throws file_error if a file can't be opened.
Iterates the key and data files, throws store_corrupt_error
on broken invariants.
*/
template <class Hasher>
verify_info
verify (
path_type const& dat_path,
path_type const& key_path,
std::size_t read_size)
{
using namespace detail;
using File = native_file;
File df;
File kf;
if (! df.open (file_mode::scan, dat_path))
throw store_corrupt_error(
"no data file");
if (! kf.open (file_mode::read, key_path))
throw store_corrupt_error(
"no key file");
key_file_header kh;
dat_file_header dh;
read (df, dh);
read (kf, kh);
verify(dh);
verify<Hasher>(dh, kh);
verify_info info;
info.version = dh.version;
info.uid = dh.uid;
info.appnum = dh.appnum;
info.key_size = dh.key_size;
info.salt = kh.salt;
info.pepper = kh.pepper;
info.block_size = kh.block_size;
info.load_factor = kh.load_factor / 65536.f;
info.capacity = kh.capacity;
info.buckets = kh.buckets;
info.bucket_size = kh.bucket_size;
info.key_file_size = kf.actual_size();
info.dat_file_size = df.actual_size();
// Data Record
auto const dh_len =
field<uint48_t>::size + // Size
kh.key_size; // Key
std::size_t fetches = 0;
// Iterate Data File
buffer buf (kh.block_size + dh_len);
bucket b (kh.block_size, buf.get());
std::uint8_t* pd = buf.get() + kh.block_size;
{
bulk_reader<File> r(df,
dat_file_header::size,
df.actual_size(), read_size);
while (! r.eof())
{
auto const offset = r.offset();
// Data Record or Spill Record
auto is = r.prepare(
field<uint48_t>::size); // Size
std::size_t size;
read<uint48_t>(is, size);
if (size > 0)
{
// Data Record
is = r.prepare(
kh.key_size + // Key
size); // Data
std::uint8_t const* const key =
is.data(kh.key_size);
std::uint8_t const* const data =
is.data(size);
(void)data;
auto const h = hash<Hasher>(
key, kh.key_size, kh.salt);
// Check bucket and spills
try
{
auto const n = bucket_index(
h, kh.buckets, kh.modulus);
b.read (kf, (n + 1) * kh.block_size);
++fetches;
}
catch (file_short_read_error const&)
{
throw store_corrupt_error(
"short bucket");
}
for (;;)
{
for (auto i = b.lower_bound(h);
i < b.size(); ++i)
{
auto const item = b[i];
if (item.hash != h)
break;
if (item.offset == offset)
goto found;
++fetches;
}
auto const spill = b.spill();
if (! spill)
throw store_corrupt_error(
"orphaned value");
try
{
b.read (df, spill);
++fetches;
}
catch (file_short_read_error const&)
{
throw store_corrupt_error(
"short spill");
}
}
found:
// Update
++info.value_count;
info.value_bytes += size;
}
else
{
// Spill Record
is = r.prepare(
field<std::uint16_t>::size);
read<std::uint16_t>(is, size); // Size
if (size != kh.bucket_size)
throw store_corrupt_error(
"bad spill size");
b.read(r); // Bucket
++info.spill_count_tot;
info.spill_bytes_tot +=
field<uint48_t>::size + // Zero
field<uint16_t>::size + // Size
b.compact_size(); // Bucket
}
}
}
// Iterate Key File
{
for (std::size_t n = 0; n < kh.buckets; ++n)
{
std::size_t nspill = 0;
b.read (kf, (n + 1) * kh.block_size);
for(;;)
{
info.key_count += b.size();
for (std::size_t i = 0; i < b.size(); ++i)
{
auto const e = b[i];
try
{
df.read (e.offset, pd, dh_len);
}
catch (file_short_read_error const&)
{
throw store_corrupt_error(
"missing value");
}
// Data Record
istream is(pd, dh_len);
std::size_t size;
read<uint48_t>(is, size); // Size
void const* key =
is.data(kh.key_size); // Key
if (size != e.size)
throw store_corrupt_error(
"wrong size");
auto const h = hash<Hasher>(key,
kh.key_size, kh.salt);
if (h != e.hash)
throw store_corrupt_error(
"wrong hash");
}
if (! b.spill())
break;
try
{
b.read (df, b.spill());
++nspill;
++info.spill_count;
info.spill_bytes +=
field<uint48_t>::size + // Zero
field<uint16_t>::size + // Size
b.compact_size(); // SpillBucket
}
catch (file_short_read_error const&)
{
throw store_corrupt_error(
"missing spill");
}
}
if (nspill >= info.hist.size())
nspill = info.hist.size() - 1;
++info.hist[nspill];
}
}
float sum = 0;
for (std::size_t i = 0; i < info.hist.size(); ++i)
sum += info.hist[i] * (i + 1);
//info.avg_fetch = sum / info.buckets;
info.avg_fetch = float(fetches) / info.value_count;
info.waste = (info.spill_bytes_tot - info.spill_bytes) /
float(info.dat_file_size);
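// The overhead below compares total file bytes to useful payload:
// e.g. 125 MB of key and data files holding 100 MB of values plus
// their per-record keys and size fields gives 125/100 - 1 = 0.25,
// i.e. 25% extra bytes per byte of value (figures illustrative).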
info.overhead =
float(info.key_file_size + info.dat_file_size) /
(
info.value_bytes +
info.key_count *
(info.key_size +
// Data Record
field<uint48_t>::size) // Size
) - 1;
info.actual_load = info.key_count / float(
info.capacity * info.buckets);
return info;
}
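// A minimal usage sketch, assuming an existing database pair
// "db.dat" / "db.key" and beast::xxhasher as the Hasher; the paths
// and read size here are illustrative:
//
//     auto const info = beast::nudb::verify<beast::xxhasher>(
//         "db.dat", "db.key", 64 * 1024 * 1024);
//     std::cout << "values: " << info.value_count <<
//         ", avg reads/fetch: " << info.avg_fetch << "\n";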
/** Verify consistency of the key and data files.
Effects:
Opens the key and data files in read-only mode.
Throws store_corrupt_error if either file is missing,
or file_error if a file can't be opened.
Iterates the key and data files, throwing store_corrupt_error
on broken invariants.
This overload uses a different algorithm that requires
allocating a large buffer.
*/
template <class Hasher, class Progress>
verify_info
verify_fast (
path_type const& dat_path,
path_type const& key_path,
std::size_t buffer_size,
Progress&& progress)
{
using namespace detail;
using File = native_file;
File df;
File kf;
if (! df.open (file_mode::scan, dat_path))
throw store_corrupt_error(
"no data file");
if (! kf.open (file_mode::read, key_path))
throw store_corrupt_error(
"no key file");
key_file_header kh;
dat_file_header dh;
read (df, dh);
read (kf, kh);
verify(dh);
verify<Hasher>(dh, kh);
verify_info info;
info.version = dh.version;
info.uid = dh.uid;
info.appnum = dh.appnum;
info.key_size = dh.key_size;
info.salt = kh.salt;
info.pepper = kh.pepper;
info.block_size = kh.block_size;
info.load_factor = kh.load_factor / 65536.f;
info.capacity = kh.capacity;
info.buckets = kh.buckets;
info.bucket_size = kh.bucket_size;
info.key_file_size = kf.actual_size();
info.dat_file_size = df.actual_size();
std::size_t fetches = 0;
// Counts unverified keys per bucket
std::unique_ptr<std::uint32_t[]> nkeys(
new std::uint32_t[kh.buckets]);
// Verify contiguous sequential sections of the
// key file using multiple passes over the data.
//
auto const buckets = std::max<std::size_t>(1,
buffer_size / kh.block_size);
buffer buf((buckets + 1) * kh.block_size);
bucket tmp(kh.block_size, buf.get() +
buckets * kh.block_size);
std::size_t const passes =
(kh.buckets + buckets - 1) / buckets;
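// Ceiling division: e.g. 10 buckets with room for 4 per pass
// yields (10 + 4 - 1) / 4 = 3 passes (figures illustrative).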
auto const df_size = df.actual_size();
std::size_t const work = passes * df_size;
std::size_t npass = 0;
for (std::size_t b0 = 0; b0 < kh.buckets;
b0 += buckets)
{
auto const b1 = std::min(
b0 + buckets, kh.buckets);
// Buffered range is [b0, b1)
auto const bn = b1 - b0;
kf.read((b0 + 1) * kh.block_size,
buf.get(), bn * kh.block_size);
// Count keys in buckets
for (std::size_t i = b0 ; i < b1; ++i)
{
bucket b(kh.block_size, buf.get() +
(i - b0) * kh.block_size);
nkeys[i] = b.size();
std::size_t nspill = 0;
auto spill = b.spill();
while (spill != 0)
{
tmp.read(df, spill);
nkeys[i] += tmp.size();
spill = tmp.spill();
++nspill;
++info.spill_count;
info.spill_bytes +=
field<uint48_t>::size + // Zero
field<uint16_t>::size + // Size
tmp.compact_size(); // SpillBucket
}
if (nspill >= info.hist.size())
nspill = info.hist.size() - 1;
++info.hist[nspill];
info.key_count += nkeys[i];
}
// Iterate Data File
bulk_reader<File> r(df,
dat_file_header::size, df_size,
64 * 1024 * 1024);
while (! r.eof())
{
auto const offset = r.offset();
progress(npass * df_size + offset, work);
// Data Record or Spill Record
auto is = r.prepare(
field<uint48_t>::size); // Size
std::size_t size;
read<uint48_t>(is, size);
if (size > 0)
{
// Data Record
is = r.prepare(
kh.key_size + // Key
size); // Data
std::uint8_t const* const key =
is.data(kh.key_size);
std::uint8_t const* const data =
is.data(size);
(void)data;
auto const h = hash<Hasher>(
key, kh.key_size, kh.salt);
auto const n = bucket_index(
h, kh.buckets, kh.modulus);
if (n < b0 || n >= b1)
continue;
// Check bucket and spills
bucket b (kh.block_size, buf.get() +
(n - b0) * kh.block_size);
++fetches;
for (;;)
{
for (auto i = b.lower_bound(h);
i < b.size(); ++i)
{
auto const item = b[i];
if (item.hash != h)
break;
if (item.offset == offset)
goto found;
++fetches;
}
auto const spill = b.spill();
if (! spill)
throw store_corrupt_error(
"orphaned value");
b = tmp;
try
{
b.read (df, spill);
++fetches;
}
catch (file_short_read_error const&)
{
throw store_corrupt_error(
"short spill");
}
}
found:
// Update
++info.value_count;
info.value_bytes += size;
if (nkeys[n]-- == 0)
throw store_corrupt_error(
"duplicate value");
}
else
{
// Spill Record
is = r.prepare(
field<std::uint16_t>::size);
read<std::uint16_t>(is, size); // Size
if (size != kh.bucket_size)
throw store_corrupt_error(
"bad spill size");
tmp.read(r); // Bucket
if (b0 == 0)
{
++info.spill_count_tot;
info.spill_bytes_tot +=
field<uint48_t>::size + // Zero
field<uint16_t>::size + // Size
tmp.compact_size(); // Bucket
}
}
}
++npass;
}
// Make sure every key in every bucket was visited
for (std::size_t i = 0;
i < kh.buckets; ++i)
if (nkeys[i] != 0)
throw store_corrupt_error(
"orphan value");
float sum = 0;
for (std::size_t i = 0; i < info.hist.size(); ++i)
sum += info.hist[i] * (i + 1);
//info.avg_fetch = sum / info.buckets;
info.avg_fetch = float(fetches) / info.value_count;
info.waste = (info.spill_bytes_tot - info.spill_bytes) /
float(info.dat_file_size);
info.overhead =
float(info.key_file_size + info.dat_file_size) /
(
info.value_bytes +
info.key_count *
(info.key_size +
// Data Record
field<uint48_t>::size) // Size
) - 1;
info.actual_load = info.key_count / float(
info.capacity * info.buckets);
return info;
}
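// A minimal usage sketch, assuming the same illustrative paths; the
// progress callable receives (amount done, total) units of work:
//
//     auto const info = beast::nudb::verify_fast<beast::xxhasher>(
//         "db.dat", "db.key", 512 * 1024 * 1024,
//         [](std::size_t amount, std::size_t total)
//         {
//             // report progress here
//         });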
} // nudb
} // beast
#endif

View File

@@ -1,111 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_VISIT_H_INCLUDED
#define BEAST_NUDB_VISIT_H_INCLUDED
#include <ripple/beast/nudb/common.h>
#include <ripple/beast/nudb/file.h>
#include <ripple/beast/nudb/detail/buffer.h>
#include <ripple/beast/nudb/detail/bulkio.h>
#include <ripple/beast/nudb/detail/format.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <string>
namespace beast {
namespace nudb {
/** Visit each key/data pair in a database file.
Function will be called with this signature:
bool(void const* key, std::size_t key_size,
void const* data, std::size_t size)
If Function returns false, the visit is terminated.
Only the data file is required; the key file is not used.
@return `true` if the visit completed.
*/
template <class Codec, class Function>
bool
visit(
path_type const& path,
std::size_t read_size,
Function&& f)
{
using namespace detail;
using File = native_file;
File df;
if (! df.open (file_mode::scan, path))
throw store_corrupt_error(
"no data file");
dat_file_header dh;
read (df, dh);
verify (dh);
Codec codec;
// Iterate Data File
bulk_reader<File> r(
df, dat_file_header::size,
df.actual_size(), read_size);
buffer buf;
try
{
while (! r.eof())
{
// Data Record or Spill Record
std::size_t size;
auto is = r.prepare(
field<uint48_t>::size); // Size
read<uint48_t>(is, size);
if (size > 0)
{
// Data Record
is = r.prepare(
dh.key_size + // Key
size); // Data
std::uint8_t const* const key =
is.data(dh.key_size);
auto const result = codec.decompress(
is.data(size), size, buf);
if (! f(key, dh.key_size,
result.first, result.second))
return false;
}
else
{
// Spill Record
is = r.prepare(
field<std::uint16_t>::size);
read<std::uint16_t>(is, size); // Size
r.prepare(size); // skip bucket
}
}
}
catch (file_short_read_error const&)
{
throw store_corrupt_error(
"nudb: data short read");
}
return true;
}
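// A minimal usage sketch, assuming an identity-style Codec type named
// MyCodec (hypothetical) and an illustrative path and read size:
//
//     beast::nudb::visit<MyCodec>("db.dat", 64 * 1024 * 1024,
//         [](void const* key, std::size_t key_size,
//            void const* data, std::size_t size)
//         {
//             // process one key/value pair; return false to stop early
//             return true;
//         });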
} // nudb
} // beast
#endif

View File

@@ -1,462 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_NUDB_DETAIL_WIN32_FILE_H_INCLUDED
#define BEAST_NUDB_DETAIL_WIN32_FILE_H_INCLUDED
#include <ripple/beast/nudb/common.h>
#include <cassert>
#include <cstddef>
#include <string>
#include <utility>
#ifndef BEAST_NUDB_WIN32_FILE
# ifdef _MSC_VER
# define BEAST_NUDB_WIN32_FILE 1
# else
# define BEAST_NUDB_WIN32_FILE 0
# endif
#endif
#if BEAST_NUDB_WIN32_FILE
#pragma push_macro("NOMINMAX")
#pragma push_macro("UNICODE")
#pragma push_macro("STRICT")
# ifndef NOMINMAX
# define NOMINMAX
# endif
# ifndef UNICODE
# define UNICODE
# endif
# ifndef STRICT
# define STRICT
# endif
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <Windows.h>
#pragma pop_macro("STRICT")
#pragma pop_macro("UNICODE")
#pragma pop_macro("NOMINMAX")
#endif
namespace beast {
namespace nudb {
#if BEAST_NUDB_WIN32_FILE
namespace detail {
// Win32 error code
class file_win32_error
: public file_error
{
public:
explicit
file_win32_error (char const* m,
DWORD dwError = ::GetLastError())
: file_error (std::string("nudb: ") + m +
", " + text(dwError))
{
}
explicit
file_win32_error (std::string const& m,
DWORD dwError = ::GetLastError())
: file_error (std::string("nudb: ") + m +
", " + text(dwError))
{
}
private:
template <class = void>
static
std::string
text (DWORD dwError);
};
template <class>
std::string
file_win32_error::text (DWORD dwError)
{
LPSTR buf = nullptr;
size_t const size = FormatMessageA (
FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS,
NULL,
dwError,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPSTR)&buf,
0,
NULL);
std::string s;
if (size)
{
s.append(buf, size);
LocalFree (buf);
}
else
{
s = "error " + std::to_string(dwError);
}
return s;
}
//------------------------------------------------------------------------------
template <class = void>
class win32_file
{
private:
HANDLE hf_ = INVALID_HANDLE_VALUE;
public:
win32_file() = default;
win32_file (win32_file const&) = delete;
win32_file& operator= (win32_file const&) = delete;
~win32_file();
win32_file (win32_file&&);
win32_file&
operator= (win32_file&& other);
bool
is_open() const
{
return hf_ != INVALID_HANDLE_VALUE;
}
void
close();
// Returns:
// `false` if the file already exists
// `true` on success, else throws
//
bool
create (file_mode mode, std::string const& path);
// Returns:
// `false` if the file doesn't exist
// `true` on success, else throws
//
bool
open (file_mode mode, std::string const& path);
// Effects:
// Removes the file from the file system.
//
// Throws:
// Throws if an error occurs.
//
// Returns:
// `true` if the file was erased
// `false` if the file was not present
//
static
bool
erase (path_type const& path);
// Returns:
// Current file size in bytes measured by operating system
// Requires:
// is_open() == true
//
std::size_t
actual_size() const;
void
read (std::size_t offset,
void* buffer, std::size_t bytes);
void
write (std::size_t offset,
void const* buffer, std::size_t bytes);
void
sync();
void
trunc (std::size_t length);
private:
static
std::pair<DWORD, DWORD>
flags (file_mode mode);
};
template <class _>
win32_file<_>::~win32_file()
{
close();
}
template <class _>
win32_file<_>::win32_file (win32_file&& other)
: hf_ (other.hf_)
{
other.hf_ = INVALID_HANDLE_VALUE;
}
template <class _>
win32_file<_>&
win32_file<_>::operator= (win32_file&& other)
{
if (&other == this)
return *this;
close();
hf_ = other.hf_;
other.hf_ = INVALID_HANDLE_VALUE;
return *this;
}
template <class _>
void
win32_file<_>::close()
{
if (hf_ != INVALID_HANDLE_VALUE)
{
::CloseHandle(hf_);
hf_ = INVALID_HANDLE_VALUE;
}
}
template <class _>
bool
win32_file<_>::create (file_mode mode,
std::string const& path)
{
assert(! is_open());
auto const f = flags(mode);
hf_ = ::CreateFileA (path.c_str(),
f.first,
0,
NULL,
CREATE_NEW,
f.second,
NULL);
if (hf_ == INVALID_HANDLE_VALUE)
{
DWORD const dwError = ::GetLastError();
if (dwError != ERROR_FILE_EXISTS)
throw file_win32_error(
"create file", dwError);
return false;
}
return true;
}
template <class _>
bool
win32_file<_>::open (file_mode mode,
std::string const& path)
{
assert(! is_open());
auto const f = flags(mode);
hf_ = ::CreateFileA (path.c_str(),
f.first,
0,
NULL,
OPEN_EXISTING,
f.second,
NULL);
if (hf_ == INVALID_HANDLE_VALUE)
{
DWORD const dwError = ::GetLastError();
if (dwError != ERROR_FILE_NOT_FOUND &&
dwError != ERROR_PATH_NOT_FOUND)
throw file_win32_error(
"open file", dwError);
return false;
}
return true;
}
template <class _>
bool
win32_file<_>::erase (path_type const& path)
{
BOOL const bSuccess =
::DeleteFileA(path.c_str());
if (! bSuccess)
{
DWORD dwError = ::GetLastError();
if (dwError != ERROR_FILE_NOT_FOUND &&
dwError != ERROR_PATH_NOT_FOUND)
throw file_win32_error(
"erase file");
return false;
}
return true;
}
// Return: Current file size in bytes measured by operating system
template <class _>
std::size_t
win32_file<_>::actual_size() const
{
assert(is_open());
LARGE_INTEGER fileSize;
if (! ::GetFileSizeEx(hf_, &fileSize))
throw file_win32_error(
"size file");
return static_cast<std::size_t>(fileSize.QuadPart);
}
template <class _>
void
win32_file<_>::read (std::size_t offset,
void* buffer, std::size_t bytes)
{
while(bytes > 0)
{
DWORD bytesRead;
LARGE_INTEGER li;
li.QuadPart = static_cast<LONGLONG>(offset);
OVERLAPPED ov;
ov.Offset = li.LowPart;
ov.OffsetHigh = li.HighPart;
ov.hEvent = NULL;
BOOL const bSuccess = ::ReadFile(
hf_, buffer, bytes, &bytesRead, &ov);
if (! bSuccess)
{
DWORD const dwError = ::GetLastError();
if (dwError != ERROR_HANDLE_EOF)
throw file_win32_error(
"read file", dwError);
throw file_short_read_error();
}
if (bytesRead == 0)
throw file_short_read_error();
offset += bytesRead;
bytes -= bytesRead;
buffer = reinterpret_cast<char*>(
buffer) + bytesRead;
}
}
template <class _>
void
win32_file<_>::write (std::size_t offset,
void const* buffer, std::size_t bytes)
{
while(bytes > 0)
{
LARGE_INTEGER li;
li.QuadPart = static_cast<LONGLONG>(offset);
OVERLAPPED ov;
ov.Offset = li.LowPart;
ov.OffsetHigh = li.HighPart;
ov.hEvent = NULL;
DWORD bytesWritten;
BOOL const bSuccess = ::WriteFile(
hf_, buffer, bytes, &bytesWritten, &ov);
if (! bSuccess)
throw file_win32_error(
"write file");
if (bytesWritten == 0)
throw file_short_write_error();
offset += bytesWritten;
bytes -= bytesWritten;
buffer = reinterpret_cast<
char const*>(buffer) +
bytesWritten;
}
}
template <class _>
void
win32_file<_>::sync()
{
BOOL const bSuccess =
::FlushFileBuffers(hf_);
if (! bSuccess)
throw file_win32_error(
"sync file");
}
template <class _>
void
win32_file<_>::trunc (std::size_t length)
{
LARGE_INTEGER li;
li.QuadPart = length;
BOOL bSuccess;
bSuccess = ::SetFilePointerEx(
hf_, li, NULL, FILE_BEGIN);
if (bSuccess)
bSuccess = SetEndOfFile(hf_);
if (! bSuccess)
throw file_win32_error(
"trunc file");
}
template <class _>
std::pair<DWORD, DWORD>
win32_file<_>::flags (file_mode mode)
{
std::pair<DWORD, DWORD> result(0, 0);
switch (mode)
{
case file_mode::scan:
result.first =
GENERIC_READ;
result.second =
FILE_FLAG_SEQUENTIAL_SCAN;
break;
case file_mode::read:
result.first =
GENERIC_READ;
result.second =
FILE_FLAG_RANDOM_ACCESS;
break;
case file_mode::append:
result.first =
GENERIC_READ | GENERIC_WRITE;
result.second =
FILE_FLAG_RANDOM_ACCESS
//| FILE_FLAG_NO_BUFFERING
//| FILE_FLAG_WRITE_THROUGH
;
break;
case file_mode::write:
result.first =
GENERIC_READ | GENERIC_WRITE;
result.second =
FILE_FLAG_RANDOM_ACCESS;
break;
}
return result;
}
} // detail
using win32_file = detail::win32_file<>;
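// A minimal usage sketch (Windows-only; the path is illustrative):
//
//     win32_file f;
//     if (f.create (file_mode::append, "test.bin"))
//     {
//         char const msg[] = "hello";
//         f.write (0, msg, sizeof(msg));
//         f.sync ();
//         f.close ();
//     }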
#endif
} // nudb
} // beast
#endif

View File

@@ -25,9 +25,7 @@
#include <ripple/nodestore/impl/codec.h>
#include <ripple/nodestore/impl/DecodedBlob.h>
#include <ripple/nodestore/impl/EncodedBlob.h>
#include <ripple/beast/nudb.h>
#include <ripple/beast/nudb/visit.h>
#include <ripple/beast/hash/xxhasher.h>
#include <nudb/nudb.hpp>
#include <boost/filesystem.hpp>
#include <cassert>
#include <chrono>
@@ -52,13 +50,10 @@ public:
currentType = 1
};
using api = beast::nudb::api<
beast::xxhasher, nodeobject_codec>;
beast::Journal journal_;
size_t const keyBytes_;
std::string const name_;
api::store db_;
nudb::store db_;
std::atomic <bool> deletePath_;
Scheduler& scheduler_;
@@ -78,15 +73,19 @@ public:
auto const dp = (folder / "nudb.dat").string();
auto const kp = (folder / "nudb.key").string ();
auto const lp = (folder / "nudb.log").string ();
using beast::nudb::make_salt;
api::create (dp, kp, lp,
currentType, make_salt(), keyBytes,
beast::nudb::block_size(kp),
0.50);
try
{
if (! db_.open (dp, kp, lp, arena_alloc_size))
Throw<std::runtime_error> ("nodestore: open failed");
nudb::error_code ec;
nudb::create<nudb::xxhasher>(dp, kp, lp,
currentType, nudb::make_salt(), keyBytes,
nudb::block_size(kp), 0.50, ec);
if(ec == nudb::errc::file_exists)
ec = {};
if(ec)
Throw<nudb::system_error>(ec);
db_.open (dp, kp, lp, ec);
if(ec)
Throw<nudb::system_error>(ec);
if (db_.appnum() != currentType)
Throw<std::runtime_error> ("nodestore: unknown appnum");
}
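// NuDB reports failures through a nudb::error_code out-parameter
// rather than by throwing, so each call is followed by an explicit
// check; the recurring idiom (shown above for create and open) is:
//
//     nudb::error_code ec;
//     db_.open (dp, kp, lp, ec);
//     if (ec)
//         Throw<nudb::system_error> (ec);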
@@ -114,7 +113,10 @@ public:
{
if (db_.is_open())
{
db_.close();
nudb::error_code ec;
db_.close(ec);
if(ec)
Throw<nudb::system_error>(ec);
if (deletePath_)
{
boost::filesystem::remove_all (name_);
@@ -127,10 +129,14 @@ public:
{
Status status;
pno->reset();
if (! db_.fetch (key,
nudb::error_code ec;
db_.fetch (key,
[key, pno, &status](void const* data, std::size_t size)
{
DecodedBlob decoded (key, data, size);
nudb::detail::buffer bf;
auto const result =
nodeobject_decompress(data, size, bf);
DecodedBlob decoded (key, result.first, result.second);
if (! decoded.wasOk ())
{
status = dataCorrupt;
@@ -138,10 +144,11 @@ public:
}
*pno = decoded.createObject();
status = ok;
}))
{
}, ec);
if(ec == nudb::error::key_not_found)
return notFound;
}
if(ec)
Throw<nudb::system_error>(ec);
return status;
}
@@ -163,8 +170,13 @@ public:
{
EncodedBlob e;
e.prepare (no);
db_.insert (e.getKey(),
e.getData(), e.getSize());
nudb::error_code ec;
nudb::detail::buffer bf;
auto const result = nodeobject_compress(
e.getData(), e.getSize(), bf);
db_.insert (e.getKey(), result.first, result.second, ec);
if(ec && ec != nudb::error::key_exists)
Throw<nudb::system_error>(ec);
}
void
@@ -204,20 +216,32 @@ public:
auto const kp = db_.key_path();
auto const lp = db_.log_path();
//auto const appnum = db_.appnum();
db_.close();
api::visit (dp,
nudb::error_code ec;
db_.close(ec);
if(ec)
Throw<nudb::system_error>(ec);
nudb::visit(dp,
[&](
void const* key, std::size_t key_bytes,
void const* data, std::size_t size)
void const* data, std::size_t size,
nudb::error_code&)
{
DecodedBlob decoded (key, data, size);
nudb::detail::buffer bf;
auto const result =
nodeobject_decompress(data, size, bf);
DecodedBlob decoded (key, result.first, result.second);
if (! decoded.wasOk ())
return false;
{
ec = make_error_code(nudb::error::missing_value);
return;
}
f (decoded.createObject());
return true;
});
db_.open (dp, kp, lp,
arena_alloc_size);
}, nudb::no_progress{}, ec);
if(ec)
Throw<nudb::system_error>(ec);
db_.open(dp, kp, lp, ec);
if(ec)
Throw<nudb::system_error>(ec);
}
int
@@ -238,10 +262,18 @@ public:
auto const dp = db_.dat_path();
auto const kp = db_.key_path();
auto const lp = db_.log_path();
db_.close();
api::verify (dp, kp);
db_.open (dp, kp, lp,
arena_alloc_size);
nudb::error_code ec;
db_.close(ec);
if(ec)
Throw<nudb::system_error>(ec);
nudb::verify_info vi;
nudb::verify<nudb::xxhasher>(
vi, dp, kp, 0, nudb::no_progress{}, ec);
if(ec)
Throw<nudb::system_error>(ec);
db_.open (dp, kp, lp, ec);
if(ec)
Throw<nudb::system_error>(ec);
}
/** Returns the number of file handles the backend expects to need */

View File

@@ -21,8 +21,7 @@
#define RIPPLE_NODESTORE_CODEC_H_INCLUDED
#include <ripple/basics/contract.h>
#include <ripple/beast/nudb/common.h>
#include <ripple/beast/nudb/detail/field.h>
#include <nudb/detail/field.hpp>
#include <ripple/nodestore/impl/varint.h>
#include <ripple/nodestore/NodeObject.h>
#include <ripple/protocol/HashPrefix.h>
@@ -35,8 +34,6 @@
namespace ripple {
namespace NodeStore {
namespace detail {
template <class BufferFactory>
std::pair<void const*, std::size_t>
snappy_compress (void const* in,
@@ -63,14 +60,14 @@ snappy_decompress (void const* in,
if (! snappy::GetUncompressedLength(
reinterpret_cast<char const*>(in),
in_size, &result.second))
Throw<beast::nudb::codec_error> (
Throw<std::runtime_error> (
"snappy decompress");
void* const out = bf(result.second);
result.first = out;
if (! snappy::RawUncompress(
reinterpret_cast<char const*>(in), in_size,
reinterpret_cast<char*>(out)))
Throw<beast::nudb::codec_error> (
Throw<std::runtime_error> (
"snappy decompress");
return result;
}
@@ -80,15 +77,15 @@ std::pair<void const*, std::size_t>
lz4_decompress (void const* in,
std::size_t in_size, BufferFactory&& bf)
{
using beast::nudb::codec_error;
using namespace beast::nudb::detail;
using std::runtime_error;
using namespace nudb::detail;
std::pair<void const*, std::size_t> result;
std::uint8_t const* p = reinterpret_cast<
std::uint8_t const*>(in);
auto const n = read_varint(
p, in_size, result.second);
if (n == 0)
Throw<codec_error> (
Throw<std::runtime_error> (
"lz4 decompress");
void* const out = bf(result.second);
result.first = out;
@@ -96,7 +93,7 @@ lz4_decompress (void const* in,
reinterpret_cast<char const*>(in) + n,
reinterpret_cast<char*>(out),
result.second) + n != in_size)
Throw<codec_error> (
Throw<std::runtime_error> (
"lz4 decompress");
return result;
}
@@ -106,8 +103,8 @@ std::pair<void const*, std::size_t>
lz4_compress (void const* in,
std::size_t in_size, BufferFactory&& bf)
{
using beast::nudb::codec_error;
using namespace beast::nudb::detail;
using std::runtime_error;
using namespace nudb::detail;
std::pair<void const*, std::size_t> result;
std::array<std::uint8_t, varint_traits<
std::size_t>::max> vi;
@@ -124,7 +121,7 @@ lz4_compress (void const* in,
reinterpret_cast<char*>(out + n),
in_size);
if (out_size == 0)
Throw<codec_error> (
Throw<std::runtime_error> (
"lz4 compress");
result.second = n + out_size;
return result;
@@ -146,8 +143,7 @@ std::pair<void const*, std::size_t>
nodeobject_decompress (void const* in,
std::size_t in_size, BufferFactory&& bf)
{
using beast::nudb::codec_error;
using namespace beast::nudb::detail;
using namespace nudb::detail;
std::uint8_t const* p = reinterpret_cast<
std::uint8_t const*>(in);
@@ -155,7 +151,7 @@ nodeobject_decompress (void const* in,
auto const vn = read_varint(
p, in_size, type);
if (vn == 0)
Throw<codec_error> (
Throw<std::runtime_error> (
"nodeobject decompress");
p += vn;
in_size -= vn;
@@ -180,7 +176,7 @@ nodeobject_decompress (void const* in,
auto const hs =
field<std::uint16_t>::size; // Mask
if (in_size < hs + 32)
Throw<codec_error> (
Throw<std::runtime_error> (
"nodeobject codec: short inner node");
istream is(p, in_size);
std::uint16_t mask;
@@ -193,9 +189,10 @@ nodeobject_decompress (void const* in,
write<std::uint32_t>(os, 0);
write<std::uint32_t>(os, 0);
write<std::uint8_t> (os, hotUNKNOWN);
write<std::uint32_t>(os, HashPrefix::innerNode);
write<std::uint32_t>(os,
static_cast<std::uint32_t>(HashPrefix::innerNode));
if (mask == 0)
Throw<codec_error> (
Throw<std::runtime_error> (
"nodeobject codec: empty inner node");
std::uint16_t bit = 0x8000;
for (int i = 16; i--; bit >>= 1)
@@ -203,7 +200,7 @@ nodeobject_decompress (void const* in,
if (mask & bit)
{
if (in_size < 32)
Throw<codec_error> (
Throw<std::runtime_error> (
"nodeobject codec: short inner node");
std::memcpy(os.data(32), is(32), 32);
in_size -= 32;
@@ -214,14 +211,14 @@ nodeobject_decompress (void const* in,
}
}
if (in_size > 0)
Throw<codec_error> (
Throw<std::runtime_error> (
"nodeobject codec: long inner node");
break;
}
case 3: // full v1 inner node
{
if (in_size != 16 * 32) // hashes
Throw<codec_error> (
Throw<std::runtime_error> (
"nodeobject codec: short full inner node");
istream is(p, in_size);
result.second = 525;
@@ -231,7 +228,8 @@ nodeobject_decompress (void const* in,
write<std::uint32_t>(os, 0);
write<std::uint32_t>(os, 0);
write<std::uint8_t> (os, hotUNKNOWN);
write<std::uint32_t>(os, HashPrefix::innerNode);
write<std::uint32_t>(os,
static_cast<std::uint32_t>(HashPrefix::innerNode));
write(os, is(512), 512);
break;
}
@@ -240,7 +238,7 @@ nodeobject_decompress (void const* in,
auto const hs =
field<std::uint16_t>::size; // Mask size
if (in_size < hs + 65)
Throw<codec_error> (
Throw<std::runtime_error> (
"nodeobject codec: short inner node");
istream is(p, in_size);
std::uint16_t mask;
@@ -256,9 +254,10 @@ nodeobject_decompress (void const* in,
write<std::uint32_t>(os, 0);
write<std::uint32_t>(os, 0);
write<std::uint8_t> (os, hotUNKNOWN);
write<std::uint32_t>(os, HashPrefix::innerNodeV2);
write<std::uint32_t>(os,
static_cast<std::uint32_t>(HashPrefix::innerNodeV2));
if (mask == 0)
Throw<codec_error> (
Throw<std::runtime_error> (
"nodeobject codec: empty inner node");
std::uint16_t bit = 0x8000;
for (int i = 16; i--; bit >>= 1)
@@ -266,7 +265,7 @@ nodeobject_decompress (void const* in,
if (mask & bit)
{
if (in_size < 32)
Throw<codec_error> (
Throw<std::runtime_error> (
"nodeobject codec: short inner node");
std::memcpy(os.data(32), is(32), 32);
in_size -= 32;
@@ -278,12 +277,12 @@ nodeobject_decompress (void const* in,
}
write<std::uint8_t>(os, depth);
if (in_size < (depth+1)/2)
Throw<codec_error> (
Throw<std::runtime_error> (
"nodeobject codec: short inner node");
std::memcpy(os.data((depth+1)/2), is((depth+1)/2), (depth+1)/2);
in_size -= (depth+1)/2;
if (in_size > 0)
Throw<codec_error> (
Throw<std::runtime_error> (
"nodeobject codec: long inner node");
break;
}
@@ -295,7 +294,7 @@ nodeobject_decompress (void const* in,
in_size -= 1;
result.second = 525 + 1 + (depth+1)/2;
if (in_size != 16 * 32 + (depth+1)/2) // hashes and common
Throw<codec_error> (
Throw<std::runtime_error> (
"nodeobject codec: short full inner node");
void* const out = bf(result.second);
result.first = out;
@@ -303,14 +302,15 @@ nodeobject_decompress (void const* in,
write<std::uint32_t>(os, 0);
write<std::uint32_t>(os, 0);
write<std::uint8_t> (os, hotUNKNOWN);
write<std::uint32_t>(os, HashPrefix::innerNodeV2);
write<std::uint32_t>(os,
static_cast<std::uint32_t>(HashPrefix::innerNodeV2));
write(os, is(512), 512);
write<std::uint8_t>(os, depth);
write(os, is((depth+1)/2), (depth+1)/2);
break;
}
default:
Throw<codec_error> (
Throw<std::runtime_error> (
"nodeobject codec: bad type=" +
std::to_string(type));
};
@@ -336,8 +336,8 @@ std::pair<void const*, std::size_t>
nodeobject_compress (void const* in,
std::size_t in_size, BufferFactory&& bf)
{
using beast::nudb::codec_error;
using namespace beast::nudb::detail;
using std::runtime_error;
using namespace nudb::detail;
std::size_t type = 1;
// Check for inner node v1
@@ -529,8 +529,6 @@ nodeobject_compress (void const* in,
return result;
}
} // detail
// Modifies an inner node to erase the ledger
// sequence and type information so the codec
// verification can pass.
@@ -539,8 +537,7 @@ template <class = void>
void
filter_inner (void* in, std::size_t in_size)
{
using beast::nudb::codec_error;
using namespace beast::nudb::detail;
using namespace nudb::detail;
// Check for inner node
if (in_size == 525)
@@ -564,106 +561,6 @@ filter_inner (void* in, std::size_t in_size)
}
}
//------------------------------------------------------------------------------
class snappy_codec
{
public:
template <class... Args>
explicit
snappy_codec(Args&&... args)
{
}
char const*
name() const
{
return "snappy";
}
template <class BufferFactory>
std::pair<void const*, std::size_t>
compress (void const* in,
std::size_t in_size, BufferFactory&& bf) const
{
return snappy_compress(in, in_size, bf);
}
template <class BufferFactory>
std::pair<void const*, std::size_t>
decompress (void const* in,
std::size_t in_size, BufferFactory&& bf) const
{
return snappy_decompress(in, in_size, bf);
}
};
class lz4_codec
{
public:
template <class... Args>
explicit
lz4_codec(Args&&... args)
{
}
char const*
name() const
{
return "lz4";
}
template <class BufferFactory>
std::pair<void const*, std::size_t>
decompress (void const* in,
std::size_t in_size, BufferFactory&& bf) const
{
return lz4_decompress(in, in_size, bf);
}
template <class BufferFactory>
std::pair<void const*, std::size_t>
compress (void const* in,
std::size_t in_size, BufferFactory&& bf) const
{
return lz4_compress(in, in_size, bf);
}
};
class nodeobject_codec
{
public:
template <class... Args>
explicit
nodeobject_codec(Args&&... args)
{
}
char const*
name() const
{
return "nodeobject";
}
template <class BufferFactory>
std::pair<void const*, std::size_t>
decompress (void const* in,
std::size_t in_size, BufferFactory&& bf) const
{
return detail::nodeobject_decompress(
in, in_size, bf);
}
template <class BufferFactory>
std::pair<void const*, std::size_t>
compress (void const* in,
std::size_t in_size, BufferFactory&& bf) const
{
return detail::nodeobject_compress(
in, in_size, bf);
}
};
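// A round-trip sketch for the free compress/decompress functions,
// using nudb::detail::buffer as the BufferFactory (as elsewhere in
// this file); `in` and `in_size` are illustrative:
//
//     nudb::detail::buffer b1, b2;
//     auto const c = nodeobject_compress (in, in_size, b1);
//     auto const d = nodeobject_decompress (c.first, c.second, b2);
//     // d.second == in_size and the bytes compare equal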
} // NodeStore
} // ripple

View File

@@ -20,13 +20,12 @@
#ifndef BEAST_NUDB_VARINT_H_INCLUDED
#define BEAST_NUDB_VARINT_H_INCLUDED
#include <ripple/beast/nudb/detail/stream.h>
#include <nudb/detail/stream.hpp>
#include <cstdint>
#include <type_traits>
namespace ripple {
namespace NodeStore {
namespace detail {
// This is a variant of the base128 varint format from
// Google Protocol Buffers:
@@ -127,7 +126,7 @@ write_varint (void* p0, std::size_t v)
template <class T, std::enable_if_t<
std::is_same<T, varint>::value>* = nullptr>
void
read (beast::nudb::detail::istream& is, std::size_t& u)
read (nudb::detail::istream& is, std::size_t& u)
{
auto p0 = is(1);
auto p1 = p0;
@@ -141,13 +140,12 @@ read (beast::nudb::detail::istream& is, std::size_t& u)
template <class T, std::enable_if_t<
std::is_same<T, varint>::value>* = nullptr>
void
write (beast::nudb::detail::ostream& os, std::size_t t)
write (nudb::detail::ostream& os, std::size_t t)
{
write_varint(os.data(
size_varint(t)), t);
}
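// A small round-trip sketch (the value 300 is illustrative):
//
//     std::uint8_t buf[varint_traits<std::size_t>::max];
//     auto const n = write_varint (buf, 300);   // bytes written
//     std::size_t v;
//     auto const m = read_varint (buf, n, v);   // m == n, v == 300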
} // detail
} // NodeStore
} // ripple

View File

@@ -1,111 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/beast/nudb/test/common.h>
#include <ripple/beast/utility/temp_dir.h>
#include <ripple/beast/xor_shift_engine.h>
#include <ripple/beast/unit_test.h>
#include <cmath>
#include <cstring>
#include <memory>
#include <random>
#include <utility>
namespace beast {
namespace nudb {
namespace test {
// This test is designed for callgrind runs to find hotspots
class callgrind_test : public unit_test::suite
{
public:
// Creates and opens a database, performs a bunch
// of inserts, then alternates fetching all the keys
// with keys not present.
void
do_test (std::size_t count,
path_type const& path)
{
auto const dp = path + ".dat";
auto const kp = path + ".key";
auto const lp = path + ".log";
test_api::create (dp, kp, lp,
appnum,
salt,
sizeof(nudb::test::key_type),
nudb::block_size(path),
0.50);
test_api::store db;
if (! expect (db.open(dp, kp, lp,
arena_alloc_size), "open"))
return;
expect (db.appnum() == appnum, "appnum");
Sequence seq;
for (std::size_t i = 0; i < count; ++i)
{
auto const v = seq[i];
expect (db.insert(&v.key, v.data, v.size),
"insert");
}
Storage s;
for (std::size_t i = 0; i < count * 2; ++i)
{
if (! (i%2))
{
auto const v = seq[i/2];
expect (db.fetch (&v.key, s), "fetch");
expect (s.size() == v.size, "size");
expect (std::memcmp(s.get(),
v.data, v.size) == 0, "data");
}
else
{
auto const v = seq[count + i/2];
expect (! db.fetch (&v.key, s),
"fetch missing");
}
}
db.close();
nudb::native_file::erase (dp);
nudb::native_file::erase (kp);
nudb::native_file::erase (lp);
}
void
run() override
{
enum
{
// higher numbers, more pain
N = 100000
};
testcase (beast::unit_test::abort_on_fail);
beast::temp_dir tempDir;
do_test (N, tempDir.path());
}
};
BEAST_DEFINE_TESTSUITE_MANUAL(callgrind,nudb,beast);
} // test
} // nudb
} // beast

View File

@@ -1,181 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/beast/nudb/test/common.h>
#include <ripple/beast/xor_shift_engine.h>
#include <ripple/beast/unit_test.h>
#include <ripple/beast/utility/temp_dir.h>
#include <cmath>
#include <cstring>
#include <memory>
#include <random>
#include <utility>
namespace beast {
namespace nudb {
namespace test {
class basic_recover_test : public unit_test::suite
{
public:
// Creates and opens a database, performs a bunch
// of inserts, then fetches all of them to make sure
// they are there. Uses a fail_file that causes the n-th
// I/O to fail, causing an exception.
void
do_work (std::size_t count, float load_factor,
nudb::path_type const& path, fail_counter& c)
{
auto const dp = path + ".dat";
auto const kp = path + ".key";
auto const lp = path + ".log";
test_api::file_type::erase (dp);
test_api::file_type::erase (kp);
test_api::file_type::erase (lp);
expect(test_api::create (
dp, kp, lp, appnum, salt, sizeof(key_type),
block_size(path), load_factor), "create");
test_api::fail_store db;
if (! expect(db.open(dp, kp, lp,
arena_alloc_size, c), "open"))
{
// VFALCO open should never fail here, we need
// to report this and terminate the test.
}
expect (db.appnum() == appnum, "appnum");
Sequence seq;
for (std::size_t i = 0; i < count; ++i)
{
auto const v = seq[i];
expect(db.insert(&v.key, v.data, v.size),
"insert");
}
Storage s;
for (std::size_t i = 0; i < count; ++i)
{
auto const v = seq[i];
if (! expect(db.fetch (&v.key, s),
"fetch"))
break;
if (! expect(s.size() == v.size, "size"))
break;
if (! expect(std::memcmp(s.get(),
v.data, v.size) == 0, "data"))
break;
}
db.close();
verify_info info;
try
{
info = test_api::verify(dp, kp);
}
catch(...)
{
print(log, info);
throw;
}
test_api::file_type::erase (dp);
test_api::file_type::erase (kp);
test_api::file_type::erase (lp);
}
void
do_recover (path_type const& path,
fail_counter& c)
{
auto const dp = path + ".dat";
auto const kp = path + ".key";
auto const lp = path + ".log";
recover<test_api::hash_type,
test_api::codec_type, fail_file<
test_api::file_type>>(dp, kp, lp,
test_api::buffer_size, c);
test_api::verify(dp, kp);
test_api::file_type::erase (dp);
test_api::file_type::erase (kp);
test_api::file_type::erase (lp);
}
void
test_recover (float load_factor, std::size_t count)
{
testcase << count << " inserts";
beast::temp_dir tempDir;
auto const path = tempDir.path();
for (std::size_t n = 1;;++n)
{
try
{
fail_counter c(n);
do_work (count, load_factor, path, c);
break;
}
catch (nudb::fail_error const&)
{
}
for (std::size_t m = 1;;++m)
{
fail_counter c(m);
try
{
do_recover (path, c);
break;
}
catch (nudb::fail_error const&)
{
}
}
}
}
};
class recover_test : public basic_recover_test
{
public:
void
run() override
{
float lf = 0.55f;
test_recover (lf, 0);
test_recover (lf, 10);
test_recover (lf, 100);
}
};
BEAST_DEFINE_TESTSUITE(recover,nudb,beast);
class recover_big_test : public basic_recover_test
{
public:
void
run() override
{
float lf = 0.90f;
test_recover (lf, 1000);
test_recover (lf, 10000);
test_recover (lf, 100000);
}
};
} // test
} // nudb
} // beast

View File

@@ -1,143 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/beast/nudb/test/common.h>
#include <ripple/beast/utility/temp_dir.h>
#include <ripple/beast/xor_shift_engine.h>
#include <ripple/beast/unit_test.h>
#include <cmath>
#include <iomanip>
#include <memory>
#include <random>
#include <utility>
namespace beast {
namespace nudb {
namespace test {
// Basic, single threaded test that verifies the
// correct operation of the store. Load factor is
// set high to ensure that spill records are created,
// exercised, and split.
//
class store_test : public unit_test::suite
{
public:
void
do_test (std::size_t N,
std::size_t block_size, float load_factor)
{
testcase (beast::unit_test::abort_on_fail);
beast::temp_dir tempDir;
auto const dp = tempDir.file ("nudb.dat");
auto const kp = tempDir.file ("nudb.key");
auto const lp = tempDir.file ("nudb.log");
Sequence seq;
test_api::store db;
try
{
expect (test_api::create (dp, kp, lp, appnum,
salt, sizeof(key_type), block_size,
load_factor), "create");
expect (db.open(dp, kp, lp,
arena_alloc_size), "open");
Storage s;
// insert
for (std::size_t i = 0; i < N; ++i)
{
auto const v = seq[i];
expect (db.insert(
&v.key, v.data, v.size), "insert 1");
}
// fetch
for (std::size_t i = 0; i < N; ++i)
{
auto const v = seq[i];
bool const found = db.fetch (&v.key, s);
expect (found, "not found");
expect (s.size() == v.size, "wrong size");
expect (std::memcmp(s.get(),
v.data, v.size) == 0, "not equal");
}
// insert duplicates
for (std::size_t i = 0; i < N; ++i)
{
auto const v = seq[i];
expect (! db.insert(&v.key,
v.data, v.size), "insert duplicate");
}
// insert/fetch
for (std::size_t i = 0; i < N; ++i)
{
auto v = seq[i];
bool const found = db.fetch (&v.key, s);
expect (found, "missing");
expect (s.size() == v.size, "wrong size");
expect (memcmp(s.get(),
v.data, v.size) == 0, "wrong data");
v = seq[i + N];
expect (db.insert(&v.key, v.data, v.size),
"insert 2");
}
db.close();
//auto const stats = test_api::verify(dp, kp);
auto const stats = verify<test_api::hash_type>(
dp, kp, 1 * 1024 * 1024);
expect (stats.hist[1] > 0, "no splits");
print (log, stats);
}
catch (nudb::store_error const& e)
{
fail (e.what());
}
catch (std::exception const& e)
{
fail (e.what());
}
expect (test_api::file_type::erase(dp));
expect (test_api::file_type::erase(kp));
expect (! test_api::file_type::erase(lp));
}
void
run() override
{
enum
{
#ifndef NDEBUG
N = 5000 // debug
#else
N = 50000
#endif
,block_size = 256
};
float const load_factor = 0.95f;
do_test (N, block_size, load_factor);
}
};
BEAST_DEFINE_TESTSUITE(store,nudb,beast);
} // test
} // nudb
} // beast

View File

@@ -1,286 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2014, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/beast/nudb/verify.h>
#include <ripple/beast/nudb/test/common.h>
#include <ripple/beast/unit_test.h>
#include <ripple/beast/clock/basic_seconds_clock.h>
#include <chrono>
#include <iomanip>
#include <ostream>
namespace beast {
namespace nudb {
namespace test {
namespace detail {
class save_stream_state
{
std::ostream& os_;
std::streamsize precision_;
std::ios::fmtflags flags_;
std::ios::char_type fill_;
public:
~save_stream_state()
{
os_.precision(precision_);
os_.flags(flags_);
os_.fill(fill_);
}
save_stream_state(save_stream_state const&) = delete;
save_stream_state& operator=(save_stream_state const&) = delete;
explicit save_stream_state(std::ostream& os)
: os_(os)
, precision_(os.precision())
, flags_(os.flags())
, fill_(os.fill())
{
}
};
template <class Rep, class Period>
std::ostream&
pretty_time(std::ostream& os, std::chrono::duration<Rep, Period> d)
{
save_stream_state _(os);
using namespace std::chrono;
if (d < microseconds{1})
{
// use nanoseconds
if (d < nanoseconds{100})
{
// use floating
using ns = duration<float, std::nano>;
os << std::fixed << std::setprecision(1) << ns(d).count();
}
else
{
// use integral
os << std::chrono::duration_cast<nanoseconds>(d).count();
}
os << "ns";
}
else if (d < milliseconds{1})
{
// use microseconds
if (d < microseconds{100})
{
// use floating
using ms = duration<float, std::micro>;
os << std::fixed << std::setprecision(1) << ms(d).count();
}
else
{
// use integral
os << std::chrono::duration_cast<microseconds>(d).count();
}
os << "us";
}
else if (d < seconds{1})
{
// use milliseconds
if (d < milliseconds{100})
{
// use floating
using ms = duration<float, std::milli>;
os << std::fixed << std::setprecision(1) << ms(d).count();
}
else
{
// use integral
os << std::chrono::duration_cast<milliseconds>(d).count();
}
os << "ms";
}
else if (d < minutes{1})
{
// use seconds
if (d < seconds{100})
{
// use floating
using s = duration<float>;
os << std::fixed << std::setprecision(1) << s(d).count();
}
else
{
// use integral
os << std::chrono::duration_cast<seconds>(d).count();
}
os << "s";
}
else
{
// use minutes
if (d < minutes{100})
{
// use floating
using m = duration<float, std::ratio<60>>;
os << std::fixed << std::setprecision(1) << m(d).count();
}
else
{
// use integral
os << std::chrono::duration_cast<minutes>(d).count();
}
os << "min";
}
return os;
}
template <class Period, class Rep>
inline
std::string
fmtdur(std::chrono::duration<Period, Rep> const& d)
{
std::stringstream ss;
pretty_time(ss, d);
return ss.str();
}
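// For example, fmtdur(std::chrono::milliseconds(1500)) yields "1.5s",
// while fmtdur(std::chrono::microseconds(250)) yields "250us".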
} // detail
//------------------------------------------------------------------------------
template <class Log>
class progress
{
private:
using clock_type =
beast::basic_seconds_clock<
std::chrono::steady_clock>;
Log& log_;
clock_type::time_point start_ = clock_type::now();
clock_type::time_point now_ = clock_type::now();
clock_type::time_point report_ = clock_type::now();
std::size_t prev_ = 0;
bool estimate_ = false;
public:
explicit
progress(Log& log)
: log_(log)
{
}
void
operator()(std::size_t w, std::size_t w1)
{
using namespace std::chrono;
auto const now = clock_type::now();
if (now == now_)
return;
now_ = now;
auto const elapsed = now - start_;
if (! estimate_)
{
if (elapsed < seconds(15))
return;
estimate_ = true;
}
else if (now - report_ <
std::chrono::seconds(60))
{
return;
}
auto const rate =
elapsed.count() / double(w);
clock_type::duration const remain(
static_cast<clock_type::duration::rep>(
(w1 - w) * rate));
log_ <<
"Remaining: " << detail::fmtdur(remain) <<
" (" << w << " of " << w1 <<
" in " << detail::fmtdur(elapsed) <<
", " << (w - prev_) <<
" in " << detail::fmtdur(now - report_) <<
")";
report_ = now;
prev_ = w;
}
void
finish()
{
log_ <<
"Total time: " << detail::fmtdur(
clock_type::now() - start_);
}
};
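// A minimal usage sketch (total_work is an illustrative name; output
// begins only after the reporter's initial 15-second estimate window):
//
//     progress<decltype(log)> p(log);
//     for (std::size_t i = 0; i <= total_work; ++i)
//         p(i, total_work);
//     p.finish();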
//------------------------------------------------------------------------------
class verify_test : public unit_test::suite
{
public:
// Runs verify on the database and reports statistics
void
do_verify (nudb::path_type const& path)
{
auto const dp = path + ".dat";
auto const kp = path + ".key";
print(log, test_api::verify(dp, kp));
}
void
run() override
{
testcase(beast::unit_test::abort_on_fail) << "verify " << arg();
if (arg().empty())
return fail("missing unit test argument");
do_verify(arg());
pass();
}
};
class verify_fast_test : public unit_test::suite
{
public:
// Runs verify on the database and reports statistics
void
do_verify (nudb::path_type const& path)
{
auto const dp = path + ".dat";
auto const kp = path + ".key";
progress<decltype(log)> p(log);
// VFALCO HACK 32gb hardcoded!
auto const info = verify_fast<
test_api::hash_type>(
dp, kp, 34359738368, p);
print(log, info);
}
void
run() override
{
testcase(beast::unit_test::abort_on_fail) << "verify_fast " << arg();
if (arg().empty())
return fail("missing unit test argument");
do_verify(arg());
pass();
}
};
BEAST_DEFINE_TESTSUITE_MANUAL(verify,nudb,beast);
BEAST_DEFINE_TESTSUITE_MANUAL(verify_fast,nudb,beast);
} // test
} // nudb
} // beast

View File

@@ -18,15 +18,15 @@
//==============================================================================
#include <BeastConfig.h>
#include <ripple/beast/hash/xxhasher.h>
#include <ripple/basics/contract.h>
#include <ripple/nodestore/impl/codec.h>
#include <ripple/beast/clock/basic_seconds_clock.h>
#include <ripple/beast/rfc2616.h>
#include <ripple/beast/core/LexicalCast.h>
#include <ripple/beast/nudb/create.h>
#include <ripple/beast/nudb/detail/format.h>
#include <ripple/beast/unit_test.h>
#include <nudb/create.hpp>
#include <nudb/detail/format.hpp>
#include <nudb/xxhasher.hpp>
#include <beast/core/detail/ci_char_traits.hpp>
#include <boost/regex.hpp>
#include <algorithm>
@@ -304,8 +304,8 @@ public:
{
testcase(beast::unit_test::abort_on_fail) << arg();
using namespace beast::nudb;
using namespace beast::nudb::detail;
using namespace nudb;
using namespace nudb::detail;
pass();
auto const args = parse_args(arg());
@@ -354,8 +354,7 @@ public:
auto const from_path = args.at("from");
auto const to_path = args.at("to");
using hash_type = beast::xxhasher;
using codec_type = nodeobject_codec;
using hash_type = nudb::xxhasher;
auto const bulk_size = 64 * 1024 * 1024;
float const load_factor = 0.5;
@@ -395,12 +394,17 @@ public:
dh.key_size = 32;
native_file df;
df.create(file_mode::append, dp);
error_code ec;
df.create(file_mode::append, dp, ec);
if (ec)
Throw<nudb::system_error>(ec);
bulk_writer<native_file> dw(
df, 0, bulk_size);
{
{
auto os = dw.prepare(dat_file_header::size);
auto os = dw.prepare(dat_file_header::size, ec);
if (ec)
Throw<nudb::system_error>(ec);
write(os, dh);
}
rocksdb::ReadOptions options;
@@ -410,7 +414,6 @@ public:
db->NewIterator(options));
buffer buf;
codec_type codec;
for (it->SeekToFirst (); it->Valid (); it->Next())
{
if (it->key().size() != 32)
@@ -424,12 +427,12 @@ public:
new char[size]);
std::memcpy(clean.get(), data, size);
filter_inner(clean.get(), size);
auto const out = codec.compress(
auto const out = nodeobject_compress(
clean.get(), size, buf);
// Verify codec correctness
{
buffer buf2;
auto const check = codec.decompress(
auto const check = nodeobject_decompress(
out.first, out.second, buf2);
BEAST_EXPECT(check.second == size);
BEAST_EXPECT(std::memcmp(
@@ -439,7 +442,9 @@ public:
auto os = dw.prepare(
field<uint48_t>::size + // Size
32 + // Key
out.second);
out.second, ec);
if (ec)
Throw<nudb::system_error>(ec);
write<uint48_t>(os, out.second);
std::memcpy(os.data(32), key, 32);
std::memcpy(os.data(out.second),
@@ -447,14 +452,17 @@ public:
++nitems;
nbytes += size;
}
dw.flush();
dw.flush(ec);
if (ec)
Throw<nudb::system_error>(ec);
}
db.reset();
log <<
"Import data: " << detail::fmtdur(
std::chrono::steady_clock::now() - start);
auto const df_size =
df.actual_size();
auto const df_size = df.size(ec);
if (ec)
Throw<nudb::system_error>(ec);
// Create key file
key_file_header kh;
kh.version = currentVersion;
@@ -470,13 +478,17 @@ public:
kh.block_size) * load_factor));
kh.modulus = ceil_pow2(kh.buckets);
native_file kf;
kf.create(file_mode::append, kp);
kf.create(file_mode::append, kp, ec);
if (ec)
Throw<nudb::system_error>(ec);
buffer buf(kh.block_size);
{
std::memset(buf.get(), 0, kh.block_size);
ostream os(buf.get(), kh.block_size);
write(os, kh);
kf.write(0, buf.get(), kh.block_size);
kf.write(0, buf.get(), kh.block_size, ec);
if (ec)
Throw<nudb::system_error>(ec);
}
// Build contiguous sequential sections of the
// key file using multiple passes over the data.
@@ -518,14 +530,18 @@ public:
// Data Record or Spill Record
std::size_t size;
auto is = r.prepare(
field<uint48_t>::size); // Size
field<uint48_t>::size, ec); // Size
if (ec)
Throw<nudb::system_error>(ec);
read<uint48_t>(is, size);
if (size > 0)
{
// Data Record
is = r.prepare(
dh.key_size + // Key
size); // Data
size, ec); // Data
if (ec)
Throw<nudb::system_error>(ec);
std::uint8_t const* const key =
is.data(dh.key_size);
auto const h = hash<hash_type>(
@@ -538,7 +554,9 @@ public:
continue;
bucket b(kh.block_size, buf.get() +
(n - b0) * kh.block_size);
maybe_spill(b, dw);
maybe_spill(b, dw, ec);
if (ec)
Throw<nudb::system_error>(ec);
b.insert(offset, size, h);
}
else
@@ -546,16 +564,24 @@ public:
// VFALCO Should never get here
// Spill Record
is = r.prepare(
field<std::uint16_t>::size);
field<std::uint16_t>::size, ec);
if (ec)
Throw<nudb::system_error>(ec);
read<std::uint16_t>(is, size); // Size
r.prepare(size); // skip
r.prepare(size, ec); // skip
if (ec)
Throw<nudb::system_error>(ec);
}
}
kf.write((b0 + 1) * kh.block_size,
buf.get(), bn * kh.block_size);
buf.get(), bn * kh.block_size, ec);
if (ec)
Throw<nudb::system_error>(ec);
++npass;
}
dw.flush();
dw.flush(ec);
if (ec)
Throw<nudb::system_error>(ec);
p.finish(log);
}
};
@@ -566,187 +592,6 @@ BEAST_DEFINE_TESTSUITE(import,NodeStore,ripple);
//------------------------------------------------------------------------------
class rekey_test : public beast::unit_test::suite
{
public:
void
run() override
{
testcase(beast::unit_test::abort_on_fail) << arg();
using namespace beast::nudb;
using namespace beast::nudb::detail;
pass();
auto const args = parse_args(arg());
bool usage = args.empty();
if (! usage &&
args.find("path") == args.end())
{
log <<
"Missing parameter: path";
usage = true;
}
if (! usage &&
args.find("items") == args.end())
{
log <<
"Missing parameter: items";
usage = true;
}
if (! usage &&
args.find("buffer") == args.end())
{
log <<
"Missing parameter: buffer";
usage = true;
}
if (usage)
{
log <<
"Usage:\n" <<
"--unittest-arg=path=<path>,items=<items>,buffer=<buffer>\n" <<
"path: NuDB path to rekey (without the .dat)\n" <<
"items: Number of items in the .dat file\n" <<
"buffer: Buffer size (bigger is faster)\n" <<
"NuDB key file must not already exist.";
return;
}
std::size_t const buffer_size =
std::stoull(args.at("buffer"));
auto const path = args.at("path");
std::size_t const items =
std::stoull(args.at("items"));
using hash_type = beast::xxhasher;
auto const bulk_size = 64 * 1024 * 1024;
float const load_factor = 0.5;
auto const dp = path + ".dat";
auto const kp = path + ".key";
log <<
"path: " << path << "\n"
"items: " << items << "\n"
"buffer: " << buffer_size;
// Create data file with values
native_file df;
df.open(file_mode::append, dp);
dat_file_header dh;
read(df, dh);
auto const df_size = df.actual_size();
bulk_writer<native_file> dw(
df, df_size, bulk_size);
// Create key file
key_file_header kh;
kh.version = currentVersion;
kh.uid = dh.uid;
kh.appnum = dh.appnum;
kh.key_size = 32;
kh.salt = make_salt();
kh.pepper = pepper<hash_type>(kh.salt);
kh.block_size = block_size(kp);
kh.load_factor = std::min<std::size_t>(
65536.0 * load_factor, 65535);
kh.buckets = std::ceil(items / (bucket_capacity(
kh.block_size) * load_factor));
kh.modulus = ceil_pow2(kh.buckets);
native_file kf;
kf.create(file_mode::append, kp);
buffer buf(kh.block_size);
{
std::memset(buf.get(), 0, kh.block_size);
ostream os(buf.get(), kh.block_size);
write(os, kh);
kf.write(0, buf.get(), kh.block_size);
}
// Build contiguous sequential sections of the
// key file using multiple passes over the data.
//
auto const buckets = std::max<std::size_t>(1,
buffer_size / kh.block_size);
buf.reserve(buckets * kh.block_size);
auto const passes =
(kh.buckets + buckets - 1) / buckets;
log <<
"buckets: " << kh.buckets << "\n"
"data: " << df_size << "\n"
"passes: " << passes;
progress p(df_size * passes);
std::size_t npass = 0;
for (std::size_t b0 = 0; b0 < kh.buckets;
b0 += buckets)
{
auto const b1 = std::min(
b0 + buckets, kh.buckets);
// Buffered range is [b0, b1)
auto const bn = b1 - b0;
// Create empty buckets
for (std::size_t i = 0; i < bn; ++i)
{
bucket b(kh.block_size,
buf.get() + i * kh.block_size,
empty);
}
// Insert all keys into buckets
// Iterate Data File
bulk_reader<native_file> r(
df, dat_file_header::size,
df_size, bulk_size);
while (! r.eof())
{
auto const offset = r.offset();
// Data Record or Spill Record
std::size_t size;
auto is = r.prepare(
field<uint48_t>::size); // Size
read<uint48_t>(is, size);
if (size > 0)
{
// Data Record
is = r.prepare(
dh.key_size + // Key
size); // Data
std::uint8_t const* const key =
is.data(dh.key_size);
auto const h = hash<hash_type>(
key, dh.key_size, kh.salt);
auto const n = bucket_index(
h, kh.buckets, kh.modulus);
p(log,
npass * df_size + r.offset());
if (n < b0 || n >= b1)
continue;
bucket b(kh.block_size, buf.get() +
(n - b0) * kh.block_size);
maybe_spill(b, dw);
b.insert(offset, size, h);
}
else
{
// VFALCO Should never get here
// Spill Record
is = r.prepare(
field<std::uint16_t>::size);
read<std::uint16_t>(is, size); // Size
r.prepare(size); // skip
}
}
kf.write((b0 + 1) * kh.block_size,
buf.get(), bn * kh.block_size);
++npass;
}
dw.flush();
p.finish(log);
}
};
BEAST_DEFINE_TESTSUITE(rekey,NodeStore,ripple);
} // NodeStore
} // ripple

View File

@@ -36,15 +36,15 @@ public:
for (auto const v : vv)
{
std::array<std::uint8_t,
detail::varint_traits<
varint_traits<
std::size_t>::max> vi;
auto const n0 =
detail::write_varint(
write_varint(
vi.data(), v);
expect (n0 > 0, "write error");
std::size_t v1;
auto const n1 =
detail::read_varint(
read_varint(
vi.data(), n0, v1);
expect(n1 == n0, "read error");
expect(v == v1, "wrong value");

View File

@@ -24,10 +24,6 @@
#include <test/beast/beast_basic_seconds_clock_test.cpp>
#include <test/beast/beast_Debug_test.cpp>
#include <test/beast/beast_Journal_test.cpp>
#include <test/beast/beast_nudb_callgrind_test.cpp>
#include <test/beast/beast_nudb_recover_test.cpp>
#include <test/beast/beast_nudb_store_test.cpp>
#include <test/beast/beast_nudb_verify_test.cpp>
#include <test/beast/beast_PropertyStream_test.cpp>
#include <test/beast/beast_tagged_integer_test.cpp>
#include <test/beast/beast_weak_fn_test.cpp>