Mirror of https://github.com/XRPLF/rippled.git
Merge branch 'develop' of github.com:ripple/rippled into develop
.gitignore (vendored): 4 changes
@@ -63,3 +63,7 @@ DerivedData

# Intel Parallel Studio 2013 XE
My Amplifier XE Results - RippleD

# KeyvaDB files
*.key
*.val

@@ -120,6 +120,19 @@
//#define BEAST_BIND_USES_TR1 1
//#define BEAST_BIND_USES_BOOST 1

//#define BEAST_UNIT_TESTS 1
//------------------------------------------------------------------------------
//
// Ripple compilation settings
//
//------------------------------------------------------------------------------

/** Config: RIPPLE_VERIFY_NODEOBJECT_KEYS

This verifies that the hash of node objects matches the payload.
It is quite expensive so normally this is turned off!
*/
#ifndef RIPPLE_VERIFY_NODEOBJECT_KEYS
//#define RIPPLE_VERIFY_NODEOBJECT_KEYS 1
#endif

#endif

@@ -63,6 +63,7 @@ SOURCES += \
../../Subtrees/beast/modules/beast_basics/beast_basics.cpp \
../../Subtrees/beast/modules/beast_core/beast_core.cpp \
../../Subtrees/beast/modules/beast_crypto/beast_crypto.cpp \
../../Subtrees/beast/modules/beast_db/beast_db.cpp \
../../modules/ripple_app/ripple_app_pt1.cpp \
../../modules/ripple_app/ripple_app_pt2.cpp \
../../modules/ripple_app/ripple_app_pt3.cpp \

@@ -157,6 +157,24 @@
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_KeyvaDB.cpp">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_KeyvaDBBackendFactory.cpp">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_MdbBackendFactory.cpp">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_NodeObject.cpp">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
@@ -802,12 +820,6 @@
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\cpp\ripple\ripple_ProofOfWorkFactoryUnitTests.cpp">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\cpp\ripple\ripple_RippleCalc.cpp">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
@@ -1027,6 +1039,7 @@
|
||||
<ClCompile Include="..\..\Subtrees\beast\modules\beast_basics\beast_basics.cpp" />
|
||||
<ClCompile Include="..\..\Subtrees\beast\modules\beast_core\beast_core.cpp" />
|
||||
<ClCompile Include="..\..\Subtrees\beast\modules\beast_crypto\beast_crypto.cpp" />
|
||||
<ClCompile Include="..\..\Subtrees\beast\modules\beast_db\beast_db.cpp" />
|
||||
<ClCompile Include="..\..\Subtrees\leveldb\db\builder.cc">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
@@ -1402,6 +1415,9 @@
|
||||
<ClInclude Include="..\..\modules\ripple_app\ledger\ripple_LedgerHistory.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_app\ledger\SerializedValidation.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_HyperLevelDBBackendFactory.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_KeyvaDB.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_KeyvaDBBackendFactory.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_MdbBackendFactory.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_NodeObject.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_NodeStore.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_LevelDBBackendFactory.h" />
|
||||
@@ -1441,6 +1457,7 @@
|
||||
<ClInclude Include="..\..\modules\ripple_basio\ripple_basio_impl.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_client\ripple_client.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_core\functional\ripple_Config.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_core\functional\ripple_ConfigSections.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_core\functional\ripple_ILoadFeeTrack.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_core\functional\ripple_Job.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_core\functional\ripple_JobQueue.h" />
|
||||
@@ -1732,7 +1749,7 @@
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>_CRTDBG_MAP_ALLOC;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
|
||||
<MinimalRebuild>false</MinimalRebuild>
|
||||
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
|
||||
|
||||
@@ -594,9 +594,6 @@
|
||||
<ClCompile Include="..\..\src\cpp\ripple\ripple_ProofOfWorkFactory.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\_misc</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\cpp\ripple\ripple_ProofOfWorkFactoryUnitTests.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\_misc</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\src\cpp\ripple\ripple_SerializedLedger.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\_misc</Filter>
|
||||
</ClCompile>
|
||||
@@ -807,9 +804,6 @@
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_LevelDBBackendFactory.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_NullBackendFactory.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_mdb\ripple_mdb.c">
|
||||
<Filter>[1] Ripple\ripple_mdb</Filter>
|
||||
</ClCompile>
|
||||
@@ -897,6 +891,21 @@
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_HyperLevelDBBackendFactory.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_KeyvaDB.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_KeyvaDBBackendFactory.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_NullBackendFactory.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_MdbBackendFactory.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\Subtrees\beast\modules\beast_db\beast_db.cpp">
|
||||
<Filter>[0] Subtrees\beast</Filter>
|
||||
</ClCompile>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<ClInclude Include="..\..\Subtrees\sqlite\sqlite3.h">
|
||||
@@ -963,9 +972,6 @@
|
||||
<ClInclude Include="..\..\modules\ripple_basics\utility\ripple_RandomNumbers.h">
|
||||
<Filter>[1] Ripple\ripple_basics\utility</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\ripple_basics\utility\ripple_ScopedLock.h">
|
||||
<Filter>[1] Ripple\ripple_basics\utility</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\ripple_basics\utility\ripple_StringUtilities.h">
|
||||
<Filter>[1] Ripple\ripple_basics\utility</Filter>
|
||||
</ClInclude>
|
||||
@@ -1581,9 +1587,6 @@
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_LevelDBBackendFactory.h">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_NullBackendFactory.h">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\ripple_mdb\ripple_mdb.h">
|
||||
<Filter>[1] Ripple\ripple_mdb</Filter>
|
||||
</ClInclude>
|
||||
@@ -1674,6 +1677,24 @@
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_HyperLevelDBBackendFactory.h">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_KeyvaDB.h">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_KeyvaDBBackendFactory.h">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_NullBackendFactory.h">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_MdbBackendFactory.h">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\ripple_basics\utility\ripple_ScopedLock.h">
|
||||
<Filter>[1] Ripple\ripple_basics\utility</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\ripple_core\functional\ripple_ConfigSections.h">
|
||||
<Filter>[1] Ripple\ripple_core\functional</Filter>
|
||||
</ClInclude>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<CustomBuild Include="..\..\src\cpp\ripple\ripple.proto" />
|
||||
|
||||
@@ -122,6 +122,7 @@ COMPILED_FILES = [
'Subtrees/beast/modules/beast_basics/beast_basics.cpp',
'Subtrees/beast/modules/beast_core/beast_core.cpp',
'Subtrees/beast/modules/beast_crypto/beast_crypto.cpp',
'Subtrees/beast/modules/beast_db/beast_db.cpp',
'modules/ripple_app/ripple_app_pt1.cpp',
'modules/ripple_app/ripple_app_pt2.cpp',
'modules/ripple_app/ripple_app_pt3.cpp',

@@ -21,6 +21,19 @@ Branch
ripple-fork
```

## LightningDB (a.k.a. MDB)

A supposedly fast memory-mapped key-value database system

Repository <br>
```
git://gitorious.org/mdb/mdb.git
```
Branch
```
mdb.master
```

## websocket

Ripple's fork of websocketpp has some incompatible changes and Ripple-specific includes.

@@ -120,6 +120,4 @@
//#define BEAST_BIND_USES_TR1 1
//#define BEAST_BIND_USES_BOOST 1

#define BEAST_UNIT_TESTS 1

#endif

@@ -78,6 +78,12 @@
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</None>
|
||||
<None Include="..\..\modules\beast_db\beast_db.mm">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</None>
|
||||
<None Include="..\..\README.md" />
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
@@ -134,12 +140,14 @@
|
||||
<ClInclude Include="..\..\modules\beast_core\diagnostic\beast_SafeBool.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\diagnostic\beast_Throw.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\diagnostic\beast_UnitTest.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\diagnostic\beast_UnitTestUtilities.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\files\beast_DirectoryIterator.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\files\beast_File.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\files\beast_FileInputStream.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\files\beast_FileOutputStream.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\files\beast_FileSearchPath.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\files\beast_MemoryMappedFile.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\files\beast_RandomAccessFile.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\files\beast_TemporaryFile.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\json\beast_JSON.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\logging\beast_FileLogger.h" />
|
||||
@@ -162,6 +170,7 @@
|
||||
<ClInclude Include="..\..\modules\beast_core\memory\beast_MemoryAlignment.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\memory\beast_MemoryBlock.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\memory\beast_OptionalScopedPointer.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\memory\beast_RecycledObjectPool.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\memory\beast_SharedObject.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\memory\beast_ScopedPointer.h" />
|
||||
<ClInclude Include="..\..\modules\beast_core\memory\beast_SharedSingleton.h" />
|
||||
@@ -246,6 +255,8 @@
|
||||
<ClInclude Include="..\..\modules\beast_core\zip\zlib\zutil.h" />
|
||||
<ClInclude Include="..\..\modules\beast_crypto\beast_crypto.h" />
|
||||
<ClInclude Include="..\..\modules\beast_crypto\math\beast_UnsignedInteger.h" />
|
||||
<ClInclude Include="..\..\modules\beast_db\beast_db.h" />
|
||||
<ClInclude Include="..\..\modules\beast_db\keyvalue\beast_KeyvaDB.h" />
|
||||
<ClInclude Include="BeastConfig.h" />
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
@@ -407,6 +418,12 @@
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\beast_core\diagnostic\beast_UnitTestUtilities.cpp">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\beast_core\files\beast_DirectoryIterator.cpp">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
@@ -437,6 +454,12 @@
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\beast_core\files\beast_RandomAccessFile.cpp">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\beast_core\files\beast_TemporaryFile.cpp">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
@@ -918,6 +941,13 @@
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\beast_db\beast_db.cpp" />
|
||||
<ClCompile Include="..\..\modules\beast_db\keyvalue\beast_KeyvaDB.cpp">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<Text Include="..\..\TODO.txt" />
|
||||
|
||||
@@ -36,6 +36,9 @@
|
||||
<None Include="..\..\modules\beast_basics\beast_basics.mm">
|
||||
<Filter>beast_basics</Filter>
|
||||
</None>
|
||||
<None Include="..\..\modules\beast_db\beast_db.mm">
|
||||
<Filter>beast_db</Filter>
|
||||
</None>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<Filter Include="beast_core">
|
||||
@@ -125,6 +128,12 @@
|
||||
<Filter Include="beast_crypto\math">
|
||||
<UniqueIdentifier>{1170f2bc-2456-410a-ab2b-c45f6ed37b9e}</UniqueIdentifier>
|
||||
</Filter>
|
||||
<Filter Include="beast_db">
|
||||
<UniqueIdentifier>{4834218f-f13f-41bc-a8a0-50314a3a99a3}</UniqueIdentifier>
|
||||
</Filter>
|
||||
<Filter Include="beast_db\keyvalue">
|
||||
<UniqueIdentifier>{15a98fee-1b52-45eb-9480-514b8750d755}</UniqueIdentifier>
|
||||
</Filter>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<ClInclude Include="..\..\modules\beast_core\beast_core.h">
|
||||
@@ -623,6 +632,21 @@
|
||||
<ClInclude Include="..\..\modules\beast_core\containers\beast_SharedObjectArray.h">
|
||||
<Filter>beast_core\containers</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\beast_core\files\beast_RandomAccessFile.h">
|
||||
<Filter>beast_core\files</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\beast_core\diagnostic\beast_UnitTestUtilities.h">
|
||||
<Filter>beast_core\diagnostic</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\beast_core\memory\beast_RecycledObjectPool.h">
|
||||
<Filter>beast_core\memory</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\beast_db\beast_db.h">
|
||||
<Filter>beast_db</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\beast_db\keyvalue\beast_KeyvaDB.h">
|
||||
<Filter>beast_db\keyvalue</Filter>
|
||||
</ClInclude>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<ClCompile Include="..\..\modules\beast_core\beast_core.cpp">
|
||||
@@ -967,6 +991,18 @@
|
||||
<ClCompile Include="..\..\modules\beast_crypto\math\beast_UnsignedInteger.cpp">
|
||||
<Filter>beast_crypto\math</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\beast_core\files\beast_RandomAccessFile.cpp">
|
||||
<Filter>beast_core\files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\beast_core\diagnostic\beast_UnitTestUtilities.cpp">
|
||||
<Filter>beast_core\diagnostic</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\beast_db\beast_db.cpp">
|
||||
<Filter>beast_db</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\beast_db\keyvalue\beast_KeyvaDB.cpp">
|
||||
<Filter>beast_db\keyvalue</Filter>
|
||||
</ClCompile>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<Text Include="..\..\TODO.txt" />
|
||||
|
||||
@@ -2,6 +2,12 @@
BEAST TODO
--------------------------------------------------------------------------------

- Specialize UnsignedInteger<> for performance in the storage format

- Macro for acquiring a ScopedLock that records file and line.

- Rename HeapBlock routines to not conflict with _CRTDBG_MAP_ALLOC macros

- Design a WeakPtr / SharedPtr / SharedObject intrusive system

- Implement beast::Bimap?

@@ -149,12 +149,14 @@ namespace beast
|
||||
#include "diagnostic/beast_FPUFlags.cpp"
|
||||
#include "diagnostic/beast_LeakChecked.cpp"
|
||||
#include "diagnostic/beast_UnitTest.cpp"
|
||||
#include "diagnostic/beast_UnitTestUtilities.cpp"
|
||||
|
||||
#include "files/beast_DirectoryIterator.cpp"
|
||||
#include "files/beast_File.cpp"
|
||||
#include "files/beast_FileInputStream.cpp"
|
||||
#include "files/beast_FileOutputStream.cpp"
|
||||
#include "files/beast_FileSearchPath.cpp"
|
||||
#include "files/beast_RandomAccessFile.cpp"
|
||||
#include "files/beast_TemporaryFile.cpp"
|
||||
|
||||
#include "json/beast_JSON.cpp"
|
||||
|
||||
@@ -226,6 +226,7 @@ namespace beast
|
||||
#include "diagnostic/beast_Error.h"
|
||||
#include "diagnostic/beast_FPUFlags.h"
|
||||
#include "diagnostic/beast_UnitTest.h"
|
||||
#include "diagnostic/beast_UnitTestUtilities.h"
|
||||
#include "diagnostic/beast_Throw.h"
|
||||
#include "containers/beast_AbstractFifo.h"
|
||||
#include "containers/beast_Array.h"
|
||||
@@ -252,6 +253,7 @@ namespace beast
|
||||
#include "files/beast_FileOutputStream.h"
|
||||
#include "files/beast_FileSearchPath.h"
|
||||
#include "files/beast_MemoryMappedFile.h"
|
||||
#include "files/beast_RandomAccessFile.h"
|
||||
#include "files/beast_TemporaryFile.h"
|
||||
#include "json/beast_JSON.h"
|
||||
#include "logging/beast_FileLogger.h"
|
||||
@@ -274,6 +276,7 @@ namespace beast
|
||||
#include "memory/beast_WeakReference.h"
|
||||
#include "memory/beast_MemoryAlignment.h"
|
||||
#include "memory/beast_CacheLine.h"
|
||||
#include "memory/beast_RecycledObjectPool.h"
|
||||
#include "misc/beast_Result.h"
|
||||
#include "misc/beast_Uuid.h"
|
||||
#include "misc/beast_WindowsRegistry.h"
|
||||
|
||||
@@ -129,7 +129,7 @@ void AbstractFifo::finishedRead (int numRead) noexcept
class AbstractFifoTests : public UnitTest
{
public:
AbstractFifoTests() : UnitTest ("Abstract Fifo")
AbstractFifoTests() : UnitTest ("Abstract Fifo", "beast")
{
}

@@ -224,6 +224,4 @@ public:
}
};

#if BEAST_UNIT_TESTS
static AbstractFifoTests abstractFifoTests;
#endif

@@ -21,8 +21,13 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
UnitTest::UnitTest (const String& name_)
|
||||
: name (name_), runner (nullptr)
|
||||
UnitTest::UnitTest (String const& name,
|
||||
String const& group,
|
||||
When when)
|
||||
: m_name (name)
|
||||
, m_group (group)
|
||||
, m_when (when)
|
||||
, m_runner (nullptr)
|
||||
{
|
||||
getAllTests().add (this);
|
||||
}
|
||||
@@ -32,19 +37,25 @@ UnitTest::~UnitTest()
|
||||
getAllTests().removeFirstMatchingValue (this);
|
||||
}
|
||||
|
||||
Array<UnitTest*>& UnitTest::getAllTests()
|
||||
UnitTest::TestList& UnitTest::getAllTests()
|
||||
{
|
||||
static Array<UnitTest*> tests;
|
||||
return tests;
|
||||
static TestList s_tests;
|
||||
|
||||
return s_tests;
|
||||
}
|
||||
|
||||
void UnitTest::initialise() {}
|
||||
void UnitTest::shutdown() {}
|
||||
|
||||
void UnitTest::performTest (UnitTests* const runner_)
|
||||
void UnitTest::initialise()
|
||||
{
|
||||
bassert (runner_ != nullptr);
|
||||
runner = runner_;
|
||||
}
|
||||
|
||||
void UnitTest::shutdown()
|
||||
{
|
||||
}
|
||||
|
||||
void UnitTest::performTest (UnitTests* const runner)
|
||||
{
|
||||
bassert (runner != nullptr);
|
||||
m_runner = runner;
|
||||
|
||||
initialise();
|
||||
runTest();
|
||||
@@ -53,23 +64,24 @@ void UnitTest::performTest (UnitTests* const runner_)
|
||||
|
||||
void UnitTest::logMessage (const String& message)
|
||||
{
|
||||
runner->logMessage (message);
|
||||
m_runner->logMessage (message);
|
||||
}
|
||||
|
||||
void UnitTest::beginTest (const String& testName)
|
||||
{
|
||||
runner->beginNewTest (this, testName);
|
||||
m_runner->beginNewTest (this, testName);
|
||||
}
|
||||
|
||||
void UnitTest::expect (const bool result, const String& failureMessage)
|
||||
{
|
||||
if (result)
|
||||
runner->addPass();
|
||||
m_runner->addPass();
|
||||
else
|
||||
runner->addFail (failureMessage);
|
||||
m_runner->addFail (failureMessage);
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
|
||||
UnitTests::UnitTests()
|
||||
: currentTest (nullptr),
|
||||
assertOnFailure (true),
|
||||
@@ -105,8 +117,52 @@ void UnitTests::resultsUpdated()
|
||||
{
|
||||
}
|
||||
|
||||
void UnitTests::runTests (const Array<UnitTest*>& tests)
|
||||
void UnitTests::runTest (UnitTest& test)
|
||||
{
|
||||
try
|
||||
{
|
||||
test.performTest (this);
|
||||
}
|
||||
catch (std::exception& e)
|
||||
{
|
||||
String s;
|
||||
s << "Got an exception: " << e.what ();
|
||||
addFail (s);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
addFail ("Got an unhandled exception");
|
||||
}
|
||||
}
|
||||
|
||||
void UnitTests::runTest (String const& name)
|
||||
{
|
||||
results.clear();
|
||||
resultsUpdated();
|
||||
|
||||
UnitTest::TestList& tests (UnitTest::getAllTests ());
|
||||
|
||||
for (int i = 0; i < tests.size(); ++i)
|
||||
{
|
||||
UnitTest& test = *tests [i];
|
||||
|
||||
if (test.getGroup () == name && test.getWhen () == UnitTest::runAlways)
|
||||
{
|
||||
runTest (test);
|
||||
}
|
||||
else if (test.getName () == name)
|
||||
{
|
||||
runTest (test);
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
void UnitTests::runAllTests ()
|
||||
{
|
||||
UnitTest::TestList& tests (UnitTest::getAllTests ());
|
||||
|
||||
results.clear();
|
||||
resultsUpdated();
|
||||
|
||||
@@ -115,22 +171,14 @@ void UnitTests::runTests (const Array<UnitTest*>& tests)
|
||||
if (shouldAbortTests())
|
||||
break;
|
||||
|
||||
try
|
||||
{
|
||||
tests.getUnchecked(i)->performTest (this);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
addFail ("An unhandled exception was thrown!");
|
||||
}
|
||||
UnitTest& test = *tests [i];
|
||||
|
||||
if (test.getWhen () == UnitTest::runAlways)
|
||||
runTest (test);
|
||||
}
|
||||
|
||||
endTest();
|
||||
}
|
||||
|
||||
void UnitTests::runAllTests()
|
||||
{
|
||||
runTests (UnitTest::getAllTests());
|
||||
}
|
||||
|
||||
void UnitTests::logMessage (const String& message)
|
||||
@@ -150,14 +198,14 @@ void UnitTests::beginNewTest (UnitTest* const test, const String& subCategory)
|
||||
|
||||
TestResult* const r = new TestResult();
|
||||
results.add (r);
|
||||
r->unitTestName = test->getName();
|
||||
r->unitTestName = test->getGroup() + "::" + test->getName();
|
||||
r->subcategoryName = subCategory;
|
||||
r->passes = 0;
|
||||
r->failures = 0;
|
||||
|
||||
logMessage ("Test: " + r->unitTestName + "/" + subCategory + "...");
|
||||
logMessage ("Test '" + r->unitTestName + "': " + subCategory);
|
||||
|
||||
resultsUpdated();
|
||||
resultsUpdated ();
|
||||
}
|
||||
|
||||
void UnitTests::endTest()
|
||||
@@ -214,8 +262,8 @@ void UnitTests::addFail (const String& failureMessage)
|
||||
|
||||
r->failures++;
|
||||
|
||||
String message ("!!! Test ");
|
||||
message << (r->failures + r->passes) << " failed";
|
||||
String message ("Failure, #");
|
||||
message << (r->failures + r->passes);
|
||||
|
||||
if (failureMessage.isNotEmpty())
|
||||
message << ": " << failureMessage;
|
||||
|
||||
@@ -28,7 +28,6 @@
#include "../containers/beast_OwnedArray.h"
class UnitTests;


/** This is a base class for classes that perform a unit test.

To write a test using this class, your code should look something like this:
@@ -56,9 +55,10 @@ class UnitTests;
}
};

// Explicit template instantiation is required to make the unit
// test get automatically added to the set of unit tests.
template class UnitTestType <MyTest>;
// This makes the unit test available in the global list
// It doesn't have to be static.
//
static MyTest myTest;

@endcode

@@ -69,15 +69,38 @@ class UnitTests;
class BEAST_API UnitTest : Uncopyable
{
public:
enum When
{
runAlways,
runManual
};

/** The type of a list of tests.
*/
typedef Array <UnitTest*, CriticalSection> TestList;

//==============================================================================
/** Creates a test with the given name. */
explicit UnitTest (String const& name);
/** Creates a test with the given name, group, and run option.

The group is used when you want to run all tests in a particular group
instead of all tests in general. The run option allows you to write some
tests that are only available manually. For example, a performance unit
test that takes a long time, which you might not want to run every time
you run all tests.
*/
explicit UnitTest (String const& name, String const& group = "", When when = runAlways);

/** Destructor. */
virtual ~UnitTest();

/** Returns the name of the test. */
const String& getName() const noexcept { return name; }
const String& getName() const noexcept { return m_name; }

/** Returns the group of the test. */
String const& getGroup () const noexcept { return m_group; }

/** Returns the run option of the test. */
When getWhen () const noexcept { return m_when; }

/** Runs the test, using the specified UnitTests.
You shouldn't need to call this method directly - use
@@ -86,7 +109,7 @@ public:
void performTest (UnitTests* runner);

/** Returns the set of all UnitTest objects that currently exist. */
static Array<UnitTest*>& getAllTests();
static TestList& getAllTests();

//==============================================================================
/** You can optionally implement this method to set up your test.
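
Editorial illustration, not part of the commit: a minimal sketch of a test that uses the new name/group/run-option constructor shown above. The class name and assertion are hypothetical; the registration pattern (a static instance) follows the documentation earlier in this header.

```
// Hypothetical example only: a test registered in the "beast" group that
// opts out of automatic runs via UnitTest::runManual.
class MySlowTest : public UnitTest
{
public:
    MySlowTest () : UnitTest ("MySlowTest", "beast", runManual)
    {
    }

    void runTest ()
    {
        beginTest ("Example");
        expect (1 + 1 == 2, "Should be equal");
    }
};

static MySlowTest mySlowTest;
```
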
@@ -155,14 +178,16 @@

//==============================================================================
/** Writes a message to the test log.
This can only be called from within your runTest() method.
This can only be called during your runTest() method.
*/
void logMessage (const String& message);

private:
//==============================================================================
const String name;
UnitTests* runner;
String const m_name;
String const m_group;
When const m_when;
UnitTests* m_runner;
};

//==============================================================================
@@ -187,12 +212,14 @@
/** Destructor. */
virtual ~UnitTests();

/** Runs a set of tests.
/** Run the specified unit test.

The tests are performed in order, and the results are logged. To run all the
registered UnitTest objects that exist, use runAllTests().
Subclasses can override this to do extra stuff.
*/
void runTests (const Array<UnitTest*>& tests);
virtual void runTest (UnitTest& test);

/** Run a particular test or group. */
void runTest (String const& name);

/** Runs all the UnitTest objects that currently exist.
This calls runTests() for all the objects listed in UnitTest::getAllTests().

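Editorial illustration, not part of the commit: a driver sketch for the runTest overloads above. It assumes UnitTests can be subclassed directly with no extra overrides (if the base declares pure-virtual hooks not shown in this diff, they would need bodies); the subclass name is hypothetical, while "beast" and "JSON" match names used elsewhere in this commit.

```
// Hypothetical example only.
struct ConsoleUnitTests : public UnitTests
{
    // Rely on the base class defaults for logging and result collection.
};

void runBeastTests ()
{
    ConsoleUnitTests runner;

    runner.runTest ("beast");   // every runAlways test whose group is "beast"
    runner.runTest ("JSON");    // or one specific test, matched by exact name
}
```
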
@@ -0,0 +1,56 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of Beast: https://github.com/vinniefalco/Beast
|
||||
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
class UnitTestUtilitiesTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
UnitTestUtilitiesTests () : UnitTest ("UnitTestUtilities", "beast")
|
||||
{
|
||||
}
|
||||
|
||||
void testPayload ()
|
||||
{
|
||||
using namespace UnitTestUtilities;
|
||||
|
||||
int const maxBufferSize = 4000;
|
||||
int const minimumBytes = 1;
|
||||
int const numberOfItems = 100;
|
||||
int64 const seedValue = 50;
|
||||
|
||||
beginTest ("Payload");
|
||||
|
||||
Payload p1 (maxBufferSize);
|
||||
Payload p2 (maxBufferSize);
|
||||
|
||||
for (int i = 0; i < numberOfItems; ++i)
|
||||
{
|
||||
p1.repeatableRandomFill (minimumBytes, maxBufferSize, seedValue);
|
||||
p2.repeatableRandomFill (minimumBytes, maxBufferSize, seedValue);
|
||||
|
||||
expect (p1 == p2, "Should be equal");
|
||||
}
|
||||
}
|
||||
|
||||
void runTest ()
|
||||
{
|
||||
testPayload ();
|
||||
}
|
||||
};
|
||||
|
||||
static UnitTestUtilitiesTests unitTestUtilitiesTests;
|
||||
@@ -0,0 +1,100 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of Beast: https://github.com/vinniefalco/Beast
|
||||
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef BEAST_UNITTESTUTILITIES_H_INCLUDED
|
||||
#define BEAST_UNITTESTUTILITIES_H_INCLUDED
|
||||
|
||||
#include "../maths/beast_Random.h"
|
||||
|
||||
namespace UnitTestUtilities
|
||||
{
|
||||
|
||||
/** Fairly shuffle an array pseudo-randomly.
|
||||
*/
|
||||
template <class T>
|
||||
void repeatableShuffle (int const numberOfItems, T& arrayOfItems, int64 seedValue)
|
||||
{
|
||||
Random r (seedValue);
|
||||
|
||||
for (int i = numberOfItems - 1; i > 0; --i)
|
||||
{
|
||||
int const choice = r.nextInt (i + 1);
|
||||
|
||||
std::swap (arrayOfItems [i], arrayOfItems [choice]);
|
||||
}
|
||||
}
|
||||
|
||||
/** A block of memory used for test data.
|
||||
*/
|
||||
struct Payload
|
||||
{
|
||||
/** Construct a payload with a buffer of the specified maximum size.
|
||||
|
||||
@param maxBufferSize The size of the buffer, in bytes.
|
||||
*/
|
||||
explicit Payload (int maxBufferSize)
|
||||
: bufferSize (maxBufferSize)
|
||||
, data (maxBufferSize)
|
||||
{
|
||||
}
|
||||
|
||||
/** Generate a random block of data within a certain size range.
|
||||
|
||||
@param minimumBytes The smallest number of bytes in the resulting payload.
|
||||
@param maximumBytes The largest number of bytes in the resulting payload.
|
||||
@param seedValue The value to seed the random number generator with.
|
||||
*/
|
||||
void repeatableRandomFill (int minimumBytes, int maximumBytes, int64 seedValue) noexcept
|
||||
{
|
||||
bassert (minimumBytes >=0 && maximumBytes <= bufferSize);
|
||||
|
||||
Random r (seedValue);
|
||||
|
||||
bytes = minimumBytes + r.nextInt (1 + maximumBytes - minimumBytes);
|
||||
|
||||
bassert (bytes >= minimumBytes && bytes <= bufferSize);
|
||||
|
||||
for (int i = 0; i < bytes; ++i)
|
||||
data [i] = static_cast <unsigned char> (r.nextInt ());
|
||||
}
|
||||
|
||||
/** Compare two payloads for equality.
|
||||
*/
|
||||
bool operator== (Payload const& other) const noexcept
|
||||
{
|
||||
if (bytes == other.bytes)
|
||||
{
|
||||
return memcmp (data.getData (), other.data.getData (), bytes) == 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
int const bufferSize;
|
||||
|
||||
int bytes;
|
||||
HeapBlock <char> data;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -926,7 +926,7 @@ MemoryMappedFile::MemoryMappedFile (const File& file, const Range<int64>& fileRa
|
||||
class FileTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
FileTests() : UnitTest ("File") {}
|
||||
FileTests() : UnitTest ("File", "beast") {}
|
||||
|
||||
void runTest()
|
||||
{
|
||||
@@ -1106,7 +1106,5 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
#if BEAST_UNIT_TESTS
|
||||
static FileTests fileTests;
|
||||
#endif
|
||||
|
||||
|
||||
@@ -114,7 +114,7 @@ bool FileOutputStream::write (const void* const src, const size_t numBytes)
|
||||
return true;
|
||||
}
|
||||
|
||||
void FileOutputStream::writeRepeatedByte (uint8 byte, size_t numBytes)
|
||||
bool FileOutputStream::writeRepeatedByte (uint8 byte, size_t numBytes)
|
||||
{
|
||||
bassert (((ssize_t) numBytes) >= 0);
|
||||
|
||||
@@ -123,9 +123,8 @@ void FileOutputStream::writeRepeatedByte (uint8 byte, size_t numBytes)
|
||||
memset (buffer + bytesInBuffer, byte, numBytes);
|
||||
bytesInBuffer += numBytes;
|
||||
currentPosition += numBytes;
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
OutputStream::writeRepeatedByte (byte, numBytes);
|
||||
}
|
||||
|
||||
return OutputStream::writeRepeatedByte (byte, numBytes);
|
||||
}
|
||||
@@ -27,7 +27,6 @@
|
||||
#include "beast_File.h"
|
||||
#include "../streams/beast_OutputStream.h"
|
||||
|
||||
|
||||
//==============================================================================
|
||||
/**
|
||||
An output stream that writes into a local file.
|
||||
@@ -87,11 +86,11 @@ public:
|
||||
Result truncate();
|
||||
|
||||
//==============================================================================
|
||||
void flush();
|
||||
int64 getPosition();
|
||||
bool setPosition (int64 pos);
|
||||
bool write (const void* data, size_t numBytes);
|
||||
void writeRepeatedByte (uint8 byte, size_t numTimesToRepeat);
|
||||
void flush() override;
|
||||
int64 getPosition() override;
|
||||
bool setPosition (int64) override;
|
||||
bool write (const void*, size_t) override;
|
||||
bool writeRepeatedByte (uint8 byte, size_t numTimesToRepeat) override;
|
||||
|
||||
|
||||
private:
|
||||
@@ -111,4 +110,4 @@ private:
|
||||
ssize_t writeInternal (const void*, size_t);
|
||||
};
|
||||
|
||||
#endif // BEAST_FILEOUTPUTSTREAM_BEASTHEADER
|
||||
#endif
|
||||
@@ -0,0 +1,272 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of Beast: https://github.com/vinniefalco/Beast
|
||||
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
RandomAccessFile::RandomAccessFile () noexcept
|
||||
: fileHandle (nullptr)
|
||||
, currentPosition (0)
|
||||
{
|
||||
}
|
||||
|
||||
RandomAccessFile::~RandomAccessFile ()
|
||||
{
|
||||
close ();
|
||||
}
|
||||
|
||||
Result RandomAccessFile::open (File const& path, Mode mode)
|
||||
{
|
||||
close ();
|
||||
|
||||
return nativeOpen (path, mode);
|
||||
}
|
||||
|
||||
void RandomAccessFile::close ()
|
||||
{
|
||||
if (isOpen ())
|
||||
{
|
||||
nativeFlush ();
|
||||
nativeClose ();
|
||||
}
|
||||
}
|
||||
|
||||
Result RandomAccessFile::setPosition (FileOffset newPosition)
|
||||
{
|
||||
if (newPosition != currentPosition)
|
||||
{
|
||||
// VFALCO NOTE I dislike return from the middle but
|
||||
// Result::ok() is showing up in the profile
|
||||
//
|
||||
return nativeSetPosition (newPosition);
|
||||
}
|
||||
|
||||
return Result::ok ();
|
||||
}
|
||||
|
||||
Result RandomAccessFile::read (void* buffer, ByteCount numBytes, ByteCount* pActualAmount)
|
||||
{
|
||||
return nativeRead (buffer, numBytes, pActualAmount);
|
||||
}
|
||||
|
||||
Result RandomAccessFile::write (const void* data, ByteCount numBytes, ByteCount* pActualAmount)
|
||||
{
|
||||
bassert (data != nullptr && ((ssize_t) numBytes) >= 0);
|
||||
|
||||
Result result (Result::ok ());
|
||||
|
||||
ByteCount amountWritten = 0;
|
||||
|
||||
result = nativeWrite (data, numBytes, &amountWritten);
|
||||
|
||||
if (result.wasOk ())
|
||||
currentPosition += amountWritten;
|
||||
|
||||
if (pActualAmount != nullptr)
|
||||
*pActualAmount = amountWritten;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
Result RandomAccessFile::truncate ()
|
||||
{
|
||||
Result result = flush ();
|
||||
|
||||
if (result.wasOk ())
|
||||
result = nativeTruncate ();
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
Result RandomAccessFile::flush ()
|
||||
{
|
||||
return nativeFlush ();
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
class RandomAccessFileTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
RandomAccessFileTests () : UnitTest ("RandomAccessFile", "beast")
|
||||
{
|
||||
}
|
||||
|
||||
enum
|
||||
{
|
||||
maxPayload = 8192
|
||||
};
|
||||
|
||||
/* For this test we will create a file which consists of a fixed
|
||||
number of variable length records. Each record is numbered sequentially
|
||||
starting at 0. To calculate the position of each record we first build
|
||||
a table of size/offset pairs using a pseudorandom number generator.
|
||||
*/
|
||||
struct Record
|
||||
{
|
||||
int index;
|
||||
int bytes;
|
||||
int offset;
|
||||
};
|
||||
|
||||
typedef HeapBlock <Record> Records;
|
||||
|
||||
// Produce the pseudo-random set of records.
|
||||
static void createRecords (HeapBlock <Record>& records,
|
||||
int numRecords,
|
||||
int maxBytes,
|
||||
int64 seedValue)
|
||||
{
|
||||
using namespace UnitTestUtilities;
|
||||
|
||||
Random r (seedValue);
|
||||
|
||||
records.malloc (numRecords);
|
||||
|
||||
int offset = 0;
|
||||
|
||||
for (int i = 0; i < numRecords; ++i)
|
||||
{
|
||||
int const bytes = r.nextInt (maxBytes) + 1;
|
||||
|
||||
records [i].index = i;
|
||||
records [i].bytes = bytes;
|
||||
records [i].offset = offset;
|
||||
|
||||
offset += bytes;
|
||||
}
|
||||
|
||||
repeatableShuffle (numRecords, records, seedValue);
|
||||
}
|
||||
|
||||
// Write all the records to the file.
|
||||
// The payload is pseudo-randomly generated.
|
||||
void writeRecords (RandomAccessFile& file,
|
||||
int numRecords,
|
||||
HeapBlock <Record> const& records,
|
||||
int64 seedValue)
|
||||
{
|
||||
using namespace UnitTestUtilities;
|
||||
|
||||
for (int i = 0; i < numRecords; ++i)
|
||||
{
|
||||
Payload p (records [i].bytes);
|
||||
|
||||
p.repeatableRandomFill (records [i].bytes,
|
||||
records [i].bytes,
|
||||
records [i].index + seedValue);
|
||||
|
||||
file.setPosition (records [i].offset);
|
||||
|
||||
Result result = file.write (p.data.getData (), p.bytes);
|
||||
|
||||
expect (result.wasOk (), "Should be ok");
|
||||
}
|
||||
}
|
||||
|
||||
// Read the records and verify the consistency.
|
||||
void readRecords (RandomAccessFile& file,
|
||||
int numRecords,
|
||||
HeapBlock <Record> const& records,
|
||||
int64 seedValue)
|
||||
{
|
||||
using namespace UnitTestUtilities;
|
||||
|
||||
for (int i = 0; i < numRecords; ++i)
|
||||
{
|
||||
Record const& record (records [i]);
|
||||
|
||||
int const bytes = record.bytes;
|
||||
|
||||
Payload p1 (bytes);
|
||||
Payload p2 (bytes);
|
||||
|
||||
p1.repeatableRandomFill (bytes, bytes, record.index + seedValue);
|
||||
|
||||
file.setPosition (record.offset);
|
||||
|
||||
Result result = file.read (p2.data.getData (), bytes);
|
||||
|
||||
expect (result.wasOk (), "Should be ok");
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
p2.bytes = bytes;
|
||||
|
||||
expect (p1 == p2, "Should be equal");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Perform the test at the given buffer size.
|
||||
void testFile (int const numRecords)
|
||||
{
|
||||
using namespace UnitTestUtilities;
|
||||
|
||||
int const seedValue = 50;
|
||||
|
||||
beginTest (String ("numRecords=") + String (numRecords));
|
||||
|
||||
// Calculate the path
|
||||
File const path (File::createTempFile ("RandomAccessFile"));
|
||||
|
||||
// Create a predictable set of records
|
||||
HeapBlock <Record> records (numRecords);
|
||||
createRecords (records, numRecords, maxPayload, seedValue);
|
||||
|
||||
Result result (Result::ok ());
|
||||
|
||||
{
|
||||
// Create the file
|
||||
RandomAccessFile file;
|
||||
result = file.open (path, RandomAccessFile::readWrite);
|
||||
expect (result.wasOk (), "Should be ok");
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
writeRecords (file, numRecords, records, seedValue);
|
||||
|
||||
readRecords (file, numRecords, records, seedValue);
|
||||
|
||||
repeatableShuffle (numRecords, records, seedValue);
|
||||
|
||||
readRecords (file, numRecords, records, seedValue);
|
||||
}
|
||||
}
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
// Re-open the file in read only mode
|
||||
RandomAccessFile file;
|
||||
result = file.open (path, RandomAccessFile::readOnly);
|
||||
expect (result.wasOk (), "Should be ok");
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
readRecords (file, numRecords, records, seedValue);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void runTest ()
|
||||
{
|
||||
testFile (10000);
|
||||
}
|
||||
|
||||
private:
|
||||
};
|
||||
|
||||
static RandomAccessFileTests randomAccessFileTests;
|
||||
Subtrees/beast/modules/beast_core/files/beast_RandomAccessFile.h (new file, 197 lines)
@@ -0,0 +1,197 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of Beast: https://github.com/vinniefalco/Beast
|
||||
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef BEAST_RANDOMACCESSFILE_H_INCLUDED
|
||||
#define BEAST_RANDOMACCESSFILE_H_INCLUDED
|
||||
|
||||
#include "../misc/beast_Result.h"
|
||||
|
||||
/** Provides random access reading and writing to an operating system file.
|
||||
|
||||
This class wraps the underlying native operating system routines for
|
||||
opening and closing a file for reading and/or writing, seeking within
|
||||
the file, and performing read and write operations. There are also methods
|
||||
provided for obtaining an input or output stream which will work with
|
||||
the file.
|
||||
|
||||
@note All files are opened in binary mode. No text newline conversions
|
||||
are performed.
|
||||
|
||||
@note None of these members are thread safe. The caller is responsible
|
||||
for synchronization.
|
||||
|
||||
@see FileInputStream, FileOutputStream
|
||||
*/
|
||||
class BEAST_API RandomAccessFile : Uncopyable, LeakChecked <RandomAccessFile>
|
||||
{
|
||||
public:
|
||||
/** The type of a FileOffset.
|
||||
|
||||
This can be useful when writing templates.
|
||||
*/
|
||||
typedef int64 FileOffset;
|
||||
|
||||
/** The type of a byte count.
|
||||
|
||||
This can be useful when writing templates.
|
||||
*/
|
||||
typedef size_t ByteCount;
|
||||
|
||||
/** The access mode.
|
||||
|
||||
@see open
|
||||
*/
|
||||
enum Mode
|
||||
{
|
||||
readOnly,
|
||||
readWrite
|
||||
};
|
||||
|
||||
//==============================================================================
|
||||
/** Creates an unopened file object.
|
||||
|
||||
@see open, isOpen
|
||||
*/
|
||||
RandomAccessFile () noexcept;
|
||||
|
||||
/** Destroy the file object.
|
||||
|
||||
If the operating system file is open it will be closed.
|
||||
*/
|
||||
~RandomAccessFile ();
|
||||
|
||||
/** Determine if a file is open.
|
||||
|
||||
@return `true` if the operating system file is open.
|
||||
*/
|
||||
bool isOpen () const noexcept { return fileHandle != nullptr; }
|
||||
|
||||
/** Opens a file object.
|
||||
|
||||
The file is opened with the specified permissions. The initial
|
||||
position is set to the beginning of the file.
|
||||
|
||||
@note If a file is already open, it will be closed first.
|
||||
|
||||
@param path The path to the file
|
||||
@param mode The access permissions
|
||||
@return An indication of the success of the operation.
|
||||
|
||||
@see Mode
|
||||
*/
|
||||
Result open (File const& path, Mode mode);
|
||||
|
||||
/** Closes the file object.
|
||||
|
||||
Any data that needs to be flushed will be written before the file is closed.
|
||||
|
||||
@note If no file is opened, this call does nothing.
|
||||
*/
|
||||
void close ();
|
||||
|
||||
/** Retrieve the @ref File associated with this object.
|
||||
|
||||
@return The associated @ref File.
|
||||
*/
|
||||
File const& getFile () const noexcept { return file; }
|
||||
|
||||
/** Get the current position.
|
||||
|
||||
The next read or write will take place from here.
|
||||
|
||||
@return The current position, as an absolute byte FileOffset from the beginning.
|
||||
*/
|
||||
FileOffset getPosition () const noexcept { return currentPosition; }
|
||||
|
||||
/** Set the current position.
|
||||
|
||||
The next read or write will take place at this location.
|
||||
|
||||
@param newPosition The byte FileOffset from the beginning of the file to move to.
|
||||
|
||||
@return `true` if the operation was successful.
|
||||
*/
|
||||
Result setPosition (FileOffset newPosition);
|
||||
|
||||
/** Read data at the current position.
|
||||
|
||||
The caller is responsible for making sure that the memory pointed to
|
||||
by `buffer` is at least as large as `bytesToRead`.
|
||||
|
||||
@note The file must have been opened with read permission.
|
||||
|
||||
@param buffer The memory to store the incoming data
|
||||
@param numBytes The number of bytes to read.
|
||||
@param pActualAmount Pointer to store the actual amount read, or `nullptr`.
|
||||
|
||||
@return `true` if all the bytes were read.
|
||||
*/
|
||||
Result read (void* buffer, ByteCount numBytes, ByteCount* pActualAmount = 0);
|
||||
|
||||
/** Write data at the current position.
|
||||
|
||||
The current position is advanced past the data written. If data is
|
||||
written past the end of the file, the file size is increased on disk.
|
||||
|
||||
The caller is responsible for making sure that the memory pointed to
|
||||
by `buffer` is at least as large as `bytesToWrite`.
|
||||
|
||||
@note The file must have been opened with write permission.
|
||||
|
||||
@param data A pointer to the data buffer to write to the file.
|
||||
@param numBytes The number of bytes to write.
|
||||
@param pActualAmount Pointer to store the actual amount written, or `nullptr`.
|
||||
|
||||
@return `true` if all the data was written.
|
||||
*/
|
||||
Result write (const void* data, ByteCount numBytes, ByteCount* pActualAmount = 0);
|
||||
|
||||
/** Truncate the file at the current position.
|
||||
*/
|
||||
Result truncate ();
|
||||
|
||||
/** Flush the output buffers.
|
||||
|
||||
This calls the operating system to make sure all data has been written.
|
||||
*/
|
||||
Result flush();
|
||||
|
||||
//==============================================================================
|
||||
private:
|
||||
// Some of these methods are implemented natively on
|
||||
// the corresponding platform.
|
||||
//
|
||||
// See beast_posix_SharedCode.h and beast_win32_Files.cpp
|
||||
//
|
||||
Result nativeOpen (File const& path, Mode mode);
|
||||
void nativeClose ();
|
||||
Result nativeSetPosition (FileOffset newPosition);
|
||||
Result nativeRead (void* buffer, ByteCount numBytes, ByteCount* pActualAmount = 0);
|
||||
Result nativeWrite (const void* data, ByteCount numBytes, ByteCount* pActualAmount = 0);
|
||||
Result nativeTruncate ();
|
||||
Result nativeFlush ();
|
||||
|
||||
private:
|
||||
File file;
|
||||
void* fileHandle;
|
||||
FileOffset currentPosition;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
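Editorial illustration, not part of the commit: a minimal usage sketch for the RandomAccessFile API declared in the new header above. The temp-file name and payload are made up, and error handling is reduced to checking Result::wasOk(); the open/setPosition/write/read/close pattern mirrors the unit test added in this commit.

```
// Hypothetical usage sketch for the new RandomAccessFile class.
void randomAccessFileSketch ()
{
    RandomAccessFile file;

    // Open (or create) a temporary file for reading and writing.
    Result result = file.open (File::createTempFile ("RandomAccessFileSketch"),
                               RandomAccessFile::readWrite);

    if (result.wasOk ())
    {
        char const payload [] = "hello";

        // Write at the beginning, then seek back and read it again.
        file.setPosition (0);
        file.write (payload, sizeof (payload));

        char buffer [sizeof (payload)] = { 0 };
        file.setPosition (0);
        file.read (buffer, sizeof (buffer));

        file.close ();
    }
}
```
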
@@ -535,7 +535,7 @@ void JSON::writeToStream (OutputStream& output, const var& data, const bool allO
|
||||
class JSONTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
JSONTests() : UnitTest ("JSON") { }
|
||||
JSONTests() : UnitTest ("JSON", "beast") { }
|
||||
|
||||
static String createRandomWideCharString (Random& r)
|
||||
{
|
||||
@@ -639,6 +639,4 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
#if BEAST_UNIT_TESTS
|
||||
static JSONTests jsonTests;
|
||||
#endif
|
||||
|
||||
@@ -24,6 +24,7 @@
|
||||
Random::Random (const int64 seedValue) noexcept
|
||||
: seed (seedValue)
|
||||
{
|
||||
nextInt (); // fixes a bug where the first int is always 0
|
||||
}
|
||||
|
||||
Random::Random()
|
||||
@@ -39,6 +40,8 @@ Random::~Random() noexcept
|
||||
void Random::setSeed (const int64 newSeed) noexcept
|
||||
{
|
||||
seed = newSeed;
|
||||
|
||||
nextInt (); // fixes a bug where the first int is always 0
|
||||
}
|
||||
|
||||
void Random::combineSeed (const int64 seedValue) noexcept
|
||||
@@ -56,6 +59,8 @@ void Random::setSeedRandomly()
|
||||
combineSeed (Time::getHighResolutionTicksPerSecond());
|
||||
combineSeed (Time::currentTimeMillis());
|
||||
globalSeed ^= seed;
|
||||
|
||||
nextInt (); // fixes a bug where the first int is always 0
|
||||
}
|
||||
|
||||
Random& Random::getSystemRandom() noexcept
|
||||
@@ -98,6 +103,23 @@ double Random::nextDouble() noexcept
|
||||
return static_cast <uint32> (nextInt()) / (double) 0xffffffff;
|
||||
}
|
||||
|
||||
void Random::nextBlob (void* buffer, size_t bytes)
|
||||
{
|
||||
int const remainder = bytes % sizeof (int64);
|
||||
|
||||
{
|
||||
int64* dest = static_cast <int64*> (buffer);
|
||||
for (int i = bytes / sizeof (int64); i > 0; --i)
|
||||
*dest++ = nextInt64 ();
|
||||
buffer = dest;
|
||||
}
|
||||
|
||||
{
|
||||
int64 const val = nextInt64 ();
|
||||
memcpy (buffer, &val, remainder);
|
||||
}
|
||||
}
|
||||
|
||||
BigInteger Random::nextLargeNumber (const BigInteger& maximumValue)
|
||||
{
|
||||
BigInteger n;
|
||||
@@ -137,7 +159,7 @@ void Random::fillBitsRandomly (BigInteger& arrayToChange, int startBit, int numB
|
||||
class RandomTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
RandomTests() : UnitTest ("Random") {}
|
||||
RandomTests() : UnitTest ("Random", "beast") {}
|
||||
|
||||
void runTest()
|
||||
{
|
||||
@@ -165,6 +187,4 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
#if BEAST_UNIT_TESTS
|
||||
static RandomTests randomTests;
|
||||
#endif
|
||||
|
||||
@@ -89,6 +89,10 @@ public:
|
||||
*/
|
||||
bool nextBool() noexcept;
|
||||
|
||||
/** Fills a piece of memory with random data.
|
||||
*/
|
||||
void nextBlob (void* buffer, size_t bytes);
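/*  A brief usage sketch: filling a fixed-size buffer with random bytes
    using the shared generator returned by getSystemRandom().

    @code
    char buffer [64];
    Random::getSystemRandom ().nextBlob (buffer, sizeof (buffer));
    @endcode
*/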
|
||||
|
||||
/** Returns a BigInteger containing a random number.
|
||||
|
||||
@returns a random value in the range 0 to (maximumValue - 1).
|
||||
|
||||
@@ -0,0 +1,126 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of Beast: https://github.com/vinniefalco/Beast
|
||||
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef BEAST_RECYCLEDOBJECTPOOL_H_INCLUDED
|
||||
#define BEAST_RECYCLEDOBJECTPOOL_H_INCLUDED
|
||||
|
||||
/** A pool of objects which may be recycled.
|
||||
|
||||
This is a thread safe pool of objects that get re-used. It is
|
||||
primarily designed to eliminate the need for many memory allocations
|
||||
and frees when temporary buffers are needed for operations.
|
||||
|
||||
To use it, first declare a structure containing the information
|
||||
that you want to recycle. Then when you want to use a recycled object
|
||||
put a ScopedItem on your stack:
|
||||
|
||||
@code
|
||||
|
||||
struct StdString
|
||||
{
|
||||
std::string data;
|
||||
};
|
||||
|
||||
RecycledObjectPool <StdString> pool;
|
||||
|
||||
void foo ()
|
||||
{
|
||||
RecycledObjectPool <StdString>::ScopedItem item (pool);
|
||||
|
||||
item.getObject ().data = "text";
|
||||
}
|
||||
|
||||
@endcode
|
||||
*/
|
||||
template <class Object>
|
||||
class RecycledObjectPool
|
||||
{
|
||||
public:
|
||||
struct Item : Object, LockFreeStack <Item>::Node, LeakChecked <Item>
|
||||
{
|
||||
};
|
||||
|
||||
class ScopedItem
|
||||
{
|
||||
public:
|
||||
explicit ScopedItem (RecycledObjectPool <Object>& pool)
|
||||
: m_pool (pool)
|
||||
, m_item (pool.get ())
|
||||
{
|
||||
}
|
||||
|
||||
~ScopedItem ()
|
||||
{
|
||||
m_pool.release (m_item);
|
||||
}
|
||||
|
||||
Object& getObject () noexcept
|
||||
{
|
||||
return *m_item;
|
||||
}
|
||||
|
||||
private:
|
||||
RecycledObjectPool <Object>& m_pool;
|
||||
Item* const m_item;
|
||||
};
|
||||
|
||||
public:
|
||||
RecycledObjectPool () noexcept
|
||||
{
|
||||
}
|
||||
|
||||
~RecycledObjectPool ()
|
||||
{
|
||||
for (;;)
|
||||
{
|
||||
Item* const item = m_stack.pop_front ();
|
||||
|
||||
if (item != nullptr)
|
||||
delete item;
|
||||
else
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
Item* get ()
|
||||
{
|
||||
Item* item = m_stack.pop_front ();
|
||||
|
||||
if (item == nullptr)
|
||||
{
|
||||
item = new Item;
|
||||
|
||||
if (item == nullptr)
|
||||
Throw (std::bad_alloc ());
|
||||
}
|
||||
|
||||
return item;
|
||||
}
|
||||
|
||||
void release (Item* item) noexcept
|
||||
{
|
||||
m_stack.push_front (item);
|
||||
}
|
||||
|
||||
private:
|
||||
LockFreeStack <Item> m_stack;
|
||||
};
|
||||
|
||||
#endif
|
||||
@@ -45,13 +45,16 @@
|
||||
|
||||
@code
|
||||
|
||||
class MyClass : Uncopyable
|
||||
class MyClass : public Uncopyable
|
||||
{
|
||||
public:
|
||||
//...
|
||||
};
|
||||
|
||||
@endcode
|
||||
|
||||
@note The derivation should be public or else child classes which
|
||||
also derive from Uncopyable may not compile.
|
||||
*/
|
||||
class Uncopyable
|
||||
{
|
||||
|
||||
@@ -21,6 +21,8 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
Result::Result() noexcept {}
|
||||
|
||||
Result::Result (const String& message) noexcept
|
||||
: errorMessage (message)
|
||||
{
|
||||
@@ -60,11 +62,6 @@ bool Result::operator!= (const Result& other) const noexcept
|
||||
return errorMessage != other.errorMessage;
|
||||
}
|
||||
|
||||
Result Result::ok() noexcept
|
||||
{
|
||||
return Result (String::empty);
|
||||
}
|
||||
|
||||
Result Result::fail (const String& errorMessage) noexcept
|
||||
{
|
||||
return Result (errorMessage.isEmpty() ? "Unknown Error" : errorMessage);
|
||||
|
||||
@@ -26,10 +26,7 @@
|
||||
|
||||
#include "../text/beast_String.h"
|
||||
|
||||
|
||||
//==============================================================================
|
||||
/**
|
||||
Represents the 'success' or 'failure' of an operation, and holds an associated
|
||||
/** Represents the 'success' or 'failure' of an operation, and holds an associated
|
||||
error message to describe the error when there's a failure.
|
||||
|
||||
E.g.
|
||||
@@ -60,7 +57,7 @@ class BEAST_API Result
|
||||
public:
|
||||
//==============================================================================
|
||||
/** Creates and returns a 'successful' result. */
|
||||
static Result ok() noexcept;
|
||||
static Result ok() noexcept { return Result(); }
|
||||
|
||||
/** Creates a 'failure' result.
|
||||
If you pass a blank error message in here, a default "Unknown Error" message
|
||||
@@ -94,12 +91,12 @@ public:
|
||||
const String& getErrorMessage() const noexcept;
|
||||
|
||||
//==============================================================================
|
||||
Result (const Result& other);
|
||||
Result& operator= (const Result& other);
|
||||
Result (const Result&);
|
||||
Result& operator= (const Result&);
|
||||
|
||||
#if BEAST_COMPILER_SUPPORTS_MOVE_SEMANTICS
|
||||
Result (Result&& other) noexcept;
|
||||
Result& operator= (Result&& other) noexcept;
|
||||
Result (Result&&) noexcept;
|
||||
Result& operator= (Result&&) noexcept;
|
||||
#endif
|
||||
|
||||
bool operator== (const Result& other) const noexcept;
|
||||
@@ -108,6 +105,9 @@ public:
|
||||
private:
|
||||
String errorMessage;
|
||||
|
||||
// The default constructor is not for public use!
|
||||
// Instead, use Result::ok() or Result::fail()
|
||||
Result() noexcept;
|
||||
explicit Result (const String&) noexcept;
|
||||
|
||||
// These casts are private to prevent people trying to use the Result object in numeric contexts
|
||||
@@ -115,5 +115,5 @@ private:
|
||||
operator void*() const;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
#endif // BEAST_RESULT_BEASTHEADER
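// A minimal sketch of producing and checking a Result, using only members
// visible in this header; checkSize() is a hypothetical caller-side helper.
//
// @code
// Result checkSize (int64 size)
// {
//     if (size < 0)
//         return Result::fail ("negative size");
//
//     return Result::ok ();
// }
//
// void use ()
// {
//     Result const result = checkSize (-1);
//
//     if (result.failed ())
//         String const message = result.getErrorMessage ();  // report it
// }
// @endcode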
|
||||
|
||||
@@ -504,6 +504,184 @@ Result FileOutputStream::truncate()
|
||||
return getResultForReturnValue (ftruncate (getFD (fileHandle), (off_t) currentPosition));
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
|
||||
Result RandomAccessFile::nativeOpen (File const& path, Mode mode)
|
||||
{
|
||||
bassert (! isOpen ());
|
||||
|
||||
Result result (Result::ok ());
|
||||
|
||||
if (path.exists())
|
||||
{
|
||||
int oflag;
|
||||
switch (mode)
|
||||
{
|
||||
case readOnly:
|
||||
oflag = O_RDONLY;
|
||||
break;
|
||||
|
||||
default:
|
||||
case readWrite:
|
||||
oflag = O_RDWR;
|
||||
break;
|
||||
};
|
||||
|
||||
const int f = ::open (path.getFullPathName().toUTF8(), oflag, 00644);
|
||||
|
||||
if (f != -1)
|
||||
{
|
||||
currentPosition = lseek (f, 0, SEEK_SET);
|
||||
|
||||
if (currentPosition >= 0)
|
||||
{
|
||||
file = path;
|
||||
fileHandle = fdToVoidPointer (f);
|
||||
}
|
||||
else
|
||||
{
|
||||
result = getResultForErrno();
|
||||
::close (f);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
result = getResultForErrno();
|
||||
}
|
||||
}
|
||||
else if (mode == readWrite)
|
||||
{
|
||||
const int f = ::open (path.getFullPathName().toUTF8(), O_RDWR | O_CREAT, 00644);
|
||||
|
||||
if (f != -1)
|
||||
{
|
||||
file = path;
|
||||
fileHandle = fdToVoidPointer (f);
|
||||
}
|
||||
else
|
||||
{
|
||||
result = getResultForErrno();
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// file doesn't exist and we're opening read-only
|
||||
result = Result::fail (String (strerror (ENOENT)));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void RandomAccessFile::nativeClose ()
|
||||
{
|
||||
bassert (isOpen ());
|
||||
|
||||
file = File::nonexistent ();
|
||||
::close (getFD (fileHandle));
|
||||
fileHandle = nullptr;
|
||||
currentPosition = 0;
|
||||
}
|
||||
|
||||
Result RandomAccessFile::nativeSetPosition (FileOffset newPosition)
|
||||
{
|
||||
bassert (isOpen ());
|
||||
|
||||
off_t const actualPosition = lseek (getFD (fileHandle), newPosition, SEEK_SET);
|
||||
|
||||
currentPosition = actualPosition;
|
||||
|
||||
if (actualPosition != newPosition)
|
||||
{
|
||||
// VFALCO NOTE I dislike return from the middle but
|
||||
// Result::ok() is showing up in the profile
|
||||
//
|
||||
return getResultForErrno();
|
||||
}
|
||||
|
||||
return Result::ok();
|
||||
}
|
||||
|
||||
Result RandomAccessFile::nativeRead (void* buffer, ByteCount numBytes, ByteCount* pActualAmount)
|
||||
{
|
||||
bassert (isOpen ());
|
||||
|
||||
ssize_t bytesRead = ::read (getFD (fileHandle), buffer, numBytes);
|
||||
|
||||
if (bytesRead < 0)
|
||||
{
|
||||
if (pActualAmount != nullptr)
|
||||
*pActualAmount = 0;
|
||||
|
||||
// VFALCO NOTE I dislike return from the middle but
|
||||
// Result::ok() is showing up in the profile
|
||||
//
|
||||
return getResultForErrno();
|
||||
}
|
||||
|
||||
currentPosition += bytesRead;
|
||||
|
||||
if (pActualAmount != nullptr)
|
||||
*pActualAmount = bytesRead;
|
||||
|
||||
return Result::ok();
|
||||
}
|
||||
|
||||
Result RandomAccessFile::nativeWrite (void const* data, ByteCount numBytes, size_t* pActualAmount)
|
||||
{
|
||||
bassert (isOpen ());
|
||||
|
||||
ssize_t bytesWritten = ::write (getFD (fileHandle), data, numBytes);
|
||||
|
||||
// write(3) says that the actual return will be exactly -1 on
|
||||
// error, but we will assume anything negative indicates failure.
|
||||
//
|
||||
if (bytesWritten < 0)
|
||||
{
|
||||
if (pActualAmount != nullptr)
|
||||
*pActualAmount = 0;
|
||||
|
||||
// VFALCO NOTE I dislike return from the middle but
|
||||
// Result::ok() is showing up in the profile
|
||||
//
|
||||
return getResultForErrno();
|
||||
}
|
||||
|
||||
if (pActualAmount != nullptr)
|
||||
*pActualAmount = bytesWritten;
|
||||
|
||||
return Result::ok();
|
||||
}
|
||||
|
||||
Result RandomAccessFile::nativeTruncate ()
|
||||
{
|
||||
bassert (isOpen ());
|
||||
|
||||
flush();
|
||||
|
||||
return getResultForReturnValue (ftruncate (getFD (fileHandle), (off_t) currentPosition));
|
||||
}
|
||||
|
||||
Result RandomAccessFile::nativeFlush ()
|
||||
{
|
||||
bassert (isOpen ());
|
||||
|
||||
Result result (Result::ok ());
|
||||
|
||||
if (fsync (getFD (fileHandle)) == -1)
|
||||
result = getResultForErrno();
|
||||
|
||||
#if BEAST_ANDROID
|
||||
// This stuff tells the OS to asynchronously update the metadata
|
||||
// that the OS has cached about the file - this metadata is used
|
||||
// when the device is acting as a USB drive, and unless it's explicitly
|
||||
// refreshed, it'll get out of step with the real file.
|
||||
const LocalRef<jstring> t (javaString (file.getFullPathName()));
|
||||
android.activity.callVoidMethod (BeastAppActivity.scanFile, t.get());
|
||||
#endif
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
String SystemStats::getEnvironmentVariable (const String& name, const String& defaultValue)
|
||||
{
|
||||
|
||||
@@ -307,6 +307,163 @@ Result FileOutputStream::truncate()
|
||||
: WindowsFileHelpers::getResultForLastError();
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
|
||||
Result RandomAccessFile::nativeOpen (File const& path, Mode mode)
|
||||
{
|
||||
bassert (! isOpen ());
|
||||
|
||||
Result result (Result::ok ());
|
||||
|
||||
DWORD dwDesiredAccess;
|
||||
switch (mode)
|
||||
{
|
||||
case readOnly:
|
||||
dwDesiredAccess = GENERIC_READ;
|
||||
break;
|
||||
|
||||
default:
|
||||
case readWrite:
|
||||
dwDesiredAccess = GENERIC_READ | GENERIC_WRITE;
|
||||
break;
|
||||
};
|
||||
|
||||
DWORD dwCreationDisposition;
|
||||
switch (mode)
|
||||
{
|
||||
case readOnly:
|
||||
dwCreationDisposition = OPEN_EXISTING;
|
||||
break;
|
||||
|
||||
default:
|
||||
case readWrite:
|
||||
dwCreationDisposition = OPEN_ALWAYS;
|
||||
break;
|
||||
};
|
||||
|
||||
HANDLE h = CreateFile (path.getFullPathName().toWideCharPointer(),
|
||||
dwDesiredAccess,
|
||||
FILE_SHARE_READ,
|
||||
0,
|
||||
dwCreationDisposition,
|
||||
FILE_ATTRIBUTE_NORMAL,
|
||||
0);
|
||||
|
||||
if (h != INVALID_HANDLE_VALUE)
|
||||
{
|
||||
file = path;
|
||||
fileHandle = h;
|
||||
|
||||
result = setPosition (0);
|
||||
|
||||
if (result.failed ())
|
||||
nativeClose ();
|
||||
}
|
||||
else
|
||||
{
|
||||
result = WindowsFileHelpers::getResultForLastError();
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void RandomAccessFile::nativeClose ()
|
||||
{
|
||||
bassert (isOpen ());
|
||||
|
||||
CloseHandle ((HANDLE) fileHandle);
|
||||
|
||||
file = File::nonexistent ();
|
||||
fileHandle = nullptr;
|
||||
currentPosition = 0;
|
||||
}
|
||||
|
||||
Result RandomAccessFile::nativeSetPosition (FileOffset newPosition)
|
||||
{
|
||||
bassert (isOpen ());
|
||||
|
||||
Result result (Result::ok ());
|
||||
|
||||
LARGE_INTEGER li;
|
||||
li.QuadPart = newPosition;
|
||||
li.LowPart = SetFilePointer ((HANDLE) fileHandle,
|
||||
(LONG) li.LowPart,
|
||||
&li.HighPart,
|
||||
FILE_BEGIN);
|
||||
|
||||
if (li.LowPart != INVALID_SET_FILE_POINTER)
|
||||
{
|
||||
currentPosition = li.QuadPart;
|
||||
}
|
||||
else
|
||||
{
|
||||
result = WindowsFileHelpers::getResultForLastError();
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
Result RandomAccessFile::nativeRead (void* buffer, ByteCount numBytes, ByteCount* pActualAmount)
|
||||
{
|
||||
bassert (isOpen ());
|
||||
|
||||
Result result (Result::ok ());
|
||||
|
||||
DWORD actualNum = 0;
|
||||
|
||||
if (! ReadFile ((HANDLE) fileHandle, buffer, (DWORD) numBytes, &actualNum, 0))
|
||||
result = WindowsFileHelpers::getResultForLastError();
|
||||
|
||||
currentPosition += actualNum;
|
||||
|
||||
if (pActualAmount != nullptr)
|
||||
*pActualAmount = actualNum;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
Result RandomAccessFile::nativeWrite (void const* data, ByteCount numBytes, size_t* pActualAmount)
|
||||
{
|
||||
bassert (isOpen ());
|
||||
|
||||
Result result (Result::ok ());
|
||||
|
||||
DWORD actualNum = 0;
|
||||
|
||||
if (! WriteFile ((HANDLE) fileHandle, data, (DWORD) numBytes, &actualNum, 0))
|
||||
result = WindowsFileHelpers::getResultForLastError();
|
||||
|
||||
if (pActualAmount != nullptr)
|
||||
*pActualAmount = actualNum;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
Result RandomAccessFile::nativeTruncate ()
|
||||
{
|
||||
bassert (isOpen ());
|
||||
|
||||
Result result (Result::ok ());
|
||||
|
||||
if (! SetEndOfFile ((HANDLE) fileHandle))
|
||||
result = WindowsFileHelpers::getResultForLastError();
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
Result RandomAccessFile::nativeFlush ()
|
||||
{
|
||||
bassert (isOpen ());
|
||||
|
||||
Result result (Result::ok ());
|
||||
|
||||
if (! FlushFileBuffers ((HANDLE) fileHandle))
|
||||
result = WindowsFileHelpers::getResultForLastError();
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
//==============================================================================
|
||||
void MemoryMappedFile::openInternal (const File& file, AccessMode mode)
|
||||
{
|
||||
|
||||
@@ -65,6 +65,8 @@ short InputStream::readShortBigEndian()
|
||||
|
||||
int InputStream::readInt()
|
||||
{
|
||||
static_bassert (sizeof (int) == 4);
|
||||
|
||||
char temp[4];
|
||||
|
||||
if (read (temp, 4) == 4)
|
||||
@@ -73,6 +75,16 @@ int InputStream::readInt()
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32 InputStream::readInt32()
|
||||
{
|
||||
char temp[4];
|
||||
|
||||
if (read (temp, 4) == 4)
|
||||
return (int32) ByteOrder::littleEndianInt (temp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int InputStream::readIntBigEndian()
|
||||
{
|
||||
char temp[4];
|
||||
@@ -83,6 +95,16 @@ int InputStream::readIntBigEndian()
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32 InputStream::readInt32BigEndian()
|
||||
{
|
||||
char temp[4];
|
||||
|
||||
if (read (temp, 4) == 4)
|
||||
return (int32) ByteOrder::bigEndianInt (temp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int InputStream::readCompressedInt()
|
||||
{
|
||||
const uint8 sizeByte = (uint8) readByte();
|
||||
@@ -229,3 +251,71 @@ void InputStream::skipNextBytes (int64 numBytesToSkip)
|
||||
numBytesToSkip -= read (temp, (int) bmin (numBytesToSkip, (int64) skipBufferSize));
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Unfortunately, putting these in the header causes duplicate
|
||||
// definition linker errors, even with the inline keyword!
|
||||
|
||||
template <>
|
||||
char InputStream::readType <char> () { return readByte (); }
|
||||
|
||||
template <>
|
||||
short InputStream::readType <short> () { return readShort (); }
|
||||
|
||||
template <>
|
||||
int32 InputStream::readType <int32> () { return readInt32 (); }
|
||||
|
||||
template <>
|
||||
int64 InputStream::readType <int64> () { return readInt64 (); }
|
||||
|
||||
template <>
|
||||
unsigned char InputStream::readType <unsigned char> () { return static_cast <unsigned char> (readByte ()); }
|
||||
|
||||
template <>
|
||||
unsigned short InputStream::readType <unsigned short> () { return static_cast <unsigned short> (readShort ()); }
|
||||
|
||||
template <>
|
||||
uint32 InputStream::readType <uint32> () { return static_cast <uint32> (readInt32 ()); }
|
||||
|
||||
template <>
|
||||
uint64 InputStream::readType <uint64> () { return static_cast <uint64> (readInt64 ()); }
|
||||
|
||||
template <>
|
||||
float InputStream::readType <float> () { return readFloat (); }
|
||||
|
||||
template <>
|
||||
double InputStream::readType <double> () { return readDouble (); }
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
template <>
|
||||
char InputStream::readTypeBigEndian <char> () { return readByte (); }
|
||||
|
||||
template <>
|
||||
short InputStream::readTypeBigEndian <short> () { return readShortBigEndian (); }
|
||||
|
||||
template <>
|
||||
int32 InputStream::readTypeBigEndian <int32> () { return readInt32BigEndian (); }
|
||||
|
||||
template <>
|
||||
int64 InputStream::readTypeBigEndian <int64> () { return readInt64BigEndian (); }
|
||||
|
||||
template <>
|
||||
unsigned char InputStream::readTypeBigEndian <unsigned char> () { return static_cast <unsigned char> (readByte ()); }
|
||||
|
||||
template <>
|
||||
unsigned short InputStream::readTypeBigEndian <unsigned short> () { return static_cast <unsigned short> (readShortBigEndian ()); }
|
||||
|
||||
template <>
|
||||
uint32 InputStream::readTypeBigEndian <uint32> () { return static_cast <uint32> (readInt32BigEndian ()); }
|
||||
|
||||
template <>
|
||||
uint64 InputStream::readTypeBigEndian <uint64> () { return static_cast <uint64> (readInt64BigEndian ()); }
|
||||
|
||||
template <>
|
||||
float InputStream::readTypeBigEndian <float> () { return readFloatBigEndian (); }
|
||||
|
||||
template <>
|
||||
double InputStream::readTypeBigEndian <double> () { return readDoubleBigEndian (); }
|
||||
|
||||
|
||||
@@ -92,7 +92,7 @@ public:
|
||||
|
||||
/** Reads a boolean from the stream.
|
||||
|
||||
The bool is encoded as a single byte - 1 for true, 0 for false.
|
||||
The bool is encoded as a single byte - non-zero for true, 0 for false.
|
||||
|
||||
If the stream is exhausted, this will return false.
|
||||
|
||||
@@ -111,10 +111,13 @@ public:
|
||||
*/
|
||||
virtual short readShort();
|
||||
|
||||
// VFALCO TODO Implement these functions
|
||||
//virtual int16 readInt16 ();
|
||||
//virtual uint16 readUInt16 ();
|
||||
|
||||
/** Reads two bytes from the stream as a little-endian 16-bit value.
|
||||
|
||||
If the next two bytes read are byte1 and byte2, this returns
|
||||
(byte2 | (byte1 << 8)).
|
||||
If the next two bytes read are byte1 and byte2, this returns (byte1 | (byte2 << 8)).
|
||||
|
||||
If the stream is exhausted partway through reading the bytes, this will return zero.
|
||||
|
||||
@@ -131,6 +134,13 @@ public:
|
||||
|
||||
@see OutputStream::writeInt, readIntBigEndian
|
||||
*/
|
||||
virtual int32 readInt32();
|
||||
|
||||
// VFALCO TODO Implement these functions
|
||||
//virtual int16 readInt16BigEndian ();
|
||||
//virtual uint16 readUInt16BigEndian ();
|
||||
|
||||
// DEPRECATED, assumes sizeof(int) == 4!
|
||||
virtual int readInt();
|
||||
|
||||
/** Reads four bytes from the stream as a big-endian 32-bit value.
|
||||
@@ -142,6 +152,9 @@ public:
|
||||
|
||||
@see OutputStream::writeIntBigEndian, readInt
|
||||
*/
|
||||
virtual int32 readInt32BigEndian();
|
||||
|
||||
// DEPRECATED, assumes sizeof(int) == 4!
|
||||
virtual int readIntBigEndian();
|
||||
|
||||
/** Reads eight bytes from the stream as a little-endian 64-bit value.
|
||||
@@ -216,6 +229,49 @@ public:
|
||||
*/
|
||||
virtual int readCompressedInt();
|
||||
|
||||
/** Reads a type using a template specialization.
|
||||
|
||||
This is useful when doing template meta-programming.
|
||||
*/
|
||||
template <class T>
|
||||
T readType ();
|
||||
|
||||
/** Reads a type using a template specialization.
|
||||
|
||||
The variable is passed as a parameter so that the template type
|
||||
can be deduced.
|
||||
|
||||
This is useful when doing template meta-programming.
|
||||
*/
|
||||
template <class T>
|
||||
void readTypeInto (T* p)
|
||||
{
|
||||
*p = readType <T> ();
|
||||
}
|
||||
|
||||
/** Reads a type from a big endian stream using a template specialization.
|
||||
|
||||
The raw encoding of the type is read from the stream as a big-endian value
|
||||
where applicable.
|
||||
|
||||
This is useful when doing template meta-programming.
|
||||
*/
|
||||
template <class T>
|
||||
T readTypeBigEndian ();
|
||||
|
||||
/** Reads a type using a template specialization.
|
||||
|
||||
The variable is passed as a parameter so that the template type
|
||||
can be deduced.
|
||||
|
||||
This is useful when doing template meta-programming.
|
||||
*/
|
||||
template <class T>
|
||||
void readTypeBigEndianInto (T* p)
|
||||
{
|
||||
*p = readTypeBigEndian <T> ();
|
||||
}
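/** A brief sketch of the typed readers in use (generic over the integer
    width, given any concrete InputStream):

    @code
    void readHeader (InputStream& stream)
    {
        uint32 const length = stream.readTypeBigEndian <uint32> ();

        int64 payloadId;
        stream.readTypeInto <int64> (&payloadId);  // little-endian form
    }
    @endcode
*/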
|
||||
|
||||
//==============================================================================
|
||||
/** Reads a UTF-8 string from the stream, up to the next linefeed or carriage return.
|
||||
|
||||
@@ -289,4 +345,4 @@ protected:
|
||||
InputStream() noexcept {}
|
||||
};
|
||||
|
||||
#endif // BEAST_INPUTSTREAM_BEASTHEADER
|
||||
#endif
|
||||
|
||||
@@ -92,7 +92,7 @@ int64 MemoryInputStream::getPosition()
|
||||
class MemoryStreamTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
MemoryStreamTests() : UnitTest ("MemoryStream") { }
|
||||
MemoryStreamTests() : UnitTest ("MemoryStream", "beast") { }
|
||||
|
||||
void runTest()
|
||||
{
|
||||
@@ -148,6 +148,4 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
#if BEAST_UNIT_TESTS
|
||||
static MemoryStreamTests memoryStreamTests;
|
||||
#endif
|
||||
|
||||
@@ -22,23 +22,28 @@
|
||||
//==============================================================================
|
||||
|
||||
MemoryOutputStream::MemoryOutputStream (const size_t initialSize)
|
||||
: data (internalBlock),
|
||||
position (0),
|
||||
size (0)
|
||||
: blockToUse (&internalBlock), externalData (nullptr),
|
||||
position (0), size (0), availableSize (0)
|
||||
{
|
||||
internalBlock.setSize (initialSize, false);
|
||||
}
|
||||
|
||||
MemoryOutputStream::MemoryOutputStream (MemoryBlock& memoryBlockToWriteTo,
|
||||
const bool appendToExistingBlockContent)
|
||||
: data (memoryBlockToWriteTo),
|
||||
position (0),
|
||||
size (0)
|
||||
: blockToUse (&memoryBlockToWriteTo), externalData (nullptr),
|
||||
position (0), size (0), availableSize (0)
|
||||
{
|
||||
if (appendToExistingBlockContent)
|
||||
position = size = memoryBlockToWriteTo.getSize();
|
||||
}
|
||||
|
||||
MemoryOutputStream::MemoryOutputStream (void* destBuffer, size_t destBufferSize)
|
||||
: blockToUse (nullptr), externalData (destBuffer),
|
||||
position (0), size (0), availableSize (destBufferSize)
|
||||
{
|
||||
bassert (externalData != nullptr); // This must be a valid pointer.
|
||||
}
|
||||
|
||||
MemoryOutputStream::~MemoryOutputStream()
|
||||
{
|
||||
trimExternalBlockSize();
|
||||
@@ -51,13 +56,14 @@ void MemoryOutputStream::flush()
|
||||
|
||||
void MemoryOutputStream::trimExternalBlockSize()
|
||||
{
|
||||
if (&data != &internalBlock)
|
||||
data.setSize (size, false);
|
||||
if (blockToUse != &internalBlock && blockToUse != nullptr)
|
||||
blockToUse->setSize (size, false);
|
||||
}
|
||||
|
||||
void MemoryOutputStream::preallocate (const size_t bytesToPreallocate)
|
||||
{
|
||||
data.ensureSize (bytesToPreallocate + 1);
|
||||
if (blockToUse != nullptr)
|
||||
blockToUse->ensureSize (bytesToPreallocate + 1);
|
||||
}
|
||||
|
||||
void MemoryOutputStream::reset() noexcept
|
||||
@@ -71,10 +77,24 @@ char* MemoryOutputStream::prepareToWrite (size_t numBytes)
|
||||
bassert ((ssize_t) numBytes >= 0);
|
||||
size_t storageNeeded = position + numBytes;
|
||||
|
||||
if (storageNeeded >= data.getSize())
|
||||
data.ensureSize ((storageNeeded + bmin (storageNeeded / 2, (size_t) (1024 * 1024)) + 32) & ~31u);
|
||||
char* data;
|
||||
|
||||
char* const writePointer = static_cast <char*> (data.getData()) + position;
|
||||
if (blockToUse != nullptr)
|
||||
{
|
||||
if (storageNeeded >= blockToUse->getSize())
|
||||
blockToUse->ensureSize ((storageNeeded + bmin (storageNeeded / 2, (size_t) (1024 * 1024)) + 32) & ~31u);
|
||||
|
||||
data = static_cast <char*> (blockToUse->getData());
|
||||
}
|
||||
else
|
||||
{
|
||||
if (storageNeeded > availableSize)
|
||||
return nullptr;
|
||||
|
||||
data = static_cast <char*> (externalData);
|
||||
}
|
||||
|
||||
char* const writePointer = data + position;
|
||||
position += numBytes;
|
||||
size = bmax (size, position);
|
||||
return writePointer;
|
||||
@@ -82,23 +102,43 @@ char* MemoryOutputStream::prepareToWrite (size_t numBytes)
|
||||
|
||||
bool MemoryOutputStream::write (const void* const buffer, size_t howMany)
|
||||
{
|
||||
bassert (buffer != nullptr && ((ssize_t) howMany) >= 0);
|
||||
|
||||
if (howMany > 0)
|
||||
memcpy (prepareToWrite (howMany), buffer, howMany);
|
||||
bassert (buffer != nullptr);
|
||||
|
||||
if (howMany == 0)
|
||||
return true;
|
||||
|
||||
if (char* dest = prepareToWrite (howMany))
|
||||
{
|
||||
memcpy (dest, buffer, howMany);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void MemoryOutputStream::writeRepeatedByte (uint8 byte, size_t howMany)
|
||||
bool MemoryOutputStream::writeRepeatedByte (uint8 byte, size_t howMany)
|
||||
{
|
||||
if (howMany > 0)
|
||||
memset (prepareToWrite (howMany), byte, howMany);
|
||||
if (howMany == 0)
|
||||
return true;
|
||||
|
||||
if (char* dest = prepareToWrite (howMany))
|
||||
{
|
||||
memset (dest, byte, howMany);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void MemoryOutputStream::appendUTF8Char (beast_wchar c)
|
||||
bool MemoryOutputStream::appendUTF8Char (beast_wchar c)
|
||||
{
|
||||
CharPointer_UTF8 (prepareToWrite (CharPointer_UTF8::getBytesRequiredFor (c))).write (c);
|
||||
if (char* dest = prepareToWrite (CharPointer_UTF8::getBytesRequiredFor (c)))
|
||||
{
|
||||
CharPointer_UTF8 (dest).write (c);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
MemoryBlock MemoryOutputStream::getMemoryBlock() const
|
||||
@@ -108,10 +148,13 @@ MemoryBlock MemoryOutputStream::getMemoryBlock() const
|
||||
|
||||
const void* MemoryOutputStream::getData() const noexcept
|
||||
{
|
||||
if (data.getSize() > size)
|
||||
static_cast <char*> (data.getData()) [size] = 0;
|
||||
if (blockToUse == nullptr)
|
||||
return externalData;
|
||||
|
||||
return data.getData();
|
||||
if (blockToUse->getSize() > size)
|
||||
static_cast <char*> (blockToUse->getData()) [size] = 0;
|
||||
|
||||
return blockToUse->getData();
|
||||
}
|
||||
|
||||
bool MemoryOutputStream::setPosition (int64 newPosition)
|
||||
@@ -137,7 +180,8 @@ int MemoryOutputStream::writeFromInputStream (InputStream& source, int64 maxNumB
|
||||
if (maxNumBytesToWrite > availableData)
|
||||
maxNumBytesToWrite = availableData;
|
||||
|
||||
preallocate (data.getSize() + (size_t) maxNumBytesToWrite);
|
||||
if (blockToUse != nullptr)
|
||||
preallocate (blockToUse->getSize() + (size_t) maxNumBytesToWrite);
|
||||
}
|
||||
|
||||
return OutputStream::writeFromInputStream (source, maxNumBytesToWrite);
|
||||
|
||||
@@ -28,7 +28,13 @@
|
||||
#include "../memory/beast_MemoryBlock.h"
|
||||
#include "../memory/beast_ScopedPointer.h"
|
||||
|
||||
//==============================================================================
|
||||
/**
|
||||
Writes data to an internal memory buffer, which grows as required.
|
||||
|
||||
The data that was written into the stream can then be accessed later as
|
||||
a contiguous block of memory.
|
||||
*/
|
||||
//==============================================================================
|
||||
/**
|
||||
Writes data to an internal memory buffer, which grows as required.
|
||||
@@ -43,7 +49,6 @@ class BEAST_API MemoryOutputStream
|
||||
public:
|
||||
//==============================================================================
|
||||
/** Creates an empty memory stream, ready to be written into.
|
||||
|
||||
@param initialSize the initial amount of capacity to allocate for writing into
|
||||
*/
|
||||
MemoryOutputStream (size_t initialSize = 256);
|
||||
@@ -63,6 +68,14 @@ public:
|
||||
MemoryOutputStream (MemoryBlock& memoryBlockToWriteTo,
|
||||
bool appendToExistingBlockContent);
|
||||
|
||||
/** Creates a MemoryOutputStream that will write into a user-supplied, fixed-size
|
||||
block of memory.
|
||||
|
||||
When using this mode, the stream will write directly into this memory area until
|
||||
it's full, at which point write operations will fail.
|
||||
*/
|
||||
MemoryOutputStream (void* destBuffer, size_t destBufferSize);
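/*  A minimal sketch of the fixed-buffer mode: writes succeed until the
    caller-supplied storage is exhausted, then return false instead of
    growing.

    @code
    char buffer [16];
    MemoryOutputStream stream (buffer, sizeof (buffer));

    bool ok = stream.writeInt32 (42);       // fits: returns true
    ok = stream.writeRepeatedByte (0, 64);  // too large: returns false
    @endcode
*/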
|
||||
|
||||
/** Destructor.
|
||||
This will free any data that was written to it.
|
||||
*/
|
||||
@@ -88,7 +101,7 @@ public:
|
||||
void preallocate (size_t bytesToPreallocate);
|
||||
|
||||
/** Appends the utf-8 bytes for a unicode character */
|
||||
void appendUTF8Char (beast_wchar character);
|
||||
bool appendUTF8Char (beast_wchar character);
|
||||
|
||||
/** Returns a String created from the (UTF8) data that has been written to the stream. */
|
||||
String toUTF8() const;
|
||||
@@ -108,24 +121,24 @@ public:
|
||||
*/
|
||||
void flush();
|
||||
|
||||
bool write (const void* buffer, size_t howMany);
|
||||
int64 getPosition() { return position; }
|
||||
bool setPosition (int64 newPosition);
|
||||
int writeFromInputStream (InputStream& source, int64 maxNumBytesToWrite);
|
||||
void writeRepeatedByte (uint8 byte, size_t numTimesToRepeat);
|
||||
bool write (const void*, size_t) override;
|
||||
int64 getPosition() override { return position; }
|
||||
bool setPosition (int64) override;
|
||||
int writeFromInputStream (InputStream&, int64 maxNumBytesToWrite) override;
|
||||
bool writeRepeatedByte (uint8 byte, size_t numTimesToRepeat) override;
|
||||
|
||||
private:
|
||||
//==============================================================================
|
||||
MemoryBlock& data;
|
||||
MemoryBlock internalBlock;
|
||||
size_t position, size;
|
||||
|
||||
void trimExternalBlockSize();
|
||||
char* prepareToWrite (size_t);
|
||||
|
||||
//==============================================================================
|
||||
MemoryBlock* const blockToUse;
|
||||
MemoryBlock internalBlock;
|
||||
void* externalData;
|
||||
size_t position, size, availableSize;
|
||||
};
|
||||
|
||||
/** Copies all the data that has been written to a MemoryOutputStream into another stream. */
|
||||
OutputStream& BEAST_CALLTYPE operator<< (OutputStream& stream, const MemoryOutputStream& streamToRead);
|
||||
|
||||
|
||||
#endif // BEAST_MEMORYOUTPUTSTREAM_BEASTHEADER
|
||||
#endif
|
||||
@@ -61,48 +61,69 @@ OutputStream::~OutputStream()
|
||||
}
|
||||
|
||||
//==============================================================================
|
||||
void OutputStream::writeBool (const bool b)
|
||||
bool OutputStream::writeBool (const bool b)
|
||||
{
|
||||
writeByte (b ? (char) 1
|
||||
return writeByte (b ? (char) 1
|
||||
: (char) 0);
|
||||
}
|
||||
|
||||
void OutputStream::writeByte (char byte)
|
||||
bool OutputStream::writeByte (char byte)
|
||||
{
|
||||
write (&byte, 1);
|
||||
return write (&byte, 1);
|
||||
}
|
||||
|
||||
void OutputStream::writeRepeatedByte (uint8 byte, size_t numTimesToRepeat)
|
||||
bool OutputStream::writeRepeatedByte (uint8 byte, size_t numTimesToRepeat)
|
||||
{
|
||||
for (size_t i = 0; i < numTimesToRepeat; ++i)
|
||||
writeByte ((char) byte);
|
||||
if (! writeByte ((char) byte))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void OutputStream::writeShort (short value)
|
||||
bool OutputStream::writeShort (short value)
|
||||
{
|
||||
const unsigned short v = ByteOrder::swapIfBigEndian ((unsigned short) value);
|
||||
write (&v, 2);
|
||||
return write (&v, 2);
|
||||
}
|
||||
|
||||
void OutputStream::writeShortBigEndian (short value)
|
||||
bool OutputStream::writeShortBigEndian (short value)
|
||||
{
|
||||
const unsigned short v = ByteOrder::swapIfLittleEndian ((unsigned short) value);
|
||||
write (&v, 2);
|
||||
return write (&v, 2);
|
||||
}
|
||||
|
||||
void OutputStream::writeInt (int value)
|
||||
bool OutputStream::writeInt32 (int32 value)
|
||||
{
|
||||
static_bassert (sizeof (int32) == 4);
|
||||
|
||||
const unsigned int v = ByteOrder::swapIfBigEndian ((uint32) value);
|
||||
return write (&v, 4);
|
||||
}
|
||||
|
||||
bool OutputStream::writeInt (int value)
|
||||
{
|
||||
static_bassert (sizeof (int) == 4);
|
||||
|
||||
const unsigned int v = ByteOrder::swapIfBigEndian ((unsigned int) value);
|
||||
write (&v, 4);
|
||||
return write (&v, 4);
|
||||
}
|
||||
|
||||
void OutputStream::writeIntBigEndian (int value)
|
||||
bool OutputStream::writeInt32BigEndian (int value)
|
||||
{
|
||||
const unsigned int v = ByteOrder::swapIfLittleEndian ((unsigned int) value);
|
||||
write (&v, 4);
|
||||
static_bassert (sizeof (int32) == 4);
|
||||
const uint32 v = ByteOrder::swapIfLittleEndian ((uint32) value);
|
||||
return write (&v, 4);
|
||||
}
|
||||
|
||||
void OutputStream::writeCompressedInt (int value)
|
||||
bool OutputStream::writeIntBigEndian (int value)
|
||||
{
|
||||
static_bassert (sizeof (int) == 4);
|
||||
const unsigned int v = ByteOrder::swapIfLittleEndian ((unsigned int) value);
|
||||
return write (&v, 4);
|
||||
}
|
||||
|
||||
bool OutputStream::writeCompressedInt (int value)
|
||||
{
|
||||
unsigned int un = (value < 0) ? (unsigned int) -value
|
||||
: (unsigned int) value;
|
||||
@@ -121,60 +142,60 @@ void OutputStream::writeCompressedInt (int value)
|
||||
if (value < 0)
|
||||
data[0] |= 0x80;
|
||||
|
||||
write (data, num + 1);
|
||||
return write (data, num + 1);
|
||||
}
|
||||
|
||||
void OutputStream::writeInt64 (int64 value)
|
||||
bool OutputStream::writeInt64 (int64 value)
|
||||
{
|
||||
const uint64 v = ByteOrder::swapIfBigEndian ((uint64) value);
|
||||
write (&v, 8);
|
||||
return write (&v, 8);
|
||||
}
|
||||
|
||||
void OutputStream::writeInt64BigEndian (int64 value)
|
||||
bool OutputStream::writeInt64BigEndian (int64 value)
|
||||
{
|
||||
const uint64 v = ByteOrder::swapIfLittleEndian ((uint64) value);
|
||||
write (&v, 8);
|
||||
return write (&v, 8);
|
||||
}
|
||||
|
||||
void OutputStream::writeFloat (float value)
|
||||
bool OutputStream::writeFloat (float value)
|
||||
{
|
||||
union { int asInt; float asFloat; } n;
|
||||
n.asFloat = value;
|
||||
writeInt (n.asInt);
|
||||
return writeInt (n.asInt);
|
||||
}
|
||||
|
||||
void OutputStream::writeFloatBigEndian (float value)
|
||||
bool OutputStream::writeFloatBigEndian (float value)
|
||||
{
|
||||
union { int asInt; float asFloat; } n;
|
||||
n.asFloat = value;
|
||||
writeIntBigEndian (n.asInt);
|
||||
return writeIntBigEndian (n.asInt);
|
||||
}
|
||||
|
||||
void OutputStream::writeDouble (double value)
|
||||
bool OutputStream::writeDouble (double value)
|
||||
{
|
||||
union { int64 asInt; double asDouble; } n;
|
||||
n.asDouble = value;
|
||||
writeInt64 (n.asInt);
|
||||
return writeInt64 (n.asInt);
|
||||
}
|
||||
|
||||
void OutputStream::writeDoubleBigEndian (double value)
|
||||
bool OutputStream::writeDoubleBigEndian (double value)
|
||||
{
|
||||
union { int64 asInt; double asDouble; } n;
|
||||
n.asDouble = value;
|
||||
writeInt64BigEndian (n.asInt);
|
||||
return writeInt64BigEndian (n.asInt);
|
||||
}
|
||||
|
||||
void OutputStream::writeString (const String& text)
|
||||
bool OutputStream::writeString (const String& text)
|
||||
{
|
||||
// (This avoids using toUTF8() to prevent the memory bloat that it would leave behind
|
||||
// if lots of large, persistent strings were to be written to streams).
|
||||
const size_t numBytes = text.getNumBytesAsUTF8() + 1;
|
||||
HeapBlock<char> temp (numBytes);
|
||||
text.copyToUTF8 (temp, numBytes);
|
||||
write (temp, numBytes);
|
||||
return write (temp, numBytes);
|
||||
}
|
||||
|
||||
void OutputStream::writeText (const String& text, const bool asUTF16,
|
||||
bool OutputStream::writeText (const String& text, const bool asUTF16,
|
||||
const bool writeUTF16ByteOrderMark)
|
||||
{
|
||||
if (asUTF16)
|
||||
@@ -196,7 +217,9 @@ void OutputStream::writeText (const String& text, const bool asUTF16,
|
||||
writeShort ((short) '\r');
|
||||
|
||||
lastCharWasReturn = (c == L'\r');
|
||||
writeShort ((short) c);
|
||||
|
||||
if (! writeShort ((short) c))
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else
|
||||
@@ -209,9 +232,12 @@ void OutputStream::writeText (const String& text, const bool asUTF16,
|
||||
if (*t == '\n')
|
||||
{
|
||||
if (t > src)
|
||||
write (src, (int) (t - src));
|
||||
if (! write (src, (int) (t - src)))
|
||||
return false;
|
||||
|
||||
if (! write ("\r\n", 2))
|
||||
return false;
|
||||
|
||||
write ("\r\n", 2);
|
||||
src = t + 1;
|
||||
}
|
||||
else if (*t == '\r')
|
||||
@@ -222,7 +248,8 @@ void OutputStream::writeText (const String& text, const bool asUTF16,
|
||||
else if (*t == 0)
|
||||
{
|
||||
if (t > src)
|
||||
write (src, (int) (t - src));
|
||||
if (! write (src, (int) (t - src)))
|
||||
return false;
|
||||
|
||||
break;
|
||||
}
|
||||
@@ -230,6 +257,8 @@ void OutputStream::writeText (const String& text, const bool asUTF16,
|
||||
++t;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int OutputStream::writeFromInputStream (InputStream& source, int64 numBytesToWrite)
|
||||
@@ -318,3 +347,70 @@ BEAST_API OutputStream& BEAST_CALLTYPE operator<< (OutputStream& stream, const N
|
||||
{
|
||||
return stream << stream.getNewLineString();
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Unfortunately, putting these in the header causes duplicate
|
||||
// definition linker errors, even with the inline keyword!
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeType <char> (char v) { return writeByte (v); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeType <short> (short v) { return writeShort (v); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeType <int32> (int32 v) { return writeInt32 (v); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeType <int64> (int64 v) { return writeInt64 (v); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeType <unsigned char> (unsigned char v) { return writeByte (static_cast <char> (v)); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeType <unsigned short> (unsigned short v) { return writeShort (static_cast <short> (v)); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeType <uint32> (uint32 v) { return writeInt32 (static_cast <int32> (v)); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeType <uint64> (uint64 v) { return writeInt64 (static_cast <int64> (v)); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeType <float> (float v) { return writeFloat (v); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeType <double> (double v) { return writeDouble (v); }
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeTypeBigEndian <char> (char v) { return writeByte (v); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeTypeBigEndian <short> (short v) { return writeShortBigEndian (v); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeTypeBigEndian <int32> (int32 v) { return writeInt32BigEndian (v); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeTypeBigEndian <int64> (int64 v) { return writeInt64BigEndian (v); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeTypeBigEndian <unsigned char> (unsigned char v) { return writeByte (static_cast <char> (v)); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeTypeBigEndian <unsigned short> (unsigned short v) { return writeShortBigEndian (static_cast <short> (v)); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeTypeBigEndian <uint32> (uint32 v) { return writeInt32BigEndian (static_cast <int32> (v)); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeTypeBigEndian <uint64> (uint64 v) { return writeInt64BigEndian (static_cast <int64> (v)); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeTypeBigEndian <float> (float v) { return writeFloatBigEndian (v); }
|
||||
|
||||
template <>
|
||||
BEAST_API bool OutputStream::writeTypeBigEndian <double> (double v) { return writeDoubleBigEndian (v); }
|
||||
|
||||
@@ -40,9 +40,7 @@ class File;
|
||||
|
||||
@see InputStream, MemoryOutputStream, FileOutputStream
|
||||
*/
|
||||
class BEAST_API OutputStream
|
||||
: public Uncopyable
|
||||
, LeakChecked <OutputStream>
|
||||
class BEAST_API OutputStream : public Uncopyable
|
||||
{
|
||||
protected:
|
||||
//==============================================================================
|
||||
@@ -92,75 +90,111 @@ public:
|
||||
|
||||
//==============================================================================
|
||||
/** Writes a single byte to the stream.
|
||||
|
||||
@returns false if the write operation fails for some reason
|
||||
@see InputStream::readByte
|
||||
*/
|
||||
virtual void writeByte (char byte);
|
||||
virtual bool writeByte (char byte);
|
||||
|
||||
/** Writes a boolean to the stream as a single byte.
|
||||
This is encoded as a binary byte (not as text) with a value of 1 or 0.
|
||||
@returns false if the write operation fails for some reason
|
||||
@see InputStream::readBool
|
||||
*/
|
||||
virtual void writeBool (bool boolValue);
|
||||
virtual bool writeBool (bool boolValue);
|
||||
|
||||
/** Writes a 16-bit integer to the stream in a little-endian byte order.
|
||||
This will write two bytes to the stream: (value & 0xff), then (value >> 8).
|
||||
@returns false if the write operation fails for some reason
|
||||
@see InputStream::readShort
|
||||
*/
|
||||
virtual void writeShort (short value);
|
||||
virtual bool writeShort (short value);
|
||||
|
||||
/** Writes a 16-bit integer to the stream in a big-endian byte order.
|
||||
This will write two bytes to the stream: (value >> 8), then (value & 0xff).
|
||||
@returns false if the write operation fails for some reason
|
||||
@see InputStream::readShortBigEndian
|
||||
*/
|
||||
virtual void writeShortBigEndian (short value);
|
||||
virtual bool writeShortBigEndian (short value);
|
||||
|
||||
/** Writes a 32-bit integer to the stream in a little-endian byte order.
|
||||
@returns false if the write operation fails for some reason
|
||||
@see InputStream::readInt
|
||||
*/
|
||||
virtual void writeInt (int value);
|
||||
virtual bool writeInt32 (int32 value);
|
||||
|
||||
// DEPRECATED, assumes sizeof (int) == 4!
|
||||
virtual bool writeInt (int value);
|
||||
|
||||
/** Writes a 32-bit integer to the stream in a big-endian byte order.
|
||||
@returns false if the write operation fails for some reason
|
||||
@see InputStream::readIntBigEndian
|
||||
*/
|
||||
virtual void writeIntBigEndian (int value);
|
||||
virtual bool writeInt32BigEndian (int32 value);
|
||||
|
||||
// DEPRECATED, assumes sizeof (int) == 4!
|
||||
virtual bool writeIntBigEndian (int value);
|
||||
|
||||
/** Writes a 64-bit integer to the stream in a little-endian byte order.
|
||||
@returns false if the write operation fails for some reason
|
||||
@see InputStream::readInt64
|
||||
*/
|
||||
virtual void writeInt64 (int64 value);
|
||||
virtual bool writeInt64 (int64 value);
|
||||
|
||||
/** Writes a 64-bit integer to the stream in a big-endian byte order.
|
||||
@returns false if the write operation fails for some reason
|
||||
@see InputStream::readInt64BigEndian
|
||||
*/
|
||||
virtual void writeInt64BigEndian (int64 value);
|
||||
virtual bool writeInt64BigEndian (int64 value);
|
||||
|
||||
/** Writes a 32-bit floating point value to the stream in a binary format.
|
||||
The binary 32-bit encoding of the float is written as a little-endian int.
|
||||
@returns false if the write operation fails for some reason
|
||||
@see InputStream::readFloat
|
||||
*/
|
||||
virtual void writeFloat (float value);
|
||||
virtual bool writeFloat (float value);
|
||||
|
||||
/** Writes a 32-bit floating point value to the stream in a binary format.
|
||||
The binary 32-bit encoding of the float is written as a big-endian int.
|
||||
@returns false if the write operation fails for some reason
|
||||
@see InputStream::readFloatBigEndian
|
||||
*/
|
||||
virtual void writeFloatBigEndian (float value);
|
||||
virtual bool writeFloatBigEndian (float value);
|
||||
|
||||
/** Writes a 64-bit floating point value to the stream in a binary format.
|
||||
The eight raw bytes of the double value are written out as a little-endian 64-bit int.
|
||||
@returns false if the write operation fails for some reason
|
||||
@see InputStream::readDouble
|
||||
*/
|
||||
virtual void writeDouble (double value);
|
||||
virtual bool writeDouble (double value);
|
||||
|
||||
/** Writes a 64-bit floating point value to the stream in a binary format.
|
||||
The eight raw bytes of the double value are written out as a big-endian 64-bit int.
|
||||
@see InputStream::readDoubleBigEndian
|
||||
@returns false if the write operation fails for some reason
|
||||
*/
|
||||
virtual void writeDoubleBigEndian (double value);
|
||||
virtual bool writeDoubleBigEndian (double value);
|
||||
|
||||
/** Writes a byte to the output stream a given number of times. */
|
||||
virtual void writeRepeatedByte (uint8 byte, size_t numTimesToRepeat);
|
||||
/** Write a type using a template specialization.
|
||||
|
||||
This is useful when doing template meta-programming.
|
||||
*/
|
||||
template <class T>
|
||||
bool writeType (T value);
|
||||
|
||||
/** Write a type using a template specialization.
|
||||
|
||||
The raw encoding of the type is written to the stream as a big-endian value
|
||||
where applicable.
|
||||
|
||||
This is useful when doing template meta-programming.
|
||||
*/
|
||||
template <class T>
|
||||
bool writeTypeBigEndian (T value);
|
||||
|
||||
/** Writes a byte to the output stream a given number of times.
|
||||
@returns false if the write operation fails for some reason
|
||||
*/
|
||||
virtual bool writeRepeatedByte (uint8 byte, size_t numTimesToRepeat);
|
||||
|
||||
/** Writes a condensed binary encoding of a 32-bit integer.
|
||||
|
||||
@@ -170,9 +204,10 @@ public:
|
||||
|
||||
The format used is: number of significant bytes + up to 4 bytes in little-endian order.
|
||||
|
||||
@returns false if the write operation fails for some reason
|
||||
@see InputStream::readCompressedInt
|
||||
*/
|
||||
virtual void writeCompressedInt (int value);
|
||||
virtual bool writeCompressedInt (int value);
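/*  A worked sketch of the condensed format described above, as implied by
    the implementation in this diff (header byte = count of significant
    bytes, top bit set for negatives, then that many little-endian bytes):

    @code
    out.writeCompressedInt (0x1234);  // 0x02 0x34 0x12   (3 bytes)
    out.writeCompressedInt (-5);      // 0x81 0x05        (2 bytes)
    out.writeCompressedInt (0);       // 0x00             (1 byte)
    @endcode

    Here `out` is any OutputStream; the exact bytes for zero follow from the
    "number of significant bytes" rule and are not shown in this hunk.
*/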
|
||||
|
||||
/** Stores a string in the stream in a binary format.
|
||||
|
||||
@@ -184,9 +219,10 @@ public:
|
||||
|
||||
For appending text to a file, instead use writeText, or operator<<
|
||||
|
||||
@returns false if the write operation fails for some reason
|
||||
@see InputStream::readString, writeText, operator<<
|
||||
*/
|
||||
virtual void writeString (const String& text);
|
||||
virtual bool writeString (const String& text);
|
||||
|
||||
/** Writes a string of text to the stream.
|
||||
|
||||
@@ -195,8 +231,9 @@ public:
|
||||
of a file).
|
||||
|
||||
The method also replaces '\\n' characters in the text with '\\r\\n'.
|
||||
@returns false if the write operation fails for some reason
|
||||
*/
|
||||
virtual void writeText (const String& text,
|
||||
virtual bool writeText (const String& text,
|
||||
bool asUTF16,
|
||||
bool writeUTF16ByteOrderMark);
|
||||
|
||||
@@ -206,6 +243,7 @@ public:
|
||||
@param maxNumBytesToWrite the number of bytes to read from the stream (if this is
|
||||
less than zero, it will keep reading until the input
|
||||
is exhausted)
|
||||
@returns the number of bytes written
|
||||
*/
|
||||
virtual int writeFromInputStream (InputStream& source, int64 maxNumBytesToWrite);
|
||||
|
||||
@@ -258,5 +296,4 @@ BEAST_API OutputStream& BEAST_CALLTYPE operator<< (OutputStream& stream, InputSt
|
||||
*/
|
||||
BEAST_API OutputStream& BEAST_CALLTYPE operator<< (OutputStream& stream, const NewLine&);
|
||||
|
||||
|
||||
#endif // BEAST_OUTPUTSTREAM_BEASTHEADER
|
||||
#endif
|
||||
|
||||
@@ -2078,7 +2078,7 @@ String String::fromUTF8 (const char* const buffer, int bufferSizeBytes)
|
||||
class StringTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
StringTests() : UnitTest ("String") { }
|
||||
StringTests() : UnitTest ("String", "beast") { }
|
||||
|
||||
template <class CharPointerType>
|
||||
struct TestUTFConversion
|
||||
@@ -2402,6 +2402,4 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
#if BEAST_UNIT_TESTS
|
||||
static StringTests stringTests;
|
||||
#endif
|
||||
|
||||
@@ -177,7 +177,7 @@ String TextDiff::Change::appliedTo (const String& text) const noexcept
|
||||
class DiffTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
DiffTests() : UnitTest ("TextDiff") {}
|
||||
DiffTests() : UnitTest ("TextDiff", "beast") {}
|
||||
|
||||
static String createString()
|
||||
{
|
||||
@@ -229,6 +229,4 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
#if BEAST_UNIT_TESTS
|
||||
static DiffTests diffTests;
|
||||
#endif
|
||||
|
||||
@@ -61,7 +61,7 @@ String ChildProcess::readAllProcessOutput()
|
||||
class ChildProcessTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
ChildProcessTests() : UnitTest ("ChildProcess") {}
|
||||
ChildProcessTests() : UnitTest ("ChildProcess", "beast") {}
|
||||
|
||||
void runTest()
|
||||
{
|
||||
@@ -82,6 +82,4 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
#if BEAST_UNIT_TESTS
|
||||
static ChildProcessTests childProcessTests;
|
||||
#endif
|
||||
|
||||
@@ -255,7 +255,7 @@ void SpinLock::enter() const noexcept
|
||||
class AtomicTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
AtomicTests() : UnitTest ("Atomic") {}
|
||||
AtomicTests() : UnitTest ("Atomic", "beast") {}
|
||||
|
||||
void runTest()
|
||||
{
|
||||
@@ -350,6 +350,4 @@ public:
|
||||
};
|
||||
};
|
||||
|
||||
#if BEAST_UNIT_TESTS
|
||||
static AtomicTests atomicTests;
|
||||
#endif
|
||||
|
||||
@@ -161,7 +161,7 @@ bool GZIPCompressorOutputStream::setPosition (int64 /*newPosition*/)
|
||||
class GZIPTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
GZIPTests() : UnitTest ("GZIP") {}
|
||||
GZIPTests() : UnitTest ("GZIP", "beast") {}
|
||||
|
||||
void runTest()
|
||||
{
|
||||
@@ -205,6 +205,4 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
#if BEAST_UNIT_TESTS
|
||||
static GZIPTests gzipTests;
|
||||
#endif
|
||||
|
||||
@@ -80,9 +80,9 @@ public:
|
||||
*/
|
||||
void flush();
|
||||
|
||||
int64 getPosition();
|
||||
bool setPosition (int64 newPosition);
|
||||
bool write (const void* destBuffer, size_t howMany);
|
||||
int64 getPosition() override;
|
||||
bool setPosition (int64) override;
|
||||
bool write (const void*, size_t) override;
|
||||
|
||||
/** These are preset values that can be used for the constructor's windowBits parameter.
|
||||
For more info about this, see the zlib documentation for its windowBits parameter.
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
class UnsignedIntegerTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
UnsignedIntegerTests () : UnitTest ("UnsignedInteger")
|
||||
UnsignedIntegerTests () : UnitTest ("UnsignedInteger", "beast")
|
||||
{
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ public:
|
||||
{
|
||||
String s;
|
||||
|
||||
s << "UnsignedInteger <" << String(Bytes) << ">";
|
||||
s << "Bytes=" << String(Bytes);
|
||||
|
||||
beginTest (s);
|
||||
|
||||
@@ -82,6 +82,4 @@ public:
|
||||
private:
|
||||
};
|
||||
|
||||
#if BEAST_UNIT_TESTS
|
||||
static UnsignedIntegerTests unsignedIntegerTests;
|
||||
#endif
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
|
||||
@tparam Bytes The number of bytes of storage.
|
||||
*/
|
||||
template <unsigned int Bytes>
|
||||
template <size_t Bytes>
|
||||
class UnsignedInteger : public SafeBool <UnsignedInteger <Bytes> >
|
||||
{
|
||||
public:
|
||||
@@ -76,10 +76,10 @@ public:
|
||||
template <class IntegerType>
|
||||
UnsignedInteger <Bytes>& operator= (IntegerType value)
|
||||
{
|
||||
static_bassert (sizeof (Bytes) >= sizeof (IntegerType));
|
||||
static_bassert (Bytes >= sizeof (IntegerType));
|
||||
clear ();
|
||||
value = ByteOrder::swapIfLittleEndian (value);
|
||||
memcpy (end () - sizeof (value), &value, sizeof (value));
|
||||
memcpy (end () - sizeof (value), &value, bmin (Bytes, sizeof (value)));
|
||||
return *this;
|
||||
}
|
||||
|
||||
@@ -234,28 +234,28 @@ public:
|
||||
*/
|
||||
bool operator< (UnsignedInteger <Bytes> const& other) const noexcept
|
||||
{
|
||||
return compare (other) == -1;
|
||||
return compare (other) < 0;
|
||||
}
|
||||
|
||||
/** Ordered comparison.
|
||||
*/
|
||||
bool operator<= (UnsignedInteger <Bytes> const& other) const noexcept
|
||||
{
|
||||
return compare (other) != 1;
|
||||
return compare (other) <= 0;
|
||||
}
|
||||
|
||||
/** Ordered comparison.
|
||||
*/
|
||||
bool operator> (UnsignedInteger <Bytes> const& other) const noexcept
|
||||
{
|
||||
return compare (other) == 1;
|
||||
return compare (other) > 0;
|
||||
}
|
||||
|
||||
/** Ordered comparison.
|
||||
*/
|
||||
bool operator>= (UnsignedInteger <Bytes> const& other) const noexcept
|
||||
{
|
||||
return compare (other) != -1;
|
||||
return compare (other) >= 0;
|
||||
}
|
||||
|
||||
/** Perform bitwise logical-not.
|
||||
|
||||
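The comparison operators above are relaxed from testing for exactly -1/+1 to testing the sign of compare(), which is the usual memcmp-style contract: only the sign of the result is meaningful. A short sketch of that contract, assuming compare() ultimately forwards to memcmp over the fixed-size storage as the surrounding code suggests:

    #include <cstddef>
    #include <cstring>

    // Sketch only: memcmp may return any negative or positive value, not
    // strictly -1 or +1, so callers should test the sign as the rewritten
    // operators do (compare (other) < 0, <= 0, > 0, >= 0).
    template <std::size_t Bytes>
    int compareBuffers (unsigned char const* a, unsigned char const* b) noexcept
    {
        return std::memcmp (a, b, Bytes);
    }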
31
Subtrees/beast/modules/beast_db/beast_db.cpp
Normal file
@@ -0,0 +1,31 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of Beast: https://github.com/vinniefalco/Beast
|
||||
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include "BeastConfig.h"
|
||||
|
||||
#include "beast_db.h"
|
||||
|
||||
#include "../beast_crypto/beast_crypto.h"
|
||||
|
||||
namespace beast
|
||||
{
|
||||
|
||||
#include "keyvalue/beast_KeyvaDB.cpp"
|
||||
|
||||
}
|
||||
52
Subtrees/beast/modules/beast_db/beast_db.h
Normal file
@@ -0,0 +1,52 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of Beast: https://github.com/vinniefalco/Beast
|
||||
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef BEAST_BEAST_DB_H_INCLUDED
|
||||
#define BEAST_BEAST_DB_H_INCLUDED
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
/* If you fail to make sure that all your compile units are building Beast with
|
||||
the same set of option flags, then there's a risk that different compile
|
||||
units will treat the classes as having different memory layouts, leading to
|
||||
very nasty memory corruption errors when they all get linked together.
|
||||
That's why it's best to always include the BeastConfig.h file before any
|
||||
beast headers.
|
||||
*/
|
||||
#ifndef BEAST_BEASTCONFIG_H_INCLUDED
|
||||
# ifdef _MSC_VER
|
||||
# pragma message ("Have you included your BeastConfig.h file before including the Beast headers?")
|
||||
# else
|
||||
# warning "Have you included your BeastConfig.h file before including the Beast headers?"
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#include "../beast_core/beast_core.h"
|
||||
#include "../beast_basics/beast_basics.h"
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
namespace beast
|
||||
{
|
||||
|
||||
#include "keyvalue/beast_KeyvaDB.h"
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
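As the comment block in beast_db.h warns, every translation unit that pulls in Beast headers should see the same BeastConfig.h first so that all compile units agree on class layouts. A sketch of a client compile unit honoring that rule; the include paths are illustrative and depend on where the project keeps its BeastConfig.h and the Subtrees directory:

    #include "BeastConfig.h"   // project-local configuration, included first
    #include "Subtrees/beast/modules/beast_db/beast_db.h"

    // beast_db types such as beast::KeyvaDB are now available.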
20
Subtrees/beast/modules/beast_db/beast_db.mm
Normal file
@@ -0,0 +1,20 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of Beast: https://github.com/vinniefalco/Beast
|
||||
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#include "beast_db.cpp"
|
||||
861
Subtrees/beast/modules/beast_db/keyvalue/beast_KeyvaDB.cpp
Normal file
@@ -0,0 +1,861 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of Beast: https://github.com/vinniefalco/Beast
|
||||
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
/*
|
||||
|
||||
TODO
|
||||
|
||||
- Check consistency / range checking on read
|
||||
|
||||
- Cache top level tree nodes
|
||||
|
||||
- Coalesce I/O in RandomAccessFile
|
||||
|
||||
- Delete / file compaction
|
||||
|
||||
*/
|
||||
|
||||
class KeyvaDBImp : public KeyvaDB
|
||||
{
|
||||
private:
|
||||
// These are stored in big endian format in the file.
|
||||
|
||||
// A file offset.
|
||||
typedef int64 FileOffset;
|
||||
|
||||
// Index of a key.
|
||||
//
|
||||
// The value is broken up into two parts. The key block index,
|
||||
// and a 1 based index within the keyblock corresponding to the
|
||||
// internal key number.
|
||||
//
|
||||
typedef int32 KeyIndex;
|
||||
typedef int32 KeyBlockIndex;
|
||||
|
||||
// Size of a value.
|
||||
typedef uint32 ByteSize;
|
||||
|
||||
private:
|
||||
// returns the number of keys in a key block with the specified depth
|
||||
static int calcKeysAtDepth (int depth)
|
||||
{
|
||||
return (1U << depth) - 1;
|
||||
}
|
||||
|
||||
// returns the number of bytes in a key record
|
||||
static int calcKeyRecordBytes (int keyBytes)
|
||||
{
|
||||
// This depends on the format of a serialized key record
|
||||
return
|
||||
sizeof (FileOffset) +
|
||||
sizeof (ByteSize) +
|
||||
sizeof (KeyIndex) +
|
||||
sizeof (KeyIndex) +
|
||||
keyBytes
|
||||
;
|
||||
}
|
||||
|
||||
// returns the number of bytes in a key block
|
||||
static int calcKeyBlockBytes (int depth, int keyBytes)
|
||||
{
|
||||
return calcKeysAtDepth (depth) * calcKeyRecordBytes (keyBytes);
|
||||
}
|
||||
|
||||
public:
|
||||
enum
|
||||
{
|
||||
currentVersion = 1
|
||||
};
|
||||
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
struct KeyAddress
|
||||
{
|
||||
// 1 based key block number
|
||||
uint32 blockNumber;
|
||||
|
||||
// 1 based key index within the block, breadth-first left to right
|
||||
uint32 keyNumber;
|
||||
};
|
||||
|
||||
enum
|
||||
{
|
||||
// The size of the fixed area at the beginning of the key file.
|
||||
// This is used to store some housekeeping information like the
|
||||
// key size and version number.
|
||||
//
|
||||
masterHeaderBytes = 1000
|
||||
};
|
||||
|
||||
// The master record is at the beginning of the key file
|
||||
struct MasterRecord
|
||||
{
|
||||
// version number, starting from 1
|
||||
int32 version;
|
||||
|
||||
KeyBlockIndex nextKeyBlockIndex;
|
||||
|
||||
void write (OutputStream& stream)
|
||||
{
|
||||
stream.writeTypeBigEndian (version);
|
||||
}
|
||||
|
||||
void read (InputStream& stream)
|
||||
{
|
||||
stream.readTypeBigEndianInto (&version);
|
||||
}
|
||||
};
|
||||
|
||||
// Key records are indexed starting at one.
|
||||
struct KeyRecord : Uncopyable
|
||||
{
|
||||
explicit KeyRecord (void* const keyStorage)
|
||||
: key (keyStorage)
|
||||
{
|
||||
}
|
||||
|
||||
// Absolute byte FileOffset in the value file.
|
||||
FileOffset valFileOffset;
|
||||
|
||||
// Size of the corresponding value, in bytes.
|
||||
ByteSize valSize;
|
||||
|
||||
// Key record index of left node, or 0.
|
||||
KeyIndex leftIndex;
|
||||
|
||||
// Key record index of right node, or 0.
|
||||
KeyIndex rightIndex;
|
||||
|
||||
// Points to keyBytes storage of the key.
|
||||
void* const key;
|
||||
};
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
// A complete keyblock. The contents of the memory for the key block
|
||||
// are identical to the format on disk. Therefore it is necessary to
|
||||
// use the serialization routines to extract or update the key records.
|
||||
//
|
||||
class KeyBlock : Uncopyable
|
||||
{
|
||||
public:
|
||||
KeyBlock (int depth, int keyBytes)
|
||||
: m_depth (depth)
|
||||
, m_keyBytes (keyBytes)
|
||||
, m_storage (calcKeyBlockBytes (depth, keyBytes))
|
||||
{
|
||||
}
|
||||
|
||||
void read (InputStream& stream)
|
||||
{
|
||||
stream.read (m_storage.getData (), calcKeyBlockBytes (m_depth, m_keyBytes));
|
||||
}
|
||||
|
||||
void write (OutputStream& stream)
|
||||
{
|
||||
stream.write (m_storage.getData (), calcKeyBlockBytes (m_depth, m_keyBytes));
|
||||
}
|
||||
|
||||
void readKeyRecord (KeyRecord* keyRecord, int keyIndex)
|
||||
{
|
||||
bassert (keyIndex >=1 && keyIndex <= calcKeysAtDepth (m_depth));
|
||||
|
||||
size_t const byteOffset = (keyIndex - 1) * calcKeyRecordBytes (m_keyBytes);
|
||||
|
||||
MemoryInputStream stream (
|
||||
addBytesToPointer (m_storage.getData (), byteOffset),
|
||||
calcKeyRecordBytes (m_keyBytes),
|
||||
false);
|
||||
|
||||
stream.readTypeBigEndianInto (&keyRecord->valFileOffset);
|
||||
stream.readTypeBigEndianInto (&keyRecord->valSize);
|
||||
stream.readTypeBigEndianInto (&keyRecord->leftIndex);
|
||||
stream.readTypeBigEndianInto (&keyRecord->rightIndex);
|
||||
stream.read (keyRecord->key, m_keyBytes);
|
||||
}
|
||||
|
||||
#if 0
|
||||
void writeKeyRecord (KeyRecord const& keyRecord, int keyIndex)
|
||||
{
|
||||
bassert (keyIndex >=1 && keyIndex <= calcKeysAtDepth (m_depth));
|
||||
|
||||
#if 0
|
||||
size_t const byteOffset = (keyIndex - 1) * calcKeyRecordBytes (m_keyBytes);
|
||||
|
||||
MemoryOutputStream stream (
|
||||
addBytesToPointer (m_storage.getData (), byteOffset),
|
||||
calcKeyRecordBytes (m_keyBytes));
|
||||
|
||||
stream.writeTypeBigEndian (keyRecord.valFileOffset);
|
||||
stream.writeTypeBigEndian (keyRecord.valSize);
|
||||
stream.writeTypeBigEndian (keyRecord.leftIndex);
|
||||
stream.writeTypeBigEndian (keyRecord.rightIndex);
|
||||
stream.write (keyRecord.key, m_keyBytes);
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
private:
|
||||
int const m_depth;
|
||||
int const m_keyBytes;
|
||||
MemoryBlock m_storage;
|
||||
};
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
// Concurrent data
|
||||
//
|
||||
struct State
|
||||
{
|
||||
RandomAccessFile keyFile;
|
||||
RandomAccessFile valFile;
|
||||
MasterRecord masterRecord;
|
||||
KeyIndex newKeyIndex;
|
||||
FileOffset valFileSize;
|
||||
|
||||
bool hasKeys () const noexcept
|
||||
{
|
||||
return newKeyIndex > 1;
|
||||
}
|
||||
};
|
||||
|
||||
typedef SharedData <State> SharedState;
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
int const m_keyBytes;
|
||||
int const m_keyBlockDepth;
|
||||
SharedState m_state;
|
||||
HeapBlock <char> m_keyStorage;
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
KeyvaDBImp (int keyBytes,
|
||||
int keyBlockDepth,
|
||||
File keyPath,
|
||||
File valPath)
|
||||
: m_keyBytes (keyBytes)
|
||||
, m_keyBlockDepth (keyBlockDepth)
|
||||
, m_keyStorage (keyBytes)
|
||||
{
|
||||
SharedState::WriteAccess state (m_state);
|
||||
|
||||
openFile (&state->keyFile, keyPath);
|
||||
|
||||
int64 const fileSize = state->keyFile.getFile ().getSize ();
|
||||
|
||||
if (fileSize == 0)
|
||||
{
|
||||
// VFALCO TODO Better error handling here
|
||||
// initialize the key file
|
||||
Result result = state->keyFile.setPosition (masterHeaderBytes - 1);
|
||||
if (result.wasOk ())
|
||||
{
|
||||
char byte = 0;
|
||||
|
||||
result = state->keyFile.write (&byte, 1);
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
state->keyFile.flush ();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
state->newKeyIndex = 1 + (state->keyFile.getFile ().getSize () - masterHeaderBytes)
|
||||
/ calcKeyRecordBytes (m_keyBytes);
|
||||
|
||||
openFile (&state->valFile, valPath);
|
||||
|
||||
state->valFileSize = state->valFile.getFile ().getSize ();
|
||||
}
|
||||
|
||||
~KeyvaDBImp ()
|
||||
{
|
||||
SharedState::WriteAccess state (m_state);
|
||||
|
||||
flushInternal (state);
|
||||
}
|
||||
|
||||
// Open a file for reading and writing.
|
||||
// Creates the file if it doesn't exist.
|
||||
static void openFile (RandomAccessFile* file, File path)
|
||||
{
|
||||
Result const result = file->open (path, RandomAccessFile::readWrite);
|
||||
|
||||
if (! result)
|
||||
{
|
||||
String s;
|
||||
s << "KeyvaDB: Couldn't open " << path.getFileName () << " for writing.";
|
||||
Throw (std::runtime_error (s.toStdString ()));
|
||||
}
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
Result createMasterRecord (SharedState::WriteAccess& state)
|
||||
{
|
||||
MemoryBlock buffer (masterHeaderBytes, true);
|
||||
|
||||
Result result = state->keyFile.setPosition (0);
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
MasterRecord mr;
|
||||
|
||||
mr.version = 1;
|
||||
|
||||
result = state->keyFile.write (buffer.getData (), buffer.getSize ());
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
FileOffset calcKeyRecordOffset (KeyIndex keyIndex)
|
||||
{
|
||||
bassert (keyIndex > 0);
|
||||
|
||||
FileOffset const byteOffset = masterHeaderBytes + (keyIndex - 1) * calcKeyRecordBytes (m_keyBytes);
|
||||
|
||||
return byteOffset;
|
||||
}
|
||||
|
||||
// Read a key record into memory.
|
||||
// VFALCO TODO Return a Result and do validity checking on all inputs
|
||||
//
|
||||
void readKeyRecord (KeyRecord* const keyRecord,
|
||||
KeyIndex const keyIndex,
|
||||
SharedState::WriteAccess& state)
|
||||
{
|
||||
FileOffset const byteOffset = calcKeyRecordOffset (keyIndex);
|
||||
|
||||
Result result = state->keyFile.setPosition (byteOffset);
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
MemoryBlock data (calcKeyRecordBytes (m_keyBytes));
|
||||
|
||||
size_t bytesRead;
|
||||
|
||||
result = state->keyFile.read (data.getData (), calcKeyRecordBytes (m_keyBytes), &bytesRead);
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
if (bytesRead == calcKeyRecordBytes (m_keyBytes))
|
||||
{
|
||||
MemoryInputStream stream (data, false);
|
||||
|
||||
// This defines the file format!
|
||||
stream.readTypeBigEndianInto (&keyRecord->valFileOffset);
|
||||
stream.readTypeBigEndianInto (&keyRecord->valSize);
|
||||
stream.readTypeBigEndianInto (&keyRecord->leftIndex);
|
||||
stream.readTypeBigEndianInto (&keyRecord->rightIndex);
|
||||
|
||||
// Grab the key
|
||||
stream.read (keyRecord->key, m_keyBytes);
|
||||
}
|
||||
else
|
||||
{
|
||||
result = Result::fail ("KeyvaDB: amountRead != calcKeyRecordBytes()");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (! result.wasOk ())
|
||||
{
|
||||
String s;
|
||||
s << "KeyvaDB readKeyRecord failed in " << state->keyFile.getFile ().getFileName ();
|
||||
Throw (std::runtime_error (s.toStdString ()));
|
||||
}
|
||||
}
|
||||
|
||||
// Write a key record from memory
|
||||
void writeKeyRecord (KeyRecord const& keyRecord,
|
||||
KeyIndex const keyIndex,
|
||||
SharedState::WriteAccess& state,
|
||||
bool includingKey)
|
||||
{
|
||||
FileOffset const byteOffset = calcKeyRecordOffset (keyIndex);
|
||||
|
||||
int const bytes = calcKeyRecordBytes (m_keyBytes) - (includingKey ? 0 : m_keyBytes);
|
||||
|
||||
// VFALCO TODO Recycle this buffer
|
||||
MemoryBlock data (bytes);
|
||||
|
||||
{
|
||||
MemoryOutputStream stream (data, false);
|
||||
|
||||
// This defines the file format!
|
||||
stream.writeTypeBigEndian (keyRecord.valFileOffset);
|
||||
stream.writeTypeBigEndian (keyRecord.valSize);
|
||||
stream.writeTypeBigEndian (keyRecord.leftIndex);
|
||||
stream.writeTypeBigEndian (keyRecord.rightIndex);
|
||||
|
||||
// Write the key
|
||||
if (includingKey)
|
||||
stream.write (keyRecord.key, m_keyBytes);
|
||||
}
|
||||
|
||||
Result result = state->keyFile.setPosition (byteOffset);
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
size_t bytesWritten;
|
||||
|
||||
result = state->keyFile.write (data.getData (), bytes, &bytesWritten);
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
if (bytesWritten != bytes)
|
||||
{
|
||||
result = Result::fail ("KeyvaDB: bytesWritten != bytes");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!result.wasOk ())
|
||||
{
|
||||
String s;
|
||||
s << "KeyvaDB: writeKeyRecord failed in " << state->keyFile.getFile ().getFileName ();
|
||||
Throw (std::runtime_error (s.toStdString ()));
|
||||
}
|
||||
}
|
||||
|
||||
// Append a value to the value file.
|
||||
// VFALCO TODO return a Result
|
||||
void writeValue (void const* const value, ByteSize valueBytes, SharedState::WriteAccess& state)
|
||||
{
|
||||
Result result = state->valFile.setPosition (state->valFileSize);
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
size_t bytesWritten;
|
||||
|
||||
result = state->valFile.write (value, valueBytes, &bytesWritten);
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
if (bytesWritten == valueBytes)
|
||||
{
|
||||
state->valFileSize += valueBytes;
|
||||
}
|
||||
else
|
||||
{
|
||||
result = Result::fail ("KeyvaDB: bytesWritten != valueBytes");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (! result.wasOk ())
|
||||
{
|
||||
String s;
|
||||
s << "KeyvaDB: writeValue failed in " << state->valFile.getFile ().getFileName ();
|
||||
Throw (std::runtime_error (s.toStdString ()));
|
||||
}
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
struct FindResult : Uncopyable
|
||||
{
|
||||
FindResult (void* const keyStorage)
|
||||
: keyRecord (keyStorage)
|
||||
{
|
||||
}
|
||||
|
||||
int compare; // result of the last comparison
|
||||
KeyIndex keyIndex; // index we looked at last
|
||||
//KeyBlock keyBlock; // KeyBlock we looked at last
|
||||
KeyRecord keyRecord; // KeyRecord we looked at last
|
||||
};
|
||||
|
||||
// Find a key. If the key doesn't exist, enough information
|
||||
// is left behind in the result to perform an insertion.
|
||||
//
|
||||
// Returns true if the key was found.
|
||||
//
|
||||
bool find (FindResult* findResult, void const* key, SharedState::WriteAccess& state)
|
||||
{
|
||||
// Not okay to call this with an empty key file!
|
||||
bassert (state->hasKeys ());
|
||||
|
||||
// This performs a standard binary search
|
||||
|
||||
findResult->keyIndex = 1;
|
||||
|
||||
do
|
||||
{
|
||||
readKeyRecord (&findResult->keyRecord, findResult->keyIndex, state);
|
||||
|
||||
findResult->compare = memcmp (key, findResult->keyRecord.key, m_keyBytes);
|
||||
|
||||
if (findResult->compare < 0)
|
||||
{
|
||||
if (findResult->keyRecord.leftIndex != 0)
|
||||
{
|
||||
// Go left
|
||||
findResult->keyIndex = findResult->keyRecord.leftIndex;
|
||||
}
|
||||
else
|
||||
{
|
||||
// Insert position is to the left
|
||||
break;
|
||||
}
|
||||
}
|
||||
else if (findResult->compare > 0)
|
||||
{
|
||||
if (findResult->keyRecord.rightIndex != 0)
|
||||
{
|
||||
// Go right
|
||||
findResult->keyIndex = findResult->keyRecord.rightIndex;
|
||||
}
|
||||
else
|
||||
{
|
||||
// Insert position is to the right
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
while (findResult->compare != 0);
|
||||
|
||||
return findResult->compare == 0;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
bool get (void const* key, GetCallback* callback)
|
||||
{
|
||||
FindResult findResult (m_keyStorage.getData ());
|
||||
|
||||
SharedState::WriteAccess state (m_state);
|
||||
|
||||
bool found = false;
|
||||
|
||||
if (state->hasKeys ())
|
||||
{
|
||||
found = find (&findResult, key, state);
|
||||
|
||||
if (found)
|
||||
{
|
||||
void* const destStorage = callback->getStorageForValue (findResult.keyRecord.valSize);
|
||||
|
||||
Result result = state->valFile.setPosition (findResult.keyRecord.valFileOffset);
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
size_t bytesRead;
|
||||
|
||||
result = state->valFile.read (destStorage, findResult.keyRecord.valSize, &bytesRead);
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
if (bytesRead != findResult.keyRecord.valSize)
|
||||
{
|
||||
result = Result::fail ("KeyvaDB: bytesRead != valSize");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (! result.wasOk ())
|
||||
{
|
||||
String s;
|
||||
s << "KeyvaDB: get in " << state->valFile.getFile ().getFileName ();
|
||||
Throw (std::runtime_error (s.toStdString ()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return found;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
// Write a key value pair. Does nothing if the key exists.
|
||||
void put (void const* key, void const* value, int valueBytes)
|
||||
{
|
||||
bassert (valueBytes > 0);
|
||||
|
||||
SharedState::WriteAccess state (m_state);
|
||||
|
||||
if (state->hasKeys ())
|
||||
{
|
||||
// Search for the key
|
||||
|
||||
FindResult findResult (m_keyStorage.getData ());
|
||||
|
||||
bool const found = find (&findResult, key, state);
|
||||
|
||||
if (! found )
|
||||
{
|
||||
bassert (findResult.compare != 0);
|
||||
|
||||
// Binary tree insertion.
|
||||
// Link the last key record to the new key
|
||||
{
|
||||
if (findResult.compare < 0)
|
||||
{
|
||||
findResult.keyRecord.leftIndex = state->newKeyIndex;
|
||||
}
|
||||
else
|
||||
{
|
||||
findResult.keyRecord.rightIndex = state->newKeyIndex;
|
||||
}
|
||||
|
||||
writeKeyRecord (findResult.keyRecord, findResult.keyIndex, state, false);
|
||||
}
|
||||
|
||||
// Write the new key
|
||||
{
|
||||
findResult.keyRecord.valFileOffset = state->valFileSize;
|
||||
findResult.keyRecord.valSize = valueBytes;
|
||||
findResult.keyRecord.leftIndex = 0;
|
||||
findResult.keyRecord.rightIndex = 0;
|
||||
|
||||
memcpy (findResult.keyRecord.key, key, m_keyBytes);
|
||||
|
||||
writeKeyRecord (findResult.keyRecord, state->newKeyIndex, state, true);
|
||||
}
|
||||
|
||||
// Key file has grown by one.
|
||||
++state->newKeyIndex;
|
||||
|
||||
// Write the value
|
||||
writeValue (value, valueBytes, state);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Key already exists, do nothing.
|
||||
// We could check to make sure the payloads are the same.
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
//
|
||||
// Write first key
|
||||
//
|
||||
|
||||
KeyRecord keyRecord (m_keyStorage.getData ());
|
||||
|
||||
keyRecord.valFileOffset = state->valFileSize;
|
||||
keyRecord.valSize = valueBytes;
|
||||
keyRecord.leftIndex = 0;
|
||||
keyRecord.rightIndex = 0;
|
||||
|
||||
memcpy (keyRecord.key, key, m_keyBytes);
|
||||
|
||||
writeKeyRecord (keyRecord, state->newKeyIndex, state, true);
|
||||
|
||||
// Key file has grown by one.
|
||||
++state->newKeyIndex;
|
||||
|
||||
//
|
||||
// Write value
|
||||
//
|
||||
|
||||
bassert (state->valFileSize == 0);
|
||||
|
||||
writeValue (value, valueBytes, state);
|
||||
}
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
void flush ()
|
||||
{
|
||||
SharedState::WriteAccess state (m_state);
|
||||
|
||||
flushInternal (state);
|
||||
}
|
||||
|
||||
void flushInternal (SharedState::WriteAccess& state)
|
||||
{
|
||||
state->keyFile.flush ();
|
||||
state->valFile.flush ();
|
||||
}
|
||||
};
|
||||
|
||||
KeyvaDB* KeyvaDB::New (int keyBytes, int keyBlockDepth, File keyPath, File valPath)
|
||||
{
|
||||
return new KeyvaDBImp (keyBytes, keyBlockDepth, keyPath, valPath);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
class KeyvaDBTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
enum
|
||||
{
|
||||
maxPayloadBytes = 8 * 1024
|
||||
};
|
||||
|
||||
KeyvaDBTests () : UnitTest ("KeyvaDB", "ripple")
|
||||
{
|
||||
}
|
||||
|
||||
// Retrieval callback stores the value in a Payload object for comparison
|
||||
struct PayloadGetCallback : KeyvaDB::GetCallback
|
||||
{
|
||||
UnitTestUtilities::Payload payload;
|
||||
|
||||
PayloadGetCallback () : payload (maxPayloadBytes)
|
||||
{
|
||||
}
|
||||
|
||||
void* getStorageForValue (int valueBytes)
|
||||
{
|
||||
bassert (valueBytes <= maxPayloadBytes);
|
||||
|
||||
payload.bytes = valueBytes;
|
||||
|
||||
return payload.data.getData ();
|
||||
}
|
||||
};
|
||||
|
||||
KeyvaDB* createDB (unsigned int keyBytes, File const& path)
|
||||
{
|
||||
File const keyPath = path.withFileExtension (".key");
|
||||
File const valPath = path.withFileExtension (".val");
|
||||
|
||||
return KeyvaDB::New (keyBytes, 1, keyPath, valPath);
|
||||
}
|
||||
|
||||
void deleteDBFiles (File const& path)
|
||||
{
|
||||
File const keyPath = path.withFileExtension (".key");
|
||||
File const valPath = path.withFileExtension (".val");
|
||||
|
||||
keyPath.deleteFile ();
|
||||
valPath.deleteFile ();
|
||||
}
|
||||
|
||||
template <size_t KeyBytes>
|
||||
void testKeySize (unsigned int const maxItems)
|
||||
{
|
||||
using namespace UnitTestUtilities;
|
||||
|
||||
typedef UnsignedInteger <KeyBytes> KeyType;
|
||||
|
||||
int64 const seedValue = 50;
|
||||
|
||||
String s;
|
||||
|
||||
s << "keyBytes=" << String (uint64(KeyBytes)) << ", maxItems=" << String (maxItems);
|
||||
beginTest (s);
|
||||
|
||||
// Set up the key and value files
|
||||
File const path (File::createTempFile (""));
|
||||
|
||||
{
|
||||
// open the db
|
||||
ScopedPointer <KeyvaDB> db (createDB (KeyBytes, path));
|
||||
|
||||
Payload payload (maxPayloadBytes);
|
||||
Payload check (maxPayloadBytes);
|
||||
|
||||
{
|
||||
// Create an array of ascending integers.
|
||||
HeapBlock <unsigned int> items (maxItems);
|
||||
for (unsigned int i = 0; i < maxItems; ++i)
|
||||
items [i] = i;
|
||||
|
||||
// Now shuffle it deterministically.
|
||||
repeatableShuffle (maxItems, items, seedValue);
|
||||
|
||||
// Write all the keys of integers.
|
||||
for (unsigned int i = 0; i < maxItems; ++i)
|
||||
{
|
||||
unsigned int keyIndex = items [i];
|
||||
|
||||
KeyType const key = KeyType::createFromInteger (keyIndex);
|
||||
|
||||
payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue);
|
||||
|
||||
db->put (key.cbegin (), payload.data.getData (), payload.bytes);
|
||||
|
||||
{
|
||||
// VFALCO TODO Check what we just wrote?
|
||||
//db->get (key.cbegin (), check.data.getData (), payload.bytes);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
// Go through all of our keys and try to retrieve them.
|
||||
// since this is done in ascending order, we should get
|
||||
// random seeks at this point.
|
||||
//
|
||||
PayloadGetCallback cb;
|
||||
for (unsigned int keyIndex = 0; keyIndex < maxItems; ++keyIndex)
|
||||
{
|
||||
KeyType const v = KeyType::createFromInteger (keyIndex);
|
||||
|
||||
bool const found = db->get (v.cbegin (), &cb);
|
||||
|
||||
expect (found, "Should be found");
|
||||
|
||||
if (found)
|
||||
{
|
||||
payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue);
|
||||
|
||||
expect (payload == cb.payload, "Should be equal");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
// Re-open the database and confirm the data
|
||||
ScopedPointer <KeyvaDB> db (createDB (KeyBytes, path));
|
||||
|
||||
Payload payload (maxPayloadBytes);
|
||||
Payload check (maxPayloadBytes);
|
||||
|
||||
PayloadGetCallback cb;
|
||||
for (unsigned int keyIndex = 0; keyIndex < maxItems; ++keyIndex)
|
||||
{
|
||||
KeyType const v = KeyType::createFromInteger (keyIndex);
|
||||
|
||||
bool const found = db->get (v.cbegin (), &cb);
|
||||
|
||||
expect (found, "Should be found");
|
||||
|
||||
if (found)
|
||||
{
|
||||
payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue);
|
||||
|
||||
expect (payload == cb.payload, "Should be equal");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
deleteDBFiles (path);
|
||||
}
|
||||
|
||||
void runTest ()
|
||||
{
|
||||
testKeySize <4> (500);
|
||||
testKeySize <32> (4000);
|
||||
}
|
||||
};
|
||||
|
||||
static KeyvaDBTests keyvaDBTests;
|
||||
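For reference, the key-file layout implied by the implementation above is a fixed 1000-byte master header followed by fixed-size key records, each holding the value's file offset, its size, the left and right child indices, and the key bytes themselves. A small sketch of the same size and offset arithmetic, assuming the 64-bit FileOffset and 32-bit ByteSize/KeyIndex typedefs used above:

    #include <cstddef>
    #include <cstdint>

    std::size_t keyRecordBytes (std::size_t keyBytes)
    {
        // FileOffset + ByteSize + leftIndex + rightIndex + key payload
        return sizeof (std::int64_t) + sizeof (std::uint32_t)
             + 2 * sizeof (std::int32_t) + keyBytes;
    }

    std::int64_t keyRecordOffset (std::int32_t keyIndex, std::size_t keyBytes)
    {
        std::size_t const masterHeaderBytes = 1000;   // fixed header, per the enum above
        return masterHeaderBytes + std::int64_t (keyIndex - 1) * keyRecordBytes (keyBytes);
    }

    // Example: with 32-byte keys a record occupies 8 + 4 + 4 + 4 + 32 = 52 bytes,
    // so key index 1 starts at offset 1000 and key index 2 at offset 1052.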
55
Subtrees/beast/modules/beast_db/keyvalue/beast_KeyvaDB.h
Normal file
@@ -0,0 +1,55 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of Beast: https://github.com/vinniefalco/Beast
|
||||
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef BEAST_KEYVADB_H_INCLUDED
|
||||
#define BEAST_KEYVADB_H_INCLUDED
|
||||
|
||||
/** Specialized Key/value database
|
||||
|
||||
Once written, a value can never be modified.
|
||||
*/
|
||||
class KeyvaDB : LeakChecked <KeyvaDB>
|
||||
{
|
||||
public:
|
||||
class GetCallback
|
||||
{
|
||||
public:
|
||||
virtual void* getStorageForValue (int valueBytes) = 0;
|
||||
};
|
||||
|
||||
static KeyvaDB* New (int keyBytes,
|
||||
int keyBlockDepth,
|
||||
File keyPath,
|
||||
File valPath);
|
||||
|
||||
virtual ~KeyvaDB () { }
|
||||
|
||||
// VFALCO TODO Make the return value a Result so we can
|
||||
// detect corruption and errors!
|
||||
//
|
||||
virtual bool get (void const* key, GetCallback* callback) = 0;
|
||||
|
||||
// VFALCO TODO Use Result for return value
|
||||
//
|
||||
virtual void put (void const* key, void const* value, int valueBytes) = 0;
|
||||
|
||||
virtual void flush () = 0;
|
||||
};
|
||||
|
||||
#endif
|
||||
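Putting the interface above together, a caller creates the database with KeyvaDB::New, writes with put, and reads through a GetCallback whose getStorageForValue supplies the buffer the value is copied into (the unit test's PayloadGetCallback does exactly this). A minimal sketch, assuming the beast File and ScopedPointer types used elsewhere in this diff; error handling is elided:

    #include <vector>

    struct VectorCallback : KeyvaDB::GetCallback
    {
        std::vector <char> value;

        void* getStorageForValue (int valueBytes)
        {
            value.resize (valueBytes);     // caller owns the storage
            return &value.front ();
        }
    };

    void example (File keyPath, File valPath,
                  void const* key, void const* data, int bytes)
    {
        // 32-byte keys, key block depth 1, as in the unit test earlier in this diff
        ScopedPointer <KeyvaDB> db (KeyvaDB::New (32, 1, keyPath, valPath));

        db->put (key, data, bytes);        // silently ignored if the key already exists

        VectorCallback cb;
        if (db->get (key, &cb))
        {
            // cb.value now holds the stored payload
        }

        db->flush ();
    }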
@@ -3,7 +3,9 @@
|
||||
########################################################################
|
||||
# Configuration. The compiler options must enable threaded compilation.
|
||||
#
|
||||
# Preprocessor macros (for CPPFLAGS) of interest:
|
||||
# Preprocessor macros (for CPPFLAGS) of interest...
|
||||
# Note that the defaults should already be correct for most
|
||||
# platforms; you should not need to change any of these:
|
||||
#
|
||||
# To compile successfully if the default does not:
|
||||
# - MDB_USE_POSIX_SEM (enabled by default on BSD, Apple)
|
||||
@@ -11,7 +13,7 @@
|
||||
# semaphores and shared mutexes have different behaviors and
|
||||
# different problems, see the Caveats section in lmdb.h.
|
||||
#
|
||||
# For best performence or to compile successfully:
|
||||
# For best performance or to compile successfully:
|
||||
# - MDB_DSYNC = "O_DSYNC" (default) or "O_SYNC" (less efficient)
|
||||
# If O_DSYNC is undefined but exists in /usr/include,
|
||||
# preferably set some compiler flag to get the definition.
|
||||
@@ -25,14 +27,13 @@
|
||||
# Data format:
|
||||
# - MDB_MAXKEYSIZE
|
||||
# Controls data packing and limits, see mdb.c.
|
||||
#
|
||||
# Debugging:
|
||||
# - MDB_DEBUG, MDB_PARANOID.
|
||||
# You might need to change this if the default size is too small.
|
||||
#
|
||||
CC = gcc
|
||||
W = -W -Wall -Wno-unused-parameter -Wbad-function-cast
|
||||
THREADS = -pthread
|
||||
OPT = -O2 -g
|
||||
CFLAGS = -pthread $(OPT) $(W) $(XCFLAGS)
|
||||
CFLAGS = $(THREADS) $(OPT) $(W) $(XCFLAGS)
|
||||
LDLIBS =
|
||||
SOLIBS =
|
||||
prefix = /usr/local
|
||||
@@ -166,7 +166,7 @@ typedef int mdb_filehandle_t;
|
||||
/** Library minor version */
|
||||
#define MDB_VERSION_MINOR 9
|
||||
/** Library patch version */
|
||||
#define MDB_VERSION_PATCH 6
|
||||
#define MDB_VERSION_PATCH 7
|
||||
|
||||
/** Combine args a,b,c into a single integer for easy version comparisons */
|
||||
#define MDB_VERINT(a,b,c) (((a) << 24) | ((b) << 16) | (c))
|
||||
@@ -889,6 +889,15 @@ int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *d
|
||||
*/
|
||||
int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat);
|
||||
|
||||
/** @brief Retrieve the DB flags for a database handle.
|
||||
*
|
||||
* @param[in] env An environment handle returned by #mdb_env_create()
|
||||
* @param[in] dbi A database handle returned by #mdb_dbi_open()
|
||||
* @param[out] flags Address where the flags will be returned.
|
||||
* @return A non-zero error value on failure and 0 on success.
|
||||
*/
|
||||
int mdb_dbi_flags(MDB_env *env, MDB_dbi dbi, unsigned int *flags);
|
||||
|
||||
/** @brief Close a database handle.
|
||||
*
|
||||
* This call is not mutex protected. Handles should only be closed by
|
||||
@@ -1289,6 +1298,31 @@ int mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b);
|
||||
* @return < 0 if a < b, 0 if a == b, > 0 if a > b
|
||||
*/
|
||||
int mdb_dcmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b);
|
||||
|
||||
/** @brief A callback function used to print a message from the library.
|
||||
*
|
||||
* @param[in] msg The string to be printed.
|
||||
* @param[in] ctx An arbitrary context pointer for the callback.
|
||||
* @return < 0 on failure, 0 on success.
|
||||
*/
|
||||
typedef int (MDB_msg_func)(const char *msg, void *ctx);
|
||||
|
||||
/** @brief Dump the entries in the reader lock table.
|
||||
*
|
||||
* @param[in] env An environment handle returned by #mdb_env_create()
|
||||
* @param[in] func A #MDB_msg_func function
|
||||
* @param[in] ctx Anything the message function needs
|
||||
* @return < 0 on failure, 0 on success.
|
||||
*/
|
||||
int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx);
|
||||
|
||||
/** @brief Check for stale entries in the reader lock table.
|
||||
*
|
||||
* @param[in] env An environment handle returned by #mdb_env_create()
|
||||
* @param[out] dead Number of stale slots that were cleared
|
||||
* @return 0 on success, non-zero on failure.
|
||||
*/
|
||||
int mdb_reader_check(MDB_env *env, int *dead);
|
||||
/** @} */
|
||||
|
||||
#ifdef __cplusplus
|
||||
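The two reader-table entry points declared above take an environment handle plus either a message callback or an output count; the mdb_stat changes further down in this diff use them to implement the new -r/-rr options. A minimal sketch of calling them directly, with environment setup and error checks elided:

    #include <stdio.h>
    #include "lmdb.h"

    /* Matches the MDB_msg_func typedef declared above. */
    static int print_msg (const char *msg, void *ctx)
    {
        return fputs (msg, (FILE *) ctx) < 0 ? -1 : 0;
    }

    void dump_and_clear_readers (MDB_env *env)
    {
        /* Print one line per slot in the reader lock table. */
        mdb_reader_list (env, print_msg, stdout);

        /* Clear slots belonging to readers that no longer exist. */
        int dead = 0;
        mdb_reader_check (env, &dead);
        printf ("%d stale readers cleared.\n", dead);
    }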
File diff suppressed because it is too large
@@ -13,6 +13,8 @@ mdb_stat \- LMDB environment status tool
|
||||
[\c
|
||||
.BR \-n ]
|
||||
[\c
|
||||
.BR \-r [ r ]]
|
||||
[\c
|
||||
.BR \-a \ |
|
||||
.BI \-s \ subdb\fR]
|
||||
.SH DESCRIPTION
|
||||
@@ -32,6 +34,16 @@ If \fB\-fff\fP is given, display the full list of page IDs in the freelist.
|
||||
.BR \-n
|
||||
Display the status of an LMDB database which does not use subdirectories.
|
||||
.TP
|
||||
.BR \-r
|
||||
Display information about the environment reader table.
|
||||
Shows the process ID, thread ID, and transaction ID for each active
|
||||
reader slot. The process ID and transaction ID are in decimal, the
|
||||
thread ID is in hexadecimal. The transaction ID is displayed as "-"
|
||||
if the reader does not currently have a read transaction open.
|
||||
If \fB\-rr\fP is given, check for stale entries in the reader
|
||||
table and clear them. The reader table will be printed again
|
||||
after the check is performed.
|
||||
.TP
|
||||
.BR \-a
|
||||
Display the status of all of the subdatabases in the environment.
|
||||
.TP
|
||||
@@ -31,7 +31,7 @@ static void prstat(MDB_stat *ms)
|
||||
|
||||
static void usage(char *prog)
|
||||
{
|
||||
fprintf(stderr, "usage: %s dbpath [-e] [-f[f[f]]] [-n] [-a|-s subdb]\n", prog);
|
||||
fprintf(stderr, "usage: %s dbpath [-n] [-e] [-r[r]] [-f[f[f]]] [-a|-s subdb]\n", prog);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@ int main(int argc, char *argv[])
|
||||
char *prog = argv[0];
|
||||
char *envname;
|
||||
char *subname = NULL;
|
||||
int alldbs = 0, envinfo = 0, envflags = 0, freinfo = 0;
|
||||
int alldbs = 0, envinfo = 0, envflags = 0, freinfo = 0, rdrinfo = 0;
|
||||
|
||||
if (argc < 2) {
|
||||
usage(prog);
|
||||
@@ -56,10 +56,11 @@ int main(int argc, char *argv[])
|
||||
* -s: print stat of only the named subDB
|
||||
* -e: print env info
|
||||
* -f: print freelist info
|
||||
* -r: print reader info
|
||||
* -n: use NOSUBDIR flag on env_open
|
||||
* (default) print stat of only the main DB
|
||||
*/
|
||||
while ((i = getopt(argc, argv, "aefns:")) != EOF) {
|
||||
while ((i = getopt(argc, argv, "aefnrs:")) != EOF) {
|
||||
switch(i) {
|
||||
case 'a':
|
||||
if (subname)
|
||||
@@ -75,6 +76,9 @@ int main(int argc, char *argv[])
|
||||
case 'n':
|
||||
envflags |= MDB_NOSUBDIR;
|
||||
break;
|
||||
case 'r':
|
||||
rdrinfo++;
|
||||
break;
|
||||
case 's':
|
||||
if (alldbs)
|
||||
usage(prog);
|
||||
@@ -100,11 +104,6 @@ int main(int argc, char *argv[])
|
||||
printf("mdb_env_open failed, error %d %s\n", rc, mdb_strerror(rc));
|
||||
goto env_close;
|
||||
}
|
||||
rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
|
||||
if (rc) {
|
||||
printf("mdb_txn_begin failed, error %d %s\n", rc, mdb_strerror(rc));
|
||||
goto env_close;
|
||||
}
|
||||
|
||||
if (envinfo) {
|
||||
rc = mdb_env_stat(env, &mst);
|
||||
@@ -120,6 +119,25 @@ int main(int argc, char *argv[])
|
||||
printf(" Number of readers used: %u\n", mei.me_numreaders);
|
||||
}
|
||||
|
||||
if (rdrinfo) {
|
||||
printf("Reader Table Status\n");
|
||||
rc = mdb_reader_list(env, (MDB_msg_func *)fputs, stdout);
|
||||
if (rdrinfo > 1) {
|
||||
int dead;
|
||||
mdb_reader_check(env, &dead);
|
||||
printf(" %d stale readers cleared.\n", dead);
|
||||
rc = mdb_reader_list(env, (MDB_msg_func *)fputs, stdout);
|
||||
}
|
||||
if (!(subname || alldbs || freinfo))
|
||||
goto env_close;
|
||||
}
|
||||
|
||||
rc = mdb_txn_begin(env, NULL, MDB_RDONLY, &txn);
|
||||
if (rc) {
|
||||
printf("mdb_txn_begin failed, error %d %s\n", rc, mdb_strerror(rc));
|
||||
goto env_close;
|
||||
}
|
||||
|
||||
if (freinfo) {
|
||||
MDB_cursor *cursor;
|
||||
MDB_val key, data;
|
||||
@@ -31,8 +31,7 @@
|
||||
*/
|
||||
#define CMP(x,y) ( (x) < (y) ? -1 : (x) > (y) )
|
||||
|
||||
#if 0 /* superseded by append/sort */
|
||||
static unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id )
|
||||
unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id )
|
||||
{
|
||||
/*
|
||||
* binary search of id in ids
|
||||
@@ -67,6 +66,7 @@ static unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id )
|
||||
return cursor;
|
||||
}
|
||||
|
||||
#if 0 /* superseded by append/sort */
|
||||
int mdb_midl_insert( MDB_IDL ids, MDB_ID id )
|
||||
{
|
||||
unsigned x, i;
|
||||
@@ -74,14 +74,12 @@ typedef MDB_ID *MDB_IDL;
|
||||
xidl[xlen] = (id); \
|
||||
} while (0)
|
||||
|
||||
#if 0 /* superseded by append/sort */
|
||||
/** Insert an ID into an IDL.
|
||||
* @param[in,out] ids The IDL to insert into.
|
||||
* @param[in] id The ID to insert.
|
||||
* @return 0 on success, -1 if ID was already present, -2 on error.
|
||||
/** Search for an ID in an IDL.
|
||||
* @param[in] ids The IDL to search.
|
||||
* @param[in] id The ID to search for.
|
||||
* @return The index of the first ID greater than or equal to \b id.
|
||||
*/
|
||||
int mdb_midl_insert( MDB_IDL ids, MDB_ID id );
|
||||
#endif
|
||||
unsigned mdb_midl_search( MDB_IDL ids, MDB_ID id );
|
||||
|
||||
/** Allocate an IDL.
|
||||
* Allocates memory for an IDL of the given size.
|
||||
36
TODO.txt
@@ -2,16 +2,40 @@
|
||||
RIPPLE TODO
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
- Examples for different backend key/value config settings
|
||||
Items marked '*' can be handled by third parties.
|
||||
|
||||
- Unit Test attention
|
||||
|
||||
- NodeStore backend unit test
|
||||
|
||||
- Validations unit test
|
||||
Vinnie's Short List (Changes day to day)
|
||||
- Make theConfig a SharedSingleton to prevent leak warnings
|
||||
- Add fast backend to the unit test
|
||||
- Refactor Section code into ConfigFile
|
||||
- Change NodeStore config file format to multiline key/value pairs
|
||||
- Improved Mutex to track deadlocks
|
||||
- Memory NodeStore::Backend for unit tests [*]
|
||||
- Finish unit tests and code for Validators
|
||||
- Import beast::db and use it in SQliteBackend
|
||||
- Convert some Ripple boost unit tests to Beast. [*]
|
||||
- Move all code into modules/
|
||||
- Work on KeyvaDB
|
||||
[*] These can be handled by external developers
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
- Raise the warning level and fix everything
|
||||
|
||||
* Restyle all the macros in ripple_ConfigSection.h
|
||||
|
||||
* Replace all throw with beast::Throw
|
||||
Only in the ripple sources, not in Subtrees/ or protobuf or websocket
|
||||
|
||||
- Replace base_uint and uintXXX with UnsignedInteger
|
||||
* Need to specialize UnsignedInteger to work efficiently with 4 and 8 byte
|
||||
multiples of the size.
|
||||
|
||||
- Rewrite boost program_options in Beast
|
||||
|
||||
- Validations unit test
|
||||
|
||||
- Replace endian conversion calls with beast calls:
|
||||
htobe32, be32toh, ntohl, etc...
|
||||
Start by removing the system headers which provide these routines, if possible
|
||||
@@ -118,8 +142,6 @@ RIPPLE TODO
|
||||
|
||||
- Make LevelDB and Ripple code work with both Unicode and non-Unicode Windows APIs
|
||||
|
||||
- Raise the warning level and fix everything
|
||||
|
||||
- Go searching through VFALCO notes and fix everything
|
||||
|
||||
- Deal with function-level statics used for SqliteDatabase (like in
|
||||
|
||||
@@ -283,32 +283,15 @@ const char* WalletDBInit[] =
|
||||
int WalletDBCount = NUMBER (WalletDBInit);
|
||||
|
||||
// Hash node database holds nodes indexed by hash
|
||||
const char* HashNodeDBInit[] =
|
||||
{
|
||||
"PRAGMA synchronous=NORMAL;",
|
||||
"PRAGMA journal_mode=WAL;",
|
||||
"PRAGMA journal_size_limit=1582080;",
|
||||
|
||||
#if (ULONG_MAX > UINT_MAX) && !defined (NO_SQLITE_MMAP)
|
||||
"PRAGMA mmap_size=171798691840;",
|
||||
#endif
|
||||
|
||||
"BEGIN TRANSACTION;",
|
||||
|
||||
"CREATE TABLE CommittedObjects ( \
|
||||
Hash CHARACTER(64) PRIMARY KEY, \
|
||||
ObjType CHAR(1) NOT NULL, \
|
||||
LedgerIndex BIGINT UNSIGNED, \
|
||||
Object BLOB \
|
||||
);",
|
||||
|
||||
"END TRANSACTION;"
|
||||
};
|
||||
// VFALCO TODO Remove this since it looks unused
|
||||
/*
|
||||
|
||||
int HashNodeDBCount = NUMBER (HashNodeDBInit);
|
||||
*/
|
||||
|
||||
// Net node database holds nodes seen on the network
|
||||
// XXX Not really used needs replacement.
|
||||
/*
|
||||
const char* NetNodeDBInit[] =
|
||||
{
|
||||
"CREATE TABLE KnownNodes ( \
|
||||
@@ -320,7 +303,10 @@ const char* NetNodeDBInit[] =
|
||||
};
|
||||
|
||||
int NetNodeDBCount = NUMBER (NetNodeDBInit);
|
||||
*/
|
||||
|
||||
// This appears to be unused
|
||||
/*
|
||||
const char* PathFindDBInit[] =
|
||||
{
|
||||
"PRAGMA synchronous = OFF; ",
|
||||
@@ -353,5 +339,5 @@ const char* PathFindDBInit[] =
|
||||
};
|
||||
|
||||
int PathFindDBCount = NUMBER (PathFindDBInit);
|
||||
*/
|
||||
|
||||
// vim:ts=4
|
||||
|
||||
@@ -12,19 +12,11 @@ extern const char* RpcDBInit[];
|
||||
extern const char* TxnDBInit[];
|
||||
extern const char* LedgerDBInit[];
|
||||
extern const char* WalletDBInit[];
|
||||
extern const char* HashNodeDBInit[];
|
||||
|
||||
// VFALCO TODO Figure out what these counts are for
|
||||
extern int RpcDBCount;
|
||||
extern int TxnDBCount;
|
||||
extern int LedgerDBCount;
|
||||
extern int WalletDBCount;
|
||||
extern int HashNodeDBCount;
|
||||
|
||||
// VFALCO TODO Seems these two aren't used so delete EVERYTHING.
|
||||
extern const char* NetNodeDBInit[];
|
||||
extern const char* PathFindDBInit[];
|
||||
extern int NetNodeDBCount;
|
||||
extern int PathFindDBCount;
|
||||
|
||||
#endif
|
||||
|
||||
@@ -529,10 +529,12 @@ void Ledger::saveAcceptedLedger (Job&, bool fromConsensus)
|
||||
assert (getTransHash () == mTransactionMap->getHash ());
|
||||
|
||||
// Save the ledger header in the hashed object store
|
||||
{
|
||||
Serializer s (128);
|
||||
s.add32 (HashPrefix::ledgerMaster);
|
||||
addRaw (s);
|
||||
getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, s.peekData (), mHash);
|
||||
getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, s.modData (), mHash);
|
||||
}
|
||||
|
||||
AcceptedLedger::pointer aLedger = AcceptedLedger::makeAcceptedLedger (shared_from_this ());
|
||||
|
||||
|
||||
@@ -48,7 +48,7 @@ bool InboundLedger::tryLocal ()
|
||||
if (!mHaveBase)
|
||||
{
|
||||
// Nothing we can do without the ledger base
|
||||
NodeObject::pointer node = getApp().getNodeStore ().retrieve (mHash);
|
||||
NodeObject::pointer node = getApp().getNodeStore ().fetch (mHash);
|
||||
|
||||
if (!node)
|
||||
{
|
||||
@@ -672,7 +672,7 @@ bool InboundLedger::takeBase (const std::string& data) // data must not have has
|
||||
Serializer s (data.size () + 4);
|
||||
s.add32 (HashPrefix::ledgerMaster);
|
||||
s.addRaw (data);
|
||||
getApp().getNodeStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.peekData (), mHash);
|
||||
getApp().getNodeStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.modData (), mHash);
|
||||
|
||||
progress ();
|
||||
|
||||
|
||||
@@ -6,118 +6,208 @@
|
||||
|
||||
#if RIPPLE_HYPERLEVELDB_AVAILABLE
|
||||
|
||||
class HyperLevelDBBackendFactory::Backend : public NodeStore::Backend
|
||||
class HyperLevelDBBackendFactory::Backend
|
||||
: public NodeStore::Backend
|
||||
, public NodeStore::BatchWriter::Callback
|
||||
, LeakChecked <HyperLevelDBBackendFactory::Backend>
|
||||
{
|
||||
public:
|
||||
Backend (StringPairArray const& keyValues)
|
||||
: mName(keyValues ["path"].toStdString ())
|
||||
, mDB(NULL)
|
||||
typedef RecycledObjectPool <std::string> StringPool;
|
||||
|
||||
Backend (size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler)
|
||||
: m_keyBytes (keyBytes)
|
||||
, m_scheduler (scheduler)
|
||||
, m_batch (*this, scheduler)
|
||||
, m_name (keyValues ["path"].toStdString ())
|
||||
{
|
||||
if (mName.empty())
|
||||
throw std::runtime_error ("Missing path in LevelDB backend");
|
||||
if (m_name.empty ())
|
||||
Throw (std::runtime_error ("Missing path in LevelDB backend"));
|
||||
|
||||
hyperleveldb::Options options;
|
||||
options.create_if_missing = true;
|
||||
|
||||
if (keyValues["cache_mb"].isEmpty())
|
||||
if (keyValues ["cache_mb"].isEmpty ())
|
||||
{
|
||||
options.block_cache = hyperleveldb::NewLRUCache (theConfig.getSize (siHashNodeDBCache) * 1024 * 1024);
|
||||
}
|
||||
else
|
||||
{
|
||||
options.block_cache = hyperleveldb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L);
|
||||
}
|
||||
|
||||
if (keyValues["filter_bits"].isEmpty())
|
||||
if (keyValues ["filter_bits"].isEmpty())
|
||||
{
|
||||
if (theConfig.NODE_SIZE >= 2)
|
||||
options.filter_policy = hyperleveldb::NewBloomFilterPolicy (10);
|
||||
}
|
||||
else if (keyValues["filter_bits"].getIntValue() != 0)
|
||||
options.filter_policy = hyperleveldb::NewBloomFilterPolicy (keyValues["filter_bits"].getIntValue());
|
||||
else if (keyValues ["filter_bits"].getIntValue() != 0)
|
||||
{
|
||||
options.filter_policy = hyperleveldb::NewBloomFilterPolicy (keyValues ["filter_bits"].getIntValue ());
|
||||
}
|
||||
|
||||
if (!keyValues["open_files"].isEmpty())
|
||||
options.max_open_files = keyValues["open_files"].getIntValue();
|
||||
if (! keyValues["open_files"].isEmpty ())
|
||||
{
|
||||
options.max_open_files = keyValues ["open_files"].getIntValue();
|
||||
}
|
||||
|
||||
hyperleveldb::Status status = hyperleveldb::DB::Open (options, mName, &mDB);
|
||||
if (!status.ok () || !mDB)
|
||||
throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
|
||||
hyperleveldb::DB* db = nullptr;
|
||||
hyperleveldb::Status status = hyperleveldb::DB::Open (options, m_name, &db);
|
||||
if (!status.ok () || !db)
|
||||
Throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
|
||||
|
||||
m_db = db;
|
||||
}
|
||||
|
||||
~Backend ()
|
||||
{
|
||||
delete mDB;
|
||||
}
|
||||
|
||||
std::string getDataBaseName()
|
||||
std::string getName()
|
||||
{
|
||||
return mName;
|
||||
return m_name;
|
||||
}
|
||||
|
||||
bool bulkStore (const std::vector< NodeObject::pointer >& objs)
|
||||
{
|
||||
hyperleveldb::WriteBatch batch;
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
BOOST_FOREACH (NodeObject::ref obj, objs)
|
||||
Status fetch (void const* key, NodeObject::Ptr* pObject)
|
||||
{
|
||||
Blob blob (toBlob (obj));
|
||||
batch.Put (
|
||||
hyperleveldb::Slice (reinterpret_cast<char const*>(obj->getHash ().begin ()), 256 / 8),
|
||||
hyperleveldb::Slice (reinterpret_cast<char const*>(&blob.front ()), blob.size ()));
|
||||
pObject->reset ();
|
||||
|
||||
Status status (ok);
|
||||
|
||||
hyperleveldb::ReadOptions const options;
|
||||
hyperleveldb::Slice const slice (static_cast <char const*> (key), m_keyBytes);
|
||||
|
||||
{
|
||||
// These are reused std::string objects,
|
||||
// required for leveldb's funky interface.
|
||||
//
|
||||
StringPool::ScopedItem item (m_stringPool);
|
||||
std::string& string = item.getObject ();
|
||||
|
||||
hyperleveldb::Status getStatus = m_db->Get (options, slice, &string);
|
||||
|
||||
if (getStatus.ok ())
|
||||
{
|
||||
NodeStore::DecodedBlob decoded (key, string.data (), string.size ());
|
||||
|
||||
if (decoded.wasOk ())
|
||||
{
|
||||
*pObject = decoded.createObject ();
|
||||
}
|
||||
else
|
||||
{
|
||||
// Decoding failed, probably corrupted!
|
||||
//
|
||||
status = dataCorrupt;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (getStatus.IsCorruption ())
|
||||
{
|
||||
status = dataCorrupt;
|
||||
}
|
||||
else if (getStatus.IsNotFound ())
|
||||
{
|
||||
status = notFound;
|
||||
}
|
||||
else
|
||||
{
|
||||
status = unknown;
|
||||
}
|
||||
}
|
||||
return mDB->Write (hyperleveldb::WriteOptions (), &batch).ok ();
|
||||
}
|
||||
|
||||
NodeObject::pointer retrieve (uint256 const& hash)
|
||||
{
|
||||
std::string sData;
|
||||
if (!mDB->Get (hyperleveldb::ReadOptions (),
|
||||
hyperleveldb::Slice (reinterpret_cast<char const*>(hash.begin ()), 256 / 8), &sData).ok ())
|
||||
{
|
||||
return NodeObject::pointer();
|
||||
}
|
||||
return fromBinary(hash, &sData[0], sData.size ());
|
||||
return status;
|
||||
}
|
||||
|
||||
void visitAll (FUNCTION_TYPE<void (NodeObject::pointer)> func)
|
||||
void store (NodeObject::ref object)
|
||||
{
|
||||
hyperleveldb::Iterator* it = mDB->NewIterator (hyperleveldb::ReadOptions ());
|
||||
m_batch.store (object);
|
||||
}
|
||||
|
||||
void storeBatch (NodeStore::Batch const& batch)
|
||||
{
|
||||
hyperleveldb::WriteBatch wb;
|
||||
|
||||
{
|
||||
NodeStore::EncodedBlob::Pool::ScopedItem item (m_blobPool);
|
||||
|
||||
BOOST_FOREACH (NodeObject::ref object, batch)
|
||||
{
|
||||
item.getObject ().prepare (object);
|
||||
|
||||
wb.Put (
|
||||
hyperleveldb::Slice (reinterpret_cast <char const*> (
|
||||
item.getObject ().getKey ()), m_keyBytes),
|
||||
hyperleveldb::Slice (reinterpret_cast <char const*> (
|
||||
item.getObject ().getData ()), item.getObject ().getSize ()));
|
||||
}
|
||||
}
|
||||
|
||||
hyperleveldb::WriteOptions const options;
|
||||
|
||||
m_db->Write (options, &wb).ok ();
|
||||
}
|
||||
|
||||
void visitAll (VisitCallback& callback)
|
||||
{
|
||||
hyperleveldb::ReadOptions const options;
|
||||
|
||||
ScopedPointer <hyperleveldb::Iterator> it (m_db->NewIterator (options));
|
||||
|
||||
for (it->SeekToFirst (); it->Valid (); it->Next ())
|
||||
{
|
||||
if (it->key ().size () == 256 / 8)
|
||||
if (it->key ().size () == m_keyBytes)
|
||||
{
|
||||
uint256 hash;
|
||||
memcpy(hash.begin(), it->key ().data(), 256 / 8);
|
||||
func (fromBinary (hash, it->value ().data (), it->value ().size ()));
|
||||
}
|
||||
}
|
||||
}
|
||||
NodeStore::DecodedBlob decoded (it->key ().data (),
|
||||
it->value ().data (),
|
||||
it->value ().size ());
|
||||
|
||||
Blob toBlob(NodeObject::ref obj)
|
||||
if (decoded.wasOk ())
|
||||
{
|
||||
Blob rawData (9 + obj->getData ().size ());
|
||||
unsigned char* bufPtr = &rawData.front();
|
||||
NodeObject::Ptr object (decoded.createObject ());
|
||||
|
||||
*reinterpret_cast<uint32*> (bufPtr + 0) = ntohl (obj->getIndex ());
|
||||
*reinterpret_cast<uint32*> (bufPtr + 4) = ntohl (obj->getIndex ());
|
||||
* (bufPtr + 8) = static_cast<unsigned char> (obj->getType ());
|
||||
memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ());
|
||||
|
||||
return rawData;
|
||||
callback.visitObject (object);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Uh oh, corrupted data!
|
||||
WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ());
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// VFALCO NOTE What does it mean to find an
|
||||
// incorrectly sized key? Corruption?
|
||||
WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size ();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
NodeObject::pointer fromBinary(uint256 const& hash,
|
||||
char const* data, int size)
|
||||
int getWriteLoad ()
|
||||
{
|
||||
if (size < 9)
|
||||
throw std::runtime_error ("undersized object");
|
||||
return m_batch.getWriteLoad ();
|
||||
}
|
||||
|
||||
uint32 index = htonl (*reinterpret_cast<const uint32*> (data));
|
||||
int htype = data[8];
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
return boost::make_shared<NodeObject> (static_cast<NodeObjectType> (htype), index,
|
||||
data + 9, size - 9, hash);
|
||||
void writeBatch (NodeStore::Batch const& batch)
|
||||
{
|
||||
storeBatch (batch);
|
||||
}
|
||||
|
||||
private:
|
||||
std::string mName;
|
||||
hyperleveldb::DB* mDB;
|
||||
size_t const m_keyBytes;
|
||||
NodeStore::Scheduler& m_scheduler;
|
||||
NodeStore::BatchWriter m_batch;
|
||||
StringPool m_stringPool;
|
||||
NodeStore::EncodedBlob::Pool m_blobPool;
|
||||
std::string m_name;
|
||||
ScopedPointer <hyperleveldb::DB> m_db;
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
@@ -142,9 +232,12 @@ String HyperLevelDBBackendFactory::getName () const
    return "HyperLevelDB";
}

NodeStore::Backend* HyperLevelDBBackendFactory::createInstance (StringPairArray const& keyValues)
NodeStore::Backend* HyperLevelDBBackendFactory::createInstance (
    size_t keyBytes,
    StringPairArray const& keyValues,
    NodeStore::Scheduler& scheduler)
{
    return new HyperLevelDBBackendFactory::Backend (keyValues);
    return new HyperLevelDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
}

//------------------------------------------------------------------------------

@@ -23,7 +23,10 @@ public:
    static HyperLevelDBBackendFactory& getInstance ();

    String getName () const;
    NodeStore::Backend* createInstance (StringPairArray const& keyValues);

    NodeStore::Backend* createInstance (size_t keyBytes,
                                        StringPairArray const& keyValues,
                                        NodeStore::Scheduler& scheduler);
};

#endif

179 modules/ripple_app/node/ripple_KeyvaDBBackendFactory.cpp Normal file
@@ -0,0 +1,179 @@
//------------------------------------------------------------------------------
/*
    Copyright (c) 2011-2013, OpenCoin, Inc.
*/
//==============================================================================

class KeyvaDBBackendFactory::Backend : public NodeStore::Backend
{
private:
    typedef RecycledObjectPool <MemoryBlock> MemoryPool;
    typedef RecycledObjectPool <NodeStore::EncodedBlob> EncodedBlobPool;

public:
    Backend (size_t keyBytes,
             StringPairArray const& keyValues,
             NodeStore::Scheduler& scheduler)
        : m_keyBytes (keyBytes)
        , m_scheduler (scheduler)
        , m_path (keyValues ["path"])
        , m_db (KeyvaDB::New (
            keyBytes,
            3,
            File::getCurrentWorkingDirectory().getChildFile (m_path).withFileExtension ("key"),
            File::getCurrentWorkingDirectory().getChildFile (m_path).withFileExtension ("val")))
    {
    }

    ~Backend ()
    {
    }

    std::string getName ()
    {
        return m_path.toStdString ();
    }

    //--------------------------------------------------------------------------

    Status fetch (void const* key, NodeObject::Ptr* pObject)
    {
        pObject->reset ();

        Status status (ok);

        struct Callback : KeyvaDB::GetCallback
        {
            explicit Callback (MemoryBlock& block)
                : m_block (block)
            {
            }

            void* getStorageForValue (int valueBytes)
            {
                m_size = valueBytes;
                m_block.ensureSize (valueBytes);

                return m_block.getData ();
            }

            void const* getData () const noexcept
            {
                return m_block.getData ();
            }

            size_t getSize () const noexcept
            {
                return m_size;
            }

        private:
            MemoryBlock& m_block;
            size_t m_size;
        };

        MemoryPool::ScopedItem item (m_memoryPool);
        MemoryBlock& block (item.getObject ());

        Callback cb (block);

        // VFALCO TODO Can't we get KeyvaDB to provide a proper status?
        //
        bool const found = m_db->get (key, &cb);

        if (found)
        {
            NodeStore::DecodedBlob decoded (key, cb.getData (), cb.getSize ());

            if (decoded.wasOk ())
            {
                *pObject = decoded.createObject ();

                status = ok;
            }
            else
            {
                status = dataCorrupt;
            }
        }
        else
        {
            status = notFound;
        }

        return status;
    }

    void store (NodeObject::ref object)
    {
        EncodedBlobPool::ScopedItem item (m_blobPool);
        NodeStore::EncodedBlob& encoded (item.getObject ());

        encoded.prepare (object);

        m_db->put (encoded.getKey (), encoded.getData (), encoded.getSize ());
    }

    void storeBatch (NodeStore::Batch const& batch)
    {
        for (int i = 0; i < batch.size (); ++i)
            store (batch [i]);
    }

    void visitAll (VisitCallback& callback)
    {
        // VFALCO TODO Implement this!
        //
        bassertfalse;
        //m_db->visitAll ();
    }

    int getWriteLoad ()
    {
        // we dont do pending writes
        return 0;
    }

    //--------------------------------------------------------------------------

private:
    size_t const m_keyBytes;
    NodeStore::Scheduler& m_scheduler;
    String m_path;
    ScopedPointer <KeyvaDB> m_db;
    MemoryPool m_memoryPool;
    EncodedBlobPool m_blobPool;
};

//------------------------------------------------------------------------------

KeyvaDBBackendFactory::KeyvaDBBackendFactory ()
{
}

KeyvaDBBackendFactory::~KeyvaDBBackendFactory ()
{
}

KeyvaDBBackendFactory& KeyvaDBBackendFactory::getInstance ()
{
    static KeyvaDBBackendFactory instance;

    return instance;
}

String KeyvaDBBackendFactory::getName () const
{
    return "KeyvaDB";
}

NodeStore::Backend* KeyvaDBBackendFactory::createInstance (
    size_t keyBytes,
    StringPairArray const& keyValues,
    NodeStore::Scheduler& scheduler)
{
    return new KeyvaDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
}

//------------------------------------------------------------------------------

30 modules/ripple_app/node/ripple_KeyvaDBBackendFactory.h Normal file
@@ -0,0 +1,30 @@
//------------------------------------------------------------------------------
/*
    Copyright (c) 2011-2013, OpenCoin, Inc.
*/
//==============================================================================

#ifndef RIPPLE_KEYVABACKENDFACTORY_H_INCLUDED
#define RIPPLE_KEYVABACKENDFACTORY_H_INCLUDED

/** Factory to produce KeyvaDB backends for the NodeStore.
*/
class KeyvaDBBackendFactory : public NodeStore::BackendFactory
{
private:
    class Backend;

    KeyvaDBBackendFactory ();
    ~KeyvaDBBackendFactory ();

public:
    static KeyvaDBBackendFactory& getInstance ();

    String getName () const;

    NodeStore::Backend* createInstance (size_t keyBytes,
                                        StringPairArray const& keyValues,
                                        NodeStore::Scheduler& scheduler);
};

#endif
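
The KeyvaDB factory is wired up the same way as the other backend factories in this change: register it with NodeStore, then let createInstance build a Backend from a key size, a key/value parameter set, and a scheduler. A minimal sketch of a hypothetical call site, not part of this commit, assuming NodeObject::keyBytes (32) and the factory/scheduler interfaces introduced elsewhere in this diff:

    // Register the factory, then create a standalone KeyvaDB backend.
    NodeStore::addBackendFactory (KeyvaDBBackendFactory::getInstance ());

    StringPairArray params;
    params.set ("path", "db/nodestore");   // the backend writes <path>.key and <path>.val

    ScopedPointer <NodeStore::Backend> backend (
        KeyvaDBBackendFactory::getInstance ().createInstance (
            NodeObject::keyBytes, params, NodeStore::getSynchronousScheduler ()));
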
@@ -4,23 +4,38 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
class LevelDBBackendFactory::Backend : public NodeStore::Backend
|
||||
class LevelDBBackendFactory::Backend
|
||||
: public NodeStore::Backend
|
||||
, public NodeStore::BatchWriter::Callback
|
||||
, LeakChecked <LevelDBBackendFactory::Backend>
|
||||
{
|
||||
public:
|
||||
Backend (StringPairArray const& keyValues)
|
||||
: mName(keyValues ["path"].toStdString ())
|
||||
, mDB(NULL)
|
||||
typedef RecycledObjectPool <std::string> StringPool;
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
Backend (int keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler)
|
||||
: m_keyBytes (keyBytes)
|
||||
, m_scheduler (scheduler)
|
||||
, m_batch (*this, scheduler)
|
||||
, m_name (keyValues ["path"].toStdString ())
|
||||
{
|
||||
if (mName.empty())
|
||||
throw std::runtime_error ("Missing path in LevelDB backend");
|
||||
if (m_name.empty())
|
||||
Throw (std::runtime_error ("Missing path in LevelDB backend"));
|
||||
|
||||
leveldb::Options options;
|
||||
options.create_if_missing = true;
|
||||
|
||||
if (keyValues["cache_mb"].isEmpty())
|
||||
{
|
||||
options.block_cache = leveldb::NewLRUCache (theConfig.getSize (siHashNodeDBCache) * 1024 * 1024);
|
||||
}
|
||||
else
|
||||
{
|
||||
options.block_cache = leveldb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L);
|
||||
}
|
||||
|
||||
if (keyValues["filter_bits"].isEmpty())
|
||||
{
|
||||
@@ -28,94 +43,171 @@ public:
|
||||
options.filter_policy = leveldb::NewBloomFilterPolicy (10);
|
||||
}
|
||||
else if (keyValues["filter_bits"].getIntValue() != 0)
|
||||
{
|
||||
options.filter_policy = leveldb::NewBloomFilterPolicy (keyValues["filter_bits"].getIntValue());
|
||||
}
|
||||
|
||||
if (!keyValues["open_files"].isEmpty())
|
||||
if (! keyValues["open_files"].isEmpty())
|
||||
{
|
||||
options.max_open_files = keyValues["open_files"].getIntValue();
|
||||
}
|
||||
|
||||
leveldb::Status status = leveldb::DB::Open (options, mName, &mDB);
|
||||
if (!status.ok () || !mDB)
|
||||
throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
|
||||
leveldb::DB* db = nullptr;
|
||||
leveldb::Status status = leveldb::DB::Open (options, m_name, &db);
|
||||
if (!status.ok () || !db)
|
||||
Throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
|
||||
|
||||
m_db = db;
|
||||
}
|
||||
|
||||
~Backend ()
|
||||
{
|
||||
delete mDB;
|
||||
}
|
||||
|
||||
std::string getDataBaseName()
|
||||
std::string getName()
|
||||
{
|
||||
return mName;
|
||||
return m_name;
|
||||
}
|
||||
|
||||
bool bulkStore (const std::vector< NodeObject::pointer >& objs)
|
||||
{
|
||||
leveldb::WriteBatch batch;
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
BOOST_FOREACH (NodeObject::ref obj, objs)
|
||||
Status fetch (void const* key, NodeObject::Ptr* pObject)
|
||||
{
|
||||
Blob blob (toBlob (obj));
|
||||
batch.Put (
|
||||
leveldb::Slice (reinterpret_cast<char const*>(obj->getHash ().begin ()), 256 / 8),
|
||||
leveldb::Slice (reinterpret_cast<char const*>(&blob.front ()), blob.size ()));
|
||||
pObject->reset ();
|
||||
|
||||
Status status (ok);
|
||||
|
||||
leveldb::ReadOptions const options;
|
||||
leveldb::Slice const slice (static_cast <char const*> (key), m_keyBytes);
|
||||
|
||||
{
|
||||
// These are reused std::string objects,
|
||||
// required for leveldb's funky interface.
|
||||
//
|
||||
StringPool::ScopedItem item (m_stringPool);
|
||||
std::string& string = item.getObject ();
|
||||
|
||||
leveldb::Status getStatus = m_db->Get (options, slice, &string);
|
||||
|
||||
if (getStatus.ok ())
|
||||
{
|
||||
NodeStore::DecodedBlob decoded (key, string.data (), string.size ());
|
||||
|
||||
if (decoded.wasOk ())
|
||||
{
|
||||
*pObject = decoded.createObject ();
|
||||
}
|
||||
else
|
||||
{
|
||||
// Decoding failed, probably corrupted!
|
||||
//
|
||||
status = dataCorrupt;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (getStatus.IsCorruption ())
|
||||
{
|
||||
status = dataCorrupt;
|
||||
}
|
||||
else if (getStatus.IsNotFound ())
|
||||
{
|
||||
status = notFound;
|
||||
}
|
||||
else
|
||||
{
|
||||
status = unknown;
|
||||
}
|
||||
}
|
||||
return mDB->Write (leveldb::WriteOptions (), &batch).ok ();
|
||||
}
|
||||
|
||||
NodeObject::pointer retrieve (uint256 const& hash)
|
||||
{
|
||||
std::string sData;
|
||||
if (!mDB->Get (leveldb::ReadOptions (),
|
||||
leveldb::Slice (reinterpret_cast<char const*>(hash.begin ()), 256 / 8), &sData).ok ())
|
||||
{
|
||||
return NodeObject::pointer();
|
||||
}
|
||||
return fromBinary(hash, &sData[0], sData.size ());
|
||||
return status;
|
||||
}
|
||||
|
||||
void visitAll (FUNCTION_TYPE<void (NodeObject::pointer)> func)
|
||||
void store (NodeObject::ref object)
|
||||
{
|
||||
leveldb::Iterator* it = mDB->NewIterator (leveldb::ReadOptions ());
|
||||
m_batch.store (object);
|
||||
}
|
||||
|
||||
void storeBatch (NodeStore::Batch const& batch)
|
||||
{
|
||||
leveldb::WriteBatch wb;
|
||||
|
||||
{
|
||||
NodeStore::EncodedBlob::Pool::ScopedItem item (m_blobPool);
|
||||
|
||||
BOOST_FOREACH (NodeObject::ref object, batch)
|
||||
{
|
||||
item.getObject ().prepare (object);
|
||||
|
||||
wb.Put (
|
||||
leveldb::Slice (reinterpret_cast <char const*> (item.getObject ().getKey ()),
|
||||
m_keyBytes),
|
||||
leveldb::Slice (reinterpret_cast <char const*> (item.getObject ().getData ()),
|
||||
item.getObject ().getSize ()));
|
||||
}
|
||||
}
|
||||
|
||||
leveldb::WriteOptions const options;
|
||||
|
||||
m_db->Write (options, &wb).ok ();
|
||||
}
|
||||
|
||||
void visitAll (VisitCallback& callback)
|
||||
{
|
||||
leveldb::ReadOptions const options;
|
||||
|
||||
ScopedPointer <leveldb::Iterator> it (m_db->NewIterator (options));
|
||||
|
||||
for (it->SeekToFirst (); it->Valid (); it->Next ())
|
||||
{
|
||||
if (it->key ().size () == 256 / 8)
|
||||
if (it->key ().size () == m_keyBytes)
|
||||
{
|
||||
uint256 hash;
|
||||
memcpy(hash.begin(), it->key ().data(), 256 / 8);
|
||||
func (fromBinary (hash, it->value ().data (), it->value ().size ()));
|
||||
}
|
||||
}
|
||||
}
|
||||
NodeStore::DecodedBlob decoded (it->key ().data (),
|
||||
it->value ().data (),
|
||||
it->value ().size ());
|
||||
|
||||
Blob toBlob(NodeObject::ref obj)
|
||||
if (decoded.wasOk ())
|
||||
{
|
||||
Blob rawData (9 + obj->getData ().size ());
|
||||
unsigned char* bufPtr = &rawData.front();
|
||||
NodeObject::Ptr object (decoded.createObject ());
|
||||
|
||||
*reinterpret_cast<uint32*> (bufPtr + 0) = ntohl (obj->getIndex ());
|
||||
*reinterpret_cast<uint32*> (bufPtr + 4) = ntohl (obj->getIndex ());
|
||||
* (bufPtr + 8) = static_cast<unsigned char> (obj->getType ());
|
||||
memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ());
|
||||
|
||||
return rawData;
|
||||
callback.visitObject (object);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Uh oh, corrupted data!
|
||||
WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ());
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// VFALCO NOTE What does it mean to find an
|
||||
// incorrectly sized key? Corruption?
|
||||
WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size ();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
NodeObject::pointer fromBinary(uint256 const& hash,
|
||||
char const* data, int size)
|
||||
int getWriteLoad ()
|
||||
{
|
||||
if (size < 9)
|
||||
throw std::runtime_error ("undersized object");
|
||||
return m_batch.getWriteLoad ();
|
||||
}
|
||||
|
||||
uint32 index = htonl (*reinterpret_cast<const uint32*> (data));
|
||||
int htype = data[8];
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
return boost::make_shared<NodeObject> (static_cast<NodeObjectType> (htype), index,
|
||||
data + 9, size - 9, hash);
|
||||
void writeBatch (NodeStore::Batch const& batch)
|
||||
{
|
||||
storeBatch (batch);
|
||||
}
|
||||
|
||||
private:
|
||||
std::string mName;
|
||||
leveldb::DB* mDB;
|
||||
size_t const m_keyBytes;
|
||||
NodeStore::Scheduler& m_scheduler;
|
||||
NodeStore::BatchWriter m_batch;
|
||||
StringPool m_stringPool;
|
||||
NodeStore::EncodedBlob::Pool m_blobPool;
|
||||
std::string m_name;
|
||||
ScopedPointer <leveldb::DB> m_db;
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
@@ -140,9 +232,12 @@ String LevelDBBackendFactory::getName () const
    return "LevelDB";
}

NodeStore::Backend* LevelDBBackendFactory::createInstance (StringPairArray const& keyValues)
NodeStore::Backend* LevelDBBackendFactory::createInstance (
    size_t keyBytes,
    StringPairArray const& keyValues,
    NodeStore::Scheduler& scheduler)
{
    return new LevelDBBackendFactory::Backend (keyValues);
    return new LevelDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
}

//------------------------------------------------------------------------------

@@ -21,7 +21,10 @@ public:
    static LevelDBBackendFactory& getInstance ();

    String getName () const;
    NodeStore::Backend* createInstance (StringPairArray const& keyValues);

    NodeStore::Backend* createInstance (size_t keyBytes,
                                        StringPairArray const& keyValues,
                                        NodeStore::Scheduler& scheduler);
};

#endif

@@ -6,171 +6,242 @@
|
||||
|
||||
#if RIPPLE_MDB_AVAILABLE
|
||||
|
||||
class MdbBackendFactory::Backend : public NodeStore::Backend
|
||||
class MdbBackendFactory::Backend
|
||||
: public NodeStore::Backend
|
||||
, public NodeStore::BatchWriter::Callback
|
||||
, LeakChecked <MdbBackendFactory::Backend>
|
||||
{
|
||||
public:
|
||||
explicit Backend (StringPairArray const& keyValues)
|
||||
: m_env (nullptr)
|
||||
typedef NodeStore::Batch Batch;
|
||||
typedef NodeStore::EncodedBlob EncodedBlob;
|
||||
typedef NodeStore::DecodedBlob DecodedBlob;
|
||||
|
||||
explicit Backend (size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler)
|
||||
: m_keyBytes (keyBytes)
|
||||
, m_scheduler (scheduler)
|
||||
, m_batch (*this, scheduler)
|
||||
, m_env (nullptr)
|
||||
{
|
||||
if (keyValues ["path"].isEmpty ())
|
||||
throw std::runtime_error ("Missing path in MDB backend");
|
||||
String path (keyValues ["path"]);
|
||||
|
||||
int error = 0;
|
||||
if (path.isEmpty ())
|
||||
Throw (std::runtime_error ("Missing path in MDB backend"));
|
||||
|
||||
error = mdb_env_create (&m_env);
|
||||
m_basePath = path.toStdString();
|
||||
|
||||
if (error == 0) // Should use the size of the file plus the free space on the disk
|
||||
error = mdb_env_set_mapsize(m_env, 512L * 1024L * 1024L * 1024L);
|
||||
// Regarding the path supplied to mdb_env_open:
|
||||
// This directory must already exist and be writable.
|
||||
//
|
||||
File dir (File::getCurrentWorkingDirectory().getChildFile (path));
|
||||
Result result = dir.createDirectory ();
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
int error = mdb_env_create (&m_env);
|
||||
|
||||
// Should use the size of the file plus the free space on the disk
|
||||
if (error == 0)
|
||||
error = mdb_env_set_mapsize (m_env, 512L * 1024L * 1024L * 1024L);
|
||||
|
||||
if (error == 0)
|
||||
error = mdb_env_open (
|
||||
m_env,
|
||||
keyValues ["path"].toStdString().c_str (),
|
||||
m_basePath.c_str (),
|
||||
MDB_NOTLS,
|
||||
0664);
|
||||
|
||||
MDB_txn * txn;
|
||||
if (error == 0)
|
||||
error = mdb_txn_begin(m_env, NULL, 0, &txn);
|
||||
if (error == 0)
|
||||
error = mdb_dbi_open(txn, NULL, 0, &m_dbi);
|
||||
if (error == 0)
|
||||
error = mdb_txn_commit(txn);
|
||||
MDB_txn* txn;
|
||||
|
||||
if (error == 0)
|
||||
error = mdb_txn_begin (m_env, NULL, 0, &txn);
|
||||
|
||||
if (error == 0)
|
||||
error = mdb_dbi_open (txn, NULL, 0, &m_dbi);
|
||||
|
||||
if (error == 0)
|
||||
error = mdb_txn_commit (txn);
|
||||
|
||||
if (error != 0)
|
||||
{
|
||||
String s;
|
||||
s << "Error #" << error << " creating mdb environment";
|
||||
throw std::runtime_error (s.toStdString ());
|
||||
Throw (std::runtime_error (s.toStdString ()));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
String s;
|
||||
s << "MDB Backend failed to create directory, " << result.getErrorMessage ();
|
||||
Throw (std::runtime_error (s.toStdString().c_str()));
|
||||
}
|
||||
m_name = keyValues ["path"].toStdString();
|
||||
}
|
||||
|
||||
~Backend ()
|
||||
{
|
||||
if (m_env != nullptr)
|
||||
{
|
||||
mdb_dbi_close(m_env, m_dbi);
|
||||
mdb_dbi_close (m_env, m_dbi);
|
||||
mdb_env_close (m_env);
|
||||
}
|
||||
}
|
||||
|
||||
std::string getDataBaseName()
|
||||
std::string getName()
|
||||
{
|
||||
return m_name;
|
||||
return m_basePath;
|
||||
}
|
||||
|
||||
bool bulkStore (std::vector <NodeObject::pointer> const& objs)
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
template <class T>
|
||||
unsigned char* mdb_cast (T* p)
|
||||
{
|
||||
MDB_txn *txn = nullptr;
|
||||
int rc = 0;
|
||||
return const_cast <unsigned char*> (static_cast <unsigned char const*> (p));
|
||||
}
|
||||
|
||||
rc = mdb_txn_begin(m_env, NULL, 0, &txn);
|
||||
|
||||
if (rc == 0)
|
||||
Status fetch (void const* key, NodeObject::Ptr* pObject)
|
||||
{
|
||||
BOOST_FOREACH (NodeObject::ref obj, objs)
|
||||
pObject->reset ();
|
||||
|
||||
Status status (ok);
|
||||
|
||||
MDB_txn* txn = nullptr;
|
||||
|
||||
int error = 0;
|
||||
|
||||
error = mdb_txn_begin (m_env, NULL, MDB_RDONLY, &txn);
|
||||
|
||||
if (error == 0)
|
||||
{
|
||||
MDB_val key, data;
|
||||
Blob blob (toBlob (obj));
|
||||
MDB_val dbkey;
|
||||
MDB_val data;
|
||||
|
||||
key.mv_size = (256 / 8);
|
||||
key.mv_data = const_cast<unsigned char *>(obj->getHash().begin());
|
||||
dbkey.mv_size = m_keyBytes;
|
||||
dbkey.mv_data = mdb_cast (key);
|
||||
|
||||
data.mv_size = blob.size();
|
||||
data.mv_data = &blob.front();
|
||||
error = mdb_get (txn, m_dbi, &dbkey, &data);
|
||||
|
||||
rc = mdb_put(txn, m_dbi, &key, &data, 0);
|
||||
if (rc != 0)
|
||||
if (error == 0)
|
||||
{
|
||||
assert(false);
|
||||
DecodedBlob decoded (key, data.mv_data, data.mv_size);
|
||||
|
||||
if (decoded.wasOk ())
|
||||
{
|
||||
*pObject = decoded.createObject ();
|
||||
}
|
||||
else
|
||||
{
|
||||
status = dataCorrupt;
|
||||
}
|
||||
}
|
||||
else if (error == MDB_NOTFOUND)
|
||||
{
|
||||
status = notFound;
|
||||
}
|
||||
else
|
||||
{
|
||||
status = unknown;
|
||||
|
||||
WriteLog (lsWARNING, NodeObject) << "MDB txn failed, code=" << error;
|
||||
}
|
||||
|
||||
mdb_txn_abort (txn);
|
||||
}
|
||||
else
|
||||
{
|
||||
status = unknown;
|
||||
|
||||
WriteLog (lsWARNING, NodeObject) << "MDB txn failed, code=" << error;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
void store (NodeObject::ref object)
|
||||
{
|
||||
m_batch.store (object);
|
||||
}
|
||||
|
||||
void storeBatch (Batch const& batch)
|
||||
{
|
||||
MDB_txn* txn = nullptr;
|
||||
|
||||
int error = 0;
|
||||
|
||||
error = mdb_txn_begin (m_env, NULL, 0, &txn);
|
||||
|
||||
if (error == 0)
|
||||
{
|
||||
EncodedBlob::Pool::ScopedItem item (m_blobPool);
|
||||
|
||||
BOOST_FOREACH (NodeObject::Ptr const& object, batch)
|
||||
{
|
||||
EncodedBlob& encoded (item.getObject ());
|
||||
|
||||
encoded.prepare (object);
|
||||
|
||||
MDB_val key;
|
||||
key.mv_size = m_keyBytes;
|
||||
key.mv_data = mdb_cast (encoded.getKey ());
|
||||
|
||||
MDB_val data;
|
||||
data.mv_size = encoded.getSize ();
|
||||
data.mv_data = mdb_cast (encoded.getData ());
|
||||
|
||||
error = mdb_put (txn, m_dbi, &key, &data, 0);
|
||||
|
||||
if (error != 0)
|
||||
{
|
||||
WriteLog (lsWARNING, NodeObject) << "mdb_put failed, error=" << error;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (error == 0)
|
||||
{
|
||||
error = mdb_txn_commit(txn);
|
||||
|
||||
if (error != 0)
|
||||
{
|
||||
WriteLog (lsWARNING, NodeObject) << "mdb_txn_commit failed, error=" << error;
|
||||
}
|
||||
}
|
||||
else
|
||||
assert(false);
|
||||
|
||||
if (rc == 0)
|
||||
rc = mdb_txn_commit(txn);
|
||||
else if (txn)
|
||||
mdb_txn_abort(txn);
|
||||
|
||||
assert(rc == 0);
|
||||
return rc == 0;
|
||||
{
|
||||
mdb_txn_abort (txn);
|
||||
}
|
||||
|
||||
NodeObject::pointer retrieve (uint256 const& hash)
|
||||
{
|
||||
NodeObject::pointer ret;
|
||||
|
||||
MDB_txn *txn = nullptr;
|
||||
int rc = 0;
|
||||
|
||||
rc = mdb_txn_begin(m_env, NULL, MDB_RDONLY, &txn);
|
||||
|
||||
if (rc == 0)
|
||||
{
|
||||
MDB_val key, data;
|
||||
|
||||
key.mv_size = (256 / 8);
|
||||
key.mv_data = const_cast<unsigned char *>(hash.begin());
|
||||
|
||||
rc = mdb_get(txn, m_dbi, &key, &data);
|
||||
if (rc == 0)
|
||||
ret = fromBinary(hash, static_cast<char *>(data.mv_data), data.mv_size);
|
||||
else
|
||||
assert(rc == MDB_NOTFOUND);
|
||||
}
|
||||
else
|
||||
assert(false);
|
||||
|
||||
mdb_txn_abort(txn);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void visitAll (FUNCTION_TYPE <void (NodeObject::pointer)> func)
|
||||
{ // WRITEME
|
||||
assert(false);
|
||||
}
|
||||
|
||||
Blob toBlob (NodeObject::ref obj) const
|
||||
{
|
||||
Blob rawData (9 + obj->getData ().size ());
|
||||
unsigned char* bufPtr = &rawData.front();
|
||||
|
||||
*reinterpret_cast <uint32*> (bufPtr + 0) = ntohl (obj->getIndex ());
|
||||
|
||||
*reinterpret_cast <uint32*> (bufPtr + 4) = ntohl (obj->getIndex ());
|
||||
|
||||
*(bufPtr + 8) = static_cast <unsigned char> (obj->getType ());
|
||||
|
||||
memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ());
|
||||
|
||||
return rawData;
|
||||
WriteLog (lsWARNING, NodeObject) << "mdb_txn_begin failed, error=" << error;
|
||||
}
|
||||
}
|
||||
|
||||
NodeObject::pointer fromBinary (uint256 const& hash, char const* data, int size) const
|
||||
void visitAll (VisitCallback& callback)
|
||||
{
|
||||
if (size < 9)
|
||||
throw std::runtime_error ("undersized object");
|
||||
// VFALCO TODO Implement this!
|
||||
bassertfalse;
|
||||
}
|
||||
|
||||
uint32 const index = htonl (*reinterpret_cast <uint32 const*> (data));
|
||||
int getWriteLoad ()
|
||||
{
|
||||
return m_batch.getWriteLoad ();
|
||||
}
|
||||
|
||||
int const htype = data [8];
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
return boost::make_shared <NodeObject> (
|
||||
static_cast <NodeObjectType> (htype),
|
||||
index,
|
||||
data + 9,
|
||||
size - 9,
|
||||
hash);
|
||||
void writeBatch (Batch const& batch)
|
||||
{
|
||||
storeBatch (batch);
|
||||
}
|
||||
|
||||
private:
|
||||
std::string m_name;
|
||||
size_t const m_keyBytes;
|
||||
NodeStore::Scheduler& m_scheduler;
|
||||
NodeStore::BatchWriter m_batch;
|
||||
NodeStore::EncodedBlob::Pool m_blobPool;
|
||||
std::string m_basePath;
|
||||
MDB_env* m_env;
|
||||
MDB_dbi m_dbi;
|
||||
};
|
||||
@@ -197,9 +268,12 @@ String MdbBackendFactory::getName () const
    return "mdb";
}

NodeStore::Backend* MdbBackendFactory::createInstance (StringPairArray const& keyValues)
NodeStore::Backend* MdbBackendFactory::createInstance (
    size_t keyBytes,
    StringPairArray const& keyValues,
    NodeStore::Scheduler& scheduler)
{
    return new MdbBackendFactory::Backend (keyValues);
    return new MdbBackendFactory::Backend (keyBytes, keyValues, scheduler);
}

#endif

@@ -25,7 +25,10 @@ public:
    static MdbBackendFactory& getInstance ();

    String getName () const;
    NodeStore::Backend* createInstance (StringPairArray const& keyValues);

    NodeStore::Backend* createInstance (size_t keyBytes,
                                        StringPairArray const& keyValues,
                                        NodeStore::Scheduler& scheduler);
};

#endif

@@ -6,30 +6,32 @@
|
||||
|
||||
SETUP_LOG (NodeObject)
|
||||
|
||||
NodeObject::NodeObject (
|
||||
NodeObjectType type,
|
||||
LedgerIndex ledgerIndex,
|
||||
Blob const& binaryDataToCopy,
|
||||
uint256 const& hash)
|
||||
: mType (type)
|
||||
, mHash (hash)
|
||||
, mLedgerIndex (ledgerIndex)
|
||||
, mData (binaryDataToCopy)
|
||||
{
|
||||
}
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
NodeObject::NodeObject (
|
||||
NodeObjectType type,
|
||||
LedgerIndex ledgerIndex,
|
||||
void const* bufferToCopy,
|
||||
int bytesInBuffer,
|
||||
uint256 const& hash)
|
||||
Blob& data,
|
||||
uint256 const& hash,
|
||||
PrivateAccess)
|
||||
: mType (type)
|
||||
, mHash (hash)
|
||||
, mLedgerIndex (ledgerIndex)
|
||||
, mData (static_cast <unsigned char const*> (bufferToCopy),
|
||||
static_cast <unsigned char const*> (bufferToCopy) + bytesInBuffer)
|
||||
{
|
||||
// Take over the caller's buffer
|
||||
mData.swap (data);
|
||||
}
|
||||
|
||||
NodeObject::Ptr NodeObject::createObject (
|
||||
NodeObjectType type,
|
||||
LedgerIndex ledgerIndex,
|
||||
Blob& data,
|
||||
uint256 const & hash)
|
||||
{
|
||||
// The boost::ref is important or
|
||||
// else it will be passed by value!
|
||||
return boost::make_shared <NodeObject> (
|
||||
type, ledgerIndex, boost::ref (data), hash, PrivateAccess ());
|
||||
}
|
||||
|
||||
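
Because the private constructor swaps the caller's buffer instead of copying it, the Blob passed to createObject is left empty after the call. A brief usage sketch with hypothetical variables (serializeNode, computePayloadHash, and ledgerIndex are illustrative, not part of this commit):

    Blob payload = serializeNode ();                    // hypothetical source of bytes
    uint256 const hash = computePayloadHash (payload);  // hypothetical; the 256-bit hash of the payload

    NodeObject::Ptr const node = NodeObject::createObject (
        hotLEDGER, ledgerIndex, payload, hash);

    // payload is now empty: the NodeObject took over its storage via swap().
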
NodeObjectType NodeObject::getType () const
|
||||
@@ -51,3 +53,39 @@ Blob const& NodeObject::getData () const
|
||||
{
|
||||
return mData;
|
||||
}
|
||||
|
||||
bool NodeObject::isCloneOf (NodeObject::Ptr const& other) const
|
||||
{
|
||||
if (mType != other->mType)
|
||||
return false;
|
||||
|
||||
if (mHash != other->mHash)
|
||||
return false;
|
||||
|
||||
if (mLedgerIndex != other->mLedgerIndex)
|
||||
return false;
|
||||
|
||||
if (mData != other->mData)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
class NodeObjectTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
|
||||
NodeObjectTests () : UnitTest ("NodeObject", "ripple")
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
void runTest ()
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
static NodeObjectTests nodeObjectTests;
|
||||
|
||||
|
||||
@@ -34,27 +34,60 @@ class NodeObject : public CountedObject <NodeObject>
|
||||
public:
|
||||
static char const* getCountedObjectName () { return "NodeObject"; }
|
||||
|
||||
enum
|
||||
{
|
||||
/** Size of the fixed keys, in bytes.
|
||||
|
||||
We use a 256-bit hash for the keys.
|
||||
|
||||
@see NodeObject
|
||||
*/
|
||||
keyBytes = 32,
|
||||
};
|
||||
|
||||
/** The type used to hold the hash.
|
||||
|
||||
The hashes are fixed size, SHA256.
|
||||
|
||||
@note The key size can be retrieved with `Hash::sizeInBytes`
|
||||
*/
|
||||
typedef UnsignedInteger <32> Hash;
|
||||
|
||||
// Please use this one. For a reference use Ptr const&
|
||||
typedef boost::shared_ptr <NodeObject> Ptr;
|
||||
|
||||
// These are DEPRECATED, type names are capitalized.
|
||||
typedef boost::shared_ptr <NodeObject> pointer;
|
||||
typedef pointer const& ref;
|
||||
|
||||
/** Create from a vector of data.
|
||||
|
||||
@note A copy of the data is created.
|
||||
*/
|
||||
private:
|
||||
// This hack is used to make the constructor effectively private
|
||||
// except for when we use it in the call to make_shared.
|
||||
// There's no portable way to make make_shared<> a friend work.
|
||||
struct PrivateAccess { };
|
||||
public:
|
||||
// This constructor is private, use createObject instead.
|
||||
NodeObject (NodeObjectType type,
|
||||
LedgerIndex ledgerIndex,
|
||||
Blob const & binaryDataToCopy,
|
||||
uint256 const & hash);
|
||||
Blob& data,
|
||||
uint256 const& hash,
|
||||
PrivateAccess);
|
||||
|
||||
/** Create from an area of memory.
|
||||
/** Create an object from fields.
|
||||
|
||||
@note A copy of the data is created.
|
||||
The caller's variable is modified during this call. The
|
||||
underlying storage for the Blob is taken over by the NodeObject.
|
||||
|
||||
@param type The type of object.
|
||||
@param ledgerIndex The ledger in which this object appears.
|
||||
@param data A buffer containing the payload. The caller's variable
|
||||
is overwritten.
|
||||
@param hash The 256-bit hash of the payload data.
|
||||
*/
|
||||
NodeObject (NodeObjectType type,
|
||||
static Ptr createObject (NodeObjectType type,
|
||||
LedgerIndex ledgerIndex,
|
||||
void const * bufferToCopy,
|
||||
int bytesInBuffer,
|
||||
uint256 const & hash);
|
||||
Blob& data,
|
||||
uint256 const& hash);
|
||||
|
||||
/** Retrieve the type of this object.
|
||||
*/
|
||||
@@ -73,11 +106,30 @@ public:
|
||||
*/
|
||||
Blob const& getData () const;
|
||||
|
||||
/** See if this object has the same data as another object.
|
||||
*/
|
||||
bool isCloneOf (NodeObject::Ptr const& other) const;
|
||||
|
||||
/** Binary function that satisfies the strict-weak-ordering requirement.
|
||||
|
||||
This compares the hashes of both objects and returns true if
|
||||
the first hash is considered to go before the second.
|
||||
|
||||
@see std::sort
|
||||
*/
|
||||
struct LessThan
|
||||
{
|
||||
inline bool operator() (NodeObject::Ptr const& lhs, NodeObject::Ptr const& rhs) const noexcept
|
||||
{
|
||||
return lhs->getHash () < rhs->getHash ();
|
||||
}
|
||||
};
|
||||
|
||||
private:
|
||||
NodeObjectType const mType;
|
||||
uint256 const mHash;
|
||||
LedgerIndex const mLedgerIndex;
|
||||
Blob const mData;
|
||||
NodeObjectType mType;
|
||||
uint256 mHash;
|
||||
LedgerIndex mLedgerIndex;
|
||||
Blob mData;
|
||||
};
|
||||
|
||||
#endif
|
||||
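
The LessThan comparator added above gives NodeObject::Ptr values a strict weak ordering by hash, so a NodeStore::Batch can be sorted directly. A one-line illustration (assumes <algorithm>; here 'batch' is a NodeStore::Batch):

    // Order a batch of objects by hash, for example before a bulk write.
    std::sort (batch.begin (), batch.end (), NodeObject::LessThan ());
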
|
||||
File diff suppressed because it is too large
@@ -8,60 +8,282 @@
|
||||
#define RIPPLE_NODESTORE_H_INCLUDED
|
||||
|
||||
/** Persistency layer for NodeObject
|
||||
|
||||
A Node is a ledger object which is uniquely identified by a key, which is
|
||||
the 256-bit hash of the body of the node. The payload is a variable length
|
||||
block of serialized data.
|
||||
|
||||
All ledger data is stored as node objects and as such, needs to be persisted
|
||||
between launches. Furthermore, since the set of node objects will in
|
||||
general be larger than the amount of available memory, purged node objects
|
||||
which are later accessed must be retrieved from the node store.
|
||||
|
||||
@see NodeObject
|
||||
*/
|
||||
class NodeStore : LeakChecked <NodeStore>
|
||||
class NodeStore
|
||||
{
|
||||
public:
|
||||
/** Back end used for the store.
|
||||
enum
|
||||
{
|
||||
// This is only used to pre-allocate the array for
|
||||
// batch objects and does not affect the amount written.
|
||||
//
|
||||
batchWritePreallocationSize = 128
|
||||
};
|
||||
|
||||
typedef std::vector <NodeObject::Ptr> Batch;
|
||||
|
||||
typedef StringPairArray Parameters;
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Parses a key/value blob into NodeObject components.
|
||||
|
||||
This will extract the information required to construct a NodeObject. It
|
||||
also does consistency checking and returns the result, so it is possible
|
||||
to determine if the data is corrupted without throwing an exception. Not
|
||||
all forms of corruption are detected so further analysis will be needed
|
||||
to eliminate false negatives.
|
||||
|
||||
@note This defines the database format of a NodeObject!
|
||||
*/
|
||||
class DecodedBlob
|
||||
{
|
||||
public:
|
||||
/** Construct the decoded blob from raw data. */
|
||||
DecodedBlob (void const* key, void const* value, int valueBytes);
|
||||
|
||||
/** Determine if the decoding was successful. */
|
||||
bool wasOk () const noexcept { return m_success; }
|
||||
|
||||
/** Create a NodeObject from this data. */
|
||||
NodeObject::Ptr createObject ();
|
||||
|
||||
private:
|
||||
bool m_success;
|
||||
|
||||
void const* m_key;
|
||||
LedgerIndex m_ledgerIndex;
|
||||
NodeObjectType m_objectType;
|
||||
unsigned char const* m_objectData;
|
||||
int m_dataBytes;
|
||||
};
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Utility for producing flattened node objects.
|
||||
|
||||
These get recycled to prevent many small allocations.
|
||||
|
||||
@note This defines the database format of a NodeObject!
|
||||
*/
|
||||
struct EncodedBlob
|
||||
{
|
||||
typedef RecycledObjectPool <EncodedBlob> Pool;
|
||||
|
||||
void prepare (NodeObject::Ptr const& object);
|
||||
|
||||
void const* getKey () const noexcept { return m_key; }
|
||||
|
||||
size_t getSize () const noexcept { return m_size; }
|
||||
|
||||
void const* getData () const noexcept { return m_data.getData (); }
|
||||
|
||||
private:
|
||||
void const* m_key;
|
||||
MemoryBlock m_data;
|
||||
size_t m_size;
|
||||
};
|
||||
|
||||
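
For orientation on what these blobs hold: the legacy flattening code removed elsewhere in this commit (toBlob/fromBinary in the LevelDB and MDB backends) used a 9-byte header followed by the payload. The new EncodedBlob/DecodedBlob implementation lives in ripple_NodeStore.cpp, whose diff is suppressed below, so treat the following as a background sketch mirroring that legacy code rather than the authoritative new format:

    // Legacy value layout (offsets in bytes), per the removed toBlob code:
    //   [0..3]  ledger index, big-endian
    //   [4..7]  ledger index again (the legacy encoder wrote it twice)
    //   [8]     NodeObjectType stored as a single byte
    //   [9.. ]  the serialized payload; values under 9 bytes are rejected
    // Decoding sketch mirroring the legacy fromBinary():
    uint32 const index = htonl (*reinterpret_cast <uint32 const*> (data));
    NodeObjectType const type = static_cast <NodeObjectType> (data [8]);
    // The key is stored separately and is always the 256-bit (32-byte) hash.
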
//--------------------------------------------------------------------------
|
||||
|
||||
/** Provides optional asynchronous scheduling for backends.
|
||||
|
||||
For improved performance, a backend has the option of performing writes
|
||||
in batches. These writes can be scheduled using the provided scheduler
|
||||
object.
|
||||
|
||||
@see BatchWriter
|
||||
*/
|
||||
class Scheduler
|
||||
{
|
||||
public:
|
||||
/** Derived classes perform scheduled tasks. */
|
||||
struct Task
|
||||
{
|
||||
virtual ~Task () { }
|
||||
|
||||
/** Performs the task.
|
||||
|
||||
The call may take place on a foreign thread.
|
||||
*/
|
||||
virtual void performScheduledTask () = 0;
|
||||
};
|
||||
|
||||
/** Schedules a task.
|
||||
|
||||
Depending on the implementation, this could happen
|
||||
immediately or get deferred.
|
||||
*/
|
||||
virtual void scheduleTask (Task* task) = 0;
|
||||
};
|
||||
|
||||
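
The interface is deliberately small; a fully synchronous implementation, in the spirit of the getSynchronousScheduler() accessor declared further down, fits in a few lines. Sketch only, assuming nothing beyond the interface above:

    // Runs every task inline on the caller's thread; no queue, no worker thread.
    class InlineScheduler : public NodeStore::Scheduler
    {
    public:
        void scheduleTask (Task* task)
        {
            task->performScheduledTask ();
        }
    };
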
//--------------------------------------------------------------------------
|
||||
|
||||
/** Helps with batch writing.
|
||||
|
||||
The batch writes are performed with a scheduled task. Use of the
|
||||
class is not required. A backend can implement its own write batching,
|
||||
or skip write batching if doing so yields a performance benefit.
|
||||
|
||||
@see Scheduler
|
||||
*/
|
||||
// VFALCO NOTE I'm not entirely happy having placed this here,
|
||||
// because whoever needs to use NodeStore certainly doesn't
|
||||
// need to see the implementation details of BatchWriter.
|
||||
//
|
||||
class BatchWriter : private Scheduler::Task
|
||||
{
|
||||
public:
|
||||
/** This callback does the actual writing. */
|
||||
struct Callback
|
||||
{
|
||||
virtual void writeBatch (Batch const& batch) = 0;
|
||||
};
|
||||
|
||||
/** Create a batch writer. */
|
||||
BatchWriter (Callback& callback, Scheduler& scheduler);
|
||||
|
||||
/** Destroy a batch writer.
|
||||
|
||||
Anything pending in the batch is written out before this returns.
|
||||
*/
|
||||
~BatchWriter ();
|
||||
|
||||
/** Store the object.
|
||||
|
||||
This will add to the batch and initiate a scheduled task to
|
||||
write the batch out.
|
||||
*/
|
||||
void store (NodeObject::Ptr const& object);
|
||||
|
||||
/** Get an estimate of the amount of writing I/O pending. */
|
||||
int getWriteLoad ();
|
||||
|
||||
private:
|
||||
void performScheduledTask ();
|
||||
void writeBatch ();
|
||||
void waitForWriting ();
|
||||
|
||||
private:
|
||||
typedef boost::recursive_mutex LockType;
|
||||
typedef boost::condition_variable_any CondvarType;
|
||||
|
||||
Callback& m_callback;
|
||||
Scheduler& m_scheduler;
|
||||
LockType mWriteMutex;
|
||||
CondvarType mWriteCondition;
|
||||
int mWriteGeneration;
|
||||
int mWriteLoad;
|
||||
bool mWritePending;
|
||||
Batch mWriteSet;
|
||||
};
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** A backend used for the store.
|
||||
|
||||
The NodeStore uses a swappable backend so that other database systems
|
||||
can be tried. Different databases may offer various features such
|
||||
as improved performance, fault tolerant or distributed storage, or
|
||||
all in-memory operation.
|
||||
|
||||
A given instance of a backend is fixed to a particular key size.
|
||||
*/
|
||||
class Backend
|
||||
{
|
||||
public:
|
||||
// VFALCO TODO Move the function definition to the .cpp
|
||||
Backend ()
|
||||
: mWriteGeneration(0)
|
||||
, mWriteLoad(0)
|
||||
, mWritePending(false)
|
||||
/** Return codes from operations. */
|
||||
enum Status
|
||||
{
|
||||
mWriteSet.reserve(128);
|
||||
}
|
||||
|
||||
virtual ~Backend () { }
|
||||
|
||||
virtual std::string getDataBaseName() = 0;
|
||||
|
||||
// Store/retrieve a single object
|
||||
// These functions must be thread safe
|
||||
virtual bool store (NodeObject::ref);
|
||||
virtual NodeObject::pointer retrieve (uint256 const &hash) = 0;
|
||||
|
||||
// Store a group of objects
|
||||
// This function will only be called from a single thread
|
||||
virtual bool bulkStore (const std::vector< NodeObject::pointer >&) = 0;
|
||||
|
||||
// Visit every object in the database
|
||||
// This function will only be called during an import operation
|
||||
//
|
||||
// VFALCO TODO Replace FUNCTION_TYPE with a beast lift.
|
||||
//
|
||||
virtual void visitAll (FUNCTION_TYPE <void (NodeObject::pointer)>) = 0;
|
||||
|
||||
// VFALCO TODO Put this bulk writing logic into a separate class.
|
||||
virtual void bulkWrite (Job &);
|
||||
virtual void waitWrite ();
|
||||
virtual int getWriteLoad ();
|
||||
|
||||
protected:
|
||||
// VFALCO TODO Put this bulk writing logic into a separate class.
|
||||
boost::mutex mWriteMutex;
|
||||
boost::condition_variable mWriteCondition;
|
||||
int mWriteGeneration;
|
||||
int mWriteLoad;
|
||||
bool mWritePending;
|
||||
std::vector <boost::shared_ptr<NodeObject> > mWriteSet;
|
||||
ok,
|
||||
notFound,
|
||||
dataCorrupt,
|
||||
unknown
|
||||
};
|
||||
|
||||
public:
|
||||
/** Destroy the backend.
|
||||
|
||||
All open files are closed and flushed. If there are batched writes
|
||||
or other tasks scheduled, they will be completed before this call
|
||||
returns.
|
||||
*/
|
||||
virtual ~Backend () { }
|
||||
|
||||
/** Get the human-readable name of this backend.
|
||||
|
||||
This is used for diagnostic output.
|
||||
*/
|
||||
virtual std::string getName() = 0;
|
||||
|
||||
/** Fetch a single object.
|
||||
|
||||
If the object is not found or an error is encountered, the
|
||||
result will indicate the condition.
|
||||
|
||||
@note This will be called concurrently.
|
||||
|
||||
@param key A pointer to the key data.
|
||||
@param pObject [out] The created object if successful.
|
||||
|
||||
@return The result of the operation.
|
||||
*/
|
||||
virtual Status fetch (void const* key, NodeObject::Ptr* pObject) = 0;
|
||||
|
||||
/** Store a single object.
|
||||
|
||||
Depending on the implementation this may happen immediately
|
||||
or deferred using a scheduled task.
|
||||
|
||||
@note This will be called concurrently.
|
||||
|
||||
@param object The object to store.
|
||||
*/
|
||||
virtual void store (NodeObject::Ptr const& object) = 0;
|
||||
|
||||
/** Store a group of objects.
|
||||
|
||||
@note This function will not be called concurrently with
|
||||
itself or @ref store.
|
||||
*/
|
||||
virtual void storeBatch (Batch const& batch) = 0;
|
||||
|
||||
/** Callback for iterating through objects.
|
||||
|
||||
@see visitAll
|
||||
*/
|
||||
struct VisitCallback
|
||||
{
|
||||
virtual void visitObject (NodeObject::Ptr const& object) = 0;
|
||||
};
|
||||
|
||||
/** Visit every object in the database
|
||||
|
||||
This is usually called during import.
|
||||
|
||||
@note This routine will not be called concurrently with itself
|
||||
or other methods.
|
||||
|
||||
@see import, VisitCallback
|
||||
*/
|
||||
virtual void visitAll (VisitCallback& callback) = 0;
|
||||
|
||||
/** Estimate the number of write operations pending. */
|
||||
virtual int getWriteLoad () = 0;
|
||||
};
|
||||
|
||||
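
A caller of fetch() then branches on the returned Status. A minimal sketch of a hypothetical call site ('backend' is a Backend*, 'hash' a uint256 key of keyBytes bytes, and processObject is an illustrative consumer):

    NodeObject::Ptr object;
    NodeStore::Backend::Status const status = backend->fetch (hash.begin (), &object);

    if (status == NodeStore::Backend::ok)
        processObject (object);            // hypothetical consumer
    else if (status != NodeStore::Backend::notFound)
        WriteLog (lsWARNING, NodeObject) << "fetch failed, status=" << status;
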
//--------------------------------------------------------------------------
|
||||
|
||||
/** Factory to produce backends.
|
||||
*/
|
||||
class BackendFactory
|
||||
@@ -69,67 +291,142 @@ public:
|
||||
public:
|
||||
virtual ~BackendFactory () { }
|
||||
|
||||
/** Retrieve the name of this factory.
|
||||
*/
|
||||
/** Retrieve the name of this factory. */
|
||||
virtual String getName () const = 0;
|
||||
|
||||
/** Create an instance of this factory's backend.
|
||||
|
||||
@param keyBytes The fixed number of bytes per key.
|
||||
@param keyValues A set of key/value configuration pairs.
|
||||
@param scheduler The scheduler to use for running tasks.
|
||||
|
||||
@return A pointer to the Backend object.
|
||||
*/
|
||||
virtual Backend* createInstance (StringPairArray const& keyValues) = 0;
|
||||
virtual Backend* createInstance (size_t keyBytes,
|
||||
Parameters const& parameters,
|
||||
Scheduler& scheduler) = 0;
|
||||
};
|
||||
|
||||
public:
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Construct a node store.
|
||||
|
||||
parameters has the format:
|
||||
The parameters are key value pairs passed to the backend. The
|
||||
'type' key must exist, it defines the choice of backend. Most
|
||||
backends also require a 'path' field.
|
||||
|
||||
<key>=<value>['|'<key>=<value>]
|
||||
Some choices for 'type' are:
|
||||
HyperLevelDB, LevelDB, SQLite, KeyvaDB, MDB
|
||||
|
||||
The key "type" must exist, it defines the backend. For example
|
||||
"type=LevelDB|path=/mnt/ephemeral"
|
||||
If the fastBackendParameter is omitted or empty, no ephemeral database
|
||||
is used. If the scheduler parameter is omitted or unspecified, a
|
||||
synchronous scheduler is used which performs all tasks immediately on
|
||||
the caller's thread.
|
||||
|
||||
@note If the database cannot be opened or created, an exception is thrown.
|
||||
|
||||
@param backendParameters The parameter string for the persistent backend.
|
||||
@param fastBackendParameters [optional] The parameter string for the ephemeral backend.
|
||||
@param scheduler [optional] The scheduler to use for performing asynchronous tasks.
|
||||
|
||||
@return The opened database.
|
||||
*/
|
||||
// VFALCO NOTE Is cacheSize in bytes? objects? KB?
|
||||
// Is cacheAge in minutes? seconds?
|
||||
//
|
||||
NodeStore (String backendParameters,
|
||||
String fastBackendParameters,
|
||||
int cacheSize,
|
||||
int cacheAge);
|
||||
static NodeStore* New (Parameters const& backendParameters,
|
||||
Parameters fastBackendParameters = Parameters (),
|
||||
Scheduler& scheduler = getSynchronousScheduler ());
|
||||
|
||||
/** Get the synchronous scheduler.
|
||||
|
||||
The synchronous scheduler performs all tasks immediately, before
|
||||
returning to the caller, using the caller's thread.
|
||||
*/
|
||||
static Scheduler& getSynchronousScheduler ();
|
||||
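
Putting the construction pieces together, a hypothetical call site might look like the following sketch; the '|'-separated string form described above is assumed to be parsed into key/value pairs elsewhere, and the path and someHash values are illustrative only:

    NodeStore::Parameters backendParams;             // a StringPairArray
    backendParams.set ("type", "LevelDB");
    backendParams.set ("path", "/mnt/ephemeral");

    // No ephemeral (fast) backend, default synchronous scheduler.
    ScopedPointer <NodeStore> store (NodeStore::New (backendParams));

    NodeObject::pointer const node = store->fetch (someHash);   // nullptr if not found
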
|
||||
/** Destroy the node store.
|
||||
|
||||
All pending operations are completed, pending writes flushed,
|
||||
and files closed before this returns.
|
||||
*/
|
||||
virtual ~NodeStore () { }
|
||||
|
||||
/** Retrieve the name associated with this backend.
|
||||
|
||||
This is used for diagnostics and may not reflect the actual path
|
||||
or paths used by the underlying backend.
|
||||
*/
|
||||
virtual String getName () const = 0;
|
||||
|
||||
/** Add the specified backend factory to the list of available factories.
|
||||
|
||||
The names of available factories are compared against the "type"
|
||||
value in the parameter list on construction.
|
||||
|
||||
@param factory The factory to add.
|
||||
*/
|
||||
static void addBackendFactory (BackendFactory& factory);
|
||||
|
||||
float getCacheHitRate ();
|
||||
/** Fetch an object.
|
||||
|
||||
bool store (NodeObjectType type, uint32 index, Blob const& data,
|
||||
uint256 const& hash);
|
||||
If the object is known to be not in the database, isn't found in the
|
||||
database during the fetch, or failed to load correctly during the fetch,
|
||||
`nullptr` is returned.
|
||||
|
||||
NodeObject::pointer retrieve (uint256 const& hash);
|
||||
@note This can be called concurrently.
|
||||
|
||||
void waitWrite ();
|
||||
void tune (int size, int age);
|
||||
void sweep ();
|
||||
int getWriteLoad ();
|
||||
@param hash The key of the object to retrieve.
|
||||
|
||||
int import (String sourceBackendParameters);
|
||||
@return The object, or nullptr if it couldn't be retrieved.
|
||||
*/
|
||||
virtual NodeObject::pointer fetch (uint256 const& hash) = 0;
|
||||
|
||||
private:
|
||||
void importVisitor (std::vector <NodeObject::pointer>& objects, NodeObject::pointer object);
|
||||
/** Store the object.
|
||||
|
||||
static Backend* createBackend (String const& parameters);
|
||||
The caller's Blob parameter is overwritten.
|
||||
|
||||
static Array <BackendFactory*> s_factories;
|
||||
@param type The type of object.
|
||||
@param ledgerIndex The ledger in which the object appears.
|
||||
@param data The payload of the object. The caller's
|
||||
variable is overwritten.
|
||||
@param hash The 256-bit hash of the payload data.
|
||||
|
||||
private:
|
||||
ScopedPointer <Backend> m_backend;
|
||||
ScopedPointer <Backend> m_fastBackend;
|
||||
@return `true` if the object was stored?
|
||||
*/
|
||||
virtual void store (NodeObjectType type,
|
||||
uint32 ledgerIndex,
|
||||
Blob& data,
|
||||
uint256 const& hash) = 0;
|
||||
|
||||
/** Visit every object in the database
|
||||
|
||||
This is usually called during import.
|
||||
|
||||
@note This routine will not be called concurrently with itself
|
||||
or other methods.
|
||||
|
||||
@see import
|
||||
*/
|
||||
virtual void visitAll (Backend::VisitCallback& callback) = 0;
|
||||
|
||||
/** Import objects from another database. */
|
||||
virtual void import (NodeStore& sourceDatabase) = 0;
|
||||
|
||||
|
||||
/** Retrieve the estimated number of pending write operations.
|
||||
|
||||
This is used for diagnostics.
|
||||
*/
|
||||
virtual int getWriteLoad () = 0;
|
||||
|
||||
// VFALCO TODO Document this.
|
||||
virtual float getCacheHitRate () = 0;
|
||||
|
||||
// VFALCO TODO Document this.
|
||||
// TODO Document the parameter meanings.
|
||||
virtual void tune (int size, int age) = 0;
|
||||
|
||||
// VFALCO TODO Document this.
|
||||
virtual void sweep () = 0;
|
||||
|
||||
TaggedCache<uint256, NodeObject, UptimeTimerAdapter> mCache;
|
||||
KeyCache <uint256, UptimeTimerAdapter> mNegativeCache;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
@@ -15,29 +15,32 @@ public:
    {
    }

    std::string getDataBaseName()
    std::string getName()
    {
        return std::string ();
    }

    bool store (NodeObject::ref obj)
    Status fetch (void const*, NodeObject::Ptr*)
    {
        return false;
        return notFound;
    }

    bool bulkStore (const std::vector< NodeObject::pointer >& objs)
    void store (NodeObject::ref object)
    {
        return false;
    }

    NodeObject::pointer retrieve (uint256 const& hash)
    void storeBatch (NodeStore::Batch const& batch)
    {
        return NodeObject::pointer ();
    }

    void visitAll (FUNCTION_TYPE <void (NodeObject::pointer)> func)
    void visitAll (VisitCallback& callback)
    {
    }

    int getWriteLoad ()
    {
        return 0;
    }
};

//------------------------------------------------------------------------------
@@ -62,7 +65,10 @@ String NullBackendFactory::getName () const
    return "none";
}

NodeStore::Backend* NullBackendFactory::createInstance (StringPairArray const& keyValues)
NodeStore::Backend* NullBackendFactory::createInstance (
    size_t,
    StringPairArray const&,
    NodeStore::Scheduler&)
{
    return new NullBackendFactory::Backend;
}

@@ -23,7 +23,10 @@ public:
    static NullBackendFactory& getInstance ();

    String getName () const;
    NodeStore::Backend* createInstance (StringPairArray const& keyValues);

    NodeStore::Backend* createInstance (size_t keyBytes,
                                        StringPairArray const& keyValues,
                                        NodeStore::Scheduler& scheduler);
};

#endif

@@ -4,89 +4,169 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
static const char* s_nodeStoreDBInit [] =
|
||||
{
|
||||
"PRAGMA synchronous=NORMAL;",
|
||||
"PRAGMA journal_mode=WAL;",
|
||||
"PRAGMA journal_size_limit=1582080;",
|
||||
|
||||
#if (ULONG_MAX > UINT_MAX) && !defined (NO_SQLITE_MMAP)
|
||||
"PRAGMA mmap_size=171798691840;",
|
||||
#endif
|
||||
|
||||
"BEGIN TRANSACTION;",
|
||||
|
||||
"CREATE TABLE CommittedObjects ( \
|
||||
Hash CHARACTER(64) PRIMARY KEY, \
|
||||
ObjType CHAR(1) NOT NULL, \
|
||||
LedgerIndex BIGINT UNSIGNED, \
|
||||
Object BLOB \
|
||||
);",
|
||||
|
||||
"END TRANSACTION;"
|
||||
};
|
||||
|
||||
static int s_nodeStoreDBCount = NUMBER (s_nodeStoreDBInit);
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
class SqliteBackendFactory::Backend : public NodeStore::Backend
|
||||
{
|
||||
public:
|
||||
Backend(std::string const& path) : mName(path)
|
||||
Backend (size_t keyBytes, std::string const& path)
|
||||
: m_keyBytes (keyBytes)
|
||||
, m_name (path)
|
||||
, m_db (new DatabaseCon(path, s_nodeStoreDBInit, s_nodeStoreDBCount))
|
||||
{
|
||||
mDb = new DatabaseCon(path, HashNodeDBInit, HashNodeDBCount);
|
||||
mDb->getDB()->executeSQL(boost::str(boost::format("PRAGMA cache_size=-%d;") %
|
||||
(theConfig.getSize(siHashNodeDBCache) * 1024)));
|
||||
String s;
|
||||
|
||||
// VFALCO TODO Remove this dependency on theConfig
|
||||
//
|
||||
s << "PRAGMA cache_size=-" << String (theConfig.getSize(siHashNodeDBCache) * 1024);
|
||||
m_db->getDB()->executeSQL (s.toStdString ().c_str ());
|
||||
}
|
||||
|
||||
Backend()
|
||||
~Backend()
|
||||
{
|
||||
delete mDb;
|
||||
}
|
||||
|
||||
std::string getDataBaseName()
|
||||
std::string getName()
|
||||
{
|
||||
return mName;
|
||||
return m_name;
|
||||
}
|
||||
|
||||
bool bulkStore(const std::vector< NodeObject::pointer >& objects)
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
Status fetch (void const* key, NodeObject::Ptr* pObject)
|
||||
{
|
||||
ScopedLock sl(mDb->getDBLock());
|
||||
static SqliteStatement pStB(mDb->getDB()->getSqliteDB(), "BEGIN TRANSACTION;");
|
||||
static SqliteStatement pStE(mDb->getDB()->getSqliteDB(), "END TRANSACTION;");
|
||||
static SqliteStatement pSt(mDb->getDB()->getSqliteDB(),
|
||||
Status result = ok;
|
||||
|
||||
pObject->reset ();
|
||||
|
||||
{
|
||||
ScopedLock sl (m_db->getDBLock());
|
||||
|
||||
uint256 const hash (key);
|
||||
|
||||
static SqliteStatement pSt (m_db->getDB()->getSqliteDB(),
|
||||
"SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;");
|
||||
|
||||
pSt.bind (1, hash.GetHex());
|
||||
|
||||
if (pSt.isRow (pSt.step()))
|
||||
{
|
||||
// VFALCO NOTE This is unfortunately needed,
|
||||
// the DatabaseCon creates the blob?
|
||||
Blob data (pSt.getBlob (2));
|
||||
*pObject = NodeObject::createObject (
|
||||
getTypeFromString (pSt.peekString (0)),
|
||||
pSt.getUInt32 (1),
|
||||
data,
|
||||
hash);
|
||||
}
|
||||
else
|
||||
{
|
||||
result = notFound;
|
||||
}
|
||||
|
||||
pSt.reset();
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void store (NodeObject::ref object)
|
||||
{
|
||||
NodeStore::Batch batch;
|
||||
|
||||
batch.push_back (object);
|
||||
|
||||
storeBatch (batch);
|
||||
}
|
||||
|
||||
void storeBatch (NodeStore::Batch const& batch)
|
||||
{
|
||||
// VFALCO TODO Rewrite this to use Beast::db
|
||||
|
||||
ScopedLock sl (m_db->getDBLock());
|
||||
|
||||
static SqliteStatement pStB (m_db->getDB()->getSqliteDB(), "BEGIN TRANSACTION;");
|
||||
static SqliteStatement pStE (m_db->getDB()->getSqliteDB(), "END TRANSACTION;");
|
||||
static SqliteStatement pSt (m_db->getDB()->getSqliteDB(),
|
||||
"INSERT OR IGNORE INTO CommittedObjects "
|
||||
"(Hash,ObjType,LedgerIndex,Object) VALUES (?, ?, ?, ?);");
|
||||
|
||||
pStB.step();
|
||||
pStB.reset();
|
||||
|
||||
BOOST_FOREACH(NodeObject::ref object, objects)
|
||||
BOOST_FOREACH (NodeObject::Ptr const& object, batch)
|
||||
{
|
||||
bind(pSt, object);
|
||||
doBind (pSt, object);
|
||||
|
||||
pSt.step();
|
||||
pSt.reset();
|
||||
}
|
||||
|
||||
pStE.step();
|
||||
pStE.reset();
|
||||
|
||||
return true;
|
||||
|
||||
}
|
||||
|
||||
NodeObject::pointer retrieve(uint256 const& hash)
void visitAll (VisitCallback& callback)
{
NodeObject::pointer ret;
// No lock needed as per the visitAll() API

{
ScopedLock sl(mDb->getDBLock());
static SqliteStatement pSt(mDb->getDB()->getSqliteDB(),
"SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;");

pSt.bind(1, hash.GetHex());

if (pSt.isRow(pSt.step()))
ret = boost::make_shared<NodeObject>(getType(pSt.peekString(0)), pSt.getUInt32(1), pSt.getBlob(2), hash);

pSt.reset();
}

return ret;
}

void visitAll(FUNCTION_TYPE<void (NodeObject::pointer)> func)
{
uint256 hash;

static SqliteStatement pSt(mDb->getDB()->getSqliteDB(),
static SqliteStatement pSt(m_db->getDB()->getSqliteDB(),
"SELECT ObjType,LedgerIndex,Object,Hash FROM CommittedObjects;");

while (pSt.isRow(pSt.step()))
while (pSt.isRow (pSt.step()))
{
hash.SetHexExact(pSt.getString(3));
func(boost::make_shared<NodeObject>(getType(pSt.peekString(0)), pSt.getUInt32(1), pSt.getBlob(2), hash));

// VFALCO NOTE This is unfortunately needed,
// the DatabaseCon creates the blob?
Blob data (pSt.getBlob (2));
NodeObject::Ptr const object (NodeObject::createObject (
getTypeFromString (pSt.peekString (0)),
pSt.getUInt32 (1),
data,
hash));

callback.visitObject (object);
}

pSt.reset();
pSt.reset ();
}

void bind(SqliteStatement& statement, NodeObject::ref object)
int getWriteLoad ()
{
return 0;
}

//--------------------------------------------------------------------------

void doBind (SqliteStatement& statement, NodeObject::ref object)
{
char const* type;
switch (object->getType())
@@ -104,25 +184,27 @@ public:
statement.bindStatic(4, object->getData());
}

NodeObjectType getType(std::string const& type)
NodeObjectType getTypeFromString (std::string const& s)
{
NodeObjectType htype = hotUNKNOWN;
if (!type.empty())
NodeObjectType type = hotUNKNOWN;

if (!s.empty ())
{
switch (type[0])
switch (s [0])
{
case 'L': htype = hotLEDGER; break;
case 'T': htype = hotTRANSACTION; break;
case 'A': htype = hotACCOUNT_NODE; break;
case 'N': htype = hotTRANSACTION_NODE; break;
case 'L': type = hotLEDGER; break;
case 'T': type = hotTRANSACTION; break;
case 'A': type = hotACCOUNT_NODE; break;
case 'N': type = hotTRANSACTION_NODE; break;
}
}
return htype;
return type;
}
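
Editor's note: the backend stores a NodeObject's type as a single-character string in the ObjType column; getTypeFromString() above decodes it, and doBind() presumably performs the reverse mapping inside the switch elided by the hunk header. A standalone sketch of both directions, using a local enum in place of rippled's hotLEDGER/hotTRANSACTION constants, might look like this:

// Illustrative sketch only; the enum and function names are local stand-ins,
// not rippled's. It mirrors the single-character encoding used above.
#include <string>

enum ObjectType { typeUNKNOWN, typeLEDGER, typeTRANSACTION, typeACCOUNT_NODE, typeTRANSACTION_NODE };

char const* typeToString (ObjectType t)
{
    switch (t)
    {
    case typeLEDGER:           return "L";
    case typeTRANSACTION:      return "T";
    case typeACCOUNT_NODE:     return "A";
    case typeTRANSACTION_NODE: return "N";
    default:                   return "U"; // unknown
    }
}

ObjectType typeFromString (std::string const& s)
{
    if (! s.empty ())
    {
        switch (s [0])
        {
        case 'L': return typeLEDGER;
        case 'T': return typeTRANSACTION;
        case 'A': return typeACCOUNT_NODE;
        case 'N': return typeTRANSACTION_NODE;
        }
    }
    return typeUNKNOWN;
}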

private:
std::string mName;
DatabaseCon* mDb;
size_t const m_keyBytes;
std::string const m_name;
ScopedPointer <DatabaseCon> m_db;
};

//------------------------------------------------------------------------------
@@ -147,7 +229,10 @@ String SqliteBackendFactory::getName () const
return "Sqlite";
}

NodeStore::Backend* SqliteBackendFactory::createInstance (StringPairArray const& keyValues)
NodeStore::Backend* SqliteBackendFactory::createInstance (
size_t keyBytes,
StringPairArray const& keyValues,
NodeStore::Scheduler& scheduler)
{
return new Backend (keyValues ["path"].toStdString ());
return new Backend (keyBytes, keyValues ["path"].toStdString ());
}

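Editor's note: the new factory signature threads two extra pieces of context into every backend, the fixed key size and a scheduler for deferred work, while the sqlite backend itself only consumes the "path" entry of the parsed key/value section. A standalone sketch of that parameter-passing shape, using std::map in place of StringPairArray and a stub in place of NodeStore::Scheduler, could look like:

// Illustrative only: mirrors the createInstance(keyBytes, keyValues, scheduler)
// shape with standard types; all names here are placeholders, not rippled's.
#include <cstddef>
#include <map>
#include <string>

struct Scheduler { /* stand-in for NodeStore::Scheduler */ };

struct Backend
{
    Backend (std::size_t keyBytes, std::string const& path)
        : keyBytes_ (keyBytes), path_ (path) { }
    std::size_t keyBytes_;
    std::string path_;
};

Backend* createInstance (std::size_t keyBytes,
                         std::map<std::string, std::string> const& keyValues,
                         Scheduler& /*scheduler*/)
{
    // The sqlite backend only needs "path"; other backends may read more keys.
    std::map<std::string, std::string>::const_iterator it = keyValues.find ("path");
    std::string const path = (it != keyValues.end ()) ? it->second : std::string ();
    return new Backend (keyBytes, path);
}
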
@@ -21,7 +21,10 @@ public:
static SqliteBackendFactory& getInstance ();

String getName () const;
NodeStore::Backend* createInstance (StringPairArray const& keyValues);

NodeStore::Backend* createInstance (size_t keyBytes,
StringPairArray const& keyValues,
NodeStore::Scheduler& scheduler);
};

#endif

@@ -65,6 +65,8 @@

#include "../ripple_core/ripple_core.h"

#include "beast/modules/beast_db/beast_db.h"

// VFALCO TODO fix these warnings!
#ifdef _MSC_VER
//#pragma warning (push) // Causes spurious C4503 "decorated name exceeds maximum length"
@@ -102,8 +104,9 @@ namespace ripple

#include "node/ripple_NodeObject.h"
#include "node/ripple_NodeStore.h"
#include "node/ripple_LevelDBBackendFactory.h"
#include "node/ripple_HyperLevelDBBackendFactory.h"
#include "node/ripple_KeyvaDBBackendFactory.h"
#include "node/ripple_LevelDBBackendFactory.h"
#include "node/ripple_MdbBackendFactory.h"
#include "node/ripple_NullBackendFactory.h"
#include "node/ripple_SqliteBackendFactory.h"
@@ -154,10 +157,10 @@ namespace ripple
#include "src/cpp/ripple/TransactionMaster.h"
#include "src/cpp/ripple/ripple_LocalCredentials.h"
#include "src/cpp/ripple/WSDoor.h"
#include "src/cpp/ripple/ripple_Application.h"
#include "src/cpp/ripple/RPCHandler.h"
#include "src/cpp/ripple/TransactionQueue.h"
#include "ledger/OrderBookDB.h"
#include "src/cpp/ripple/ripple_Application.h"
#include "src/cpp/ripple/CallRPC.h"
#include "src/cpp/ripple/Transactor.h"
#include "src/cpp/ripple/ChangeTransactor.h"
@@ -244,10 +247,11 @@ static const uint64 tenTo17m1 = tenTo17 - 1;
#include "basics/ripple_RPCServerHandler.cpp"
#include "node/ripple_NodeObject.cpp"
#include "node/ripple_NodeStore.cpp"
#include "node/ripple_LevelDBBackendFactory.cpp"
#include "node/ripple_HyperLevelDBBackendFactory.cpp"
#include "node/ripple_MdbBackendFactory.cpp"
#include "node/ripple_KeyvaDBBackendFactory.cpp"
#include "node/ripple_LevelDBBackendFactory.cpp"
#include "node/ripple_NullBackendFactory.cpp"
#include "node/ripple_MdbBackendFactory.cpp"
#include "node/ripple_SqliteBackendFactory.cpp"

#include "ledger/Ledger.cpp"
@@ -427,7 +431,6 @@ static DH* handleTmpDh (SSL* ssl, int is_export, int iKeyLength)
#include "ledger/LedgerUnitTests.cpp"
#include "src/cpp/ripple/ripple_SHAMapUnitTests.cpp"
#include "src/cpp/ripple/ripple_SHAMapSyncUnitTests.cpp"
#include "src/cpp/ripple/ripple_ProofOfWorkFactoryUnitTests.cpp" // Requires ProofOfWorkFactory.h
#include "src/cpp/ripple/ripple_SerializedTransactionUnitTests.cpp"

//------------------------------------------------------------------------------

@@ -62,9 +62,75 @@ public:
void sweep ();
void clear ();

bool touch (const key_type& key);
/** Refresh the expiration time on a key.

@param key The key to refresh.
@return `true` if the key was found and the object is cached.
*/
bool refreshIfPresent (const key_type& key)
{
bool found = false;

// If present, make current in cache
boost::recursive_mutex::scoped_lock sl (mLock);

cache_iterator cit = mCache.find (key);

if (cit != mCache.end ())
{
cache_entry& entry = cit->second;

if (! entry.isCached ())
{
// Convert weak to strong.
entry.ptr = entry.lock ();

if (entry.isCached ())
{
// We just put the object back in cache
++mCacheCount;
entry.touch ();
found = true;
}
else
{
// Couldn't get strong pointer,
// object fell out of the cache so remove the entry.
mCache.erase (cit);
}
}
else
{
// It's cached so update the timer
entry.touch ();
found = true;
}
}
else
{
// not present
}

return found;
}

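Editor's note: refreshIfPresent() above hinges on one trick, namely that an entry may hold only a weak pointer once the strong reference ages out, and "touching" it means re-promoting weak to strong so the object counts as cached again. A standalone sketch of that promotion, using std::shared_ptr/std::weak_ptr and std::map instead of the cache's boost types, is shown below for reference; it is not the TaggedCache implementation.

// Illustrative sketch only: weak-to-strong promotion in the spirit of
// refreshIfPresent()/touch() above, with standard-library types.
#include <map>
#include <memory>

template <class Key, class Value>
class MiniCache
{
public:
    struct Entry
    {
        std::shared_ptr<Value> strong; // present while "cached"
        std::weak_ptr<Value> weak;     // survives after the strong ref is dropped
    };

    bool refreshIfPresent (Key const& key)
    {
        typename std::map<Key, Entry>::iterator it = entries_.find (key);
        if (it == entries_.end ())
            return false;                  // not present at all

        Entry& e = it->second;
        if (! e.strong)
        {
            e.strong = e.weak.lock ();     // convert weak to strong
            if (! e.strong)
            {
                entries_.erase (it);       // object already destroyed
                return false;
            }
        }
        // A real cache would also reset the entry's expiration timer here.
        return true;
    }

private:
    std::map<Key, Entry> entries_;
};
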
bool del (const key_type& key, bool valid);

/** Replace aliased objects with originals.

Due to concurrency it is possible for two separate objects with
the same content and referring to the same unique "thing" to exist.
This routine eliminates the duplicate and performs a replacement
on the callers shared pointer if needed.

@param key The key corresponding to the object
@param data A shared pointer to the data corresponding to the object.
@param replace `true` if `data` is the up to date version of the object.

@return `true` if the operation was successful.
*/
bool canonicalize (const key_type& key, boost::shared_ptr<c_Data>& data, bool replace = false);

bool store (const key_type& key, const c_Data& data);
boost::shared_ptr<c_Data> fetch (const key_type& key);
bool retrieve (const key_type& key, c_Data& data);
@@ -264,40 +330,6 @@ void TaggedCache<c_Key, c_Data, Timer>::sweep ()
}
}

template<typename c_Key, typename c_Data, class Timer>
bool TaggedCache<c_Key, c_Data, Timer>::touch (const key_type& key)
{
// If present, make current in cache
boost::recursive_mutex::scoped_lock sl (mLock);

cache_iterator cit = mCache.find (key);

if (cit == mCache.end ()) // Don't have the object
return false;

cache_entry& entry = cit->second;

if (entry.isCached ())
{
entry.touch ();
return true;
}

entry.ptr = entry.lock ();

if (entry.isCached ())
{
// We just put the object back in cache
++mCacheCount;
entry.touch ();
return true;
}

// Object fell out
mCache.erase (cit);
return false;
}

template<typename c_Key, typename c_Data, class Timer>
bool TaggedCache<c_Key, c_Data, Timer>::del (const key_type& key, bool valid)
{
@@ -326,6 +358,7 @@ bool TaggedCache<c_Key, c_Data, Timer>::del (const key_type& key, bool valid)
return ret;
}

// VFALCO NOTE What does it mean to canonicalize the data?
template<typename c_Key, typename c_Data, class Timer>
bool TaggedCache<c_Key, c_Data, Timer>::canonicalize (const key_type& key, boost::shared_ptr<c_Data>& data, bool replace)
{

@@ -19,6 +19,10 @@ inline int Testuint256AdHoc (std::vector<std::string> vArg);

// We have to keep a separate base class without constructors
// so the compiler will let us use it in a union
//
// VFALCO NOTE This class produces undefined behavior when
// BITS is not a multiple of 32!!!
//
template<unsigned int BITS>
class base_uint
{
@@ -30,6 +34,22 @@ protected:
unsigned int pn[WIDTH];

public:
base_uint ()
{
}

/** Construct from a raw pointer.

The buffer pointed to by `data` must be at least 32 bytes.
*/
explicit base_uint (void const* data)
{
// BITS must be a multiple of 32
static_bassert ((BITS % 32) == 0);

memcpy (&pn [0], data, BITS / 8);
}

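Editor's note: the new raw-pointer constructor above simply memcpy()s BITS / 8 bytes into the word array, which is why the buffer-length contract in the doc comment matters. A standalone sketch of the same pattern, with static_assert standing in for static_bassert, is shown below; the class name is a placeholder, not rippled's base_uint.

// Illustrative only: fixed-width integer storage filled from a raw buffer,
// mirroring the base_uint(void const*) constructor above.
#include <cstring>

template <unsigned int BITS>
class FixedUInt
{
    static_assert (BITS % 32 == 0, "BITS must be a multiple of 32");
    static unsigned int const WIDTH = BITS / 32;
    unsigned int pn [WIDTH];

public:
    FixedUInt () { std::memset (pn, 0, sizeof (pn)); }

    // The caller must supply at least BITS / 8 bytes.
    explicit FixedUInt (void const* data)
    {
        std::memcpy (&pn [0], data, BITS / 8);
    }
};

// Usage: construct a 256-bit value directly from a 32-byte key buffer.
// unsigned char key [32] = { /* ... */ };
// FixedUInt<256> h (key);
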
bool isZero () const
{
for (int i = 0; i < WIDTH; i++)
@@ -345,14 +365,24 @@ public:
return reinterpret_cast<unsigned char*> (pn + WIDTH);
}

const unsigned char* begin () const
unsigned char const* cbegin () const noexcept
{
return reinterpret_cast<const unsigned char*> (pn);
return reinterpret_cast <unsigned char const*> (pn);
}

const unsigned char* end () const
unsigned char const* cend () const noexcept
{
return reinterpret_cast<const unsigned char*> (pn + WIDTH);
return reinterpret_cast<unsigned char const*> (pn + WIDTH);
}

const unsigned char* begin () const noexcept
{
return cbegin ();
}

const unsigned char* end () const noexcept
{
return cend ();
}

unsigned int size () const
@@ -474,6 +504,11 @@ public:
*this = b;
}

explicit uint256 (void const* data)
: base_uint256 (data)
{
}

uint256& operator= (uint64 uHost)
{
zero ();
@@ -590,7 +625,7 @@ template<unsigned int BITS> inline std::ostream& operator<< (std::ostream& out,

inline int Testuint256AdHoc (std::vector<std::string> vArg)
{
uint256 g (0);
uint256 g (uint64 (0));

printf ("%s\n", g.ToString ().c_str ());
--g;

@@ -106,7 +106,7 @@ int SectionCount (Section& secSource, const std::string& strSection)
{
Section::mapped_type* pmtEntries = SectionEntries (secSource, strSection);

return pmtEntries ? -1 : pmtEntries->size ();
return pmtEntries ? pmtEntries->size () : 0;
}

bool SectionSingleB (Section& secSource, const std::string& strSection, std::string& strValue)
@@ -128,4 +128,37 @@ bool SectionSingleB (Section& secSource, const std::string& strSection, std::str
return bSingle;
}

// vim:ts=4
StringPairArray parseKeyValueSection (Section& secSource, String const& strSection)
{
StringPairArray result;

// yuck.
std::string const stdStrSection (strSection.toStdString ());

int const count = SectionCount (secSource, stdStrSection);

typedef Section::mapped_type Entries;

Entries* const entries = SectionEntries (secSource, stdStrSection);

if (entries != nullptr)
{
for (Entries::const_iterator iter = entries->begin (); iter != entries->end (); ++iter)
{
String const line (iter->c_str ());

int const equalPos = line.indexOfChar ('=');

if (equalPos != -1)
{
String const key = line.substring (0, equalPos);
String const value = line.substring (equalPos + 1, line.length ());

result.set (key, value);
}
}
}

return result;
}


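Editor's note: parseKeyValueSection() above splits each "key=value" line at the first '=' and deliberately leaves surrounding spaces in place. A standalone equivalent over plain std::string lines, with std::map standing in for StringPairArray, is sketched here for reference:

// Illustrative only: same key=value splitting rule as parseKeyValueSection(),
// expressed with standard-library types.
#include <cstddef>
#include <map>
#include <string>
#include <vector>

std::map<std::string, std::string> parseKeyValueLines (std::vector<std::string> const& lines)
{
    std::map<std::string, std::string> result;

    for (std::size_t i = 0; i < lines.size (); ++i)
    {
        std::string const& line = lines [i];
        std::string::size_type const equalPos = line.find ('=');

        if (equalPos != std::string::npos)
        {
            // Spaces are kept as part of the key and the value.
            result [line.substr (0, equalPos)] = line.substr (equalPos + 1);
        }
    }

    return result;
}
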
@@ -20,4 +20,11 @@ bool SectionSingleB (Section& secSource, const std::string& strSection, std::str
int SectionCount (Section& secSource, const std::string& strSection);
Section::mapped_type* SectionEntries (Section& secSource, const std::string& strSection);

/** Parse a section of lines as a key/value array.

Each line is in the form <key>=<value>.
Spaces are considered part of the key and value.
*/
StringPairArray parseKeyValueSection (Section& secSource, String const& strSection);

#endif

@@ -37,7 +37,7 @@ public:
};

// A class that unlocks on construction and locks on destruction

/*
class ScopedUnlock
{
protected:
@@ -80,5 +80,6 @@ private:
ScopedUnlock (const ScopedUnlock&); // no implementation
ScopedUnlock& operator= (const ScopedUnlock&); // no implementation
};
*/

#endif

Some files were not shown because too many files have changed in this diff.