Plug backends into NodeStore
@@ -37,13 +37,13 @@
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
     </ClCompile>
-    <ClCompile Include="..\..\modules\ripple_app\node\ripple_NodeStoreLevelDB.cpp">
+    <ClCompile Include="..\..\modules\ripple_app\node\ripple_LevelDBBackendFactory.cpp">
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
     </ClCompile>
-    <ClCompile Include="..\..\modules\ripple_app\node\ripple_NodeStoreSqlite.cpp">
+    <ClCompile Include="..\..\modules\ripple_app\node\ripple_SqliteBackendFactory.cpp">
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
@@ -1344,8 +1344,8 @@
     <ClInclude Include="..\..\modules\ripple_app\basics\ripple_Version.h" />
     <ClInclude Include="..\..\modules\ripple_app\node\ripple_NodeObject.h" />
     <ClInclude Include="..\..\modules\ripple_app\node\ripple_NodeStore.h" />
-    <ClInclude Include="..\..\modules\ripple_app\node\ripple_NodeStoreLevelDB.h" />
-    <ClInclude Include="..\..\modules\ripple_app\node\ripple_NodeStoreSqlite.h" />
+    <ClInclude Include="..\..\modules\ripple_app\node\ripple_LevelDBBackendFactory.h" />
+    <ClInclude Include="..\..\modules\ripple_app\node\ripple_SqliteBackendFactory.h" />
     <ClInclude Include="..\..\modules\ripple_app\ripple_app.h" />
     <ClInclude Include="..\..\modules\ripple_basics\containers\ripple_KeyCache.h" />
     <ClInclude Include="..\..\modules\ripple_basics\containers\ripple_RangeSet.h" />
@@ -855,10 +855,10 @@
     <ClCompile Include="..\..\modules\ripple_app\node\ripple_NodeStore.cpp">
       <Filter>[1] Ripple\ripple_app\node</Filter>
     </ClCompile>
-    <ClCompile Include="..\..\modules\ripple_app\node\ripple_NodeStoreLevelDB.cpp">
+    <ClCompile Include="..\..\modules\ripple_app\node\ripple_SqliteBackendFactory.cpp">
       <Filter>[1] Ripple\ripple_app\node</Filter>
     </ClCompile>
-    <ClCompile Include="..\..\modules\ripple_app\node\ripple_NodeStoreSqlite.cpp">
+    <ClCompile Include="..\..\modules\ripple_app\node\ripple_LevelDBBackendFactory.cpp">
       <Filter>[1] Ripple\ripple_app\node</Filter>
     </ClCompile>
   </ItemGroup>
@@ -1602,10 +1602,10 @@
     <ClInclude Include="..\..\modules\ripple_app\node\ripple_NodeStore.h">
       <Filter>[1] Ripple\ripple_app\node</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\modules\ripple_app\node\ripple_NodeStoreLevelDB.h">
+    <ClInclude Include="..\..\modules\ripple_app\node\ripple_SqliteBackendFactory.h">
       <Filter>[1] Ripple\ripple_app\node</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\modules\ripple_app\node\ripple_NodeStoreSqlite.h">
+    <ClInclude Include="..\..\modules\ripple_app\node\ripple_LevelDBBackendFactory.h">
       <Filter>[1] Ripple\ripple_app\node</Filter>
     </ClInclude>
   </ItemGroup>
@@ -133,9 +133,9 @@ String LevelDBBackendFactory::getName () const
     return "LevelDB";
 }
 
-NodeStore::Backend* LevelDBBackendFactory::createInstance (HashMap <String, String> const& keyValueParameters)
+NodeStore::Backend* LevelDBBackendFactory::createInstance (StringPairArray const& keyValues)
 {
-    return nullptr;
+    return new LevelDBBackendFactory::Backend (keyValues ["path"].toStdString ());
 }
 
 //------------------------------------------------------------------------------
@@ -4,8 +4,8 @@
 */
 //==============================================================================
 
-#ifndef RIPPLE_NODESTORELEVELDB_H_INCLUDED
-#define RIPPLE_NODESTORELEVELDB_H_INCLUDED
+#ifndef RIPPLE_LEVELDBBACKENDFACTORY_H_INCLUDED
+#define RIPPLE_LEVELDBBACKENDFACTORY_H_INCLUDED
 
 /** Factory to produce LevelDB backends for the NodeStore.
 */
@@ -21,7 +21,7 @@ public:
     static LevelDBBackendFactory& getInstance ();
 
     String getName () const;
-    NodeStore::Backend* createInstance (HashMap <String, String> const& keyValueParameters);
+    NodeStore::Backend* createInstance (StringPairArray const& keyValues);
 };
 
 #endif
@@ -4,16 +4,62 @@
 */
 //==============================================================================
 
-NodeStore::NodeStore (int cacheSize, int cacheAge) :
-    mCache ("NodeStore", cacheSize, cacheAge), mNegativeCache ("HashedObjectNegativeCache", 0, 120),
-    mWriteGeneration (0), mWriteLoad (0), mWritePending (false), mLevelDB (false), mEphemeralDB (false)
+Array <NodeStore::BackendFactory*> NodeStore::s_factories;
+
+NodeStore::NodeStore (String parameters, int cacheSize, int cacheAge)
+    : mCache ("NodeStore", cacheSize, cacheAge)
+    , mNegativeCache ("HashedObjectNegativeCache", 0, 120)
+    , mWriteGeneration (0)
+    , mWriteLoad (0)
+    , mWritePending (false)
+    , mLevelDB (false)
+    , mEphemeralDB (false)
 {
+    StringPairArray keyValues = parseKeyValueParameters (parameters, '|');
+
+    String const& type = keyValues ["type"];
+
+    if (type.isNotEmpty ())
+    {
+        BackendFactory* factory = nullptr;
+
+        for (int i = 0; i < s_factories.size (); ++i)
+        {
+            if (s_factories [i]->getName () == type)
+            {
+                factory = s_factories [i];
+                break;
+            }
+        }
+
+        if (factory != nullptr)
+        {
+            m_backend = factory->createInstance (keyValues);
+        }
+        else
+        {
+            throw std::runtime_error ("unkown backend type");
+        }
+    }
+    else
+    {
+        throw std::runtime_error ("missing backend type");
+    }
+
     mWriteSet.reserve (128);
 
+    // VFALCO TODO Eliminate usage of theConfig
+    //             This can be done by passing required parameters through
+    //             the backendParameters string.
+    //
     if (theConfig.NODE_DB == "leveldb" || theConfig.NODE_DB == "LevelDB")
+    {
         mLevelDB = true;
+    }
     else if (theConfig.NODE_DB == "SQLite" || theConfig.NODE_DB == "sqlite")
+    {
         mLevelDB = false;
+    }
     else
     {
         WriteLog (lsFATAL, NodeObject) << "Incorrect database selection";
@@ -21,7 +67,43 @@ NodeStore::NodeStore (int cacheSize, int cacheAge) :
     }
 
     if (!theConfig.LDB_EPHEMERAL.empty ())
+    {
+        // VFALCO NOTE This is cryptic
         mEphemeralDB = true;
+    }
+}
+
+void NodeStore::addBackendFactory (BackendFactory& factory)
+{
+    s_factories.add (&factory);
+}
+
+// DEPRECATED
+bool NodeStore::isLevelDB ()
+{
+    return mLevelDB;
+}
+
+float NodeStore::getCacheHitRate ()
+{
+    return mCache.getHitRate ();
+}
+
+bool NodeStore::store (NodeObjectType type, uint32 index, Blob const& data,
+                       uint256 const& hash)
+{
+    if (mLevelDB)
+        return storeLevelDB (type, index, data, hash);
+
+    return storeSQLite (type, index, data, hash);
+}
+
+NodeObject::pointer NodeStore::retrieve (uint256 const& hash)
+{
+    if (mLevelDB)
+        return retrieveLevelDB (hash);
+
+    return retrieveSQLite (hash);
 }
 
 void NodeStore::tune (int size, int age)
@@ -30,6 +112,12 @@ void NodeStore::tune (int size, int age)
     mCache.setTargetAge (age);
 }
 
+void NodeStore::sweep ()
+{
+    mCache.sweep ();
+    mNegativeCache.sweep ();
+}
+
 void NodeStore::waitWrite ()
 {
     boost::mutex::scoped_lock sl (mWriteMutex);
@@ -617,5 +705,3 @@ int NodeStore::import (const std::string& file)
     WriteLog (lsWARNING, NodeObject) << "Imported " << count << " nodes";
     return count;
 }
-
-// vim:ts=4
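The NodeStore.cpp changes above replace the old hard-wired LevelDB/SQLite branch with a lookup of the registered factory whose getName () matches the "type" key of the parameter string. A minimal sketch of the intended call sequence, assuming the factories and parameter format introduced elsewhere in this commit (this block is illustrative, not part of the diff):

```cpp
// Factories must be registered before a NodeStore is constructed, because the
// constructor resolves the "type" key against s_factories and throws
// std::runtime_error if no registered factory matches.
NodeStore::addBackendFactory (SqliteBackendFactory::getInstance ());
NodeStore::addBackendFactory (LevelDBBackendFactory::getInstance ());

// "type" selects the factory; the remaining keys (here "path") are passed
// through unchanged to the selected factory's createInstance ().
NodeStore store ("type=LevelDB|path=/mnt/ephemeral", 16384, 300);
```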
@@ -23,15 +23,14 @@ public:
         //
         typedef boost::shared_ptr <Backend> pointer;
 
-        Backend() { ; }
-        virtual ~Backend() { ; }
+        virtual ~Backend () { }
 
         virtual std::string getDataBaseName() = 0;
 
         // Store/retrieve a single object
         // These functions must be thread safe
-        virtual bool store(NodeObject::ref) = 0;
-        virtual NodeObject::pointer retrieve(uint256 const &hash) = 0;
+        virtual bool store (NodeObject::ref) = 0;
+        virtual NodeObject::pointer retrieve (uint256 const &hash) = 0;
 
         // Store a group of objects
         // This function will only be called from a single thread
@@ -56,38 +55,36 @@ public:
 
         /** Create an instance of this factory's backend.
         */
-        virtual Backend* createInstance (HashMap <String, String> const& keyValueParameters) = 0;
+        virtual Backend* createInstance (StringPairArray const& keyValues) = 0;
     };
 
 public:
-    NodeStore (int cacheSize, int cacheAge);
+    /** Construct a node store.
 
-    bool isLevelDB ()
-    {
-        return mLevelDB;
-    }
+        parameters has the format:
 
-    float getCacheHitRate ()
-    {
-        return mCache.getHitRate ();
-    }
+        <key>=<value>['|'<key>=<value>]
+
+        The key "type" must exist, it defines the backend. For example
+        "type=LevelDB|path=/mnt/ephemeral"
+    */
+    NodeStore (String parameters, int cacheSize, int cacheAge);
+
+    /** Add the specified backend factory to the list of available factories.
+
+        The names of available factories are compared against the "type"
+        value in the parameter list on construction.
+    */
+    static void addBackendFactory (BackendFactory& factory);
+
+    bool isLevelDB ();
+
+    float getCacheHitRate ();
 
     bool store (NodeObjectType type, uint32 index, Blob const& data,
-                uint256 const& hash)
-    {
-        if (mLevelDB)
-            return storeLevelDB (type, index, data, hash);
-
-        return storeSQLite (type, index, data, hash);
-    }
-
-    NodeObject::pointer retrieve (uint256 const& hash)
-    {
-        if (mLevelDB)
-            return retrieveLevelDB (hash);
-
-        return retrieveSQLite (hash);
-    }
+                uint256 const& hash);
+
+    NodeObject::pointer retrieve (uint256 const& hash);
 
     bool storeSQLite (NodeObjectType type, uint32 index, Blob const& data,
                 uint256 const& hash);
@@ -102,11 +99,7 @@ public:
 
     void waitWrite ();
     void tune (int size, int age);
-    void sweep ()
-    {
-        mCache.sweep ();
-        mNegativeCache.sweep ();
-    }
+    void sweep ();
     int getWriteLoad ();
 
     int import (const std::string& fileName);
@@ -117,6 +110,11 @@ private:
     static void LLWrite (const std::vector< boost::shared_ptr<NodeObject> >& set, leveldb::DB* db);
 
 private:
+    static Array <BackendFactory*> s_factories;
+
+private:
+    ScopedPointer <Backend> m_backend;
+
     TaggedCache<uint256, NodeObject, UptimeTimerAdapter> mCache;
     KeyCache <uint256, UptimeTimerAdapter> mNegativeCache;
 
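The header changes above define the extension point: a backend is an implementation of NodeStore::Backend, produced by a NodeStore::BackendFactory that has been registered through addBackendFactory and is selected by its name. Below is a hypothetical illustration, not part of this commit: the MemoryBackendFactory class and its std::map storage are invented, NodeObject::getHash () is assumed to exist, and the remaining pure virtual members of Backend (for example the bulk store declared just after the lines shown above) would also need to be overridden.

```cpp
// Hypothetical in-memory backend, for illustration only.
class MemoryBackendFactory : public NodeStore::BackendFactory
{
public:
    class Backend : public NodeStore::Backend
    {
    public:
        std::string getDataBaseName () { return "memory"; }

        // Assumes NodeObject exposes its hash via getHash ().
        bool store (NodeObject::ref object)
        {
            m_map [object->getHash ()] = object;
            return true;
        }

        NodeObject::pointer retrieve (uint256 const& hash)
        {
            std::map <uint256, NodeObject::pointer>::const_iterator it = m_map.find (hash);
            return (it == m_map.end ()) ? NodeObject::pointer () : it->second;
        }

    private:
        std::map <uint256, NodeObject::pointer> m_map;
    };

    static MemoryBackendFactory& getInstance ()
    {
        static MemoryBackendFactory instance;
        return instance;
    }

    String getName () const { return "Memory"; }

    NodeStore::Backend* createInstance (StringPairArray const& keyValues)
    {
        // This backend ignores the key/value parameters entirely.
        return new Backend;
    }
};
```

A factory like this would be registered next to the SQLite and LevelDB factories (see the rippleMain hunk further below) and selected with "type=Memory".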
@@ -159,7 +159,7 @@ String SqliteBackendFactory::getName () const
     return "Sqlite";
 }
 
-NodeStore::Backend* SqliteBackendFactory::createInstance (HashMap <String, String> const& keyValueParameters)
+NodeStore::Backend* SqliteBackendFactory::createInstance (StringPairArray const& keyValues)
 {
-    return new Backend (keyValueParameters ["path"].toStdString ());
+    return new Backend (keyValues ["path"].toStdString ());
 }
@@ -4,8 +4,8 @@
 */
 //==============================================================================
 
-#ifndef RIPPLE_NODESTORESQLITE_H_INCLUDED
-#define RIPPLE_NODESTORESQLITE_H_INCLUDED
+#ifndef RIPPLE_SQLITEBACKENDFACTORY_H_INCLUDED
+#define RIPPLE_SQLITEBACKENDFACTORY_H_INCLUDED
 
 /** Factory to produce SQLite backends for the NodeStore.
 */
@@ -21,7 +21,7 @@ public:
     static SqliteBackendFactory& getInstance ();
 
     String getName () const;
-    NodeStore::Backend* createInstance (HashMap <String, String> const& keyValueParameters);
+    NodeStore::Backend* createInstance (StringPairArray const& keyValues);
 };
 
 #endif
@@ -96,8 +96,8 @@ namespace ripple
 
 #include "node/ripple_NodeObject.h"
 #include "node/ripple_NodeStore.h"
-#include "node/ripple_NodeStoreLevelDB.h"
-#include "node/ripple_NodeStoreSqlite.h"
+#include "node/ripple_SqliteBackendFactory.h"
+#include "node/ripple_LevelDBBackendFactory.h"
 
 #include "src/cpp/ripple/ripple_SHAMapItem.h"
 #include "src/cpp/ripple/ripple_SHAMapNode.h"
@@ -236,8 +236,8 @@ static const uint64 tenTo17m1 = tenTo17 - 1;
 #include "basics/ripple_RPCServerHandler.cpp"
 #include "node/ripple_NodeObject.cpp"
 #include "node/ripple_NodeStore.cpp"
-#include "node/ripple_NodeStoreLevelDB.cpp"
-#include "node/ripple_NodeStoreSqlite.cpp"
+#include "node/ripple_LevelDBBackendFactory.cpp"
+#include "node/ripple_SqliteBackendFactory.cpp"
 
 #include "src/cpp/ripple/Ledger.cpp"
 #include "src/cpp/ripple/ripple_SHAMapDelta.cpp"
@@ -271,3 +271,42 @@ std::string addressToString (void const* address)
     return strHex (static_cast <char const*> (address) - static_cast <char const*> (0));
 }
 
+StringPairArray parseKeyValueParameters (String parameters, beast_wchar delimiter)
+{
+    StringPairArray keyValues;
+
+    while (parameters.isNotEmpty ())
+    {
+        String pair;
+
+        {
+            int const delimiterPos = parameters.indexOfChar (delimiter);
+
+            if (delimiterPos != -1)
+            {
+                pair = parameters.substring (0, delimiterPos);
+
+                parameters = parameters.substring (delimiterPos + 1);
+            }
+            else
+            {
+                pair = parameters;
+
+                parameters = String::empty;
+            }
+        }
+
+        int const equalPos = pair.indexOfChar ('=');
+
+        if (equalPos != -1)
+        {
+            String const key = pair.substring (0, equalPos);
+            String const value = pair.substring (equalPos + 1, pair.length ());
+
+            keyValues.set (key, value);
+        }
+    }
+
+    return keyValues;
+}
+
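For reference, a sketch of what the new parser produces for the kind of string the Application constructor passes later in this commit (illustrative, not part of the diff):

```cpp
// Split on '|' and then on the first '=' of each token.
StringPairArray keyValues = parseKeyValueParameters (
    "type=LevelDB|path=/mnt/ephemeral|compact=1", '|');

// keyValues ["type"]    == "LevelDB"
// keyValues ["path"]    == "/mnt/ephemeral"
// keyValues ["compact"] == "1"
//
// A token with no '=' is simply skipped; it is neither stored nor reported.
```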
@@ -214,4 +214,8 @@ bool parseUrl (const std::string& strUrl, std::string& strScheme, std::string& s
 */
 extern std::string addressToString (void const* address);
 
+/** Parse a pipe delimited key/value parameter string.
+*/
+StringPairArray parseKeyValueParameters (String parameters, beast_wchar delimiter);
+
 #endif
@@ -533,7 +533,7 @@ void Ledger::saveAcceptedLedger (Job&, bool fromConsensus)
     Serializer s (128);
     s.add32 (HashPrefix::ledgerMaster);
     addRaw (s);
-    getApp().getHashedObjectStore ().store (hotLEDGER, mLedgerSeq, s.peekData (), mHash);
+    getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, s.peekData (), mHash);
 
     AcceptedLedger::pointer aLedger = AcceptedLedger::makeAcceptedLedger (shared_from_this ());
 
@@ -2401,7 +2401,7 @@ Json::Value RPCHandler::doGetCounts (Json::Value params, LoadType* loadType, Sco
     if (dbKB > 0)
         ret["dbKBLedger"] = dbKB;
 
-    if (!getApp().getHashedObjectStore ().isLevelDB ())
+    if (!getApp().getNodeStore ().isLevelDB ())
     {
         dbKB = getApp().getHashNodeDB ()->getDB ()->getKBUsedDB ();
 
@@ -2414,10 +2414,10 @@ Json::Value RPCHandler::doGetCounts (Json::Value params, LoadType* loadType, Sco
     if (dbKB > 0)
         ret["dbKBTransaction"] = dbKB;
 
-    ret["write_load"] = getApp().getHashedObjectStore ().getWriteLoad ();
+    ret["write_load"] = getApp().getNodeStore ().getWriteLoad ();
 
     ret["SLE_hit_rate"] = getApp().getSLECache ().getHitRate ();
-    ret["node_hit_rate"] = getApp().getHashedObjectStore ().getCacheHitRate ();
+    ret["node_hit_rate"] = getApp().getNodeStore ().getCacheHitRate ();
     ret["ledger_hit_rate"] = getApp().getLedgerMaster ().getCacheHitRate ();
     ret["AL_hit_rate"] = AcceptedLedger::getCacheHitRate ();
 
@@ -25,7 +25,62 @@ public:
 
     class Holder;
 
-    Application ();
+    Application ()
+    //
+    // VFALCO NOTE Change this to control whether or not the Application
+    //             object is destroyed on exit
+    //
+#if 1
+    // Application object will be deleted on exit. If the code doesn't exit
+    // cleanly this could cause hangs or crashes on exit.
+    //
+        : SharedSingleton <Application> (SingletonLifetime::persistAfterCreation)
+#else
+    // This will make it so that the Application object is not deleted on exit.
+    //
+        : SharedSingleton <Application> (SingletonLifetime::neverDestroyed)
+#endif
+        , mIOService ((theConfig.NODE_SIZE >= 2) ? 2 : 1)
+        , mIOWork (mIOService)
+        , mNetOps (&mLedgerMaster)
+        , m_rpcServerHandler (mNetOps)
+        , mTempNodeCache ("NodeCache", 16384, 90)
+        , m_nodeStore ("type=LevelDB|path=/mnt/stuff|compact=1", 16384, 300)
+        , mSLECache ("LedgerEntryCache", 4096, 120)
+        , mSNTPClient (mAuxService)
+        , mJobQueue (mIOService)
+        // VFALCO New stuff
+        , mFeatures (IFeatures::New (2 * 7 * 24 * 60 * 60, 200)) // two weeks, 200/256
+        , mFeeVote (IFeeVote::New (10, 50 * SYSTEM_CURRENCY_PARTS, 12.5 * SYSTEM_CURRENCY_PARTS))
+        , mFeeTrack (ILoadFeeTrack::New ())
+        , mHashRouter (IHashRouter::New (IHashRouter::getDefaultHoldTime ()))
+        , mValidations (IValidations::New ())
+        , mUNL (UniqueNodeList::New ())
+        , mProofOfWorkFactory (IProofOfWorkFactory::New ())
+        , mPeers (IPeers::New (mIOService))
+        , m_loadManager (ILoadManager::New ())
+        // VFALCO End new stuff
+        // VFALCO TODO replace all NULL with nullptr
+        , mRpcDB (NULL)
+        , mTxnDB (NULL)
+        , mLedgerDB (NULL)
+        , mWalletDB (NULL) // VFALCO NOTE are all these 'NULL' ctor params necessary?
+        , mNetNodeDB (NULL)
+        , mPathFindDB (NULL)
+        , mHashNodeDB (NULL)
+        , mHashNodeLDB (NULL)
+        , mEphemeralLDB (NULL)
+        , mPeerDoor (NULL)
+        , mRPCDoor (NULL)
+        , mWSPublicDoor (NULL)
+        , mWSPrivateDoor (NULL)
+        , mSweepTimer (mAuxService)
+        , mShutdown (false)
+    {
+        // VFALCO TODO remove these once the call is thread safe.
+        HashMaps::getInstance ().initializeNonce <size_t> ();
+    }
 
     ~Application ();
 
     LocalCredentials& getLocalCredentials ()
@@ -63,9 +118,9 @@ public:
         return mTempNodeCache;
     }
 
-    NodeStore& getHashedObjectStore ()
+    NodeStore& getNodeStore ()
     {
-        return mHashedObjectStore;
+        return m_nodeStore;
     }
 
     JobQueue& getJobQueue ()
@@ -223,7 +278,7 @@ private:
     NetworkOPs mNetOps;
     RPCServerHandler m_rpcServerHandler;
     NodeCache mTempNodeCache;
-    NodeStore mHashedObjectStore;
+    NodeStore m_nodeStore;
     SLECache mSLECache;
     SNTPClient mSNTPClient;
     JobQueue mJobQueue;
@@ -264,62 +319,6 @@ private:
     bool volatile mShutdown;
 };
 
-Application::Application ()
-//
-// VFALCO NOTE Change this to control whether or not the Application
-// object is destroyed on exit
-//
-#if 1
-// Application object will be deleted on exit. If the code doesn't exit
-// cleanly this could cause hangs or crashes on exit.
-//
-    : SharedSingleton <Application> (SingletonLifetime::persistAfterCreation)
-#else
-// This will make it so that the Application object is not deleted on exit.
-//
-    : SharedSingleton <Application> (SingletonLifetime::neverDestroyed)
-#endif
-    , mIOService ((theConfig.NODE_SIZE >= 2) ? 2 : 1)
-    , mIOWork (mIOService)
-    , mNetOps (&mLedgerMaster)
-    , m_rpcServerHandler (mNetOps)
-    , mTempNodeCache ("NodeCache", 16384, 90)
-    , mHashedObjectStore (16384, 300)
-    , mSLECache ("LedgerEntryCache", 4096, 120)
-    , mSNTPClient (mAuxService)
-    , mJobQueue (mIOService)
-    // VFALCO New stuff
-    , mFeatures (IFeatures::New (2 * 7 * 24 * 60 * 60, 200)) // two weeks, 200/256
-    , mFeeVote (IFeeVote::New (10, 50 * SYSTEM_CURRENCY_PARTS, 12.5 * SYSTEM_CURRENCY_PARTS))
-    , mFeeTrack (ILoadFeeTrack::New ())
-    , mHashRouter (IHashRouter::New (IHashRouter::getDefaultHoldTime ()))
-    , mValidations (IValidations::New ())
-    , mUNL (UniqueNodeList::New ())
-    , mProofOfWorkFactory (IProofOfWorkFactory::New ())
-    , mPeers (IPeers::New (mIOService))
-    , m_loadManager (ILoadManager::New ())
-    // VFALCO End new stuff
-    // VFALCO TODO replace all NULL with nullptr
-    , mRpcDB (NULL)
-    , mTxnDB (NULL)
-    , mLedgerDB (NULL)
-    , mWalletDB (NULL) // VFALCO NOTE are all these 'NULL' ctor params necessary?
-    , mNetNodeDB (NULL)
-    , mPathFindDB (NULL)
-    , mHashNodeDB (NULL)
-    , mHashNodeLDB (NULL)
-    , mEphemeralLDB (NULL)
-    , mPeerDoor (NULL)
-    , mRPCDoor (NULL)
-    , mWSPublicDoor (NULL)
-    , mWSPrivateDoor (NULL)
-    , mSweepTimer (mAuxService)
-    , mShutdown (false)
-{
-    // VFALCO TODO remove these once the call is thread safe.
-    HashMaps::getInstance ().initializeNonce <size_t> ();
-}
-
 Application::~Application ()
 {
     // VFALCO TODO Wrap these in ScopedPointer
@@ -341,7 +340,7 @@ void Application::stop ()
     StopSustain ();
     mShutdown = true;
     mIOService.stop ();
-    mHashedObjectStore.waitWrite ();
+    m_nodeStore.waitWrite ();
     mValidations->flush ();
     mAuxService.stop ();
     mJobQueue.shutdown ();
@@ -452,7 +451,7 @@ void Application::setup ()
     if (theConfig.LDB_IMPORT)
         options.write_buffer_size = 32 << 20;
 
-    if (mHashedObjectStore.isLevelDB ())
+    if (m_nodeStore.isLevelDB ())
     {
         WriteLog (lsINFO, Application) << "LevelDB used for nodes";
         leveldb::Status status = leveldb::DB::Open (options, (theConfig.DATA_DIR / "hashnode").string (), &mHashNodeLDB);
@@ -486,7 +485,7 @@ void Application::setup ()
         }
     }
 
-    if (!mHashedObjectStore.isLevelDB ())
+    if (!m_nodeStore.isLevelDB ())
     {
         getApp().getHashNodeDB ()->getDB ()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
                 (theConfig.getSize (siHashNodeDBCache) * 1024)));
@@ -548,7 +547,7 @@ void Application::setup ()
     getUNL ().nodeBootstrap ();
 
     mValidations->tune (theConfig.getSize (siValidationsSize), theConfig.getSize (siValidationsAge));
-    mHashedObjectStore.tune (theConfig.getSize (siNodeCacheSize), theConfig.getSize (siNodeCacheAge));
+    m_nodeStore.tune (theConfig.getSize (siNodeCacheSize), theConfig.getSize (siNodeCacheAge));
     mLedgerMaster.tune (theConfig.getSize (siLedgerSize), theConfig.getSize (siLedgerAge));
     mSLECache.setTargetSize (theConfig.getSize (siSLECacheSize));
     mSLECache.setTargetAge (theConfig.getSize (siSLECacheAge));
@@ -723,7 +722,7 @@ void Application::sweep ()
     // have listeners register for "onSweep ()" notification.
     //
     mMasterTransaction.sweep ();
-    mHashedObjectStore.sweep ();
+    m_nodeStore.sweep ();
     mLedgerMaster.sweep ();
     mTempNodeCache.sweep ();
     mValidations->sweep ();
@@ -995,7 +994,7 @@ void Application::updateTables (bool ldbImport)
         exit (1);
     }
 
-    if (getApp().getHashedObjectStore ().isLevelDB ())
+    if (getApp().getNodeStore ().isLevelDB ())
     {
         boost::filesystem::path hashPath = theConfig.DATA_DIR / "hashnode.db";
 
@@ -1004,7 +1003,7 @@ void Application::updateTables (bool ldbImport)
         if (theConfig.LDB_IMPORT)
         {
             Log (lsWARNING) << "Importing SQLite -> LevelDB";
-            getApp().getHashedObjectStore ().import (hashPath.string ());
+            getApp().getNodeStore ().import (hashPath.string ());
             Log (lsWARNING) << "Remove or remname the hashnode.db file";
         }
         else
@@ -68,7 +68,7 @@ public:
     virtual UniqueNodeList& getUNL () = 0;
     virtual IValidations& getValidations () = 0;
 
-    virtual NodeStore& getHashedObjectStore () = 0;
+    virtual NodeStore& getNodeStore () = 0;
     virtual JobQueue& getJobQueue () = 0;
     virtual InboundLedgers& getInboundLedgers () = 0;
     virtual LedgerMaster& getLedgerMaster () = 0;
@@ -49,7 +49,7 @@ bool InboundLedger::tryLocal ()
     if (!mHaveBase)
     {
         // Nothing we can do without the ledger base
-        NodeObject::pointer node = getApp().getHashedObjectStore ().retrieve (mHash);
+        NodeObject::pointer node = getApp().getNodeStore ().retrieve (mHash);
 
         if (!node)
         {
@@ -60,7 +60,7 @@ bool InboundLedger::tryLocal ()
 
             WriteLog (lsTRACE, InboundLedger) << "Ledger base found in fetch pack";
             mLedger = boost::make_shared<Ledger> (data, true);
-            getApp().getHashedObjectStore ().store (hotLEDGER, mLedger->getLedgerSeq (), data, mHash);
+            getApp().getNodeStore ().store (hotLEDGER, mLedger->getLedgerSeq (), data, mHash);
         }
         else
         {
@@ -658,7 +658,7 @@ bool InboundLedger::takeBase (const std::string& data) // data must not have has
     Serializer s (data.size () + 4);
     s.add32 (HashPrefix::ledgerMaster);
     s.addRaw (data);
-    getApp().getHashedObjectStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.peekData (), mHash);
+    getApp().getNodeStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.peekData (), mHash);
 
     progress ();
 
@@ -181,9 +181,10 @@ int rippleMain (int argc, char** argv)
     po::positional_options_description p;
     p.add ("parameters", -1);
 
-    //
-    // Prepare to run
-    //
+    // These must be added before the Application object is created
+    NodeStore::addBackendFactory (SqliteBackendFactory::getInstance ());
+    NodeStore::addBackendFactory (LevelDBBackendFactory::getInstance ());
+
 
     if (! RandomNumbers::getInstance ().initialize ())
    {
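This registration point, ahead of Application construction, is also where an out-of-tree backend would be plugged in; continuing the hypothetical MemoryBackendFactory sketched earlier (not part of this commit):

```cpp
// Registered before theApp is constructed, selectable with "type=Memory".
NodeStore::addBackendFactory (MemoryBackendFactory::getInstance ());
```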
@@ -1535,7 +1535,7 @@ void PeerImp::recvGetObjectByHash (const boost::shared_ptr<protocol::TMGetObject
             if (obj.has_hash () && (obj.hash ().size () == (256 / 8)))
             {
                 memcpy (hash.begin (), obj.hash ().data (), 256 / 8);
-                NodeObject::pointer hObj = getApp().getHashedObjectStore ().retrieve (hash);
+                NodeObject::pointer hObj = getApp().getNodeStore ().retrieve (hash);
 
                 if (hObj)
                 {
@@ -824,7 +824,7 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternalNT (const SHAMapNode& id, uint2
     if (!getApp().running ())
         return ret;
 
-    NodeObject::pointer obj (getApp().getHashedObjectStore ().retrieve (hash));
+    NodeObject::pointer obj (getApp().getNodeStore ().retrieve (hash));
 
     if (!obj)
     {
@@ -937,7 +937,7 @@ int SHAMap::flushDirty (DirtyMap& map, int maxNodes, NodeObjectType t, uint32 se
 
 #endif
 
-        getApp().getHashedObjectStore ().store (t, seq, s.peekData (), it->second->getNodeHash ());
+        getApp().getNodeStore ().store (t, seq, s.peekData (), it->second->getNodeHash ());
 
         if (flushed++ >= maxNodes)
             return flushed;
@@ -73,7 +73,7 @@ void AccountStateSF::gotNode (bool fromFilter,
                               Blob const& nodeData,
                               SHAMapTreeNode::TNType)
 {
-    getApp().getHashedObjectStore ().store (hotACCOUNT_NODE, mLedgerSeq, nodeData, nodeHash);
+    getApp().getNodeStore ().store (hotACCOUNT_NODE, mLedgerSeq, nodeData, nodeHash);
 }
 
 bool AccountStateSF::haveNode (SHAMapNode const& id,
@@ -96,7 +96,7 @@ void TransactionStateSF::gotNode (bool fromFilter,
                                   Blob const& nodeData,
                                   SHAMapTreeNode::TNType type)
 {
-    getApp().getHashedObjectStore ().store (
+    getApp().getNodeStore ().store (
         (type == SHAMapTreeNode::tnTRANSACTION_NM) ? hotTRANSACTION : hotTRANSACTION_NODE,
         mLedgerSeq,
         nodeData,