mirror of
https://github.com/XRPLF/rippled.git
synced 2025-11-20 11:05:54 +00:00
Refactor NodeStore
This commit is contained in:
@@ -169,6 +169,12 @@
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_MdbBackendFactory.cpp">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_NodeObject.cpp">
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
|
||||
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
|
||||
@@ -1416,6 +1422,7 @@
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_HyperLevelDBBackendFactory.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_KeyvaDB.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_KeyvaDBBackendFactory.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_MdbBackendFactory.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_NodeObject.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_NodeStore.h" />
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_LevelDBBackendFactory.h" />
|
||||
|
||||
@@ -807,9 +807,6 @@
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_LevelDBBackendFactory.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_NullBackendFactory.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_mdb\ripple_mdb.c">
|
||||
<Filter>[1] Ripple\ripple_mdb</Filter>
|
||||
</ClCompile>
|
||||
@@ -903,6 +900,12 @@
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_KeyvaDBBackendFactory.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_NullBackendFactory.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\modules\ripple_app\node\ripple_MdbBackendFactory.cpp">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClCompile>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<ClInclude Include="..\..\Subtrees\sqlite\sqlite3.h">
|
||||
@@ -1587,9 +1590,6 @@
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_LevelDBBackendFactory.h">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_NullBackendFactory.h">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\ripple_mdb\ripple_mdb.h">
|
||||
<Filter>[1] Ripple\ripple_mdb</Filter>
|
||||
</ClInclude>
|
||||
@@ -1686,6 +1686,12 @@
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_KeyvaDBBackendFactory.h">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_NullBackendFactory.h">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClInclude>
|
||||
<ClInclude Include="..\..\modules\ripple_app\node\ripple_MdbBackendFactory.h">
|
||||
<Filter>[1] Ripple\ripple_app\node</Filter>
|
||||
</ClInclude>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<CustomBuild Include="..\..\src\cpp\ripple\ripple.proto" />
|
||||
|
||||
4
TODO.txt
4
TODO.txt
@@ -10,7 +10,11 @@ Vinnie's Short List (Changes day to day)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
- Replace master lock with
|
||||
|
||||
- Replace base_uint and uintXXX with UnsignedInteger
|
||||
* Need to specialize UnsignedInteger to work efficiently with 4 and 8 byte
|
||||
multiples of the size.
|
||||
|
||||
- Rewrite boost program_options in Beast
|
||||
|
||||
|
||||
@@ -283,32 +283,15 @@ const char* WalletDBInit[] =
|
||||
int WalletDBCount = NUMBER (WalletDBInit);
|
||||
|
||||
// Hash node database holds nodes indexed by hash
|
||||
const char* HashNodeDBInit[] =
|
||||
{
|
||||
"PRAGMA synchronous=NORMAL;",
|
||||
"PRAGMA journal_mode=WAL;",
|
||||
"PRAGMA journal_size_limit=1582080;",
|
||||
|
||||
#if (ULONG_MAX > UINT_MAX) && !defined (NO_SQLITE_MMAP)
|
||||
"PRAGMA mmap_size=171798691840;",
|
||||
#endif
|
||||
|
||||
"BEGIN TRANSACTION;",
|
||||
|
||||
"CREATE TABLE CommittedObjects ( \
|
||||
Hash CHARACTER(64) PRIMARY KEY, \
|
||||
ObjType CHAR(1) NOT NULL, \
|
||||
LedgerIndex BIGINT UNSIGNED, \
|
||||
Object BLOB \
|
||||
);",
|
||||
|
||||
"END TRANSACTION;"
|
||||
};
|
||||
// VFALCO TODO Remove this since it looks unused
|
||||
/*
|
||||
|
||||
int HashNodeDBCount = NUMBER (HashNodeDBInit);
|
||||
*/
|
||||
|
||||
// Net node database holds nodes seen on the network
|
||||
// XXX Not really used needs replacement.
|
||||
/*
|
||||
const char* NetNodeDBInit[] =
|
||||
{
|
||||
"CREATE TABLE KnownNodes ( \
|
||||
@@ -320,7 +303,10 @@ const char* NetNodeDBInit[] =
|
||||
};
|
||||
|
||||
int NetNodeDBCount = NUMBER (NetNodeDBInit);
|
||||
*/
|
||||
|
||||
// This appears to be unused
|
||||
/*
|
||||
const char* PathFindDBInit[] =
|
||||
{
|
||||
"PRAGMA synchronous = OFF; ",
|
||||
@@ -353,5 +339,5 @@ const char* PathFindDBInit[] =
|
||||
};
|
||||
|
||||
int PathFindDBCount = NUMBER (PathFindDBInit);
|
||||
*/
|
||||
|
||||
// vim:ts=4
|
||||
|
||||
@@ -12,19 +12,11 @@ extern const char* RpcDBInit[];
|
||||
extern const char* TxnDBInit[];
|
||||
extern const char* LedgerDBInit[];
|
||||
extern const char* WalletDBInit[];
|
||||
extern const char* HashNodeDBInit[];
|
||||
|
||||
// VFALCO TODO Figure out what these counts are for
|
||||
extern int RpcDBCount;
|
||||
extern int TxnDBCount;
|
||||
extern int LedgerDBCount;
|
||||
extern int WalletDBCount;
|
||||
extern int HashNodeDBCount;
|
||||
|
||||
// VFALCO TODO Seems these two aren't used so delete EVERYTHING.
|
||||
extern const char* NetNodeDBInit[];
|
||||
extern const char* PathFindDBInit[];
|
||||
extern int NetNodeDBCount;
|
||||
extern int PathFindDBCount;
|
||||
|
||||
#endif
|
||||
|
||||
@@ -9,8 +9,9 @@
|
||||
class HyperLevelDBBackendFactory::Backend : public NodeStore::Backend
|
||||
{
|
||||
public:
|
||||
Backend (StringPairArray const& keyValues)
|
||||
: mName(keyValues ["path"].toStdString ())
|
||||
Backend (size_t keyBytes, StringPairArray const& keyValues)
|
||||
: m_keyBytes (keyBytes)
|
||||
, mName(keyValues ["path"].toStdString ())
|
||||
, mDB(NULL)
|
||||
{
|
||||
if (mName.empty())
|
||||
@@ -58,7 +59,7 @@ public:
|
||||
{
|
||||
Blob blob (toBlob (obj));
|
||||
batch.Put (
|
||||
hyperleveldb::Slice (reinterpret_cast<char const*>(obj->getHash ().begin ()), 256 / 8),
|
||||
hyperleveldb::Slice (reinterpret_cast<char const*>(obj->getHash ().begin ()), m_keyBytes),
|
||||
hyperleveldb::Slice (reinterpret_cast<char const*>(&blob.front ()), blob.size ()));
|
||||
}
|
||||
return mDB->Write (hyperleveldb::WriteOptions (), &batch).ok ();
|
||||
@@ -68,7 +69,7 @@ public:
|
||||
{
|
||||
std::string sData;
|
||||
if (!mDB->Get (hyperleveldb::ReadOptions (),
|
||||
hyperleveldb::Slice (reinterpret_cast<char const*>(hash.begin ()), 256 / 8), &sData).ok ())
|
||||
hyperleveldb::Slice (reinterpret_cast<char const*>(hash.begin ()), m_keyBytes), &sData).ok ())
|
||||
{
|
||||
return NodeObject::pointer();
|
||||
}
|
||||
@@ -80,10 +81,10 @@ public:
|
||||
hyperleveldb::Iterator* it = mDB->NewIterator (hyperleveldb::ReadOptions ());
|
||||
for (it->SeekToFirst (); it->Valid (); it->Next ())
|
||||
{
|
||||
if (it->key ().size () == 256 / 8)
|
||||
if (it->key ().size () == m_keyBytes)
|
||||
{
|
||||
uint256 hash;
|
||||
memcpy(hash.begin(), it->key ().data(), 256 / 8);
|
||||
memcpy(hash.begin(), it->key ().data(), m_keyBytes);
|
||||
func (fromBinary (hash, it->value ().data (), it->value ().size ()));
|
||||
}
|
||||
}
|
||||
@@ -116,6 +117,7 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
size_t const m_keyBytes;
|
||||
std::string mName;
|
||||
hyperleveldb::DB* mDB;
|
||||
};
|
||||
@@ -142,9 +144,9 @@ String HyperLevelDBBackendFactory::getName () const
|
||||
return "HyperLevelDB";
|
||||
}
|
||||
|
||||
NodeStore::Backend* HyperLevelDBBackendFactory::createInstance (StringPairArray const& keyValues)
|
||||
NodeStore::Backend* HyperLevelDBBackendFactory::createInstance (size_t keyBytes, StringPairArray const& keyValues)
|
||||
{
|
||||
return new HyperLevelDBBackendFactory::Backend (keyValues);
|
||||
return new HyperLevelDBBackendFactory::Backend (keyBytes, keyValues);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
@@ -23,7 +23,7 @@ public:
|
||||
static HyperLevelDBBackendFactory& getInstance ();
|
||||
|
||||
String getName () const;
|
||||
NodeStore::Backend* createInstance (StringPairArray const& keyValues);
|
||||
NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues);
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
@@ -3,6 +3,19 @@
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
/*
|
||||
|
||||
TODO
|
||||
|
||||
- Check consistency / range checking on read
|
||||
|
||||
- Cache top level tree nodes
|
||||
|
||||
- Coalesce I/O in RandomAccessFile
|
||||
|
||||
- Delete / file compaction
|
||||
|
||||
*/
|
||||
|
||||
class KeyvaDBImp : public KeyvaDB
|
||||
{
|
||||
@@ -336,10 +349,10 @@ public:
|
||||
|
||||
bool get (void const* key, GetCallback* callback)
|
||||
{
|
||||
// VFALCO TODD Swap these two lines
|
||||
SharedState::WriteAccess state (m_state);
|
||||
FindResult findResult (m_keyStorage.getData ());
|
||||
|
||||
SharedState::WriteAccess state (m_state);
|
||||
|
||||
bool found = false;
|
||||
|
||||
if (state->hasKeys ())
|
||||
@@ -348,7 +361,7 @@ public:
|
||||
|
||||
if (found)
|
||||
{
|
||||
void* const destStorage = callback->createStorageForValue (findResult.keyRecord.valSize);
|
||||
void* const destStorage = callback->getStorageForValue (findResult.keyRecord.valSize);
|
||||
|
||||
RandomAccessFileInputStream stream (state->valFile);
|
||||
|
||||
@@ -536,7 +549,7 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
void* createStorageForValue (int valueBytes)
|
||||
void* getStorageForValue (int valueBytes)
|
||||
{
|
||||
bassert (valueBytes <= maxPayloadBytes);
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ public:
|
||||
class GetCallback
|
||||
{
|
||||
public:
|
||||
virtual void* createStorageForValue (int valueBytes) = 0;
|
||||
virtual void* getStorageForValue (int valueBytes) = 0;
|
||||
};
|
||||
|
||||
static KeyvaDB* New (int keyBytes,
|
||||
@@ -25,8 +25,13 @@ public:
|
||||
|
||||
virtual ~KeyvaDB () { }
|
||||
|
||||
// VFALCO TODO Make the return value a Result so we can
|
||||
// detect corruption and errors!
|
||||
//
|
||||
virtual bool get (void const* key, GetCallback* callback) = 0;
|
||||
|
||||
// VFALCO TODO Use Result for return value
|
||||
//
|
||||
virtual void put (void const* key, void const* value, int valueBytes) = 0;
|
||||
|
||||
virtual void flush () = 0;
|
||||
|
||||
@@ -7,15 +7,9 @@
|
||||
class KeyvaDBBackendFactory::Backend : public NodeStore::Backend
|
||||
{
|
||||
public:
|
||||
typedef UnsignedInteger <32> Key;
|
||||
|
||||
enum
|
||||
{
|
||||
keyBytes = Key::sizeInBytes
|
||||
};
|
||||
|
||||
explicit Backend (StringPairArray const& keyValues)
|
||||
: m_path (keyValues ["path"])
|
||||
Backend (size_t keyBytes, StringPairArray const& keyValues)
|
||||
: m_keyBytes (keyBytes)
|
||||
, m_path (keyValues ["path"])
|
||||
, m_db (KeyvaDB::New (
|
||||
keyBytes,
|
||||
File::getCurrentWorkingDirectory().getChildFile (m_path).withFileExtension ("key"),
|
||||
@@ -33,6 +27,48 @@ public:
|
||||
return m_path.toStdString ();
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
Status get (void const* key, GetCallback* callback)
|
||||
{
|
||||
Status status (ok);
|
||||
|
||||
struct ForwardingGetCallback : KeyvaDB::GetCallback
|
||||
{
|
||||
ForwardingGetCallback (Backend::GetCallback* callback)
|
||||
: m_callback (callback)
|
||||
{
|
||||
}
|
||||
|
||||
void* getStorageForValue (int valueBytes)
|
||||
{
|
||||
return m_callback->getStorageForValue (valueBytes);
|
||||
}
|
||||
|
||||
private:
|
||||
Backend::GetCallback* const m_callback;
|
||||
};
|
||||
|
||||
ForwardingGetCallback cb (callback);
|
||||
|
||||
// VFALCO TODO Can't we get KeyvaDB to provide a proper status?
|
||||
//
|
||||
bool const found = m_db->get (key, &cb);
|
||||
|
||||
if (found)
|
||||
{
|
||||
status = ok;
|
||||
}
|
||||
else
|
||||
{
|
||||
status = notFound;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
void writeObject (NodeObject::ref object)
|
||||
{
|
||||
Blob blob (toBlob (object));
|
||||
@@ -54,7 +90,7 @@ public:
|
||||
int valueBytes;
|
||||
HeapBlock <char> data;
|
||||
|
||||
void* createStorageForValue (int valueBytes_)
|
||||
void* getStorageForValue (int valueBytes_)
|
||||
{
|
||||
valueBytes = valueBytes_;
|
||||
|
||||
@@ -112,6 +148,7 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
size_t const m_keyBytes;
|
||||
String m_path;
|
||||
ScopedPointer <KeyvaDB> m_db;
|
||||
};
|
||||
@@ -138,9 +175,9 @@ String KeyvaDBBackendFactory::getName () const
|
||||
return "KeyvaDB";
|
||||
}
|
||||
|
||||
NodeStore::Backend* KeyvaDBBackendFactory::createInstance (StringPairArray const& keyValues)
|
||||
NodeStore::Backend* KeyvaDBBackendFactory::createInstance (size_t keyBytes, StringPairArray const& keyValues)
|
||||
{
|
||||
return new KeyvaDBBackendFactory::Backend (keyValues);
|
||||
return new KeyvaDBBackendFactory::Backend (keyBytes, keyValues);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
@@ -21,7 +21,7 @@ public:
|
||||
static KeyvaDBBackendFactory& getInstance ();
|
||||
|
||||
String getName () const;
|
||||
NodeStore::Backend* createInstance (StringPairArray const& keyValues);
|
||||
NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues);
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
@@ -7,11 +7,12 @@
|
||||
class LevelDBBackendFactory::Backend : public NodeStore::Backend
|
||||
{
|
||||
public:
|
||||
Backend (StringPairArray const& keyValues)
|
||||
: mName(keyValues ["path"].toStdString ())
|
||||
, mDB(NULL)
|
||||
Backend (int keyBytes, StringPairArray const& keyValues)
|
||||
: m_keyBytes (keyBytes)
|
||||
, m_name(keyValues ["path"].toStdString ())
|
||||
, m_db(NULL)
|
||||
{
|
||||
if (mName.empty())
|
||||
if (m_name.empty())
|
||||
throw std::runtime_error ("Missing path in LevelDB backend");
|
||||
|
||||
leveldb::Options options;
|
||||
@@ -33,21 +34,83 @@ public:
|
||||
if (!keyValues["open_files"].isEmpty())
|
||||
options.max_open_files = keyValues["open_files"].getIntValue();
|
||||
|
||||
leveldb::Status status = leveldb::DB::Open (options, mName, &mDB);
|
||||
if (!status.ok () || !mDB)
|
||||
leveldb::Status status = leveldb::DB::Open (options, m_name, &m_db);
|
||||
if (!status.ok () || !m_db)
|
||||
throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
|
||||
}
|
||||
|
||||
~Backend ()
|
||||
{
|
||||
delete mDB;
|
||||
delete m_db;
|
||||
}
|
||||
|
||||
std::string getDataBaseName()
|
||||
{
|
||||
return mName;
|
||||
return m_name;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
struct StdString
|
||||
{
|
||||
std::string blob;
|
||||
};
|
||||
|
||||
typedef RecycledObjectPool <StdString> StdStringPool;
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
Status get (void const* key, GetCallback* callback)
|
||||
{
|
||||
Status status (ok);
|
||||
|
||||
leveldb::ReadOptions const options;
|
||||
leveldb::Slice const slice (static_cast <char const*> (key), m_keyBytes);
|
||||
|
||||
{
|
||||
// These are reused std::string objects,
|
||||
// required for leveldb's funky interface.
|
||||
//
|
||||
StdStringPool::ScopedItem item (m_stringPool);
|
||||
std::string& blob = item.getObject ().blob;
|
||||
|
||||
leveldb::Status getStatus = m_db->Get (options, slice, &blob);
|
||||
|
||||
if (getStatus.ok ())
|
||||
{
|
||||
void* const buffer = callback->getStorageForValue (blob.size ());
|
||||
|
||||
if (buffer != nullptr)
|
||||
{
|
||||
memcpy (buffer, blob.data (), blob.size ());
|
||||
}
|
||||
else
|
||||
{
|
||||
Throw (std::bad_alloc ());
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (getStatus.IsCorruption ())
|
||||
{
|
||||
status = dataCorrupt;
|
||||
}
|
||||
else if (getStatus.IsNotFound ())
|
||||
{
|
||||
status = notFound;
|
||||
}
|
||||
else
|
||||
{
|
||||
status = unknown;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
bool bulkStore (const std::vector< NodeObject::pointer >& objs)
|
||||
{
|
||||
leveldb::WriteBatch batch;
|
||||
@@ -56,17 +119,17 @@ public:
|
||||
{
|
||||
Blob blob (toBlob (obj));
|
||||
batch.Put (
|
||||
leveldb::Slice (reinterpret_cast<char const*>(obj->getHash ().begin ()), 256 / 8),
|
||||
leveldb::Slice (reinterpret_cast<char const*>(obj->getHash ().begin ()), m_keyBytes),
|
||||
leveldb::Slice (reinterpret_cast<char const*>(&blob.front ()), blob.size ()));
|
||||
}
|
||||
return mDB->Write (leveldb::WriteOptions (), &batch).ok ();
|
||||
return m_db->Write (leveldb::WriteOptions (), &batch).ok ();
|
||||
}
|
||||
|
||||
NodeObject::pointer retrieve (uint256 const& hash)
|
||||
{
|
||||
std::string sData;
|
||||
if (!mDB->Get (leveldb::ReadOptions (),
|
||||
leveldb::Slice (reinterpret_cast<char const*>(hash.begin ()), 256 / 8), &sData).ok ())
|
||||
if (!m_db->Get (leveldb::ReadOptions (),
|
||||
leveldb::Slice (reinterpret_cast<char const*>(hash.begin ()), m_keyBytes), &sData).ok ())
|
||||
{
|
||||
return NodeObject::pointer();
|
||||
}
|
||||
@@ -75,15 +138,20 @@ public:
|
||||
|
||||
void visitAll (FUNCTION_TYPE<void (NodeObject::pointer)> func)
|
||||
{
|
||||
leveldb::Iterator* it = mDB->NewIterator (leveldb::ReadOptions ());
|
||||
leveldb::Iterator* it = m_db->NewIterator (leveldb::ReadOptions ());
|
||||
for (it->SeekToFirst (); it->Valid (); it->Next ())
|
||||
{
|
||||
if (it->key ().size () == 256 / 8)
|
||||
if (it->key ().size () == m_keyBytes)
|
||||
{
|
||||
uint256 hash;
|
||||
memcpy(hash.begin(), it->key ().data(), 256 / 8);
|
||||
memcpy(hash.begin(), it->key ().data(), m_keyBytes);
|
||||
func (fromBinary (hash, it->value ().data (), it->value ().size ()));
|
||||
}
|
||||
else
|
||||
{
|
||||
// VFALCO NOTE What does it mean to find an
|
||||
// incorrectly sized key? Corruption?
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -114,8 +182,10 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
std::string mName;
|
||||
leveldb::DB* mDB;
|
||||
size_t const m_keyBytes;
|
||||
StdStringPool m_stringPool;
|
||||
std::string m_name;
|
||||
leveldb::DB* m_db;
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
@@ -140,9 +210,9 @@ String LevelDBBackendFactory::getName () const
|
||||
return "LevelDB";
|
||||
}
|
||||
|
||||
NodeStore::Backend* LevelDBBackendFactory::createInstance (StringPairArray const& keyValues)
|
||||
NodeStore::Backend* LevelDBBackendFactory::createInstance (size_t keyBytes, StringPairArray const& keyValues)
|
||||
{
|
||||
return new LevelDBBackendFactory::Backend (keyValues);
|
||||
return new LevelDBBackendFactory::Backend (keyBytes, keyValues);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
@@ -21,7 +21,7 @@ public:
|
||||
static LevelDBBackendFactory& getInstance ();
|
||||
|
||||
String getName () const;
|
||||
NodeStore::Backend* createInstance (StringPairArray const& keyValues);
|
||||
NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues);
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
@@ -6,6 +6,8 @@
|
||||
|
||||
SETUP_LOG (NodeObject)
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
NodeObject::NodeObject (
|
||||
NodeObjectType type,
|
||||
LedgerIndex ledgerIndex,
|
||||
@@ -32,80 +34,6 @@ NodeObject::NodeObject (
|
||||
{
|
||||
}
|
||||
|
||||
NodeObject::NodeObject (void const* key, void const* value, int valueBytes)
|
||||
{
|
||||
DecodedBlob decoded (key, value, valueBytes);
|
||||
|
||||
if (decoded.success)
|
||||
{
|
||||
mType = decoded.objectType;
|
||||
mHash = uint256 (key);
|
||||
mLedgerIndex = decoded.ledgerIndex;
|
||||
mData = Blob (decoded.objectData, decoded.objectData + decoded.dataBytes);
|
||||
}
|
||||
else
|
||||
{
|
||||
// VFALCO TODO Write the hex version of key to the string for diagnostics.
|
||||
String s;
|
||||
s << "NodeStore:: DecodedBlob failed";
|
||||
Throw (s);
|
||||
}
|
||||
}
|
||||
|
||||
NodeObject::DecodedBlob::DecodedBlob (void const* key, void const* value, int valueBytes)
|
||||
{
|
||||
/* Data format:
|
||||
|
||||
Bytes
|
||||
|
||||
0...3 LedgerIndex 32-bit big endian integer
|
||||
4...7 Unused? An unused copy of the LedgerIndex
|
||||
8 char One of NodeObjectType
|
||||
9...end The body of the object data
|
||||
*/
|
||||
|
||||
success = false;
|
||||
key = key;
|
||||
// VFALCO NOTE Ledger indexes should have started at 1
|
||||
ledgerIndex = LedgerIndex (-1);
|
||||
objectType = hotUNKNOWN;
|
||||
objectData = nullptr;
|
||||
dataBytes = bmin (0, valueBytes - 9);
|
||||
|
||||
if (dataBytes > 4)
|
||||
{
|
||||
LedgerIndex const* index = static_cast <LedgerIndex const*> (value);
|
||||
ledgerIndex = ByteOrder::swapIfLittleEndian (*index);
|
||||
}
|
||||
|
||||
// VFALCO NOTE What about bytes 4 through 7 inclusive?
|
||||
|
||||
if (dataBytes > 8)
|
||||
{
|
||||
unsigned char const* byte = static_cast <unsigned char const*> (value);
|
||||
objectType = static_cast <NodeObjectType> (byte [8]);
|
||||
}
|
||||
|
||||
if (dataBytes > 9)
|
||||
{
|
||||
objectData = static_cast <unsigned char const*> (value) + 9;
|
||||
|
||||
switch (objectType)
|
||||
{
|
||||
case hotUNKNOWN:
|
||||
default:
|
||||
break;
|
||||
|
||||
case hotLEDGER:
|
||||
case hotTRANSACTION:
|
||||
case hotACCOUNT_NODE:
|
||||
case hotTRANSACTION_NODE:
|
||||
success = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
NodeObjectType NodeObject::getType () const
|
||||
{
|
||||
return mType;
|
||||
@@ -125,3 +53,32 @@ Blob const& NodeObject::getData () const
|
||||
{
|
||||
return mData;
|
||||
}
|
||||
|
||||
bool NodeObject::isCloneOf (NodeObject const& other) const
|
||||
{
|
||||
return
|
||||
mType == other.mType &&
|
||||
mHash == other.mHash &&
|
||||
mLedgerIndex == other.mLedgerIndex &&
|
||||
mData == other.mData
|
||||
;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
class NodeObjectTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
|
||||
NodeObjectTests () : UnitTest ("NodeObject")
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
void runTest ()
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
static NodeObjectTests nodeObjectTests;
|
||||
|
||||
|
||||
@@ -64,40 +64,6 @@ public:
|
||||
int bytesInBuffer,
|
||||
uint256 const & hash);
|
||||
|
||||
/** Create from a key/value blob.
|
||||
|
||||
This is the format in which a NodeObject is stored in the
|
||||
persistent storage layer.
|
||||
|
||||
@see NodeStore
|
||||
*/
|
||||
NodeObject (void const* key, void const* value, int valueBytes);
|
||||
|
||||
/** Parsed key/value blob into NodeObject components.
|
||||
|
||||
This will extract the information required to construct
|
||||
a NodeObject. It also does consistency checking and returns
|
||||
the result, so it is possible to determine if the data
|
||||
is corrupted without throwing an exception. Note all forms
|
||||
of corruption are detected so further analysis will be
|
||||
needed to eliminate false positives.
|
||||
|
||||
This is the format in which a NodeObject is stored in the
|
||||
persistent storage layer.
|
||||
*/
|
||||
struct DecodedBlob
|
||||
{
|
||||
DecodedBlob (void const* key, void const* value, int valueBytes);
|
||||
|
||||
bool success;
|
||||
|
||||
void const* key;
|
||||
LedgerIndex ledgerIndex;
|
||||
NodeObjectType objectType;
|
||||
unsigned char const* objectData;
|
||||
int dataBytes;
|
||||
};
|
||||
|
||||
/** Retrieve the type of this object.
|
||||
*/
|
||||
NodeObjectType getType () const;
|
||||
@@ -115,6 +81,10 @@ public:
|
||||
*/
|
||||
Blob const& getData () const;
|
||||
|
||||
/** See if this object has the same data as another object.
|
||||
*/
|
||||
bool isCloneOf (NodeObject const& other) const;
|
||||
|
||||
private:
|
||||
NodeObjectType mType;
|
||||
uint256 mHash;
|
||||
|
||||
@@ -95,220 +95,519 @@ int NodeStore::Backend::getWriteLoad ()
|
||||
// NodeStore
|
||||
//
|
||||
|
||||
Array <NodeStore::BackendFactory*> NodeStore::s_factories;
|
||||
|
||||
NodeStore::NodeStore (String backendParameters,
|
||||
String fastBackendParameters,
|
||||
int cacheSize,
|
||||
int cacheAge)
|
||||
: m_backend (createBackend (backendParameters))
|
||||
, m_fastBackend (fastBackendParameters.isNotEmpty () ? createBackend (fastBackendParameters)
|
||||
: nullptr)
|
||||
, m_cache ("NodeStore", cacheSize, cacheAge)
|
||||
, m_negativeCache ("NoteStoreNegativeCache", 0, 120)
|
||||
class NodeStoreImp : public NodeStore
|
||||
{
|
||||
}
|
||||
|
||||
void NodeStore::addBackendFactory (BackendFactory& factory)
|
||||
{
|
||||
s_factories.add (&factory);
|
||||
}
|
||||
|
||||
float NodeStore::getCacheHitRate ()
|
||||
{
|
||||
return m_cache.getHitRate ();
|
||||
}
|
||||
|
||||
void NodeStore::tune (int size, int age)
|
||||
{
|
||||
m_cache.setTargetSize (size);
|
||||
m_cache.setTargetAge (age);
|
||||
}
|
||||
|
||||
void NodeStore::sweep ()
|
||||
{
|
||||
m_cache.sweep ();
|
||||
m_negativeCache.sweep ();
|
||||
}
|
||||
|
||||
void NodeStore::waitWrite ()
|
||||
{
|
||||
m_backend->waitWrite ();
|
||||
if (m_fastBackend)
|
||||
m_fastBackend->waitWrite ();
|
||||
}
|
||||
|
||||
int NodeStore::getWriteLoad ()
|
||||
{
|
||||
return m_backend->getWriteLoad ();
|
||||
}
|
||||
|
||||
bool NodeStore::store (NodeObjectType type, uint32 index,
|
||||
Blob const& data, uint256 const& hash)
|
||||
{
|
||||
bool wasStored = false;
|
||||
|
||||
bool const keyFoundAndObjectCached = m_cache.refreshIfPresent (hash);
|
||||
|
||||
// VFALCO NOTE What happens if the key is found, but the object
|
||||
// fell out of the cache? We will end up passing it
|
||||
// to the backend anyway.
|
||||
//
|
||||
if (! keyFoundAndObjectCached)
|
||||
public:
|
||||
/** Size of a key.
|
||||
*/
|
||||
enum
|
||||
{
|
||||
keyBytes = 32
|
||||
};
|
||||
|
||||
// VFALCO TODO Rename this to RIPPLE_NODESTORE_VERIFY_HASHES and make
|
||||
// it be 1 or 0 instead of merely defined or undefined.
|
||||
//
|
||||
#ifdef PARANOID
|
||||
assert (hash == Serializer::getSHA512Half (data));
|
||||
#endif
|
||||
/** Parsed key/value blob into NodeObject components.
|
||||
|
||||
NodeObject::pointer object = boost::make_shared<NodeObject> (type, index, data, hash);
|
||||
This will extract the information required to construct
|
||||
a NodeObject. It also does consistency checking and returns
|
||||
the result, so it is possible to determine if the data
|
||||
is corrupted without throwing an exception. Note all forms
|
||||
of corruption are detected so further analysis will be
|
||||
needed to eliminate false positives.
|
||||
|
||||
// VFALCO NOTE What does it mean to canonicalize an object?
|
||||
//
|
||||
if (!m_cache.canonicalize (hash, object))
|
||||
This is the format in which a NodeObject is stored in the
|
||||
persistent storage layer.
|
||||
*/
|
||||
struct DecodedBlob
|
||||
{
|
||||
/** Construct the decoded blob from raw data.
|
||||
|
||||
The `success` member will indicate if the operation was succesful.
|
||||
*/
|
||||
DecodedBlob (void const* keyParam, void const* value, int valueBytes)
|
||||
{
|
||||
m_backend->store (object);
|
||||
/* Data format:
|
||||
|
||||
if (m_fastBackend)
|
||||
m_fastBackend->store (object);
|
||||
Bytes
|
||||
|
||||
0...3 LedgerIndex 32-bit big endian integer
|
||||
4...7 Unused? An unused copy of the LedgerIndex
|
||||
8 char One of NodeObjectType
|
||||
9...end The body of the object data
|
||||
*/
|
||||
|
||||
success = false;
|
||||
key = keyParam;
|
||||
// VFALCO NOTE Ledger indexes should have started at 1
|
||||
ledgerIndex = LedgerIndex (-1);
|
||||
objectType = hotUNKNOWN;
|
||||
objectData = nullptr;
|
||||
dataBytes = bmax (0, valueBytes - 9);
|
||||
|
||||
if (dataBytes > 4)
|
||||
{
|
||||
LedgerIndex const* index = static_cast <LedgerIndex const*> (value);
|
||||
ledgerIndex = ByteOrder::swapIfLittleEndian (*index);
|
||||
}
|
||||
|
||||
// VFALCO NOTE What about bytes 4 through 7 inclusive?
|
||||
|
||||
if (dataBytes > 8)
|
||||
{
|
||||
unsigned char const* byte = static_cast <unsigned char const*> (value);
|
||||
objectType = static_cast <NodeObjectType> (byte [8]);
|
||||
}
|
||||
|
||||
if (dataBytes > 9)
|
||||
{
|
||||
objectData = static_cast <unsigned char const*> (value) + 9;
|
||||
|
||||
switch (objectType)
|
||||
{
|
||||
case hotUNKNOWN:
|
||||
default:
|
||||
break;
|
||||
|
||||
case hotLEDGER:
|
||||
case hotTRANSACTION:
|
||||
case hotACCOUNT_NODE:
|
||||
case hotTRANSACTION_NODE:
|
||||
success = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
m_negativeCache.del (hash);
|
||||
|
||||
wasStored = true;
|
||||
}
|
||||
|
||||
return wasStored;
|
||||
}
|
||||
|
||||
NodeObject::pointer NodeStore::retrieve (uint256 const& hash)
|
||||
{
|
||||
NodeObject::pointer obj = m_cache.fetch (hash);
|
||||
|
||||
if (obj || m_negativeCache.isPresent (hash))
|
||||
return obj;
|
||||
|
||||
if (m_fastBackend)
|
||||
{
|
||||
obj = retrieve (m_fastBackend, hash);
|
||||
|
||||
if (obj)
|
||||
/** Create a NodeObject from this data.
|
||||
*/
|
||||
NodeObject::pointer createObject ()
|
||||
{
|
||||
m_cache.canonicalize (hash, obj);
|
||||
return obj;
|
||||
NodeObject::pointer object;
|
||||
|
||||
if (success)
|
||||
{
|
||||
// VFALCO NOTE I dislke these shared pointers from boost
|
||||
object = boost::make_shared <NodeObject> (
|
||||
objectType, ledgerIndex, objectData, dataBytes, uint256 (key));
|
||||
}
|
||||
|
||||
return object;
|
||||
}
|
||||
|
||||
bool success;
|
||||
|
||||
void const* key;
|
||||
LedgerIndex ledgerIndex;
|
||||
NodeObjectType objectType;
|
||||
unsigned char const* objectData;
|
||||
int dataBytes;
|
||||
};
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
class EncodedBlob
|
||||
{
|
||||
HeapBlock <char> data;
|
||||
};
|
||||
|
||||
public:
|
||||
NodeStoreImp (String backendParameters,
|
||||
String fastBackendParameters,
|
||||
int cacheSize,
|
||||
int cacheAge)
|
||||
: m_backend (createBackend (backendParameters))
|
||||
, m_fastBackend (fastBackendParameters.isNotEmpty () ? createBackend (fastBackendParameters)
|
||||
: nullptr)
|
||||
, m_cache ("NodeStore", cacheSize, cacheAge)
|
||||
, m_negativeCache ("NoteStoreNegativeCache", 0, 120)
|
||||
{
|
||||
}
|
||||
|
||||
~NodeStoreImp ()
|
||||
{
|
||||
// m_hooks->onRetrieveBegin ()
|
||||
|
||||
// VFALCO TODO Why is this an autoptr? Why can't it just be a plain old object?
|
||||
// VFALCO NOTE This shouldn't be necessary, the backend can
|
||||
// just handle it in the destructor.
|
||||
//
|
||||
LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtHO_READ, "HOS::retrieve"));
|
||||
m_backend->waitWrite ();
|
||||
|
||||
obj = retrieve (m_backend, hash);
|
||||
if (m_fastBackend)
|
||||
m_fastBackend->waitWrite ();
|
||||
}
|
||||
|
||||
float getCacheHitRate ()
|
||||
{
|
||||
return m_cache.getHitRate ();
|
||||
}
|
||||
|
||||
void tune (int size, int age)
|
||||
{
|
||||
m_cache.setTargetSize (size);
|
||||
m_cache.setTargetAge (age);
|
||||
}
|
||||
|
||||
void sweep ()
|
||||
{
|
||||
m_cache.sweep ();
|
||||
m_negativeCache.sweep ();
|
||||
}
|
||||
|
||||
int getWriteLoad ()
|
||||
{
|
||||
return m_backend->getWriteLoad ();
|
||||
}
|
||||
|
||||
bool store (NodeObjectType type,
|
||||
uint32 index,
|
||||
Blob const& data,
|
||||
uint256 const& hash)
|
||||
{
|
||||
bool wasStored = false;
|
||||
|
||||
bool const keyFoundAndObjectCached = m_cache.refreshIfPresent (hash);
|
||||
|
||||
// VFALCO NOTE What happens if the key is found, but the object
|
||||
// fell out of the cache? We will end up passing it
|
||||
// to the backend anyway.
|
||||
//
|
||||
if (! keyFoundAndObjectCached)
|
||||
{
|
||||
|
||||
// VFALCO TODO Rename this to RIPPLE_NODESTORE_VERIFY_HASHES and make
|
||||
// it be 1 or 0 instead of merely defined or undefined.
|
||||
//
|
||||
#ifdef PARANOID
|
||||
assert (hash == Serializer::getSHA512Half (data));
|
||||
#endif
|
||||
|
||||
NodeObject::pointer object = boost::make_shared <NodeObject> (type, index, data, hash);
|
||||
|
||||
// VFALCO NOTE What does it mean to canonicalize an object?
|
||||
//
|
||||
if (!m_cache.canonicalize (hash, object))
|
||||
{
|
||||
m_backend->store (object);
|
||||
|
||||
if (m_fastBackend)
|
||||
m_fastBackend->store (object);
|
||||
}
|
||||
|
||||
m_negativeCache.del (hash);
|
||||
|
||||
wasStored = true;
|
||||
}
|
||||
|
||||
return wasStored;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
NodeObject::pointer retrieve (uint256 const& hash)
|
||||
{
|
||||
// See if the object already exists in the cache
|
||||
//
|
||||
NodeObject::pointer obj = m_cache.fetch (hash);
|
||||
|
||||
if (obj == nullptr)
|
||||
{
|
||||
m_negativeCache.add (hash);
|
||||
// It's not in the cache, see if we can skip checking the db.
|
||||
//
|
||||
if (! m_negativeCache.isPresent (hash))
|
||||
{
|
||||
// There's still a chance it could be in one of the databases.
|
||||
|
||||
// VFALCO TODO Eliminate return from middle of function
|
||||
bool foundInFastBackend = false;
|
||||
|
||||
return obj; // VFALCO NOTE This is nullptr, why return obj?
|
||||
// Check the fast backend database if we have one
|
||||
//
|
||||
if (m_fastBackend != nullptr)
|
||||
{
|
||||
obj = retrieveInternal (m_fastBackend, hash);
|
||||
|
||||
// If we found the object, avoid storing it again later.
|
||||
if (obj != nullptr)
|
||||
foundInFastBackend = true;
|
||||
}
|
||||
|
||||
// Are we still without an object?
|
||||
//
|
||||
if (obj == nullptr)
|
||||
{
|
||||
// Yes so at last we will try the main database.
|
||||
//
|
||||
{
|
||||
// Monitor this operation's load since it is expensive.
|
||||
|
||||
// m_hooks->onRetrieveBegin ()
|
||||
|
||||
// VFALCO TODO Why is this an autoptr? Why can't it just be a plain old object?
|
||||
//
|
||||
LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtHO_READ, "HOS::retrieve"));
|
||||
|
||||
obj = retrieveInternal (m_backend, hash);
|
||||
|
||||
// m_hooks->onRetrieveEnd ()
|
||||
}
|
||||
|
||||
// If it's not in the main database, remember that so we
|
||||
// can skip the lookup for the same object again later.
|
||||
//
|
||||
if (obj == nullptr)
|
||||
m_negativeCache.add (hash);
|
||||
}
|
||||
|
||||
// Did we finally get something?
|
||||
//
|
||||
if (obj != nullptr)
|
||||
{
|
||||
// Yes it so canonicalize. This solves the problem where
|
||||
// more than one thread has its own copy of the same object.
|
||||
//
|
||||
m_cache.canonicalize (hash, obj);
|
||||
|
||||
if (! foundInFastBackend)
|
||||
{
|
||||
// If we have a fast back end, store it there for later.
|
||||
//
|
||||
if (m_fastBackend != nullptr)
|
||||
m_fastBackend->store (obj);
|
||||
|
||||
// Since this was a 'hard' fetch, we will log it.
|
||||
//
|
||||
WriteLog (lsTRACE, NodeObject) << "HOS: " << hash << " fetch: in db";
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// hash is known not to be in the database
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// found it!
|
||||
}
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
// VFALCO NOTE What does this do?
|
||||
m_cache.canonicalize (hash, obj);
|
||||
NodeObject::pointer retrieveInternal (Backend* backend, uint256 const& hash)
|
||||
{
|
||||
// VFALCO TODO Make this not allocate and free on each call
|
||||
//
|
||||
struct MyGetCallback : Backend::GetCallback
|
||||
{
|
||||
void* getStorageForValue (size_t sizeInBytes)
|
||||
{
|
||||
bytes = sizeInBytes;
|
||||
data.malloc (sizeInBytes);
|
||||
|
||||
if (m_fastBackend)
|
||||
m_fastBackend->store(obj);
|
||||
return &data [0];
|
||||
}
|
||||
|
||||
WriteLog (lsTRACE, NodeObject) << "HOS: " << hash << " fetch: in db";
|
||||
size_t bytes;
|
||||
HeapBlock <char> data;
|
||||
};
|
||||
|
||||
return obj;
|
||||
NodeObject::pointer object;
|
||||
|
||||
MyGetCallback cb;
|
||||
Backend::Status const status = backend->get (hash.begin (), &cb);
|
||||
|
||||
if (status == Backend::ok)
|
||||
{
|
||||
// Deserialize the payload into its components.
|
||||
//
|
||||
DecodedBlob decoded (hash.begin (), cb.data.getData (), cb.bytes);
|
||||
|
||||
if (decoded.success)
|
||||
{
|
||||
object = decoded.createObject ();
|
||||
}
|
||||
else
|
||||
{
|
||||
// Houston, we've had a problem. Data is likely corrupt.
|
||||
|
||||
// VFALCO TODO Deal with encountering corrupt data!
|
||||
|
||||
WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << hash;
|
||||
}
|
||||
}
|
||||
|
||||
return object;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
void importVisitor (
|
||||
std::vector <NodeObject::pointer>& objects,
|
||||
NodeObject::pointer object)
|
||||
{
|
||||
if (objects.size() >= bulkWriteBatchSize)
|
||||
{
|
||||
m_backend->bulkStore (objects);
|
||||
|
||||
objects.clear ();
|
||||
objects.reserve (bulkWriteBatchSize);
|
||||
}
|
||||
|
||||
objects.push_back (object);
|
||||
}
|
||||
|
||||
int import (String sourceBackendParameters)
|
||||
{
|
||||
ScopedPointer <NodeStore::Backend> srcBackend (createBackend (sourceBackendParameters));
|
||||
|
||||
WriteLog (lsWARNING, NodeObject) <<
|
||||
"Node import from '" << srcBackend->getDataBaseName() << "' to '"
|
||||
<< m_backend->getDataBaseName() << "'.";
|
||||
|
||||
std::vector <NodeObject::pointer> objects;
|
||||
|
||||
objects.reserve (bulkWriteBatchSize);
|
||||
|
||||
srcBackend->visitAll (BIND_TYPE (&NodeStoreImp::importVisitor, this, boost::ref (objects), P_1));
|
||||
|
||||
if (!objects.empty ())
|
||||
m_backend->bulkStore (objects);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
NodeStore::Backend* createBackend (String const& parameters)
|
||||
{
|
||||
Backend* backend = nullptr;
|
||||
|
||||
StringPairArray keyValues = parseKeyValueParameters (parameters, '|');
|
||||
|
||||
String const& type = keyValues ["type"];
|
||||
|
||||
if (type.isNotEmpty ())
|
||||
{
|
||||
BackendFactory* factory = nullptr;
|
||||
|
||||
for (int i = 0; i < s_factories.size (); ++i)
|
||||
{
|
||||
if (s_factories [i]->getName () == type)
|
||||
{
|
||||
factory = s_factories [i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (factory != nullptr)
|
||||
{
|
||||
backend = factory->createInstance (keyBytes, keyValues);
|
||||
}
|
||||
else
|
||||
{
|
||||
throw std::runtime_error ("unkown backend type");
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
throw std::runtime_error ("missing backend type");
|
||||
}
|
||||
|
||||
return backend;
|
||||
}
|
||||
|
||||
static void addBackendFactory (BackendFactory& factory)
|
||||
{
|
||||
s_factories.add (&factory);
|
||||
}
|
||||
|
||||
private:
|
||||
static Array <NodeStore::BackendFactory*> s_factories;
|
||||
|
||||
RecycledObjectPool <EncodedBlob> m_blobPool;
|
||||
|
||||
// Persistent key/value storage.
|
||||
ScopedPointer <Backend> m_backend;
|
||||
|
||||
// Larger key/value storage, but not necessarily persistent.
|
||||
ScopedPointer <Backend> m_fastBackend;
|
||||
|
||||
// VFALCO NOTE What are these things for? We need comments.
|
||||
TaggedCache <uint256, NodeObject, UptimeTimerAdapter> m_cache;
|
||||
KeyCache <uint256, UptimeTimerAdapter> m_negativeCache;
|
||||
};
|
||||
|
||||
Array <NodeStore::BackendFactory*> NodeStoreImp::s_factories;
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
void NodeStore::addBackendFactory (BackendFactory& factory)
|
||||
{
|
||||
NodeStoreImp::addBackendFactory (factory);
|
||||
}
|
||||
|
||||
NodeStore* NodeStore::New (String backendParameters,
|
||||
String fastBackendParameters,
|
||||
int cacheSize,
|
||||
int cacheAge)
|
||||
{
|
||||
return new NodeStoreImp (backendParameters,
|
||||
fastBackendParameters,
|
||||
cacheSize,
|
||||
cacheAge);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
NodeObject::pointer NodeStore::retrieve (Backend* backend, uint256 const& hash)
|
||||
class NodeStoreTests : public UnitTest
|
||||
{
|
||||
return backend->retrieve (hash);
|
||||
}
|
||||
|
||||
void NodeStore::importVisitor (
|
||||
std::vector <NodeObject::pointer>& objects,
|
||||
NodeObject::pointer object)
|
||||
{
|
||||
if (objects.size() >= bulkWriteBatchSize)
|
||||
public:
|
||||
enum
|
||||
{
|
||||
m_backend->bulkStore (objects);
|
||||
maxPayloadBytes = 1000,
|
||||
|
||||
objects.clear ();
|
||||
objects.reserve (bulkWriteBatchSize);
|
||||
numObjects = 1000
|
||||
};
|
||||
|
||||
NodeStoreTests () : UnitTest ("NodeStore")
|
||||
{
|
||||
}
|
||||
|
||||
objects.push_back (object);
|
||||
}
|
||||
|
||||
int NodeStore::import (String sourceBackendParameters)
|
||||
{
|
||||
ScopedPointer <NodeStore::Backend> srcBackend (createBackend (sourceBackendParameters));
|
||||
|
||||
WriteLog (lsWARNING, NodeObject) <<
|
||||
"Node import from '" << srcBackend->getDataBaseName() << "' to '"
|
||||
<< m_backend->getDataBaseName() << "'.";
|
||||
|
||||
std::vector <NodeObject::pointer> objects;
|
||||
|
||||
objects.reserve (bulkWriteBatchSize);
|
||||
|
||||
srcBackend->visitAll (BIND_TYPE (&NodeStore::importVisitor, this, boost::ref (objects), P_1));
|
||||
|
||||
if (!objects.empty ())
|
||||
m_backend->bulkStore (objects);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
NodeStore::Backend* NodeStore::createBackend (String const& parameters)
|
||||
{
|
||||
Backend* backend = nullptr;
|
||||
|
||||
StringPairArray keyValues = parseKeyValueParameters (parameters, '|');
|
||||
|
||||
String const& type = keyValues ["type"];
|
||||
|
||||
if (type.isNotEmpty ())
|
||||
// Create a pseudo-random object
|
||||
static NodeObject* createNodeObject (int index, int64 seedValue, HeapBlock <char>& payloadBuffer)
|
||||
{
|
||||
BackendFactory* factory = nullptr;
|
||||
Random r (seedValue + index);
|
||||
|
||||
for (int i = 0; i < s_factories.size (); ++i)
|
||||
NodeObjectType type;
|
||||
switch (r.nextInt (4))
|
||||
{
|
||||
if (s_factories [i]->getName () == type)
|
||||
{
|
||||
factory = s_factories [i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
case 0: type = hotLEDGER; break;
|
||||
case 1: type = hotTRANSACTION; break;
|
||||
case 2: type = hotACCOUNT_NODE; break;
|
||||
case 3: type = hotTRANSACTION_NODE; break;
|
||||
default:
|
||||
type = hotUNKNOWN;
|
||||
break;
|
||||
};
|
||||
|
||||
if (factory != nullptr)
|
||||
{
|
||||
backend = factory->createInstance (keyValues);
|
||||
}
|
||||
else
|
||||
{
|
||||
throw std::runtime_error ("unkown backend type");
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
throw std::runtime_error ("missing backend type");
|
||||
LedgerIndex ledgerIndex = 1 + r.nextInt (1024 * 1024);
|
||||
|
||||
uint256 hash;
|
||||
r.nextBlob (hash.begin (), hash.size ());
|
||||
|
||||
int payloadBytes = 1 + r.nextInt (maxPayloadBytes);
|
||||
r.nextBlob (payloadBuffer.getData (), payloadBytes);
|
||||
|
||||
return new NodeObject (type, ledgerIndex, payloadBuffer.getData (), payloadBytes, hash);
|
||||
}
|
||||
|
||||
return backend;
|
||||
}
|
||||
void runTest ()
|
||||
{
|
||||
beginTest ("create");
|
||||
|
||||
int64 const seedValue = 50;
|
||||
|
||||
HeapBlock <char> payloadBuffer (maxPayloadBytes);
|
||||
|
||||
for (int i = 0; i < numObjects; ++i)
|
||||
{
|
||||
ScopedPointer <NodeObject> object (createNodeObject (i, seedValue, payloadBuffer));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static NodeStoreTests nodeStoreTests;
|
||||
|
||||
@@ -34,14 +34,55 @@ public:
|
||||
};
|
||||
|
||||
/** Back end used for the store.
|
||||
|
||||
A Backend implements a persistent key/value storage system.
|
||||
Keys sizes are all fixed within the same database.
|
||||
*/
|
||||
class Backend
|
||||
{
|
||||
public:
|
||||
/** Return codes from operations.
|
||||
*/
|
||||
enum Status
|
||||
{
|
||||
ok,
|
||||
notFound,
|
||||
dataCorrupt,
|
||||
unknown
|
||||
};
|
||||
|
||||
Backend ();
|
||||
|
||||
virtual ~Backend () { }
|
||||
|
||||
/** Provides storage for retrieved objects.
|
||||
*/
|
||||
struct GetCallback
|
||||
{
|
||||
/** Get storage for an object.
|
||||
|
||||
@param sizeInBytes The number of bytes needed to store the value.
|
||||
|
||||
@return A pointer to a buffer large enough to hold all the bytes.
|
||||
*/
|
||||
virtual void* getStorageForValue (size_t sizeInBytes) = 0;
|
||||
};
|
||||
|
||||
/** Retrieve a single object.
|
||||
|
||||
If the object is not found or an error is encountered, the
|
||||
result will indicate the condition.
|
||||
|
||||
@param key A pointer to the key data.
|
||||
@param callback The callback used to obtain storage for the value.
|
||||
|
||||
@return The result of the operation.
|
||||
*/
|
||||
virtual Status get (void const* key, GetCallback* callback) { return notFound; }
|
||||
|
||||
|
||||
|
||||
|
||||
/** Store a single object.
|
||||
*/
|
||||
// VFALCO TODO Why should the Backend know or care about NodeObject?
|
||||
@@ -54,13 +95,6 @@ public:
|
||||
*/
|
||||
virtual NodeObject::pointer retrieve (uint256 const &hash) = 0;
|
||||
|
||||
struct GetCallback
|
||||
{
|
||||
virtual void* getBufferForValue (int valueBytes) = 0;
|
||||
};
|
||||
|
||||
virtual bool get (void const* key, GetCallback* callback) { return false; }
|
||||
|
||||
// Visit every object in the database
|
||||
// This function will only be called during an import operation
|
||||
//
|
||||
@@ -69,7 +103,7 @@ public:
|
||||
virtual void visitAll (FUNCTION_TYPE <void (NodeObject::pointer)>) = 0;
|
||||
|
||||
private:
|
||||
friend class NodeStore;
|
||||
friend class NodeStoreImp;
|
||||
|
||||
// VFALCO TODO Put this bulk writing logic into a separate class.
|
||||
// NOTE Why are these virtual?
|
||||
@@ -115,8 +149,13 @@ public:
|
||||
virtual String getName () const = 0;
|
||||
|
||||
/** Create an instance of this factory's backend.
|
||||
|
||||
@param keyBytes The fixed number of bytes per key.
|
||||
@param keyValues A set of key/value configuration pairs.
|
||||
|
||||
@return A pointer to the Backend object.
|
||||
*/
|
||||
virtual Backend* createInstance (StringPairArray const& keyValues) = 0;
|
||||
virtual Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues) = 0;
|
||||
};
|
||||
|
||||
public:
|
||||
@@ -133,10 +172,10 @@ public:
|
||||
// Is cacheAge in minutes? seconds?
|
||||
// These should be in the parameters.
|
||||
//
|
||||
NodeStore (String backendParameters,
|
||||
String fastBackendParameters,
|
||||
int cacheSize,
|
||||
int cacheAge);
|
||||
static NodeStore* New (String backendParameters,
|
||||
String fastBackendParameters,
|
||||
int cacheSize,
|
||||
int cacheAge);
|
||||
|
||||
/** Add the specified backend factory to the list of available factories.
|
||||
|
||||
@@ -146,52 +185,31 @@ public:
|
||||
static void addBackendFactory (BackendFactory& factory);
|
||||
|
||||
// VFALCO TODO Document this.
|
||||
float getCacheHitRate ();
|
||||
virtual float getCacheHitRate () = 0;
|
||||
|
||||
// VFALCO TODO Document this.
|
||||
bool store (NodeObjectType type, uint32 index, Blob const& data,
|
||||
uint256 const& hash);
|
||||
virtual bool store (NodeObjectType type, uint32 index, Blob const& data,
|
||||
uint256 const& hash) = 0;
|
||||
|
||||
// VFALCO TODO Document this.
|
||||
NodeObject::pointer retrieve (uint256 const& hash);
|
||||
|
||||
// VFALCO TODO Document this.
|
||||
void waitWrite ();
|
||||
// TODO Replace uint256 with void*
|
||||
//
|
||||
virtual NodeObject::pointer retrieve (uint256 const& hash) = 0;
|
||||
|
||||
// VFALCO TODO Document this.
|
||||
// TODO Document the parameter meanings.
|
||||
void tune (int size, int age);
|
||||
virtual void tune (int size, int age) = 0;
|
||||
|
||||
// VFALCO TODO Document this.
|
||||
void sweep ();
|
||||
virtual void sweep () = 0;
|
||||
|
||||
// VFALCO TODO Document this.
|
||||
// What are the units of the return value?
|
||||
int getWriteLoad ();
|
||||
virtual int getWriteLoad () = 0;
|
||||
|
||||
// VFALCO TODO Document this.
|
||||
// NOTE What's the return value?
|
||||
int import (String sourceBackendParameters);
|
||||
|
||||
private:
|
||||
NodeObject::pointer retrieve (Backend* backend, uint256 const& hash);
|
||||
|
||||
void importVisitor (std::vector <NodeObject::pointer>& objects, NodeObject::pointer object);
|
||||
|
||||
static Backend* createBackend (String const& parameters);
|
||||
|
||||
static Array <BackendFactory*> s_factories;
|
||||
|
||||
private:
|
||||
// Persistent key/value storage.
|
||||
ScopedPointer <Backend> m_backend;
|
||||
|
||||
// Larger key/value storage, but not necessarily persistent.
|
||||
ScopedPointer <Backend> m_fastBackend;
|
||||
|
||||
// VFALCO NOTE What are these things for? We need comments.
|
||||
TaggedCache <uint256, NodeObject, UptimeTimerAdapter> m_cache;
|
||||
KeyCache <uint256, UptimeTimerAdapter> m_negativeCache;
|
||||
virtual int import (String sourceBackendParameters) = 0;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
@@ -62,7 +62,7 @@ String NullBackendFactory::getName () const
|
||||
return "none";
|
||||
}
|
||||
|
||||
NodeStore::Backend* NullBackendFactory::createInstance (StringPairArray const& keyValues)
|
||||
NodeStore::Backend* NullBackendFactory::createInstance (size_t, StringPairArray const&)
|
||||
{
|
||||
return new NullBackendFactory::Backend;
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@ public:
|
||||
static NullBackendFactory& getInstance ();
|
||||
|
||||
String getName () const;
|
||||
NodeStore::Backend* createInstance (StringPairArray const& keyValues);
|
||||
NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues);
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
@@ -4,32 +4,65 @@
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
static const char* s_nodeStoreDBInit [] =
|
||||
{
|
||||
"PRAGMA synchronous=NORMAL;",
|
||||
"PRAGMA journal_mode=WAL;",
|
||||
"PRAGMA journal_size_limit=1582080;",
|
||||
|
||||
#if (ULONG_MAX > UINT_MAX) && !defined (NO_SQLITE_MMAP)
|
||||
"PRAGMA mmap_size=171798691840;",
|
||||
#endif
|
||||
|
||||
"BEGIN TRANSACTION;",
|
||||
|
||||
"CREATE TABLE CommittedObjects ( \
|
||||
Hash CHARACTER(64) PRIMARY KEY, \
|
||||
ObjType CHAR(1) NOT NULL, \
|
||||
LedgerIndex BIGINT UNSIGNED, \
|
||||
Object BLOB \
|
||||
);",
|
||||
|
||||
"END TRANSACTION;"
|
||||
};
|
||||
|
||||
static int s_nodeStoreDBCount = NUMBER (s_nodeStoreDBInit);
|
||||
|
||||
class SqliteBackendFactory::Backend : public NodeStore::Backend
|
||||
{
|
||||
public:
|
||||
Backend(std::string const& path) : mName(path)
|
||||
Backend (size_t keyBytes, std::string const& path)
|
||||
: m_keyBytes (keyBytes)
|
||||
, m_name (path)
|
||||
, m_db (new DatabaseCon(path, s_nodeStoreDBInit, s_nodeStoreDBCount))
|
||||
{
|
||||
mDb = new DatabaseCon(path, HashNodeDBInit, HashNodeDBCount);
|
||||
mDb->getDB()->executeSQL(boost::str(boost::format("PRAGMA cache_size=-%d;") %
|
||||
(theConfig.getSize(siHashNodeDBCache) * 1024)));
|
||||
String s;
|
||||
|
||||
// VFALCO TODO Remove this dependency on theConfig
|
||||
//
|
||||
s << "PRAGMA cache_size=-" << String (theConfig.getSize(siHashNodeDBCache) * 1024);
|
||||
m_db->getDB()->executeSQL (s.toStdString ().c_str ());
|
||||
|
||||
//m_db->getDB()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
|
||||
// (theConfig.getSize(siHashNodeDBCache) * 1024)));
|
||||
}
|
||||
|
||||
Backend()
|
||||
~Backend()
|
||||
{
|
||||
delete mDb;
|
||||
delete m_db;
|
||||
}
|
||||
|
||||
std::string getDataBaseName()
|
||||
{
|
||||
return mName;
|
||||
return m_name;
|
||||
}
|
||||
|
||||
bool bulkStore(const std::vector< NodeObject::pointer >& objects)
|
||||
bool bulkStore (const std::vector< NodeObject::pointer >& objects)
|
||||
{
|
||||
ScopedLock sl(mDb->getDBLock());
|
||||
static SqliteStatement pStB(mDb->getDB()->getSqliteDB(), "BEGIN TRANSACTION;");
|
||||
static SqliteStatement pStE(mDb->getDB()->getSqliteDB(), "END TRANSACTION;");
|
||||
static SqliteStatement pSt(mDb->getDB()->getSqliteDB(),
|
||||
ScopedLock sl(m_db->getDBLock());
|
||||
static SqliteStatement pStB(m_db->getDB()->getSqliteDB(), "BEGIN TRANSACTION;");
|
||||
static SqliteStatement pStE(m_db->getDB()->getSqliteDB(), "END TRANSACTION;");
|
||||
static SqliteStatement pSt(m_db->getDB()->getSqliteDB(),
|
||||
"INSERT OR IGNORE INTO CommittedObjects "
|
||||
"(Hash,ObjType,LedgerIndex,Object) VALUES (?, ?, ?, ?);");
|
||||
|
||||
@@ -55,8 +88,8 @@ public:
|
||||
NodeObject::pointer ret;
|
||||
|
||||
{
|
||||
ScopedLock sl(mDb->getDBLock());
|
||||
static SqliteStatement pSt(mDb->getDB()->getSqliteDB(),
|
||||
ScopedLock sl(m_db->getDBLock());
|
||||
static SqliteStatement pSt(m_db->getDB()->getSqliteDB(),
|
||||
"SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;");
|
||||
|
||||
pSt.bind(1, hash.GetHex());
|
||||
@@ -74,7 +107,7 @@ public:
|
||||
{
|
||||
uint256 hash;
|
||||
|
||||
static SqliteStatement pSt(mDb->getDB()->getSqliteDB(),
|
||||
static SqliteStatement pSt(m_db->getDB()->getSqliteDB(),
|
||||
"SELECT ObjType,LedgerIndex,Object,Hash FROM CommittedObjects;");
|
||||
|
||||
while (pSt.isRow(pSt.step()))
|
||||
@@ -121,8 +154,9 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
std::string mName;
|
||||
DatabaseCon* mDb;
|
||||
size_t const m_keyBytes;
|
||||
std::string const m_name;
|
||||
ScopedPointer <DatabaseCon> m_db;
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
@@ -147,7 +181,7 @@ String SqliteBackendFactory::getName () const
|
||||
return "Sqlite";
|
||||
}
|
||||
|
||||
NodeStore::Backend* SqliteBackendFactory::createInstance (StringPairArray const& keyValues)
|
||||
NodeStore::Backend* SqliteBackendFactory::createInstance (size_t keyBytes, StringPairArray const& keyValues)
|
||||
{
|
||||
return new Backend (keyValues ["path"].toStdString ());
|
||||
return new Backend (keyBytes, keyValues ["path"].toStdString ());
|
||||
}
|
||||
|
||||
@@ -21,7 +21,7 @@ public:
|
||||
static SqliteBackendFactory& getInstance ();
|
||||
|
||||
String getName () const;
|
||||
NodeStore::Backend* createInstance (StringPairArray const& keyValues);
|
||||
NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues);
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
@@ -102,9 +102,9 @@ namespace ripple
|
||||
|
||||
#include "node/ripple_NodeObject.h"
|
||||
#include "node/ripple_NodeStore.h"
|
||||
#include "node/ripple_LevelDBBackendFactory.h"
|
||||
#include "node/ripple_HyperLevelDBBackendFactory.h"
|
||||
#include "node/ripple_KeyvaDBBackendFactory.h"
|
||||
#include "node/ripple_LevelDBBackendFactory.h"
|
||||
#include "node/ripple_MdbBackendFactory.h"
|
||||
#include "node/ripple_NullBackendFactory.h"
|
||||
#include "node/ripple_SqliteBackendFactory.h"
|
||||
@@ -245,14 +245,14 @@ static const uint64 tenTo17m1 = tenTo17 - 1;
#include "basics/ripple_RPCServerHandler.cpp"
#include "node/ripple_NodeObject.cpp"
#include "node/ripple_NodeStore.cpp"
#include "node/ripple_LevelDBBackendFactory.cpp"
#include "node/ripple_HyperLevelDBBackendFactory.cpp"
#include "node/ripple_MdbBackendFactory.cpp"
#include "node/ripple_NullBackendFactory.cpp"
#include "node/ripple_SqliteBackendFactory.cpp"
#include "node/ripple_KeyvaDB.h" // private
#include "node/ripple_KeyvaDB.cpp"
#include "node/ripple_KeyvaDBBackendFactory.cpp"
#include "node/ripple_LevelDBBackendFactory.cpp"
#include "node/ripple_NullBackendFactory.cpp"
#include "node/ripple_MdbBackendFactory.cpp"
#include "node/ripple_SqliteBackendFactory.cpp"

#include "ledger/Ledger.cpp"
#include "src/cpp/ripple/ripple_SHAMapDelta.cpp"

@@ -115,7 +115,22 @@ public:
}

bool del (const key_type& key, bool valid);

/** Replace aliased objects with originals.

Due to concurrency it is possible for two separate objects with
the same content and referring to the same unique "thing" to exist.
This routine eliminates the duplicate and performs a replacement
on the callers shared pointer if needed.

@param key The key corresponding to the object
@param data A shared pointer to the data corresponding to the object.
@param replace `true` if `data` is the up to date version of the object.

@return `true` if the operation was successful.
*/
bool canonicalize (const key_type& key, boost::shared_ptr<c_Data>& data, bool replace = false);

bool store (const key_type& key, const c_Data& data);
boost::shared_ptr<c_Data> fetch (const key_type& key);
bool retrieve (const key_type& key, c_Data& data);

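The canonicalize documentation in the hunk above describes how concurrent duplicates are collapsed onto one canonical object. As a minimal, self-contained sketch of that documented behaviour only (not code from this commit): the std::map container, the Key/Data typedefs, and the absence of locking are assumptions made purely for illustration.

// Minimal sketch of the documented canonicalize() behaviour. The container,
// typedefs, and lack of locking are illustrative assumptions; the real class
// is the cache template shown in the hunk above.
#include <boost/shared_ptr.hpp>
#include <map>
#include <string>

typedef std::string Key;
typedef std::string Data;

static std::map <Key, boost::shared_ptr <Data> > s_cache;

bool canonicalizeSketch (Key const& key, boost::shared_ptr <Data>& data, bool replace = false)
{
    std::map <Key, boost::shared_ptr <Data> >::iterator const it = s_cache.find (key);

    if (it == s_cache.end ())
    {
        // First arrival: the caller's object becomes the canonical copy.
        s_cache [key] = data;
        return true;
    }

    if (replace)
    {
        // The caller holds the up to date version; replace the stored one.
        it->second = data;
    }
    else
    {
        // A duplicate already exists; point the caller at the canonical copy.
        data = it->second;
    }

    return true;
}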
@@ -44,6 +44,9 @@ void NetworkOPs::processNetTimer ()
{
ScopedLock sl (getApp().getMasterLock ());

Application& app (getApp ());
ILoadManager& mgr (app.getLoadManager ());

getApp().getLoadManager ().resetDeadlockDetector ();

std::size_t const numPeers = getApp().getPeers ().getPeerVector ().size ();

@@ -46,10 +46,11 @@ public:
, mNetOps (&mLedgerMaster)
, m_rpcServerHandler (mNetOps)
, mTempNodeCache ("NodeCache", 16384, 90)
, m_nodeStore (
, m_nodeStore (NodeStore::New (
theConfig.NODE_DB,
theConfig.FASTNODE_DB,
16384, 300)
16384,
300))
, mSLECache ("LedgerEntryCache", 4096, 120)
, mSNTPClient (mAuxService)
, mJobQueue (mIOService)
@@ -70,11 +71,6 @@ public:
, mTxnDB (NULL)
, mLedgerDB (NULL)
, mWalletDB (NULL) // VFALCO NOTE are all these 'NULL' ctor params necessary?
, mNetNodeDB (NULL)
, mPathFindDB (NULL)
, mHashNodeDB (NULL)
, mHashNodeLDB (NULL)
, mEphemeralLDB (NULL)
, mPeerDoor (NULL)
, mRPCDoor (NULL)
, mWSPublicDoor (NULL)
@@ -92,13 +88,6 @@ public:
delete mTxnDB;
delete mLedgerDB;
delete mWalletDB;
delete mHashNodeDB;
delete mNetNodeDB;
delete mPathFindDB;
delete mHashNodeLDB;

if (mEphemeralLDB != nullptr)
delete mEphemeralLDB;
}

LocalCredentials& getLocalCredentials ()
@@ -138,7 +127,7 @@ public:

NodeStore& getNodeStore ()
{
return m_nodeStore;
return *m_nodeStore;
}

JobQueue& getJobQueue ()
@@ -247,27 +236,6 @@ public:
{
return mWalletDB;
}
DatabaseCon* getNetNodeDB ()
{
return mNetNodeDB;
}
DatabaseCon* getPathFindDB ()
{
return mPathFindDB;
}
DatabaseCon* getHashNodeDB ()
{
return mHashNodeDB;
}

leveldb::DB* getHashNodeLDB ()
{
return mHashNodeLDB;
}
leveldb::DB* getEphemeralLDB ()
{
return mEphemeralLDB;
}

bool isShutdown ()
{
@@ -302,7 +270,7 @@ private:
NetworkOPs mNetOps;
RPCServerHandler m_rpcServerHandler;
NodeCache mTempNodeCache;
NodeStore m_nodeStore;
ScopedPointer <NodeStore> m_nodeStore;
SLECache mSLECache;
SNTPClient mSNTPClient;
JobQueue mJobQueue;
@@ -326,13 +294,6 @@ private:
DatabaseCon* mTxnDB;
DatabaseCon* mLedgerDB;
DatabaseCon* mWalletDB;
DatabaseCon* mNetNodeDB;
DatabaseCon* mPathFindDB;
DatabaseCon* mHashNodeDB;

// VFALCO TODO Wrap this in an interface
leveldb::DB* mHashNodeLDB;
leveldb::DB* mEphemeralLDB;

ScopedPointer <PeerDoor> mPeerDoor;
ScopedPointer <RPCDoor> mRPCDoor;
@@ -353,19 +314,11 @@ void ApplicationImp::stop ()
StopSustain ();
mShutdown = true;
mIOService.stop ();
// VFALCO TODO We shouldn't have to explicitly call this function.
// The NodeStore destructor should take care of it.
m_nodeStore.waitWrite ();
m_nodeStore = nullptr;
mValidations->flush ();
mAuxService.stop ();
mJobQueue.shutdown ();

delete mHashNodeLDB;
mHashNodeLDB = NULL;

delete mEphemeralLDB;
mEphemeralLDB = NULL;

WriteLog (lsINFO, Application) << "Stopped: " << mIOService.stopped ();
mShutdown = false;
}
@@ -445,16 +398,11 @@ void ApplicationImp::setup ()
boost::thread t1 (BIND_TYPE (&InitDB, &mRpcDB, "rpc.db", RpcDBInit, RpcDBCount));
boost::thread t2 (BIND_TYPE (&InitDB, &mTxnDB, "transaction.db", TxnDBInit, TxnDBCount));
boost::thread t3 (BIND_TYPE (&InitDB, &mLedgerDB, "ledger.db", LedgerDBInit, LedgerDBCount));
boost::thread t4 (BIND_TYPE (&InitDB, &mWalletDB, "wallet.db", WalletDBInit, WalletDBCount));
t1.join ();
t2.join ();
t3.join ();

boost::thread t4 (BIND_TYPE (&InitDB, &mWalletDB, "wallet.db", WalletDBInit, WalletDBCount));
boost::thread t6 (BIND_TYPE (&InitDB, &mNetNodeDB, "netnode.db", NetNodeDBInit, NetNodeDBCount));
boost::thread t7 (BIND_TYPE (&InitDB, &mPathFindDB, "pathfind.db", PathFindDBInit, PathFindDBCount));
t4.join ();
t6.join ();
t7.join ();

leveldb::Options options;
options.create_if_missing = true;
@@ -515,7 +463,7 @@ void ApplicationImp::setup ()
getUNL ().nodeBootstrap ();

mValidations->tune (theConfig.getSize (siValidationsSize), theConfig.getSize (siValidationsAge));
m_nodeStore.tune (theConfig.getSize (siNodeCacheSize), theConfig.getSize (siNodeCacheAge));
m_nodeStore->tune (theConfig.getSize (siNodeCacheSize), theConfig.getSize (siNodeCacheAge));
mLedgerMaster.tune (theConfig.getSize (siLedgerSize), theConfig.getSize (siLedgerAge));
mSLECache.setTargetSize (theConfig.getSize (siSLECacheSize));
mSLECache.setTargetAge (theConfig.getSize (siSLECacheAge));
@@ -697,7 +645,7 @@ void ApplicationImp::doSweep(Job& j)
//

mMasterTransaction.sweep ();
m_nodeStore.sweep ();
m_nodeStore->sweep ();
mLedgerMaster.sweep ();
mTempNodeCache.sweep ();
mValidations->sweep ();

@@ -89,15 +89,9 @@ public:
It looks like this is used to store the unique node list.
*/
// VFALCO TODO Rename, document this
// NOTE This will be replaced by class Validators
//
virtual DatabaseCon* getWalletDB () = 0;
// VFALCO NOTE It looks like this isn't used...
//virtual DatabaseCon* getNetNodeDB () = 0;
// VFALCO NOTE It looks like this isn't used...
//virtual DatabaseCon* getPathFindDB () = 0;
virtual DatabaseCon* getHashNodeDB () = 0;

virtual leveldb::DB* getHashNodeLDB () = 0;
virtual leveldb::DB* getEphemeralLDB () = 0;

virtual bool getSystemTimeOffset (int& offset) = 0;
virtual bool isShutdown () = 0;

@@ -257,15 +257,16 @@ int rippleMain (int argc, char** argv)
p.add ("parameters", -1);

// These must be added before the Application object is created
NodeStore::addBackendFactory (SqliteBackendFactory::getInstance ());
NodeStore::addBackendFactory (LevelDBBackendFactory::getInstance ());
NodeStore::addBackendFactory (KeyvaDBBackendFactory::getInstance ());
#if RIPPLE_HYPERLEVELDB_AVAILABLE
NodeStore::addBackendFactory (HyperLevelDBBackendFactory::getInstance ());
#endif
NodeStore::addBackendFactory (KeyvaDBBackendFactory::getInstance ());
NodeStore::addBackendFactory (LevelDBBackendFactory::getInstance ());
#if RIPPLE_MDB_AVAILABLE
NodeStore::addBackendFactory (MdbBackendFactory::getInstance ());
#endif
NodeStore::addBackendFactory (NullBackendFactory::getInstance ());
NodeStore::addBackendFactory (SqliteBackendFactory::getInstance ());

if (! RandomNumbers::getInstance ().initialize ())
{
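The addBackendFactory calls in the last hunk register every available backend factory before the Application object is constructed, presumably so NodeStore::New can later create whichever backend the configuration names. What follows is a generic, self-contained sketch of that register-then-create pattern; every name in it (ExampleBackend, ExampleFactory, MemoryFactory, addFactorySketch) is invented for illustration and is not a rippled type.

// Generic sketch of the register-then-create pattern used above. All names
// here are illustrative; they are not the classes from this commit.
#include <cstddef>
#include <iostream>
#include <map>
#include <string>

struct ExampleBackend
{
    explicit ExampleBackend (std::size_t keyBytes) : keyBytes (keyBytes) { }
    std::size_t keyBytes;
};

struct ExampleFactory
{
    virtual ~ExampleFactory () { }
    virtual std::string getName () const = 0;
    virtual ExampleBackend* createInstance (std::size_t keyBytes) = 0;
};

class MemoryFactory : public ExampleFactory
{
public:
    static MemoryFactory& getInstance () { static MemoryFactory instance; return instance; }
    std::string getName () const { return "Memory"; }
    ExampleBackend* createInstance (std::size_t keyBytes) { return new ExampleBackend (keyBytes); }
};

static std::map <std::string, ExampleFactory*> s_factories;

static void addFactorySketch (ExampleFactory& factory)
{
    s_factories [factory.getName ()] = &factory;
}

int main ()
{
    // Register before anything asks for a backend, mirroring the call order above.
    addFactorySketch (MemoryFactory::getInstance ());

    // Later, create the backend selected by name (hard-coded here).
    ExampleBackend* const backend = s_factories ["Memory"]->createInstance (32);
    std::cout << "key bytes: " << backend->keyBytes << std::endl;
    delete backend;

    return 0;
}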