Refactor NodeStore
@@ -120,4 +120,19 @@
//#define BEAST_BIND_USES_TR1 1
//#define BEAST_BIND_USES_BOOST 1

//------------------------------------------------------------------------------
//
// Ripple compilation settings
//
//------------------------------------------------------------------------------

/** Config: RIPPLE_VERIFY_NODEOBJECT_KEYS

    This verifies that the hash of node objects matches the payload.
    It is quite expensive so normally this is turned off!
*/
#ifndef RIPPLE_VERIFY_NODEOBJECT_KEYS
//#define RIPPLE_VERIFY_NODEOBJECT_KEYS 1
#endif

#endif
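In spirit, the RIPPLE_VERIFY_NODEOBJECT_KEYS check recomputes each object's key from its payload and compares. A minimal sketch of that kind of check; hash256 here is a hypothetical stand-in for whatever digest the store actually uses:

    // Sketch only: hash256() is an assumed helper, not the store's real API.
    #include <array>
    #include <cassert>
    #include <vector>

    typedef std::array <unsigned char, 32> Key256;

    Key256 hash256 (std::vector <unsigned char> const& payload); // assumed

    // Verify that a node object's key really is the hash of its payload.
    void verifyNodeObjectKey (Key256 const& key,
                              std::vector <unsigned char> const& payload)
    {
        assert (hash256 (payload) == key); // expensive, normally compiled out
    }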
TODO.txt
@@ -3,16 +3,19 @@ RIPPLE TODO
--------------------------------------------------------------------------------

Vinnie's Short List (Changes day to day)
- Finish writing the NodeStore unit tests
- Finish converting backends to new API
- Memory NodeStore::Backend for unit tests
- Performance test for NodeStore::Backend
- Improved Mutex to track deadlocks
- Convert some Ripple boost unit tests to Beast.
- Eliminate new technical in NodeStore::Backend
- Work on KeyvaDB
- Import beast::db and use it in SQliteBackend
- Finish unit tests and code for Validators
- Convert some Ripple boost unit tests to Beast.

--------------------------------------------------------------------------------

- Replace master lock with
- Replace all throw with beast::Throw

- Replace base_uint and uintXXX with UnsignedInteger
* Need to specialize UnsignedInteger to work efficiently with 4 and 8 byte
@@ -530,10 +530,12 @@ void Ledger::saveAcceptedLedger (Job&, bool fromConsensus)
assert (getTransHash () == mTransactionMap->getHash ());

// Save the ledger header in the hashed object store
{
Serializer s (128);
s.add32 (HashPrefix::ledgerMaster);
addRaw (s);
getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, s.peekData (), mHash);
getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, s.modData (), mHash);
}

AcceptedLedger::pointer aLedger = AcceptedLedger::makeAcceptedLedger (shared_from_this ());

@@ -48,7 +48,7 @@ bool InboundLedger::tryLocal ()
if (!mHaveBase)
{
// Nothing we can do without the ledger base
NodeObject::pointer node = getApp().getNodeStore ().retrieve (mHash);
NodeObject::pointer node = getApp().getNodeStore ().fetch (mHash);

if (!node)
{
@@ -672,7 +672,7 @@ bool InboundLedger::takeBase (const std::string& data) // data must not have has
Serializer s (data.size () + 4);
s.add32 (HashPrefix::ledgerMaster);
s.addRaw (data);
getApp().getNodeStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.peekData (), mHash);
getApp().getNodeStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.modData (), mHash);

progress ();
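The switch from s.peekData () to s.modData () at both call sites tracks the new store signature, which takes a mutable Blob and takes over its storage instead of copying (see the NodeObject constructor later in this commit, which swaps the buffer). A minimal sketch of that take-over, assuming Blob is a std::vector of bytes:

    #include <vector>

    typedef std::vector <unsigned char> Blob;

    struct Stored
    {
        Blob data;

        explicit Stored (Blob& callersBuffer)
        {
            // Take over the caller's buffer: no copy is made, and the
            // caller's variable is left empty afterwards.
            data.swap (callersBuffer);
        }
    };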
@@ -6,24 +6,36 @@

#if RIPPLE_HYPERLEVELDB_AVAILABLE

class HyperLevelDBBackendFactory::Backend : public NodeStore::Backend
class HyperLevelDBBackendFactory::Backend
: public NodeStore::Backend
, public NodeStore::BatchWriter::Callback
, LeakChecked <HyperLevelDBBackendFactory::Backend>
{
public:
Backend (size_t keyBytes, StringPairArray const& keyValues)
typedef RecycledObjectPool <std::string> StringPool;

Backend (size_t keyBytes,
StringPairArray const& keyValues,
NodeStore::Scheduler& scheduler)
: m_keyBytes (keyBytes)
, mName(keyValues ["path"].toStdString ())
, mDB(NULL)
, m_scheduler (scheduler)
, m_batch (*this, scheduler)
, m_name (keyValues ["path"].toStdString ())
{
if (mName.empty())
throw std::runtime_error ("Missing path in LevelDB backend");
if (m_name.empty ())
Throw (std::runtime_error ("Missing path in LevelDB backend"));

hyperleveldb::Options options;
options.create_if_missing = true;

if (keyValues ["cache_mb"].isEmpty ())
{
options.block_cache = hyperleveldb::NewLRUCache (theConfig.getSize (siHashNodeDBCache) * 1024 * 1024);
}
else
{
options.block_cache = hyperleveldb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L);
}

if (keyValues ["filter_bits"].isEmpty())
{
@@ -31,95 +43,171 @@ public:
options.filter_policy = hyperleveldb::NewBloomFilterPolicy (10);
}
else if (keyValues ["filter_bits"].getIntValue() != 0)
{
options.filter_policy = hyperleveldb::NewBloomFilterPolicy (keyValues ["filter_bits"].getIntValue ());
}

if (! keyValues["open_files"].isEmpty ())
{
options.max_open_files = keyValues ["open_files"].getIntValue();
}

hyperleveldb::Status status = hyperleveldb::DB::Open (options, mName, &mDB);
if (!status.ok () || !mDB)
throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
hyperleveldb::DB* db = nullptr;
hyperleveldb::Status status = hyperleveldb::DB::Open (options, m_name, &db);
if (!status.ok () || !db)
Throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));

m_db = db;
}

~Backend ()
{
delete mDB;
}

std::string getDataBaseName()
std::string getName()
{
return mName;
return m_name;
}

bool bulkStore (const std::vector< NodeObject::pointer >& objs)
{
hyperleveldb::WriteBatch batch;
//--------------------------------------------------------------------------

BOOST_FOREACH (NodeObject::ref obj, objs)
Status fetch (void const* key, NodeObject::Ptr* pObject)
{
Blob blob (toBlob (obj));
batch.Put (
hyperleveldb::Slice (reinterpret_cast<char const*>(obj->getHash ().begin ()), m_keyBytes),
hyperleveldb::Slice (reinterpret_cast<char const*>(&blob.front ()), blob.size ()));
pObject->reset ();

Status status (ok);

hyperleveldb::ReadOptions const options;
hyperleveldb::Slice const slice (static_cast <char const*> (key), m_keyBytes);

{
// These are reused std::string objects,
// required for leveldb's funky interface.
//
StringPool::ScopedItem item (m_stringPool);
std::string& string = item.getObject ();

hyperleveldb::Status getStatus = m_db->Get (options, slice, &string);

if (getStatus.ok ())
{
NodeStore::DecodedBlob decoded (key, string.data (), string.size ());

if (decoded.wasOk ())
{
*pObject = decoded.createObject ();
}
else
{
// Decoding failed, probably corrupted!
//
status = dataCorrupt;
}
}
else
{
if (getStatus.IsCorruption ())
{
status = dataCorrupt;
}
else if (getStatus.IsNotFound ())
{
status = notFound;
}
else
{
status = unknown;
}
}
return mDB->Write (hyperleveldb::WriteOptions (), &batch).ok ();
}

NodeObject::pointer retrieve (uint256 const& hash)
{
std::string sData;
if (!mDB->Get (hyperleveldb::ReadOptions (),
hyperleveldb::Slice (reinterpret_cast<char const*>(hash.begin ()), m_keyBytes), &sData).ok ())
{
return NodeObject::pointer();
}
return fromBinary(hash, &sData[0], sData.size ());
return status;
}

void visitAll (FUNCTION_TYPE<void (NodeObject::pointer)> func)
void store (NodeObject::ref object)
{
hyperleveldb::Iterator* it = mDB->NewIterator (hyperleveldb::ReadOptions ());
m_batch.store (object);
}

void storeBatch (NodeStore::Batch const& batch)
{
hyperleveldb::WriteBatch wb;

{
NodeStore::EncodedBlob::Pool::ScopedItem item (m_blobPool);

BOOST_FOREACH (NodeObject::ref object, batch)
{
item.getObject ().prepare (object);

wb.Put (
hyperleveldb::Slice (reinterpret_cast <char const*> (
item.getObject ().getKey ()), m_keyBytes),
hyperleveldb::Slice (reinterpret_cast <char const*> (
item.getObject ().getData ()), item.getObject ().getSize ()));
}
}

hyperleveldb::WriteOptions const options;

m_db->Write (options, &wb).ok ();
}

void visitAll (VisitCallback& callback)
{
hyperleveldb::ReadOptions const options;

ScopedPointer <hyperleveldb::Iterator> it (m_db->NewIterator (options));

for (it->SeekToFirst (); it->Valid (); it->Next ())
{
if (it->key ().size () == m_keyBytes)
{
uint256 hash;
memcpy(hash.begin(), it->key ().data(), m_keyBytes);
func (fromBinary (hash, it->value ().data (), it->value ().size ()));
}
}
}
NodeStore::DecodedBlob decoded (it->key ().data (),
it->value ().data (),
it->value ().size ());

Blob toBlob(NodeObject::ref obj)
if (decoded.wasOk ())
{
Blob rawData (9 + obj->getData ().size ());
unsigned char* bufPtr = &rawData.front();
NodeObject::Ptr object (decoded.createObject ());

*reinterpret_cast<uint32*> (bufPtr + 0) = ntohl (obj->getIndex ());
*reinterpret_cast<uint32*> (bufPtr + 4) = ntohl (obj->getIndex ());
* (bufPtr + 8) = static_cast<unsigned char> (obj->getType ());
memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ());

return rawData;
callback.visitObject (object);
}
else
{
// Uh oh, corrupted data!
WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ());
}
}
else
{
// VFALCO NOTE What does it mean to find an
// incorrectly sized key? Corruption?
WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size ();
}
}
}

NodeObject::pointer fromBinary(uint256 const& hash,
char const* data, int size)
int getWriteLoad ()
{
if (size < 9)
throw std::runtime_error ("undersized object");
return m_batch.getWriteLoad ();
}

uint32 index = htonl (*reinterpret_cast<const uint32*> (data));
int htype = data[8];
//--------------------------------------------------------------------------

return boost::make_shared<NodeObject> (static_cast<NodeObjectType> (htype), index,
data + 9, size - 9, hash);
void writeBatch (NodeStore::Batch const& batch)
{
storeBatch (batch);
}

private:
size_t const m_keyBytes;
std::string mName;
hyperleveldb::DB* mDB;
NodeStore::Scheduler& m_scheduler;
NodeStore::BatchWriter m_batch;
StringPool m_stringPool;
NodeStore::EncodedBlob::Pool m_blobPool;
std::string m_name;
ScopedPointer <hyperleveldb::DB> m_db;
};

//------------------------------------------------------------------------------
@@ -144,9 +232,12 @@ String HyperLevelDBBackendFactory::getName () const
return "HyperLevelDB";
}

NodeStore::Backend* HyperLevelDBBackendFactory::createInstance (size_t keyBytes, StringPairArray const& keyValues)
NodeStore::Backend* HyperLevelDBBackendFactory::createInstance (
size_t keyBytes,
StringPairArray const& keyValues,
NodeStore::Scheduler& scheduler)
{
return new HyperLevelDBBackendFactory::Backend (keyBytes, keyValues);
return new HyperLevelDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
}

//------------------------------------------------------------------------------

@@ -23,7 +23,10 @@ public:
static HyperLevelDBBackendFactory& getInstance ();

String getName () const;
NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues);

NodeStore::Backend* createInstance (size_t keyBytes,
StringPairArray const& keyValues,
NodeStore::Scheduler& scheduler);
};

#endif
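The fetch contract the new backends implement (ok / notFound / dataCorrupt / unknown, never throwing) is easiest to see in a toy backend. A minimal sketch, assuming std::map as storage and skipping the decode step a real backend performs:

    #include <map>
    #include <string>

    enum Status { ok, notFound, dataCorrupt, unknown };

    struct ToyBackend
    {
        std::map <std::string, std::string> table; // key blob -> value blob

        // Same shape as Backend::fetch: report via Status, never throw.
        Status fetch (std::string const& key, std::string* pValue)
        {
            std::map <std::string, std::string>::iterator it = table.find (key);
            if (it == table.end ())
                return notFound;
            *pValue = it->second;
            return ok; // a real backend would decode and may report dataCorrupt
        }
    };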
@@ -536,7 +536,7 @@ public:
maxPayloadBytes = 8 * 1024
};

KeyvaDBTests () : UnitTest ("KeyvaDB")
KeyvaDBTests () : UnitTest ("KeyvaDB", "ripple")
{
}

@@ -572,10 +572,14 @@ public:
s << "keyBytes=" << String (KeyBytes) << ", maxItems=" << String (maxItems);
beginTest (s);

// Set up the key and value files and open the db.
File const keyPath = File::createTempFile ("").withFileExtension (".key");
File const valPath = File::createTempFile ("").withFileExtension (".val");
ScopedPointer <KeyvaDB> db (KeyvaDB::New (KeyBytes, keyPath, valPath, true));
// Set up the key and value files
File const tempFile (File::createTempFile (""));
File const keyPath = tempFile.withFileExtension (".key");
File const valPath = tempFile.withFileExtension (".val");

{
// open the db
ScopedPointer <KeyvaDB> db (KeyvaDB::New (KeyBytes, keyPath, valPath, false));

Payload payload (maxPayloadBytes);
Payload check (maxPayloadBytes);
@@ -628,10 +632,36 @@ public:
}
}

{
// Re-open the database and confirm the data
ScopedPointer <KeyvaDB> db (KeyvaDB::New (KeyBytes, keyPath, valPath, false));

Payload payload (maxPayloadBytes);
Payload check (maxPayloadBytes);

PayloadGetCallback cb;
for (unsigned int keyIndex = 0; keyIndex < maxItems; ++keyIndex)
{
KeyType const v = KeyType::createFromInteger (keyIndex);

bool const found = db->get (v.cbegin (), &cb);

expect (found, "Should be found");

payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue);

expect (payload == cb.payload, "Should be equal");
}
}

keyPath.deleteFile ();
valPath.deleteFile ();
}

void runTest ()
{
testKeySize <4> (512);
testKeySize <32> (4096);
testKeySize <4> (500);
testKeySize <32> (4000);
}
};

@@ -6,9 +6,16 @@

class KeyvaDBBackendFactory::Backend : public NodeStore::Backend
{
private:
typedef RecycledObjectPool <MemoryBlock> MemoryPool;
typedef RecycledObjectPool <NodeStore::EncodedBlob> EncodedBlobPool;

public:
Backend (size_t keyBytes, StringPairArray const& keyValues)
Backend (size_t keyBytes,
StringPairArray const& keyValues,
NodeStore::Scheduler& scheduler)
: m_keyBytes (keyBytes)
, m_scheduler (scheduler)
, m_path (keyValues ["path"])
, m_db (KeyvaDB::New (
keyBytes,
@@ -22,34 +29,53 @@ public:
{
}

std::string getDataBaseName ()
std::string getName ()
{
return m_path.toStdString ();
}

//--------------------------------------------------------------------------

Status get (void const* key, GetCallback* callback)
Status fetch (void const* key, NodeObject::Ptr* pObject)
{
pObject->reset ();

Status status (ok);

struct ForwardingGetCallback : KeyvaDB::GetCallback
struct Callback : KeyvaDB::GetCallback
{
ForwardingGetCallback (Backend::GetCallback* callback)
: m_callback (callback)
explicit Callback (MemoryBlock& block)
: m_block (block)
{
}

void* getStorageForValue (int valueBytes)
{
return m_callback->getStorageForValue (valueBytes);
m_size = valueBytes;
m_block.ensureSize (valueBytes);

return m_block.getData ();
}

void const* getData () const noexcept
{
return m_block.getData ();
}

size_t getSize () const noexcept
{
return m_size;
}

private:
Backend::GetCallback* const m_callback;
MemoryBlock& m_block;
size_t m_size;
};

ForwardingGetCallback cb (callback);
MemoryPool::ScopedItem item (m_memoryPool);
MemoryBlock& block (item.getObject ());

Callback cb (block);

// VFALCO TODO Can't we get KeyvaDB to provide a proper status?
//
@@ -57,9 +83,20 @@ public:

if (found)
{
NodeStore::DecodedBlob decoded (key, cb.getData (), cb.getSize ());

if (decoded.wasOk ())
{
*pObject = decoded.createObject ();

status = ok;
}
else
{
status = dataCorrupt;
}
}
else
{
status = notFound;
}
@@ -67,90 +104,45 @@ public:
return status;
}

//--------------------------------------------------------------------------

void writeObject (NodeObject::ref object)
void store (NodeObject::ref object)
{
Blob blob (toBlob (object));
m_db->put (object->getHash ().begin (), &blob [0], blob.size ());
EncodedBlobPool::ScopedItem item (m_blobPool);
NodeStore::EncodedBlob& encoded (item.getObject ());

encoded.prepare (object);

m_db->put (encoded.getKey (), encoded.getData (), encoded.getSize ());
}

bool bulkStore (std::vector <NodeObject::pointer> const& objs)
void storeBatch (NodeStore::Batch const& batch)
{
for (size_t i = 0; i < objs.size (); ++i)
{
writeObject (objs [i]);
for (int i = 0; i < batch.size (); ++i)
store (batch [i]);
}

return true;
}

struct MyGetCallback : KeyvaDB::GetCallback
{
int valueBytes;
HeapBlock <char> data;

void* getStorageForValue (int valueBytes_)
{
valueBytes = valueBytes_;

data.malloc (valueBytes);

return data.getData ();
}
};

NodeObject::pointer retrieve (uint256 const& hash)
{
NodeObject::pointer result;

MyGetCallback cb;

bool const found = m_db->get (hash.begin (), &cb);

if (found)
{
result = fromBinary (hash, cb.data.getData (), cb.valueBytes);
}

return result;
}

void visitAll (FUNCTION_TYPE<void (NodeObject::pointer)> func)
void visitAll (VisitCallback& callback)
{
// VFALCO TODO Implement this!
//
bassertfalse;
//m_db->visitAll ();
}

Blob toBlob (NodeObject::ref obj)
int getWriteLoad ()
{
Blob rawData (9 + obj->getData ().size ());
unsigned char* bufPtr = &rawData.front();

*reinterpret_cast<uint32*> (bufPtr + 0) = ntohl (obj->getIndex ());
*reinterpret_cast<uint32*> (bufPtr + 4) = ntohl (obj->getIndex ());
* (bufPtr + 8) = static_cast<unsigned char> (obj->getType ());
memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ());

return rawData;
// we don't do pending writes
return 0;
}

NodeObject::pointer fromBinary (uint256 const& hash, char const* data, int size)
{
if (size < 9)
throw std::runtime_error ("undersized object");

uint32 index = htonl (*reinterpret_cast <const uint32*> (data));

int htype = data[8];

return boost::make_shared <NodeObject> (static_cast<NodeObjectType> (htype), index,
data + 9, size - 9, hash);
}
//--------------------------------------------------------------------------

private:
size_t const m_keyBytes;
NodeStore::Scheduler& m_scheduler;
String m_path;
ScopedPointer <KeyvaDB> m_db;
MemoryPool m_memoryPool;
EncodedBlobPool m_blobPool;
};

//------------------------------------------------------------------------------
@@ -175,9 +167,12 @@ String KeyvaDBBackendFactory::getName () const
return "KeyvaDB";
}

NodeStore::Backend* KeyvaDBBackendFactory::createInstance (size_t keyBytes, StringPairArray const& keyValues)
NodeStore::Backend* KeyvaDBBackendFactory::createInstance (
size_t keyBytes,
StringPairArray const& keyValues,
NodeStore::Scheduler& scheduler)
{
return new KeyvaDBBackendFactory::Backend (keyBytes, keyValues);
return new KeyvaDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
}

//------------------------------------------------------------------------------

@@ -21,7 +21,10 @@ public:
static KeyvaDBBackendFactory& getInstance ();

String getName () const;
NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues);

NodeStore::Backend* createInstance (size_t keyBytes,
StringPairArray const& keyValues,
NodeStore::Scheduler& scheduler);
};

#endif
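RecycledObjectPool and its ScopedItem, used throughout these backends, follow a simple checkout/return pattern. A sketch of that pattern, under the assumption that ScopedItem checks an object out on construction and recycles it on destruction (the real class may differ, and would need thread safety):

    #include <deque>

    // Sketch of the checkout/return idea behind RecycledObjectPool.
    template <class Object>
    class SimplePool
    {
    public:
        class ScopedItem
        {
        public:
            explicit ScopedItem (SimplePool& pool) : m_pool (pool)
            {
                if (m_pool.m_free.empty ())
                    m_object = new Object;
                else
                {
                    m_object = m_pool.m_free.back ();
                    m_pool.m_free.pop_back ();
                }
            }

            ~ScopedItem () { m_pool.m_free.push_back (m_object); } // recycle

            Object& getObject () { return *m_object; }

        private:
            SimplePool& m_pool;
            Object* m_object;
        };

    private:
        std::deque <Object*> m_free; // not thread safe; sketch only
    };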
@@ -4,24 +4,38 @@
*/
//==============================================================================

class LevelDBBackendFactory::Backend : public NodeStore::Backend
class LevelDBBackendFactory::Backend
: public NodeStore::Backend
, public NodeStore::BatchWriter::Callback
, LeakChecked <LevelDBBackendFactory::Backend>
{
public:
Backend (int keyBytes, StringPairArray const& keyValues)
typedef RecycledObjectPool <std::string> StringPool;

//--------------------------------------------------------------------------

Backend (int keyBytes,
StringPairArray const& keyValues,
NodeStore::Scheduler& scheduler)
: m_keyBytes (keyBytes)
, m_scheduler (scheduler)
, m_batch (*this, scheduler)
, m_name (keyValues ["path"].toStdString ())
, m_db(NULL)
{
if (m_name.empty())
throw std::runtime_error ("Missing path in LevelDB backend");
Throw (std::runtime_error ("Missing path in LevelDB backend"));

leveldb::Options options;
options.create_if_missing = true;

if (keyValues["cache_mb"].isEmpty())
{
options.block_cache = leveldb::NewLRUCache (theConfig.getSize (siHashNodeDBCache) * 1024 * 1024);
}
else
{
options.block_cache = leveldb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L);
}

if (keyValues["filter_bits"].isEmpty())
{
@@ -29,39 +43,38 @@ public:
options.filter_policy = leveldb::NewBloomFilterPolicy (10);
}
else if (keyValues["filter_bits"].getIntValue() != 0)
{
options.filter_policy = leveldb::NewBloomFilterPolicy (keyValues["filter_bits"].getIntValue());
}

if (! keyValues["open_files"].isEmpty())
{
options.max_open_files = keyValues["open_files"].getIntValue();
}

leveldb::Status status = leveldb::DB::Open (options, m_name, &m_db);
if (!status.ok () || !m_db)
throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
leveldb::DB* db = nullptr;
leveldb::Status status = leveldb::DB::Open (options, m_name, &db);
if (!status.ok () || !db)
Throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));

m_db = db;
}

~Backend ()
{
delete m_db;
}

std::string getDataBaseName()
std::string getName()
{
return m_name;
}

//--------------------------------------------------------------------------

struct StdString
Status fetch (void const* key, NodeObject::Ptr* pObject)
{
std::string blob;
};
pObject->reset ();

typedef RecycledObjectPool <StdString> StdStringPool;

//--------------------------------------------------------------------------

Status get (void const* key, GetCallback* callback)
{
Status status (ok);

leveldb::ReadOptions const options;
@@ -71,22 +84,24 @@ public:
// These are reused std::string objects,
// required for leveldb's funky interface.
//
StdStringPool::ScopedItem item (m_stringPool);
std::string& blob = item.getObject ().blob;
StringPool::ScopedItem item (m_stringPool);
std::string& string = item.getObject ();

leveldb::Status getStatus = m_db->Get (options, slice, &blob);
leveldb::Status getStatus = m_db->Get (options, slice, &string);

if (getStatus.ok ())
{
void* const buffer = callback->getStorageForValue (blob.size ());
NodeStore::DecodedBlob decoded (key, string.data (), string.size ());

if (buffer != nullptr)
if (decoded.wasOk ())
{
memcpy (buffer, blob.data (), blob.size ());
*pObject = decoded.createObject ();
}
else
{
Throw (std::bad_alloc ());
// Decoding failed, probably corrupted!
//
status = dataCorrupt;
}
}
else
@@ -109,83 +124,90 @@ public:
return status;
}

//--------------------------------------------------------------------------

bool bulkStore (const std::vector< NodeObject::pointer >& objs)
void store (NodeObject::ref object)
{
leveldb::WriteBatch batch;

BOOST_FOREACH (NodeObject::ref obj, objs)
{
Blob blob (toBlob (obj));
batch.Put (
leveldb::Slice (reinterpret_cast<char const*>(obj->getHash ().begin ()), m_keyBytes),
leveldb::Slice (reinterpret_cast<char const*>(&blob.front ()), blob.size ()));
}
return m_db->Write (leveldb::WriteOptions (), &batch).ok ();
m_batch.store (object);
}

NodeObject::pointer retrieve (uint256 const& hash)
void storeBatch (NodeStore::Batch const& batch)
{
std::string sData;
if (!m_db->Get (leveldb::ReadOptions (),
leveldb::Slice (reinterpret_cast<char const*>(hash.begin ()), m_keyBytes), &sData).ok ())
leveldb::WriteBatch wb;

{
return NodeObject::pointer();
NodeStore::EncodedBlob::Pool::ScopedItem item (m_blobPool);

BOOST_FOREACH (NodeObject::ref object, batch)
{
item.getObject ().prepare (object);

wb.Put (
leveldb::Slice (reinterpret_cast <char const*> (item.getObject ().getKey ()),
m_keyBytes),
leveldb::Slice (reinterpret_cast <char const*> (item.getObject ().getData ()),
item.getObject ().getSize ()));
}
return fromBinary(hash, &sData[0], sData.size ());
}

void visitAll (FUNCTION_TYPE<void (NodeObject::pointer)> func)
leveldb::WriteOptions const options;

m_db->Write (options, &wb).ok ();
}

void visitAll (VisitCallback& callback)
{
leveldb::Iterator* it = m_db->NewIterator (leveldb::ReadOptions ());
leveldb::ReadOptions const options;

ScopedPointer <leveldb::Iterator> it (m_db->NewIterator (options));

for (it->SeekToFirst (); it->Valid (); it->Next ())
{
if (it->key ().size () == m_keyBytes)
{
uint256 hash;
memcpy(hash.begin(), it->key ().data(), m_keyBytes);
func (fromBinary (hash, it->value ().data (), it->value ().size ()));
NodeStore::DecodedBlob decoded (it->key ().data (),
it->value ().data (),
it->value ().size ());

if (decoded.wasOk ())
{
NodeObject::Ptr object (decoded.createObject ());

callback.visitObject (object);
}
else
{
// Uh oh, corrupted data!
WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ());
}
}
else
{
// VFALCO NOTE What does it mean to find an
// incorrectly sized key? Corruption?
WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size ();
}
}
}

Blob toBlob(NodeObject::ref obj)
int getWriteLoad ()
{
Blob rawData (9 + obj->getData ().size ());
unsigned char* bufPtr = &rawData.front();

*reinterpret_cast<uint32*> (bufPtr + 0) = ntohl (obj->getIndex ());
*reinterpret_cast<uint32*> (bufPtr + 4) = ntohl (obj->getIndex ());
* (bufPtr + 8) = static_cast<unsigned char> (obj->getType ());
memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ());

return rawData;
return m_batch.getWriteLoad ();
}

NodeObject::pointer fromBinary(uint256 const& hash,
char const* data, int size)
//--------------------------------------------------------------------------

void writeBatch (NodeStore::Batch const& batch)
{
if (size < 9)
throw std::runtime_error ("undersized object");

uint32 index = htonl (*reinterpret_cast<const uint32*> (data));
int htype = data[8];

return boost::make_shared<NodeObject> (static_cast<NodeObjectType> (htype), index,
data + 9, size - 9, hash);
storeBatch (batch);
}

private:
size_t const m_keyBytes;
StdStringPool m_stringPool;
NodeStore::Scheduler& m_scheduler;
NodeStore::BatchWriter m_batch;
StringPool m_stringPool;
NodeStore::EncodedBlob::Pool m_blobPool;
std::string m_name;
leveldb::DB* m_db;
ScopedPointer <leveldb::DB> m_db;
};

//------------------------------------------------------------------------------
@@ -210,9 +232,12 @@ String LevelDBBackendFactory::getName () const
return "LevelDB";
}

NodeStore::Backend* LevelDBBackendFactory::createInstance (size_t keyBytes, StringPairArray const& keyValues)
NodeStore::Backend* LevelDBBackendFactory::createInstance (
size_t keyBytes,
StringPairArray const& keyValues,
NodeStore::Scheduler& scheduler)
{
return new LevelDBBackendFactory::Backend (keyBytes, keyValues);
return new LevelDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
}

//------------------------------------------------------------------------------

@@ -21,7 +21,10 @@ public:
static LevelDBBackendFactory& getInstance ();

String getName () const;
NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues);

NodeStore::Backend* createInstance (size_t keyBytes,
StringPairArray const& keyValues,
NodeStore::Scheduler& scheduler);
};

#endif
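For reference, the leveldb batch-write API used by storeBatch above works the same way outside the backend. A self-contained sketch using the real leveldb API (the database path is made up):

    #include <leveldb/db.h>
    #include <leveldb/write_batch.h>
    #include <cassert>

    int main ()
    {
        leveldb::Options options;
        options.create_if_missing = true;

        leveldb::DB* db = nullptr;
        leveldb::Status status = leveldb::DB::Open (options, "/tmp/example-db", &db);
        assert (status.ok ());

        // Group several writes into one atomic batch, as storeBatch does.
        leveldb::WriteBatch batch;
        batch.Put ("key1", "value1");
        batch.Put ("key2", "value2");

        status = db->Write (leveldb::WriteOptions (), &batch);
        assert (status.ok ());

        delete db;
    }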
@@ -6,14 +6,30 @@

#if RIPPLE_MDB_AVAILABLE

class MdbBackendFactory::Backend : public NodeStore::Backend
class MdbBackendFactory::Backend
: public NodeStore::Backend
, public NodeStore::BatchWriter::Callback
, LeakChecked <MdbBackendFactory::Backend>
{
public:
explicit Backend (StringPairArray const& keyValues)
: m_env (nullptr)
typedef NodeStore::Batch Batch;
typedef NodeStore::EncodedBlob EncodedBlob;
typedef NodeStore::DecodedBlob DecodedBlob;

explicit Backend (size_t keyBytes,
StringPairArray const& keyValues,
NodeStore::Scheduler& scheduler)
: m_keyBytes (keyBytes)
, m_scheduler (scheduler)
, m_batch (*this, scheduler)
, m_env (nullptr)
{
if (keyValues ["path"].isEmpty ())
throw std::runtime_error ("Missing path in MDB backend");
String path (keyValues ["path"]);

m_name = path.toStdString();

if (path.isEmpty ())
Throw (std::runtime_error ("Missing path in MDB backend"));

int error = 0;

@@ -25,26 +41,27 @@ public:
if (error == 0)
error = mdb_env_open (
m_env,
keyValues ["path"].toStdString().c_str (),
m_name.c_str (),
MDB_NOTLS,
0664);

MDB_txn* txn;

if (error == 0)
error = mdb_txn_begin (m_env, NULL, 0, &txn);

if (error == 0)
error = mdb_dbi_open (txn, NULL, 0, &m_dbi);

if (error == 0)
error = mdb_txn_commit (txn);

if (error != 0)
{
String s;
s << "Error #" << error << " creating mdb environment";
throw std::runtime_error (s.toStdString ());
Throw (std::runtime_error (s.toStdString ()));
}
m_name = keyValues ["path"].toStdString();
}

~Backend ()
@@ -56,120 +73,160 @@ public:
}
}

std::string getDataBaseName()
std::string getName()
{
return m_name;
}

bool bulkStore (std::vector <NodeObject::pointer> const& objs)
//--------------------------------------------------------------------------

template <class T>
unsigned char* mdb_cast (T* p)
{
return const_cast <unsigned char*> (static_cast <unsigned char const*> (p));
}

Status fetch (void const* key, NodeObject::Ptr* pObject)
{
pObject->reset ();

Status status (ok);

MDB_txn* txn = nullptr;

int error = 0;

error = mdb_txn_begin (m_env, NULL, MDB_RDONLY, &txn);

if (error == 0)
{
MDB_val dbkey;
MDB_val data;

dbkey.mv_size = m_keyBytes;
dbkey.mv_data = mdb_cast (key);

error = mdb_get (txn, m_dbi, &dbkey, &data);

if (error == 0)
{
DecodedBlob decoded (key, data.mv_data, data.mv_size);

if (decoded.wasOk ())
{
*pObject = decoded.createObject ();
}
else
{
status = dataCorrupt;
}
}
else if (error == MDB_NOTFOUND)
{
status = notFound;
}
else
{
status = unknown;

WriteLog (lsWARNING, NodeObject) << "MDB txn failed, code=" << error;
}

mdb_txn_abort (txn);
}
else
{
status = unknown;

WriteLog (lsWARNING, NodeObject) << "MDB txn failed, code=" << error;
}

return status;
}

void store (NodeObject::ref object)
{
m_batch.store (object);
}

void storeBatch (Batch const& batch)
{
MDB_txn* txn = nullptr;
int rc = 0;

rc = mdb_txn_begin(m_env, NULL, 0, &txn);
int error = 0;

if (rc == 0)
error = mdb_txn_begin (m_env, NULL, 0, &txn);

if (error == 0)
{
BOOST_FOREACH (NodeObject::ref obj, objs)
EncodedBlob::Pool::ScopedItem item (m_blobPool);

BOOST_FOREACH (NodeObject::Ptr const& object, batch)
{
MDB_val key, data;
Blob blob (toBlob (obj));
EncodedBlob& encoded (item.getObject ());

key.mv_size = (256 / 8);
key.mv_data = const_cast<unsigned char *>(obj->getHash().begin());
encoded.prepare (object);

data.mv_size = blob.size();
data.mv_data = &blob.front();
MDB_val key;
key.mv_size = m_keyBytes;
key.mv_data = mdb_cast (encoded.getKey ());

rc = mdb_put(txn, m_dbi, &key, &data, 0);
if (rc != 0)
MDB_val data;
data.mv_size = encoded.getSize ();
data.mv_data = mdb_cast (encoded.getData ());

error = mdb_put (txn, m_dbi, &key, &data, 0);

if (error != 0)
{
assert(false);
WriteLog (lsWARNING, NodeObject) << "mdb_put failed, error=" << error;
break;
}
}

if (error == 0)
{
error = mdb_txn_commit(txn);

if (error != 0)
{
WriteLog (lsWARNING, NodeObject) << "mdb_txn_commit failed, error=" << error;
}
}
else
assert(false);

if (rc == 0)
rc = mdb_txn_commit(txn);
else if (txn)
{
mdb_txn_abort (txn);

assert(rc == 0);
return rc == 0;
}

NodeObject::pointer retrieve (uint256 const& hash)
{
NodeObject::pointer ret;

MDB_txn *txn = nullptr;
int rc = 0;

rc = mdb_txn_begin(m_env, NULL, MDB_RDONLY, &txn);

if (rc == 0)
{
MDB_val key, data;

key.mv_size = (256 / 8);
key.mv_data = const_cast<unsigned char *>(hash.begin());

rc = mdb_get(txn, m_dbi, &key, &data);
if (rc == 0)
ret = fromBinary(hash, static_cast<char *>(data.mv_data), data.mv_size);
else
assert(rc == MDB_NOTFOUND);
}
else
assert(false);

mdb_txn_abort(txn);

return ret;
}

void visitAll (FUNCTION_TYPE <void (NodeObject::pointer)> func)
{ // WRITEME
assert(false);
}

Blob toBlob (NodeObject::ref obj) const
{
Blob rawData (9 + obj->getData ().size ());
unsigned char* bufPtr = &rawData.front();

*reinterpret_cast <uint32*> (bufPtr + 0) = ntohl (obj->getIndex ());

*reinterpret_cast <uint32*> (bufPtr + 4) = ntohl (obj->getIndex ());

*(bufPtr + 8) = static_cast <unsigned char> (obj->getType ());

memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ());

return rawData;
WriteLog (lsWARNING, NodeObject) << "mdb_txn_begin failed, error=" << error;
}
}

NodeObject::pointer fromBinary (uint256 const& hash, char const* data, int size) const
void visitAll (VisitCallback& callback)
{
if (size < 9)
throw std::runtime_error ("undersized object");
// VFALCO TODO Implement this!
bassertfalse;
}

uint32 const index = htonl (*reinterpret_cast <uint32 const*> (data));
int getWriteLoad ()
{
return m_batch.getWriteLoad ();
}

int const htype = data [8];
//--------------------------------------------------------------------------

return boost::make_shared <NodeObject> (
static_cast <NodeObjectType> (htype),
index,
data + 9,
size - 9,
hash);
void writeBatch (Batch const& batch)
{
storeBatch (batch);
}

private:
size_t const m_keyBytes;
NodeStore::Scheduler& m_scheduler;
NodeStore::BatchWriter m_batch;
NodeStore::EncodedBlob::Pool m_blobPool;
std::string m_name;
MDB_env* m_env;
MDB_dbi m_dbi;
@@ -197,9 +254,12 @@ String MdbBackendFactory::getName () const
return "mdb";
}

NodeStore::Backend* MdbBackendFactory::createInstance (StringPairArray const& keyValues)
NodeStore::Backend* MdbBackendFactory::createInstance (
size_t keyBytes,
StringPairArray const& keyValues,
NodeStore::Scheduler& scheduler)
{
return new MdbBackendFactory::Backend (keyValues);
return new MdbBackendFactory::Backend (keyBytes, keyValues, scheduler);
}

#endif

@@ -25,7 +25,10 @@ public:
static MdbBackendFactory& getInstance ();

String getName () const;
NodeStore::Backend* createInstance (StringPairArray const& keyValues);

NodeStore::Backend* createInstance (size_t keyBytes,
StringPairArray const& keyValues,
NodeStore::Scheduler& scheduler);
};

#endif
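The MDB (LMDB) calls above follow a fixed transaction discipline: begin, operate, then commit or abort exactly once. A self-contained sketch using the real LMDB C API (the key and value contents are made up):

    #include <lmdb.h>

    int putOne (MDB_env* env, MDB_dbi dbi)
    {
        MDB_txn* txn = 0;
        int error = mdb_txn_begin (env, NULL, 0, &txn);
        if (error != 0)
            return error;

        MDB_val key, data;
        key.mv_size = 3;
        key.mv_data = (void*) "abc";      // made-up key
        data.mv_size = 5;
        data.mv_data = (void*) "hello";   // made-up value

        error = mdb_put (txn, dbi, &key, &data, 0);

        if (error == 0)
            return mdb_txn_commit (txn);  // commit on success

        mdb_txn_abort (txn);              // abort exactly once on failure
        return error;
    }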
@@ -11,27 +11,27 @@ SETUP_LOG (NodeObject)
NodeObject::NodeObject (
NodeObjectType type,
LedgerIndex ledgerIndex,
Blob const& binaryDataToCopy,
uint256 const& hash)
Blob& data,
uint256 const& hash,
PrivateAccess)
: mType (type)
, mHash (hash)
, mLedgerIndex (ledgerIndex)
, mData (binaryDataToCopy)
{
// Take over the caller's buffer
mData.swap (data);
}

NodeObject::NodeObject (
NodeObject::Ptr NodeObject::createObject (
NodeObjectType type,
LedgerIndex ledgerIndex,
void const* bufferToCopy,
int bytesInBuffer,
Blob& data,
uint256 const & hash)
: mType (type)
, mHash (hash)
, mLedgerIndex (ledgerIndex)
, mData (static_cast <unsigned char const*> (bufferToCopy),
static_cast <unsigned char const*> (bufferToCopy) + bytesInBuffer)
{
// The boost::ref is important or
// else it will be passed by value!
return boost::make_shared <NodeObject> (
type, ledgerIndex, boost::ref (data), hash, PrivateAccess ());
}

NodeObjectType NodeObject::getType () const
@@ -54,14 +54,21 @@ Blob const& NodeObject::getData () const
return mData;
}

bool NodeObject::isCloneOf (NodeObject const& other) const
bool NodeObject::isCloneOf (NodeObject::Ptr const& other) const
{
return
mType == other.mType &&
mHash == other.mHash &&
mLedgerIndex == other.mLedgerIndex &&
mData == other.mData
;
if (mType != other->mType)
return false;

if (mHash != other->mHash)
return false;

if (mLedgerIndex != other->mLedgerIndex)
return false;

if (mData != other->mData)
return false;

return true;
}

//------------------------------------------------------------------------------
@@ -70,7 +77,7 @@ class NodeObjectTests : public UnitTest
{
public:

NodeObjectTests () : UnitTest ("NodeObject")
NodeObjectTests () : UnitTest ("NodeObject", "ripple")
{
}

@@ -42,26 +42,40 @@ public:
*/
typedef UnsignedInteger <32> Hash;

// Please use this one. For a reference use Ptr const&
typedef boost::shared_ptr <NodeObject> Ptr;

// These are DEPRECATED, type names are capitalized.
typedef boost::shared_ptr <NodeObject> pointer;
typedef pointer const& ref;

/** Create from a vector of data.

@note A copy of the data is created.
*/
private:
// This hack is used to make the constructor effectively private
// except for when we use it in the call to make_shared.
// There's no portable way to make make_shared<> a friend work.
struct PrivateAccess { };
public:
// This constructor is private, use createObject instead.
NodeObject (NodeObjectType type,
LedgerIndex ledgerIndex,
Blob const & binaryDataToCopy,
uint256 const & hash);
Blob& data,
uint256 const& hash,
PrivateAccess);

/** Create from an area of memory.
/** Create an object from fields.

@note A copy of the data is created.
The caller's variable is modified during this call. The
underlying storage for the Blob is taken over by the NodeObject.

@param type The type of object.
@param ledgerIndex The ledger in which this object appears.
@param data A buffer containing the payload. The caller's variable
is overwritten.
@param hash The 256-bit hash of the payload data.
*/
NodeObject (NodeObjectType type,
static Ptr createObject (NodeObjectType type,
LedgerIndex ledgerIndex,
void const * bufferToCopy,
int bytesInBuffer,
Blob& data,
uint256 const& hash);

/** Retrieve the type of this object.
@@ -83,7 +97,22 @@ public:

/** See if this object has the same data as another object.
*/
bool isCloneOf (NodeObject const& other) const;
bool isCloneOf (NodeObject::Ptr const& other) const;

/** Binary function that satisfies the strict-weak-ordering requirement.

This compares the hashes of both objects and returns true if
the first hash is considered to go before the second.

@see std::sort
*/
struct LessThan
{
inline bool operator() (NodeObject::Ptr const& lhs, NodeObject::Ptr const& rhs) const noexcept
{
return lhs->getHash () < rhs->getHash ();
}
};

private:
NodeObjectType mType;
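The PrivateAccess trick above generalizes: a private token type keeps outside callers away from the constructor, while boost::make_shared (which must call the constructor directly and cannot portably be made a friend) still works when handed the token from inside the class. A stripped-down sketch of the same pattern:

    #include <boost/make_shared.hpp>
    #include <boost/shared_ptr.hpp>

    class Widget
    {
    private:
        // Only members can name this type, so only members can
        // supply the token that the public constructor requires.
        struct PrivateAccess { };

    public:
        // Effectively private: outside callers cannot construct
        // a PrivateAccess to pass here.
        explicit Widget (PrivateAccess) { }

        static boost::shared_ptr <Widget> create ()
        {
            // make_shared calls the constructor directly, which is
            // fine because we can hand it the token from inside.
            return boost::make_shared <Widget> (PrivateAccess ());
        }
    };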
File diff suppressed because it is too large
@@ -8,31 +8,196 @@
#define RIPPLE_NODESTORE_H_INCLUDED

/** Persistency layer for NodeObject

    A Node is a ledger object which is uniquely identified by a key,
    which is the 256-bit hash of the body of the node. The payload is
    a variable length block of serialized data.

    All ledger data is stored as node objects and as such, needs to
    be persisted between launches. Furthermore, since the set of
    node objects will in general be larger than the amount of available
    memory, purged node objects which are later accessed must be retrieved
    from the node store.
*/
class NodeStore : LeakChecked <NodeStore>
class NodeStore
{
public:
enum
{
/** This is the largest number of key/value pairs we
    will write during a bulk write.
*/
// VFALCO TODO Make this a tunable parameter in the key value pairs
bulkWriteBatchSize = 128

/** Size of the fixed keys, in bytes.

    We use a 256-bit hash for the keys.

    @see NodeObject
*/
,keyBytes = 32 // 256 bit hash
keyBytes = 32,

// This is only used to pre-allocate the array for
// batch objects and does not affect the amount written.
//
batchWritePreallocationSize = 128
};

/** Interface to inform callers of certain activities.
typedef std::vector <NodeObject::Ptr> Batch;

//--------------------------------------------------------------------------

/** Parsed key/value blob into NodeObject components.

    This will extract the information required to construct
    a NodeObject. It also does consistency checking and returns
    the result, so it is possible to determine if the data
    is corrupted without throwing an exception. Note that not all
    forms of corruption are detected, so further analysis will be
    needed to eliminate false positives.

    @note This is the format in which a NodeObject is stored in the
          persistent storage layer.
*/
class Hooks
class DecodedBlob
{
virtual void onRetrieveBegin () { }
virtual void onRetrieveEnd () { }
public:
/** Construct the decoded blob from raw data.
*/
DecodedBlob (void const* key, void const* value, int valueBytes);

/** Determine if the decoding was successful.
*/
bool wasOk () const noexcept { return m_success; }

/** Create a NodeObject from this data.
*/
NodeObject::Ptr createObject ();

private:
bool m_success;

void const* m_key;
LedgerIndex m_ledgerIndex;
NodeObjectType m_objectType;
unsigned char const* m_objectData;
int m_dataBytes;
};

//--------------------------------------------------------------------------

/** Utility for producing flattened node objects.

    These get recycled to prevent many small allocations.

    @note This is the format in which a NodeObject is stored in the
          persistent storage layer.
*/
struct EncodedBlob
{
typedef RecycledObjectPool <EncodedBlob> Pool;

void prepare (NodeObject::Ptr const& object);

void const* getKey () const noexcept { return m_key; }

size_t getSize () const noexcept { return m_size; }

void const* getData () const noexcept { return m_data.getData (); }

private:
void const* m_key;
MemoryBlock m_data;
size_t m_size;
};
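The on-disk layout that EncodedBlob and DecodedBlob work with can be read off the older toBlob/fromBinary code earlier in this commit: a nine-byte header (the 32-bit ledger index stored twice in network byte order, then one type byte) followed by the payload. A sketch of decoding that header:

    #include <arpa/inet.h>  // ntohl
    #include <cstring>
    #include <stdexcept>

    struct Header
    {
        unsigned int ledgerIndex;  // bytes 0-3 (repeated in bytes 4-7)
        unsigned char type;        // byte 8; payload starts at byte 9
    };

    Header decodeHeader (unsigned char const* data, int size)
    {
        if (size < 9)
            throw std::runtime_error ("undersized object");

        unsigned int netIndex;
        std::memcpy (&netIndex, data, 4);

        Header h;
        h.ledgerIndex = ntohl (netIndex); // stored big-endian on disk
        h.type = data [8];
        return h;
    }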

//--------------------------------------------------------------------------

/** Provides the asynchronous scheduling feature.
*/
class Scheduler
{
public:
/** Derived classes perform scheduled tasks.
*/
struct Task
{
virtual ~Task () { }

/** Performs the task.

    The call may take place on a foreign thread.
*/
virtual void performScheduledTask () = 0;
};

/** Schedules a task.

    Depending on the implementation, this could happen
    immediately or get deferred.
*/
virtual void scheduleTask (Task* task) = 0;
};
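The Scheduler doc says a task may run immediately or be deferred; the simplest conforming implementation just runs it inline. A minimal sketch, useful where deterministic ordering matters (for example in unit tests):

    // Runs every task on the caller's thread, immediately.
    // Satisfies the Scheduler contract in the simplest possible way.
    class SynchronousScheduler : public NodeStore::Scheduler
    {
    public:
        void scheduleTask (Task* task)
        {
            task->performScheduledTask ();
        }
    };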

//--------------------------------------------------------------------------

/** A helper to assist with batch writing.

    The batch writes are performed with a scheduled task.

    @see Scheduler
*/
// VFALCO NOTE I'm not entirely happy having placed this here,
//             because whoever needs to use NodeStore certainly doesn't
//             need to see the implementation details of BatchWriter.
//
class BatchWriter : private Scheduler::Task
{
public:
/** This callback does the actual writing.
*/
struct Callback
{
virtual void writeBatch (Batch const& batch) = 0;
};

/** Create a batch writer.
*/
BatchWriter (Callback& callback, Scheduler& scheduler);

/** Destroy a batch writer.

    Anything pending in the batch is written out before this returns.
*/
~BatchWriter ();

/** Store the object.

    This will add to the batch and initiate a scheduled task to
    write the batch out.
*/
void store (NodeObject::ref object);

/** Get an estimate of the amount of writing I/O pending.
*/
int getWriteLoad ();

private:
void performScheduledTask ();
void writeBatch ();
void waitForWriting ();

private:
typedef boost::recursive_mutex LockType;
typedef boost::condition_variable_any CondvarType;

Callback& m_callback;
Scheduler& m_scheduler;
LockType mWriteMutex;
CondvarType mWriteCondition;
int mWriteGeneration;
int mWriteLoad;
bool mWritePending;
Batch mWriteSet;
};
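The way the backends in this commit use BatchWriter is uniform: store() funnels into the writer, and the writer eventually calls back into writeBatch(), which delegates to the backend's synchronous storeBatch(). A sketch of that wiring; SomeBackend is hypothetical and the remaining pure virtuals are omitted:

    // Hypothetical backend showing the BatchWriter wiring used by the
    // LevelDB, HyperLevelDB, and MDB backends in this commit.
    class SomeBackend
        : public NodeStore::Backend
        , public NodeStore::BatchWriter::Callback
    {
    public:
        explicit SomeBackend (NodeStore::Scheduler& scheduler)
            : m_batch (*this, scheduler)
        {
        }

        void store (NodeObject::Ptr const& object)
        {
            m_batch.store (object); // queued; written by a scheduled task
        }

        void writeBatch (NodeStore::Batch const& batch)
        {
            storeBatch (batch); // BatchWriter calls back here for real I/O
        }

        void storeBatch (NodeStore::Batch const& batch); // synchronous write

    private:
        NodeStore::BatchWriter m_batch;
    };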
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Back end used for the store.
|
||||
|
||||
A Backend implements a persistent key/value storage system.
|
||||
@@ -51,92 +216,76 @@ public:
|
||||
unknown
|
||||
};
|
||||
|
||||
Backend ();
|
||||
/** Destroy the backend.
|
||||
|
||||
All open files are closed and flushed. If there are batched
|
||||
writes or other tasks scheduled, they will be completed before
|
||||
this call returns.
|
||||
*/
|
||||
virtual ~Backend () { }
|
||||
|
||||
/** Provides storage for retrieved objects.
|
||||
/** Get the human-readable name of this backend.
|
||||
|
||||
This is used for diagnostic output.
|
||||
*/
|
||||
struct GetCallback
|
||||
{
|
||||
/** Get storage for an object.
|
||||
virtual std::string getName() = 0;
|
||||
|
||||
@param sizeInBytes The number of bytes needed to store the value.
|
||||
|
||||
@return A pointer to a buffer large enough to hold all the bytes.
|
||||
*/
|
||||
virtual void* getStorageForValue (size_t sizeInBytes) = 0;
|
||||
};
|
||||
|
||||
/** Retrieve a single object.
|
||||
/** Fetch a single object.
|
||||
|
||||
If the object is not found or an error is encountered, the
|
||||
result will indicate the condition.
|
||||
|
||||
@note This will be called concurrently.
|
||||
|
||||
@param key A pointer to the key data.
|
||||
@param callback The callback used to obtain storage for the value.
|
||||
@param pObject [out] The created object if successful.
|
||||
|
||||
@return The result of the operation.
|
||||
*/
|
||||
virtual Status get (void const* key, GetCallback* callback) { return notFound; }
|
||||
|
||||
|
||||
|
||||
virtual Status fetch (void const* key, NodeObject::Ptr* pObject) = 0;
|
||||
|
||||
/** Store a single object.

    Depending on the implementation this may happen immediately
    or deferred using a scheduled task.

    @note This will be called concurrently.

    @param object The object to store.
*/
// VFALCO TODO Why should the Backend know or care about NodeObject?
//             It should just deal with a fixed key and raw data.
//
virtual bool store (NodeObject::ref);
//virtual bool put (void const* key, void const* value, int valueBytes) { return false; }
virtual void store (NodeObject::Ptr const& object) = 0;

/** Retrieve an individual object.
/** Store a group of objects.

    @note This function will not be called concurrently with
          itself or @ref store.
*/
virtual NodeObject::pointer retrieve (uint256 const &hash) = 0;
virtual void storeBatch (Batch const& batch) = 0;

// Visit every object in the database
// This function will only be called during an import operation
//
// VFALCO TODO Replace FUNCTION_TYPE with a beast lift.
//
virtual void visitAll (FUNCTION_TYPE <void (NodeObject::pointer)>) = 0;

/** Callback for iterating through objects.

private:
    friend class NodeStoreImp;

    // VFALCO TODO Put this bulk writing logic into a separate class.
    //       NOTE Why are these virtual?
    void bulkWrite (Job &);
    void waitWrite ();
    int getWriteLoad ();

private:
    virtual std::string getDataBaseName() = 0;

    // Store a group of objects
    // This function will only be called from a single thread
    // VFALCO NOTE It looks like NodeStore throws this into the job queue?
    virtual bool bulkStore (const std::vector< NodeObject::pointer >&) = 0;

protected:
    // VFALCO TODO Put this bulk writing logic into a separate class.
    boost::mutex mWriteMutex;
    boost::condition_variable mWriteCondition;
    int mWriteGeneration;
    int mWriteLoad;
    bool mWritePending;
    std::vector <boost::shared_ptr <NodeObject> > mWriteSet;
};

public:
    // Helper functions for the backend
    class BackendHelper

    @see visitAll
*/
struct VisitCallback
{
public:
    virtual void visitObject (NodeObject::Ptr const& object) = 0;
};

public:
    /** Visit every object in the database

        This is usually called during import.

        @see import
    */
    virtual void visitAll (VisitCallback& callback) = 0;

    /** Estimate the number of write operations pending.
    */
    virtual int getWriteLoad () = 0;
};
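For orientation, here is a minimal sketch (not part of the commit) of how the VisitCallback interface above might be implemented and driven by visitAll; CopyingCallback and m_destination are invented names, and the sketch assumes VisitCallback is nested in Backend as shown:

    // Hypothetical VisitCallback that copies every visited object into
    // another backend, e.g. during an import operation.
    struct CopyingCallback : NodeStore::Backend::VisitCallback
    {
        explicit CopyingCallback (NodeStore::Backend& destination)
            : m_destination (destination)
        {
        }

        void visitObject (NodeObject::Ptr const& object)
        {
            m_destination.store (object); // re-store each visited object
        }

        NodeStore::Backend& m_destination;
    };

    // Usage sketch: source.visitAll (callback); walks every object in 'source'.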
//--------------------------------------------------------------------------

/** Factory to produce backends.
*/
class BackendFactory
@@ -152,49 +301,88 @@ public:

    @param keyBytes The fixed number of bytes per key.
    @param keyValues A set of key/value configuration pairs.
    @param scheduler The scheduler to use for running tasks.

    @return A pointer to the Backend object.
*/
virtual Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues) = 0;
virtual Backend* createInstance (size_t keyBytes,
                                 StringPairArray const& keyValues,
                                 Scheduler& scheduler) = 0;
};
public:
    //--------------------------------------------------------------------------

    /** Construct a node store.

        parameters has the format:
        Parameter strings have the format:

            <key>=<value>['|'<key>=<value>]

        The key "type" must exist, it defines the backend. For example
            "type=LevelDB|path=/mnt/ephemeral"
        The key "type" must exist, it defines the choice of backend.
        For example
            `type=LevelDB|path=/mnt/ephemeral`

        @param backendParameters The parameter string for the persistent backend.
        @param fastBackendParameters The parameter string for the ephemeral backend.
        @param cacheSize ?
        @param cacheAge ?
        @param scheduler The scheduler to use for performing asynchronous tasks.

        @return A pointer to the created object.
    */
    // VFALCO NOTE Is cacheSize in bytes? objects? KB?
    //             Is cacheAge in minutes? seconds?
    //             These should be in the parameters.
    //
    static NodeStore* New (String backendParameters,
                           String fastBackendParameters,
                           int cacheSize,
                           int cacheAge);
                           Scheduler& scheduler);
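A hedged usage sketch of the new signature; the parameter strings here are placeholders (later in this commit, ApplicationImp, which now derives from NodeStore::Scheduler, passes itself as the scheduler):

    // Sketch only: a persistent LevelDB backend, no ephemeral backend,
    // and 'scheduler' implementing NodeStore::Scheduler.
    ScopedPointer <NodeStore> store (NodeStore::New (
        "type=LevelDB|path=db/hashnode",   // persistent backend
        "",                                // ephemeral backend (none)
        scheduler));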
/** Destroy the node store.

    All pending operations are completed, pending writes flushed,
    and files closed before this returns.
*/
virtual ~NodeStore () { }

/** Add the specified backend factory to the list of available factories.

    The names of available factories are compared against the "type"
    value in the parameter list on construction.

    @param factory The factory to add.
*/
static void addBackendFactory (BackendFactory& factory);

// VFALCO TODO Document this.
virtual float getCacheHitRate () = 0;
// VFALCO TODO Document this.
virtual bool store (NodeObjectType type, uint32 index, Blob const& data,

/** Fetch an object.

    If the object is known to be not in the database, is not found
    in the database, or failed to load correctly, nullptr is
    returned.

    @note This can be called concurrently.

    @param hash The key of the object to retrieve.

    @return The object, or nullptr if it couldn't be retrieved.
*/
virtual NodeObject::pointer fetch (uint256 const& hash) = 0;
/** Store the object.

    The caller's Blob parameter is overwritten.

    @param type The type of object.
    @param ledgerIndex The ledger in which the object appears.
    @param data The payload of the object. The caller's
                variable is overwritten.
    @param hash The 256-bit hash of the payload data.

    @return `true` if the object was stored?
*/
virtual void store (NodeObjectType type,
                    uint32 ledgerIndex,
                    Blob& data,
                    uint256 const& hash) = 0;
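Because store() now takes a mutable Blob&, call sites elsewhere in this commit switch from Serializer::peekData() to modData(); a hedged sketch of the calling convention ('seq' and 'hash' are placeholders):

    // The store may consume or overwrite the caller's buffer, so the
    // mutable accessor is required; treat 's' as garbage afterwards.
    Serializer s;
    s.add32 (HashPrefix::innerNode);
    getApp().getNodeStore ().store (hotACCOUNT_NODE, seq, s.modData (), hash);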
// VFALCO TODO Document this.
//        TODO Replace uint256 with void*
//
virtual NodeObject::pointer retrieve (uint256 const& hash) = 0;
virtual float getCacheHitRate () = 0;

// VFALCO TODO Document this.
//        TODO Document the parameter meanings.
@@ -203,13 +391,14 @@ public:
// VFALCO TODO Document this.
virtual void sweep () = 0;

// VFALCO TODO Document this.
//             What are the units of the return value?
/** Retrieve the estimated number of pending write operations.

    This is used for diagnostics.
*/
virtual int getWriteLoad () = 0;

// VFALCO TODO Document this.
//        NOTE What's the return value?
virtual int import (String sourceBackendParameters) = 0;
virtual void import (String sourceBackendParameters) = 0;
};

#endif
@@ -15,29 +15,32 @@ public:
    {
    }

    std::string getDataBaseName()
    std::string getName()
    {
        return std::string ();
    }

    bool store (NodeObject::ref obj)
    Status fetch (void const*, NodeObject::Ptr*)
    {
        return false;
        return notFound;
    }

    bool bulkStore (const std::vector< NodeObject::pointer >& objs)
    void store (NodeObject::ref object)
    {
        return false;
    }

    NodeObject::pointer retrieve (uint256 const& hash)
    void storeBatch (NodeStore::Batch const& batch)
    {
        return NodeObject::pointer ();
    }

    void visitAll (FUNCTION_TYPE <void (NodeObject::pointer)> func)
    void visitAll (VisitCallback& callback)
    {
    }

    int getWriteLoad ()
    {
        return 0;
    }
};

//------------------------------------------------------------------------------
@@ -62,7 +65,10 @@ String NullBackendFactory::getName () const
    return "none";
}

NodeStore::Backend* NullBackendFactory::createInstance (size_t, StringPairArray const&)
NodeStore::Backend* NullBackendFactory::createInstance (
    size_t,
    StringPairArray const&,
    NodeStore::Scheduler&)
{
    return new NullBackendFactory::Backend;
}

@@ -23,7 +23,10 @@ public:
    static NullBackendFactory& getInstance ();

    String getName () const;
    NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues);

    NodeStore::Backend* createInstance (size_t keyBytes,
                                        StringPairArray const& keyValues,
                                        NodeStore::Scheduler& scheduler);
};

#endif
@@ -28,6 +28,8 @@ static const char* s_nodeStoreDBInit [] =

static int s_nodeStoreDBCount = NUMBER (s_nodeStoreDBInit);

//------------------------------------------------------------------------------

class SqliteBackendFactory::Backend : public NodeStore::Backend
{
public:
@@ -42,24 +44,72 @@ public:
        //
        s << "PRAGMA cache_size=-" << String (theConfig.getSize(siHashNodeDBCache) * 1024);
        m_db->getDB()->executeSQL (s.toStdString ().c_str ());

        //m_db->getDB()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
        //        (theConfig.getSize(siHashNodeDBCache) * 1024)));
    }

    ~Backend()
    {
        delete m_db;
    }

    std::string getDataBaseName()
    std::string getName()
    {
        return m_name;
    }

    bool bulkStore (const std::vector< NodeObject::pointer >& objects)
    //--------------------------------------------------------------------------

    Status fetch (void const* key, NodeObject::Ptr* pObject)
    {
        Status result = ok;

        pObject->reset ();

        {
            ScopedLock sl (m_db->getDBLock());

            uint256 const hash (key);

            static SqliteStatement pSt (m_db->getDB()->getSqliteDB(),
                "SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;");

            pSt.bind (1, hash.GetHex());

            if (pSt.isRow (pSt.step()))
            {
                // VFALCO NOTE This is unfortunately needed,
                //             the DatabaseCon creates the blob?
                Blob data (pSt.getBlob (2));
                *pObject = NodeObject::createObject (
                    getTypeFromString (pSt.peekString (0)),
                    pSt.getUInt32 (1),
                    data,
                    hash);
            }
            else
            {
                result = notFound;
            }

            pSt.reset();
        }

        return result;
    }

    void store (NodeObject::ref object)
    {
        NodeStore::Batch batch;

        batch.push_back (object);

        storeBatch (batch);
    }

    void storeBatch (NodeStore::Batch const& batch)
    {
        // VFALCO TODO Rewrite this to use Beast::db

        ScopedLock sl (m_db->getDBLock());

        static SqliteStatement pStB (m_db->getDB()->getSqliteDB(), "BEGIN TRANSACTION;");
        static SqliteStatement pStE (m_db->getDB()->getSqliteDB(), "END TRANSACTION;");
        static SqliteStatement pSt (m_db->getDB()->getSqliteDB(),
@@ -69,42 +119,22 @@ public:
        pStB.step();
        pStB.reset();

        BOOST_FOREACH(NodeObject::ref object, objects)
        BOOST_FOREACH (NodeObject::Ptr const& object, batch)
        {
            bind(pSt, object);
            doBind (pSt, object);

            pSt.step();
            pSt.reset();
        }

        pStE.step();
        pStE.reset();

        return true;
    }

    NodeObject::pointer retrieve(uint256 const& hash)
    void visitAll (VisitCallback& callback)
    {
        NodeObject::pointer ret;
        // No lock needed as per the visitAll() API

        {
            ScopedLock sl(m_db->getDBLock());
            static SqliteStatement pSt(m_db->getDB()->getSqliteDB(),
                "SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;");

            pSt.bind(1, hash.GetHex());

            if (pSt.isRow(pSt.step()))
                ret = boost::make_shared<NodeObject>(getType(pSt.peekString(0)), pSt.getUInt32(1), pSt.getBlob(2), hash);

            pSt.reset();
        }

        return ret;
    }

    void visitAll(FUNCTION_TYPE<void (NodeObject::pointer)> func)
    {
        uint256 hash;

        static SqliteStatement pSt(m_db->getDB()->getSqliteDB(),
@@ -113,13 +143,30 @@ public:
        while (pSt.isRow (pSt.step()))
        {
            hash.SetHexExact(pSt.getString(3));
            func(boost::make_shared<NodeObject>(getType(pSt.peekString(0)), pSt.getUInt32(1), pSt.getBlob(2), hash));

            // VFALCO NOTE This is unfortunately needed,
            //             the DatabaseCon creates the blob?
            Blob data (pSt.getBlob (2));
            NodeObject::Ptr const object (NodeObject::createObject (
                getTypeFromString (pSt.peekString (0)),
                pSt.getUInt32 (1),
                data,
                hash));

            callback.visitObject (object);
        }

        pSt.reset ();
    }

    void bind(SqliteStatement& statement, NodeObject::ref object)
    int getWriteLoad ()
    {
        return 0;
    }

    //--------------------------------------------------------------------------

    void doBind (SqliteStatement& statement, NodeObject::ref object)
    {
        char const* type;
        switch (object->getType())
@@ -137,20 +184,21 @@ public:
        statement.bindStatic(4, object->getData());
    }
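For reference, an illustration (not from the commit) of the single-character type encoding that doBind() writes and getTypeFromString() below reads back:

    //   hotLEDGER            <-> "L"
    //   hotTRANSACTION       <-> "T"
    //   hotACCOUNT_NODE      <-> "A"
    //   hotTRANSACTION_NODE  <-> "N"
    assert (getTypeFromString ("L") == hotLEDGER);
    assert (getTypeFromString ("")  == hotUNKNOWN); // empty string -> unknown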
    NodeObjectType getType(std::string const& type)
    NodeObjectType getTypeFromString (std::string const& s)
    {
        NodeObjectType htype = hotUNKNOWN;
        if (!type.empty())
        NodeObjectType type = hotUNKNOWN;

        if (!s.empty ())
        {
            switch (type[0])
            switch (s [0])
            {
            case 'L': htype = hotLEDGER; break;
            case 'T': htype = hotTRANSACTION; break;
            case 'A': htype = hotACCOUNT_NODE; break;
            case 'N': htype = hotTRANSACTION_NODE; break;
            case 'L': type = hotLEDGER; break;
            case 'T': type = hotTRANSACTION; break;
            case 'A': type = hotACCOUNT_NODE; break;
            case 'N': type = hotTRANSACTION_NODE; break;
            }
        }
        return htype;
        return type;
    }

private:
@@ -181,7 +229,10 @@ String SqliteBackendFactory::getName () const
    return "Sqlite";
}

NodeStore::Backend* SqliteBackendFactory::createInstance (size_t keyBytes, StringPairArray const& keyValues)
NodeStore::Backend* SqliteBackendFactory::createInstance (
    size_t keyBytes,
    StringPairArray const& keyValues,
    NodeStore::Scheduler& scheduler)
{
    return new Backend (keyBytes, keyValues ["path"].toStdString ());
}

@@ -21,7 +21,10 @@ public:
    static SqliteBackendFactory& getInstance ();

    String getName () const;
    NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues);

    NodeStore::Backend* createInstance (size_t keyBytes,
                                        StringPairArray const& keyValues,
                                        NodeStore::Scheduler& scheduler);
};

#endif
@@ -365,14 +365,24 @@ public:
        return reinterpret_cast<unsigned char*> (pn + WIDTH);
    }

    const unsigned char* begin () const
    unsigned char const* cbegin () const noexcept
    {
        return reinterpret_cast<const unsigned char*> (pn);
        return reinterpret_cast <unsigned char const*> (pn);
    }

    const unsigned char* end () const
    unsigned char const* cend () const noexcept
    {
        return reinterpret_cast<const unsigned char*> (pn + WIDTH);
        return reinterpret_cast<unsigned char const*> (pn + WIDTH);
    }

    const unsigned char* begin () const noexcept
    {
        return cbegin ();
    }

    const unsigned char* end () const noexcept
    {
        return cend ();
    }

    unsigned int size () const

@@ -402,7 +402,7 @@ private:
class ValidatorListTests : public UnitTest
{
public:
    ValidatorListTests () : UnitTest ("ValidatorList")
    ValidatorListTests () : UnitTest ("ValidatorList", "ripple")
    {
    }
@@ -222,23 +222,33 @@
# Examples: RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE
#           shfArahZT9Q9ckTf3s1psJ7C7qzVN
#
#
#
# [node_db]
# [temp_db]
#
# Set the choice of databases for storing Node objects.
#
# Format (without spaces):
#   <key> '=' <value> [ '|' <key> '=' <value> ]...
#
# Examples:
#   type=HyperLevelDB|path=db/hashnode
# Choices for 'type':
#
# Choices for 'type' (not case-sensitive)
#   HyperLevelDB      Use an improved version of LevelDB (preferred)
#   LevelDB           Use Google's LevelDB database (deprecated)
#   MDB               Use MDB
#   none              Use no backend
#   KeyvaDB           Use OpenCoin's KeyvaDB (experimental)
#   SQLite            Use SQLite
#
# Required keys:
#   path              Location to store the database (all types)
#
# Optional keys:
#   ...
#   (none yet)
#
# Notes
#   The 'node_db' entry configures the primary, persistent storage.
#   The 'temp_db' configures a look-aside cache for high volume storage
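An illustrative pair of stanzas consistent with the format described above (the path is a placeholder, not a recommendation):

    [node_db]
    type=HyperLevelDB|path=db/hashnode

    [temp_db]
    type=none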
@@ -48,8 +48,7 @@ void NetworkOPs::processNetTimer ()
    // VFALCO NOTE This is for diagnosing a crash on exit
    Application& app (getApp ());
    ILoadManager& mgr (app.getLoadManager ());

    getApp().getLoadManager ().resetDeadlockDetector ();
    mgr.resetDeadlockDetector ();

    std::size_t const numPeers = getApp().getPeers ().getPeerVector ().size ();

@@ -16,6 +16,7 @@ class ApplicationImp
    : public Application
    , public SharedSingleton <ApplicationImp>
    , public Validators::Listener
    , public NodeStore::Scheduler
    , LeakChecked <ApplicationImp>
{
public:
@@ -46,15 +47,14 @@ public:
    , mNetOps (new NetworkOPs (&mLedgerMaster))
    , m_rpcServerHandler (*mNetOps)
    , mTempNodeCache ("NodeCache", 16384, 90)
    , m_nodeStore (NodeStore::New (
        theConfig.NODE_DB,
        theConfig.FASTNODE_DB,
        16384,
        300))
    , mSLECache ("LedgerEntryCache", 4096, 120)
    , mSNTPClient (mAuxService)
    , mJobQueue (mIOService)
    // VFALCO New stuff
    , m_nodeStore (NodeStore::New (
        theConfig.NODE_DB,
        theConfig.FASTNODE_DB,
        *this))
    , m_validators (Validators::New (this))
    , mFeatures (IFeatures::New (2 * 7 * 24 * 60 * 60, 200)) // two weeks, 200/256
    , mFeeVote (IFeeVote::New (10, 50 * SYSTEM_CURRENCY_PARTS, 12.5 * SYSTEM_CURRENCY_PARTS))
@@ -92,6 +92,23 @@ public:
    delete mWalletDB;
}
//--------------------------------------------------------------------------

static void callScheduledTask (NodeStore::Scheduler::Task* task, Job&)
{
    task->performScheduledTask ();
}

void scheduleTask (NodeStore::Scheduler::Task* task)
{
    getJobQueue ().addJob (
        jtWRITE,
        "NodeObject::store",
        BIND_TYPE (&ApplicationImp::callScheduledTask, task, P_1));
}
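A hedged sketch of what a scheduled task might look like on the NodeStore side, assuming Task exposes only the performScheduledTask() hook invoked above (FlushTask is an invented name):

    struct FlushTask : NodeStore::Scheduler::Task
    {
        void performScheduledTask ()
        {
            // runs on a JobQueue thread as a jtWRITE job,
            // e.g. draining a batch of pending writes
        }
    };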
//--------------------------------------------------------------------------

LocalCredentials& getLocalCredentials ()
{
    return m_localCredentials;
@@ -272,7 +289,6 @@ private:
    ScopedPointer <NetworkOPs> mNetOps;
    RPCServerHandler m_rpcServerHandler;
    NodeCache mTempNodeCache;
    ScopedPointer <NodeStore> m_nodeStore;
    SLECache mSLECache;
    SNTPClient mSNTPClient;
    JobQueue mJobQueue;
@@ -280,16 +296,17 @@ private:
    OrderBookDB mOrderBookDB;

    // VFALCO Clean stuff
    beast::ScopedPointer <Validators> m_validators;
    beast::ScopedPointer <IFeatures> mFeatures;
    beast::ScopedPointer <IFeeVote> mFeeVote;
    beast::ScopedPointer <ILoadFeeTrack> mFeeTrack;
    beast::ScopedPointer <IHashRouter> mHashRouter;
    beast::ScopedPointer <IValidations> mValidations;
    beast::ScopedPointer <UniqueNodeList> mUNL;
    beast::ScopedPointer <IProofOfWorkFactory> mProofOfWorkFactory;
    beast::ScopedPointer <IPeers> mPeers;
    beast::ScopedPointer <ILoadManager> m_loadManager;
    ScopedPointer <NodeStore> m_nodeStore;
    ScopedPointer <Validators> m_validators;
    ScopedPointer <IFeatures> mFeatures;
    ScopedPointer <IFeeVote> mFeeVote;
    ScopedPointer <ILoadFeeTrack> mFeeTrack;
    ScopedPointer <IHashRouter> mHashRouter;
    ScopedPointer <IValidations> mValidations;
    ScopedPointer <UniqueNodeList> mUNL;
    ScopedPointer <IProofOfWorkFactory> mProofOfWorkFactory;
    ScopedPointer <IPeers> mPeers;
    ScopedPointer <ILoadManager> m_loadManager;
    // VFALCO End Clean stuff

    DatabaseCon* mRpcDB;
@@ -382,7 +399,7 @@ void ApplicationImp::setup ()

    if (!theConfig.DEBUG_LOGFILE.empty ())
    {
        // Let BEAST_DEBUG messages go to the file but only WARNING or higher to regular output (unless verbose)
        // Let debug messages go to the file but only WARNING or higher to regular output (unless verbose)
        Log::setLogFile (theConfig.DEBUG_LOGFILE);

        if (Log::getMinSeverity () > lsDEBUG)

@@ -65,12 +65,12 @@ public:

    char const* getFileName () const noexcept
    {
        return m_fileName;
        return m_fileName.get ();
    }

    int getLineNumber () const noexcept
    {
        return m_lineNumber;
        return m_lineNumber.get ();
    }

private:
@@ -78,19 +78,19 @@ public:

    void setOwner (char const* fileName, int lineNumber)
    {
        m_fileName = fileName;
        m_lineNumber = lineNumber;
        m_fileName.set (fileName);
        m_lineNumber.set (lineNumber);
    }

    void resetOwner ()
    {
        m_fileName = "";
        m_lineNumber = 0;
        m_fileName.set ("");
        m_lineNumber.set (0);
    }

    boost::recursive_mutex m_mutex;
    char const* m_fileName;
    int m_lineNumber;
    Atomic <char const*> m_fileName;
    Atomic <int> m_lineNumber;
};

class ScopedLockType
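A hedged sketch of why the owner fields become Atomic: a watchdog thread can now sample the current holder without a data race (the function name and parameter type are invented for illustration):

    // get() is an atomic read, so the file/line pair cannot tear while
    // another thread is inside setOwner().
    void logLockOwner (TrackedMutexType const& lock)  // hypothetical type
    {
        Log::out () << "held by " << lock.getFileName ()
                    << ":" << lock.getLineNumber ();
    }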
@@ -156,15 +156,6 @@ static void runBeastUnitTests (std::string const& individualTest = "")
    {
        tr.runTest (individualTest.c_str ());
    }

    // Report
    for (int i = 0; i < tr.getNumResults (); ++i)
    {
        UnitTests::TestResult const& r (*tr.getResult (i));

        for (int j = 0; j < r.messages.size (); ++j)
            Log::out () << r.messages [j].toStdString ();
    }
}

//------------------------------------------------------------------------------
@@ -257,16 +248,16 @@ int rippleMain (int argc, char** argv)
    p.add ("parameters", -1);

    // These must be added before the Application object is created
    NodeStore::addBackendFactory (KeyvaDBBackendFactory::getInstance ());
    NodeStore::addBackendFactory (LevelDBBackendFactory::getInstance ());
    NodeStore::addBackendFactory (NullBackendFactory::getInstance ());
    NodeStore::addBackendFactory (SqliteBackendFactory::getInstance ());
#if RIPPLE_HYPERLEVELDB_AVAILABLE
    NodeStore::addBackendFactory (HyperLevelDBBackendFactory::getInstance ());
#endif
    NodeStore::addBackendFactory (KeyvaDBBackendFactory::getInstance ());
    NodeStore::addBackendFactory (LevelDBBackendFactory::getInstance ());
#if RIPPLE_MDB_AVAILABLE
    NodeStore::addBackendFactory (MdbBackendFactory::getInstance ());
#endif
    NodeStore::addBackendFactory (NullBackendFactory::getInstance ());
    NodeStore::addBackendFactory (SqliteBackendFactory::getInstance ());
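A third-party backend would register the same way before the Application object is created (MyBackendFactory is hypothetical):

    NodeStore::addBackendFactory (MyBackendFactory::getInstance ());
    // afterwards, "type=MyBackend|path=..." selects it via [node_db]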
    if (! RandomNumbers::getInstance ().initialize ())
    {

@@ -1554,7 +1554,7 @@ void PeerImp::recvGetObjectByHash (const boost::shared_ptr<protocol::TMGetObject
    if (obj.has_hash () && (obj.hash ().size () == (256 / 8)))
    {
        memcpy (hash.begin (), obj.hash ().data (), 256 / 8);
        NodeObject::pointer hObj = getApp().getNodeStore ().retrieve (hash);
        NodeObject::pointer hObj = getApp().getNodeStore ().fetch (hash);

        if (hObj)
        {

@@ -236,7 +236,7 @@ IProofOfWorkFactory* IProofOfWorkFactory::New ()
class ProofOfWorkTests : public UnitTest
{
public:
    ProofOfWorkTests () : UnitTest ("ProofOfWork")
    ProofOfWorkTests () : UnitTest ("ProofOfWork", "ripple")
    {
    }

@@ -205,7 +205,7 @@ SHAMapTreeNode::pointer SHAMap::getNode (const SHAMapNode& id, uint256 const& ha
    if (node)
    {
#ifdef BEAST_DEBUG
#if BEAST_DEBUG

        if (node->getNodeHash () != hash)
        {
@@ -213,7 +213,7 @@ SHAMapTreeNode::pointer SHAMap::getNode (const SHAMapNode& id, uint256 const& ha
            WriteLog (lsFATAL, SHAMap) << "ID: " << id;
            WriteLog (lsFATAL, SHAMap) << "TgtHash " << hash;
            WriteLog (lsFATAL, SHAMap) << "NodHash " << node->getNodeHash ();
            throw std::runtime_error ("invalid node");
            Throw (std::runtime_error ("invalid node"));
        }

#endif
@@ -230,7 +230,7 @@ SHAMapTreeNode* SHAMap::getNodePointer (const SHAMapNode& id, uint256 const& has
    SHAMapTreeNode* ret = getNodePointerNT (id, hash);

    if (!ret)
        throw SHAMapMissingNode (mType, id, hash);
        Throw (SHAMapMissingNode (mType, id, hash));

    return ret;
}
@@ -251,7 +251,7 @@ SHAMapTreeNode* SHAMap::getNodePointer (const SHAMapNode& id, uint256 const& has
    SHAMapTreeNode* ret = getNodePointerNT (id, hash, filter);

    if (!ret)
        throw SHAMapMissingNode (mType, id, hash);
        Throw (SHAMapMissingNode (mType, id, hash));

    return ret;
}
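The throw-to-Throw conversions in these hunks funnel every throw site through one function; a hedged sketch of the idea (the real beast::Throw may differ in signature and behavior):

    // One central place to set a debugger breakpoint or add logging
    // for every throw site, without changing exception semantics.
    template <class E>
    void Throw (E const& e)
    {
        throw e;
    }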
@@ -493,7 +493,7 @@ SHAMapItem::pointer SHAMap::peekNextItem (uint256 const& id, SHAMapTreeNode::TNT
    firstNode = firstBelow (firstNode);

    if (!firstNode || firstNode->isInner ())
        throw std::runtime_error ("missing/corrupt node");
        Throw (std::runtime_error ("missing/corrupt node"));

    type = firstNode->getType ();
    return firstNode->peekItem ();
@@ -531,7 +531,7 @@ SHAMapItem::pointer SHAMap::peekPrevItem (uint256 const& id)
    SHAMapTreeNode* item = firstBelow (node.get ());

    if (!item)
        throw std::runtime_error ("missing node");
        Throw (std::runtime_error ("missing node"));

    return item->peekItem ();
}
@@ -597,7 +597,7 @@ bool SHAMap::delItem (uint256 const& id)
    std::stack<SHAMapTreeNode::pointer> stack = getStack (id, true);

    if (stack.empty ())
        throw std::runtime_error ("missing node");
        Throw (std::runtime_error ("missing node"));

    SHAMapTreeNode::pointer leaf = stack.top ();
    stack.pop ();
@@ -678,7 +678,7 @@ bool SHAMap::addGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasMeta
    std::stack<SHAMapTreeNode::pointer> stack = getStack (tag, true);

    if (stack.empty ())
        throw std::runtime_error ("missing node");
        Throw (std::runtime_error ("missing node"));

    SHAMapTreeNode::pointer node = stack.top ();
    stack.pop ();
@@ -703,7 +703,7 @@ bool SHAMap::addGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasMeta
        WriteLog (lsFATAL, SHAMap) << "NewNode: " << *newNode;
        dump ();
        assert (false);
        throw std::runtime_error ("invalid inner node");
        Throw (std::runtime_error ("invalid inner node"));
    }

    trackNewNode (newNode);
@@ -776,7 +776,7 @@ bool SHAMap::updateGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasM
    std::stack<SHAMapTreeNode::pointer> stack = getStack (tag, true);

    if (stack.empty ())
        throw std::runtime_error ("missing node");
        Throw (std::runtime_error ("missing node"));

    SHAMapTreeNode::pointer node = stack.top ();
    stack.pop ();
@@ -810,7 +810,7 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternal (const SHAMapNode& id, uint256
    SHAMapTreeNode::pointer ret = fetchNodeExternalNT (id, hash);

    if (!ret)
        throw SHAMapMissingNode (mType, id, hash);
        Throw (SHAMapMissingNode (mType, id, hash));

    return ret;
}
@@ -825,8 +825,7 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternalNT (const SHAMapNode& id, uint2
    // These are for diagnosing a crash on exit
    Application& app (getApp ());
    NodeStore& nodeStore (app.getNodeStore ());

    NodeObject::pointer obj (getApp().getNodeStore ().retrieve (hash));
    NodeObject::pointer obj (nodeStore.fetch (hash));

    if (!obj)
    {
@@ -889,8 +888,11 @@ bool SHAMap::fetchRoot (uint256 const& hash, SHAMapSyncFilter* filter)
    }

    SHAMapTreeNode::pointer newRoot = fetchNodeExternalNT(SHAMapNode(), hash);

    if (newRoot)
    {
        root = newRoot;
    }
    else
    {
        Blob nodeData;
@@ -939,7 +941,7 @@ int SHAMap::flushDirty (DirtyMap& map, int maxNodes, NodeObjectType t, uint32 se

#endif

    getApp().getNodeStore ().store (t, seq, s.peekData (), it->second->getNodeHash ());
    getApp().getNodeStore ().store (t, seq, s.modData (), it->second->getNodeHash ());

    if (flushed++ >= maxNodes)
        return flushed;

@@ -128,7 +128,7 @@ SHAMapNode SHAMapNode::getChildNodeID (int m) const
// Which branch would contain the specified hash
int SHAMapNode::selectBranch (uint256 const& hash) const
{
#ifdef PARANOID
#if RIPPLE_VERIFY_NODEOBJECT_KEYS

    if (mDepth >= 64)
    {

@@ -243,7 +243,7 @@ SHAMapAddNode SHAMap::addRootNode (Blob const& rootNode, SHANodeFormat format,
    {
        Serializer s;
        root->addRaw (s, snfPREFIX);
        filter->gotNode (false, *root, root->getNodeHash (), s.peekData (), root->getType ());
        filter->gotNode (false, *root, root->getNodeHash (), s.modData (), root->getType ());
    }

    return SHAMapAddNode::useful ();
@@ -281,7 +281,7 @@ SHAMapAddNode SHAMap::addRootNode (uint256 const& hash, Blob const& rootNode, SH
    {
        Serializer s;
        root->addRaw (s, snfPREFIX);
        filter->gotNode (false, *root, root->getNodeHash (), s.peekData (), root->getType ());
        filter->gotNode (false, *root, root->getNodeHash (), s.modData (), root->getType ());
    }

    return SHAMapAddNode::useful ();
@@ -345,7 +345,7 @@ SHAMapAddNode SHAMap::addKnownNode (const SHAMapNode& node, Blob const& rawNode,
    {
        Serializer s;
        newNode->addRaw (s, snfPREFIX);
        filter->gotNode (false, node, iNode->getChildHash (branch), s.peekData (), newNode->getType ());
        filter->gotNode (false, node, iNode->getChildHash (branch), s.modData (), newNode->getType ());
    }

    mTNByID[node] = newNode;

@@ -12,29 +12,18 @@
class SHAMapSyncFilter
{
public:
    SHAMapSyncFilter ()
    {
    }

    virtual ~SHAMapSyncFilter ()
    {
    }
    virtual ~SHAMapSyncFilter () { }

    // Note that the nodeData is overwritten by this call
    virtual void gotNode (bool fromFilter,
                          SHAMapNode const& id,
                          uint256 const& nodeHash,
                          Blob const& nodeData,
                          SHAMapTreeNode::TNType type)
    {
    }
                          Blob& nodeData,
                          SHAMapTreeNode::TNType type) = 0;

    virtual bool haveNode (SHAMapNode const& id,
                           uint256 const& nodeHash,
                           Blob& nodeData)
    {
        return false;
    }
                           Blob& nodeData) = 0;
};

#endif
// vim:ts=4

@@ -9,7 +9,7 @@ ConsensusTransSetSF::ConsensusTransSetSF ()
}

void ConsensusTransSetSF::gotNode (bool fromFilter, const SHAMapNode& id, uint256 const& nodeHash,
                                   Blob const& nodeData, SHAMapTreeNode::TNType type)
                                   Blob& nodeData, SHAMapTreeNode::TNType type)
{
    if (fromFilter)
        return;
@@ -70,7 +70,7 @@ AccountStateSF::AccountStateSF (uint32 ledgerSeq)
void AccountStateSF::gotNode (bool fromFilter,
                              SHAMapNode const& id,
                              uint256 const& nodeHash,
                              Blob const& nodeData,
                              Blob& nodeData,
                              SHAMapTreeNode::TNType)
{
    getApp().getNodeStore ().store (hotACCOUNT_NODE, mLedgerSeq, nodeData, nodeHash);
@@ -93,7 +93,7 @@ TransactionStateSF::TransactionStateSF (uint32 ledgerSeq)
void TransactionStateSF::gotNode (bool fromFilter,
                                  SHAMapNode const& id,
                                  uint256 const& nodeHash,
                                  Blob const& nodeData,
                                  Blob& nodeData,
                                  SHAMapTreeNode::TNType type)
{
    getApp().getNodeStore ().store (

@@ -17,10 +17,11 @@ class ConsensusTransSetSF : public SHAMapSyncFilter
public:
    ConsensusTransSetSF ();

    // Note that the nodeData is overwritten by this call
    void gotNode (bool fromFilter,
                  SHAMapNode const& id,
                  uint256 const& nodeHash,
                  Blob const& nodeData,
                  Blob& nodeData,
                  SHAMapTreeNode::TNType);

    bool haveNode (SHAMapNode const& id,
@@ -35,10 +36,11 @@ class AccountStateSF : public SHAMapSyncFilter
public:
    explicit AccountStateSF (uint32 ledgerSeq);

    // Note that the nodeData is overwritten by this call
    void gotNode (bool fromFilter,
                  SHAMapNode const& id,
                  uint256 const& nodeHash,
                  Blob const& nodeData,
                  Blob& nodeData,
                  SHAMapTreeNode::TNType);

    bool haveNode (SHAMapNode const& id,
@@ -56,10 +58,11 @@ class TransactionStateSF : public SHAMapSyncFilter
public:
    explicit TransactionStateSF (uint32 ledgerSeq);

    // Note that the nodeData is overwritten by this call
    void gotNode (bool fromFilter,
                  SHAMapNode const& id,
                  uint256 const& nodeHash,
                  Blob const& nodeData,
                  Blob& nodeData,
                  SHAMapTreeNode::TNType);

    bool haveNode (SHAMapNode const& id,

@@ -207,7 +207,7 @@ SHAMapTreeNode::SHAMapTreeNode (const SHAMapNode& id, Blob const& rawNode, uint3
    if (hashValid)
    {
        mHash = hash;
#ifdef PARANOID
#if RIPPLE_VERIFY_NODEOBJECT_KEYS
        updateHash ();
        assert (mHash == hash);
#endif
@@ -225,7 +225,7 @@ bool SHAMapTreeNode::updateHash ()
    if (mIsBranch != 0)
    {
        nh = Serializer::getPrefixHash (HashPrefix::innerNode, reinterpret_cast<unsigned char*> (mHashes), sizeof (mHashes));
#ifdef PARANOID
#if RIPPLE_VERIFY_NODEOBJECT_KEYS
        Serializer s;
        s.add32 (HashPrefix::innerNode);