diff --git a/BeastConfig.h b/BeastConfig.h
index 4c35072c8..3b82014da 100644
--- a/BeastConfig.h
+++ b/BeastConfig.h
@@ -120,4 +120,19 @@
 //#define BEAST_BIND_USES_TR1 1
 //#define BEAST_BIND_USES_BOOST 1
 
+//------------------------------------------------------------------------------
+//
+// Ripple compilation settings
+//
+//------------------------------------------------------------------------------
+
+/** Config: RIPPLE_VERIFY_NODEOBJECT_KEYS
+
+    This verifies that the hash of node objects matches the payload.
+    It is quite expensive so normally this is turned off!
+*/
+#ifndef RIPPLE_VERIFY_NODEOBJECT_KEYS
+//#define RIPPLE_VERIFY_NODEOBJECT_KEYS 1
+#endif
+
 #endif
diff --git a/TODO.txt b/TODO.txt
index 38c8cdd5b..3fb99e913 100644
--- a/TODO.txt
+++ b/TODO.txt
@@ -3,16 +3,19 @@ RIPPLE TODO
 --------------------------------------------------------------------------------
 
 Vinnie's Short List (Changes day to day)
+- Finish writing the NodeStore unit tests
+- Finish converting backends to new API
 - Memory NodeStore::Backend for unit tests
+- Performance test for NodeStore::Backend
 - Improved Mutex to track deadlocks
-- Convert some Ripple boost unit tests to Beast.
-- Eliminate new technical in NodeStore::Backend
 - Work on KeyvaDB
+- Import beast::db and use it in SQliteBackend
 - Finish unit tests and code for Validators
+- Convert some Ripple boost unit tests to Beast.
 
 --------------------------------------------------------------------------------
 
-- Replace master lock with
+- Replace all throw with beast::Throw
 
 - Replace base_uint and uintXXX with UnsignedInteger
   * Need to specialize UnsignedInteger to work efficiently with 4 and 8 byte
diff --git a/modules/ripple_app/ledger/Ledger.cpp b/modules/ripple_app/ledger/Ledger.cpp
index 4b0300a55..6878b37d1 100644
--- a/modules/ripple_app/ledger/Ledger.cpp
+++ b/modules/ripple_app/ledger/Ledger.cpp
@@ -530,10 +530,12 @@ void Ledger::saveAcceptedLedger (Job&, bool fromConsensus)
     assert (getTransHash () == mTransactionMap->getHash ());
 
     // Save the ledger header in the hashed object store
-    Serializer s (128);
-    s.add32 (HashPrefix::ledgerMaster);
-    addRaw (s);
-    getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, s.peekData (), mHash);
+    {
+        Serializer s (128);
+        s.add32 (HashPrefix::ledgerMaster);
+        addRaw (s);
+        getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, s.modData (), mHash);
+    }
 
     AcceptedLedger::pointer aLedger = AcceptedLedger::makeAcceptedLedger (shared_from_this ());
diff --git a/modules/ripple_app/ledger/ripple_InboundLedger.cpp b/modules/ripple_app/ledger/ripple_InboundLedger.cpp
index e9101345d..c38ed4547 100644
--- a/modules/ripple_app/ledger/ripple_InboundLedger.cpp
+++ b/modules/ripple_app/ledger/ripple_InboundLedger.cpp
@@ -48,7 +48,7 @@ bool InboundLedger::tryLocal ()
     if (!mHaveBase)
     {
         // Nothing we can do without the ledger base
-        NodeObject::pointer node = getApp().getNodeStore ().retrieve (mHash);
+        NodeObject::pointer node = getApp().getNodeStore ().fetch (mHash);
 
         if (!node)
         {
@@ -672,7 +672,7 @@ bool InboundLedger::takeBase (const std::string& data) // data must not have has
     Serializer s (data.size () + 4);
     s.add32 (HashPrefix::ledgerMaster);
     s.addRaw (data);
-    getApp().getNodeStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.peekData (), mHash);
+    getApp().getNodeStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.modData (), mHash);
 
     progress ();
diff --git a/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.cpp b/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.cpp
index 
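
//------------------------------------------------------------------------------
// Review note: the call sites above move from s.peekData () to s.modData ()
// because the new store () path takes a Blob& whose storage the NodeObject
// takes over ("Take over the caller's buffer" in ripple_NodeObject.cpp
// below), and peekData () only grants const access. A minimal standalone
// sketch of the hand-over idiom; Payload is a hypothetical stand-in for
// NodeObject:

#include <vector>

typedef std::vector <unsigned char> Blob;

class Payload
{
public:
    explicit Payload (Blob& data)
    {
        // Swap the caller's buffer into place instead of copying it
        m_data.swap (data);
    }

    Blob const& getData () const
    {
        return m_data;
    }

private:
    Blob m_data;
};

int main ()
{
    Blob serialized (128, 0x2a);

    Payload p (serialized);   // like store (..., s.modData (), mHash)

    // 'serialized' is now empty: the buffer moved, nothing was copied.
    return (p.getData ().size () == 128 && serialized.empty ()) ? 0 : 1;
}
//------------------------------------------------------------------------------
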
6d5ff693f..860a53b4b 100644 --- a/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.cpp +++ b/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.cpp @@ -6,120 +6,208 @@ #if RIPPLE_HYPERLEVELDB_AVAILABLE -class HyperLevelDBBackendFactory::Backend : public NodeStore::Backend +class HyperLevelDBBackendFactory::Backend + : public NodeStore::Backend + , public NodeStore::BatchWriter::Callback + , LeakChecked { public: - Backend (size_t keyBytes, StringPairArray const& keyValues) + typedef RecycledObjectPool StringPool; + + Backend (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) : m_keyBytes (keyBytes) - , mName(keyValues ["path"].toStdString ()) - , mDB(NULL) + , m_scheduler (scheduler) + , m_batch (*this, scheduler) + , m_name (keyValues ["path"].toStdString ()) { - if (mName.empty()) - throw std::runtime_error ("Missing path in LevelDB backend"); + if (m_name.empty ()) + Throw (std::runtime_error ("Missing path in LevelDB backend")); hyperleveldb::Options options; options.create_if_missing = true; - if (keyValues["cache_mb"].isEmpty()) + if (keyValues ["cache_mb"].isEmpty ()) + { options.block_cache = hyperleveldb::NewLRUCache (theConfig.getSize (siHashNodeDBCache) * 1024 * 1024); + } else + { options.block_cache = hyperleveldb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L); + } - if (keyValues["filter_bits"].isEmpty()) + if (keyValues ["filter_bits"].isEmpty()) { if (theConfig.NODE_SIZE >= 2) options.filter_policy = hyperleveldb::NewBloomFilterPolicy (10); } - else if (keyValues["filter_bits"].getIntValue() != 0) - options.filter_policy = hyperleveldb::NewBloomFilterPolicy (keyValues["filter_bits"].getIntValue()); + else if (keyValues ["filter_bits"].getIntValue() != 0) + { + options.filter_policy = hyperleveldb::NewBloomFilterPolicy (keyValues ["filter_bits"].getIntValue ()); + } - if (!keyValues["open_files"].isEmpty()) - options.max_open_files = keyValues["open_files"].getIntValue(); + if (! keyValues["open_files"].isEmpty ()) + { + options.max_open_files = keyValues ["open_files"].getIntValue(); + } - hyperleveldb::Status status = hyperleveldb::DB::Open (options, mName, &mDB); - if (!status.ok () || !mDB) - throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString())); + hyperleveldb::DB* db = nullptr; + hyperleveldb::Status status = hyperleveldb::DB::Open (options, m_name, &db); + if (!status.ok () || !db) + Throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString())); + + m_db = db; } ~Backend () { - delete mDB; } - std::string getDataBaseName() + std::string getName() { - return mName; + return m_name; } - bool bulkStore (const std::vector< NodeObject::pointer >& objs) - { - hyperleveldb::WriteBatch batch; + //-------------------------------------------------------------------------- + + Status fetch (void const* key, NodeObject::Ptr* pObject) + { + pObject->reset (); + + Status status (ok); + + hyperleveldb::ReadOptions const options; + hyperleveldb::Slice const slice (static_cast (key), m_keyBytes); - BOOST_FOREACH (NodeObject::ref obj, objs) { - Blob blob (toBlob (obj)); - batch.Put ( - hyperleveldb::Slice (reinterpret_cast(obj->getHash ().begin ()), m_keyBytes), - hyperleveldb::Slice (reinterpret_cast(&blob.front ()), blob.size ())); + // These are reused std::string objects, + // required for leveldb's funky interface. 
+ // + StringPool::ScopedItem item (m_stringPool); + std::string& string = item.getObject (); + + hyperleveldb::Status getStatus = m_db->Get (options, slice, &string); + + if (getStatus.ok ()) + { + NodeStore::DecodedBlob decoded (key, string.data (), string.size ()); + + if (decoded.wasOk ()) + { + *pObject = decoded.createObject (); + } + else + { + // Decoding failed, probably corrupted! + // + status = dataCorrupt; + } + } + else + { + if (getStatus.IsCorruption ()) + { + status = dataCorrupt; + } + else if (getStatus.IsNotFound ()) + { + status = notFound; + } + else + { + status = unknown; + } + } } - return mDB->Write (hyperleveldb::WriteOptions (), &batch).ok (); + + return status; } - NodeObject::pointer retrieve (uint256 const& hash) + void store (NodeObject::ref object) { - std::string sData; - if (!mDB->Get (hyperleveldb::ReadOptions (), - hyperleveldb::Slice (reinterpret_cast(hash.begin ()), m_keyBytes), &sData).ok ()) + m_batch.store (object); + } + + void storeBatch (NodeStore::Batch const& batch) + { + hyperleveldb::WriteBatch wb; + { - return NodeObject::pointer(); + NodeStore::EncodedBlob::Pool::ScopedItem item (m_blobPool); + + BOOST_FOREACH (NodeObject::ref object, batch) + { + item.getObject ().prepare (object); + + wb.Put ( + hyperleveldb::Slice (reinterpret_cast ( + item.getObject ().getKey ()), m_keyBytes), + hyperleveldb::Slice (reinterpret_cast ( + item.getObject ().getData ()), item.getObject ().getSize ())); + } } - return fromBinary(hash, &sData[0], sData.size ()); + + hyperleveldb::WriteOptions const options; + + m_db->Write (options, &wb).ok (); } - void visitAll (FUNCTION_TYPE func) + void visitAll (VisitCallback& callback) { - hyperleveldb::Iterator* it = mDB->NewIterator (hyperleveldb::ReadOptions ()); + hyperleveldb::ReadOptions const options; + + ScopedPointer it (m_db->NewIterator (options)); + for (it->SeekToFirst (); it->Valid (); it->Next ()) { if (it->key ().size () == m_keyBytes) { - uint256 hash; - memcpy(hash.begin(), it->key ().data(), m_keyBytes); - func (fromBinary (hash, it->value ().data (), it->value ().size ())); + NodeStore::DecodedBlob decoded (it->key ().data (), + it->value ().data (), + it->value ().size ()); + + if (decoded.wasOk ()) + { + NodeObject::Ptr object (decoded.createObject ()); + + callback.visitObject (object); + } + else + { + // Uh oh, corrupted data! + WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ()); + } + } + else + { + // VFALCO NOTE What does it mean to find an + // incorrectly sized key? Corruption? 
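
//------------------------------------------------------------------------------
// Review note: storeBatch () above relies on the WriteBatch facility that
// hyperleveldb inherits from leveldb: every Put () queued on a batch is
// applied atomically by a single Write (). For reference, the same pattern
// against plain leveldb in isolation (the database path is arbitrary). Note
// that the backend above discards the Status returned by Write (), which may
// be worth surfacing to the caller.

#include <cassert>
#include <string>

#include <leveldb/db.h>
#include <leveldb/write_batch.h>

int main ()
{
    leveldb::Options options;
    options.create_if_missing = true;

    leveldb::DB* db = 0;
    leveldb::Status status = leveldb::DB::Open (options, "/tmp/batch-demo", &db);
    assert (status.ok ());

    // Both writes land together or not at all.
    leveldb::WriteBatch batch;
    batch.Put ("key1", "value1");
    batch.Put ("key2", "value2");
    status = db->Write (leveldb::WriteOptions (), &batch);
    assert (status.ok ());

    std::string value;
    status = db->Get (leveldb::ReadOptions (), "key1", &value);
    assert (status.ok () && value == "value1");

    delete db;
    return 0;
}
//------------------------------------------------------------------------------
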
+ WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size (); } } } - Blob toBlob(NodeObject::ref obj) + int getWriteLoad () { - Blob rawData (9 + obj->getData ().size ()); - unsigned char* bufPtr = &rawData.front(); - - *reinterpret_cast (bufPtr + 0) = ntohl (obj->getIndex ()); - *reinterpret_cast (bufPtr + 4) = ntohl (obj->getIndex ()); - * (bufPtr + 8) = static_cast (obj->getType ()); - memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ()); - - return rawData; + return m_batch.getWriteLoad (); } - NodeObject::pointer fromBinary(uint256 const& hash, - char const* data, int size) + //-------------------------------------------------------------------------- + + void writeBatch (NodeStore::Batch const& batch) { - if (size < 9) - throw std::runtime_error ("undersized object"); - - uint32 index = htonl (*reinterpret_cast (data)); - int htype = data[8]; - - return boost::make_shared (static_cast (htype), index, - data + 9, size - 9, hash); + storeBatch (batch); } private: size_t const m_keyBytes; - std::string mName; - hyperleveldb::DB* mDB; + NodeStore::Scheduler& m_scheduler; + NodeStore::BatchWriter m_batch; + StringPool m_stringPool; + NodeStore::EncodedBlob::Pool m_blobPool; + std::string m_name; + ScopedPointer m_db; }; //------------------------------------------------------------------------------ @@ -144,9 +232,12 @@ String HyperLevelDBBackendFactory::getName () const return "HyperLevelDB"; } -NodeStore::Backend* HyperLevelDBBackendFactory::createInstance (size_t keyBytes, StringPairArray const& keyValues) +NodeStore::Backend* HyperLevelDBBackendFactory::createInstance ( + size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) { - return new HyperLevelDBBackendFactory::Backend (keyBytes, keyValues); + return new HyperLevelDBBackendFactory::Backend (keyBytes, keyValues, scheduler); } //------------------------------------------------------------------------------ diff --git a/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.h b/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.h index 6691681c7..43920477d 100644 --- a/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.h +++ b/modules/ripple_app/node/ripple_HyperLevelDBBackendFactory.h @@ -23,7 +23,10 @@ public: static HyperLevelDBBackendFactory& getInstance (); String getName () const; - NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues); + + NodeStore::Backend* createInstance (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler); }; #endif diff --git a/modules/ripple_app/node/ripple_KeyvaDB.cpp b/modules/ripple_app/node/ripple_KeyvaDB.cpp index 70d5954f7..6b5d68f39 100644 --- a/modules/ripple_app/node/ripple_KeyvaDB.cpp +++ b/modules/ripple_app/node/ripple_KeyvaDB.cpp @@ -463,9 +463,9 @@ public: keyRecord.valSize = valueBytes; keyRecord.leftIndex = 0; keyRecord.rightIndex = 0; - + memcpy (keyRecord.key, key, m_keyBytes); - + writeKeyRecord (keyRecord, state->newKeyIndex, state, true); // Key file has grown by one. @@ -536,7 +536,7 @@ public: maxPayloadBytes = 8 * 1024 }; - KeyvaDBTests () : UnitTest ("KeyvaDB") + KeyvaDBTests () : UnitTest ("KeyvaDB", "ripple") { } @@ -567,51 +567,78 @@ public: typedef UnsignedInteger KeyType; int64 const seedValue = 50; - + String s; s << "keyBytes=" << String (KeyBytes) << ", maxItems=" << String (maxItems); beginTest (s); - // Set up the key and value files and open the db. 
- File const keyPath = File::createTempFile ("").withFileExtension (".key"); - File const valPath = File::createTempFile ("").withFileExtension (".val"); - ScopedPointer db (KeyvaDB::New (KeyBytes, keyPath, valPath, true)); - - Payload payload (maxPayloadBytes); - Payload check (maxPayloadBytes); + // Set up the key and value files + File const tempFile (File::createTempFile ("")); + File const keyPath = tempFile.withFileExtension (".key"); + File const valPath = tempFile.withFileExtension (".val"); { - // Create an array of ascending integers. - HeapBlock items (maxItems); - for (unsigned int i = 0; i < maxItems; ++i) - items [i] = i; + // open the db + ScopedPointer db (KeyvaDB::New (KeyBytes, keyPath, valPath, false)); - // Now shuffle it deterministically. - repeatableShuffle (maxItems, items, seedValue); + Payload payload (maxPayloadBytes); + Payload check (maxPayloadBytes); - // Write all the keys of integers. - for (unsigned int i = 0; i < maxItems; ++i) { - unsigned int keyIndex = items [i]; - - KeyType const key = KeyType::createFromInteger (keyIndex); + // Create an array of ascending integers. + HeapBlock items (maxItems); + for (unsigned int i = 0; i < maxItems; ++i) + items [i] = i; - payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue); - - db->put (key.cbegin (), payload.data.getData (), payload.bytes); + // Now shuffle it deterministically. + repeatableShuffle (maxItems, items, seedValue); + // Write all the keys of integers. + for (unsigned int i = 0; i < maxItems; ++i) { - // VFALCO TODO Check what we just wrote? - //db->get (key.cbegin (), check.data.getData (), payload.bytes); + unsigned int keyIndex = items [i]; + + KeyType const key = KeyType::createFromInteger (keyIndex); + + payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue); + + db->put (key.cbegin (), payload.data.getData (), payload.bytes); + + { + // VFALCO TODO Check what we just wrote? + //db->get (key.cbegin (), check.data.getData (), payload.bytes); + } + } + } + + { + // Go through all of our keys and try to retrieve them. + // since this is done in ascending order, we should get + // random seeks at this point. + // + PayloadGetCallback cb; + for (unsigned int keyIndex = 0; keyIndex < maxItems; ++keyIndex) + { + KeyType const v = KeyType::createFromInteger (keyIndex); + + bool const found = db->get (v.cbegin (), &cb); + + expect (found, "Should be found"); + + payload.repeatableRandomFill (1, maxPayloadBytes, keyIndex + seedValue); + + expect (payload == cb.payload, "Should be equal"); } } } { - // Go through all of our keys and try to retrieve them. - // since this is done in ascending order, we should get - // random seeks at this point. 
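
//------------------------------------------------------------------------------
// Review note: the test depends on repeatableShuffle () producing the same
// permutation for the same seed, so failures reproduce exactly. A standalone
// sketch of a deterministic Fisher-Yates shuffle with that property (assumed
// behavior only, not the actual Beast implementation):

#include <algorithm>
#include <cstdlib>

static void repeatableShuffleSketch (unsigned int* items,
                                     int numItems,
                                     unsigned int seedValue)
{
    std::srand (seedValue);   // fixed seed -> fixed permutation

    for (int i = numItems - 1; i > 0; --i)
    {
        int const j = std::rand () % (i + 1);

        std::swap (items [i], items [j]);
    }
}
//------------------------------------------------------------------------------
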
- // + // Re-open the database and confirm the data + ScopedPointer db (KeyvaDB::New (KeyBytes, keyPath, valPath, false)); + + Payload payload (maxPayloadBytes); + Payload check (maxPayloadBytes); + PayloadGetCallback cb; for (unsigned int keyIndex = 0; keyIndex < maxItems; ++keyIndex) { @@ -626,12 +653,15 @@ public: expect (payload == cb.payload, "Should be equal"); } } + + keyPath.deleteFile (); + valPath.deleteFile (); } void runTest () { - testKeySize <4> (512); - testKeySize <32> (4096); + testKeySize <4> (500); + testKeySize <32> (4000); } }; diff --git a/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.cpp b/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.cpp index f9b65a019..676fc7ecb 100644 --- a/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.cpp +++ b/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.cpp @@ -6,9 +6,16 @@ class KeyvaDBBackendFactory::Backend : public NodeStore::Backend { +private: + typedef RecycledObjectPool MemoryPool; + typedef RecycledObjectPool EncodedBlobPool; + public: - Backend (size_t keyBytes, StringPairArray const& keyValues) + Backend (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) : m_keyBytes (keyBytes) + , m_scheduler (scheduler) , m_path (keyValues ["path"]) , m_db (KeyvaDB::New ( keyBytes, @@ -22,34 +29,53 @@ public: { } - std::string getDataBaseName () + std::string getName () { return m_path.toStdString (); } //-------------------------------------------------------------------------- - Status get (void const* key, GetCallback* callback) + Status fetch (void const* key, NodeObject::Ptr* pObject) { + pObject->reset (); + Status status (ok); - struct ForwardingGetCallback : KeyvaDB::GetCallback + struct Callback : KeyvaDB::GetCallback { - ForwardingGetCallback (Backend::GetCallback* callback) - : m_callback (callback) + explicit Callback (MemoryBlock& block) + : m_block (block) { } void* getStorageForValue (int valueBytes) { - return m_callback->getStorageForValue (valueBytes); + m_size = valueBytes; + m_block.ensureSize (valueBytes); + + return m_block.getData (); + } + + void const* getData () const noexcept + { + return m_block.getData (); + } + + size_t getSize () const noexcept + { + return m_size; } private: - Backend::GetCallback* const m_callback; + MemoryBlock& m_block; + size_t m_size; }; - ForwardingGetCallback cb (callback); + MemoryPool::ScopedItem item (m_memoryPool); + MemoryBlock& block (item.getObject ()); + + Callback cb (block); // VFALCO TODO Can't we get KeyvaDB to provide a proper status? 
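
//------------------------------------------------------------------------------
// Review note: the KeyvaDB::GetCallback contract used here is storage
// negotiation: the database tells the callback the exact value size, the
// callback hands back a buffer of that size, and the database fills it. The
// Callback above satisfies it with a recycled MemoryBlock; a plain
// std::vector version of the same contract looks like this (sketch):

#include <vector>

struct VectorGetCallback /* : KeyvaDB::GetCallback */
{
    std::vector <char> value;

    void* getStorageForValue (int valueBytes)
    {
        value.resize (valueBytes);

        return value.empty () ? 0 : &value [0];
    }
};
//------------------------------------------------------------------------------
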
// @@ -57,7 +83,18 @@ public: if (found) { - status = ok; + NodeStore::DecodedBlob decoded (key, cb.getData (), cb.getSize ()); + + if (decoded.wasOk ()) + { + *pObject = decoded.createObject (); + + status = ok; + } + else + { + status = dataCorrupt; + } } else { @@ -67,90 +104,45 @@ public: return status; } - //-------------------------------------------------------------------------- - - void writeObject (NodeObject::ref object) + void store (NodeObject::ref object) { - Blob blob (toBlob (object)); - m_db->put (object->getHash ().begin (), &blob [0], blob.size ()); + EncodedBlobPool::ScopedItem item (m_blobPool); + NodeStore::EncodedBlob& encoded (item.getObject ()); + + encoded.prepare (object); + + m_db->put (encoded.getKey (), encoded.getData (), encoded.getSize ()); } - bool bulkStore (std::vector const& objs) + void storeBatch (NodeStore::Batch const& batch) { - for (size_t i = 0; i < objs.size (); ++i) - { - writeObject (objs [i]); - } - - return true; + for (int i = 0; i < batch.size (); ++i) + store (batch [i]); } - struct MyGetCallback : KeyvaDB::GetCallback - { - int valueBytes; - HeapBlock data; - - void* getStorageForValue (int valueBytes_) - { - valueBytes = valueBytes_; - - data.malloc (valueBytes); - - return data.getData (); - } - }; - - NodeObject::pointer retrieve (uint256 const& hash) - { - NodeObject::pointer result; - - MyGetCallback cb; - - bool const found = m_db->get (hash.begin (), &cb); - - if (found) - { - result = fromBinary (hash, cb.data.getData (), cb.valueBytes); - } - - return result; - } - - void visitAll (FUNCTION_TYPE func) + void visitAll (VisitCallback& callback) { + // VFALCO TODO Implement this! + // bassertfalse; + //m_db->visitAll (); } - Blob toBlob (NodeObject::ref obj) + int getWriteLoad () { - Blob rawData (9 + obj->getData ().size ()); - unsigned char* bufPtr = &rawData.front(); - - *reinterpret_cast (bufPtr + 0) = ntohl (obj->getIndex ()); - *reinterpret_cast (bufPtr + 4) = ntohl (obj->getIndex ()); - * (bufPtr + 8) = static_cast (obj->getType ()); - memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ()); - - return rawData; + // we dont do pending writes + return 0; } - NodeObject::pointer fromBinary (uint256 const& hash, char const* data, int size) - { - if (size < 9) - throw std::runtime_error ("undersized object"); - - uint32 index = htonl (*reinterpret_cast (data)); - - int htype = data[8]; - - return boost::make_shared (static_cast (htype), index, - data + 9, size - 9, hash); - } + //-------------------------------------------------------------------------- private: size_t const m_keyBytes; + NodeStore::Scheduler& m_scheduler; String m_path; ScopedPointer m_db; + MemoryPool m_memoryPool; + EncodedBlobPool m_blobPool; }; //------------------------------------------------------------------------------ @@ -175,9 +167,12 @@ String KeyvaDBBackendFactory::getName () const return "KeyvaDB"; } -NodeStore::Backend* KeyvaDBBackendFactory::createInstance (size_t keyBytes, StringPairArray const& keyValues) +NodeStore::Backend* KeyvaDBBackendFactory::createInstance ( + size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) { - return new KeyvaDBBackendFactory::Backend (keyBytes, keyValues); + return new KeyvaDBBackendFactory::Backend (keyBytes, keyValues, scheduler); } //------------------------------------------------------------------------------ diff --git a/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.h b/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.h index 4ee95c7b2..40e76f199 
100644 --- a/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.h +++ b/modules/ripple_app/node/ripple_KeyvaDBBackendFactory.h @@ -21,7 +21,10 @@ public: static KeyvaDBBackendFactory& getInstance (); String getName () const; - NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues); + + NodeStore::Backend* createInstance (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler); }; #endif diff --git a/modules/ripple_app/node/ripple_LevelDBBackendFactory.cpp b/modules/ripple_app/node/ripple_LevelDBBackendFactory.cpp index 3dbdcd330..0beb2d5c1 100644 --- a/modules/ripple_app/node/ripple_LevelDBBackendFactory.cpp +++ b/modules/ripple_app/node/ripple_LevelDBBackendFactory.cpp @@ -4,24 +4,38 @@ */ //============================================================================== -class LevelDBBackendFactory::Backend : public NodeStore::Backend +class LevelDBBackendFactory::Backend + : public NodeStore::Backend + , public NodeStore::BatchWriter::Callback + , LeakChecked { public: - Backend (int keyBytes, StringPairArray const& keyValues) + typedef RecycledObjectPool StringPool; + + //-------------------------------------------------------------------------- + + Backend (int keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) : m_keyBytes (keyBytes) - , m_name(keyValues ["path"].toStdString ()) - , m_db(NULL) + , m_scheduler (scheduler) + , m_batch (*this, scheduler) + , m_name (keyValues ["path"].toStdString ()) { if (m_name.empty()) - throw std::runtime_error ("Missing path in LevelDB backend"); + Throw (std::runtime_error ("Missing path in LevelDB backend")); leveldb::Options options; options.create_if_missing = true; if (keyValues["cache_mb"].isEmpty()) + { options.block_cache = leveldb::NewLRUCache (theConfig.getSize (siHashNodeDBCache) * 1024 * 1024); + } else + { options.block_cache = leveldb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L); + } if (keyValues["filter_bits"].isEmpty()) { @@ -29,39 +43,38 @@ public: options.filter_policy = leveldb::NewBloomFilterPolicy (10); } else if (keyValues["filter_bits"].getIntValue() != 0) + { options.filter_policy = leveldb::NewBloomFilterPolicy (keyValues["filter_bits"].getIntValue()); + } - if (!keyValues["open_files"].isEmpty()) + if (! keyValues["open_files"].isEmpty()) + { options.max_open_files = keyValues["open_files"].getIntValue(); + } - leveldb::Status status = leveldb::DB::Open (options, m_name, &m_db); - if (!status.ok () || !m_db) - throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString())); + leveldb::DB* db = nullptr; + leveldb::Status status = leveldb::DB::Open (options, m_name, &db); + if (!status.ok () || !db) + Throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString())); + + m_db = db; } ~Backend () { - delete m_db; } - std::string getDataBaseName() + std::string getName() { return m_name; } //-------------------------------------------------------------------------- - struct StdString + Status fetch (void const* key, NodeObject::Ptr* pObject) { - std::string blob; - }; + pObject->reset (); - typedef RecycledObjectPool StdStringPool; - - //-------------------------------------------------------------------------- - - Status get (void const* key, GetCallback* callback) - { Status status (ok); leveldb::ReadOptions const options; @@ -71,22 +84,24 @@ public: // These are reused std::string objects, // required for leveldb's funky interface. 
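
//------------------------------------------------------------------------------
// Review note: StringPool here is a RecycledObjectPool of std::string, so the
// same string buffers (and their grown capacity) are reused across reads
// instead of being reallocated for every Get (). A simplified sketch of the
// idea (the real Beast class is thread safe; this sketch is not):

#include <stack>

template <class Object>
class RecycledPoolSketch
{
public:
    class ScopedItem
    {
    public:
        explicit ScopedItem (RecycledPoolSketch& pool)
            : m_pool (pool)
            , m_object (pool.acquire ())
        {
        }

        ~ScopedItem ()
        {
            // Return the object, capacity intact, for the next caller
            m_pool.m_free.push (m_object);
        }

        Object& getObject ()
        {
            return *m_object;
        }

    private:
        RecycledPoolSketch& m_pool;
        Object* const m_object;
    };

    ~RecycledPoolSketch ()
    {
        while (! m_free.empty ())
        {
            delete m_free.top ();
            m_free.pop ();
        }
    }

private:
    Object* acquire ()
    {
        if (m_free.empty ())
            return new Object;

        Object* const object = m_free.top ();
        m_free.pop ();
        return object;
    }

    std::stack <Object*> m_free;
};
//------------------------------------------------------------------------------
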
// - StdStringPool::ScopedItem item (m_stringPool); - std::string& blob = item.getObject ().blob; + StringPool::ScopedItem item (m_stringPool); + std::string& string = item.getObject (); - leveldb::Status getStatus = m_db->Get (options, slice, &blob); + leveldb::Status getStatus = m_db->Get (options, slice, &string); if (getStatus.ok ()) { - void* const buffer = callback->getStorageForValue (blob.size ()); + NodeStore::DecodedBlob decoded (key, string.data (), string.size ()); - if (buffer != nullptr) + if (decoded.wasOk ()) { - memcpy (buffer, blob.data (), blob.size ()); + *pObject = decoded.createObject (); } else { - Throw (std::bad_alloc ()); + // Decoding failed, probably corrupted! + // + status = dataCorrupt; } } else @@ -109,83 +124,90 @@ public: return status; } - //-------------------------------------------------------------------------- - - bool bulkStore (const std::vector< NodeObject::pointer >& objs) + void store (NodeObject::ref object) { - leveldb::WriteBatch batch; - - BOOST_FOREACH (NodeObject::ref obj, objs) - { - Blob blob (toBlob (obj)); - batch.Put ( - leveldb::Slice (reinterpret_cast(obj->getHash ().begin ()), m_keyBytes), - leveldb::Slice (reinterpret_cast(&blob.front ()), blob.size ())); - } - return m_db->Write (leveldb::WriteOptions (), &batch).ok (); + m_batch.store (object); } - NodeObject::pointer retrieve (uint256 const& hash) + void storeBatch (NodeStore::Batch const& batch) { - std::string sData; - if (!m_db->Get (leveldb::ReadOptions (), - leveldb::Slice (reinterpret_cast(hash.begin ()), m_keyBytes), &sData).ok ()) + leveldb::WriteBatch wb; + { - return NodeObject::pointer(); + NodeStore::EncodedBlob::Pool::ScopedItem item (m_blobPool); + + BOOST_FOREACH (NodeObject::ref object, batch) + { + item.getObject ().prepare (object); + + wb.Put ( + leveldb::Slice (reinterpret_cast (item.getObject ().getKey ()), + m_keyBytes), + leveldb::Slice (reinterpret_cast (item.getObject ().getData ()), + item.getObject ().getSize ())); + } } - return fromBinary(hash, &sData[0], sData.size ()); + + leveldb::WriteOptions const options; + + m_db->Write (options, &wb).ok (); } - void visitAll (FUNCTION_TYPE func) + void visitAll (VisitCallback& callback) { - leveldb::Iterator* it = m_db->NewIterator (leveldb::ReadOptions ()); + leveldb::ReadOptions const options; + + ScopedPointer it (m_db->NewIterator (options)); + for (it->SeekToFirst (); it->Valid (); it->Next ()) { if (it->key ().size () == m_keyBytes) { - uint256 hash; - memcpy(hash.begin(), it->key ().data(), m_keyBytes); - func (fromBinary (hash, it->value ().data (), it->value ().size ())); + NodeStore::DecodedBlob decoded (it->key ().data (), + it->value ().data (), + it->value ().size ()); + + if (decoded.wasOk ()) + { + NodeObject::Ptr object (decoded.createObject ()); + + callback.visitObject (object); + } + else + { + // Uh oh, corrupted data! + WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ()); + } } else { // VFALCO NOTE What does it mean to find an // incorrectly sized key? Corruption? 
+ WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size (); } } } - Blob toBlob(NodeObject::ref obj) + int getWriteLoad () { - Blob rawData (9 + obj->getData ().size ()); - unsigned char* bufPtr = &rawData.front(); - - *reinterpret_cast (bufPtr + 0) = ntohl (obj->getIndex ()); - *reinterpret_cast (bufPtr + 4) = ntohl (obj->getIndex ()); - * (bufPtr + 8) = static_cast (obj->getType ()); - memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ()); - - return rawData; + return m_batch.getWriteLoad (); } - NodeObject::pointer fromBinary(uint256 const& hash, - char const* data, int size) + //-------------------------------------------------------------------------- + + void writeBatch (NodeStore::Batch const& batch) { - if (size < 9) - throw std::runtime_error ("undersized object"); - - uint32 index = htonl (*reinterpret_cast (data)); - int htype = data[8]; - - return boost::make_shared (static_cast (htype), index, - data + 9, size - 9, hash); + storeBatch (batch); } private: size_t const m_keyBytes; - StdStringPool m_stringPool; + NodeStore::Scheduler& m_scheduler; + NodeStore::BatchWriter m_batch; + StringPool m_stringPool; + NodeStore::EncodedBlob::Pool m_blobPool; std::string m_name; - leveldb::DB* m_db; + ScopedPointer m_db; }; //------------------------------------------------------------------------------ @@ -210,9 +232,12 @@ String LevelDBBackendFactory::getName () const return "LevelDB"; } -NodeStore::Backend* LevelDBBackendFactory::createInstance (size_t keyBytes, StringPairArray const& keyValues) +NodeStore::Backend* LevelDBBackendFactory::createInstance ( + size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) { - return new LevelDBBackendFactory::Backend (keyBytes, keyValues); + return new LevelDBBackendFactory::Backend (keyBytes, keyValues, scheduler); } //------------------------------------------------------------------------------ diff --git a/modules/ripple_app/node/ripple_LevelDBBackendFactory.h b/modules/ripple_app/node/ripple_LevelDBBackendFactory.h index 5843221c0..3646125d1 100644 --- a/modules/ripple_app/node/ripple_LevelDBBackendFactory.h +++ b/modules/ripple_app/node/ripple_LevelDBBackendFactory.h @@ -21,7 +21,10 @@ public: static LevelDBBackendFactory& getInstance (); String getName () const; - NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues); + + NodeStore::Backend* createInstance (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler); }; #endif diff --git a/modules/ripple_app/node/ripple_MdbBackendFactory.cpp b/modules/ripple_app/node/ripple_MdbBackendFactory.cpp index 0b74349ab..4be1def92 100644 --- a/modules/ripple_app/node/ripple_MdbBackendFactory.cpp +++ b/modules/ripple_app/node/ripple_MdbBackendFactory.cpp @@ -6,170 +6,227 @@ #if RIPPLE_MDB_AVAILABLE -class MdbBackendFactory::Backend : public NodeStore::Backend +class MdbBackendFactory::Backend + : public NodeStore::Backend + , public NodeStore::BatchWriter::Callback + , LeakChecked { public: - explicit Backend (StringPairArray const& keyValues) - : m_env (nullptr) + typedef NodeStore::Batch Batch; + typedef NodeStore::EncodedBlob EncodedBlob; + typedef NodeStore::DecodedBlob DecodedBlob; + + explicit Backend (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) + : m_keyBytes (keyBytes) + , m_scheduler (scheduler) + , m_batch (*this, scheduler) + , m_env (nullptr) { - if (keyValues ["path"].isEmpty ()) - throw std::runtime_error ("Missing 
path in MDB backend"); + String path (keyValues ["path"]); + + m_name = path.toStdString(); + + if (path.isEmpty ()) + Throw (std::runtime_error ("Missing path in MDB backend")); int error = 0; error = mdb_env_create (&m_env); if (error == 0) // Should use the size of the file plus the free space on the disk - error = mdb_env_set_mapsize(m_env, 512L * 1024L * 1024L * 1024L); + error = mdb_env_set_mapsize (m_env, 512L * 1024L * 1024L * 1024L); if (error == 0) error = mdb_env_open ( m_env, - keyValues ["path"].toStdString().c_str (), + m_name.c_str (), MDB_NOTLS, 0664); - MDB_txn * txn; - if (error == 0) - error = mdb_txn_begin(m_env, NULL, 0, &txn); - if (error == 0) - error = mdb_dbi_open(txn, NULL, 0, &m_dbi); - if (error == 0) - error = mdb_txn_commit(txn); + MDB_txn* txn; + if (error == 0) + error = mdb_txn_begin (m_env, NULL, 0, &txn); + + if (error == 0) + error = mdb_dbi_open (txn, NULL, 0, &m_dbi); + + if (error == 0) + error = mdb_txn_commit (txn); if (error != 0) { String s; s << "Error #" << error << " creating mdb environment"; - throw std::runtime_error (s.toStdString ()); + Throw (std::runtime_error (s.toStdString ())); } - m_name = keyValues ["path"].toStdString(); } ~Backend () { if (m_env != nullptr) { - mdb_dbi_close(m_env, m_dbi); + mdb_dbi_close (m_env, m_dbi); mdb_env_close (m_env); } } - std::string getDataBaseName() + std::string getName() { return m_name; } - bool bulkStore (std::vector const& objs) + //-------------------------------------------------------------------------- + + template + unsigned char* mdb_cast (T* p) { - MDB_txn *txn = nullptr; - int rc = 0; + return const_cast (static_cast (p)); + } - rc = mdb_txn_begin(m_env, NULL, 0, &txn); + Status fetch (void const* key, NodeObject::Ptr* pObject) + { + pObject->reset (); - if (rc == 0) + Status status (ok); + + MDB_txn* txn = nullptr; + + int error = 0; + + error = mdb_txn_begin (m_env, NULL, MDB_RDONLY, &txn); + + if (error == 0) { - BOOST_FOREACH (NodeObject::ref obj, objs) - { - MDB_val key, data; - Blob blob (toBlob (obj)); + MDB_val dbkey; + MDB_val data; - key.mv_size = (256 / 8); - key.mv_data = const_cast(obj->getHash().begin()); + dbkey.mv_size = m_keyBytes; + dbkey.mv_data = mdb_cast (key); - data.mv_size = blob.size(); - data.mv_data = &blob.front(); + error = mdb_get (txn, m_dbi, &dbkey, &data); - rc = mdb_put(txn, m_dbi, &key, &data, 0); - if (rc != 0) + if (error == 0) + { + DecodedBlob decoded (key, data.mv_data, data.mv_size); + + if (decoded.wasOk ()) { - assert(false); + *pObject = decoded.createObject (); + } + else + { + status = dataCorrupt; + } + } + else if (error == MDB_NOTFOUND) + { + status = notFound; + } + else + { + status = unknown; + + WriteLog (lsWARNING, NodeObject) << "MDB txn failed, code=" << error; + } + + mdb_txn_abort (txn); + } + else + { + status = unknown; + + WriteLog (lsWARNING, NodeObject) << "MDB txn failed, code=" << error; + } + + return status; + } + + void store (NodeObject::ref object) + { + m_batch.store (object); + } + + void storeBatch (Batch const& batch) + { + MDB_txn* txn = nullptr; + + int error = 0; + + error = mdb_txn_begin (m_env, NULL, 0, &txn); + + if (error == 0) + { + EncodedBlob::Pool::ScopedItem item (m_blobPool); + + BOOST_FOREACH (NodeObject::Ptr const& object, batch) + { + EncodedBlob& encoded (item.getObject ()); + + encoded.prepare (object); + + MDB_val key; + key.mv_size = m_keyBytes; + key.mv_data = mdb_cast (encoded.getKey ()); + + MDB_val data; + data.mv_size = encoded.getSize (); + data.mv_data = mdb_cast (encoded.getData ()); + + 
error = mdb_put (txn, m_dbi, &key, &data, 0); + + if (error != 0) + { + WriteLog (lsWARNING, NodeObject) << "mdb_put failed, error=" << error; break; } - } + } + + if (error == 0) + { + error = mdb_txn_commit(txn); + + if (error != 0) + { + WriteLog (lsWARNING, NodeObject) << "mdb_txn_commit failed, error=" << error; + } + } + else + { + mdb_txn_abort (txn); + } } else - assert(false); - - if (rc == 0) - rc = mdb_txn_commit(txn); - else if (txn) - mdb_txn_abort(txn); - - assert(rc == 0); - return rc == 0; - } - - NodeObject::pointer retrieve (uint256 const& hash) - { - NodeObject::pointer ret; - - MDB_txn *txn = nullptr; - int rc = 0; - - rc = mdb_txn_begin(m_env, NULL, MDB_RDONLY, &txn); - - if (rc == 0) { - MDB_val key, data; - - key.mv_size = (256 / 8); - key.mv_data = const_cast(hash.begin()); - - rc = mdb_get(txn, m_dbi, &key, &data); - if (rc == 0) - ret = fromBinary(hash, static_cast(data.mv_data), data.mv_size); - else - assert(rc == MDB_NOTFOUND); + WriteLog (lsWARNING, NodeObject) << "mdb_txn_begin failed, error=" << error; } - else - assert(false); - - mdb_txn_abort(txn); - - return ret; } - void visitAll (FUNCTION_TYPE func) - { // WRITEME - assert(false); - } - - Blob toBlob (NodeObject::ref obj) const + void visitAll (VisitCallback& callback) { - Blob rawData (9 + obj->getData ().size ()); - unsigned char* bufPtr = &rawData.front(); - - *reinterpret_cast (bufPtr + 0) = ntohl (obj->getIndex ()); - - *reinterpret_cast (bufPtr + 4) = ntohl (obj->getIndex ()); - - *(bufPtr + 8) = static_cast (obj->getType ()); - - memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ()); - - return rawData; + // VFALCO TODO Implement this! + bassertfalse; } - NodeObject::pointer fromBinary (uint256 const& hash, char const* data, int size) const + int getWriteLoad () { - if (size < 9) - throw std::runtime_error ("undersized object"); + return m_batch.getWriteLoad (); + } - uint32 const index = htonl (*reinterpret_cast (data)); + //-------------------------------------------------------------------------- - int const htype = data [8]; - - return boost::make_shared ( - static_cast (htype), - index, - data + 9, - size - 9, - hash); + void writeBatch (Batch const& batch) + { + storeBatch (batch); } private: + size_t const m_keyBytes; + NodeStore::Scheduler& m_scheduler; + NodeStore::BatchWriter m_batch; + NodeStore::EncodedBlob::Pool m_blobPool; std::string m_name; MDB_env* m_env; MDB_dbi m_dbi; @@ -197,9 +254,12 @@ String MdbBackendFactory::getName () const return "mdb"; } -NodeStore::Backend* MdbBackendFactory::createInstance (StringPairArray const& keyValues) +NodeStore::Backend* MdbBackendFactory::createInstance ( + size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) { - return new MdbBackendFactory::Backend (keyValues); + return new MdbBackendFactory::Backend (keyBytes, keyValues, scheduler); } #endif diff --git a/modules/ripple_app/node/ripple_MdbBackendFactory.h b/modules/ripple_app/node/ripple_MdbBackendFactory.h index 702ca3a14..2e1cd7db6 100644 --- a/modules/ripple_app/node/ripple_MdbBackendFactory.h +++ b/modules/ripple_app/node/ripple_MdbBackendFactory.h @@ -25,7 +25,10 @@ public: static MdbBackendFactory& getInstance (); String getName () const; - NodeStore::Backend* createInstance (StringPairArray const& keyValues); + + NodeStore::Backend* createInstance (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler); }; #endif diff --git a/modules/ripple_app/node/ripple_NodeObject.cpp 
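
//------------------------------------------------------------------------------
// Review note: on the MDB read path above, mdb_get () returns a pointer into
// the memory-mapped file that is only valid until the transaction ends; that
// is why DecodedBlob::createObject () copies the value out before
// mdb_txn_abort () runs. The bare shape of that read path, for reference
// (real liblmdb calls; error handling trimmed):

#include <lmdb.h>

static bool fetchValueSketch (MDB_env* env, MDB_dbi dbi,
                              void* keyBuffer, size_t keyBytes)
{
    MDB_txn* txn = 0;
    bool found = false;

    if (mdb_txn_begin (env, 0, MDB_RDONLY, &txn) == 0)
    {
        MDB_val key;
        key.mv_size = keyBytes;    // the 32 byte object hash, as above
        key.mv_data = keyBuffer;

        MDB_val data;

        if (mdb_get (txn, dbi, &key, &data) == 0)
        {
            // data.mv_data points into the memory map: copy it out here,
            // before the transaction is released.
            found = true;
        }

        // Read-only transactions are released with abort; nothing to commit.
        mdb_txn_abort (txn);
    }

    return found;
}
//------------------------------------------------------------------------------
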
b/modules/ripple_app/node/ripple_NodeObject.cpp
index b3de59ed9..1d3f28276 100644
--- a/modules/ripple_app/node/ripple_NodeObject.cpp
+++ b/modules/ripple_app/node/ripple_NodeObject.cpp
@@ -11,27 +11,27 @@ SETUP_LOG (NodeObject)
 NodeObject::NodeObject (
     NodeObjectType type,
     LedgerIndex ledgerIndex,
-    Blob const& binaryDataToCopy,
-    uint256 const& hash)
+    Blob& data,
+    uint256 const& hash,
+    PrivateAccess)
     : mType (type)
     , mHash (hash)
     , mLedgerIndex (ledgerIndex)
-    , mData (binaryDataToCopy)
 {
+    // Take over the caller's buffer
+    mData.swap (data);
 }
 
-NodeObject::NodeObject (
+NodeObject::Ptr NodeObject::createObject (
     NodeObjectType type,
     LedgerIndex ledgerIndex,
-    void const* bufferToCopy,
-    int bytesInBuffer,
-    uint256 const& hash)
-    : mType (type)
-    , mHash (hash)
-    , mLedgerIndex (ledgerIndex)
-    , mData (static_cast <unsigned char const*> (bufferToCopy),
-             static_cast <unsigned char const*> (bufferToCopy) + bytesInBuffer)
+    Blob& data,
+    uint256 const & hash)
 {
+    // The boost::ref is important or
+    // else it will be passed by value!
+    return boost::make_shared <NodeObject> (
+        type, ledgerIndex, boost::ref (data), hash, PrivateAccess ());
 }
 
 NodeObjectType NodeObject::getType () const
@@ -54,14 +54,21 @@ Blob const& NodeObject::getData () const
     return mData;
 }
 
-bool NodeObject::isCloneOf (NodeObject const& other) const
+bool NodeObject::isCloneOf (NodeObject::Ptr const& other) const
 {
-    return
-        mType == other.mType &&
-        mHash == other.mHash &&
-        mLedgerIndex == other.mLedgerIndex &&
-        mData == other.mData
-        ;
+    if (mType != other->mType)
+        return false;
+
+    if (mHash != other->mHash)
+        return false;
+
+    if (mLedgerIndex != other->mLedgerIndex)
+        return false;
+
+    if (mData != other->mData)
+        return false;
+
+    return true;
 }
 
 //------------------------------------------------------------------------------
 
@@ -70,7 +77,7 @@ class NodeObjectTests : public UnitTest
 {
 public:
 
-    NodeObjectTests () : UnitTest ("NodeObject")
+    NodeObjectTests () : UnitTest ("NodeObject", "ripple")
     {
     }
 
diff --git a/modules/ripple_app/node/ripple_NodeObject.h b/modules/ripple_app/node/ripple_NodeObject.h
index 0637b2942..2ead4b370 100644
--- a/modules/ripple_app/node/ripple_NodeObject.h
+++ b/modules/ripple_app/node/ripple_NodeObject.h
@@ -42,27 +42,41 @@ public:
     */
     typedef UnsignedInteger <32> Hash;
 
+    // Please use this one. For a reference use Ptr const&
+    typedef boost::shared_ptr <NodeObject> Ptr;
+
+    // These are DEPRECATED, type names are capitalized.
     typedef boost::shared_ptr <NodeObject> pointer;
     typedef pointer const& ref;
 
-    /** Create from a vector of data.
-
-        @note A copy of the data is created.
-    */
+private:
+    // This hack is used to make the constructor effectively private
+    // except for when we use it in the call to make_shared.
+    // There's no portable way to make make_shared<> a friend work.
+    struct PrivateAccess { };
+public:
+    // This constructor is private, use createObject instead.
     NodeObject (NodeObjectType type,
                 LedgerIndex ledgerIndex,
-                Blob const & binaryDataToCopy,
-                uint256 const & hash);
+                Blob& data,
+                uint256 const& hash,
+                PrivateAccess);
 
-    /** Create from an area of memory.
+    /** Create an object from fields.
 
-        @note A copy of the data is created.
+        The caller's variable is modified during this call. The
+        underlying storage for the Blob is taken over by the NodeObject.
+
+        @param type The type of object.
+        @param ledgerIndex The ledger in which this object appears.
+        @param data A buffer containing the payload. The caller's variable
+                    is overwritten.
+        @param hash The 256-bit hash of the payload data.
*/ - NodeObject (NodeObjectType type, - LedgerIndex ledgerIndex, - void const * bufferToCopy, - int bytesInBuffer, - uint256 const & hash); + static Ptr createObject (NodeObjectType type, + LedgerIndex ledgerIndex, + Blob& data, + uint256 const& hash); /** Retrieve the type of this object. */ @@ -83,7 +97,22 @@ public: /** See if this object has the same data as another object. */ - bool isCloneOf (NodeObject const& other) const; + bool isCloneOf (NodeObject::Ptr const& other) const; + + /** Binary function that satisfies the strict-weak-ordering requirement. + + This compares the hashes of both objects and returns true if + the first hash is considered to go before the second. + + @see std::sort + */ + struct LessThan + { + inline bool operator() (NodeObject::Ptr const& lhs, NodeObject::Ptr const& rhs) const noexcept + { + return lhs->getHash () < rhs->getHash (); + } + }; private: NodeObjectType mType; diff --git a/modules/ripple_app/node/ripple_NodeStore.cpp b/modules/ripple_app/node/ripple_NodeStore.cpp index ef7e7a965..1ac8ef95b 100644 --- a/modules/ripple_app/node/ripple_NodeStore.cpp +++ b/modules/ripple_app/node/ripple_NodeStore.cpp @@ -4,49 +4,164 @@ */ //============================================================================== -// -// NodeStore::Backend -// - -NodeStore::Backend::Backend () - : mWriteGeneration(0) - , mWriteLoad(0) - , mWritePending(false) +NodeStore::DecodedBlob::DecodedBlob (void const* key, void const* value, int valueBytes) { - mWriteSet.reserve (bulkWriteBatchSize); + /* Data format: + + Bytes + + 0...3 LedgerIndex 32-bit big endian integer + 4...7 Unused? An unused copy of the LedgerIndex + 8 char One of NodeObjectType + 9...end The body of the object data + */ + + m_success = false; + m_key = key; + // VFALCO NOTE Ledger indexes should have started at 1 + m_ledgerIndex = LedgerIndex (-1); + m_objectType = hotUNKNOWN; + m_objectData = nullptr; + m_dataBytes = bmax (0, valueBytes - 9); + + if (valueBytes > 4) + { + LedgerIndex const* index = static_cast (value); + m_ledgerIndex = ByteOrder::swapIfLittleEndian (*index); + } + + // VFALCO NOTE What about bytes 4 through 7 inclusive? + + if (valueBytes > 8) + { + unsigned char const* byte = static_cast (value); + m_objectType = static_cast (byte [8]); + } + + if (valueBytes > 9) + { + m_objectData = static_cast (value) + 9; + + switch (m_objectType) + { + case hotUNKNOWN: + default: + break; + + case hotLEDGER: + case hotTRANSACTION: + case hotACCOUNT_NODE: + case hotTRANSACTION_NODE: + m_success = true; + break; + } + } } -bool NodeStore::Backend::store (NodeObject::ref object) +NodeObject::Ptr NodeStore::DecodedBlob::createObject () { - boost::mutex::scoped_lock sl (mWriteMutex); + bassert (m_success); + + NodeObject::Ptr object; + + if (m_success) + { + Blob data (m_dataBytes); + + memcpy (data.data (), m_objectData, m_dataBytes); + + object = NodeObject::createObject ( + m_objectType, m_ledgerIndex, data, uint256 (m_key)); + } + + return object; +} + +//------------------------------------------------------------------------------ + +void NodeStore::EncodedBlob::prepare (NodeObject::Ptr const& object) +{ + m_key = object->getHash ().begin (); + + // This is how many bytes we need in the flat data + m_size = object->getData ().size () + 9; + + m_data.ensureSize (m_size); + + // These sizes must be the same! 
+ static_bassert (sizeof (uint32) == sizeof (object->getIndex ())); + + { + uint32* buf = static_cast (m_data.getData ()); + + buf [0] = ByteOrder::swapIfLittleEndian (object->getIndex ()); + buf [1] = ByteOrder::swapIfLittleEndian (object->getIndex ()); + } + + { + unsigned char* buf = static_cast (m_data.getData ()); + + buf [8] = static_cast (object->getType ()); + + memcpy (&buf [9], object->getData ().data (), object->getData ().size ()); + } +} + +//============================================================================== + +NodeStore::BatchWriter::BatchWriter (Callback& callback, Scheduler& scheduler) + : m_callback (callback) + , m_scheduler (scheduler) + , mWriteGeneration (0) + , mWriteLoad (0) + , mWritePending (false) +{ + mWriteSet.reserve (batchWritePreallocationSize); +} + +NodeStore::BatchWriter::~BatchWriter () +{ + waitForWriting (); +} + +void NodeStore::BatchWriter::store (NodeObject::ref object) +{ + LockType::scoped_lock sl (mWriteMutex); + mWriteSet.push_back (object); - if (!mWritePending) + if (! mWritePending) { mWritePending = true; - // VFALCO TODO Eliminate this dependency on the Application object. - getApp().getJobQueue ().addJob ( - jtWRITE, - "NodeObject::store", - BIND_TYPE (&NodeStore::Backend::bulkWrite, this, P_1)); + m_scheduler.scheduleTask (this); } - return true; } -void NodeStore::Backend::bulkWrite (Job &) +int NodeStore::BatchWriter::getWriteLoad () +{ + LockType::scoped_lock sl (mWriteMutex); + + return std::max (mWriteLoad, static_cast (mWriteSet.size ())); +} + +void NodeStore::BatchWriter::performScheduledTask () +{ + writeBatch (); +} + +void NodeStore::BatchWriter::writeBatch () { int setSize = 0; - // VFALCO NOTE Use the canonical for(;;) instead. - // Or better, provide a proper terminating condition. - while (1) + for (;;) { std::vector< boost::shared_ptr > set; - set.reserve (bulkWriteBatchSize); + + set.reserve (batchWritePreallocationSize); { - boost::mutex::scoped_lock sl (mWriteMutex); + LockType::scoped_lock sl (mWriteMutex); mWriteSet.swap (set); assert (mWriteSet.empty ()); @@ -62,162 +177,43 @@ void NodeStore::Backend::bulkWrite (Job &) return; } + // VFALCO NOTE On the first trip through, mWriteLoad will be 0. + // This is probably not intended. Perhaps the order + // of calls isn't quite right + // mWriteLoad = std::max (setSize, static_cast (mWriteSet.size ())); + setSize = set.size (); } - bulkStore (set); + m_callback.writeBatch (set); } } -// VFALCO TODO This function should not be needed. Instead, the -// destructor should handle flushing of the bulk write buffer. -// -void NodeStore::Backend::waitWrite () +void NodeStore::BatchWriter::waitForWriting () { - boost::mutex::scoped_lock sl (mWriteMutex); + LockType::scoped_lock sl (mWriteMutex); int gen = mWriteGeneration; while (mWritePending && (mWriteGeneration == gen)) mWriteCondition.wait (sl); } -int NodeStore::Backend::getWriteLoad () +//============================================================================== + +class NodeStoreImp + : public NodeStore + , LeakChecked { - boost::mutex::scoped_lock sl (mWriteMutex); - - return std::max (mWriteLoad, static_cast (mWriteSet.size ())); -} - -//------------------------------------------------------------------------------ - -// -// NodeStore -// - -class NodeStoreImp : public NodeStore -{ -public: - /** Size of a key. - */ - enum - { - keyBytes = 32 - }; - - /** Parsed key/value blob into NodeObject components. - - This will extract the information required to construct - a NodeObject. 
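
//------------------------------------------------------------------------------
// Review note: EncodedBlob::prepare () above emits the nine byte header that
// DecodedBlob documents. The same layout produced with only standard C++, as
// a standalone reference (a sketch, not the EncodedBlob implementation):

#include <cstddef>
#include <cstring>
#include <vector>

typedef std::vector <unsigned char> Blob;

// Layout:  bytes 0..3  ledger index, 32-bit big endian
//          bytes 4..7  the unused duplicate of the ledger index
//          byte  8     the NodeObjectType byte
//          bytes 9..   the object payload
static Blob encodeValue (unsigned long ledgerIndex,
                         unsigned char type,
                         unsigned char const* payload,
                         std::size_t payloadBytes)
{
    Blob value (9 + payloadBytes);

    for (int i = 0; i < 4; ++i)
    {
        unsigned char const byte = static_cast <unsigned char> (
            (ledgerIndex >> (8 * (3 - i))) & 0xff);

        value [i] = byte;        // big endian index
        value [i + 4] = byte;    // the historical copy at bytes 4..7
    }

    value [8] = type;

    if (payloadBytes > 0)
        std::memcpy (&value [9], payload, payloadBytes);

    return value;
}
//------------------------------------------------------------------------------
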
It also does consistency checking and returns - the result, so it is possible to determine if the data - is corrupted without throwing an exception. Note all forms - of corruption are detected so further analysis will be - needed to eliminate false positives. - - This is the format in which a NodeObject is stored in the - persistent storage layer. - */ - struct DecodedBlob - { - /** Construct the decoded blob from raw data. - - The `success` member will indicate if the operation was succesful. - */ - DecodedBlob (void const* keyParam, void const* value, int valueBytes) - { - /* Data format: - - Bytes - - 0...3 LedgerIndex 32-bit big endian integer - 4...7 Unused? An unused copy of the LedgerIndex - 8 char One of NodeObjectType - 9...end The body of the object data - */ - - success = false; - key = keyParam; - // VFALCO NOTE Ledger indexes should have started at 1 - ledgerIndex = LedgerIndex (-1); - objectType = hotUNKNOWN; - objectData = nullptr; - dataBytes = bmax (0, valueBytes - 9); - - if (dataBytes > 4) - { - LedgerIndex const* index = static_cast (value); - ledgerIndex = ByteOrder::swapIfLittleEndian (*index); - } - - // VFALCO NOTE What about bytes 4 through 7 inclusive? - - if (dataBytes > 8) - { - unsigned char const* byte = static_cast (value); - objectType = static_cast (byte [8]); - } - - if (dataBytes > 9) - { - objectData = static_cast (value) + 9; - - switch (objectType) - { - case hotUNKNOWN: - default: - break; - - case hotLEDGER: - case hotTRANSACTION: - case hotACCOUNT_NODE: - case hotTRANSACTION_NODE: - success = true; - break; - } - } - } - - /** Create a NodeObject from this data. - */ - NodeObject::pointer createObject () - { - NodeObject::pointer object; - - if (success) - { - // VFALCO NOTE I dislke these shared pointers from boost - object = boost::make_shared ( - objectType, ledgerIndex, objectData, dataBytes, uint256 (key)); - } - - return object; - } - - bool success; - - void const* key; - LedgerIndex ledgerIndex; - NodeObjectType objectType; - unsigned char const* objectData; - int dataBytes; - }; - - //-------------------------------------------------------------------------- - - class EncodedBlob - { - HeapBlock data; - }; - public: NodeStoreImp (String backendParameters, String fastBackendParameters, - int cacheSize, - int cacheAge) - : m_backend (createBackend (backendParameters)) - , m_fastBackend (fastBackendParameters.isNotEmpty () ? createBackend (fastBackendParameters) - : nullptr) - , m_cache ("NodeStore", cacheSize, cacheAge) + Scheduler& scheduler) + : m_scheduler (scheduler) + , m_backend (createBackend (backendParameters, scheduler)) + , m_fastBackend (fastBackendParameters.isNotEmpty () + ? createBackend (fastBackendParameters, scheduler) : nullptr) + , m_cache ("NodeStore", 16384, 300) , m_negativeCache ("NoteStoreNegativeCache", 0, 120) { } @@ -227,84 +223,21 @@ public: // VFALCO NOTE This shouldn't be necessary, the backend can // just handle it in the destructor. 
// + /* m_backend->waitWrite (); if (m_fastBackend) m_fastBackend->waitWrite (); - } - - float getCacheHitRate () - { - return m_cache.getHitRate (); - } - - void tune (int size, int age) - { - m_cache.setTargetSize (size); - m_cache.setTargetAge (age); - } - - void sweep () - { - m_cache.sweep (); - m_negativeCache.sweep (); - } - - int getWriteLoad () - { - return m_backend->getWriteLoad (); - } - - bool store (NodeObjectType type, - uint32 index, - Blob const& data, - uint256 const& hash) - { - bool wasStored = false; - - bool const keyFoundAndObjectCached = m_cache.refreshIfPresent (hash); - - // VFALCO NOTE What happens if the key is found, but the object - // fell out of the cache? We will end up passing it - // to the backend anyway. - // - if (! keyFoundAndObjectCached) - { - - // VFALCO TODO Rename this to RIPPLE_NODESTORE_VERIFY_HASHES and make - // it be 1 or 0 instead of merely defined or undefined. - // - #ifdef PARANOID - assert (hash == Serializer::getSHA512Half (data)); - #endif - - NodeObject::pointer object = boost::make_shared (type, index, data, hash); - - // VFALCO NOTE What does it mean to canonicalize an object? - // - if (!m_cache.canonicalize (hash, object)) - { - m_backend->store (object); - - if (m_fastBackend) - m_fastBackend->store (object); - } - - m_negativeCache.del (hash); - - wasStored = true; - } - - return wasStored; + */ } //------------------------------------------------------------------------------ - NodeObject::pointer retrieve (uint256 const& hash) + NodeObject::Ptr fetch (uint256 const& hash) { // See if the object already exists in the cache // - NodeObject::pointer obj = m_cache.fetch (hash); + NodeObject::Ptr obj = m_cache.fetch (hash); if (obj == nullptr) { @@ -320,7 +253,7 @@ public: // if (m_fastBackend != nullptr) { - obj = retrieveInternal (m_fastBackend, hash); + obj = fetchInternal (m_fastBackend, hash); // If we found the object, avoid storing it again later. if (obj != nullptr) @@ -335,16 +268,14 @@ public: // { // Monitor this operation's load since it is expensive. - - // m_hooks->onRetrieveBegin () - + // // VFALCO TODO Why is this an autoptr? Why can't it just be a plain old object? // - LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtHO_READ, "HOS::retrieve")); + // VFALCO NOTE Commented this out because it breaks the unit test! + // + //LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtHO_READ, "HOS::retrieve")); - obj = retrieveInternal (m_backend, hash); - - // m_hooks->onRetrieveEnd () + obj = fetchInternal (m_backend, hash); } // If it's not in the main database, remember that so we @@ -389,47 +320,27 @@ public: return obj; } - NodeObject::pointer retrieveInternal (Backend* backend, uint256 const& hash) + NodeObject::Ptr fetchInternal (Backend* backend, uint256 const& hash) { - // VFALCO TODO Make this not allocate and free on each call - // - struct MyGetCallback : Backend::GetCallback + NodeObject::Ptr object; + + Backend::Status const status = backend->fetch (hash.begin (), &object); + + switch (status) { - void* getStorageForValue (size_t sizeInBytes) - { - bytes = sizeInBytes; - data.malloc (sizeInBytes); + case Backend::ok: + case Backend::notFound: + break; - return &data [0]; - } - - size_t bytes; - HeapBlock data; - }; - - NodeObject::pointer object; - - MyGetCallback cb; - Backend::Status const status = backend->get (hash.begin (), &cb); - - if (status == Backend::ok) - { - // Deserialize the payload into its components. 
+ case Backend::dataCorrupt: + // VFALCO TODO Deal with encountering corrupt data! // - DecodedBlob decoded (hash.begin (), cb.data.getData (), cb.bytes); + WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << hash; + break; - if (decoded.success) - { - object = decoded.createObject (); - } - else - { - // Houston, we've had a problem. Data is likely corrupt. - - // VFALCO TODO Deal with encountering corrupt data! - - WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << hash; - } + default: + WriteLog (lsWARNING, NodeObject) << "Unknown status=" << status; + break; } return object; @@ -437,42 +348,119 @@ public: //------------------------------------------------------------------------------ - void importVisitor ( - std::vector & objects, - NodeObject::pointer object) + void store (NodeObjectType type, + uint32 index, + Blob& data, + uint256 const& hash) { - if (objects.size() >= bulkWriteBatchSize) + bool const keyFoundAndObjectCached = m_cache.refreshIfPresent (hash); + + // VFALCO NOTE What happens if the key is found, but the object + // fell out of the cache? We will end up passing it + // to the backend anyway. + // + if (! keyFoundAndObjectCached) { - m_backend->bulkStore (objects); - objects.clear (); - objects.reserve (bulkWriteBatchSize); + // VFALCO TODO Rename this to RIPPLE_VERIFY_NODEOBJECT_KEYS and make + // it be 1 or 0 instead of merely defined or undefined. + // + #if RIPPLE_VERIFY_NODEOBJECT_KEYS + assert (hash == Serializer::getSHA512Half (data)); + #endif + + NodeObject::Ptr object = NodeObject::createObject ( + type, index, data, hash); + + if (!m_cache.canonicalize (hash, object)) + { + m_backend->store (object); + + if (m_fastBackend) + m_fastBackend->store (object); + } + + m_negativeCache.del (hash); } - - objects.push_back (object); } - int import (String sourceBackendParameters) + //------------------------------------------------------------------------------ + + float getCacheHitRate () { - ScopedPointer srcBackend (createBackend (sourceBackendParameters)); + return m_cache.getHitRate (); + } + + void tune (int size, int age) + { + m_cache.setTargetSize (size); + m_cache.setTargetAge (age); + } + + void sweep () + { + m_cache.sweep (); + m_negativeCache.sweep (); + } + + int getWriteLoad () + { + return m_backend->getWriteLoad (); + } + + //------------------------------------------------------------------------------ + + void import (String sourceBackendParameters) + { + class ImportVisitCallback : public Backend::VisitCallback + { + public: + explicit ImportVisitCallback (Backend& backend) + : m_backend (backend) + { + m_objects.reserve (batchWritePreallocationSize); + } + + ~ImportVisitCallback () + { + if (! 
m_objects.empty ()) + m_backend.storeBatch (m_objects); + } + + void visitObject (NodeObject::Ptr const& object) + { + if (m_objects.size () >= batchWritePreallocationSize) + { + m_backend.storeBatch (m_objects); + + m_objects.clear (); + m_objects.reserve (batchWritePreallocationSize); + } + + m_objects.push_back (object); + } + + private: + Backend& m_backend; + Batch m_objects; + }; + + //-------------------------------------------------------------------------- + + ScopedPointer srcBackend (createBackend (sourceBackendParameters, m_scheduler)); WriteLog (lsWARNING, NodeObject) << - "Node import from '" << srcBackend->getDataBaseName() << "' to '" - << m_backend->getDataBaseName() << "'."; + "Node import from '" << srcBackend->getName() << "' to '" + << m_backend->getName() << "'."; - std::vector objects; + ImportVisitCallback callback (*m_backend); - objects.reserve (bulkWriteBatchSize); - - srcBackend->visitAll (BIND_TYPE (&NodeStoreImp::importVisitor, this, boost::ref (objects), P_1)); - - if (!objects.empty ()) - m_backend->bulkStore (objects); - - return 0; + srcBackend->visitAll (callback); } - NodeStore::Backend* createBackend (String const& parameters) + //------------------------------------------------------------------------------ + + static NodeStore::Backend* createBackend (String const& parameters, Scheduler& scheduler) { Backend* backend = nullptr; @@ -486,7 +474,7 @@ public: for (int i = 0; i < s_factories.size (); ++i) { - if (s_factories [i]->getName () == type) + if (s_factories [i]->getName ().compareIgnoreCase (type) == 0) { factory = s_factories [i]; break; @@ -495,16 +483,16 @@ public: if (factory != nullptr) { - backend = factory->createInstance (keyBytes, keyValues); + backend = factory->createInstance (keyBytes, keyValues, scheduler); } else { - throw std::runtime_error ("unkown backend type"); + Throw (std::runtime_error ("unknown backend type")); } } else { - throw std::runtime_error ("missing backend type"); + Throw (std::runtime_error ("missing backend type")); } return backend; @@ -515,10 +503,12 @@ public: s_factories.add (&factory); } + //------------------------------------------------------------------------------ + private: static Array s_factories; - RecycledObjectPool m_blobPool; + Scheduler& m_scheduler; // Persistent key/value storage. 
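    // [Editor's note] m_backend is created by createBackend() from the
    // "type=...|path=..." parameter string and owned for the lifetime of
    // the NodeStoreImp; the optional look-aside backend consulted first
    // by fetch() above is held the same way.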
ScopedPointer m_backend; @@ -542,72 +532,597 @@ void NodeStore::addBackendFactory (BackendFactory& factory) NodeStore* NodeStore::New (String backendParameters, String fastBackendParameters, - int cacheSize, - int cacheAge) + Scheduler& scheduler) { return new NodeStoreImp (backendParameters, fastBackendParameters, - cacheSize, - cacheAge); + scheduler); } -//------------------------------------------------------------------------------ +//============================================================================== -class NodeStoreTests : public UnitTest +// Some common code for the unit tests +// +class NodeStoreUnitTest : public UnitTest { public: + // Tunable parameters + // enum { maxPayloadBytes = 1000, - - numObjects = 1000 + numObjectsToTest = 1000 }; - NodeStoreTests () : UnitTest ("NodeStore") - { - } + // Shorthand type names + // + typedef NodeStore::Backend Backend; + typedef NodeStore::Batch Batch; - // Create a pseudo-random object - static NodeObject* createNodeObject (int index, int64 seedValue, HeapBlock & payloadBuffer) + // Immediately performs the task + struct TestScheduler : NodeStore::Scheduler { - Random r (seedValue + index); - - NodeObjectType type; - switch (r.nextInt (4)) + void scheduleTask (Task* task) { - case 0: type = hotLEDGER; break; - case 1: type = hotTRANSACTION; break; - case 2: type = hotACCOUNT_NODE; break; - case 3: type = hotTRANSACTION_NODE; break; - default: - type = hotUNKNOWN; - break; - }; + task->performScheduledTask (); + } + }; - LedgerIndex ledgerIndex = 1 + r.nextInt (1024 * 1024); + // Creates predictable objects + class PredictableObjectFactory + { + public: + explicit PredictableObjectFactory (int64 seedValue) + : m_seedValue (seedValue) + { + } - uint256 hash; - r.nextBlob (hash.begin (), hash.size ()); + NodeObject::Ptr createObject (int index) + { + Random r (m_seedValue + index); - int payloadBytes = 1 + r.nextInt (maxPayloadBytes); - r.nextBlob (payloadBuffer.getData (), payloadBytes); + NodeObjectType type; + switch (r.nextInt (4)) + { + case 0: type = hotLEDGER; break; + case 1: type = hotTRANSACTION; break; + case 2: type = hotACCOUNT_NODE; break; + case 3: type = hotTRANSACTION_NODE; break; + default: + type = hotUNKNOWN; + break; + }; - return new NodeObject (type, ledgerIndex, payloadBuffer.getData (), payloadBytes, hash); + LedgerIndex ledgerIndex = 1 + r.nextInt (1024 * 1024); + + uint256 hash; + r.nextBlob (hash.begin (), hash.size ()); + + int const payloadBytes = 1 + r.nextInt (maxPayloadBytes); + + Blob data (payloadBytes); + + r.nextBlob (data.data (), payloadBytes); + + return NodeObject::createObject (type, ledgerIndex, data, hash); + } + + private: + int64 const m_seedValue; + }; + +public: + NodeStoreUnitTest (String name, UnitTest::When when = UnitTest::runAlways) + : UnitTest (name, "ripple", when) + { } - void runTest () + // Create a predictable batch of objects + static void createPredictableBatch (Batch& batch, int startingIndex, int numObjects, int64 seedValue) { - beginTest ("create"); + batch.reserve (numObjects); - int64 const seedValue = 50; - - HeapBlock payloadBuffer (maxPayloadBytes); + PredictableObjectFactory factory (seedValue); for (int i = 0; i < numObjects; ++i) + batch.push_back (factory.createObject (startingIndex + i)); + } + + // Compare two batches for equality + static bool areBatchesEqual (Batch const& lhs, Batch const& rhs) + { + bool result = true; + + if (lhs.size () == rhs.size ()) { - ScopedPointer object (createNodeObject (i, seedValue, payloadBuffer)); + for (int i = 0; i < 
lhs.size (); ++i) + { + if (! lhs [i]->isCloneOf (rhs [i])) + { + result = false; + break; + } + } + } + else + { + result = false; + } + + return result; + } + + // Store a batch in a backend + void storeBatch (Backend& backend, Batch const& batch) + { + for (int i = 0; i < batch.size (); ++i) + { + backend.store (batch [i]); + } + } + + // Get a copy of a batch in a backend + void fetchCopyOfBatch (Backend& backend, Batch* pCopy, Batch const& batch) + { + pCopy->clear (); + pCopy->reserve (batch.size ()); + + for (int i = 0; i < batch.size (); ++i) + { + NodeObject::Ptr object; + + Backend::Status const status = backend.fetch ( + batch [i]->getHash ().cbegin (), &object); + + expect (status == Backend::ok, "Should be ok"); + + if (status == Backend::ok) + { + expect (object != nullptr, "Should not be null"); + + pCopy->push_back (object); + } + } + } + + // Store all objects in a batch + static void storeBatch (NodeStore& db, NodeStore::Batch const& batch) + { + for (int i = 0; i < batch.size (); ++i) + { + NodeObject::Ptr const object (batch [i]); + + Blob data (object->getData ()); + + db.store (object->getType (), + object->getIndex (), + data, + object->getHash ()); + } + } + + // Fetch all the hashes in one batch, into another batch. + static void fetchCopyOfBatch (NodeStore& db, + NodeStore::Batch* pCopy, + NodeStore::Batch const& batch) + { + pCopy->clear (); + pCopy->reserve (batch.size ()); + + for (int i = 0; i < batch.size (); ++i) + { + NodeObject::Ptr object = db.fetch (batch [i]->getHash ()); + + if (object != nullptr) + pCopy->push_back (object); } } }; +//------------------------------------------------------------------------------ + +// Tests predictable batches, and NodeObject blob encoding +// +class NodeStoreBasicsTests : public NodeStoreUnitTest +{ +public: + typedef NodeStore::EncodedBlob EncodedBlob; + typedef NodeStore::DecodedBlob DecodedBlob; + + NodeStoreBasicsTests () : NodeStoreUnitTest ("NodeStoreBasics") + { + } + + // Make sure predictable object generation works! + void testBatches (int64 const seedValue) + { + beginTest ("batch"); + + Batch batch1; + createPredictableBatch (batch1, 0, numObjectsToTest, seedValue); + + Batch batch2; + createPredictableBatch (batch2, 0, numObjectsToTest, seedValue); + + expect (areBatchesEqual (batch1, batch2), "Should be equal"); + + Batch batch3; + createPredictableBatch (batch3, 1, numObjectsToTest, seedValue); + + expect (! 
areBatchesEqual (batch1, batch3), "Should not be equal");
+    }
+
+    // Checks encoding/decoding blobs
+    void testBlobs (int64 const seedValue)
+    {
+        beginTest ("encoding");
+
+        Batch batch;
+        createPredictableBatch (batch, 0, numObjectsToTest, seedValue);
+
+        EncodedBlob encoded;
+        for (int i = 0; i < batch.size (); ++i)
+        {
+            encoded.prepare (batch [i]);
+
+            DecodedBlob decoded (encoded.getKey (), encoded.getData (), encoded.getSize ());
+
+            expect (decoded.wasOk (), "Should be ok");
+
+            if (decoded.wasOk ())
+            {
+                NodeObject::Ptr const object (decoded.createObject ());
+
+                expect (batch [i]->isCloneOf (object), "Should be clones");
+            }
+        }
+    }
+
+    void runTest ()
+    {
+        int64 const seedValue = 50;
+
+        testBatches (seedValue);
+
+        testBlobs (seedValue);
+    }
+};
+
+static NodeStoreBasicsTests nodeStoreBasicsTests;
+
+//------------------------------------------------------------------------------
+
+// Tests the NodeStore::Backend interface
+//
+class NodeStoreBackendTests : public NodeStoreUnitTest
+{
+public:
+    NodeStoreBackendTests () : NodeStoreUnitTest ("NodeStoreBackend")
+    {
+    }
+
+    //--------------------------------------------------------------------------
+
+    void testBackend (String type, int64 const seedValue)
+    {
+        beginTest (String ("NodeStore::Backend type=") + type);
+
+        String params;
+        params << "type=" << type
+               << "|path=" << File::createTempFile ("unittest").getFullPathName ();
+
+        // Create a batch
+        NodeStore::Batch batch;
+        createPredictableBatch (batch, 0, numObjectsToTest, seedValue);
+        //createPredictableBatch (batch, 0, 10, seedValue);
+
+        {
+            // Open the backend
+            ScopedPointer <Backend> backend (
+                NodeStoreImp::createBackend (params, m_scheduler));
+
+            // Write the batch
+            storeBatch (*backend, batch);
+
+            {
+                // Read it back in
+                NodeStore::Batch copy;
+                fetchCopyOfBatch (*backend, &copy, batch);
+                expect (areBatchesEqual (batch, copy), "Should be equal");
+            }
+
+            {
+                // Reorder and read the copy again
+                NodeStore::Batch copy;
+                UnitTestUtilities::repeatableShuffle (batch.size (), batch, seedValue);
+                fetchCopyOfBatch (*backend, &copy, batch);
+                expect (areBatchesEqual (batch, copy), "Should be equal");
+            }
+        }
+
+        {
+            // Re-open the backend
+            ScopedPointer <Backend> backend (
+                NodeStoreImp::createBackend (params, m_scheduler));
+
+            // Read it back in
+            NodeStore::Batch copy;
+            fetchCopyOfBatch (*backend, &copy, batch);
+            // Canonicalize the source and destination batches
+            std::sort (batch.begin (), batch.end (), NodeObject::LessThan ());
+            std::sort (copy.begin (), copy.end (), NodeObject::LessThan ());
+            expect (areBatchesEqual (batch, copy), "Should be equal");
+        }
+    }
+
+    void runTest ()
+    {
+        int const seedValue = 50;
+
+        testBackend ("keyvadb", seedValue);
+
+        testBackend ("leveldb", seedValue);
+
+        testBackend ("sqlite", seedValue);
+
+    #if RIPPLE_HYPERLEVELDB_AVAILABLE
+        testBackend ("hyperleveldb", seedValue);
+    #endif
+
+    #if RIPPLE_MDB_AVAILABLE
+        testBackend ("mdb", seedValue);
+    #endif
+    }
+
+private:
+    TestScheduler m_scheduler;
+};
+
+static NodeStoreBackendTests nodeStoreBackendTests;
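
// [Editor's sketch] The Backend interface exercised by these tests is small
// enough that a toy in-memory implementation fits on one page. This class is
// hypothetical (not part of this change); it assumes <map> is available and
// that uint256 is usable as a std::map key. It illustrates the contract the
// tests rely on: fetch() by fixed-size key, store(), storeBatch(),
// visitAll(), and getWriteLoad().
class MemoryBackend : public NodeStore::Backend
{
public:
    std::string getName ()
    {
        return "memory";
    }

    Status fetch (void const* key, NodeObject::Ptr* pObject)
    {
        uint256 const hash (key);
        Map::const_iterator const it = m_map.find (hash);
        if (it == m_map.end ())
            return notFound;
        *pObject = it->second;
        return ok;
    }

    void store (NodeObject::Ptr const& object)
    {
        m_map [object->getHash ()] = object;
    }

    void storeBatch (NodeStore::Batch const& batch)
    {
        for (std::size_t i = 0; i < batch.size (); ++i)
            store (batch [i]);
    }

    void visitAll (VisitCallback& callback)
    {
        for (Map::const_iterator it = m_map.begin (); it != m_map.end (); ++it)
            callback.visitObject (it->second);
    }

    int getWriteLoad ()
    {
        return 0;   // writes are synchronous, nothing is ever pending
    }

private:
    typedef std::map <uint256, NodeObject::Ptr> Map;
    Map m_map;
};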
+//------------------------------------------------------------------------------
+
+class NodeStoreTimingTests : public NodeStoreUnitTest
+{
+public:
+    enum
+    {
+        numObjectsToTest = 10000
+    };
+
+    NodeStoreTimingTests ()
+        : NodeStoreUnitTest ("NodeStoreTiming", UnitTest::runManual)
+    {
+    }
+
+    class Stopwatch
+    {
+    public:
+        Stopwatch ()
+        {
+        }
+
+        void start ()
+        {
+            m_startTime = Time::getHighResolutionTicks ();
+        }
+
+        double getElapsed ()
+        {
+            int64 const now = Time::getHighResolutionTicks();
+
+            return Time::highResolutionTicksToSeconds (now - m_startTime);
+        }
+
+    private:
+        int64 m_startTime;
+    };
+
+    void testBackend (String type, int64 const seedValue)
+    {
+        String s;
+        s << "Testing backend '" << type << "' performance";
+        beginTest (s);
+
+        String params;
+        params << "type=" << type
+               << "|path=" << File::createTempFile ("unittest").getFullPathName ();
+
+        // Create batches
+        NodeStore::Batch batch1;
+        createPredictableBatch (batch1, 0, numObjectsToTest, seedValue);
+        NodeStore::Batch batch2;
+        createPredictableBatch (batch2, 0, numObjectsToTest, seedValue);
+
+        // Open the backend
+        ScopedPointer <Backend> backend (
+            NodeStoreImp::createBackend (params, m_scheduler));
+
+        Stopwatch t;
+
+        // Individual write batch test
+        t.start ();
+        storeBatch (*backend, batch1);
+        s = "";
+        s << " Single write: " << String (t.getElapsed (), 2) << " seconds";
+        logMessage (s);
+
+        // Bulk write batch test
+        t.start ();
+        backend->storeBatch (batch2);
+        s = "";
+        s << " Batch write: " << String (t.getElapsed (), 2) << " seconds";
+        logMessage (s);
+
+        // Read test
+        Batch copy;
+        t.start ();
+        fetchCopyOfBatch (*backend, &copy, batch1);
+        fetchCopyOfBatch (*backend, &copy, batch2);
+        s = "";
+        s << " Batch read: " << String (t.getElapsed (), 2) << " seconds";
+        logMessage (s);
+    }
+
+    void runTest ()
+    {
+        int const seedValue = 50;
+
+        testBackend ("keyvadb", seedValue);
+
+#if 0
+        testBackend ("leveldb", seedValue);
+
+        testBackend ("sqlite", seedValue);
+
+    #if RIPPLE_HYPERLEVELDB_AVAILABLE
+        testBackend ("hyperleveldb", seedValue);
+    #endif
+
+    #if RIPPLE_MDB_AVAILABLE
+        testBackend ("mdb", seedValue);
+    #endif
+#endif
+    }
+
+private:
+    TestScheduler m_scheduler;
+};
+
+//------------------------------------------------------------------------------
+
+class NodeStoreTests : public NodeStoreUnitTest
+{
+public:
+    NodeStoreTests () : NodeStoreUnitTest ("NodeStore")
+    {
+    }
+
+    void testImport (String destBackendType, String srcBackendType, int64 seedValue)
+    {
+        String srcParams;
+        srcParams << "type=" << srcBackendType
+                  << "|path=" << File::createTempFile ("unittest").getFullPathName ();
+
+        // Create a batch
+        NodeStore::Batch batch;
+        createPredictableBatch (batch, 0, numObjectsToTest, seedValue);
+
+        // Write to source db
+        {
+            ScopedPointer <NodeStore> src (NodeStore::New (srcParams, "", m_scheduler));
+
+            storeBatch (*src, batch);
+        }
+
+        String destParams;
+        destParams << "type=" << destBackendType
+                   << "|path=" << File::createTempFile ("unittest").getFullPathName ();
+
+        ScopedPointer <NodeStore> dest (NodeStore::New (
+            destParams, "", m_scheduler));
+
+        beginTest (String ("import into '") + destBackendType + "' from '" + srcBackendType + "'");
+
+        // Do the import
+        dest->import (srcParams);
+
+        // Get the results of the import
+        NodeStore::Batch copy;
+        fetchCopyOfBatch (*dest, &copy, batch);
+
+        // Canonicalize the source and destination batches
+        std::sort (batch.begin (), batch.end (), NodeObject::LessThan ());
+        std::sort (copy.begin (), copy.end (), NodeObject::LessThan ());
+        expect (areBatchesEqual (batch, copy), "Should be equal");
+    }
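
    // [Editor's sketch] The minimal store/fetch round trip a caller performs
    // against a NodeStore, condensed from the helpers above. This member is
    // hypothetical (not part of the change) and only illustrates the API;
    // store() overwrites the caller's Blob, hence the up-front copy.
    void exampleRoundTrip (NodeStore& db, NodeObject::Ptr const& object)
    {
        Blob data (object->getData ());  // copy, because store() consumes it

        db.store (object->getType (),
                  object->getIndex (),
                  data,
                  object->getHash ());

        NodeObject::Ptr const copy (db.fetch (object->getHash ()));

        expect (copy != nullptr && copy->isCloneOf (object), "Should be clones");
    }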
+    void testBackend (String type, int64 const seedValue)
+    {
+        beginTest (String ("NodeStore backend type=") + type);
+
+        String params;
+        params << "type=" << type
+               << "|path=" << File::createTempFile ("unittest").getFullPathName ();
+
+        // Create a batch
+        NodeStore::Batch batch;
+        createPredictableBatch (batch, 0, numObjectsToTest, seedValue);
+
+        {
+            // Open the database
+            ScopedPointer <NodeStore> db (NodeStore::New (params, "", m_scheduler));
+
+            // Write the batch
+            storeBatch (*db, batch);
+
+            {
+                // Read it back in
+                NodeStore::Batch copy;
+                fetchCopyOfBatch (*db, &copy, batch);
+                expect (areBatchesEqual (batch, copy), "Should be equal");
+            }
+
+            {
+                // Reorder and read the copy again
+                NodeStore::Batch copy;
+                UnitTestUtilities::repeatableShuffle (batch.size (), batch, seedValue);
+                fetchCopyOfBatch (*db, &copy, batch);
+                expect (areBatchesEqual (batch, copy), "Should be equal");
+            }
+        }
+
+        {
+            // Re-open the database
+            ScopedPointer <NodeStore> db (NodeStore::New (params, "", m_scheduler));
+
+            // Read it back in
+            NodeStore::Batch copy;
+            fetchCopyOfBatch (*db, &copy, batch);
+            // Canonicalize the source and destination batches
+            std::sort (batch.begin (), batch.end (), NodeObject::LessThan ());
+            std::sort (copy.begin (), copy.end (), NodeObject::LessThan ());
+            expect (areBatchesEqual (batch, copy), "Should be equal");
+        }
+    }
+
+public:
+    void runTest ()
+    {
+        int64 const seedValue = 50;
+
+        //
+        // Backend tests
+        //
+
+        testBackend ("keyvadb", seedValue);
+
+        testBackend ("leveldb", seedValue);
+
+        testBackend ("sqlite", seedValue);
+
+    #if RIPPLE_HYPERLEVELDB_AVAILABLE
+        testBackend ("hyperleveldb", seedValue);
+    #endif
+
+    #if RIPPLE_MDB_AVAILABLE
+        testBackend ("mdb", seedValue);
+    #endif
+
+        //
+        // Import tests
+        //
+
+        //testImport ("leveldb", "keyvadb", seedValue);
+        //testImport ("sqlite", "leveldb", seedValue);
+        testImport ("leveldb", "sqlite", seedValue);
+    }
+
+private:
+    TestScheduler m_scheduler;
+};
+
 static NodeStoreTests nodeStoreTests;
+
+static NodeStoreTimingTests nodeStoreTimingTests;
diff --git a/modules/ripple_app/node/ripple_NodeStore.h b/modules/ripple_app/node/ripple_NodeStore.h
index 60c3ff3a4..dc91bd98c 100644
--- a/modules/ripple_app/node/ripple_NodeStore.h
+++ b/modules/ripple_app/node/ripple_NodeStore.h
@@ -8,31 +8,196 @@
 #define RIPPLE_NODESTORE_H_INCLUDED

 /** Persistency layer for NodeObject
+
+    A Node is a ledger object which is uniquely identified by a key,
+    which is the 256-bit hash of the body of the node. The payload is
+    a variable length block of serialized data.
+
+    All ledger data is stored as node objects and as such, needs to
+    be persisted between launches. Furthermore, since the set of
+    node objects will in general be larger than the amount of available
+    memory, purged node objects which are later accessed must be retrieved
+    from the node store.
 */
-class NodeStore : LeakChecked <NodeStore>
+class NodeStore
 {
 public:
     enum
     {
-        /** This is the largest number of key/value pairs we
-            will write during a bulk write.
-        */
-        // VFALCO TODO Make this a tunable parameter in the key value pairs
-        bulkWriteBatchSize = 128
-
         /** Size of the fixed keys, in bytes.
+
+            We use a 256-bit hash for the keys.
+
+            @see NodeObject
         */
-        ,keyBytes = 32 // 256 bit hash
+        keyBytes = 32,
+
+        // This is only used to pre-allocate the array for
+        // batch objects and does not affect the amount written.
+        //
+        batchWritePreallocationSize = 128
     };

-    /** Interface to inform callers of cetain activities.
+    typedef std::vector <NodeObject::Ptr> Batch;
+
+    //--------------------------------------------------------------------------
+
+    /** Parses a key/value blob into NodeObject components.
+
+        This will extract the information required to construct
+        a NodeObject. It also does consistency checking and returns
+        the result, so it is possible to determine if the data
+        is corrupted without throwing an exception. Not all forms
+        of corruption are detected so further analysis will be
+        needed to eliminate false positives.
+ + @note This is the format in which a NodeObject is stored in the + persistent storage layer. */ - class Hooks + class DecodedBlob { - virtual void onRetrieveBegin () { } - virtual void onRetrieveEnd () { } + public: + /** Construct the decoded blob from raw data. + */ + DecodedBlob (void const* key, void const* value, int valueBytes); + + /** Determine if the decoding was successful. + */ + bool wasOk () const noexcept { return m_success; } + + /** Create a NodeObject from this data. + */ + NodeObject::Ptr createObject (); + + private: + bool m_success; + + void const* m_key; + LedgerIndex m_ledgerIndex; + NodeObjectType m_objectType; + unsigned char const* m_objectData; + int m_dataBytes; }; + //-------------------------------------------------------------------------- + + /** Utility for producing flattened node objects. + + These get recycled to prevent many small allocations. + + @note This is the format in which a NodeObject is stored in the + persistent storage layer. + */ + struct EncodedBlob + { + typedef RecycledObjectPool Pool; + + void prepare (NodeObject::Ptr const& object); + + void const* getKey () const noexcept { return m_key; } + + size_t getSize () const noexcept { return m_size; } + + void const* getData () const noexcept { return m_data.getData (); } + + private: + void const* m_key; + MemoryBlock m_data; + size_t m_size; + }; + + //-------------------------------------------------------------------------- + + /** Provides the asynchronous scheduling feature. + */ + class Scheduler + { + public: + /** Derived classes perform scheduled tasks. + */ + struct Task + { + virtual ~Task () { } + + /** Performs the task. + + The call may take place on a foreign thread. + */ + virtual void performScheduledTask () = 0; + }; + + /** Schedules a task. + + Depending on the implementation, this could happen + immediately or get deferred. + */ + virtual void scheduleTask (Task* task) = 0; + }; + + //-------------------------------------------------------------------------- + + /** A helper to assist with batch writing. + + The batch writes are performed with a scheduled task. + + @see Scheduler + */ + // VFALCO NOTE I'm not entirely happy having placed this here, + // because whoever needs to use NodeStore certainly doesn't + // need to see the implementation details of BatchWriter. + // + class BatchWriter : private Scheduler::Task + { + public: + /** This callback does the actual writing. + */ + struct Callback + { + virtual void writeBatch (Batch const& batch) = 0; + }; + + /** Create a batch writer. + */ + BatchWriter (Callback& callback, Scheduler& scheduler); + + /** Destroy a batch writer. + + Anything pending in the batch is written out before this returns. + */ + ~BatchWriter (); + + /** Store the object. + + This will add to the batch and initiate a scheduled task to + write the batch out. + */ + void store (NodeObject::ref object); + + /** Get an estimate of the amount of writing I/O pending. + */ + int getWriteLoad (); + + private: + void performScheduledTask (); + void writeBatch (); + void waitForWriting (); + + private: + typedef boost::recursive_mutex LockType; + typedef boost::condition_variable_any CondvarType; + + Callback& m_callback; + Scheduler& m_scheduler; + LockType mWriteMutex; + CondvarType mWriteCondition; + int mWriteGeneration; + int mWriteLoad; + bool mWritePending; + Batch mWriteSet; + }; + + //-------------------------------------------------------------------------- + /** Back end used for the store. 
A Backend implements a persistent key/value storage system. @@ -51,92 +216,76 @@ public: unknown }; - Backend (); + /** Destroy the backend. + All open files are closed and flushed. If there are batched + writes or other tasks scheduled, they will be completed before + this call returns. + */ virtual ~Backend () { } - /** Provides storage for retrieved objects. + /** Get the human-readable name of this backend. + + This is used for diagnostic output. */ - struct GetCallback - { - /** Get storage for an object. + virtual std::string getName() = 0; - @param sizeInBytes The number of bytes needed to store the value. - - @return A pointer to a buffer large enough to hold all the bytes. - */ - virtual void* getStorageForValue (size_t sizeInBytes) = 0; - }; - - /** Retrieve a single object. + /** Fetch a single object. If the object is not found or an error is encountered, the result will indicate the condition. + @note This will be called concurrently. + @param key A pointer to the key data. - @param callback The callback used to obtain storage for the value. + @param pObject [out] The created object if successful. @return The result of the operation. */ - virtual Status get (void const* key, GetCallback* callback) { return notFound; } - - - + virtual Status fetch (void const* key, NodeObject::Ptr* pObject) = 0; /** Store a single object. + + Depending on the implementation this may happen immediately + or deferred using a scheduled task. + + @note This will be called concurrently. + + @param object The object to store. */ - // VFALCO TODO Why should the Backend know or care about NodeObject? - // It should just deal with a fixed key and raw data. - // - virtual bool store (NodeObject::ref); - //virtual bool put (void const* key, void const* value, int valueBytes) { return false; } + virtual void store (NodeObject::Ptr const& object) = 0; - /** Retrieve an individual object. + /** Store a group of objects. + + @note This function will not be called concurrently with + itself or @ref store. */ - virtual NodeObject::pointer retrieve (uint256 const &hash) = 0; + virtual void storeBatch (Batch const& batch) = 0; - // Visit every object in the database - // This function will only be called during an import operation - // - // VFALCO TODO Replace FUNCTION_TYPE with a beast lift. - // - virtual void visitAll (FUNCTION_TYPE ) = 0; + /** Callback for iterating through objects. - private: - friend class NodeStoreImp; + @see visitAll + */ + struct VisitCallback + { + virtual void visitObject (NodeObject::Ptr const& object) = 0; + }; - // VFALCO TODO Put this bulk writing logic into a separate class. - // NOTE Why are these virtual? - void bulkWrite (Job &); - void waitWrite (); - int getWriteLoad (); + /** Visit every object in the database + + This is usually called during import. - private: - virtual std::string getDataBaseName() = 0; + @see import + */ + virtual void visitAll (VisitCallback& callback) = 0; - // Store a group of objects - // This function will only be called from a single thread - // VFALCO NOTE It looks like NodeStore throws this into the job queue? - virtual bool bulkStore (const std::vector< NodeObject::pointer >&) = 0; - - protected: - // VFALCO TODO Put this bulk writing logic into a separate class. - boost::mutex mWriteMutex; - boost::condition_variable mWriteCondition; - int mWriteGeneration; - int mWriteLoad; - bool mWritePending; - std::vector > mWriteSet; + /** Estimate the number of write operations pending. 
+        */
+        virtual int getWriteLoad () = 0;
     };

-
-public:
-    // Helper functions for the backend
-    class BackendHelper
-    {
-    public:
-    };
+    //--------------------------------------------------------------------------

-public:
     /** Factory to produce backends.
     */
     class BackendFactory
@@ -152,50 +301,89 @@ public:
         @param keyBytes The fixed number of bytes per key.
         @param keyValues A set of key/value configuration pairs.
+        @param scheduler The scheduler to use for running tasks.

         @return A pointer to the Backend object.
         */
-        virtual Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues) = 0;
+        virtual Backend* createInstance (size_t keyBytes,
+                                         StringPairArray const& keyValues,
+                                         Scheduler& scheduler) = 0;
     };

-public:
+    //--------------------------------------------------------------------------
+
     /** Construct a node store.

-        parameters has the format:
+        Parameter strings have the format:

        <key>=<value>['|'<key>=<value>]

-        The key "type" must exist, it defines the backend. For example
-        "type=LevelDB|path=/mnt/ephemeral"
+        The key "type" must exist; it defines the choice of backend.
+        For example
+        `type=LevelDB|path=/mnt/ephemeral`
+
+        @param backendParameters The parameter string for the persistent backend.
+        @param fastBackendParameters The parameter string for the ephemeral backend.
+        @param scheduler The scheduler to use for performing asynchronous tasks.
+
+        @return A pointer to the created object.
     */
-    // VFALCO NOTE Is cacheSize in bytes? objects? KB?
-    //             Is cacheAge in minutes? seconds?
-    //             These should be in the parameters.
-    //
     static NodeStore* New (String backendParameters,
                            String fastBackendParameters,
-                           int cacheSize,
-                           int cacheAge);
+                           Scheduler& scheduler);
+
+    /** Destroy the node store.
+
+        All pending operations are completed, pending writes flushed,
+        and files closed before this returns.
+    */
+    virtual ~NodeStore () { }

     /** Add the specified backend factory to the list of available factories.

         The names of available factories are compared against the "type"
         value in the parameter list on construction.
+
+        @param factory The factory to add.
     */
     static void addBackendFactory (BackendFactory& factory);

+    /** Fetch an object.
+
+        If the object is known to be not in the database, is not found
+        in the database, or fails to load correctly, nullptr is returned.
+
+        @note This can be called concurrently.
+
+        @param hash The key of the object to retrieve.
+
+        @return The object, or nullptr if it couldn't be retrieved.
+    */
+    virtual NodeObject::pointer fetch (uint256 const& hash) = 0;
+
+    /** Store the object.
+
+        The caller's Blob parameter is overwritten.
+
+        @param type The type of object.
+        @param ledgerIndex The ledger in which the object appears.
+        @param data The payload of the object. The caller's
+                    variable is overwritten.
+        @param hash The 256-bit hash of the payload data.
+    */
+    virtual void store (NodeObjectType type,
+                        uint32 ledgerIndex,
+                        Blob& data,
+                        uint256 const& hash) = 0;
+
     // VFALCO TODO Document this.
     virtual float getCacheHitRate () = 0;

-    // VFALCO TODO Document this.
-    virtual bool store (NodeObjectType type, uint32 index, Blob const& data,
-                        uint256 const& hash) = 0;
-
-    // VFALCO TODO Document this.
-    //        TODO Replace uint256 with void*
-    //
-    virtual NodeObject::pointer retrieve (uint256 const& hash) = 0;
-
     // VFALCO TODO Document this.
     //        TODO Document the parameter meanings.
     virtual void tune (int size, int age) = 0;
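    // [Editor's sketch] The simplest conforming Scheduler runs its tasks
    // inline, exactly like the TestScheduler used by the unit tests earlier
    // in this diff; an asynchronous implementation must still call
    // performScheduledTask() exactly once per task passed to scheduleTask().
    //
    struct InlineScheduler : Scheduler
    {
        void scheduleTask (Task* task)
        {
            task->performScheduledTask ();
        }
    };

@@ -203,13 +391,14 @@ public:
     // VFALCO TODO Document this.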
virtual void sweep () = 0; - // VFALCO TODO Document this. - // What are the units of the return value? + /** Retrieve the estimated number of pending write operations. + + This is used for diagnostics. + */ virtual int getWriteLoad () = 0; // VFALCO TODO Document this. - // NOTE What's the return value? - virtual int import (String sourceBackendParameters) = 0; + virtual void import (String sourceBackendParameters) = 0; }; #endif diff --git a/modules/ripple_app/node/ripple_NullBackendFactory.cpp b/modules/ripple_app/node/ripple_NullBackendFactory.cpp index 79607fa92..6a3b000c7 100644 --- a/modules/ripple_app/node/ripple_NullBackendFactory.cpp +++ b/modules/ripple_app/node/ripple_NullBackendFactory.cpp @@ -15,28 +15,31 @@ public: { } - std::string getDataBaseName() + std::string getName() { return std::string (); } - bool store (NodeObject::ref obj) + Status fetch (void const*, NodeObject::Ptr*) + { + return notFound; + } + + void store (NodeObject::ref object) + { + } + + void storeBatch (NodeStore::Batch const& batch) { - return false; } - bool bulkStore (const std::vector< NodeObject::pointer >& objs) + void visitAll (VisitCallback& callback) { - return false; } - NodeObject::pointer retrieve (uint256 const& hash) - { - return NodeObject::pointer (); - } - - void visitAll (FUNCTION_TYPE func) + int getWriteLoad () { + return 0; } }; @@ -62,7 +65,10 @@ String NullBackendFactory::getName () const return "none"; } -NodeStore::Backend* NullBackendFactory::createInstance (size_t, StringPairArray const&) +NodeStore::Backend* NullBackendFactory::createInstance ( + size_t, + StringPairArray const&, + NodeStore::Scheduler&) { return new NullBackendFactory::Backend; } diff --git a/modules/ripple_app/node/ripple_NullBackendFactory.h b/modules/ripple_app/node/ripple_NullBackendFactory.h index 2284fed2d..a68c1838e 100644 --- a/modules/ripple_app/node/ripple_NullBackendFactory.h +++ b/modules/ripple_app/node/ripple_NullBackendFactory.h @@ -23,7 +23,10 @@ public: static NullBackendFactory& getInstance (); String getName () const; - NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues); + + NodeStore::Backend* createInstance (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler); }; #endif diff --git a/modules/ripple_app/node/ripple_SqliteBackendFactory.cpp b/modules/ripple_app/node/ripple_SqliteBackendFactory.cpp index aa4d4096d..7b4a7a9dc 100644 --- a/modules/ripple_app/node/ripple_SqliteBackendFactory.cpp +++ b/modules/ripple_app/node/ripple_SqliteBackendFactory.cpp @@ -11,23 +11,25 @@ static const char* s_nodeStoreDBInit [] = "PRAGMA journal_size_limit=1582080;", #if (ULONG_MAX > UINT_MAX) && !defined (NO_SQLITE_MMAP) - "PRAGMA mmap_size=171798691840;", + "PRAGMA mmap_size=171798691840;", #endif "BEGIN TRANSACTION;", "CREATE TABLE CommittedObjects ( \ - Hash CHARACTER(64) PRIMARY KEY, \ - ObjType CHAR(1) NOT NULL, \ - LedgerIndex BIGINT UNSIGNED, \ - Object BLOB \ - );", + Hash CHARACTER(64) PRIMARY KEY, \ + ObjType CHAR(1) NOT NULL, \ + LedgerIndex BIGINT UNSIGNED, \ + Object BLOB \ + );", "END TRANSACTION;" }; static int s_nodeStoreDBCount = NUMBER (s_nodeStoreDBInit); +//------------------------------------------------------------------------------ + class SqliteBackendFactory::Backend : public NodeStore::Backend { public: @@ -42,92 +44,137 @@ public: // s << "PRAGMA cache_size=-" << String (theConfig.getSize(siHashNodeDBCache) * 1024); m_db->getDB()->executeSQL (s.toStdString ().c_str ()); - - //m_db->getDB()->executeSQL 
(boost::str (boost::format ("PRAGMA cache_size=-%d;") % - // (theConfig.getSize(siHashNodeDBCache) * 1024))); } ~Backend() { - delete m_db; } - std::string getDataBaseName() + std::string getName() { return m_name; } - bool bulkStore (const std::vector< NodeObject::pointer >& objects) + //-------------------------------------------------------------------------- + + Status fetch (void const* key, NodeObject::Ptr* pObject) { - ScopedLock sl(m_db->getDBLock()); - static SqliteStatement pStB(m_db->getDB()->getSqliteDB(), "BEGIN TRANSACTION;"); - static SqliteStatement pStE(m_db->getDB()->getSqliteDB(), "END TRANSACTION;"); - static SqliteStatement pSt(m_db->getDB()->getSqliteDB(), + Status result = ok; + + pObject->reset (); + + { + ScopedLock sl (m_db->getDBLock()); + + uint256 const hash (key); + + static SqliteStatement pSt (m_db->getDB()->getSqliteDB(), + "SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;"); + + pSt.bind (1, hash.GetHex()); + + if (pSt.isRow (pSt.step())) + { + // VFALCO NOTE This is unfortunately needed, + // the DatabaseCon creates the blob? + Blob data (pSt.getBlob (2)); + *pObject = NodeObject::createObject ( + getTypeFromString (pSt.peekString (0)), + pSt.getUInt32 (1), + data, + hash); + } + else + { + result = notFound; + } + + pSt.reset(); + } + + return result; + } + + void store (NodeObject::ref object) + { + NodeStore::Batch batch; + + batch.push_back (object); + + storeBatch (batch); + } + + void storeBatch (NodeStore::Batch const& batch) + { + // VFALCO TODO Rewrite this to use Beast::db + + ScopedLock sl (m_db->getDBLock()); + + static SqliteStatement pStB (m_db->getDB()->getSqliteDB(), "BEGIN TRANSACTION;"); + static SqliteStatement pStE (m_db->getDB()->getSqliteDB(), "END TRANSACTION;"); + static SqliteStatement pSt (m_db->getDB()->getSqliteDB(), "INSERT OR IGNORE INTO CommittedObjects " "(Hash,ObjType,LedgerIndex,Object) VALUES (?, ?, ?, ?);"); pStB.step(); pStB.reset(); - BOOST_FOREACH(NodeObject::ref object, objects) + BOOST_FOREACH (NodeObject::Ptr const& object, batch) { - bind(pSt, object); + doBind (pSt, object); + pSt.step(); pSt.reset(); } pStE.step(); pStE.reset(); - - return true; - } - NodeObject::pointer retrieve(uint256 const& hash) + void visitAll (VisitCallback& callback) { - NodeObject::pointer ret; + // No lock needed as per the visitAll() API - { - ScopedLock sl(m_db->getDBLock()); - static SqliteStatement pSt(m_db->getDB()->getSqliteDB(), - "SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;"); - - pSt.bind(1, hash.GetHex()); - - if (pSt.isRow(pSt.step())) - ret = boost::make_shared(getType(pSt.peekString(0)), pSt.getUInt32(1), pSt.getBlob(2), hash); - - pSt.reset(); - } - - return ret; - } - - void visitAll(FUNCTION_TYPE func) - { uint256 hash; static SqliteStatement pSt(m_db->getDB()->getSqliteDB(), "SELECT ObjType,LedgerIndex,Object,Hash FROM CommittedObjects;"); - while (pSt.isRow(pSt.step())) + while (pSt.isRow (pSt.step())) { hash.SetHexExact(pSt.getString(3)); - func(boost::make_shared(getType(pSt.peekString(0)), pSt.getUInt32(1), pSt.getBlob(2), hash)); + + // VFALCO NOTE This is unfortunately needed, + // the DatabaseCon creates the blob? 
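    // [Editor's note] getBlob() returns a temporary Blob by value, and
    // createObject() wants a non-const Blob it can take over (the store()
    // path documents the same overwrite semantics), so the temporary must
    // first be bound to a named local.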
+ Blob data (pSt.getBlob (2)); + NodeObject::Ptr const object (NodeObject::createObject ( + getTypeFromString (pSt.peekString (0)), + pSt.getUInt32 (1), + data, + hash)); + + callback.visitObject (object); } - pSt.reset(); + pSt.reset (); } - void bind(SqliteStatement& statement, NodeObject::ref object) + int getWriteLoad () + { + return 0; + } + + //-------------------------------------------------------------------------- + + void doBind (SqliteStatement& statement, NodeObject::ref object) { char const* type; switch (object->getType()) { - case hotLEDGER: type = "L"; break; + case hotLEDGER: type = "L"; break; case hotTRANSACTION: type = "T"; break; - case hotACCOUNT_NODE: type = "A"; break; - case hotTRANSACTION_NODE: type = "N"; break; + case hotACCOUNT_NODE: type = "A"; break; + case hotTRANSACTION_NODE: type = "N"; break; default: type = "U"; } @@ -137,20 +184,21 @@ public: statement.bindStatic(4, object->getData()); } - NodeObjectType getType(std::string const& type) + NodeObjectType getTypeFromString (std::string const& s) { - NodeObjectType htype = hotUNKNOWN; - if (!type.empty()) + NodeObjectType type = hotUNKNOWN; + + if (!s.empty ()) { - switch (type[0]) + switch (s [0]) { - case 'L': htype = hotLEDGER; break; - case 'T': htype = hotTRANSACTION; break; - case 'A': htype = hotACCOUNT_NODE; break; - case 'N': htype = hotTRANSACTION_NODE; break; + case 'L': type = hotLEDGER; break; + case 'T': type = hotTRANSACTION; break; + case 'A': type = hotACCOUNT_NODE; break; + case 'N': type = hotTRANSACTION_NODE; break; } } - return htype; + return type; } private: @@ -181,7 +229,10 @@ String SqliteBackendFactory::getName () const return "Sqlite"; } -NodeStore::Backend* SqliteBackendFactory::createInstance (size_t keyBytes, StringPairArray const& keyValues) +NodeStore::Backend* SqliteBackendFactory::createInstance ( + size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler) { return new Backend (keyBytes, keyValues ["path"].toStdString ()); } diff --git a/modules/ripple_app/node/ripple_SqliteBackendFactory.h b/modules/ripple_app/node/ripple_SqliteBackendFactory.h index dfb10b1bd..828588fd7 100644 --- a/modules/ripple_app/node/ripple_SqliteBackendFactory.h +++ b/modules/ripple_app/node/ripple_SqliteBackendFactory.h @@ -21,7 +21,10 @@ public: static SqliteBackendFactory& getInstance (); String getName () const; - NodeStore::Backend* createInstance (size_t keyBytes, StringPairArray const& keyValues); + + NodeStore::Backend* createInstance (size_t keyBytes, + StringPairArray const& keyValues, + NodeStore::Scheduler& scheduler); }; #endif diff --git a/modules/ripple_basics/types/ripple_UInt256.h b/modules/ripple_basics/types/ripple_UInt256.h index 9605dfc59..4790fea7f 100644 --- a/modules/ripple_basics/types/ripple_UInt256.h +++ b/modules/ripple_basics/types/ripple_UInt256.h @@ -365,14 +365,24 @@ public: return reinterpret_cast (pn + WIDTH); } - const unsigned char* begin () const + unsigned char const* cbegin () const noexcept { - return reinterpret_cast (pn); + return reinterpret_cast (pn); } - const unsigned char* end () const + unsigned char const* cend () const noexcept { - return reinterpret_cast (pn + WIDTH); + return reinterpret_cast (pn + WIDTH); + } + + const unsigned char* begin () const noexcept + { + return cbegin (); + } + + const unsigned char* end () const noexcept + { + return cend (); } unsigned int size () const diff --git a/modules/ripple_core/validator/ripple_Validators.cpp b/modules/ripple_core/validator/ripple_Validators.cpp index 
e722d0b62..60eaed527 100644 --- a/modules/ripple_core/validator/ripple_Validators.cpp +++ b/modules/ripple_core/validator/ripple_Validators.cpp @@ -402,7 +402,7 @@ private: class ValidatorListTests : public UnitTest { public: - ValidatorListTests () : UnitTest ("ValidatorList") + ValidatorListTests () : UnitTest ("ValidatorList", "ripple") { } diff --git a/rippled-example.cfg b/rippled-example.cfg index 8f760c3e6..2c89e205a 100644 --- a/rippled-example.cfg +++ b/rippled-example.cfg @@ -222,23 +222,33 @@ # Examples: RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE # shfArahZT9Q9ckTf3s1psJ7C7qzVN # +# +# # [node_db] # [temp_db] +# # Set the choice of databases for storing Node objects. +# # Format (without spaces): # '=' [ '|' '=' ]... +# # Examples: # type=HyperLevelDB|path=db/hashnode -# Choices for 'type': +# +# Choices for 'type' (not case-sensitive) # HyperLevelDB Use an improved version of LevelDB (preferred) # LevelDB Use Google's LevelDB database (deprecated) # MDB Use MDB # none Use no backend # KeyvaDB Use OpenCoin's KeyvaDB (experimental) +# SQLite Use SQLite +# # Required keys: # path Location to store the database (all types) +# # Optional keys: -# ... +# (none yet) +# # Notes # The 'node_db' entry configures the primary, persistent storage. # The 'temp_db' configures a look-aside cache for high volume storage diff --git a/src/cpp/ripple/NetworkOPs.cpp b/src/cpp/ripple/NetworkOPs.cpp index 6b08e8bb6..438d8cb9a 100644 --- a/src/cpp/ripple/NetworkOPs.cpp +++ b/src/cpp/ripple/NetworkOPs.cpp @@ -48,8 +48,7 @@ void NetworkOPs::processNetTimer () // VFALCO NOTE This is for diagnosing a crash on exit Application& app (getApp ()); ILoadManager& mgr (app.getLoadManager ()); - - getApp().getLoadManager ().resetDeadlockDetector (); + mgr.resetDeadlockDetector (); std::size_t const numPeers = getApp().getPeers ().getPeerVector ().size (); diff --git a/src/cpp/ripple/ripple_Application.cpp b/src/cpp/ripple/ripple_Application.cpp index 1b7c31c0f..42307dc85 100644 --- a/src/cpp/ripple/ripple_Application.cpp +++ b/src/cpp/ripple/ripple_Application.cpp @@ -16,6 +16,7 @@ class ApplicationImp : public Application , public SharedSingleton , public Validators::Listener + , public NodeStore::Scheduler , LeakChecked { public: @@ -46,15 +47,14 @@ public: , mNetOps (new NetworkOPs (&mLedgerMaster)) , m_rpcServerHandler (*mNetOps) , mTempNodeCache ("NodeCache", 16384, 90) - , m_nodeStore (NodeStore::New ( - theConfig.NODE_DB, - theConfig.FASTNODE_DB, - 16384, - 300)) , mSLECache ("LedgerEntryCache", 4096, 120) , mSNTPClient (mAuxService) , mJobQueue (mIOService) // VFALCO New stuff + , m_nodeStore (NodeStore::New ( + theConfig.NODE_DB, + theConfig.FASTNODE_DB, + *this)) , m_validators (Validators::New (this)) , mFeatures (IFeatures::New (2 * 7 * 24 * 60 * 60, 200)) // two weeks, 200/256 , mFeeVote (IFeeVote::New (10, 50 * SYSTEM_CURRENCY_PARTS, 12.5 * SYSTEM_CURRENCY_PARTS)) @@ -92,11 +92,28 @@ public: delete mWalletDB; } + //-------------------------------------------------------------------------- + + static void callScheduledTask (NodeStore::Scheduler::Task* task, Job&) + { + task->performScheduledTask (); + } + + void scheduleTask (NodeStore::Scheduler::Task* task) + { + getJobQueue ().addJob ( + jtWRITE, + "NodeObject::store", + BIND_TYPE (&ApplicationImp::callScheduledTask, task, P_1)); + } + + //-------------------------------------------------------------------------- + LocalCredentials& getLocalCredentials () { return m_localCredentials ; } - + NetworkOPs& getOPs () { return *mNetOps; @@ 
-106,62 +123,62 @@ public: { return mIOService; } - + LedgerMaster& getLedgerMaster () { return mLedgerMaster; } - + InboundLedgers& getInboundLedgers () { return m_inboundLedgers; } - + TransactionMaster& getMasterTransaction () { return mMasterTransaction; } - + NodeCache& getTempNodeCache () { return mTempNodeCache; } - + NodeStore& getNodeStore () { return *m_nodeStore; } - + JobQueue& getJobQueue () { return mJobQueue; } - + MasterLockType& getMasterLock () { return mMasterLock; } - + ILoadManager& getLoadManager () { return *m_loadManager; } - + TXQueue& getTxnQueue () { return mTxnQueue; } - + PeerDoor& getPeerDoor () { return *mPeerDoor; } - + OrderBookDB& getOrderBookDB () { return mOrderBookDB; } - + SLECache& getSLECache () { return mSLECache; @@ -176,37 +193,37 @@ public: { return *mFeatures; } - + ILoadFeeTrack& getFeeTrack () { return *mFeeTrack; } - + IFeeVote& getFeeVote () { return *mFeeVote; } - + IHashRouter& getHashRouter () { return *mHashRouter; } - + IValidations& getValidations () { return *mValidations; } - + UniqueNodeList& getUNL () { return *mUNL; } - + IProofOfWorkFactory& getProofOfWorkFactory () { return *mProofOfWorkFactory; } - + IPeers& getPeers () { return *mPeers; @@ -272,7 +289,6 @@ private: ScopedPointer mNetOps; RPCServerHandler m_rpcServerHandler; NodeCache mTempNodeCache; - ScopedPointer m_nodeStore; SLECache mSLECache; SNTPClient mSNTPClient; JobQueue mJobQueue; @@ -280,16 +296,17 @@ private: OrderBookDB mOrderBookDB; // VFALCO Clean stuff - beast::ScopedPointer m_validators; - beast::ScopedPointer mFeatures; - beast::ScopedPointer mFeeVote; - beast::ScopedPointer mFeeTrack; - beast::ScopedPointer mHashRouter; - beast::ScopedPointer mValidations; - beast::ScopedPointer mUNL; - beast::ScopedPointer mProofOfWorkFactory; - beast::ScopedPointer mPeers; - beast::ScopedPointer m_loadManager; + ScopedPointer m_nodeStore; + ScopedPointer m_validators; + ScopedPointer mFeatures; + ScopedPointer mFeeVote; + ScopedPointer mFeeTrack; + ScopedPointer mHashRouter; + ScopedPointer mValidations; + ScopedPointer mUNL; + ScopedPointer mProofOfWorkFactory; + ScopedPointer mPeers; + ScopedPointer m_loadManager; // VFALCO End Clean stuff DatabaseCon* mRpcDB; @@ -382,7 +399,7 @@ void ApplicationImp::setup () if (!theConfig.DEBUG_LOGFILE.empty ()) { - // Let BEAST_DEBUG messages go to the file but only WARNING or higher to regular output (unless verbose) + // Let debug messages go to the file but only WARNING or higher to regular output (unless verbose) Log::setLogFile (theConfig.DEBUG_LOGFILE); if (Log::getMinSeverity () > lsDEBUG) @@ -596,7 +613,7 @@ void ApplicationImp::run () // VFALCO NOTE This seems unnecessary. 
If we properly refactor the load // manager then the deadlock detector can just always be "armed" // - getApp().getLoadManager ().activateDeadlockDetector (); + getApp().getLoadManager ().activateDeadlockDetector (); } mIOService.run (); // This blocks @@ -964,7 +981,7 @@ void ApplicationImp::updateTables () } if (!theConfig.DB_IMPORT.empty()) - getApp().getNodeStore().import(theConfig.DB_IMPORT); + getApp().getNodeStore().import(theConfig.DB_IMPORT); } //------------------------------------------------------------------------------ diff --git a/src/cpp/ripple/ripple_Application.h b/src/cpp/ripple/ripple_Application.h index 605674d4d..59a2f62a2 100644 --- a/src/cpp/ripple/ripple_Application.h +++ b/src/cpp/ripple/ripple_Application.h @@ -65,12 +65,12 @@ public: char const* getFileName () const noexcept { - return m_fileName; + return m_fileName.get (); } int getLineNumber () const noexcept { - return m_lineNumber; + return m_lineNumber.get (); } private: @@ -78,19 +78,19 @@ public: void setOwner (char const* fileName, int lineNumber) { - m_fileName = fileName; - m_lineNumber = lineNumber; + m_fileName.set (fileName); + m_lineNumber.set (lineNumber); } void resetOwner () { - m_fileName = ""; - m_lineNumber = 0; + m_fileName.set (""); + m_lineNumber.set (0); } boost::recursive_mutex m_mutex; - char const* m_fileName; - int m_lineNumber; + Atomic m_fileName; + Atomic m_lineNumber; }; class ScopedLockType diff --git a/src/cpp/ripple/ripple_Main.cpp b/src/cpp/ripple/ripple_Main.cpp index 6e0a0919a..8f79353ee 100644 --- a/src/cpp/ripple/ripple_Main.cpp +++ b/src/cpp/ripple/ripple_Main.cpp @@ -156,15 +156,6 @@ static void runBeastUnitTests (std::string const& individualTest = "") { tr.runTest (individualTest.c_str ()); } - - // Report - for (int i = 0; i < tr.getNumResults (); ++i) - { - UnitTests::TestResult const& r (*tr.getResult (i)); - - for (int j = 0; j < r.messages.size (); ++j) - Log::out () << r.messages [j].toStdString (); - } } //------------------------------------------------------------------------------ @@ -257,16 +248,16 @@ int rippleMain (int argc, char** argv) p.add ("parameters", -1); // These must be added before the Application object is created + NodeStore::addBackendFactory (KeyvaDBBackendFactory::getInstance ()); + NodeStore::addBackendFactory (LevelDBBackendFactory::getInstance ()); + NodeStore::addBackendFactory (NullBackendFactory::getInstance ()); + NodeStore::addBackendFactory (SqliteBackendFactory::getInstance ()); #if RIPPLE_HYPERLEVELDB_AVAILABLE NodeStore::addBackendFactory (HyperLevelDBBackendFactory::getInstance ()); #endif - NodeStore::addBackendFactory (KeyvaDBBackendFactory::getInstance ()); - NodeStore::addBackendFactory (LevelDBBackendFactory::getInstance ()); #if RIPPLE_MDB_AVAILABLE NodeStore::addBackendFactory (MdbBackendFactory::getInstance ()); #endif - NodeStore::addBackendFactory (NullBackendFactory::getInstance ()); - NodeStore::addBackendFactory (SqliteBackendFactory::getInstance ()); if (! 
RandomNumbers::getInstance ().initialize ()) { diff --git a/src/cpp/ripple/ripple_Peer.cpp b/src/cpp/ripple/ripple_Peer.cpp index f6d33e00c..4e4b9a332 100644 --- a/src/cpp/ripple/ripple_Peer.cpp +++ b/src/cpp/ripple/ripple_Peer.cpp @@ -1554,7 +1554,7 @@ void PeerImp::recvGetObjectByHash (const boost::shared_ptrgetNodeHash () != hash) { @@ -213,7 +213,7 @@ SHAMapTreeNode::pointer SHAMap::getNode (const SHAMapNode& id, uint256 const& ha WriteLog (lsFATAL, SHAMap) << "ID: " << id; WriteLog (lsFATAL, SHAMap) << "TgtHash " << hash; WriteLog (lsFATAL, SHAMap) << "NodHash " << node->getNodeHash (); - throw std::runtime_error ("invalid node"); + Throw (std::runtime_error ("invalid node")); } #endif @@ -230,7 +230,7 @@ SHAMapTreeNode* SHAMap::getNodePointer (const SHAMapNode& id, uint256 const& has SHAMapTreeNode* ret = getNodePointerNT (id, hash); if (!ret) - throw SHAMapMissingNode (mType, id, hash); + Throw (SHAMapMissingNode (mType, id, hash)); return ret; } @@ -251,7 +251,7 @@ SHAMapTreeNode* SHAMap::getNodePointer (const SHAMapNode& id, uint256 const& has SHAMapTreeNode* ret = getNodePointerNT (id, hash, filter); if (!ret) - throw SHAMapMissingNode (mType, id, hash); + Throw (SHAMapMissingNode (mType, id, hash)); return ret; } @@ -493,7 +493,7 @@ SHAMapItem::pointer SHAMap::peekNextItem (uint256 const& id, SHAMapTreeNode::TNT firstNode = firstBelow (firstNode); if (!firstNode || firstNode->isInner ()) - throw std::runtime_error ("missing/corrupt node"); + Throw (std::runtime_error ("missing/corrupt node")); type = firstNode->getType (); return firstNode->peekItem (); @@ -531,7 +531,7 @@ SHAMapItem::pointer SHAMap::peekPrevItem (uint256 const& id) SHAMapTreeNode* item = firstBelow (node.get ()); if (!item) - throw std::runtime_error ("missing node"); + Throw (std::runtime_error ("missing node")); return item->peekItem (); } @@ -597,7 +597,7 @@ bool SHAMap::delItem (uint256 const& id) std::stack stack = getStack (id, true); if (stack.empty ()) - throw std::runtime_error ("missing node"); + Throw (std::runtime_error ("missing node")); SHAMapTreeNode::pointer leaf = stack.top (); stack.pop (); @@ -678,7 +678,7 @@ bool SHAMap::addGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasMeta std::stack stack = getStack (tag, true); if (stack.empty ()) - throw std::runtime_error ("missing node"); + Throw (std::runtime_error ("missing node")); SHAMapTreeNode::pointer node = stack.top (); stack.pop (); @@ -703,7 +703,7 @@ bool SHAMap::addGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasMeta WriteLog (lsFATAL, SHAMap) << "NewNode: " << *newNode; dump (); assert (false); - throw std::runtime_error ("invalid inner node"); + Throw (std::runtime_error ("invalid inner node")); } trackNewNode (newNode); @@ -776,7 +776,7 @@ bool SHAMap::updateGiveItem (SHAMapItem::ref item, bool isTransaction, bool hasM std::stack stack = getStack (tag, true); if (stack.empty ()) - throw std::runtime_error ("missing node"); + Throw (std::runtime_error ("missing node")); SHAMapTreeNode::pointer node = stack.top (); stack.pop (); @@ -810,7 +810,7 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternal (const SHAMapNode& id, uint256 SHAMapTreeNode::pointer ret = fetchNodeExternalNT (id, hash); if (!ret) - throw SHAMapMissingNode (mType, id, hash); + Throw (SHAMapMissingNode (mType, id, hash)); return ret; } @@ -825,8 +825,7 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternalNT (const SHAMapNode& id, uint2 // These are for diagnosing a crash on exit Application& app (getApp ()); NodeStore& nodeStore (app.getNodeStore 
()); - - NodeObject::pointer obj (getApp().getNodeStore ().retrieve (hash)); + NodeObject::pointer obj (nodeStore.fetch (hash)); if (!obj) { @@ -889,8 +888,11 @@ bool SHAMap::fetchRoot (uint256 const& hash, SHAMapSyncFilter* filter) } SHAMapTreeNode::pointer newRoot = fetchNodeExternalNT(SHAMapNode(), hash); + if (newRoot) + { root = newRoot; + } else { Blob nodeData; @@ -939,7 +941,7 @@ int SHAMap::flushDirty (DirtyMap& map, int maxNodes, NodeObjectType t, uint32 se #endif - getApp().getNodeStore ().store (t, seq, s.peekData (), it->second->getNodeHash ()); + getApp().getNodeStore ().store (t, seq, s.modData (), it->second->getNodeHash ()); if (flushed++ >= maxNodes) return flushed; diff --git a/src/cpp/ripple/ripple_SHAMapNode.cpp b/src/cpp/ripple/ripple_SHAMapNode.cpp index dcd8c0b3d..86062eb56 100644 --- a/src/cpp/ripple/ripple_SHAMapNode.cpp +++ b/src/cpp/ripple/ripple_SHAMapNode.cpp @@ -128,7 +128,7 @@ SHAMapNode SHAMapNode::getChildNodeID (int m) const // Which branch would contain the specified hash int SHAMapNode::selectBranch (uint256 const& hash) const { -#ifdef PARANOID +#if RIPPLE_VERIFY_NODEOBJECT_KEYS if (mDepth >= 64) { diff --git a/src/cpp/ripple/ripple_SHAMapSync.cpp b/src/cpp/ripple/ripple_SHAMapSync.cpp index 941f3b4ee..d9579600f 100644 --- a/src/cpp/ripple/ripple_SHAMapSync.cpp +++ b/src/cpp/ripple/ripple_SHAMapSync.cpp @@ -243,7 +243,7 @@ SHAMapAddNode SHAMap::addRootNode (Blob const& rootNode, SHANodeFormat format, { Serializer s; root->addRaw (s, snfPREFIX); - filter->gotNode (false, *root, root->getNodeHash (), s.peekData (), root->getType ()); + filter->gotNode (false, *root, root->getNodeHash (), s.modData (), root->getType ()); } return SHAMapAddNode::useful (); @@ -281,7 +281,7 @@ SHAMapAddNode SHAMap::addRootNode (uint256 const& hash, Blob const& rootNode, SH { Serializer s; root->addRaw (s, snfPREFIX); - filter->gotNode (false, *root, root->getNodeHash (), s.peekData (), root->getType ()); + filter->gotNode (false, *root, root->getNodeHash (), s.modData (), root->getType ()); } return SHAMapAddNode::useful (); @@ -345,7 +345,7 @@ SHAMapAddNode SHAMap::addKnownNode (const SHAMapNode& node, Blob const& rawNode, { Serializer s; newNode->addRaw (s, snfPREFIX); - filter->gotNode (false, node, iNode->getChildHash (branch), s.peekData (), newNode->getType ()); + filter->gotNode (false, node, iNode->getChildHash (branch), s.modData (), newNode->getType ()); } mTNByID[node] = newNode; diff --git a/src/cpp/ripple/ripple_SHAMapSyncFilter.h b/src/cpp/ripple/ripple_SHAMapSyncFilter.h index 1b2ec89b4..f4c83d618 100644 --- a/src/cpp/ripple/ripple_SHAMapSyncFilter.h +++ b/src/cpp/ripple/ripple_SHAMapSyncFilter.h @@ -12,29 +12,18 @@ class SHAMapSyncFilter { public: - SHAMapSyncFilter () - { - } - - virtual ~SHAMapSyncFilter () - { - } + virtual ~SHAMapSyncFilter () { } + // Note that the nodeData is overwritten by this call virtual void gotNode (bool fromFilter, SHAMapNode const& id, uint256 const& nodeHash, - Blob const& nodeData, - SHAMapTreeNode::TNType type) - { - } + Blob& nodeData, + SHAMapTreeNode::TNType type) = 0; virtual bool haveNode (SHAMapNode const& id, uint256 const& nodeHash, - Blob& nodeData) - { - return false; - } + Blob& nodeData) = 0; }; #endif -// vim:ts=4 diff --git a/src/cpp/ripple/ripple_SHAMapSyncFilters.cpp b/src/cpp/ripple/ripple_SHAMapSyncFilters.cpp index db451f56c..157a903f2 100644 --- a/src/cpp/ripple/ripple_SHAMapSyncFilters.cpp +++ b/src/cpp/ripple/ripple_SHAMapSyncFilters.cpp @@ -9,7 +9,7 @@ ConsensusTransSetSF::ConsensusTransSetSF () } void 
ConsensusTransSetSF::gotNode (bool fromFilter, const SHAMapNode& id, uint256 const& nodeHash, - Blob const& nodeData, SHAMapTreeNode::TNType type) + Blob& nodeData, SHAMapTreeNode::TNType type) { if (fromFilter) return; @@ -70,7 +70,7 @@ AccountStateSF::AccountStateSF (uint32 ledgerSeq) void AccountStateSF::gotNode (bool fromFilter, SHAMapNode const& id, uint256 const& nodeHash, - Blob const& nodeData, + Blob& nodeData, SHAMapTreeNode::TNType) { getApp().getNodeStore ().store (hotACCOUNT_NODE, mLedgerSeq, nodeData, nodeHash); @@ -93,7 +93,7 @@ TransactionStateSF::TransactionStateSF (uint32 ledgerSeq) void TransactionStateSF::gotNode (bool fromFilter, SHAMapNode const& id, uint256 const& nodeHash, - Blob const& nodeData, + Blob& nodeData, SHAMapTreeNode::TNType type) { getApp().getNodeStore ().store ( diff --git a/src/cpp/ripple/ripple_SHAMapSyncFilters.h b/src/cpp/ripple/ripple_SHAMapSyncFilters.h index d41593bb7..0bf834b8b 100644 --- a/src/cpp/ripple/ripple_SHAMapSyncFilters.h +++ b/src/cpp/ripple/ripple_SHAMapSyncFilters.h @@ -17,10 +17,11 @@ class ConsensusTransSetSF : public SHAMapSyncFilter public: ConsensusTransSetSF (); + // Note that the nodeData is overwritten by this call void gotNode (bool fromFilter, SHAMapNode const& id, uint256 const& nodeHash, - Blob const& nodeData, + Blob& nodeData, SHAMapTreeNode::TNType); bool haveNode (SHAMapNode const& id, @@ -35,10 +36,11 @@ class AccountStateSF : public SHAMapSyncFilter public: explicit AccountStateSF (uint32 ledgerSeq); + // Note that the nodeData is overwritten by this call void gotNode (bool fromFilter, SHAMapNode const& id, uint256 const& nodeHash, - Blob const& nodeData, + Blob& nodeData, SHAMapTreeNode::TNType); bool haveNode (SHAMapNode const& id, @@ -56,10 +58,11 @@ class TransactionStateSF : public SHAMapSyncFilter public: explicit TransactionStateSF (uint32 ledgerSeq); + // Note that the nodeData is overwritten by this call void gotNode (bool fromFilter, SHAMapNode const& id, uint256 const& nodeHash, - Blob const& nodeData, + Blob& nodeData, SHAMapTreeNode::TNType); bool haveNode (SHAMapNode const& id, diff --git a/src/cpp/ripple/ripple_SHAMapTreeNode.cpp b/src/cpp/ripple/ripple_SHAMapTreeNode.cpp index c406808f0..1d244f64e 100644 --- a/src/cpp/ripple/ripple_SHAMapTreeNode.cpp +++ b/src/cpp/ripple/ripple_SHAMapTreeNode.cpp @@ -207,7 +207,7 @@ SHAMapTreeNode::SHAMapTreeNode (const SHAMapNode& id, Blob const& rawNode, uint3 if (hashValid) { mHash = hash; -#ifdef PARANOID +#if RIPPLE_VERIFY_NODEOBJECT_KEYS updateHash (); assert (mHash == hash); #endif @@ -225,7 +225,7 @@ bool SHAMapTreeNode::updateHash () if (mIsBranch != 0) { nh = Serializer::getPrefixHash (HashPrefix::innerNode, reinterpret_cast (mHashes), sizeof (mHashes)); -#ifdef PARANOID +#if RIPPLE_VERIFY_NODEOBJECT_KEYS Serializer s; s.add32 (HashPrefix::innerNode);
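
// [Editor's note] The PARANOID -> RIPPLE_VERIFY_NODEOBJECT_KEYS conversions
// above also switch from #ifdef to #if, so the macro is meant to be defined
// as 1 or 0 (an undefined macro evaluates as 0 inside #if). A guarded
// verification site therefore looks like this sketch:
#if RIPPLE_VERIFY_NODEOBJECT_KEYS
    // Recompute the key from the payload and require a match.
    assert (hash == Serializer::getSHA512Half (data));
#endif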