Cleanup nodestore backend classes:

* Add README.md
* Add missing std::move calls
* Refactor visitAll in backend
This commit is contained in:
Miguel Portilla
2014-04-21 18:20:19 -04:00
committed by Vinnie Falco
parent 6e428054ef
commit 98612a7cd6
24 changed files with 263 additions and 184 deletions

View File

@@ -3045,7 +3045,6 @@
<ClInclude Include="..\..\src\ripple_core\nodestore\api\Scheduler.h" />
<ClInclude Include="..\..\src\ripple_core\nodestore\api\Task.h" />
<ClInclude Include="..\..\src\ripple_core\nodestore\api\Types.h" />
<ClInclude Include="..\..\src\ripple_core\nodestore\api\VisitCallback.h" />
<ClInclude Include="..\..\src\ripple_core\nodestore\backend\HyperDBFactory.h" />
<ClInclude Include="..\..\src\ripple_core\nodestore\backend\LevelDBFactory.h" />
<ClInclude Include="..\..\src\ripple_core\nodestore\backend\MemoryFactory.h" />
@@ -3249,6 +3248,7 @@
<None Include="..\..\src\ripple\validators\README.md" />
<None Include="..\..\src\ripple_app\ledger\TODO.md" />
<None Include="..\..\src\ripple_app\TODO.md" />
<None Include="..\..\src\ripple_core\nodestore\README.md" />
<None Include="..\..\src\ripple_overlay\README.md" />
<None Include="..\..\src\ripple_overlay\TODO.md" />
<None Include="..\..\src\ripple_rpc\README.md" />

View File

@@ -2474,9 +2474,6 @@
<ClInclude Include="..\..\src\ripple_core\nodestore\api\Factory.h">
<Filter>[2] Old Ripple\ripple_core\nodestore\api</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple_core\nodestore\api\VisitCallback.h">
<Filter>[2] Old Ripple\ripple_core\nodestore\api</Filter>
</ClInclude>
<ClInclude Include="..\..\src\ripple_core\nodestore\backend\NullFactory.h">
<Filter>[2] Old Ripple\ripple_core\nodestore\backend</Filter>
</ClInclude>
@@ -3473,6 +3470,9 @@
<None Include="..\..\src\ripple_overlay\TODO.md">
<Filter>[2] Old Ripple\ripple_overlay</Filter>
</None>
<None Include="..\..\src\ripple_core\nodestore\README.md">
<Filter>[2] Old Ripple\ripple_core\nodestore</Filter>
</None>
</ItemGroup>
<ItemGroup>
<CustomBuild Include="..\..\src\ripple\proto\ripple.proto">

View File

@@ -131,7 +131,7 @@ bool InboundLedger::tryLocal ()
"Ledger base found in fetch pack";
mLedger = boost::make_shared<Ledger> (data, true);
getApp().getNodeStore ().store (hotLEDGER,
mLedger->getLedgerSeq (), data, mHash);
mLedger->getLedgerSeq (), std::move (data), mHash);
}
else
{
@@ -790,7 +790,7 @@ bool InboundLedger::takeBase (const std::string& data)
s.add32 (HashPrefix::ledgerMaster);
s.addRaw (data);
getApp().getNodeStore ().store (hotLEDGER,
mLedger->getLedgerSeq (), s.modData (), mHash);
mLedger->getLedgerSeq (), std::move (s.modData ()), mHash);
progress ();

View File

@@ -589,7 +589,7 @@ bool Ledger::saveValidatedLedger (bool current)
Serializer s (128);
s.add32 (HashPrefix::ledgerMaster);
addRaw (s);
getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, s.modData (), mHash);
getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, std::move (s.modData ()), mHash);
}
AcceptedLedger::pointer aLedger;

View File

@@ -95,7 +95,7 @@ public:
*pObject = NodeObject::createObject (
getTypeFromString (pSt.peekString (0)),
pSt.getUInt32 (1),
data,
std::move(data),
hash);
}
else
@@ -145,9 +145,9 @@ public:
pStE.reset();
}
void visitAll (NodeStore::VisitCallback& callback)
void for_each (std::function <void(NodeObject::Ptr)> f)
{
// No lock needed as per the visitAll() API
// No lock needed as per the for_each() API
uint256 hash;
@@ -164,10 +164,10 @@ public:
NodeObject::Ptr const object (NodeObject::createObject (
getTypeFromString (pSt.peekString (0)),
pSt.getUInt32 (1),
data,
std::move(data),
hash));
callback.visitObject (object);
f (object);
}
pSt.reset ();

View File

@@ -1124,7 +1124,7 @@ int SHAMap::flushDirty (NodeMap& map, int maxNodes, NodeObjectType t, std::uint3
#endif
getApp().getNodeStore ().store (t, seq, s.modData (), it->second->getNodeHash ());
getApp().getNodeStore ().store (t, seq, std::move (s.modData ()), it->second->getNodeHash ());
if (flushed++ >= maxNodes)
return flushed;

View File

@@ -90,7 +90,7 @@ void AccountStateSF::gotNode (bool fromFilter,
Blob& nodeData,
SHAMapTreeNode::TNType)
{
getApp().getNodeStore ().store (hotACCOUNT_NODE, mLedgerSeq, nodeData, nodeHash);
getApp().getNodeStore ().store (hotACCOUNT_NODE, mLedgerSeq, std::move (nodeData), nodeHash);
}
bool AccountStateSF::haveNode (SHAMapNode const& id,
@@ -116,7 +116,7 @@ void TransactionStateSF::gotNode (bool fromFilter,
getApp().getNodeStore ().store (
(type == SHAMapTreeNode::tnTRANSACTION_NM) ? hotTRANSACTION : hotTRANSACTION_NODE,
mLedgerSeq,
nodeData,
std::move (nodeData),
nodeHash);
}

View File

@@ -0,0 +1,25 @@
# NodeStore
## Introduction
The NodeStore provides an interface that stores, in a persistent database, the collection of
NodeObjects that rippled uses as its primary representation of ledger items.
## Module directory structure
nodestore
|-api // Public Interface
|
|-backend // Factory classes for various databases
|
|-impl // Private Implementation
|
|-test // Unit tests
The NodeStore class is a simple object that the Ledger uses to store entries. It has an enumeration type, a hash, a ledger index and a Blob which stores arbitrary data.
# Document WIP notes
If the MemoryFactory backend database is used, do we lose persistence?

View File

@@ -76,11 +76,9 @@ public:
This is usually called during import.
@note This routine will not be called concurrently with itself
or other methods.
@see import, VisitCallback
@see import
*/
virtual void visitAll (VisitCallback& callback) = 0;
// VFALCO TODO Implement
//virtual void visitAll (std::function <void (NodeObject::Ptr)> f) = 0;
virtual void for_each (std::function <void (NodeObject::Ptr)> f) = 0;
/** Estimate the number of write operations pending. */
virtual int getWriteLoad () = 0;

View File

@@ -98,7 +98,7 @@ public:
*/
virtual void store (NodeObjectType type,
std::uint32_t ledgerIndex,
Blob& data,
Blob&& data,
uint256 const& hash) = 0;
/** Visit every object in the database
@@ -108,10 +108,10 @@ public:
or other methods.
@see import
*/
virtual void visitAll (VisitCallback& callback) = 0;
virtual void for_each(std::function <void(NodeObject::Ptr)> f) = 0;
/** Import objects from another database. */
virtual void import (Database& sourceDatabase) = 0;
virtual void import (Database& source) = 0;
/** Retrieve the estimated number of pending write operations.
This is used for diagnostics.

View File

@@ -77,7 +77,7 @@ public:
// This constructor is private, use createObject instead.
NodeObject (NodeObjectType type,
LedgerIndex ledgerIndex,
Blob& data,
Blob&& data,
uint256 const& hash,
PrivateAccess);
@@ -94,7 +94,7 @@ public:
*/
static Ptr createObject (NodeObjectType type,
LedgerIndex ledgerIndex,
Blob& data,
Blob&& data,
uint256 const& hash);
/** Retrieve the type of this object.

View File

@@ -86,14 +86,16 @@ public:
{
}
std::string getName()
std::string
getName()
{
return m_name;
}
//--------------------------------------------------------------------------
Status fetch (void const* key, NodeObject::Ptr* pObject)
Status
fetch (void const* key, NodeObject::Ptr* pObject)
{
pObject->reset ();
@@ -140,21 +142,22 @@ public:
return status;
}
void store (NodeObject::ref object)
void
store (NodeObject::ref object)
{
m_batch.store (object);
}
void storeBatch (Batch const& batch)
void
storeBatch (Batch const& batch)
{
hyperleveldb::WriteBatch wb;
EncodedBlob encoded;
// VFALCO Use range based for
BOOST_FOREACH (NodeObject::ref object, batch)
for (auto const& e : batch)
{
encoded.prepare (object);
encoded.prepare (e);
wb.Put (
hyperleveldb::Slice (reinterpret_cast <char const*> (
@@ -168,7 +171,8 @@ public:
m_db->Write (options, &wb).ok ();
}
void visitAll (VisitCallback& callback)
void
for_each (std::function <void (NodeObject::Ptr)> f)
{
hyperleveldb::ReadOptions const options;
@@ -183,9 +187,7 @@ public:
if (decoded.wasOk ())
{
NodeObject::Ptr object (decoded.createObject ());
callback.visitObject (object);
f (decoded.createObject ());
}
else
{
@@ -204,14 +206,16 @@ public:
}
}
int getWriteLoad ()
int
getWriteLoad ()
{
return m_batch.getWriteLoad ();
}
//--------------------------------------------------------------------------
void writeBatch (Batch const& batch)
void
writeBatch (Batch const& batch)
{
storeBatch (batch);
}
@@ -222,13 +226,18 @@ public:
class HyperDBFactory : public NodeStore::Factory
{
public:
beast::String getName () const
beast::String
getName () const
{
return "HyperLevelDB";
}
std::unique_ptr <Backend> createInstance (size_t keyBytes,
Parameters const& keyValues, Scheduler& scheduler, beast::Journal journal)
std::unique_ptr <Backend>
createInstance (
size_t keyBytes,
Parameters const& keyValues,
Scheduler& scheduler,
beast::Journal journal)
{
return std::make_unique <HyperDBBackend> (
keyBytes, keyValues, scheduler, journal);
@@ -237,7 +246,8 @@ public:
//------------------------------------------------------------------------------
std::unique_ptr <Factory> make_HyperDBFactory ()
std::unique_ptr <Factory>
make_HyperDBFactory ()
{
return std::make_unique <HyperDBFactory> ();
}

View File

@@ -79,14 +79,16 @@ public:
m_db.reset (db);
}
std::string getName()
std::string
getName()
{
return m_name;
}
//--------------------------------------------------------------------------
Status fetch (void const* key, NodeObject::Ptr* pObject)
Status
fetch (void const* key, NodeObject::Ptr* pObject)
{
pObject->reset ();
@@ -132,20 +134,22 @@ public:
return status;
}
void store (NodeObject::ref object)
void
store (NodeObject::ref object)
{
m_batch.store (object);
}
void storeBatch (Batch const& batch)
void
storeBatch (Batch const& batch)
{
leveldb::WriteBatch wb;
EncodedBlob encoded;
BOOST_FOREACH (NodeObject::ref object, batch)
for (auto const& e : batch)
{
encoded.prepare (object);
encoded.prepare (e);
wb.Put (
leveldb::Slice (reinterpret_cast <char const*> (
@@ -159,7 +163,8 @@ public:
m_db->Write (options, &wb).ok ();
}
void visitAll (VisitCallback& callback)
void
for_each (std::function <void(NodeObject::Ptr)> f)
{
leveldb::ReadOptions const options;
@@ -175,33 +180,35 @@ public:
if (decoded.wasOk ())
{
NodeObject::Ptr object (decoded.createObject ());
callback.visitObject (object);
f (decoded.createObject ());
}
else
{
// Uh oh, corrupted data!
WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ());
if (m_journal.fatal) m_journal.fatal <<
"Corrupt NodeObject #" << uint256(it->key ().data ());
}
}
else
{
// VFALCO NOTE What does it mean to find an
// incorrectly sized key? Corruption?
WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size ();
if (m_journal.fatal) m_journal.fatal <<
"Bad key size = " << it->key().size();
}
}
}
int getWriteLoad ()
int
getWriteLoad ()
{
return m_batch.getWriteLoad ();
}
//--------------------------------------------------------------------------
void writeBatch (Batch const& batch)
void
writeBatch (Batch const& batch)
{
storeBatch (batch);
}
@@ -230,12 +237,14 @@ public:
{
}
beast::String getName () const
beast::String
getName () const
{
return "LevelDB";
}
std::unique_ptr <Backend> createInstance (
std::unique_ptr <Backend>
createInstance(
size_t keyBytes,
Parameters const& keyValues,
Scheduler& scheduler,
@@ -248,7 +257,8 @@ public:
//------------------------------------------------------------------------------
std::unique_ptr <Factory> make_LevelDBFactory ()
std::unique_ptr <Factory>
make_LevelDBFactory ()
{
return std::make_unique <LevelDBFactory> ();
}

View File

@@ -41,14 +41,16 @@ public:
{
}
std::string getName ()
std::string
getName ()
{
return "memory";
}
//--------------------------------------------------------------------------
Status fetch (void const* key, NodeObject::Ptr* pObject)
Status
fetch (void const* key, NodeObject::Ptr* pObject)
{
uint256 const hash (uint256::fromVoid (key));
@@ -66,7 +68,8 @@ public:
return ok;
}
void store (NodeObject::ref object)
void
store (NodeObject::ref object)
{
Map::iterator iter = m_map.find (object->getHash ());
@@ -76,19 +79,22 @@ public:
}
}
void storeBatch (Batch const& batch)
void
storeBatch (Batch const& batch)
{
for (std::size_t i = 0; i < batch.size (); ++i)
store (batch [i]);
for (auto const& e : batch)
store (e);
}
void visitAll (VisitCallback& callback)
void
for_each (std::function <void(NodeObject::Ptr)> f)
{
for (Map::const_iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
callback.visitObject (iter->second);
for (auto const& e : m_map)
f (e.second);
}
int getWriteLoad ()
int
getWriteLoad ()
{
return 0;
}
@@ -99,14 +105,18 @@ public:
class MemoryFactory : public Factory
{
public:
beast::String getName () const
beast::String
getName () const
{
return "Memory";
}
std::unique_ptr <Backend> createInstance (
size_t keyBytes, Parameters const& keyValues,
Scheduler& scheduler, beast::Journal journal)
std::unique_ptr <Backend>
createInstance (
size_t keyBytes,
Parameters const& keyValues,
Scheduler& scheduler,
beast::Journal journal)
{
return std::make_unique <MemoryBackend> (
keyBytes, keyValues, scheduler, journal);
@@ -115,7 +125,8 @@ public:
//------------------------------------------------------------------------------
std::unique_ptr <Factory> make_MemoryFactory ()
std::unique_ptr <Factory>
make_MemoryFactory ()
{
return std::make_unique <MemoryFactory> ();
}

View File

@@ -31,29 +31,35 @@ public:
{
}
std::string getName()
std::string
getName()
{
return std::string ();
}
Status fetch (void const*, NodeObject::Ptr*)
Status
fetch (void const*, NodeObject::Ptr*)
{
return notFound;
}
void store (NodeObject::ref object)
void
store (NodeObject::ref object)
{
}
void storeBatch (Batch const& batch)
void
storeBatch (Batch const& batch)
{
}
void visitAll (VisitCallback& callback)
void
for_each (std::function <void(NodeObject::Ptr)> f)
{
}
int getWriteLoad ()
int
getWriteLoad ()
{
return 0;
}
@@ -71,8 +77,11 @@ public:
return "none";
}
std::unique_ptr <Backend> createInstance (
size_t, Parameters const&, Scheduler&, beast::Journal)
std::unique_ptr <Backend>
createInstance (
size_t,
Parameters const&,
Scheduler&, beast::Journal)
{
return std::make_unique <NullBackend> ();
}

View File

@@ -45,8 +45,10 @@ public:
void (*f)(void*);
void* a;
};
static void thread_entry (void* ptr)
static
void
thread_entry (void* ptr)
{
ThreadParams* const p (reinterpret_cast <ThreadParams*> (ptr));
void (*f)(void*) = p->f;
@@ -62,7 +64,8 @@ public:
(*f)(a);
}
void StartThread (void (*f)(void*), void* a)
void
StartThread (void (*f)(void*), void* a)
{
ThreadParams* const p (new ThreadParams (f, a));
EnvWrapper::StartThread (&RocksDBEnv::thread_entry, p);
@@ -164,14 +167,16 @@ public:
{
}
std::string getName()
std::string
getName()
{
return m_name;
}
//--------------------------------------------------------------------------
Status fetch (void const* key, NodeObject::Ptr* pObject)
Status
fetch (void const* key, NodeObject::Ptr* pObject)
{
pObject->reset ();
@@ -220,20 +225,22 @@ public:
return status;
}
void store (NodeObject::ref object)
void
store (NodeObject::ref object)
{
m_batch.store (object);
}
void storeBatch (Batch const& batch)
void
storeBatch (Batch const& batch)
{
rocksdb::WriteBatch wb;
EncodedBlob encoded;
BOOST_FOREACH (NodeObject::ref object, batch)
for (auto const& e : batch)
{
encoded.prepare (object);
encoded.prepare (e);
wb.Put (
rocksdb::Slice (reinterpret_cast <char const*> (
@@ -247,7 +254,8 @@ public:
m_db->Write (options, &wb).ok ();
}
void visitAll (VisitCallback& callback)
void
for_each (std::function <void(NodeObject::Ptr)> f)
{
rocksdb::ReadOptions const options;
@@ -263,33 +271,35 @@ public:
if (decoded.wasOk ())
{
NodeObject::Ptr object (decoded.createObject ());
callback.visitObject (object);
f (decoded.createObject ());
}
else
{
// Uh oh, corrupted data!
WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ());
if (m_journal.fatal) m_journal.fatal <<
"Corrupt NodeObject #" << uint256 (it->key ().data ());
}
}
else
{
// VFALCO NOTE What does it mean to find an
// incorrectly sized key? Corruption?
WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size ();
if (m_journal.fatal) m_journal.fatal <<
"Bad key size = " << it->key ().size ();
}
}
}
int getWriteLoad ()
int
getWriteLoad ()
{
return m_batch.getWriteLoad ();
}
//--------------------------------------------------------------------------
void writeBatch (Batch const& batch)
void
writeBatch (Batch const& batch)
{
storeBatch (batch);
}
@@ -317,14 +327,18 @@ public:
{
}
beast::String getName () const
beast::String
getName () const
{
return "RocksDB";
}
std::unique_ptr <Backend> createInstance (
size_t keyBytes, Parameters const& keyValues,
Scheduler& scheduler, beast::Journal journal)
std::unique_ptr <Backend>
createInstance (
size_t keyBytes,
Parameters const& keyValues,
Scheduler& scheduler,
beast::Journal journal)
{
return std::make_unique <RocksDBBackend> (
keyBytes, keyValues, scheduler, journal, &m_env);
@@ -333,7 +347,8 @@ public:
//------------------------------------------------------------------------------
std::unique_ptr <Factory> make_RocksDBFactory ()
std::unique_ptr <Factory>
make_RocksDBFactory ()
{
return std::make_unique <RocksDBFactory> ();
}

View File

@@ -34,7 +34,8 @@ BatchWriter::~BatchWriter ()
waitForWriting ();
}
void BatchWriter::store (NodeObject::ref object)
void
BatchWriter::store (NodeObject::ref object)
{
std::lock_guard<decltype(mWriteMutex)> sl (mWriteMutex);
@@ -48,19 +49,22 @@ void BatchWriter::store (NodeObject::ref object)
}
}
int BatchWriter::getWriteLoad ()
int
BatchWriter::getWriteLoad ()
{
std::lock_guard<decltype(mWriteMutex)> sl (mWriteMutex);
return std::max (mWriteLoad, static_cast<int> (mWriteSet.size ()));
}
void BatchWriter::performScheduledTask ()
void
BatchWriter::performScheduledTask ()
{
writeBatch ();
}
void BatchWriter::writeBatch ()
void
BatchWriter::writeBatch ()
{
for (;;)
{
@@ -90,7 +94,8 @@ void BatchWriter::writeBatch ()
}
}
void BatchWriter::waitForWriting ()
void
BatchWriter::waitForWriting ()
{
std::unique_lock <decltype(mWriteMutex)> sl (mWriteMutex);

View File

@@ -85,8 +85,8 @@ public:
m_readGenCondVar.notify_all ();
}
BOOST_FOREACH (std::thread& th, m_readThreads)
th.join ();
for (auto& e : m_readThreads)
e.join();
}
beast::String getName () const
@@ -94,7 +94,6 @@ public:
return m_backend->getName ();
}
//------------------------------------------------------------------------------
bool asyncFetch (uint256 const& hash, NodeObject::pointer& object)
@@ -190,14 +189,15 @@ public:
if (! foundInFastBackend)
{
// If we have a fast back end, store it there for later.
// If we have a fast back end, store it there for later.
//
if (m_fastBackend != nullptr)
m_fastBackend->store (obj);
// Since this was a 'hard' fetch, we will log it.
//
WriteLog (lsTRACE, NodeObject) << "HOS: " << hash << " fetch: in db";
if (m_journal.trace) m_journal.trace <<
"HOS: " << hash << " fetch: in db";
}
}
@@ -220,11 +220,13 @@ public:
case dataCorrupt:
// VFALCO TODO Deal with encountering corrupt data!
//
WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << hash;
if (m_journal.fatal) m_journal.fatal <<
"Corrupt NodeObject #" << hash;
break;
default:
WriteLog (lsWARNING, NodeObject) << "Unknown status=" << status;
if (m_journal.warning) m_journal.warning <<
"Unknown status=" << status;
break;
}
@@ -235,10 +237,10 @@ public:
void store (NodeObjectType type,
std::uint32_t index,
Blob& data,
Blob&& data,
uint256 const& hash)
{
NodeObject::Ptr object = NodeObject::createObject (type, index, data, hash);
NodeObject::Ptr object = NodeObject::createObject(type, index, std::move(data), hash);
#if RIPPLE_VERIFY_NODEOBJECT_KEYS
assert (hash == Serializer::getSHA512Half (data));
@@ -326,52 +328,30 @@ public:
//------------------------------------------------------------------------------
void visitAll (VisitCallback& callback)
void for_each (std::function <void(NodeObject::Ptr)> f)
{
m_backend->visitAll (callback);
m_backend->for_each (f);
}
void import (Database& sourceDatabase)
void import (Database& source)
{
class ImportVisitCallback : public VisitCallback
Batch b;
b.reserve (batchWritePreallocationSize);
source.for_each ([&](NodeObject::Ptr object)
{
public:
explicit ImportVisitCallback (Backend& backend)
: m_backend (backend)
if (b.size () >= batchWritePreallocationSize)
{
m_objects.reserve (batchWritePreallocationSize);
this->m_backend->storeBatch (b);
b.clear ();
b.reserve (batchWritePreallocationSize);
}
~ImportVisitCallback ()
{
if (! m_objects.empty ())
m_backend.storeBatch (m_objects);
}
b.push_back (object);
});
void visitObject (NodeObject::Ptr const& object)
{
if (m_objects.size () >= batchWritePreallocationSize)
{
m_backend.storeBatch (m_objects);
m_objects.clear ();
m_objects.reserve (batchWritePreallocationSize);
}
m_objects.push_back (object);
}
private:
Backend& m_backend;
Batch m_objects;
};
//--------------------------------------------------------------------------
ImportVisitCallback callback (*m_backend);
sourceDatabase.visitAll (callback);
if (! b.empty ())
m_backend->storeBatch (b);
}
};

View File

@@ -87,7 +87,7 @@ NodeObject::Ptr DecodedBlob::createObject ()
memcpy (data.data (), m_objectData, m_dataBytes);
object = NodeObject::createObject (
m_objectType, m_ledgerIndex, data, uint256::fromVoid (m_key));
m_objectType, m_ledgerIndex, std::move(data), uint256::fromVoid(m_key));
}
return object;

View File

@@ -28,13 +28,15 @@ DummyScheduler::~DummyScheduler ()
{
}
void DummyScheduler::scheduleTask (Task& task)
void
DummyScheduler::scheduleTask (Task& task)
{
// Invoke the task synchronously.
task.performScheduledTask();
}
void DummyScheduler::scheduledTasksStopped ()
void
DummyScheduler::scheduledTasksStopped ()
{
}

View File

@@ -20,7 +20,8 @@
namespace ripple {
namespace NodeStore {
void EncodedBlob::prepare (NodeObject::Ptr const& object)
void
EncodedBlob::prepare (NodeObject::Ptr const& object)
{
m_key = object->getHash ().begin ();

View File

@@ -38,12 +38,14 @@ public:
{
}
void add_factory (std::unique_ptr <Factory> factory)
void
add_factory (std::unique_ptr <Factory> factory)
{
m_list.emplace_back (std::move (factory));
}
void add_known_factories ()
void
add_known_factories ()
{
// This is part of the ripple_app module since it has dependencies
//addFactory (make_SqliteFactory ());
@@ -62,7 +64,8 @@ public:
#endif
}
Factory* find (std::string const& name) const
Factory*
find (std::string const& name) const
{
for (List::const_iterator iter (m_list.begin ());
iter != m_list.end (); ++iter)
@@ -71,7 +74,8 @@ public:
return nullptr;
}
static void missing_backend ()
static void
missing_backend ()
{
throw std::runtime_error (
"Your rippled.cfg is missing a [node_db] entry, "
@@ -79,8 +83,11 @@ public:
);
}
std::unique_ptr <Backend> make_Backend (Parameters const& parameters,
Scheduler& scheduler, beast::Journal journal)
std::unique_ptr <Backend>
make_Backend (
Parameters const& parameters,
Scheduler& scheduler,
beast::Journal journal)
{
std::unique_ptr <Backend> backend;
@@ -108,10 +115,14 @@ public:
return backend;
}
std::unique_ptr <Database> make_Database (std::string const& name,
Scheduler& scheduler, beast::Journal journal, int readThreads,
Parameters const& backendParameters,
Parameters fastBackendParameters)
std::unique_ptr <Database>
make_Database (
std::string const& name,
Scheduler& scheduler,
beast::Journal journal,
int readThreads,
Parameters const& backendParameters,
Parameters fastBackendParameters)
{
std::unique_ptr <Backend> backend (make_Backend (
backendParameters, scheduler, journal));
@@ -132,8 +143,8 @@ Manager::~Manager ()
{
}
std::unique_ptr <Manager> make_Manager (
std::vector <std::unique_ptr <Factory>> factories)
std::unique_ptr <Manager>
make_Manager (std::vector <std::unique_ptr <Factory>> factories)
{
return std::make_unique <ManagerImp> (std::move (factories));
}

View File

@@ -26,50 +26,52 @@ SETUP_LOG (NodeObject)
NodeObject::NodeObject (
NodeObjectType type,
LedgerIndex ledgerIndex,
Blob& data,
Blob&& data,
uint256 const& hash,
PrivateAccess)
: mType (type)
, mHash (hash)
, mLedgerIndex (ledgerIndex)
{
// Take over the caller's buffer
mData.swap (data);
mData = std::move (data);
}
NodeObject::Ptr NodeObject::createObject (
NodeObjectType type,
LedgerIndex ledgerIndex,
Blob& data,
Blob&& data,
uint256 const & hash)
{
// The boost::ref is important or
// else it will be passed by value!
return boost::make_shared <NodeObject> (
type, ledgerIndex, boost::ref (data), hash, PrivateAccess ());
type, ledgerIndex, std::move (data), hash, PrivateAccess ());
}
NodeObjectType NodeObject::getType () const
NodeObjectType
NodeObject::getType () const
{
return mType;
}
uint256 const& NodeObject::getHash () const
uint256 const&
NodeObject::getHash () const
{
return mHash;
}
LedgerIndex NodeObject::getIndex () const
LedgerIndex
NodeObject::getIndex () const
{
return mLedgerIndex;
}
Blob const& NodeObject::getData () const
Blob const&
NodeObject::getData () const
{
return mData;
}
bool NodeObject::isCloneOf (NodeObject::Ptr const& other) const
bool
NodeObject::isCloneOf (NodeObject::Ptr const& other) const
{
if (mType != other->mType)
return false;

View File

@@ -75,7 +75,7 @@ public:
r.fillBitsRandomly (data.data (), payloadBytes);
return NodeObject::createObject (type, ledgerIndex, data, hash);
return NodeObject::createObject(type, ledgerIndex, std::move(data), hash);
}
private:
@@ -163,7 +163,7 @@ public:
db.store (object->getType (),
object->getIndex (),
data,
std::move (data),
object->getHash ());
}
}