mirror of
https://github.com/XRPLF/rippled.git
synced 2025-12-06 17:27:55 +00:00
Move ./modules to ./src
This commit is contained in:
245
src/ripple_core/node/HyperLevelDBBackendFactory.cpp
Normal file
245
src/ripple_core/node/HyperLevelDBBackendFactory.cpp
Normal file
@@ -0,0 +1,245 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#if RIPPLE_HYPERLEVELDB_AVAILABLE
|
||||
|
||||
class HyperLevelDBBackendFactory::Backend
|
||||
: public NodeStore::Backend
|
||||
, public NodeStore::BatchWriter::Callback
|
||||
, LeakChecked <HyperLevelDBBackendFactory::Backend>
|
||||
{
|
||||
public:
|
||||
typedef RecycledObjectPool <std::string> StringPool;
|
||||
|
||||
Backend (size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler)
|
||||
: m_keyBytes (keyBytes)
|
||||
, m_scheduler (scheduler)
|
||||
, m_batch (*this, scheduler)
|
||||
, m_name (keyValues ["path"].toStdString ())
|
||||
{
|
||||
if (m_name.empty ())
|
||||
Throw (std::runtime_error ("Missing path in LevelDB backend"));
|
||||
|
||||
hyperleveldb::Options options;
|
||||
options.create_if_missing = true;
|
||||
|
||||
if (keyValues ["cache_mb"].isEmpty ())
|
||||
{
|
||||
options.block_cache = hyperleveldb::NewLRUCache (getConfig ().getSize (siHashNodeDBCache) * 1024 * 1024);
|
||||
}
|
||||
else
|
||||
{
|
||||
options.block_cache = hyperleveldb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L);
|
||||
}
|
||||
|
||||
if (keyValues ["filter_bits"].isEmpty())
|
||||
{
|
||||
if (getConfig ().NODE_SIZE >= 2)
|
||||
options.filter_policy = hyperleveldb::NewBloomFilterPolicy (10);
|
||||
}
|
||||
else if (keyValues ["filter_bits"].getIntValue() != 0)
|
||||
{
|
||||
options.filter_policy = hyperleveldb::NewBloomFilterPolicy (keyValues ["filter_bits"].getIntValue ());
|
||||
}
|
||||
|
||||
if (! keyValues["open_files"].isEmpty ())
|
||||
{
|
||||
options.max_open_files = keyValues ["open_files"].getIntValue();
|
||||
}
|
||||
|
||||
hyperleveldb::DB* db = nullptr;
|
||||
hyperleveldb::Status status = hyperleveldb::DB::Open (options, m_name, &db);
|
||||
if (!status.ok () || !db)
|
||||
Throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
|
||||
|
||||
m_db = db;
|
||||
}
|
||||
|
||||
~Backend ()
|
||||
{
|
||||
}
|
||||
|
||||
std::string getName()
|
||||
{
|
||||
return m_name;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
Status fetch (void const* key, NodeObject::Ptr* pObject)
|
||||
{
|
||||
pObject->reset ();
|
||||
|
||||
Status status (ok);
|
||||
|
||||
hyperleveldb::ReadOptions const options;
|
||||
hyperleveldb::Slice const slice (static_cast <char const*> (key), m_keyBytes);
|
||||
|
||||
{
|
||||
// These are reused std::string objects,
|
||||
// required for leveldb's funky interface.
|
||||
//
|
||||
StringPool::ScopedItem item (m_stringPool);
|
||||
std::string& string = item.getObject ();
|
||||
|
||||
hyperleveldb::Status getStatus = m_db->Get (options, slice, &string);
|
||||
|
||||
if (getStatus.ok ())
|
||||
{
|
||||
NodeStore::DecodedBlob decoded (key, string.data (), string.size ());
|
||||
|
||||
if (decoded.wasOk ())
|
||||
{
|
||||
*pObject = decoded.createObject ();
|
||||
}
|
||||
else
|
||||
{
|
||||
// Decoding failed, probably corrupted!
|
||||
//
|
||||
status = dataCorrupt;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (getStatus.IsCorruption ())
|
||||
{
|
||||
status = dataCorrupt;
|
||||
}
|
||||
else if (getStatus.IsNotFound ())
|
||||
{
|
||||
status = notFound;
|
||||
}
|
||||
else
|
||||
{
|
||||
status = unknown;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
void store (NodeObject::ref object)
|
||||
{
|
||||
m_batch.store (object);
|
||||
}
|
||||
|
||||
void storeBatch (NodeStore::Batch const& batch)
|
||||
{
|
||||
hyperleveldb::WriteBatch wb;
|
||||
|
||||
{
|
||||
NodeStore::EncodedBlob::Pool::ScopedItem item (m_blobPool);
|
||||
|
||||
BOOST_FOREACH (NodeObject::ref object, batch)
|
||||
{
|
||||
item.getObject ().prepare (object);
|
||||
|
||||
wb.Put (
|
||||
hyperleveldb::Slice (reinterpret_cast <char const*> (
|
||||
item.getObject ().getKey ()), m_keyBytes),
|
||||
hyperleveldb::Slice (reinterpret_cast <char const*> (
|
||||
item.getObject ().getData ()), item.getObject ().getSize ()));
|
||||
}
|
||||
}
|
||||
|
||||
hyperleveldb::WriteOptions const options;
|
||||
|
||||
m_db->Write (options, &wb).ok ();
|
||||
}
|
||||
|
||||
void visitAll (VisitCallback& callback)
|
||||
{
|
||||
hyperleveldb::ReadOptions const options;
|
||||
|
||||
ScopedPointer <hyperleveldb::Iterator> it (m_db->NewIterator (options));
|
||||
|
||||
for (it->SeekToFirst (); it->Valid (); it->Next ())
|
||||
{
|
||||
if (it->key ().size () == m_keyBytes)
|
||||
{
|
||||
NodeStore::DecodedBlob decoded (it->key ().data (),
|
||||
it->value ().data (),
|
||||
it->value ().size ());
|
||||
|
||||
if (decoded.wasOk ())
|
||||
{
|
||||
NodeObject::Ptr object (decoded.createObject ());
|
||||
|
||||
callback.visitObject (object);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Uh oh, corrupted data!
|
||||
WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256::fromVoid (it->key ().data ());
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// VFALCO NOTE What does it mean to find an
|
||||
// incorrectly sized key? Corruption?
|
||||
WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size ();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int getWriteLoad ()
|
||||
{
|
||||
return m_batch.getWriteLoad ();
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
void writeBatch (NodeStore::Batch const& batch)
|
||||
{
|
||||
storeBatch (batch);
|
||||
}
|
||||
|
||||
private:
|
||||
size_t const m_keyBytes;
|
||||
NodeStore::Scheduler& m_scheduler;
|
||||
NodeStore::BatchWriter m_batch;
|
||||
StringPool m_stringPool;
|
||||
NodeStore::EncodedBlob::Pool m_blobPool;
|
||||
std::string m_name;
|
||||
ScopedPointer <hyperleveldb::DB> m_db;
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
HyperLevelDBBackendFactory::HyperLevelDBBackendFactory ()
|
||||
{
|
||||
}
|
||||
|
||||
HyperLevelDBBackendFactory::~HyperLevelDBBackendFactory ()
|
||||
{
|
||||
}
|
||||
|
||||
HyperLevelDBBackendFactory& HyperLevelDBBackendFactory::getInstance ()
|
||||
{
|
||||
static HyperLevelDBBackendFactory instance;
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
String HyperLevelDBBackendFactory::getName () const
|
||||
{
|
||||
return "HyperLevelDB";
|
||||
}
|
||||
|
||||
NodeStore::Backend* HyperLevelDBBackendFactory::createInstance (
|
||||
size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler)
|
||||
{
|
||||
return new HyperLevelDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
#endif
|
||||
36
src/ripple_core/node/HyperLevelDBBackendFactory.h
Normal file
36
src/ripple_core/node/HyperLevelDBBackendFactory.h
Normal file
@@ -0,0 +1,36 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_CORE_NODE_HYPERLEVELDBBACKENDFACTORY_H_INCLUDED
|
||||
#define RIPPLE_CORE_NODE_HYPERLEVELDBBACKENDFACTORY_H_INCLUDED
|
||||
|
||||
#if RIPPLE_HYPERLEVELDB_AVAILABLE
|
||||
|
||||
/** Factory to produce HyperLevelDB backends for the NodeStore.
|
||||
|
||||
@see NodeStore
|
||||
*/
|
||||
class HyperLevelDBBackendFactory : public NodeStore::BackendFactory
|
||||
{
|
||||
private:
|
||||
class Backend;
|
||||
|
||||
HyperLevelDBBackendFactory ();
|
||||
~HyperLevelDBBackendFactory ();
|
||||
|
||||
public:
|
||||
static HyperLevelDBBackendFactory& getInstance ();
|
||||
|
||||
String getName () const;
|
||||
|
||||
NodeStore::Backend* createInstance (size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler);
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
179
src/ripple_core/node/KeyvaDBBackendFactory.cpp
Normal file
179
src/ripple_core/node/KeyvaDBBackendFactory.cpp
Normal file
@@ -0,0 +1,179 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
class KeyvaDBBackendFactory::Backend : public NodeStore::Backend
{
private:
    // Reusable scratch buffers for fetches and encoded blobs for stores.
    typedef RecycledObjectPool <MemoryBlock> MemoryPool;
    typedef RecycledObjectPool <NodeStore::EncodedBlob> EncodedBlobPool;

public:
    /** Create a KeyvaDB backend.

        The database is stored as a pair of files, "<path>.key" and
        "<path>.val", relative to the current working directory.

        @param keyBytes   Fixed size, in bytes, of every key.
        @param keyValues  Configuration pairs; "path" names the database files.
        @param scheduler  Scheduler (unused here beyond being retained).
    */
    Backend (size_t keyBytes,
             StringPairArray const& keyValues,
             NodeStore::Scheduler& scheduler)
        : m_keyBytes (keyBytes)
        , m_scheduler (scheduler)
        , m_path (keyValues ["path"])
        , m_db (KeyvaDB::New (
                    keyBytes,
                    3,
                    File::getCurrentWorkingDirectory().getChildFile (m_path).withFileExtension ("key"),
                    File::getCurrentWorkingDirectory().getChildFile (m_path).withFileExtension ("val")))
    {
    }

    ~Backend ()
    {
    }

    std::string getName ()
    {
        return m_path.toStdString ();
    }

    //--------------------------------------------------------------------------

    /** Fetch the object stored under `key`.

        KeyvaDB reports only found/not-found, so a found-but-undecodable
        record is the only way to observe dataCorrupt.
    */
    Status fetch (void const* key, NodeObject::Ptr* pObject)
    {
        pObject->reset ();

        Status status (ok);

        // Adapter that lets KeyvaDB write the value directly into a
        // recycled MemoryBlock.
        struct Callback : KeyvaDB::GetCallback
        {
            explicit Callback (MemoryBlock& block)
                : m_block (block)
                , m_size (0)    // was uninitialized; getSize() could read garbage
            {
            }

            void* getStorageForValue (int valueBytes)
            {
                m_size = valueBytes;
                m_block.ensureSize (valueBytes);

                return m_block.getData ();
            }

            void const* getData () const noexcept
            {
                return m_block.getData ();
            }

            size_t getSize () const noexcept
            {
                return m_size;
            }

        private:
            MemoryBlock& m_block;
            size_t m_size;
        };

        MemoryPool::ScopedItem item (m_memoryPool);
        MemoryBlock& block (item.getObject ());

        Callback cb (block);

        // VFALCO TODO Can't we get KeyvaDB to provide a proper status?
        //
        bool const found = m_db->get (key, &cb);

        if (found)
        {
            NodeStore::DecodedBlob decoded (key, cb.getData (), cb.getSize ());

            if (decoded.wasOk ())
            {
                *pObject = decoded.createObject ();

                status = ok;
            }
            else
            {
                status = dataCorrupt;
            }
        }
        else
        {
            status = notFound;
        }

        return status;
    }

    // Encode the object and write it synchronously.
    void store (NodeObject::ref object)
    {
        EncodedBlobPool::ScopedItem item (m_blobPool);
        NodeStore::EncodedBlob& encoded (item.getObject ());

        encoded.prepare (object);

        m_db->put (encoded.getKey (), encoded.getData (), encoded.getSize ());
    }

    // No native batch support: store each object individually.
    void storeBatch (NodeStore::Batch const& batch)
    {
        for (std::size_t i = 0; i < batch.size (); ++i)
            store (batch [i]);
    }

    void visitAll (VisitCallback& callback)
    {
        // VFALCO TODO Implement this!
        //
        bassertfalse;
        //m_db->visitAll ();
    }

    int getWriteLoad ()
    {
        // we dont do pending writes
        return 0;
    }

    //--------------------------------------------------------------------------

private:
    size_t const m_keyBytes;
    NodeStore::Scheduler& m_scheduler;
    String m_path;
    ScopedPointer <KeyvaDB> m_db;
    MemoryPool m_memoryPool;
    EncodedBlobPool m_blobPool;
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
KeyvaDBBackendFactory::KeyvaDBBackendFactory ()
|
||||
{
|
||||
}
|
||||
|
||||
KeyvaDBBackendFactory::~KeyvaDBBackendFactory ()
|
||||
{
|
||||
}
|
||||
|
||||
KeyvaDBBackendFactory& KeyvaDBBackendFactory::getInstance ()
|
||||
{
|
||||
static KeyvaDBBackendFactory instance;
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
String KeyvaDBBackendFactory::getName () const
|
||||
{
|
||||
return "KeyvaDB";
|
||||
}
|
||||
|
||||
NodeStore::Backend* KeyvaDBBackendFactory::createInstance (
|
||||
size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler)
|
||||
{
|
||||
return new KeyvaDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
32
src/ripple_core/node/KeyvaDBBackendFactory.h
Normal file
32
src/ripple_core/node/KeyvaDBBackendFactory.h
Normal file
@@ -0,0 +1,32 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_CORE_NODE_KEYVABACKENDFACTORY_H_INCLUDED
|
||||
#define RIPPLE_CORE_NODE_KEYVABACKENDFACTORY_H_INCLUDED
|
||||
|
||||
/** Factory to produce KeyvaDB backends for the NodeStore.
|
||||
|
||||
@see NodeStore
|
||||
*/
|
||||
class KeyvaDBBackendFactory : public NodeStore::BackendFactory
|
||||
{
|
||||
private:
|
||||
class Backend;
|
||||
|
||||
KeyvaDBBackendFactory ();
|
||||
~KeyvaDBBackendFactory ();
|
||||
|
||||
public:
|
||||
static KeyvaDBBackendFactory& getInstance ();
|
||||
|
||||
String getName () const;
|
||||
|
||||
NodeStore::Backend* createInstance (size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler);
|
||||
};
|
||||
|
||||
#endif
|
||||
248
src/ripple_core/node/LevelDBBackendFactory.cpp
Normal file
248
src/ripple_core/node/LevelDBBackendFactory.cpp
Normal file
@@ -0,0 +1,248 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
class LevelDBBackendFactory::Backend
|
||||
: public NodeStore::Backend
|
||||
, public NodeStore::BatchWriter::Callback
|
||||
, LeakChecked <LevelDBBackendFactory::Backend>
|
||||
{
|
||||
public:
|
||||
typedef RecycledObjectPool <std::string> StringPool;
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
Backend (int keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler)
|
||||
: m_keyBytes (keyBytes)
|
||||
, m_scheduler (scheduler)
|
||||
, m_batch (*this, scheduler)
|
||||
, m_name (keyValues ["path"].toStdString ())
|
||||
{
|
||||
if (m_name.empty())
|
||||
Throw (std::runtime_error ("Missing path in LevelDB backend"));
|
||||
|
||||
leveldb::Options options;
|
||||
options.create_if_missing = true;
|
||||
|
||||
if (keyValues["cache_mb"].isEmpty())
|
||||
{
|
||||
options.block_cache = leveldb::NewLRUCache (getConfig ().getSize (siHashNodeDBCache) * 1024 * 1024);
|
||||
}
|
||||
else
|
||||
{
|
||||
options.block_cache = leveldb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L);
|
||||
}
|
||||
|
||||
if (keyValues["filter_bits"].isEmpty())
|
||||
{
|
||||
if (getConfig ().NODE_SIZE >= 2)
|
||||
options.filter_policy = leveldb::NewBloomFilterPolicy (10);
|
||||
}
|
||||
else if (keyValues["filter_bits"].getIntValue() != 0)
|
||||
{
|
||||
options.filter_policy = leveldb::NewBloomFilterPolicy (keyValues["filter_bits"].getIntValue());
|
||||
}
|
||||
|
||||
if (! keyValues["open_files"].isEmpty())
|
||||
{
|
||||
options.max_open_files = keyValues["open_files"].getIntValue();
|
||||
}
|
||||
|
||||
leveldb::DB* db = nullptr;
|
||||
leveldb::Status status = leveldb::DB::Open (options, m_name, &db);
|
||||
if (!status.ok () || !db)
|
||||
Throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
|
||||
|
||||
m_db = db;
|
||||
}
|
||||
|
||||
~Backend ()
|
||||
{
|
||||
}
|
||||
|
||||
std::string getName()
|
||||
{
|
||||
return m_name;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
Status fetch (void const* key, NodeObject::Ptr* pObject)
|
||||
{
|
||||
pObject->reset ();
|
||||
|
||||
Status status (ok);
|
||||
|
||||
leveldb::ReadOptions const options;
|
||||
leveldb::Slice const slice (static_cast <char const*> (key), m_keyBytes);
|
||||
|
||||
{
|
||||
// These are reused std::string objects,
|
||||
// required for leveldb's funky interface.
|
||||
//
|
||||
StringPool::ScopedItem item (m_stringPool);
|
||||
std::string& string = item.getObject ();
|
||||
|
||||
leveldb::Status getStatus = m_db->Get (options, slice, &string);
|
||||
|
||||
if (getStatus.ok ())
|
||||
{
|
||||
NodeStore::DecodedBlob decoded (key, string.data (), string.size ());
|
||||
|
||||
if (decoded.wasOk ())
|
||||
{
|
||||
*pObject = decoded.createObject ();
|
||||
}
|
||||
else
|
||||
{
|
||||
// Decoding failed, probably corrupted!
|
||||
//
|
||||
status = dataCorrupt;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (getStatus.IsCorruption ())
|
||||
{
|
||||
status = dataCorrupt;
|
||||
}
|
||||
else if (getStatus.IsNotFound ())
|
||||
{
|
||||
status = notFound;
|
||||
}
|
||||
else
|
||||
{
|
||||
status = unknown;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
void store (NodeObject::ref object)
|
||||
{
|
||||
m_batch.store (object);
|
||||
}
|
||||
|
||||
void storeBatch (NodeStore::Batch const& batch)
|
||||
{
|
||||
leveldb::WriteBatch wb;
|
||||
|
||||
{
|
||||
NodeStore::EncodedBlob::Pool::ScopedItem item (m_blobPool);
|
||||
|
||||
BOOST_FOREACH (NodeObject::ref object, batch)
|
||||
{
|
||||
item.getObject ().prepare (object);
|
||||
|
||||
wb.Put (
|
||||
leveldb::Slice (reinterpret_cast <char const*> (item.getObject ().getKey ()),
|
||||
m_keyBytes),
|
||||
leveldb::Slice (reinterpret_cast <char const*> (item.getObject ().getData ()),
|
||||
item.getObject ().getSize ()));
|
||||
}
|
||||
}
|
||||
|
||||
leveldb::WriteOptions const options;
|
||||
|
||||
m_db->Write (options, &wb).ok ();
|
||||
}
|
||||
|
||||
void visitAll (VisitCallback& callback)
|
||||
{
|
||||
leveldb::ReadOptions const options;
|
||||
|
||||
ScopedPointer <leveldb::Iterator> it (m_db->NewIterator (options));
|
||||
|
||||
for (it->SeekToFirst (); it->Valid (); it->Next ())
|
||||
{
|
||||
if (it->key ().size () == m_keyBytes)
|
||||
{
|
||||
NodeStore::DecodedBlob decoded (it->key ().data (),
|
||||
it->value ().data (),
|
||||
it->value ().size ());
|
||||
|
||||
if (decoded.wasOk ())
|
||||
{
|
||||
NodeObject::Ptr object (decoded.createObject ());
|
||||
|
||||
callback.visitObject (object);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Uh oh, corrupted data!
|
||||
WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ());
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// VFALCO NOTE What does it mean to find an
|
||||
// incorrectly sized key? Corruption?
|
||||
WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size ();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int getWriteLoad ()
|
||||
{
|
||||
return m_batch.getWriteLoad ();
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
void writeBatch (NodeStore::Batch const& batch)
|
||||
{
|
||||
storeBatch (batch);
|
||||
}
|
||||
|
||||
private:
|
||||
size_t const m_keyBytes;
|
||||
NodeStore::Scheduler& m_scheduler;
|
||||
NodeStore::BatchWriter m_batch;
|
||||
StringPool m_stringPool;
|
||||
NodeStore::EncodedBlob::Pool m_blobPool;
|
||||
std::string m_name;
|
||||
ScopedPointer <leveldb::DB> m_db;
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
LevelDBBackendFactory::LevelDBBackendFactory ()
|
||||
{
|
||||
leveldb::Options options;
|
||||
options.create_if_missing = true;
|
||||
options.block_cache = leveldb::NewLRUCache (getConfig ().getSize (
|
||||
siHashNodeDBCache) * 1024 * 1024);
|
||||
}
|
||||
|
||||
LevelDBBackendFactory::~LevelDBBackendFactory ()
|
||||
{
|
||||
}
|
||||
|
||||
LevelDBBackendFactory& LevelDBBackendFactory::getInstance ()
|
||||
{
|
||||
static LevelDBBackendFactory instance;
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
String LevelDBBackendFactory::getName () const
|
||||
{
|
||||
return "LevelDB";
|
||||
}
|
||||
|
||||
NodeStore::Backend* LevelDBBackendFactory::createInstance (
|
||||
size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler)
|
||||
{
|
||||
return new LevelDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
32
src/ripple_core/node/LevelDBBackendFactory.h
Normal file
32
src/ripple_core/node/LevelDBBackendFactory.h
Normal file
@@ -0,0 +1,32 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_CORE_NODE_LEVELDBBACKENDFACTORY_H_INCLUDED
|
||||
#define RIPPLE_CORE_NODE_LEVELDBBACKENDFACTORY_H_INCLUDED
|
||||
|
||||
/** Factory to produce LevelDB backends for the NodeStore.
|
||||
|
||||
@see NodeStore
|
||||
*/
|
||||
class LevelDBBackendFactory : public NodeStore::BackendFactory
|
||||
{
|
||||
private:
|
||||
class Backend;
|
||||
|
||||
LevelDBBackendFactory ();
|
||||
~LevelDBBackendFactory ();
|
||||
|
||||
public:
|
||||
static LevelDBBackendFactory& getInstance ();
|
||||
|
||||
String getName () const;
|
||||
|
||||
NodeStore::Backend* createInstance (size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler);
|
||||
};
|
||||
|
||||
#endif
|
||||
279
src/ripple_core/node/MdbBackendFactory.cpp
Normal file
279
src/ripple_core/node/MdbBackendFactory.cpp
Normal file
@@ -0,0 +1,279 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#if RIPPLE_MDB_AVAILABLE
|
||||
|
||||
class MdbBackendFactory::Backend
|
||||
: public NodeStore::Backend
|
||||
, public NodeStore::BatchWriter::Callback
|
||||
, LeakChecked <MdbBackendFactory::Backend>
|
||||
{
|
||||
public:
|
||||
typedef NodeStore::Batch Batch;
|
||||
typedef NodeStore::EncodedBlob EncodedBlob;
|
||||
typedef NodeStore::DecodedBlob DecodedBlob;
|
||||
|
||||
explicit Backend (size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler)
|
||||
: m_keyBytes (keyBytes)
|
||||
, m_scheduler (scheduler)
|
||||
, m_batch (*this, scheduler)
|
||||
, m_env (nullptr)
|
||||
{
|
||||
String path (keyValues ["path"]);
|
||||
|
||||
if (path.isEmpty ())
|
||||
Throw (std::runtime_error ("Missing path in MDB backend"));
|
||||
|
||||
m_basePath = path.toStdString();
|
||||
|
||||
// Regarding the path supplied to mdb_env_open:
|
||||
// This directory must already exist and be writable.
|
||||
//
|
||||
File dir (File::getCurrentWorkingDirectory().getChildFile (path));
|
||||
Result result = dir.createDirectory ();
|
||||
|
||||
if (result.wasOk ())
|
||||
{
|
||||
int error = mdb_env_create (&m_env);
|
||||
|
||||
// Should use the size of the file plus the free space on the disk
|
||||
if (error == 0)
|
||||
error = mdb_env_set_mapsize (m_env, 512L * 1024L * 1024L * 1024L);
|
||||
|
||||
if (error == 0)
|
||||
error = mdb_env_open (
|
||||
m_env,
|
||||
m_basePath.c_str (),
|
||||
MDB_NOTLS,
|
||||
0664);
|
||||
|
||||
MDB_txn* txn;
|
||||
|
||||
if (error == 0)
|
||||
error = mdb_txn_begin (m_env, NULL, 0, &txn);
|
||||
|
||||
if (error == 0)
|
||||
error = mdb_dbi_open (txn, NULL, 0, &m_dbi);
|
||||
|
||||
if (error == 0)
|
||||
error = mdb_txn_commit (txn);
|
||||
|
||||
if (error != 0)
|
||||
{
|
||||
String s;
|
||||
s << "Error #" << error << " creating mdb environment";
|
||||
Throw (std::runtime_error (s.toStdString ()));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
String s;
|
||||
s << "MDB Backend failed to create directory, " << result.getErrorMessage ();
|
||||
Throw (std::runtime_error (s.toStdString().c_str()));
|
||||
}
|
||||
}
|
||||
|
||||
~Backend ()
|
||||
{
|
||||
if (m_env != nullptr)
|
||||
{
|
||||
mdb_dbi_close (m_env, m_dbi);
|
||||
mdb_env_close (m_env);
|
||||
}
|
||||
}
|
||||
|
||||
std::string getName()
|
||||
{
|
||||
return m_basePath;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
template <class T>
|
||||
unsigned char* mdb_cast (T* p)
|
||||
{
|
||||
return const_cast <unsigned char*> (static_cast <unsigned char const*> (p));
|
||||
}
|
||||
|
||||
Status fetch (void const* key, NodeObject::Ptr* pObject)
|
||||
{
|
||||
pObject->reset ();
|
||||
|
||||
Status status (ok);
|
||||
|
||||
MDB_txn* txn = nullptr;
|
||||
|
||||
int error = 0;
|
||||
|
||||
error = mdb_txn_begin (m_env, NULL, MDB_RDONLY, &txn);
|
||||
|
||||
if (error == 0)
|
||||
{
|
||||
MDB_val dbkey;
|
||||
MDB_val data;
|
||||
|
||||
dbkey.mv_size = m_keyBytes;
|
||||
dbkey.mv_data = mdb_cast (key);
|
||||
|
||||
error = mdb_get (txn, m_dbi, &dbkey, &data);
|
||||
|
||||
if (error == 0)
|
||||
{
|
||||
DecodedBlob decoded (key, data.mv_data, data.mv_size);
|
||||
|
||||
if (decoded.wasOk ())
|
||||
{
|
||||
*pObject = decoded.createObject ();
|
||||
}
|
||||
else
|
||||
{
|
||||
status = dataCorrupt;
|
||||
}
|
||||
}
|
||||
else if (error == MDB_NOTFOUND)
|
||||
{
|
||||
status = notFound;
|
||||
}
|
||||
else
|
||||
{
|
||||
status = unknown;
|
||||
|
||||
WriteLog (lsWARNING, NodeObject) << "MDB txn failed, code=" << error;
|
||||
}
|
||||
|
||||
mdb_txn_abort (txn);
|
||||
}
|
||||
else
|
||||
{
|
||||
status = unknown;
|
||||
|
||||
WriteLog (lsWARNING, NodeObject) << "MDB txn failed, code=" << error;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
void store (NodeObject::ref object)
|
||||
{
|
||||
m_batch.store (object);
|
||||
}
|
||||
|
||||
void storeBatch (Batch const& batch)
|
||||
{
|
||||
MDB_txn* txn = nullptr;
|
||||
|
||||
int error = 0;
|
||||
|
||||
error = mdb_txn_begin (m_env, NULL, 0, &txn);
|
||||
|
||||
if (error == 0)
|
||||
{
|
||||
EncodedBlob::Pool::ScopedItem item (m_blobPool);
|
||||
|
||||
BOOST_FOREACH (NodeObject::Ptr const& object, batch)
|
||||
{
|
||||
EncodedBlob& encoded (item.getObject ());
|
||||
|
||||
encoded.prepare (object);
|
||||
|
||||
MDB_val key;
|
||||
key.mv_size = m_keyBytes;
|
||||
key.mv_data = mdb_cast (encoded.getKey ());
|
||||
|
||||
MDB_val data;
|
||||
data.mv_size = encoded.getSize ();
|
||||
data.mv_data = mdb_cast (encoded.getData ());
|
||||
|
||||
error = mdb_put (txn, m_dbi, &key, &data, 0);
|
||||
|
||||
if (error != 0)
|
||||
{
|
||||
WriteLog (lsWARNING, NodeObject) << "mdb_put failed, error=" << error;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (error == 0)
|
||||
{
|
||||
error = mdb_txn_commit(txn);
|
||||
|
||||
if (error != 0)
|
||||
{
|
||||
WriteLog (lsWARNING, NodeObject) << "mdb_txn_commit failed, error=" << error;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
mdb_txn_abort (txn);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
WriteLog (lsWARNING, NodeObject) << "mdb_txn_begin failed, error=" << error;
|
||||
}
|
||||
}
|
||||
|
||||
void visitAll (VisitCallback& callback)
|
||||
{
|
||||
// VFALCO TODO Implement this!
|
||||
bassertfalse;
|
||||
}
|
||||
|
||||
int getWriteLoad ()
|
||||
{
|
||||
return m_batch.getWriteLoad ();
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
void writeBatch (Batch const& batch)
|
||||
{
|
||||
storeBatch (batch);
|
||||
}
|
||||
|
||||
private:
|
||||
size_t const m_keyBytes;
|
||||
NodeStore::Scheduler& m_scheduler;
|
||||
NodeStore::BatchWriter m_batch;
|
||||
NodeStore::EncodedBlob::Pool m_blobPool;
|
||||
std::string m_basePath;
|
||||
MDB_env* m_env;
|
||||
MDB_dbi m_dbi;
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
MdbBackendFactory::MdbBackendFactory ()
|
||||
{
|
||||
}
|
||||
|
||||
MdbBackendFactory::~MdbBackendFactory ()
|
||||
{
|
||||
}
|
||||
|
||||
MdbBackendFactory& MdbBackendFactory::getInstance ()
|
||||
{
|
||||
static MdbBackendFactory instance;
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
String MdbBackendFactory::getName () const
|
||||
{
|
||||
return "mdb";
|
||||
}
|
||||
|
||||
NodeStore::Backend* MdbBackendFactory::createInstance (
|
||||
size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler)
|
||||
{
|
||||
return new MdbBackendFactory::Backend (keyBytes, keyValues, scheduler);
|
||||
}
|
||||
|
||||
#endif
|
||||
38
src/ripple_core/node/MdbBackendFactory.h
Normal file
38
src/ripple_core/node/MdbBackendFactory.h
Normal file
@@ -0,0 +1,38 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_CORE_NODE_MDBBACKENDFACTORY_H_INCLUDED
|
||||
#define RIPPLE_CORE_NODE_MDBBACKENDFACTORY_H_INCLUDED
|
||||
|
||||
#if RIPPLE_MDB_AVAILABLE
|
||||
|
||||
/** Factory to produce a backend using MDB.
|
||||
|
||||
@note MDB is not currently available for Win32
|
||||
|
||||
@see NodeStore
|
||||
*/
|
||||
class MdbBackendFactory : public NodeStore::BackendFactory
|
||||
{
|
||||
private:
|
||||
class Backend;
|
||||
|
||||
MdbBackendFactory ();
|
||||
~MdbBackendFactory ();
|
||||
|
||||
public:
|
||||
static MdbBackendFactory& getInstance ();
|
||||
|
||||
String getName () const;
|
||||
|
||||
NodeStore::Backend* createInstance (size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler);
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
113
src/ripple_core/node/MemoryBackendFactory.cpp
Normal file
113
src/ripple_core/node/MemoryBackendFactory.cpp
Normal file
@@ -0,0 +1,113 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
class MemoryBackendFactory::Backend : public NodeStore::Backend
{
private:
    // In-memory index from hash to object. Entries are never evicted.
    typedef std::map <uint256 const, NodeObject::Ptr> Map;

public:
    /** Create a purely in-memory backend (mainly for testing). */
    Backend (size_t keyBytes, StringPairArray const& keyValues)
        : m_keyBytes (keyBytes)
    {
    }

    ~Backend ()
    {
    }

    std::string getName ()
    {
        return "memory";
    }

    //--------------------------------------------------------------------------

    /** Look up the object stored under `key`.

        NOTE(review): this returns `ok` even on a miss (with *pObject reset),
        unlike the disk backends which report `notFound` — presumably callers
        test the pointer; confirm before changing.
    */
    Status fetch (void const* key, NodeObject::Ptr* pObject)
    {
        Map::iterator const found = m_map.find (uint256::fromVoid (key));

        if (found == m_map.end ())
            pObject->reset ();
        else
            *pObject = found->second;

        return ok;
    }

    // First write wins: an object already stored under the hash is kept.
    void store (NodeObject::ref object)
    {
        if (m_map.find (object->getHash ()) == m_map.end ())
            m_map.insert (std::make_pair (object->getHash (), object));
    }

    // No native batch support: store each object individually.
    void storeBatch (NodeStore::Batch const& batch)
    {
        for (std::size_t i = 0; i < batch.size (); ++i)
            store (batch [i]);
    }

    // Visit every stored object, in hash order.
    void visitAll (VisitCallback& callback)
    {
        for (Map::const_iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
            callback.visitObject (iter->second);
    }

    // All writes are synchronous; nothing is ever pending.
    int getWriteLoad ()
    {
        return 0;
    }

    //--------------------------------------------------------------------------

private:
    size_t const m_keyBytes;

    Map m_map;
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
MemoryBackendFactory::MemoryBackendFactory ()
|
||||
{
|
||||
}
|
||||
|
||||
MemoryBackendFactory::~MemoryBackendFactory ()
|
||||
{
|
||||
}
|
||||
|
||||
MemoryBackendFactory& MemoryBackendFactory::getInstance ()
|
||||
{
|
||||
static MemoryBackendFactory instance;
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
String MemoryBackendFactory::getName () const
|
||||
{
|
||||
return "Memory";
|
||||
}
|
||||
|
||||
NodeStore::Backend* MemoryBackendFactory::createInstance (
|
||||
size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler)
|
||||
{
|
||||
return new MemoryBackendFactory::Backend (keyBytes, keyValues);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
32
src/ripple_core/node/MemoryBackendFactory.h
Normal file
32
src/ripple_core/node/MemoryBackendFactory.h
Normal file
@@ -0,0 +1,32 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_CORE_NODE_MEMORYBACKENDFACTORY_H_INCLUDED
|
||||
#define RIPPLE_CORE_NODE_MEMORYBACKENDFACTORY_H_INCLUDED
|
||||
|
||||
/** Factory to produce a RAM based backend for the NodeStore.
|
||||
|
||||
@see NodeStore
|
||||
*/
|
||||
class MemoryBackendFactory : public NodeStore::BackendFactory
|
||||
{
|
||||
private:
|
||||
class Backend;
|
||||
|
||||
MemoryBackendFactory ();
|
||||
~MemoryBackendFactory ();
|
||||
|
||||
public:
|
||||
static MemoryBackendFactory& getInstance ();
|
||||
|
||||
String getName () const;
|
||||
|
||||
NodeStore::Backend* createInstance (size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler);
|
||||
};
|
||||
|
||||
#endif
|
||||
91
src/ripple_core/node/NodeObject.cpp
Normal file
91
src/ripple_core/node/NodeObject.cpp
Normal file
@@ -0,0 +1,91 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
SETUP_LOG (NodeObject)
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
NodeObject::NodeObject (
|
||||
NodeObjectType type,
|
||||
LedgerIndex ledgerIndex,
|
||||
Blob& data,
|
||||
uint256 const& hash,
|
||||
PrivateAccess)
|
||||
: mType (type)
|
||||
, mHash (hash)
|
||||
, mLedgerIndex (ledgerIndex)
|
||||
{
|
||||
// Take over the caller's buffer
|
||||
mData.swap (data);
|
||||
}
|
||||
|
||||
NodeObject::Ptr NodeObject::createObject (
|
||||
NodeObjectType type,
|
||||
LedgerIndex ledgerIndex,
|
||||
Blob& data,
|
||||
uint256 const & hash)
|
||||
{
|
||||
// The boost::ref is important or
|
||||
// else it will be passed by value!
|
||||
return boost::make_shared <NodeObject> (
|
||||
type, ledgerIndex, boost::ref (data), hash, PrivateAccess ());
|
||||
}
|
||||
|
||||
NodeObjectType NodeObject::getType () const
|
||||
{
|
||||
return mType;
|
||||
}
|
||||
|
||||
uint256 const& NodeObject::getHash () const
|
||||
{
|
||||
return mHash;
|
||||
}
|
||||
|
||||
LedgerIndex NodeObject::getIndex () const
|
||||
{
|
||||
return mLedgerIndex;
|
||||
}
|
||||
|
||||
Blob const& NodeObject::getData () const
|
||||
{
|
||||
return mData;
|
||||
}
|
||||
|
||||
bool NodeObject::isCloneOf (NodeObject::Ptr const& other) const
|
||||
{
|
||||
if (mType != other->mType)
|
||||
return false;
|
||||
|
||||
if (mHash != other->mHash)
|
||||
return false;
|
||||
|
||||
if (mLedgerIndex != other->mLedgerIndex)
|
||||
return false;
|
||||
|
||||
if (mData != other->mData)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
class NodeObjectTests : public UnitTest
|
||||
{
|
||||
public:
|
||||
|
||||
NodeObjectTests () : UnitTest ("NodeObject", "ripple")
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
void runTest ()
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
static NodeObjectTests nodeObjectTests;
|
||||
|
||||
135
src/ripple_core/node/NodeObject.h
Normal file
135
src/ripple_core/node/NodeObject.h
Normal file
@@ -0,0 +1,135 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_CORE_NODE_NODEOBJECT_H_INCLUDED
|
||||
#define RIPPLE_CORE_NODE_NODEOBJECT_H_INCLUDED
|
||||
|
||||
/** The types of node objects.
|
||||
*/
|
||||
enum NodeObjectType
|
||||
{
|
||||
hotUNKNOWN = 0,
|
||||
hotLEDGER = 1,
|
||||
hotTRANSACTION = 2,
|
||||
hotACCOUNT_NODE = 3,
|
||||
hotTRANSACTION_NODE = 4
|
||||
};
|
||||
|
||||
/** A blob of data with associated metadata, referenced by hash.
|
||||
|
||||
The metadata includes the following:
|
||||
|
||||
- Type of the blob
|
||||
- The ledger index in which it appears
|
||||
- The SHA 256 hash
|
||||
|
||||
@note No checking is performed to make sure the hash matches the data.
|
||||
@see SHAMap
|
||||
*/
|
||||
class NodeObject : public CountedObject <NodeObject>
|
||||
{
|
||||
public:
|
||||
static char const* getCountedObjectName () { return "NodeObject"; }
|
||||
|
||||
enum
|
||||
{
|
||||
/** Size of the fixed keys, in bytes.
|
||||
|
||||
We use a 256-bit hash for the keys.
|
||||
|
||||
@see NodeObject
|
||||
*/
|
||||
keyBytes = 32,
|
||||
};
|
||||
|
||||
/** The type used to hold the hash.
|
||||
|
||||
The hahes are fixed size, SHA256.
|
||||
|
||||
@note The key size can be retrieved with `Hash::sizeInBytes`
|
||||
*/
|
||||
typedef UnsignedInteger <32> Hash;
|
||||
|
||||
// Please use this one. For a reference use Ptr const&
|
||||
typedef boost::shared_ptr <NodeObject> Ptr;
|
||||
|
||||
// These are DEPRECATED, type names are capitalized.
|
||||
typedef boost::shared_ptr <NodeObject> pointer;
|
||||
typedef pointer const& ref;
|
||||
|
||||
private:
|
||||
// This hack is used to make the constructor effectively private
|
||||
// except for when we use it in the call to make_shared.
|
||||
// There's no portable way to make make_shared<> a friend work.
|
||||
struct PrivateAccess { };
|
||||
public:
|
||||
// This constructor is private, use createObject instead.
|
||||
NodeObject (NodeObjectType type,
|
||||
LedgerIndex ledgerIndex,
|
||||
Blob& data,
|
||||
uint256 const& hash,
|
||||
PrivateAccess);
|
||||
|
||||
/** Create an object from fields.
|
||||
|
||||
The caller's variable is modified during this call. The
|
||||
underlying storage for the Blob is taken over by the NodeObject.
|
||||
|
||||
@param type The type of object.
|
||||
@param ledgerIndex The ledger in which this object appears.
|
||||
@param data A buffer containing the payload. The caller's variable
|
||||
is overwritten.
|
||||
@param hash The 256-bit hash of the payload data.
|
||||
*/
|
||||
static Ptr createObject (NodeObjectType type,
|
||||
LedgerIndex ledgerIndex,
|
||||
Blob& data,
|
||||
uint256 const& hash);
|
||||
|
||||
/** Retrieve the type of this object.
|
||||
*/
|
||||
NodeObjectType getType () const;
|
||||
|
||||
/** Retrieve the hash metadata.
|
||||
*/
|
||||
uint256 const& getHash () const;
|
||||
|
||||
/** Retrieve the ledger index in which this object appears.
|
||||
*/
|
||||
// VFALCO TODO rename to getLedgerIndex or getLedgerId
|
||||
LedgerIndex getIndex () const;
|
||||
|
||||
/** Retrieve the binary data.
|
||||
*/
|
||||
Blob const& getData () const;
|
||||
|
||||
/** See if this object has the same data as another object.
|
||||
*/
|
||||
bool isCloneOf (NodeObject::Ptr const& other) const;
|
||||
|
||||
/** Binary function that satisfies the strict-weak-ordering requirement.
|
||||
|
||||
This compares the hashes of both objects and returns true if
|
||||
the first hash is considered to go before the second.
|
||||
|
||||
@see std::sort
|
||||
*/
|
||||
struct LessThan
|
||||
{
|
||||
inline bool operator() (NodeObject::Ptr const& lhs, NodeObject::Ptr const& rhs) const noexcept
|
||||
{
|
||||
return lhs->getHash () < rhs->getHash ();
|
||||
}
|
||||
};
|
||||
|
||||
private:
|
||||
NodeObjectType mType;
|
||||
uint256 mHash;
|
||||
LedgerIndex mLedgerIndex;
|
||||
Blob mData;
|
||||
};
|
||||
|
||||
#endif
|
||||
1198
src/ripple_core/node/NodeStore.cpp
Normal file
1198
src/ripple_core/node/NodeStore.cpp
Normal file
File diff suppressed because it is too large
Load Diff
433
src/ripple_core/node/NodeStore.h
Normal file
433
src/ripple_core/node/NodeStore.h
Normal file
@@ -0,0 +1,433 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_CORE_NODE_NODESTORE_H_INCLUDED
|
||||
#define RIPPLE_CORE_NODE_NODESTORE_H_INCLUDED
|
||||
|
||||
/** Persistency layer for NodeObject
|
||||
|
||||
A Node is a ledger object which is uniquely identified by a key, which is
|
||||
the 256-bit hash of the body of the node. The payload is a variable length
|
||||
block of serialized data.
|
||||
|
||||
All ledger data is stored as node objects and as such, needs to be persisted
|
||||
between launches. Furthermore, since the set of node objects will in
|
||||
general be larger than the amount of available memory, purged node objects
|
||||
which are later accessed must be retrieved from the node store.
|
||||
|
||||
@see NodeObject
|
||||
*/
|
||||
class NodeStore
|
||||
{
|
||||
public:
|
||||
enum
|
||||
{
|
||||
// This is only used to pre-allocate the array for
|
||||
// batch objects and does not affect the amount written.
|
||||
//
|
||||
batchWritePreallocationSize = 128
|
||||
};
|
||||
|
||||
typedef std::vector <NodeObject::Ptr> Batch;
|
||||
|
||||
typedef StringPairArray Parameters;
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Parsed key/value blob into NodeObject components.
|
||||
|
||||
This will extract the information required to construct a NodeObject. It
|
||||
also does consistency checking and returns the result, so it is possible
|
||||
to determine if the data is corrupted without throwing an exception. Not
|
||||
all forms of corruption are detected so further analysis will be needed
|
||||
to eliminate false negatives.
|
||||
|
||||
@note This defines the database format of a NodeObject!
|
||||
*/
|
||||
class DecodedBlob
|
||||
{
|
||||
public:
|
||||
/** Construct the decoded blob from raw data. */
|
||||
DecodedBlob (void const* key, void const* value, int valueBytes);
|
||||
|
||||
/** Determine if the decoding was successful. */
|
||||
bool wasOk () const noexcept { return m_success; }
|
||||
|
||||
/** Create a NodeObject from this data. */
|
||||
NodeObject::Ptr createObject ();
|
||||
|
||||
private:
|
||||
bool m_success;
|
||||
|
||||
void const* m_key;
|
||||
LedgerIndex m_ledgerIndex;
|
||||
NodeObjectType m_objectType;
|
||||
unsigned char const* m_objectData;
|
||||
int m_dataBytes;
|
||||
};
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Utility for producing flattened node objects.
|
||||
|
||||
These get recycled to prevent many small allocations.
|
||||
|
||||
@note This defines the database format of a NodeObject!
|
||||
*/
|
||||
struct EncodedBlob
|
||||
{
|
||||
typedef RecycledObjectPool <EncodedBlob> Pool;
|
||||
|
||||
void prepare (NodeObject::Ptr const& object);
|
||||
|
||||
void const* getKey () const noexcept { return m_key; }
|
||||
|
||||
size_t getSize () const noexcept { return m_size; }
|
||||
|
||||
void const* getData () const noexcept { return m_data.getData (); }
|
||||
|
||||
private:
|
||||
void const* m_key;
|
||||
MemoryBlock m_data;
|
||||
size_t m_size;
|
||||
};
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Provides optional asynchronous scheduling for backends.
|
||||
|
||||
For improved performance, a backend has the option of performing writes
|
||||
in batches. These writes can be scheduled using the provided scheduler
|
||||
object.
|
||||
|
||||
@see BatchWriter
|
||||
*/
|
||||
class Scheduler
|
||||
{
|
||||
public:
|
||||
/** Derived classes perform scheduled tasks. */
|
||||
struct Task
|
||||
{
|
||||
virtual ~Task () { }
|
||||
|
||||
/** Performs the task.
|
||||
|
||||
The call may take place on a foreign thread.
|
||||
*/
|
||||
virtual void performScheduledTask () = 0;
|
||||
};
|
||||
|
||||
/** Schedules a task.
|
||||
|
||||
Depending on the implementation, this could happen
|
||||
immediately or get deferred.
|
||||
*/
|
||||
virtual void scheduleTask (Task* task) = 0;
|
||||
};
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Helps with batch writing.
|
||||
|
||||
The batch writes are performed with a scheduled task. Use of the
|
||||
class it not required. A backend can implement its own write batching,
|
||||
or skip write batching if doing so yields a performance benefit.
|
||||
|
||||
@see Scheduler
|
||||
*/
|
||||
// VFALCO NOTE I'm not entirely happy having placed this here,
|
||||
// because whoever needs to use NodeStore certainly doesn't
|
||||
// need to see the implementation details of BatchWriter.
|
||||
//
|
||||
class BatchWriter : private Scheduler::Task
|
||||
{
|
||||
public:
|
||||
/** This callback does the actual writing. */
|
||||
struct Callback
|
||||
{
|
||||
virtual void writeBatch (Batch const& batch) = 0;
|
||||
};
|
||||
|
||||
/** Create a batch writer. */
|
||||
BatchWriter (Callback& callback, Scheduler& scheduler);
|
||||
|
||||
/** Destroy a batch writer.
|
||||
|
||||
Anything pending in the batch is written out before this returns.
|
||||
*/
|
||||
~BatchWriter ();
|
||||
|
||||
/** Store the object.
|
||||
|
||||
This will add to the batch and initiate a scheduled task to
|
||||
write the batch out.
|
||||
*/
|
||||
void store (NodeObject::Ptr const& object);
|
||||
|
||||
/** Get an estimate of the amount of writing I/O pending. */
|
||||
int getWriteLoad ();
|
||||
|
||||
private:
|
||||
void performScheduledTask ();
|
||||
void writeBatch ();
|
||||
void waitForWriting ();
|
||||
|
||||
private:
|
||||
typedef boost::recursive_mutex LockType;
|
||||
typedef boost::condition_variable_any CondvarType;
|
||||
|
||||
Callback& m_callback;
|
||||
Scheduler& m_scheduler;
|
||||
LockType mWriteMutex;
|
||||
CondvarType mWriteCondition;
|
||||
int mWriteGeneration;
|
||||
int mWriteLoad;
|
||||
bool mWritePending;
|
||||
Batch mWriteSet;
|
||||
};
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** A backend used for the store.
|
||||
|
||||
The NodeStore uses a swappable backend so that other database systems
|
||||
can be tried. Different databases may offer various features such
|
||||
as improved performance, fault tolerant or distributed storage, or
|
||||
all in-memory operation.
|
||||
|
||||
A given instance of a backend is fixed to a particular key size.
|
||||
*/
|
||||
class Backend
|
||||
{
|
||||
public:
|
||||
/** Return codes from operations. */
|
||||
enum Status
|
||||
{
|
||||
ok,
|
||||
notFound,
|
||||
dataCorrupt,
|
||||
unknown
|
||||
};
|
||||
|
||||
/** Destroy the backend.
|
||||
|
||||
All open files are closed and flushed. If there are batched writes
|
||||
or other tasks scheduled, they will be completed before this call
|
||||
returns.
|
||||
*/
|
||||
virtual ~Backend () { }
|
||||
|
||||
/** Get the human-readable name of this backend.
|
||||
|
||||
This is used for diagnostic output.
|
||||
*/
|
||||
virtual std::string getName() = 0;
|
||||
|
||||
/** Fetch a single object.
|
||||
|
||||
If the object is not found or an error is encountered, the
|
||||
result will indicate the condition.
|
||||
|
||||
@note This will be called concurrently.
|
||||
|
||||
@param key A pointer to the key data.
|
||||
@param pObject [out] The created object if successful.
|
||||
|
||||
@return The result of the operation.
|
||||
*/
|
||||
virtual Status fetch (void const* key, NodeObject::Ptr* pObject) = 0;
|
||||
|
||||
/** Store a single object.
|
||||
|
||||
Depending on the implementation this may happen immediately
|
||||
or deferred using a scheduled task.
|
||||
|
||||
@note This will be called concurrently.
|
||||
|
||||
@param object The object to store.
|
||||
*/
|
||||
virtual void store (NodeObject::Ptr const& object) = 0;
|
||||
|
||||
/** Store a group of objects.
|
||||
|
||||
@note This function will not be called concurrently with
|
||||
itself or @ref store.
|
||||
*/
|
||||
virtual void storeBatch (Batch const& batch) = 0;
|
||||
|
||||
/** Callback for iterating through objects.
|
||||
|
||||
@see visitAll
|
||||
*/
|
||||
struct VisitCallback
|
||||
{
|
||||
virtual void visitObject (NodeObject::Ptr const& object) = 0;
|
||||
};
|
||||
|
||||
/** Visit every object in the database
|
||||
|
||||
This is usually called during import.
|
||||
|
||||
@note This routine will not be called concurrently with itself
|
||||
or other methods.
|
||||
|
||||
@see import, VisitCallback
|
||||
*/
|
||||
virtual void visitAll (VisitCallback& callback) = 0;
|
||||
|
||||
/** Estimate the number of write operations pending. */
|
||||
virtual int getWriteLoad () = 0;
|
||||
};
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Factory to produce backends.
|
||||
*/
|
||||
class BackendFactory
|
||||
{
|
||||
public:
|
||||
virtual ~BackendFactory () { }
|
||||
|
||||
/** Retrieve the name of this factory. */
|
||||
virtual String getName () const = 0;
|
||||
|
||||
/** Create an instance of this factory's backend.
|
||||
|
||||
@param keyBytes The fixed number of bytes per key.
|
||||
@param keyValues A set of key/value configuration pairs.
|
||||
@param scheduler The scheduler to use for running tasks.
|
||||
|
||||
@return A pointer to the Backend object.
|
||||
*/
|
||||
virtual Backend* createInstance (size_t keyBytes,
|
||||
Parameters const& parameters,
|
||||
Scheduler& scheduler) = 0;
|
||||
};
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** Construct a node store.
|
||||
|
||||
The parameters are key value pairs passed to the backend. The
|
||||
'type' key must exist, it defines the choice of backend. Most
|
||||
backends also require a 'path' field.
|
||||
|
||||
Some choices for 'type' are:
|
||||
HyperLevelDB, LevelDB, SQLite, KeyvaDB, MDB
|
||||
|
||||
If the fastBackendParameter is omitted or empty, no ephemeral database
|
||||
is used. If the scheduler parameter is omited or unspecified, a
|
||||
synchronous scheduler is used which performs all tasks immediately on
|
||||
the caller's thread.
|
||||
|
||||
@note If the database cannot be opened or created, an exception is thrown.
|
||||
|
||||
@param backendParameters The parameter string for the persistent backend.
|
||||
@param fastBackendParameters [optional] The parameter string for the ephemeral backend.
|
||||
@param scheduler [optional The scheduler to use for performing asynchronous tasks.
|
||||
|
||||
@return The opened database.
|
||||
*/
|
||||
static NodeStore* New (Parameters const& backendParameters,
|
||||
Parameters fastBackendParameters = Parameters (),
|
||||
Scheduler& scheduler = getSynchronousScheduler ());
|
||||
|
||||
/** Get the synchronous scheduler.
|
||||
|
||||
The synchronous scheduler performs all tasks immediately, before
|
||||
returning to the caller, using the caller's thread.
|
||||
*/
|
||||
static Scheduler& getSynchronousScheduler ();
|
||||
|
||||
/** Destroy the node store.
|
||||
|
||||
All pending operations are completed, pending writes flushed,
|
||||
and files closed before this returns.
|
||||
*/
|
||||
virtual ~NodeStore () { }
|
||||
|
||||
/** Retrieve the name associated with this backend.
|
||||
|
||||
This is used for diagnostics and may not reflect the actual path
|
||||
or paths used by the underlying backend.
|
||||
*/
|
||||
virtual String getName () const = 0;
|
||||
|
||||
/** Add the specified backend factory to the list of available factories.
|
||||
|
||||
The names of available factories are compared against the "type"
|
||||
value in the parameter list on construction.
|
||||
|
||||
@param factory The factory to add.
|
||||
*/
|
||||
static void addBackendFactory (BackendFactory& factory);
|
||||
|
||||
/** Fetch an object.
|
||||
|
||||
If the object is known to be not in the database, isn't found in the
|
||||
database during the fetch, or failed to load correctly during the fetch,
|
||||
`nullptr` is returned.
|
||||
|
||||
@note This can be called concurrently.
|
||||
|
||||
@param hash The key of the object to retrieve.
|
||||
|
||||
@return The object, or nullptr if it couldn't be retrieved.
|
||||
*/
|
||||
virtual NodeObject::pointer fetch (uint256 const& hash) = 0;
|
||||
|
||||
/** Store the object.
|
||||
|
||||
The caller's Blob parameter is overwritten.
|
||||
|
||||
@param type The type of object.
|
||||
@param ledgerIndex The ledger in which the object appears.
|
||||
@param data The payload of the object. The caller's
|
||||
variable is overwritten.
|
||||
@param hash The 256-bit hash of the payload data.
|
||||
|
||||
@return `true` if the object was stored?
|
||||
*/
|
||||
virtual void store (NodeObjectType type,
|
||||
uint32 ledgerIndex,
|
||||
Blob& data,
|
||||
uint256 const& hash) = 0;
|
||||
|
||||
/** Visit every object in the database
|
||||
|
||||
This is usually called during import.
|
||||
|
||||
@note This routine will not be called concurrently with itself
|
||||
or other methods.
|
||||
|
||||
@see import
|
||||
*/
|
||||
virtual void visitAll (Backend::VisitCallback& callback) = 0;
|
||||
|
||||
/** Import objects from another database. */
|
||||
virtual void import (NodeStore& sourceDatabase) = 0;
|
||||
|
||||
/** Retrieve the estimated number of pending write operations.
|
||||
This is used for diagnostics.
|
||||
*/
|
||||
virtual int getWriteLoad () = 0;
|
||||
|
||||
// VFALCO TODO Document this.
|
||||
virtual float getCacheHitRate () = 0;
|
||||
|
||||
// VFALCO TODO Document this.
|
||||
// TODO Document the parameter meanings.
|
||||
virtual void tune (int size, int age) = 0;
|
||||
|
||||
// VFALCO TODO Document this.
|
||||
virtual void sweep () = 0;
|
||||
|
||||
/** Add the known Backend factories to the singleton.
|
||||
*/
|
||||
static void addAvailableBackends ();
|
||||
};
|
||||
|
||||
#endif
|
||||
77
src/ripple_core/node/NullBackendFactory.cpp
Normal file
77
src/ripple_core/node/NullBackendFactory.cpp
Normal file
@@ -0,0 +1,77 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
class NullBackendFactory::Backend : public NodeStore::Backend
|
||||
{
|
||||
public:
|
||||
Backend ()
|
||||
{
|
||||
}
|
||||
|
||||
~Backend ()
|
||||
{
|
||||
}
|
||||
|
||||
std::string getName()
|
||||
{
|
||||
return std::string ();
|
||||
}
|
||||
|
||||
Status fetch (void const*, NodeObject::Ptr*)
|
||||
{
|
||||
return notFound;
|
||||
}
|
||||
|
||||
void store (NodeObject::ref object)
|
||||
{
|
||||
}
|
||||
|
||||
void storeBatch (NodeStore::Batch const& batch)
|
||||
{
|
||||
}
|
||||
|
||||
void visitAll (VisitCallback& callback)
|
||||
{
|
||||
}
|
||||
|
||||
int getWriteLoad ()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
NullBackendFactory::NullBackendFactory ()
|
||||
{
|
||||
}
|
||||
|
||||
NullBackendFactory::~NullBackendFactory ()
|
||||
{
|
||||
}
|
||||
|
||||
NullBackendFactory& NullBackendFactory::getInstance ()
|
||||
{
|
||||
static NullBackendFactory instance;
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
String NullBackendFactory::getName () const
|
||||
{
|
||||
return "none";
|
||||
}
|
||||
|
||||
NodeStore::Backend* NullBackendFactory::createInstance (
|
||||
size_t,
|
||||
StringPairArray const&,
|
||||
NodeStore::Scheduler&)
|
||||
{
|
||||
return new NullBackendFactory::Backend;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
34
src/ripple_core/node/NullBackendFactory.h
Normal file
34
src/ripple_core/node/NullBackendFactory.h
Normal file
@@ -0,0 +1,34 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
Copyright (c) 2011-2013, OpenCoin, Inc.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#ifndef RIPPLE_CORE_NODE_NULLBACKENDFACTORY_H_INCLUDED
|
||||
#define RIPPLE_CORE_NODE_NULLBACKENDFACTORY_H_INCLUDED
|
||||
|
||||
/** Factory to produce a null backend.
|
||||
|
||||
This is for standalone / testing mode.
|
||||
|
||||
@see NodeStore
|
||||
*/
|
||||
class NullBackendFactory : public NodeStore::BackendFactory
|
||||
{
|
||||
private:
|
||||
class Backend;
|
||||
|
||||
NullBackendFactory ();
|
||||
~NullBackendFactory ();
|
||||
|
||||
public:
|
||||
static NullBackendFactory& getInstance ();
|
||||
|
||||
String getName () const;
|
||||
|
||||
NodeStore::Backend* createInstance (size_t keyBytes,
|
||||
StringPairArray const& keyValues,
|
||||
NodeStore::Scheduler& scheduler);
|
||||
};
|
||||
|
||||
#endif
|
||||
Reference in New Issue
Block a user