Mirror of https://github.com/XRPLF/rippled.git (synced 2025-11-20 11:05:54 +00:00)
Merge branch 'develop' of github.com:ripple/rippled into develop
@@ -283,32 +283,15 @@ const char* WalletDBInit[] =
int WalletDBCount = NUMBER (WalletDBInit);

// Hash node database holds nodes indexed by hash
const char* HashNodeDBInit[] =
{
    "PRAGMA synchronous=NORMAL;",
    "PRAGMA journal_mode=WAL;",
    "PRAGMA journal_size_limit=1582080;",

#if (ULONG_MAX > UINT_MAX) && !defined (NO_SQLITE_MMAP)
    "PRAGMA mmap_size=171798691840;",
#endif

    "BEGIN TRANSACTION;",

    "CREATE TABLE CommittedObjects ( \
        Hash        CHARACTER(64) PRIMARY KEY, \
        ObjType     CHAR(1) NOT NULL, \
        LedgerIndex BIGINT UNSIGNED, \
        Object      BLOB \
    );",

    "END TRANSACTION;"
};

// VFALCO TODO Remove this since it looks unused
/*
int HashNodeDBCount = NUMBER (HashNodeDBInit);
*/

// Net node database holds nodes seen on the network
// XXX Not really used needs replacement.
/*
const char* NetNodeDBInit[] =
{
    "CREATE TABLE KnownNodes ( \
@@ -320,7 +303,10 @@ const char* NetNodeDBInit[] =
};

int NetNodeDBCount = NUMBER (NetNodeDBInit);
*/

// This appears to be unused
/*
const char* PathFindDBInit[] =
{
    "PRAGMA synchronous = OFF; ",
@@ -353,5 +339,5 @@ const char* PathFindDBInit[] =
};

int PathFindDBCount = NUMBER (PathFindDBInit);
*/

// vim:ts=4
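Aside: the arrays above are plain lists of SQL statements, and NUMBER(...) is evidently a count-of-elements macro. As a hedged sketch, this is how such an init array is typically replayed against an open SQLite handle — executeInitStatements is a hypothetical helper written for illustration, not rippled's actual code:

    #include <sqlite3.h>
    #include <stdexcept>
    #include <string>

    // Hypothetical helper: run each statement of an init array against an
    // already-open SQLite database, stopping at the first failure.
    static void executeInitStatements (sqlite3* db, char const* const* statements, int count)
    {
        for (int i = 0; i < count; ++i)
        {
            char* errMsg = nullptr;

            // sqlite3_exec runs one or more semicolon-terminated statements.
            if (sqlite3_exec (db, statements[i], nullptr, nullptr, &errMsg) != SQLITE_OK)
            {
                std::string what = errMsg ? errMsg : "unknown SQLite error";
                sqlite3_free (errMsg);
                throw std::runtime_error ("DB init failed: " + what);
            }
        }
    }

With the arrays above, this would be invoked as executeInitStatements (db, WalletDBInit, WalletDBCount).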
@@ -12,19 +12,11 @@ extern const char* RpcDBInit[];
extern const char* TxnDBInit[];
extern const char* LedgerDBInit[];
extern const char* WalletDBInit[];
extern const char* HashNodeDBInit[];

// VFALCO TODO Figure out what these counts are for
extern int RpcDBCount;
extern int TxnDBCount;
extern int LedgerDBCount;
extern int WalletDBCount;
extern int HashNodeDBCount;

// VFALCO TODO Seems these two aren't used so delete EVERYTHING.
extern const char* NetNodeDBInit[];
extern const char* PathFindDBInit[];
extern int NetNodeDBCount;
extern int PathFindDBCount;

#endif
@@ -529,10 +529,12 @@ void Ledger::saveAcceptedLedger (Job&, bool fromConsensus)
    assert (getTransHash () == mTransactionMap->getHash ());

    // Save the ledger header in the hashed object store
-    Serializer s (128);
-    s.add32 (HashPrefix::ledgerMaster);
-    addRaw (s);
-    getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, s.peekData (), mHash);
+    {
+        Serializer s (128);
+        s.add32 (HashPrefix::ledgerMaster);
+        addRaw (s);
+        getApp().getNodeStore ().store (hotLEDGER, mLedgerSeq, s.modData (), mHash);
+    }

    AcceptedLedger::pointer aLedger = AcceptedLedger::makeAcceptedLedger (shared_from_this ());
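The peekData-to-modData change above (and the same change in InboundLedger below) matters because the refactored store path takes ownership of the serialized buffer rather than copying it: peekData yields a read-only view of the Serializer's internal Blob, while modData yields a mutable one that the callee can swap away. A toy sketch of the distinction, with ToySerializer standing in for the real class:

    #include <utility>
    #include <vector>

    typedef std::vector<unsigned char> Blob;

    // Toy stand-in for rippled's Serializer, reduced to the two accessors
    // whose difference the diff above relies on.
    class ToySerializer
    {
    public:
        Blob const& peekData () const { return m_data; }  // read-only view: caller must copy
        Blob&       modData  ()       { return m_data; }  // mutable view: caller may take the buffer

    private:
        Blob m_data;
    };

    // A store() that receives a mutable Blob can steal the storage with swap,
    // avoiding one full copy of the serialized ledger header.
    void storeTakingOwnership (Blob& data)
    {
        Blob owned;
        owned.swap (data);  // O(1) pointer swap; 'data' is left empty
        // ... persist 'owned' ...
    }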
@@ -48,7 +48,7 @@ bool InboundLedger::tryLocal ()
    if (!mHaveBase)
    {
        // Nothing we can do without the ledger base
-        NodeObject::pointer node = getApp().getNodeStore ().retrieve (mHash);
+        NodeObject::pointer node = getApp().getNodeStore ().fetch (mHash);

        if (!node)
        {
@@ -672,7 +672,7 @@ bool InboundLedger::takeBase (const std::string& data) // data must not have hash prefix
    Serializer s (data.size () + 4);
    s.add32 (HashPrefix::ledgerMaster);
    s.addRaw (data);
-    getApp().getNodeStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.peekData (), mHash);
+    getApp().getNodeStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.modData (), mHash);

    progress ();
@@ -6,118 +6,208 @@

#if RIPPLE_HYPERLEVELDB_AVAILABLE

-class HyperLevelDBBackendFactory::Backend : public NodeStore::Backend
+class HyperLevelDBBackendFactory::Backend
+    : public NodeStore::Backend
+    , public NodeStore::BatchWriter::Callback
+    , LeakChecked <HyperLevelDBBackendFactory::Backend>
{
public:
-    Backend (StringPairArray const& keyValues)
-        : mName(keyValues ["path"].toStdString ())
-        , mDB(NULL)
+    typedef RecycledObjectPool <std::string> StringPool;
+
+    Backend (size_t keyBytes,
+             StringPairArray const& keyValues,
+             NodeStore::Scheduler& scheduler)
+        : m_keyBytes (keyBytes)
+        , m_scheduler (scheduler)
+        , m_batch (*this, scheduler)
+        , m_name (keyValues ["path"].toStdString ())
    {
-        if (mName.empty())
-            throw std::runtime_error ("Missing path in LevelDB backend");
+        if (m_name.empty ())
+            Throw (std::runtime_error ("Missing path in LevelDB backend"));

        hyperleveldb::Options options;
        options.create_if_missing = true;

-        if (keyValues["cache_mb"].isEmpty())
+        if (keyValues ["cache_mb"].isEmpty ())
        {
            options.block_cache = hyperleveldb::NewLRUCache (theConfig.getSize (siHashNodeDBCache) * 1024 * 1024);
        }
        else
        {
            options.block_cache = hyperleveldb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L);
        }

-        if (keyValues["filter_bits"].isEmpty())
+        if (keyValues ["filter_bits"].isEmpty())
        {
            if (theConfig.NODE_SIZE >= 2)
                options.filter_policy = hyperleveldb::NewBloomFilterPolicy (10);
        }
-        else if (keyValues["filter_bits"].getIntValue() != 0)
-            options.filter_policy = hyperleveldb::NewBloomFilterPolicy (keyValues["filter_bits"].getIntValue());
+        else if (keyValues ["filter_bits"].getIntValue() != 0)
+        {
+            options.filter_policy = hyperleveldb::NewBloomFilterPolicy (keyValues ["filter_bits"].getIntValue ());
+        }

-        if (!keyValues["open_files"].isEmpty())
-            options.max_open_files = keyValues["open_files"].getIntValue();
+        if (! keyValues["open_files"].isEmpty ())
+        {
+            options.max_open_files = keyValues ["open_files"].getIntValue();
+        }

-        hyperleveldb::Status status = hyperleveldb::DB::Open (options, mName, &mDB);
-        if (!status.ok () || !mDB)
-            throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
+        hyperleveldb::DB* db = nullptr;
+        hyperleveldb::Status status = hyperleveldb::DB::Open (options, m_name, &db);
+        if (!status.ok () || !db)
+            Throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
+
+        m_db = db;
    }

    ~Backend ()
    {
-        delete mDB;
    }

-    std::string getDataBaseName()
+    std::string getName()
    {
-        return mName;
+        return m_name;
    }

-    bool bulkStore (const std::vector< NodeObject::pointer >& objs)
-    {
-        hyperleveldb::WriteBatch batch;
+    //--------------------------------------------------------------------------
+
+    Status fetch (void const* key, NodeObject::Ptr* pObject)
+    {
+        pObject->reset ();
+
+        Status status (ok);
+
+        hyperleveldb::ReadOptions const options;
+        hyperleveldb::Slice const slice (static_cast <char const*> (key), m_keyBytes);

-        BOOST_FOREACH (NodeObject::ref obj, objs)
        {
-            Blob blob (toBlob (obj));
-            batch.Put (
-                hyperleveldb::Slice (reinterpret_cast<char const*>(obj->getHash ().begin ()), 256 / 8),
-                hyperleveldb::Slice (reinterpret_cast<char const*>(&blob.front ()), blob.size ()));
+            // These are reused std::string objects,
+            // required for leveldb's funky interface.
+            //
+            StringPool::ScopedItem item (m_stringPool);
+            std::string& string = item.getObject ();
+
+            hyperleveldb::Status getStatus = m_db->Get (options, slice, &string);
+
+            if (getStatus.ok ())
+            {
+                NodeStore::DecodedBlob decoded (key, string.data (), string.size ());
+
+                if (decoded.wasOk ())
+                {
+                    *pObject = decoded.createObject ();
+                }
+                else
+                {
+                    // Decoding failed, probably corrupted!
+                    //
+                    status = dataCorrupt;
+                }
+            }
+            else
+            {
+                if (getStatus.IsCorruption ())
+                {
+                    status = dataCorrupt;
+                }
+                else if (getStatus.IsNotFound ())
+                {
+                    status = notFound;
+                }
+                else
+                {
+                    status = unknown;
+                }
+            }
        }
-        return mDB->Write (hyperleveldb::WriteOptions (), &batch).ok ();
+
+        return status;
    }

-    NodeObject::pointer retrieve (uint256 const& hash)
+    void store (NodeObject::ref object)
    {
-        std::string sData;
-        if (!mDB->Get (hyperleveldb::ReadOptions (),
-            hyperleveldb::Slice (reinterpret_cast<char const*>(hash.begin ()), 256 / 8), &sData).ok ())
+        m_batch.store (object);
+    }
+
+    void storeBatch (NodeStore::Batch const& batch)
+    {
+        hyperleveldb::WriteBatch wb;
+
        {
-            return NodeObject::pointer();
+            NodeStore::EncodedBlob::Pool::ScopedItem item (m_blobPool);
+
+            BOOST_FOREACH (NodeObject::ref object, batch)
+            {
+                item.getObject ().prepare (object);
+
+                wb.Put (
+                    hyperleveldb::Slice (reinterpret_cast <char const*> (
+                        item.getObject ().getKey ()), m_keyBytes),
+                    hyperleveldb::Slice (reinterpret_cast <char const*> (
+                        item.getObject ().getData ()), item.getObject ().getSize ()));
+            }
        }
-        return fromBinary(hash, &sData[0], sData.size ());
+
+        hyperleveldb::WriteOptions const options;
+
+        m_db->Write (options, &wb).ok ();
    }

-    void visitAll (FUNCTION_TYPE<void (NodeObject::pointer)> func)
+    void visitAll (VisitCallback& callback)
    {
-        hyperleveldb::Iterator* it = mDB->NewIterator (hyperleveldb::ReadOptions ());
+        hyperleveldb::ReadOptions const options;
+
+        ScopedPointer <hyperleveldb::Iterator> it (m_db->NewIterator (options));

        for (it->SeekToFirst (); it->Valid (); it->Next ())
        {
-            if (it->key ().size () == 256 / 8)
+            if (it->key ().size () == m_keyBytes)
            {
-                uint256 hash;
-                memcpy(hash.begin(), it->key ().data(), 256 / 8);
-                func (fromBinary (hash, it->value ().data (), it->value ().size ()));
+                NodeStore::DecodedBlob decoded (it->key ().data (),
+                                                it->value ().data (),
+                                                it->value ().size ());
+
+                if (decoded.wasOk ())
+                {
+                    NodeObject::Ptr object (decoded.createObject ());
+
+                    callback.visitObject (object);
+                }
+                else
+                {
+                    // Uh oh, corrupted data!
+                    WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ());
+                }
+            }
+            else
+            {
+                // VFALCO NOTE What does it mean to find an
+                //             incorrectly sized key? Corruption?
+                WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size ();
            }
        }
    }

-    Blob toBlob(NodeObject::ref obj)
+    int getWriteLoad ()
    {
-        Blob rawData (9 + obj->getData ().size ());
-        unsigned char* bufPtr = &rawData.front();
-
-        *reinterpret_cast<uint32*> (bufPtr + 0) = ntohl (obj->getIndex ());
-        *reinterpret_cast<uint32*> (bufPtr + 4) = ntohl (obj->getIndex ());
-        * (bufPtr + 8) = static_cast<unsigned char> (obj->getType ());
-        memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ());
-
-        return rawData;
+        return m_batch.getWriteLoad ();
    }

-    NodeObject::pointer fromBinary(uint256 const& hash,
-        char const* data, int size)
+    //--------------------------------------------------------------------------
+
+    void writeBatch (NodeStore::Batch const& batch)
    {
-        if (size < 9)
-            throw std::runtime_error ("undersized object");
-
-        uint32 index = htonl (*reinterpret_cast<const uint32*> (data));
-        int htype = data[8];
-
-        return boost::make_shared<NodeObject> (static_cast<NodeObjectType> (htype), index,
-                                               data + 9, size - 9, hash);
+        storeBatch (batch);
    }

private:
-    std::string mName;
-    hyperleveldb::DB* mDB;
+    size_t const m_keyBytes;
+    NodeStore::Scheduler& m_scheduler;
+    NodeStore::BatchWriter m_batch;
+    StringPool m_stringPool;
+    NodeStore::EncodedBlob::Pool m_blobPool;
+    std::string m_name;
+    ScopedPointer <hyperleveldb::DB> m_db;
};

//------------------------------------------------------------------------------
@@ -142,9 +232,12 @@ String HyperLevelDBBackendFactory::getName () const
    return "HyperLevelDB";
}

-NodeStore::Backend* HyperLevelDBBackendFactory::createInstance (StringPairArray const& keyValues)
+NodeStore::Backend* HyperLevelDBBackendFactory::createInstance (
+    size_t keyBytes,
+    StringPairArray const& keyValues,
+    NodeStore::Scheduler& scheduler)
{
-    return new HyperLevelDBBackendFactory::Backend (keyValues);
+    return new HyperLevelDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
}

//------------------------------------------------------------------------------

@@ -23,7 +23,10 @@ public:
    static HyperLevelDBBackendFactory& getInstance ();

    String getName () const;
-    NodeStore::Backend* createInstance (StringPairArray const& keyValues);
+
+    NodeStore::Backend* createInstance (size_t keyBytes,
+                                        StringPairArray const& keyValues,
+                                        NodeStore::Scheduler& scheduler);
};

#endif
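After this refactoring a backend is just five operations: fetch, store, storeBatch, visitAll, and getWriteLoad. Below is a minimal in-memory sketch of that contract; MemoryBackend, ToyObject, and the local Status enum are illustrative stand-ins for the rippled types, not code from the tree:

    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    // Standalone stand-ins for the rippled types used by the Backend interface.
    struct ToyObject { std::string key; std::vector<unsigned char> data; };
    typedef std::shared_ptr<ToyObject> ToyObjectPtr;

    enum Status { ok, notFound, dataCorrupt, unknown };

    // A toy backend honoring the fetch/store/storeBatch contract from the diff:
    // fixed-width keys, and status codes instead of exceptions on lookup.
    class MemoryBackend
    {
    public:
        explicit MemoryBackend (size_t keyBytes) : m_keyBytes (keyBytes) { }

        Status fetch (void const* key, ToyObjectPtr* pObject)
        {
            pObject->reset ();
            std::string const k (static_cast<char const*> (key), m_keyBytes);
            std::map<std::string, ToyObjectPtr>::iterator it = m_map.find (k);
            if (it == m_map.end ())
                return notFound;
            *pObject = it->second;
            return ok;
        }

        void store (ToyObjectPtr const& object)
        {
            m_map[object->key] = object;   // immediate write; no batching needed
        }

        void storeBatch (std::vector<ToyObjectPtr> const& batch)
        {
            for (size_t i = 0; i < batch.size (); ++i)
                store (batch[i]);
        }

        int getWriteLoad () { return 0; }  // nothing is ever pending

    private:
        size_t const m_keyBytes;
        std::map<std::string, ToyObjectPtr> m_map;
    };

The real backends in this commit differ mainly in how fetch maps each engine's error reporting onto ok/notFound/dataCorrupt/unknown.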
modules/ripple_app/node/ripple_KeyvaDBBackendFactory.cpp (new file, 179 lines)
@@ -0,0 +1,179 @@
//------------------------------------------------------------------------------
/*
    Copyright (c) 2011-2013, OpenCoin, Inc.
*/
//==============================================================================

class KeyvaDBBackendFactory::Backend : public NodeStore::Backend
{
private:
    typedef RecycledObjectPool <MemoryBlock> MemoryPool;
    typedef RecycledObjectPool <NodeStore::EncodedBlob> EncodedBlobPool;

public:
    Backend (size_t keyBytes,
             StringPairArray const& keyValues,
             NodeStore::Scheduler& scheduler)
        : m_keyBytes (keyBytes)
        , m_scheduler (scheduler)
        , m_path (keyValues ["path"])
        , m_db (KeyvaDB::New (
                    keyBytes,
                    3,
                    File::getCurrentWorkingDirectory().getChildFile (m_path).withFileExtension ("key"),
                    File::getCurrentWorkingDirectory().getChildFile (m_path).withFileExtension ("val")))
    {
    }

    ~Backend ()
    {
    }

    std::string getName ()
    {
        return m_path.toStdString ();
    }

    //--------------------------------------------------------------------------

    Status fetch (void const* key, NodeObject::Ptr* pObject)
    {
        pObject->reset ();

        Status status (ok);

        struct Callback : KeyvaDB::GetCallback
        {
            explicit Callback (MemoryBlock& block)
                : m_block (block)
            {
            }

            void* getStorageForValue (int valueBytes)
            {
                m_size = valueBytes;
                m_block.ensureSize (valueBytes);

                return m_block.getData ();
            }

            void const* getData () const noexcept
            {
                return m_block.getData ();
            }

            size_t getSize () const noexcept
            {
                return m_size;
            }

        private:
            MemoryBlock& m_block;
            size_t m_size;
        };

        MemoryPool::ScopedItem item (m_memoryPool);
        MemoryBlock& block (item.getObject ());

        Callback cb (block);

        // VFALCO TODO Can't we get KeyvaDB to provide a proper status?
        //
        bool const found = m_db->get (key, &cb);

        if (found)
        {
            NodeStore::DecodedBlob decoded (key, cb.getData (), cb.getSize ());

            if (decoded.wasOk ())
            {
                *pObject = decoded.createObject ();

                status = ok;
            }
            else
            {
                status = dataCorrupt;
            }
        }
        else
        {
            status = notFound;
        }

        return status;
    }

    void store (NodeObject::ref object)
    {
        EncodedBlobPool::ScopedItem item (m_blobPool);
        NodeStore::EncodedBlob& encoded (item.getObject ());

        encoded.prepare (object);

        m_db->put (encoded.getKey (), encoded.getData (), encoded.getSize ());
    }

    void storeBatch (NodeStore::Batch const& batch)
    {
        for (int i = 0; i < batch.size (); ++i)
            store (batch [i]);
    }

    void visitAll (VisitCallback& callback)
    {
        // VFALCO TODO Implement this!
        //
        bassertfalse;
        //m_db->visitAll ();
    }

    int getWriteLoad ()
    {
        // we dont do pending writes
        return 0;
    }

    //--------------------------------------------------------------------------

private:
    size_t const m_keyBytes;
    NodeStore::Scheduler& m_scheduler;
    String m_path;
    ScopedPointer <KeyvaDB> m_db;
    MemoryPool m_memoryPool;
    EncodedBlobPool m_blobPool;
};

//------------------------------------------------------------------------------

KeyvaDBBackendFactory::KeyvaDBBackendFactory ()
{
}

KeyvaDBBackendFactory::~KeyvaDBBackendFactory ()
{
}

KeyvaDBBackendFactory& KeyvaDBBackendFactory::getInstance ()
{
    static KeyvaDBBackendFactory instance;

    return instance;
}

String KeyvaDBBackendFactory::getName () const
{
    return "KeyvaDB";
}

NodeStore::Backend* KeyvaDBBackendFactory::createInstance (
    size_t keyBytes,
    StringPairArray const& keyValues,
    NodeStore::Scheduler& scheduler)
{
    return new KeyvaDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
}

//------------------------------------------------------------------------------
modules/ripple_app/node/ripple_KeyvaDBBackendFactory.h (new file, 30 lines)
@@ -0,0 +1,30 @@
//------------------------------------------------------------------------------
/*
    Copyright (c) 2011-2013, OpenCoin, Inc.
*/
//==============================================================================

#ifndef RIPPLE_KEYVABACKENDFACTORY_H_INCLUDED
#define RIPPLE_KEYVABACKENDFACTORY_H_INCLUDED

/** Factory to produce KeyvaDB backends for the NodeStore.
*/
class KeyvaDBBackendFactory : public NodeStore::BackendFactory
{
private:
    class Backend;

    KeyvaDBBackendFactory ();
    ~KeyvaDBBackendFactory ();

public:
    static KeyvaDBBackendFactory& getInstance ();

    String getName () const;

    NodeStore::Backend* createInstance (size_t keyBytes,
                                        StringPairArray const& keyValues,
                                        NodeStore::Scheduler& scheduler);
};

#endif
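Both LevelDB-family backends above recycle std::string scratch buffers through RecycledObjectPool, and the KeyvaDB backend does the same with MemoryBlock and EncodedBlob objects. A sketch of the underlying recycle-on-scope-exit pattern, under the assumption that this is roughly what RecycledObjectPool does — RecyclePool here is illustrative, not beast's implementation:

    #include <mutex>
    #include <vector>

    // Sketch of a recycle pool: acquire() hands out a cached T when one is
    // available, and ScopedItem returns it on scope exit, so the object's
    // internal capacity (e.g. a string's buffer) is reused across calls.
    template <class T>
    class RecyclePool
    {
    public:
        class ScopedItem
        {
        public:
            explicit ScopedItem (RecyclePool& pool)
                : m_pool (pool), m_object (pool.acquire ()) { }
            ~ScopedItem () { m_pool.release (m_object); }
            T& getObject () { return *m_object; }
        private:
            RecyclePool& m_pool;
            T* m_object;
        };

    private:
        T* acquire ()
        {
            std::lock_guard<std::mutex> lock (m_mutex);
            if (m_free.empty ())
                return new T;
            T* t = m_free.back ();
            m_free.pop_back ();
            return t;
        }

        void release (T* t)
        {
            std::lock_guard<std::mutex> lock (m_mutex);
            m_free.push_back (t);  // capacity stays allocated for the next user
        }

        std::mutex m_mutex;
        std::vector<T*> m_free;
    };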
@@ -4,23 +4,38 @@
*/
//==============================================================================

-class LevelDBBackendFactory::Backend : public NodeStore::Backend
+class LevelDBBackendFactory::Backend
+    : public NodeStore::Backend
+    , public NodeStore::BatchWriter::Callback
+    , LeakChecked <LevelDBBackendFactory::Backend>
{
public:
-    Backend (StringPairArray const& keyValues)
-        : mName(keyValues ["path"].toStdString ())
-        , mDB(NULL)
+    typedef RecycledObjectPool <std::string> StringPool;
+
+    //--------------------------------------------------------------------------
+
+    Backend (int keyBytes,
+             StringPairArray const& keyValues,
+             NodeStore::Scheduler& scheduler)
+        : m_keyBytes (keyBytes)
+        , m_scheduler (scheduler)
+        , m_batch (*this, scheduler)
+        , m_name (keyValues ["path"].toStdString ())
    {
-        if (mName.empty())
-            throw std::runtime_error ("Missing path in LevelDB backend");
+        if (m_name.empty())
+            Throw (std::runtime_error ("Missing path in LevelDB backend"));

        leveldb::Options options;
        options.create_if_missing = true;

        if (keyValues["cache_mb"].isEmpty())
        {
            options.block_cache = leveldb::NewLRUCache (theConfig.getSize (siHashNodeDBCache) * 1024 * 1024);
        }
        else
        {
            options.block_cache = leveldb::NewLRUCache (keyValues["cache_mb"].getIntValue() * 1024L * 1024L);
        }

        if (keyValues["filter_bits"].isEmpty())
        {
@@ -28,94 +43,171 @@ public:
            options.filter_policy = leveldb::NewBloomFilterPolicy (10);
        }
        else if (keyValues["filter_bits"].getIntValue() != 0)
        {
            options.filter_policy = leveldb::NewBloomFilterPolicy (keyValues["filter_bits"].getIntValue());
        }

-        if (!keyValues["open_files"].isEmpty())
+        if (! keyValues["open_files"].isEmpty())
        {
            options.max_open_files = keyValues["open_files"].getIntValue();
        }

-        leveldb::Status status = leveldb::DB::Open (options, mName, &mDB);
-        if (!status.ok () || !mDB)
-            throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
+        leveldb::DB* db = nullptr;
+        leveldb::Status status = leveldb::DB::Open (options, m_name, &db);
+        if (!status.ok () || !db)
+            Throw (std::runtime_error (std::string("Unable to open/create leveldb: ") + status.ToString()));
+
+        m_db = db;
    }

    ~Backend ()
    {
-        delete mDB;
    }

-    std::string getDataBaseName()
+    std::string getName()
    {
-        return mName;
+        return m_name;
    }

-    bool bulkStore (const std::vector< NodeObject::pointer >& objs)
-    {
-        leveldb::WriteBatch batch;
+    //--------------------------------------------------------------------------
+
+    Status fetch (void const* key, NodeObject::Ptr* pObject)
+    {
+        pObject->reset ();
+
+        Status status (ok);
+
+        leveldb::ReadOptions const options;
+        leveldb::Slice const slice (static_cast <char const*> (key), m_keyBytes);

-        BOOST_FOREACH (NodeObject::ref obj, objs)
        {
-            Blob blob (toBlob (obj));
-            batch.Put (
-                leveldb::Slice (reinterpret_cast<char const*>(obj->getHash ().begin ()), 256 / 8),
-                leveldb::Slice (reinterpret_cast<char const*>(&blob.front ()), blob.size ()));
+            // These are reused std::string objects,
+            // required for leveldb's funky interface.
+            //
+            StringPool::ScopedItem item (m_stringPool);
+            std::string& string = item.getObject ();
+
+            leveldb::Status getStatus = m_db->Get (options, slice, &string);
+
+            if (getStatus.ok ())
+            {
+                NodeStore::DecodedBlob decoded (key, string.data (), string.size ());
+
+                if (decoded.wasOk ())
+                {
+                    *pObject = decoded.createObject ();
+                }
+                else
+                {
+                    // Decoding failed, probably corrupted!
+                    //
+                    status = dataCorrupt;
+                }
+            }
+            else
+            {
+                if (getStatus.IsCorruption ())
+                {
+                    status = dataCorrupt;
+                }
+                else if (getStatus.IsNotFound ())
+                {
+                    status = notFound;
+                }
+                else
+                {
+                    status = unknown;
+                }
+            }
        }
-        return mDB->Write (leveldb::WriteOptions (), &batch).ok ();
+
+        return status;
    }

-    NodeObject::pointer retrieve (uint256 const& hash)
+    void store (NodeObject::ref object)
    {
-        std::string sData;
-        if (!mDB->Get (leveldb::ReadOptions (),
-            leveldb::Slice (reinterpret_cast<char const*>(hash.begin ()), 256 / 8), &sData).ok ())
+        m_batch.store (object);
+    }
+
+    void storeBatch (NodeStore::Batch const& batch)
+    {
+        leveldb::WriteBatch wb;
+
        {
-            return NodeObject::pointer();
+            NodeStore::EncodedBlob::Pool::ScopedItem item (m_blobPool);
+
+            BOOST_FOREACH (NodeObject::ref object, batch)
+            {
+                item.getObject ().prepare (object);
+
+                wb.Put (
+                    leveldb::Slice (reinterpret_cast <char const*> (item.getObject ().getKey ()),
+                                    m_keyBytes),
+                    leveldb::Slice (reinterpret_cast <char const*> (item.getObject ().getData ()),
+                                    item.getObject ().getSize ()));
+            }
        }
-        return fromBinary(hash, &sData[0], sData.size ());
+
+        leveldb::WriteOptions const options;
+
+        m_db->Write (options, &wb).ok ();
    }

-    void visitAll (FUNCTION_TYPE<void (NodeObject::pointer)> func)
+    void visitAll (VisitCallback& callback)
    {
-        leveldb::Iterator* it = mDB->NewIterator (leveldb::ReadOptions ());
+        leveldb::ReadOptions const options;
+
+        ScopedPointer <leveldb::Iterator> it (m_db->NewIterator (options));

        for (it->SeekToFirst (); it->Valid (); it->Next ())
        {
-            if (it->key ().size () == 256 / 8)
+            if (it->key ().size () == m_keyBytes)
            {
-                uint256 hash;
-                memcpy(hash.begin(), it->key ().data(), 256 / 8);
-                func (fromBinary (hash, it->value ().data (), it->value ().size ()));
+                NodeStore::DecodedBlob decoded (it->key ().data (),
+                                                it->value ().data (),
+                                                it->value ().size ());
+
+                if (decoded.wasOk ())
+                {
+                    NodeObject::Ptr object (decoded.createObject ());
+
+                    callback.visitObject (object);
+                }
+                else
+                {
+                    // Uh oh, corrupted data!
+                    WriteLog (lsFATAL, NodeObject) << "Corrupt NodeObject #" << uint256 (it->key ().data ());
+                }
+            }
+            else
+            {
+                // VFALCO NOTE What does it mean to find an
+                //             incorrectly sized key? Corruption?
+                WriteLog (lsFATAL, NodeObject) << "Bad key size = " << it->key ().size ();
            }
        }
    }

-    Blob toBlob(NodeObject::ref obj)
+    int getWriteLoad ()
    {
-        Blob rawData (9 + obj->getData ().size ());
-        unsigned char* bufPtr = &rawData.front();
-
-        *reinterpret_cast<uint32*> (bufPtr + 0) = ntohl (obj->getIndex ());
-        *reinterpret_cast<uint32*> (bufPtr + 4) = ntohl (obj->getIndex ());
-        * (bufPtr + 8) = static_cast<unsigned char> (obj->getType ());
-        memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ());
-
-        return rawData;
+        return m_batch.getWriteLoad ();
    }

-    NodeObject::pointer fromBinary(uint256 const& hash,
-        char const* data, int size)
+    //--------------------------------------------------------------------------
+
+    void writeBatch (NodeStore::Batch const& batch)
    {
-        if (size < 9)
-            throw std::runtime_error ("undersized object");
-
-        uint32 index = htonl (*reinterpret_cast<const uint32*> (data));
-        int htype = data[8];
-
-        return boost::make_shared<NodeObject> (static_cast<NodeObjectType> (htype), index,
-                                               data + 9, size - 9, hash);
+        storeBatch (batch);
    }

private:
-    std::string mName;
-    leveldb::DB* mDB;
+    size_t const m_keyBytes;
+    NodeStore::Scheduler& m_scheduler;
+    NodeStore::BatchWriter m_batch;
+    StringPool m_stringPool;
+    NodeStore::EncodedBlob::Pool m_blobPool;
+    std::string m_name;
+    ScopedPointer <leveldb::DB> m_db;
};

//------------------------------------------------------------------------------
@@ -140,9 +232,12 @@ String LevelDBBackendFactory::getName () const
    return "LevelDB";
}

-NodeStore::Backend* LevelDBBackendFactory::createInstance (StringPairArray const& keyValues)
+NodeStore::Backend* LevelDBBackendFactory::createInstance (
+    size_t keyBytes,
+    StringPairArray const& keyValues,
+    NodeStore::Scheduler& scheduler)
{
-    return new LevelDBBackendFactory::Backend (keyValues);
+    return new LevelDBBackendFactory::Backend (keyBytes, keyValues, scheduler);
}

//------------------------------------------------------------------------------

@@ -21,7 +21,10 @@ public:
    static LevelDBBackendFactory& getInstance ();

    String getName () const;
-    NodeStore::Backend* createInstance (StringPairArray const& keyValues);
+
+    NodeStore::Backend* createInstance (size_t keyBytes,
+                                        StringPairArray const& keyValues,
+                                        NodeStore::Scheduler& scheduler);
};

#endif
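The storeBatch above funnels many Puts into one leveldb::WriteBatch and commits them with a single Write call, which makes the group atomic and amortizes per-write overhead. A self-contained sketch of that pattern against the stock LevelDB API — the path and keys are made up:

    #include <cassert>

    #include <leveldb/db.h>
    #include <leveldb/write_batch.h>

    int main ()
    {
        leveldb::Options options;
        options.create_if_missing = true;

        leveldb::DB* db = nullptr;
        leveldb::Status status = leveldb::DB::Open (options, "/tmp/example-nodestore", &db);
        assert (status.ok ());

        // Collect several mutations, then apply them atomically with one Write().
        leveldb::WriteBatch batch;
        batch.Put ("key1", "value1");
        batch.Put ("key2", "value2");
        batch.Delete ("stale-key");

        status = db->Write (leveldb::WriteOptions (), &batch);
        assert (status.ok ());

        delete db;
        return 0;
    }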
@@ -6,171 +6,242 @@

#if RIPPLE_MDB_AVAILABLE

-class MdbBackendFactory::Backend : public NodeStore::Backend
+class MdbBackendFactory::Backend
+    : public NodeStore::Backend
+    , public NodeStore::BatchWriter::Callback
+    , LeakChecked <MdbBackendFactory::Backend>
{
public:
-    explicit Backend (StringPairArray const& keyValues)
-        : m_env (nullptr)
+    typedef NodeStore::Batch Batch;
+    typedef NodeStore::EncodedBlob EncodedBlob;
+    typedef NodeStore::DecodedBlob DecodedBlob;
+
+    explicit Backend (size_t keyBytes,
+                      StringPairArray const& keyValues,
+                      NodeStore::Scheduler& scheduler)
+        : m_keyBytes (keyBytes)
+        , m_scheduler (scheduler)
+        , m_batch (*this, scheduler)
+        , m_env (nullptr)
    {
-        if (keyValues ["path"].isEmpty ())
-            throw std::runtime_error ("Missing path in MDB backend");
+        String path (keyValues ["path"]);

-        int error = 0;
+        if (path.isEmpty ())
+            Throw (std::runtime_error ("Missing path in MDB backend"));

-        error = mdb_env_create (&m_env);
+        m_basePath = path.toStdString();

-        if (error == 0) // Should use the size of the file plus the free space on the disk
-            error = mdb_env_set_mapsize(m_env, 512L * 1024L * 1024L * 1024L);
+        // Regarding the path supplied to mdb_env_open:
+        // This directory must already exist and be writable.
+        //
+        File dir (File::getCurrentWorkingDirectory().getChildFile (path));
+        Result result = dir.createDirectory ();

-        if (error == 0)
-            error = mdb_env_open (
-                m_env,
-                keyValues ["path"].toStdString().c_str (),
-                MDB_NOTLS,
-                0664);
+        if (result.wasOk ())
+        {
+            int error = mdb_env_create (&m_env);

-        MDB_txn * txn;
-        if (error == 0)
-            error = mdb_txn_begin(m_env, NULL, 0, &txn);
-        if (error == 0)
-            error = mdb_dbi_open(txn, NULL, 0, &m_dbi);
-        if (error == 0)
-            error = mdb_txn_commit(txn);
+            // Should use the size of the file plus the free space on the disk
+            if (error == 0)
+                error = mdb_env_set_mapsize (m_env, 512L * 1024L * 1024L * 1024L);
+
+            if (error == 0)
+                error = mdb_env_open (
+                    m_env,
+                    m_basePath.c_str (),
+                    MDB_NOTLS,
+                    0664);

-        if (error != 0)
+            MDB_txn* txn;
+
+            if (error == 0)
+                error = mdb_txn_begin (m_env, NULL, 0, &txn);
+
+            if (error == 0)
+                error = mdb_dbi_open (txn, NULL, 0, &m_dbi);
+
+            if (error == 0)
+                error = mdb_txn_commit (txn);
+
+            if (error != 0)
+            {
+                String s;
+                s << "Error #" << error << " creating mdb environment";
+                Throw (std::runtime_error (s.toStdString ()));
+            }
+        }
+        else
        {
            String s;
-            s << "Error #" << error << " creating mdb environment";
-            throw std::runtime_error (s.toStdString ());
+            s << "MDB Backend failed to create directory, " << result.getErrorMessage ();
+            Throw (std::runtime_error (s.toStdString().c_str()));
        }
-
-        m_name = keyValues ["path"].toStdString();
    }

    ~Backend ()
    {
        if (m_env != nullptr)
        {
-            mdb_dbi_close(m_env, m_dbi);
+            mdb_dbi_close (m_env, m_dbi);
            mdb_env_close (m_env);
        }
    }

-    std::string getDataBaseName()
+    std::string getName()
    {
-        return m_name;
+        return m_basePath;
    }

-    bool bulkStore (std::vector <NodeObject::pointer> const& objs)
+    //--------------------------------------------------------------------------
+
+    template <class T>
+    unsigned char* mdb_cast (T* p)
    {
-        MDB_txn *txn = nullptr;
-        int rc = 0;
+        return const_cast <unsigned char*> (static_cast <unsigned char const*> (p));
+    }

-        rc = mdb_txn_begin(m_env, NULL, 0, &txn);
+    Status fetch (void const* key, NodeObject::Ptr* pObject)
+    {
+        pObject->reset ();

-        if (rc == 0)
+        Status status (ok);
+
+        MDB_txn* txn = nullptr;
+
+        int error = 0;
+
+        error = mdb_txn_begin (m_env, NULL, MDB_RDONLY, &txn);
+
+        if (error == 0)
        {
-            BOOST_FOREACH (NodeObject::ref obj, objs)
-            {
-                MDB_val key, data;
-                Blob blob (toBlob (obj));
+            MDB_val dbkey;
+            MDB_val data;

-                key.mv_size = (256 / 8);
-                key.mv_data = const_cast<unsigned char *>(obj->getHash().begin());
+            dbkey.mv_size = m_keyBytes;
+            dbkey.mv_data = mdb_cast (key);

-                data.mv_size = blob.size();
-                data.mv_data = &blob.front();
+            error = mdb_get (txn, m_dbi, &dbkey, &data);

-                rc = mdb_put(txn, m_dbi, &key, &data, 0);
-                if (rc != 0)
+            if (error == 0)
+            {
+                DecodedBlob decoded (key, data.mv_data, data.mv_size);
+
+                if (decoded.wasOk ())
                {
-                    assert(false);
+                    *pObject = decoded.createObject ();
                }
+                else
+                {
+                    status = dataCorrupt;
+                }
+            }
+            else if (error == MDB_NOTFOUND)
+            {
+                status = notFound;
+            }
+            else
+            {
+                status = unknown;
+
+                WriteLog (lsWARNING, NodeObject) << "MDB txn failed, code=" << error;
+            }
+
+            mdb_txn_abort (txn);
+        }
+        else
+        {
+            status = unknown;
+
+            WriteLog (lsWARNING, NodeObject) << "MDB txn failed, code=" << error;
+        }
+
+        return status;
+    }
+
+    void store (NodeObject::ref object)
+    {
+        m_batch.store (object);
+    }
+
+    void storeBatch (Batch const& batch)
+    {
+        MDB_txn* txn = nullptr;
+
+        int error = 0;
+
+        error = mdb_txn_begin (m_env, NULL, 0, &txn);
+
+        if (error == 0)
+        {
+            EncodedBlob::Pool::ScopedItem item (m_blobPool);
+
+            BOOST_FOREACH (NodeObject::Ptr const& object, batch)
+            {
+                EncodedBlob& encoded (item.getObject ());
+
+                encoded.prepare (object);
+
+                MDB_val key;
+                key.mv_size = m_keyBytes;
+                key.mv_data = mdb_cast (encoded.getKey ());
+
+                MDB_val data;
+                data.mv_size = encoded.getSize ();
+                data.mv_data = mdb_cast (encoded.getData ());
+
+                error = mdb_put (txn, m_dbi, &key, &data, 0);
+
+                if (error != 0)
+                {
+                    WriteLog (lsWARNING, NodeObject) << "mdb_put failed, error=" << error;
+                    break;
+                }
+            }
+        }
+
+        if (error == 0)
+        {
+            error = mdb_txn_commit(txn);
+
+            if (error != 0)
+            {
+                WriteLog (lsWARNING, NodeObject) << "mdb_txn_commit failed, error=" << error;
            }
        }
        else
-            assert(false);
-
-        if (rc == 0)
-            rc = mdb_txn_commit(txn);
-        else if (txn)
-            mdb_txn_abort(txn);
-
-        assert(rc == 0);
-        return rc == 0;
-    }
-
-    NodeObject::pointer retrieve (uint256 const& hash)
-    {
-        NodeObject::pointer ret;
-
-        MDB_txn *txn = nullptr;
-        int rc = 0;
-
-        rc = mdb_txn_begin(m_env, NULL, MDB_RDONLY, &txn);
-
-        if (rc == 0)
        {
-            MDB_val key, data;
-
-            key.mv_size = (256 / 8);
-            key.mv_data = const_cast<unsigned char *>(hash.begin());
-
-            rc = mdb_get(txn, m_dbi, &key, &data);
-            if (rc == 0)
-                ret = fromBinary(hash, static_cast<char *>(data.mv_data), data.mv_size);
-            else
-                assert(rc == MDB_NOTFOUND);
+            mdb_txn_abort (txn);
+
+            WriteLog (lsWARNING, NodeObject) << "mdb_txn_begin failed, error=" << error;
        }
-        else
-            assert(false);
-
-        mdb_txn_abort(txn);
-
-        return ret;
    }

-    void visitAll (FUNCTION_TYPE <void (NodeObject::pointer)> func)
-    { // WRITEME
-        assert(false);
-    }
-
-    Blob toBlob (NodeObject::ref obj) const
+    void visitAll (VisitCallback& callback)
    {
-        Blob rawData (9 + obj->getData ().size ());
-        unsigned char* bufPtr = &rawData.front();
-
-        *reinterpret_cast <uint32*> (bufPtr + 0) = ntohl (obj->getIndex ());
-        *reinterpret_cast <uint32*> (bufPtr + 4) = ntohl (obj->getIndex ());
-        *(bufPtr + 8) = static_cast <unsigned char> (obj->getType ());
-        memcpy (bufPtr + 9, &obj->getData ().front (), obj->getData ().size ());
-
-        return rawData;
+        // VFALCO TODO Implement this!
+        bassertfalse;
    }

-    NodeObject::pointer fromBinary (uint256 const& hash, char const* data, int size) const
+    int getWriteLoad ()
    {
-        if (size < 9)
-            throw std::runtime_error ("undersized object");
+        return m_batch.getWriteLoad ();
+    }

-        uint32 const index = htonl (*reinterpret_cast <uint32 const*> (data));
-        int const htype = data [8];
-
-        return boost::make_shared <NodeObject> (
-            static_cast <NodeObjectType> (htype),
-            index,
-            data + 9,
-            size - 9,
-            hash);
+    //--------------------------------------------------------------------------
+
+    void writeBatch (Batch const& batch)
+    {
+        storeBatch (batch);
    }

private:
-    std::string m_name;
+    size_t const m_keyBytes;
+    NodeStore::Scheduler& m_scheduler;
+    NodeStore::BatchWriter m_batch;
+    NodeStore::EncodedBlob::Pool m_blobPool;
+    std::string m_basePath;
    MDB_env* m_env;
    MDB_dbi m_dbi;
};
@@ -197,9 +268,12 @@ String MdbBackendFactory::getName () const
    return "mdb";
}

-NodeStore::Backend* MdbBackendFactory::createInstance (StringPairArray const& keyValues)
+NodeStore::Backend* MdbBackendFactory::createInstance (
+    size_t keyBytes,
+    StringPairArray const& keyValues,
+    NodeStore::Scheduler& scheduler)
{
-    return new MdbBackendFactory::Backend (keyValues);
+    return new MdbBackendFactory::Backend (keyBytes, keyValues, scheduler);
}

#endif

@@ -25,7 +25,10 @@ public:
    static MdbBackendFactory& getInstance ();

    String getName () const;
-    NodeStore::Backend* createInstance (StringPairArray const& keyValues);
+
+    NodeStore::Backend* createInstance (size_t keyBytes,
+                                        StringPairArray const& keyValues,
+                                        NodeStore::Scheduler& scheduler);
};

#endif
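The MDB backend above brackets every read in an MDB_RDONLY transaction and every batch in a write transaction, checking integer return codes at each step. A stripped-down sketch of that put/get flow using the stock LMDB C API — the environment path and key are placeholders, and the directory must already exist, as the diff notes:

    #include <cassert>
    #include <cstring>

    #include <lmdb.h>

    int main ()
    {
        MDB_env* env = nullptr;
        MDB_dbi dbi;
        MDB_txn* txn = nullptr;

        assert (mdb_env_create (&env) == 0);
        assert (mdb_env_open (env, "/tmp/example-mdb", MDB_NOTLS, 0664) == 0);

        // Write one key/value pair inside a write transaction.
        assert (mdb_txn_begin (env, nullptr, 0, &txn) == 0);
        assert (mdb_dbi_open (txn, nullptr, 0, &dbi) == 0);

        MDB_val key, val;
        key.mv_size = 3;  key.mv_data = (void*) "abc";
        val.mv_size = 5;  val.mv_data = (void*) "hello";
        assert (mdb_put (txn, dbi, &key, &val, 0) == 0);
        assert (mdb_txn_commit (txn) == 0);

        // Read it back inside a read-only transaction.
        assert (mdb_txn_begin (env, nullptr, MDB_RDONLY, &txn) == 0);
        MDB_val out;
        assert (mdb_get (txn, dbi, &key, &out) == 0);
        assert (out.mv_size == 5 && std::memcmp (out.mv_data, "hello", 5) == 0);
        mdb_txn_abort (txn);  // read txns are discarded, not committed

        mdb_env_close (env);
        return 0;
    }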
@@ -6,30 +6,32 @@

SETUP_LOG (NodeObject)

-NodeObject::NodeObject (
-    NodeObjectType type,
-    LedgerIndex ledgerIndex,
-    Blob const& binaryDataToCopy,
-    uint256 const& hash)
-    : mType (type)
-    , mHash (hash)
-    , mLedgerIndex (ledgerIndex)
-    , mData (binaryDataToCopy)
-{
-}
+//------------------------------------------------------------------------------

NodeObject::NodeObject (
    NodeObjectType type,
    LedgerIndex ledgerIndex,
-    void const* bufferToCopy,
-    int bytesInBuffer,
-    uint256 const& hash)
+    Blob& data,
+    uint256 const& hash,
+    PrivateAccess)
    : mType (type)
    , mHash (hash)
    , mLedgerIndex (ledgerIndex)
-    , mData (static_cast <unsigned char const*> (bufferToCopy),
-             static_cast <unsigned char const*> (bufferToCopy) + bytesInBuffer)
{
+    // Take over the caller's buffer
+    mData.swap (data);
}

+NodeObject::Ptr NodeObject::createObject (
+    NodeObjectType type,
+    LedgerIndex ledgerIndex,
+    Blob& data,
+    uint256 const& hash)
+{
+    // The boost::ref is important or
+    // else it will be passed by value!
+    return boost::make_shared <NodeObject> (
+        type, ledgerIndex, boost::ref (data), hash, PrivateAccess ());
+}

NodeObjectType NodeObject::getType () const
@@ -51,3 +53,39 @@ Blob const& NodeObject::getData () const
{
    return mData;
}

+bool NodeObject::isCloneOf (NodeObject::Ptr const& other) const
+{
+    if (mType != other->mType)
+        return false;
+
+    if (mHash != other->mHash)
+        return false;
+
+    if (mLedgerIndex != other->mLedgerIndex)
+        return false;
+
+    if (mData != other->mData)
+        return false;
+
+    return true;
+}
+
+//------------------------------------------------------------------------------
+
+class NodeObjectTests : public UnitTest
+{
+public:
+    NodeObjectTests () : UnitTest ("NodeObject", "ripple")
+    {
+    }
+
+    void runTest ()
+    {
+    }
+};
+
+static NodeObjectTests nodeObjectTests;
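The boost::ref in createObject above is load-bearing: without it, make_shared would not hand the constructor a reference to the caller's vector, and mData.swap could not take over the buffer. A toy sketch of the failure mode and the fix; ToyNode is illustrative, and the exact behavior of the unwrapped call depends on the C++ dialect (pre-C++11 boost forwards by const reference, so the non-const Blob& parameter would not even compile):

    #include <boost/make_shared.hpp>
    #include <boost/ref.hpp>
    #include <boost/shared_ptr.hpp>
    #include <cassert>
    #include <vector>

    typedef std::vector<unsigned char> Blob;

    // Illustrative class that, like NodeObject, steals the caller's buffer.
    struct ToyNode
    {
        explicit ToyNode (Blob& data) { m_data.swap (data); }
        Blob m_data;
    };

    int main ()
    {
        Blob payload (1024, 0x42);

        // Without boost::ref, the argument does not reach the constructor
        // as a reference to 'payload' (a copy or a compile error instead):
        //   boost::make_shared<ToyNode> (payload);

        // With boost::ref, a reference wrapper flows through make_shared and
        // converts to Blob&, so the swap empties the caller's actual vector.
        boost::shared_ptr<ToyNode> node = boost::make_shared<ToyNode> (boost::ref (payload));

        assert (payload.empty ());            // buffer was taken over
        assert (node->m_data.size () == 1024);
        return 0;
    }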
@@ -34,27 +34,60 @@ class NodeObject : public CountedObject <NodeObject>
public:
    static char const* getCountedObjectName () { return "NodeObject"; }

+    enum
+    {
+        /** Size of the fixed keys, in bytes.
+
+            We use a 256-bit hash for the keys.
+
+            @see NodeObject
+        */
+        keyBytes = 32,
+    };
+
+    /** The type used to hold the hash.
+
+        The hashes are fixed size, SHA256.
+
+        @note The key size can be retrieved with `Hash::sizeInBytes`
+    */
+    typedef UnsignedInteger <32> Hash;
+
+    // Please use this one. For a reference use Ptr const&
+    typedef boost::shared_ptr <NodeObject> Ptr;
+
+    // These are DEPRECATED, type names are capitalized.
    typedef boost::shared_ptr <NodeObject> pointer;
    typedef pointer const& ref;

-    /** Create from a vector of data.
-
-        @note A copy of the data is created.
-    */
+private:
+    // This hack is used to make the constructor effectively private
+    // except for when we use it in the call to make_shared.
+    // There's no portable way to make make_shared<> a friend work.
+    struct PrivateAccess { };
+public:
+    // This constructor is private, use createObject instead.
    NodeObject (NodeObjectType type,
-                LedgerIndex ledgerIndex,
-                Blob const & binaryDataToCopy,
-                uint256 const & hash);
+                LedgerIndex ledgerIndex,
+                Blob& data,
+                uint256 const& hash,
+                PrivateAccess);

-    /** Create from an area of memory.
+    /** Create an object from fields.

-        @note A copy of the data is created.
+        The caller's variable is modified during this call. The
+        underlying storage for the Blob is taken over by the NodeObject.
+
+        @param type The type of object.
+        @param ledgerIndex The ledger in which this object appears.
+        @param data A buffer containing the payload. The caller's variable
+                    is overwritten.
+        @param hash The 256-bit hash of the payload data.
    */
-    NodeObject (NodeObjectType type,
-                LedgerIndex ledgerIndex,
-                void const * bufferToCopy,
-                int bytesInBuffer,
-                uint256 const & hash);
+    static Ptr createObject (NodeObjectType type,
+                             LedgerIndex ledgerIndex,
+                             Blob& data,
+                             uint256 const& hash);

    /** Retrieve the type of this object.
    */
@@ -73,11 +106,30 @@ public:
    */
    Blob const& getData () const;

+    /** See if this object has the same data as another object.
+    */
+    bool isCloneOf (NodeObject::Ptr const& other) const;
+
+    /** Binary function that satisfies the strict-weak-ordering requirement.
+
+        This compares the hashes of both objects and returns true if
+        the first hash is considered to go before the second.
+
+        @see std::sort
+    */
+    struct LessThan
+    {
+        inline bool operator() (NodeObject::Ptr const& lhs, NodeObject::Ptr const& rhs) const noexcept
+        {
+            return lhs->getHash () < rhs->getHash ();
+        }
+    };
+
private:
-    NodeObjectType const mType;
-    uint256 const mHash;
-    LedgerIndex const mLedgerIndex;
-    Blob const mData;
+    NodeObjectType mType;
+    uint256 mHash;
+    LedgerIndex mLedgerIndex;
+    Blob mData;
};

#endif
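NodeObject::LessThan above is an ordinary strict-weak-ordering functor over hashes, so batches can be sorted or binary-searched by key. A usage sketch with stand-in types (Toy mirrors only the getHash shape, not the real class):

    #include <algorithm>
    #include <cstdint>
    #include <memory>
    #include <vector>

    // Stand-in for NodeObject: only the hash accessor matters for ordering.
    struct Toy
    {
        explicit Toy (uint64_t h) : hash (h) { }
        uint64_t getHash () const { return hash; }
        uint64_t hash;
    };
    typedef std::shared_ptr<Toy> ToyPtr;

    // Same shape as NodeObject::LessThan: compare pointees by hash.
    struct LessThan
    {
        bool operator() (ToyPtr const& lhs, ToyPtr const& rhs) const
        {
            return lhs->getHash () < rhs->getHash ();
        }
    };

    int main ()
    {
        std::vector<ToyPtr> batch;
        batch.push_back (std::make_shared<Toy> (3));
        batch.push_back (std::make_shared<Toy> (1));
        batch.push_back (std::make_shared<Toy> (2));

        // Sort by hash; duplicates would sit adjacent, ready for deduplication.
        std::sort (batch.begin (), batch.end (), LessThan ());
        return 0;
    }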
(File diff suppressed because it is too large.)
@@ -8,60 +8,282 @@
#define RIPPLE_NODESTORE_H_INCLUDED

/** Persistency layer for NodeObject

    A Node is a ledger object which is uniquely identified by a key, which is
    the 256-bit hash of the body of the node. The payload is a variable length
    block of serialized data.

    All ledger data is stored as node objects and as such, needs to be persisted
    between launches. Furthermore, since the set of node objects will in
    general be larger than the amount of available memory, purged node objects
    which are later accessed must be retrieved from the node store.

    @see NodeObject
*/
-class NodeStore : LeakChecked <NodeStore>
+class NodeStore
{
public:
-    /** Back end used for the store.
+    enum
+    {
+        // This is only used to pre-allocate the array for
+        // batch objects and does not affect the amount written.
+        //
+        batchWritePreallocationSize = 128
+    };
+
+    typedef std::vector <NodeObject::Ptr> Batch;
+
+    typedef StringPairArray Parameters;
+
+    //--------------------------------------------------------------------------
+
+    /** Parses a key/value blob into NodeObject components.
+
+        This will extract the information required to construct a NodeObject. It
+        also does consistency checking and returns the result, so it is possible
+        to determine if the data is corrupted without throwing an exception. Not
+        all forms of corruption are detected so further analysis will be needed
+        to eliminate false negatives.
+
+        @note This defines the database format of a NodeObject!
+    */
+    class DecodedBlob
+    {
+    public:
+        /** Construct the decoded blob from raw data. */
+        DecodedBlob (void const* key, void const* value, int valueBytes);
+
+        /** Determine if the decoding was successful. */
+        bool wasOk () const noexcept { return m_success; }
+
+        /** Create a NodeObject from this data. */
+        NodeObject::Ptr createObject ();
+
+    private:
+        bool m_success;
+
+        void const* m_key;
+        LedgerIndex m_ledgerIndex;
+        NodeObjectType m_objectType;
+        unsigned char const* m_objectData;
+        int m_dataBytes;
+    };
+
+    //--------------------------------------------------------------------------
+
+    /** Utility for producing flattened node objects.
+
+        These get recycled to prevent many small allocations.
+
+        @note This defines the database format of a NodeObject!
+    */
+    struct EncodedBlob
+    {
+        typedef RecycledObjectPool <EncodedBlob> Pool;
+
+        void prepare (NodeObject::Ptr const& object);
+
+        void const* getKey () const noexcept { return m_key; }
+
+        size_t getSize () const noexcept { return m_size; }
+
+        void const* getData () const noexcept { return m_data.getData (); }
+
+    private:
+        void const* m_key;
+        MemoryBlock m_data;
+        size_t m_size;
+    };

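DecodedBlob and EncodedBlob centralize the value layout that the per-backend toBlob/fromBinary helpers removed earlier in this diff each reimplemented: judging by that removed code, a 9-byte header holding a 32-bit ledger index in network byte order at offsets 0 and 4 (duplicated), a type byte at offset 8, then the payload. A sketch of that framing — the field meanings are read off the removed helpers, so treat the exact layout as an assumption:

    #include <arpa/inet.h>  // htonl/ntohl
    #include <cstdint>
    #include <cstring>
    #include <stdexcept>
    #include <vector>

    typedef std::vector<unsigned char> Blob;

    // Flatten (ledgerIndex, type, payload) using the 9-byte header layout
    // seen in the removed toBlob() helpers: index at offsets 0 and 4, type at 8.
    Blob encode (uint32_t ledgerIndex, unsigned char type, Blob const& payload)
    {
        Blob out (9 + payload.size ());
        uint32_t const be = htonl (ledgerIndex);
        std::memcpy (&out[0], &be, 4);
        std::memcpy (&out[4], &be, 4);   // historically duplicated field
        out[8] = type;
        std::memcpy (&out[9], payload.data (), payload.size ());
        return out;
    }

    // Inverse of encode(); mirrors the size check in the removed fromBinary().
    void decode (Blob const& value, uint32_t& ledgerIndex, unsigned char& type, Blob& payload)
    {
        if (value.size () < 9)
            throw std::runtime_error ("undersized object");
        uint32_t be;
        std::memcpy (&be, &value[0], 4);
        ledgerIndex = ntohl (be);
        type = value[8];
        payload.assign (value.begin () + 9, value.end ());
    }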
    //--------------------------------------------------------------------------

+    /** Provides optional asynchronous scheduling for backends.
+
+        For improved performance, a backend has the option of performing writes
+        in batches. These writes can be scheduled using the provided scheduler
+        object.
+
+        @see BatchWriter
+    */
+    class Scheduler
+    {
+    public:
+        /** Derived classes perform scheduled tasks. */
+        struct Task
+        {
+            virtual ~Task () { }
+
+            /** Performs the task.
+
+                The call may take place on a foreign thread.
+            */
+            virtual void performScheduledTask () = 0;
+        };
+
+        /** Schedules a task.
+
+            Depending on the implementation, this could happen
+            immediately or get deferred.
+        */
+        virtual void scheduleTask (Task* task) = 0;
+    };

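Given the Task interface above, the "synchronous scheduler" promised later in this header is about the simplest implementation possible: run the task inline on the caller's thread. A sketch against the Scheduler declaration above (the class name InlineScheduler is made up):

    // Minimal Scheduler: performs every task immediately on the caller's
    // thread, which is what NodeStore::getSynchronousScheduler() promises.
    class InlineScheduler : public NodeStore::Scheduler
    {
    public:
        void scheduleTask (Task* task)
        {
            task->performScheduledTask ();   // no queue, no thread handoff
        }
    };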
//--------------------------------------------------------------------------
|
||||
|
||||
/** Helps with batch writing.
|
||||
|
||||
The batch writes are performed with a scheduled task. Use of the
|
||||
class it not required. A backend can implement its own write batching,
|
||||
or skip write batching if doing so yields a performance benefit.
|
||||
|
||||
@see Scheduler
|
||||
*/
|
||||
// VFALCO NOTE I'm not entirely happy having placed this here,
|
||||
// because whoever needs to use NodeStore certainly doesn't
|
||||
// need to see the implementation details of BatchWriter.
|
||||
//
|
||||
class BatchWriter : private Scheduler::Task
|
||||
{
|
||||
public:
|
||||
/** This callback does the actual writing. */
|
||||
struct Callback
|
||||
{
|
||||
virtual void writeBatch (Batch const& batch) = 0;
|
||||
};
|
||||
|
||||
/** Create a batch writer. */
|
||||
BatchWriter (Callback& callback, Scheduler& scheduler);
|
||||
|
||||
/** Destroy a batch writer.
|
||||
|
||||
Anything pending in the batch is written out before this returns.
|
||||
*/
|
||||
~BatchWriter ();
|
||||
|
||||
/** Store the object.
|
||||
|
||||
This will add to the batch and initiate a scheduled task to
|
||||
write the batch out.
|
||||
*/
|
||||
void store (NodeObject::Ptr const& object);
|
||||
|
||||
/** Get an estimate of the amount of writing I/O pending. */
|
||||
int getWriteLoad ();
|
||||
|
||||
private:
|
||||
void performScheduledTask ();
|
||||
void writeBatch ();
|
||||
void waitForWriting ();
|
||||
|
||||
private:
|
||||
typedef boost::recursive_mutex LockType;
|
||||
typedef boost::condition_variable_any CondvarType;
|
||||
|
||||
Callback& m_callback;
|
||||
Scheduler& m_scheduler;
|
||||
LockType mWriteMutex;
|
||||
CondvarType mWriteCondition;
|
||||
int mWriteGeneration;
|
||||
int mWriteLoad;
|
||||
bool mWritePending;
|
||||
Batch mWriteSet;
|
||||
};
|
||||
|
||||
//--------------------------------------------------------------------------
|
||||
|
||||
/** A backend used for the store.
|
||||
|
||||
The NodeStore uses a swappable backend so that other database systems
|
||||
can be tried. Different databases may offer various features such
|
||||
as improved performance, fault tolerant or distributed storage, or
|
||||
all in-memory operation.
|
||||
|
||||
A given instance of a backend is fixed to a particular key size.
|
||||
*/
|
||||
class Backend
|
||||
{
|
||||
public:
|
||||
// VFALCO TODO Move the function definition to the .cpp
|
||||
Backend ()
|
||||
: mWriteGeneration(0)
|
||||
, mWriteLoad(0)
|
||||
, mWritePending(false)
|
||||
/** Return codes from operations. */
|
||||
enum Status
|
||||
{
|
||||
mWriteSet.reserve(128);
|
||||
}
|
||||
ok,
|
||||
notFound,
|
||||
dataCorrupt,
|
||||
unknown
|
||||
};
|
||||
|
||||
/** Destroy the backend.
|
||||
|
||||
All open files are closed and flushed. If there are batched writes
|
||||
or other tasks scheduled, they will be completed before this call
|
||||
returns.
|
||||
*/
|
||||
virtual ~Backend () { }
|
||||
|
||||
virtual std::string getDataBaseName() = 0;
|
||||
/** Get the human-readable name of this backend.
|
||||
|
||||
// Store/retrieve a single object
|
||||
// These functions must be thread safe
|
||||
virtual bool store (NodeObject::ref);
|
||||
virtual NodeObject::pointer retrieve (uint256 const &hash) = 0;
|
||||
This is used for diagnostic output.
|
||||
*/
|
||||
    virtual std::string getName() = 0;

    // Store a group of objects
    // This function will only be called from a single thread
    virtual bool bulkStore (const std::vector< NodeObject::pointer >&) = 0;

    /** Fetch a single object.

    // Visit every object in the database
    // This function will only be called during an import operation
    //
    // VFALCO TODO Replace FUNCTION_TYPE with a beast lift.
    //
    virtual void visitAll (FUNCTION_TYPE <void (NodeObject::pointer)>) = 0;
        If the object is not found or an error is encountered, the
        result will indicate the condition.

    // VFALCO TODO Put this bulk writing logic into a separate class.
    virtual void bulkWrite (Job &);
    virtual void waitWrite ();
    virtual int getWriteLoad ();
        @note This will be called concurrently.

protected:
    // VFALCO TODO Put this bulk writing logic into a separate class.
    boost::mutex mWriteMutex;
    boost::condition_variable mWriteCondition;
    int mWriteGeneration;
    int mWriteLoad;
    bool mWritePending;
    std::vector <boost::shared_ptr<NodeObject> > mWriteSet;
        @param key A pointer to the key data.
        @param pObject [out] The created object if successful.

        @return The result of the operation.
    */
    virtual Status fetch (void const* key, NodeObject::Ptr* pObject) = 0;

    /** Store a single object.

        Depending on the implementation this may happen immediately
        or deferred using a scheduled task.

        @note This will be called concurrently.

        @param object The object to store.
    */
    virtual void store (NodeObject::Ptr const& object) = 0;

    /** Store a group of objects.

        @note This function will not be called concurrently with
              itself or @ref store.
    */
    virtual void storeBatch (Batch const& batch) = 0;

    /** Callback for iterating through objects.

        @see visitAll
    */
    struct VisitCallback
    {
        virtual void visitObject (NodeObject::Ptr const& object) = 0;
    };

    /** Visit every object in the database

        This is usually called during import.

        @note This routine will not be called concurrently with itself
              or other methods.

        @see import, VisitCallback
    */
    virtual void visitAll (VisitCallback& callback) = 0;

    /** Estimate the number of write operations pending. */
    virtual int getWriteLoad () = 0;
};

public:
    //--------------------------------------------------------------------------

    /** Factory to produce backends.
    */
    class BackendFactory
@@ -69,67 +291,142 @@ public:
    public:
        virtual ~BackendFactory () { }

        /** Retrieve the name of this factory.
        */
        /** Retrieve the name of this factory. */
        virtual String getName () const = 0;

        /** Create an instance of this factory's backend.

            @param keyBytes The fixed number of bytes per key.
            @param keyValues A set of key/value configuration pairs.
            @param scheduler The scheduler to use for running tasks.

            @return A pointer to the Backend object.
        */
        virtual Backend* createInstance (StringPairArray const& keyValues) = 0;
        virtual Backend* createInstance (size_t keyBytes,
                                         Parameters const& parameters,
                                         Scheduler& scheduler) = 0;
    };

public:
    //--------------------------------------------------------------------------

    /** Construct a node store.

        parameters has the format:
        The parameters are key/value pairs passed to the backend. The
        'type' key must exist; it defines the choice of backend. Most
        backends also require a 'path' field.

        Some choices for 'type' are:
            HyperLevelDB, LevelDB, SQLite, KeyvaDB, MDB

        <key>=<value>['|'<key>=<value>]
        If the fastBackendParameter is omitted or empty, no ephemeral database
        is used. If the scheduler parameter is omitted or unspecified, a
        synchronous scheduler is used which performs all tasks immediately on
        the caller's thread.

        The key "type" must exist; it defines the backend. For example
            "type=LevelDB|path=/mnt/ephemeral"
        @note If the database cannot be opened or created, an exception is thrown.

        @param backendParameters The parameter string for the persistent backend.
        @param fastBackendParameters [optional] The parameter string for the ephemeral backend.
        @param scheduler [optional] The scheduler to use for performing asynchronous tasks.

        @return The opened database.
    */
    // VFALCO NOTE Is cacheSize in bytes? objects? KB?
    //             Is cacheAge in minutes? seconds?
    //
    NodeStore (String backendParameters,
               String fastBackendParameters,
               int cacheSize,
               int cacheAge);
    static NodeStore* New (Parameters const& backendParameters,
                           Parameters fastBackendParameters = Parameters (),
                           Scheduler& scheduler = getSynchronousScheduler ());
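The two signatures above tell the story of this change: construction from raw strings moves to a static New() taking pre-parsed Parameters. A minimal call-site sketch (not part of the commit; it assumes Parameters is the StringPairArray typedef, the LevelDB factory is registered, and it uses parseDelimitedKeyValueString from later in this commit):

    // Hypothetical call site for NodeStore::New.
    // "type" selects a registered backend factory; "path" is backend-specific.
    StringPairArray const params (parseDelimitedKeyValueString (
        "type=LevelDB|path=/var/lib/rippled/db"));

    // Defaults: no ephemeral database, synchronous scheduler.
    ScopedPointer <NodeStore> store (NodeStore::New (params));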
    /** Get the synchronous scheduler.

        The synchronous scheduler performs all tasks immediately, before
        returning to the caller, using the caller's thread.
    */
    static Scheduler& getSynchronousScheduler ();

    /** Destroy the node store.

        All pending operations are completed, pending writes flushed,
        and files closed before this returns.
    */
    virtual ~NodeStore () { }

    /** Retrieve the name associated with this backend.

        This is used for diagnostics and may not reflect the actual path
        or paths used by the underlying backend.
    */
    virtual String getName () const = 0;

    /** Add the specified backend factory to the list of available factories.

        The names of available factories are compared against the "type"
        value in the parameter list on construction.

        @param factory The factory to add.
    */
    static void addBackendFactory (BackendFactory& factory);
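A hedged sketch of factory registration (the registration site is an assumption; the getInstance() accessors appear in the factory headers later in this commit):

    // Done once at startup, before NodeStore::New is called, so that the
    // "type" key in the parameter list can be matched against getName().
    NodeStore::addBackendFactory (SqliteBackendFactory::getInstance ());
    NodeStore::addBackendFactory (NullBackendFactory::getInstance ());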
    float getCacheHitRate ();
    /** Fetch an object.

    bool store (NodeObjectType type, uint32 index, Blob const& data,
                uint256 const& hash);
        If the object is known to be not in the database, isn't found in the
        database during the fetch, or failed to load correctly during the fetch,
        `nullptr` is returned.

    NodeObject::pointer retrieve (uint256 const& hash);
        @note This can be called concurrently.

    void waitWrite ();
    void tune (int size, int age);
    void sweep ();
    int getWriteLoad ();
        @param hash The key of the object to retrieve.

    int import (String sourceBackendParameters);
        @return The object, or nullptr if it couldn't be retrieved.
    */
    virtual NodeObject::pointer fetch (uint256 const& hash) = 0;
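A short caller-side sketch of the documented contract (illustrative only; 'store' stands in for a NodeStore instance):

    // nullptr covers all three failure cases above, so one check suffices.
    NodeObject::pointer const object = store->fetch (hash);

    if (object == nullptr)
    {
        // Not in the database (or failed to load): acquire it elsewhere.
    }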
private:
    void importVisitor (std::vector <NodeObject::pointer>& objects, NodeObject::pointer object);

    static Backend* createBackend (String const& parameters);
    /** Store the object.

    static Array <BackendFactory*> s_factories;
        The caller's Blob parameter is overwritten.

private:
    ScopedPointer <Backend> m_backend;
    ScopedPointer <Backend> m_fastBackend;
        @param type The type of object.
        @param ledgerIndex The ledger in which the object appears.
        @param data The payload of the object. The caller's
                    variable is overwritten.
        @param hash The 256-bit hash of the payload data.

        @return `true` if the object was stored?
    */
    virtual void store (NodeObjectType type,
                        uint32 ledgerIndex,
                        Blob& data,
                        uint256 const& hash) = 0;

    /** Visit every object in the database

        This is usually called during import.

        @note This routine will not be called concurrently with itself
              or other methods.

        @see import
    */
    virtual void visitAll (Backend::VisitCallback& callback) = 0;

    /** Import objects from another database. */
    virtual void import (NodeStore& sourceDatabase) = 0;


    /** Retrieve the estimated number of pending write operations.

        This is used for diagnostics.
    */
    virtual int getWriteLoad () = 0;

    // VFALCO TODO Document this.
    virtual float getCacheHitRate () = 0;

    // VFALCO TODO Document this.
    //        TODO Document the parameter meanings.
    virtual void tune (int size, int age) = 0;

    // VFALCO TODO Document this.
    virtual void sweep () = 0;

    TaggedCache<uint256, NodeObject, UptimeTimerAdapter> mCache;
    KeyCache <uint256, UptimeTimerAdapter> mNegativeCache;
};

#endif
@@ -15,28 +15,31 @@ public:
    {
    }

    std::string getDataBaseName()
    std::string getName()
    {
        return std::string ();
    }

    bool store (NodeObject::ref obj)
    Status fetch (void const*, NodeObject::Ptr*)
    {
        return notFound;
    }

    void store (NodeObject::ref object)
    {
    }

    void storeBatch (NodeStore::Batch const& batch)
    {
        return false;
    }

    bool bulkStore (const std::vector< NodeObject::pointer >& objs)
    void visitAll (VisitCallback& callback)
    {
        return false;
    }

    NodeObject::pointer retrieve (uint256 const& hash)
    {
        return NodeObject::pointer ();
    }

    void visitAll (FUNCTION_TYPE <void (NodeObject::pointer)> func)
    int getWriteLoad ()
    {
        return 0;
    }
};

@@ -62,7 +65,10 @@ String NullBackendFactory::getName () const
    return "none";
}

NodeStore::Backend* NullBackendFactory::createInstance (StringPairArray const& keyValues)
NodeStore::Backend* NullBackendFactory::createInstance (
    size_t,
    StringPairArray const&,
    NodeStore::Scheduler&)
{
    return new NullBackendFactory::Backend;
}

@@ -23,7 +23,10 @@ public:
    static NullBackendFactory& getInstance ();

    String getName () const;
    NodeStore::Backend* createInstance (StringPairArray const& keyValues);

    NodeStore::Backend* createInstance (size_t keyBytes,
                                        StringPairArray const& keyValues,
                                        NodeStore::Scheduler& scheduler);
};

#endif
@@ -4,97 +4,177 @@
*/
//==============================================================================

static const char* s_nodeStoreDBInit [] =
{
    "PRAGMA synchronous=NORMAL;",
    "PRAGMA journal_mode=WAL;",
    "PRAGMA journal_size_limit=1582080;",

#if (ULONG_MAX > UINT_MAX) && !defined (NO_SQLITE_MMAP)
    "PRAGMA mmap_size=171798691840;",
#endif

    "BEGIN TRANSACTION;",

    "CREATE TABLE CommittedObjects ( \
        Hash CHARACTER(64) PRIMARY KEY, \
        ObjType CHAR(1) NOT NULL, \
        LedgerIndex BIGINT UNSIGNED, \
        Object BLOB \
    );",

    "END TRANSACTION;"
};

static int s_nodeStoreDBCount = NUMBER (s_nodeStoreDBInit);

//------------------------------------------------------------------------------

class SqliteBackendFactory::Backend : public NodeStore::Backend
{
public:
    Backend(std::string const& path) : mName(path)
    Backend (size_t keyBytes, std::string const& path)
        : m_keyBytes (keyBytes)
        , m_name (path)
        , m_db (new DatabaseCon(path, s_nodeStoreDBInit, s_nodeStoreDBCount))
    {
        mDb = new DatabaseCon(path, HashNodeDBInit, HashNodeDBCount);
        mDb->getDB()->executeSQL(boost::str(boost::format("PRAGMA cache_size=-%d;") %
            (theConfig.getSize(siHashNodeDBCache) * 1024)));
        String s;

        // VFALCO TODO Remove this dependency on theConfig
        //
        s << "PRAGMA cache_size=-" << String (theConfig.getSize(siHashNodeDBCache) * 1024);
        m_db->getDB()->executeSQL (s.toStdString ().c_str ());
    }

    Backend()
    ~Backend()
    {
        delete mDb;
    }

    std::string getDataBaseName()
    std::string getName()
    {
        return mName;
        return m_name;
    }

    bool bulkStore(const std::vector< NodeObject::pointer >& objects)
    //--------------------------------------------------------------------------

    Status fetch (void const* key, NodeObject::Ptr* pObject)
    {
        ScopedLock sl(mDb->getDBLock());
        static SqliteStatement pStB(mDb->getDB()->getSqliteDB(), "BEGIN TRANSACTION;");
        static SqliteStatement pStE(mDb->getDB()->getSqliteDB(), "END TRANSACTION;");
        static SqliteStatement pSt(mDb->getDB()->getSqliteDB(),
        Status result = ok;

        pObject->reset ();

        {
            ScopedLock sl (m_db->getDBLock());

            uint256 const hash (key);

            static SqliteStatement pSt (m_db->getDB()->getSqliteDB(),
                "SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;");

            pSt.bind (1, hash.GetHex());

            if (pSt.isRow (pSt.step()))
            {
                // VFALCO NOTE This is unfortunately needed,
                //             the DatabaseCon creates the blob?
                Blob data (pSt.getBlob (2));
                *pObject = NodeObject::createObject (
                    getTypeFromString (pSt.peekString (0)),
                    pSt.getUInt32 (1),
                    data,
                    hash);
            }
            else
            {
                result = notFound;
            }

            pSt.reset();
        }

        return result;
    }

    void store (NodeObject::ref object)
    {
        NodeStore::Batch batch;

        batch.push_back (object);

        storeBatch (batch);
    }

    void storeBatch (NodeStore::Batch const& batch)
    {
        // VFALCO TODO Rewrite this to use Beast::db

        ScopedLock sl (m_db->getDBLock());

        static SqliteStatement pStB (m_db->getDB()->getSqliteDB(), "BEGIN TRANSACTION;");
        static SqliteStatement pStE (m_db->getDB()->getSqliteDB(), "END TRANSACTION;");
        static SqliteStatement pSt (m_db->getDB()->getSqliteDB(),
            "INSERT OR IGNORE INTO CommittedObjects "
            "(Hash,ObjType,LedgerIndex,Object) VALUES (?, ?, ?, ?);");

        pStB.step();
        pStB.reset();

        BOOST_FOREACH(NodeObject::ref object, objects)
        BOOST_FOREACH (NodeObject::Ptr const& object, batch)
        {
            bind(pSt, object);
            doBind (pSt, object);

            pSt.step();
            pSt.reset();
        }

        pStE.step();
        pStE.reset();

        return true;

    }

    NodeObject::pointer retrieve(uint256 const& hash)
    void visitAll (VisitCallback& callback)
    {
        NodeObject::pointer ret;
        // No lock needed as per the visitAll() API

        {
            ScopedLock sl(mDb->getDBLock());
            static SqliteStatement pSt(mDb->getDB()->getSqliteDB(),
                "SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;");

            pSt.bind(1, hash.GetHex());

            if (pSt.isRow(pSt.step()))
                ret = boost::make_shared<NodeObject>(getType(pSt.peekString(0)), pSt.getUInt32(1), pSt.getBlob(2), hash);

            pSt.reset();
        }

        return ret;
    }

    void visitAll(FUNCTION_TYPE<void (NodeObject::pointer)> func)
    {
        uint256 hash;

        static SqliteStatement pSt(mDb->getDB()->getSqliteDB(),
        static SqliteStatement pSt(m_db->getDB()->getSqliteDB(),
            "SELECT ObjType,LedgerIndex,Object,Hash FROM CommittedObjects;");

        while (pSt.isRow(pSt.step()))
        while (pSt.isRow (pSt.step()))
        {
            hash.SetHexExact(pSt.getString(3));
            func(boost::make_shared<NodeObject>(getType(pSt.peekString(0)), pSt.getUInt32(1), pSt.getBlob(2), hash));

            // VFALCO NOTE This is unfortunately needed,
            //             the DatabaseCon creates the blob?
            Blob data (pSt.getBlob (2));
            NodeObject::Ptr const object (NodeObject::createObject (
                getTypeFromString (pSt.peekString (0)),
                pSt.getUInt32 (1),
                data,
                hash));

            callback.visitObject (object);
        }

        pSt.reset();
        pSt.reset ();
    }

    void bind(SqliteStatement& statement, NodeObject::ref object)
    int getWriteLoad ()
    {
        return 0;
    }

    //--------------------------------------------------------------------------

    void doBind (SqliteStatement& statement, NodeObject::ref object)
    {
        char const* type;
        switch (object->getType())
        {
        case hotLEDGER: type = "L"; break;
        case hotLEDGER:           type = "L"; break;
        case hotTRANSACTION: type = "T"; break;
        case hotACCOUNT_NODE: type = "A"; break;
        case hotTRANSACTION_NODE: type = "N"; break;
        case hotACCOUNT_NODE:     type = "A"; break;
        case hotTRANSACTION_NODE: type = "N"; break;
        default: type = "U";
        }

@@ -104,25 +184,27 @@ public:
        statement.bindStatic(4, object->getData());
    }

    NodeObjectType getType(std::string const& type)
    NodeObjectType getTypeFromString (std::string const& s)
    {
        NodeObjectType htype = hotUNKNOWN;
        if (!type.empty())
        NodeObjectType type = hotUNKNOWN;

        if (!s.empty ())
        {
            switch (type[0])
            switch (s [0])
            {
            case 'L': htype = hotLEDGER; break;
            case 'T': htype = hotTRANSACTION; break;
            case 'A': htype = hotACCOUNT_NODE; break;
            case 'N': htype = hotTRANSACTION_NODE; break;
            case 'L': type = hotLEDGER; break;
            case 'T': type = hotTRANSACTION; break;
            case 'A': type = hotACCOUNT_NODE; break;
            case 'N': type = hotTRANSACTION_NODE; break;
            }
        }
        return htype;
        return type;
    }

private:
    std::string mName;
    DatabaseCon* mDb;
    size_t const m_keyBytes;
    std::string const m_name;
    ScopedPointer <DatabaseCon> m_db;
};

//------------------------------------------------------------------------------
@@ -147,7 +229,10 @@ String SqliteBackendFactory::getName () const
    return "Sqlite";
}

NodeStore::Backend* SqliteBackendFactory::createInstance (StringPairArray const& keyValues)
NodeStore::Backend* SqliteBackendFactory::createInstance (
    size_t keyBytes,
    StringPairArray const& keyValues,
    NodeStore::Scheduler& scheduler)
{
    return new Backend (keyValues ["path"].toStdString ());
    return new Backend (keyBytes, keyValues ["path"].toStdString ());
}

@@ -21,7 +21,10 @@ public:
    static SqliteBackendFactory& getInstance ();

    String getName () const;
    NodeStore::Backend* createInstance (StringPairArray const& keyValues);

    NodeStore::Backend* createInstance (size_t keyBytes,
                                        StringPairArray const& keyValues,
                                        NodeStore::Scheduler& scheduler);
};

#endif
@@ -65,6 +65,8 @@

#include "../ripple_core/ripple_core.h"

#include "beast/modules/beast_db/beast_db.h"

// VFALCO TODO fix these warnings!
#ifdef _MSC_VER
//#pragma warning (push) // Causes spurious C4503 "decorated name exceeds maximum length"

@@ -102,8 +104,9 @@ namespace ripple

#include "node/ripple_NodeObject.h"
#include "node/ripple_NodeStore.h"
#include "node/ripple_LevelDBBackendFactory.h"
#include "node/ripple_HyperLevelDBBackendFactory.h"
#include "node/ripple_KeyvaDBBackendFactory.h"
#include "node/ripple_LevelDBBackendFactory.h"
#include "node/ripple_MdbBackendFactory.h"
#include "node/ripple_NullBackendFactory.h"
#include "node/ripple_SqliteBackendFactory.h"

@@ -154,10 +157,10 @@ namespace ripple
#include "src/cpp/ripple/TransactionMaster.h"
#include "src/cpp/ripple/ripple_LocalCredentials.h"
#include "src/cpp/ripple/WSDoor.h"
#include "src/cpp/ripple/ripple_Application.h"
#include "src/cpp/ripple/RPCHandler.h"
#include "src/cpp/ripple/TransactionQueue.h"
#include "ledger/OrderBookDB.h"
#include "src/cpp/ripple/ripple_Application.h"
#include "src/cpp/ripple/CallRPC.h"
#include "src/cpp/ripple/Transactor.h"
#include "src/cpp/ripple/ChangeTransactor.h"

@@ -244,10 +247,11 @@ static const uint64 tenTo17m1 = tenTo17 - 1;
#include "basics/ripple_RPCServerHandler.cpp"
#include "node/ripple_NodeObject.cpp"
#include "node/ripple_NodeStore.cpp"
#include "node/ripple_LevelDBBackendFactory.cpp"
#include "node/ripple_HyperLevelDBBackendFactory.cpp"
#include "node/ripple_MdbBackendFactory.cpp"
#include "node/ripple_KeyvaDBBackendFactory.cpp"
#include "node/ripple_LevelDBBackendFactory.cpp"
#include "node/ripple_NullBackendFactory.cpp"
#include "node/ripple_MdbBackendFactory.cpp"
#include "node/ripple_SqliteBackendFactory.cpp"

#include "ledger/Ledger.cpp"
@@ -427,7 +431,6 @@ static DH* handleTmpDh (SSL* ssl, int is_export, int iKeyLength)
#include "ledger/LedgerUnitTests.cpp"
#include "src/cpp/ripple/ripple_SHAMapUnitTests.cpp"
#include "src/cpp/ripple/ripple_SHAMapSyncUnitTests.cpp"
#include "src/cpp/ripple/ripple_ProofOfWorkFactoryUnitTests.cpp" // Requires ProofOfWorkFactory.h
#include "src/cpp/ripple/ripple_SerializedTransactionUnitTests.cpp"

//------------------------------------------------------------------------------
@@ -62,9 +62,75 @@ public:
    void sweep ();
    void clear ();

    bool touch (const key_type& key);
    /** Refresh the expiration time on a key.

        @param key The key to refresh.
        @return `true` if the key was found and the object is cached.
    */
    bool refreshIfPresent (const key_type& key)
    {
        bool found = false;

        // If present, make current in cache
        boost::recursive_mutex::scoped_lock sl (mLock);

        cache_iterator cit = mCache.find (key);

        if (cit != mCache.end ())
        {
            cache_entry& entry = cit->second;

            if (! entry.isCached ())
            {
                // Convert weak to strong.
                entry.ptr = entry.lock ();

                if (entry.isCached ())
                {
                    // We just put the object back in cache
                    ++mCacheCount;
                    entry.touch ();
                    found = true;
                }
                else
                {
                    // Couldn't get strong pointer,
                    // object fell out of the cache so remove the entry.
                    mCache.erase (cit);
                }
            }
            else
            {
                // It's cached so update the timer
                entry.touch ();
                found = true;
            }
        }
        else
        {
            // not present
        }

        return found;
    }

    bool del (const key_type& key, bool valid);

    /** Replace aliased objects with originals.

        Due to concurrency it is possible for two separate objects with
        the same content and referring to the same unique "thing" to exist.
        This routine eliminates the duplicate and performs a replacement
        on the caller's shared pointer if needed.

        @param key The key corresponding to the object
        @param data A shared pointer to the data corresponding to the object.
        @param replace `true` if `data` is the up-to-date version of the object.

        @return `true` if the operation was successful.
    */
    bool canonicalize (const key_type& key, boost::shared_ptr<c_Data>& data, bool replace = false);

    bool store (const key_type& key, const c_Data& data);
    boost::shared_ptr<c_Data> fetch (const key_type& key);
    bool retrieve (const key_type& key, c_Data& data);
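A brief caller-side sketch of canonicalize (illustrative, not from the commit): the cache may swap the caller's shared pointer for an equivalent object it already holds, so callers continue with the possibly-replaced pointer.

    // 'cache' is an existing TaggedCache <uint256, Blob, UptimeTimerAdapter>.
    boost::shared_ptr <Blob> data (boost::make_shared <Blob> (rawBytes));

    // If an equivalent object is already cached, 'data' is replaced with the
    // canonical instance; otherwise this copy becomes the canonical one.
    cache.canonicalize (hash, data);

    // Use 'data' from here on; it now refers to the canonical object.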
@@ -264,40 +330,6 @@ void TaggedCache<c_Key, c_Data, Timer>::sweep ()
    }
}

template<typename c_Key, typename c_Data, class Timer>
bool TaggedCache<c_Key, c_Data, Timer>::touch (const key_type& key)
{
    // If present, make current in cache
    boost::recursive_mutex::scoped_lock sl (mLock);

    cache_iterator cit = mCache.find (key);

    if (cit == mCache.end ()) // Don't have the object
        return false;

    cache_entry& entry = cit->second;

    if (entry.isCached ())
    {
        entry.touch ();
        return true;
    }

    entry.ptr = entry.lock ();

    if (entry.isCached ())
    {
        // We just put the object back in cache
        ++mCacheCount;
        entry.touch ();
        return true;
    }

    // Object fell out
    mCache.erase (cit);
    return false;
}

template<typename c_Key, typename c_Data, class Timer>
bool TaggedCache<c_Key, c_Data, Timer>::del (const key_type& key, bool valid)
{
@@ -326,6 +358,7 @@ bool TaggedCache<c_Key, c_Data, Timer>::del (const key_type& key, bool valid)
    return ret;
}

// VFALCO NOTE What does it mean to canonicalize the data?
template<typename c_Key, typename c_Data, class Timer>
bool TaggedCache<c_Key, c_Data, Timer>::canonicalize (const key_type& key, boost::shared_ptr<c_Data>& data, bool replace)
{

@@ -19,6 +19,10 @@ inline int Testuint256AdHoc (std::vector<std::string> vArg);

// We have to keep a separate base class without constructors
// so the compiler will let us use it in a union
//
// VFALCO NOTE This class produces undefined behavior when
//             BITS is not a multiple of 32!!!
//
template<unsigned int BITS>
class base_uint
{
@@ -30,6 +34,22 @@ protected:
    unsigned int pn[WIDTH];

public:
    base_uint ()
    {
    }

    /** Construct from a raw pointer.

        The buffer pointed to by `data` must be at least 32 bytes.
    */
    explicit base_uint (void const* data)
    {
        // BITS must be a multiple of 32
        static_bassert ((BITS % 32) == 0);

        memcpy (&pn [0], data, BITS / 8);
    }

    bool isZero () const
    {
        for (int i = 0; i < WIDTH; i++)
@@ -345,14 +365,24 @@ public:
        return reinterpret_cast<unsigned char*> (pn + WIDTH);
    }

    const unsigned char* begin () const
    unsigned char const* cbegin () const noexcept
    {
        return reinterpret_cast<const unsigned char*> (pn);
        return reinterpret_cast <unsigned char const*> (pn);
    }

    const unsigned char* end () const
    unsigned char const* cend () const noexcept
    {
        return reinterpret_cast<const unsigned char*> (pn + WIDTH);
        return reinterpret_cast<unsigned char const*> (pn + WIDTH);
    }

    const unsigned char* begin () const noexcept
    {
        return cbegin ();
    }

    const unsigned char* end () const noexcept
    {
        return cend ();
    }

    unsigned int size () const
@@ -474,6 +504,11 @@ public:
        *this = b;
    }

    explicit uint256 (void const* data)
        : base_uint256 (data)
    {
    }

    uint256& operator= (uint64 uHost)
    {
        zero ();
@@ -590,7 +625,7 @@ template<unsigned int BITS> inline std::ostream& operator<< (std::ostream& out,

inline int Testuint256AdHoc (std::vector<std::string> vArg)
{
    uint256 g (0);
    uint256 g (uint64 (0));

    printf ("%s\n", g.ToString ().c_str ());
    --g;
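A short illustration of the new raw-pointer constructor (hypothetical call site): it lets a backend rebuild a key from the fixed-size key bytes it stores, which is exactly what the Sqlite backend's fetch does above with uint256 const hash (key).

    unsigned char key [32];            // exactly 256 bits of key material
    std::memset (key, 0x42, sizeof (key));

    uint256 const hash (key);          // copies 32 bytes; no hex parsing involved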
@@ -106,7 +106,7 @@ int SectionCount (Section& secSource, const std::string& strSection)
{
    Section::mapped_type* pmtEntries = SectionEntries (secSource, strSection);

    return pmtEntries ? -1 : pmtEntries->size ();
    return pmtEntries ? pmtEntries->size () : 0;
}

bool SectionSingleB (Section& secSource, const std::string& strSection, std::string& strValue)
@@ -128,4 +128,37 @@ bool SectionSingleB (Section& secSource, const std::string& strSection, std::str
    return bSingle;
}

// vim:ts=4
StringPairArray parseKeyValueSection (Section& secSource, String const& strSection)
{
    StringPairArray result;

    // yuck.
    std::string const stdStrSection (strSection.toStdString ());

    int const count = SectionCount (secSource, stdStrSection);

    typedef Section::mapped_type Entries;

    Entries* const entries = SectionEntries (secSource, stdStrSection);

    if (entries != nullptr)
    {
        for (Entries::const_iterator iter = entries->begin (); iter != entries->end (); ++iter)
        {
            String const line (iter->c_str ());

            int const equalPos = line.indexOfChar ('=');

            if (equalPos != -1)
            {
                String const key = line.substring (0, equalPos);
                String const value = line.substring (equalPos + 1, line.length ());

                result.set (key, value);
            }
        }
    }

    return result;
}


@@ -20,4 +20,11 @@ bool SectionSingleB (Section& secSource, const std::string& strSection, std::str
int SectionCount (Section& secSource, const std::string& strSection);
Section::mapped_type* SectionEntries (Section& secSource, const std::string& strSection);

/** Parse a section of lines as a key/value array.

    Each line is in the form <key>=<value>.
    Spaces are considered part of the key and value.
*/
StringPairArray parseKeyValueSection (Section& secSource, String const& strSection);

#endif
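An illustrative pairing of config input and parsed output for parseKeyValueSection (the section contents are made up):

    [node_db]
    type=LevelDB
    path=/var/lib/rippled/db

    // Given a Section holding the lines above, this yields a StringPairArray
    // with "type" -> "LevelDB" and "path" -> "/var/lib/rippled/db".
    StringPairArray const params (parseKeyValueSection (secConfig, "node_db"));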
@@ -37,7 +37,7 @@ public:
};

// A class that unlocks on construction and locks on destruction

/*
class ScopedUnlock
{
protected:
@@ -80,5 +80,6 @@ private:
    ScopedUnlock (const ScopedUnlock&); // no implementation
    ScopedUnlock& operator= (const ScopedUnlock&); // no implementation
};
*/

#endif

@@ -271,7 +271,7 @@ std::string addressToString (void const* address)
    return strHex (static_cast <char const*> (address) - static_cast <char const*> (0));
}

StringPairArray parseKeyValueParameters (String parameters, beast_wchar delimiter)
StringPairArray parseDelimitedKeyValueString (String parameters, beast_wchar delimiter)
{
    StringPairArray keyValues;

@@ -309,4 +309,3 @@ StringPairArray parseKeyValueParameters (String parameters, beast_wchar delimite

    return keyValues;
}


@@ -214,8 +214,12 @@ bool parseUrl (const std::string& strUrl, std::string& strScheme, std::string& s
*/
extern std::string addressToString (void const* address);

/** Parse a pipe delimited key/value parameter string.
/** Create a Parameters from a String.

    Parameter strings have the format:

    <key>=<value>['|'<key>=<value>]
*/
StringPairArray parseKeyValueParameters (String parameters, beast_wchar delimiter);
extern StringPairArray parseDelimitedKeyValueString (String s, beast_wchar delimiter='|');

#endif
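A quick sketch of the renamed helper in use (values invented for illustration):

    // The default delimiter is '|', per the declaration above.
    StringPairArray const params (parseDelimitedKeyValueString (
        "type=LevelDB|path=/mnt/ephemeral"));

    // params ["type"] == "LevelDB"; params ["path"] == "/mnt/ephemeral"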
@@ -8,71 +8,6 @@
// TODO: Check permissions on config file before using it.
//

// VFALCO TODO Rename and replace these macros with variables.
#define SECTION_ACCOUNT_PROBE_MAX "account_probe_max"
#define SECTION_CLUSTER_NODES "cluster_nodes"
#define SECTION_DATABASE_PATH "database_path"
#define SECTION_DEBUG_LOGFILE "debug_logfile"
#define SECTION_ELB_SUPPORT "elb_support"
#define SECTION_FEE_DEFAULT "fee_default"
#define SECTION_FEE_NICKNAME_CREATE "fee_nickname_create"
#define SECTION_FEE_OFFER "fee_offer"
#define SECTION_FEE_OPERATION "fee_operation"
#define SECTION_FEE_ACCOUNT_RESERVE "fee_account_reserve"
#define SECTION_FEE_OWNER_RESERVE "fee_owner_reserve"
#define SECTION_NODE_DB "node_db"
#define SECTION_FASTNODE_DB "temp_db"
#define SECTION_LEDGER_HISTORY "ledger_history"
#define SECTION_IPS "ips"
#define SECTION_NETWORK_QUORUM "network_quorum"
#define SECTION_NODE_SEED "node_seed"
#define SECTION_NODE_SIZE "node_size"
#define SECTION_PATH_SEARCH_SIZE "path_search_size"
#define SECTION_PEER_CONNECT_LOW_WATER "peer_connect_low_water"
#define SECTION_PEER_IP "peer_ip"
#define SECTION_PEER_PORT "peer_port"
#define SECTION_PEER_PRIVATE "peer_private"
#define SECTION_PEER_SCAN_INTERVAL_MIN "peer_scan_interval_min"
#define SECTION_PEER_SSL_CIPHER_LIST "peer_ssl_cipher_list"
#define SECTION_PEER_START_MAX "peer_start_max"
#define SECTION_RPC_ALLOW_REMOTE "rpc_allow_remote"
#define SECTION_RPC_ADMIN_ALLOW "rpc_admin_allow"
#define SECTION_RPC_ADMIN_USER "rpc_admin_user"
#define SECTION_RPC_ADMIN_PASSWORD "rpc_admin_password"
#define SECTION_RPC_IP "rpc_ip"
#define SECTION_RPC_PORT "rpc_port"
#define SECTION_RPC_USER "rpc_user"
#define SECTION_RPC_PASSWORD "rpc_password"
#define SECTION_RPC_STARTUP "rpc_startup"
#define SECTION_RPC_SECURE "rpc_secure"
#define SECTION_RPC_SSL_CERT "rpc_ssl_cert"
#define SECTION_RPC_SSL_CHAIN "rpc_ssl_chain"
#define SECTION_RPC_SSL_KEY "rpc_ssl_key"
#define SECTION_SMS_FROM "sms_from"
#define SECTION_SMS_KEY "sms_key"
#define SECTION_SMS_SECRET "sms_secret"
#define SECTION_SMS_TO "sms_to"
#define SECTION_SMS_URL "sms_url"
#define SECTION_SNTP "sntp_servers"
#define SECTION_SSL_VERIFY "ssl_verify"
#define SECTION_SSL_VERIFY_FILE "ssl_verify_file"
#define SECTION_SSL_VERIFY_DIR "ssl_verify_dir"
#define SECTION_VALIDATORS_FILE "validators_file"
#define SECTION_VALIDATION_QUORUM "validation_quorum"
#define SECTION_VALIDATION_SEED "validation_seed"
#define SECTION_WEBSOCKET_PUBLIC_IP "websocket_public_ip"
#define SECTION_WEBSOCKET_PUBLIC_PORT "websocket_public_port"
#define SECTION_WEBSOCKET_PUBLIC_SECURE "websocket_public_secure"
#define SECTION_WEBSOCKET_PING_FREQ "websocket_ping_frequency"
#define SECTION_WEBSOCKET_IP "websocket_ip"
#define SECTION_WEBSOCKET_PORT "websocket_port"
#define SECTION_WEBSOCKET_SECURE "websocket_secure"
#define SECTION_WEBSOCKET_SSL_CERT "websocket_ssl_cert"
#define SECTION_WEBSOCKET_SSL_CHAIN "websocket_ssl_chain"
#define SECTION_WEBSOCKET_SSL_KEY "websocket_ssl_key"
#define SECTION_VALIDATORS "validators"
#define SECTION_VALIDATORS_SITE "validators_site"

// Fees are in XRP.
#define DEFAULT_FEE_DEFAULT 10
#define DEFAULT_FEE_ACCOUNT_RESERVE 200*SYSTEM_CURRENCY_PARTS
@@ -81,6 +16,8 @@
#define DEFAULT_FEE_OFFER DEFAULT_FEE_DEFAULT
#define DEFAULT_FEE_OPERATION 1

// VFALCO TODO Convert this to a SharedSingleton to prevent exit leaks
//
Config theConfig;

void Config::setup (const std::string& strConf, bool bTestNet, bool bQuiet)
@@ -373,8 +310,23 @@ void Config::load ()
    (void) SectionSingleB (secConfig, SECTION_RPC_IP, m_rpcIP);
    (void) SectionSingleB (secConfig, SECTION_RPC_PASSWORD, RPC_PASSWORD);
    (void) SectionSingleB (secConfig, SECTION_RPC_USER, RPC_USER);
    (void) SectionSingleB (secConfig, SECTION_NODE_DB, NODE_DB);
    (void) SectionSingleB (secConfig, SECTION_FASTNODE_DB, FASTNODE_DB);

    //---------------------------------------
    //
    // VFALCO BEGIN CLEAN
    //
    theConfig.nodeDatabase = parseKeyValueSection (
        secConfig, ConfigSection::nodeDatabase ());

    theConfig.ephemeralNodeDatabase = parseKeyValueSection (
        secConfig, ConfigSection::tempNodeDatabase ());

    theConfig.importNodeDatabase = parseKeyValueSection (
        secConfig, ConfigSection::importNodeDatabase ());
    //
    // VFALCO END CLEAN
    //
    //---------------------------------------

    if (SectionSingleB (secConfig, SECTION_RPC_PORT, strTemp))
        m_rpcPort = boost::lexical_cast<int> (strTemp);

@@ -84,9 +84,41 @@ public:
    boost::filesystem::path DATA_DIR;
    boost::filesystem::path DEBUG_LOGFILE;
    boost::filesystem::path VALIDATORS_FILE; // As specified in rippled.cfg.
    std::string NODE_DB; // Database to use for nodes
    std::string FASTNODE_DB; // Database for temporary storage
    std::string DB_IMPORT; // Import from old DB

    /** Parameters for the main NodeStore database.

        This is one or more strings of the form <key>=<value>
        The 'type' and 'path' keys are required, see rippled-example.cfg

        @see NodeStore
    */
    StringPairArray nodeDatabase;

    /** Parameters for the ephemeral NodeStore database.

        This is an auxiliary database for the NodeStore, usually placed
        on a separate faster volume. However, the volume data may not persist
        between launches. Use of the ephemeral database is optional.

        The format is the same as that for @ref nodeDatabase

        @see NodeStore
    */
    StringPairArray ephemeralNodeDatabase;

    /** Parameters for importing an old database into the current node database.

        If this is not empty, then it specifies the key/value parameters for
        another node database from which to import all data into the current
        node database specified by @ref nodeDatabase.

        The format of this string is in the form:
            <key>'='<value>['|'<key>'='value]

        @see parseDelimitedKeyValueString
    */
    StringPairArray importNodeDatabase;

    bool ELB_SUPPORT; // Support Amazon ELB

    std::string VALIDATORS_SITE; // Where to find validators.txt on the Internet.
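An illustrative rippled.cfg fragment exercising all three new fields (paths are invented; rippled-example.cfg is the authoritative reference):

    [node_db]
    type=LevelDB
    path=/var/lib/rippled/db

    [temp_db]
    type=LevelDB
    path=/mnt/ephemeral/db

    [import_db]
    type=SQLite
    path=/var/lib/rippled/legacy.db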

modules/ripple_core/functional/ripple_ConfigSections.h (new file)
@@ -0,0 +1,86 @@
//------------------------------------------------------------------------------
/*
    Copyright (c) 2011-2013, OpenCoin, Inc.
*/
//==============================================================================

#ifndef RIPPLE_CONFIGSECTIONS_H_INCLUDED
#define RIPPLE_CONFIGSECTIONS_H_INCLUDED

// VFALCO NOTE
//
// Please use this style for all new sections
// And if you're feeling generous, convert all the
// existing macros to this format as well.
//
struct ConfigSection
{
    static String nodeDatabase ()       { return "node_db"; }
    static String tempNodeDatabase ()   { return "temp_db"; }
    static String importNodeDatabase () { return "import_db"; }
};
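A sketch of extending the new style with a hypothetical section (the name is invented purely for illustration):

    struct ConfigSection
    {
        // ... existing entries ...

        // One function per section: no macro name collisions, and the
        // section name has exactly one definition.
        static String backupDatabase () { return "backup_db"; }
    };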

// VFALCO TODO Rename and replace these macros with variables.
#define SECTION_ACCOUNT_PROBE_MAX "account_probe_max"
#define SECTION_CLUSTER_NODES "cluster_nodes"
#define SECTION_DATABASE_PATH "database_path"
#define SECTION_DEBUG_LOGFILE "debug_logfile"
#define SECTION_ELB_SUPPORT "elb_support"
#define SECTION_FEE_DEFAULT "fee_default"
#define SECTION_FEE_NICKNAME_CREATE "fee_nickname_create"
#define SECTION_FEE_OFFER "fee_offer"
#define SECTION_FEE_OPERATION "fee_operation"
#define SECTION_FEE_ACCOUNT_RESERVE "fee_account_reserve"
#define SECTION_FEE_OWNER_RESERVE "fee_owner_reserve"
#define SECTION_LEDGER_HISTORY "ledger_history"
#define SECTION_IPS "ips"
#define SECTION_NETWORK_QUORUM "network_quorum"
#define SECTION_NODE_SEED "node_seed"
#define SECTION_NODE_SIZE "node_size"
#define SECTION_PATH_SEARCH_SIZE "path_search_size"
#define SECTION_PEER_CONNECT_LOW_WATER "peer_connect_low_water"
#define SECTION_PEER_IP "peer_ip"
#define SECTION_PEER_PORT "peer_port"
#define SECTION_PEER_PRIVATE "peer_private"
#define SECTION_PEER_SCAN_INTERVAL_MIN "peer_scan_interval_min"
#define SECTION_PEER_SSL_CIPHER_LIST "peer_ssl_cipher_list"
#define SECTION_PEER_START_MAX "peer_start_max"
#define SECTION_RPC_ALLOW_REMOTE "rpc_allow_remote"
#define SECTION_RPC_ADMIN_ALLOW "rpc_admin_allow"
#define SECTION_RPC_ADMIN_USER "rpc_admin_user"
#define SECTION_RPC_ADMIN_PASSWORD "rpc_admin_password"
#define SECTION_RPC_IP "rpc_ip"
#define SECTION_RPC_PORT "rpc_port"
#define SECTION_RPC_USER "rpc_user"
#define SECTION_RPC_PASSWORD "rpc_password"
#define SECTION_RPC_STARTUP "rpc_startup"
#define SECTION_RPC_SECURE "rpc_secure"
#define SECTION_RPC_SSL_CERT "rpc_ssl_cert"
#define SECTION_RPC_SSL_CHAIN "rpc_ssl_chain"
#define SECTION_RPC_SSL_KEY "rpc_ssl_key"
#define SECTION_SMS_FROM "sms_from"
#define SECTION_SMS_KEY "sms_key"
#define SECTION_SMS_SECRET "sms_secret"
#define SECTION_SMS_TO "sms_to"
#define SECTION_SMS_URL "sms_url"
#define SECTION_SNTP "sntp_servers"
#define SECTION_SSL_VERIFY "ssl_verify"
#define SECTION_SSL_VERIFY_FILE "ssl_verify_file"
#define SECTION_SSL_VERIFY_DIR "ssl_verify_dir"
#define SECTION_VALIDATORS_FILE "validators_file"
#define SECTION_VALIDATION_QUORUM "validation_quorum"
#define SECTION_VALIDATION_SEED "validation_seed"
#define SECTION_WEBSOCKET_PUBLIC_IP "websocket_public_ip"
#define SECTION_WEBSOCKET_PUBLIC_PORT "websocket_public_port"
#define SECTION_WEBSOCKET_PUBLIC_SECURE "websocket_public_secure"
#define SECTION_WEBSOCKET_PING_FREQ "websocket_ping_frequency"
#define SECTION_WEBSOCKET_IP "websocket_ip"
#define SECTION_WEBSOCKET_PORT "websocket_port"
#define SECTION_WEBSOCKET_SECURE "websocket_secure"
#define SECTION_WEBSOCKET_SSL_CERT "websocket_ssl_cert"
#define SECTION_WEBSOCKET_SSL_CHAIN "websocket_ssl_chain"
#define SECTION_WEBSOCKET_SSL_KEY "websocket_ssl_key"
#define SECTION_VALIDATORS "validators"
#define SECTION_VALIDATORS_SITE "validators_site"

#endif

@@ -30,6 +30,7 @@ namespace ripple

// VFALCO NOTE Indentation shows dependency hierarchy
//
/***/#include "functional/ripple_ConfigSections.h"
/**/#include "functional/ripple_Config.h"
/**/#include "functional/ripple_ILoadFeeTrack.h"
/*..*/#include "functional/ripple_LoadEvent.h"

@@ -402,7 +402,7 @@ private:
class ValidatorListTests : public UnitTest
{
public:
    ValidatorListTests () : UnitTest ("ValidatorList")
    ValidatorListTests () : UnitTest ("ValidatorList", "ripple")
    {
    }


@@ -12,7 +12,7 @@

#if RIPPLE_MDB_AVAILABLE

#include "libraries/liblmdb/mdb.c"
#include "libraries/liblmdb/midl.c"
#include "mdb/libraries/liblmdb/mdb.c"
#include "mdb/libraries/liblmdb/midl.c"

#endif

@@ -12,7 +12,7 @@
#if ! BEAST_WIN32
#define RIPPLE_MDB_AVAILABLE 1

#include "libraries/liblmdb/lmdb.h"
#include "mdb/libraries/liblmdb/lmdb.h"

#else
// mdb is unsupported on Win32