Mirror of https://github.com/Xahau/xahaud.git (synced 2025-12-06 17:27:52 +00:00)
Bring in backends from the dead branch
@@ -335,13 +335,6 @@ Application::~Application ()
    delete mEphemeralLDB;
}

// VFALCO TODO Tidy these up into some class with accessors.
//
extern const char* RpcDBInit[], *TxnDBInit[], *LedgerDBInit[], *WalletDBInit[], *HashNodeDBInit[],
    *NetNodeDBInit[], *PathFindDBInit[];
extern int RpcDBCount, TxnDBCount, LedgerDBCount, WalletDBCount, HashNodeDBCount,
    NetNodeDBCount, PathFindDBCount;

void Application::stop ()
{
    WriteLog (lsINFO, Application) << "Received shutdown request";

src/cpp/ripple/ripple_DBInit.h (new file, 30 lines)
@@ -0,0 +1,30 @@
//------------------------------------------------------------------------------
/*
    Copyright (c) 2011-2013, OpenCoin, Inc.
*/
//==============================================================================

#ifndef RIPPLE_DBINIT_H_INCLUDED
#define RIPPLE_DBINIT_H_INCLUDED

// VFALCO TODO Tidy these up into a class with functions and return types.
extern const char* RpcDBInit[];
extern const char* TxnDBInit[];
extern const char* LedgerDBInit[];
extern const char* WalletDBInit[];
extern const char* HashNodeDBInit[];

// VFALCO TODO Figure out what these counts are for
extern int RpcDBCount;
extern int TxnDBCount;
extern int LedgerDBCount;
extern int WalletDBCount;
extern int HashNodeDBCount;

// VFALCO TODO Seems these two aren't used so delete EVERYTHING.
extern const char* NetNodeDBInit[];
extern const char* PathFindDBInit[];
extern int NetNodeDBCount;
extern int PathFindDBCount;

#endif
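These arrays hold the SQL statements used to initialize each database, and the matching *Count gives the number of entries. A minimal sketch of how a caller might run one set of them; the Database type and executeSQL call appear later in this commit, while the helper function itself is only an illustration:

    // Hypothetical helper (not part of this commit): run every init
    // statement for the transaction database.
    void initTxnDB (Database* db)
    {
        for (int i = 0; i < TxnDBCount; ++i)
            db->executeSQL (TxnDBInit[i]);
    }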
@@ -1,53 +0,0 @@
//------------------------------------------------------------------------------
/*
    Copyright (c) 2011-2013, OpenCoin, Inc.
*/
//==============================================================================

SETUP_LOG (HashedObject)

HashedObject::HashedObject (
    HashedObjectType type,
    LedgerIndex ledgerIndex,
    Blob const& binaryDataToCopy,
    uint256 const& hash)
    : mType (type)
    , mHash (hash)
    , mLedgerIndex (ledgerIndex)
    , mData (binaryDataToCopy)
{
}

HashedObject::HashedObject (
    HashedObjectType type,
    LedgerIndex ledgerIndex,
    void const* bufferToCopy,
    int bytesInBuffer,
    uint256 const& hash)
    : mType (type)
    , mHash (hash)
    , mLedgerIndex (ledgerIndex)
    , mData (static_cast <unsigned char const*> (bufferToCopy),
             static_cast <unsigned char const*> (bufferToCopy) + bytesInBuffer)
{
}

HashedObjectType HashedObject::getType () const
{
    return mType;
}

uint256 const& HashedObject::getHash () const
{
    return mHash;
}

LedgerIndex HashedObject::getIndex () const
{
    return mLedgerIndex;
}

Blob const& HashedObject::getData () const
{
    return mData;
}
@@ -1,88 +0,0 @@
//------------------------------------------------------------------------------
/*
    Copyright (c) 2011-2013, OpenCoin, Inc.
*/
//==============================================================================

#ifndef RIPPLE_HASHEDOBJECT_H
#define RIPPLE_HASHEDOBJECT_H

/** The types of hashed objects.
*/
enum HashedObjectType
{
    hotUNKNOWN = 0,
    hotLEDGER = 1,
    hotTRANSACTION = 2,
    hotACCOUNT_NODE = 3,
    hotTRANSACTION_NODE = 4
};

/** A blob of data with associated metadata, referenced by hash.

    The metadata includes the following:

    - Type of the blob
    - The ledger index in which it appears
    - The SHA 256 hash

    @note No checking is performed to make sure the hash matches the data.
    @see SHAMap
*/
// VFALCO TODO consider making the instance a private member of SHAMap
//             since its the primary user.
//
class HashedObject
    : public CountedObject <HashedObject>
{
public:
    static char const* getCountedObjectName () { return "HashedObject"; }

    typedef boost::shared_ptr <HashedObject> pointer;
    typedef pointer const& ref;

    /** Create from a vector of data.

        @note A copy of the data is created.
    */
    HashedObject (HashedObjectType type,
                  LedgerIndex ledgerIndex,
                  Blob const & binaryDataToCopy,
                  uint256 const & hash);

    /** Create from an area of memory.

        @note A copy of the data is created.
    */
    HashedObject (HashedObjectType type,
                  LedgerIndex ledgerIndex,
                  void const * bufferToCopy,
                  int bytesInBuffer,
                  uint256 const & hash);

    /** Retrieve the type of this object.
    */
    HashedObjectType getType () const;

    /** Retrieve the hash metadata.
    */
    uint256 const& getHash () const;

    /** Retrieve the ledger index in which this object appears.
    */
    // VFALCO TODO rename to getLedgerIndex or getLedgerId
    LedgerIndex getIndex () const;

    /** Retrieve the binary data.
    */
    Blob const& getData () const;

private:
    HashedObjectType const mType;
    uint256 const mHash;
    LedgerIndex const mLedgerIndex;
    Blob const mData;
};

#endif
// vim:ts=4
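A minimal construction sketch showing how a node would typically be wrapped before being handed to the store; the hashing call and boost::make_shared usage both appear in the store code below, while the surrounding variables are placeholders:

    // Hypothetical illustration: wrap serialized node bytes for storage.
    // getSerializedNode and ledgerIndex are placeholders, not part of this commit.
    Blob data = getSerializedNode ();
    uint256 hash = Serializer::getSHA512Half (data);   // same hashing the store asserts against
    HashedObject::pointer obj =
        boost::make_shared<HashedObject> (hotLEDGER, ledgerIndex, data, hash);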
@@ -1,621 +0,0 @@
//------------------------------------------------------------------------------
/*
    Copyright (c) 2011-2013, OpenCoin, Inc.
*/
//==============================================================================

HashedObjectStore::HashedObjectStore (int cacheSize, int cacheAge) :
    mCache ("HashedObjectStore", cacheSize, cacheAge), mNegativeCache ("HashedObjectNegativeCache", 0, 120),
    mWriteGeneration (0), mWriteLoad (0), mWritePending (false), mLevelDB (false), mEphemeralDB (false)
{
    mWriteSet.reserve (128);

    if (theConfig.NODE_DB == "leveldb" || theConfig.NODE_DB == "LevelDB")
        mLevelDB = true;
    else if (theConfig.NODE_DB == "SQLite" || theConfig.NODE_DB == "sqlite")
        mLevelDB = false;
    else
    {
        WriteLog (lsFATAL, HashedObject) << "Incorrect database selection";
        assert (false);
    }

    if (!theConfig.LDB_EPHEMERAL.empty ())
        mEphemeralDB = true;
}

void HashedObjectStore::tune (int size, int age)
{
    mCache.setTargetSize (size);
    mCache.setTargetAge (age);
}

void HashedObjectStore::waitWrite ()
{
    boost::mutex::scoped_lock sl (mWriteMutex);
    int gen = mWriteGeneration;

    while (mWritePending && (mWriteGeneration == gen))
        mWriteCondition.wait (sl);
}

int HashedObjectStore::getWriteLoad ()
{
    boost::mutex::scoped_lock sl (mWriteMutex);
    return std::max (mWriteLoad, static_cast<int> (mWriteSet.size ()));
}

// low-level retrieve
HashedObject::pointer HashedObjectStore::LLRetrieve (uint256 const& hash, leveldb::DB* db)
{
    std::string sData;

    leveldb::Status st = db->Get (leveldb::ReadOptions (),
        leveldb::Slice (reinterpret_cast<const char*> (hash.begin ()), hash.size ()), &sData);

    if (!st.ok ())
    {
        assert (st.IsNotFound ());
        return HashedObject::pointer ();
    }

    const unsigned char* bufPtr = reinterpret_cast<const unsigned char*> (&sData[0]);
    uint32 index = htonl (*reinterpret_cast<const uint32*> (bufPtr));
    int htype = bufPtr[8];

    return boost::make_shared<HashedObject> (static_cast<HashedObjectType> (htype), index,
        bufPtr + 9, sData.size () - 9, hash);
}

// low-level write single
void HashedObjectStore::LLWrite (boost::shared_ptr<HashedObject> ptr, leveldb::DB* db)
{
    HashedObject& obj = *ptr;
    Blob rawData (9 + obj.getData ().size ());
    unsigned char* bufPtr = &rawData.front ();

    *reinterpret_cast<uint32*> (bufPtr + 0) = ntohl (obj.getIndex ());
    *reinterpret_cast<uint32*> (bufPtr + 4) = ntohl (obj.getIndex ());
    * (bufPtr + 8) = static_cast<unsigned char> (obj.getType ());
    memcpy (bufPtr + 9, &obj.getData ().front (), obj.getData ().size ());

    leveldb::Status st = db->Put (leveldb::WriteOptions (),
        leveldb::Slice (reinterpret_cast<const char*> (obj.getHash ().begin ()), obj.getHash ().size ()),
        leveldb::Slice (reinterpret_cast<const char*> (bufPtr), rawData.size ()));

    if (!st.ok ())
    {
        WriteLog (lsFATAL, HashedObject) << "Failed to store hash node";
        assert (false);
    }
}
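LLWrite and LLRetrieve agree on a fixed layout for each LevelDB value: the ledger index, byte-swapped, in bytes 0-3 and repeated in bytes 4-7, the object type in byte 8, and the raw payload from byte 9 on. A decode sketch of that layout; the struct and function below are illustrative only and not part of this commit:

    // Hypothetical view of the record format produced by LLWrite above.
    struct NodeRecord
    {
        uint32 ledgerIndex;              // bytes 0-3 (bytes 4-7 repeat it)
        HashedObjectType type;           // byte 8
        unsigned char const* payload;    // bytes 9..end
        std::size_t payloadSize;
    };

    NodeRecord parseNodeRecord (std::string const& value)
    {
        NodeRecord r;
        unsigned char const* p = reinterpret_cast<unsigned char const*> (value.data ());
        r.ledgerIndex = ntohl (*reinterpret_cast<uint32 const*> (p));
        r.type        = static_cast<HashedObjectType> (p[8]);
        r.payload     = p + 9;
        r.payloadSize = value.size () - 9;
        return r;
    }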
// low-level write set
void HashedObjectStore::LLWrite (const std::vector< boost::shared_ptr<HashedObject> >& set, leveldb::DB* db)
{
    leveldb::WriteBatch batch;

    BOOST_FOREACH (const boost::shared_ptr<HashedObject>& it, set)
    {
        const HashedObject& obj = *it;
        Blob rawData (9 + obj.getData ().size ());
        unsigned char* bufPtr = &rawData.front ();

        *reinterpret_cast<uint32*> (bufPtr + 0) = ntohl (obj.getIndex ());
        *reinterpret_cast<uint32*> (bufPtr + 4) = ntohl (obj.getIndex ());
        * (bufPtr + 8) = static_cast<unsigned char> (obj.getType ());
        memcpy (bufPtr + 9, &obj.getData ().front (), obj.getData ().size ());

        batch.Put (leveldb::Slice (reinterpret_cast<const char*> (obj.getHash ().begin ()), obj.getHash ().size ()),
            leveldb::Slice (reinterpret_cast<const char*> (bufPtr), rawData.size ()));
    }

    leveldb::Status st = db->Write (leveldb::WriteOptions (), &batch);

    if (!st.ok ())
    {
        WriteLog (lsFATAL, HashedObject) << "Failed to store hash node";
        assert (false);
    }
}

bool HashedObjectStore::storeLevelDB (HashedObjectType type, uint32 index,
    Blob const& data, uint256 const& hash)
{
    // return: false = already in cache, true = added to cache
    if (!getApp().getHashNodeLDB ())
        return true;

    if (mCache.touch (hash))
        return false;

#ifdef PARANOID
    assert (hash == Serializer::getSHA512Half (data));
#endif

    HashedObject::pointer object = boost::make_shared<HashedObject> (type, index, data, hash);

    if (!mCache.canonicalize (hash, object))
    {
        boost::mutex::scoped_lock sl (mWriteMutex);
        mWriteSet.push_back (object);

        if (!mWritePending)
        {
            mWritePending = true;
            getApp().getJobQueue ().addJob (jtWRITE, "HashedObject::store",
                BIND_TYPE (&HashedObjectStore::bulkWriteLevelDB, this, P_1));
        }
    }

    mNegativeCache.del (hash);
    return true;
}

void HashedObjectStore::bulkWriteLevelDB (Job&)
{
    assert (mLevelDB);
    int setSize = 0;

    while (1)
    {
        std::vector< boost::shared_ptr<HashedObject> > set;
        set.reserve (128);

        {
            boost::mutex::scoped_lock sl (mWriteMutex);

            mWriteSet.swap (set);
            assert (mWriteSet.empty ());
            ++mWriteGeneration;
            mWriteCondition.notify_all ();

            if (set.empty ())
            {
                mWritePending = false;
                mWriteLoad = 0;
                return;
            }

            mWriteLoad = std::max (setSize, static_cast<int> (mWriteSet.size ()));
            setSize = set.size ();
        }

        LLWrite (set, getApp().getHashNodeLDB ());

        if (mEphemeralDB)
            LLWrite (set, getApp().getEphemeralLDB ());
    }
}
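storeLevelDB appends to mWriteSet under mWriteMutex and schedules bulkWriteLevelDB only when no flush job is already pending; the bulk writer then drains the set in batches, bumping mWriteGeneration and signalling mWriteCondition each time it swaps a batch out, so waitWrite returns once the batch that was pending when it was called has been handed to the writer. A hedged sketch of a caller relying on that handshake; the shutdown context and the store instance are assumptions, not part of this commit:

    // Hypothetical flush-before-shutdown path (store is a placeholder
    // HashedObjectStore instance).
    store.store (hotLEDGER, ledgerIndex, data, hash);   // queues a pending write
    store.waitWrite ();                                 // blocks until the background job
                                                        // advances mWriteGeneration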
HashedObject::pointer HashedObjectStore::retrieveLevelDB (uint256 const& hash)
{
    HashedObject::pointer obj = mCache.fetch (hash);

    if (obj || mNegativeCache.isPresent (hash) || !getApp().getHashNodeLDB ())
        return obj;

    if (mEphemeralDB)
    {
        obj = LLRetrieve (hash, getApp().getEphemeralLDB ());

        if (obj)
        {
            mCache.canonicalize (hash, obj);
            return obj;
        }
    }

    {
        LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtHO_READ, "HOS::retrieve"));
        obj = LLRetrieve (hash, getApp().getHashNodeLDB ());

        if (!obj)
        {
            mNegativeCache.add (hash);
            return obj;
        }
    }

    mCache.canonicalize (hash, obj);

    if (mEphemeralDB)
        LLWrite (obj, getApp().getEphemeralLDB ());

    WriteLog (lsTRACE, HashedObject) << "HOS: " << hash << " fetch: in db";
    return obj;
}
bool HashedObjectStore::storeSQLite (HashedObjectType type, uint32 index,
    Blob const& data, uint256 const& hash)
{
    // return: false = already in cache, true = added to cache
    if (!getApp().getHashNodeDB ())
    {
        WriteLog (lsTRACE, HashedObject) << "HOS: no db";
        return true;
    }

    if (mCache.touch (hash))
    {
        WriteLog (lsTRACE, HashedObject) << "HOS: " << hash << " store: incache";
        return false;
    }

    assert (hash == Serializer::getSHA512Half (data));

    HashedObject::pointer object = boost::make_shared<HashedObject> (type, index, data, hash);

    if (!mCache.canonicalize (hash, object))
    {
        // WriteLog (lsTRACE, HashedObject) << "Queuing write for " << hash;
        boost::mutex::scoped_lock sl (mWriteMutex);
        mWriteSet.push_back (object);

        if (!mWritePending)
        {
            mWritePending = true;
            getApp().getJobQueue ().addJob (jtWRITE, "HashedObject::store",
                BIND_TYPE (&HashedObjectStore::bulkWriteSQLite, this, P_1));
        }
    }

    // else
    //     WriteLog (lsTRACE, HashedObject) << "HOS: already had " << hash;
    mNegativeCache.del (hash);

    return true;
}

void HashedObjectStore::bulkWriteSQLite (Job&)
{
    assert (!mLevelDB);
    int setSize = 0;

    while (1)
    {
        std::vector< boost::shared_ptr<HashedObject> > set;
        set.reserve (128);

        {
            boost::mutex::scoped_lock sl (mWriteMutex);
            mWriteSet.swap (set);
            assert (mWriteSet.empty ());
            ++mWriteGeneration;
            mWriteCondition.notify_all ();

            if (set.empty ())
            {
                mWritePending = false;
                mWriteLoad = 0;
                return;
            }

            mWriteLoad = std::max (setSize, static_cast<int> (mWriteSet.size ()));
            setSize = set.size ();
        }
        // WriteLog (lsTRACE, HashedObject) << "HOS: writing " << set.size();

#ifndef NO_SQLITE3_PREPARE

        if (mEphemeralDB)
            LLWrite (set, getApp().getEphemeralLDB ());

        {
            Database* db = getApp().getHashNodeDB ()->getDB ();

            // VFALCO TODO Get rid of the last parameter "aux", which is set to !theConfig.RUN_STANDALONE
            //
            static SqliteStatement pStB (db->getSqliteDB (), "BEGIN TRANSACTION;", !theConfig.RUN_STANDALONE);
            static SqliteStatement pStE (db->getSqliteDB (), "END TRANSACTION;", !theConfig.RUN_STANDALONE);
            static SqliteStatement pSt (db->getSqliteDB (),
                "INSERT OR IGNORE INTO CommittedObjects "
                "(Hash,ObjType,LedgerIndex,Object) VALUES (?, ?, ?, ?);", !theConfig.RUN_STANDALONE);

            pStB.step ();
            pStB.reset ();

            BOOST_FOREACH (const boost::shared_ptr<HashedObject>& it, set)
            {
                const char* type;

                switch (it->getType ())
                {
                case hotLEDGER:
                    type = "L";
                    break;

                case hotTRANSACTION:
                    type = "T";
                    break;

                case hotACCOUNT_NODE:
                    type = "A";
                    break;

                case hotTRANSACTION_NODE:
                    type = "N";
                    break;

                default:
                    type = "U";
                }

                pSt.bind (1, it->getHash ().GetHex ());
                pSt.bind (2, type);
                pSt.bind (3, it->getIndex ());
                pSt.bindStatic (4, it->getData ());
                int ret = pSt.step ();

                if (!pSt.isDone (ret))
                {
                    WriteLog (lsFATAL, HashedObject) << "Error saving hashed object " << ret;
                    assert (false);
                }

                pSt.reset ();
            }

            pStE.step ();
            pStE.reset ();
        }

#else

        static boost::format
            fAdd ("INSERT OR IGNORE INTO CommittedObjects "
                  "(Hash,ObjType,LedgerIndex,Object) VALUES ('%s','%c','%u',%s);");

        Database* db = getApp().getHashNodeDB ()->getDB ();
        {
            ScopedLock sl (getApp().getHashNodeDB ()->getDBLock ());

            db->executeSQL ("BEGIN TRANSACTION;");

            BOOST_FOREACH (const boost::shared_ptr<HashedObject>& it, set)
            {
                char type;

                switch (it->getType ())
                {
                case hotLEDGER:
                    type = 'L';
                    break;

                case hotTRANSACTION:
                    type = 'T';
                    break;

                case hotACCOUNT_NODE:
                    type = 'A';
                    break;

                case hotTRANSACTION_NODE:
                    type = 'N';
                    break;

                default:
                    type = 'U';
                }

                db->executeSQL (boost::str (boost::format (fAdd)
                    % it->getHash ().GetHex () % type % it->getIndex () % sqlEscape (it->getData ())));
            }

            db->executeSQL ("END TRANSACTION;");
        }
#endif

    }
}
HashedObject::pointer HashedObjectStore::retrieveSQLite (uint256 const& hash)
{
    HashedObject::pointer obj = mCache.fetch (hash);

    if (obj)
        return obj;

    if (mNegativeCache.isPresent (hash))
        return obj;

    if (mEphemeralDB)
    {
        obj = LLRetrieve (hash, getApp().getEphemeralLDB ());

        if (obj)
        {
            mCache.canonicalize (hash, obj);
            return obj;
        }
    }

    if (!getApp().getHashNodeDB ())
        return obj;

    Blob data;
    std::string type;
    uint32 index;

#ifndef NO_SQLITE3_PREPARE
    {
        ScopedLock sl (getApp().getHashNodeDB ()->getDBLock ());
        static SqliteStatement pSt (getApp().getHashNodeDB ()->getDB ()->getSqliteDB (),
            "SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;");
        LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtDISK, "HOS::retrieve"));

        pSt.bind (1, hash.GetHex ());
        int ret = pSt.step ();

        if (pSt.isDone (ret))
        {
            pSt.reset ();
            mNegativeCache.add (hash);
            WriteLog (lsTRACE, HashedObject) << "HOS: " << hash << " fetch: not in db";
            return obj;
        }

        type = pSt.peekString (0);
        index = pSt.getUInt32 (1);
        pSt.getBlob (2).swap (data);
        pSt.reset ();
    }

#else

    std::string sql = "SELECT * FROM CommittedObjects WHERE Hash='";
    sql.append (hash.GetHex ());
    sql.append ("';");

    {
        ScopedLock sl (getApp().getHashNodeDB ()->getDBLock ());
        Database* db = getApp().getHashNodeDB ()->getDB ();

        if (!db->executeSQL (sql) || !db->startIterRows ())
        {
            sl.unlock ();
            mNegativeCache.add (hash);
            return obj;
        }

        db->getStr ("ObjType", type);
        index = db->getBigInt ("LedgerIndex");

        int size = db->getBinary ("Object", NULL, 0);
        data.resize (size);
        db->getBinary ("Object", & (data.front ()), size);
        db->endIterRows ();
    }
#endif

#ifdef PARANOID
    assert (Serializer::getSHA512Half (data) == hash);
#endif

    HashedObjectType htype = hotUNKNOWN;

    switch (type[0])
    {
    case 'L':
        htype = hotLEDGER;
        break;

    case 'T':
        htype = hotTRANSACTION;
        break;

    case 'A':
        htype = hotACCOUNT_NODE;
        break;

    case 'N':
        htype = hotTRANSACTION_NODE;
        break;

    default:
        assert (false);
        WriteLog (lsERROR, HashedObject) << "Invalid hashed object";
        mNegativeCache.add (hash);
        return obj;
    }

    obj = boost::make_shared<HashedObject> (htype, index, data, hash);
    mCache.canonicalize (hash, obj);

    if (mEphemeralDB)
        LLWrite (obj, getApp().getEphemeralLDB ());

    WriteLog (lsTRACE, HashedObject) << "HOS: " << hash << " fetch: in db";
    return obj;
}

int HashedObjectStore::import (const std::string& file)
{
    WriteLog (lsWARNING, HashedObject) << "Hashed object import from \"" << file << "\".";
    UPTR_T<Database> importDB (new SqliteDatabase (file.c_str ()));
    importDB->connect ();

    leveldb::DB* db = getApp().getHashNodeLDB ();
    leveldb::WriteOptions wo;

    int count = 0;

    SQL_FOREACH (importDB, "SELECT * FROM CommittedObjects;")
    {
        uint256 hash;
        std::string hashStr;
        importDB->getStr ("Hash", hashStr);
        hash.SetHexExact (hashStr);

        if (hash.isZero ())
        {
            WriteLog (lsWARNING, HashedObject) << "zero hash found in import table";
        }
        else
        {
            Blob rawData;
            int size = importDB->getBinary ("Object", NULL, 0);
            rawData.resize (9 + size);
            unsigned char* bufPtr = &rawData.front ();

            importDB->getBinary ("Object", bufPtr + 9, size);

            uint32 index = importDB->getBigInt ("LedgerIndex");
            *reinterpret_cast<uint32*> (bufPtr + 0) = ntohl (index);
            *reinterpret_cast<uint32*> (bufPtr + 4) = ntohl (index);

            std::string type;
            importDB->getStr ("ObjType", type);
            HashedObjectType htype = hotUNKNOWN;

            switch (type[0])
            {
            case 'L':
                htype = hotLEDGER;
                break;

            case 'T':
                htype = hotTRANSACTION;
                break;

            case 'A':
                htype = hotACCOUNT_NODE;
                break;

            case 'N':
                htype = hotTRANSACTION_NODE;
                break;

            default:
                assert (false);
                WriteLog (lsERROR, HashedObject) << "Invalid hashed object";
            }

            * (bufPtr + 8) = static_cast<unsigned char> (htype);

            leveldb::Status st = db->Put (wo,
                leveldb::Slice (reinterpret_cast<const char*> (hash.begin ()), hash.size ()),
                leveldb::Slice (reinterpret_cast<const char*> (bufPtr), rawData.size ()));

            if (!st.ok ())
            {
                WriteLog (lsFATAL, HashedObject) << "Failed to store hash node";
                assert (false);
            }

            ++count;
        }

        if ((count % 10000) == 0)
        {
            WriteLog (lsINFO, HashedObject) << "Import in progress: " << count;
        }
    }

    WriteLog (lsWARNING, HashedObject) << "Imported " << count << " nodes";
    return count;
}

// vim:ts=4
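import copies every row of an old SQLite CommittedObjects table into the LevelDB node store, re-encoding each row into the nine-byte-prefixed record format used by LLWrite. A hedged invocation sketch; the store instance and the file path are placeholders, not part of this commit:

    // Hypothetical migration call (store is a placeholder HashedObjectStore,
    // the path is a placeholder).
    int imported = store.import ("/path/to/old-hashnode.db");
    WriteLog (lsINFO, HashedObject) << "Migrated " << imported << " nodes from SQLite";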
@@ -1,88 +0,0 @@
//------------------------------------------------------------------------------
/*
    Copyright (c) 2011-2013, OpenCoin, Inc.
*/
//==============================================================================

#ifndef RIPPLE_HASHEDOBJECTSTORE_H
#define RIPPLE_HASHEDOBJECTSTORE_H

/** Persistency layer for hashed objects.
*/
// VFALCO TODO Move all definitions to the .cpp
class HashedObjectStore : LeakChecked <HashedObjectStore>
{
public:
    HashedObjectStore (int cacheSize, int cacheAge);

    bool isLevelDB ()
    {
        return mLevelDB;
    }

    float getCacheHitRate ()
    {
        return mCache.getHitRate ();
    }

    bool store (HashedObjectType type, uint32 index, Blob const& data,
                uint256 const& hash)
    {
        if (mLevelDB)
            return storeLevelDB (type, index, data, hash);

        return storeSQLite (type, index, data, hash);
    }

    HashedObject::pointer retrieve (uint256 const& hash)
    {
        if (mLevelDB)
            return retrieveLevelDB (hash);

        return retrieveSQLite (hash);
    }

    bool storeSQLite (HashedObjectType type, uint32 index, Blob const& data,
                      uint256 const& hash);
    HashedObject::pointer retrieveSQLite (uint256 const& hash);
    void bulkWriteSQLite (Job&);

    bool storeLevelDB (HashedObjectType type, uint32 index, Blob const& data,
                       uint256 const& hash);
    HashedObject::pointer retrieveLevelDB (uint256 const& hash);
    void bulkWriteLevelDB (Job&);

    void waitWrite ();
    void tune (int size, int age);
    void sweep ()
    {
        mCache.sweep ();
        mNegativeCache.sweep ();
    }
    int getWriteLoad ();

    int import (const std::string& fileName);

private:
    static HashedObject::pointer LLRetrieve (uint256 const& hash, leveldb::DB* db);
    static void LLWrite (boost::shared_ptr<HashedObject> ptr, leveldb::DB* db);
    static void LLWrite (const std::vector< boost::shared_ptr<HashedObject> >& set, leveldb::DB* db);

private:
    TaggedCache<uint256, HashedObject, UptimeTimerAdapter> mCache;
    KeyCache <uint256, UptimeTimerAdapter> mNegativeCache;

    boost::mutex mWriteMutex;
    boost::condition_variable mWriteCondition;
    int mWriteGeneration;
    int mWriteLoad;

    std::vector< boost::shared_ptr<HashedObject> > mWriteSet;
    bool mWritePending;
    bool mLevelDB;
    bool mEphemeralDB;
};

#endif
// vim:ts=4
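A short usage sketch of the public interface declared above; the construction arguments, the surrounding variables, and handleNode are assumptions rather than part of this commit:

    // Hypothetical caller: construct, tune, store and fetch a node.
    HashedObjectStore store (16384 /* cacheSize */, 300 /* cacheAge */);
    store.tune (32768, 600);                                // retarget the cache later
    store.store (hotTRANSACTION, ledgerIndex, data, hash);  // LevelDB or SQLite path,
                                                            // depending on theConfig.NODE_DB
    if (HashedObject::pointer obj = store.retrieve (hash))
        handleNode (obj->getData ());                       // handleNode is a placeholder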