Mirror of https://github.com/XRPLF/rippled.git, synced 2025-12-06 17:27:55 +00:00
Merge branch 'master' of github.com:jedmccaleb/NewCoin
@@ -37,7 +37,7 @@ DatabaseCon::~DatabaseCon()

Application::Application() :
    mUNL(mIOService),
    mNetOps(mIOService, &mMasterLedger), mNodeCache(16384, 600), mHashedObjectStore(16384, 300),
    mNetOps(mIOService, &mMasterLedger), mTempNodeCache(16384, 90), mHashedObjectStore(16384, 300),
    mTxnDB(NULL), mLedgerDB(NULL), mWalletDB(NULL), mHashNodeDB(NULL), mNetNodeDB(NULL),
    mConnectionPool(mIOService), mPeerDoor(NULL), mRPCDoor(NULL)
{
@@ -51,6 +51,7 @@ extern int TxnDBCount, LedgerDBCount, WalletDBCount, HashNodeDBCount, NetNodeDBC
void Application::stop()
{
    mIOService.stop();
    mHashedObjectStore.bulkWrite();

    Log(lsINFO) << "Stopped: " << mIOService.stopped();
}

@@ -46,7 +46,7 @@ class Application
    LedgerAcquireMaster mMasterLedgerAcquire;
    TransactionMaster mMasterTransaction;
    NetworkOPs mNetOps;
    NodeCache mNodeCache;
    NodeCache mTempNodeCache;
    ValidationCollection mValidations;
    SuppressionTable mSuppressions;
    HashedObjectStore mHashedObjectStore;
@@ -80,7 +80,7 @@ public:
    LedgerMaster& getMasterLedger() { return mMasterLedger; }
    LedgerAcquireMaster& getMasterLedgerAcquire() { return mMasterLedgerAcquire; }
    TransactionMaster& getMasterTransaction() { return mMasterTransaction; }
    NodeCache& getNodeCache() { return mNodeCache; }
    NodeCache& getTempNodeCache() { return mTempNodeCache; }
    HashedObjectStore& getHashedObjectStore() { return mHashedObjectStore; }
    ValidationCollection& getValidations() { return mValidations; }
    bool isNew(const uint256& s) { return mSuppressions.addSuppression(s); }

@@ -7,65 +7,80 @@
#include "Application.h"
#include "Log.h"

bool HashedObject::checkHash() const
HashedObjectStore::HashedObjectStore(int cacheSize, int cacheAge) :
    mCache(cacheSize, cacheAge), mWritePending(false)
{
    uint256 hash = Serializer::getSHA512Half(mData);
    return hash == mHash;
    mWriteSet.reserve(128);
}

bool HashedObject::checkFixHash()
{
    uint256 hash = Serializer::getSHA512Half(mData);
    if (hash == mHash) return true;
    mHash = hash;
    return false;
}

void HashedObject::setHash()
{
    mHash = Serializer::getSHA512Half(mData);
}

// FIXME: Stores should be added to a queue that's serviced by an auxiliary thread or from an
// auxiliary thread pool. These should be tied into a cache, since you need one to handle
// an immediate read back (before the write completes)

bool HashedObjectStore::store(HashedObjectType type, uint32 index,
    const std::vector<unsigned char>& data, const uint256& hash)
{
{ // return: false=already in cache, true = added to cache
    if (!theApp->getHashNodeDB()) return true;
    HashedObject::pointer object = boost::make_shared<HashedObject>(type, index, data);
    object->setHash();
    if (object->getHash() != hash)
        throw std::runtime_error("Object added to store doesn't have valid hash");

    std::string sql = "INSERT INTO CommittedObjects (Hash,ObjType,LedgerIndex,Object) VALUES ('";
    sql.append(hash.GetHex());
    switch(type)
    if (mCache.touch(hash))
    {
        case LEDGER: sql.append("','L','"); break;
        case TRANSACTION: sql.append("','T','"); break;
        case ACCOUNT_NODE: sql.append("','A','"); break;
        case TRANSACTION_NODE: sql.append("','N','"); break;
        default: sql.append("','U','"); break;
        Log(lsTRACE) << "HOS: " << hash.GetHex() << " store: incache";
        return false;
    }
    sql.append(boost::lexical_cast<std::string>(index));
    sql.append("',");
    std::string obj;
    theApp->getHashNodeDB()->getDB()->escape(&(data.front()), data.size(), obj);
    sql.append(obj);
    sql.append(");");

    std::string exists =
        boost::str(boost::format("SELECT ObjType FROM CommittedObjects WHERE Hash = '%s';") % hash.GetHex());
    HashedObject::pointer object = boost::make_shared<HashedObject>(type, index, data, hash);

    {
        boost::recursive_mutex::scoped_lock sl(mWriteMutex);
        mWriteSet.push_back(object);
        if (!mWritePending && (mWriteSet.size() >= 64))
        {
            mWritePending = true;
            boost::thread t(boost::bind(&HashedObjectStore::bulkWrite, this));
            t.detach();
        }
    }
    Log(lsTRACE) << "HOS: " << hash.GetHex() << " store: deferred";
    return true;
}

void HashedObjectStore::bulkWrite()
{
    std::vector< boost::shared_ptr<HashedObject> > set;
    set.reserve(128);

    {
        boost::recursive_mutex::scoped_lock sl(mWriteMutex);
        mWriteSet.swap(set);
        mWritePending = false;
    }
    Log(lsINFO) << "HOS: BulkWrite " << set.size();

    boost::format fExists("SELECT ObjType FROM CommittedObjects WHERE Hash = '%s';");
    boost::format fAdd("INSERT INTO CommittedObjects (Hash,ObjType,LedgerIndex,Object) VALUES ('%s','%c','%u',%s);");

    ScopedLock sl(theApp->getHashNodeDB()->getDBLock());
    if (mCache.canonicalize(hash, object))
        return false;
    Database* db = theApp->getHashNodeDB()->getDB();
    if (SQL_EXISTS(db, exists))
        return false;
    return db->executeSQL(sql);
    ScopedLock sl = theApp->getHashNodeDB()->getDBLock();

    db->executeSQL("BEGIN TRANSACTION;");

    for (std::vector< boost::shared_ptr<HashedObject> >::iterator it = set.begin(), end = set.end(); it != end; ++it)
    {
        HashedObject& obj = **it;
        if (!SQL_EXISTS(db, boost::str(fExists % obj.getHash().GetHex())))
        {
            char type;
            switch(obj.getType())
            {
                case LEDGER: type = 'L'; break;
                case TRANSACTION: type = 'T'; break;
                case ACCOUNT_NODE: type = 'A'; break;
                case TRANSACTION_NODE: type = 'N'; break;
                default: type = 'U';
            }
            std::string rawData;
            db->escape(&(obj.getData().front()), obj.getData().size(), rawData);
            db->executeSQL(boost::str(fAdd % obj.getHash().GetHex() % type % obj.getIndex() % rawData ));
        }
    }

    db->executeSQL("END TRANSACTION;");
}

HashedObject::pointer HashedObjectStore::retrieve(const uint256& hash)
@@ -74,7 +89,11 @@ HashedObject::pointer HashedObjectStore::retrieve(const uint256& hash)
    {
        ScopedLock sl(theApp->getHashNodeDB()->getDBLock());
        obj = mCache.fetch(hash);
        if (obj) return obj;
        if (obj)
        {
            Log(lsTRACE) << "HOS: " << hash.GetHex() << " fetch: incache";
            return obj;
        }
    }

    if (!theApp || !theApp->getHashNodeDB()) return HashedObject::pointer();
@@ -90,7 +109,10 @@ HashedObject::pointer HashedObjectStore::retrieve(const uint256& hash)
    Database* db = theApp->getHashNodeDB()->getDB();

    if (!db->executeSQL(sql) || !db->startIterRows())
    {
        Log(lsTRACE) << "HOS: " << hash.GetHex() << " fetch: not in db";
        return HashedObject::pointer();
    }

    std::string type;
    db->getStr("ObjType", type);
@@ -115,26 +137,11 @@ HashedObject::pointer HashedObjectStore::retrieve(const uint256& hash)
            return HashedObject::pointer();
        }

        obj = boost::make_shared<HashedObject>(htype, index, data);
        obj->mHash = hash;
        obj = boost::make_shared<HashedObject>(htype, index, data, hash);
        mCache.canonicalize(hash, obj);
    }
#ifdef DEBUG
    assert(obj->checkHash());
#endif
    Log(lsTRACE) << "HOS: " << hash.GetHex() << " fetch: in db";
    return obj;
}

ScopedLock HashedObjectStore::beginBulk()
{
    ScopedLock sl(theApp->getHashNodeDB()->getDBLock());
    theApp->getHashNodeDB()->getDB()->executeSQL("BEGIN TRANSACTION;");
    return sl;
}

void HashedObjectStore::endBulk()
{
    theApp->getHashNodeDB()->getDB()->executeSQL("END TRANSACTION;");
}

// vim:ts=4

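The FIXME near the top of this file's diff describes the design that the new store()/bulkWrite() pair implements: stores accumulate in a mutex-guarded write set, a detached background thread flushes them in a single database transaction once enough pile up, and a cache answers reads that arrive before the flush lands. Below is a minimal standalone sketch of that deferred-write pattern, not the rippled code itself; the names DeferredWriter and Blob are hypothetical, std::thread and std::mutex stand in for the boost primitives used above, and the database write is reduced to a print statement.

// Standalone sketch, not rippled code: DeferredWriter and Blob are hypothetical names.
#include <cstdio>
#include <map>
#include <mutex>
#include <string>
#include <thread>
#include <utility>
#include <vector>

typedef std::vector<unsigned char> Blob;

class DeferredWriter
{
public:
    // Queue an object; kick off one background flush when enough have accumulated.
    void store(const std::string& hash, const Blob& data)
    {
        std::lock_guard<std::mutex> lock(mMutex);
        mCache[hash] = data;  // answers reads that arrive before the write lands
        mWriteSet.push_back(std::make_pair(hash, data));
        if (!mWritePending && mWriteSet.size() >= 64)
        {
            mWritePending = true;
            std::thread(&DeferredWriter::bulkWrite, this).detach();
        }
    }

    // Serve an immediate read-back from the cache while the write is still queued.
    bool fetch(const std::string& hash, Blob& out)
    {
        std::lock_guard<std::mutex> lock(mMutex);
        std::map<std::string, Blob>::const_iterator it = mCache.find(hash);
        if (it == mCache.end())
            return false;
        out = it->second;
        return true;
    }

    // Swap the pending set out under the lock, then write it without holding the lock.
    void bulkWrite()
    {
        std::vector< std::pair<std::string, Blob> > set;
        {
            std::lock_guard<std::mutex> lock(mMutex);
            set.swap(mWriteSet);
            mWritePending = false;
        }
        std::printf("bulk writing %zu objects\n", set.size());
        // ... a single database transaction inserting every member of 'set' would go here ...
    }

private:
    std::mutex mMutex;
    std::map<std::string, Blob> mCache;
    std::vector< std::pair<std::string, Blob> > mWriteSet;
    bool mWritePending = false;
};

int main()
{
    DeferredWriter writer;
    Blob data(32, 0x42);
    writer.store("abc123", data);
    Blob back;
    std::printf("read back: %s\n", writer.fetch("abc123", back) ? "hit" : "miss");
    return 0;
}

The key design point, mirrored from bulkWrite() above, is that the pending set is swapped out under the lock and written without holding it, so callers of store() are never blocked behind the database.
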
@@ -27,15 +27,13 @@ public:
    uint32 mLedgerIndex;
    std::vector<unsigned char> mData;

    HashedObject(HashedObjectType type, uint32 index, const std::vector<unsigned char>& data) :
        mType(type), mLedgerIndex(index), mData(data) { ; }
    HashedObject(HashedObjectType type, uint32 index, const std::vector<unsigned char>& data, const uint256& hash) :
        mType(type), mHash(hash), mLedgerIndex(index), mData(data) { ; }

    bool checkHash() const;
    bool checkFixHash();
    void setHash();

    const std::vector<unsigned char>& getData() { return mData; }
    const uint256& getHash() { return mHash; }
    const std::vector<unsigned char>& getData() { return mData; }
    const uint256& getHash() { return mHash; }
    HashedObjectType getType() { return mType; }
    uint32 getIndex() { return mLedgerIndex; }
};

class HashedObjectStore
@@ -43,33 +41,20 @@ class HashedObjectStore
protected:
    TaggedCache<uint256, HashedObject> mCache;

    boost::recursive_mutex mWriteMutex;
    std::vector< boost::shared_ptr<HashedObject> > mWriteSet;
    bool mWritePending;

public:

    HashedObjectStore(int cacheSize, int cacheAge) : mCache(cacheSize, cacheAge) { ; }
    HashedObjectStore(int cacheSize, int cacheAge);

    bool store(HashedObjectType type, uint32 index, const std::vector<unsigned char>& data,
        const uint256& hash);

    HashedObject::pointer retrieve(const uint256& hash);

    ScopedLock beginBulk();
    void endBulk();
};

class HashedObjectBulkWriter
{
protected:
    HashedObjectStore& mStore;
    ScopedLock sl;

public:
    HashedObjectBulkWriter(HashedObjectStore& ostore) : mStore(ostore), sl(mStore.beginBulk()) { ; }
    ~HashedObjectBulkWriter() { mStore.endBulk(); }

    bool store(HashedObjectType type, uint32 index, const std::vector<unsigned char>& data,
        const uint256& hash) { return mStore.store(type, index, data, hash); }

    HashedObject::pointer retrieve(const uint256& hash) { return mStore.retrieve(hash); }
    void bulkWrite();
};

#endif

@@ -5,6 +5,8 @@
#include <boost/lexical_cast.hpp>
#include <boost/make_shared.hpp>

#include "../json/writer.h"

#include "Application.h"
#include "Ledger.h"
#include "utils.h"
@@ -350,6 +352,11 @@ Ledger::pointer Ledger::getSQL(const std::string& sql)
        boost::make_shared<Ledger>(prevHash, transHash, accountHash, totCoins, closingTime, ledgerSeq);
    if (ret->getHash() != ledgerHash)
    {
        Json::StyledStreamWriter ssw;
        Log(lsERROR) << "Failed on ledger";
        Json::Value p;
        ret->addJson(p, LEDGER_JSON_FULL);
        ssw.write(Log(lsERROR).ref(), p);
        assert(false);
        return Ledger::pointer();
    }
@@ -379,7 +386,8 @@ void Ledger::addJson(Json::Value& ret, int options)
    boost::recursive_mutex::scoped_lock sl(mLock);
    ledger["parentHash"] = mParentHash.GetHex();

    if(mClosed)
    bool full = (options & LEDGER_JSON_FULL) != 0;
    if(mClosed || full)
    {
        ledger["hash"] = mHash.GetHex();
        ledger["transactionHash"] = mTransHash.GetHex();
@@ -391,8 +399,7 @@ void Ledger::addJson(Json::Value& ret, int options)
    else ledger["closed"] = false;
    if (mCloseTime != 0)
        ledger["closeTime"] = boost::posix_time::to_simple_string(ptFromSeconds(mCloseTime));
    bool full = (options & LEDGER_JSON_FULL) != 0;
    if (full || ((options & LEDGER_JSON_DUMP_TXNS) != 0))
    if (mTransactionMap && (full || ((options & LEDGER_JSON_DUMP_TXNS) != 0)))
    {
        Json::Value txns(Json::arrayValue);
        for (SHAMapItem::pointer item = mTransactionMap->peekFirstItem(); !!item;
@@ -408,7 +415,7 @@ void Ledger::addJson(Json::Value& ret, int options)
        }
        ledger["transactions"] = txns;
    }
    if (full || ((options & LEDGER_JSON_DUMP_STATE) != 0))
    if (mAccountStateMap && (full || ((options & LEDGER_JSON_DUMP_STATE) != 0)))
    {
        Json::Value state(Json::arrayValue);
        for (SHAMapItem::pointer item = mAccountStateMap->peekFirstItem(); !!item;

@@ -7,6 +7,7 @@

#include "Application.h"
#include "Log.h"
#include "SHAMapSync.h"

#define LA_DEBUG
#define LEDGER_ACQUIRE_TIMEOUT 2
@@ -82,7 +83,7 @@ void PeerSet::TimerEntry(boost::weak_ptr<PeerSet> wptr, const boost::system::err
}

LedgerAcquire::LedgerAcquire(const uint256& hash) : PeerSet(hash, LEDGER_ACQUIRE_TIMEOUT),
    mFilter(&theApp->getNodeCache()), mHaveBase(false), mHaveState(false), mHaveTransactions(false)
    mHaveBase(false), mHaveState(false), mHaveTransactions(false)
{
#ifdef LA_DEBUG
    Log(lsTRACE) << "Acquiring ledger " << mHash.GetHex();
@@ -168,7 +169,8 @@ void LedgerAcquire::trigger(Peer::pointer peer)
    {
        std::vector<SHAMapNode> nodeIDs;
        std::vector<uint256> nodeHashes;
        mLedger->peekTransactionMap()->getMissingNodes(nodeIDs, nodeHashes, 128, &mFilter);
        TransactionStateSF tFilter(mLedger->getHash(), mLedger->getLedgerSeq());
        mLedger->peekTransactionMap()->getMissingNodes(nodeIDs, nodeHashes, 128, &tFilter);
        if (nodeIDs.empty())
        {
            if (!mLedger->peekTransactionMap()->isValid()) mFailed = true;
@@ -220,7 +222,8 @@ void LedgerAcquire::trigger(Peer::pointer peer)
    {
        std::vector<SHAMapNode> nodeIDs;
        std::vector<uint256> nodeHashes;
        mLedger->peekAccountStateMap()->getMissingNodes(nodeIDs, nodeHashes, 128, &mFilter);
        AccountStateSF aFilter(mLedger->getHash(), mLedger->getLedgerSeq());
        mLedger->peekAccountStateMap()->getMissingNodes(nodeIDs, nodeHashes, 128, &aFilter);
        if (nodeIDs.empty())
        {
            if (!mLedger->peekAccountStateMap()->isValid()) mFailed = true;
@@ -297,6 +300,7 @@ bool LedgerAcquire::takeBase(const std::string& data, Peer::pointer peer)
        return false;
    }
    mHaveBase = true;
    theApp->getHashedObjectStore().store(LEDGER, mLedger->getLedgerSeq(), strCopy(data), mHash);
    progress();
    if (!mLedger->getTransHash()) mHaveTransactions = true;
    if (!mLedger->getAccountHash()) mHaveState = true;
@@ -311,6 +315,7 @@ bool LedgerAcquire::takeTxNode(const std::list<SHAMapNode>& nodeIDs,
    if (!mHaveBase) return false;
    std::list<SHAMapNode>::const_iterator nodeIDit = nodeIDs.begin();
    std::list< std::vector<unsigned char> >::const_iterator nodeDatait = data.begin();
    TransactionStateSF tFilter(mLedger->getHash(), mLedger->getLedgerSeq());
    while (nodeIDit != nodeIDs.end())
    {
        if (nodeIDit->isRoot())
@@ -318,7 +323,7 @@ bool LedgerAcquire::takeTxNode(const std::list<SHAMapNode>& nodeIDs,
            if (!mLedger->peekTransactionMap()->addRootNode(mLedger->getTransHash(), *nodeDatait))
                return false;
        }
        else if (!mLedger->peekTransactionMap()->addKnownNode(*nodeIDit, *nodeDatait, &mFilter))
        else if (!mLedger->peekTransactionMap()->addKnownNode(*nodeIDit, *nodeDatait, &tFilter))
            return false;
        ++nodeIDit;
        ++nodeDatait;
@@ -342,6 +347,7 @@ bool LedgerAcquire::takeAsNode(const std::list<SHAMapNode>& nodeIDs,
    if (!mHaveBase) return false;
    std::list<SHAMapNode>::const_iterator nodeIDit = nodeIDs.begin();
    std::list< std::vector<unsigned char> >::const_iterator nodeDatait = data.begin();
    AccountStateSF tFilter(mLedger->getHash(), mLedger->getLedgerSeq());
    while (nodeIDit != nodeIDs.end())
    {
        if (nodeIDit->isRoot())
@@ -349,7 +355,7 @@ bool LedgerAcquire::takeAsNode(const std::list<SHAMapNode>& nodeIDs,
            if (!mLedger->peekAccountStateMap()->addRootNode(mLedger->getAccountHash(), *nodeDatait))
                return false;
        }
        else if (!mLedger->peekAccountStateMap()->addKnownNode(*nodeIDit, *nodeDatait, &mFilter))
        else if (!mLedger->peekAccountStateMap()->addKnownNode(*nodeIDit, *nodeDatait, &tFilter))
            return false;
        ++nodeIDit;
        ++nodeDatait;

@@ -58,31 +58,6 @@ private:
    static void TimerEntry(boost::weak_ptr<PeerSet>, const boost::system::error_code& result);
};

typedef TaggedCache< uint256, std::vector<unsigned char> > NodeCache;
typedef std::vector<unsigned char> VUC;

class THSyncFilter : public SHAMapSyncFilter
{
protected:
    NodeCache* mCache; // holds nodes we see during the consensus process

public:
    THSyncFilter(NodeCache* cache) : mCache(cache) { ; }
    virtual void gotNode(const SHAMapNode& id, const uint256& nodeHash,
        const std::vector<unsigned char>& nodeData, bool)
    {
        boost::shared_ptr<VUC> ptr = boost::make_shared<VUC>(nodeData);
        mCache->canonicalize(nodeHash, ptr);
    }
    virtual bool haveNode(const SHAMapNode& id, const uint256& nodeHash, std::vector<unsigned char>& nodeData)
    {
        boost::shared_ptr<VUC> entry = mCache->fetch(nodeHash);
        if (!entry) return false;
        nodeData = *entry;
        return true;
    }
};

class LedgerAcquire : public PeerSet, public boost::enable_shared_from_this<LedgerAcquire>
{ // A ledger we are trying to acquire
public:
@@ -90,7 +65,6 @@ public:

protected:
    Ledger::pointer mLedger;
    THSyncFilter mFilter;
    bool mHaveBase, mHaveState, mHaveTransactions;

    std::vector< boost::function<void (LedgerAcquire::pointer)> > mOnComplete;

@@ -11,13 +11,13 @@
#include "LedgerTiming.h"
#include "SerializedValidation.h"
#include "Log.h"
#include "SHAMapSync.h"

#define TRUST_NETWORK

// #define LC_DEBUG

TransactionAcquire::TransactionAcquire(const uint256& hash)
    : PeerSet(hash, 1), mFilter(&theApp->getNodeCache()), mHaveRoot(false)
TransactionAcquire::TransactionAcquire(const uint256& hash) : PeerSet(hash, 1), mHaveRoot(false)
{
    mMap = boost::make_shared<SHAMap>();
    mMap->setSynching();
@@ -50,9 +50,9 @@ void TransactionAcquire::trigger(Peer::pointer peer)
    }
    if (mHaveRoot)
    {
        std::vector<SHAMapNode> nodeIDs;
        std::vector<uint256> nodeHashes;
        mMap->getMissingNodes(nodeIDs, nodeHashes, 256, &mFilter);
        std::vector<SHAMapNode> nodeIDs; std::vector<uint256> nodeHashes;
        ConsensusTransSetSF sf;
        mMap->getMissingNodes(nodeIDs, nodeHashes, 256, &sf);
        if (nodeIDs.empty())
        {
            if (mMap->isValid())
@@ -91,6 +91,7 @@ bool TransactionAcquire::takeNodes(const std::list<SHAMapNode>& nodeIDs,
{
    std::list<SHAMapNode>::const_iterator nodeIDit = nodeIDs.begin();
    std::list< std::vector<unsigned char> >::const_iterator nodeDatait = data.begin();
    ConsensusTransSetSF sf;
    while (nodeIDit != nodeIDs.end())
    {
        if (nodeIDit->isRoot())
@@ -104,7 +105,7 @@ bool TransactionAcquire::takeNodes(const std::list<SHAMapNode>& nodeIDs,
                return false;
            else mHaveRoot = true;
        }
        else if (!mMap->addKnownNode(*nodeIDit, *nodeDatait, &mFilter))
        else if (!mMap->addKnownNode(*nodeIDit, *nodeDatait, &sf))
            return false;
        ++nodeIDit;
        ++nodeDatait;

@@ -22,7 +22,6 @@ public:

protected:
    SHAMap::pointer mMap;
    THSyncFilter mFilter; // FIXME: Should use transaction master too
    bool mHaveRoot;

    void onTimer() { trigger(Peer::pointer()); }

@@ -60,9 +60,11 @@ Ledger::pointer LedgerHistory::getLedgerByHash(const uint256& hash)
    Ledger::pointer ret = mLedgersByHash.fetch(hash);
    if (ret) return ret;

    ret = Ledger::loadByHash(hash);
    if (!ret) return ret;
    assert(ret->getHash() == hash);
    // FIXME: A ledger without SHA maps isn't very useful
    // This code will need to build them
    // ret = Ledger::loadByHash(hash);
    // if (!ret) return ret;
    // assert(ret->getHash() == hash);

    boost::recursive_mutex::scoped_lock sl(mLedgersByHash.peekMutex());
    mLedgersByHash.canonicalize(hash, ret);

@@ -642,14 +642,13 @@ int SHAMap::flushDirty(int maxNodes, HashedObjectType t, uint32 seq)

    if(mDirtyNodes)
    {
        HashedObjectBulkWriter bw(theApp->getHashedObjectStore());
        boost::unordered_map<SHAMapNode, SHAMapTreeNode::pointer>& dirtyNodes = *mDirtyNodes;
        boost::unordered_map<SHAMapNode, SHAMapTreeNode::pointer>::iterator it = dirtyNodes.begin();
        while (it != dirtyNodes.end())
        {
            s.erase();
            it->second->addRaw(s);
            bw.store(t, seq, s.peekData(), s.getSHA512Half());
            theApp->getHashedObjectStore().store(t, seq, s.peekData(), s.getSHA512Half());
            if (flushed++ >= maxNodes)
                return flushed;
            it = dirtyNodes.erase(it);

src/SHAMapSync.h (new file, 69 lines)
@@ -0,0 +1,69 @@
#ifndef __SHAMAPSYNC__
#define __SHAMAPSYNC__

#include "SHAMap.h"
#include "Application.h"

// Sync filters allow low-level SHAMapSync code to interact correctly with
// higher-level structures such as caches and transaction stores

class ConsensusTransSetSF : public SHAMapSyncFilter
{ // sync filter for transaction sets during consensus building
public:
    ConsensusTransSetSF() { ; }
    virtual void gotNode(const SHAMapNode& id, const uint256& nodeHash,
        const std::vector<unsigned char>& nodeData, bool isLeaf)
    {
        // WRITEME: If 'isLeaf' is true, this is a transaction
        theApp->getTempNodeCache().store(nodeHash, nodeData);
    }
    virtual bool haveNode(const SHAMapNode& id, const uint256& nodeHash, std::vector<unsigned char>& nodeData)
    {
        // WRITEME: We could check our own map, we could check transaction tables
        return theApp->getTempNodeCache().retrieve(nodeHash, nodeData);
    }
};

class AccountStateSF : public SHAMapSyncFilter
{ // sync filter for account state nodes during ledger sync
protected:
    uint256 mLedgerHash;
    uint32 mLedgerSeq;

public:
    AccountStateSF(const uint256& ledgerHash, uint32 ledgerSeq) : mLedgerHash(ledgerHash), mLedgerSeq(ledgerSeq)
    { ; }

    virtual void gotNode(const SHAMapNode& id, const uint256& nodeHash,
        const std::vector<unsigned char>& nodeData, bool isLeaf)
    {
        theApp->getHashedObjectStore().store(ACCOUNT_NODE, mLedgerSeq, nodeData, nodeHash);
    }
    virtual bool haveNode(const SHAMapNode& id, const uint256& nodeHash, std::vector<unsigned char>& nodeData)
    { // fetchNode already tried
        return false;
    }
};

class TransactionStateSF : public SHAMapSyncFilter
{ // sync filter for transactions tree during ledger sync
protected:
    uint256 mLedgerHash;
    uint32 mLedgerSeq;

public:
    TransactionStateSF(const uint256& ledgerHash, uint32 ledgerSeq) : mLedgerHash(ledgerHash), mLedgerSeq(ledgerSeq)
    { ; }

    virtual void gotNode(const SHAMapNode& id, const uint256& nodeHash,
        const std::vector<unsigned char>& nodeData, bool isLeaf)
    {
        theApp->getHashedObjectStore().store(isLeaf ? TRANSACTION : TRANSACTION_NODE, mLedgerSeq, nodeData, nodeHash);
    }
    virtual bool haveNode(const SHAMapNode& id, const uint256& nodeHash, std::vector<unsigned char>& nodeData)
    { // fetchNode already tried
        return false;
    }
};

#endif

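The comment at the top of this new header states the contract: the low-level sync walk hands every node it learns to gotNode() so higher layers can cache or persist it, and asks haveNode() for a chance to satisfy a node locally before going to the network. The sketch below illustrates that call pattern in isolation; SyncFilter, acquireNode, and LoggingFilter are hypothetical simplifications (std::string stands in for uint256 and SHAMapNode), not the SHAMapSyncFilter interface or the real sync driver.

// Simplified sketch of how a sync driver consults a filter; hypothetical types.
#include <iostream>
#include <string>
#include <vector>

typedef std::vector<unsigned char> Blob;

struct SyncFilter
{
    virtual ~SyncFilter() {}
    // Called when a node arrives from the network, so higher layers can cache or persist it.
    virtual void gotNode(const std::string& nodeHash, const Blob& nodeData, bool isLeaf) = 0;
    // Gives higher layers a chance to supply the node locally before asking the network.
    virtual bool haveNode(const std::string& nodeHash, Blob& nodeData) = 0;
};

// Toy driver: try the filter first, otherwise "fetch" the node and report it back.
static Blob acquireNode(const std::string& nodeHash, bool isLeaf, SyncFilter& filter)
{
    Blob data;
    if (filter.haveNode(nodeHash, data))
        return data;                        // satisfied locally, no network round trip
    data.assign(4, 0x00);                   // placeholder for data fetched from a peer
    filter.gotNode(nodeHash, data, isLeaf); // let the filter cache/store what we learned
    return data;
}

// A filter that just logs, standing in for the ConsensusTransSetSF / AccountStateSF roles above.
struct LoggingFilter : SyncFilter
{
    void gotNode(const std::string& nodeHash, const Blob&, bool isLeaf)
    { std::cout << "got " << nodeHash << (isLeaf ? " (leaf)\n" : "\n"); }
    bool haveNode(const std::string&, Blob&) { return false; }
};

int main()
{
    LoggingFilter filter;
    acquireNode("deadbeef", true, filter);
    return 0;
}
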
@@ -6,6 +6,7 @@
#include <boost/thread/recursive_mutex.hpp>
#include <boost/unordered_map.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>

// This class implements a cache and a map. The cache keeps objects alive
// in the map. The map allows multiple code paths that reference objects

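The comment above summarizes TaggedCache's two-tier design: a timed cache of strong pointers keeps recent objects alive, while a map of weak pointers lets every code path holding a given key converge on one canonical instance, which is what the mCache.canonicalize(hash, obj) calls in HashedObjectStore and LedgerHistory rely on. The following is a minimal sketch of just that canonicalize behaviour; CanonicalMap is a hypothetical name, std::shared_ptr replaces boost, and the timed-expiry cache tier is omitted.

// Minimal sketch of the canonicalize idea, not the TaggedCache API.
#include <iostream>
#include <map>
#include <memory>
#include <string>

template <typename Key, typename Data>
class CanonicalMap
{
public:
    // If the key is already tracked and still alive, swap the caller's pointer for the
    // canonical instance and return true; otherwise adopt the caller's object.
    bool canonicalize(const Key& key, std::shared_ptr<Data>& data)
    {
        typename std::map< Key, std::weak_ptr<Data> >::iterator it = mMap.find(key);
        if (it != mMap.end())
        {
            std::shared_ptr<Data> existing = it->second.lock();
            if (existing)
            {
                data = existing;   // every caller now shares one instance
                return true;
            }
        }
        mMap[key] = data;          // weak reference only; the caller keeps it alive
        return false;
    }

private:
    std::map< Key, std::weak_ptr<Data> > mMap;
};

int main()
{
    CanonicalMap<std::string, int> cmap;
    std::shared_ptr<int> a = std::make_shared<int>(7);
    std::shared_ptr<int> b = std::make_shared<int>(7);
    cmap.canonicalize("seven", a);                 // first sight: adopted
    bool replaced = cmap.canonicalize("seven", b); // second sight: b now aliases a
    std::cout << replaced << " " << (a == b) << "\n";  // prints "1 1"
    return 0;
}
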
@@ -121,7 +122,7 @@ template<typename c_Key, typename c_Data> bool TaggedCache<c_Key, c_Data>::touch
    // Is the object in the map?
    typename boost::unordered_map<key_type, weak_data_ptr>::iterator mit = mMap.find(key);
    if (mit == mMap.end()) return false;
    if (mit->second->expired())
    if (mit->second.expired())
    { // in map, but expired
        mMap.erase(mit);
        return false;
@@ -131,12 +132,12 @@ template<typename c_Key, typename c_Data> bool TaggedCache<c_Key, c_Data>::touch
    typename boost::unordered_map<key_type, cache_entry>::iterator cit = mCache.find(key);
    if (cit != mCache.end())
    { // in both map and cache
        cit->second->first = time(NULL);
        cit->second.first = time(NULL);
        return true;
    }

    // In map but not cache, put in cache
    mCache.insert(std::make_pair(key, std::make_pair(time(NULL), weak_data_ptr(cit->second->second))));
    mCache.insert(std::make_pair(key, std::make_pair(time(NULL), weak_data_ptr(cit->second.second))));
    return true;
}

@@ -213,9 +214,8 @@ boost::shared_ptr<c_Data> TaggedCache<c_Key, c_Data>::fetch(const key_type& key)
template<typename c_Key, typename c_Data>
bool TaggedCache<c_Key, c_Data>::store(const key_type& key, const c_Data& data)
{
    if (!canonicalize(key, boost::shared_ptr<c_Data>(data)))
        return false;
    return true;
    boost::shared_ptr<c_Data> d = boost::make_shared<c_Data>(boost::ref(data));
    return canonicalize(key, d);
}

template<typename c_Key, typename c_Data>

@@ -76,7 +76,21 @@ bool Wallet::nodeIdentityCreate() {

    // Make new key.

#ifdef CREATE_NEW_DH_PARAMS
    std::string strDh512 = DH_der_gen(512);
#else
    static char dh512Param[] = {
        0x30, 0x46, 0x02, 0x41, 0x00, 0x98, 0x15, 0xd2, 0xd0, 0x08, 0x32, 0xda,
        0xaa, 0xac, 0xc4, 0x71, 0xa3, 0x1b, 0x11, 0xf0, 0x6c, 0x62, 0xb2, 0x35,
        0x8a, 0x10, 0x92, 0xc6, 0x0a, 0xa3, 0x84, 0x7e, 0xaf, 0x17, 0x29, 0x0b,
        0x70, 0xef, 0x07, 0x4f, 0xfc, 0x9d, 0x6d, 0x87, 0x99, 0x19, 0x09, 0x5b,
        0x6e, 0xdb, 0x57, 0x72, 0x4a, 0x7e, 0xcd, 0xaf, 0xbd, 0x3a, 0x97, 0x55,
        0x51, 0x77, 0x5a, 0x34, 0x7c, 0xe8, 0xc5, 0x71, 0x63, 0x02, 0x01, 0x02
    };
    std::string strDh512(dh512Param, sizeof(dh512Param));
#endif


#if 1
    std::string strDh1024 = strDh512; // For testing and most cases 512 is fine.
#else
