Mirror of https://github.com/XRPLF/rippled.git, synced 2025-12-06 01:07:57 +00:00

Cleanly destroy Application on exit
(One file's diff was suppressed because it is too large.)
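Every hunk that follows makes the same mechanical substitution: call sites stop dereferencing the global Application pointer (theApp->getXxx ()) and instead call the getApp () accessor (getApp().getXxx ()). Routing access through a function is what lets the Application instance be owned in one place and destroyed in an orderly way at exit, per the commit title. The sketch below is a minimal illustration of that accessor-over-global-pointer pattern under assumed names (the Application members and the applicationHolder () helper are hypothetical); it is not the actual rippled implementation.

// Minimal sketch (assumed, not rippled's real code) of replacing a global
// pointer with an accessor so the singleton can be torn down in order.

#include <cassert>
#include <memory>

class Application
{
public:
    ~Application () { /* flush databases, stop job queues, etc. */ }
    void run () { /* main loop */ }
    // The real class exposes getLedgerMaster (), getJobQueue (), and so on.
};

// Single owner of the instance; a function-local static avoids the
// unspecified construction order of namespace-scope globals.
static std::unique_ptr<Application>& applicationHolder ()
{
    static std::unique_ptr<Application> holder;
    return holder;
}

// What the call sites in this diff use: getApp().getXxx ()...
Application& getApp ()
{
    assert (applicationHolder () != nullptr); // must be created before use
    return *applicationHolder ();
}

int main ()
{
    applicationHolder ().reset (new Application);   // construct at startup
    getApp ().run ();
    applicationHolder ().reset ();                  // destroy cleanly on exit
    return 0;
}

Call sites keep the ergonomics of a single global entry point, but the moment of destruction becomes explicit: whoever owns the holder decides when the Application and everything it manages are torn down, which a raw theApp pointer that is never deleted cannot express.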
@@ -270,7 +270,7 @@ static const uint64 tenTo17m1 = tenTo17 - 1;
 static DH* handleTmpDh (SSL* ssl, int is_export, int iKeyLength)
 {
     // VFALCO TODO eliminate this horrendous dependency on theApp and LocalCredentials
-    return 512 == iKeyLength ? theApp->getLocalCredentials ().getDh512 () : theApp->getLocalCredentials ().getDh1024 ();
+    return 512 == iKeyLength ? getApp().getLocalCredentials ().getDh512 () : getApp().getLocalCredentials ().getDh1024 ();
 }

 #include "src/cpp/ripple/ripple_RippleCalc.cpp"

@@ -94,10 +94,10 @@ TER ChangeTransactor::applyFeature ()
     featureObject->setFieldV256 (sfFeatures, features);
     mEngine->entryModify (featureObject);

-    theApp->getFeatureTable ().enableFeature (feature);
+    getApp().getFeatureTable ().enableFeature (feature);

-    if (!theApp->getFeatureTable ().isFeatureSupported (feature))
-        theApp->getOPs ().setFeatureBlocked ();
+    if (!getApp().getFeatureTable ().isFeatureSupported (feature))
+        getApp().getOPs ().setFeatureBlocked ();

     return tesSUCCESS;
 }

@@ -135,7 +135,7 @@ Ledger::Ledger (bool /* dummy */,

     if (prevLedger.mCloseTime == 0)
     {
-        mCloseTime = roundCloseTime (theApp->getOPs ().getCloseTimeNC (), mCloseResolution);
+        mCloseTime = roundCloseTime (getApp().getOPs ().getCloseTimeNC (), mCloseResolution);
     }
     else
     {
@@ -345,7 +345,7 @@ Transaction::pointer Ledger::getTransaction (uint256 const& transID) const

     if (!item) return Transaction::pointer ();

-    Transaction::pointer txn = theApp->getMasterTransaction ().fetch (transID, false);
+    Transaction::pointer txn = getApp().getMasterTransaction ().fetch (transID, false);

     if (txn)
         return txn;
@@ -371,7 +371,7 @@ Transaction::pointer Ledger::getTransaction (uint256 const& transID) const
     if (txn->getStatus () == NEW)
         txn->setStatus (mClosed ? COMMITTED : INCLUDED, mLedgerSeq);

-    theApp->getMasterTransaction ().canonicalize (txn, false);
+    getApp().getMasterTransaction ().canonicalize (txn, false);
     return txn;
 }

@@ -425,7 +425,7 @@ bool Ledger::getTransaction (uint256 const& txID, Transaction::pointer& txn, Tra
     if (type == SHAMapTreeNode::tnTRANSACTION_NM)
     {
         // in tree with no metadata
-        txn = theApp->getMasterTransaction ().fetch (txID, false);
+        txn = getApp().getMasterTransaction ().fetch (txID, false);
         meta.reset ();

         if (!txn)
@@ -435,7 +435,7 @@ bool Ledger::getTransaction (uint256 const& txID, Transaction::pointer& txn, Tra
     {
         // in tree with metadata
         SerializerIterator it (item->peekSerializer ());
-        txn = theApp->getMasterTransaction ().fetch (txID, false);
+        txn = getApp().getMasterTransaction ().fetch (txID, false);

         if (!txn)
             txn = Transaction::sharedTransaction (it.getVL (), true);
@@ -450,7 +450,7 @@ bool Ledger::getTransaction (uint256 const& txID, Transaction::pointer& txn, Tra
     if (txn->getStatus () == NEW)
         txn->setStatus (mClosed ? COMMITTED : INCLUDED, mLedgerSeq);

-    theApp->getMasterTransaction ().canonicalize (txn, false);
+    getApp().getMasterTransaction ().canonicalize (txn, false);
     return true;
 }

@@ -531,18 +531,18 @@ void Ledger::saveAcceptedLedger (Job&, bool fromConsensus)
     Serializer s (128);
     s.add32 (HashPrefix::ledgerMaster);
     addRaw (s);
-    theApp->getHashedObjectStore ().store (hotLEDGER, mLedgerSeq, s.peekData (), mHash);
+    getApp().getHashedObjectStore ().store (hotLEDGER, mLedgerSeq, s.peekData (), mHash);

     AcceptedLedger::pointer aLedger = AcceptedLedger::makeAcceptedLedger (shared_from_this ());

     {
-        ScopedLock sl (theApp->getLedgerDB ()->getDBLock ());
-        theApp->getLedgerDB ()->getDB ()->executeSQL (boost::str (deleteLedger % mLedgerSeq));
+        ScopedLock sl (getApp().getLedgerDB ()->getDBLock ());
+        getApp().getLedgerDB ()->getDB ()->executeSQL (boost::str (deleteLedger % mLedgerSeq));
     }

     {
-        Database* db = theApp->getTxnDB ()->getDB ();
-        ScopedLock dbLock (theApp->getTxnDB ()->getDBLock ());
+        Database* db = getApp().getTxnDB ()->getDB ();
+        ScopedLock dbLock (getApp().getTxnDB ()->getDBLock ());
         db->executeSQL ("BEGIN TRANSACTION;");

         db->executeSQL (boost::str (deleteTrans1 % mLedgerSeq));
@@ -551,7 +551,7 @@ void Ledger::saveAcceptedLedger (Job&, bool fromConsensus)
         BOOST_FOREACH (const AcceptedLedger::value_type & vt, aLedger->getMap ())
         {
             uint256 txID = vt.second->getTransactionID ();
-            theApp->getMasterTransaction ().inLedger (txID, mLedgerSeq);
+            getApp().getMasterTransaction ().inLedger (txID, mLedgerSeq);

             db->executeSQL (boost::str (deleteAcctTrans % txID.GetHex ()));

@@ -596,8 +596,8 @@ void Ledger::saveAcceptedLedger (Job&, bool fromConsensus)
     }

     {
-        ScopedLock sl (theApp->getLedgerDB ()->getDBLock ());
-        theApp->getLedgerDB ()->getDB ()->executeSQL (boost::str (addLedger %
+        ScopedLock sl (getApp().getLedgerDB ()->getDBLock ());
+        getApp().getLedgerDB ()->getDB ()->executeSQL (boost::str (addLedger %
             getHash ().GetHex () % mLedgerSeq % mParentHash.GetHex () %
             boost::lexical_cast<std::string> (mTotCoins) % mCloseTime % mParentCloseTime %
             mCloseResolution % mCloseFlags % mAccountHash.GetHex () % mTransHash.GetHex ()));
@@ -606,8 +606,8 @@ void Ledger::saveAcceptedLedger (Job&, bool fromConsensus)
     if (!fromConsensus && (theConfig.NODE_SIZE < 2)) // tiny or small
         dropCache ();

-    if (theApp->getJobQueue ().getJobCountTotal (jtPUBOLDLEDGER) < 2)
-        theApp->getLedgerMaster ().resumeAcquiring ();
+    if (getApp().getJobQueue ().getJobCountTotal (jtPUBOLDLEDGER) < 2)
+        getApp().getLedgerMaster ().resumeAcquiring ();
     else
         WriteLog (lsTRACE, Ledger) << "no resume, too many pending ledger saves";
 }
@@ -618,8 +618,8 @@ Ledger::pointer Ledger::loadByIndex (uint32 ledgerIndex)
 {
     Ledger::pointer ledger;
     {
-        Database* db = theApp->getLedgerDB ()->getDB ();
-        ScopedLock sl (theApp->getLedgerDB ()->getDBLock ());
+        Database* db = getApp().getLedgerDB ()->getDB ();
+        ScopedLock sl (getApp().getLedgerDB ()->getDBLock ());

         SqliteStatement pSt (db->getSqliteDB (), "SELECT "
             "LedgerHash,PrevHash,AccountSetHash,TransSetHash,TotalCoins,"
@@ -643,8 +643,8 @@ Ledger::pointer Ledger::loadByHash (uint256 const& ledgerHash)
 {
     Ledger::pointer ledger;
     {
-        Database* db = theApp->getLedgerDB ()->getDB ();
-        ScopedLock sl (theApp->getLedgerDB ()->getDBLock ());
+        Database* db = getApp().getLedgerDB ()->getDB ();
+        ScopedLock sl (getApp().getLedgerDB ()->getDBLock ());

         SqliteStatement pSt (db->getSqliteDB (), "SELECT "
             "LedgerHash,PrevHash,AccountSetHash,TransSetHash,TotalCoins,"
@@ -698,8 +698,8 @@ Ledger::pointer Ledger::getSQL (const std::string& sql)
     std::string hash;

     {
-        Database* db = theApp->getLedgerDB ()->getDB ();
-        ScopedLock sl (theApp->getLedgerDB ()->getDBLock ());
+        Database* db = getApp().getLedgerDB ()->getDB ();
+        ScopedLock sl (getApp().getLedgerDB ()->getDBLock ());

         if (!db->executeSQL (sql) || !db->startIterRows ())
             return Ledger::pointer ();
@@ -731,7 +731,7 @@ Ledger::pointer Ledger::getSQL (const std::string& sql)

     ret->setClosed ();

-    if (theApp->getOPs ().haveLedger (ledgerSeq))
+    if (getApp().getOPs ().haveLedger (ledgerSeq))
         ret->setAccepted ();

     if (ret->getHash () != ledgerHash)
@@ -798,7 +798,7 @@ void Ledger::getSQL2 (Ledger::ref ret)
     ret->setClosed ();
     ret->setImmutable ();

-    if (theApp->getOPs ().haveLedger (ret->getLedgerSeq ()))
+    if (getApp().getOPs ().haveLedger (ret->getLedgerSeq ()))
         ret->setAccepted ();

     WriteLog (lsTRACE, Ledger) << "Loaded ledger: " << ret->getHash ().GetHex ();
@@ -814,8 +814,8 @@ uint256 Ledger::getHashByIndex (uint32 ledgerIndex)

     std::string hash;
     {
-        Database* db = theApp->getLedgerDB ()->getDB ();
-        ScopedLock sl (theApp->getLedgerDB ()->getDBLock ());
+        Database* db = getApp().getLedgerDB ()->getDB ();
+        ScopedLock sl (getApp().getLedgerDB ()->getDBLock ());

         if (!db->executeSQL (sql) || !db->startIterRows ())
             return ret;
@@ -832,7 +832,7 @@ bool Ledger::getHashesByIndex (uint32 ledgerIndex, uint256& ledgerHash, uint256&
 {
 #ifndef NO_SQLITE3_PREPARE

-    DatabaseCon* con = theApp->getLedgerDB ();
+    DatabaseCon* con = getApp().getLedgerDB ();
     ScopedLock sl (con->getDBLock ());

     SqliteStatement pSt (con->getDB ()->getSqliteDB (),
@@ -868,8 +868,8 @@ bool Ledger::getHashesByIndex (uint32 ledgerIndex, uint256& ledgerHash, uint256&

     std::string hash, prevHash;
     {
-        Database* db = theApp->getLedgerDB ()->getDB ();
-        ScopedLock sl (theApp->getLedgerDB ()->getDBLock ());
+        Database* db = getApp().getLedgerDB ()->getDB ();
+        ScopedLock sl (getApp().getLedgerDB ()->getDBLock ());

         if (!db->executeSQL (sql) || !db->startIterRows ())
             return false;
@@ -893,7 +893,7 @@ std::map< uint32, std::pair<uint256, uint256> > Ledger::getHashesByIndex (uint32
 {
 #ifndef NO_SQLITE_PREPARE
     std::map< uint32, std::pair<uint256, uint256> > ret;
-    DatabaseCon* con = theApp->getLedgerDB ();
+    DatabaseCon* con = getApp().getLedgerDB ();
     ScopedLock sl (con->getDBLock ());

     SqliteStatement pSt (con->getDB ()->getSqliteDB (),
@@ -1150,13 +1150,13 @@ SLE::pointer Ledger::getSLEi (uint256 const& uId)
     if (!node)
         return SLE::pointer ();

-    SLE::pointer ret = theApp->getSLECache ().fetch (hash);
+    SLE::pointer ret = getApp().getSLECache ().fetch (hash);

     if (!ret)
     {
         ret = boost::make_shared<SLE> (node->peekSerializer (), node->getTag ());
         ret->setImmutable ();
-        theApp->getSLECache ().canonicalize (hash, ret);
+        getApp().getSLECache ().canonicalize (hash, ret);
     }

     return ret;
@@ -1811,12 +1811,12 @@ uint32 Ledger::roundCloseTime (uint32 closeTime, uint32 closeResolution)

 void Ledger::pendSave (bool fromConsensus)
 {
-    if (!fromConsensus && !theApp->getHashRouter ().setFlag (getHash (), SF_SAVED))
+    if (!fromConsensus && !getApp().getHashRouter ().setFlag (getHash (), SF_SAVED))
         return;

     assert (isImmutable ());

-    theApp->getJobQueue ().addJob (
+    getApp().getJobQueue ().addJob (
         fromConsensus ? jtPUBLEDGER : jtPUBOLDLEDGER,
         fromConsensus ? "Ledger::pendSave" : "Ledger::pendOldSave",
         BIND_TYPE (&Ledger::saveAcceptedLedger, shared_from_this (), P_1, fromConsensus));
@@ -1879,7 +1879,7 @@ uint64 Ledger::scaleFeeBase (uint64 fee)
     if (!mBaseFee)
         updateFees ();

-    return theApp->getFeeTrack ().scaleFeeBase (fee, mBaseFee, mReferenceFeeUnits);
+    return getApp().getFeeTrack ().scaleFeeBase (fee, mBaseFee, mReferenceFeeUnits);
 }

 uint64 Ledger::scaleFeeLoad (uint64 fee, bool bAdmin)
@@ -1887,7 +1887,7 @@ uint64 Ledger::scaleFeeLoad (uint64 fee, bool bAdmin)
     if (!mBaseFee)
         updateFees ();

-    return theApp->getFeeTrack ().scaleFeeLoad (fee, mBaseFee, mReferenceFeeUnits, bAdmin);
+    return getApp().getFeeTrack ().scaleFeeLoad (fee, mBaseFee, mReferenceFeeUnits, bAdmin);
 }

 std::vector<uint256> Ledger::getNeededTransactionHashes (int max, SHAMapSyncFilter* filter)

@@ -39,6 +39,7 @@ class SqliteStatement;
|
||||
class Ledger
|
||||
: public boost::enable_shared_from_this <Ledger>
|
||||
, public CountedObject <Ledger>
|
||||
, Uncopyable
|
||||
{
|
||||
public:
|
||||
static char const* getCountedObjectName () { return "Ledger"; }
|
||||
@@ -454,10 +455,6 @@ private:
|
||||
SHAMap::pointer mAccountStateMap;
|
||||
|
||||
mutable boost::recursive_mutex mLock;
|
||||
|
||||
// VFALCO TODO derive this from beast::Uncopyable
|
||||
Ledger (const Ledger&); // no implementation
|
||||
Ledger& operator= (const Ledger&); // no implementation
|
||||
};
|
||||
|
||||
inline LedgerStateParms operator| (const LedgerStateParms& l1, const LedgerStateParms& l2)
|
||||
|
||||
@@ -31,7 +31,7 @@ int LedgerMaster::getValidatedLedgerAge ()
|
||||
return 999999;
|
||||
}
|
||||
|
||||
int64 ret = theApp->getOPs ().getCloseTimeNC ();
|
||||
int64 ret = getApp().getOPs ().getCloseTimeNC ();
|
||||
ret -= static_cast<int64> (mValidLedger->getCloseTimeNC ());
|
||||
ret = std::max (0LL, ret);
|
||||
|
||||
@@ -132,7 +132,7 @@ Ledger::pointer LedgerMaster::closeLedger (bool recover)
|
||||
{
|
||||
TransactionEngineParams tepFlags = tapOPEN_LEDGER;
|
||||
|
||||
if (theApp->getHashRouter ().addSuppressionPeer (it->first.getTXID (), SF_SIGGOOD))
|
||||
if (getApp().getHashRouter ().addSuppressionPeer (it->first.getTXID (), SF_SIGGOOD))
|
||||
tepFlags = static_cast<TransactionEngineParams> (tepFlags | tapNO_CHECK_SIGN);
|
||||
|
||||
bool didApply;
|
||||
@@ -171,7 +171,7 @@ TER LedgerMaster::doTransaction (SerializedTransaction::ref txn, TransactionEngi
|
||||
ledger = mEngine.getLedger ();
|
||||
}
|
||||
// if (didApply)
|
||||
theApp->getOPs ().pubProposedTransaction (ledger, txn, result);
|
||||
getApp().getOPs ().pubProposedTransaction (ledger, txn, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
@@ -235,7 +235,7 @@ void LedgerMaster::asyncAccept (Ledger::pointer ledger)
|
||||
|
||||
if (it == ledgerHashes.end ())
|
||||
{
|
||||
if (theApp->isShutdown ())
|
||||
if (getApp().isShutdown ())
|
||||
return;
|
||||
|
||||
{
|
||||
@@ -272,18 +272,18 @@ bool LedgerMaster::acquireMissingLedger (Ledger::ref origLedger, uint256 const&
|
||||
if (ledger && (Ledger::getHashByIndex (ledgerSeq) == ledgerHash))
|
||||
{
|
||||
WriteLog (lsTRACE, LedgerMaster) << "Ledger hash found in database";
|
||||
theApp->getJobQueue ().addJob (jtPUBOLDLEDGER, "LedgerMaster::asyncAccept",
|
||||
getApp().getJobQueue ().addJob (jtPUBOLDLEDGER, "LedgerMaster::asyncAccept",
|
||||
BIND_TYPE (&LedgerMaster::asyncAccept, this, ledger));
|
||||
return true;
|
||||
}
|
||||
|
||||
if (theApp->getInboundLedgers ().isFailure (ledgerHash))
|
||||
if (getApp().getInboundLedgers ().isFailure (ledgerHash))
|
||||
{
|
||||
WriteLog (lsTRACE, LedgerMaster) << "Already failed to acquire " << ledgerSeq;
|
||||
return false;
|
||||
}
|
||||
|
||||
mMissingLedger = theApp->getInboundLedgers ().findCreate (ledgerHash, ledgerSeq);
|
||||
mMissingLedger = getApp().getInboundLedgers ().findCreate (ledgerHash, ledgerSeq);
|
||||
|
||||
if (mMissingLedger->isComplete ())
|
||||
{
|
||||
@@ -306,14 +306,14 @@ bool LedgerMaster::acquireMissingLedger (Ledger::ref origLedger, uint256 const&
|
||||
if (mMissingLedger->setAccept ())
|
||||
{
|
||||
if (!mMissingLedger->addOnComplete (BIND_TYPE (&LedgerMaster::missingAcquireComplete, this, P_1)))
|
||||
theApp->getIOService ().post (BIND_TYPE (&LedgerMaster::missingAcquireComplete, this, mMissingLedger));
|
||||
getApp().getIOService ().post (BIND_TYPE (&LedgerMaster::missingAcquireComplete, this, mMissingLedger));
|
||||
}
|
||||
|
||||
int fetchMax = theConfig.getSize (siLedgerFetch);
|
||||
int timeoutCount;
|
||||
int fetchCount = theApp->getInboundLedgers ().getFetchCount (timeoutCount);
|
||||
int fetchCount = getApp().getInboundLedgers ().getFetchCount (timeoutCount);
|
||||
|
||||
if ((fetchCount < fetchMax) && theApp->getOPs ().isFull ())
|
||||
if ((fetchCount < fetchMax) && getApp().getOPs ().isFull ())
|
||||
{
|
||||
if (timeoutCount > 2)
|
||||
{
|
||||
@@ -326,9 +326,9 @@ bool LedgerMaster::acquireMissingLedger (Ledger::ref origLedger, uint256 const&
|
||||
BOOST_REVERSE_FOREACH (const u_pair & it, vec)
|
||||
{
|
||||
if ((fetchCount < fetchMax) && (it.first < ledgerSeq) &&
|
||||
!mCompleteLedgers.hasValue (it.first) && !theApp->getInboundLedgers ().find (it.second))
|
||||
!mCompleteLedgers.hasValue (it.first) && !getApp().getInboundLedgers ().find (it.second))
|
||||
{
|
||||
InboundLedger::pointer acq = theApp->getInboundLedgers ().findCreate (it.second, it.first);
|
||||
InboundLedger::pointer acq = getApp().getInboundLedgers ().findCreate (it.second, it.first);
|
||||
|
||||
if (acq && acq->isComplete ())
|
||||
{
|
||||
@@ -343,7 +343,7 @@ bool LedgerMaster::acquireMissingLedger (Ledger::ref origLedger, uint256 const&
|
||||
}
|
||||
}
|
||||
|
||||
if (theApp->getOPs ().shouldFetchPack (ledgerSeq) && (ledgerSeq > 40000))
|
||||
if (getApp().getOPs ().shouldFetchPack (ledgerSeq) && (ledgerSeq > 40000))
|
||||
{
|
||||
// refill our fetch pack
|
||||
Ledger::pointer nextLedger = mLedgerHistory.getLedgerBySeq (ledgerSeq + 1);
|
||||
@@ -355,7 +355,7 @@ bool LedgerMaster::acquireMissingLedger (Ledger::ref origLedger, uint256 const&
|
||||
tmBH.set_query (true);
|
||||
tmBH.set_seq (ledgerSeq);
|
||||
tmBH.set_ledgerhash (ledgerHash.begin (), 32);
|
||||
std::vector<Peer::pointer> peerList = theApp->getPeers ().getPeerVector ();
|
||||
std::vector<Peer::pointer> peerList = getApp().getPeers ().getPeerVector ();
|
||||
|
||||
Peer::pointer target;
|
||||
int count = 0;
|
||||
@@ -424,7 +424,7 @@ void LedgerMaster::resumeAcquiring ()
|
||||
// based on a myriad of conditions which short circuit the function
|
||||
// in ways that the caller cannot expect or predict.
|
||||
//
|
||||
if (!theApp->getOPs ().isFull ())
|
||||
if (!getApp().getOPs ().isFull ())
|
||||
return;
|
||||
|
||||
boost::recursive_mutex::scoped_lock ml (mLock);
|
||||
@@ -502,7 +502,7 @@ void LedgerMaster::setFullLedger (Ledger::pointer ledger)
|
||||
WriteLog (lsDEBUG, LedgerMaster) << "Ledger " << ledger->getLedgerSeq () << " accepted :" << ledger->getHash ();
|
||||
assert (ledger->peekAccountStateMap ()->getHash ().isNonZero ());
|
||||
|
||||
if (theApp->getOPs ().isNeedNetworkLedger ())
|
||||
if (getApp().getOPs ().isNeedNetworkLedger ())
|
||||
return;
|
||||
|
||||
boost::recursive_mutex::scoped_lock ml (mLock);
|
||||
@@ -531,7 +531,7 @@ void LedgerMaster::setFullLedger (Ledger::pointer ledger)
|
||||
if (mMissingLedger && mMissingLedger->isDone ())
|
||||
{
|
||||
if (mMissingLedger->isFailed ())
|
||||
theApp->getInboundLedgers ().dropLedger (mMissingLedger->getHash ());
|
||||
getApp().getInboundLedgers ().dropLedger (mMissingLedger->getHash ());
|
||||
|
||||
mMissingLedger.reset ();
|
||||
}
|
||||
@@ -542,7 +542,7 @@ void LedgerMaster::setFullLedger (Ledger::pointer ledger)
|
||||
return;
|
||||
}
|
||||
|
||||
if (theApp->getJobQueue ().getJobCountTotal (jtPUBOLDLEDGER) > 1)
|
||||
if (getApp().getJobQueue ().getJobCountTotal (jtPUBOLDLEDGER) > 1)
|
||||
{
|
||||
WriteLog (lsDEBUG, LedgerMaster) << "Too many pending ledger saves";
|
||||
return;
|
||||
@@ -609,7 +609,7 @@ void LedgerMaster::checkAccept (uint256 const& hash, uint32 seq)
|
||||
|
||||
if (mLastValidateHash.isNonZero ())
|
||||
{
|
||||
int val = theApp->getValidations ().getTrustedValidationCount (mLastValidateHash);
|
||||
int val = getApp().getValidations ().getTrustedValidationCount (mLastValidateHash);
|
||||
val *= MIN_VALIDATION_RATIO;
|
||||
val /= 256;
|
||||
|
||||
@@ -619,10 +619,10 @@ void LedgerMaster::checkAccept (uint256 const& hash, uint32 seq)
|
||||
|
||||
if (theConfig.RUN_STANDALONE)
|
||||
minVal = 0;
|
||||
else if (theApp->getOPs ().isNeedNetworkLedger ())
|
||||
else if (getApp().getOPs ().isNeedNetworkLedger ())
|
||||
minVal = 1;
|
||||
|
||||
if (theApp->getValidations ().getTrustedValidationCount (hash) < minVal) // nothing we can do
|
||||
if (getApp().getValidations ().getTrustedValidationCount (hash) < minVal) // nothing we can do
|
||||
return;
|
||||
|
||||
WriteLog (lsINFO, LedgerMaster) << "Advancing accepted ledger to " << seq << " with >= " << minVal << " validations";
|
||||
@@ -634,7 +634,7 @@ void LedgerMaster::checkAccept (uint256 const& hash, uint32 seq)
|
||||
|
||||
if (!ledger)
|
||||
{
|
||||
theApp->getInboundLedgers ().findCreate (hash, seq);
|
||||
getApp().getInboundLedgers ().findCreate (hash, seq);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -695,13 +695,13 @@ void LedgerMaster::tryPublish ()
|
||||
}
|
||||
else
|
||||
{
|
||||
if (theApp->getInboundLedgers ().isFailure (hash))
|
||||
if (getApp().getInboundLedgers ().isFailure (hash))
|
||||
{
|
||||
WriteLog (lsWARNING, LedgerMaster) << "Unable to acquire a recent validated ledger";
|
||||
}
|
||||
else
|
||||
{
|
||||
InboundLedger::pointer acq = theApp->getInboundLedgers ().findCreate (hash, seq);
|
||||
InboundLedger::pointer acq = getApp().getInboundLedgers ().findCreate (hash, seq);
|
||||
|
||||
if (!acq->isDone ())
|
||||
{
|
||||
@@ -722,16 +722,16 @@ void LedgerMaster::tryPublish ()
|
||||
|
||||
if (!mPubLedgers.empty () && !mPubThread)
|
||||
{
|
||||
theApp->getOPs ().clearNeedNetworkLedger ();
|
||||
getApp().getOPs ().clearNeedNetworkLedger ();
|
||||
mPubThread = true;
|
||||
theApp->getJobQueue ().addJob (jtPUBLEDGER, "Ledger::pubThread",
|
||||
getApp().getJobQueue ().addJob (jtPUBLEDGER, "Ledger::pubThread",
|
||||
BIND_TYPE (&LedgerMaster::pubThread, this));
|
||||
mPathFindNewLedger = true;
|
||||
|
||||
if (!mPathFindThread)
|
||||
{
|
||||
mPathFindThread = true;
|
||||
theApp->getJobQueue ().addJob (jtUPDATE_PF, "updatePaths",
|
||||
getApp().getJobQueue ().addJob (jtUPDATE_PF, "updatePaths",
|
||||
BIND_TYPE (&LedgerMaster::updatePaths, this));
|
||||
}
|
||||
}
|
||||
@@ -757,7 +757,7 @@ void LedgerMaster::pubThread ()
|
||||
if (published && !mPathFindThread)
|
||||
{
|
||||
mPathFindThread = true;
|
||||
theApp->getJobQueue ().addJob (jtUPDATE_PF, "updatePaths",
|
||||
getApp().getJobQueue ().addJob (jtUPDATE_PF, "updatePaths",
|
||||
BIND_TYPE (&LedgerMaster::updatePaths, this));
|
||||
}
|
||||
|
||||
@@ -769,7 +769,7 @@ void LedgerMaster::pubThread ()
|
||||
{
|
||||
WriteLog (lsDEBUG, LedgerMaster) << "Publishing ledger " << l->getLedgerSeq ();
|
||||
setFullLedger (l); // OPTIMIZEME: This is actually more work than we need to do
|
||||
theApp->getOPs ().pubLedger (l);
|
||||
getApp().getOPs ().pubLedger (l);
|
||||
published = true;
|
||||
}
|
||||
}
|
||||
@@ -819,7 +819,7 @@ void LedgerMaster::newPathRequest ()
|
||||
if (!mPathFindThread)
|
||||
{
|
||||
mPathFindThread = true;
|
||||
theApp->getJobQueue ().addJob (jtUPDATE_PF, "updatePaths",
|
||||
getApp().getJobQueue ().addJob (jtUPDATE_PF, "updatePaths",
|
||||
BIND_TYPE (&LedgerMaster::updatePaths, this));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,6 +32,10 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
~LedgerMaster ()
|
||||
{
|
||||
}
|
||||
|
||||
uint32 getCurrentLedgerIndex ();
|
||||
|
||||
ScopedLock getLock ()
|
||||
|
||||
@@ -54,7 +54,7 @@ std::string NetworkOPs::strOperatingMode ()
|
||||
boost::posix_time::ptime NetworkOPs::getNetworkTimePT ()
|
||||
{
|
||||
int offset = 0;
|
||||
theApp->getSystemTimeOffset (offset);
|
||||
getApp().getSystemTimeOffset (offset);
|
||||
return boost::posix_time::microsec_clock::universal_time () + boost::posix_time::seconds (offset);
|
||||
}
|
||||
|
||||
@@ -163,7 +163,7 @@ void NetworkOPs::submitTransaction (Job&, SerializedTransaction::pointer iTrans,
|
||||
uint256 suppress = trans->getTransactionID ();
|
||||
int flags;
|
||||
|
||||
if (theApp->getHashRouter ().addSuppressionPeer (suppress, 0, flags) && ((flags & SF_RETRY) != 0))
|
||||
if (getApp().getHashRouter ().addSuppressionPeer (suppress, 0, flags) && ((flags & SF_RETRY) != 0))
|
||||
{
|
||||
WriteLog (lsWARNING, NetworkOPs) << "Redundant transactions submitted";
|
||||
return;
|
||||
@@ -182,11 +182,11 @@ void NetworkOPs::submitTransaction (Job&, SerializedTransaction::pointer iTrans,
|
||||
if (!trans->checkSign ())
|
||||
{
|
||||
WriteLog (lsWARNING, NetworkOPs) << "Submitted transaction has bad signature";
|
||||
theApp->getHashRouter ().setFlag (suppress, SF_BAD);
|
||||
getApp().getHashRouter ().setFlag (suppress, SF_BAD);
|
||||
return;
|
||||
}
|
||||
|
||||
theApp->getHashRouter ().setFlag (suppress, SF_SIGGOOD);
|
||||
getApp().getHashRouter ().setFlag (suppress, SF_SIGGOOD);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
@@ -196,7 +196,7 @@ void NetworkOPs::submitTransaction (Job&, SerializedTransaction::pointer iTrans,
|
||||
}
|
||||
|
||||
// FIXME: Should submit to job queue
|
||||
theApp->getIOService ().post (boost::bind (&NetworkOPs::processTransaction, this,
|
||||
getApp().getIOService ().post (boost::bind (&NetworkOPs::processTransaction, this,
|
||||
boost::make_shared<Transaction> (trans, false), false, false, callback));
|
||||
}
|
||||
|
||||
@@ -239,17 +239,17 @@ void NetworkOPs::runTransactionQueue ()
|
||||
|
||||
for (int i = 0; i < 10; ++i)
|
||||
{
|
||||
theApp->getTxnQueue ().getJob (txn);
|
||||
getApp().getTxnQueue ().getJob (txn);
|
||||
|
||||
if (!txn)
|
||||
return;
|
||||
|
||||
{
|
||||
LoadEvent::autoptr ev = theApp->getJobQueue ().getLoadEventAP (jtTXN_PROC, "runTxnQ");
|
||||
LoadEvent::autoptr ev = getApp().getJobQueue ().getLoadEventAP (jtTXN_PROC, "runTxnQ");
|
||||
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getMasterLock ());
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getMasterLock ());
|
||||
|
||||
Transaction::pointer dbtx = theApp->getMasterTransaction ().fetch (txn->getID (), true);
|
||||
Transaction::pointer dbtx = getApp().getMasterTransaction ().fetch (txn->getID (), true);
|
||||
assert (dbtx);
|
||||
|
||||
bool didApply;
|
||||
@@ -258,9 +258,9 @@ void NetworkOPs::runTransactionQueue ()
|
||||
dbtx->setResult (r);
|
||||
|
||||
if (isTemMalformed (r)) // malformed, cache bad
|
||||
theApp->getHashRouter ().setFlag (txn->getID (), SF_BAD);
|
||||
getApp().getHashRouter ().setFlag (txn->getID (), SF_BAD);
|
||||
else if (isTelLocal (r) || isTerRetry (r)) // can be retried
|
||||
theApp->getHashRouter ().setFlag (txn->getID (), SF_RETRY);
|
||||
getApp().getHashRouter ().setFlag (txn->getID (), SF_RETRY);
|
||||
|
||||
|
||||
if (isTerRetry (r))
|
||||
@@ -268,7 +268,7 @@ void NetworkOPs::runTransactionQueue ()
|
||||
// transaction should be held
|
||||
WriteLog (lsDEBUG, NetworkOPs) << "Transaction should be held: " << r;
|
||||
dbtx->setStatus (HELD);
|
||||
theApp->getMasterTransaction ().canonicalize (dbtx, true);
|
||||
getApp().getMasterTransaction ().canonicalize (dbtx, true);
|
||||
mLedgerMaster->addHeldTransaction (dbtx);
|
||||
}
|
||||
else if (r == tefPAST_SEQ)
|
||||
@@ -281,7 +281,7 @@ void NetworkOPs::runTransactionQueue ()
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "Transaction is now included in open ledger";
|
||||
dbtx->setStatus (INCLUDED);
|
||||
theApp->getMasterTransaction ().canonicalize (dbtx, true);
|
||||
getApp().getMasterTransaction ().canonicalize (dbtx, true);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -293,7 +293,7 @@ void NetworkOPs::runTransactionQueue ()
|
||||
{
|
||||
std::set<uint64> peers;
|
||||
|
||||
if (theApp->getHashRouter ().swapSet (txn->getID (), peers, SF_RELAYED))
|
||||
if (getApp().getHashRouter ().swapSet (txn->getID (), peers, SF_RELAYED))
|
||||
{
|
||||
protocol::TMTransaction tx;
|
||||
Serializer s;
|
||||
@@ -303,7 +303,7 @@ void NetworkOPs::runTransactionQueue ()
|
||||
tx.set_receivetimestamp (getNetworkTimeNC ()); // FIXME: This should be when we received it
|
||||
|
||||
PackedMessage::pointer packet = boost::make_shared<PackedMessage> (tx, protocol::mtTRANSACTION);
|
||||
theApp->getPeers ().relayMessageBut (peers, packet);
|
||||
getApp().getPeers ().relayMessageBut (peers, packet);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -311,15 +311,15 @@ void NetworkOPs::runTransactionQueue ()
|
||||
}
|
||||
}
|
||||
|
||||
if (theApp->getTxnQueue ().stopProcessing (txn))
|
||||
theApp->getIOService ().post (BIND_TYPE (&NetworkOPs::runTransactionQueue, this));
|
||||
if (getApp().getTxnQueue ().stopProcessing (txn))
|
||||
getApp().getIOService ().post (BIND_TYPE (&NetworkOPs::runTransactionQueue, this));
|
||||
}
|
||||
|
||||
Transaction::pointer NetworkOPs::processTransaction (Transaction::pointer trans, bool bAdmin, bool bFailHard, stCallback callback)
|
||||
{
|
||||
LoadEvent::autoptr ev = theApp->getJobQueue ().getLoadEventAP (jtTXN_PROC, "ProcessTXN");
|
||||
LoadEvent::autoptr ev = getApp().getJobQueue ().getLoadEventAP (jtTXN_PROC, "ProcessTXN");
|
||||
|
||||
int newFlags = theApp->getHashRouter ().getFlags (trans->getID ());
|
||||
int newFlags = getApp().getHashRouter ().getFlags (trans->getID ());
|
||||
|
||||
if ((newFlags & SF_BAD) != 0)
|
||||
{
|
||||
@@ -337,23 +337,23 @@ Transaction::pointer NetworkOPs::processTransaction (Transaction::pointer trans,
|
||||
WriteLog (lsINFO, NetworkOPs) << "Transaction has bad signature";
|
||||
trans->setStatus (INVALID);
|
||||
trans->setResult (temBAD_SIGNATURE);
|
||||
theApp->getHashRouter ().setFlag (trans->getID (), SF_BAD);
|
||||
getApp().getHashRouter ().setFlag (trans->getID (), SF_BAD);
|
||||
return trans;
|
||||
}
|
||||
|
||||
theApp->getHashRouter ().setFlag (trans->getID (), SF_SIGGOOD);
|
||||
getApp().getHashRouter ().setFlag (trans->getID (), SF_SIGGOOD);
|
||||
}
|
||||
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getMasterLock ());
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getMasterLock ());
|
||||
bool didApply;
|
||||
TER r = mLedgerMaster->doTransaction (trans->getSTransaction (),
|
||||
bAdmin ? (tapOPEN_LEDGER | tapNO_CHECK_SIGN | tapADMIN) : (tapOPEN_LEDGER | tapNO_CHECK_SIGN), didApply);
|
||||
trans->setResult (r);
|
||||
|
||||
if (isTemMalformed (r)) // malformed, cache bad
|
||||
theApp->getHashRouter ().setFlag (trans->getID (), SF_BAD);
|
||||
getApp().getHashRouter ().setFlag (trans->getID (), SF_BAD);
|
||||
else if (isTelLocal (r) || isTerRetry (r)) // can be retried
|
||||
theApp->getHashRouter ().setFlag (trans->getID (), SF_RETRY);
|
||||
getApp().getHashRouter ().setFlag (trans->getID (), SF_RETRY);
|
||||
|
||||
#ifdef DEBUG
|
||||
|
||||
@@ -375,7 +375,7 @@ Transaction::pointer NetworkOPs::processTransaction (Transaction::pointer trans,
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "Transaction is now included in open ledger";
|
||||
trans->setStatus (INCLUDED);
|
||||
theApp->getMasterTransaction ().canonicalize (trans, true);
|
||||
getApp().getMasterTransaction ().canonicalize (trans, true);
|
||||
}
|
||||
else if (r == tefPAST_SEQ)
|
||||
{
|
||||
@@ -390,7 +390,7 @@ Transaction::pointer NetworkOPs::processTransaction (Transaction::pointer trans,
|
||||
// transaction should be held
|
||||
WriteLog (lsDEBUG, NetworkOPs) << "Transaction should be held: " << r;
|
||||
trans->setStatus (HELD);
|
||||
theApp->getMasterTransaction ().canonicalize (trans, true);
|
||||
getApp().getMasterTransaction ().canonicalize (trans, true);
|
||||
mLedgerMaster->addHeldTransaction (trans);
|
||||
}
|
||||
}
|
||||
@@ -404,7 +404,7 @@ Transaction::pointer NetworkOPs::processTransaction (Transaction::pointer trans,
|
||||
{
|
||||
std::set<uint64> peers;
|
||||
|
||||
if (theApp->getHashRouter ().swapSet (trans->getID (), peers, SF_RELAYED))
|
||||
if (getApp().getHashRouter ().swapSet (trans->getID (), peers, SF_RELAYED))
|
||||
{
|
||||
protocol::TMTransaction tx;
|
||||
Serializer s;
|
||||
@@ -414,7 +414,7 @@ Transaction::pointer NetworkOPs::processTransaction (Transaction::pointer trans,
|
||||
tx.set_receivetimestamp (getNetworkTimeNC ()); // FIXME: This should be when we received it
|
||||
|
||||
PackedMessage::pointer packet = boost::make_shared<PackedMessage> (tx, protocol::mtTRANSACTION);
|
||||
theApp->getPeers ().relayMessageBut (peers, packet);
|
||||
getApp().getPeers ().relayMessageBut (peers, packet);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -624,11 +624,11 @@ void NetworkOPs::checkState (const boost::system::error_code& result)
|
||||
}
|
||||
|
||||
{
|
||||
ScopedLock sl (theApp->getMasterLock ());
|
||||
ScopedLock sl (getApp().getMasterLock ());
|
||||
|
||||
theApp->getLoadManager ().resetDeadlockDetector ();
|
||||
getApp().getLoadManager ().resetDeadlockDetector ();
|
||||
|
||||
std::vector<Peer::pointer> peerList = theApp->getPeers ().getPeerVector ();
|
||||
std::vector<Peer::pointer> peerList = getApp().getPeers ().getPeerVector ();
|
||||
|
||||
// do we have sufficient peers? If not, we are disconnected.
|
||||
if (peerList.size () < theConfig.NETWORK_QUORUM)
|
||||
@@ -668,7 +668,7 @@ void NetworkOPs::checkState (const boost::system::error_code& result)
|
||||
void NetworkOPs::tryStartConsensus ()
|
||||
{
|
||||
uint256 networkClosed;
|
||||
bool ledgerChange = checkLastClosedLedger (theApp->getPeers ().getPeerVector (), networkClosed);
|
||||
bool ledgerChange = checkLastClosedLedger (getApp().getPeers ().getPeerVector (), networkClosed);
|
||||
|
||||
if (networkClosed.isZero ())
|
||||
return;
|
||||
@@ -692,7 +692,7 @@ void NetworkOPs::tryStartConsensus ()
|
||||
// check if the ledger is good enough to go to omFULL
|
||||
// Note: Do not go to omFULL if we don't have the previous ledger
|
||||
// check if the ledger is bad enough to go to omCONNECTED -- TODO
|
||||
if (theApp->getOPs ().getNetworkTimeNC () < mLedgerMaster->getCurrentLedger ()->getCloseTimeNC ())
|
||||
if (getApp().getOPs ().getNetworkTimeNC () < mLedgerMaster->getCurrentLedger ()->getCloseTimeNC ())
|
||||
setMode (omFULL);
|
||||
}
|
||||
|
||||
@@ -722,7 +722,7 @@ bool NetworkOPs::checkLastClosedLedger (const std::vector<Peer::pointer>& peerLi
|
||||
boost::unordered_map<uint256, ValidationCount> ledgers;
|
||||
{
|
||||
boost::unordered_map<uint256, currentValidationCount> current =
|
||||
theApp->getValidations ().getCurrentValidations (closedLedger, prevClosedLedger);
|
||||
getApp().getValidations ().getCurrentValidations (closedLedger, prevClosedLedger);
|
||||
typedef std::map<uint256, currentValidationCount>::value_type u256_cvc_pair;
|
||||
BOOST_FOREACH (const u256_cvc_pair & it, current)
|
||||
{
|
||||
@@ -739,7 +739,7 @@ bool NetworkOPs::checkLastClosedLedger (const std::vector<Peer::pointer>& peerLi
|
||||
if (mMode >= omTRACKING)
|
||||
{
|
||||
++ourVC.nodesUsing;
|
||||
uint160 ourAddress = theApp->getLocalCredentials ().getNodePublic ().getNodeID ();
|
||||
uint160 ourAddress = getApp().getLocalCredentials ().getNodePublic ().getNodeID ();
|
||||
|
||||
if (ourAddress > ourVC.highNodeUsing)
|
||||
ourVC.highNodeUsing = ourAddress;
|
||||
@@ -803,7 +803,7 @@ bool NetworkOPs::checkLastClosedLedger (const std::vector<Peer::pointer>& peerLi
|
||||
if (mAcquiringLedger)
|
||||
{
|
||||
mAcquiringLedger->abort ();
|
||||
theApp->getInboundLedgers ().dropLedger (mAcquiringLedger->getHash ());
|
||||
getApp().getInboundLedgers ().dropLedger (mAcquiringLedger->getHash ());
|
||||
mAcquiringLedger.reset ();
|
||||
}
|
||||
|
||||
@@ -824,11 +824,11 @@ bool NetworkOPs::checkLastClosedLedger (const std::vector<Peer::pointer>& peerLi
|
||||
WriteLog (lsINFO, NetworkOPs) << "Acquiring consensus ledger " << closedLedger;
|
||||
|
||||
if (!mAcquiringLedger || (mAcquiringLedger->getHash () != closedLedger))
|
||||
mAcquiringLedger = theApp->getInboundLedgers ().findCreate (closedLedger, 0);
|
||||
mAcquiringLedger = getApp().getInboundLedgers ().findCreate (closedLedger, 0);
|
||||
|
||||
if (!mAcquiringLedger || mAcquiringLedger->isFailed ())
|
||||
{
|
||||
theApp->getInboundLedgers ().dropLedger (closedLedger);
|
||||
getApp().getInboundLedgers ().dropLedger (closedLedger);
|
||||
WriteLog (lsERROR, NetworkOPs) << "Network ledger cannot be acquired";
|
||||
return true;
|
||||
}
|
||||
@@ -864,13 +864,13 @@ void NetworkOPs::switchLastClosedLedger (Ledger::pointer newLedger, bool duringC
|
||||
protocol::TMStatusChange s;
|
||||
s.set_newevent (protocol::neSWITCHED_LEDGER);
|
||||
s.set_ledgerseq (newLedger->getLedgerSeq ());
|
||||
s.set_networktime (theApp->getOPs ().getNetworkTimeNC ());
|
||||
s.set_networktime (getApp().getOPs ().getNetworkTimeNC ());
|
||||
uint256 hash = newLedger->getParentHash ();
|
||||
s.set_ledgerhashprevious (hash.begin (), hash.size ());
|
||||
hash = newLedger->getHash ();
|
||||
s.set_ledgerhash (hash.begin (), hash.size ());
|
||||
PackedMessage::pointer packet = boost::make_shared<PackedMessage> (s, protocol::mtSTATUS_CHANGE);
|
||||
theApp->getPeers ().relayMessage (NULL, packet);
|
||||
getApp().getPeers ().relayMessage (NULL, packet);
|
||||
}
|
||||
|
||||
int NetworkOPs::beginConsensus (uint256 const& networkClosed, Ledger::pointer closingLedger)
|
||||
@@ -918,7 +918,7 @@ bool NetworkOPs::haveConsensusObject ()
|
||||
{
|
||||
// we need to get into the consensus process
|
||||
uint256 networkClosed;
|
||||
std::vector<Peer::pointer> peerList = theApp->getPeers ().getPeerVector ();
|
||||
std::vector<Peer::pointer> peerList = getApp().getPeers ().getPeerVector ();
|
||||
bool ledgerChange = checkLastClosedLedger (peerList, networkClosed);
|
||||
|
||||
if (!ledgerChange)
|
||||
@@ -942,7 +942,7 @@ uint256 NetworkOPs::getConsensusLCL ()
|
||||
void NetworkOPs::processTrustedProposal (LedgerProposal::pointer proposal,
|
||||
boost::shared_ptr<protocol::TMProposeSet> set, RippleAddress nodePublic, uint256 checkLedger, bool sigGood)
|
||||
{
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getMasterLock ());
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getMasterLock ());
|
||||
|
||||
bool relay = true;
|
||||
|
||||
@@ -979,9 +979,9 @@ void NetworkOPs::processTrustedProposal (LedgerProposal::pointer proposal,
|
||||
if (relay)
|
||||
{
|
||||
std::set<uint64> peers;
|
||||
theApp->getHashRouter ().swapSet (proposal->getHashRouter (), peers, SF_RELAYED);
|
||||
getApp().getHashRouter ().swapSet (proposal->getHashRouter (), peers, SF_RELAYED);
|
||||
PackedMessage::pointer message = boost::make_shared<PackedMessage> (*set, protocol::mtPROPOSE_LEDGER);
|
||||
theApp->getPeers ().relayMessageBut (peers, message);
|
||||
getApp().getPeers ().relayMessageBut (peers, message);
|
||||
}
|
||||
else
|
||||
WriteLog (lsINFO, NetworkOPs) << "Not relaying trusted proposal";
|
||||
@@ -1027,7 +1027,7 @@ SHAMapAddNode NetworkOPs::gotTXData (const boost::shared_ptr<Peer>& peer, uint25
|
||||
|
||||
boost::shared_ptr<LedgerConsensus> consensus;
|
||||
{
|
||||
ScopedLock mlh(theApp->getMasterLock());
|
||||
ScopedLock mlh(getApp().getMasterLock());
|
||||
consensus = mConsensus;
|
||||
}
|
||||
|
||||
@@ -1069,7 +1069,7 @@ void NetworkOPs::endConsensus (bool correctLCL)
|
||||
{
|
||||
uint256 deadLedger = mLedgerMaster->getClosedLedger ()->getParentHash ();
|
||||
|
||||
std::vector <Peer::pointer> peerList = theApp->getPeers ().getPeerVector ();
|
||||
std::vector <Peer::pointer> peerList = getApp().getPeers ().getPeerVector ();
|
||||
|
||||
BOOST_FOREACH (Peer::ref it, peerList)
|
||||
{
|
||||
@@ -1103,8 +1103,8 @@ void NetworkOPs::pubServer ()
|
||||
|
||||
jvObj ["type"] = "serverStatus";
|
||||
jvObj ["server_status"] = strOperatingMode ();
|
||||
jvObj ["load_base"] = (mLastLoadBase = theApp->getFeeTrack ().getLoadBase ());
|
||||
jvObj ["load_factor"] = (mLastLoadFactor = theApp->getFeeTrack ().getLoadFactor ());
|
||||
jvObj ["load_base"] = (mLastLoadBase = getApp().getFeeTrack ().getLoadBase ());
|
||||
jvObj ["load_factor"] = (mLastLoadFactor = getApp().getFeeTrack ().getLoadFactor ());
|
||||
|
||||
NetworkOPs::subMapType::const_iterator it = mSubServer.begin ();
|
||||
|
||||
@@ -1133,13 +1133,13 @@ void NetworkOPs::setMode (OperatingMode om)
|
||||
|
||||
if (om == omCONNECTED)
|
||||
{
|
||||
if (theApp->getLedgerMaster ().getValidatedLedgerAge () < 60)
|
||||
if (getApp().getLedgerMaster ().getValidatedLedgerAge () < 60)
|
||||
om = omSYNCING;
|
||||
}
|
||||
|
||||
if (om == omSYNCING)
|
||||
{
|
||||
if (theApp->getLedgerMaster ().getValidatedLedgerAge () >= 60)
|
||||
if (getApp().getLedgerMaster ().getValidatedLedgerAge () >= 60)
|
||||
om = omCONNECTED;
|
||||
}
|
||||
|
||||
@@ -1217,8 +1217,8 @@ NetworkOPs::getAccountTxs (const RippleAddress& account, int32 minLedger, int32
|
||||
minLedger, maxLedger, descending, offset, limit, false, false, bAdmin);
|
||||
|
||||
{
|
||||
Database* db = theApp->getTxnDB ()->getDB ();
|
||||
ScopedLock sl (theApp->getTxnDB ()->getDBLock ());
|
||||
Database* db = getApp().getTxnDB ()->getDB ();
|
||||
ScopedLock sl (getApp().getTxnDB ()->getDBLock ());
|
||||
|
||||
SQL_FOREACH (db, sql)
|
||||
{
|
||||
@@ -1254,8 +1254,8 @@ std::vector<NetworkOPs::txnMetaLedgerType> NetworkOPs::getAccountTxsB (
|
||||
minLedger, maxLedger, descending, offset, limit, true/*binary*/, false, bAdmin);
|
||||
|
||||
{
|
||||
Database* db = theApp->getTxnDB ()->getDB ();
|
||||
ScopedLock sl (theApp->getTxnDB ()->getDBLock ());
|
||||
Database* db = getApp().getTxnDB ()->getDB ();
|
||||
ScopedLock sl (getApp().getTxnDB ()->getDBLock ());
|
||||
|
||||
SQL_FOREACH (db, sql)
|
||||
{
|
||||
@@ -1301,8 +1301,8 @@ NetworkOPs::countAccountTxs (const RippleAddress& account, int32 minLedger, int3
|
||||
std::string sql = NetworkOPs::transactionsSQL ("COUNT(*) AS 'TransactionCount'", account,
|
||||
minLedger, maxLedger, false, 0, -1, true, true, true);
|
||||
|
||||
Database* db = theApp->getTxnDB ()->getDB ();
|
||||
ScopedLock sl (theApp->getTxnDB ()->getDBLock ());
|
||||
Database* db = getApp().getTxnDB ()->getDB ();
|
||||
ScopedLock sl (getApp().getTxnDB ()->getDBLock ());
|
||||
SQL_FOREACH (db, sql)
|
||||
{
|
||||
ret = db->getInt ("TransactionCount");
|
||||
@@ -1321,8 +1321,8 @@ NetworkOPs::getLedgerAffectedAccounts (uint32 ledgerSeq)
|
||||
% ledgerSeq);
|
||||
RippleAddress acct;
|
||||
{
|
||||
Database* db = theApp->getTxnDB ()->getDB ();
|
||||
ScopedLock sl (theApp->getTxnDB ()->getDBLock ());
|
||||
Database* db = getApp().getTxnDB ()->getDB ();
|
||||
ScopedLock sl (getApp().getTxnDB ()->getDBLock ());
|
||||
SQL_FOREACH (db, sql)
|
||||
{
|
||||
if (acct.setAccountID (db->getStrBinary ("Account")))
|
||||
@@ -1335,7 +1335,7 @@ NetworkOPs::getLedgerAffectedAccounts (uint32 ledgerSeq)
|
||||
bool NetworkOPs::recvValidation (SerializedValidation::ref val, const std::string& source)
|
||||
{
|
||||
WriteLog (lsDEBUG, NetworkOPs) << "recvValidation " << val->getLedgerHash () << " from " << source;
|
||||
return theApp->getValidations ().addValidation (val, source);
|
||||
return getApp().getValidations ().addValidation (val, source);
|
||||
}
|
||||
|
||||
Json::Value NetworkOPs::getConsensusInfo ()
|
||||
@@ -1373,10 +1373,10 @@ Json::Value NetworkOPs::getServerInfo (bool human, bool admin)
|
||||
info["pubkey_validator"] = "none";
|
||||
}
|
||||
|
||||
info["pubkey_node"] = theApp->getLocalCredentials ().getNodePublic ().humanNodePublic ();
|
||||
info["pubkey_node"] = getApp().getLocalCredentials ().getNodePublic ().humanNodePublic ();
|
||||
|
||||
|
||||
info["complete_ledgers"] = theApp->getLedgerMaster ().getCompleteLedgers ();
|
||||
info["complete_ledgers"] = getApp().getLedgerMaster ().getCompleteLedgers ();
|
||||
|
||||
if (mFeatureBlocked)
|
||||
info["feature_blocked"] = true;
|
||||
@@ -1386,15 +1386,15 @@ Json::Value NetworkOPs::getServerInfo (bool human, bool admin)
|
||||
if (fp != 0)
|
||||
info["fetch_pack"] = Json::UInt (fp);
|
||||
|
||||
info["peers"] = theApp->getPeers ().getPeerCount ();
|
||||
info["peers"] = getApp().getPeers ().getPeerCount ();
|
||||
|
||||
Json::Value lastClose = Json::objectValue;
|
||||
lastClose["proposers"] = theApp->getOPs ().getPreviousProposers ();
|
||||
lastClose["proposers"] = getApp().getOPs ().getPreviousProposers ();
|
||||
|
||||
if (human)
|
||||
lastClose["converge_time_s"] = static_cast<double> (theApp->getOPs ().getPreviousConvergeTime ()) / 1000.0;
|
||||
lastClose["converge_time_s"] = static_cast<double> (getApp().getOPs ().getPreviousConvergeTime ()) / 1000.0;
|
||||
else
|
||||
lastClose["converge_time"] = Json::Int (theApp->getOPs ().getPreviousConvergeTime ());
|
||||
lastClose["converge_time"] = Json::Int (getApp().getOPs ().getPreviousConvergeTime ());
|
||||
|
||||
info["last_close"] = lastClose;
|
||||
|
||||
@@ -1402,16 +1402,16 @@ Json::Value NetworkOPs::getServerInfo (bool human, bool admin)
|
||||
// info["consensus"] = mConsensus->getJson();
|
||||
|
||||
if (admin)
|
||||
info["load"] = theApp->getJobQueue ().getJson ();
|
||||
info["load"] = getApp().getJobQueue ().getJson ();
|
||||
|
||||
if (!human)
|
||||
{
|
||||
info["load_base"] = theApp->getFeeTrack ().getLoadBase ();
|
||||
info["load_factor"] = theApp->getFeeTrack ().getLoadFactor ();
|
||||
info["load_base"] = getApp().getFeeTrack ().getLoadBase ();
|
||||
info["load_factor"] = getApp().getFeeTrack ().getLoadFactor ();
|
||||
}
|
||||
else
|
||||
info["load_factor"] =
|
||||
static_cast<double> (theApp->getFeeTrack ().getLoadFactor ()) / theApp->getFeeTrack ().getLoadBase ();
|
||||
static_cast<double> (getApp().getFeeTrack ().getLoadFactor ()) / getApp().getFeeTrack ().getLoadBase ();
|
||||
|
||||
bool valid = false;
|
||||
Ledger::pointer lpClosed = getValidatedLedger ();
|
||||
@@ -1537,7 +1537,7 @@ void NetworkOPs::pubLedger (Ledger::ref accepted)
|
||||
jvObj["txn_count"] = Json::UInt (alpAccepted->getTxnCount ());
|
||||
|
||||
if (mMode >= omSYNCING)
|
||||
jvObj["validated_ledgers"] = theApp->getLedgerMaster ().getCompleteLedgers ();
|
||||
jvObj["validated_ledgers"] = getApp().getLedgerMaster ().getCompleteLedgers ();
|
||||
|
||||
NetworkOPs::subMapType::const_iterator it = mSubLedger.begin ();
|
||||
|
||||
@@ -1569,11 +1569,11 @@ void NetworkOPs::pubLedger (Ledger::ref accepted)
|
||||
|
||||
void NetworkOPs::reportFeeChange ()
|
||||
{
|
||||
if ((theApp->getFeeTrack ().getLoadBase () == mLastLoadBase) &&
|
||||
(theApp->getFeeTrack ().getLoadFactor () == mLastLoadFactor))
|
||||
if ((getApp().getFeeTrack ().getLoadBase () == mLastLoadBase) &&
|
||||
(getApp().getFeeTrack ().getLoadFactor () == mLastLoadFactor))
|
||||
return;
|
||||
|
||||
theApp->getJobQueue ().addJob (jtCLIENT, "reportFeeChange->pubServer", BIND_TYPE (&NetworkOPs::pubServer, this));
|
||||
getApp().getJobQueue ().addJob (jtCLIENT, "reportFeeChange->pubServer", BIND_TYPE (&NetworkOPs::pubServer, this));
|
||||
}
|
||||
|
||||
Json::Value NetworkOPs::transJson (const SerializedTransaction& stTxn, TER terResult, bool bValidated,
|
||||
@@ -1648,7 +1648,7 @@ void NetworkOPs::pubValidatedTransaction (Ledger::ref alAccepted, const Accepted
|
||||
it = mSubRTTransactions.erase (it);
|
||||
}
|
||||
}
|
||||
theApp->getOrderBookDB ().processTxn (alAccepted, alTx, jvObj);
|
||||
getApp().getOrderBookDB ().processTxn (alAccepted, alTx, jvObj);
|
||||
pubAccountTransaction (alAccepted, alTx, true);
|
||||
}
|
||||
|
||||
@@ -1808,7 +1808,7 @@ bool NetworkOPs::subBook (InfoSub::ref isrListener, const uint160& currencyPays,
|
||||
const uint160& issuerPays, const uint160& issuerGets)
|
||||
{
|
||||
BookListeners::pointer listeners =
|
||||
theApp->getOrderBookDB ().makeBookListeners (currencyPays, currencyGets, issuerPays, issuerGets);
|
||||
getApp().getOrderBookDB ().makeBookListeners (currencyPays, currencyGets, issuerPays, issuerGets);
|
||||
|
||||
if (listeners)
|
||||
listeners->addSubscriber (isrListener);
|
||||
@@ -1820,7 +1820,7 @@ bool NetworkOPs::unsubBook (uint64 uSeq,
|
||||
const uint160& currencyPays, const uint160& currencyGets, const uint160& issuerPays, const uint160& issuerGets)
|
||||
{
|
||||
BookListeners::pointer listeners =
|
||||
theApp->getOrderBookDB ().getBookListeners (currencyPays, currencyGets, issuerPays, issuerGets);
|
||||
getApp().getOrderBookDB ().getBookListeners (currencyPays, currencyGets, issuerPays, issuerGets);
|
||||
|
||||
if (listeners)
|
||||
listeners->removeSubscriber (uSeq);
|
||||
@@ -1882,7 +1882,7 @@ bool NetworkOPs::subLedger (InfoSub::ref isrListener, Json::Value& jvResult)
|
||||
}
|
||||
|
||||
if ((mMode >= omSYNCING) && !isNeedNetworkLedger ())
|
||||
jvResult["validated_ledgers"] = theApp->getLedgerMaster ().getCompleteLedgers ();
|
||||
jvResult["validated_ledgers"] = getApp().getLedgerMaster ().getCompleteLedgers ();
|
||||
|
||||
boost::recursive_mutex::scoped_lock sl (mMonitorLock);
|
||||
return mSubLedger.emplace (isrListener->getSeq (), isrListener).second;
|
||||
@@ -1909,8 +1909,8 @@ bool NetworkOPs::subServer (InfoSub::ref isrListener, Json::Value& jvResult)
|
||||
RandomNumbers::getInstance ().fillBytes (uRandom.begin (), uRandom.size ());
|
||||
jvResult["random"] = uRandom.ToString ();
|
||||
jvResult["server_status"] = strOperatingMode ();
|
||||
jvResult["load_base"] = theApp->getFeeTrack ().getLoadBase ();
|
||||
jvResult["load_factor"] = theApp->getFeeTrack ().getLoadFactor ();
|
||||
jvResult["load_base"] = getApp().getFeeTrack ().getLoadBase ();
|
||||
jvResult["load_factor"] = getApp().getFeeTrack ().getLoadFactor ();
|
||||
|
||||
boost::recursive_mutex::scoped_lock sl (mMonitorLock);
|
||||
return mSubServer.emplace (isrListener->getSeq (), isrListener).second;
|
||||
@@ -2164,7 +2164,7 @@ void NetworkOPs::makeFetchPack (Job&, boost::weak_ptr<Peer> wPeer,
|
||||
return;
|
||||
}
|
||||
|
||||
if (theApp->getFeeTrack ().isLoaded ())
|
||||
if (getApp().getFeeTrack ().isLoaded ())
|
||||
{
|
||||
WriteLog (lsINFO, NetworkOPs) << "Too busy to make fetch pack";
|
||||
return;
|
||||
@@ -2283,17 +2283,17 @@ void NetworkOPs::gotFetchPack (bool progress, uint32 seq)
|
||||
{
|
||||
mLastFetchPack = 0;
|
||||
mFetchSeq = seq; // earliest pack we have data on
|
||||
theApp->getJobQueue ().addJob (jtLEDGER_DATA, "gotFetchPack",
|
||||
BIND_TYPE (&InboundLedgers::gotFetchPack, &theApp->getInboundLedgers (), P_1));
|
||||
getApp().getJobQueue ().addJob (jtLEDGER_DATA, "gotFetchPack",
|
||||
BIND_TYPE (&InboundLedgers::gotFetchPack, &getApp().getInboundLedgers (), P_1));
|
||||
}
|
||||
|
||||
void NetworkOPs::missingNodeInLedger (uint32 seq)
|
||||
{
|
||||
WriteLog (lsWARNING, NetworkOPs) << "We are missing a node in ledger " << seq;
|
||||
uint256 hash = theApp->getLedgerMaster ().getHashBySeq (seq);
|
||||
uint256 hash = getApp().getLedgerMaster ().getHashBySeq (seq);
|
||||
|
||||
if (hash.isNonZero ())
|
||||
theApp->getInboundLedgers ().findCreate (hash, seq);
|
||||
getApp().getInboundLedgers ().findCreate (hash, seq);
|
||||
}
|
||||
|
||||
// vim:ts=4
|
||||
|
||||
@@ -712,7 +712,7 @@ TER OfferCreateTransactor::doApply ()
|
||||
CondLog (tesSUCCESS != terResult, lsINFO, OfferCreateTransactor) << boost::str (boost::format ("OfferCreate: final terResult=%s") % transToken (terResult));
|
||||
|
||||
if (isTesSuccess (terResult))
|
||||
theApp->getOrderBookDB ().invalidate ();
|
||||
getApp().getOrderBookDB ().invalidate ();
|
||||
|
||||
return terResult;
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ void OrderBookDB::setup (Ledger::ref ledger)
|
||||
|
||||
mSeq = ledger->getLedgerSeq ();
|
||||
|
||||
LoadEvent::autoptr ev = theApp->getJobQueue ().getLoadEventAP (jtOB_SETUP, "OrderBookDB::setup");
|
||||
LoadEvent::autoptr ev = getApp().getJobQueue ().getLoadEventAP (jtOB_SETUP, "OrderBookDB::setup");
|
||||
|
||||
mDestMap.clear ();
|
||||
mSourceMap.clear ();
|
||||
|
||||
@@ -35,7 +35,7 @@ void PeerDoor::startListening ()
|
||||
Peer::pointer new_connection = Peer::New (
|
||||
mAcceptor.get_io_service (),
|
||||
mCtx,
|
||||
theApp->getPeers ().assignPeerId (),
|
||||
getApp().getPeers ().assignPeerId (),
|
||||
true);
|
||||
|
||||
mAcceptor.async_accept (new_connection->getSocket (),
|
||||
|
||||
@@ -30,7 +30,7 @@ RPCDoor::~RPCDoor ()
|
||||
|
||||
void RPCDoor::startListening ()
|
||||
{
|
||||
RPCServer::pointer new_connection = RPCServer::create (mAcceptor.get_io_service (), mSSLContext, &theApp->getOPs ());
|
||||
RPCServer::pointer new_connection = RPCServer::create (mAcceptor.get_io_service (), mSSLContext, &getApp().getOPs ());
|
||||
mAcceptor.set_option (boost::asio::ip::tcp::acceptor::reuse_address (true));
|
||||
|
||||
mAcceptor.async_accept (new_connection->getRawSocket (),
|
||||
|
||||
@@ -645,7 +645,7 @@ Json::Value RPCHandler::doConnect (Json::Value params, LoadType* loadType, Scope
|
||||
int iPort = params.isMember ("port") ? params["port"].asInt () : -1;
|
||||
|
||||
// XXX Validate legal IP and port
|
||||
theApp->getPeers ().connectTo (strIp, iPort);
|
||||
getApp().getPeers ().connectTo (strIp, iPort);
|
||||
|
||||
return "connecting";
|
||||
}
|
||||
@@ -663,7 +663,7 @@ Json::Value RPCHandler::doDataDelete (Json::Value params, LoadType* loadType, Sc
|
||||
|
||||
Json::Value ret = Json::Value (Json::objectValue);
|
||||
|
||||
if (theApp->getLocalCredentials ().dataDelete (strKey))
|
||||
if (getApp().getLocalCredentials ().dataDelete (strKey))
|
||||
{
|
||||
ret["key"] = strKey;
|
||||
}
|
||||
@@ -692,7 +692,7 @@ Json::Value RPCHandler::doDataFetch (Json::Value params, LoadType* loadType, Sco

ret["key"] = strKey;

if (theApp->getLocalCredentials ().dataFetch (strKey, strValue))
if (getApp().getLocalCredentials ().dataFetch (strKey, strValue))
ret["value"] = strValue;

return ret;
@@ -715,7 +715,7 @@ Json::Value RPCHandler::doDataStore (Json::Value params, LoadType* loadType, Sco

Json::Value ret = Json::Value (Json::objectValue);

if (theApp->getLocalCredentials ().dataStore (strKey, strValue))
if (getApp().getLocalCredentials ().dataStore (strKey, strValue))
{
ret["key"] = strKey;
ret["value"] = strValue;
@@ -794,7 +794,7 @@ Json::Value RPCHandler::doPeers (Json::Value, LoadType* loadType, ScopedLock& Ma
{
Json::Value jvResult (Json::objectValue);

jvResult["peers"] = theApp->getPeers ().getPeersJson ();
jvResult["peers"] = getApp().getPeers ().getPeersJson ();

return jvResult;
}
@@ -937,7 +937,7 @@ Json::Value RPCHandler::doProofCreate (Json::Value params, LoadType* loadType, S
}
else
{
jvResult["token"] = theApp->getProofOfWorkFactory ().getProof ().getToken ();
jvResult["token"] = getApp().getProofOfWorkFactory ().getProof ().getToken ();
}

return jvResult;
@@ -1025,7 +1025,7 @@ Json::Value RPCHandler::doProofVerify (Json::Value params, LoadType* loadType, S
else
{
// XXX Proof should not be marked as used from this
prResult = theApp->getProofOfWorkFactory ().checkProof (strToken, uSolution);
prResult = getApp().getProofOfWorkFactory ().checkProof (strToken, uSolution);
}

std::string sToken;
@@ -1216,7 +1216,7 @@ Json::Value RPCHandler::doAccountOffers (Json::Value params, LoadType* loadType,
// }
Json::Value RPCHandler::doBookOffers (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder)
{
if (theApp->getJobQueue ().getJobCountGE (jtCLIENT) > 200)
if (getApp().getJobQueue ().getJobCountGE (jtCLIENT) > 200)
{
return rpcError (rpcTOO_BUSY);
}
@@ -1355,7 +1355,7 @@ Json::Value RPCHandler::doPathFind (Json::Value params, LoadType* loadType, Scop
if (request->isValid ())
{
mInfoSub->setPathRequest (request);
theApp->getLedgerMaster ().newPathRequest ();
getApp().getLedgerMaster ().newPathRequest ();
}

return result;
@@ -1393,7 +1393,7 @@ Json::Value RPCHandler::doPathFind (Json::Value params, LoadType* loadType, Scop
// - From a trusted server, allows clients to use path without manipulation.
Json::Value RPCHandler::doRipplePathFind (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder)
{
int jc = theApp->getJobQueue ().getJobCountGE (jtCLIENT);
int jc = getApp().getJobQueue ().getJobCountGE (jtCLIENT);

if (jc > 200)
{
@@ -1775,8 +1775,8 @@ Json::Value RPCHandler::doTxHistory (Json::Value params, LoadType* loadType, Sco
% startIndex);

{
Database* db = theApp->getTxnDB ()->getDB ();
ScopedLock sl (theApp->getTxnDB ()->getDBLock ());
Database* db = getApp().getTxnDB ()->getDB ();
ScopedLock sl (getApp().getTxnDB ()->getDBLock ());

SQL_FOREACH (db, sql)
{
@@ -1808,7 +1808,7 @@ Json::Value RPCHandler::doTx (Json::Value params, LoadType* loadType, ScopedLock
// transaction by ID
uint256 txid (strTransaction);

Transaction::pointer txn = theApp->getMasterTransaction ().fetch (txid, true);
Transaction::pointer txn = getApp().getMasterTransaction ().fetch (txid, true);

if (!txn)
return rpcError (rpcTXN_NOT_FOUND);
@@ -1893,8 +1893,8 @@ Json::Value RPCHandler::doLedger (Json::Value params, LoadType* loadType, Scoped
{
Json::Value ret (Json::objectValue), current (Json::objectValue), closed (Json::objectValue);

theApp->getLedgerMaster ().getCurrentLedger ()->addJson (current, 0);
theApp->getLedgerMaster ().getClosedLedger ()->addJson (closed, 0);
getApp().getLedgerMaster ().getCurrentLedger ()->addJson (current, 0);
getApp().getLedgerMaster ().getClosedLedger ()->addJson (closed, 0);

ret["open"] = current;
ret["closed"] = closed;
@@ -2343,11 +2343,11 @@ Json::Value RPCHandler::doFeature (Json::Value params, LoadType* loadType, Scope
if (!params.isMember ("feature"))
{
Json::Value jvReply = Json::objectValue;
jvReply["features"] = theApp->getFeatureTable ().getJson (0);
jvReply["features"] = getApp().getFeatureTable ().getJson (0);
return jvReply;
}

uint256 uFeature = theApp->getFeatureTable ().getFeature (params["feature"].asString ());
uint256 uFeature = getApp().getFeatureTable ().getFeature (params["feature"].asString ());

if (uFeature.isZero ())
{
@@ -2358,7 +2358,7 @@ Json::Value RPCHandler::doFeature (Json::Value params, LoadType* loadType, Scope
}

if (!params.isMember ("vote"))
return theApp->getFeatureTable ().getJson (uFeature);
return getApp().getFeatureTable ().getJson (uFeature);

// WRITEME
return rpcError (rpcNOT_SUPPORTED);
@@ -2383,34 +2383,34 @@ Json::Value RPCHandler::doGetCounts (Json::Value params, LoadType* loadType, Sco
ret [it.first] = it.second;
}

int dbKB = theApp->getLedgerDB ()->getDB ()->getKBUsedAll ();
int dbKB = getApp().getLedgerDB ()->getDB ()->getKBUsedAll ();

if (dbKB > 0)
ret["dbKBTotal"] = dbKB;

dbKB = theApp->getLedgerDB ()->getDB ()->getKBUsedDB ();
dbKB = getApp().getLedgerDB ()->getDB ()->getKBUsedDB ();

if (dbKB > 0)
ret["dbKBLedger"] = dbKB;

if (!theApp->getHashedObjectStore ().isLevelDB ())
if (!getApp().getHashedObjectStore ().isLevelDB ())
{
dbKB = theApp->getHashNodeDB ()->getDB ()->getKBUsedDB ();
dbKB = getApp().getHashNodeDB ()->getDB ()->getKBUsedDB ();

if (dbKB > 0)
ret["dbKBHashNode"] = dbKB;
}

dbKB = theApp->getTxnDB ()->getDB ()->getKBUsedDB ();
dbKB = getApp().getTxnDB ()->getDB ()->getKBUsedDB ();

if (dbKB > 0)
ret["dbKBTransaction"] = dbKB;

ret["write_load"] = theApp->getHashedObjectStore ().getWriteLoad ();
ret["write_load"] = getApp().getHashedObjectStore ().getWriteLoad ();

ret["SLE_hit_rate"] = theApp->getSLECache ().getHitRate ();
ret["node_hit_rate"] = theApp->getHashedObjectStore ().getCacheHitRate ();
ret["ledger_hit_rate"] = theApp->getLedgerMaster ().getCacheHitRate ();
ret["SLE_hit_rate"] = getApp().getSLECache ().getHitRate ();
ret["node_hit_rate"] = getApp().getHashedObjectStore ().getCacheHitRate ();
ret["ledger_hit_rate"] = getApp().getLedgerMaster ().getCacheHitRate ();
ret["AL_hit_rate"] = AcceptedLedger::getCacheHitRate ();

ret["fullbelow_size"] = SHAMap::getFullBelowSize ();
@@ -2489,13 +2489,13 @@ Json::Value RPCHandler::doUnlAdd (Json::Value params, LoadType* loadType, Scoped

if (raNodePublic.setNodePublic (strNode))
{
theApp->getUNL ().nodeAddPublic (raNodePublic, IUniqueNodeList::vsManual, strComment);
getApp().getUNL ().nodeAddPublic (raNodePublic, IUniqueNodeList::vsManual, strComment);

return "adding node by public key";
}
else
{
theApp->getUNL ().nodeAddDomain (strNode, IUniqueNodeList::vsManual, strComment);
getApp().getUNL ().nodeAddDomain (strNode, IUniqueNodeList::vsManual, strComment);

return "adding node by domain";
}
@@ -2515,13 +2515,13 @@ Json::Value RPCHandler::doUnlDelete (Json::Value params, LoadType* loadType, Sco

if (raNodePublic.setNodePublic (strNode))
{
theApp->getUNL ().nodeRemovePublic (raNodePublic);
getApp().getUNL ().nodeRemovePublic (raNodePublic);

return "removing node by public key";
}
else
{
theApp->getUNL ().nodeRemoveDomain (strNode);
getApp().getUNL ().nodeRemoveDomain (strNode);

return "removing node by domain";
}
@@ -2531,7 +2531,7 @@ Json::Value RPCHandler::doUnlList (Json::Value, LoadType* loadType, ScopedLock&
{
Json::Value obj (Json::objectValue);

obj["unl"] = theApp->getUNL ().getUnlJson ();
obj["unl"] = getApp().getUNL ().getUnlJson ();

return obj;
}
@@ -2539,7 +2539,7 @@ Json::Value RPCHandler::doUnlList (Json::Value, LoadType* loadType, ScopedLock&
// Populate the UNL from a local validators.txt file.
Json::Value RPCHandler::doUnlLoad (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder)
{
if (theConfig.VALIDATORS_FILE.empty () || !theApp->getUNL ().nodeLoad (theConfig.VALIDATORS_FILE))
if (theConfig.VALIDATORS_FILE.empty () || !getApp().getUNL ().nodeLoad (theConfig.VALIDATORS_FILE))
{
return rpcError (rpcLOAD_FAILED);
}
@@ -2551,7 +2551,7 @@ Json::Value RPCHandler::doUnlLoad (Json::Value, LoadType* loadType, ScopedLock&
// Populate the UNL from ripple.com's validators.txt file.
Json::Value RPCHandler::doUnlNetwork (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder)
{
theApp->getUNL ().nodeNetwork ();
getApp().getUNL ().nodeNetwork ();

return "fetching";
}
@@ -2559,7 +2559,7 @@ Json::Value RPCHandler::doUnlNetwork (Json::Value params, LoadType* loadType, Sc
// unl_reset
Json::Value RPCHandler::doUnlReset (Json::Value params, LoadType* loadType, ScopedLock& MasterLockHolder)
{
theApp->getUNL ().nodeReset ();
getApp().getUNL ().nodeReset ();

return "removing nodes";
}
@@ -2567,7 +2567,7 @@ Json::Value RPCHandler::doUnlReset (Json::Value params, LoadType* loadType, Scop
// unl_score
Json::Value RPCHandler::doUnlScore (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder)
{
theApp->getUNL ().nodeScore ();
getApp().getUNL ().nodeScore ();

return "scoring requested";
}
@@ -2577,13 +2577,13 @@ Json::Value RPCHandler::doSMS (Json::Value params, LoadType* loadType, ScopedLoc
if (!params.isMember ("text"))
return rpcError (rpcINVALID_PARAMS);

HttpsClient::sendSMS (theApp->getIOService (), params["text"].asString ());
HttpsClient::sendSMS (getApp().getIOService (), params["text"].asString ());

return "sms dispatched";
}
Json::Value RPCHandler::doStop (Json::Value, LoadType* loadType, ScopedLock& MasterLockHolder)
{
theApp->stop ();
getApp().stop ();

return SYSTEM_NAME " server stopping";
}
@@ -2729,7 +2729,7 @@ Json::Value RPCHandler::lookupLedger (Json::Value params, Ledger::pointer& lpLed
break;

case LEDGER_CLOSED:
lpLedger = theApp->getLedgerMaster ().getClosedLedger ();
lpLedger = getApp().getLedgerMaster ().getClosedLedger ();
iLedgerIndex = lpLedger->getLedgerSeq ();
assert (lpLedger->isImmutable () && lpLedger->isClosed ());
break;
@@ -3299,7 +3299,7 @@ Json::Value RPCHandler::doSubscribe (Json::Value params, LoadType* loadType, Sco

if (bSnapshot)
{
Ledger::pointer lpLedger = theApp->getLedgerMaster ().getClosedLedger ();
Ledger::pointer lpLedger = getApp().getLedgerMaster ().getClosedLedger ();
const Json::Value jvMarker = Json::Value (Json::nullValue);

if (bBoth)
@@ -3562,7 +3562,7 @@ Json::Value RPCHandler::doCommand (const Json::Value& params, int iRole, LoadTyp
{
if (iRole != ADMIN)
{
int jc = theApp->getJobQueue ().getJobCountGE (jtCLIENT);
int jc = getApp().getJobQueue ().getJobCountGE (jtCLIENT);

if (jc > 500)
{
@@ -3671,7 +3671,7 @@ Json::Value RPCHandler::doCommand (const Json::Value& params, int iRole, LoadTyp
return rpcError (rpcNO_PERMISSION);
}

ScopedLock MasterLockHolder (theApp->getMasterLock ());
ScopedLock MasterLockHolder (getApp().getMasterLock ());

if ((commandsA[i].iOptions & optNetwork) && (mNetOps->getOperatingMode () < NetworkOPs::omSYNCING))
{

@@ -75,7 +75,7 @@ void RPCSub::sendThread ()
WriteLog (lsINFO, RPCSub) << boost::str (boost::format ("callRPC calling: %s") % mIp);

callRPC (
theApp->getIOService (),
getApp().getIOService (),
mIp, mPort,
mUsername, mPassword,
mPath, "event",

@@ -154,8 +154,8 @@ void Transaction::save ()
status = TXN_SQL_UNKNOWN;
}

Database* db = theApp->getTxnDB ()->getDB ();
ScopedLock dbLock (theApp->getTxnDB ()->getDBLock ());
Database* db = getApp().getTxnDB ()->getDB ();
ScopedLock dbLock (getApp().getTxnDB ()->getDBLock ());
db->executeSQL (mTransaction->getSQLInsertReplaceHeader () + mTransaction->getSQL (getLedger (), status) + ";");
}

@@ -231,8 +231,8 @@ Transaction::pointer Transaction::transactionFromSQL (const std::string& sql)
rawTxn.resize (txSize);

{
ScopedLock sl (theApp->getTxnDB ()->getDBLock ());
Database* db = theApp->getTxnDB ()->getDB ();
ScopedLock sl (getApp().getTxnDB ()->getDBLock ());
Database* db = getApp().getTxnDB ()->getDB ();

if (!db->executeSQL (sql, true) || !db->startIterRows ())
return Transaction::pointer ();
@@ -365,7 +365,7 @@ Json::Value Transaction::getJson (int options, bool binary) const

if (options == 1)
{
Ledger::pointer ledger = theApp->getLedgerMaster ().getLedgerBySeq (mInLedger);
Ledger::pointer ledger = getApp().getLedgerMaster ().getLedgerBySeq (mInLedger);

if (ledger)
ret["date"] = ledger->getCloseTimeNC ();

@@ -49,7 +49,7 @@ SerializedTransaction::pointer TransactionMaster::fetch (SHAMapItem::ref item, S
bool checkDisk, uint32 uCommitLedger)
{
SerializedTransaction::pointer txn;
Transaction::pointer iTx = theApp->getMasterTransaction ().fetch (item->getTag (), false);
Transaction::pointer iTx = getApp().getMasterTransaction ().fetch (item->getTag (), false);

if (!iTx)
{

@@ -37,7 +37,7 @@ public:
// mConnection(connection_ptr()) { ; }

WSConnection (WSServerHandler<endpoint_type>* wshpHandler, const connection_ptr& cpConnection)
: mHandler (wshpHandler), mConnection (cpConnection), mNetwork (theApp->getOPs ()),
: mHandler (wshpHandler), mConnection (cpConnection), mNetwork (getApp().getOPs ()),
mRemoteIP (cpConnection->get_socket ().lowest_layer ().remote_endpoint ().address ().to_string ()),
mLoadSource (mRemoteIP), mPingTimer (cpConnection->get_io_service ()), mPinged (false),
mRcvQueueRunning (false), mDead (false)
@@ -86,7 +86,7 @@ public:
// Utilities
Json::Value invokeCommand (Json::Value& jvRequest)
{
if (theApp->getLoadManager ().shouldCutoff (mLoadSource))
if (getApp().getLoadManager ().shouldCutoff (mLoadSource))
{
// VFALCO TODO This must be implemented before open sourcing

@@ -117,7 +117,7 @@ public:
jvResult["id"] = jvRequest["id"];
}

theApp->getLoadManager ().applyLoadCharge (mLoadSource, LT_RPCInvalid);
getApp().getLoadManager ().applyLoadCharge (mLoadSource, LT_RPCInvalid);

return jvResult;
}
@@ -141,8 +141,8 @@ public:

// Debit/credit the load and see if we should include a warning.
//
if (theApp->getLoadManager ().applyLoadCharge (mLoadSource, loadType) &&
theApp->getLoadManager ().shouldWarn (mLoadSource))
if (getApp().getLoadManager ().applyLoadCharge (mLoadSource, loadType) &&
getApp().getLoadManager ().shouldWarn (mLoadSource))
{
jvResult["warning"] = "load";
}

@@ -98,5 +98,3 @@ void WSDoor::stop ()
mThread->join ();
}
}

// vim:ts=4

@@ -198,7 +198,7 @@ public:
ptr->preDestroy (); // Must be done before we return

// Must be done without holding the websocket send lock
theApp->getJobQueue ().addJob (jtCLIENT, "WSClient::destroy",
getApp().getJobQueue ().addJob (jtCLIENT, "WSClient::destroy",
BIND_TYPE (&WSConnection<endpoint_type>::destroy, ptr));
}

@@ -232,7 +232,7 @@ public:
}

if (bRunQ)
theApp->getJobQueue ().addJob (jtCLIENT, "WSClient::command",
getApp().getJobQueue ().addJob (jtCLIENT, "WSClient::command",
BIND_TYPE (&WSServerHandler<endpoint_type>::do_messages, this, P_1, cpClient));
}

@@ -259,7 +259,7 @@ public:
do_message (job, cpClient, ptr, msg);
}

theApp->getJobQueue ().addJob (jtCLIENT, "WSClient::more",
getApp().getJobQueue ().addJob (jtCLIENT, "WSClient::more",
BIND_TYPE (&WSServerHandler<endpoint_type>::do_messages, this, P_1, cpClient));
}

@@ -7,18 +7,18 @@
// VFALCO TODO Clean this global up
volatile bool doShutdown = false;

// VFALCO TODO Wrap this up in something neater.
//IApplication* theApp = nullptr;
ScopedPointer <IApplication> theApp;

class Application;

SETUP_LOG (Application)

// VFALCO TODO Move the function definitions into the class declaration
class Application : public IApplication
class Application
: public IApplication
, LeakChecked <Application>
{
public:
class Holder;

Application ();
~Application ();

@@ -249,10 +249,10 @@ private:
leveldb::DB* mHashNodeLDB;
leveldb::DB* mEphemeralLDB;

PeerDoor* mPeerDoor;
RPCDoor* mRPCDoor;
WSDoor* mWSPublicDoor;
WSDoor* mWSPrivateDoor;
ScopedPointer <PeerDoor> mPeerDoor;
ScopedPointer <RPCDoor> mRPCDoor;
ScopedPointer <WSDoor> mWSPublicDoor;
ScopedPointer <WSDoor> mWSPrivateDoor;

boost::asio::deadline_timer mSweepTimer;

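The door members above change from raw pointers to ScopedPointer, so they are released automatically when the Application object is destroyed rather than requiring manual deletes on exit. A minimal sketch of that ownership pattern, using std::unique_ptr and hypothetical Door types in place of the project's ScopedPointer, could look like this:

    #include <memory>

    // Hypothetical stand-ins for the real door classes.
    struct PeerDoor { };
    struct RPCDoor  { };

    class Application
    {
    public:
        Application ()
            : mPeerDoor (new PeerDoor)
            , mRPCDoor (new RPCDoor)
        {
        }

        // No explicit deletes needed: members are destroyed automatically,
        // in reverse declaration order, when Application is destroyed.

    private:
        std::unique_ptr <PeerDoor> mPeerDoor;
        std::unique_ptr <RPCDoor>  mRPCDoor;
    };

    int main ()
    {
        Application app;    // doors are created here and cleaned up at scope exit
    }
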
@@ -460,14 +460,14 @@ void Application::setup ()

if (!mHashedObjectStore.isLevelDB ())
{
theApp->getHashNodeDB ()->getDB ()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
getApp().getHashNodeDB ()->getDB ()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
(theConfig.getSize (siHashNodeDBCache) * 1024)));
theApp->getHashNodeDB ()->getDB ()->setupCheckpointing (&mJobQueue);
getApp().getHashNodeDB ()->getDB ()->setupCheckpointing (&mJobQueue);
}

theApp->getLedgerDB ()->getDB ()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
getApp().getLedgerDB ()->getDB ()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
(theConfig.getSize (siLgrDBCache) * 1024)));
theApp->getTxnDB ()->getDB ()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
getApp().getTxnDB ()->getDB ()->executeSQL (boost::str (boost::format ("PRAGMA cache_size=-%d;") %
(theConfig.getSize (siTxnDBCache) * 1024)));

mTxnDB->getDB ()->setupCheckpointing (&mJobQueue);
@@ -490,7 +490,7 @@ void Application::setup ()

if (!loadOldLedger (theConfig.START_LEDGER))
{
theApp->stop ();
getApp().stop ();
exit (-1);
}
}
@@ -505,7 +505,7 @@ void Application::setup ()
else
startNewLedger ();

mOrderBookDB.setup (theApp->getLedgerMaster ().getCurrentLedger ());
mOrderBookDB.setup (getApp().getLedgerMaster ().getCurrentLedger ());

//
// Begin validation and ip maintenance.
@@ -647,7 +647,7 @@ void Application::run ()
// VFALCO NOTE This seems unnecessary. If we properly refactor the load
// manager then the deadlock detector can just always be "armed"
//
theApp->getLoadManager ().activateDeadlockDetector ();
getApp().getLoadManager ().activateDeadlockDetector ();
}

mIOService.run (); // This blocks
@@ -670,7 +670,7 @@ void Application::sweep ()
if (space.available < (512 * 1024 * 1024))
{
WriteLog (lsFATAL, Application) << "Remaining free disk space is less than 512MB";
theApp->stop ();
getApp().stop ();
}

// VFALCO NOTE Does the order of calls matter?
@@ -809,37 +809,39 @@ bool serverOkay (std::string& reason)
if (!theConfig.ELB_SUPPORT)
return true;

/*
if (!theApp)
{
reason = "Server has not started";
return false;
}
*/

if (theApp->isShutdown ())
if (getApp().isShutdown ())
{
reason = "Server is shutting down";
return false;
}

if (theApp->getOPs ().isNeedNetworkLedger ())
if (getApp().getOPs ().isNeedNetworkLedger ())
{
reason = "Not synchronized with network yet";
return false;
}

if (theApp->getOPs ().getOperatingMode () < NetworkOPs::omSYNCING)
if (getApp().getOPs ().getOperatingMode () < NetworkOPs::omSYNCING)
{
reason = "Not synchronized with network";
return false;
}

if (theApp->getFeeTrack ().isLoaded ())
if (getApp().getFeeTrack ().isLoaded ())
{
reason = "Too much load";
return false;
}

if (theApp->getOPs ().isFeatureBlocked ())
if (getApp().getOPs ().isFeatureBlocked ())
{
reason = "Server version too old";
return false;
@@ -882,12 +884,12 @@ static bool schemaHas (DatabaseCon* dbc, const std::string& dbName, int line, co

static void addTxnSeqField ()
{
if (schemaHas (theApp->getTxnDB (), "AccountTransactions", 0, "TxnSeq"))
if (schemaHas (getApp().getTxnDB (), "AccountTransactions", 0, "TxnSeq"))
return;

Log (lsWARNING) << "Transaction sequence field is missing";

Database* db = theApp->getTxnDB ()->getDB ();
Database* db = getApp().getTxnDB ()->getDB ();

std::vector< std::pair<uint256, int> > txIDs;
txIDs.reserve (300000);
@@ -957,18 +959,18 @@ static void addTxnSeqField ()
void Application::updateTables (bool ldbImport)
{
// perform any needed table updates
assert (schemaHas (theApp->getTxnDB (), "AccountTransactions", 0, "TransID"));
assert (!schemaHas (theApp->getTxnDB (), "AccountTransactions", 0, "foobar"));
assert (schemaHas (getApp().getTxnDB (), "AccountTransactions", 0, "TransID"));
assert (!schemaHas (getApp().getTxnDB (), "AccountTransactions", 0, "foobar"));
addTxnSeqField ();

if (schemaHas (theApp->getTxnDB (), "AccountTransactions", 0, "PRIMARY"))
if (schemaHas (getApp().getTxnDB (), "AccountTransactions", 0, "PRIMARY"))
{
Log (lsFATAL) << "AccountTransactions database should not have a primary key";
StopSustain ();
exit (1);
}

if (theApp->getHashedObjectStore ().isLevelDB ())
if (getApp().getHashedObjectStore ().isLevelDB ())
{
boost::filesystem::path hashPath = theConfig.DATA_DIR / "hashnode.db";

@@ -977,7 +979,7 @@ void Application::updateTables (bool ldbImport)
if (theConfig.LDB_IMPORT)
{
Log (lsWARNING) << "Importing SQLite -> LevelDB";
theApp->getHashedObjectStore ().import (hashPath.string ());
getApp().getHashedObjectStore ().import (hashPath.string ());
Log (lsWARNING) << "Remove or remname the hashnode.db file";
}
else
@@ -991,8 +993,39 @@ void Application::updateTables (bool ldbImport)
}
}

IApplication* IApplication::New ()
{
return new Application;
}
//------------------------------------------------------------------------------

// Since its an global with static storage duration
// it has to be wrapped in a SharedSingleton.
//
class Application::Holder : public SharedSingleton <Application::Holder>
{
public:
Holder ()
: SharedSingleton <Holder> (SingletonLifetime::persistAfterCreation)
, m_application (new Application)
{
}

~Holder ()
{
}

static Holder* createInstance ()
{
return new Holder;
}

IApplication& getApp ()
{
return *m_application;
}

private:
ScopedPointer <IApplication> m_application;
};

IApplication& getApp ()
{
return Application::Holder::getInstance ()->getApp ();
}

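Callers now reach the application through the free function getApp(), and the Holder above owns the Application instance and controls when it is created and torn down. The sketch below shows the general shape of such an accessor; it uses a function-local static and std::unique_ptr rather than the SharedSingleton / ScopedPointer machinery the commit actually relies on, so the names and lifetime details here are illustrative only:

    #include <memory>

    struct IApplication
    {
        virtual ~IApplication () { }
    };

    struct Application : IApplication
    {
        // concrete application; its members are torn down by ~Application
    };

    class AppHolder
    {
    public:
        // Constructed on the first call to getApp(); destroyed at static teardown.
        static AppHolder& instance ()
        {
            static AppHolder holder;
            return holder;
        }

        IApplication& get () { return *m_app; }

    private:
        AppHolder () : m_app (new Application) { }

        std::unique_ptr <IApplication> m_app;
    };

    IApplication& getApp ()
    {
        return AppHolder::instance ().get ();
    }
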
@@ -9,7 +9,7 @@

// VFALCO NOTE This looks like a pointless class. Figure out
// what purpose it is really trying to serve and do it better.
class DatabaseCon
class DatabaseCon : LeakChecked <DatabaseCon>
{
public:
DatabaseCon (const std::string& name, const char* initString[], int countInit);

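DatabaseCon now also derives from LeakChecked, a curiously-recurring-template mixin that tracks how many instances of a class are still alive so leaks can be flagged at shutdown. The project's own LeakChecked is not shown in this diff; a simplified stand-in for the idea, with hypothetical reporting, might be:

    #include <atomic>
    #include <cstdio>

    template <class Derived>
    class LeakChecked
    {
    protected:
        LeakChecked ()                   { ++count (); }
        LeakChecked (LeakChecked const&) { ++count (); }
        ~LeakChecked ()                  { --count (); }

    public:
        // Call at shutdown to flag objects that were never destroyed.
        static void report (char const* name)
        {
            int const leaked = count ().load ();

            if (leaked > 0)
                std::fprintf (stderr, "%s: %d leaked instance(s)\n", name, leaked);
        }

    private:
        static std::atomic <int>& count ()
        {
            static std::atomic <int> instances (0);
            return instances;
        }
    };

    class DatabaseCon : private LeakChecked <DatabaseCon>
    {
        // database connection members would go here
    };
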
@@ -94,8 +94,8 @@ FeatureState* Features::getCreateFeature (uint256 const& featureHash, bool creat
|
||||
query.append (featureHash.GetHex ());
|
||||
query.append ("';");
|
||||
|
||||
ScopedLock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
ScopedLock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
|
||||
if (db->executeSQL (query) && db->startIterRows ())
|
||||
{
|
||||
@@ -327,8 +327,8 @@ void Features::reportValidations (const FeatureSet& set)
|
||||
|
||||
if (!changedFeatures.empty ())
|
||||
{
|
||||
ScopedLock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
ScopedLock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
|
||||
db->executeSQL ("BEGIN TRANSACTION;");
|
||||
BOOST_FOREACH (uint256 const & hash, changedFeatures)
|
||||
|
||||
@@ -105,7 +105,7 @@ public:
|
||||
VotableInteger<uint32> incReserveVote (lastClosedLedger->getReserveInc (), mTargetReserveIncrement);
|
||||
|
||||
// get validations for ledger before flag
|
||||
ValidationSet set = theApp->getValidations ().getValidations (lastClosedLedger->getParentHash ());
|
||||
ValidationSet set = getApp().getValidations ().getValidations (lastClosedLedger->getParentHash ());
|
||||
BOOST_FOREACH (ValidationSet::value_type const & value, set)
|
||||
{
|
||||
SerializedValidation const& val = *value.second;
|
||||
|
||||
@@ -123,7 +123,7 @@ bool HashedObjectStore::storeLevelDB (HashedObjectType type, uint32 index,
|
||||
Blob const& data, uint256 const& hash)
|
||||
{
|
||||
// return: false = already in cache, true = added to cache
|
||||
if (!theApp->getHashNodeLDB ())
|
||||
if (!getApp().getHashNodeLDB ())
|
||||
return true;
|
||||
|
||||
if (mCache.touch (hash))
|
||||
@@ -143,7 +143,7 @@ bool HashedObjectStore::storeLevelDB (HashedObjectType type, uint32 index,
|
||||
if (!mWritePending)
|
||||
{
|
||||
mWritePending = true;
|
||||
theApp->getJobQueue ().addJob (jtWRITE, "HashedObject::store",
|
||||
getApp().getJobQueue ().addJob (jtWRITE, "HashedObject::store",
|
||||
BIND_TYPE (&HashedObjectStore::bulkWriteLevelDB, this, P_1));
|
||||
}
|
||||
}
|
||||
@@ -181,10 +181,10 @@ void HashedObjectStore::bulkWriteLevelDB (Job&)
|
||||
setSize = set.size ();
|
||||
}
|
||||
|
||||
LLWrite (set, theApp->getHashNodeLDB ());
|
||||
LLWrite (set, getApp().getHashNodeLDB ());
|
||||
|
||||
if (mEphemeralDB)
|
||||
LLWrite (set, theApp->getEphemeralLDB ());
|
||||
LLWrite (set, getApp().getEphemeralLDB ());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -192,12 +192,12 @@ HashedObject::pointer HashedObjectStore::retrieveLevelDB (uint256 const& hash)
|
||||
{
|
||||
HashedObject::pointer obj = mCache.fetch (hash);
|
||||
|
||||
if (obj || mNegativeCache.isPresent (hash) || !theApp || !theApp->getHashNodeLDB ())
|
||||
if (obj || mNegativeCache.isPresent (hash) || !getApp().getHashNodeLDB ())
|
||||
return obj;
|
||||
|
||||
if (mEphemeralDB)
|
||||
{
|
||||
obj = LLRetrieve (hash, theApp->getEphemeralLDB ());
|
||||
obj = LLRetrieve (hash, getApp().getEphemeralLDB ());
|
||||
|
||||
if (obj)
|
||||
{
|
||||
@@ -207,8 +207,8 @@ HashedObject::pointer HashedObjectStore::retrieveLevelDB (uint256 const& hash)
|
||||
}
|
||||
|
||||
{
|
||||
LoadEvent::autoptr event (theApp->getJobQueue ().getLoadEventAP (jtHO_READ, "HOS::retrieve"));
|
||||
obj = LLRetrieve (hash, theApp->getHashNodeLDB ());
|
||||
LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtHO_READ, "HOS::retrieve"));
|
||||
obj = LLRetrieve (hash, getApp().getHashNodeLDB ());
|
||||
|
||||
if (!obj)
|
||||
{
|
||||
@@ -220,7 +220,7 @@ HashedObject::pointer HashedObjectStore::retrieveLevelDB (uint256 const& hash)
|
||||
mCache.canonicalize (hash, obj);
|
||||
|
||||
if (mEphemeralDB)
|
||||
LLWrite (obj, theApp->getEphemeralLDB ());
|
||||
LLWrite (obj, getApp().getEphemeralLDB ());
|
||||
|
||||
WriteLog (lsTRACE, HashedObject) << "HOS: " << hash << " fetch: in db";
|
||||
return obj;
|
||||
@@ -230,7 +230,7 @@ bool HashedObjectStore::storeSQLite (HashedObjectType type, uint32 index,
|
||||
Blob const& data, uint256 const& hash)
|
||||
{
|
||||
// return: false = already in cache, true = added to cache
|
||||
if (!theApp->getHashNodeDB ())
|
||||
if (!getApp().getHashNodeDB ())
|
||||
{
|
||||
WriteLog (lsTRACE, HashedObject) << "HOS: no db";
|
||||
return true;
|
||||
@@ -255,7 +255,7 @@ bool HashedObjectStore::storeSQLite (HashedObjectType type, uint32 index,
|
||||
if (!mWritePending)
|
||||
{
|
||||
mWritePending = true;
|
||||
theApp->getJobQueue ().addJob (jtWRITE, "HashedObject::store",
|
||||
getApp().getJobQueue ().addJob (jtWRITE, "HashedObject::store",
|
||||
BIND_TYPE (&HashedObjectStore::bulkWriteSQLite, this, P_1));
|
||||
}
|
||||
}
|
||||
@@ -299,10 +299,10 @@ void HashedObjectStore::bulkWriteSQLite (Job&)
|
||||
#ifndef NO_SQLITE3_PREPARE
|
||||
|
||||
if (mEphemeralDB)
|
||||
LLWrite (set, theApp->getEphemeralLDB ());
|
||||
LLWrite (set, getApp().getEphemeralLDB ());
|
||||
|
||||
{
|
||||
Database* db = theApp->getHashNodeDB ()->getDB ();
|
||||
Database* db = getApp().getHashNodeDB ()->getDB ();
|
||||
|
||||
|
||||
// VFALCO TODO Get rid of the last parameter "aux", which is set to !theConfig.RUN_STANDALONE
|
||||
@@ -367,9 +367,9 @@ void HashedObjectStore::bulkWriteSQLite (Job&)
|
||||
fAdd ("INSERT OR IGNORE INTO CommittedObjects "
|
||||
"(Hash,ObjType,LedgerIndex,Object) VALUES ('%s','%c','%u',%s);");
|
||||
|
||||
Database* db = theApp->getHashNodeDB ()->getDB ();
|
||||
Database* db = getApp().getHashNodeDB ()->getDB ();
|
||||
{
|
||||
ScopedLock sl (theApp->getHashNodeDB ()->getDBLock ());
|
||||
ScopedLock sl (getApp().getHashNodeDB ()->getDBLock ());
|
||||
|
||||
db->executeSQL ("BEGIN TRANSACTION;");
|
||||
|
||||
@@ -422,7 +422,7 @@ HashedObject::pointer HashedObjectStore::retrieveSQLite (uint256 const& hash)
|
||||
|
||||
if (mEphemeralDB)
|
||||
{
|
||||
obj = LLRetrieve (hash, theApp->getEphemeralLDB ());
|
||||
obj = LLRetrieve (hash, getApp().getEphemeralLDB ());
|
||||
|
||||
if (obj)
|
||||
{
|
||||
@@ -431,7 +431,7 @@ HashedObject::pointer HashedObjectStore::retrieveSQLite (uint256 const& hash)
|
||||
}
|
||||
}
|
||||
|
||||
if (!theApp || !theApp->getHashNodeDB ())
|
||||
if (!getApp().getHashNodeDB ())
|
||||
return obj;
|
||||
|
||||
Blob data;
|
||||
@@ -440,10 +440,10 @@ HashedObject::pointer HashedObjectStore::retrieveSQLite (uint256 const& hash)
|
||||
|
||||
#ifndef NO_SQLITE3_PREPARE
|
||||
{
|
||||
ScopedLock sl (theApp->getHashNodeDB ()->getDBLock ());
|
||||
static SqliteStatement pSt (theApp->getHashNodeDB ()->getDB ()->getSqliteDB (),
|
||||
ScopedLock sl (getApp().getHashNodeDB ()->getDBLock ());
|
||||
static SqliteStatement pSt (getApp().getHashNodeDB ()->getDB ()->getSqliteDB (),
|
||||
"SELECT ObjType,LedgerIndex,Object FROM CommittedObjects WHERE Hash = ?;");
|
||||
LoadEvent::autoptr event (theApp->getJobQueue ().getLoadEventAP (jtDISK, "HOS::retrieve"));
|
||||
LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtDISK, "HOS::retrieve"));
|
||||
|
||||
pSt.bind (1, hash.GetHex ());
|
||||
int ret = pSt.step ();
|
||||
@@ -470,8 +470,8 @@ HashedObject::pointer HashedObjectStore::retrieveSQLite (uint256 const& hash)
|
||||
|
||||
|
||||
{
|
||||
ScopedLock sl (theApp->getHashNodeDB ()->getDBLock ());
|
||||
Database* db = theApp->getHashNodeDB ()->getDB ();
|
||||
ScopedLock sl (getApp().getHashNodeDB ()->getDBLock ());
|
||||
Database* db = getApp().getHashNodeDB ()->getDB ();
|
||||
|
||||
if (!db->executeSQL (sql) || !db->startIterRows ())
|
||||
{
|
||||
@@ -525,7 +525,7 @@ HashedObject::pointer HashedObjectStore::retrieveSQLite (uint256 const& hash)
|
||||
mCache.canonicalize (hash, obj);
|
||||
|
||||
if (mEphemeralDB)
|
||||
LLWrite (obj, theApp->getEphemeralLDB ());
|
||||
LLWrite (obj, getApp().getEphemeralLDB ());
|
||||
|
||||
WriteLog (lsTRACE, HashedObject) << "HOS: " << hash << " fetch: in db";
|
||||
return obj;
|
||||
@@ -537,7 +537,7 @@ int HashedObjectStore::import (const std::string& file)
|
||||
UPTR_T<Database> importDB (new SqliteDatabase (file.c_str ()));
|
||||
importDB->connect ();
|
||||
|
||||
leveldb::DB* db = theApp->getHashNodeLDB ();
|
||||
leveldb::DB* db = getApp().getHashNodeLDB ();
|
||||
leveldb::WriteOptions wo;
|
||||
|
||||
int count = 0;
|
||||
|
||||
@@ -38,8 +38,6 @@ typedef TaggedCache <uint256, SerializedLedgerEntry, UptimeTimerAdapter> SLECach
|
||||
class IApplication
|
||||
{
|
||||
public:
|
||||
static IApplication* New ();
|
||||
|
||||
virtual ~IApplication () { }
|
||||
|
||||
/* VFALCO NOTE
|
||||
@@ -104,6 +102,6 @@ public:
|
||||
virtual void sweep () = 0;
|
||||
};
|
||||
|
||||
extern ScopedPointer <IApplication> theApp;
|
||||
extern IApplication& getApp ();
|
||||
|
||||
#endif
|
||||
|
||||
@@ -49,18 +49,18 @@ bool InboundLedger::tryLocal ()
|
||||
if (!mHaveBase)
|
||||
{
|
||||
// Nothing we can do without the ledger base
|
||||
HashedObject::pointer node = theApp->getHashedObjectStore ().retrieve (mHash);
|
||||
HashedObject::pointer node = getApp().getHashedObjectStore ().retrieve (mHash);
|
||||
|
||||
if (!node)
|
||||
{
|
||||
Blob data;
|
||||
|
||||
if (!theApp->getOPs ().getFetchPack (mHash, data))
|
||||
if (!getApp().getOPs ().getFetchPack (mHash, data))
|
||||
return false;
|
||||
|
||||
WriteLog (lsTRACE, InboundLedger) << "Ledger base found in fetch pack";
|
||||
mLedger = boost::make_shared<Ledger> (data, true);
|
||||
theApp->getHashedObjectStore ().store (hotLEDGER, mLedger->getLedgerSeq (), data, mHash);
|
||||
getApp().getHashedObjectStore ().store (hotLEDGER, mLedger->getLedgerSeq (), data, mHash);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -181,7 +181,7 @@ void InboundLedger::noAwaitData ()
|
||||
|
||||
void InboundLedger::addPeers ()
|
||||
{
|
||||
std::vector<Peer::pointer> peerList = theApp->getPeers ().getPeerVector ();
|
||||
std::vector<Peer::pointer> peerList = getApp().getPeers ().getPeerVector ();
|
||||
|
||||
int vSize = peerList.size ();
|
||||
|
||||
@@ -253,13 +253,13 @@ void InboundLedger::done ()
|
||||
if (mAccept)
|
||||
mLedger->setAccepted ();
|
||||
|
||||
theApp->getLedgerMaster ().storeLedger (mLedger);
|
||||
getApp().getLedgerMaster ().storeLedger (mLedger);
|
||||
}
|
||||
else
|
||||
theApp->getInboundLedgers ().logFailure (mHash);
|
||||
getApp().getInboundLedgers ().logFailure (mHash);
|
||||
|
||||
if (!triggers.empty ()) // We hold the PeerSet lock, so must dispatch
|
||||
theApp->getJobQueue ().addJob (jtLEDGER_DATA, "triggers",
|
||||
getApp().getJobQueue ().addJob (jtLEDGER_DATA, "triggers",
|
||||
BIND_TYPE (LADispatch, P_1, shared_from_this (), triggers));
|
||||
}
|
||||
|
||||
@@ -355,7 +355,7 @@ void InboundLedger::trigger (Peer::ref peer)
|
||||
for (boost::unordered_map<uint64, int>::iterator it = mPeers.begin (), end = mPeers.end ();
|
||||
it != end; ++it)
|
||||
{
|
||||
Peer::pointer iPeer = theApp->getPeers ().getPeerById (it->first);
|
||||
Peer::pointer iPeer = getApp().getPeers ().getPeerById (it->first);
|
||||
|
||||
if (iPeer)
|
||||
{
|
||||
@@ -527,7 +527,7 @@ void PeerSet::sendRequest (const protocol::TMGetLedger& tmGL)
|
||||
|
||||
for (boost::unordered_map<uint64, int>::iterator it = mPeers.begin (), end = mPeers.end (); it != end; ++it)
|
||||
{
|
||||
Peer::pointer peer = theApp->getPeers ().getPeerById (it->first);
|
||||
Peer::pointer peer = getApp().getPeers ().getPeerById (it->first);
|
||||
|
||||
if (peer)
|
||||
peer->sendPacket (packet, false);
|
||||
@@ -554,7 +554,7 @@ int PeerSet::getPeerCount () const
|
||||
int ret = 0;
|
||||
|
||||
for (boost::unordered_map<uint64, int>::const_iterator it = mPeers.begin (), end = mPeers.end (); it != end; ++it)
|
||||
if (theApp->getPeers ().hasPeer (it->first))
|
||||
if (getApp().getPeers ().hasPeer (it->first))
|
||||
++ret;
|
||||
|
||||
return ret;
|
||||
@@ -654,7 +654,7 @@ bool InboundLedger::takeBase (const std::string& data) // data must not have has
|
||||
Serializer s (data.size () + 4);
|
||||
s.add32 (HashPrefix::ledgerMaster);
|
||||
s.addRaw (data);
|
||||
theApp->getHashedObjectStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.peekData (), mHash);
|
||||
getApp().getHashedObjectStore ().store (hotLEDGER, mLedger->getLedgerSeq (), s.peekData (), mHash);
|
||||
|
||||
progress ();
|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@ InboundLedger::pointer InboundLedgers::findCreate (uint256 const& hash, uint32 s
|
||||
Ledger::pointer ledger = ptr->getLedger ();
|
||||
ledger->setClosed ();
|
||||
ledger->setImmutable ();
|
||||
theApp->getLedgerMaster ().storeLedger (ledger);
|
||||
getApp().getLedgerMaster ().storeLedger (ledger);
|
||||
WriteLog (lsDEBUG, InboundLedger) << "Acquiring ledger we already have: " << hash;
|
||||
}
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ InfoSub::InfoSub ()
|
||||
|
||||
InfoSub::~InfoSub ()
|
||||
{
|
||||
NetworkOPs& ops = theApp->getOPs ();
|
||||
NetworkOPs& ops = getApp().getOPs ();
|
||||
ops.unsubTransactions (mSeq);
|
||||
ops.unsubRTTransactions (mSeq);
|
||||
ops.unsubLedger (mSeq);
|
||||
|
||||
@@ -18,18 +18,18 @@ LedgerConsensus::LedgerConsensus (uint256 const& prevLCLHash, Ledger::ref previo
|
||||
{
|
||||
WriteLog (lsDEBUG, LedgerConsensus) << "Creating consensus object";
|
||||
WriteLog (lsTRACE, LedgerConsensus) << "LCL:" << previousLedger->getHash () << ", ct=" << closeTime;
|
||||
mPreviousProposers = theApp->getOPs ().getPreviousProposers ();
|
||||
mPreviousMSeconds = theApp->getOPs ().getPreviousConvergeTime ();
|
||||
mPreviousProposers = getApp().getOPs ().getPreviousProposers ();
|
||||
mPreviousMSeconds = getApp().getOPs ().getPreviousConvergeTime ();
|
||||
assert (mPreviousMSeconds);
|
||||
|
||||
mCloseResolution = ContinuousLedgerTiming::getNextLedgerTimeResolution (
|
||||
mPreviousLedger->getCloseResolution (), mPreviousLedger->getCloseAgree (), previousLedger->getLedgerSeq () + 1);
|
||||
|
||||
if (mValPublic.isSet () && mValPrivate.isSet () && !theApp->getOPs ().isNeedNetworkLedger ())
|
||||
if (mValPublic.isSet () && mValPrivate.isSet () && !getApp().getOPs ().isNeedNetworkLedger ())
|
||||
{
|
||||
WriteLog (lsINFO, LedgerConsensus) << "Entering consensus process, validating";
|
||||
mValidating = true;
|
||||
mProposing = theApp->getOPs ().getOperatingMode () == NetworkOPs::omFULL;
|
||||
mProposing = getApp().getOPs ().getOperatingMode () == NetworkOPs::omFULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -41,7 +41,7 @@ LedgerConsensus::LedgerConsensus (uint256 const& prevLCLHash, Ledger::ref previo
|
||||
|
||||
if (!mHaveCorrectLCL)
|
||||
{
|
||||
theApp->getOPs ().setProposing (false, false);
|
||||
getApp().getOPs ().setProposing (false, false);
|
||||
handleLCL (mPrevLedgerHash);
|
||||
|
||||
if (!mHaveCorrectLCL)
|
||||
@@ -52,16 +52,16 @@ LedgerConsensus::LedgerConsensus (uint256 const& prevLCLHash, Ledger::ref previo
|
||||
}
|
||||
}
|
||||
else
|
||||
theApp->getOPs ().setProposing (mProposing, mValidating);
|
||||
getApp().getOPs ().setProposing (mProposing, mValidating);
|
||||
}
|
||||
|
||||
void LedgerConsensus::checkOurValidation ()
|
||||
{
|
||||
// This only covers some cases - Fix for the case where we can't ever acquire the consensus ledger
|
||||
if (!mHaveCorrectLCL || !mValPublic.isSet () || !mValPrivate.isSet () || theApp->getOPs ().isNeedNetworkLedger ())
|
||||
if (!mHaveCorrectLCL || !mValPublic.isSet () || !mValPrivate.isSet () || getApp().getOPs ().isNeedNetworkLedger ())
|
||||
return;
|
||||
|
||||
SerializedValidation::pointer lastVal = theApp->getOPs ().getLastValidation ();
|
||||
SerializedValidation::pointer lastVal = getApp().getOPs ().getLastValidation ();
|
||||
|
||||
if (lastVal)
|
||||
{
|
||||
@@ -74,19 +74,19 @@ void LedgerConsensus::checkOurValidation ()
|
||||
|
||||
uint256 signingHash;
|
||||
SerializedValidation::pointer v = boost::make_shared<SerializedValidation>
|
||||
(mPreviousLedger->getHash (), theApp->getOPs ().getValidationTimeNC (), mValPublic, false);
|
||||
(mPreviousLedger->getHash (), getApp().getOPs ().getValidationTimeNC (), mValPublic, false);
|
||||
v->setTrusted ();
|
||||
v->sign (signingHash, mValPrivate);
|
||||
theApp->getHashRouter ().addSuppression (signingHash);
|
||||
theApp->getValidations ().addValidation (v, "localMissing");
|
||||
getApp().getHashRouter ().addSuppression (signingHash);
|
||||
getApp().getValidations ().addValidation (v, "localMissing");
|
||||
Blob validation = v->getSigned ();
|
||||
protocol::TMValidation val;
|
||||
val.set_validation (&validation[0], validation.size ());
|
||||
#if 0
|
||||
theApp->getPeers ().relayMessage (NULL,
|
||||
getApp().getPeers ().relayMessage (NULL,
|
||||
boost::make_shared<PackedMessage> (val, protocol::mtVALIDATION));
|
||||
#endif
|
||||
theApp->getOPs ().setLastValidation (v);
|
||||
getApp().getOPs ().setLastValidation (v);
|
||||
WriteLog (lsWARNING, LedgerConsensus) << "Sending partial validation";
|
||||
}
|
||||
|
||||
@@ -102,7 +102,7 @@ void LedgerConsensus::checkLCL ()
|
||||
priorLedger = mPreviousLedger->getParentHash (); // don't jump back
|
||||
|
||||
boost::unordered_map<uint256, currentValidationCount> vals =
|
||||
theApp->getValidations ().getCurrentValidations (favoredLedger, priorLedger);
|
||||
getApp().getValidations ().getCurrentValidations (favoredLedger, priorLedger);
|
||||
|
||||
typedef std::map<uint256, currentValidationCount>::value_type u256_cvc_pair;
|
||||
BOOST_FOREACH (u256_cvc_pair & it, vals)
|
||||
@@ -153,7 +153,7 @@ void LedgerConsensus::checkLCL ()
|
||||
}
|
||||
|
||||
if (mHaveCorrectLCL)
|
||||
theApp->getOPs ().consensusViewChange ();
|
||||
getApp().getOPs ().consensusViewChange ();
|
||||
|
||||
handleLCL (netLgr);
|
||||
}
|
||||
@@ -190,7 +190,7 @@ void LedgerConsensus::handleLCL (uint256 const& lclHash)
|
||||
return;
|
||||
|
||||
// we need to switch the ledger we're working from
|
||||
Ledger::pointer newLCL = theApp->getLedgerMaster ().getLedgerByHash (lclHash);
|
||||
Ledger::pointer newLCL = getApp().getLedgerMaster ().getLedgerByHash (lclHash);
|
||||
|
||||
if (newLCL)
|
||||
{
|
||||
@@ -206,9 +206,9 @@ void LedgerConsensus::handleLCL (uint256 const& lclHash)
|
||||
WriteLog (lsWARNING, LedgerConsensus) << "Need consensus ledger " << mPrevLedgerHash;
|
||||
|
||||
if (mAcquiringLedger)
|
||||
theApp->getInboundLedgers ().dropLedger (mAcquiringLedger->getHash ());
|
||||
getApp().getInboundLedgers ().dropLedger (mAcquiringLedger->getHash ());
|
||||
|
||||
mAcquiringLedger = theApp->getInboundLedgers ().findCreate (mPrevLedgerHash, 0);
|
||||
mAcquiringLedger = getApp().getInboundLedgers ().findCreate (mPrevLedgerHash, 0);
|
||||
mHaveCorrectLCL = false;
|
||||
return;
|
||||
}
|
||||
@@ -230,8 +230,8 @@ void LedgerConsensus::takeInitialPosition (Ledger& initialLedger)
|
||||
{
|
||||
// previous ledger was flag ledger
|
||||
SHAMap::pointer preSet = initialLedger.peekTransactionMap ()->snapShot (true);
|
||||
theApp->getFeeVote ().doVoting (mPreviousLedger, preSet);
|
||||
theApp->getFeatureTable ().doVoting (mPreviousLedger, preSet);
|
||||
getApp().getFeeVote ().doVoting (mPreviousLedger, preSet);
|
||||
getApp().getFeatureTable ().doVoting (mPreviousLedger, preSet);
|
||||
initialSet = preSet->snapShot (false);
|
||||
}
|
||||
else
|
||||
@@ -380,7 +380,7 @@ void LedgerConsensus::sendHaveTxSet (uint256 const& hash, bool direct)
|
||||
msg.set_hash (hash.begin (), 256 / 8);
|
||||
msg.set_status (direct ? protocol::tsHAVE : protocol::tsCAN_GET);
|
||||
PackedMessage::pointer packet = boost::make_shared<PackedMessage> (msg, protocol::mtHAVE_SET);
|
||||
theApp->getPeers ().relayMessage (NULL, packet);
|
||||
getApp().getPeers ().relayMessage (NULL, packet);
|
||||
}
|
||||
|
||||
void LedgerConsensus::adjustCount (SHAMap::ref map, const std::vector<uint160>& peers)
|
||||
@@ -405,19 +405,19 @@ void LedgerConsensus::statusChange (protocol::NodeEvent event, Ledger& ledger)
|
||||
s.set_newevent (event);
|
||||
|
||||
s.set_ledgerseq (ledger.getLedgerSeq ());
|
||||
s.set_networktime (theApp->getOPs ().getNetworkTimeNC ());
|
||||
s.set_networktime (getApp().getOPs ().getNetworkTimeNC ());
|
||||
uint256 hash = ledger.getParentHash ();
|
||||
s.set_ledgerhashprevious (hash.begin (), hash.size ());
|
||||
hash = ledger.getHash ();
|
||||
s.set_ledgerhash (hash.begin (), hash.size ());
|
||||
|
||||
uint32 uMin, uMax;
|
||||
theApp->getOPs ().getValidatedRange (uMin, uMax);
|
||||
getApp().getOPs ().getValidatedRange (uMin, uMax);
|
||||
s.set_firstseq (uMin);
|
||||
s.set_lastseq (uMax);
|
||||
|
||||
PackedMessage::pointer packet = boost::make_shared<PackedMessage> (s, protocol::mtSTATUS_CHANGE);
|
||||
theApp->getPeers ().relayMessage (NULL, packet);
|
||||
getApp().getPeers ().relayMessage (NULL, packet);
|
||||
WriteLog (lsTRACE, LedgerConsensus) << "send status change to peer";
|
||||
}
|
||||
|
||||
@@ -429,9 +429,9 @@ int LedgerConsensus::startup ()
|
||||
void LedgerConsensus::statePreClose ()
|
||||
{
|
||||
// it is shortly before ledger close time
|
||||
bool anyTransactions = theApp->getLedgerMaster ().getCurrentLedger ()->peekTransactionMap ()->getHash ().isNonZero ();
|
||||
bool anyTransactions = getApp().getLedgerMaster ().getCurrentLedger ()->peekTransactionMap ()->getHash ().isNonZero ();
|
||||
int proposersClosed = mPeerPositions.size ();
|
||||
int proposersValidated = theApp->getValidations ().getTrustedValidationCount (mPrevLedgerHash);
|
||||
int proposersValidated = getApp().getValidations ().getTrustedValidationCount (mPrevLedgerHash);
|
||||
|
||||
// This ledger is open. This computes how long since the last ledger closed
|
||||
int sinceClose;
|
||||
@@ -440,7 +440,7 @@ void LedgerConsensus::statePreClose ()
|
||||
if (mHaveCorrectLCL && mPreviousLedger->getCloseAgree ())
|
||||
{
|
||||
// we can use consensus timing
|
||||
sinceClose = 1000 * (theApp->getOPs ().getCloseTimeNC () - mPreviousLedger->getCloseTimeNC ());
|
||||
sinceClose = 1000 * (getApp().getOPs ().getCloseTimeNC () - mPreviousLedger->getCloseTimeNC ());
|
||||
idleInterval = 2 * mPreviousLedger->getCloseResolution ();
|
||||
|
||||
if (idleInterval < LEDGER_IDLE_INTERVAL)
|
||||
@@ -448,7 +448,7 @@ void LedgerConsensus::statePreClose ()
|
||||
}
|
||||
else
|
||||
{
|
||||
sinceClose = 1000 * (theApp->getOPs ().getCloseTimeNC () - theApp->getOPs ().getLastCloseTime ());
|
||||
sinceClose = 1000 * (getApp().getOPs ().getCloseTimeNC () - getApp().getOPs ().getLastCloseTime ());
|
||||
idleInterval = LEDGER_IDLE_INTERVAL;
|
||||
}
|
||||
|
||||
@@ -464,10 +464,10 @@ void LedgerConsensus::closeLedger ()
|
||||
checkOurValidation ();
|
||||
mState = lcsESTABLISH;
|
||||
mConsensusStartTime = boost::posix_time::microsec_clock::universal_time ();
|
||||
mCloseTime = theApp->getOPs ().getCloseTimeNC ();
|
||||
theApp->getOPs ().setLastCloseTime (mCloseTime);
|
||||
mCloseTime = getApp().getOPs ().getCloseTimeNC ();
|
||||
getApp().getOPs ().setLastCloseTime (mCloseTime);
|
||||
statusChange (protocol::neCLOSING_LEDGER, *mPreviousLedger);
|
||||
takeInitialPosition (*theApp->getLedgerMaster ().closeLedger (true));
|
||||
takeInitialPosition (*getApp().getLedgerMaster ().closeLedger (true));
|
||||
}
|
||||
|
||||
void LedgerConsensus::stateEstablish ()
|
||||
@@ -511,7 +511,7 @@ void LedgerConsensus::timerEntry ()
|
||||
if (doShutdown)
|
||||
{
|
||||
WriteLog (lsFATAL, LedgerConsensus) << "Shutdown requested";
|
||||
theApp->stop ();
|
||||
getApp().stop ();
|
||||
}
|
||||
|
||||
if ((mState != lcsFINISHED) && (mState != lcsACCEPTED))
|
||||
@@ -714,7 +714,7 @@ bool LedgerConsensus::haveConsensus (bool forReal)
|
||||
}
|
||||
}
|
||||
}
|
||||
int currentValidations = theApp->getValidations ().getNodesAfter (mPrevLedgerHash);
|
||||
int currentValidations = getApp().getValidations ().getNodesAfter (mPrevLedgerHash);
|
||||
|
||||
WriteLog (lsDEBUG, LedgerConsensus) << "Checking for TX consensus: agree=" << agree << ", disagree=" << disagree;
|
||||
|
||||
@@ -731,7 +731,7 @@ SHAMap::pointer LedgerConsensus::getTransactionTree (uint256 const& hash, bool d
|
||||
|
||||
if (mState == lcsPRE_CLOSE)
|
||||
{
|
||||
SHAMap::pointer currentMap = theApp->getLedgerMaster ().getCurrentLedger ()->peekTransactionMap ();
|
||||
SHAMap::pointer currentMap = getApp().getLedgerMaster ().getCurrentLedger ()->peekTransactionMap ();
|
||||
|
||||
if (currentMap->getHash () == hash)
|
||||
{
|
||||
@@ -787,7 +787,7 @@ void LedgerConsensus::startAcquiring (TransactionAcquire::pointer acquire)
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<Peer::pointer> peerList = theApp->getPeers ().getPeerVector ();
|
||||
std::vector<Peer::pointer> peerList = getApp().getPeers ().getPeerVector ();
|
||||
BOOST_FOREACH (Peer::ref peer, peerList)
|
||||
{
|
||||
if (peer->hasTxSet (acquire->getHash ()))
|
||||
@@ -812,7 +812,7 @@ void LedgerConsensus::propose ()
|
||||
Blob sig = mOurPosition->sign ();
|
||||
prop.set_nodepubkey (&pubKey[0], pubKey.size ());
|
||||
prop.set_signature (&sig[0], sig.size ());
|
||||
theApp->getPeers ().relayMessage (NULL,
|
||||
getApp().getPeers ().relayMessage (NULL,
|
||||
boost::make_shared<PackedMessage> (prop, protocol::mtPROPOSE_LEDGER));
|
||||
}
|
||||
|
||||
@@ -847,14 +847,14 @@ void LedgerConsensus::addDisputedTransaction (uint256 const& txID, Blob const& t
|
||||
txn->setVote (pit.first, cit->second->hasItem (txID));
|
||||
}
|
||||
|
||||
if (theApp->getHashRouter ().setFlag (txID, SF_RELAYED))
|
||||
if (getApp().getHashRouter ().setFlag (txID, SF_RELAYED))
|
||||
{
|
||||
protocol::TMTransaction msg;
|
||||
msg.set_rawtransaction (& (tx.front ()), tx.size ());
|
||||
msg.set_status (protocol::tsNEW);
|
||||
msg.set_receivetimestamp (theApp->getOPs ().getNetworkTimeNC ());
|
||||
msg.set_receivetimestamp (getApp().getOPs ().getNetworkTimeNC ());
|
||||
PackedMessage::pointer packet = boost::make_shared<PackedMessage> (msg, protocol::mtTRANSACTION);
|
||||
theApp->getPeers ().relayMessage (NULL, packet);
|
||||
getApp().getPeers ().relayMessage (NULL, packet);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -966,21 +966,21 @@ void LedgerConsensus::beginAccept (bool synchronous)
|
||||
return;
|
||||
}
|
||||
|
||||
theApp->getOPs ().newLCL (mPeerPositions.size (), mCurrentMSeconds, mNewLedgerHash);
|
||||
getApp().getOPs ().newLCL (mPeerPositions.size (), mCurrentMSeconds, mNewLedgerHash);
|
||||
|
||||
if (synchronous)
|
||||
accept (consensusSet, LoadEvent::pointer ());
|
||||
else
|
||||
{
|
||||
theApp->getIOService ().post (BIND_TYPE (&LedgerConsensus::accept, shared_from_this (), consensusSet,
|
||||
theApp->getJobQueue ().getLoadEvent (jtACCEPTLEDGER, "LedgerConsensus::beginAccept")));
|
||||
getApp().getIOService ().post (BIND_TYPE (&LedgerConsensus::accept, shared_from_this (), consensusSet,
|
||||
getApp().getJobQueue ().getLoadEvent (jtACCEPTLEDGER, "LedgerConsensus::beginAccept")));
|
||||
}
|
||||
}
|
||||
|
||||
void LedgerConsensus::playbackProposals ()
|
||||
{
|
||||
boost::unordered_map < uint160,
|
||||
std::list<LedgerProposal::pointer> > & storedProposals = theApp->getOPs ().peekStoredProposals ();
|
||||
std::list<LedgerProposal::pointer> > & storedProposals = getApp().getOPs ().peekStoredProposals ();
|
||||
|
||||
for (boost::unordered_map< uint160, std::list<LedgerProposal::pointer> >::iterator
|
||||
it = storedProposals.begin (), end = storedProposals.end (); it != end; ++it)
|
||||
@@ -1010,7 +1010,7 @@ void LedgerConsensus::playbackProposals ()
|
||||
#if 0 // FIXME: We can't do delayed relay because we don't have the signature
|
||||
std::set<uint64> peers
|
||||
|
||||
if (relay && theApp->getHashRouter ().swapSet (proposal.getSuppress (), set, SF_RELAYED))
|
||||
if (relay && getApp().getHashRouter ().swapSet (proposal.getSuppress (), set, SF_RELAYED))
|
||||
{
|
||||
WriteLog (lsDEBUG, LedgerConsensus) << "Stored proposal delayed relay";
|
||||
protocol::TMProposeSet set;
|
||||
@@ -1021,7 +1021,7 @@ void LedgerConsensus::playbackProposals ()
|
||||
nodepubkey
|
||||
signature
|
||||
PackedMessage::pointer message = boost::make_shared<PackedMessage> (set, protocol::mtPROPOSE_LEDGER);
|
||||
theApp->getPeers ().relayMessageBut (peers, message);
|
||||
getApp().getPeers ().relayMessageBut (peers, message);
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -1043,7 +1043,7 @@ int LedgerConsensus::applyTransaction (TransactionEngine& engine, SerializedTran
|
||||
if (retryAssured)
|
||||
parms = static_cast<TransactionEngineParams> (parms | tapRETRY);
|
||||
|
||||
if (theApp->getHashRouter ().setFlag (txn->getTransactionID (), SF_SIGGOOD))
|
||||
if (getApp().getHashRouter ().setFlag (txn->getTransactionID (), SF_SIGGOOD))
|
||||
parms = static_cast<TransactionEngineParams> (parms | tapNO_CHECK_SIGN);
|
||||
|
||||
WriteLog (lsDEBUG, LedgerConsensus) << "TXN " << txn->getTransactionID ()
|
||||
@@ -1176,12 +1176,12 @@ uint32 LedgerConsensus::roundCloseTime (uint32 closeTime)
|
||||
void LedgerConsensus::accept (SHAMap::ref set, LoadEvent::pointer)
|
||||
{
|
||||
if (set->getHash ().isNonZero ()) // put our set where others can get it later
|
||||
theApp->getOPs ().takePosition (mPreviousLedger->getLedgerSeq (), set);
|
||||
getApp().getOPs ().takePosition (mPreviousLedger->getLedgerSeq (), set);
|
||||
|
||||
boost::recursive_mutex::scoped_lock masterLock (theApp->getMasterLock ());
|
||||
boost::recursive_mutex::scoped_lock masterLock (getApp().getMasterLock ());
|
||||
assert (set->getHash () == mOurPosition->getCurrentHash ());
|
||||
|
||||
theApp->getOPs ().peekStoredProposals ().clear (); // these are now obsolete
|
||||
getApp().getOPs ().peekStoredProposals ().clear (); // these are now obsolete
|
||||
|
||||
uint32 closeTime = roundCloseTime (mOurPosition->getCloseTime ());
|
||||
bool closeTimeCorrect = true;
|
||||
@@ -1245,24 +1245,24 @@ void LedgerConsensus::accept (SHAMap::ref set, LoadEvent::pointer)
|
||||
{
|
||||
uint256 signingHash;
|
||||
SerializedValidation::pointer v = boost::make_shared<SerializedValidation>
|
||||
(newLCLHash, theApp->getOPs ().getValidationTimeNC (), mValPublic, mProposing);
|
||||
(newLCLHash, getApp().getOPs ().getValidationTimeNC (), mValPublic, mProposing);
|
||||
v->setFieldU32 (sfLedgerSequence, newLCL->getLedgerSeq ());
|
||||
|
||||
if (((newLCL->getLedgerSeq () + 1) % 256) == 0) // next ledger is flag ledger
|
||||
{
|
||||
theApp->getFeeVote ().doValidation (newLCL, *v);
|
||||
theApp->getFeatureTable ().doValidation (newLCL, *v);
|
||||
getApp().getFeeVote ().doValidation (newLCL, *v);
|
||||
getApp().getFeatureTable ().doValidation (newLCL, *v);
|
||||
}
|
||||
|
||||
v->sign (signingHash, mValPrivate);
|
||||
v->setTrusted ();
|
||||
theApp->getHashRouter ().addSuppression (signingHash); // suppress it if we receive it
|
||||
theApp->getValidations ().addValidation (v, "local");
|
||||
theApp->getOPs ().setLastValidation (v);
|
||||
getApp().getHashRouter ().addSuppression (signingHash); // suppress it if we receive it
|
||||
getApp().getValidations ().addValidation (v, "local");
|
||||
getApp().getOPs ().setLastValidation (v);
|
||||
Blob validation = v->getSigned ();
|
||||
protocol::TMValidation val;
|
||||
val.set_validation (&validation[0], validation.size ());
|
||||
int j = theApp->getPeers ().relayMessage (NULL,
|
||||
int j = getApp().getPeers ().relayMessage (NULL,
|
||||
boost::make_shared<PackedMessage> (val, protocol::mtVALIDATION));
|
||||
WriteLog (lsINFO, LedgerConsensus) << "CNF Val " << newLCLHash << " to " << j << " peers";
|
||||
}
|
||||
@@ -1270,7 +1270,7 @@ void LedgerConsensus::accept (SHAMap::ref set, LoadEvent::pointer)
|
||||
WriteLog (lsINFO, LedgerConsensus) << "CNF newLCL " << newLCLHash;
|
||||
|
||||
Ledger::pointer newOL = boost::make_shared<Ledger> (true, boost::ref (*newLCL));
|
||||
ScopedLock sl ( theApp->getLedgerMaster ().getLock ());
|
||||
ScopedLock sl ( getApp().getLedgerMaster ().getLock ());
|
||||
|
||||
// Apply disputed transactions that didn't get in
|
||||
TransactionEngine engine (newOL);
|
||||
@@ -1296,9 +1296,9 @@ void LedgerConsensus::accept (SHAMap::ref set, LoadEvent::pointer)
|
||||
}
|
||||
|
||||
WriteLog (lsDEBUG, LedgerConsensus) << "Applying transactions from current open ledger";
|
||||
applyTransactions (theApp->getLedgerMaster ().getCurrentLedger ()->peekTransactionMap (), newOL, newLCL,
|
||||
applyTransactions (getApp().getLedgerMaster ().getCurrentLedger ()->peekTransactionMap (), newOL, newLCL,
|
||||
failedTransactions, true);
|
||||
theApp->getLedgerMaster ().pushLedger (newLCL, newOL, !mConsensusFail);
|
||||
getApp().getLedgerMaster ().pushLedger (newLCL, newOL, !mConsensusFail);
|
||||
mNewLedgerHash = newLCL->getHash ();
|
||||
mState = lcsACCEPTED;
|
||||
sl.unlock ();
|
||||
@@ -1323,14 +1323,14 @@ void LedgerConsensus::accept (SHAMap::ref set, LoadEvent::pointer)
|
||||
closeTotal /= closeCount;
|
||||
int offset = static_cast<int> (closeTotal) - static_cast<int> (mCloseTime);
|
||||
WriteLog (lsINFO, LedgerConsensus) << "Our close offset is estimated at " << offset << " (" << closeCount << ")";
|
||||
theApp->getOPs ().closeTimeOffset (offset);
|
||||
getApp().getOPs ().closeTimeOffset (offset);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void LedgerConsensus::endConsensus ()
|
||||
{
|
||||
theApp->getOPs ().endConsensus (mHaveCorrectLCL);
|
||||
getApp().getOPs ().endConsensus (mHaveCorrectLCL);
|
||||
}
|
||||
|
||||
void LedgerConsensus::simulate ()
|
||||
|
||||
@@ -361,20 +361,20 @@ private:
// VFALCO TODO Eliminate the dependence on the Application object.
// Choices include constructing with the job queue / feetracker.
// Another option is using an observer pattern to invert the dependency.
if (theApp->getJobQueue ().isOverloaded ())
if (getApp().getJobQueue ().isOverloaded ())
{
WriteLog (lsINFO, LoadManager) << theApp->getJobQueue ().getJson (0);
change = theApp->getFeeTrack ().raiseLocalFee ();
WriteLog (lsINFO, LoadManager) << getApp().getJobQueue ().getJson (0);
change = getApp().getFeeTrack ().raiseLocalFee ();
}
else
{
change = theApp->getFeeTrack ().lowerLocalFee ();
change = getApp().getFeeTrack ().lowerLocalFee ();
}

if (change)
{
// VFALCO TODO replace this with a Listener / observer and subscribe in NetworkOPs or Application
theApp->getOPs ().reportFeeChange ();
getApp().getOPs ().reportFeeChange ();
}

t += boost::posix_time::seconds (1);

@@ -26,15 +26,15 @@ void LocalCredentials::start ()
if (!theConfig.QUIET)
std::cerr << "NodeIdentity: " << mNodePublicKey.humanNodePublic () << std::endl;

theApp->getUNL ().start ();
getApp().getUNL ().start ();
}

// Retrieve network identity.
bool LocalCredentials::nodeIdentityLoad ()
{

Database* db = theApp->getWalletDB ()->getDB ();
ScopedLock sl (theApp->getWalletDB ()->getDBLock ());
Database* db = getApp().getWalletDB ()->getDB ();
ScopedLock sl (getApp().getWalletDB ()->getDBLock ());
bool bSuccess = false;

if (db->executeSQL ("SELECT * FROM NodeIdentity;") && db->startIterRows ())
@@ -103,9 +103,9 @@ bool LocalCredentials::nodeIdentityCreate ()
//
// Store the node information
//
Database* db = theApp->getWalletDB ()->getDB ();
Database* db = getApp().getWalletDB ()->getDB ();

ScopedLock sl (theApp->getWalletDB ()->getDBLock ());
ScopedLock sl (getApp().getWalletDB ()->getDBLock ());
db->executeSQL (str (boost::format ("INSERT INTO NodeIdentity (PublicKey,PrivateKey,Dh512,Dh1024) VALUES ('%s','%s',%s,%s);")
% naNodePublic.humanNodePublic ()
% naNodePrivate.humanNodePrivate ()
@@ -121,9 +121,9 @@ bool LocalCredentials::nodeIdentityCreate ()

bool LocalCredentials::dataDelete (const std::string& strKey)
{
Database* db = theApp->getRpcDB ()->getDB ();
Database* db = getApp().getRpcDB ()->getDB ();

ScopedLock sl (theApp->getRpcDB ()->getDBLock ());
ScopedLock sl (getApp().getRpcDB ()->getDBLock ());

return db->executeSQL (str (boost::format ("DELETE FROM RPCData WHERE Key=%s;")
% sqlEscape (strKey)));
@@ -131,9 +131,9 @@ bool LocalCredentials::dataDelete (const std::string& strKey)

bool LocalCredentials::dataFetch (const std::string& strKey, std::string& strValue)
{
Database* db = theApp->getRpcDB ()->getDB ();
Database* db = getApp().getRpcDB ()->getDB ();

ScopedLock sl (theApp->getRpcDB ()->getDBLock ());
ScopedLock sl (getApp().getRpcDB ()->getDBLock ());

bool bSuccess = false;

@@ -153,9 +153,9 @@ bool LocalCredentials::dataFetch (const std::string& strKey, std::string& strVal

bool LocalCredentials::dataStore (const std::string& strKey, const std::string& strValue)
{
Database* db = theApp->getRpcDB ()->getDB ();
Database* db = getApp().getRpcDB ()->getDB ();

ScopedLock sl (theApp->getRpcDB ()->getDBLock ());
ScopedLock sl (getApp().getRpcDB ()->getDBLock ());

bool bSuccess = false;

@@ -11,8 +11,7 @@ extern void LEFInit ();

void setupServer ()
{
theApp = IApplication::New ();
theApp->setup ();
getApp().setup ();
}

void startServer ()
@@ -29,7 +28,7 @@ void startServer ()
if (!theConfig.QUIET)
std::cerr << "Startup RPC: " << jvCommand << std::endl;

RPCHandler rhHandler (&theApp->getOPs ());
RPCHandler rhHandler (&getApp().getOPs ());

// VFALCO TODO Clean up this magic number
LoadType loadType = LT_RPCReference;
@@ -40,12 +39,12 @@ void startServer ()
}
}

theApp->run (); // Blocks till we get a stop RPC.
getApp().run (); // Blocks till we get a stop RPC.
}

bool init_unit_test ()
{
theApp = IApplication::New ();
getApp ();

return true;
}

@@ -37,7 +37,7 @@ bool PathRequest::isValid (Ledger::ref lrLedger)

if (bValid)
{
AccountState::pointer asSrc = theApp->getOPs ().getAccountState (lrLedger, raSrcAccount);
AccountState::pointer asSrc = getApp().getOPs ().getAccountState (lrLedger, raSrcAccount);

if (!asSrc)
{
@@ -47,7 +47,7 @@ bool PathRequest::isValid (Ledger::ref lrLedger)
}
else
{
AccountState::pointer asDst = theApp->getOPs ().getAccountState (lrLedger, raDstAccount);
AccountState::pointer asDst = getApp().getOPs ().getAccountState (lrLedger, raDstAccount);
Json::Value jvDestCur;

if (!asDst)

@@ -162,9 +162,9 @@ Pathfinder::Pathfinder (RippleLineCache::ref cache,

bValid = true;

theApp->getOrderBookDB ().setup (mLedger);
getApp().getOrderBookDB ().setup (mLedger);

m_loadEvent = theApp->getJobQueue ().getLoadEvent (jtPATH_FIND, "FindPath");
m_loadEvent = getApp().getJobQueue ().getLoadEvent (jtPATH_FIND, "FindPath");

// Construct the default path for later comparison.

@@ -399,7 +399,7 @@ bool Pathfinder::findPaths (const unsigned int iMaxSteps, const unsigned int iMa
{
// Cursor is for XRP, continue with qualifying books: XRP -> non-XRP
std::vector<OrderBook::pointer> xrpBooks;
theApp->getOrderBookDB ().getBooksByTakerPays (ACCOUNT_XRP, CURRENCY_XRP, xrpBooks);
getApp().getOrderBookDB ().getBooksByTakerPays (ACCOUNT_XRP, CURRENCY_XRP, xrpBooks);
BOOST_FOREACH (OrderBook::ref book, xrpBooks)
{
// New end is an order book with the currency and issuer.
@@ -553,7 +553,7 @@ bool Pathfinder::findPaths (const unsigned int iMaxSteps, const unsigned int iMa

// XXX Flip argument order to norm. (currency, issuer)
std::vector<OrderBook::pointer> books;
theApp->getOrderBookDB ().getBooksByTakerPays (speEnd.mIssuerID, speEnd.mCurrencyID, books);
getApp().getOrderBookDB ().getBooksByTakerPays (speEnd.mIssuerID, speEnd.mCurrencyID, books);

BOOST_FOREACH (OrderBook::ref book, books)
{

@@ -286,7 +286,7 @@ void PeerImp::detach (const char* rsn, bool onIOStrand)

if (mNodePublic.isValid ())
{
theApp->getPeers ().peerDisconnected (shared_from_this (), mNodePublic);
getApp().getPeers ().peerDisconnected (shared_from_this (), mNodePublic);

mNodePublic.clear (); // Be idempotent.
}
@@ -295,7 +295,7 @@ void PeerImp::detach (const char* rsn, bool onIOStrand)
{
// Connection might be part of scanning. Inform connect failed.
// Might need to scan. Inform connection closed.
theApp->getPeers ().peerClosed (shared_from_this (), mIpPort.first, mIpPort.second);
getApp().getPeers ().peerClosed (shared_from_this (), mIpPort.first, mIpPort.second);

mIpPort.first.clear (); // Be idempotent.
}
@@ -376,7 +376,7 @@ void PeerImp::connect (const std::string& strIp, int iPort)

boost::asio::ip::tcp::resolver::query query (strIp, boost::lexical_cast<std::string> (iPortAct),
boost::asio::ip::resolver_query_base::numeric_host | boost::asio::ip::resolver_query_base::numeric_service);
boost::asio::ip::tcp::resolver resolver (theApp->getIOService ());
boost::asio::ip::tcp::resolver resolver (getApp().getIOService ());
boost::system::error_code err;
boost::asio::ip::tcp::resolver::iterator itrEndpoint = resolver.resolve (query, err);

@@ -627,7 +627,7 @@ void PeerImp::handleReadBody (const boost::system::error_code& error)
WriteLog (lsINFO, Peer) << "Peer: Body: Error: " << getIP () << ": " << error.category ().name () << ": " << error.message () << ": " << error;
}

boost::recursive_mutex::scoped_lock sl (theApp->getMasterLock ());
boost::recursive_mutex::scoped_lock sl (getApp().getMasterLock ());
detach ("hrb", true);
return;
}
@@ -646,9 +646,9 @@ void PeerImp::processReadBuffer ()

// std::cerr << "PeerImp::processReadBuffer: " << mIpPort.first << " " << mIpPort.second << std::endl;

LoadEvent::autoptr event (theApp->getJobQueue ().getLoadEventAP (jtPEER, "PeerImp::read"));
LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtPEER, "PeerImp::read"));

ScopedLock sl (theApp->getMasterLock ());
ScopedLock sl (getApp().getMasterLock ());

// If connected and get a mtHELLO or if not connected and get a non-mtHELLO, wrong message was sent.
if (mHelloed == (type == protocol::mtHELLO))
@@ -920,7 +920,7 @@ void PeerImp::recvHello (protocol::TMHello& packet)
mActivityTimer.async_wait (mIOStrand.wrap (boost::bind (&PeerImp::handlePingTimer, boost::static_pointer_cast <PeerImp> (shared_from_this ()),
boost::asio::placeholders::error)));

uint32 ourTime = theApp->getOPs ().getNetworkTimeNC ();
uint32 ourTime = getApp().getOPs ().getNetworkTimeNC ();
uint32 minTime = ourTime - 20;
uint32 maxTime = ourTime + 20;

@@ -977,7 +977,7 @@ void PeerImp::recvHello (protocol::TMHello& packet)
(packet.protoversion () >> 16) << "." << (packet.protoversion () & 0xFF);
mHello = packet;

if (theApp->getUNL ().nodeInCluster (mNodePublic, mNodeName))
if (getApp().getUNL ().nodeInCluster (mNodePublic, mNodeName))
{
mCluster = true;
mLoad.setPrivileged ();
@@ -991,10 +991,10 @@ void PeerImp::recvHello (protocol::TMHello& packet)
if (mClientConnect)
{
// If we connected due to scan, no longer need to scan.
theApp->getPeers ().peerVerified (shared_from_this ());
getApp().getPeers ().peerVerified (shared_from_this ());
}

if (! theApp->getPeers ().peerConnected (shared_from_this (), mNodePublic, getIP (), getPort ()))
if (! getApp().getPeers ().peerConnected (shared_from_this (), mNodePublic, getIP (), getPort ()))
{
// Already connected, self, or some other reason.
WriteLog (lsINFO, Peer) << "Recv(Hello): Disconnect: Extraneous connection.";
@@ -1021,7 +1021,7 @@ void PeerImp::recvHello (protocol::TMHello& packet)
// Don't save IP address if the node wants privacy.
// Note: We don't go so far as to delete it. If a node which has previously announced itself now wants
// privacy, it should at least change its port.
theApp->getPeers ().savePeer (strIP, iPort, IUniqueNodeList::vsInbound);
getApp().getPeers ().savePeer (strIP, iPort, IUniqueNodeList::vsInbound);
}
}

@@ -1075,20 +1075,20 @@ static void checkTransaction (Job&, int flags, SerializedTransaction::pointer st
|
||||
|
||||
if (tx->getStatus () == INVALID)
|
||||
{
|
||||
theApp->getHashRouter ().setFlag (stx->getTransactionID (), SF_BAD);
|
||||
getApp().getHashRouter ().setFlag (stx->getTransactionID (), SF_BAD);
|
||||
Peer::applyLoadCharge (peer, LT_InvalidSignature);
|
||||
return;
|
||||
}
|
||||
else
|
||||
theApp->getHashRouter ().setFlag (stx->getTransactionID (), SF_SIGGOOD);
|
||||
getApp().getHashRouter ().setFlag (stx->getTransactionID (), SF_SIGGOOD);
|
||||
|
||||
theApp->getOPs ().processTransaction (tx, isSetBit (flags, SF_TRUSTED), false);
|
||||
getApp().getOPs ().processTransaction (tx, isSetBit (flags, SF_TRUSTED), false);
|
||||
|
||||
#ifndef TRUST_NETWORK
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
theApp->getHashRouter ().setFlags (stx->getTransactionID (), SF_BAD);
|
||||
getApp().getHashRouter ().setFlags (stx->getTransactionID (), SF_BAD);
|
||||
applyLoadCharge (peer, LT_InvalidRequest);
|
||||
}
|
||||
|
||||
@@ -1110,7 +1110,7 @@ void PeerImp::recvTransaction (protocol::TMTransaction& packet, ScopedLock& Mast
|
||||
|
||||
int flags;
|
||||
|
||||
if (! theApp->getHashRouter ().addSuppressionPeer (stx->getTransactionID (), mPeerId, flags))
|
||||
if (! getApp().getHashRouter ().addSuppressionPeer (stx->getTransactionID (), mPeerId, flags))
|
||||
{
|
||||
// we have seen this transaction recently
|
||||
if (isSetBit (flags, SF_BAD))
|
||||
@@ -1128,7 +1128,7 @@ void PeerImp::recvTransaction (protocol::TMTransaction& packet, ScopedLock& Mast
|
||||
if (mCluster)
|
||||
flags |= SF_TRUSTED | SF_SIGGOOD;
|
||||
|
||||
theApp->getJobQueue ().addJob (jtTRANSACTION, "recvTransction->checkTransaction",
|
||||
getApp().getJobQueue ().addJob (jtTRANSACTION, "recvTransction->checkTransaction",
|
||||
BIND_TYPE (&checkTransaction, P_1, flags, stx, boost::weak_ptr<Peer> (shared_from_this ())));
|
||||
|
||||
#ifndef TRUST_NETWORK
|
||||
@@ -1193,15 +1193,15 @@ static void checkPropose (Job& job, boost::shared_ptr<protocol::TMProposeSet> pa
|
||||
}
|
||||
|
||||
if (isTrusted)
|
||||
theApp->getOPs ().processTrustedProposal (proposal, packet, nodePublic, prevLedger, sigGood);
|
||||
getApp().getOPs ().processTrustedProposal (proposal, packet, nodePublic, prevLedger, sigGood);
|
||||
else if (sigGood && (prevLedger == consensusLCL))
|
||||
{
|
||||
// relay untrusted proposal
|
||||
WriteLog (lsTRACE, Peer) << "relaying untrusted proposal";
|
||||
std::set<uint64> peers;
|
||||
theApp->getHashRouter ().swapSet (proposal->getHashRouter (), peers, SF_RELAYED);
|
||||
getApp().getHashRouter ().swapSet (proposal->getHashRouter (), peers, SF_RELAYED);
|
||||
PackedMessage::pointer message = boost::make_shared<PackedMessage> (set, protocol::mtPROPOSE_LEDGER);
|
||||
theApp->getPeers ().relayMessageBut (peers, message);
|
||||
getApp().getPeers ().relayMessageBut (peers, message);
|
||||
}
|
||||
else
|
||||
WriteLog (lsDEBUG, Peer) << "Not relaying untrusted proposal";
|
||||
@@ -1245,7 +1245,7 @@ void PeerImp::recvPropose (const boost::shared_ptr<protocol::TMProposeSet>& pack
|
||||
|
||||
uint256 suppression = s.getSHA512Half ();
|
||||
|
||||
if (! theApp->getHashRouter ().addSuppressionPeer (suppression, mPeerId))
|
||||
if (! getApp().getHashRouter ().addSuppressionPeer (suppression, mPeerId))
|
||||
{
|
||||
WriteLog (lsTRACE, Peer) << "Received duplicate proposal from peer " << mPeerId;
|
||||
return;
|
||||
@@ -1259,8 +1259,8 @@ void PeerImp::recvPropose (const boost::shared_ptr<protocol::TMProposeSet>& pack
|
||||
return;
|
||||
}
|
||||
|
||||
bool isTrusted = theApp->getUNL ().nodeInUNL (signerPublic);
|
||||
if (!isTrusted && theApp->getFeeTrack ().isLoaded ())
|
||||
bool isTrusted = getApp().getUNL ().nodeInUNL (signerPublic);
|
||||
if (!isTrusted && getApp().getFeeTrack ().isLoaded ())
|
||||
{
|
||||
WriteLog (lsDEBUG, Peer) << "Dropping untrusted proposal due to load";
|
||||
return;
|
||||
@@ -1268,12 +1268,12 @@ void PeerImp::recvPropose (const boost::shared_ptr<protocol::TMProposeSet>& pack
|
||||
|
||||
WriteLog (lsTRACE, Peer) << "Received " << (isTrusted ? "trusted" : "UNtrusted") << " proposal from " << mPeerId;
|
||||
|
||||
uint256 consensusLCL = theApp->getOPs ().getConsensusLCL ();
|
||||
uint256 consensusLCL = getApp().getOPs ().getConsensusLCL ();
|
||||
LedgerProposal::pointer proposal = boost::make_shared<LedgerProposal> (
|
||||
prevLedger.isNonZero () ? prevLedger : consensusLCL,
|
||||
set.proposeseq (), proposeHash, set.closetime (), signerPublic, suppression);
|
||||
|
||||
theApp->getJobQueue ().addJob (isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut, "recvPropose->checkPropose",
|
||||
getApp().getJobQueue ().addJob (isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut, "recvPropose->checkPropose",
|
||||
BIND_TYPE (&checkPropose, P_1, packet, proposal, consensusLCL,
|
||||
mNodePublic, boost::weak_ptr<Peer> (shared_from_this ())));
|
||||
}
|
||||
@@ -1298,7 +1298,7 @@ void PeerImp::recvHaveTxSet (protocol::TMHaveTransactionSet& packet)
|
||||
if (packet.status () == protocol::tsHAVE)
|
||||
addTxSet (hash);
|
||||
|
||||
if (!theApp->getOPs ().hasTXSet (shared_from_this (), hash, packet.status ()))
|
||||
if (!getApp().getOPs ().hasTXSet (shared_from_this (), hash, packet.status ()))
|
||||
applyLoadCharge (LT_UnwantedData);
|
||||
}
|
||||
|
||||
@@ -1327,11 +1327,11 @@ static void checkValidation (Job&, SerializedValidation::pointer val, uint256 si
|
||||
|
||||
std::set<uint64> peers;
|
||||
|
||||
if (theApp->getOPs ().recvValidation (val, source) &&
|
||||
theApp->getHashRouter ().swapSet (signingHash, peers, SF_RELAYED))
|
||||
if (getApp().getOPs ().recvValidation (val, source) &&
|
||||
getApp().getHashRouter ().swapSet (signingHash, peers, SF_RELAYED))
|
||||
{
|
||||
PackedMessage::pointer message = boost::make_shared<PackedMessage> (*packet, protocol::mtVALIDATION);
|
||||
theApp->getPeers ().relayMessageBut (peers, message);
|
||||
getApp().getPeers ().relayMessageBut (peers, message);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1367,15 +1367,15 @@ void PeerImp::recvValidation (const boost::shared_ptr<protocol::TMValidation>& p
|
||||
|
||||
uint256 signingHash = val->getSigningHash ();
|
||||
|
||||
if (! theApp->getHashRouter ().addSuppressionPeer (signingHash, mPeerId))
|
||||
if (! getApp().getHashRouter ().addSuppressionPeer (signingHash, mPeerId))
|
||||
{
|
||||
WriteLog (lsTRACE, Peer) << "Validation is duplicate";
|
||||
return;
|
||||
}
|
||||
|
||||
bool isTrusted = theApp->getUNL ().nodeInUNL (val->getSignerPublic ());
|
||||
if (isTrusted || !theApp->getFeeTrack ().isLoaded ())
|
||||
theApp->getJobQueue ().addJob (isTrusted ? jtVALIDATION_t : jtVALIDATION_ut, "recvValidation->checkValidation",
|
||||
bool isTrusted = getApp().getUNL ().nodeInUNL (val->getSignerPublic ());
|
||||
if (isTrusted || !getApp().getFeeTrack ().isLoaded ())
|
||||
getApp().getJobQueue ().addJob (isTrusted ? jtVALIDATION_t : jtVALIDATION_ut, "recvValidation->checkValidation",
|
||||
BIND_TYPE (&checkValidation, P_1, val, signingHash, isTrusted, mCluster, packet,
|
||||
boost::weak_ptr<Peer> (shared_from_this ())));
|
||||
else
|
||||
@@ -1412,7 +1412,7 @@ void PeerImp::recvGetPeers (protocol::TMGetPeers& packet, ScopedLock& MasterLock
|
||||
MasterLockHolder.unlock ();
|
||||
std::vector<std::string> addrs;
|
||||
|
||||
theApp->getPeers ().getTopNAddrs (30, addrs);
|
||||
getApp().getPeers ().getTopNAddrs (30, addrs);
|
||||
|
||||
if (!addrs.empty ())
|
||||
{
|
||||
@@ -1454,7 +1454,7 @@ void PeerImp::recvPeers (protocol::TMPeers& packet)
|
||||
{
|
||||
//WriteLog (lsINFO, Peer) << "Peer: Learning: " << ADDRESS(this) << ": " << i << ": " << strIP << " " << iPort;
|
||||
|
||||
theApp->getPeers ().savePeer (strIP, iPort, IUniqueNodeList::vsTold);
|
||||
getApp().getPeers ().savePeer (strIP, iPort, IUniqueNodeList::vsTold);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1493,7 +1493,7 @@ void PeerImp::recvGetObjectByHash (const boost::shared_ptr<protocol::TMGetObject
|
||||
if (obj.has_hash () && (obj.hash ().size () == (256 / 8)))
|
||||
{
|
||||
memcpy (hash.begin (), obj.hash ().data (), 256 / 8);
|
||||
HashedObject::pointer hObj = theApp->getHashedObjectStore ().retrieve (hash);
|
||||
HashedObject::pointer hObj = getApp().getHashedObjectStore ().retrieve (hash);
|
||||
|
||||
if (hObj)
|
||||
{
|
||||
@@ -1534,7 +1534,7 @@ void PeerImp::recvGetObjectByHash (const boost::shared_ptr<protocol::TMGetObject
|
||||
{
|
||||
CondLog (pLDo && (pLSeq != 0), lsDEBUG, Peer) << "Recevied full fetch pack for " << pLSeq;
|
||||
pLSeq = obj.ledgerseq ();
|
||||
pLDo = !theApp->getOPs ().haveLedger (pLSeq);
|
||||
pLDo = !getApp().getOPs ().haveLedger (pLSeq);
|
||||
|
||||
if (!pLDo)
|
||||
{
|
||||
@@ -1553,7 +1553,7 @@ void PeerImp::recvGetObjectByHash (const boost::shared_ptr<protocol::TMGetObject
|
||||
boost::shared_ptr< Blob > data = boost::make_shared< Blob >
|
||||
(obj.data ().begin (), obj.data ().end ());
|
||||
|
||||
theApp->getOPs ().addFetchPack (hash, data);
|
||||
getApp().getOPs ().addFetchPack (hash, data);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1561,7 +1561,7 @@ void PeerImp::recvGetObjectByHash (const boost::shared_ptr<protocol::TMGetObject
|
||||
CondLog (pLDo && (pLSeq != 0), lsDEBUG, Peer) << "Received partial fetch pack for " << pLSeq;
|
||||
|
||||
if (packet.type () == protocol::TMGetObjectByHash::otFETCH_PACK)
|
||||
theApp->getOPs ().gotFetchPack (progress, pLSeq);
|
||||
getApp().getOPs ().gotFetchPack (progress, pLSeq);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1607,7 +1607,7 @@ void PeerImp::recvProofWork (protocol::TMProofWork& packet)
|
||||
|
||||
uint256 response;
|
||||
memcpy (response.begin (), packet.response ().data (), 256 / 8);
|
||||
POWResult r = theApp->getProofOfWorkFactory ().checkProof (packet.token (), response);
|
||||
POWResult r = getApp().getProofOfWorkFactory ().checkProof (packet.token (), response);
|
||||
|
||||
if (r == powOK)
|
||||
{
|
||||
@@ -1655,7 +1655,7 @@ void PeerImp::recvProofWork (protocol::TMProofWork& packet)
|
||||
}
|
||||
|
||||
#if 0 // Until proof of work is completed, don't do it
|
||||
theApp->getJobQueue ().addJob (
|
||||
getApp().getJobQueue ().addJob (
|
||||
jtPROOFWORK,
|
||||
"recvProof->doProof",
|
||||
BIND_TYPE (&PeerImp::doProofOfWork, P_1, boost::weak_ptr <Peer> (shared_from_this ()), pow));
|
||||
@@ -1672,7 +1672,7 @@ void PeerImp::recvStatus (protocol::TMStatusChange& packet)
|
||||
WriteLog (lsTRACE, Peer) << "Received status change from peer " << getIP ();
|
||||
|
||||
if (!packet.has_networktime ())
|
||||
packet.set_networktime (theApp->getOPs ().getNetworkTimeNC ());
|
||||
packet.set_networktime (getApp().getOPs ().getNetworkTimeNC ());
|
||||
|
||||
if (!mLastStatus.has_newstatus () || packet.has_newstatus ())
|
||||
mLastStatus = packet;
|
||||
@@ -1748,14 +1748,14 @@ void PeerImp::recvGetLedger (protocol::TMGetLedger& packet, ScopedLock& MasterLo
|
||||
|
||||
uint256 txHash;
|
||||
memcpy (txHash.begin (), packet.ledgerhash ().data (), 32);
|
||||
map = theApp->getOPs ().getTXMap (txHash);
|
||||
map = getApp().getOPs ().getTXMap (txHash);
|
||||
|
||||
if (!map)
|
||||
{
|
||||
if (packet.has_querytype () && !packet.has_requestcookie ())
|
||||
{
|
||||
WriteLog (lsDEBUG, Peer) << "Trying to route TX set request";
|
||||
std::vector<Peer::pointer> peerList = theApp->getPeers ().getPeerVector ();
|
||||
std::vector<Peer::pointer> peerList = getApp().getPeers ().getPeerVector ();
|
||||
std::vector<Peer::pointer> usablePeers;
|
||||
BOOST_FOREACH (Peer::ref peer, peerList)
|
||||
{
|
||||
@@ -1806,7 +1806,7 @@ void PeerImp::recvGetLedger (protocol::TMGetLedger& packet, ScopedLock& MasterLo
|
||||
memcpy (ledgerhash.begin (), packet.ledgerhash ().data (), 32);
|
||||
logMe += "LedgerHash:";
|
||||
logMe += ledgerhash.GetHex ();
|
||||
ledger = theApp->getLedgerMaster ().getLedgerByHash (ledgerhash);
|
||||
ledger = getApp().getLedgerMaster ().getLedgerByHash (ledgerhash);
|
||||
|
||||
CondLog (!ledger, lsTRACE, Peer) << "Don't have ledger " << ledgerhash;
|
||||
|
||||
@@ -1817,7 +1817,7 @@ void PeerImp::recvGetLedger (protocol::TMGetLedger& packet, ScopedLock& MasterLo
|
||||
if (packet.has_ledgerseq ())
|
||||
seq = packet.ledgerseq ();
|
||||
|
||||
std::vector<Peer::pointer> peerList = theApp->getPeers ().getPeerVector ();
|
||||
std::vector<Peer::pointer> peerList = getApp().getPeers ().getPeerVector ();
|
||||
std::vector<Peer::pointer> usablePeers;
|
||||
BOOST_FOREACH (Peer::ref peer, peerList)
|
||||
{
|
||||
@@ -1840,17 +1840,17 @@ void PeerImp::recvGetLedger (protocol::TMGetLedger& packet, ScopedLock& MasterLo
|
||||
}
|
||||
else if (packet.has_ledgerseq ())
|
||||
{
|
||||
ledger = theApp->getLedgerMaster ().getLedgerBySeq (packet.ledgerseq ());
|
||||
ledger = getApp().getLedgerMaster ().getLedgerBySeq (packet.ledgerseq ());
|
||||
CondLog (!ledger, lsDEBUG, Peer) << "Don't have ledger " << packet.ledgerseq ();
|
||||
}
|
||||
else if (packet.has_ltype () && (packet.ltype () == protocol::ltCURRENT))
|
||||
ledger = theApp->getLedgerMaster ().getCurrentLedger ();
|
||||
ledger = getApp().getLedgerMaster ().getCurrentLedger ();
|
||||
else if (packet.has_ltype () && (packet.ltype () == protocol::ltCLOSED) )
|
||||
{
|
||||
ledger = theApp->getLedgerMaster ().getClosedLedger ();
|
||||
ledger = getApp().getLedgerMaster ().getClosedLedger ();
|
||||
|
||||
if (ledger && !ledger->isClosed ())
|
||||
ledger = theApp->getLedgerMaster ().getLedgerBySeq (ledger->getLedgerSeq () - 1);
|
||||
ledger = getApp().getLedgerMaster ().getLedgerBySeq (ledger->getLedgerSeq () - 1);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -2021,7 +2021,7 @@ void PeerImp::recvLedger (const boost::shared_ptr<protocol::TMLedgerData>& packe
|
||||
|
||||
if (packet.has_requestcookie ())
|
||||
{
|
||||
Peer::pointer target = theApp->getPeers ().getPeerById (packet.requestcookie ());
|
||||
Peer::pointer target = getApp().getPeers ().getPeerById (packet.requestcookie ());
|
||||
|
||||
if (target)
|
||||
{
|
||||
@@ -2069,7 +2069,7 @@ void PeerImp::recvLedger (const boost::shared_ptr<protocol::TMLedgerData>& packe
|
||||
nodeData.push_back (Blob (node.nodedata ().begin (), node.nodedata ().end ()));
|
||||
}
|
||||
|
||||
SHAMapAddNode san = theApp->getOPs ().gotTXData (shared_from_this (), hash, nodeIDs, nodeData);
|
||||
SHAMapAddNode san = getApp().getOPs ().gotTXData (shared_from_this (), hash, nodeIDs, nodeData);
|
||||
|
||||
if (san.isInvalid ())
|
||||
applyLoadCharge (LT_UnwantedData);
|
||||
@@ -2077,9 +2077,9 @@ void PeerImp::recvLedger (const boost::shared_ptr<protocol::TMLedgerData>& packe
|
||||
return;
|
||||
}
|
||||
|
||||
if (theApp->getInboundLedgers ().awaitLedgerData (hash))
|
||||
theApp->getJobQueue ().addJob (jtLEDGER_DATA, "gotLedgerData",
|
||||
BIND_TYPE (&InboundLedgers::gotLedgerData, &theApp->getInboundLedgers (),
|
||||
if (getApp().getInboundLedgers ().awaitLedgerData (hash))
|
||||
getApp().getJobQueue ().addJob (jtLEDGER_DATA, "gotLedgerData",
|
||||
BIND_TYPE (&InboundLedgers::gotLedgerData, &getApp().getInboundLedgers (),
|
||||
P_1, hash, packet_ptr, boost::weak_ptr<Peer> (shared_from_this ())));
|
||||
else
|
||||
applyLoadCharge (LT_UnwantedData);
|
||||
@@ -2173,21 +2173,21 @@ void PeerImp::sendHello ()
|
||||
getSessionCookie (strCookie);
|
||||
mCookieHash = Serializer::getSHA512Half (strCookie);
|
||||
|
||||
theApp->getLocalCredentials ().getNodePrivate ().signNodePrivate (mCookieHash, vchSig);
|
||||
getApp().getLocalCredentials ().getNodePrivate ().signNodePrivate (mCookieHash, vchSig);
|
||||
|
||||
protocol::TMHello h;
|
||||
|
||||
h.set_protoversion (MAKE_VERSION_INT (PROTO_VERSION_MAJOR, PROTO_VERSION_MINOR));
|
||||
h.set_protoversionmin (MAKE_VERSION_INT (MIN_PROTO_MAJOR, MIN_PROTO_MINOR));
|
||||
h.set_fullversion (SERVER_VERSION);
|
||||
h.set_nettime (theApp->getOPs ().getNetworkTimeNC ());
|
||||
h.set_nodepublic (theApp->getLocalCredentials ().getNodePublic ().humanNodePublic ());
|
||||
h.set_nettime (getApp().getOPs ().getNetworkTimeNC ());
|
||||
h.set_nodepublic (getApp().getLocalCredentials ().getNodePublic ().humanNodePublic ());
|
||||
h.set_nodeproof (&vchSig[0], vchSig.size ());
|
||||
h.set_ipv4port (theConfig.PEER_PORT);
|
||||
h.set_nodeprivate (theConfig.PEER_PRIVATE);
|
||||
h.set_testnet (theConfig.TESTNET);
|
||||
|
||||
Ledger::pointer closedLedger = theApp->getLedgerMaster ().getClosedLedger ();
|
||||
Ledger::pointer closedLedger = getApp().getLedgerMaster ().getClosedLedger ();
|
||||
|
||||
if (closedLedger && closedLedger->isClosed ())
|
||||
{
|
||||
@@ -2215,7 +2215,7 @@ void PeerImp::sendGetPeers ()
|
||||
|
||||
void PeerImp::applyLoadCharge (LoadType loadType)
|
||||
{
|
||||
if (theApp->getLoadManager ().applyLoadCharge (mLoad, loadType))
|
||||
if (getApp().getLoadManager ().applyLoadCharge (mLoad, loadType))
|
||||
{
|
||||
// UNIMPLEMENTED
|
||||
|
||||
@@ -2254,7 +2254,7 @@ void PeerImp::doProofOfWork (Job&, boost::weak_ptr <Peer> peer, ProofOfWork::poi
|
||||
|
||||
void PeerImp::doFetchPack (const boost::shared_ptr<protocol::TMGetObjectByHash>& packet)
|
||||
{
|
||||
if (theApp->getFeeTrack ().isLoaded ())
|
||||
if (getApp().getFeeTrack ().isLoaded ())
|
||||
{
|
||||
WriteLog (lsINFO, Peer) << "Too busy to make fetch pack";
|
||||
return;
|
||||
@@ -2270,7 +2270,7 @@ void PeerImp::doFetchPack (const boost::shared_ptr<protocol::TMGetObjectByHash>&
|
||||
uint256 hash;
|
||||
memcpy (hash.begin (), packet->ledgerhash ().data (), 32);
|
||||
|
||||
Ledger::pointer haveLedger = theApp->getOPs ().getLedgerByHash (hash);
|
||||
Ledger::pointer haveLedger = getApp().getOPs ().getLedgerByHash (hash);
|
||||
|
||||
if (!haveLedger)
|
||||
{
|
||||
@@ -2286,7 +2286,7 @@ void PeerImp::doFetchPack (const boost::shared_ptr<protocol::TMGetObjectByHash>&
|
||||
return;
|
||||
}
|
||||
|
||||
Ledger::pointer wantLedger = theApp->getOPs ().getLedgerByHash (haveLedger->getParentHash ());
|
||||
Ledger::pointer wantLedger = getApp().getOPs ().getLedgerByHash (haveLedger->getParentHash ());
|
||||
|
||||
if (!wantLedger)
|
||||
{
|
||||
@@ -2295,8 +2295,8 @@ void PeerImp::doFetchPack (const boost::shared_ptr<protocol::TMGetObjectByHash>&
|
||||
return;
|
||||
}
|
||||
|
||||
theApp->getJobQueue ().addJob (jtPACK, "MakeFetchPack",
|
||||
BIND_TYPE (&NetworkOPs::makeFetchPack, &theApp->getOPs (), P_1,
|
||||
getApp().getJobQueue ().addJob (jtPACK, "MakeFetchPack",
|
||||
BIND_TYPE (&NetworkOPs::makeFetchPack, &getApp().getOPs (), P_1,
|
||||
boost::weak_ptr<Peer> (shared_from_this ()), packet, wantLedger, haveLedger, UptimeTimer::getInstance ().getElapsedSeconds ()));
|
||||
}
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ PeerSet::PeerSet (uint256 const& hash, int interval)
|
||||
, mFailed (false)
|
||||
, mProgress (true)
|
||||
, mAggressive (false)
|
||||
, mTimer (theApp->getIOService ())
|
||||
, mTimer (getApp().getIOService ())
|
||||
{
|
||||
mLastAction = UptimeTimer::getInstance ().getElapsedSeconds ();
|
||||
assert ((mTimerInterval > 10) && (mTimerInterval < 30000));
|
||||
@@ -74,7 +74,7 @@ void PeerSet::TimerEntry (boost::weak_ptr<PeerSet> wptr, const boost::system::er
|
||||
|
||||
if (ptr)
|
||||
{
|
||||
int jc = theApp->getJobQueue ().getJobCountTotal (jtLEDGER_DATA);
|
||||
int jc = getApp().getJobQueue ().getJobCountTotal (jtLEDGER_DATA);
|
||||
|
||||
if (jc > 4)
|
||||
{
|
||||
@@ -82,7 +82,7 @@ void PeerSet::TimerEntry (boost::weak_ptr<PeerSet> wptr, const boost::system::er
|
||||
ptr->setTimer ();
|
||||
}
|
||||
else
|
||||
theApp->getJobQueue ().addJob (jtLEDGER_DATA, "timerEntry", BIND_TYPE (&PeerSet::TimerJobEntry, P_1, ptr));
|
||||
getApp().getJobQueue ().addJob (jtLEDGER_DATA, "timerEntry", BIND_TYPE (&PeerSet::TimerJobEntry, P_1, ptr));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -150,8 +150,8 @@ void Peers::start ()
|
||||
bool Peers::getTopNAddrs (int n, std::vector<std::string>& addrs)
|
||||
{
|
||||
// XXX Filter out other local addresses (like ipv6)
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
ScopedLock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
ScopedLock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
|
||||
SQL_FOREACH (db, str (boost::format ("SELECT IpPort FROM PeerIps LIMIT %d") % n) )
|
||||
{
|
||||
@@ -169,11 +169,11 @@ bool Peers::savePeer (const std::string& strIp, int iPort, char code)
|
||||
{
|
||||
bool bNew = false;
|
||||
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
|
||||
std::string ipPort = sqlEscape (str (boost::format ("%s %d") % strIp % iPort));
|
||||
|
||||
ScopedLock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
ScopedLock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
std::string sql = str (boost::format ("SELECT COUNT(*) FROM PeerIps WHERE IpPort=%s;") % ipPort);
|
||||
|
||||
if (db->executeSQL (sql) && db->startIterRows ())
|
||||
@@ -228,7 +228,7 @@ bool Peers::hasPeer (const uint64& id)
|
||||
// <-- true, if a peer is available to connect to
|
||||
bool Peers::peerAvailable (std::string& strIp, int& iPort)
|
||||
{
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
std::vector<std::string> vstrIpPort;
|
||||
|
||||
// Convert mIpMap (list of open connections) to a vector of "<ip> <port>".
|
||||
@@ -250,7 +250,7 @@ bool Peers::peerAvailable (std::string& strIp, int& iPort)
|
||||
std::string strIpPort;
|
||||
|
||||
{
|
||||
ScopedLock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
ScopedLock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
|
||||
if (db->executeSQL (str (boost::format ("SELECT IpPort FROM PeerIps WHERE ScanNext IS NULL AND IpPort NOT IN (%s) LIMIT 1;")
|
||||
% strJoin (vstrIpPort.begin (), vstrIpPort.end (), ",")))
|
||||
@@ -399,12 +399,12 @@ void Peers::relayMessageTo (const std::set<uint64>& fromPeers, const PackedMessa
|
||||
void Peers::connectTo (const std::string& strIp, int iPort)
|
||||
{
|
||||
{
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
ScopedLock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
ScopedLock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
|
||||
db->executeSQL (str (boost::format ("REPLACE INTO PeerIps (IpPort,Score,Source,ScanNext) values (%s,%d,'%c',0);")
|
||||
% sqlEscape (str (boost::format ("%s %d") % strIp % iPort))
|
||||
% theApp->getUNL ().iSourceScore (IUniqueNodeList::vsManual)
|
||||
% getApp().getUNL ().iSourceScore (IUniqueNodeList::vsManual)
|
||||
% char (IUniqueNodeList::vsManual)));
|
||||
}
|
||||
|
||||
@@ -425,8 +425,8 @@ Peer::pointer Peers::peerConnect (const std::string& strIp, int iPort)
|
||||
|
||||
if (mIpMap.find (pipPeer) == mIpMap.end ())
|
||||
{
|
||||
ppResult = Peer::New (theApp->getIOService (),
|
||||
theApp->getPeerDoor ().getSSLContext (),
|
||||
ppResult = Peer::New (getApp().getIOService (),
|
||||
getApp().getPeerDoor ().getSSLContext (),
|
||||
++mLastPeer,
|
||||
false);
|
||||
|
||||
@@ -501,7 +501,7 @@ bool Peers::peerConnected (Peer::ref peer, const RippleAddress& naPeer,
|
||||
|
||||
assert (!!peer);
|
||||
|
||||
if (naPeer == theApp->getLocalCredentials ().getNodePublic ())
|
||||
if (naPeer == getApp().getLocalCredentials ().getNodePublic ())
|
||||
{
|
||||
WriteLog (lsINFO, Peers) << "Pool: Connected: self: " << ADDRESS_SHARED (peer) << ": " << naPeer.humanNodePublic () << " " << strIP << " " << iPort;
|
||||
}
|
||||
@@ -601,8 +601,8 @@ bool Peers::peerScanSet (const std::string& strIp, int iPort)
|
||||
std::string strIpPort = str (boost::format ("%s %d") % strIp % iPort);
|
||||
bool bScanDirty = false;
|
||||
|
||||
ScopedLock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
ScopedLock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
|
||||
if (db->executeSQL (str (boost::format ("SELECT ScanNext FROM PeerIps WHERE IpPort=%s;")
|
||||
% sqlEscape (strIpPort)))
|
||||
@@ -711,7 +711,7 @@ void Peers::peerVerified (Peer::ref peer)
|
||||
|
||||
//WriteLog (lsINFO, Peers) << str(boost::format("Pool: Scan: connected: %s %s %s (scanned)") % ADDRESS_SHARED(peer) % strIp % iPort);
|
||||
|
||||
if (peer->getNodePublic () == theApp->getLocalCredentials ().getNodePublic ())
|
||||
if (peer->getNodePublic () == getApp().getLocalCredentials ().getNodePublic ())
|
||||
{
|
||||
// Talking to ourself. We will just back off. This lets us maybe advertise our outside address.
|
||||
|
||||
@@ -720,8 +720,8 @@ void Peers::peerVerified (Peer::ref peer)
|
||||
else
|
||||
{
|
||||
// Talking with a different peer.
|
||||
ScopedLock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
ScopedLock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
|
||||
db->executeSQL (boost::str (boost::format ("UPDATE PeerIps SET ScanNext=NULL,ScanInterval=0 WHERE IpPort=%s;")
|
||||
% sqlEscape (strIpPort)));
|
||||
@@ -788,8 +788,8 @@ void Peers::scanRefresh ()
|
||||
int iInterval;
|
||||
|
||||
{
|
||||
ScopedLock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
ScopedLock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
|
||||
if (db->executeSQL ("SELECT * FROM PeerIps INDEXED BY PeerScanIndex WHERE ScanNext NOT NULL ORDER BY ScanNext LIMIT 1;")
|
||||
&& db->startIterRows ())
|
||||
@@ -834,8 +834,8 @@ void Peers::scanRefresh ()
|
||||
iInterval *= 2;
|
||||
|
||||
{
|
||||
ScopedLock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
ScopedLock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
|
||||
db->executeSQL (boost::str (boost::format ("UPDATE PeerIps SET ScanNext=%d,ScanInterval=%d WHERE IpPort=%s;")
|
||||
% iToSeconds (tpNext)
|
||||
|
||||
@@ -821,17 +821,17 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternalNT (const SHAMapNode& id, uint2
|
||||
{
|
||||
SHAMapTreeNode::pointer ret;
|
||||
|
||||
if (!theApp->running ())
|
||||
if (!getApp().running ())
|
||||
return ret;
|
||||
|
||||
HashedObject::pointer obj (theApp->getHashedObjectStore ().retrieve (hash));
|
||||
HashedObject::pointer obj (getApp().getHashedObjectStore ().retrieve (hash));
|
||||
|
||||
if (!obj)
|
||||
{
|
||||
// WriteLog (lsTRACE, SHAMap) << "fetchNodeExternal: missing " << hash;
|
||||
if (mLedgerSeq != 0)
|
||||
{
|
||||
theApp->getOPs ().missingNodeInLedger (mLedgerSeq);
|
||||
getApp().getOPs ().missingNodeInLedger (mLedgerSeq);
|
||||
mLedgerSeq = 0;
|
||||
}
|
||||
|
||||
@@ -937,7 +937,7 @@ int SHAMap::flushDirty (DirtyMap& map, int maxNodes, HashedObjectType t, uint32
|
||||
|
||||
#endif
|
||||
|
||||
theApp->getHashedObjectStore ().store (t, seq, s.peekData (), it->second->getNodeHash ());
|
||||
getApp().getHashedObjectStore ().store (t, seq, s.peekData (), it->second->getNodeHash ());
|
||||
|
||||
if (flushed++ >= maxNodes)
|
||||
return flushed;
|
||||
|
||||
@@ -14,7 +14,7 @@ void ConsensusTransSetSF::gotNode (bool fromFilter, const SHAMapNode& id, uint25
|
||||
if (fromFilter)
|
||||
return;
|
||||
|
||||
theApp->getTempNodeCache ().store (nodeHash, nodeData);
|
||||
getApp().getTempNodeCache ().store (nodeHash, nodeData);
|
||||
|
||||
if ((type == SHAMapTreeNode::tnTRANSACTION_NM) && (nodeData.size () > 16))
|
||||
{
|
||||
@@ -27,8 +27,8 @@ void ConsensusTransSetSF::gotNode (bool fromFilter, const SHAMapNode& id, uint25
|
||||
SerializerIterator sit (s);
|
||||
SerializedTransaction::pointer stx = boost::make_shared<SerializedTransaction> (boost::ref (sit));
|
||||
assert (stx->getTransactionID () == nodeHash);
|
||||
theApp->getJobQueue ().addJob (jtTRANSACTION, "TXS->TXN",
|
||||
BIND_TYPE (&NetworkOPs::submitTransaction, &theApp->getOPs (), P_1, stx, NetworkOPs::stCallback ()));
|
||||
getApp().getJobQueue ().addJob (jtTRANSACTION, "TXS->TXN",
|
||||
BIND_TYPE (&NetworkOPs::submitTransaction, &getApp().getOPs (), P_1, stx, NetworkOPs::stCallback ()));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
@@ -40,7 +40,7 @@ void ConsensusTransSetSF::gotNode (bool fromFilter, const SHAMapNode& id, uint25
|
||||
bool ConsensusTransSetSF::haveNode (const SHAMapNode& id, uint256 const& nodeHash,
|
||||
Blob& nodeData)
|
||||
{
|
||||
if (theApp->getTempNodeCache ().retrieve (nodeHash, nodeData))
|
||||
if (getApp().getTempNodeCache ().retrieve (nodeHash, nodeData))
|
||||
return true;
|
||||
|
||||
Transaction::pointer txn = Transaction::load (nodeHash);
|
||||
@@ -73,14 +73,14 @@ void AccountStateSF::gotNode (bool fromFilter,
|
||||
Blob const& nodeData,
|
||||
SHAMapTreeNode::TNType)
|
||||
{
|
||||
theApp->getHashedObjectStore ().store (hotACCOUNT_NODE, mLedgerSeq, nodeData, nodeHash);
|
||||
getApp().getHashedObjectStore ().store (hotACCOUNT_NODE, mLedgerSeq, nodeData, nodeHash);
|
||||
}
|
||||
|
||||
bool AccountStateSF::haveNode (SHAMapNode const& id,
|
||||
uint256 const& nodeHash,
|
||||
Blob& nodeData)
|
||||
{
|
||||
return theApp->getOPs ().getFetchPack (nodeHash, nodeData);
|
||||
return getApp().getOPs ().getFetchPack (nodeHash, nodeData);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
@@ -96,7 +96,7 @@ void TransactionStateSF::gotNode (bool fromFilter,
|
||||
Blob const& nodeData,
|
||||
SHAMapTreeNode::TNType type)
|
||||
{
|
||||
theApp->getHashedObjectStore ().store (
|
||||
getApp().getHashedObjectStore ().store (
|
||||
(type == SHAMapTreeNode::tnTRANSACTION_NM) ? hotTRANSACTION : hotTRANSACTION_NODE,
|
||||
mLedgerSeq,
|
||||
nodeData,
|
||||
@@ -107,5 +107,5 @@ bool TransactionStateSF::haveNode (SHAMapNode const& id,
|
||||
uint256 const& nodeHash,
|
||||
Blob& nodeData)
|
||||
{
|
||||
return theApp->getOPs ().getFetchPack (nodeHash, nodeData);
|
||||
return getApp().getOPs ().getFetchPack (nodeHash, nodeData);
|
||||
}
|
||||
|
||||
@@ -18,9 +18,9 @@ TransactionAcquire::TransactionAcquire (uint256 const& hash) : PeerSet (hash, TX
|
||||
|
||||
static void TACompletionHandler (uint256 hash, SHAMap::pointer map)
|
||||
{
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getMasterLock ());
|
||||
theApp->getOPs ().mapComplete (hash, map);
|
||||
theApp->getInboundLedgers ().dropLedger (hash);
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getMasterLock ());
|
||||
getApp().getOPs ().mapComplete (hash, map);
|
||||
getApp().getInboundLedgers ().dropLedger (hash);
|
||||
}
|
||||
|
||||
void TransactionAcquire::done ()
|
||||
@@ -39,7 +39,7 @@ void TransactionAcquire::done ()
|
||||
map = mMap;
|
||||
}
|
||||
|
||||
theApp->getIOService ().post (BIND_TYPE (&TACompletionHandler, mHash, map));
|
||||
getApp().getIOService ().post (BIND_TYPE (&TACompletionHandler, mHash, map));
|
||||
}
|
||||
|
||||
void TransactionAcquire::onTimer (bool progress)
|
||||
@@ -50,9 +50,9 @@ void TransactionAcquire::onTimer (bool progress)
|
||||
{
|
||||
WriteLog (lsWARNING, TransactionAcquire) << "Ten timeouts on TX set " << getHash ();
|
||||
{
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getMasterLock ());
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getMasterLock ());
|
||||
|
||||
if (theApp->getOPs ().stillNeedTXSet (mHash))
|
||||
if (getApp().getOPs ().stillNeedTXSet (mHash))
|
||||
{
|
||||
WriteLog (lsWARNING, TransactionAcquire) << "Still need it";
|
||||
mTimeouts = 0;
|
||||
@@ -74,7 +74,7 @@ void TransactionAcquire::onTimer (bool progress)
|
||||
WriteLog (lsWARNING, TransactionAcquire) << "Out of peers for TX set " << getHash ();
|
||||
|
||||
bool found = false;
|
||||
std::vector<Peer::pointer> peerList = theApp->getPeers ().getPeerVector ();
|
||||
std::vector<Peer::pointer> peerList = getApp().getPeers ().getPeerVector ();
|
||||
BOOST_FOREACH (Peer::ref peer, peerList)
|
||||
{
|
||||
if (peer->hasTxSet (getHash ()))
|
||||
|
||||
@@ -192,8 +192,8 @@ void UniqueNodeList::start ()
|
||||
// Load information about when we last updated.
|
||||
bool UniqueNodeList::miscLoad ()
|
||||
{
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
|
||||
if (!db->executeSQL ("SELECT * FROM Misc WHERE Magic=1;")) return false;
|
||||
|
||||
@@ -212,8 +212,8 @@ bool UniqueNodeList::miscLoad ()
|
||||
// Persist update information.
|
||||
bool UniqueNodeList::miscSave ()
|
||||
{
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
|
||||
db->executeSQL (str (boost::format ("REPLACE INTO Misc (Magic,FetchUpdated,ScoreUpdated) VALUES (1,%d,%d);")
|
||||
% iToSeconds (mtpFetchUpdated)
|
||||
@@ -240,8 +240,8 @@ void UniqueNodeList::trustedLoad ()
|
||||
WriteLog (lsWARNING, UniqueNodeList) << "Entry in cluster list invalid: '" << c << "'";
|
||||
}
|
||||
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
boost::recursive_mutex::scoped_lock slUNL (mUNLLock);
|
||||
|
||||
mUNL.clear ();
|
||||
@@ -329,12 +329,12 @@ void UniqueNodeList::scoreCompute ()
|
||||
strIndex umDomainIdx; // Map of domain to index.
|
||||
std::vector<scoreNode> vsnNodes; // Index to scoring node.
|
||||
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
|
||||
// For each entry in SeedDomains with a PublicKey:
|
||||
// - Add an entry in umPulicIdx, umDomainIdx, and vsnNodes.
|
||||
{
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
|
||||
SQL_FOREACH (db, "SELECT Domain,PublicKey,Source FROM SeedDomains;")
|
||||
{
|
||||
@@ -387,7 +387,7 @@ void UniqueNodeList::scoreCompute ()
|
||||
// For each entry in SeedNodes:
|
||||
// - Add an entry in umPulicIdx, umDomainIdx, and vsnNodes.
|
||||
{
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
|
||||
SQL_FOREACH (db, "SELECT PublicKey,Source FROM SeedNodes;")
|
||||
{
|
||||
@@ -451,7 +451,7 @@ void UniqueNodeList::scoreCompute ()
|
||||
std::string& strValidator = sn.strValidator;
|
||||
std::vector<int>& viReferrals = sn.viReferrals;
|
||||
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
|
||||
SQL_FOREACH (db, boost::str (boost::format ("SELECT Referral FROM ValidatorReferrals WHERE Validator=%s ORDER BY Entry;")
|
||||
% sqlEscape (strValidator)))
|
||||
@@ -532,7 +532,7 @@ void UniqueNodeList::scoreCompute ()
|
||||
}
|
||||
|
||||
// Persist validator scores.
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
|
||||
db->executeSQL ("BEGIN;");
|
||||
db->executeSQL ("UPDATE TrustedNodes SET Score = 0 WHERE Score != 0;");
|
||||
@@ -696,7 +696,7 @@ void UniqueNodeList::scoreTimerHandler (const boost::system::error_code& err)
|
||||
scoreNext (false);
|
||||
|
||||
// Scan may be dirty due to new ips.
|
||||
theApp->getPeers ().scanRefresh ();
|
||||
getApp().getPeers ().scanRefresh ();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -761,7 +761,7 @@ void UniqueNodeList::fetchDirty ()
|
||||
// --> naNodePublic: public key of the validating node.
|
||||
void UniqueNodeList::processIps (const std::string& strSite, const RippleAddress& naNodePublic, Section::mapped_type* pmtVecStrIps)
|
||||
{
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
|
||||
std::string strEscNodePublic = sqlEscape (naNodePublic.humanNodePublic ());
|
||||
|
||||
@@ -771,7 +771,7 @@ void UniqueNodeList::processIps (const std::string& strSite, const RippleAddress
|
||||
|
||||
// Remove all current Validator's entries in IpReferrals
|
||||
{
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
db->executeSQL (str (boost::format ("DELETE FROM IpReferrals WHERE Validator=%s;") % strEscNodePublic));
|
||||
// XXX Check result.
|
||||
}
|
||||
@@ -814,7 +814,7 @@ void UniqueNodeList::processIps (const std::string& strSite, const RippleAddress
|
||||
{
|
||||
vstrValues.resize (iValues);
|
||||
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
db->executeSQL (str (boost::format ("INSERT INTO IpReferrals (Validator,Entry,IP,Port) VALUES %s;")
|
||||
% strJoin (vstrValues.begin (), vstrValues.end (), ",")));
|
||||
// XXX Check result.
|
||||
@@ -831,7 +831,7 @@ void UniqueNodeList::processIps (const std::string& strSite, const RippleAddress
|
||||
// --> vsWhy: reason for adding validator to SeedDomains or SeedNodes.
|
||||
int UniqueNodeList::processValidators (const std::string& strSite, const std::string& strValidatorsSrc, const RippleAddress& naNodePublic, validatorSource vsWhy, Section::mapped_type* pmtVecStrValidators)
|
||||
{
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
std::string strNodePublic = naNodePublic.isValid () ? naNodePublic.humanNodePublic () : strValidatorsSrc;
|
||||
int iValues = 0;
|
||||
|
||||
@@ -843,7 +843,7 @@ int UniqueNodeList::processValidators (const std::string& strSite, const std::st
|
||||
|
||||
// Remove all current Validator's entries in ValidatorReferrals
|
||||
{
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
|
||||
db->executeSQL (str (boost::format ("DELETE FROM ValidatorReferrals WHERE Validator='%s';") % strNodePublic));
|
||||
// XXX Check result.
|
||||
@@ -915,7 +915,7 @@ int UniqueNodeList::processValidators (const std::string& strSite, const std::st
|
||||
std::string strSql = str (boost::format ("INSERT INTO ValidatorReferrals (Validator,Entry,Referral) VALUES %s;")
|
||||
% strJoin (vstrValues.begin (), vstrValues.end (), ","));
|
||||
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
|
||||
db->executeSQL (strSql);
|
||||
// XXX Check result.
|
||||
@@ -965,7 +965,7 @@ void UniqueNodeList::getIpsUrl (const RippleAddress& naNodePublic, Section secSi
|
||||
{
|
||||
HttpsClient::httpsGet (
|
||||
true,
|
||||
theApp->getIOService (),
|
||||
getApp().getIOService (),
|
||||
strDomain,
|
||||
443,
|
||||
strPath,
|
||||
@@ -1016,7 +1016,7 @@ void UniqueNodeList::getValidatorsUrl (const RippleAddress& naNodePublic, Sectio
|
||||
{
|
||||
HttpsClient::httpsGet (
|
||||
true,
|
||||
theApp->getIOService (),
|
||||
getApp().getIOService (),
|
||||
strDomain,
|
||||
443,
|
||||
strPath,
|
||||
@@ -1194,7 +1194,7 @@ void UniqueNodeList::fetchProcess (std::string strDomain)
|
||||
|
||||
HttpsClient::httpsGet (
|
||||
true,
|
||||
theApp->getIOService (),
|
||||
getApp().getIOService (),
|
||||
deqSites,
|
||||
443,
|
||||
NODE_FILE_PATH,
|
||||
@@ -1231,8 +1231,8 @@ void UniqueNodeList::fetchNext ()
|
||||
boost::posix_time::ptime tpNext;
|
||||
boost::posix_time::ptime tpNow;
|
||||
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
|
||||
if (db->executeSQL ("SELECT Domain,Next FROM SeedDomains INDEXED BY SeedDomainNext ORDER BY Next LIMIT 1;")
|
||||
&& db->startIterRows ())
|
||||
@@ -1349,12 +1349,12 @@ int UniqueNodeList::iSourceScore (validatorSource vsWhy)
|
||||
bool UniqueNodeList::getSeedDomains (const std::string& strDomain, seedDomain& dstSeedDomain)
|
||||
{
|
||||
bool bResult;
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
|
||||
std::string strSql = boost::str (boost::format ("SELECT * FROM SeedDomains WHERE Domain=%s;")
|
||||
% sqlEscape (strDomain));
|
||||
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
|
||||
bResult = db->executeSQL (strSql) && db->startIterRows ();
|
||||
|
||||
@@ -1407,7 +1407,7 @@ bool UniqueNodeList::getSeedDomains (const std::string& strDomain, seedDomain& d
|
||||
// Persist a SeedDomain.
|
||||
void UniqueNodeList::setSeedDomains (const seedDomain& sdSource, bool bNext)
|
||||
{
|
||||
Database* db = theApp->getWalletDB ()->getDB ();
|
||||
Database* db = getApp().getWalletDB ()->getDB ();
|
||||
|
||||
int iNext = iToSeconds (sdSource.tpNext);
|
||||
int iScan = iToSeconds (sdSource.tpScan);
|
||||
@@ -1426,7 +1426,7 @@ void UniqueNodeList::setSeedDomains (const seedDomain& sdSource, bool bNext)
|
||||
% sqlEscape (sdSource.strComment)
|
||||
);
|
||||
|
||||
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
|
||||
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
|
||||
|
||||
if (!db->executeSQL (strSql))
|
||||
{
|
||||
@@ -1489,12 +1489,12 @@ void UniqueNodeList::nodeAddDomain (std::string strDomain, validatorSource vsWhy
|
||||
bool UniqueNodeList::getSeedNodes (const RippleAddress& naNodePublic, seedNode& dstSeedNode)
{
bool bResult;
Database* db = theApp->getWalletDB ()->getDB ();
Database* db = getApp().getWalletDB ()->getDB ();

std::string strSql = str (boost::format ("SELECT * FROM SeedNodes WHERE PublicKey='%s';")
% naNodePublic.humanNodePublic ());

boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());

bResult = db->executeSQL (strSql) && db->startIterRows ();

@@ -1547,7 +1547,7 @@ bool UniqueNodeList::getSeedNodes (const RippleAddress& naNodePublic, seedNode&
// <-- bNext: true, to do fetching if needed.
void UniqueNodeList::setSeedNodes (const seedNode& snSource, bool bNext)
{
Database* db = theApp->getWalletDB ()->getDB ();
Database* db = getApp().getWalletDB ()->getDB ();

int iNext = iToSeconds (snSource.tpNext);
int iScan = iToSeconds (snSource.tpScan);
@@ -1568,7 +1568,7 @@ void UniqueNodeList::setSeedNodes (const seedNode& snSource, bool bNext)
);

{
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());

if (!db->executeSQL (strSql))
{
@@ -1627,8 +1627,8 @@ void UniqueNodeList::nodeAddPublic (const RippleAddress& naNodePublic, validator
void UniqueNodeList::nodeRemovePublic (const RippleAddress& naNodePublic)
{
{
Database* db = theApp->getWalletDB ()->getDB ();
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
Database* db = getApp().getWalletDB ()->getDB ();
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());

db->executeSQL (str (boost::format ("DELETE FROM SeedNodes WHERE PublicKey=%s") % sqlEscape (naNodePublic.humanNodePublic ())));
db->executeSQL (str (boost::format ("DELETE FROM TrustedNodes WHERE PublicKey=%s") % sqlEscape (naNodePublic.humanNodePublic ())));
@@ -1647,8 +1647,8 @@ void UniqueNodeList::nodeRemoveDomain (std::string strDomain)
boost::to_lower (strDomain);

{
Database* db = theApp->getWalletDB ()->getDB ();
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
Database* db = getApp().getWalletDB ()->getDB ();
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());

db->executeSQL (str (boost::format ("DELETE FROM SeedDomains WHERE Domain=%s") % sqlEscape (strDomain)));
}
@@ -1660,9 +1660,9 @@ void UniqueNodeList::nodeRemoveDomain (std::string strDomain)
void UniqueNodeList::nodeReset ()
{
{
Database* db = theApp->getWalletDB ()->getDB ();
Database* db = getApp().getWalletDB ()->getDB ();

boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());

// XXX Check results.
db->executeSQL ("DELETE FROM SeedDomains");
@@ -1674,11 +1674,11 @@ void UniqueNodeList::nodeReset ()

Json::Value UniqueNodeList::getUnlJson ()
{
Database* db = theApp->getWalletDB ()->getDB ();
Database* db = getApp().getWalletDB ()->getDB ();

Json::Value ret (Json::arrayValue);

boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());
SQL_FOREACH (db, "SELECT * FROM TrustedNodes;")
{
Json::Value node (Json::objectValue);
@@ -1770,7 +1770,7 @@ void UniqueNodeList::nodeNetwork ()
{
HttpsClient::httpsGet (
true,
theApp->getIOService (),
getApp().getIOService (),
theConfig.VALIDATORS_SITE,
443,
theConfig.VALIDATORS_URI,
@@ -1784,10 +1784,10 @@ void UniqueNodeList::nodeBootstrap ()
{
int iDomains = 0;
int iNodes = 0;
Database* db = theApp->getWalletDB ()->getDB ();
Database* db = getApp().getWalletDB ()->getDB ();

{
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());

if (db->executeSQL (str (boost::format ("SELECT COUNT(*) AS Count FROM SeedDomains WHERE Source='%s' OR Source='%c';") % vsManual % vsValidator)) && db->startIterRows ())
iDomains = db->getInt ("Count");
@@ -1860,7 +1860,7 @@ void UniqueNodeList::nodeBootstrap ()

if (!vstrValues.empty ())
{
boost::recursive_mutex::scoped_lock sl (theApp->getWalletDB ()->getDBLock ());
boost::recursive_mutex::scoped_lock sl (getApp().getWalletDB ()->getDBLock ());

db->executeSQL (str (boost::format ("REPLACE INTO PeerIps (IpPort,Source) VALUES %s;")
% strJoin (vstrValues.begin (), vstrValues.end (), ",")));
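
Every hunk above makes the same mechanical substitution: the raw global pointer theApp is replaced with a call to the accessor getApp(). The accessor's definition is not part of this diff; the following is only a hypothetical sketch, under assumed names, of the kind of scoped-instance accessor such a substitution enables, letting the Application object be destroyed deterministically on exit instead of leaking through a global.

// Hypothetical sketch -- not the code introduced by this commit.
#include <cassert>
#include <memory>

class Application
{
public:
    void run () { /* ... */ }
};

static Application* s_app = nullptr;   // published by main(), cleared before exit

Application& getApp ()
{
    assert (s_app != nullptr);         // callers must not outlive the Application
    return *s_app;
}

int main ()
{
    {
        std::unique_ptr<Application> app (new Application);
        s_app = app.get ();
        getApp ().run ();              // call sites use the accessor, not a global
        s_app = nullptr;               // unpublish before destruction
    }                                   // Application destroyed here, on scope exit
    return 0;
}
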
@@ -52,10 +52,10 @@ private:
RippleAddress signer = val->getSignerPublic ();
bool isCurrent = false;

if (theApp->getUNL ().nodeInUNL (signer) || val->isTrusted ())
if (getApp().getUNL ().nodeInUNL (signer) || val->isTrusted ())
{
val->setTrusted ();
uint32 now = theApp->getOPs ().getCloseTimeNC ();
uint32 now = getApp().getOPs ().getCloseTimeNC ();
uint32 valClose = val->getSignTime ();

if ((now > (valClose - LEDGER_EARLY_INTERVAL)) && (now < (valClose + LEDGER_VAL_INTERVAL)))
@@ -104,7 +104,7 @@ private:
<< " added " << (val->isTrusted () ? "trusted/" : "UNtrusted/") << (isCurrent ? "current" : "stale");

if (val->isTrusted ())
theApp->getLedgerMaster ().checkAccept (hash);
getApp().getLedgerMaster ().checkAccept (hash);

// FIXME: This never forwards untrusted validations
return isCurrent;
@@ -136,7 +136,7 @@ private:

if (set)
{
uint32 now = theApp->getOPs ().getNetworkTimeNC ();
uint32 now = getApp().getOPs ().getNetworkTimeNC ();
BOOST_FOREACH (u160_val_pair & it, *set)
{
bool isTrusted = it.second->isTrusted ();
@@ -241,7 +241,7 @@ private:

std::list<SerializedValidation::pointer> getCurrentTrustedValidations ()
{
uint32 cutoff = theApp->getOPs ().getNetworkTimeNC () - LEDGER_VAL_INTERVAL;
uint32 cutoff = getApp().getOPs ().getNetworkTimeNC () - LEDGER_VAL_INTERVAL;

std::list<SerializedValidation::pointer> ret;

@@ -276,7 +276,7 @@ private:
boost::unordered_map<uint256, currentValidationCount>
getCurrentValidations (uint256 currentLedger, uint256 priorLedger)
{
uint32 cutoff = theApp->getOPs ().getNetworkTimeNC () - LEDGER_VAL_INTERVAL;
uint32 cutoff = getApp().getOPs ().getNetworkTimeNC () - LEDGER_VAL_INTERVAL;
bool valCurrentLedger = currentLedger.isNonZero ();
bool valPriorLedger = priorLedger.isNonZero ();

@@ -358,13 +358,13 @@ private:
return;

mWriting = true;
theApp->getJobQueue ().addJob (jtWRITE, "Validations::doWrite",
getApp().getJobQueue ().addJob (jtWRITE, "Validations::doWrite",
BIND_TYPE (&Validations::doWrite, this, P_1));
}

void doWrite (Job&)
{
LoadEvent::autoptr event (theApp->getJobQueue ().getLoadEventAP (jtDISK, "ValidationWrite"));
LoadEvent::autoptr event (getApp().getJobQueue ().getLoadEventAP (jtDISK, "ValidationWrite"));
boost::format insVal ("INSERT INTO Validations "
"(LedgerHash,NodePubKey,SignTime,RawData) VALUES ('%s','%s','%u',%s);");

@@ -378,8 +378,8 @@ private:
mStaleValidations.swap (vector);
sl.unlock ();
{
Database* db = theApp->getLedgerDB ()->getDB ();
ScopedLock dbl (theApp->getLedgerDB ()->getDBLock ());
Database* db = getApp().getLedgerDB ()->getDB ();
ScopedLock dbl (getApp().getLedgerDB ()->getDBLock ());

Serializer s (1024);
db->executeSQL ("BEGIN TRANSACTION;");
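
The final hunk touches Validations::doWrite, which swaps the queue of stale validations out under its lock and then inserts them in a single SQL transaction while holding the ledger database lock. The sketch below is a simplified, hypothetical restatement of that batching pattern; the Database stand-in and flushPending helper are illustrative names, not the real rippled classes.

// Hypothetical sketch of the batched-write pattern: drain the queue under
// the container lock, then write everything inside one transaction while
// holding the database lock.
#include <mutex>
#include <string>
#include <vector>

struct Database                         // stand-in for the real Database interface
{
    void executeSQL (std::string const&) { /* ... */ }
};

static std::mutex queueMutex;           // protects pendingWrites
static std::vector<std::string> pendingWrites;

void flushPending (Database& db, std::mutex& dbMutex)
{
    std::vector<std::string> batch;
    {
        std::lock_guard<std::mutex> lock (queueMutex);
        batch.swap (pendingWrites);     // take the whole queue at once
    }

    if (batch.empty ())
        return;

    std::lock_guard<std::mutex> dbLock (dbMutex);
    db.executeSQL ("BEGIN TRANSACTION;");
    for (std::string const& stmt : batch)
        db.executeSQL (stmt);           // e.g. INSERT INTO Validations ...
    db.executeSQL ("END TRANSACTION;"); // commit; exact statement may differ in the real code
}
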