Switch over more files to use the new Log functions
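Summary of the macro migration applied throughout the hunks below, as a minimal before/after sketch inferred from the diff itself (the partition name LedgerAcquire is taken from the hunks; the message text and condition are illustrative only):

    cLog(lsWARNING) << "message";                               // old: partition implied by the file's SETUP_LOG()
    WriteLog (lsWARNING, LedgerAcquire) << "message";           // new: partition named at the call site

    if (sLog(lsTRACE)) { /* build expensive trace output */ }   // old level check
    if (ShouldLog (lsTRACE, LedgerAcquire)) { /* ... */ }       // new level check

    tLog(count == 1, lsTRACE) << "message";                     // old conditional log
    CondLog (count == 1, lsTRACE, LedgerAcquire) << "message";  // new conditional log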
@@ -30,31 +30,27 @@
#pragma warning (disable: 4244) // conversion, possible loss of data
#endif

#if 0
#include "src/cpp/ripple/Contract.cpp"
#include "src/cpp/ripple/Operation.cpp"
#include "src/cpp/ripple/Contract.cpp" // no log
#include "src/cpp/ripple/Operation.cpp" // no log

#include "src/cpp/ripple/AcceptedLedger.cpp"
#include "src/cpp/ripple/AccountItems.cpp"
#include "src/cpp/ripple/AccountState.cpp"
#endif
#include "src/cpp/ripple/AcceptedLedger.cpp" // no log
#include "src/cpp/ripple/AccountItems.cpp" // no log
#include "src/cpp/ripple/AccountState.cpp" // no log
#include "src/cpp/ripple/FeatureTable.cpp"
#include "src/cpp/ripple/Ledger.cpp"
#if 0
#include "src/cpp/ripple/LedgerAcquire.cpp"
#include "src/cpp/ripple/LedgerConsensus.cpp"
#include "src/cpp/ripple/LedgerEntrySet.cpp"
#include "src/cpp/ripple/LedgerFormats.cpp"
#include "src/cpp/ripple/LedgerHistory.cpp"
#include "src/cpp/ripple/LedgerFormats.cpp" // no log
#include "src/cpp/ripple/LedgerHistory.cpp" // no log
#include "src/cpp/ripple/LedgerMaster.cpp"
#include "src/cpp/ripple/LedgerProposal.cpp"
#include "src/cpp/ripple/LedgerProposal.cpp" // no log
#include "src/cpp/ripple/LedgerTiming.cpp"
#include "src/cpp/ripple/Offer.cpp"
#include "src/cpp/ripple/OrderBook.cpp"
#include "src/cpp/ripple/Offer.cpp" // no log
#include "src/cpp/ripple/OrderBook.cpp" // no log
#include "src/cpp/ripple/OrderBookDB.cpp"
#include "src/cpp/ripple/RippleCalc.cpp"
#include "src/cpp/ripple/RippleState.cpp"
#endif
#include "src/cpp/ripple/RippleState.cpp" // no log

#ifdef _MSC_VER
//#pragma warning (pop)
@@ -643,7 +643,7 @@ Ledger::pointer Ledger::getSQL(const std::string& sql)
ret->setAccepted();
if (ret->getHash() != ledgerHash)
{
if (sLog(lsERROR))
if (ShouldLog (lsERROR, Ledger))
{
Log(lsERROR) << "Failed on ledger";
Json::Value p;
@@ -1463,13 +1463,13 @@ bool Ledger::walkLedger()
{
std::vector<SHAMapMissingNode> missingNodes1, missingNodes2;
mAccountStateMap->walkMap(missingNodes1, 32);
if (sLog(lsINFO) && !missingNodes1.empty())
if (ShouldLog (lsINFO, Ledger) && !missingNodes1.empty())
{
Log(lsINFO) << missingNodes1.size() << " missing account node(s)";
Log(lsINFO) << "First: " << missingNodes1[0];
}
mTransactionMap->walkMap(missingNodes2, 32);
if (sLog(lsINFO) && !missingNodes2.empty())
if (ShouldLog (lsINFO, Ledger) && !missingNodes2.empty())
{
Log(lsINFO) << missingNodes2.size() << " missing transaction node(s)";
Log(lsINFO) << "First: " << missingNodes2[0];
@@ -10,7 +10,6 @@
#include "SHAMapSync.h"
#include "HashPrefixes.h"

SETUP_LOG();
DECLARE_INSTANCE(LedgerAcquire);

#define LA_DEBUG
@@ -55,7 +54,7 @@ void PeerSet::invokeOnTimer()
if (!mProgress)
{
++mTimeouts;
cLog(lsWARNING) << "Timeout(" << mTimeouts << ") pc=" << mPeers.size() << " acquiring " << mHash;
WriteLog (lsWARNING, LedgerAcquire) << "Timeout(" << mTimeouts << ") pc=" << mPeers.size() << " acquiring " << mHash;
onTimer(false);
}
else
@@ -78,7 +77,7 @@ void PeerSet::TimerEntry(boost::weak_ptr<PeerSet> wptr, const boost::system::err
int jc = theApp->getJobQueue().getJobCountTotal(jtLEDGER_DATA);
if (jc > 4)
{
cLog(lsDEBUG) << "Deferring PeerSet timer due to load";
WriteLog (lsDEBUG, LedgerAcquire) << "Deferring PeerSet timer due to load";
ptr->setTimer();
}
else
@@ -102,7 +101,7 @@ LedgerAcquire::LedgerAcquire(const uint256& hash, uint32 seq) : PeerSet(hash, LE
mByHash(true), mWaitCount(0), mSeq(seq)
{
#ifdef LA_DEBUG
cLog(lsTRACE) << "Acquiring ledger " << mHash;
WriteLog (lsTRACE, LedgerAcquire) << "Acquiring ledger " << mHash;
#endif
tryLocal();
}
@@ -129,7 +128,7 @@ bool LedgerAcquire::tryLocal()
std::vector<unsigned char> data;
if (!theApp->getOPs().getFetchPack(mHash, data))
return false;
cLog(lsTRACE) << "Ledger base found in fetch pack";
WriteLog (lsTRACE, LedgerAcquire) << "Ledger base found in fetch pack";
mLedger = boost::make_shared<Ledger>(data, true);
theApp->getHashedObjectStore().store(hotLEDGER, mLedger->getLedgerSeq(), data, mHash);
}
@@ -140,7 +139,7 @@ bool LedgerAcquire::tryLocal()

if (mLedger->getHash() != mHash)
{ // We know for a fact the ledger can never be acquired
cLog(lsWARNING) << mHash << " cannot be a ledger";
WriteLog (lsWARNING, LedgerAcquire) << mHash << " cannot be a ledger";
mFailed = true;
return true;
}
@@ -151,7 +150,7 @@ bool LedgerAcquire::tryLocal()
{
if (mLedger->getTransHash().isZero())
{
cLog(lsTRACE) << "No TXNs to fetch";
WriteLog (lsTRACE, LedgerAcquire) << "No TXNs to fetch";
mHaveTransactions = true;
}
else
@@ -160,11 +159,11 @@ bool LedgerAcquire::tryLocal()
{
TransactionStateSF filter(mLedger->getLedgerSeq());
mLedger->peekTransactionMap()->fetchRoot(mLedger->getTransHash(), &filter);
cLog(lsTRACE) << "Got root txn map locally";
WriteLog (lsTRACE, LedgerAcquire) << "Got root txn map locally";
std::vector<uint256> h = mLedger->getNeededTransactionHashes(1, &filter);
if (h.empty())
{
cLog(lsTRACE) << "Had full txn map locally";
WriteLog (lsTRACE, LedgerAcquire) << "Had full txn map locally";
mHaveTransactions = true;
}
}
@@ -178,7 +177,7 @@ bool LedgerAcquire::tryLocal()
{
if (mLedger->getAccountHash().isZero())
{
cLog(lsFATAL) << "We are acquiring a ledger with a zero account hash";
WriteLog (lsFATAL, LedgerAcquire) << "We are acquiring a ledger with a zero account hash";
mHaveState = true;
}
else
@@ -187,11 +186,11 @@ bool LedgerAcquire::tryLocal()
{
AccountStateSF filter(mLedger->getLedgerSeq());
mLedger->peekAccountStateMap()->fetchRoot(mLedger->getAccountHash(), &filter);
cLog(lsTRACE) << "Got root AS map locally";
WriteLog (lsTRACE, LedgerAcquire) << "Got root AS map locally";
std::vector<uint256> h = mLedger->getNeededAccountStateHashes(1, &filter);
if (h.empty())
{
cLog(lsTRACE) << "Had full AS map locally";
WriteLog (lsTRACE, LedgerAcquire) << "Had full AS map locally";
mHaveState = true;
}
}
@@ -203,7 +202,7 @@ bool LedgerAcquire::tryLocal()

if (mHaveTransactions && mHaveState)
{
cLog(lsDEBUG) << "Had everything locally";
WriteLog (lsDEBUG, LedgerAcquire) << "Had everything locally";
mComplete = true;
mLedger->setClosed();
mLedger->setImmutable();
@@ -219,7 +218,7 @@ void LedgerAcquire::onTimer(bool progress)

if (getTimeouts() > LEDGER_TIMEOUT_COUNT)
{
cLog(lsWARNING) << "Too many timeouts( " << getTimeouts() << ") for ledger " << mHash;
WriteLog (lsWARNING, LedgerAcquire) << "Too many timeouts( " << getTimeouts() << ") for ledger " << mHash;
setFailed();
done();
return;
@@ -230,7 +229,7 @@ void LedgerAcquire::onTimer(bool progress)
mAggressive = true;
mByHash = true;
int pc = getPeerCount();
cLog(lsDEBUG) << "No progress(" << pc << ") for ledger " << pc << mHash;
WriteLog (lsDEBUG, LedgerAcquire) << "No progress(" << pc << ") for ledger " << pc << mHash;
if (pc == 0)
addPeers();
else
@@ -300,7 +299,7 @@ void LedgerAcquire::done()
touch();

#ifdef LA_DEBUG
cLog(lsTRACE) << "Done acquiring ledger " << mHash;
WriteLog (lsTRACE, LedgerAcquire) << "Done acquiring ledger " << mHash;
#endif

assert(isComplete() || isFailed());
@@ -341,7 +340,7 @@ void LedgerAcquire::trigger(Peer::ref peer)
boost::recursive_mutex::scoped_lock sl(mLock);
if (isDone())
{
cLog(lsDEBUG) << "Trigger on ledger: " << mHash <<
WriteLog (lsDEBUG, LedgerAcquire) << "Trigger on ledger: " << mHash <<
(mAborted ? " aborted": "") << (mComplete ? " completed": "") << (mFailed ? " failed" : "");
return;
}
@@ -349,20 +348,20 @@ void LedgerAcquire::trigger(Peer::ref peer)
if ((mWaitCount > 0) && peer)
{
mRecentPeers.push_back(peer->getPeerId());
cLog(lsTRACE) << "Deferring peer";
WriteLog (lsTRACE, LedgerAcquire) << "Deferring peer";
return;
}

if (sLog(lsTRACE))
if (ShouldLog (lsTRACE, LedgerAcquire))
{
if (peer)
cLog(lsTRACE) << "Trigger acquiring ledger " << mHash << " from " << peer->getIP();
WriteLog (lsTRACE, LedgerAcquire) << "Trigger acquiring ledger " << mHash << " from " << peer->getIP();
else
cLog(lsTRACE) << "Trigger acquiring ledger " << mHash;
WriteLog (lsTRACE, LedgerAcquire) << "Trigger acquiring ledger " << mHash;
if (mComplete || mFailed)
cLog(lsTRACE) << "complete=" << mComplete << " failed=" << mFailed;
WriteLog (lsTRACE, LedgerAcquire) << "complete=" << mComplete << " failed=" << mFailed;
else
cLog(lsTRACE) << "base=" << mHaveBase << " tx=" << mHaveTransactions << " as=" << mHaveState;
WriteLog (lsTRACE, LedgerAcquire) << "base=" << mHaveBase << " tx=" << mHaveTransactions << " as=" << mHaveState;
}

if (!mHaveBase)
@@ -370,7 +369,7 @@ void LedgerAcquire::trigger(Peer::ref peer)
tryLocal();
if (mFailed)
{
cLog(lsWARNING) << " failed local for " << mHash;
WriteLog (lsWARNING, LedgerAcquire) << " failed local for " << mHash;
}
}
@@ -391,7 +390,7 @@ void LedgerAcquire::trigger(Peer::ref peer)
bool typeSet = false;
BOOST_FOREACH(neededHash_t& p, need)
{
cLog(lsWARNING) << "Want: " << p.second;
WriteLog (lsWARNING, LedgerAcquire) << "Want: " << p.second;
if (!typeSet)
{
tmBH.set_type(p.first);
@@ -417,11 +416,11 @@ void LedgerAcquire::trigger(Peer::ref peer)
}
}
}
cLog(lsINFO) << "Attempting by hash fetch for ledger " << mHash;
WriteLog (lsINFO, LedgerAcquire) << "Attempting by hash fetch for ledger " << mHash;
}
else
{
cLog(lsINFO) << "getNeededHashes says acquire is complete";
WriteLog (lsINFO, LedgerAcquire) << "getNeededHashes says acquire is complete";
mHaveBase = true;
mHaveTransactions = true;
mHaveState = true;
@@ -433,7 +432,7 @@ void LedgerAcquire::trigger(Peer::ref peer)
if (!mHaveBase && !mFailed)
{
tmGL.set_itype(ripple::liBASE);
cLog(lsTRACE) << "Sending base request to " << (peer ? "selected peer" : "all peers");
WriteLog (lsTRACE, LedgerAcquire) << "Sending base request to " << (peer ? "selected peer" : "all peers");
sendRequest(tmGL, peer);
return;
}
@@ -448,7 +447,7 @@ void LedgerAcquire::trigger(Peer::ref peer)
{ // we need the root node
tmGL.set_itype(ripple::liTX_NODE);
*(tmGL.add_nodeids()) = SHAMapNode().getRawString();
cLog(lsTRACE) << "Sending TX root request to " << (peer ? "selected peer" : "all peers");
WriteLog (lsTRACE, LedgerAcquire) << "Sending TX root request to " << (peer ? "selected peer" : "all peers");
sendRequest(tmGL, peer);
}
else
@@ -481,7 +480,7 @@ void LedgerAcquire::trigger(Peer::ref peer)
{
*(tmGL.add_nodeids()) = it.getRawString();
}
cLog(lsTRACE) << "Sending TX node " << nodeIDs.size()
WriteLog (lsTRACE, LedgerAcquire) << "Sending TX node " << nodeIDs.size()
<< " request to " << (peer ? "selected peer" : "all peers");
sendRequest(tmGL, peer);
}
@@ -496,7 +495,7 @@ void LedgerAcquire::trigger(Peer::ref peer)
{ // we need the root node
tmGL.set_itype(ripple::liAS_NODE);
*(tmGL.add_nodeids()) = SHAMapNode().getRawString();
cLog(lsTRACE) << "Sending AS root request to " << (peer ? "selected peer" : "all peers");
WriteLog (lsTRACE, LedgerAcquire) << "Sending AS root request to " << (peer ? "selected peer" : "all peers");
sendRequest(tmGL, peer);
}
else
@@ -527,9 +526,9 @@ void LedgerAcquire::trigger(Peer::ref peer)
tmGL.set_itype(ripple::liAS_NODE);
BOOST_FOREACH(SHAMapNode& it, nodeIDs)
*(tmGL.add_nodeids()) = it.getRawString();
cLog(lsTRACE) << "Sending AS node " << nodeIDs.size()
WriteLog (lsTRACE, LedgerAcquire) << "Sending AS node " << nodeIDs.size()
<< " request to " << (peer ? "selected peer" : "all peers");
tLog(nodeIDs.size() == 1, lsTRACE) << "AS node: " << nodeIDs[0];
CondLog (nodeIDs.size() == 1, lsTRACE, LedgerAcquire) << "AS node: " << nodeIDs[0];
sendRequest(tmGL, peer);
}
}
@@ -540,7 +539,7 @@ void LedgerAcquire::trigger(Peer::ref peer)

if (mComplete || mFailed)
{
cLog(lsDEBUG) << "Done:" << (mComplete ? " complete" : "") << (mFailed ? " failed " : " ")
WriteLog (lsDEBUG, LedgerAcquire) << "Done:" << (mComplete ? " complete" : "") << (mFailed ? " failed " : " ")
<< mLedger->getLedgerSeq();
sl.unlock();
done();
@@ -631,7 +630,7 @@ void LedgerAcquire::filterNodes(std::vector<SHAMapNode>& nodeIDs, std::vector<ui
}
++insertPoint;
}
cLog(lsDEBUG) << "filterNodes " << nodeIDs.size() << " to " << insertPoint;
WriteLog (lsDEBUG, LedgerAcquire) << "filterNodes " << nodeIDs.size() << " to " << insertPoint;
nodeIDs.resize(insertPoint);
nodeHashes.resize(insertPoint);
}
@@ -649,7 +648,7 @@ void LedgerAcquire::filterNodes(std::vector<SHAMapNode>& nodeIDs, std::vector<ui
bool LedgerAcquire::takeBase(const std::string& data) // data must not have hash prefix
{ // Return value: true=normal, false=bad data
#ifdef LA_DEBUG
cLog(lsTRACE) << "got base acquiring ledger " << mHash;
WriteLog (lsTRACE, LedgerAcquire) << "got base acquiring ledger " << mHash;
#endif
boost::recursive_mutex::scoped_lock sl(mLock);
if (mComplete || mFailed || mHaveBase)
@@ -657,8 +656,8 @@ bool LedgerAcquire::takeBase(const std::string& data) // data must not have hash
mLedger = boost::make_shared<Ledger>(data, false);
if (mLedger->getHash() != mHash)
{
cLog(lsWARNING) << "Acquire hash mismatch";
cLog(lsWARNING) << mLedger->getHash() << "!=" << mHash;
WriteLog (lsWARNING, LedgerAcquire) << "Acquire hash mismatch";
WriteLog (lsWARNING, LedgerAcquire) << mLedger->getHash() << "!=" << mHash;
mLedger.reset();
#ifdef TRUST_NETWORK
assert(false);
@@ -725,13 +724,13 @@ bool LedgerAcquire::takeTxNode(const std::list<SHAMapNode>& nodeIDs,
bool LedgerAcquire::takeAsNode(const std::list<SHAMapNode>& nodeIDs,
const std::list< std::vector<unsigned char> >& data, SMAddNode& san)
{
cLog(lsTRACE) << "got ASdata (" << nodeIDs.size() <<") acquiring ledger " << mHash;
tLog(nodeIDs.size() == 1, lsTRACE) << "got AS node: " << nodeIDs.front();
WriteLog (lsTRACE, LedgerAcquire) << "got ASdata (" << nodeIDs.size() <<") acquiring ledger " << mHash;
CondLog (nodeIDs.size() == 1, lsTRACE, LedgerAcquire) << "got AS node: " << nodeIDs.front();

boost::recursive_mutex::scoped_lock sl(mLock);
if (!mHaveBase)
{
cLog(lsWARNING) << "Don't have ledger base";
WriteLog (lsWARNING, LedgerAcquire) << "Don't have ledger base";
return false;
}
if (mHaveState || mFailed)
@@ -747,13 +746,13 @@ bool LedgerAcquire::takeAsNode(const std::list<SHAMapNode>& nodeIDs,
if (!san.combine(mLedger->peekAccountStateMap()->addRootNode(mLedger->getAccountHash(),
*nodeDatait, snfWIRE, &tFilter)))
{
cLog(lsWARNING) << "Bad ledger base";
WriteLog (lsWARNING, LedgerAcquire) << "Bad ledger base";
return false;
}
}
else if (!san.combine(mLedger->peekAccountStateMap()->addKnownNode(*nodeIDit, *nodeDatait, &tFilter)))
{
cLog(lsWARNING) << "Unable to add AS node";
WriteLog (lsWARNING, LedgerAcquire) << "Unable to add AS node";
return false;
}
++nodeIDit;
@@ -818,7 +817,7 @@ LedgerAcquire::pointer LedgerAcquireMaster::findCreate(const uint256& hash, uint
ledger->setClosed();
ledger->setImmutable();
theApp->getLedgerMaster().storeLedger(ledger);
cLog(lsDEBUG) << "Acquiring ledger we already have: " << hash;
WriteLog (lsDEBUG, LedgerAcquire) << "Acquiring ledger we already have: " << hash;
}
return ptr;
}
@@ -923,12 +922,12 @@ void LedgerAcquireMaster::gotLedgerData(Job&, uint256 hash,
ripple::TMLedgerData& packet = *packet_ptr;
Peer::pointer peer = wPeer.lock();

cLog(lsTRACE) << "Got data (" << packet.nodes().size() << ") for acquiring ledger: " << hash;
WriteLog (lsTRACE, LedgerAcquire) << "Got data (" << packet.nodes().size() << ") for acquiring ledger: " << hash;

LedgerAcquire::pointer ledger = find(hash);
if (!ledger)
{
cLog(lsTRACE) << "Got data for ledger we're not acquiring";
WriteLog (lsTRACE, LedgerAcquire) << "Got data for ledger we're not acquiring";
if (peer)
peer->punishPeer(LT_InvalidRequest);
return;
@@ -942,24 +941,24 @@ void LedgerAcquireMaster::gotLedgerData(Job&, uint256 hash,
{
if (packet.nodes_size() < 1)
{
cLog(lsWARNING) << "Got empty base data";
WriteLog (lsWARNING, LedgerAcquire) << "Got empty base data";
peer->punishPeer(LT_InvalidRequest);
return;
}
if (!ledger->takeBase(packet.nodes(0).nodedata()))
{
cLog(lsWARNING) << "Got invalid base data";
WriteLog (lsWARNING, LedgerAcquire) << "Got invalid base data";
peer->punishPeer(LT_InvalidRequest);
return;
}
SMAddNode san = SMAddNode::useful();
if ((packet.nodes().size() > 1) && !ledger->takeAsRootNode(strCopy(packet.nodes(1).nodedata()), san))
{
cLog(lsWARNING) << "Included ASbase invalid";
WriteLog (lsWARNING, LedgerAcquire) << "Included ASbase invalid";
}
if ((packet.nodes().size() > 2) && !ledger->takeTxRootNode(strCopy(packet.nodes(2).nodedata()), san))
{
cLog(lsWARNING) << "Included TXbase invalid";
WriteLog (lsWARNING, LedgerAcquire) << "Included TXbase invalid";
}
if (!san.isInvalid())
{
@@ -967,7 +966,7 @@ void LedgerAcquireMaster::gotLedgerData(Job&, uint256 hash,
ledger->trigger(peer);
}
else
cLog(lsDEBUG) << "Peer sends invalid base data";
WriteLog (lsDEBUG, LedgerAcquire) << "Peer sends invalid base data";
return;
}

@@ -978,7 +977,7 @@ void LedgerAcquireMaster::gotLedgerData(Job&, uint256 hash,

if (packet.nodes().size() <= 0)
{
cLog(lsINFO) << "Got response with no nodes";
WriteLog (lsINFO, LedgerAcquire) << "Got response with no nodes";
peer->punishPeer(LT_InvalidRequest);
return;
}
@@ -987,7 +986,7 @@ void LedgerAcquireMaster::gotLedgerData(Job&, uint256 hash,
const ripple::TMLedgerNode& node = packet.nodes(i);
if (!node.has_nodeid() || !node.has_nodedata())
{
cLog(lsWARNING) << "Got bad node";
WriteLog (lsWARNING, LedgerAcquire) << "Got bad node";
peer->punishPeer(LT_InvalidRequest);
return;
}
@@ -1006,11 +1005,11 @@ void LedgerAcquireMaster::gotLedgerData(Job&, uint256 hash,
ledger->trigger(peer);
}
else
cLog(lsDEBUG) << "Peer sends invalid node data";
WriteLog (lsDEBUG, LedgerAcquire) << "Peer sends invalid node data";
return;
}

cLog(lsWARNING) << "Not sure what ledger data we got";
WriteLog (lsWARNING, LedgerAcquire) << "Not sure what ledger data we got";
peer->punishPeer(LT_InvalidRequest);
}

@@ -24,7 +24,8 @@
typedef std::map<uint160, LedgerProposal::pointer>::value_type u160_prop_pair;
typedef std::map<uint256, LCTransaction::pointer>::value_type u256_lct_pair;

SETUP_LOG();
class LedgerConensus;

DECLARE_INSTANCE(LedgerConsensus);

void LCTransaction::setVote(const uint160& peer, bool votesYes)
@@ -36,25 +37,25 @@ void LCTransaction::setVote(const uint160& peer, bool votesYes)
{ // new vote
if (votesYes)
{
cLog(lsDEBUG) << "Peer " << peer << " votes YES on " << mTransactionID;
WriteLog (lsDEBUG, LedgerConensus) << "Peer " << peer << " votes YES on " << mTransactionID;
++mYays;
}
else
{
cLog(lsDEBUG) << "Peer " << peer << " votes NO on " << mTransactionID;
WriteLog (lsDEBUG, LedgerConensus) << "Peer " << peer << " votes NO on " << mTransactionID;
++mNays;
}
}
else if (votesYes && !res.first->second)
{ // changes vote to yes
cLog(lsDEBUG) << "Peer " << peer << " now votes YES on " << mTransactionID;
WriteLog (lsDEBUG, LedgerConensus) << "Peer " << peer << " now votes YES on " << mTransactionID;
--mNays;
++mYays;
res.first->second = true;
}
else if (!votesYes && res.first->second)
{ // changes vote to no
cLog(lsDEBUG) << "Peer " << peer << " now votes NO on " << mTransactionID;
WriteLog (lsDEBUG, LedgerConensus) << "Peer " << peer << " now votes NO on " << mTransactionID;
++mNays;
--mYays;
res.first->second = false;
@@ -106,15 +107,15 @@ bool LCTransaction::updateVote(int percentTime, bool proposing)

if (newPosition == mOurVote)
{
cLog(lsINFO) <<
WriteLog (lsINFO, LedgerConensus) <<
"No change (" << (mOurVote ? "YES" : "NO") << ") : weight " << weight << ", percent " << percentTime;
cLog(lsDEBUG) << getJson();
WriteLog (lsDEBUG, LedgerConensus) << getJson();
return false;
}

mOurVote = newPosition;
cLog(lsDEBUG) << "We now vote " << (mOurVote ? "YES" : "NO") << " on " << mTransactionID;
cLog(lsDEBUG) << getJson();
WriteLog (lsDEBUG, LedgerConensus) << "We now vote " << (mOurVote ? "YES" : "NO") << " on " << mTransactionID;
WriteLog (lsDEBUG, LedgerConensus) << getJson();
return true;
}

@@ -144,8 +145,8 @@ LedgerConsensus::LedgerConsensus(const uint256& prevLCLHash, Ledger::ref previou
mCurrentMSeconds(0), mClosePercent(0), mHaveCloseTimeConsensus(false),
mConsensusStartTime(boost::posix_time::microsec_clock::universal_time())
{
cLog(lsDEBUG) << "Creating consensus object";
cLog(lsTRACE) << "LCL:" << previousLedger->getHash() <<", ct=" << closeTime;
WriteLog (lsDEBUG, LedgerConensus) << "Creating consensus object";
WriteLog (lsTRACE, LedgerConensus) << "LCL:" << previousLedger->getHash() <<", ct=" << closeTime;
mPreviousProposers = theApp->getOPs().getPreviousProposers();
mPreviousMSeconds = theApp->getOPs().getPreviousConvergeTime();
assert(mPreviousMSeconds);
@@ -155,13 +156,13 @@ LedgerConsensus::LedgerConsensus(const uint256& prevLCLHash, Ledger::ref previou

if (mValPublic.isSet() && mValPrivate.isSet() && !theApp->getOPs().isNeedNetworkLedger())
{
cLog(lsINFO) << "Entering consensus process, validating";
WriteLog (lsINFO, LedgerConensus) << "Entering consensus process, validating";
mValidating = true;
mProposing = theApp->getOPs().getOperatingMode() == NetworkOPs::omFULL;
}
else
{
cLog(lsINFO) << "Entering consensus process, watching";
WriteLog (lsINFO, LedgerConensus) << "Entering consensus process, watching";
mProposing = mValidating = false;
}

@@ -173,8 +174,8 @@ LedgerConsensus::LedgerConsensus(const uint256& prevLCLHash, Ledger::ref previou
if (!mHaveCorrectLCL)
{
// mProposing = mValidating = false;
cLog(lsINFO) << "Entering consensus with: " << previousLedger->getHash();
cLog(lsINFO) << "Correct LCL is: " << prevLCLHash;
WriteLog (lsINFO, LedgerConensus) << "Entering consensus with: " << previousLedger->getHash();
WriteLog (lsINFO, LedgerConensus) << "Correct LCL is: " << prevLCLHash;
}
}
else
@@ -210,7 +211,7 @@ void LedgerConsensus::checkOurValidation()
boost::make_shared<PackedMessage>(val, ripple::mtVALIDATION));
#endif
theApp->getOPs().setLastValidation(v);
cLog(lsWARNING) << "Sending partial validation";
WriteLog (lsWARNING, LedgerConensus) << "Sending partial validation";
}

void LedgerConsensus::checkLCL()
@@ -246,15 +247,15 @@ void LedgerConsensus::checkLCL()
default: status = "unknown";
}

cLog(lsWARNING) << "View of consensus changed during " << status << " (" << netLgrCount << ") status="
WriteLog (lsWARNING, LedgerConensus) << "View of consensus changed during " << status << " (" << netLgrCount << ") status="
<< status << ", " << (mHaveCorrectLCL ? "CorrectLCL" : "IncorrectLCL");
cLog(lsWARNING) << mPrevLedgerHash << " to " << netLgr;
cLog(lsWARNING) << mPreviousLedger->getJson(0);
WriteLog (lsWARNING, LedgerConensus) << mPrevLedgerHash << " to " << netLgr;
WriteLog (lsWARNING, LedgerConensus) << mPreviousLedger->getJson(0);

if (sLog(lsDEBUG))
if (ShouldLog (lsDEBUG, LedgerConensus))
{
BOOST_FOREACH(u256_cvc_pair& it, vals)
cLog(lsDEBUG) << "V: " << it.first << ", " << it.second.first;
WriteLog (lsDEBUG, LedgerConensus) << "V: " << it.first << ", " << it.second.first;
}

if (mHaveCorrectLCL)
@@ -274,7 +275,7 @@ void LedgerConsensus::handleLCL(const uint256& lclHash)

if (mHaveCorrectLCL && mProposing && mOurPosition)
{
cLog(lsINFO) << "Bowing out of consensus";
WriteLog (lsINFO, LedgerConensus) << "Bowing out of consensus";
mOurPosition->bowOut();
propose();
}
@@ -302,7 +303,7 @@ void LedgerConsensus::handleLCL(const uint256& lclHash)
}
else if (!mAcquiringLedger || (mAcquiringLedger->getHash() != mPrevLedgerHash))
{ // need to start acquiring the correct consensus LCL
cLog(lsWARNING) << "Need consensus ledger " << mPrevLedgerHash;
WriteLog (lsWARNING, LedgerConensus) << "Need consensus ledger " << mPrevLedgerHash;
if (mAcquiringLedger)
theApp->getMasterLedgerAcquire().dropLedger(mAcquiringLedger->getHash());
mAcquiringLedger = theApp->getMasterLedgerAcquire().findCreate(mPrevLedgerHash, 0);
@@ -310,7 +311,7 @@ void LedgerConsensus::handleLCL(const uint256& lclHash)
return;
}

cLog(lsINFO) << "Have the consensus ledger " << mPrevLedgerHash;
WriteLog (lsINFO, LedgerConensus) << "Have the consensus ledger " << mPrevLedgerHash;
mHaveCorrectLCL = true;

mCloseResolution = ContinuousLedgerTiming::getNextLedgerTimeResolution(
@@ -333,7 +334,7 @@ void LedgerConsensus::takeInitialPosition(Ledger& initialLedger)
else
initialSet = initialLedger.peekTransactionMap()->snapShot(false);
uint256 txSet = initialSet->getHash();
cLog(lsINFO) << "initial position " << txSet;
WriteLog (lsINFO, LedgerConensus) << "initial position " << txSet;
mapComplete(txSet, initialSet, false);

if (mValidating)
@@ -401,13 +402,13 @@ void LedgerConsensus::createDisputes(SHAMap::ref m1, SHAMap::ref m2)

void LedgerConsensus::mapComplete(const uint256& hash, SHAMap::ref map, bool acquired)
{
tLog(acquired, lsINFO) << "We have acquired TXS " << hash;
CondLog (acquired, lsINFO, LedgerConensus) << "We have acquired TXS " << hash;

if (!map)
{ // this is an invalid/corrupt map
mAcquired[hash] = map;
mAcquiring.erase(hash);
cLog(lsWARNING) << "A trusted node directed us to acquire an invalid TXN map";
WriteLog (lsWARNING, LedgerConensus) << "A trusted node directed us to acquire an invalid TXN map";
return;
}
assert(hash == map->getHash());
@@ -450,7 +451,7 @@ void LedgerConsensus::mapComplete(const uint256& hash, SHAMap::ref map, bool acq
adjustCount(map, peers);
else
{
tLog(acquired, lsWARNING) << "By the time we got the map " << hash << " no peers were proposing it";
CondLog (acquired, lsWARNING, LedgerConensus) << "By the time we got the map " << hash << " no peers were proposing it";
}

sendHaveTxSet(hash, true);
@@ -496,7 +497,7 @@ void LedgerConsensus::statusChange(ripple::NodeEvent event, Ledger& ledger)

PackedMessage::pointer packet = boost::make_shared<PackedMessage>(s, ripple::mtSTATUS_CHANGE);
theApp->getConnectionPool().relayMessage(NULL, packet);
cLog(lsTRACE) << "send status change to peer";
WriteLog (lsTRACE, LedgerConensus) << "send status change to peer";
}

int LedgerConsensus::startup()
@@ -552,11 +553,11 @@ void LedgerConsensus::stateEstablish()
updateOurPositions();
if (!mHaveCloseTimeConsensus)
{
tLog(haveConsensus(false), lsINFO) << "We have TX consensus but not CT consensus";
CondLog (haveConsensus(false), lsINFO, LedgerConensus) << "We have TX consensus but not CT consensus";
}
else if (haveConsensus(true))
{
cLog(lsINFO) << "Converge cutoff (" << mPeerPositions.size() << " participants)";
WriteLog (lsINFO, LedgerConensus) << "Converge cutoff (" << mPeerPositions.size() << " participants)";
mState = lcsFINISHED;
beginAccept(false);
}
@@ -579,7 +580,7 @@ void LedgerConsensus::timerEntry()
{
if (doShutdown)
{
cLog(lsFATAL) << "Shutdown requested";
WriteLog (lsFATAL, LedgerConensus) << "Shutdown requested";
theApp->stop();
}

@@ -618,7 +619,7 @@ void LedgerConsensus::updateOurPositions()
if (it->second->isStale(peerCutoff))
{ // proposal is stale
uint160 peerID = it->second->getPeerID();
cLog(lsWARNING) << "Removing stale proposal from " << peerID;
WriteLog (lsWARNING, LedgerConensus) << "Removing stale proposal from " << peerID;
BOOST_FOREACH(u256_lct_pair& it, mDisputes)
it.second->unVote(peerID);
it = mPeerPositions.erase(it);
@@ -690,23 +691,23 @@ void LedgerConsensus::updateOurPositions()
threshVote = 1;
if (threshConsensus == 0)
threshConsensus = 1;
cLog(lsINFO) << "Proposers:" << mPeerPositions.size() << " nw:" << neededWeight
WriteLog (lsINFO, LedgerConensus) << "Proposers:" << mPeerPositions.size() << " nw:" << neededWeight
<< " thrV:" << threshVote << " thrC:" << threshConsensus;

for (std::map<uint32, int>::iterator it = closeTimes.begin(), end = closeTimes.end(); it != end; ++it)
{
cLog(lsDEBUG) << "CCTime: seq" << mPreviousLedger->getLedgerSeq() + 1 << ": " <<
WriteLog (lsDEBUG, LedgerConensus) << "CCTime: seq" << mPreviousLedger->getLedgerSeq() + 1 << ": " <<
it->first << " has " << it->second << ", " << threshVote << " required";
if (it->second >= threshVote)
{
cLog(lsDEBUG) << "Close time consensus reached: " << it->first;
WriteLog (lsDEBUG, LedgerConensus) << "Close time consensus reached: " << it->first;
closeTime = it->first;
threshVote = it->second;
if (threshVote >= threshConsensus)
mHaveCloseTimeConsensus = true;
}
}
tLog(!mHaveCloseTimeConsensus, lsDEBUG) << "No CT consensus: Proposers:" << mPeerPositions.size()
CondLog (!mHaveCloseTimeConsensus, lsDEBUG, LedgerConensus) << "No CT consensus: Proposers:" << mPeerPositions.size()
<< " Proposing:" << (mProposing ? "yes" : "no") << " Thresh:" << threshConsensus << " Pos:" << closeTime;
}

@@ -722,7 +723,7 @@ void LedgerConsensus::updateOurPositions()
if (changes)
{
uint256 newHash = ourPosition->getHash();
cLog(lsINFO) << "Position change: CTime " << closeTime << ", tx " << newHash;
WriteLog (lsINFO, LedgerConensus) << "Position change: CTime " << closeTime << ", tx " << newHash;
if (mOurPosition->changePosition(newHash, closeTime))
{
if (mProposing)
@@ -744,14 +745,14 @@ bool LedgerConsensus::haveConsensus(bool forReal)
++agree;
else
{
cLog(lsDEBUG) << it.first.GetHex() << " has " << it.second->getCurrentHash().GetHex();
WriteLog (lsDEBUG, LedgerConensus) << it.first.GetHex() << " has " << it.second->getCurrentHash().GetHex();
++disagree;
}
}
}
int currentValidations = theApp->getValidations().getNodesAfter(mPrevLedgerHash);

cLog(lsDEBUG) << "Checking for TX consensus: agree=" << agree << ", disagree=" << disagree;
WriteLog (lsDEBUG, LedgerConensus) << "Checking for TX consensus: agree=" << agree << ", disagree=" << disagree;

return ContinuousLedgerTiming::haveConsensus(mPreviousProposers, agree + disagree, agree, currentValidations,
mPreviousMSeconds, mCurrentMSeconds, forReal, mConsensusFail);
@@ -826,7 +827,7 @@ void LedgerConsensus::startAcquiring(TransactionAcquire::pointer acquire)

void LedgerConsensus::propose()
{
cLog(lsTRACE) << "We propose: " <<
WriteLog (lsTRACE, LedgerConensus) << "We propose: " <<
(mOurPosition->isBowOut() ? std::string("bowOut") : mOurPosition->getCurrentHash().GetHex());
ripple::TMProposeSet prop;

@@ -847,7 +848,7 @@ void LedgerConsensus::addDisputedTransaction(const uint256& txID, const std::vec
{
if (mDisputes.find(txID) != mDisputes.end())
return;
cLog(lsDEBUG) << "Transaction " << txID << " is disputed";
WriteLog (lsDEBUG, LedgerConensus) << "Transaction " << txID << " is disputed";

bool ourVote = false;
if (mOurPosition)
@@ -886,7 +887,7 @@ bool LedgerConsensus::peerPosition(LedgerProposal::ref newPosition)
uint160 peerID = newPosition->getPeerID();
if (mDeadNodes.find(peerID) != mDeadNodes.end())
{
cLog(lsINFO) << "Position from dead node: " << peerID.GetHex();
WriteLog (lsINFO, LedgerConensus) << "Position from dead node: " << peerID.GetHex();
return false;
}

@@ -901,12 +902,12 @@ bool LedgerConsensus::peerPosition(LedgerProposal::ref newPosition)

if (newPosition->getProposeSeq() == 0)
{ // new initial close time estimate
cLog(lsTRACE) << "Peer reports close time as " << newPosition->getCloseTime();
WriteLog (lsTRACE, LedgerConensus) << "Peer reports close time as " << newPosition->getCloseTime();
++mCloseTimes[newPosition->getCloseTime()];
}
else if (newPosition->getProposeSeq() == LedgerProposal::seqLeave)
{ // peer bows out
cLog(lsINFO) << "Peer bows out: " << peerID.GetHex();
WriteLog (lsINFO, LedgerConensus) << "Peer bows out: " << peerID.GetHex();
BOOST_FOREACH(u256_lct_pair& it, mDisputes)
it.second->unVote(peerID);
mPeerPositions.erase(peerID);
@@ -915,7 +916,7 @@ bool LedgerConsensus::peerPosition(LedgerProposal::ref newPosition)
}


cLog(lsTRACE) << "Processing peer proposal "
WriteLog (lsTRACE, LedgerConensus) << "Processing peer proposal "
<< newPosition->getProposeSeq() << "/" << newPosition->getCurrentHash();
currentPosition = newPosition;

@@ -927,7 +928,7 @@ bool LedgerConsensus::peerPosition(LedgerProposal::ref newPosition)
}
else
{
cLog(lsDEBUG) << "Don't have tx set for peer";
WriteLog (lsDEBUG, LedgerConensus) << "Don't have tx set for peer";
// BOOST_FOREACH(u256_lct_pair& it, mDisputes)
// it.second->unVote(peerID);
}
@@ -961,7 +962,7 @@ SMAddNode LedgerConsensus::peerGaveNodes(Peer::ref peer, const uint256& setHash,
boost::unordered_map<uint256, TransactionAcquire::pointer>::iterator acq = mAcquiring.find(setHash);
if (acq == mAcquiring.end())
{
cLog(lsDEBUG) << "Got TX data for set no longer acquiring: " << setHash;
WriteLog (lsDEBUG, LedgerConensus) << "Got TX data for set no longer acquiring: " << setHash;
return SMAddNode();
}
TransactionAcquire::pointer set = acq->second; // We must keep the set around during the function
@@ -973,7 +974,7 @@ void LedgerConsensus::beginAccept(bool synchronous)
SHAMap::pointer consensusSet = mAcquired[mOurPosition->getCurrentHash()];
if (!consensusSet)
{
cLog(lsFATAL) << "We don't have a consensus set";
WriteLog (lsFATAL, LedgerConensus) << "We don't have a consensus set";
abort();
return;
}
@@ -1003,7 +1004,7 @@ void LedgerConsensus::playbackProposals()
proposal->setPrevLedger(mPrevLedgerHash);
if (proposal->checkSign())
{
cLog(lsINFO) << "Applying stored proposal";
WriteLog (lsINFO, LedgerConensus) << "Applying stored proposal";
relay = peerPosition(proposal);
}
}
@@ -1012,13 +1013,13 @@ void LedgerConsensus::playbackProposals()

if (relay)
{
cLog(lsWARNING) << "We should do delayed relay of this proposal, but we cannot";
WriteLog (lsWARNING, LedgerConensus) << "We should do delayed relay of this proposal, but we cannot";
}
#if 0 // FIXME: We can't do delayed relay because we don't have the signature
std::set<uint64> peers
if (relay && theApp->getSuppression().swapSet(proposal.getSuppress(), set, SF_RELAYED))
{
cLog(lsDEBUG) << "Stored proposal delayed relay";
WriteLog (lsDEBUG, LedgerConensus) << "Stored proposal delayed relay";
ripple::TMProposeSet set;
set.set_proposeseq
set.set_currenttxhash(, 256 / 8);
@@ -1047,10 +1048,10 @@ int LedgerConsensus::applyTransaction(TransactionEngine& engine, SerializedTrans
if (theApp->isNewFlag(txn->getTransactionID(), SF_SIGGOOD))
parms = static_cast<TransactionEngineParams>(parms | tapNO_CHECK_SIGN);

cLog(lsDEBUG) << "TXN " << txn->getTransactionID()
WriteLog (lsDEBUG, LedgerConensus) << "TXN " << txn->getTransactionID()
<< (openLedger ? " open" : " closed")
<< (retryAssured ? "/retry" : "/final");
cLog(lsTRACE) << txn->getJson(0);
WriteLog (lsTRACE, LedgerConensus) << txn->getJson(0);

#ifndef TRUST_NETWORK
try
@@ -1061,17 +1062,17 @@ int LedgerConsensus::applyTransaction(TransactionEngine& engine, SerializedTrans
TER result = engine.applyTransaction(*txn, parms, didApply);
if (didApply)
{
cLog(lsDEBUG) << "Transaction success: " << transHuman(result);
WriteLog (lsDEBUG, LedgerConensus) << "Transaction success: " << transHuman(result);
return LCAT_SUCCESS;
}

if (isTefFailure(result) || isTemMalformed(result) || isTelLocal(result))
{ // failure
cLog(lsDEBUG) << "Transaction failure: " << transHuman(result);
WriteLog (lsDEBUG, LedgerConensus) << "Transaction failure: " << transHuman(result);
return LCAT_FAIL;
}

cLog(lsDEBUG) << "Transaction retry: " << transHuman(result);
WriteLog (lsDEBUG, LedgerConensus) << "Transaction retry: " << transHuman(result);
assert(!ledger->hasTransaction(txn->getTransactionID()));
return LCAT_RETRY;

@@ -1079,7 +1080,7 @@ int LedgerConsensus::applyTransaction(TransactionEngine& engine, SerializedTrans
}
catch (...)
{
cLog(lsWARNING) << "Throws";
WriteLog (lsWARNING, LedgerConensus) << "Throws";
return false;
}
#endif
@@ -1093,7 +1094,7 @@ void LedgerConsensus::applyTransactions(SHAMap::ref set, Ledger::ref applyLedger
for (SHAMapItem::pointer item = set->peekFirstItem(); !!item; item = set->peekNextItem(item->getTag()))
if (!checkLedger->hasTransaction(item->getTag()))
{
cLog(lsINFO) << "Processing candidate transaction: " << item->getTag();
WriteLog (lsINFO, LedgerConensus) << "Processing candidate transaction: " << item->getTag();
#ifndef TRUST_NETWORK
try
{
@@ -1106,7 +1107,7 @@ void LedgerConsensus::applyTransactions(SHAMap::ref set, Ledger::ref applyLedger
}
catch (...)
{
cLog(lsWARNING) << " Throws";
WriteLog (lsWARNING, LedgerConensus) << " Throws";
}
#endif
}
@@ -1116,7 +1117,7 @@ void LedgerConsensus::applyTransactions(SHAMap::ref set, Ledger::ref applyLedger

for (int pass = 0; pass < LEDGER_TOTAL_PASSES; ++pass)
{
cLog(lsDEBUG) << "Pass: " << pass << " Txns: " << failedTransactions.size()
WriteLog (lsDEBUG, LedgerConensus) << "Pass: " << pass << " Txns: " << failedTransactions.size()
<< (certainRetry ? " retriable" : " final");
changes = 0;

@@ -1142,11 +1143,11 @@ void LedgerConsensus::applyTransactions(SHAMap::ref set, Ledger::ref applyLedger
}
catch (...)
{
cLog(lsWARNING) << "Transaction throws";
WriteLog (lsWARNING, LedgerConensus) << "Transaction throws";
it = failedTransactions.erase(it);
}
}
cLog(lsDEBUG) << "Pass: " << pass << " finished " << changes << " changes";
WriteLog (lsDEBUG, LedgerConensus) << "Pass: " << pass << " finished " << changes << " changes";

// A non-retry pass made no changes
if (!changes && !certainRetry)
@@ -1179,10 +1180,10 @@ void LedgerConsensus::accept(SHAMap::ref set, LoadEvent::pointer)
closeTime = mPreviousLedger->getCloseTimeNC() + 1;
}

cLog(lsDEBUG) << "Report: Prop=" << (mProposing ? "yes" : "no") << " val=" << (mValidating ? "yes" : "no") <<
WriteLog (lsDEBUG, LedgerConensus) << "Report: Prop=" << (mProposing ? "yes" : "no") << " val=" << (mValidating ? "yes" : "no") <<
" corLCL=" << (mHaveCorrectLCL ? "yes" : "no") << " fail="<< (mConsensusFail ? "yes" : "no");
cLog(lsDEBUG) << "Report: Prev = " << mPrevLedgerHash << ":" << mPreviousLedger->getLedgerSeq();
cLog(lsDEBUG) << "Report: TxSt = " << set->getHash() << ", close " << closeTime << (closeTimeCorrect ? "" : "X");
WriteLog (lsDEBUG, LedgerConensus) << "Report: Prev = " << mPrevLedgerHash << ":" << mPreviousLedger->getLedgerSeq();
WriteLog (lsDEBUG, LedgerConensus) << "Report: TxSt = " << set->getHash() << ", close " << closeTime << (closeTimeCorrect ? "" : "X");

CanonicalTXSet failedTransactions(set->getHash());

@@ -1190,7 +1191,7 @@ void LedgerConsensus::accept(SHAMap::ref set, LoadEvent::pointer)

newLCL->peekTransactionMap()->armDirty();
newLCL->peekAccountStateMap()->armDirty();
cLog(lsDEBUG) << "Applying consensus set transactions to the last closed ledger";
WriteLog (lsDEBUG, LedgerConensus) << "Applying consensus set transactions to the last closed ledger";
applyTransactions(set, newLCL, newLCL, failedTransactions, false);
newLCL->updateSkipList();
newLCL->setClosed();
@@ -1200,23 +1201,23 @@ void LedgerConsensus::accept(SHAMap::ref set, LoadEvent::pointer)
// write out dirty nodes (temporarily done here) Most come before setAccepted
int fc;
while ((fc = SHAMap::flushDirty(*acctNodes, 256, hotACCOUNT_NODE, newLCL->getLedgerSeq())) > 0)
{ cLog(lsTRACE) << "Flushed " << fc << " dirty state nodes"; }
{ WriteLog (lsTRACE, LedgerConensus) << "Flushed " << fc << " dirty state nodes"; }
while ((fc = SHAMap::flushDirty(*txnNodes, 256, hotTRANSACTION_NODE, newLCL->getLedgerSeq())) > 0)
{ cLog(lsTRACE) << "Flushed " << fc << " dirty transaction nodes"; }
{ WriteLog (lsTRACE, LedgerConensus) << "Flushed " << fc << " dirty transaction nodes"; }

newLCL->setAccepted(closeTime, mCloseResolution, closeTimeCorrect);
newLCL->updateHash();
newLCL->setImmutable();

cLog(lsDEBUG) << "Report: NewL = " << newLCL->getHash() << ":" << newLCL->getLedgerSeq();
WriteLog (lsDEBUG, LedgerConensus) << "Report: NewL = " << newLCL->getHash() << ":" << newLCL->getLedgerSeq();
uint256 newLCLHash = newLCL->getHash();

if (sLog(lsTRACE))
if (ShouldLog (lsTRACE, LedgerConensus))
{
cLog(lsTRACE) << "newLCL";
WriteLog (lsTRACE, LedgerConensus) << "newLCL";
Json::Value p;
newLCL->addJson(p, LEDGER_JSON_DUMP_TXRP | LEDGER_JSON_DUMP_STATE);
cLog(lsTRACE) << p;
WriteLog (lsTRACE, LedgerConensus) << p;
}

statusChange(ripple::neACCEPTED_LEDGER, *newLCL);
@@ -1241,10 +1242,10 @@ void LedgerConsensus::accept(SHAMap::ref set, LoadEvent::pointer)
val.set_validation(&validation[0], validation.size());
int j = theApp->getConnectionPool().relayMessage(NULL,
boost::make_shared<PackedMessage>(val, ripple::mtVALIDATION));
cLog(lsINFO) << "CNF Val " << newLCLHash << " to " << j << " peers";
WriteLog (lsINFO, LedgerConensus) << "CNF Val " << newLCLHash << " to " << j << " peers";
}
else
cLog(lsINFO) << "CNF newLCL " << newLCLHash;
WriteLog (lsINFO, LedgerConensus) << "CNF newLCL " << newLCLHash;

Ledger::pointer newOL = boost::make_shared<Ledger>(true, boost::ref(*newLCL));
ScopedLock sl( theApp->getLedgerMaster().getLock());
@@ -1257,7 +1258,7 @@ void LedgerConsensus::accept(SHAMap::ref set, LoadEvent::pointer)
{ // we voted NO
try
{
cLog(lsDEBUG) << "Test applying disputed transaction that did not get in";
WriteLog (lsDEBUG, LedgerConensus) << "Test applying disputed transaction that did not get in";
SerializerIterator sit(it.second->peekTransaction());
SerializedTransaction::pointer txn = boost::make_shared<SerializedTransaction>(boost::ref(sit));
if (applyTransaction(engine, txn, newOL, true, false))
@@ -1265,12 +1266,12 @@ void LedgerConsensus::accept(SHAMap::ref set, LoadEvent::pointer)
}
catch (...)
{
cLog(lsDEBUG) << "Failed to apply transaction we voted NO on";
WriteLog (lsDEBUG, LedgerConensus) << "Failed to apply transaction we voted NO on";
}
}
}

cLog(lsDEBUG) << "Applying transactions from current open ledger";
|
||||
WriteLog (lsDEBUG, LedgerConensus) << "Applying transactions from current open ledger";
|
||||
applyTransactions(theApp->getLedgerMaster().getCurrentLedger()->peekTransactionMap(), newOL, newLCL,
|
||||
failedTransactions, true);
|
||||
theApp->getLedgerMaster().pushLedger(newLCL, newOL, !mConsensusFail);
|
||||
@@ -1280,12 +1281,12 @@ void LedgerConsensus::accept(SHAMap::ref set, LoadEvent::pointer)
|
||||
|
||||
if (mValidating)
|
||||
{ // see how close our close time is to other node's close time reports
|
||||
cLog(lsINFO) << "We closed at " << boost::lexical_cast<std::string>(mCloseTime);
|
||||
WriteLog (lsINFO, LedgerConensus) << "We closed at " << boost::lexical_cast<std::string>(mCloseTime);
|
||||
uint64 closeTotal = mCloseTime;
|
||||
int closeCount = 1;
|
||||
for (std::map<uint32, int>::iterator it = mCloseTimes.begin(), end = mCloseTimes.end(); it != end; ++it)
|
||||
{ // FIXME: Use median, not average
|
||||
cLog(lsINFO) << boost::lexical_cast<std::string>(it->second) << " time votes for "
|
||||
WriteLog (lsINFO, LedgerConensus) << boost::lexical_cast<std::string>(it->second) << " time votes for "
|
||||
<< boost::lexical_cast<std::string>(it->first);
|
||||
closeCount += it->second;
|
||||
closeTotal += static_cast<uint64>(it->first) * static_cast<uint64>(it->second);
|
||||
@@ -1293,7 +1294,7 @@ void LedgerConsensus::accept(SHAMap::ref set, LoadEvent::pointer)
|
||||
closeTotal += (closeCount / 2);
|
||||
closeTotal /= closeCount;
|
||||
int offset = static_cast<int>(closeTotal) - static_cast<int>(mCloseTime);
|
||||
cLog(lsINFO) << "Our close offset is estimated at " << offset << " (" << closeCount << ")";
|
||||
WriteLog (lsINFO, LedgerConensus) << "Our close offset is estimated at " << offset << " (" << closeCount << ")";
|
||||
theApp->getOPs().closeTimeOffset(offset);
|
||||
}
|
||||
|
||||
@@ -1306,12 +1307,12 @@ void LedgerConsensus::endConsensus()
|
||||
|
||||
void LedgerConsensus::simulate()
|
||||
{
|
||||
cLog(lsINFO) << "Simulating consensus";
|
||||
WriteLog (lsINFO, LedgerConensus) << "Simulating consensus";
|
||||
closeLedger();
|
||||
mCurrentMSeconds = 100;
|
||||
beginAccept(true);
|
||||
endConsensus();
|
||||
cLog(lsINFO) << "Simulation complete";
|
||||
WriteLog (lsINFO, LedgerConensus) << "Simulation complete";
|
||||
}
|
||||
|
||||
Json::Value LedgerConsensus::getJson(bool full)
|
||||
|
||||
@@ -7,8 +7,6 @@

#include "Log.h"

SETUP_LOG();

DECLARE_INSTANCE(LedgerEntrySetEntry);
DECLARE_INSTANCE(LedgerEntrySet)

@@ -151,7 +149,7 @@ void LedgerEntrySet::entryCreate(SLE::ref sle)
{

case taaDELETE:
cLog(lsDEBUG) << "Create after Delete = Modify";
WriteLog (lsDEBUG, LedgerEntrySet) << "Create after Delete = Modify";
it->second.mEntry = sle;
it->second.mAction = taaMODIFY;
it->second.mSeq = mSeq;
@@ -303,7 +301,7 @@ SLE::pointer LedgerEntrySet::getForMod(const uint256& node, Ledger::ref ledger,
{
if (it->second.mAction == taaDELETE)
{
cLog(lsFATAL) << "Trying to thread to deleted node";
WriteLog (lsFATAL, LedgerEntrySet) << "Trying to thread to deleted node";
return SLE::pointer();
}
if (it->second.mAction == taaCACHED)
@@ -334,12 +332,12 @@ bool LedgerEntrySet::threadTx(const RippleAddress& threadTo, Ledger::ref ledger,
boost::unordered_map<uint256, SLE::pointer>& newMods)
{
#ifdef META_DEBUG
cLog(lsTRACE) << "Thread to " << threadTo.getAccountID();
WriteLog (lsTRACE, LedgerEntrySet) << "Thread to " << threadTo.getAccountID();
#endif
SLE::pointer sle = getForMod(Ledger::getAccountRootIndex(threadTo.getAccountID()), ledger, newMods);
if (!sle)
{
cLog(lsFATAL) << "Threading to non-existent account: " << threadTo.humanAccountID();
WriteLog (lsFATAL, LedgerEntrySet) << "Threading to non-existent account: " << threadTo.humanAccountID();
assert(false);
return false;
}
@@ -369,14 +367,14 @@ bool LedgerEntrySet::threadOwners(SLE::ref node, Ledger::ref ledger,
if (node->hasOneOwner()) // thread to owner's account
{
#ifdef META_DEBUG
cLog(lsTRACE) << "Thread to single owner";
WriteLog (lsTRACE, LedgerEntrySet) << "Thread to single owner";
#endif
return threadTx(node->getOwner(), ledger, newMods);
}
else if (node->hasTwoOwners()) // thread to owner's accounts
{
#ifdef META_DEBUG
cLog(lsTRACE) << "Thread to two owners";
WriteLog (lsTRACE, LedgerEntrySet) << "Thread to two owners";
#endif
return
threadTx(node->getFirstOwner(), ledger, newMods) &&
@@ -401,21 +399,21 @@ void LedgerEntrySet::calcRawMeta(Serializer& s, TER result, uint32 index)
{
case taaMODIFY:
#ifdef META_DEBUG
cLog(lsTRACE) << "Modified Node " << it.first;
WriteLog (lsTRACE, LedgerEntrySet) << "Modified Node " << it.first;
#endif
type = &sfModifiedNode;
break;

case taaDELETE:
#ifdef META_DEBUG
cLog(lsTRACE) << "Deleted Node " << it.first;
WriteLog (lsTRACE, LedgerEntrySet) << "Deleted Node " << it.first;
#endif
type = &sfDeletedNode;
break;

case taaCREATE:
#ifdef META_DEBUG
cLog(lsTRACE) << "Created Node " << it.first;
WriteLog (lsTRACE, LedgerEntrySet) << "Created Node " << it.first;
#endif
type = &sfCreatedNode;
break;
@@ -510,7 +508,7 @@ void LedgerEntrySet::calcRawMeta(Serializer& s, TER result, uint32 index)
entryModify(it.second);

mSet.addRaw(s, result, index);
cLog(lsTRACE) << "Metadata:" << mSet.getJson(0);
WriteLog (lsTRACE, LedgerEntrySet) << "Metadata:" << mSet.getJson(0);
}

TER LedgerEntrySet::dirCount(const uint256& uRootIndex, uint32& uCount)
@@ -531,7 +529,7 @@ TER LedgerEntrySet::dirCount(const uint256& uRootIndex, uint32& uCount)
}
else if (uNodeDir)
{
cLog(lsWARNING) << "dirCount: no such node";
WriteLog (lsWARNING, LedgerEntrySet) << "dirCount: no such node";

assert(false);

@@ -553,7 +551,7 @@ TER LedgerEntrySet::dirAdd(
const uint256& uLedgerIndex,
FUNCTION_TYPE<void (SLE::ref)> fDescriber)
{
cLog(lsTRACE) << boost::str(boost::format("dirAdd: uRootIndex=%s uLedgerIndex=%s")
WriteLog (lsTRACE, LedgerEntrySet) << boost::str(boost::format("dirAdd: uRootIndex=%s uLedgerIndex=%s")
% uRootIndex.ToString()
% uLedgerIndex.ToString());

@@ -637,10 +635,10 @@ TER LedgerEntrySet::dirAdd(
svIndexes.peekValue().push_back(uLedgerIndex); // Append entry.
sleNode->setFieldV256(sfIndexes, svIndexes); // Save entry.

cLog(lsTRACE) << "dirAdd: creating: root: " << uRootIndex.ToString();
cLog(lsTRACE) << "dirAdd: appending: Entry: " << uLedgerIndex.ToString();
cLog(lsTRACE) << "dirAdd: appending: Node: " << strHex(uNodeDir);
// cLog(lsINFO) << "dirAdd: appending: PREV: " << svIndexes.peekValue()[0].ToString();
WriteLog (lsTRACE, LedgerEntrySet) << "dirAdd: creating: root: " << uRootIndex.ToString();
WriteLog (lsTRACE, LedgerEntrySet) << "dirAdd: appending: Entry: " << uLedgerIndex.ToString();
WriteLog (lsTRACE, LedgerEntrySet) << "dirAdd: appending: Node: " << strHex(uNodeDir);
// WriteLog (lsINFO, LedgerEntrySet) << "dirAdd: appending: PREV: " << svIndexes.peekValue()[0].ToString();

return tesSUCCESS;
}
@@ -659,7 +657,7 @@ TER LedgerEntrySet::dirDelete(

if (!sleNode)
{
cLog(lsWARNING)
WriteLog (lsWARNING, LedgerEntrySet)
<< boost::str(boost::format("dirDelete: no such node: uRootIndex=%s uNodeDir=%s uLedgerIndex=%s")
% uRootIndex.ToString()
% strHex(uNodeDir)
@@ -694,7 +692,7 @@ TER LedgerEntrySet::dirDelete(
{
assert(false);

cLog(lsWARNING) << "dirDelete: no such entry";
WriteLog (lsWARNING, LedgerEntrySet) << "dirDelete: no such entry";

return tefBAD_LEDGER;
}
@@ -797,14 +795,14 @@ TER LedgerEntrySet::dirDelete(

if (!slePrevious)
{
cLog(lsWARNING) << "dirDelete: previous node is missing";
WriteLog (lsWARNING, LedgerEntrySet) << "dirDelete: previous node is missing";

return tefBAD_LEDGER;
}

if (!sleNext)
{
cLog(lsWARNING) << "dirDelete: next node is missing";
WriteLog (lsWARNING, LedgerEntrySet) << "dirDelete: next node is missing";

return tefBAD_LEDGER;
}
@@ -898,7 +896,7 @@ bool LedgerEntrySet::dirNext(
}

uEntryIndex = vuiIndexes[uDirEntry++];
cLog(lsTRACE) << boost::str(boost::format("dirNext: uDirEntry=%d uEntryIndex=%s") % uDirEntry % uEntryIndex);
WriteLog (lsTRACE, LedgerEntrySet) << boost::str(boost::format("dirNext: uDirEntry=%d uEntryIndex=%s") % uDirEntry % uEntryIndex);

return true;
}
@@ -1010,7 +1008,7 @@ STAmount LedgerEntrySet::rippleOwed(const uint160& uToAccountID, const uint160&
{
saBalance.zero(uCurrencyID, uToAccountID);

cLog(lsDEBUG) << "rippleOwed: No credit line between "
WriteLog (lsDEBUG, LedgerEntrySet) << "rippleOwed: No credit line between "
<< RippleAddress::createHumanAccountID(uFromAccountID)
<< " and "
<< RippleAddress::createHumanAccountID(uToAccountID)
@@ -1060,7 +1058,7 @@ uint32 LedgerEntrySet::rippleTransferRate(const uint160& uIssuerID)
? sleAccount->getFieldU32(sfTransferRate)
: QUALITY_ONE;

cLog(lsDEBUG) << boost::str(boost::format("rippleTransferRate: uIssuerID=%s account_exists=%d transfer_rate=%f")
WriteLog (lsDEBUG, LedgerEntrySet) << boost::str(boost::format("rippleTransferRate: uIssuerID=%s account_exists=%d transfer_rate=%f")
% RippleAddress::createHumanAccountID(uIssuerID)
% !!sleAccount
% (uQuality/1000000000.0));
@@ -1107,7 +1105,7 @@ uint32 LedgerEntrySet::rippleQualityIn(const uint160& uToAccountID, const uint16
|
||||
}
|
||||
}
|
||||
|
||||
cLog(lsTRACE) << boost::str(boost::format("rippleQuality: %s uToAccountID=%s uFromAccountID=%s uCurrencyID=%s bLine=%d uQuality=%f")
|
||||
WriteLog (lsTRACE, LedgerEntrySet) << boost::str(boost::format("rippleQuality: %s uToAccountID=%s uFromAccountID=%s uCurrencyID=%s bLine=%d uQuality=%f")
|
||||
% (sfLow == sfLowQualityIn ? "in" : "out")
|
||||
% RippleAddress::createHumanAccountID(uToAccountID)
|
||||
% RippleAddress::createHumanAccountID(uFromAccountID)
|
||||
@@ -1171,7 +1169,7 @@ STAmount LedgerEntrySet::accountHolds(const uint160& uAccountID, const uint160&
|
||||
saAmount = saBalance-uReserve;
|
||||
}
|
||||
|
||||
cLog(lsTRACE) << boost::str(boost::format("accountHolds: uAccountID=%s saAmount=%s saBalance=%s uReserve=%d")
|
||||
WriteLog (lsTRACE, LedgerEntrySet) << boost::str(boost::format("accountHolds: uAccountID=%s saAmount=%s saBalance=%s uReserve=%d")
|
||||
% RippleAddress::createHumanAccountID(uAccountID)
|
||||
% saAmount.getFullText()
|
||||
% saBalance.getFullText()
|
||||
@@ -1181,7 +1179,7 @@ STAmount LedgerEntrySet::accountHolds(const uint160& uAccountID, const uint160&
|
||||
{
|
||||
saAmount = rippleHolds(uAccountID, uCurrencyID, uIssuerID);
|
||||
|
||||
cLog(lsTRACE) << boost::str(boost::format("accountHolds: uAccountID=%s saAmount=%s")
|
||||
WriteLog (lsTRACE, LedgerEntrySet) << boost::str(boost::format("accountHolds: uAccountID=%s saAmount=%s")
|
||||
% RippleAddress::createHumanAccountID(uAccountID)
|
||||
% saAmount.getFullText());
|
||||
}
|
||||
@@ -1203,7 +1201,7 @@ STAmount LedgerEntrySet::accountFunds(const uint160& uAccountID, const STAmount&
|
||||
{
|
||||
saFunds = saDefault;
|
||||
|
||||
cLog(lsTRACE) << boost::str(boost::format("accountFunds: uAccountID=%s saDefault=%s SELF-FUNDED")
|
||||
WriteLog (lsTRACE, LedgerEntrySet) << boost::str(boost::format("accountFunds: uAccountID=%s saDefault=%s SELF-FUNDED")
|
||||
% RippleAddress::createHumanAccountID(uAccountID)
|
||||
% saDefault.getFullText());
|
||||
}
|
||||
@@ -1211,7 +1209,7 @@ STAmount LedgerEntrySet::accountFunds(const uint160& uAccountID, const STAmount&
|
||||
{
|
||||
saFunds = accountHolds(uAccountID, saDefault.getCurrency(), saDefault.getIssuer());
|
||||
|
||||
cLog(lsTRACE) << boost::str(boost::format("accountFunds: uAccountID=%s saDefault=%s saFunds=%s")
|
||||
WriteLog (lsTRACE, LedgerEntrySet) << boost::str(boost::format("accountFunds: uAccountID=%s saDefault=%s saFunds=%s")
|
||||
% RippleAddress::createHumanAccountID(uAccountID)
|
||||
% saDefault.getFullText()
|
||||
% saFunds.getFullText());
|
||||
@@ -1234,7 +1232,7 @@ STAmount LedgerEntrySet::rippleTransferFee(const uint160& uSenderID, const uint1
|
||||
STAmount saTransferTotal = STAmount::multiply(saAmount, saTransitRate, saAmount.getCurrency(), saAmount.getIssuer());
|
||||
STAmount saTransferFee = saTransferTotal-saAmount;
|
||||
|
||||
cLog(lsDEBUG) << boost::str(boost::format("rippleTransferFee: saTransferFee=%s")
|
||||
WriteLog (lsDEBUG, LedgerEntrySet) << boost::str(boost::format("rippleTransferFee: saTransferFee=%s")
|
||||
% saTransferFee.getFullText());
|
||||
|
||||
return saTransferFee;
|
||||
@@ -1321,16 +1319,16 @@ TER LedgerEntrySet::trustDelete(SLE::ref sleRippleState, const uint160& uLowAcco
|
||||
uint64 uHighNode = sleRippleState->getFieldU64(sfHighNode);
|
||||
TER terResult;
|
||||
|
||||
cLog(lsTRACE) << "trustDelete: Deleting ripple line: low";
|
||||
WriteLog (lsTRACE, LedgerEntrySet) << "trustDelete: Deleting ripple line: low";
|
||||
terResult = dirDelete(false, uLowNode, Ledger::getOwnerDirIndex(uLowAccountID), sleRippleState->getIndex(), false, !bLowNode);
|
||||
|
||||
if (tesSUCCESS == terResult)
|
||||
{
|
||||
cLog(lsTRACE) << "trustDelete: Deleting ripple line: high";
|
||||
WriteLog (lsTRACE, LedgerEntrySet) << "trustDelete: Deleting ripple line: high";
|
||||
terResult = dirDelete(false, uHighNode, Ledger::getOwnerDirIndex(uHighAccountID), sleRippleState->getIndex(), false, !bHighNode);
|
||||
}
|
||||
|
||||
cLog(lsTRACE) << "trustDelete: Deleting ripple line: state";
|
||||
WriteLog (lsTRACE, LedgerEntrySet) << "trustDelete: Deleting ripple line: state";
|
||||
entryDelete(sleRippleState);
|
||||
|
||||
return terResult;
|
||||
@@ -1367,7 +1365,7 @@ TER LedgerEntrySet::rippleCredit(const uint160& uSenderID, const uint160& uRecei
|
||||
|
||||
saBalance.setIssuer(ACCOUNT_ONE);
|
||||
|
||||
cLog(lsDEBUG) << boost::str(boost::format("rippleCredit: create line: %s --> %s : %s")
|
||||
WriteLog (lsDEBUG, LedgerEntrySet) << boost::str(boost::format("rippleCredit: create line: %s --> %s : %s")
|
||||
% RippleAddress::createHumanAccountID(uSenderID)
|
||||
% RippleAddress::createHumanAccountID(uReceiverID)
|
||||
% saAmount.getFullText());
|
||||
@@ -1393,7 +1391,7 @@ TER LedgerEntrySet::rippleCredit(const uint160& uSenderID, const uint160& uRecei
|
||||
|
||||
saBalance -= saAmount;
|
||||
|
||||
cLog(lsDEBUG) << boost::str(boost::format("rippleCredit: %s --> %s : before=%s amount=%s after=%s")
|
||||
WriteLog (lsDEBUG, LedgerEntrySet) << boost::str(boost::format("rippleCredit: %s --> %s : before=%s amount=%s after=%s")
|
||||
% RippleAddress::createHumanAccountID(uSenderID)
|
||||
% RippleAddress::createHumanAccountID(uReceiverID)
|
||||
% saBefore.getFullText()
|
||||
@@ -1475,7 +1473,7 @@ TER LedgerEntrySet::rippleSend(const uint160& uSenderID, const uint160& uReceive
|
||||
|
||||
saActual.setIssuer(uIssuerID); // XXX Make sure this done in + above.
|
||||
|
||||
cLog(lsDEBUG) << boost::str(boost::format("rippleSend> %s -- > %s : deliver=%s fee=%s cost=%s")
|
||||
WriteLog (lsDEBUG, LedgerEntrySet) << boost::str(boost::format("rippleSend> %s -- > %s : deliver=%s fee=%s cost=%s")
|
||||
% RippleAddress::createHumanAccountID(uSenderID)
|
||||
% RippleAddress::createHumanAccountID(uReceiverID)
|
||||
% saAmount.getFullText()
|
||||
@@ -1509,7 +1507,7 @@ TER LedgerEntrySet::accountSend(const uint160& uSenderID, const uint160& uReceiv
|
||||
? entryCache(ltACCOUNT_ROOT, Ledger::getAccountRootIndex(uReceiverID))
|
||||
: SLE::pointer();
|
||||
|
||||
cLog(lsDEBUG) << boost::str(boost::format("accountSend> %s (%s) -> %s (%s) : %s")
|
||||
WriteLog (lsDEBUG, LedgerEntrySet) << boost::str(boost::format("accountSend> %s (%s) -> %s (%s) : %s")
|
||||
% RippleAddress::createHumanAccountID(uSenderID)
|
||||
% (sleSender ? (sleSender->getFieldAmount(sfBalance)).getFullText() : "-")
|
||||
% RippleAddress::createHumanAccountID(uReceiverID)
|
||||
@@ -1537,7 +1535,7 @@ TER LedgerEntrySet::accountSend(const uint160& uSenderID, const uint160& uReceiv
|
||||
entryModify(sleReceiver);
|
||||
}
|
||||
|
||||
cLog(lsDEBUG) << boost::str(boost::format("accountSend< %s (%s) -> %s (%s) : %s")
|
||||
WriteLog (lsDEBUG, LedgerEntrySet) << boost::str(boost::format("accountSend< %s (%s) -> %s (%s) : %s")
|
||||
% RippleAddress::createHumanAccountID(uSenderID)
|
||||
% (sleSender ? (sleSender->getFieldAmount(sfBalance)).getFullText() : "-")
|
||||
% RippleAddress::createHumanAccountID(uReceiverID)
|
||||
@@ -1548,7 +1546,7 @@ TER LedgerEntrySet::accountSend(const uint160& uSenderID, const uint160& uReceiv
|
||||
{
|
||||
STAmount saActual;
|
||||
|
||||
cLog(lsDEBUG) << boost::str(boost::format("accountSend: %s -> %s : %s")
|
||||
WriteLog (lsDEBUG, LedgerEntrySet) << boost::str(boost::format("accountSend: %s -> %s : %s")
|
||||
% RippleAddress::createHumanAccountID(uSenderID)
|
||||
% RippleAddress::createHumanAccountID(uReceiverID)
|
||||
% saAmount.getFullText());
|
||||
|
||||
@@ -8,8 +8,6 @@
#include "Log.h"
#include "PFRequest.h"

SETUP_LOG();

#define MIN_VALIDATION_RATIO 150 // 150/256ths of validations of previous ledger
#define MAX_LEDGER_GAP 100 // Don't catch up more than 100 ledgers (cannot exceed 256)

@@ -52,14 +50,14 @@ void LedgerMaster::pushLedger(Ledger::pointer newLedger)
{
// Caller should already have properly assembled this ledger into "ready-to-close" form --
// all candidate transactions must already be applied
cLog(lsINFO) << "PushLedger: " << newLedger->getHash();
WriteLog (lsINFO, LedgerMaster) << "PushLedger: " << newLedger->getHash();
boost::recursive_mutex::scoped_lock ml(mLock);
if (!mPubLedger)
mPubLedger = newLedger;
if (!!mFinalizedLedger)
{
mFinalizedLedger->setClosed();
cLog(lsTRACE) << "Finalizes: " << mFinalizedLedger->getHash();
WriteLog (lsTRACE, LedgerMaster) << "Finalizes: " << mFinalizedLedger->getHash();
}
mFinalizedLedger = mCurrentLedger;
mCurrentLedger = newLedger;
@@ -77,7 +75,7 @@ void LedgerMaster::pushLedger(Ledger::pointer newLCL, Ledger::pointer newOL, boo
assert(newLCL->isClosed());
assert(newLCL->isImmutable());
mLedgerHistory.addAcceptedLedger(newLCL, fromConsensus);
cLog(lsINFO) << "StashAccepted: " << newLCL->getHash();
WriteLog (lsINFO, LedgerMaster) << "StashAccepted: " << newLCL->getHash();
}

{
@@ -137,10 +135,10 @@ Ledger::pointer LedgerMaster::closeLedger(bool recover)
}
catch (...)
{ // CHECKME: We got a few of these
cLog(lsWARNING) << "Held transaction throws";
WriteLog (lsWARNING, LedgerMaster) << "Held transaction throws";
}
}
tLog(recovers != 0, lsINFO) << "Recovered " << recovers << " held transactions";
CondLog (recovers != 0, lsINFO, LedgerMaster) << "Recovered " << recovers << " held transactions";
mHeldTransactions.reset(closingLedger->getHash());
}

@@ -248,7 +246,7 @@ bool LedgerMaster::acquireMissingLedger(Ledger::ref origLedger, const uint256& l
Ledger::pointer ledger = mLedgerHistory.getLedgerBySeq(ledgerSeq);
if (ledger && (Ledger::getHashByIndex(ledgerSeq) == ledgerHash))
{
cLog(lsTRACE) << "Ledger hash found in database";
WriteLog (lsTRACE, LedgerMaster) << "Ledger hash found in database";
theApp->getJobQueue().addJob(jtPUBOLDLEDGER, "LedgerMaster::asyncAccept",
BIND_TYPE(&LedgerMaster::asyncAccept, this, ledger));
return true;
@@ -256,7 +254,7 @@ bool LedgerMaster::acquireMissingLedger(Ledger::ref origLedger, const uint256& l

if (theApp->getMasterLedgerAcquire().isFailure(ledgerHash))
{
cLog(lsTRACE) << "Already failed to acquire " << ledgerSeq;
WriteLog (lsTRACE, LedgerMaster) << "Already failed to acquire " << ledgerSeq;
return false;
}

@@ -289,7 +287,7 @@ bool LedgerMaster::acquireMissingLedger(Ledger::ref origLedger, const uint256& l
{
if (timeoutCount > 2)
{
cLog(lsDEBUG) << "Not acquiring due to timeouts";
WriteLog (lsDEBUG, LedgerMaster) << "Not acquiring due to timeouts";
}
else
{
@@ -345,7 +343,7 @@ bool LedgerMaster::acquireMissingLedger(Ledger::ref origLedger, const uint256& l
target->sendPacket(packet, false);
}
else
cLog(lsTRACE) << "No peer for fetch pack";
WriteLog (lsTRACE, LedgerMaster) << "No peer for fetch pack";
}
}

@@ -358,7 +356,7 @@ void LedgerMaster::missingAcquireComplete(LedgerAcquire::pointer acq)

if (acq->isFailed() && (mMissingSeq != 0))
{
cLog(lsWARNING) << "Acquire failed for " << mMissingSeq;
WriteLog (lsWARNING, LedgerMaster) << "Acquire failed for " << mMissingSeq;
}

mMissingLedger.reset();
@@ -378,7 +376,7 @@ bool LedgerMaster::shouldAcquire(uint32 currentLedger, uint32 ledgerHistory, uin
if (candidateLedger >= currentLedger)
ret = true;
else ret = (currentLedger - candidateLedger) <= ledgerHistory;
cLog(lsTRACE) << "Missing ledger " << candidateLedger << (ret ? " should" : " should NOT") << " be acquired";
WriteLog (lsTRACE, LedgerMaster) << "Missing ledger " << candidateLedger << (ret ? " should" : " should NOT") << " be acquired";
return ret;
}

@@ -394,19 +392,19 @@ void LedgerMaster::resumeAcquiring()

if (mMissingLedger || !theConfig.LEDGER_HISTORY)
{
tLog(mMissingLedger, lsDEBUG) << "Fetch already in progress, not resuming";
CondLog (mMissingLedger, lsDEBUG, LedgerMaster) << "Fetch already in progress, not resuming";
return;
}

uint32 prevMissing = mCompleteLedgers.prevMissing(mFinalizedLedger->getLedgerSeq());
if (prevMissing == RangeSet::RangeSetAbsent)
{
cLog(lsDEBUG) << "no prior missing ledger, not resuming";
WriteLog (lsDEBUG, LedgerMaster) << "no prior missing ledger, not resuming";
return;
}
if (shouldAcquire(mCurrentLedger->getLedgerSeq(), theConfig.LEDGER_HISTORY, prevMissing))
{
cLog(lsTRACE) << "Resuming at " << prevMissing;
WriteLog (lsTRACE, LedgerMaster) << "Resuming at " << prevMissing;
assert(!mCompleteLedgers.hasValue(prevMissing));
Ledger::pointer nextLedger = getLedgerBySeq(prevMissing + 1);
if (nextLedger)
@@ -414,7 +412,7 @@ void LedgerMaster::resumeAcquiring()
else
{
mCompleteLedgers.clearValue(prevMissing);
cLog(lsINFO) << "We have a gap at: " << prevMissing + 1;
WriteLog (lsINFO, LedgerMaster) << "We have a gap at: " << prevMissing + 1;
}
}
}
@@ -435,7 +433,7 @@ void LedgerMaster::fixMismatch(Ledger::ref ledger)
Ledger::pointer otherLedger = getLedgerBySeq(lSeq);
if (otherLedger && (otherLedger->getHash() == hash))
{ // we closed the seam
tLog(invalidate != 0, lsWARNING) << "Match at " << lSeq << ", " <<
CondLog (invalidate != 0, lsWARNING, LedgerMaster) << "Match at " << lSeq << ", " <<
invalidate << " prior ledgers invalidated";
return;
}
@@ -445,12 +443,12 @@ void LedgerMaster::fixMismatch(Ledger::ref ledger)
}

// all prior ledgers invalidated
tLog(invalidate != 0, lsWARNING) << "All " << invalidate << " prior ledgers invalidated";
CondLog (invalidate != 0, lsWARNING, LedgerMaster) << "All " << invalidate << " prior ledgers invalidated";
}

void LedgerMaster::setFullLedger(Ledger::pointer ledger)
{ // A new ledger has been accepted as part of the trusted chain
cLog(lsDEBUG) << "Ledger " << ledger->getLedgerSeq() << " accepted :" << ledger->getHash();
WriteLog (lsDEBUG, LedgerMaster) << "Ledger " << ledger->getLedgerSeq() << " accepted :" << ledger->getHash();
assert(ledger->peekAccountStateMap()->getHash().isNonZero());

if (theApp->getOPs().isNeedNetworkLedger())
@@ -471,7 +469,7 @@ void LedgerMaster::setFullLedger(Ledger::pointer ledger)
Ledger::pointer prevLedger = getLedgerBySeq(ledger->getLedgerSeq() - 1);
if (!prevLedger || (prevLedger->getHash() != ledger->getParentHash()))
{
cLog(lsWARNING) << "Acquired ledger invalidates previous ledger: " <<
WriteLog (lsWARNING, LedgerMaster) << "Acquired ledger invalidates previous ledger: " <<
(prevLedger ? "hashMismatch" : "missingLedger");
fixMismatch(ledger);
}
@@ -486,13 +484,13 @@ void LedgerMaster::setFullLedger(Ledger::pointer ledger)

if (mMissingLedger || !theConfig.LEDGER_HISTORY)
{
tLog(mMissingLedger, lsDEBUG) << "Fetch already in progress, " << mMissingLedger->getTimeouts() << " timeouts";
CondLog (mMissingLedger, lsDEBUG, LedgerMaster) << "Fetch already in progress, " << mMissingLedger->getTimeouts() << " timeouts";
return;
}

if (theApp->getJobQueue().getJobCountTotal(jtPUBOLDLEDGER) > 1)
{
cLog(lsDEBUG) << "Too many pending ledger saves";
WriteLog (lsDEBUG, LedgerMaster) << "Too many pending ledger saves";
return;
}

@@ -501,10 +499,10 @@ void LedgerMaster::setFullLedger(Ledger::pointer ledger)
{
if (!shouldAcquire(mCurrentLedger->getLedgerSeq(), theConfig.LEDGER_HISTORY, ledger->getLedgerSeq() - 1))
{
cLog(lsTRACE) << "Don't need any ledgers";
WriteLog (lsTRACE, LedgerMaster) << "Don't need any ledgers";
return;
}
cLog(lsDEBUG) << "We need the ledger before the ledger we just accepted: " << ledger->getLedgerSeq() - 1;
WriteLog (lsDEBUG, LedgerMaster) << "We need the ledger before the ledger we just accepted: " << ledger->getLedgerSeq() - 1;
acquireMissingLedger(ledger, ledger->getParentHash(), ledger->getLedgerSeq() - 1);
}
else
@@ -512,12 +510,12 @@ void LedgerMaster::setFullLedger(Ledger::pointer ledger)
uint32 prevMissing = mCompleteLedgers.prevMissing(ledger->getLedgerSeq());
if (prevMissing == RangeSet::RangeSetAbsent)
{
cLog(lsDEBUG) << "no prior missing ledger";
WriteLog (lsDEBUG, LedgerMaster) << "no prior missing ledger";
return;
}
if (shouldAcquire(mCurrentLedger->getLedgerSeq(), theConfig.LEDGER_HISTORY, prevMissing))
{
cLog(lsDEBUG) << "Ledger " << prevMissing << " is needed";
WriteLog (lsDEBUG, LedgerMaster) << "Ledger " << prevMissing << " is needed";
assert(!mCompleteLedgers.hasValue(prevMissing));
Ledger::pointer nextLedger = getLedgerBySeq(prevMissing + 1);
if (nextLedger)
@@ -525,11 +523,11 @@ void LedgerMaster::setFullLedger(Ledger::pointer ledger)
else
{
mCompleteLedgers.clearValue(prevMissing);
cLog(lsWARNING) << "We have a gap we can't fix: " << prevMissing + 1;
WriteLog (lsWARNING, LedgerMaster) << "We have a gap we can't fix: " << prevMissing + 1;
}
}
else
cLog(lsTRACE) << "Shouldn't acquire";
WriteLog (lsTRACE, LedgerMaster) << "Shouldn't acquire";
}
}

@@ -566,7 +564,7 @@ void LedgerMaster::checkAccept(const uint256& hash, uint32 seq)
if (theApp->getValidations().getTrustedValidationCount(hash) < minVal) // nothing we can do
return;

cLog(lsINFO) << "Advancing accepted ledger to " << seq << " with >= " << minVal << " validations";
WriteLog (lsINFO, LedgerMaster) << "Advancing accepted ledger to " << seq << " with >= " << minVal << " validations";

mLastValidateHash = hash;
mLastValidateSeq = seq;
@@ -594,7 +592,7 @@ void LedgerMaster::tryPublish()
}
else if (mValidLedger->getLedgerSeq() > (mPubLedger->getLedgerSeq() + MAX_LEDGER_GAP))
{
cLog(lsWARNING) << "Gap in validated ledger stream " << mPubLedger->getLedgerSeq() << " - " <<
WriteLog (lsWARNING, LedgerMaster) << "Gap in validated ledger stream " << mPubLedger->getLedgerSeq() << " - " <<
mValidLedger->getLedgerSeq() - 1;
mPubLedger = mValidLedger;
mPubLedgers.push_back(mValidLedger);
@@ -603,7 +601,7 @@ void LedgerMaster::tryPublish()
{
for (uint32 seq = mPubLedger->getLedgerSeq() + 1; seq <= mValidLedger->getLedgerSeq(); ++seq)
{
cLog(lsTRACE) << "Trying to publish ledger " << seq;
WriteLog (lsTRACE, LedgerMaster) << "Trying to publish ledger " << seq;

Ledger::pointer ledger;
uint256 hash;
@@ -618,7 +616,7 @@ void LedgerMaster::tryPublish()
hash = mValidLedger->getLedgerHash(seq);
if (hash.isZero())
{
cLog(lsFATAL) << "Ledger: " << mValidLedger->getLedgerSeq() << " does not have hash for " <<
WriteLog (lsFATAL, LedgerMaster) << "Ledger: " << mValidLedger->getLedgerSeq() << " does not have hash for " <<
seq;
assert(false);
}
@@ -634,7 +632,7 @@ void LedgerMaster::tryPublish()
{
if (theApp->getMasterLedgerAcquire().isFailure(hash))
{
cLog(lsWARNING) << "Unable to acquire a recent validated ledger";
WriteLog (lsWARNING, LedgerMaster) << "Unable to acquire a recent validated ledger";
}
else
{
@@ -650,7 +648,7 @@ void LedgerMaster::tryPublish()
mPubLedgers.push_back(mPubLedger);
}
else
cLog(lsWARNING) << "Failed to acquire a published ledger";
WriteLog (lsWARNING, LedgerMaster) << "Failed to acquire a published ledger";
}
}
}
@@ -699,7 +697,7 @@ void LedgerMaster::pubThread()

BOOST_FOREACH(Ledger::ref l, ledgers)
{
cLog(lsDEBUG) << "Publishing ledger " << l->getLedgerSeq();
WriteLog (lsDEBUG, LedgerMaster) << "Publishing ledger " << l->getLedgerSeq();
setFullLedger(l); // OPTIMIZEME: This is actually more work than we need to do
theApp->getOPs().pubLedger(l);
published = true;

@@ -6,7 +6,9 @@
#include <boost/format.hpp>

#include "Log.h"
SETUP_LOG();

// VFALCO: Should rename ContinuousLedgerTiming to LedgerTiming
struct LedgerTiming;

// NOTE: First and last times must be repeated
int ContinuousLedgerTiming::LedgerTimeResolution[] = { 10, 10, 20, 30, 60, 90, 120, 120 };
@@ -26,7 +28,7 @@ bool ContinuousLedgerTiming::shouldClose(
if ((previousMSeconds < -1000) || (previousMSeconds > 600000) ||
(currentMSeconds < -1000) || (currentMSeconds > 600000))
{
cLog(lsWARNING) <<
WriteLog (lsWARNING, LedgerTiming) <<
boost::str(boost::format("CLC::shouldClose range Trans=%s, Prop: %d/%d, Secs: %d (last:%d)")
% (anyTransactions ? "yes" : "no") % previousProposers % proposersClosed
% currentMSeconds % previousMSeconds);
@@ -37,14 +39,14 @@ bool ContinuousLedgerTiming::shouldClose(
{ // no transactions so far this interval
if (proposersClosed > (previousProposers / 4)) // did we miss a transaction?
{
cLog(lsTRACE) << "no transactions, many proposers: now (" << proposersClosed << " closed, "
WriteLog (lsTRACE, LedgerTiming) << "no transactions, many proposers: now (" << proposersClosed << " closed, "
<< previousProposers << " before)";
return true;
}
#if 0 // This false triggers on the genesis ledger
if (previousMSeconds > (1000 * (LEDGER_IDLE_INTERVAL + 2))) // the last ledger was very slow to close
{
cLog(lsTRACE) << "was slow to converge (p=" << (previousMSeconds) << ")";
WriteLog (lsTRACE, LedgerTiming) << "was slow to converge (p=" << (previousMSeconds) << ")";
if (previousMSeconds < 2000)
return previousMSeconds;
return previousMSeconds - 1000;
@@ -55,13 +57,13 @@ bool ContinuousLedgerTiming::shouldClose(

if ((openMSeconds < LEDGER_MIN_CLOSE) && ((proposersClosed + proposersValidated) < (previousProposers / 2 )))
{
cLog(lsDEBUG) << "Must wait minimum time before closing";
WriteLog (lsDEBUG, LedgerTiming) << "Must wait minimum time before closing";
return false;
}

if ((currentMSeconds < previousMSeconds) && ((proposersClosed + proposersValidated) < previousProposers))
{
cLog(lsDEBUG) << "We are waiting for more closes/validations";
WriteLog (lsDEBUG, LedgerTiming) << "We are waiting for more closes/validations";
return false;
}

@@ -80,7 +82,7 @@ bool ContinuousLedgerTiming::haveConsensus(
bool forReal, // deciding whether to stop consensus process
bool& failed) // we can't reach a consensus
{
cLog(lsTRACE) << boost::str(boost::format("CLC::haveConsensus: prop=%d/%d agree=%d validated=%d time=%d/%d%s") %
WriteLog (lsTRACE, LedgerTiming) << boost::str(boost::format("CLC::haveConsensus: prop=%d/%d agree=%d validated=%d time=%d/%d%s") %
currentProposers % previousProposers % currentAgree % currentFinished % currentAgreeTime % previousAgreeTime %
(forReal ? "" : "X"));

@@ -91,7 +93,7 @@ bool ContinuousLedgerTiming::haveConsensus(
{ // Less than 3/4 of the last ledger's proposers are present, we may need more time
if (currentAgreeTime < (previousAgreeTime + LEDGER_MIN_CONSENSUS))
{
tLog(forReal, lsTRACE) << "too fast, not enough proposers";
CondLog (forReal, lsTRACE, LedgerTiming) << "too fast, not enough proposers";
return false;
}
}
@@ -99,7 +101,7 @@ bool ContinuousLedgerTiming::haveConsensus(
// If 80% of current proposers (plus us) agree on a set, we have consensus
if (((currentAgree * 100 + 100) / (currentProposers + 1)) > 80)
{
tLog(forReal, lsINFO) << "normal consensus";
CondLog (forReal, lsINFO, LedgerTiming) << "normal consensus";
failed = false;
return true;
}
@@ -107,13 +109,13 @@ bool ContinuousLedgerTiming::haveConsensus(
// If 80% of the nodes on your UNL have moved on, you should declare consensus
if (((currentFinished * 100) / (currentProposers + 1)) > 80)
{
tLog(forReal, lsWARNING) << "We see no consensus, but 80% of nodes have moved on";
CondLog (forReal, lsWARNING, LedgerTiming) << "We see no consensus, but 80% of nodes have moved on";
failed = true;
return true;
}

// no consensus yet
tLog(forReal, lsTRACE) << "no consensus";
CondLog (forReal, lsTRACE, LedgerTiming) << "no consensus";
return false;
}

@@ -19,16 +19,20 @@ namespace boost {

#include "types.h"

// DEPRECATED
// Put at the beginning of a C++ file that needs its own log partition
#define SETUP_LOG() static LogPartition logPartition(__FILE__)
#define SETUP_NLOG(x) static LogPartition logPartition(x)

// DEPRECATED
// Standard conditional log
#define cLog(x) if (!logPartition.doLog(x)) do {} while (0); else Log(x, logPartition)

// DEPRECATED
// Log only if an additional condition 'c' is true. Condition is not computed if not needed
#define tLog(c,x) if (!logPartition.doLog(x) || !(c)) do {} while(0); else Log(x, logPartition)

// DEPRECATED
// Check if should log
#define sLog(x) (logPartition.doLog(x))

@@ -125,7 +129,9 @@ inline Log getLog (LogSeverity level)
return Log (level, getLogPartition <Key> ());
}

#define WriteLog(s, k) if (!getLogPartition <k> ().doLog (s)) do {} while (0); else Log (s, getLogPartition <k> ())
#define ShouldLog(s, k) (getLogPartition <k> ().doLog (s))
#define WriteLog(s, k) if (!ShouldLog (s, k)) do {} while (0); else Log (s, getLogPartition <k> ())
#define CondLog(c, s, k) if (!ShouldLog (s, k) || !(c)) do {} while(0); else Log(s, getLogPartition <k> ())

#endif
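For reference, the migration pattern the rest of this commit applies maps the deprecated macros onto the new ones roughly as in the sketch below (illustrative only; `SomePartition` and `value` are hypothetical names, and the snippet assumes the Log.h macros shown directly above):

// Old style: an implicit per-file partition declared with SETUP_LOG().
//   SETUP_LOG();
//   cLog (lsDEBUG) << "value=" << value;                       // logs if the file's partition passes lsDEBUG
//   tLog (value != 0, lsINFO) << "non-zero value";              // additionally requires the extra condition
//   if (sLog (lsERROR)) { /* build expensive diagnostics */ }
// New style: the partition is looked up by a key type, so the per-file SETUP_LOG() goes away.
WriteLog (lsDEBUG, SomePartition) << "value=" << value;
CondLog (value != 0, lsINFO, SomePartition) << "non-zero value";
if (ShouldLog (lsERROR, SomePartition)) { /* build expensive diagnostics */ }
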
@@ -6,8 +6,6 @@
#include "Application.h"
#include "Log.h"

SETUP_LOG();

OrderBookDB::OrderBookDB() : mSeq(0)
{

@@ -34,7 +32,7 @@ void OrderBookDB::setup(Ledger::ref ledger)
mDestMap.clear();
mSourceMap.clear();

cLog(lsDEBUG) << "OrderBookDB>";
WriteLog (lsDEBUG, OrderBookDB) << "OrderBookDB>";

// walk through the entire ledger looking for orderbook entries
uint256 currentIndex = ledger->getFirstLedgerIndex();
@@ -63,7 +61,7 @@ void OrderBookDB::setup(Ledger::ref ledger)
currentIndex = ledger->getNextLedgerIndex(currentIndex);
}

cLog(lsDEBUG) << "OrderBookDB<";
WriteLog (lsDEBUG, OrderBookDB) << "OrderBookDB<";
}

// return list of all orderbooks that want this issuerID and currencyID
@@ -186,7 +184,7 @@ void OrderBookDB::processTxn(Ledger::ref ledger, const ALTransaction& alTx, Json
}
catch (...)
{
cLog(lsINFO) << "Fields not found in OrderBookDB::processTxn";
WriteLog (lsINFO, OrderBookDB) << "Fields not found in OrderBookDB::processTxn";
}
}
}

File diff suppressed because it is too large