diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index e2ae117002..a318377272 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -282,8 +282,10 @@ private: if (ms.count() >= 10) m_event.notify (ms); if (ms.count() >= 500) - m_journal.warning << + { + JLOG(m_journal.warning) << "io_service latency = " << ms; + } } std::chrono::milliseconds @@ -781,12 +783,12 @@ public: } else if (ec) { - m_journal.error << "Received signal: " << signal_number - << " with error: " << ec.message(); + JLOG(m_journal.error) << "Received signal: " << signal_number + << " with error: " << ec.message(); } else { - m_journal.debug << "Received signal: " << signal_number; + JLOG(m_journal.debug) << "Received signal: " << signal_number; signalStop(); } } @@ -802,7 +804,8 @@ public: void onStart () override { - m_journal.info << "Application starting. Build is " << gitCommitID(); + JLOG(m_journal.info) + << "Application starting. Build is " << gitCommitID(); m_sweepTimer.setExpiration (10); m_entropyTimer.setRecurringExpiration (300); @@ -815,7 +818,7 @@ public: // Called to indicate shutdown. void onStop () override { - m_journal.debug << "Application stopping"; + JLOG(m_journal.debug) << "Application stopping"; m_io_latency_sampler.cancel_async (); @@ -885,7 +888,8 @@ public: // if (space.available < (512 * 1024 * 1024)) { - m_journal.fatal << "Remaining free disk space is less than 512MB"; + JLOG(m_journal.fatal) + << "Remaining free disk space is less than 512MB"; signalStop (); } } @@ -965,7 +969,7 @@ void ApplicationImp::setup() if (!initSqliteDbs ()) { - m_journal.fatal << "Can not create database connections!"; + JLOG(m_journal.fatal) << "Cannot create database connections!"; exitWithCode(3); } @@ -1008,7 +1012,7 @@ void ApplicationImp::setup() auto const startUp = config_->START_UP; if (startUp == Config::FRESH) { - m_journal.info << "Starting new Ledger"; + JLOG(m_journal.info) << "Starting new Ledger"; startGenesisLedger (); } @@ -1016,7 +1020,7 @@ void ApplicationImp::setup() startUp == Config::LOAD_FILE || startUp == Config::REPLAY) { - m_journal.info << "Loading specified Ledger"; + JLOG(m_journal.info) << "Loading specified Ledger"; if (!loadOldLedger (config_->START_LEDGER, startUp == Config::REPLAY, @@ -1044,18 +1048,20 @@ void ApplicationImp::setup() if (!cluster_->load (config().section(SECTION_CLUSTER_NODES))) { - m_journal.fatal << "Invalid entry in cluster configuration."; + JLOG(m_journal.fatal) << "Invalid entry in cluster configuration."; Throw(); } if (!validators_->load (config().section (SECTION_VALIDATORS))) { - m_journal.fatal << "Invalid entry in validator configuration."; + JLOG(m_journal.fatal) << "Invalid entry in validator configuration."; Throw(); } if (validators_->size () == 0 && !config_->RUN_STANDALONE) - m_journal.warning << "No validators are configured."; + { + JLOG(m_journal.warning) << "No validators are configured."; + } m_nodeStore->tune (config_->getSize (siNodeCacheSize), config_->getSize (siNodeCacheAge)); m_ledgerMaster->tune (config_->getSize (siLedgerSize), config_->getSize (siLedgerAge)); @@ -1102,7 +1108,7 @@ void ApplicationImp::setup() *m_collectorManager}); if (!server) { - m_journal.fatal << "Could not create Websocket for [" << + JLOG(m_journal.fatal) << "Could not create Websocket for [" << port.name << "]"; Throw (); } @@ -1117,7 +1123,10 @@ void ApplicationImp::setup() // Should this message be here, conceptually? 
In theory this sort // of message, if displayed, should be displayed from PeerFinder. if (config_->PEER_PRIVATE && config_->IPS_FIXED.empty ()) - m_journal.warning << "No outbound peer connections will be made"; + { + JLOG(m_journal.warning) + << "No outbound peer connections will be made"; + } // VFALCO NOTE the state timer resets the deadlock detector. // @@ -1125,7 +1134,7 @@ void ApplicationImp::setup() } else { - m_journal.warning << "Running in standalone mode"; + JLOG(m_journal.warning) << "Running in standalone mode"; m_networkOPs->setStandAlone (); } @@ -1155,9 +1164,9 @@ ApplicationImp::run() // Stop the server. When this returns, all // Stoppable objects should be stopped. - m_journal.info << "Received shutdown request"; + JLOG(m_journal.info) << "Received shutdown request"; stop (m_journal); - m_journal.info << "Done."; + JLOG(m_journal.info) << "Done."; StopSustain(); } @@ -1266,14 +1275,16 @@ bool ApplicationImp::loadOldLedger ( std::ifstream ledgerFile (ledgerID.c_str (), std::ios::in); if (!ledgerFile) { - m_journal.fatal << "Unable to open file"; + JLOG(m_journal.fatal) << "Unable to open file"; } else { Json::Reader reader; Json::Value jLedger; if (!reader.parse (ledgerFile, jLedger)) - m_journal.fatal << "Unable to parse ledger JSON"; + { + JLOG(m_journal.fatal) << "Unable to parse ledger JSON"; + } else { std::reference_wrapper ledger (jLedger); @@ -1324,7 +1335,8 @@ bool ApplicationImp::loadOldLedger ( } if (!ledger.get().isArray ()) { - m_journal.fatal << "State nodes must be an array"; + JLOG(m_journal.fatal) + << "State nodes must be an array"; } else { @@ -1340,7 +1352,6 @@ bool ApplicationImp::loadOldLedger ( entry.removeMember (jss::index); STParsedJSONObject stp ("sle", ledger.get()[index]); - // m_journal.info << "json: " << stp.object->getJson(0); if (stp.object && (uIndex.isNonZero())) { @@ -1349,11 +1360,16 @@ bool ApplicationImp::loadOldLedger ( STLedgerEntry sle (*stp.object, uIndex); bool ok = loadLedger->addSLE (sle); if (!ok) - m_journal.warning << "Couldn't add serialized ledger: " << uIndex; + { + JLOG(m_journal.warning) + << "Couldn't add serialized ledger: " + << uIndex; + } } else { - m_journal.warning << "Invalid entry in ledger"; + JLOG(m_journal.warning) + << "Invalid entry in ledger"; } } @@ -1394,8 +1410,8 @@ bool ApplicationImp::loadOldLedger ( if (!loadLedger) { - m_journal.fatal << "No Ledger found from ledgerID=" - << ledgerID << std::endl; + JLOG(m_journal.fatal) << "No Ledger found from ledgerID=" + << ledgerID << std::endl; return false; } @@ -1406,12 +1422,12 @@ bool ApplicationImp::loadOldLedger ( // this ledger holds the transactions we want to replay replayLedger = loadLedger; - m_journal.info << "Loading parent ledger"; + JLOG(m_journal.info) << "Loading parent ledger"; loadLedger = loadByHash (replayLedger->info().parentHash, *this); if (!loadLedger) { - m_journal.info << "Loading parent ledger from node store"; + JLOG(m_journal.info) << "Loading parent ledger from node store"; // Try to build the ledger from the back end auto il = std::make_shared ( @@ -1422,7 +1438,7 @@ bool ApplicationImp::loadOldLedger ( if (!loadLedger) { - m_journal.fatal << "Replay ledger missing/damaged"; + JLOG(m_journal.fatal) << "Replay ledger missing/damaged"; assert (false); return false; } @@ -1431,25 +1447,25 @@ bool ApplicationImp::loadOldLedger ( loadLedger->setClosed (); - m_journal.info << "Loading ledger " << loadLedger->getHash () << " seq:" << loadLedger->info().seq; + JLOG(m_journal.info) << "Loading ledger " << loadLedger->getHash () << " 
seq:" << loadLedger->info().seq; if (loadLedger->info().accountHash.isZero ()) { - m_journal.fatal << "Ledger is empty."; + JLOG(m_journal.fatal) << "Ledger is empty."; assert (false); return false; } if (!loadLedger->walkLedger (journal ("Ledger"))) { - m_journal.fatal << "Ledger is missing nodes."; + JLOG(m_journal.fatal) << "Ledger is missing nodes."; assert(false); return false; } if (!loadLedger->assertSane (journal ("Ledger"))) { - m_journal.fatal << "Ledger is not sane."; + JLOG(m_journal.fatal) << "Ledger is not sane."; assert(false); return false; } @@ -1502,12 +1518,13 @@ bool ApplicationImp::loadOldLedger ( } catch (SHAMapMissingNode&) { - m_journal.fatal << "Data is missing for selected ledger"; + JLOG(m_journal.fatal) << "Data is missing for selected ledger"; return false; } catch (boost::bad_lexical_cast&) { - m_journal.fatal << "Ledger specified '" << ledgerID << "' is not valid"; + JLOG(m_journal.fatal) + << "Ledger specified '" << ledgerID << "' is not valid"; return false; } diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index c5e4b7304c..a713a172e8 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -651,7 +651,7 @@ void NetworkOPsImp::processHeartbeatTimer () if (mMode != omDISCONNECTED) { setMode (omDISCONNECTED); - m_journal.warning + JLOG(m_journal.warning) << "Node count (" << numPeers << ") " << "has fallen below quorum (" << m_network_quorum << ")."; } @@ -664,7 +664,8 @@ void NetworkOPsImp::processHeartbeatTimer () if (mMode == omDISCONNECTED) { setMode (omCONNECTED); - m_journal.info << "Node count (" << numPeers << ") is sufficient."; + JLOG(m_journal.info) + << "Node count (" << numPeers << ") is sufficient."; } // Check if the last validated ledger forces a change between these @@ -693,7 +694,7 @@ void NetworkOPsImp::processClusterTimer () if (!update) { - m_journal.debug << "Too soon to send cluster update"; + JLOG(m_journal.debug) << "Too soon to send cluster update"; return; } @@ -828,7 +829,7 @@ void NetworkOPsImp::processTransaction (std::shared_ptr& transactio // Not concerned with local checks at this point. 
if (validity.first == Validity::SigBad) { - m_journal.info << "Transaction has bad signature: " << + JLOG(m_journal.info) << "Transaction has bad signature: " << validity.second; transaction->setStatus(INVALID); transaction->setResult(temBAD_SIGNATURE); @@ -973,8 +974,10 @@ void NetworkOPsImp::apply (std::unique_lock& batchLock) std::string token, human; if (transResultInfo (e.result, token, human)) - m_journal.info << "TransactionResult: " + { + JLOG(m_journal.info) << "TransactionResult: " << token << ": " << human; + } } #endif @@ -982,7 +985,8 @@ void NetworkOPsImp::apply (std::unique_lock& batchLock) if (e.result == tesSUCCESS) { - m_journal.debug << "Transaction is now included in open ledger"; + JLOG(m_journal.debug) + << "Transaction is now included in open ledger"; e.transaction->setStatus (INCLUDED); auto txCur = e.transaction->getSTransaction(); @@ -995,7 +999,7 @@ void NetworkOPsImp::apply (std::unique_lock& batchLock) else if (e.result == tefPAST_SEQ) { // duplicate or conflict - m_journal.info << "Transaction is obsolete"; + JLOG(m_journal.info) << "Transaction is obsolete"; e.transaction->setStatus (OBSOLETE); } else if (e.result == terQUEUED) @@ -1017,14 +1021,16 @@ void NetworkOPsImp::apply (std::unique_lock& batchLock) else { // transaction should be held - m_journal.debug << "Transaction should be held: " << e.result; + JLOG(m_journal.debug) + << "Transaction should be held: " << e.result; e.transaction->setStatus (HELD); m_ledgerMaster.addHeldTransaction (e.transaction); } } else { - m_journal.debug << "Status other than success " << e.result; + JLOG(m_journal.debug) + << "Status other than success " << e.result; e.transaction->setStatus (INVALID); } @@ -1225,7 +1231,7 @@ bool NetworkOPsImp::checkLastClosedLedger ( // our last closed ledger? Or do sufficient nodes agree? And do we have no // better ledger available? If so, we are either tracking or full. 
- m_journal.trace << "NetworkOPsImp::checkLastClosedLedger"; + JLOG(m_journal.trace) << "NetworkOPsImp::checkLastClosedLedger"; Ledger::pointer ourClosed = m_ledgerMaster.getClosedLedger (); @@ -1234,8 +1240,8 @@ bool NetworkOPsImp::checkLastClosedLedger ( uint256 closedLedger = ourClosed->getHash (); uint256 prevClosedLedger = ourClosed->info().parentHash; - m_journal.trace << "OurClosed: " << closedLedger; - m_journal.trace << "PrevClosed: " << prevClosedLedger; + JLOG(m_journal.trace) << "OurClosed: " << closedLedger; + JLOG(m_journal.trace) << "PrevClosed: " << prevClosedLedger; hash_map ledgers; { @@ -1296,17 +1302,21 @@ bool NetworkOPsImp::checkLastClosedLedger ( for (auto const& it: ledgers) { - m_journal.debug << "L: " << it.first - << " t=" << it.second.trustedValidations - << ", n=" << it.second.nodesUsing; + JLOG(m_journal.debug) << "L: " << it.first + << " t=" << it.second.trustedValidations + << ", n=" << it.second.nodesUsing; // Temporary logging to make sure tiebreaking isn't broken if (it.second.trustedValidations > 0) - m_journal.trace << " TieBreakTV: " << it.second.highValidation; + JLOG(m_journal.trace) + << " TieBreakTV: " << it.second.highValidation; else { if (it.second.nodesUsing > 0) - m_journal.trace << " TieBreakNU: " << it.second.highNodeUsing; + { + JLOG(m_journal.trace) + << " TieBreakNU: " << it.second.highNodeUsing; + } } if (it.second > bestVC) @@ -1320,7 +1330,7 @@ bool NetworkOPsImp::checkLastClosedLedger ( if (switchLedgers && (closedLedger == prevClosedLedger)) { // don't switch to our own previous ledger - m_journal.info << "We won't switch to our own previous ledger"; + JLOG(m_journal.info) << "We won't switch to our own previous ledger"; networkClosed = ourClosed->getHash (); switchLedgers = false; } @@ -1345,9 +1355,9 @@ bool NetworkOPsImp::checkLastClosedLedger ( return false; } - m_journal.warning << "We are not running on the consensus ledger"; - m_journal.info << "Our LCL: " << getJson (*ourClosed); - m_journal.info << "Net LCL " << closedLedger; + JLOG(m_journal.warning) << "We are not running on the consensus ledger"; + JLOG(m_journal.info) << "Our LCL: " << getJson (*ourClosed); + JLOG(m_journal.info) << "Net LCL " << closedLedger; if ((mMode == omTRACKING) || (mMode == omFULL)) setMode (omCONNECTED); @@ -1371,7 +1381,7 @@ void NetworkOPsImp::switchLastClosedLedger ( // set the newLCL as our last closed ledger -- this is abnormal code auto msg = duringConsensus ? 
"JUMPdc" : "JUMP"; - m_journal.error + JLOG(m_journal.error) << msg << " last closed ledger to " << newLCL->getHash (); clearNeedNetworkLedger (); @@ -1425,7 +1435,7 @@ bool NetworkOPsImp::beginConsensus (uint256 const& networkClosed) auto closingInfo = m_ledgerMaster.getCurrentLedger()->info(); - if (m_journal.info) m_journal.info << + JLOG(m_journal.info) << "Consensus time for #" << closingInfo.seq << " with LCL " << closingInfo.parentHash; @@ -1437,7 +1447,7 @@ bool NetworkOPsImp::beginConsensus (uint256 const& networkClosed) // this shouldn't happen unless we jump ledgers if (mMode == omFULL) { - m_journal.warning << "Don't have LCL, going to tracking"; + JLOG(m_journal.warning) << "Don't have LCL, going to tracking"; setMode (omTRACKING); } @@ -1456,7 +1466,7 @@ bool NetworkOPsImp::beginConsensus (uint256 const& networkClosed) prevLedger, closingInfo.closeTime); - m_journal.debug << "Initiating consensus engine"; + JLOG(m_journal.debug) << "Initiating consensus engine"; return true; } @@ -1476,7 +1486,7 @@ void NetworkOPsImp::processTrustedProposal ( if (mLedgerConsensus->peerPosition (proposal)) app_.overlay().relay(*set, proposal->getSuppressionID()); else - m_journal.info << "Not relaying trusted proposal"; + JLOG(m_journal.info) << "Not relaying trusted proposal"; } } @@ -1498,7 +1508,7 @@ void NetworkOPsImp::endConsensus (bool correctLCL) { if (it && (it->getClosedLedgerHash () == deadLedger)) { - m_journal.trace << "Killing obsolete peer status"; + JLOG(m_journal.trace) << "Killing obsolete peer status"; it->cycleStatus (); } } @@ -1672,7 +1682,7 @@ void NetworkOPsImp::setMode (OperatingMode om) accounting_.mode (om); - m_journal.info << "STATE->" << strOperatingMode (); + JLOG(m_journal.info) << "STATE->" << strOperatingMode (); pubServer (); } @@ -1757,7 +1767,7 @@ NetworkOPsImp::transactionsSQL ( % beast::lexicalCastThrow (offset) % beast::lexicalCastThrow (numberOfResults) ); - m_journal.trace << "txSQL query: " << sql; + JLOG(m_journal.trace) << "txSQL query: " << sql; return sql; } @@ -1810,8 +1820,8 @@ NetworkOPs::AccountTxs NetworkOPsImp::getAccountTxs ( { // Work around a bug that could leave the metadata missing auto const seq = rangeCheckedCast( ledgerSeq.value_or (0)); - m_journal.warning << "Recovering ledger " << seq - << ", txn " << txn->getID(); + JLOG(m_journal.warning) << "Recovering ledger " << seq + << ", txn " << txn->getID(); Ledger::pointer ledger = m_ledgerMaster.getLedgerBySeq(seq); if (ledger) pendSaveValidated(app_, ledger, false, false); @@ -1936,8 +1946,8 @@ NetworkOPsImp::getTxsAccountB ( bool NetworkOPsImp::recvValidation ( STValidation::ref val, std::string const& source) { - m_journal.debug << "recvValidation " << val->getLedgerHash () - << " from " << source; + JLOG(m_journal.debug) << "recvValidation " << val->getLedgerHash () + << " from " << source; pubValidation (val); return app_.getValidations ().addValidation (val, source); } @@ -2161,7 +2171,7 @@ void NetworkOPsImp::pubProposedTransaction ( } AcceptedLedgerTx alt (lpCurrent, stTxn, terResult, app_.accountIDCache(), app_.logs()); - m_journal.trace << "pubProposed: " << alt.getJson (); + JLOG(m_journal.trace) << "pubProposed: " << alt.getJson (); pubAccountTransaction (lpCurrent, alt, false); } @@ -2225,7 +2235,7 @@ void NetworkOPsImp::pubLedger (Ledger::ref lpAccepted) // Don't lock since pubAcceptedTransaction is locking. 
for (auto const& vt : alpAccepted->getMap ()) { - m_journal.trace << "pubAccepted: " << vt.second->getJson (); + JLOG(m_journal.trace) << "pubAccepted: " << vt.second->getJson (); pubValidatedTransaction (lpAccepted, *vt.second); } } @@ -2404,7 +2414,7 @@ void NetworkOPsImp::pubAccountTransaction ( } } } - m_journal.trace << "pubAccountTransaction:" << + JLOG(m_journal.trace) << "pubAccountTransaction:" << " iProposed=" << iProposed << " iAccepted=" << iAccepted; @@ -2437,7 +2447,7 @@ void NetworkOPsImp::subAccount ( for (auto const& naAccountID : vnaAccountIDs) { - if (m_journal.trace) m_journal.trace << + JLOG(m_journal.trace) << "subAccount: account: " << toBase58(naAccountID); isrListener->insertSubAccountInfo (naAccountID, rt); @@ -2764,7 +2774,7 @@ void NetworkOPsImp::getBookPage ( { bDirectAdvance = false; - m_journal.trace << "getBookPage: bDirectAdvance"; + JLOG(m_journal.trace) << "getBookPage: bDirectAdvance"; auto const ledgerIndex = view.succ(uTipIndex, uBookEnd); if (ledgerIndex) @@ -2774,7 +2784,7 @@ void NetworkOPsImp::getBookPage ( if (!sleOfferDir) { - m_journal.trace << "getBookPage: bDone"; + JLOG(m_journal.trace) << "getBookPage: bDone"; bDone = true; } else @@ -2785,8 +2795,10 @@ void NetworkOPsImp::getBookPage ( cdirFirst (view, uTipIndex, sleOfferDir, uBookEntry, offerIndex, viewJ); - m_journal.trace << "getBookPage: uTipIndex=" << uTipIndex; - m_journal.trace << "getBookPage: offerIndex=" << offerIndex; + JLOG(m_journal.trace) + << "getBookPage: uTipIndex=" << uTipIndex; + JLOG(m_journal.trace) + << "getBookPage: offerIndex=" << offerIndex; } } @@ -2909,7 +2921,7 @@ void NetworkOPsImp::getBookPage ( } else { - m_journal.warning << "Missing offer"; + JLOG(m_journal.warning) << "Missing offer"; } if (! cdirNext(view, @@ -2919,7 +2931,8 @@ void NetworkOPsImp::getBookPage ( } else { - m_journal.trace << "getBookPage: offerIndex=" << offerIndex; + JLOG(m_journal.trace) + << "getBookPage: offerIndex=" << offerIndex; } } } diff --git a/src/ripple/app/misc/SHAMapStoreImp.cpp b/src/ripple/app/misc/SHAMapStoreImp.cpp index 929b6ba683..c6b497e0d3 100644 --- a/src/ripple/app/misc/SHAMapStoreImp.cpp +++ b/src/ripple/app/misc/SHAMapStoreImp.cpp @@ -309,7 +309,7 @@ SHAMapStoreImp::run() if (validatedSeq >= lastRotated + setup_.deleteInterval && canDelete_ >= lastRotated - 1) { - journal_.debug << "rotating validatedSeq " << validatedSeq + JLOG(journal_.debug) << "rotating validatedSeq " << validatedSeq << " lastRotated " << lastRotated << " deleteInterval " << setup_.deleteInterval << " canDelete_ " << canDelete_; @@ -343,7 +343,7 @@ SHAMapStoreImp::run() false)->visitNodes ( std::bind (&SHAMapStoreImp::copyNode, this, std::ref(nodeCount), std::placeholders::_1)); - journal_.debug << "copied ledger " << validatedSeq + JLOG(journal_.debug) << "copied ledger " << validatedSeq << " nodecount " << nodeCount; switch (health()) { @@ -358,7 +358,7 @@ SHAMapStoreImp::run() } freshenCaches(); - journal_.debug << validatedSeq << " freshened caches"; + JLOG(journal_.debug) << validatedSeq << " freshened caches"; switch (health()) { case Health::stopping: @@ -373,7 +373,7 @@ SHAMapStoreImp::run() std::shared_ptr newBackend = makeBackendRotating(); - journal_.debug << validatedSeq << " new backend " + JLOG(journal_.debug) << validatedSeq << " new backend " << newBackend->getName(); std::shared_ptr oldBackend; @@ -401,7 +401,7 @@ SHAMapStoreImp::run() clearCaches (validatedSeq); oldBackend = database_->rotateBackends (newBackend); } - journal_.debug << "finished rotation " << validatedSeq; + 
JLOG(journal_.debug) << "finished rotation " << validatedSeq; oldBackend->setDeletePath(); } @@ -525,7 +525,7 @@ SHAMapStoreImp::clearSql (DatabaseCon& database, boost::format formattedDeleteQuery (deleteQuery); - if (journal_.debug) journal_.debug << + JLOG(journal_.debug) << "start: " << deleteQuery << " from " << min << " to " << lastRotated; while (min < lastRotated) { @@ -541,7 +541,7 @@ SHAMapStoreImp::clearSql (DatabaseCon& database, std::this_thread::sleep_for ( std::chrono::milliseconds (setup_.backOff)); } - journal_.debug << "finished: " << deleteQuery; + JLOG(journal_.debug) << "finished: " << deleteQuery; } void @@ -622,9 +622,9 @@ SHAMapStoreImp::health() auto age = ledgerMaster_->getValidatedLedgerAge(); if (mode != NetworkOPs::omFULL || age.count() >= setup_.ageThreshold) { - journal_.warning << "Not deleting. state: " << mode - << " age " << age.count() - << " age threshold " << setup_.ageThreshold; + JLOG(journal_.warning) << "Not deleting. state: " << mode + << " age " << age.count() + << " age threshold " << setup_.ageThreshold; healthy_ = false; } diff --git a/src/ripple/app/tx/impl/CreateOffer.cpp b/src/ripple/app/tx/impl/CreateOffer.cpp index 03d7245dd8..f9d81f676d 100644 --- a/src/ripple/app/tx/impl/CreateOffer.cpp +++ b/src/ripple/app/tx/impl/CreateOffer.cpp @@ -450,13 +450,13 @@ CreateOffer::bridged_cross ( if (taker.done()) { - j_.debug << "The taker reports he's done during crossing!"; + JLOG(j_.debug) << "The taker reports he's done during crossing!"; break; } if (reachedOfferCrossingLimit (taker)) { - j_.debug << "The offer crossing limit has been exceeded!"; + JLOG(j_.debug) << "The offer crossing limit has been exceeded!"; break; } @@ -531,13 +531,13 @@ CreateOffer::direct_cross ( if (taker.done()) { - j_.debug << "The taker reports he's done during crossing!"; + JLOG(j_.debug) << "The taker reports he's done during crossing!"; break; } if (reachedOfferCrossingLimit (taker)) { - j_.debug << "The offer crossing limit has been exceeded!"; + JLOG(j_.debug) << "The offer crossing limit has been exceeded!"; break; } @@ -770,7 +770,7 @@ CreateOffer::applyGuts (ApplyView& view, ApplyView& view_cancel) // never be negative. If it is, something went very very wrong. if (place_offer.in < zero || place_offer.out < zero) { - j_.fatal << "Cross left offer negative!" << + JLOG(j_.fatal) << "Cross left offer negative!" 
<< " in: " << format_amount (place_offer.in) << " out: " << format_amount (place_offer.out); return { tefINTERNAL, true }; @@ -778,7 +778,7 @@ CreateOffer::applyGuts (ApplyView& view, ApplyView& view_cancel) if (place_offer.in == zero || place_offer.out == zero) { - j_.debug << "Offer fully crossed!"; + JLOG(j_.debug) << "Offer fully crossed!"; return { result, true }; } diff --git a/src/ripple/app/tx/impl/SetAccount.cpp b/src/ripple/app/tx/impl/SetAccount.cpp index a50b364012..eb940edac0 100644 --- a/src/ripple/app/tx/impl/SetAccount.cpp +++ b/src/ripple/app/tx/impl/SetAccount.cpp @@ -195,13 +195,13 @@ SetAccount::doApply () // if (bSetRequireAuth && !(uFlagsIn & lsfRequireAuth)) { - j_.trace << "Set RequireAuth."; + JLOG(j_.trace) << "Set RequireAuth."; uFlagsOut |= lsfRequireAuth; } if (bClearRequireAuth && (uFlagsIn & lsfRequireAuth)) { - j_.trace << "Clear RequireAuth."; + JLOG(j_.trace) << "Clear RequireAuth."; uFlagsOut &= ~lsfRequireAuth; } @@ -210,13 +210,13 @@ SetAccount::doApply () // if (bSetRequireDest && !(uFlagsIn & lsfRequireDestTag)) { - j_.trace << "Set lsfRequireDestTag."; + JLOG(j_.trace) << "Set lsfRequireDestTag."; uFlagsOut |= lsfRequireDestTag; } if (bClearRequireDest && (uFlagsIn & lsfRequireDestTag)) { - j_.trace << "Clear lsfRequireDestTag."; + JLOG(j_.trace) << "Clear lsfRequireDestTag."; uFlagsOut &= ~lsfRequireDestTag; } @@ -225,13 +225,13 @@ SetAccount::doApply () // if (bSetDisallowXRP && !(uFlagsIn & lsfDisallowXRP)) { - j_.trace << "Set lsfDisallowXRP."; + JLOG(j_.trace) << "Set lsfDisallowXRP."; uFlagsOut |= lsfDisallowXRP; } if (bClearDisallowXRP && (uFlagsIn & lsfDisallowXRP)) { - j_.trace << "Clear lsfDisallowXRP."; + JLOG(j_.trace) << "Clear lsfDisallowXRP."; uFlagsOut &= ~lsfDisallowXRP; } @@ -242,7 +242,7 @@ SetAccount::doApply () { if (!sigWithMaster) { - j_.trace << "Must use master key to disable master key."; + JLOG(j_.trace) << "Must use master key to disable master key."; return tecNEED_MASTER_KEY; } @@ -259,13 +259,13 @@ SetAccount::doApply () return tecNO_REGULAR_KEY; } - j_.trace << "Set lsfDisableMaster."; + JLOG(j_.trace) << "Set lsfDisableMaster."; uFlagsOut |= lsfDisableMaster; } if ((uClearFlag == asfDisableMaster) && (uFlagsIn & lsfDisableMaster)) { - j_.trace << "Clear lsfDisableMaster."; + JLOG(j_.trace) << "Clear lsfDisableMaster."; uFlagsOut &= ~lsfDisableMaster; } @@ -288,18 +288,18 @@ SetAccount::doApply () { if (!sigWithMaster && !(uFlagsIn & lsfDisableMaster)) { - j_.trace << "Can't use regular key to set NoFreeze."; + JLOG(j_.trace) << "Can't use regular key to set NoFreeze."; return tecNEED_MASTER_KEY; } - j_.trace << "Set NoFreeze flag"; + JLOG(j_.trace) << "Set NoFreeze flag"; uFlagsOut |= lsfNoFreeze; } // Anyone may set global freeze if (uSetFlag == asfGlobalFreeze) { - j_.trace << "Set GlobalFreeze flag"; + JLOG(j_.trace) << "Set GlobalFreeze flag"; uFlagsOut |= lsfGlobalFreeze; } @@ -309,7 +309,7 @@ SetAccount::doApply () if ((uSetFlag != asfGlobalFreeze) && (uClearFlag == asfGlobalFreeze) && ((uFlagsOut & lsfNoFreeze) == 0)) { - j_.trace << "Clear GlobalFreeze flag"; + JLOG(j_.trace) << "Clear GlobalFreeze flag"; uFlagsOut &= ~lsfGlobalFreeze; } @@ -318,13 +318,13 @@ SetAccount::doApply () // if ((uSetFlag == asfAccountTxnID) && !sle->isFieldPresent (sfAccountTxnID)) { - j_.trace << "Set AccountTxnID"; + JLOG(j_.trace) << "Set AccountTxnID"; sle->makeFieldPresent (sfAccountTxnID); } if ((uClearFlag == asfAccountTxnID) && sle->isFieldPresent (sfAccountTxnID)) { - j_.trace << "Clear AccountTxnID"; + JLOG(j_.trace) << 
"Clear AccountTxnID"; sle->makeFieldAbsent (sfAccountTxnID); } @@ -337,12 +337,12 @@ SetAccount::doApply () if (!uHash) { - j_.trace << "unset email hash"; + JLOG(j_.trace) << "unset email hash"; sle->makeFieldAbsent (sfEmailHash); } else { - j_.trace << "set email hash"; + JLOG(j_.trace) << "set email hash"; sle->setFieldH128 (sfEmailHash, uHash); } } @@ -356,12 +356,12 @@ SetAccount::doApply () if (!uHash) { - j_.trace << "unset wallet locator"; + JLOG(j_.trace) << "unset wallet locator"; sle->makeFieldAbsent (sfWalletLocator); } else { - j_.trace << "set wallet locator"; + JLOG(j_.trace) << "set wallet locator"; sle->setFieldH256 (sfWalletLocator, uHash); } } @@ -375,12 +375,12 @@ SetAccount::doApply () if (messageKey.empty ()) { - j_.debug << "set message key"; + JLOG(j_.debug) << "set message key"; sle->makeFieldAbsent (sfMessageKey); } else { - j_.debug << "set message key"; + JLOG(j_.debug) << "set message key"; sle->setFieldVL (sfMessageKey, messageKey); } } @@ -394,12 +394,12 @@ SetAccount::doApply () if (domain.empty ()) { - j_.trace << "unset domain"; + JLOG(j_.trace) << "unset domain"; sle->makeFieldAbsent (sfDomain); } else { - j_.trace << "set domain"; + JLOG(j_.trace) << "set domain"; sle->setFieldVL (sfDomain, domain); } } @@ -413,12 +413,12 @@ SetAccount::doApply () if (uRate == 0 || uRate == QUALITY_ONE) { - j_.trace << "unset transfer rate"; + JLOG(j_.trace) << "unset transfer rate"; sle->makeFieldAbsent (sfTransferRate); } else if (uRate > QUALITY_ONE) { - j_.trace << "set transfer rate"; + JLOG(j_.trace) << "set transfer rate"; sle->setFieldU32 (sfTransferRate, uRate); } } diff --git a/src/ripple/app/tx/impl/SetTrust.cpp b/src/ripple/app/tx/impl/SetTrust.cpp index 85b770c82e..9204031468 100644 --- a/src/ripple/app/tx/impl/SetTrust.cpp +++ b/src/ripple/app/tx/impl/SetTrust.cpp @@ -191,7 +191,7 @@ SetTrust::doApply () SLE::pointer sleDelete = view().peek ( keylet::line(account_, uDstAccountID, currency)); - j_.warning << + JLOG(j_.warning) << "Clearing redundant line."; return trustDelete (view(), @@ -203,7 +203,7 @@ SetTrust::doApply () if (!sleDst) { - j_.trace << + JLOG(j_.trace) << "Delay transaction: Destination account does not exist."; return tecNO_DST; } @@ -412,7 +412,7 @@ SetTrust::doApply () // Reserve is not scaled by load. else if (bReserveIncrease && mPriorBalance < reserveCreate) { - j_.trace << + JLOG(j_.trace) << "Delay transaction: Insufficent reserve to add trust line."; // Another transaction could provide XRP to the account and then @@ -423,7 +423,7 @@ SetTrust::doApply () { view().update (sleRippleState); - j_.trace << "Modify ripple line"; + JLOG(j_.trace) << "Modify ripple line"; } } // Line does not exist. @@ -432,13 +432,13 @@ SetTrust::doApply () (! bQualityOut || ! uQualityOut) && // Not setting quality out or setting default quality out. (! (view().rules().enabled(featureTrustSetAuth, ctx_.app.config().features)) || ! bSetAuth)) { - j_.trace << + JLOG(j_.trace) << "Redundant: Setting non-existent ripple line to defaults."; return tecNO_LINE_REDUNDANT; } else if (mPriorBalance < reserveCreate) // Reserve is not scaled by load. { - j_.trace << + JLOG(j_.trace) << "Delay transaction: Line does not exist. Insufficent reserve to create line."; // Another transaction could create the account and then this transaction would succeed. 
@@ -452,7 +452,7 @@ SetTrust::doApply () uint256 index (getRippleStateIndex ( account_, uDstAccountID, currency)); - j_.trace << + JLOG(j_.trace) << "doTrustSet: Creating ripple line: " << to_string (index); diff --git a/src/ripple/app/tx/impl/Taker.cpp b/src/ripple/app/tx/impl/Taker.cpp index f19317b823..cb989554db 100644 --- a/src/ripple/app/tx/impl/Taker.cpp +++ b/src/ripple/app/tx/impl/Taker.cpp @@ -20,6 +20,7 @@ #include #include #include +#include namespace ripple { @@ -117,7 +118,7 @@ BasicTaker::unfunded () const if (get_funds (account(), remaining_.in) > zero) return false; - journal_.debug << "Unfunded: taker is out of funds."; + JLOG(journal_.debug) << "Unfunded: taker is out of funds."; return true; } @@ -127,7 +128,7 @@ BasicTaker::done () const // We are done if we have consumed all the input currency if (remaining_.in <= zero) { - journal_.debug << "Done: all the input currency has been consumed."; + JLOG(journal_.debug) << "Done: all the input currency has been consumed."; return true; } @@ -135,14 +136,14 @@ BasicTaker::done () const // desired amount of output currency if (!sell_ && (remaining_.out <= zero)) { - journal_.debug << "Done: the desired amount has been received."; + JLOG(journal_.debug) << "Done: the desired amount has been received."; return true; } // We are done if the taker is out of funds if (unfunded ()) { - journal_.debug << "Done: taker out of funds."; + JLOG(journal_.debug) << "Done: taker out of funds."; return true; } @@ -438,7 +439,7 @@ BasicTaker::do_cross ( if (account () == owner1) { - journal_.trace << "The taker owns the first leg of a bridge."; + JLOG(journal_.trace) << "The taker owns the first leg of a bridge."; leg1_in_funds = std::max (leg1_in_funds, offer1.in); } @@ -448,7 +449,7 @@ BasicTaker::do_cross ( if (account () == owner2) { - journal_.trace << "The taker owns the second leg of a bridge."; + JLOG(journal_.trace) << "The taker owns the second leg of a bridge."; leg2_out_funds = std::max (leg2_out_funds, offer2.out); } @@ -464,7 +465,8 @@ BasicTaker::do_cross ( if (owner1 == owner2) { - journal_.trace << "The bridge endpoints are owneb by the same account."; + JLOG(journal_.trace) << + "The bridge endpoints are owned by the same account."; xrp_funds = std::max (offer1.out, offer2.in); } @@ -568,7 +570,7 @@ Taker::consume_offer (Offer const& offer, Amounts const& order) if (order.out < zero) Throw ("flow with negative output."); - if (journal_.debug) journal_.debug << "Consuming from offer " << offer; + JLOG(journal_.debug) << "Consuming from offer " << offer; if (journal_.trace) { diff --git a/src/ripple/basics/impl/ResolverAsio.cpp b/src/ripple/basics/impl/ResolverAsio.cpp index 5a22640c95..bc3979facf 100644 --- a/src/ripple/basics/impl/ResolverAsio.cpp +++ b/src/ripple/basics/impl/ResolverAsio.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -120,7 +121,7 @@ public: &ResolverAsioImpl::do_stop, this, CompletionCounter (this)))); - m_journal.debug << "Queued a stop request"; + JLOG(m_journal.debug) << "Queued a stop request"; } } @@ -128,9 +129,9 @@ public: { stop_async (); - m_journal.debug << "Waiting to stop"; + JLOG(m_journal.debug) << "Waiting to stop"; m_stop_complete.wait(); - m_journal.debug << "Stopped"; + JLOG(m_journal.debug) << "Stopped"; } void resolve ( @@ -255,7 +256,7 @@ public: if (hp.first.empty ()) { - m_journal.error << + JLOG(m_journal.error) << "Unable to parse '" << name << "'"; m_io_service.post (m_strand.wrap (std::bind ( @@ -284,7 +285,7 @@ public: { m_work.emplace_back 
(names, handler); - m_journal.debug << + JLOG(m_journal.debug) << "Queued new job with " << names.size() << " tasks. " << m_work.size() << " jobs outstanding."; diff --git a/src/ripple/core/impl/JobQueue.cpp b/src/ripple/core/impl/JobQueue.cpp index c48df62bef..a20feb6f08 100644 --- a/src/ripple/core/impl/JobQueue.cpp +++ b/src/ripple/core/impl/JobQueue.cpp @@ -117,7 +117,7 @@ JobQueue::addJob (JobType type, std::string const& name, // if (isStopping() && skipOnStop (type)) { - m_journal.debug << + JLOG(m_journal.debug) << "Skipping addJob ('" << name << "')"; return; } @@ -176,7 +176,7 @@ JobQueue::getJobCountGE (JobType t) const void JobQueue::shutdown () { - m_journal.info << "Job queue shutting down"; + JLOG(m_journal.info) << "Job queue shutting down"; m_workers.pauseAllThreadsAndWait (); } @@ -193,7 +193,7 @@ JobQueue::setThreadCount (int c, bool const standaloneMode) c = static_cast(std::thread::hardware_concurrency()); c = 2 + std::min (c, 4); // I/O will bottleneck - m_journal.info << "Auto-tuning to " << c << + JLOG(m_journal.info) << "Auto-tuning to " << c << " validation/transaction/proposal threads"; } @@ -471,7 +471,7 @@ JobQueue::processTask () if (!isStopping() || !data.info.skip ()) { beast::Thread::setCurrentThreadName (data.name ()); - m_journal.trace << "Doing " << data.name () << " job"; + JLOG(m_journal.trace) << "Doing " << data.name () << " job"; Job::clock_type::time_point const start_time ( Job::clock_type::now()); @@ -482,7 +482,7 @@ JobQueue::processTask () } else { - m_journal.trace << "Skipping processTask ('" << data.name () << "')"; + JLOG(m_journal.trace) << "Skipping processTask ('" << data.name () << "')"; } { diff --git a/src/ripple/overlay/impl/OverlayImpl.cpp b/src/ripple/overlay/impl/OverlayImpl.cpp index 86b008f441..ef119e934a 100644 --- a/src/ripple/overlay/impl/OverlayImpl.cpp +++ b/src/ripple/overlay/impl/OverlayImpl.cpp @@ -108,8 +108,9 @@ OverlayImpl::Timer::on_timer (error_code ec) if (ec || overlay_.isStopping()) { if (ec && ec != boost::asio::error::operation_aborted) - if (overlay_.journal_.error) overlay_.journal_.error << - "on_timer: " << ec.message(); + { + JLOG(overlay_.journal_.error) << "on_timer: " << ec.message(); + } return; } @@ -185,15 +186,13 @@ OverlayImpl::onHandoff (std::unique_ptr && ssl_bundle, handoff.moved = true; - if (journal.debug) journal.debug<< - "Peer connection upgrade from " << remote_endpoint; + JLOG(journal.debug) << "Peer connection upgrade from " << remote_endpoint; error_code ec; auto const local_endpoint (ssl_bundle->socket.local_endpoint(ec)); if (ec) { - if (journal.debug) journal.debug << - remote_endpoint << " failed: " << ec.message(); + JLOG(journal.debug) << remote_endpoint << " failed: " << ec.message(); return handoff; } @@ -256,7 +255,7 @@ OverlayImpl::onHandoff (std::unique_ptr && ssl_bundle, if (result != PeerFinder::Result::success) { m_peerFinder->on_closed(slot); - if (journal.debug) journal.debug << + JLOG(journal.debug) << "Peer " << remote_endpoint << " redirected, slots full"; handoff.moved = false; handoff.response = makeRedirectResponse(slot, request, @@ -347,16 +346,14 @@ OverlayImpl::connect (beast::IP::Endpoint const& remote_endpoint) auto usage = resourceManager().newOutboundEndpoint (remote_endpoint); if (usage.disconnect()) { - if (journal_.info) journal_.info << - "Over resource limit: " << remote_endpoint; + JLOG(journal_.info) << "Over resource limit: " << remote_endpoint; return; } auto const slot = peerFinder().new_outbound_slot(remote_endpoint); if (slot == nullptr) { - if 
(journal_.debug) journal_.debug << - "Connect: No slot for " << remote_endpoint; + JLOG(journal_.debug) << "Connect: No slot for " << remote_endpoint; return; } @@ -396,7 +393,7 @@ OverlayImpl::add_active (std::shared_ptr const& peer) list_.emplace(peer.get(), peer); - journal_.debug << + JLOG(journal_.debug) << "activated " << peer->getRemoteAddress() << " (" << peer->id() << ":" << toBase58 ( @@ -466,8 +463,7 @@ OverlayImpl::setupValidatorKeyManifests (BasicConfig const& config, } else { - if (journal_.debug) - journal_.debug << "No [validation_manifest] section in config"; + JLOG(journal_.debug) << "No [validation_manifest] section in config"; } manifestCache_.load ( @@ -632,7 +628,7 @@ OverlayImpl::activate (std::shared_ptr const& peer) (void) result.second; } - journal_.debug << + JLOG(journal_.debug) << "activated " << peer->getRemoteAddress() << " (" << peer->id() << ":" << toBase58 ( @@ -659,8 +655,7 @@ OverlayImpl::onManifests ( auto const n = m->list_size(); auto const& journal = from->pjournal(); - if (journal.debug) journal.debug - << "TMManifest, " << n << (n == 1 ? " item" : " items"); + JLOG(journal.debug) << "TMManifest, " << n << (n == 1 ? " item" : " items"); bool const history = m->history (); for (std::size_t i = 0; i < n; ++i) @@ -718,14 +713,12 @@ OverlayImpl::onManifests ( } else { - if (journal.info) - journal.info << "Bad manifest #" << i + 1; + JLOG(journal.info) << "Bad manifest #" << i + 1; } } else { - if (journal.warning) - journal.warning << "Malformed manifest #" << i + 1; + JLOG(journal.warning) << "Malformed manifest #" << i + 1; continue; } } diff --git a/src/ripple/overlay/impl/PeerImp.cpp b/src/ripple/overlay/impl/PeerImp.cpp index bd3252bfaf..c2afd75dd3 100644 --- a/src/ripple/overlay/impl/PeerImp.cpp +++ b/src/ripple/overlay/impl/PeerImp.cpp @@ -96,8 +96,9 @@ PeerImp::PeerImp (Application& app, id_t id, endpoint_type remote_endpoint, PeerImp::~PeerImp () { if (cluster()) - if (journal_.warning) journal_.warning << - name_ << " left cluster"; + { + JLOG(journal_.warning) << name_ << " left cluster"; + } if (state_ == State::active) overlay_.onPeerDeactivate(id_); overlay_.peerFinder().on_closed (slot_); @@ -171,13 +172,11 @@ PeerImp::stop() // if(m_inbound) { - if(journal_.debug) journal_.debug << - "Stop"; + JLOG(journal_.debug) << "Stop"; } else { - if(journal_.info) journal_.info << - "Stop"; + JLOG(journal_.info) << "Stop"; } } close(); @@ -351,7 +350,7 @@ PeerImp::json() default: // FIXME: do we really want this? 
- p_journal_.warning << + JLOG(p_journal_.warning) << "Unknown status: " << last_status_.newstatus (); } } @@ -423,13 +422,11 @@ PeerImp::close() socket_.close(ec); if(m_inbound) { - if(journal_.debug) journal_.debug << - "Closed"; + JLOG(journal_.debug) << "Closed"; } else { - if(journal_.info) journal_.info << - "Closed"; + JLOG(journal_.info) << "Closed"; } } } @@ -442,8 +439,9 @@ PeerImp::fail(std::string const& reason) (void(Peer::*)(std::string const&))&PeerImp::fail, shared_from_this(), reason)); if (socket_.is_open()) - if (journal_.debug) journal_.debug << - reason; + { + JLOG (journal_.debug) << reason; + } close(); } @@ -452,8 +450,9 @@ PeerImp::fail(std::string const& name, error_code ec) { assert(strand_.running_in_this_thread()); if (socket_.is_open()) - if (journal_.debug) journal_.debug << - name << ": " << ec.message(); + { + JLOG(journal_.debug) << name << ": " << ec.message(); + } close(); } @@ -485,8 +484,7 @@ PeerImp::setTimer() if (ec) { - if (journal_.error) journal_.error << - "setTimer: " << ec.message(); + JLOG(journal_.error) << "setTimer: " << ec.message(); return; } timer_.async_wait(strand_.wrap(std::bind(&PeerImp::onTimer, @@ -523,8 +521,7 @@ PeerImp::onTimer (error_code const& ec) if (ec) { // This should never happen - if (journal_.error) journal_.error << - "onTimer: " << ec.message(); + JLOG(journal_.error) << "onTimer: " << ec.message(); return close(); } @@ -576,8 +573,7 @@ PeerImp::onShutdown(error_code ec) // If we don't get eof then something went wrong if (! ec) { - if (journal_.error) journal_.error << - "onShutdown: expected error condition"; + JLOG(journal_.error) << "onShutdown: expected error condition"; return close(); } if (ec != boost::asio::error::eof) @@ -592,8 +588,7 @@ void PeerImp::doAccept() assert(read_buffer_.size() == 0); assert(http_message_.upgrade()); - if(journal_.debug) journal_.debug << - "doAccept: " << remote_address_; + JLOG(journal_.debug) << "doAccept: " << remote_address_; auto sharedValue = makeSharedValue( ssl_bundle_->stream.native_handle(), journal_); @@ -610,17 +605,15 @@ void PeerImp::doAccept() beast::http::write (write_buffer_, resp); auto const protocol = BuildInfo::make_protocol(hello_.protoversion()); - if(journal_.info) journal_.info << - "Protocol: " << to_string(protocol); - if(journal_.info) journal_.info << + JLOG(journal_.info) << "Protocol: " << to_string(protocol); + JLOG(journal_.info) << "Public Key: " << toBase58 ( TokenType::TOKEN_NODE_PUBLIC, publicKey_); if (auto member = app_.cluster().member(publicKey_)) { name_ = *member; - if (journal_.info) journal_.info << - "Cluster name: " << name_; + JLOG(journal_.info) << "Cluster name: " << name_; } overlay_.activate(shared_from_this()); @@ -682,10 +675,11 @@ PeerImp::onWriteResponse (error_code ec, std::size_t bytes_transferred) return fail("onWriteResponse", ec); if(journal_.trace) { - if (bytes_transferred > 0) journal_.trace << - "onWriteResponse: " << bytes_transferred << " bytes"; - else journal_.trace << - "onWriteResponse"; + if (bytes_transferred > 0) + journal_.trace << + "onWriteResponse: " << bytes_transferred << " bytes"; + else + journal_.trace << "onWriteResponse"; } write_buffer_.consume (bytes_transferred); @@ -736,18 +730,18 @@ PeerImp::onReadMessage (error_code ec, std::size_t bytes_transferred) return; if(ec == boost::asio::error::eof) { - if(journal_.info) journal_.info << - "EOF"; + JLOG(journal_.info) << "EOF"; return gracefulClose(); } if(ec) return fail("onReadMessage", ec); if(journal_.trace) { - if (bytes_transferred > 0) 
journal_.trace << - "onReadMessage: " << bytes_transferred << " bytes"; - else journal_.trace << - "onReadMessage"; + if (bytes_transferred > 0) + journal_.trace << + "onReadMessage: " << bytes_transferred << " bytes"; + else + journal_.trace << "onReadMessage"; } read_buffer_.commit (bytes_transferred); @@ -785,10 +779,11 @@ PeerImp::onWriteMessage (error_code ec, std::size_t bytes_transferred) return fail("onWriteMessage", ec); if(journal_.trace) { - if (bytes_transferred > 0) journal_.trace << - "onWriteMessage: " << bytes_transferred << " bytes"; - else journal_.trace << - "onWriteMessage"; + if (bytes_transferred > 0) + journal_.trace << + "onWriteMessage: " << bytes_transferred << " bytes"; + else + journal_.trace << "onWriteMessage"; } assert(! send_queue_.empty()); @@ -1095,8 +1090,7 @@ PeerImp::onMessage (std::shared_ptr const& m) return; } - p_journal_.debug << - "Got tx " << txID; + JLOG(p_journal_.debug) << "Got tx " << txID; bool checkSignature = true; if (cluster()) @@ -1117,9 +1111,13 @@ PeerImp::onMessage (std::shared_ptr const& m) } if (app_.getJobQueue().getJobCount(jtTRANSACTION) > 100) - p_journal_.info << "Transaction queue is full"; + { + JLOG(p_journal_.info) << "Transaction queue is full"; + } else if (app_.getLedgerMaster().getValidatedLedgerAge() > 4min) - p_journal_.trace << "No new transactions until synchronized"; + { + JLOG(p_journal_.trace) << "No new transactions until synchronized"; + } else { app_.getJobQueue ().addJob ( @@ -1134,7 +1132,7 @@ PeerImp::onMessage (std::shared_ptr const& m) } catch (std::exception const&) { - p_journal_.warning << "Transaction invalid: " << + JLOG(p_journal_.warning) << "Transaction invalid: " << strHex(m->rawtransaction ()); } } @@ -1159,7 +1157,7 @@ PeerImp::onMessage (std::shared_ptr const& m) if (m->nodes ().size () <= 0) { - p_journal_.warning << "Ledger/TXset data with no nodes"; + JLOG(p_journal_.warning) << "Ledger/TXset data with no nodes"; return; } @@ -1174,7 +1172,7 @@ PeerImp::onMessage (std::shared_ptr const& m) } else { - p_journal_.info << "Unable to route TX/ledger data reply"; + JLOG(p_journal_.info) << "Unable to route TX/ledger data reply"; fee_ = Resource::feeUnwantedData; } return; @@ -1184,7 +1182,7 @@ PeerImp::onMessage (std::shared_ptr const& m) if (m->ledgerhash ().size () != 32) { - p_journal_.warning << "TX candidate reply with invalid hash size"; + JLOG(p_journal_.warning) << "TX candidate reply with invalid hash size"; fee_ = Resource::feeInvalidRequest; return; } @@ -1208,7 +1206,7 @@ PeerImp::onMessage (std::shared_ptr const& m) if (!app_.getInboundLedgers ().gotLedgerData ( hash, shared_from_this(), m)) { - p_journal_.trace << "Got data for unwanted ledger"; + JLOG(p_journal_.trace) << "Got data for unwanted ledger"; fee_ = Resource::feeUnwantedData; } } @@ -1236,14 +1234,14 @@ PeerImp::onMessage (std::shared_ptr const& m) (set.signature ().size () > 128) ) { - p_journal_.warning << "Proposal: malformed"; + JLOG(p_journal_.warning) << "Proposal: malformed"; fee_ = Resource::feeInvalidSignature; return; } if (set.previousledger ().size () != 32) { - p_journal_.warning << "Proposal: malformed"; + JLOG(p_journal_.warning) << "Proposal: malformed"; fee_ = Resource::feeInvalidRequest; return; } @@ -1262,14 +1260,14 @@ PeerImp::onMessage (std::shared_ptr const& m) if (! 
app_.getHashRouter ().addSuppressionPeer (suppression, id_)) { - p_journal_.trace << "Proposal: duplicate"; + JLOG(p_journal_.trace) << "Proposal: duplicate"; return; } if (app_.config().VALIDATION_PUB.size() && publicKey == app_.config().VALIDATION_PUB) { - p_journal_.trace << "Proposal: self"; + JLOG(p_journal_.trace) << "Proposal: self"; return; } @@ -1279,18 +1277,18 @@ PeerImp::onMessage (std::shared_ptr const& m) { if (sanity_.load() == Sanity::insane) { - p_journal_.debug << "Proposal: Dropping UNTRUSTED (insane)"; + JLOG(p_journal_.debug) << "Proposal: Dropping UNTRUSTED (insane)"; return; } if (app_.getFeeTrack ().isLoadedLocal ()) { - p_journal_.debug << "Proposal: Dropping UNTRUSTED (load)"; + JLOG(p_journal_.debug) << "Proposal: Dropping UNTRUSTED (load)"; return; } } - p_journal_.trace << + JLOG(p_journal_.trace) << "Proposal: " << (isTrusted ? "trusted" : "UNTRUSTED"); auto proposal = std::make_shared ( @@ -1310,7 +1308,7 @@ PeerImp::onMessage (std::shared_ptr const& m) void PeerImp::onMessage (std::shared_ptr const& m) { - p_journal_.trace << "Status: Change"; + JLOG(p_journal_.trace) << "Status: Change"; if (!m->has_networktime ()) m->set_networktime (app_.timeKeeper().now().time_since_epoch().count()); @@ -1329,7 +1327,7 @@ PeerImp::onMessage (std::shared_ptr const& m) { if (!closedLedgerHash_.isZero ()) { - p_journal_.trace << "Status: Out of sync"; + JLOG(p_journal_.trace) << "Status: Out of sync"; closedLedgerHash_.zero (); } @@ -1342,11 +1340,11 @@ PeerImp::onMessage (std::shared_ptr const& m) // a peer has changed ledgers memcpy (closedLedgerHash_.begin (), m->ledgerhash ().data (), 256 / 8); addLedger (closedLedgerHash_); - p_journal_.trace << "LCL is " << closedLedgerHash_; + JLOG(p_journal_.trace) << "LCL is " << closedLedgerHash_; } else { - p_journal_.trace << "Status: No ledger"; + JLOG(p_journal_.trace) << "Status: No ledger"; closedLedgerHash_.zero (); } @@ -1571,7 +1569,7 @@ PeerImp::onMessage (std::shared_ptr const& m) if (m->validation ().size () < 50) { - p_journal_.warning << "Validation: Too small"; + JLOG(p_journal_.warning) << "Validation: Too small"; fee_ = Resource::feeInvalidRequest; return; } @@ -1588,7 +1586,7 @@ PeerImp::onMessage (std::shared_ptr const& m) if (! app_.getValidations().current (val)) { - p_journal_.trace << "Validation: Not current"; + JLOG(p_journal_.trace) << "Validation: Not current"; fee_ = Resource::feeUnwantedData; return; } @@ -1596,7 +1594,7 @@ PeerImp::onMessage (std::shared_ptr const& m) if (! 
app_.getHashRouter ().addSuppressionPeer( sha512Half(makeSlice(m->validation())), id_)) { - p_journal_.trace << "Validation: duplicate"; + JLOG(p_journal_.trace) << "Validation: duplicate"; return; } @@ -1605,7 +1603,7 @@ PeerImp::onMessage (std::shared_ptr const& m) if (!isTrusted && (sanity_.load () == Sanity::insane)) { - p_journal_.debug << + JLOG(p_journal_.debug) << "Validation: dropping untrusted from insane peer"; } if (isTrusted || !app_.getFeeTrack ().isLoadedLocal ()) @@ -1625,13 +1623,13 @@ PeerImp::onMessage (std::shared_ptr const& m) } else { - p_journal_.debug << + JLOG(p_journal_.debug) << "Validation: Dropping UNTRUSTED (load)"; } } catch (std::exception const& e) { - p_journal_.warning << + JLOG(p_journal_.warning) << "Validation: Exception, " << e.what(); fee_ = Resource::feeInvalidRequest; } @@ -1647,8 +1645,7 @@ PeerImp::onMessage (std::shared_ptr const& m) // this is a query if (send_queue_.size() >= Tuning::dropSendQueue) { - if (p_journal_.debug) p_journal_.debug << - "GetObject: Large send queue"; + JLOG(p_journal_.debug) << "GetObject: Large send queue"; return; } @@ -1702,7 +1699,7 @@ PeerImp::onMessage (std::shared_ptr const& m) } } - p_journal_.trace << + JLOG(p_journal_.trace) << "GetObj: " << reply.objects_size () << " of " << packet.objects_size (); send (std::make_shared (reply, protocol::mtGET_OBJECTS)); @@ -1724,17 +1721,19 @@ PeerImp::onMessage (std::shared_ptr const& m) { if (obj.ledgerseq () != pLSeq) { - if ((pLDo && (pLSeq != 0)) && - p_journal_.active(beast::Journal::Severity::kDebug)) - p_journal_.debug << + if (pLDo && (pLSeq != 0)) + { + JLOG(p_journal_.debug) << "GetObj: Full fetch pack for " << pLSeq; - + } pLSeq = obj.ledgerseq (); pLDo = !app_.getLedgerMaster ().haveLedger (pLSeq); if (!pLDo) - p_journal_.debug << - "GetObj: Late fetch pack for " << pLSeq; + { + JLOG(p_journal_.debug) << + "GetObj: Late fetch pack for " << pLSeq; + } else progress = true; } @@ -1754,10 +1753,11 @@ PeerImp::onMessage (std::shared_ptr const& m) } } - if ((pLDo && (pLSeq != 0)) && - p_journal_.active(beast::Journal::Severity::kDebug)) - p_journal_.debug << "GetObj: Partial fetch pack for " << pLSeq; - + if (pLDo && (pLSeq != 0)) + { + JLOG(p_journal_.debug) << + "GetObj: Partial fetch pack for " << pLSeq; + } if (packet.type () == protocol::TMGetObjectByHash::otFETCH_PACK) app_.getLedgerMaster ().gotFetchPack (progress, pLSeq); } @@ -1806,13 +1806,13 @@ PeerImp::doFetchPack (const std::shared_ptr& packet (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) || (app_.getJobQueue().getJobCount(jtPACK) > 10)) { - p_journal_.info << "Too busy to make fetch pack"; + JLOG(p_journal_.info) << "Too busy to make fetch pack"; return; } if (packet->ledgerhash ().size () != 32) { - p_journal_.warning << "FetchPack hash size malformed"; + JLOG(p_journal_.warning) << "FetchPack hash size malformed"; fee_ = Resource::feeInvalidRequest; return; } @@ -1884,8 +1884,10 @@ PeerImp::checkTransaction (int flags, if (tx->getStatus () == INVALID) { if (! reason.empty ()) - p_journal_.trace << "Exception checking transaction: " << reason; - + { + JLOG(p_journal_.trace) << + "Exception checking transaction: " << reason; + } app_.getHashRouter ().setFlags (stx->getTransactionID (), SF_BAD); charge (Resource::feeInvalidSignature); return; @@ -1910,7 +1912,7 @@ PeerImp::checkPropose (Job& job, { bool isTrusted = (job.getType () == jtPROPOSAL_t); - p_journal_.trace << + JLOG(p_journal_.trace) << "Checking " << (isTrusted ? 
"trusted" : "UNTRUSTED") << " proposal"; assert (packet); @@ -1918,7 +1920,7 @@ PeerImp::checkPropose (Job& job, if (! cluster() && ! proposal->checkSign ()) { - p_journal_.warning << + JLOG(p_journal_.warning) << "Proposal fails sig check"; charge (Resource::feeInvalidSignature); return; @@ -1934,13 +1936,13 @@ PeerImp::checkPropose (Job& job, if (app_.getOPs().getConsensusLCL() == proposal->getPrevLedger()) { // relay untrusted proposal - p_journal_.trace << + JLOG(p_journal_.trace) << "relaying UNTRUSTED proposal"; overlay_.relay(set, proposal->getSuppressionID()); } else { - p_journal_.debug << + JLOG(p_journal_.debug) << "Not relaying UNTRUSTED proposal"; } } @@ -1956,7 +1958,7 @@ PeerImp::checkValidation (STValidation::pointer val, uint256 signingHash = val->getSigningHash(); if (! cluster() && !val->isValid (signingHash)) { - p_journal_.warning << + JLOG(p_journal_.warning) << "Validation is invalid"; charge (Resource::feeInvalidRequest); return; @@ -1968,7 +1970,7 @@ PeerImp::checkValidation (STValidation::pointer val, } catch (std::exception const&) { - p_journal_.trace << + JLOG(p_journal_.trace) << "Exception processing validation"; charge (Resource::feeInvalidRequest); } @@ -2048,14 +2050,12 @@ PeerImp::getLedger (std::shared_ptr const& m) if (packet.itype () == protocol::liTS_CANDIDATE) { // Request is for a transaction candidate set - if (p_journal_.trace) p_journal_.trace << - "GetLedger: Tx candidate set"; + JLOG(p_journal_.trace) << "GetLedger: Tx candidate set"; if ((!packet.has_ledgerhash () || packet.ledgerhash ().size () != 32)) { charge (Resource::feeInvalidRequest); - if (p_journal_.warning) p_journal_.warning << - "GetLedger: Tx candidate set invalid"; + JLOG(p_journal_.warning) << "GetLedger: Tx candidate set invalid"; return; } @@ -2069,15 +2069,13 @@ PeerImp::getLedger (std::shared_ptr const& m) { if (packet.has_querytype () && !packet.has_requestcookie ()) { - if (p_journal_.debug) p_journal_.debug << - "GetLedger: Routing Tx set request"; + JLOG(p_journal_.debug) << "GetLedger: Routing Tx set request"; auto const v = getPeerWithTree( overlay_, txHash, this); if (! v) { - if (p_journal_.info) p_journal_.info << - "GetLedger: Route TX set failed"; + JLOG(p_journal_.info) << "GetLedger: Route TX set failed"; return; } @@ -2087,8 +2085,7 @@ PeerImp::getLedger (std::shared_ptr const& m) return; } - if (p_journal_.debug) p_journal_.debug << - "GetLedger: Can't provide map "; + JLOG(p_journal_.debug) << "GetLedger: Can't provide map "; charge (Resource::feeInvalidRequest); return; } @@ -2102,21 +2099,18 @@ PeerImp::getLedger (std::shared_ptr const& m) { if (send_queue_.size() >= Tuning::dropSendQueue) { - if (p_journal_.debug) p_journal_.debug << - "GetLedger: Large send queue"; + JLOG(p_journal_.debug) << "GetLedger: Large send queue"; return; } if (app_.getFeeTrack().isLoadedLocal() && ! 
cluster()) { - if (p_journal_.debug) p_journal_.debug << - "GetLedger: Too busy"; + JLOG(p_journal_.debug) << "GetLedger: Too busy"; return; } // Figure out what ledger they want - if (p_journal_.trace) p_journal_.trace << - "GetLedger: Received"; + JLOG(p_journal_.trace) << "GetLedger: Received"; Ledger::pointer ledger; if (packet.has_ledgerhash ()) @@ -2126,8 +2120,7 @@ PeerImp::getLedger (std::shared_ptr const& m) if (packet.ledgerhash ().size () != 32) { charge (Resource::feeInvalidRequest); - if (p_journal_.warning) p_journal_.warning << - "GetLedger: Invalid request"; + JLOG(p_journal_.warning) << "GetLedger: Invalid request"; return; } @@ -2137,9 +2130,10 @@ PeerImp::getLedger (std::shared_ptr const& m) ledger = app_.getLedgerMaster ().getLedgerByHash (ledgerhash); if (!ledger) - if (p_journal_.trace) p_journal_.trace << + { + JLOG(p_journal_.trace) << "GetLedger: Don't have " << ledgerhash; - + } if (!ledger && (packet.has_querytype () && !packet.has_requestcookie ())) { @@ -2152,16 +2146,14 @@ PeerImp::getLedger (std::shared_ptr const& m) overlay_, ledgerhash, seq, this); if (! v) { - if (p_journal_.trace) p_journal_.trace << - "GetLedger: Cannot route"; + JLOG(p_journal_.trace) << "GetLedger: Cannot route"; return; } packet.set_requestcookie (id ()); v->send (std::make_shared( packet, protocol::mtGET_LEDGER)); - if (p_journal_.debug) p_journal_.debug << - "GetLedger: Request routed"; + JLOG(p_journal_.debug) << "GetLedger: Request routed"; return; } } @@ -2170,15 +2162,16 @@ PeerImp::getLedger (std::shared_ptr const& m) if (packet.ledgerseq() < app_.getLedgerMaster().getEarliestFetch()) { - if (p_journal_.debug) p_journal_.debug << - "GetLedger: Early ledger request"; + JLOG(p_journal_.debug) << "GetLedger: Early ledger request"; return; } ledger = app_.getLedgerMaster ().getLedgerBySeq ( packet.ledgerseq ()); if (! 
ledger) - if (p_journal_.debug) p_journal_.debug << + { + JLOG(p_journal_.debug) << "GetLedger: Don't have " << packet.ledgerseq (); + } } else if (packet.has_ltype () && (packet.ltype () == protocol::ltCLOSED) ) { @@ -2191,8 +2184,7 @@ PeerImp::getLedger (std::shared_ptr const& m) else { charge (Resource::feeInvalidRequest); - if (p_journal_.warning) p_journal_.warning << - "GetLedger: Unknown request"; + JLOG(p_journal_.warning) << "GetLedger: Unknown request"; return; } @@ -2202,17 +2194,16 @@ PeerImp::getLedger (std::shared_ptr const& m) charge (Resource::feeInvalidRequest); if (ledger) - if (p_journal_.warning) p_journal_.warning << - "GetLedger: Invalid sequence"; - + { + JLOG(p_journal_.warning) << "GetLedger: Invalid sequence"; + } return; } if (!packet.has_ledgerseq() && (ledger->info().seq < app_.getLedgerMaster().getEarliestFetch())) { - if (p_journal_.debug) p_journal_.debug << - "GetLedger: Early ledger request"; + JLOG(p_journal_.debug) << "GetLedger: Early ledger request"; return; } @@ -2225,8 +2216,7 @@ PeerImp::getLedger (std::shared_ptr const& m) if (packet.itype () == protocol::liBASE) { // they want the ledger base data - if (p_journal_.trace) p_journal_.trace << - "GetLedger: Base data"; + JLOG(p_journal_.trace) << "GetLedger: Base data"; Serializer nData (128); ledger->addRaw (nData); reply.add_nodes ()->set_nodedata ( @@ -2281,14 +2271,13 @@ PeerImp::getLedger (std::shared_ptr const& m) if (!map || (packet.nodeids_size () == 0)) { - if (p_journal_.warning) p_journal_.warning << + JLOG(p_journal_.warning) << "GetLedger: Can't find map or empty request"; charge (Resource::feeInvalidRequest); return; } - if (p_journal_.trace) p_journal_.trace << - "GetLeder: " << logMe; + JLOG(p_journal_.trace) << "GetLedger: " << logMe; auto const depth = packet.has_querydepth() ? 
@@ -2303,8 +2292,7 @@ PeerImp::getLedger (std::shared_ptr const& m) if (!mn.isValid ()) { - if (p_journal_.warning) p_journal_.warning << - "GetLedger: Invalid node " << logMe; + JLOG(p_journal_.warning) << "GetLedger: Invalid node " << logMe; charge (Resource::feeInvalidRequest); return; } @@ -2319,7 +2307,7 @@ PeerImp::getLedger (std::shared_ptr const& m) if (map && map->getNodeFat (mn, nodeIDs, rawNodes, fatLeaves, depth)) { assert (nodeIDs.size () == rawNodes.size ()); - if (p_journal_.trace) p_journal_.trace << + JLOG(p_journal_.trace) << "GetLedger: getNodeFat got " << rawNodes.size () << " nodes"; std::vector::iterator nodeIDIterator; std::vector< Blob >::iterator rawNodeIterator; @@ -2338,7 +2326,7 @@ PeerImp::getLedger (std::shared_ptr const& m) } } else - p_journal_.warning << + JLOG(p_journal_.warning) << "GetLedger: getNodeFat returns false"; } catch (std::exception&) @@ -2357,12 +2345,12 @@ PeerImp::getLedger (std::shared_ptr const& m) if (!packet.has_ledgerhash ()) info += ", no hash specified"; - if (p_journal_.warning) p_journal_.warning << + JLOG(p_journal_.warning) << "getNodeFat( " << mn << ") throws exception: " << info; } } - if (p_journal_.info) p_journal_.info << + JLOG(p_journal_.info) << "Got request for " << packet.nodeids().size() << " nodes at depth " << depth << ", return " << reply.nodes().size() << " nodes"; diff --git a/src/ripple/overlay/impl/TMHello.cpp b/src/ripple/overlay/impl/TMHello.cpp index 97064f23d2..e1d28d32a0 100644 --- a/src/ripple/overlay/impl/TMHello.cpp +++ b/src/ripple/overlay/impl/TMHello.cpp @@ -71,14 +71,14 @@ makeSharedValue (SSL* ssl, beast::Journal journal) auto const cookie1 = hashLastMessage(ssl, SSL_get_finished); if (!cookie1) { - journal.error << "Cookie generation: local setup not complete"; + JLOG (journal.error) << "Cookie generation: local setup not complete"; return boost::none; } auto const cookie2 = hashLastMessage(ssl, SSL_get_peer_finished); if (!cookie2) { - journal.error << "Cookie generation: peer setup not complete"; + JLOG (journal.error) << "Cookie generation: peer setup not complete"; return boost::none; } @@ -88,7 +88,7 @@ makeSharedValue (SSL* ssl, beast::Journal journal) // is 0. Don't allow this. if (result == zero) { - journal.error << "Cookie generation: identical finished messages"; + JLOG(journal.error) << "Cookie generation: identical finished messages"; return boost::none; } @@ -357,26 +357,26 @@ verifyHello (protocol::TMHello const& h, if (h.nettime () > maxTime) { - journal.info << + JLOG(journal.info) << "Clock for is off by +" << h.nettime() - ourTime; return boost::none; } if (h.nettime () < minTime) { - journal.info << + JLOG(journal.info) << "Clock is off by -" << ourTime - h.nettime(); return boost::none; } - journal.trace << + JLOG(journal.trace) << "Connect: time offset " << static_cast(ourTime) - h.nettime(); } if (h.protoversionmin () > to_packed (BuildInfo::getCurrentProtocol())) { - journal.info << + JLOG(journal.info) << "Hello: Disconnect: Protocol mismatch [" << "Peer expects " << to_string ( BuildInfo::make_protocol(h.protoversion())) << @@ -397,7 +397,7 @@ verifyHello (protocol::TMHello const& h, if (*publicKey == app.nodeIdentity().first) { - journal.info << + JLOG(journal.info) << "Hello: Disconnect: Self connection."; return boost::none; } @@ -406,7 +406,7 @@ verifyHello (protocol::TMHello const& h, makeSlice (h.nodeproof()), false)) { // Unable to verify they have private key for claimed public key. 
- journal.info << + JLOG(journal.info) << "Hello: Disconnect: Failed to verify session."; return boost::none; } @@ -418,7 +418,7 @@ verifyHello (protocol::TMHello const& h, { // Remote asked us to confirm connection is from // correct IP - journal.info << + JLOG(journal.info) << "Hello: Disconnect: Peer IP is " << beast::IP::to_string (remote.to_v4()) << " not " << @@ -432,7 +432,7 @@ verifyHello (protocol::TMHello const& h, { // We know our public IP and peer reports connection // from some other IP - journal.info << + JLOG(journal.info) << "Hello: Disconnect: Our IP is " << beast::IP::to_string (public_ip.to_v4()) << " not " << diff --git a/src/ripple/peerfinder/impl/StoreSqdb.h b/src/ripple/peerfinder/impl/StoreSqdb.h index b76537660e..d580d8792c 100644 --- a/src/ripple/peerfinder/impl/StoreSqdb.h +++ b/src/ripple/peerfinder/impl/StoreSqdb.h @@ -55,8 +55,8 @@ public: { sociConfig.open (m_session); - m_journal.info << "Opening database at '" << sociConfig.connectionString () - << "'"; + JLOG(m_journal.info) << + "Opening database at '" << sociConfig.connectionString () << "'"; init (); update (); @@ -91,7 +91,7 @@ public: } else { - m_journal.error << + JLOG(m_journal.error) << "Bad address string '" << s << "' in Bootcache table"; } } @@ -152,14 +152,16 @@ public: version = vO.value_or (0); - m_journal.info << + JLOG(m_journal.info) << "Opened version " << version << " database"; } { if (version < currentSchemaVersion) - m_journal.info << + { + JLOG(m_journal.info) << "Updating database to version " << currentSchemaVersion; + } else if (version > currentSchemaVersion) { Throw ( @@ -221,7 +223,7 @@ public: } else { - m_journal.error << + JLOG(m_journal.error) << "Bad address string '" << s << "' in Bootcache table"; } } diff --git a/src/ripple/resource/impl/Logic.h b/src/ripple/resource/impl/Logic.h index 1ad144f5c9..72ea3d045d 100644 --- a/src/ripple/resource/impl/Logic.h +++ b/src/ripple/resource/impl/Logic.h @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -131,7 +132,7 @@ public: } } - m_journal.debug << + JLOG(m_journal.debug) << "New inbound endpoint " << *entry; return Consumer (*this, *entry); @@ -160,7 +161,7 @@ public: } } - m_journal.debug << + JLOG(m_journal.debug) << "New outbound endpoint " << *entry; return Consumer (*this, *entry); @@ -194,7 +195,7 @@ public: } } - m_journal.debug << + JLOG(m_journal.debug) << "New unlimited endpoint " << *entry; return Consumer (*this, *entry); @@ -346,7 +347,7 @@ public: { if (iter->whenExpires <= elapsed) { - m_journal.debug << "Expired " << *iter; + JLOG(m_journal.debug) << "Expired " << *iter; auto table_iter = table_.find (*iter->key); ++iter; @@ -412,7 +413,7 @@ public: std::lock_guard _(lock_); if (--entry.refcount == 0) { - m_journal.debug << + JLOG(m_journal.debug) << "Inactive " << entry; switch (entry.key->kind) @@ -443,7 +444,7 @@ public: std::lock_guard _(lock_); clock_type::time_point const now (m_clock.now()); int const balance (entry.add (fee.cost(), now)); - m_journal.trace << + JLOG(m_journal.trace) << "Charging " << entry << " for " << fee; return disposition (balance); } @@ -464,10 +465,10 @@ public: entry.lastWarningTime = elapsed; } if (notify) - m_journal.info << - "Load warning: " << entry; - if (notify) + { + JLOG(m_journal.info) << "Load warning: " << entry; ++m_stats.warn; + } return notify; } @@ -482,7 +483,7 @@ public: int const balance (entry.balance (now)); if (balance >= dropThreshold) { - m_journal.warning << + JLOG(m_journal.warning) << "Consumer entry " << entry << " 
dropped with balance " << balance << " at or above drop threshold " << dropThreshold; diff --git a/src/ripple/rpc/tests/LedgerRequestRPC.test.cpp b/src/ripple/rpc/tests/LedgerRequestRPC.test.cpp index 90a923b0ec..310b607eaf 100644 --- a/src/ripple/rpc/tests/LedgerRequestRPC.test.cpp +++ b/src/ripple/rpc/tests/LedgerRequestRPC.test.cpp @@ -125,7 +125,6 @@ public: auto const result = env.rpc("ledger_request", ledgerHash); - log << result; expect(!RPC::contains_error(result[jss::result]) && result[jss::result][jss::have_header] == false); } diff --git a/src/ripple/server/impl/BaseHTTPPeer.h b/src/ripple/server/impl/BaseHTTPPeer.h index 02d780b720..490a4ab582 100644 --- a/src/ripple/server/impl/BaseHTTPPeer.h +++ b/src/ripple/server/impl/BaseHTTPPeer.h @@ -20,6 +20,7 @@ #ifndef RIPPLE_SERVER_BASEHTTPPEER_H_INCLUDED #define RIPPLE_SERVER_BASEHTTPPEER_H_INCLUDED +#include #include #include #include @@ -233,7 +234,7 @@ BaseHTTPPeer::BaseHTTPPeer (Port const& port, Handler& handler, static std::atomic sid; nid_ = ++sid; id_ = std::string("#") + std::to_string(nid_) + " "; - if (journal_.trace) journal_.trace << id_ << + JLOG(journal_.trace) << id_ << "accept: " << remote_address_.address(); when_ = clock_type::now(); } @@ -242,7 +243,7 @@ template BaseHTTPPeer::~BaseHTTPPeer() { handler_.onClose(session(), ec_); - if (journal_.trace) journal_.trace << id_ << + JLOG(journal_.trace) << id_ << "destroyed: " << request_count_ << ((request_count_ == 1) ? " request" : " requests"); } @@ -268,7 +269,7 @@ BaseHTTPPeer::fail (error_code ec, char const* what) if (! ec_ && ec != boost::asio::error::operation_aborted) { ec_ = ec; - if (journal_.trace) journal_.trace << id_ << + JLOG(journal_.trace) << id_ << std::string(what) << ": " << ec.message(); impl().stream_.lowest_layer().close (ec); } diff --git a/src/ripple/server/impl/ServerHandlerImp.cpp b/src/ripple/server/impl/ServerHandlerImp.cpp index d4514cc81d..b40c2d97de 100644 --- a/src/ripple/server/impl/ServerHandlerImp.cpp +++ b/src/ripple/server/impl/ServerHandlerImp.cpp @@ -357,7 +357,7 @@ ServerHandlerImp::processRequest (Port const& port, Resource::Charge loadType = Resource::feeReferenceRPC; - m_journal.debug << "Query: " << strMethod << params; + JLOG(m_journal.debug) << "Query: " << strMethod << params; // Provide the JSON-RPC method as the field "command" in the request. params[jss::command] = strMethod; diff --git a/src/ripple/shamap/impl/SHAMap.cpp b/src/ripple/shamap/impl/SHAMap.cpp index ffd260e8e9..456dbac739 100644 --- a/src/ripple/shamap/impl/SHAMap.cpp +++ b/src/ripple/shamap/impl/SHAMap.cpp @@ -106,7 +106,7 @@ SHAMap::dirtyUp (SharedPtrNodeStack& stack, node->setChild (branch, child); #ifdef ST_DEBUG - if (journal_.trace) journal_.trace << + JLOG(journal_.trace) << "dirtyUp sets branch " << branch << " to " << prevHash; #endif child = std::move (node); @@ -165,7 +165,7 @@ SHAMap::fetchNodeFromDB (SHAMapHash const& hash) const } catch (std::exception const&) { - if (journal_.warning) journal_.warning << + JLOG(journal_.warning) << "Invalid DB node " << hash; return std::shared_ptr (); } @@ -804,7 +804,7 @@ SHAMap::updateGiveItem (std::shared_ptr const& item, if (!node->setItem (item, !isTransaction ? SHAMapTreeNode::tnACCOUNT_STATE : (hasMeta ? 
SHAMapTreeNode::tnTRANSACTION_MD : SHAMapTreeNode::tnTRANSACTION_NM))) { - journal_.trace << + JLOG(journal_.trace) << "SHAMap setItem, no change"; return true; } @@ -1033,8 +1033,7 @@ SHAMap::walkSubTree (bool doWrite, NodeObjectType t, std::uint32_t seq) void SHAMap::dump (bool hash) const { int leafCount = 0; - if (journal_.info) journal_.info << - " MAP Contains"; + JLOG(journal_.info) << " MAP Contains"; std::stack > stack; stack.push ({root_.get (), SHAMapNodeID ()}); @@ -1045,11 +1044,11 @@ void SHAMap::dump (bool hash) const auto nodeID = stack.top().second; stack.pop(); - if (journal_.info) journal_.info << - node->getString (nodeID); + JLOG(journal_.info) << node->getString (nodeID); if (hash) - if (journal_.info) journal_.info << - "Hash: " << node->getNodeHash(); + { + JLOG(journal_.info) << "Hash: " << node->getNodeHash(); + } if (node->isInner ()) { @@ -1072,8 +1071,7 @@ void SHAMap::dump (bool hash) const } while (!stack.empty ()); - if (journal_.info) journal_.info << - leafCount << " resident leaves"; + JLOG(journal_.info) << leafCount << " resident leaves"; } std::shared_ptr SHAMap::getCache (SHAMapHash const& hash) const diff --git a/src/ripple/shamap/impl/SHAMapSync.cpp b/src/ripple/shamap/impl/SHAMapSync.cpp index c7113b038e..504c71cdac 100644 --- a/src/ripple/shamap/impl/SHAMapSync.cpp +++ b/src/ripple/shamap/impl/SHAMapSync.cpp @@ -129,8 +129,8 @@ SHAMap::getMissingNodes(std::size_t max, SHAMapSyncFilter* filter) { if (generation == 0) clearSynching(); - else if (journal_.warning) journal_.warning << - "synching empty tree"; + else + JLOG(journal_.warning) << "synching empty tree"; return ret; } @@ -291,9 +291,11 @@ SHAMap::getMissingNodes(std::size_t max, SHAMapSyncFilter* filter) (std::chrono::steady_clock::now() - after); if ((count > 50) || (elapsed.count() > 50)) - journal_.debug << "getMissingNodes reads " << + { + JLOG(journal_.debug) << "getMissingNodes reads " << count << " nodes (" << hits << " hits) in " << elapsed.count() << " + " << process_time.count() << " ms"; + } if (max <= 0) return ret; @@ -343,15 +345,14 @@ bool SHAMap::getNodeFat (SHAMapNodeID wanted, if (!node || (nodeID != wanted)) { - if (journal_.warning) journal_.warning << + JLOG(journal_.warning) << "peer requested node that is not in the map: " << wanted; return false; } if (node->isInner() && static_cast(node)->isEmpty()) { - if (journal_.warning) journal_.warning << - "peer requests empty node"; + JLOG(journal_.warning) << "peer requests empty node"; return false; } @@ -422,8 +423,7 @@ SHAMapAddNode SHAMap::addRootNode (SHAMapHash const& hash, Blob const& rootNode, // we already have a root_ node if (root_->getNodeHash ().isNonZero ()) { - if (journal_.trace) journal_.trace << - "got root node, already have one"; + JLOG(journal_.trace) << "got root node, already have one"; assert (root_->getNodeHash () == hash); return SHAMapAddNode::duplicate (); } @@ -462,8 +462,7 @@ SHAMap::addKnownNode (const SHAMapNodeID& node, Blob const& rawNode, if (!isSynching ()) { - if (journal_.trace) journal_.trace << - "AddKnownNode while not synching"; + JLOG(journal_.trace) << "AddKnownNode while not synching"; return SHAMapAddNode::duplicate (); } @@ -480,8 +479,7 @@ SHAMap::addKnownNode (const SHAMapNodeID& node, Blob const& rawNode, auto inner = static_cast(iNode); if (inner->isEmptyBranch (branch)) { - if (journal_.warning) journal_.warning << - "Add known node for empty branch" << node; + JLOG(journal_.warning) << "Add known node for empty branch" << node; return SHAMapAddNode::invalid (); } @@ 
-497,11 +495,9 @@ SHAMap::addKnownNode (const SHAMapNodeID& node, Blob const& rawNode, if (iNodeID != node) { // Either this node is broken or we didn't request it (yet) - if (journal_.warning) journal_.warning << - "unable to hook node " << node; - if (journal_.info) journal_.info << - " stuck at " << iNodeID; - if (journal_.info) journal_.info << + JLOG(journal_.warning) << "unable to hook node " << node; + JLOG(journal_.info) << " stuck at " << iNodeID; + JLOG(journal_.info) << "got depth=" << node.getDepth () << ", walked to= " << iNodeID.getDepth (); return SHAMapAddNode::invalid (); @@ -512,8 +508,7 @@ SHAMap::addKnownNode (const SHAMapNodeID& node, Blob const& rawNode, if (!newNode || !newNode->isValid() || childHash != newNode->getNodeHash ()) { - if (journal_.warning) journal_.warning << - "Corrupt node received"; + JLOG(journal_.warning) << "Corrupt node received"; return SHAMapAddNode::invalid (); } @@ -541,8 +536,7 @@ SHAMap::addKnownNode (const SHAMapNodeID& node, Blob const& rawNode, } } - if (journal_.trace) journal_.trace << - "got node, already had it (late)"; + JLOG(journal_.trace) << "got node, already had it (late)"; return SHAMapAddNode::duplicate (); } @@ -562,14 +556,12 @@ bool SHAMap::deepCompare (SHAMap& other) const if (!node || !otherNode) { - if (journal_.info) journal_.info << - "unable to fetch node"; + JLOG(journal_.info) << "unable to fetch node"; return false; } else if (otherNode->getNodeHash () != node->getNodeHash ()) { - if (journal_.warning) journal_.warning << - "node hash mismatch"; + JLOG(journal_.warning) << "node hash mismatch"; return false; } @@ -606,8 +598,7 @@ bool SHAMap::deepCompare (SHAMap& other) const auto otherNext = other.descend(other_inner, i); if (!next || !otherNext) { - if (journal_.warning) journal_.warning << - "unable to fetch inner node"; + JLOG(journal_.warning) << "unable to fetch inner node"; return false; } stack.push ({next, otherNext});
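Reviewer note (not part of the patch): every hunk above rewrites either a bare journal.severity << ... stream call or an explicit "if (journal.severity) journal.severity << ..." guard as JLOG(journal.severity) << ... . The sketch below is a minimal, self-contained illustration of how such a guard macro can work; the Stream and Journal types here are simplified stand-ins invented for this example and are not rippled's actual beast::Journal API.

    #include <iostream>

    // Simplified stand-ins for illustration only -- not the real
    // beast::Journal / beast::Journal::Stream types used in the diff.
    struct Stream
    {
        bool enabled;

        explicit Stream (bool on) : enabled (on) {}

        explicit operator bool() const { return enabled; }

        template <class T>
        Stream const& operator<< (T const& t) const
        {
            std::clog << t;
            return *this;
        }
    };

    struct Journal
    {
        Stream trace   {false};   // disabled in this example
        Stream debug   {false};   // disabled in this example
        Stream info    {true};
        Stream warning {true};
        Stream error   {true};
        Stream fatal   {true};
    };

    // The guard macro: when the stream is disabled, the else branch is
    // never taken, so the chained << expressions (and the cost of building
    // their arguments) are skipped entirely.
    #define JLOG(stream) if (!(stream)) { } else (stream)

    int main()
    {
        Journal j;

        JLOG(j.debug) << "never evaluated: debug is disabled\n";
        JLOG(j.warning) << "printed: warning is enabled\n";

        // Equivalent to the older pattern the diff removes:
        //     if (j.warning) j.warning << "...";
        // but without repeating the severity check by hand at every
        // call site.
        return 0;
    }

The practical effect of the conversion is uniform: call sites no longer need a hand-written severity check to avoid formatting arguments for disabled levels, and the ones that never had such a check gain it for free.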