20 #include <ripple/app/consensus/RCLConsensus.h>
21 #include <ripple/app/consensus/RCLValidations.h>
22 #include <ripple/app/ledger/AcceptedLedger.h>
23 #include <ripple/app/ledger/InboundLedgers.h>
24 #include <ripple/app/ledger/LedgerMaster.h>
25 #include <ripple/app/ledger/LedgerToJson.h>
26 #include <ripple/app/ledger/LocalTxs.h>
27 #include <ripple/app/ledger/OpenLedger.h>
28 #include <ripple/app/ledger/OrderBookDB.h>
29 #include <ripple/app/ledger/TransactionMaster.h>
30 #include <ripple/app/main/LoadManager.h>
31 #include <ripple/app/misc/AmendmentTable.h>
32 #include <ripple/app/misc/HashRouter.h>
33 #include <ripple/app/misc/LoadFeeTrack.h>
34 #include <ripple/app/misc/NetworkOPs.h>
35 #include <ripple/app/misc/Transaction.h>
36 #include <ripple/app/misc/TxQ.h>
37 #include <ripple/app/misc/ValidatorKeys.h>
38 #include <ripple/app/misc/ValidatorList.h>
39 #include <ripple/app/misc/impl/AccountTxPaging.h>
40 #include <ripple/app/rdb/backend/PostgresDatabase.h>
41 #include <ripple/app/rdb/backend/SQLiteDatabase.h>
42 #include <ripple/app/reporting/ReportingETL.h>
43 #include <ripple/app/tx/apply.h>
44 #include <ripple/basics/PerfLog.h>
45 #include <ripple/basics/UptimeClock.h>
46 #include <ripple/basics/mulDiv.h>
47 #include <ripple/basics/safe_cast.h>
48 #include <ripple/beast/rfc2616.h>
49 #include <ripple/beast/utility/rngfill.h>
50 #include <ripple/consensus/Consensus.h>
51 #include <ripple/consensus/ConsensusParms.h>
52 #include <ripple/crypto/RFC1751.h>
53 #include <ripple/crypto/csprng.h>
54 #include <ripple/json/to_string.h>
55 #include <ripple/net/RPCErr.h>
56 #include <ripple/nodestore/DatabaseShard.h>
57 #include <ripple/overlay/Cluster.h>
58 #include <ripple/overlay/Overlay.h>
59 #include <ripple/overlay/predicates.h>
60 #include <ripple/protocol/BuildInfo.h>
61 #include <ripple/protocol/Feature.h>
62 #include <ripple/protocol/STParsedJSON.h>
63 #include <ripple/resource/Fees.h>
64 #include <ripple/resource/ResourceManager.h>
65 #include <ripple/rpc/BookChanges.h>
66 #include <ripple/rpc/DeliveredAmount.h>
67 #include <ripple/rpc/impl/RPCHelpers.h>
68 #include <boost/asio/ip/host_name.hpp>
69 #include <boost/asio/steady_timer.hpp>
144 std::chrono::steady_clock::time_point
start_ =
205 return !(*
this != b);
224 boost::asio::io_service& io_svc,
238 app_.logs().journal(
"FeeVote")),
241 app.getInboundTransactions(),
242 beast::get_abstract_clock<
std::chrono::steady_clock>(),
244 app_.logs().journal(
"LedgerConsensus"))
423 getServerInfo(
bool human,
bool admin,
bool counters)
override;
450 TER result)
override;
493 bool historyOnly)
override;
499 bool historyOnly)
override;
567 boost::system::error_code ec;
572 <<
"NetworkOPs: heartbeatTimer cancel error: "
581 <<
"NetworkOPs: clusterTimer cancel error: "
590 <<
"NetworkOPs: accountHistoryTxTimer cancel error: "
595 using namespace std::chrono_literals;
605 boost::asio::steady_timer& timer,
620 const STTx& transaction,
780 template <
class Handler>
782 Handler
const& handler,
784 :
hook(collector->make_hook(handler))
787 "Disconnected_duration"))
790 "Connected_duration"))
792 collector->make_gauge(
"State_Accounting",
"Syncing_duration"))
795 "Tracking_duration"))
797 collector->make_gauge(
"State_Accounting",
"Full_duration"))
800 "Disconnected_transitions"))
803 "Connected_transitions"))
806 "Syncing_transitions"))
809 "Tracking_transitions"))
811 collector->make_gauge(
"State_Accounting",
"Full_transitions"))
840 {
"disconnected",
"connected",
"syncing",
"tracking",
"full"}};
902 static std::string const hostname = boost::asio::ip::host_name();
909 static std::string const shroudedHostId = [
this]() {
915 return shroudedHostId;
927 boost::asio::steady_timer& timer,
934 [
this, onExpire, onError](boost::system::error_code
const& e) {
935 if ((e.value() == boost::system::errc::success) &&
936 (!m_job_queue.isStopped()))
941 if (e.value() != boost::system::errc::success &&
942 e.value() != boost::asio::error::operation_aborted)
945 JLOG(m_journal.error())
946 <<
"Timer got error '" << e.message()
947 <<
"'. Restarting timer.";
952 timer.expires_from_now(expiry_time);
953 timer.async_wait(std::move(*optionalCountedHandler));
964 m_job_queue.addJob(jtNETOP_TIMER,
"NetOPs.heartbeat", [this]() {
965 processHeartbeatTimer();
968 [
this]() { setHeartbeatTimer(); });
972 NetworkOPsImp::setClusterTimer()
974 using namespace std::chrono_literals;
980 processClusterTimer();
983 [
this]() { setClusterTimer(); });
989 JLOG(m_journal.debug()) <<
"Scheduling AccountHistory job for account "
991 using namespace std::chrono_literals;
993 accountHistoryTxTimer_,
995 [
this, subInfo]() { addAccountHistoryJob(subInfo); },
996 [
this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
// NOTE(review): this chunk is a lossy extraction — the original file's line
// numbers are fused into the text and many lines are missing between samples.
// Code below is left byte-identical; only comments are added.
//
// processHeartbeatTimer: periodic health tick. Drops to DISCONNECTED when the
// overlay peer count falls below minPeerCount_, promotes back to CONNECTED
// when peers return, advances the consensus state machine, reports phase
// changes, and always re-arms the heartbeat timer.
1000 NetworkOPsImp::processHeartbeatTimer()
// Number of currently connected peers in the overlay network.
1009 std::size_t const numPeers = app_.overlay().size();
1012 if (numPeers < minPeerCount_)
1014 if (mMode != OperatingMode::DISCONNECTED)
1016 setMode(OperatingMode::DISCONNECTED);
1017 JLOG(m_journal.warn())
1018 <<
"Node count (" << numPeers <<
") has fallen "
1019 <<
"below required minimum (" << minPeerCount_ <<
").";
// Keep the heartbeat running even while disconnected so we can recover.
1026 setHeartbeatTimer();
// Enough peers again: leave DISCONNECTED.
1030 if (mMode == OperatingMode::DISCONNECTED)
1032 setMode(OperatingMode::CONNECTED);
1033 JLOG(m_journal.info())
1034 <<
"Node count (" << numPeers <<
") is sufficient.";
// Re-assert the current mode for SYNCING/CONNECTED. Lines are missing here
// in this extraction — presumably this refreshes mode bookkeeping rather than
// changing state; TODO confirm against the full file.
1039 if (mMode == OperatingMode::SYNCING)
1040 setMode(OperatingMode::SYNCING);
1041 else if (mMode == OperatingMode::CONNECTED)
1042 setMode(OperatingMode::CONNECTED);
// Drive the consensus round using the network-adjusted close time.
1045 mConsensus.timerEntry(app_.timeKeeper().closeTime());
// Publish a consensus-phase change to subscribers only when it changed.
1048 if (mLastConsensusPhase != currPhase)
1050 reportConsensusStateChange(currPhase);
1051 mLastConsensusPhase = currPhase;
// Re-arm for the next heartbeat tick.
1054 setHeartbeatTimer();
// NOTE(review): lossy extraction — embedded original line numbers, statements
// split mid-token, lines missing between samples. Code left byte-identical.
//
// processClusterTimer: refreshes this node's entry in the cluster table, and
// if an update is due, broadcasts a TMCluster message (cluster node names and
// load-gossip sources) to peers that accept cluster traffic.
1058 NetworkOPsImp::processClusterTimer()
1060 using namespace std::chrono_literals;
// Report our local fee only while our validated ledger is fresh (<= 4 min);
// the ternary's else-branch value is not visible in this extraction.
1061 bool const update = app_.cluster().update(
1062 app_.nodeIdentity().first,
1064 (m_ledgerMaster.getValidatedLedgerAge() <= 4
min)
1065 ? app_.getFeeTrack().getLocalFee()
1067 app_.timeKeeper().now());
// Rate-limited: nothing to send this tick.
1071 JLOG(m_journal.debug()) <<
"Too soon to send cluster update";
// Build the protobuf cluster report from every known cluster node.
1076 protocol::TMCluster cluster;
1077 app_.cluster().for_each([&cluster](
ClusterNode const& node) {
1078 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1083 n.set_nodename(node.
name());
// Attach load-gossip items (source cost/balance) to the same message.
1087 for (
auto& item : gossip.
items)
1089 protocol::TMLoadSource& node = *cluster.add_loadsources();
1091 node.set_cost(item.balance);
// Relay to peers matching the (not fully visible) send_if predicate.
1093 app_.overlay().foreach(
send_if(
1094 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1105 if (mode == OperatingMode::FULL && admin)
1107 auto const consensusMode = mConsensus.mode();
1108 if (consensusMode != ConsensusMode::wrongLedger)
1110 if (consensusMode == ConsensusMode::proposing)
1113 if (mConsensus.validating())
1114 return "validating";
1124 if (isNeedNetworkLedger())
1133 auto const txid = trans->getTransactionID();
1134 auto const flags = app_.getHashRouter().getFlags(txid);
1136 if ((flags & SF_BAD) != 0)
1138 JLOG(m_journal.warn()) <<
"Submitted transaction cached bad";
1145 app_.getHashRouter(),
1147 m_ledgerMaster.getValidatedRules(),
1150 if (validity != Validity::Valid)
1152 JLOG(m_journal.warn())
1153 <<
"Submitted transaction invalid: " << reason;
1159 JLOG(m_journal.warn()) <<
"Exception checking transaction" << txid;
1166 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1168 m_job_queue.addJob(
jtTRANSACTION,
"submitTxn", [
this, tx]() {
1170 processTransaction(t,
false,
false, FailHard::no);
1175 NetworkOPsImp::processTransaction(
1181 auto ev = m_job_queue.makeLoadEvent(
jtTXN_PROC,
"ProcessTXN");
1182 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1184 if ((newFlags & SF_BAD) != 0)
1187 JLOG(m_journal.warn()) << transaction->getID() <<
": cached bad!\n";
1188 transaction->setStatus(
INVALID);
1196 auto const view = m_ledgerMaster.getCurrentLedger();
1198 app_.getHashRouter(),
1199 *transaction->getSTransaction(),
1202 assert(validity == Validity::Valid);
1205 if (validity == Validity::SigBad)
1207 JLOG(m_journal.info()) <<
"Transaction has bad signature: " << reason;
1208 transaction->setStatus(
INVALID);
1210 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1215 app_.getMasterTransaction().canonicalize(&transaction);
1218 doTransactionSync(transaction, bUnlimited, failType);
1220 doTransactionAsync(transaction, bUnlimited, failType);
1224 NetworkOPsImp::doTransactionAsync(
1231 if (transaction->getApplying())
1234 mTransactions.push_back(
1236 transaction->setApplying();
1238 if (mDispatchState == DispatchState::none)
1240 if (m_job_queue.addJob(
1241 jtBATCH,
"transactionBatch", [
this]() { transactionBatch(); }))
1243 mDispatchState = DispatchState::scheduled;
1249 NetworkOPsImp::doTransactionSync(
1256 if (!transaction->getApplying())
1258 mTransactions.push_back(
1260 transaction->setApplying();
1265 if (mDispatchState == DispatchState::running)
1274 if (mTransactions.size())
1277 if (m_job_queue.addJob(
jtBATCH,
"transactionBatch", [
this]() {
1281 mDispatchState = DispatchState::scheduled;
1285 }
while (transaction->getApplying());
1289 NetworkOPsImp::transactionBatch()
1293 if (mDispatchState == DispatchState::running)
1296 while (mTransactions.size())
1307 mTransactions.
swap(transactions);
1308 assert(!transactions.
empty());
1310 assert(mDispatchState != DispatchState::running);
1311 mDispatchState = DispatchState::running;
1317 bool changed =
false;
1320 m_ledgerMaster.peekMutex(), std::defer_lock};
1331 if (e.failType == FailHard::yes)
1334 auto const result = app_.getTxQ().apply(
1335 app_, view, e.transaction->getSTransaction(), flags, j);
1336 e.result = result.first;
1337 e.applied = result.second;
1338 changed = changed || result.second;
1347 if (
auto const l = m_ledgerMaster.getValidatedLedger())
1348 validatedLedgerIndex = l->info().seq;
1350 auto newOL = app_.openLedger().current();
1353 e.transaction->clearSubmitResult();
1357 pubProposedTransaction(
1358 newOL, e.transaction->getSTransaction(), e.result);
1359 e.transaction->setApplied();
1362 e.transaction->setResult(e.result);
1365 app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);
1374 JLOG(m_journal.info())
1375 <<
"TransactionResult: " << token <<
": " << human;
1380 bool addLocal = e.local;
1384 JLOG(m_journal.debug())
1385 <<
"Transaction is now included in open ledger";
1386 e.transaction->setStatus(
INCLUDED);
1388 auto const& txCur = e.transaction->getSTransaction();
1389 auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);
1394 auto t = std::make_shared<Transaction>(trans, reason, app_);
1395 submit_held.
emplace_back(t,
false,
false, FailHard::no);
1402 JLOG(m_journal.info()) <<
"Transaction is obsolete";
1403 e.transaction->setStatus(
OBSOLETE);
1407 JLOG(m_journal.debug())
1408 <<
"Transaction is likely to claim a"
1409 <<
" fee, but is queued until fee drops";
1411 e.transaction->setStatus(
HELD);
1415 m_ledgerMaster.addHeldTransaction(e.transaction);
1416 e.transaction->setQueued();
1417 e.transaction->setKept();
1421 if (e.failType != FailHard::yes)
1424 JLOG(m_journal.debug())
1425 <<
"Transaction should be held: " << e.result;
1426 e.transaction->setStatus(
HELD);
1427 m_ledgerMaster.addHeldTransaction(e.transaction);
1428 e.transaction->setKept();
1433 JLOG(m_journal.debug())
1434 <<
"Status other than success " << e.result;
1435 e.transaction->setStatus(
INVALID);
1438 auto const enforceFailHard =
1439 e.failType == FailHard::yes && !
isTesSuccess(e.result);
1441 if (addLocal && !enforceFailHard)
1443 m_localTX->push_back(
1444 m_ledgerMaster.getCurrentLedgerIndex(),
1445 e.transaction->getSTransaction());
1446 e.transaction->setKept();
1450 ((mMode != OperatingMode::FULL) &&
1451 (e.failType != FailHard::yes) && e.local) ||
1456 app_.getHashRouter().shouldRelay(e.transaction->getID());
1460 protocol::TMTransaction tx;
1463 e.transaction->getSTransaction()->add(s);
1464 tx.set_rawtransaction(s.
data(), s.
size());
1465 tx.set_status(protocol::tsCURRENT);
1466 tx.set_receivetimestamp(
1467 app_.timeKeeper().now().time_since_epoch().count());
1470 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1471 e.transaction->setBroadcast();
1475 if (validatedLedgerIndex)
1477 auto [fee, accountSeq, availableSeq] =
1478 app_.getTxQ().getTxRequiredFeeAndSeq(
1479 *newOL, e.transaction->getSTransaction());
1480 e.transaction->setCurrentLedgerState(
1481 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1489 e.transaction->clearApplying();
1491 if (!submit_held.
empty())
1493 if (mTransactions.empty())
1494 mTransactions.swap(submit_held);
1496 for (
auto& e : submit_held)
1497 mTransactions.push_back(std::move(e));
1502 mDispatchState = DispatchState::none;
1510 NetworkOPsImp::getOwnerInfo(
1515 auto root = keylet::ownerDir(account);
1516 auto sleNode = lpLedger->read(keylet::page(root));
1523 for (
auto const& uDirEntry : sleNode->getFieldV256(
sfIndexes))
1525 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1528 switch (sleCur->getType())
1531 if (!jvObjects.
isMember(jss::offers))
1532 jvObjects[jss::offers] =
1535 jvObjects[jss::offers].
append(
1536 sleCur->getJson(JsonOptions::none));
1540 if (!jvObjects.
isMember(jss::ripple_lines))
1542 jvObjects[jss::ripple_lines] =
1546 jvObjects[jss::ripple_lines].
append(
1547 sleCur->getJson(JsonOptions::none));
1562 sleNode = lpLedger->read(keylet::page(root, uNodeDir));
// True when this server cannot usefully participate: either amendment-blocked
// or UNL-blocked (see the individual accessors below).
1576 NetworkOPsImp::isBlocked()
1578 return isAmendmentBlocked() || isUNLBlocked();
// Accessor: whether an unsupported amendment has blocked this server.
1582 NetworkOPsImp::isAmendmentBlocked()
1584 return amendmentBlocked_;
// Latch the amendment-blocked flag and drop out of any higher operating mode
// down to CONNECTED (a blocked server must not track/validate).
1588 NetworkOPsImp::setAmendmentBlocked()
1590 amendmentBlocked_ =
true;
1591 setMode(OperatingMode::CONNECTED);
// Warned-but-not-yet-blocked: the warning is suppressed once the server is
// actually amendment blocked (the stronger condition subsumes it).
1595 NetworkOPsImp::isAmendmentWarned()
1597 return !amendmentBlocked_ && amendmentWarned_;
// Raise the amendment-majority warning flag (no mode change, unlike
// setAmendmentBlocked above).
1601 NetworkOPsImp::setAmendmentWarned()
1603 amendmentWarned_ =
true;
// Clear the amendment-majority warning flag.
1607 NetworkOPsImp::clearAmendmentWarned()
1609 amendmentWarned_ =
false;
// UNL-blocked accessors (validator list expired/unusable). NOTE(review): the
// bodies of isUNLBlocked() and the flag-set line of setUNLBlocked() are not
// visible in this extraction — presumably they read/write unlBlocked_,
// mirroring clearUNLBlocked() below; confirm against the full file.
1613 NetworkOPsImp::isUNLBlocked()
// Latch UNL-blocked and fall back to CONNECTED, matching setAmendmentBlocked.
1619 NetworkOPsImp::setUNLBlocked()
1622 setMode(OperatingMode::CONNECTED);
// Clear the UNL-blocked flag.
1626 NetworkOPsImp::clearUNLBlocked()
1628 unlBlocked_ =
false;
1632 NetworkOPsImp::checkLastClosedLedger(
1641 JLOG(m_journal.trace()) <<
"NetworkOPsImp::checkLastClosedLedger";
1643 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1648 uint256 closedLedger = ourClosed->info().hash;
1649 uint256 prevClosedLedger = ourClosed->info().parentHash;
1650 JLOG(m_journal.trace()) <<
"OurClosed: " << closedLedger;
1651 JLOG(m_journal.trace()) <<
"PrevClosed: " << prevClosedLedger;
1656 auto& validations = app_.getValidations();
1657 JLOG(m_journal.debug())
1658 <<
"ValidationTrie " <<
Json::Compact(validations.getJsonTrie());
1662 peerCounts[closedLedger] = 0;
1663 if (mMode >= OperatingMode::TRACKING)
1664 peerCounts[closedLedger]++;
1666 for (
auto& peer : peerList)
1668 uint256 peerLedger = peer->getClosedLedgerHash();
1671 ++peerCounts[peerLedger];
1674 for (
auto const& it : peerCounts)
1675 JLOG(m_journal.debug()) <<
"L: " << it.first <<
" n=" << it.second;
1677 uint256 preferredLCL = validations.getPreferredLCL(
1679 m_ledgerMaster.getValidLedgerIndex(),
1682 bool switchLedgers = preferredLCL != closedLedger;
1684 closedLedger = preferredLCL;
1686 if (switchLedgers && (closedLedger == prevClosedLedger))
1689 JLOG(m_journal.info()) <<
"We won't switch to our own previous ledger";
1690 networkClosed = ourClosed->info().hash;
1691 switchLedgers =
false;
1694 networkClosed = closedLedger;
1699 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1702 consensus = app_.getInboundLedgers().acquire(
1703 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1706 (!m_ledgerMaster.canBeCurrent(consensus) ||
1707 !m_ledgerMaster.isCompatible(
1708 *consensus, m_journal.debug(),
"Not switching")))
1712 networkClosed = ourClosed->info().hash;
1716 JLOG(m_journal.warn()) <<
"We are not running on the consensus ledger";
1717 JLOG(m_journal.info()) <<
"Our LCL: " <<
getJson({*ourClosed, {}});
1718 JLOG(m_journal.info()) <<
"Net LCL " << closedLedger;
1720 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1722 setMode(OperatingMode::CONNECTED);
1730 switchLastClosedLedger(consensus);
1737 NetworkOPsImp::switchLastClosedLedger(
1741 JLOG(m_journal.error())
1742 <<
"JUMP last closed ledger to " << newLCL->info().hash;
1744 clearNeedNetworkLedger();
1747 app_.getTxQ().processClosedLedger(app_, *newLCL,
true);
1754 auto retries = m_localTX->getTxSet();
1755 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1760 rules.
emplace(app_.config().features);
1761 app_.openLedger().accept(
1772 return app_.getTxQ().accept(app_, view);
1776 m_ledgerMaster.switchLCL(newLCL);
1778 protocol::TMStatusChange s;
1779 s.set_newevent(protocol::neSWITCHED_LEDGER);
1780 s.set_ledgerseq(newLCL->info().seq);
1781 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1782 s.set_ledgerhashprevious(
1783 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
1784 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
1786 app_.overlay().foreach(
1787 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
1791 NetworkOPsImp::beginConsensus(
uint256 const& networkClosed)
1795 auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
1797 JLOG(m_journal.info()) <<
"Consensus time for #" << closingInfo.seq
1798 <<
" with LCL " << closingInfo.parentHash;
1800 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
1805 if (mMode == OperatingMode::FULL)
1807 JLOG(m_journal.warn()) <<
"Don't have LCL, going to tracking";
1808 setMode(OperatingMode::TRACKING);
1814 assert(prevLedger->info().hash == closingInfo.parentHash);
1816 closingInfo.parentHash ==
1817 m_ledgerMaster.getClosedLedger()->info().hash);
1820 app_.validators().setNegativeUNL(prevLedger->negativeUNL());
1821 TrustChanges const changes = app_.validators().updateTrusted(
1822 app_.getValidations().getCurrentNodeIDs(),
1823 closingInfo.parentCloseTime,
1826 app_.getHashRouter());
1828 if (!changes.
added.empty() || !changes.
removed.empty())
1829 app_.getValidations().trustChanged(changes.
added, changes.
removed);
1831 mConsensus.startRound(
1832 app_.timeKeeper().closeTime(),
1839 if (mLastConsensusPhase != currPhase)
1841 reportConsensusStateChange(currPhase);
1842 mLastConsensusPhase = currPhase;
1845 JLOG(m_journal.debug()) <<
"Initiating consensus engine";
1852 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
1863 protocol::TMHaveTransactionSet msg;
1864 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
1865 msg.set_status(protocol::tsHAVE);
1866 app_.overlay().foreach(
1867 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
1871 mConsensus.gotTxSet(app_.timeKeeper().closeTime(),
RCLTxSet{map});
1875 NetworkOPsImp::endConsensus()
1877 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
1879 for (
auto const& it : app_.overlay().getActivePeers())
1881 if (it && (it->getClosedLedgerHash() == deadLedger))
1883 JLOG(m_journal.trace()) <<
"Killing obsolete peer status";
1890 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
1892 if (networkClosed.
isZero())
1901 if (((mMode == OperatingMode::CONNECTED) ||
1902 (mMode == OperatingMode::SYNCING)) &&
1908 if (!needNetworkLedger_)
1909 setMode(OperatingMode::TRACKING);
1912 if (((mMode == OperatingMode::CONNECTED) ||
1913 (mMode == OperatingMode::TRACKING)) &&
1919 auto current = m_ledgerMaster.getCurrentLedger();
1920 if (app_.timeKeeper().now() < (
current->info().parentCloseTime +
1921 2 *
current->info().closeTimeResolution))
1923 setMode(OperatingMode::FULL);
1927 beginConsensus(networkClosed);
// Consensus switched ledgers out from under us: if we were FULL or TRACKING,
// demote to CONNECTED until we re-sync with the network's view.
1931 NetworkOPsImp::consensusViewChange()
1933 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
1935 setMode(OperatingMode::CONNECTED);
1945 if (!mStreamMaps[sManifests].empty())
1949 jvObj[jss::type] =
"manifestReceived";
1952 jvObj[jss::signing_key] =
1956 jvObj[jss::signature] =
strHex(*sig);
1959 jvObj[jss::domain] = mo.
domain;
1962 for (
auto i = mStreamMaps[sManifests].begin();
1963 i != mStreamMaps[sManifests].end();)
1965 if (
auto p = i->second.lock())
1967 p->send(jvObj,
true);
1972 i = mStreamMaps[sManifests].erase(i);
1978 NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
1983 , loadBaseServer{loadFeeTrack.getLoadBase()}
1985 , em{std::move(escalationMetrics)}
1995 em.has_value() != b.
em.has_value())
2001 em->minProcessingFeeLevel != b.
em->minProcessingFeeLevel ||
2002 em->openLedgerFeeLevel != b.
em->openLedgerFeeLevel ||
2003 em->referenceFeeLevel != b.
em->referenceFeeLevel);
2036 jvObj[jss::type] =
"serverStatus";
2038 jvObj[jss::load_base] = f.loadBaseServer;
2039 jvObj[jss::load_factor_server] = f.loadFactorServer;
2040 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2045 safe_cast<std::uint64_t>(f.loadFactorServer),
2047 f.em->openLedgerFeeLevel,
2049 f.em->referenceFeeLevel)
2052 jvObj[jss::load_factor] =
trunc32(loadFactor);
2053 jvObj[jss::load_factor_fee_escalation] =
2054 f.em->openLedgerFeeLevel.jsonClipped();
2055 jvObj[jss::load_factor_fee_queue] =
2056 f.em->minProcessingFeeLevel.jsonClipped();
2057 jvObj[jss::load_factor_fee_reference] =
2058 f.em->referenceFeeLevel.jsonClipped();
2061 jvObj[jss::load_factor] = f.loadFactorServer;
2075 p->send(jvObj,
true);
2092 if (!streamMap.empty())
2095 jvObj[jss::type] =
"consensusPhase";
2096 jvObj[jss::consensus] =
to_string(phase);
2098 for (
auto i = streamMap.begin(); i != streamMap.end();)
2100 if (
auto p = i->second.lock())
2102 p->send(jvObj,
true);
2107 i = streamMap.erase(i);
2123 auto const signerPublic = val->getSignerPublic();
2125 jvObj[jss::type] =
"validationReceived";
2126 jvObj[jss::validation_public_key] =
2128 jvObj[jss::ledger_hash] =
to_string(val->getLedgerHash());
2129 jvObj[jss::signature] =
strHex(val->getSignature());
2130 jvObj[jss::full] = val->isFull();
2131 jvObj[jss::flags] = val->getFlags();
2133 jvObj[jss::data] =
strHex(val->getSerializer().slice());
2138 if (
auto cookie = (*val)[~
sfCookie])
2142 jvObj[jss::validated_hash] =
strHex(*hash);
2144 auto const masterKey =
2147 if (masterKey != signerPublic)
2151 jvObj[jss::ledger_index] =
to_string(*seq);
2156 for (
auto const& amendment : val->getFieldV256(
sfAmendments))
2161 jvObj[jss::close_time] = *closeTime;
2163 if (
auto const loadFee = (*val)[~
sfLoadFee])
2164 jvObj[jss::load_fee] = *loadFee;
2166 if (
auto const baseFee = (*val)[~
sfBaseFee])
2167 jvObj[jss::base_fee] =
static_cast<double>(*baseFee);
2170 jvObj[jss::reserve_base] = *reserveBase;
2173 jvObj[jss::reserve_inc] = *reserveInc;
2178 if (
auto p = i->second.lock())
2180 p->send(jvObj,
true);
2200 jvObj[jss::type] =
"peerStatusChange";
2209 p->send(jvObj,
true);
2223 using namespace std::chrono_literals;
2255 <<
"recvValidation " << val->getLedgerHash() <<
" from " << source;
2285 "This server is amendment blocked, and must be updated to be "
2286 "able to stay in sync with the network.";
2293 "This server has an expired validator list. validators.txt "
2294 "may be incorrectly configured or some [validator_list_sites] "
2295 "may be unreachable.";
2302 "One or more unsupported amendments have reached majority. "
2303 "Upgrade to the latest version before they are activated "
2304 "to avoid being amendment blocked.";
2305 if (
auto const expected =
2309 d[jss::expected_date] = expected->time_since_epoch().count();
2310 d[jss::expected_date_UTC] =
to_string(*expected);
2314 if (warnings.size())
2315 info[jss::warnings] = std::move(warnings);
2328 info[jss::network_id] =
static_cast<Json::UInt>(*netid);
2334 info[jss::time] =
to_string(std::chrono::floor<std::chrono::microseconds>(
2338 info[jss::network_ledger] =
"waiting";
2340 info[jss::validation_quorum] =
2348 info[jss::node_size] =
"tiny";
2351 info[jss::node_size] =
"small";
2354 info[jss::node_size] =
"medium";
2357 info[jss::node_size] =
"large";
2360 info[jss::node_size] =
"huge";
2369 info[jss::validator_list_expires] =
2370 safe_cast<Json::UInt>(when->time_since_epoch().count());
2372 info[jss::validator_list_expires] = 0;
2382 if (*when == TimeKeeper::time_point::max())
2384 x[jss::expiration] =
"never";
2385 x[jss::status] =
"active";
2392 x[jss::status] =
"active";
2394 x[jss::status] =
"expired";
2399 x[jss::status] =
"unknown";
2400 x[jss::expiration] =
"unknown";
2404 info[jss::io_latency_ms] =
2411 info[jss::pubkey_validator] =
toBase58(
2416 info[jss::pubkey_validator] =
"none";
2429 info[jss::counters][jss::nodestore] = nodestore;
2433 info[jss::pubkey_node] =
2439 info[jss::amendment_blocked] =
true;
2454 lastClose[jss::converge_time_s] =
2459 lastClose[jss::converge_time] =
2463 info[jss::last_close] = lastClose;
2472 auto const escalationMetrics =
2480 auto const loadFactorFeeEscalation =
2482 escalationMetrics.openLedgerFeeLevel,
2484 escalationMetrics.referenceFeeLevel)
2488 safe_cast<std::uint64_t>(loadFactorServer),
2489 loadFactorFeeEscalation);
2493 info[jss::load_base] = loadBaseServer;
2494 info[jss::load_factor] =
trunc32(loadFactor);
2495 info[jss::load_factor_server] = loadFactorServer;
2502 info[jss::load_factor_fee_escalation] =
2503 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2504 info[jss::load_factor_fee_queue] =
2505 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2506 info[jss::load_factor_fee_reference] =
2507 escalationMetrics.referenceFeeLevel.jsonClipped();
2511 info[jss::load_factor] =
2512 static_cast<double>(loadFactor) / loadBaseServer;
2514 if (loadFactorServer != loadFactor)
2515 info[jss::load_factor_server] =
2516 static_cast<double>(loadFactorServer) / loadBaseServer;
2521 if (fee != loadBaseServer)
2522 info[jss::load_factor_local] =
2523 static_cast<double>(fee) / loadBaseServer;
2525 if (fee != loadBaseServer)
2526 info[jss::load_factor_net] =
2527 static_cast<double>(fee) / loadBaseServer;
2529 if (fee != loadBaseServer)
2530 info[jss::load_factor_cluster] =
2531 static_cast<double>(fee) / loadBaseServer;
2533 if (escalationMetrics.openLedgerFeeLevel !=
2534 escalationMetrics.referenceFeeLevel &&
2535 (admin || loadFactorFeeEscalation != loadFactor))
2536 info[jss::load_factor_fee_escalation] =
2537 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2538 escalationMetrics.referenceFeeLevel);
2539 if (escalationMetrics.minProcessingFeeLevel !=
2540 escalationMetrics.referenceFeeLevel)
2541 info[jss::load_factor_fee_queue] =
2542 escalationMetrics.minProcessingFeeLevel
2543 .decimalFromReference(
2544 escalationMetrics.referenceFeeLevel);
2558 XRPAmount const baseFee = lpClosed->fees().base;
2560 l[jss::seq] =
Json::UInt(lpClosed->info().seq);
2561 l[jss::hash] =
to_string(lpClosed->info().hash);
2566 l[jss::reserve_base] =
2567 lpClosed->fees().accountReserve(0).jsonClipped();
2568 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2570 lpClosed->info().closeTime.time_since_epoch().count());
2575 l[jss::reserve_base_xrp] =
2576 lpClosed->fees().accountReserve(0).decimalXRP();
2577 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2580 if (std::abs(nowOffset.count()) >= 60)
2581 l[jss::system_time_offset] = nowOffset.count();
2584 if (std::abs(closeOffset.count()) >= 60)
2585 l[jss::close_time_offset] = closeOffset.count();
2587 #if RIPPLED_REPORTING
2597 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2601 auto lCloseTime = lpClosed->info().closeTime;
2603 if (lCloseTime <= closeTime)
2605 using namespace std::chrono_literals;
2606 auto age = closeTime - lCloseTime;
2608 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2615 info[jss::validated_ledger] = l;
2617 info[jss::closed_ledger] = l;
2621 info[jss::published_ledger] =
"none";
2622 else if (lpPublished->info().seq != lpClosed->info().seq)
2623 info[jss::published_ledger] = lpPublished->info().seq;
2630 info[jss::jq_trans_overflow] =
2632 info[jss::peer_disconnects] =
2634 info[jss::peer_disconnects_resources] =
2675 p->send(jvObj,
true);
2694 if (jvObj[jss::validated].asBool())
2706 p->send(jvObj,
true);
2727 if (
auto p = i->second.lock())
2729 p->send(jvObj,
true);
2747 if (
auto p = i->second.lock())
2749 p->send(jvObj,
true);
2762 for (
auto& jv : jvObj)
2768 else if (jv.isString())
2792 if (jvObj.
isMember(jss::transaction))
2801 << __func__ <<
" : "
2802 <<
"error parsing json for accounts affected";
2811 for (
auto const& affectedAccount : accounts)
2816 auto it = simiIt->second.begin();
2818 while (it != simiIt->second.end())
2829 it = simiIt->second.erase(it);
2836 <<
" iProposed=" << iProposed;
2838 if (!notify.
empty())
2841 isrListener->send(jvObj,
true);
2855 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted,
app_);
2857 lpAccepted->info().hash, alpAccepted);
2860 assert(alpAccepted->getLedger().
get() == lpAccepted.
get());
2864 <<
"Publishing ledger " << lpAccepted->info().seq <<
" "
2865 << lpAccepted->info().hash;
2873 jvObj[jss::type] =
"ledgerClosed";
2874 jvObj[jss::ledger_index] = lpAccepted->info().seq;
2875 jvObj[jss::ledger_hash] =
to_string(lpAccepted->info().hash);
2877 lpAccepted->info().closeTime.time_since_epoch().count());
2879 jvObj[jss::fee_ref] = lpAccepted->fees().units.jsonClipped();
2880 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
2881 jvObj[jss::reserve_base] =
2882 lpAccepted->fees().accountReserve(0).jsonClipped();
2883 jvObj[jss::reserve_inc] =
2884 lpAccepted->fees().increment.jsonClipped();
2886 jvObj[jss::txn_count] =
Json::UInt(alpAccepted->size());
2890 jvObj[jss::validated_ledgers] =
2900 p->send(jvObj,
true);
2918 p->send(jvObj,
true);
2927 static bool firstTime =
true;
2934 for (
auto& inner : outer.second)
2936 auto& subInfo = inner.second;
2937 if (subInfo.index_->separationLedgerSeq_ == 0)
2940 alpAccepted->getLedger(), subInfo);
2949 for (
auto const& accTx : *alpAccepted)
2981 "reportConsensusStateChange->pubConsensus",
3000 const STTx& transaction,
3011 jvObj[jss::type] =
"transaction";
3016 jvObj[jss::ledger_index] = ledger->info().seq;
3017 jvObj[jss::ledger_hash] =
to_string(ledger->info().hash);
3018 jvObj[jss::transaction][jss::date] =
3019 ledger->info().closeTime.time_since_epoch().count();
3020 jvObj[jss::validated] =
true;
3026 jvObj[jss::validated] =
false;
3027 jvObj[jss::ledger_current_index] = ledger->info().seq;
3030 jvObj[jss::status] = validated ?
"closed" :
"proposed";
3031 jvObj[jss::engine_result] = sToken;
3032 jvObj[jss::engine_result_code] = result;
3033 jvObj[jss::engine_result_message] = sHuman;
3041 if (account != amount.issue().account)
3049 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3061 auto const& stTxn = transaction.
getTxn();
3067 auto const& meta = transaction.
getMeta();
3082 p->send(jvObj,
true);
3097 p->send(jvObj,
true);
3121 auto const currLedgerSeq = ledger->seq();
3128 for (
auto const& affectedAccount : transaction.
getAffected())
3133 auto it = simiIt->second.begin();
3135 while (it != simiIt->second.end())
3146 it = simiIt->second.erase(it);
3153 auto it = simiIt->second.begin();
3154 while (it != simiIt->second.end())
3165 it = simiIt->second.erase(it);
3172 auto& subs = histoIt->second;
3173 auto it = subs.begin();
3174 while (it != subs.end())
3177 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3191 it = subs.erase(it);
3202 <<
"pubAccountTransaction: "
3203 <<
"proposed=" << iProposed <<
", accepted=" << iAccepted;
3205 if (!notify.
empty() || !accountHistoryNotify.
empty())
3207 auto const& stTxn = transaction.
getTxn();
3213 auto const& meta = transaction.
getMeta();
3220 isrListener->send(jvObj,
true);
3222 assert(!jvObj.
isMember(jss::account_history_tx_stream));
3223 for (
auto& info : accountHistoryNotify)
3225 auto& index = info.index_;
3226 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3227 jvObj[jss::account_history_tx_first] =
true;
3228 jvObj[jss::account_history_tx_index] = index->forwardTxIndex_++;
3229 info.sink_->send(jvObj,
true);
3254 for (
auto const& affectedAccount : tx->getMentionedAccounts())
3259 auto it = simiIt->second.begin();
3261 while (it != simiIt->second.end())
3272 it = simiIt->second.erase(it);
3279 JLOG(
m_journal.
trace()) <<
"pubProposedAccountTransaction: " << iProposed;
3281 if (!notify.
empty() || !accountHistoryNotify.
empty())
3286 isrListener->send(jvObj,
true);
3288 assert(!jvObj.
isMember(jss::account_history_tx_stream));
3289 for (
auto& info : accountHistoryNotify)
3291 auto& index = info.index_;
3292 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3293 jvObj[jss::account_history_tx_first] =
true;
3294 jvObj[jss::account_history_tx_index] = index->forwardTxIndex_++;
3295 info.sink_->send(jvObj,
true);
3312 for (
auto const& naAccountID : vnaAccountIDs)
3315 <<
"subAccount: account: " <<
toBase58(naAccountID);
3317 isrListener->insertSubAccountInfo(naAccountID, rt);
3322 for (
auto const& naAccountID : vnaAccountIDs)
3324 auto simIterator = subMap.
find(naAccountID);
3325 if (simIterator == subMap.
end())
3329 usisElement[isrListener->getSeq()] = isrListener;
3331 subMap.
insert(simIterator, make_pair(naAccountID, usisElement));
3336 simIterator->second[isrListener->getSeq()] = isrListener;
3347 for (
auto const& naAccountID : vnaAccountIDs)
3350 isrListener->deleteSubAccountInfo(naAccountID, rt);
3367 for (
auto const& naAccountID : vnaAccountIDs)
3369 auto simIterator = subMap.
find(naAccountID);
3371 if (simIterator != subMap.
end())
3374 simIterator->second.erase(uSeq);
3376 if (simIterator->second.empty())
3379 subMap.
erase(simIterator);
3388 enum DatabaseType { Postgres, Sqlite, None };
3389 static const auto databaseType = [&]() -> DatabaseType {
3390 #ifdef RIPPLED_REPORTING
3397 return DatabaseType::Postgres;
3399 return DatabaseType::None;
3407 return DatabaseType::Sqlite;
3409 return DatabaseType::None;
3416 return DatabaseType::Sqlite;
3418 return DatabaseType::None;
3422 if (databaseType == DatabaseType::None)
3425 <<
"AccountHistory job for account "
3437 "AccountHistoryTxStream",
3438 [
this, dbType = databaseType, subInfo]() {
3439 auto const& accountId = subInfo.
index_->accountId_;
3440 auto& lastLedgerSeq = subInfo.
index_->historyLastLedgerSeq_;
3441 auto& txHistoryIndex = subInfo.
index_->historyTxIndex_;
3444 <<
"AccountHistory job for account " <<
toBase58(accountId)
3445 <<
" started. lastLedgerSeq=" << lastLedgerSeq;
3455 auto stx = tx->getSTransaction();
3456 if (stx->getAccountID(
sfAccount) == accountId &&
3457 stx->getSeqProxy().value() == 1)
3461 for (
auto& node : meta->getNodes())
3468 if (
auto inner =
dynamic_cast<const STObject*
>(
3473 inner->getAccountID(
sfAccount) == accountId)
3485 bool unsubscribe) ->
bool {
3488 sptr->send(jvObj,
true);
3514 auto [txResult, status] = db->getAccountTx(args);
3518 <<
"AccountHistory job for account "
3520 <<
" getAccountTx failed";
3525 std::get_if<RelationalDatabase::AccountTxs>(
3526 &txResult.transactions);
3534 <<
"AccountHistory job for account "
3536 <<
" getAccountTx wrong data";
3544 accountId, minLedger, maxLedger, marker, 0,
true};
3545 return db->newestAccountTxPage(options);
3557 while (lastLedgerSeq >= 2 && !subInfo.
index_->stopHistorical_)
3559 int feeChargeCount = 0;
3568 <<
"AccountHistory job for account "
3569 <<
toBase58(accountId) <<
" no InfoSub. Fee charged "
3570 << feeChargeCount <<
" times.";
3575 auto startLedgerSeq =
3576 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3578 <<
"AccountHistory job for account " <<
toBase58(accountId)
3579 <<
", working on ledger range [" << startLedgerSeq <<
","
3580 << lastLedgerSeq <<
"]";
3582 auto haveRange = [&]() ->
bool {
3585 auto haveSomeValidatedLedgers =
3587 validatedMin, validatedMax);
3589 return haveSomeValidatedLedgers &&
3590 validatedMin <= startLedgerSeq &&
3591 lastLedgerSeq <= validatedMax;
3597 <<
"AccountHistory reschedule job for account "
3598 <<
toBase58(accountId) <<
", incomplete ledger range ["
3599 << startLedgerSeq <<
"," << lastLedgerSeq <<
"]";
3605 while (!subInfo.
index_->stopHistorical_)
3608 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3612 <<
"AccountHistory job for account "
3613 <<
toBase58(accountId) <<
" getMoreTxns failed.";
3618 auto const& txns = dbResult->first;
3619 marker = dbResult->second;
3620 for (
auto const& [tx, meta] : txns)
3625 <<
"AccountHistory job for account "
3626 <<
toBase58(accountId) <<
" empty tx or meta.";
3636 <<
"AccountHistory job for account "
3637 <<
toBase58(accountId) <<
" no ledger.";
3642 tx->getSTransaction();
3646 <<
"AccountHistory job for account "
3648 <<
" getSTransaction failed.";
3653 *stTxn, meta->getResultTER(),
true, curTxLedger);
3655 jvTx[jss::account_history_tx_index] = txHistoryIndex--;
3657 jvTx[jss::meta], *curTxLedger, stTxn, *meta);
3658 if (isFirstTx(tx, meta))
3660 jvTx[jss::account_history_tx_first] =
true;
3664 <<
"AccountHistory job for account "
3666 <<
" done, found last tx.";
3678 <<
"AccountHistory job for account "
3680 <<
" paging, marker=" << marker->ledgerSeq <<
":"
3689 if (!subInfo.
index_->stopHistorical_)
3691 lastLedgerSeq = startLedgerSeq - 1;
3692 if (lastLedgerSeq <= 1)
3695 <<
"AccountHistory job for account "
3697 <<
" done, reached genesis ledger.";
3710 subInfo.
index_->separationLedgerSeq_ = ledger->seq();
3711 auto const& accountId = subInfo.
index_->accountId_;
3713 if (!ledger->exists(accountKeylet))
3716 <<
"subAccountHistoryStart, no account " <<
toBase58(accountId)
3717 <<
", no need to add AccountHistory job.";
3722 if (
auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3727 <<
"subAccountHistoryStart, genesis account "
3729 <<
" does not have tx, no need to add AccountHistory job.";
3739 subInfo.
index_->historyLastLedgerSeq_ = ledger->seq();
3740 subInfo.
index_->haveHistorical_ =
true;
3743 <<
"subAccountHistoryStart, add AccountHistory job: accountId="
3744 <<
toBase58(accountId) <<
", currentLedgerSeq=" << ledger->seq();
3754 if (!isrListener->insertSubAccountHistory(accountId))
3757 <<
"subAccountHistory, already subscribed to account "
3764 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
3769 inner.
emplace(isrListener->getSeq(), ahi);
3775 simIterator->second.emplace(isrListener->getSeq(), ahi);
3789 <<
"subAccountHistory, no validated ledger yet, delay start";
3802 isrListener->deleteSubAccountHistory(account);
3816 auto& subInfoMap = simIterator->second;
3817 auto subInfoIter = subInfoMap.find(seq);
3818 if (subInfoIter != subInfoMap.end())
3820 subInfoIter->second.index_->stopHistorical_ =
true;
3825 simIterator->second.erase(seq);
3826 if (simIterator->second.empty())
3832 <<
"unsubAccountHistory, account " <<
toBase58(account)
3833 <<
", historyOnly = " << (historyOnly ?
"true" :
"false");
3841 listeners->addSubscriber(isrListener);
3851 listeners->removeSubscriber(uSeq);
3865 Throw<std::runtime_error>(
3866 "Operation only possible in STANDALONE mode.");
3881 jvResult[jss::ledger_index] = lpClosed->info().seq;
3882 jvResult[jss::ledger_hash] =
to_string(lpClosed->info().hash);
3884 lpClosed->info().closeTime.time_since_epoch().count());
3885 jvResult[jss::fee_ref] = lpClosed->fees().units.jsonClipped();
3886 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
3887 jvResult[jss::reserve_base] =
3888 lpClosed->fees().accountReserve(0).jsonClipped();
3889 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
3894 jvResult[jss::validated_ledgers] =
3900 .emplace(isrListener->getSeq(), isrListener)
3910 .emplace(isrListener->getSeq(), isrListener)
3936 .emplace(isrListener->getSeq(), isrListener)
3964 jvResult[jss::random] =
to_string(uRandom);
3966 jvResult[jss::load_base] = feeTrack.getLoadBase();
3967 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
3968 jvResult[jss::hostid] =
getHostId(admin);
3969 jvResult[jss::pubkey_node] =
3974 .emplace(isrListener->getSeq(), isrListener)
3992 .emplace(isrListener->getSeq(), isrListener)
4010 .emplace(isrListener->getSeq(), isrListener)
4028 .emplace(isrListener->getSeq(), isrListener)
4052 .emplace(isrListener->getSeq(), isrListener)
4070 .emplace(isrListener->getSeq(), isrListener)
4118 if (map.find(pInfo->getSeq()) != map.end())
4125 #ifndef USE_NEW_BOOK_PAGE
4136 unsigned int iLimit,
4146 uint256 uTipIndex = uBookBase;
4150 stream <<
"getBookPage:" << book;
4151 stream <<
"getBookPage: uBookBase=" << uBookBase;
4152 stream <<
"getBookPage: uBookEnd=" << uBookEnd;
4153 stream <<
"getBookPage: uTipIndex=" << uTipIndex;
4162 bool bDirectAdvance =
true;
4166 unsigned int uBookEntry;
4172 while (!bDone && iLimit-- > 0)
4176 bDirectAdvance =
false;
4180 auto const ledgerIndex = view.
succ(uTipIndex, uBookEnd);
4184 sleOfferDir.
reset();
4193 uTipIndex = sleOfferDir->key();
4196 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4199 <<
"getBookPage: uTipIndex=" << uTipIndex;
4201 <<
"getBookPage: offerIndex=" << offerIndex;
4211 auto const uOfferOwnerID = sleOffer->getAccountID(
sfAccount);
4212 auto const& saTakerGets = sleOffer->getFieldAmount(
sfTakerGets);
4213 auto const& saTakerPays = sleOffer->getFieldAmount(
sfTakerPays);
4215 bool firstOwnerOffer(
true);
4221 saOwnerFunds = saTakerGets;
4223 else if (bGlobalFreeze)
4231 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4232 if (umBalanceEntry != umBalance.
end())
4236 saOwnerFunds = umBalanceEntry->second;
4237 firstOwnerOffer =
false;
4251 if (saOwnerFunds < beast::zero)
4255 saOwnerFunds.
clear();
4263 STAmount saOwnerFundsLimit = saOwnerFunds;
4275 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4278 if (saOwnerFundsLimit >= saTakerGets)
4281 saTakerGetsFunded = saTakerGets;
4287 saTakerGetsFunded = saOwnerFundsLimit;
4289 saTakerGetsFunded.
setJson(jvOffer[jss::taker_gets_funded]);
4293 saTakerGetsFunded, saDirRate, saTakerPays.
issue()))
4294 .setJson(jvOffer[jss::taker_pays_funded]);
4300 saOwnerFunds,
multiply(saTakerGetsFunded, offerRate));
4302 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4306 jvOf[jss::quality] = saDirRate.
getText();
4308 if (firstOwnerOffer)
4309 jvOf[jss::owner_funds] = saOwnerFunds.
getText();
4316 if (!
cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4318 bDirectAdvance =
true;
4323 <<
"getBookPage: offerIndex=" << offerIndex;
4343 unsigned int iLimit,
4351 MetaView lesActive(lpLedger,
tapNONE,
true);
4352 OrderBookIterator obIterator(lesActive, book);
4356 const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.
out.
account) ||
4357 lesActive.isGlobalFrozen(book.
in.
account);
4359 while (iLimit-- > 0 && obIterator.nextOffer())
4364 auto const uOfferOwnerID = sleOffer->getAccountID(
sfAccount);
4365 auto const& saTakerGets = sleOffer->getFieldAmount(
sfTakerGets);
4366 auto const& saTakerPays = sleOffer->getFieldAmount(
sfTakerPays);
4367 STAmount saDirRate = obIterator.getCurrentRate();
4373 saOwnerFunds = saTakerGets;
4375 else if (bGlobalFreeze)
4383 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4385 if (umBalanceEntry != umBalance.
end())
4389 saOwnerFunds = umBalanceEntry->second;
4395 saOwnerFunds = lesActive.accountHolds(
4401 if (saOwnerFunds.isNegative())
4405 saOwnerFunds.zero();
4412 STAmount saTakerGetsFunded;
4413 STAmount saOwnerFundsLimit = saOwnerFunds;
4425 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4428 if (saOwnerFundsLimit >= saTakerGets)
4431 saTakerGetsFunded = saTakerGets;
4436 saTakerGetsFunded = saOwnerFundsLimit;
4438 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4444 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4445 .setJson(jvOffer[jss::taker_pays_funded]);
4448 STAmount saOwnerPays = (
parityRate == offerRate)
4451 saOwnerFunds,
multiply(saTakerGetsFunded, offerRate));
4453 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4455 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4459 jvOf[jss::quality] = saDirRate.
getText();
4474 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4514 ++counters_[
static_cast<std::size_t>(om)].transitions;
4516 counters_[
static_cast<std::size_t>(om)].transitions == 1)
4518 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4519 now - processStart_)
4523 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4532 auto [counters, mode, start, initialSync] = getCounterData();
4533 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4543 auto& state = obj[jss::state_accounting][
states_[i]];
4544 state[jss::transitions] =
std::to_string(counters[i].transitions);
4545 state[jss::duration_us] =
std::to_string(counters[i].dur.count());
4549 obj[jss::initial_sync_duration_us] =
std::to_string(initialSync);
4564 boost::asio::io_service& io_svc,
4568 return std::make_unique<NetworkOPsImp>(