#include <ripple/app/consensus/RCLConsensus.h>
#include <ripple/app/consensus/RCLValidations.h>
#include <ripple/app/ledger/AcceptedLedger.h>
#include <ripple/app/ledger/InboundLedgers.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/ledger/LedgerToJson.h>
#include <ripple/app/ledger/LocalTxs.h>
#include <ripple/app/ledger/OpenLedger.h>
#include <ripple/app/ledger/OrderBookDB.h>
#include <ripple/app/ledger/TransactionMaster.h>
#include <ripple/app/main/LoadManager.h>
#include <ripple/app/misc/AmendmentTable.h>
#include <ripple/app/misc/HashRouter.h>
#include <ripple/app/misc/LoadFeeTrack.h>
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/app/misc/Transaction.h>
#include <ripple/app/misc/TxQ.h>
#include <ripple/app/misc/ValidatorKeys.h>
#include <ripple/app/misc/ValidatorList.h>
#include <ripple/app/misc/impl/AccountTxPaging.h>
#include <ripple/app/rdb/RelationalDBInterface.h>
#include <ripple/app/rdb/backend/RelationalDBInterfacePostgres.h>
#include <ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h>
#include <ripple/app/reporting/ReportingETL.h>
#include <ripple/app/tx/apply.h>
#include <ripple/basics/PerfLog.h>
#include <ripple/basics/UptimeClock.h>
#include <ripple/basics/mulDiv.h>
#include <ripple/basics/safe_cast.h>
#include <ripple/beast/rfc2616.h>
#include <ripple/beast/utility/rngfill.h>
#include <ripple/consensus/Consensus.h>
#include <ripple/consensus/ConsensusParms.h>
#include <ripple/crypto/RFC1751.h>
#include <ripple/crypto/csprng.h>
#include <ripple/json/to_string.h>
#include <ripple/net/RPCErr.h>
#include <ripple/nodestore/DatabaseShard.h>
#include <ripple/overlay/Cluster.h>
#include <ripple/overlay/Overlay.h>
#include <ripple/overlay/predicates.h>
#include <ripple/protocol/BuildInfo.h>
#include <ripple/protocol/Feature.h>
#include <ripple/protocol/STParsedJSON.h>
#include <ripple/resource/Fees.h>
#include <ripple/resource/ResourceManager.h>
#include <ripple/rpc/DeliveredAmount.h>
#include <ripple/rpc/impl/RPCHelpers.h>
#include <boost/asio/ip/host_name.hpp>
#include <boost/asio/steady_timer.hpp>
std::chrono::steady_clock::time_point start_ =
    std::chrono::steady_clock::now();

return !(*this != b);
boost::asio::io_service& io_svc,

    app_.logs().journal("FeeVote")),

    app.getInboundTransactions(),
    beast::get_abstract_clock<std::chrono::steady_clock>(),

    app_.logs().journal("LedgerConsensus"))
getServerInfo(bool human, bool admin, bool counters) override;

TER terResult) override;

bool historyOnly) override;

bool historyOnly) override;
boost::system::error_code ec;

    << "NetworkOPs: heartbeatTimer cancel error: "

    << "NetworkOPs: clusterTimer cancel error: "

    << "NetworkOPs: accountHistoryTxTimer cancel error: "

using namespace std::chrono_literals;
boost::asio::steady_timer& timer,
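// Stats: beast::insight gauges tracking, per operating mode, the time
// spent in that mode and the number of transitions into it; fed from the
// StateAccounting counters further below.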
template <class Handler>

    Handler const& handler,

    : hook(collector->make_hook(handler))

          "Disconnected_duration"))

          "Connected_duration"))

      collector->make_gauge(
          "State_Accounting",
          "Syncing_duration"))

          "Tracking_duration"))

      collector->make_gauge(
          "State_Accounting",
          "Full_duration"))

          "Disconnected_transitions"))

          "Connected_transitions"))

          "Syncing_transitions"))

          "Tracking_transitions"))

      collector->make_gauge(
          "State_Accounting",
          "Full_transitions"))
{"disconnected", "connected", "syncing", "tracking", "full"}};
static std::string const hostname = boost::asio::ip::host_name();

static std::string const shroudedHostId = [this]() {

return shroudedHostId;
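// setTimer: shared re-arming helper for the heartbeat, cluster, and
// account-history timers. A successful wait schedules the expiry job; any
// error other than a deliberate cancel is logged and the timer is
// restarted through the onError callback.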
boost::asio::steady_timer& timer,

[this, onExpire, onError](boost::system::error_code const& e) {
    if ((e.value() == boost::system::errc::success) &&
        (!m_job_queue.isStopped()))

    if (e.value() != boost::system::errc::success &&
        e.value() != boost::asio::error::operation_aborted)

        JLOG(m_journal.error())
            << "Timer got error '" << e.message()
            << "'. Restarting timer.";

timer.expires_from_now(expiry_time);
timer.async_wait(std::move(*optionalCountedHandler));
m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
    processHeartbeatTimer();

[this]() { setHeartbeatTimer(); });
NetworkOPsImp::setClusterTimer()

    using namespace std::chrono_literals;

        processClusterTimer();

    [this]() { setClusterTimer(); });
JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "

using namespace std::chrono_literals;

    accountHistoryTxTimer_,

    [this, subInfo]() { addAccountHistoryJob(subInfo); },
    [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
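// processHeartbeatTimer: fires on each heartbeat. It drops to
// DISCONNECTED when the peer count falls below minPeerCount_, returns to
// CONNECTED once peers are sufficient, advances the consensus state
// machine via timerEntry(), and publishes any consensus phase change
// before re-arming itself.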
NetworkOPsImp::processHeartbeatTimer()

    std::size_t const numPeers = app_.overlay().size();

    if (numPeers < minPeerCount_)

        if (mMode != OperatingMode::DISCONNECTED)

            setMode(OperatingMode::DISCONNECTED);
            JLOG(m_journal.warn())
                << "Node count (" << numPeers << ") has fallen "
                << "below required minimum (" << minPeerCount_ << ").";

        setHeartbeatTimer();

    if (mMode == OperatingMode::DISCONNECTED)

        setMode(OperatingMode::CONNECTED);
        JLOG(m_journal.info())
            << "Node count (" << numPeers << ") is sufficient.";

    if (mMode == OperatingMode::SYNCING)
        setMode(OperatingMode::SYNCING);
    else if (mMode == OperatingMode::CONNECTED)
        setMode(OperatingMode::CONNECTED);

    mConsensus.timerEntry(app_.timeKeeper().closeTime());

    if (mLastConsensusPhase != currPhase)

        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;

    setHeartbeatTimer();
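// processClusterTimer: refreshes this node's entry in the cluster table
// and, unless the last update is too recent, gossips the cluster state
// (node names, fees, load sources) to cluster peers via mtCLUSTER.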
NetworkOPsImp::processClusterTimer()

    using namespace std::chrono_literals;
    bool const update = app_.cluster().update(
        app_.nodeIdentity().first,
        (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
            ? app_.getFeeTrack().getLocalFee()

        app_.timeKeeper().now());

        JLOG(m_journal.debug()) << "Too soon to send cluster update";

    protocol::TMCluster cluster;
    app_.cluster().for_each([&cluster](ClusterNode const& node) {
        protocol::TMClusterNode& n = *cluster.add_clusternodes();

        n.set_nodename(node.name());

    for (auto& item : gossip.items)

        protocol::TMLoadSource& node = *cluster.add_loadsources();

        node.set_cost(item.balance);

    app_.overlay().foreach(send_if(
        std::make_shared<Message>(cluster, protocol::mtCLUSTER),
if (mode == OperatingMode::FULL && admin)

    auto const consensusMode = mConsensus.mode();
    if (consensusMode != ConsensusMode::wrongLedger)

        if (consensusMode == ConsensusMode::proposing)

        if (mConsensus.validating())
            return "validating";
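// submitTransaction: entry point for locally submitted transactions.
// Rejects transactions already cached as bad, re-checks validity against
// the validated-ledger rules, then hands off to processTransaction on the
// job queue.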
if (isNeedNetworkLedger())

auto const txid = trans->getTransactionID();
auto const flags = app_.getHashRouter().getFlags(txid);

if ((flags & SF_BAD) != 0)

    JLOG(m_journal.warn()) << "Submitted transaction cached bad";

    app_.getHashRouter(),

    m_ledgerMaster.getValidatedRules(),

if (validity != Validity::Valid)

    JLOG(m_journal.warn())
        << "Submitted transaction invalid: " << reason;

    JLOG(m_journal.warn()) << "Exception checking transaction " << txid;

auto tx = std::make_shared<Transaction>(trans, reason, app_);

m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
    auto t = tx;
    processTransaction(t, false, false, FailHard::no);
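// processTransaction: common path for local and relayed transactions.
// Re-checks the HashRouter bad-cache and the signature, flags failures as
// SF_BAD, canonicalizes through the master transaction table, then
// dispatches either synchronously (local submissions) or as a batch job.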
NetworkOPsImp::processTransaction(

    auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
    auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());

    if ((newFlags & SF_BAD) != 0)

        transaction->setStatus(INVALID);

    auto const view = m_ledgerMaster.getCurrentLedger();

        app_.getHashRouter(),
        *transaction->getSTransaction(),

    assert(validity == Validity::Valid);

    if (validity == Validity::SigBad)

        JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
        transaction->setStatus(INVALID);

        app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);

    app_.getMasterTransaction().canonicalize(&transaction);

        doTransactionSync(transaction, bUnlimited, failType);

        doTransactionAsync(transaction, bUnlimited, failType);
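// doTransactionAsync/doTransactionSync: both append to the mTransactions
// batch. The async path schedules a jtBATCH job if none is pending; the
// sync path keeps draining batches until its own transaction has been
// applied.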
NetworkOPsImp::doTransactionAsync(

    if (transaction->getApplying())

    mTransactions.push_back(

    transaction->setApplying();

    if (mDispatchState == DispatchState::none)

        if (m_job_queue.addJob(
                jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))

            mDispatchState = DispatchState::scheduled;
NetworkOPsImp::doTransactionSync(

    if (!transaction->getApplying())

        mTransactions.push_back(

        transaction->setApplying();

        if (mDispatchState == DispatchState::running)

        if (mTransactions.size())

            if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {

                mDispatchState = DispatchState::scheduled;

    } while (transaction->getApplying());
NetworkOPsImp::transactionBatch()

    if (mDispatchState == DispatchState::running)

    while (mTransactions.size())

        mTransactions.swap(transactions);
        assert(!transactions.empty());

    assert(mDispatchState != DispatchState::running);
    mDispatchState = DispatchState::running;
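// Batch application: under the LedgerMaster lock, each queued transaction
// is applied to the open ledger through the TxQ. The engine result drives
// the follow-up: publish and mark applied, hold for a later ledger, keep
// locally for resubmission, and/or relay to peers; transactions released
// by popAcctTransaction are re-queued via submit_held.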
    bool changed = false;

        m_ledgerMaster.peekMutex(), std::defer_lock};

            if (e.failType == FailHard::yes)

            auto const result = app_.getTxQ().apply(
                app_, view, e.transaction->getSTransaction(), flags, j);
            e.result = result.first;
            e.applied = result.second;
            changed = changed || result.second;

    if (auto const l = m_ledgerMaster.getValidatedLedger())
        validatedLedgerIndex = l->info().seq;

    auto newOL = app_.openLedger().current();

        e.transaction->clearSubmitResult();

            pubProposedTransaction(
                newOL, e.transaction->getSTransaction(), e.result);
            e.transaction->setApplied();

        e.transaction->setResult(e.result);

            app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);

            JLOG(m_journal.info())
                << "TransactionResult: " << token << ": " << human;

        bool addLocal = e.local;

            JLOG(m_journal.debug())
                << "Transaction is now included in open ledger";
            e.transaction->setStatus(INCLUDED);

            auto const& txCur = e.transaction->getSTransaction();
            auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);

                auto t = std::make_shared<Transaction>(trans, reason, app_);
                submit_held.emplace_back(t, false, false, FailHard::no);

            JLOG(m_journal.info()) << "Transaction is obsolete";
            e.transaction->setStatus(OBSOLETE);

            JLOG(m_journal.debug())
                << "Transaction is likely to claim a"
                << " fee, but is queued until fee drops";
            e.transaction->setStatus(HELD);

            m_ledgerMaster.addHeldTransaction(e.transaction);
            e.transaction->setQueued();
            e.transaction->setKept();

            if (e.failType != FailHard::yes)

                JLOG(m_journal.debug())
                    << "Transaction should be held: " << e.result;
                e.transaction->setStatus(HELD);
                m_ledgerMaster.addHeldTransaction(e.transaction);
                e.transaction->setKept();

            JLOG(m_journal.debug())
                << "Status other than success " << e.result;
            e.transaction->setStatus(INVALID);

        auto const enforceFailHard =
            e.failType == FailHard::yes && !isTesSuccess(e.result);

        if (addLocal && !enforceFailHard)

            m_localTX->push_back(
                m_ledgerMaster.getCurrentLedgerIndex(),
                e.transaction->getSTransaction());
            e.transaction->setKept();

            ((mMode != OperatingMode::FULL) &&
             (e.failType != FailHard::yes) && e.local) ||

                app_.getHashRouter().shouldRelay(e.transaction->getID());

            protocol::TMTransaction tx;

            e.transaction->getSTransaction()->add(s);
            tx.set_rawtransaction(s.data(), s.size());
            tx.set_status(protocol::tsCURRENT);
            tx.set_receivetimestamp(
                app_.timeKeeper().now().time_since_epoch().count());

                app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
                e.transaction->setBroadcast();

        if (validatedLedgerIndex)

            auto [fee, accountSeq, availableSeq] =
                app_.getTxQ().getTxRequiredFeeAndSeq(
                    *newOL, e.transaction->getSTransaction());
            e.transaction->setCurrentLedgerState(
                *validatedLedgerIndex, fee, accountSeq, availableSeq);

        e.transaction->clearApplying();

    if (!submit_held.empty())

        if (mTransactions.empty())
            mTransactions.swap(submit_held);

            for (auto& e : submit_held)
                mTransactions.push_back(std::move(e));

    mDispatchState = DispatchState::none;
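// getOwnerInfo: walks an account's owner directory pages and groups the
// entries (offers, ripple_lines) into a JSON object.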
NetworkOPsImp::getOwnerInfo(

    auto root = keylet::ownerDir(account);
    auto sleNode = lpLedger->read(keylet::page(root));

        for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))

            auto sleCur = lpLedger->read(keylet::child(uDirEntry));

            switch (sleCur->getType())

                    if (!jvObjects.isMember(jss::offers))
                        jvObjects[jss::offers] =

                    jvObjects[jss::offers].append(
                        sleCur->getJson(JsonOptions::none));

                    if (!jvObjects.isMember(jss::ripple_lines))

                        jvObjects[jss::ripple_lines] =

                    jvObjects[jss::ripple_lines].append(
                        sleCur->getJson(JsonOptions::none));

        sleNode = lpLedger->read(keylet::page(root, uNodeDir));
NetworkOPsImp::isBlocked()
    return isAmendmentBlocked() || isUNLBlocked();

NetworkOPsImp::isAmendmentBlocked()
    return amendmentBlocked_;

NetworkOPsImp::setAmendmentBlocked()
    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED);

NetworkOPsImp::isAmendmentWarned()
    return !amendmentBlocked_ && amendmentWarned_;

NetworkOPsImp::setAmendmentWarned()
    amendmentWarned_ = true;

NetworkOPsImp::clearAmendmentWarned()
    amendmentWarned_ = false;

NetworkOPsImp::isUNLBlocked()
    return unlBlocked_;

NetworkOPsImp::setUNLBlocked()
    unlBlocked_ = true;
    setMode(OperatingMode::CONNECTED);

NetworkOPsImp::clearUNLBlocked()
    unlBlocked_ = false;
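// checkLastClosedLedger: compares our last closed ledger against what
// peers report and what the validation trie prefers; refuses to switch
// back to our own previous ledger, acquires the preferred ledger if we do
// not have it, and falls back to CONNECTED before jumping via
// switchLastClosedLedger.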
NetworkOPsImp::checkLastClosedLedger(

    JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";

    auto const ourClosed = m_ledgerMaster.getClosedLedger();

    uint256 closedLedger = ourClosed->info().hash;
    uint256 prevClosedLedger = ourClosed->info().parentHash;
    JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
    JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;

    auto& validations = app_.getValidations();
    JLOG(m_journal.debug())
        << "ValidationTrie " << Json::Compact(validations.getJsonTrie());

    peerCounts[closedLedger] = 0;
    if (mMode >= OperatingMode::TRACKING)
        peerCounts[closedLedger]++;

    for (auto& peer : peerList)

        uint256 peerLedger = peer->getClosedLedgerHash();

            ++peerCounts[peerLedger];

    for (auto const& it : peerCounts)
        JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;

    uint256 preferredLCL = validations.getPreferredLCL(

        m_ledgerMaster.getValidLedgerIndex(),

    bool switchLedgers = preferredLCL != closedLedger;

        closedLedger = preferredLCL;

    if (switchLedgers && (closedLedger == prevClosedLedger))

        JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
        networkClosed = ourClosed->info().hash;
        switchLedgers = false;

        networkClosed = closedLedger;

    auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);

        consensus = app_.getInboundLedgers().acquire(
            closedLedger, 0, InboundLedger::Reason::CONSENSUS);

        (!m_ledgerMaster.canBeCurrent(consensus) ||
         !m_ledgerMaster.isCompatible(
             *consensus, m_journal.debug(), "Not switching")))

        networkClosed = ourClosed->info().hash;

    JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
    JLOG(m_journal.info()) << "Our LCL: " << getJson({*ourClosed, {}});
    JLOG(m_journal.info()) << "Net LCL " << closedLedger;

    if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
        setMode(OperatingMode::CONNECTED);

        switchLastClosedLedger(consensus);
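// switchLastClosedLedger: jumps to a ledger we did not build ourselves.
// The TxQ processes the new closed ledger, local transactions are
// collected for retry, a fresh open ledger is accepted on top of the new
// LCL, and peers are notified with a neSWITCHED_LEDGER status message.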
NetworkOPsImp::switchLastClosedLedger(

    JLOG(m_journal.error())
        << "JUMP last closed ledger to " << newLCL->info().hash;

    clearNeedNetworkLedger();

    app_.getTxQ().processClosedLedger(app_, *newLCL, true);

    auto retries = m_localTX->getTxSet();
    auto const lastVal = app_.getLedgerMaster().getValidatedLedger();

        rules.emplace(*lastVal, app_.config().features);

        rules.emplace(app_.config().features);
    app_.openLedger().accept(

            return app_.getTxQ().accept(app_, view);

    m_ledgerMaster.switchLCL(newLCL);

    protocol::TMStatusChange s;
    s.set_newevent(protocol::neSWITCHED_LEDGER);
    s.set_ledgerseq(newLCL->info().seq);
    s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
    s.set_ledgerhashprevious(
        newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
    s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());

    app_.overlay().foreach(
        send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
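// beginConsensus: starts a consensus round on top of the current LCL. If
// the previous ledger is missing we drop from FULL to TRACKING; the
// trusted validator set (including the negative UNL) is refreshed before
// mConsensus.startRound().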
NetworkOPsImp::beginConsensus(uint256 const& networkClosed)

    auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();

    JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
                           << " with LCL " << closingInfo.parentHash;

    auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);

        if (mMode == OperatingMode::FULL)

            JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
            setMode(OperatingMode::TRACKING);

    assert(prevLedger->info().hash == closingInfo.parentHash);
    assert(
        closingInfo.parentHash ==
        m_ledgerMaster.getClosedLedger()->info().hash);

    app_.validators().setNegativeUNL(prevLedger->negativeUNL());
    TrustChanges const changes = app_.validators().updateTrusted(
        app_.getValidations().getCurrentNodeIDs(),
        closingInfo.parentCloseTime,

        app_.getHashRouter());

    if (!changes.added.empty() || !changes.removed.empty())
        app_.getValidations().trustChanged(changes.added, changes.removed);

    mConsensus.startRound(
        app_.timeKeeper().closeTime(),

    if (mLastConsensusPhase != currPhase)

        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;

    JLOG(m_journal.debug()) << "Initiating consensus engine";
    return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);

    protocol::TMHaveTransactionSet msg;
    msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
    msg.set_status(protocol::tsHAVE);
    app_.overlay().foreach(
        send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));

    mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
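// endConsensus: after a round completes, forgets peers still reporting
// the pre-consensus ledger, re-evaluates the network's closed ledger, and
// promotes the operating mode toward FULL when the previous close time is
// recent relative to our clock, before kicking off the next round.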
NetworkOPsImp::endConsensus()

    uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;

    for (auto const& it : app_.overlay().getActivePeers())

        if (it && (it->getClosedLedgerHash() == deadLedger))

            JLOG(m_journal.trace()) << "Killing obsolete peer status";

    checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);

    if (networkClosed.isZero())

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::SYNCING)) &&

        if (!needNetworkLedger_)
            setMode(OperatingMode::TRACKING);

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::TRACKING)) &&

        auto current = m_ledgerMaster.getCurrentLedger();
        if (app_.timeKeeper().now() < (current->info().parentCloseTime +
                                       2 * current->info().closeTimeResolution))

            setMode(OperatingMode::FULL);

    beginConsensus(networkClosed);
NetworkOPsImp::consensusViewChange()

    if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
        setMode(OperatingMode::CONNECTED);
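// pubManifest: fans a received validator manifest out to "manifests"
// stream subscribers, dropping subscribers whose weak_ptr has expired.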
    if (!mStreamMaps[sManifests].empty())

        jvObj[jss::type] = "manifestReceived";

        jvObj[jss::signing_key] =

            jvObj[jss::signature] = strHex(*sig);

            jvObj[jss::domain] = mo.domain;

        for (auto i = mStreamMaps[sManifests].begin();
             i != mStreamMaps[sManifests].end();)

            if (auto p = i->second.lock())

                p->send(jvObj, true);

                i = mStreamMaps[sManifests].erase(i);
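// ServerFeeSummary: snapshot of base fee, server load factors, and TxQ
// escalation metrics; operator!= below decides whether a new snapshot
// differs enough to warrant publishing another "serverStatus" message.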
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(

    , loadBaseServer{loadFeeTrack.getLoadBase()}

    , em{std::move(escalationMetrics)}

        em.has_value() != b.em.has_value())

            em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
            em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
            em->referenceFeeLevel != b.em->referenceFeeLevel);
    jvObj[jss::type] = "serverStatus";

    jvObj[jss::load_base] = f.loadBaseServer;
    jvObj[jss::load_factor_server] = f.loadFactorServer;
    jvObj[jss::base_fee] = f.baseFee.jsonClipped();

            safe_cast<std::uint64_t>(f.loadFactorServer),

            f.em->openLedgerFeeLevel,

            f.em->referenceFeeLevel)

        jvObj[jss::load_factor] = trunc32(loadFactor);
        jvObj[jss::load_factor_fee_escalation] =
            f.em->openLedgerFeeLevel.jsonClipped();
        jvObj[jss::load_factor_fee_queue] =
            f.em->minProcessingFeeLevel.jsonClipped();
        jvObj[jss::load_factor_fee_reference] =
            f.em->referenceFeeLevel.jsonClipped();

        jvObj[jss::load_factor] = f.loadFactorServer;

            p->send(jvObj, true);
    if (!streamMap.empty())

        jvObj[jss::type] = "consensusPhase";
        jvObj[jss::consensus] = to_string(phase);

        for (auto i = streamMap.begin(); i != streamMap.end();)

            if (auto p = i->second.lock())

                p->send(jvObj, true);

                i = streamMap.erase(i);
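// pubValidation: builds the "validationReceived" message, including the
// master key when it differs from the signing key and any optional fee or
// reserve fields carried by the validation.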
    auto const signerPublic = val->getSignerPublic();

    jvObj[jss::type] = "validationReceived";
    jvObj[jss::validation_public_key] =

    jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
    jvObj[jss::signature] = strHex(val->getSignature());
    jvObj[jss::full] = val->isFull();
    jvObj[jss::flags] = val->getFlags();

        jvObj[jss::data] = strHex(val->getSerializer().slice());

    if (auto cookie = (*val)[~sfCookie])

        jvObj[jss::validated_hash] = strHex(*hash);

    auto const masterKey =

    if (masterKey != signerPublic)

        jvObj[jss::ledger_index] = to_string(*seq);

        for (auto const& amendment : val->getFieldV256(sfAmendments))

        jvObj[jss::close_time] = *closeTime;

    if (auto const loadFee = (*val)[~sfLoadFee])
        jvObj[jss::load_fee] = *loadFee;

    if (auto const baseFee = (*val)[~sfBaseFee])
        jvObj[jss::base_fee] = static_cast<double>(*baseFee);

        jvObj[jss::reserve_base] = *reserveBase;

        jvObj[jss::reserve_inc] = *reserveInc;

        if (auto p = i->second.lock())

            p->send(jvObj, true);
    jvObj[jss::type] = "peerStatusChange";

            p->send(jvObj, true);
    using namespace std::chrono_literals;

        << "recvValidation " << val->getLedgerHash() << " from " << source;

            "This server is amendment blocked, and must be updated to be "
            "able to stay in sync with the network.";

            "This server has an expired validator list. validators.txt "
            "may be incorrectly configured or some [validator_list_sites] "
            "may be unreachable.";

            "One or more unsupported amendments have reached majority. "
            "Upgrade to the latest version before they are activated "
            "to avoid being amendment blocked.";
        if (auto const expected =

            d[jss::expected_date] = expected->time_since_epoch().count();
            d[jss::expected_date_UTC] = to_string(*expected);

    if (warnings.size())
        info[jss::warnings] = std::move(warnings);
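// getServerInfo: assembles the server_info/server_state report: network
// id, operating mode, validation quorum, validator list expiration, load
// factors (raw levels for admins, ratios otherwise), and a summary of the
// last closed/validated ledger.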
        info[jss::network_id] = static_cast<Json::UInt>(*netid);

    info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(

        info[jss::network_ledger] = "waiting";

    info[jss::validation_quorum] =

            info[jss::node_size] = "tiny";

            info[jss::node_size] = "small";

            info[jss::node_size] = "medium";

            info[jss::node_size] = "large";

            info[jss::node_size] = "huge";

            info[jss::validator_list_expires] =
                safe_cast<Json::UInt>(when->time_since_epoch().count());

            info[jss::validator_list_expires] = 0;

            if (*when == TimeKeeper::time_point::max())

                x[jss::expiration] = "never";
                x[jss::status] = "active";

                x[jss::status] = "active";

                x[jss::status] = "expired";

            x[jss::status] = "unknown";
            x[jss::expiration] = "unknown";

    info[jss::io_latency_ms] =

        info[jss::pubkey_validator] = toBase58(

        info[jss::pubkey_validator] = "none";

        info[jss::counters][jss::nodestore] = nodestore;

    info[jss::pubkey_node] =

        info[jss::amendment_blocked] = true;

        lastClose[jss::converge_time_s] =

        lastClose[jss::converge_time] =

    info[jss::last_close] = lastClose;

    auto const escalationMetrics =

    auto const loadFactorFeeEscalation =

            escalationMetrics.openLedgerFeeLevel,

            escalationMetrics.referenceFeeLevel)

        safe_cast<std::uint64_t>(loadFactorServer),
        loadFactorFeeEscalation);

        info[jss::load_base] = loadBaseServer;
        info[jss::load_factor] = trunc32(loadFactor);
        info[jss::load_factor_server] = loadFactorServer;

        info[jss::load_factor_fee_escalation] =
            escalationMetrics.openLedgerFeeLevel.jsonClipped();
        info[jss::load_factor_fee_queue] =
            escalationMetrics.minProcessingFeeLevel.jsonClipped();
        info[jss::load_factor_fee_reference] =
            escalationMetrics.referenceFeeLevel.jsonClipped();

        info[jss::load_factor] =
            static_cast<double>(loadFactor) / loadBaseServer;

        if (loadFactorServer != loadFactor)
            info[jss::load_factor_server] =
                static_cast<double>(loadFactorServer) / loadBaseServer;

            if (fee != loadBaseServer)
                info[jss::load_factor_local] =
                    static_cast<double>(fee) / loadBaseServer;

            if (fee != loadBaseServer)
                info[jss::load_factor_net] =
                    static_cast<double>(fee) / loadBaseServer;

            if (fee != loadBaseServer)
                info[jss::load_factor_cluster] =
                    static_cast<double>(fee) / loadBaseServer;

        if (escalationMetrics.openLedgerFeeLevel !=
                escalationMetrics.referenceFeeLevel &&
            (admin || loadFactorFeeEscalation != loadFactor))
            info[jss::load_factor_fee_escalation] =
                escalationMetrics.openLedgerFeeLevel.decimalFromReference(
                    escalationMetrics.referenceFeeLevel);
        if (escalationMetrics.minProcessingFeeLevel !=
            escalationMetrics.referenceFeeLevel)
            info[jss::load_factor_fee_queue] =
                escalationMetrics.minProcessingFeeLevel
                    .decimalFromReference(
                        escalationMetrics.referenceFeeLevel);
        XRPAmount const baseFee = lpClosed->fees().base;

        l[jss::seq] = Json::UInt(lpClosed->info().seq);
        l[jss::hash] = to_string(lpClosed->info().hash);

            l[jss::reserve_base] =
                lpClosed->fees().accountReserve(0).jsonClipped();
            l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();

                lpClosed->info().closeTime.time_since_epoch().count());

            l[jss::reserve_base_xrp] =
                lpClosed->fees().accountReserve(0).decimalXRP();
            l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();

            if (std::abs(nowOffset.count()) >= 60)
                l[jss::system_time_offset] = nowOffset.count();

            if (std::abs(closeOffset.count()) >= 60)
                l[jss::close_time_offset] = closeOffset.count();

#if RIPPLED_REPORTING

                Json::UInt(age < highAgeThreshold ? age.count() : 0);

            auto lCloseTime = lpClosed->info().closeTime;

            if (lCloseTime <= closeTime)

                using namespace std::chrono_literals;
                auto age = closeTime - lCloseTime;

                    Json::UInt(age < highAgeThreshold ? age.count() : 0);

            info[jss::validated_ledger] = l;

            info[jss::closed_ledger] = l;

        info[jss::published_ledger] = "none";
    else if (lpPublished->info().seq != lpClosed->info().seq)
        info[jss::published_ledger] = lpPublished->info().seq;

    info[jss::jq_trans_overflow] =

    info[jss::peer_disconnects] =

    info[jss::peer_disconnects_resources] =

            p->send(jvObj, true);
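// Forwarding helpers (reporting mode): messages received from the p2p
// source are passed through to local subscribers, with the affected
// accounts recovered by parsing the transaction JSON.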
    if (jvObj[jss::validated].asBool())

            p->send(jvObj, true);

        if (auto p = i->second.lock())

            p->send(jvObj, true);

        if (auto p = i->second.lock())

            p->send(jvObj, true);

    for (auto& jv : jvObj)

        else if (jv.isString())

    if (jvObj.isMember(jss::transaction))

            << __func__ << " : "
            << "error parsing json for accounts affected";

    for (auto const& affectedAccount : accounts)

            auto it = simiIt->second.begin();

            while (it != simiIt->second.end())

                    it = simiIt->second.erase(it);

        << " iProposed=" << iProposed;

    if (!notify.empty())

            isrListener->send(jvObj, true);
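// pubLedger: on each accepted ledger, publishes "ledgerClosed" to ledger
// stream subscribers and then feeds every transaction in the ledger to
// the transaction publishers; account-history subscriptions that were
// waiting for a validated ledger are started here as well.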
            alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);

                lpAccepted->info().hash, alpAccepted);

            << "Publishing ledger = " << lpAccepted->info().seq;

            jvObj[jss::type] = "ledgerClosed";
            jvObj[jss::ledger_index] = lpAccepted->info().seq;
            jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);

                lpAccepted->info().closeTime.time_since_epoch().count());

            jvObj[jss::fee_ref] = lpAccepted->fees().units.jsonClipped();
            jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
            jvObj[jss::reserve_base] =
                lpAccepted->fees().accountReserve(0).jsonClipped();
            jvObj[jss::reserve_inc] =
                lpAccepted->fees().increment.jsonClipped();

            jvObj[jss::txn_count] = Json::UInt(alpAccepted->getTxnCount());

                jvObj[jss::validated_ledgers] =

                        << "Publishing ledger = " << lpAccepted->info().seq
                        << " : consumer = " << p->getConsumer()
                        << " : obj = " << jvObj;
                    p->send(jvObj, true);

    static bool firstTime = true;

            for (auto& inner : outer.second)

                auto& subInfo = inner.second;
                if (subInfo.index_->separationLedgerSeq_ == 0)

                        alpAccepted->getLedger(), subInfo);

    for (auto const& [_, accTx] : alpAccepted->getMap())

        "reportConsensusStateChange->pubConsensus",
    jvObj[jss::type] = "transaction";

        jvObj[jss::ledger_index] = lpCurrent->info().seq;
        jvObj[jss::ledger_hash] = to_string(lpCurrent->info().hash);
        jvObj[jss::transaction][jss::date] =
            lpCurrent->info().closeTime.time_since_epoch().count();
        jvObj[jss::validated] = true;

        jvObj[jss::validated] = false;
        jvObj[jss::ledger_current_index] = lpCurrent->info().seq;

    jvObj[jss::status] = bValidated ? "closed" : "proposed";
    jvObj[jss::engine_result] = sToken;
    jvObj[jss::engine_result_code] = terResult;
    jvObj[jss::engine_result_message] = sHuman;

        if (account != amount.issue().account)

                jvObj[jss::transaction][jss::owner_funds] =
                    ownerFunds.getText();

    if (auto const txMeta = alTx.getMeta())

            jvObj[jss::meta], *alAccepted, stTxn, *txMeta);

                p->send(jvObj, true);

                p->send(jvObj, true);
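// pubAccountTransaction: routes a transaction to subscribers of each
// affected account (both real-time and proposed maps) and to
// account_history streams, tagging forward history entries with
// account_history_tx_index.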
    auto const currLedgerSeq = lpCurrent->seq();

        for (auto const& affectedAccount : alTx.getAffected())

                auto it = simiIt->second.begin();

                while (it != simiIt->second.end())

                        it = simiIt->second.erase(it);

                auto it = simiIt->second.begin();
                while (it != simiIt->second.end())

                        it = simiIt->second.erase(it);

                auto& subs = histoIt->second;
                auto it = subs.begin();
                while (it != subs.end())

                    if (currLedgerSeq <= info.index_->separationLedgerSeq_)

                        it = subs.erase(it);

        << "pubAccountTransaction:"
        << " iProposed=" << iProposed << " iAccepted=" << iAccepted;

    if (!notify.empty() || !accountHistoryNotify.empty())

        if (auto const txMeta = alTx.getMeta())

                jvObj[jss::meta], *lpCurrent, stTxn, *txMeta);

            isrListener->send(jvObj, true);

        assert(!jvObj.isMember(jss::account_history_tx_stream));
        for (auto& info : accountHistoryNotify)

            auto& index = info.index_;
            if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
                jvObj[jss::account_history_tx_first] = true;
            jvObj[jss::account_history_tx_index] = index->forwardTxIndex_++;
            info.sink_->send(jvObj, true);
    for (auto const& naAccountID : vnaAccountIDs)

            << "subAccount: account: " << toBase58(naAccountID);

        isrListener->insertSubAccountInfo(naAccountID, rt);

    for (auto const& naAccountID : vnaAccountIDs)

        auto simIterator = subMap.find(naAccountID);
        if (simIterator == subMap.end())

            usisElement[isrListener->getSeq()] = isrListener;

            subMap.insert(simIterator, make_pair(naAccountID, usisElement));

            simIterator->second[isrListener->getSeq()] = isrListener;

    for (auto const& naAccountID : vnaAccountIDs)

        isrListener->deleteSubAccountInfo(naAccountID, rt);

    for (auto const& naAccountID : vnaAccountIDs)

        auto simIterator = subMap.find(naAccountID);

        if (simIterator != subMap.end())

            simIterator->second.erase(uSeq);

            if (simIterator->second.empty())

                subMap.erase(simIterator);
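// addAccountHistoryJob: backfills an account_history_tx_stream. It picks
// the available backend (Postgres under reporting, otherwise SQLite),
// then walks validated ledgers backwards in 1024-ledger windows, paging
// through newestAccountTxPage and emitting each transaction with a
// decreasing account_history_tx_index until the account's first
// transaction or the genesis ledger is reached.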
    enum DatabaseType { Postgres, Sqlite, None };
    static const auto databaseType = [&]() -> DatabaseType {
#ifdef RIPPLED_REPORTING

            return DatabaseType::Postgres;

        return DatabaseType::None;

            return DatabaseType::Sqlite;

        return DatabaseType::None;

            return DatabaseType::Sqlite;

        return DatabaseType::None;

    if (databaseType == DatabaseType::None)

            << "AccountHistory job for account "

        "AccountHistoryTxStream",
        [this, dbType = databaseType, subInfo]() {
            auto const& accountId = subInfo.index_->accountId_;
            auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
            auto& txHistoryIndex = subInfo.index_->historyTxIndex_;

                << "AccountHistory job for account " << toBase58(accountId)
                << " started. lastLedgerSeq=" << lastLedgerSeq;

                auto stx = tx->getSTransaction();
                if (stx->getAccountID(sfAccount) == accountId &&
                    stx->getSeqProxy().value() == 1)

                for (auto& node : meta->getNodes())

                    if (auto inner = dynamic_cast<const STObject*>(

                        inner->getAccountID(sfAccount) == accountId)

                bool unsubscribe) -> bool {

                    sptr->send(jvObj, true);

                auto [txResult, status] = db->getAccountTx(args);

                        << "AccountHistory job for account "

                        << " getAccountTx failed";

                    std::get_if<RelationalDBInterface::AccountTxs>(
                        &txResult.transactions);

                        << "AccountHistory job for account "

                        << " getAccountTx wrong data";

                    accountId, minLedger, maxLedger, marker, 0, true};
                return db->newestAccountTxPage(options);

            while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)

                int feeChargeCount = 0;

                        << "AccountHistory job for account "
                        << toBase58(accountId) << " no InfoSub. Fee charged "
                        << feeChargeCount << " times.";

                auto startLedgerSeq =
                    (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);

                    << "AccountHistory job for account " << toBase58(accountId)
                    << ", working on ledger range [" << startLedgerSeq << ","
                    << lastLedgerSeq << "]";

                auto haveRange = [&]() -> bool {

                    auto haveSomeValidatedLedgers =

                        validatedMin, validatedMax);

                    return haveSomeValidatedLedgers &&
                        validatedMin <= startLedgerSeq &&
                        lastLedgerSeq <= validatedMax;

                        << "AccountHistory reschedule job for account "
                        << toBase58(accountId) << ", incomplete ledger range ["
                        << startLedgerSeq << "," << lastLedgerSeq << "]";

                while (!subInfo.index_->stopHistorical_)

                        getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);

                            << "AccountHistory job for account "
                            << toBase58(accountId) << " getMoreTxns failed.";

                    auto const& txns = dbResult->first;
                    marker = dbResult->second;
                    for (auto const& [tx, meta] : txns)

                                << "AccountHistory job for account "
                                << toBase58(accountId) << " empty tx or meta.";

                                << "AccountHistory job for account "
                                << toBase58(accountId) << " no ledger.";

                            tx->getSTransaction();

                                << "AccountHistory job for account "

                                << " getSTransaction failed.";

                            *stTxn, meta->getResultTER(), true, curTxLedger);

                        jvTx[jss::account_history_tx_index] = txHistoryIndex--;

                            jvTx[jss::meta], *curTxLedger, stTxn, *meta);
                        if (isFirstTx(tx, meta))

                            jvTx[jss::account_history_tx_first] = true;

                                << "AccountHistory job for account "

                                << " done, found last tx.";

                            << "AccountHistory job for account "

                            << " paging, marker=" << marker->ledgerSeq << ":"

                if (!subInfo.index_->stopHistorical_)

                    lastLedgerSeq = startLedgerSeq - 1;
                    if (lastLedgerSeq <= 1)

                            << "AccountHistory job for account "

                            << " done, reached genesis ledger.";
    subInfo.index_->separationLedgerSeq_ = ledger->seq();
    auto const& accountId = subInfo.index_->accountId_;

    if (!ledger->exists(accountKeylet))

            << "subAccountHistoryStart, no account " << toBase58(accountId)
            << ", no need to add AccountHistory job.";

    if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)

            << "subAccountHistoryStart, genesis account "

            << " does not have tx, no need to add AccountHistory job.";

    subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
    subInfo.index_->haveHistorical_ = true;

        << "subAccountHistoryStart, add AccountHistory job: accountId="
        << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
    if (!isrListener->insertSubAccountHistory(accountId))

            << "subAccountHistory, already subscribed to account "

        isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};

        inner.emplace(isrListener->getSeq(), ahi);

        simIterator->second.emplace(isrListener->getSeq(), ahi);

            << "subAccountHistory, no validated ledger yet, delay start";

    isrListener->deleteSubAccountHistory(account);

    auto& subInfoMap = simIterator->second;
    auto subInfoIter = subInfoMap.find(seq);
    if (subInfoIter != subInfoMap.end())

        subInfoIter->second.index_->stopHistorical_ = true;

        simIterator->second.erase(seq);
        if (simIterator->second.empty())

        << "unsubAccountHistory, account " << toBase58(account)
        << ", historyOnly = " << (historyOnly ? "true" : "false");
    listeners->addSubscriber(isrListener);

    listeners->removeSubscriber(uSeq);

        Throw<std::runtime_error>(
            "Operation only possible in STANDALONE mode.");
        jvResult[jss::ledger_index] = lpClosed->info().seq;
        jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);

            lpClosed->info().closeTime.time_since_epoch().count());
        jvResult[jss::fee_ref] = lpClosed->fees().units.jsonClipped();
        jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
        jvResult[jss::reserve_base] =
            lpClosed->fees().accountReserve(0).jsonClipped();
        jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();

            jvResult[jss::validated_ledgers] =

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

    jvResult[jss::random] = to_string(uRandom);

    jvResult[jss::load_base] = feeTrack.getLoadBase();
    jvResult[jss::load_factor] = feeTrack.getLoadFactor();
    jvResult[jss::hostid] = getHostId(admin);
    jvResult[jss::pubkey_node] =

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)
    if (map.find(pInfo->getSeq()) != map.end())
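// getBookPage: two implementations selected by USE_NEW_BOOK_PAGE. Both
// walk an order book's directory pages, compute each offer's funded
// amounts (caching per-owner balances in umBalance and honoring global
// freeze and transfer rates), and emit JSON offers with
// taker_gets_funded/taker_pays_funded when an offer is underfunded.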
#ifndef USE_NEW_BOOK_PAGE

    unsigned int iLimit,

    uint256 uTipIndex = uBookBase;

        stream << "getBookPage:" << book;
        stream << "getBookPage: uBookBase=" << uBookBase;
        stream << "getBookPage: uBookEnd=" << uBookEnd;
        stream << "getBookPage: uTipIndex=" << uTipIndex;

    bool bDirectAdvance = true;

    unsigned int uBookEntry;

    while (!bDone && iLimit-- > 0)

            bDirectAdvance = false;

            auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);

                sleOfferDir.reset();

                uTipIndex = sleOfferDir->key();

                cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);

                << "getBookPage: uTipIndex=" << uTipIndex;

                << "getBookPage: offerIndex=" << offerIndex;

            auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
            auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
            auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);

            bool firstOwnerOffer(true);

                saOwnerFunds = saTakerGets;

            else if (bGlobalFreeze)

                auto umBalanceEntry = umBalance.find(uOfferOwnerID);
                if (umBalanceEntry != umBalance.end())

                    saOwnerFunds = umBalanceEntry->second;
                    firstOwnerOffer = false;

                if (saOwnerFunds < beast::zero)

                    saOwnerFunds.clear();

            STAmount saOwnerFundsLimit = saOwnerFunds;

                saOwnerFundsLimit = divide(saOwnerFunds, offerRate);

            if (saOwnerFundsLimit >= saTakerGets)

                saTakerGetsFunded = saTakerGets;

                saTakerGetsFunded = saOwnerFundsLimit;

                saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);

                    saTakerGetsFunded, saDirRate, saTakerPays.issue()))
                    .setJson(jvOffer[jss::taker_pays_funded]);

                saOwnerFunds, multiply(saTakerGetsFunded, offerRate));

            umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;

                jvOf[jss::quality] = saDirRate.getText();

                if (firstOwnerOffer)
                    jvOf[jss::owner_funds] = saOwnerFunds.getText();

            if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))

                bDirectAdvance = true;

                << "getBookPage: offerIndex=" << offerIndex;
    unsigned int iLimit,

    MetaView lesActive(lpLedger, tapNONE, true);
    OrderBookIterator obIterator(lesActive, book);

    const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
        lesActive.isGlobalFrozen(book.in.account);

    while (iLimit-- > 0 && obIterator.nextOffer())

            auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
            auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
            auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
            STAmount saDirRate = obIterator.getCurrentRate();

                saOwnerFunds = saTakerGets;

            else if (bGlobalFreeze)

                auto umBalanceEntry = umBalance.find(uOfferOwnerID);

                if (umBalanceEntry != umBalance.end())

                    saOwnerFunds = umBalanceEntry->second;

                    saOwnerFunds = lesActive.accountHolds(

                    if (saOwnerFunds.isNegative())

                        saOwnerFunds.zero();

            STAmount saTakerGetsFunded;
            STAmount saOwnerFundsLimit = saOwnerFunds;

                saOwnerFundsLimit = divide(saOwnerFunds, offerRate);

            if (saOwnerFundsLimit >= saTakerGets)

                saTakerGetsFunded = saTakerGets;

                saTakerGetsFunded = saOwnerFundsLimit;

                saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);

                    multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
                    .setJson(jvOffer[jss::taker_pays_funded]);

            STAmount saOwnerPays = (parityRate == offerRate)

                    saOwnerFunds, multiply(saTakerGetsFunded, offerRate));

            umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;

            if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)

                jvOf[jss::quality] = saDirRate.getText();
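// StateAccounting: tracks how long the server has spent in each operating
// mode and how many transitions occurred; initialSyncUs_ captures the
// time from process start to the first transition into FULL. json()
// renders the counters for server_info's state_accounting field.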
    auto const current = std::chrono::duration_cast<std::chrono::microseconds>(

    ++counters_[static_cast<std::size_t>(om)].transitions;

        counters_[static_cast<std::size_t>(om)].transitions == 1)

        initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
                             now - processStart_)

        std::chrono::duration_cast<std::chrono::microseconds>(now - start_);

    auto [counters, mode, start, initialSync] = getCounterData();
    auto const current = std::chrono::duration_cast<std::chrono::microseconds>(

        auto& state = obj[jss::state_accounting][states_[i]];
        state[jss::transitions] = std::to_string(counters[i].transitions);
        state[jss::duration_us] = std::to_string(counters[i].dur.count());

    obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
boost::asio::io_service& io_svc,

    return std::make_unique<NetworkOPsImp>(