#include <ripple/app/consensus/RCLConsensus.h>
#include <ripple/app/consensus/RCLValidations.h>
#include <ripple/app/ledger/AcceptedLedger.h>
#include <ripple/app/ledger/InboundLedgers.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/ledger/LedgerToJson.h>
#include <ripple/app/ledger/LocalTxs.h>
#include <ripple/app/ledger/OpenLedger.h>
#include <ripple/app/ledger/OrderBookDB.h>
#include <ripple/app/ledger/TransactionMaster.h>
#include <ripple/app/main/LoadManager.h>
#include <ripple/app/misc/AmendmentTable.h>
#include <ripple/app/misc/DeliverMax.h>
#include <ripple/app/misc/HashRouter.h>
#include <ripple/app/misc/LoadFeeTrack.h>
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/app/misc/Transaction.h>
#include <ripple/app/misc/TxQ.h>
#include <ripple/app/misc/ValidatorKeys.h>
#include <ripple/app/misc/ValidatorList.h>
#include <ripple/app/misc/impl/AccountTxPaging.h>
#include <ripple/app/rdb/backend/PostgresDatabase.h>
#include <ripple/app/rdb/backend/SQLiteDatabase.h>
#include <ripple/app/reporting/ReportingETL.h>
#include <ripple/app/tx/apply.h>
#include <ripple/basics/PerfLog.h>
#include <ripple/basics/SubmitSync.h>
#include <ripple/basics/UptimeClock.h>
#include <ripple/basics/mulDiv.h>
#include <ripple/basics/safe_cast.h>
#include <ripple/beast/rfc2616.h>
#include <ripple/beast/utility/rngfill.h>
#include <ripple/consensus/Consensus.h>
#include <ripple/consensus/ConsensusParms.h>
#include <ripple/crypto/RFC1751.h>
#include <ripple/crypto/csprng.h>
#include <ripple/json/MultivarJson.h>
#include <ripple/json/to_string.h>
#include <ripple/net/RPCErr.h>
#include <ripple/nodestore/DatabaseShard.h>
#include <ripple/overlay/Cluster.h>
#include <ripple/overlay/Overlay.h>
#include <ripple/overlay/predicates.h>
#include <ripple/protocol/BuildInfo.h>
#include <ripple/protocol/Feature.h>
#include <ripple/protocol/STParsedJSON.h>
#include <ripple/protocol/jss.h>
#include <ripple/resource/Fees.h>
#include <ripple/resource/ResourceManager.h>
#include <ripple/rpc/BookChanges.h>
#include <ripple/rpc/DeliveredAmount.h>
#include <ripple/rpc/ServerHandler.h>
#include <ripple/rpc/impl/RPCHelpers.h>
#include <boost/asio/ip/host_name.hpp>
#include <boost/asio/steady_timer.hpp>
    std::chrono::steady_clock::time_point start_ =
        std::chrono::steady_clock::now();

        return !(*this != b);
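// --- illustrative sketch (not from rippled; names below are hypothetical) ---
// The fragment above defines operator== by negating operator!=, a common idiom
// when operator!= already enumerates every member that matters. A minimal
// stand-alone example of the same shape:
#include <cstdint>

struct FeeSnapshot
{
    std::uint32_t loadFactor = 1;
    std::uint32_t loadBase = 256;

    bool
    operator!=(FeeSnapshot const& b) const
    {
        return loadFactor != b.loadFactor || loadBase != b.loadBase;
    }

    bool
    operator==(FeeSnapshot const& b) const
    {
        // Equality is simply "not unequal", so both operators stay in sync.
        return !(*this != b);
    }
};
// --- end sketch --------------------------------------------------------------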
    boost::asio::io_service& io_svc,

        app_.logs().journal("FeeVote")),

        app.getInboundTransactions(),
        beast::get_abstract_clock<std::chrono::steady_clock>(),
        app_.logs().journal("LedgerConsensus"))

    getServerInfo(bool human, bool admin, bool counters) override;

        TER result) override;

        bool historyOnly) override;

        bool historyOnly) override;

    boost::system::error_code ec;

        << "NetworkOPs: heartbeatTimer cancel error: "

        << "NetworkOPs: clusterTimer cancel error: "

        << "NetworkOPs: accountHistoryTxTimer cancel error: "

        << "NetworkOPs: batchApplyTimer cancel error: "

    using namespace std::chrono_literals;

    boost::asio::steady_timer& timer,
    template <class Handler>
        Handler const& handler,
        : hook(collector->make_hook(handler))
              "Disconnected_duration"))
              "Connected_duration"))
              collector->make_gauge("State_Accounting", "Syncing_duration"))
              "Tracking_duration"))
              collector->make_gauge("State_Accounting", "Full_duration"))
              "Disconnected_transitions"))
              "Connected_transitions"))
              "Syncing_transitions"))
              "Tracking_transitions"))
              collector->make_gauge("State_Accounting", "Full_transitions"))

    {"disconnected", "connected", "syncing", "tracking", "full"}};
    static std::string const hostname = boost::asio::ip::host_name();

    static std::string const shroudedHostId = [this]() {

    return shroudedHostId;
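// --- illustrative sketch (not from rippled; names below are hypothetical) ---
// The State_Accounting gauges above record, per operating mode, how long the
// server has spent in that mode and how many times it has entered it. A
// stand-alone model of that bookkeeping, using plain std::chrono instead of
// the insight collector:
#include <array>
#include <chrono>
#include <cstddef>
#include <cstdint>

enum class OpMode : std::size_t { disconnected, connected, syncing, tracking, full };

struct ModeAccounting
{
    struct Counter
    {
        std::uint64_t transitions = 0;
        std::chrono::microseconds dur{0};
    };

    std::array<Counter, 5> counters;
    OpMode mode = OpMode::disconnected;
    std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();

    // Close out the time spent in the current mode and switch to `om`.
    void
    switchMode(OpMode om)
    {
        auto const now = std::chrono::steady_clock::now();
        counters[static_cast<std::size_t>(mode)].dur +=
            std::chrono::duration_cast<std::chrono::microseconds>(now - start);
        ++counters[static_cast<std::size_t>(om)].transitions;
        mode = om;
        start = now;
    }
};
// --- end sketch --------------------------------------------------------------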
    boost::asio::steady_timer& timer,

        [this, onExpire, onError](boost::system::error_code const& e) {
            if ((e.value() == boost::system::errc::success) &&
                (!m_job_queue.isStopped()))

            if (e.value() != boost::system::errc::success &&
                e.value() != boost::asio::error::operation_aborted)

                JLOG(m_journal.error())
                    << "Timer got error '" << e.message()
                    << "'. Restarting timer.";

    timer.expires_from_now(expiry_time);
    timer.async_wait(std::move(*optionalCountedHandler));
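// --- illustrative sketch (not from rippled; names below are hypothetical) ---
// The helper above arms a steady_timer and, in the completion handler, treats
// "fired", "cancelled" (operation_aborted) and "real error" differently. A
// self-contained version of that pattern with plain boost::asio:
#include <boost/asio.hpp>
#include <chrono>
#include <functional>
#include <iostream>

void
armTimer(
    boost::asio::steady_timer& timer,
    std::chrono::milliseconds expiry,
    std::function<void()> onExpire)
{
    timer.expires_after(expiry);
    timer.async_wait([onExpire](boost::system::error_code const& ec) {
        if (!ec)
            onExpire();  // fired normally
        else if (ec != boost::asio::error::operation_aborted)
            std::cerr << "timer error: " << ec.message() << '\n';
        // operation_aborted means the timer was cancelled; do nothing.
    });
}

int
main()
{
    boost::asio::io_context io;
    boost::asio::steady_timer timer(io);
    armTimer(timer, std::chrono::milliseconds(100), [] {
        std::cout << "heartbeat\n";
    });
    io.run();
}
// --- end sketch --------------------------------------------------------------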
    m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
        processHeartbeatTimer();

        [this]() { setHeartbeatTimer(); });

NetworkOPsImp::setClusterTimer()

    using namespace std::chrono_literals;

        processClusterTimer();

        [this]() { setClusterTimer(); });

    JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "

    using namespace std::chrono_literals;

        accountHistoryTxTimer_,

        [this, subInfo]() { addAccountHistoryJob(subInfo); },
        [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });

NetworkOPsImp::setBatchApplyTimer()

    using namespace std::chrono_literals;

    auto constexpr batchInterval = 100ms;

    if (mTransactions.size() && mDispatchState == DispatchState::none)

        if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
                transactionBatch(false);

            mDispatchState = DispatchState::scheduled;

            setBatchApplyTimer();

        [this]() { setBatchApplyTimer(); });
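// --- illustrative sketch (not from rippled; names below are hypothetical) ---
// setBatchApplyTimer() above either dispatches a batch job immediately (when
// transactions are queued and nothing is scheduled) or re-arms the timer. A
// stand-alone model of that "coalesce work, dispatch once" pattern:
#include <functional>
#include <mutex>
#include <vector>

enum class DispatchState { none, scheduled, running };

struct BatchDispatcher
{
    std::mutex m;
    std::vector<int> pending;  // stand-in for queued transactions
    DispatchState state = DispatchState::none;

    // Returns a callable to run outside the lock, or an empty function if
    // nothing needs to be dispatched right now.
    std::function<void()>
    maybeDispatch(std::function<void(std::vector<int>)> worker)
    {
        std::lock_guard lock(m);
        if (pending.empty() || state != DispatchState::none)
            return {};
        state = DispatchState::scheduled;
        auto batch = std::move(pending);
        pending.clear();
        return [worker = std::move(worker), batch = std::move(batch)]() mutable {
            worker(std::move(batch));
        };
    }
};
// --- end sketch --------------------------------------------------------------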
NetworkOPsImp::processHeartbeatTimer()

        std::size_t const numPeers = app_.overlay().size();

        if (numPeers < minPeerCount_)

            if (mMode != OperatingMode::DISCONNECTED)

                setMode(OperatingMode::DISCONNECTED);
                JLOG(m_journal.warn())
                    << "Node count (" << numPeers << ") has fallen "
                    << "below required minimum (" << minPeerCount_ << ").";

            setHeartbeatTimer();

        if (mMode == OperatingMode::DISCONNECTED)

            setMode(OperatingMode::CONNECTED);
            JLOG(m_journal.info())
                << "Node count (" << numPeers << ") is sufficient.";

        if (mMode == OperatingMode::SYNCING)
            setMode(OperatingMode::SYNCING);
        else if (mMode == OperatingMode::CONNECTED)
            setMode(OperatingMode::CONNECTED);

    mConsensus.timerEntry(app_.timeKeeper().closeTime());

    if (mLastConsensusPhase != currPhase)

        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;

    setHeartbeatTimer();

NetworkOPsImp::processClusterTimer()

    if (app_.cluster().size() == 0)

    using namespace std::chrono_literals;

    bool const update = app_.cluster().update(
        app_.nodeIdentity().first,
        (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
            ? app_.getFeeTrack().getLocalFee()
        app_.timeKeeper().now());

        JLOG(m_journal.debug()) << "Too soon to send cluster update";

    protocol::TMCluster cluster;
    app_.cluster().for_each([&cluster](ClusterNode const& node) {
        protocol::TMClusterNode& n = *cluster.add_clusternodes();
        n.set_nodename(node.name());

    for (auto& item : gossip.items)

        protocol::TMLoadSource& node = *cluster.add_loadsources();
        node.set_cost(item.balance);

    app_.overlay().foreach(send_if(
        std::make_shared<Message>(cluster, protocol::mtCLUSTER),

    if (mode == OperatingMode::FULL && admin)

        auto const consensusMode = mConsensus.mode();
        if (consensusMode != ConsensusMode::wrongLedger)

            if (consensusMode == ConsensusMode::proposing)

            if (mConsensus.validating())
                return "validating";
    if (isNeedNetworkLedger())

    auto const txid = trans->getTransactionID();
    auto const flags = app_.getHashRouter().getFlags(txid);

    if ((flags & SF_BAD) != 0)

        JLOG(m_journal.warn()) << "Submitted transaction cached bad";

            app_.getHashRouter(),

            m_ledgerMaster.getValidatedRules(),

        if (validity != Validity::Valid)

            JLOG(m_journal.warn())
                << "Submitted transaction invalid: " << reason;

        JLOG(m_journal.warn())
            << "Exception checking transaction " << txid << ": " << ex.what();

    auto tx = std::make_shared<Transaction>(trans, reason, app_);

    m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {

            t, false, RPC::SubmitSync::async, false, FailHard::no);
NetworkOPsImp::processTransaction(

    auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
    auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());

    if ((newFlags & SF_BAD) != 0)

        JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
        transaction->setStatus(INVALID);

    auto const view = m_ledgerMaster.getCurrentLedger();

        app_.getHashRouter(),
        *transaction->getSTransaction(),

    assert(validity == Validity::Valid);

    if (validity == Validity::SigBad)

        JLOG(m_journal.trace()) << "Transaction has bad signature: " << reason;
        transaction->setStatus(INVALID);
        app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);

    app_.getMasterTransaction().canonicalize(&transaction);

        if (!transaction->getApplying())

            transaction->setApplying();
            mTransactions.push_back(

        if (mDispatchState == DispatchState::running)

    } while (transaction->getApplying());

        transaction = std::make_shared<Transaction>(*transaction);

            lock, [&transaction] { return !transaction->getApplying(); });
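// --- illustrative sketch (not from rippled; names below are hypothetical) ---
// The synchronous submit path above blocks until the batch thread clears the
// transaction's "applying" flag, waiting on a condition variable with a
// predicate. A self-contained model of that hand-off:
#include <condition_variable>
#include <mutex>
#include <thread>

struct PendingTx
{
    std::mutex m;
    std::condition_variable cv;
    bool applying = true;

    void
    waitUntilApplied()
    {
        std::unique_lock lock(m);
        cv.wait(lock, [this] { return !applying; });
    }

    void
    markApplied()
    {
        {
            std::lock_guard lock(m);
            applying = false;
        }
        cv.notify_all();
    }
};

int
main()
{
    PendingTx tx;
    std::thread worker([&tx] { tx.markApplied(); });
    tx.waitUntilApplied();  // returns once the worker clears the flag
    worker.join();
}
// --- end sketch --------------------------------------------------------------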
NetworkOPsImp::transactionBatch(bool const drain)

    if (mDispatchState == DispatchState::running || mTransactions.empty())

    while (drain && mTransactions.size());

    setBatchApplyTimer();

    assert(!mTransactions.empty());
    assert(mDispatchState != DispatchState::running);

    mTransactions.swap(transactions);
    mDispatchState = DispatchState::running;

    bool changed = false;

        m_ledgerMaster.peekMutex(), std::defer_lock};

            if (e.failType == FailHard::yes)

            auto const result = app_.getTxQ().apply(
                app_, view, e.transaction->getSTransaction(), flags, j);
            e.result = result.first;
            e.applied = result.second;
            changed = changed || result.second;

    if (auto const l = m_ledgerMaster.getValidatedLedger())
        validatedLedgerIndex = l->info().seq;

    auto newOL = app_.openLedger().current();

        e.transaction->clearSubmitResult();

            pubProposedTransaction(
                newOL, e.transaction->getSTransaction(), e.result);
            e.transaction->setApplied();

        e.transaction->setResult(e.result);

            app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);

            JLOG(m_journal.info())
                << "TransactionResult: " << token << ": " << human;

        bool addLocal = e.local;

            JLOG(m_journal.debug())
                << "Transaction is now included in open ledger";
            e.transaction->setStatus(INCLUDED);

            auto const& txCur = e.transaction->getSTransaction();
            auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);

                auto t = std::make_shared<Transaction>(trans, reason, app_);
                submit_held.emplace_back(t, false, false, FailHard::no);

            JLOG(m_journal.info()) << "Transaction is obsolete";
            e.transaction->setStatus(OBSOLETE);

            JLOG(m_journal.debug())
                << "Transaction is likely to claim a"
                << " fee, but is queued until fee drops";
            e.transaction->setStatus(HELD);

            m_ledgerMaster.addHeldTransaction(e.transaction);
            e.transaction->setQueued();
            e.transaction->setKept();

            if (e.failType != FailHard::yes)

                JLOG(m_journal.debug())
                    << "Transaction should be held: " << e.result;
                e.transaction->setStatus(HELD);
                m_ledgerMaster.addHeldTransaction(e.transaction);
                e.transaction->setKept();

            JLOG(m_journal.debug())
                << "Status other than success " << e.result;
            e.transaction->setStatus(INVALID);

        auto const enforceFailHard =
            e.failType == FailHard::yes && !isTesSuccess(e.result);

        if (addLocal && !enforceFailHard)

            m_localTX->push_back(
                m_ledgerMaster.getCurrentLedgerIndex(),
                e.transaction->getSTransaction());
            e.transaction->setKept();

            ((mMode != OperatingMode::FULL) &&
             (e.failType != FailHard::yes) && e.local) ||

                app_.getHashRouter().shouldRelay(e.transaction->getID());

                protocol::TMTransaction tx;

                e.transaction->getSTransaction()->add(s);
                tx.set_rawtransaction(s.data(), s.size());
                tx.set_status(protocol::tsCURRENT);
                tx.set_receivetimestamp(
                    app_.timeKeeper().now().time_since_epoch().count());

                app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
                e.transaction->setBroadcast();

        if (validatedLedgerIndex)

            auto [fee, accountSeq, availableSeq] =
                app_.getTxQ().getTxRequiredFeeAndSeq(
                    *newOL, e.transaction->getSTransaction());
            e.transaction->setCurrentLedgerState(
                *validatedLedgerIndex, fee, accountSeq, availableSeq);

        e.transaction->clearApplying();

    if (!submit_held.empty())

        if (mTransactions.empty())
            mTransactions.swap(submit_held);

            for (auto& e : submit_held)
                mTransactions.push_back(std::move(e));

    mDispatchState = DispatchState::none;
NetworkOPsImp::getOwnerInfo(

    auto root = keylet::ownerDir(account);
    auto sleNode = lpLedger->read(keylet::page(root));

        for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))

            auto sleCur = lpLedger->read(keylet::child(uDirEntry));

            switch (sleCur->getType())

                    if (!jvObjects.isMember(jss::offers))
                        jvObjects[jss::offers] =

                    jvObjects[jss::offers].append(
                        sleCur->getJson(JsonOptions::none));

                    if (!jvObjects.isMember(jss::ripple_lines))

                        jvObjects[jss::ripple_lines] =

                    jvObjects[jss::ripple_lines].append(
                        sleCur->getJson(JsonOptions::none));

            sleNode = lpLedger->read(keylet::page(root, uNodeDir));

NetworkOPsImp::isBlocked()

    return isAmendmentBlocked() || isUNLBlocked();

NetworkOPsImp::isAmendmentBlocked()

    return amendmentBlocked_;

NetworkOPsImp::setAmendmentBlocked()

    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED);

NetworkOPsImp::isAmendmentWarned()

    return !amendmentBlocked_ && amendmentWarned_;

NetworkOPsImp::setAmendmentWarned()

    amendmentWarned_ = true;

NetworkOPsImp::clearAmendmentWarned()

    amendmentWarned_ = false;

NetworkOPsImp::isUNLBlocked()

NetworkOPsImp::setUNLBlocked()

    setMode(OperatingMode::CONNECTED);

NetworkOPsImp::clearUNLBlocked()

    unlBlocked_ = false;
NetworkOPsImp::checkLastClosedLedger(

    JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";

    auto const ourClosed = m_ledgerMaster.getClosedLedger();

    uint256 closedLedger = ourClosed->info().hash;
    uint256 prevClosedLedger = ourClosed->info().parentHash;
    JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
    JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;

    auto& validations = app_.getValidations();
    JLOG(m_journal.debug())
        << "ValidationTrie " << Json::Compact(validations.getJsonTrie());

    peerCounts[closedLedger] = 0;
    if (mMode >= OperatingMode::TRACKING)
        peerCounts[closedLedger]++;

    for (auto& peer : peerList)

        uint256 peerLedger = peer->getClosedLedgerHash();

            ++peerCounts[peerLedger];

    for (auto const& it : peerCounts)
        JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;

    uint256 preferredLCL = validations.getPreferredLCL(
        m_ledgerMaster.getValidLedgerIndex(),

    bool switchLedgers = preferredLCL != closedLedger;

        closedLedger = preferredLCL;

    if (switchLedgers && (closedLedger == prevClosedLedger))

        JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
        networkClosed = ourClosed->info().hash;
        switchLedgers = false;

        networkClosed = closedLedger;

    auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);

        consensus = app_.getInboundLedgers().acquire(
            closedLedger, 0, InboundLedger::Reason::CONSENSUS);

        (!m_ledgerMaster.canBeCurrent(consensus) ||
         !m_ledgerMaster.isCompatible(
             *consensus, m_journal.debug(), "Not switching")))

        networkClosed = ourClosed->info().hash;

    JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
    JLOG(m_journal.info()) << "Our LCL: " << getJson({*ourClosed, {}});
    JLOG(m_journal.info()) << "Net LCL " << closedLedger;

    if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))

        setMode(OperatingMode::CONNECTED);

        switchLastClosedLedger(consensus);
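// --- illustrative sketch (not from rippled; names below are hypothetical) ---
// checkLastClosedLedger() above tallies how many peers report each closed-
// ledger hash before deciding whether to switch away from our own. The real
// decision also consults the validation trie (getPreferredLCL); this sketch
// models only the peer tally, with std::string standing in for uint256:
#include <map>
#include <string>
#include <vector>

std::string
mostReportedLedger(
    std::string const& ourClosed,
    bool countOurself,
    std::vector<std::string> const& peerClosed)
{
    std::map<std::string, int> counts;
    counts[ourClosed] = countOurself ? 1 : 0;
    for (auto const& hash : peerClosed)
        if (!hash.empty())
            ++counts[hash];

    auto best = counts.begin();
    for (auto it = counts.begin(); it != counts.end(); ++it)
        if (it->second > best->second)
            best = it;
    return best->first;
}
// --- end sketch --------------------------------------------------------------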
NetworkOPsImp::switchLastClosedLedger(

    JLOG(m_journal.error())
        << "JUMP last closed ledger to " << newLCL->info().hash;

    clearNeedNetworkLedger();

    app_.getTxQ().processClosedLedger(app_, *newLCL, true);

    auto retries = m_localTX->getTxSet();
    auto const lastVal = app_.getLedgerMaster().getValidatedLedger();

        rules.emplace(app_.config().features);

    app_.openLedger().accept(

            return app_.getTxQ().accept(app_, view);

    m_ledgerMaster.switchLCL(newLCL);

    protocol::TMStatusChange s;
    s.set_newevent(protocol::neSWITCHED_LEDGER);
    s.set_ledgerseq(newLCL->info().seq);
    s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
    s.set_ledgerhashprevious(
        newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
    s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());

    app_.overlay().foreach(
        send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));

NetworkOPsImp::beginConsensus(uint256 const& networkClosed)

    auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();

    JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
                           << " with LCL " << closingInfo.parentHash;

    auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);

        if (mMode == OperatingMode::FULL)

            JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
            setMode(OperatingMode::TRACKING);

    assert(prevLedger->info().hash == closingInfo.parentHash);

        closingInfo.parentHash ==
        m_ledgerMaster.getClosedLedger()->info().hash);

    app_.validators().setNegativeUNL(prevLedger->negativeUNL());
    TrustChanges const changes = app_.validators().updateTrusted(
        app_.getValidations().getCurrentNodeIDs(),
        closingInfo.parentCloseTime,

        app_.getHashRouter());

    if (!changes.added.empty() || !changes.removed.empty())

        app_.getValidations().trustChanged(changes.added, changes.removed);

    app_.getAmendmentTable().trustChanged(
        app_.validators().getQuorumKeys().second);

    mConsensus.startRound(
        app_.timeKeeper().closeTime(),

    if (mLastConsensusPhase != currPhase)

        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;

    JLOG(m_journal.debug()) << "Initiating consensus engine";

    return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);

        protocol::TMHaveTransactionSet msg;
        msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
        msg.set_status(protocol::tsHAVE);
        app_.overlay().foreach(
            send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));

    mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
NetworkOPsImp::endConsensus()

    uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;

    for (auto const& it : app_.overlay().getActivePeers())

        if (it && (it->getClosedLedgerHash() == deadLedger))

            JLOG(m_journal.trace()) << "Killing obsolete peer status";

    checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);

    if (networkClosed.isZero())

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::SYNCING)) &&

        if (!needNetworkLedger_)
            setMode(OperatingMode::TRACKING);

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::TRACKING)) &&

        auto current = m_ledgerMaster.getCurrentLedger();
        if (app_.timeKeeper().now() < (current->info().parentCloseTime +
                                       2 * current->info().closeTimeResolution))

            setMode(OperatingMode::FULL);

    beginConsensus(networkClosed);

NetworkOPsImp::consensusViewChange()

    if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))

        setMode(OperatingMode::CONNECTED);
    if (!mStreamMaps[sManifests].empty())

        jvObj[jss::type] = "manifestReceived";

        jvObj[jss::signing_key] =

            jvObj[jss::signature] = strHex(*sig);

            jvObj[jss::domain] = mo.domain;

        for (auto i = mStreamMaps[sManifests].begin();
             i != mStreamMaps[sManifests].end();)

            if (auto p = i->second.lock())

                p->send(jvObj, true);

                i = mStreamMaps[sManifests].erase(i);
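// --- illustrative sketch (not from rippled; names below are hypothetical) ---
// Each pub* routine above walks a map of weak_ptr subscribers: when lock()
// succeeds the message is delivered, otherwise the dead entry is erased in
// place. A stand-alone model of that sweep:
#include <cstdint>
#include <map>
#include <memory>
#include <string>

struct Subscriber
{
    void
    send(std::string const& /*message*/)
    {
    }
};

void
broadcast(
    std::map<std::uint64_t, std::weak_ptr<Subscriber>>& subscribers,
    std::string const& message)
{
    for (auto i = subscribers.begin(); i != subscribers.end();)
    {
        if (auto p = i->second.lock())
        {
            p->send(message);
            ++i;
        }
        else
        {
            // The subscriber went away; drop its slot while iterating.
            i = subscribers.erase(i);
        }
    }
}
// --- end sketch --------------------------------------------------------------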
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(

    , loadBaseServer{loadFeeTrack.getLoadBase()}

    , em{std::move(escalationMetrics)}

        em.has_value() != b.em.has_value())

            em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
            em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
            em->referenceFeeLevel != b.em->referenceFeeLevel);

    jvObj[jss::type] = "serverStatus";

    jvObj[jss::load_base] = f.loadBaseServer;
    jvObj[jss::load_factor_server] = f.loadFactorServer;
    jvObj[jss::base_fee] = f.baseFee.jsonClipped();

            safe_cast<std::uint64_t>(f.loadFactorServer),
            f.em->openLedgerFeeLevel,
            f.em->referenceFeeLevel)

        jvObj[jss::load_factor] = trunc32(loadFactor);
        jvObj[jss::load_factor_fee_escalation] =
            f.em->openLedgerFeeLevel.jsonClipped();
        jvObj[jss::load_factor_fee_queue] =
            f.em->minProcessingFeeLevel.jsonClipped();
        jvObj[jss::load_factor_fee_reference] =
            f.em->referenceFeeLevel.jsonClipped();

        jvObj[jss::load_factor] = f.loadFactorServer;

            p->send(jvObj, true);
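// --- illustrative sketch (not from rippled; names below are hypothetical) ---
// The serverStatus stream above reports a single "load_factor" that is the
// larger of the server's local load fee and the open-ledger fee escalation,
// both expressed in load_base units. A stand-alone model of that arithmetic
// (the real code uses FeeLevel types and mulDiv for overflow-safe scaling):
#include <algorithm>
#include <cstdint>

std::uint64_t
combinedLoadFactor(
    std::uint64_t loadFactorServer,    // local load fee, in loadBase units
    std::uint64_t loadBase,            // e.g. 256
    std::uint64_t openLedgerFeeLevel,  // escalated open-ledger fee level
    std::uint64_t referenceFeeLevel)   // baseline fee level
{
    // Scale the escalated fee level into loadBase units (truncating division;
    // overflow handling is omitted in this sketch).
    std::uint64_t const escalated =
        openLedgerFeeLevel * loadBase / referenceFeeLevel;
    return std::max(loadFactorServer, escalated);
}
// --- end sketch --------------------------------------------------------------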
    if (!streamMap.empty())

        jvObj[jss::type] = "consensusPhase";
        jvObj[jss::consensus] = to_string(phase);

        for (auto i = streamMap.begin(); i != streamMap.end();)

            if (auto p = i->second.lock())

                p->send(jvObj, true);

                i = streamMap.erase(i);

    auto const signerPublic = val->getSignerPublic();

    jvObj[jss::type] = "validationReceived";
    jvObj[jss::validation_public_key] =

    jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
    jvObj[jss::signature] = strHex(val->getSignature());
    jvObj[jss::full] = val->isFull();
    jvObj[jss::flags] = val->getFlags();

    jvObj[jss::data] = strHex(val->getSerializer().slice());

    if (auto cookie = (*val)[~sfCookie])

        jvObj[jss::validated_hash] = strHex(*hash);

    auto const masterKey =

    if (masterKey != signerPublic)

        jvObj[jss::ledger_index] = *seq;

        for (auto const& amendment : val->getFieldV256(sfAmendments))

        jvObj[jss::close_time] = *closeTime;

    if (auto const loadFee = (*val)[~sfLoadFee])
        jvObj[jss::load_fee] = *loadFee;

    if (auto const baseFee = val->at(~sfBaseFee))
        jvObj[jss::base_fee] = static_cast<double>(*baseFee);

        jvObj[jss::reserve_base] = *reserveBase;

        jvObj[jss::reserve_inc] = *reserveInc;

        baseFeeXRP && baseFeeXRP->native())
        jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();

        reserveBaseXRP && reserveBaseXRP->native())
        jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();

        reserveIncXRP && reserveIncXRP->native())
        jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();

    visit<RPC::apiMinimumSupportedVersion, RPC::apiMaximumValidVersion>(

            if (jvTx.isMember(jss::ledger_index) && apiVersion < 2)

                jvTx[jss::ledger_index] =
                    std::to_string(jvTx[jss::ledger_index].asUInt());

        if (auto p = i->second.lock())

    jvObj[jss::type] = "peerStatusChange";

            p->send(jvObj, true);
    using namespace std::chrono_literals;

        << "recvValidation " << val->getLedgerHash() << " from " << source;

            "This server is amendment blocked, and must be updated to be "
            "able to stay in sync with the network.";

            "This server has an expired validator list. validators.txt "
            "may be incorrectly configured or some [validator_list_sites] "
            "may be unreachable.";

            "One or more unsupported amendments have reached majority. "
            "Upgrade to the latest version before they are activated "
            "to avoid being amendment blocked.";

        if (auto const expected =

            d[jss::expected_date] = expected->time_since_epoch().count();
            d[jss::expected_date_UTC] = to_string(*expected);

    if (warnings.size())
        info[jss::warnings] = std::move(warnings);
    info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(

        info[jss::network_ledger] = "waiting";

    info[jss::validation_quorum] =

            info[jss::node_size] = "tiny";

            info[jss::node_size] = "small";

            info[jss::node_size] = "medium";

            info[jss::node_size] = "large";

            info[jss::node_size] = "huge";

            info[jss::validator_list_expires] =
                safe_cast<Json::UInt>(when->time_since_epoch().count());

            info[jss::validator_list_expires] = 0;

            if (*when == TimeKeeper::time_point::max())

                x[jss::expiration] = "never";
                x[jss::status] = "active";

                    x[jss::status] = "active";

                    x[jss::status] = "expired";

            x[jss::status] = "unknown";
            x[jss::expiration] = "unknown";

    info[jss::io_latency_ms] =

        info[jss::pubkey_validator] = toBase58(

        info[jss::pubkey_validator] = "none";

        info[jss::counters][jss::nodestore] = nodestore;

    info[jss::pubkey_node] =

        info[jss::amendment_blocked] = true;

            lastClose[jss::converge_time_s] =

            lastClose[jss::converge_time] =

        info[jss::last_close] = lastClose;

        info[jss::network_id] = static_cast<Json::UInt>(*netid);

    auto const escalationMetrics =

    auto const loadFactorFeeEscalation =
            escalationMetrics.openLedgerFeeLevel,
            escalationMetrics.referenceFeeLevel)

        safe_cast<std::uint64_t>(loadFactorServer),
        loadFactorFeeEscalation);

        info[jss::load_base] = loadBaseServer;
        info[jss::load_factor] = trunc32(loadFactor);
        info[jss::load_factor_server] = loadFactorServer;

        info[jss::load_factor_fee_escalation] =
            escalationMetrics.openLedgerFeeLevel.jsonClipped();
        info[jss::load_factor_fee_queue] =
            escalationMetrics.minProcessingFeeLevel.jsonClipped();
        info[jss::load_factor_fee_reference] =
            escalationMetrics.referenceFeeLevel.jsonClipped();

        info[jss::load_factor] =
            static_cast<double>(loadFactor) / loadBaseServer;

        if (loadFactorServer != loadFactor)
            info[jss::load_factor_server] =
                static_cast<double>(loadFactorServer) / loadBaseServer;

            if (fee != loadBaseServer)
                info[jss::load_factor_local] =
                    static_cast<double>(fee) / loadBaseServer;

            if (fee != loadBaseServer)
                info[jss::load_factor_net] =
                    static_cast<double>(fee) / loadBaseServer;

            if (fee != loadBaseServer)
                info[jss::load_factor_cluster] =
                    static_cast<double>(fee) / loadBaseServer;

        if (escalationMetrics.openLedgerFeeLevel !=
                escalationMetrics.referenceFeeLevel &&
            (admin || loadFactorFeeEscalation != loadFactor))
            info[jss::load_factor_fee_escalation] =
                escalationMetrics.openLedgerFeeLevel.decimalFromReference(
                    escalationMetrics.referenceFeeLevel);
        if (escalationMetrics.minProcessingFeeLevel !=
            escalationMetrics.referenceFeeLevel)
            info[jss::load_factor_fee_queue] =
                escalationMetrics.minProcessingFeeLevel
                    .decimalFromReference(
                        escalationMetrics.referenceFeeLevel);

        XRPAmount const baseFee = lpClosed->fees().base;

        l[jss::seq] = Json::UInt(lpClosed->info().seq);
        l[jss::hash] = to_string(lpClosed->info().hash);

            l[jss::reserve_base] =
                lpClosed->fees().accountReserve(0).jsonClipped();
            l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();

                lpClosed->info().closeTime.time_since_epoch().count());

            l[jss::reserve_base_xrp] =
                lpClosed->fees().accountReserve(0).decimalXRP();
            l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();

            std::abs(closeOffset.count()) >= 60)
            l[jss::close_time_offset] =

#if RIPPLED_REPORTING

                Json::UInt(age < highAgeThreshold ? age.count() : 0);

            auto lCloseTime = lpClosed->info().closeTime;

            if (lCloseTime <= closeTime)

                using namespace std::chrono_literals;
                auto age = closeTime - lCloseTime;

                    Json::UInt(age < highAgeThreshold ? age.count() : 0);

            info[jss::validated_ledger] = l;

            info[jss::closed_ledger] = l;

        info[jss::published_ledger] = "none";
    else if (lpPublished->info().seq != lpClosed->info().seq)
        info[jss::published_ledger] = lpPublished->info().seq;

    info[jss::jq_trans_overflow] =

    info[jss::peer_disconnects] =

    info[jss::peer_disconnects_resources] =

        "http", "https", "peer", "ws", "ws2", "wss", "wss2"};

            !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
              port.admin_user.empty() && port.admin_password.empty()))

            for (auto const& p : proto)
                jv[jss::protocol].append(p);

        auto const optPort = grpcSection.get("port");
        if (optPort && grpcSection.get("ip"))

            jv[jss::port] = *optPort;

            jv[jss::protocol].append("grpc");

    info[jss::ports] = std::move(ports);
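// --- illustrative sketch (not from rippled; names below are hypothetical) ---
// The server-info "ports" section above is an array of objects, one per
// listening port, each with a "port" value and a "protocol" array. A
// stand-alone sketch of building that shape, assuming the jsoncpp Json::Value
// API (which matches the Json::Value usage elsewhere in this file):
#include <json/value.h>
#include <string>
#include <utility>
#include <vector>

Json::Value
describePorts(
    std::vector<std::pair<std::string, std::vector<std::string>>> const& ports)
{
    Json::Value result(Json::arrayValue);
    for (auto const& [port, protocols] : ports)
    {
        Json::Value jv(Json::objectValue);
        jv["port"] = port;
        jv["protocol"] = Json::Value(Json::arrayValue);
        for (auto const& p : protocols)
            jv["protocol"].append(p);
        result.append(jv);
    }
    return result;
}
// --- end sketch --------------------------------------------------------------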
        transJson(transaction, result, false, ledger, std::nullopt);

            if (jvObj[jss::validated].asBool())

                    p->send(jvObj, true);

        if (auto p = i->second.lock())

            p->send(jvObj, true);

        if (auto p = i->second.lock())

            p->send(jvObj, true);

    for (auto& jv : jvObj)

        else if (jv.isString())

    if (jvObj.isMember(jss::transaction))

            << __func__ << " : "
            << "error parsing json for accounts affected";

        for (auto const& affectedAccount : accounts)

                auto it = simiIt->second.begin();

                while (it != simiIt->second.end())

                        it = simiIt->second.erase(it);

        << " iProposed=" << iProposed;

    if (!notify.empty())

            isrListener->send(jvObj, true);
        alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);

            lpAccepted->info().hash, alpAccepted);

    assert(alpAccepted->getLedger().get() == lpAccepted.get());

        << "Publishing ledger " << lpAccepted->info().seq << " "
        << lpAccepted->info().hash;

            jvObj[jss::type] = "ledgerClosed";
            jvObj[jss::ledger_index] = lpAccepted->info().seq;
            jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);

                lpAccepted->info().closeTime.time_since_epoch().count());

            jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
            jvObj[jss::reserve_base] =
                lpAccepted->fees().accountReserve(0).jsonClipped();
            jvObj[jss::reserve_inc] =
                lpAccepted->fees().increment.jsonClipped();

            jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());

                jvObj[jss::validated_ledgers] =

                    p->send(jvObj, true);

                    p->send(jvObj, true);

    static bool firstTime = true;

        for (auto& inner : outer.second)

            auto& subInfo = inner.second;
            if (subInfo.index_->separationLedgerSeq_ == 0)

                    alpAccepted->getLedger(), subInfo);

    for (auto const& accTx : *alpAccepted)

            lpAccepted, *accTx, accTx == *(--alpAccepted->end()));

        "reportConsensusStateChange->pubConsensus",
    jvObj[jss::type] = "transaction";

    jvObj[jss::transaction] =

            jvObj[jss::meta], *ledger, transaction, meta->get());

    if (!ledger->open())
        jvObj[jss::ledger_hash] = to_string(ledger->info().hash);

        jvObj[jss::ledger_index] = ledger->info().seq;
        jvObj[jss::transaction][jss::date] =
            ledger->info().closeTime.time_since_epoch().count();
        jvObj[jss::validated] = true;
        jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);

        jvObj[jss::validated] = false;
        jvObj[jss::ledger_current_index] = ledger->info().seq;

    jvObj[jss::status] = validated ? "closed" : "proposed";
    jvObj[jss::engine_result] = sToken;
    jvObj[jss::engine_result_code] = result;
    jvObj[jss::engine_result_message] = sHuman;

        auto const account = transaction->getAccountID(sfAccount);
        auto const amount = transaction->getFieldAmount(sfTakerGets);

        if (account != amount.issue().account)

                jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();

    visit<RPC::apiMinimumSupportedVersion, RPC::apiMaximumValidVersion>(

                jvTx[jss::transaction], transaction->getTxnType(), apiVersion);

                jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
                jvTx[jss::hash] = hash;

                jvTx[jss::transaction][jss::hash] = hash;

    auto const& stTxn = transaction.getTxn();

    auto const trResult = transaction.getResult();
    auto const currLedgerSeq = ledger->seq();

        for (auto const& affectedAccount : transaction.getAffected())

                auto it = simiIt->second.begin();

                while (it != simiIt->second.end())

                        it = simiIt->second.erase(it);

                auto it = simiIt->second.begin();
                while (it != simiIt->second.end())

                        it = simiIt->second.erase(it);

                auto& subs = histoIt->second;
                auto it = subs.begin();
                while (it != subs.end())

                    if (currLedgerSeq <= info.index_->separationLedgerSeq_)

                        it = subs.erase(it);

        << "pubAccountTransaction: "
        << "proposed=" << iProposed << ", accepted=" << iAccepted;

    if (!notify.empty() || !accountHistoryNotify.empty())

        auto const& stTxn = transaction.getTxn();

        auto const trResult = transaction.getResult();

            jvObj.set(jss::account_history_boundary, true);

            jvObj.isMember(jss::account_history_tx_stream) ==
            MultiApiJson::none);
        for (auto& info : accountHistoryNotify)

            auto& index = info.index_;
            if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
                jvObj.set(jss::account_history_tx_first, true);

            jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);

    for (auto const& affectedAccount : tx->getMentionedAccounts())

            auto it = simiIt->second.begin();

            while (it != simiIt->second.end())

                    it = simiIt->second.erase(it);

    JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;

    if (!notify.empty() || !accountHistoryNotify.empty())

            jvObj.isMember(jss::account_history_tx_stream) ==
            MultiApiJson::none);
        for (auto& info : accountHistoryNotify)

            auto& index = info.index_;
            if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
                jvObj.set(jss::account_history_tx_first, true);
            jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
    for (auto const& naAccountID : vnaAccountIDs)

            << "subAccount: account: " << toBase58(naAccountID);

        isrListener->insertSubAccountInfo(naAccountID, rt);

    for (auto const& naAccountID : vnaAccountIDs)

        auto simIterator = subMap.find(naAccountID);
        if (simIterator == subMap.end())

            usisElement[isrListener->getSeq()] = isrListener;

            subMap.insert(simIterator, make_pair(naAccountID, usisElement));

            simIterator->second[isrListener->getSeq()] = isrListener;

    for (auto const& naAccountID : vnaAccountIDs)

        isrListener->deleteSubAccountInfo(naAccountID, rt);

    for (auto const& naAccountID : vnaAccountIDs)

        auto simIterator = subMap.find(naAccountID);

        if (simIterator != subMap.end())

            simIterator->second.erase(uSeq);

            if (simIterator->second.empty())

                subMap.erase(simIterator);
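// --- illustrative sketch (not from rippled; names below are hypothetical) ---
// subAccount()/unsubAccount() above maintain a two-level map: account ->
// (subscriber sequence -> weak_ptr to the listener). A stand-alone model of
// that bookkeeping, with std::string standing in for the account ID type:
#include <cstdint>
#include <map>
#include <memory>
#include <string>

struct Listener
{
    std::uint64_t seq;
};

using SubMap =
    std::map<std::string, std::map<std::uint64_t, std::weak_ptr<Listener>>>;

void
subscribe(SubMap& subMap, std::string const& account, std::shared_ptr<Listener> const& l)
{
    // operator[] creates the inner map on the first subscription for the account.
    subMap[account][l->seq] = l;
}

void
unsubscribe(SubMap& subMap, std::string const& account, std::uint64_t seq)
{
    auto it = subMap.find(account);
    if (it == subMap.end())
        return;
    it->second.erase(seq);
    if (it->second.empty())
        subMap.erase(it);  // drop the account entry once the last listener goes
}
// --- end sketch --------------------------------------------------------------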
    enum DatabaseType { Postgres, Sqlite, None };
    static const auto databaseType = [&]() -> DatabaseType {
#ifdef RIPPLED_REPORTING

            return DatabaseType::Postgres;

        return DatabaseType::None;

            return DatabaseType::Sqlite;

        return DatabaseType::None;

            return DatabaseType::Sqlite;

        return DatabaseType::None;
    if (databaseType == DatabaseType::None)

            << "AccountHistory job for account "

        "AccountHistoryTxStream",
        [this, dbType = databaseType, subInfo]() {
            auto const& accountId = subInfo.index_->accountId_;
            auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
            auto& txHistoryIndex = subInfo.index_->historyTxIndex_;

                << "AccountHistory job for account " << toBase58(accountId)
                << " started. lastLedgerSeq=" << lastLedgerSeq;

                auto stx = tx->getSTransaction();
                if (stx->getAccountID(sfAccount) == accountId &&
                    stx->getSeqProxy().value() == 1)

                for (auto& node : meta->getNodes())

                        if (auto inner = dynamic_cast<const STObject*>(

                            inner->getAccountID(sfAccount) == accountId)

                bool unsubscribe) -> bool {

                    sptr->send(jvObj, true);

                bool unsubscribe) -> bool {

                    auto [txResult, status] = db->getAccountTx(args);

                            << "AccountHistory job for account "
                            << " getAccountTx failed";

                        std::get_if<RelationalDatabase::AccountTxs>(
                            &txResult.transactions);

                            << "AccountHistory job for account "
                            << " getAccountTx wrong data";

                        accountId, minLedger, maxLedger, marker, 0, true};
                    return db->newestAccountTxPage(options);

            while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)

                int feeChargeCount = 0;

                        << "AccountHistory job for account "
                        << toBase58(accountId) << " no InfoSub. Fee charged "
                        << feeChargeCount << " times.";

                auto startLedgerSeq =
                    (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);

                    << "AccountHistory job for account " << toBase58(accountId)
                    << ", working on ledger range [" << startLedgerSeq << ","
                    << lastLedgerSeq << "]";

                auto haveRange = [&]() -> bool {

                    auto haveSomeValidatedLedgers =
                        validatedMin, validatedMax);

                    return haveSomeValidatedLedgers &&
                        validatedMin <= startLedgerSeq &&
                        lastLedgerSeq <= validatedMax;

                        << "AccountHistory reschedule job for account "
                        << toBase58(accountId) << ", incomplete ledger range ["
                        << startLedgerSeq << "," << lastLedgerSeq << "]";

                while (!subInfo.index_->stopHistorical_)

                        getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);

                            << "AccountHistory job for account "
                            << toBase58(accountId) << " getMoreTxns failed.";

                    auto const& txns = dbResult->first;
                    marker = dbResult->second;
                    size_t num_txns = txns.size();
                    for (size_t i = 0; i < num_txns; ++i)

                        auto const& [tx, meta] = txns[i];

                                << "AccountHistory job for account "
                                << toBase58(accountId) << " empty tx or meta.";

                                << "AccountHistory job for account "
                                << toBase58(accountId) << " no ledger.";

                            tx->getSTransaction();

                                << "AccountHistory job for account "
                                << " getSTransaction failed.";

                        auto const trR = meta->getResultTER();

                            transJson(stTxn, trR, true, curTxLedger, mRef);

                            jss::account_history_tx_index, txHistoryIndex--);
                        if (i + 1 == num_txns ||
                            txns[i + 1].first->getLedger() != tx->getLedger())
                            jvTx.set(jss::account_history_boundary, true);

                        if (isFirstTx(tx, meta))

                            jvTx.set(jss::account_history_tx_first, true);
                            sendMultiApiJson(jvTx, false);

                                << "AccountHistory job for account "
                                << " done, found last tx.";

                            sendMultiApiJson(jvTx, false);

                            << "AccountHistory job for account "
                            << " paging, marker=" << marker->ledgerSeq << ":"

                if (!subInfo.index_->stopHistorical_)

                    lastLedgerSeq = startLedgerSeq - 1;
                    if (lastLedgerSeq <= 1)

                            << "AccountHistory job for account "
                            << " done, reached genesis ledger.";
    subInfo.index_->separationLedgerSeq_ = ledger->seq();
    auto const& accountId = subInfo.index_->accountId_;

    if (!ledger->exists(accountKeylet))

            << "subAccountHistoryStart, no account " << toBase58(accountId)
            << ", no need to add AccountHistory job.";

    if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)

                << "subAccountHistoryStart, genesis account "
                << " does not have tx, no need to add AccountHistory job.";

    subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
    subInfo.index_->haveHistorical_ = true;

        << "subAccountHistoryStart, add AccountHistory job: accountId="
        << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();

    if (!isrListener->insertSubAccountHistory(accountId))

            << "subAccountHistory, already subscribed to account "

        isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};

        inner.emplace(isrListener->getSeq(), ahi);

        simIterator->second.emplace(isrListener->getSeq(), ahi);

            << "subAccountHistory, no validated ledger yet, delay start";

    isrListener->deleteSubAccountHistory(account);

        auto& subInfoMap = simIterator->second;
        auto subInfoIter = subInfoMap.find(seq);
        if (subInfoIter != subInfoMap.end())
            subInfoIter->second.index_->stopHistorical_ = true;

            simIterator->second.erase(seq);
            if (simIterator->second.empty())

        << "unsubAccountHistory, account " << toBase58(account)
        << ", historyOnly = " << (historyOnly ? "true" : "false");

    listeners->addSubscriber(isrListener);

    listeners->removeSubscriber(uSeq);

        Throw<std::runtime_error>(
            "Operation only possible in STANDALONE mode.");

        jvResult[jss::ledger_index] = lpClosed->info().seq;
        jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);

            lpClosed->info().closeTime.time_since_epoch().count());

        jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
        jvResult[jss::reserve_base] =
            lpClosed->fees().accountReserve(0).jsonClipped();
        jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();

            jvResult[jss::validated_ledgers] =

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

    jvResult[jss::random] = to_string(uRandom);
    jvResult[jss::load_base] = feeTrack.getLoadBase();
    jvResult[jss::load_factor] = feeTrack.getLoadFactor();
    jvResult[jss::hostid] = getHostId(admin);
    jvResult[jss::pubkey_node] =

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

    if (map.find(pInfo->getSeq()) != map.end())
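// --- illustrative sketch (not from rippled; names below are hypothetical) ---
// The AccountHistory job earlier in this file pages backwards from the newest
// validated ledger in windows of roughly 1024 ledgers until it reaches ledger
// 2 or is told to stop. A stand-alone model of that windowing, returning the
// [start, last] ranges that would be processed, newest first:
#include <cstdint>
#include <utility>
#include <vector>

std::vector<std::pair<std::uint32_t, std::uint32_t>>
historyWindows(std::uint32_t lastLedgerSeq)
{
    std::vector<std::pair<std::uint32_t, std::uint32_t>> windows;
    while (lastLedgerSeq >= 2)
    {
        // Same clamp as the job above: never go below ledger 2.
        std::uint32_t const startLedgerSeq =
            lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2;
        windows.emplace_back(startLedgerSeq, lastLedgerSeq);
        if (startLedgerSeq <= 2)
            break;
        lastLedgerSeq = startLedgerSeq - 1;
    }
    return windows;
}
// --- end sketch --------------------------------------------------------------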
#ifndef USE_NEW_BOOK_PAGE

    unsigned int iLimit,

    uint256 uTipIndex = uBookBase;

        stream << "getBookPage:" << book;
        stream << "getBookPage: uBookBase=" << uBookBase;
        stream << "getBookPage: uBookEnd=" << uBookEnd;
        stream << "getBookPage: uTipIndex=" << uTipIndex;

    bool bDirectAdvance = true;

    unsigned int uBookEntry;

    while (!bDone && iLimit-- > 0)

            bDirectAdvance = false;

            auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);

                sleOfferDir.reset();

                uTipIndex = sleOfferDir->key();

                cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);

                    << "getBookPage: uTipIndex=" << uTipIndex;
                    << "getBookPage: offerIndex=" << offerIndex;

            auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
            auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
            auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);

            bool firstOwnerOffer(true);

                saOwnerFunds = saTakerGets;

            else if (bGlobalFreeze)

                auto umBalanceEntry = umBalance.find(uOfferOwnerID);
                if (umBalanceEntry != umBalance.end())

                    saOwnerFunds = umBalanceEntry->second;
                    firstOwnerOffer = false;

                    if (saOwnerFunds < beast::zero)

                        saOwnerFunds.clear();

                STAmount saOwnerFundsLimit = saOwnerFunds;

                    saOwnerFundsLimit = divide(saOwnerFunds, offerRate);

                if (saOwnerFundsLimit >= saTakerGets)

                    saTakerGetsFunded = saTakerGets;

                    saTakerGetsFunded = saOwnerFundsLimit;

                    saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);

                        saTakerGetsFunded, saDirRate, saTakerPays.issue()))
                        .setJson(jvOffer[jss::taker_pays_funded]);

                        saOwnerFunds, multiply(saTakerGetsFunded, offerRate));

                umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;

                    jvOf[jss::quality] = saDirRate.getText();

                    if (firstOwnerOffer)
                        jvOf[jss::owner_funds] = saOwnerFunds.getText();

            if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))

                bDirectAdvance = true;

                    << "getBookPage: offerIndex=" << offerIndex;
    unsigned int iLimit,

    MetaView lesActive(lpLedger, tapNONE, true);
    OrderBookIterator obIterator(lesActive, book);

    const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
        lesActive.isGlobalFrozen(book.in.account);

    while (iLimit-- > 0 && obIterator.nextOffer())

            auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
            auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
            auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
            STAmount saDirRate = obIterator.getCurrentRate();

                saOwnerFunds = saTakerGets;

            else if (bGlobalFreeze)

                auto umBalanceEntry = umBalance.find(uOfferOwnerID);

                if (umBalanceEntry != umBalance.end())

                    saOwnerFunds = umBalanceEntry->second;

                    saOwnerFunds = lesActive.accountHolds(

                    if (saOwnerFunds.isNegative())

                        saOwnerFunds.zero();

            STAmount saTakerGetsFunded;
            STAmount saOwnerFundsLimit = saOwnerFunds;

                saOwnerFundsLimit = divide(saOwnerFunds, offerRate);

            if (saOwnerFundsLimit >= saTakerGets)

                saTakerGetsFunded = saTakerGets;

                saTakerGetsFunded = saOwnerFundsLimit;

                saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);

                    multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
                    .setJson(jvOffer[jss::taker_pays_funded]);

            STAmount saOwnerPays = (parityRate == offerRate)

                    saOwnerFunds, multiply(saTakerGetsFunded, offerRate));

            umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;

            if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)

                jvOf[jss::quality] = saDirRate.getText();
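// --- illustrative sketch (not from rippled; names below are hypothetical) ---
// Both getBookPage() variants above cache each owner's remaining funds while
// walking the book, so later offers from the same owner are shown against what
// earlier offers have already consumed. A stand-alone model with plain
// integers (rippled uses STAmount and transfer-rate aware division):
#include <algorithm>
#include <cstdint>
#include <map>
#include <string>
#include <vector>

struct Offer
{
    std::string owner;
    std::int64_t takerGets;   // amount the offer promises to deliver
    std::int64_t ownerFunds;  // owner's balance when first seen
};

// Returns the funded takerGets for each offer, in book order.
std::vector<std::int64_t>
fundedAmounts(std::vector<Offer> const& offers)
{
    std::map<std::string, std::int64_t> remaining;  // owner -> unspent funds
    std::vector<std::int64_t> funded;
    for (auto const& offer : offers)
    {
        auto [it, firstOffer] = remaining.emplace(offer.owner, offer.ownerFunds);
        std::int64_t const available = std::max<std::int64_t>(it->second, 0);
        std::int64_t const takerGetsFunded = std::min(available, offer.takerGets);
        it->second = available - takerGetsFunded;  // later offers see less
        funded.push_back(takerGetsFunded);
        (void)firstOffer;  // the real code also reports owner_funds once per owner
    }
    return funded;
}
// --- end sketch --------------------------------------------------------------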
    auto const current = std::chrono::duration_cast<std::chrono::microseconds>(

    ++counters_[static_cast<std::size_t>(om)].transitions;

        counters_[static_cast<std::size_t>(om)].transitions == 1)

        initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
                             now - processStart_)

        std::chrono::duration_cast<std::chrono::microseconds>(now - start_);

    auto [counters, mode, start, initialSync] = getCounterData();
    auto const current = std::chrono::duration_cast<std::chrono::microseconds>(

        auto& state = obj[jss::state_accounting][states_[i]];
        state[jss::transitions] = std::to_string(counters[i].transitions);
        state[jss::duration_us] = std::to_string(counters[i].dur.count());

    obj[jss::initial_sync_duration_us] = std::to_string(initialSync);

    boost::asio::io_service& io_svc,

    return std::make_unique<NetworkOPsImp>(