20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/main/Tuning.h>
32#include <xrpld/app/misc/AmendmentTable.h>
33#include <xrpld/app/misc/DeliverMax.h>
34#include <xrpld/app/misc/HashRouter.h>
35#include <xrpld/app/misc/LoadFeeTrack.h>
36#include <xrpld/app/misc/NetworkOPs.h>
37#include <xrpld/app/misc/Transaction.h>
38#include <xrpld/app/misc/TxQ.h>
39#include <xrpld/app/misc/ValidatorKeys.h>
40#include <xrpld/app/misc/ValidatorList.h>
41#include <xrpld/app/misc/detail/AccountTxPaging.h>
42#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
43#include <xrpld/app/tx/apply.h>
44#include <xrpld/consensus/Consensus.h>
45#include <xrpld/consensus/ConsensusParms.h>
46#include <xrpld/overlay/Cluster.h>
47#include <xrpld/overlay/Overlay.h>
48#include <xrpld/overlay/predicates.h>
49#include <xrpld/perflog/PerfLog.h>
50#include <xrpld/rpc/BookChanges.h>
51#include <xrpld/rpc/CTID.h>
52#include <xrpld/rpc/DeliveredAmount.h>
53#include <xrpld/rpc/MPTokenIssuanceID.h>
54#include <xrpld/rpc/ServerHandler.h>
56#include <xrpl/basics/UptimeClock.h>
57#include <xrpl/basics/mulDiv.h>
58#include <xrpl/basics/safe_cast.h>
59#include <xrpl/basics/scope.h>
60#include <xrpl/beast/utility/rngfill.h>
61#include <xrpl/crypto/RFC1751.h>
62#include <xrpl/crypto/csprng.h>
63#include <xrpl/protocol/BuildInfo.h>
64#include <xrpl/protocol/Feature.h>
65#include <xrpl/protocol/MultiApiJson.h>
66#include <xrpl/protocol/NFTSyntheticSerializer.h>
67#include <xrpl/protocol/RPCErr.h>
68#include <xrpl/protocol/TxFlags.h>
69#include <xrpl/protocol/jss.h>
70#include <xrpl/resource/Fees.h>
71#include <xrpl/resource/ResourceManager.h>
73#include <boost/asio/ip/host_name.hpp>
74#include <boost/asio/steady_timer.hpp>
113 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
156 std::chrono::steady_clock::time_point
start_ =
217 return !(*
this != b);
236 boost::asio::io_service& io_svc,
250 app_.logs().journal(
"FeeVote")),
253 app.getInboundTransactions(),
254 beast::get_abstract_clock<
std::chrono::steady_clock>(),
256 app_.logs().journal(
"LedgerConsensus"))
258 validatorKeys.keys ? validatorKeys.keys->publicKey
261 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
456 getServerInfo(
bool human,
bool admin,
bool counters)
override;
483 TER result)
override;
517 bool historyOnly)
override;
523 bool historyOnly)
override;
591 boost::system::error_code ec;
596 <<
"NetworkOPs: heartbeatTimer cancel error: "
605 <<
"NetworkOPs: clusterTimer cancel error: "
614 <<
"NetworkOPs: accountHistoryTxTimer cancel error: "
619 using namespace std::chrono_literals;
629 boost::asio::steady_timer& timer,
812 template <
class Handler>
814 Handler
const& handler,
816 :
hook(collector->make_hook(handler))
819 "Disconnected_duration"))
822 "Connected_duration"))
824 collector->make_gauge(
"State_Accounting",
"Syncing_duration"))
827 "Tracking_duration"))
829 collector->make_gauge(
"State_Accounting",
"Full_duration"))
832 "Disconnected_transitions"))
835 "Connected_transitions"))
838 "Syncing_transitions"))
841 "Tracking_transitions"))
843 collector->make_gauge(
"State_Accounting",
"Full_transitions"))
872 {
"disconnected",
"connected",
"syncing",
"tracking",
"full"}};
934 static std::string const hostname = boost::asio::ip::host_name();
941 static std::string const shroudedHostId = [
this]() {
947 return shroudedHostId;
962 boost::asio::steady_timer& timer,
969 [
this, onExpire, onError](boost::system::error_code
const& e) {
970 if ((e.value() == boost::system::errc::success) &&
971 (!m_job_queue.isStopped()))
976 if (e.value() != boost::system::errc::success &&
977 e.value() != boost::asio::error::operation_aborted)
980 JLOG(m_journal.error())
981 <<
"Timer got error '" << e.message()
982 <<
"'. Restarting timer.";
987 timer.expires_from_now(expiry_time);
988 timer.async_wait(std::move(*optionalCountedHandler));
993NetworkOPsImp::setHeartbeatTimer()
997 mConsensus.parms().ledgerGRANULARITY,
999 m_job_queue.addJob(jtNETOP_TIMER,
"NetOPs.heartbeat", [this]() {
1000 processHeartbeatTimer();
1003 [
this]() { setHeartbeatTimer(); });
1007NetworkOPsImp::setClusterTimer()
1009 using namespace std::chrono_literals;
1016 processClusterTimer();
1019 [
this]() { setClusterTimer(); });
1025 JLOG(m_journal.debug()) <<
"Scheduling AccountHistory job for account "
1027 using namespace std::chrono_literals;
1029 accountHistoryTxTimer_,
1031 [
this, subInfo]() { addAccountHistoryJob(subInfo); },
1032 [
this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1036NetworkOPsImp::processHeartbeatTimer()
1039 "Heartbeat Timer", mConsensus.validating(), m_journal);
1047 std::size_t const numPeers = app_.overlay().size();
1050 if (numPeers < minPeerCount_)
1052 if (mMode != OperatingMode::DISCONNECTED)
1054 setMode(OperatingMode::DISCONNECTED);
1056 ss <<
"Node count (" << numPeers <<
") has fallen "
1057 <<
"below required minimum (" << minPeerCount_ <<
").";
1058 JLOG(m_journal.warn()) << ss.
str();
1059 CLOG(clog.
ss()) <<
"set mode to DISCONNECTED: " << ss.
str();
1064 <<
"already DISCONNECTED. too few peers (" << numPeers
1065 <<
"), need at least " << minPeerCount_;
1072 setHeartbeatTimer();
1077 if (mMode == OperatingMode::DISCONNECTED)
1079 setMode(OperatingMode::CONNECTED);
1080 JLOG(m_journal.info())
1081 <<
"Node count (" << numPeers <<
") is sufficient.";
1082 CLOG(clog.
ss()) <<
"setting mode to CONNECTED based on " << numPeers
1088 auto origMode = mMode.load();
1089 CLOG(clog.
ss()) <<
"mode: " << strOperatingMode(origMode,
true);
1090 if (mMode == OperatingMode::SYNCING)
1091 setMode(OperatingMode::SYNCING);
1092 else if (mMode == OperatingMode::CONNECTED)
1093 setMode(OperatingMode::CONNECTED);
1094 auto newMode = mMode.load();
1095 if (origMode != newMode)
1098 <<
", changing to " << strOperatingMode(newMode,
true);
1100 CLOG(clog.
ss()) <<
". ";
1103 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.
ss());
1105 CLOG(clog.
ss()) <<
"consensus phase " << to_string(mLastConsensusPhase);
1107 if (mLastConsensusPhase != currPhase)
1109 reportConsensusStateChange(currPhase);
1110 mLastConsensusPhase = currPhase;
1111 CLOG(clog.
ss()) <<
" changed to " << to_string(mLastConsensusPhase);
1113 CLOG(clog.
ss()) <<
". ";
1115 setHeartbeatTimer();
1119NetworkOPsImp::processClusterTimer()
1121 if (app_.cluster().size() == 0)
1124 using namespace std::chrono_literals;
1126 bool const update = app_.cluster().update(
1127 app_.nodeIdentity().first,
1129 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1130 ? app_.getFeeTrack().getLocalFee()
1132 app_.timeKeeper().now());
1136 JLOG(m_journal.debug()) <<
"Too soon to send cluster update";
1141 protocol::TMCluster cluster;
1142 app_.cluster().for_each([&cluster](
ClusterNode const& node) {
1143 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1148 n.set_nodename(node.
name());
1152 for (
auto& item : gossip.
items)
1154 protocol::TMLoadSource& node = *cluster.add_loadsources();
1155 node.set_name(to_string(item.address));
1156 node.set_cost(item.balance);
1158 app_.overlay().foreach(
send_if(
1159 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1170 if (mode == OperatingMode::FULL && admin)
1172 auto const consensusMode = mConsensus.mode();
1173 if (consensusMode != ConsensusMode::wrongLedger)
1175 if (consensusMode == ConsensusMode::proposing)
1178 if (mConsensus.validating())
1179 return "validating";
1189 if (isNeedNetworkLedger())
1197 m_ledgerMaster.getValidatedRules().enabled(featureBatch))
1199 JLOG(m_journal.error())
1200 <<
"Submitted transaction invalid: tfInnerBatchTxn flag present.";
1207 auto const txid = trans->getTransactionID();
1208 auto const flags = app_.getHashRouter().getFlags(txid);
1210 if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1212 JLOG(m_journal.warn()) <<
"Submitted transaction cached bad";
1219 app_.getHashRouter(),
1221 m_ledgerMaster.getValidatedRules(),
1224 if (validity != Validity::Valid)
1226 JLOG(m_journal.warn())
1227 <<
"Submitted transaction invalid: " << reason;
1233 JLOG(m_journal.warn())
1234 <<
"Exception checking transaction " << txid <<
": " << ex.
what();
1241 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1243 m_job_queue.addJob(
jtTRANSACTION,
"submitTxn", [
this, tx]() {
1245 processTransaction(t,
false,
false, FailHard::no);
1252 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1254 if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1257 JLOG(m_journal.warn()) << transaction->getID() <<
": cached bad!\n";
1258 transaction->setStatus(
INVALID);
1263 auto const view = m_ledgerMaster.getCurrentLedger();
1268 auto const sttx = *transaction->getSTransaction();
1269 if (sttx.isFlag(
tfInnerBatchTxn) && view->rules().enabled(featureBatch))
1271 transaction->setStatus(
INVALID);
1273 app_.getHashRouter().setFlags(
1274 transaction->getID(), HashRouterFlags::BAD);
1281 auto const [validity, reason] =
1282 checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config());
1284 validity == Validity::Valid,
1285 "ripple::NetworkOPsImp::processTransaction : valid validity");
1288 if (validity == Validity::SigBad)
1290 JLOG(m_journal.info()) <<
"Transaction has bad signature: " << reason;
1291 transaction->setStatus(
INVALID);
1293 app_.getHashRouter().setFlags(
1294 transaction->getID(), HashRouterFlags::BAD);
1299 app_.getMasterTransaction().canonicalize(&transaction);
1305NetworkOPsImp::processTransaction(
1311 auto ev = m_job_queue.makeLoadEvent(
jtTXN_PROC,
"ProcessTXN");
1314 if (!preProcessTransaction(transaction))
1318 doTransactionSync(transaction, bUnlimited, failType);
1320 doTransactionAsync(transaction, bUnlimited, failType);
1324NetworkOPsImp::doTransactionAsync(
1331 if (transaction->getApplying())
1334 mTransactions.push_back(
1336 transaction->setApplying();
1338 if (mDispatchState == DispatchState::none)
1340 if (m_job_queue.addJob(
1341 jtBATCH,
"transactionBatch", [
this]() { transactionBatch(); }))
1343 mDispatchState = DispatchState::scheduled;
1349NetworkOPsImp::doTransactionSync(
1356 if (!transaction->getApplying())
1358 mTransactions.push_back(
1360 transaction->setApplying();
1363 doTransactionSyncBatch(
1365 return transaction->getApplying();
1370NetworkOPsImp::doTransactionSyncBatch(
1376 if (mDispatchState == DispatchState::running)
1385 if (mTransactions.size())
1388 if (m_job_queue.addJob(
jtBATCH,
"transactionBatch", [
this]() {
1392 mDispatchState = DispatchState::scheduled;
1396 }
while (retryCallback(lock));
1402 auto ev = m_job_queue.makeLoadEvent(
jtTXN_PROC,
"ProcessTXNSet");
1405 for (
auto const& [_, tx] :
set)
1408 auto transaction = std::make_shared<Transaction>(tx, reason, app_);
1410 if (transaction->getStatus() ==
INVALID)
1412 if (!reason.
empty())
1414 JLOG(m_journal.trace())
1415 <<
"Exception checking transaction: " << reason;
1417 app_.getHashRouter().setFlags(
1418 tx->getTransactionID(), HashRouterFlags::BAD);
1423 if (!preProcessTransaction(transaction))
1434 for (
auto& transaction : candidates)
1436 if (!transaction->getApplying())
1438 transactions.
emplace_back(transaction,
false,
false, FailHard::no);
1439 transaction->setApplying();
1443 if (mTransactions.empty())
1444 mTransactions.swap(transactions);
1447 mTransactions.reserve(mTransactions.size() + transactions.
size());
1448 for (
auto& t : transactions)
1449 mTransactions.push_back(std::move(t));
1455 "ripple::NetworkOPsImp::processTransactionSet has lock");
1457 mTransactions.begin(), mTransactions.end(), [](
auto const& t) {
1458 return t.transaction->getApplying();
1464NetworkOPsImp::transactionBatch()
1468 if (mDispatchState == DispatchState::running)
1471 while (mTransactions.size())
1482 mTransactions.
swap(transactions);
1484 !transactions.
empty(),
1485 "ripple::NetworkOPsImp::apply : non-empty transactions");
1487 mDispatchState != DispatchState::running,
1488 "ripple::NetworkOPsImp::apply : is not running");
1490 mDispatchState = DispatchState::running;
1496 bool changed =
false;
1499 m_ledgerMaster.peekMutex(), std::defer_lock};
1510 if (e.failType == FailHard::yes)
1513 auto const result = app_.getTxQ().apply(
1514 app_, view, e.transaction->getSTransaction(), flags, j);
1515 e.result = result.ter;
1516 e.applied = result.applied;
1517 changed = changed || result.applied;
1526 if (
auto const l = m_ledgerMaster.getValidatedLedger())
1527 validatedLedgerIndex = l->info().seq;
1529 auto newOL = app_.openLedger().current();
1532 e.transaction->clearSubmitResult();
1536 pubProposedTransaction(
1537 newOL, e.transaction->getSTransaction(), e.result);
1538 e.transaction->setApplied();
1541 e.transaction->setResult(e.result);
1544 app_.getHashRouter().setFlags(
1545 e.transaction->getID(), HashRouterFlags::BAD);
1554 JLOG(m_journal.info())
1555 <<
"TransactionResult: " << token <<
": " << human;
1560 bool addLocal = e.local;
1564 JLOG(m_journal.debug())
1565 <<
"Transaction is now included in open ledger";
1566 e.transaction->setStatus(
INCLUDED);
1571 auto const& txCur = e.transaction->getSTransaction();
1574 for (
auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
1576 txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
1582 auto t = std::make_shared<Transaction>(trans, reason, app_);
1583 if (t->getApplying())
1585 submit_held.
emplace_back(t,
false,
false, FailHard::no);
1594 JLOG(m_journal.info()) <<
"Transaction is obsolete";
1595 e.transaction->setStatus(
OBSOLETE);
1599 JLOG(m_journal.debug())
1600 <<
"Transaction is likely to claim a"
1601 <<
" fee, but is queued until fee drops";
1603 e.transaction->setStatus(
HELD);
1607 m_ledgerMaster.addHeldTransaction(e.transaction);
1608 e.transaction->setQueued();
1609 e.transaction->setKept();
1615 if (e.failType != FailHard::yes)
1617 auto const lastLedgerSeq =
1618 e.transaction->getSTransaction()->at(
1619 ~sfLastLedgerSequence);
1620 auto const ledgersLeft = lastLedgerSeq
1622 m_ledgerMaster.getCurrentLedgerIndex()
1641 (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
1642 app_.getHashRouter().setFlags(
1643 e.transaction->getID(), HashRouterFlags::HELD))
1646 JLOG(m_journal.debug())
1647 <<
"Transaction should be held: " << e.result;
1648 e.transaction->setStatus(
HELD);
1649 m_ledgerMaster.addHeldTransaction(e.transaction);
1650 e.transaction->setKept();
1653 JLOG(m_journal.debug())
1654 <<
"Not holding transaction "
1655 << e.transaction->getID() <<
": "
1656 << (e.local ?
"local" :
"network") <<
", "
1657 <<
"result: " << e.result <<
" ledgers left: "
1658 << (ledgersLeft ? to_string(*ledgersLeft)
1664 JLOG(m_journal.debug())
1665 <<
"Status other than success " << e.result;
1666 e.transaction->setStatus(
INVALID);
1669 auto const enforceFailHard =
1670 e.failType == FailHard::yes && !
isTesSuccess(e.result);
1672 if (addLocal && !enforceFailHard)
1674 m_localTX->push_back(
1675 m_ledgerMaster.getCurrentLedgerIndex(),
1676 e.transaction->getSTransaction());
1677 e.transaction->setKept();
1681 ((mMode != OperatingMode::FULL) &&
1682 (e.failType != FailHard::yes) && e.local) ||
1687 app_.getHashRouter().shouldRelay(e.transaction->getID());
1688 if (
auto const sttx = *(e.transaction->getSTransaction());
1693 newOL->rules().enabled(featureBatch)))
1695 protocol::TMTransaction tx;
1699 tx.set_rawtransaction(s.
data(), s.
size());
1700 tx.set_status(protocol::tsCURRENT);
1701 tx.set_receivetimestamp(
1702 app_.timeKeeper().now().time_since_epoch().count());
1705 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1706 e.transaction->setBroadcast();
1710 if (validatedLedgerIndex)
1712 auto [fee, accountSeq, availableSeq] =
1713 app_.getTxQ().getTxRequiredFeeAndSeq(
1714 *newOL, e.transaction->getSTransaction());
1715 e.transaction->setCurrentLedgerState(
1716 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1724 e.transaction->clearApplying();
1726 if (!submit_held.
empty())
1728 if (mTransactions.empty())
1729 mTransactions.swap(submit_held);
1732 mTransactions.reserve(mTransactions.size() + submit_held.
size());
1733 for (
auto& e : submit_held)
1734 mTransactions.push_back(std::move(e));
1740 mDispatchState = DispatchState::none;
1748NetworkOPsImp::getOwnerInfo(
1753 auto root = keylet::ownerDir(account);
1754 auto sleNode = lpLedger->read(keylet::page(
root));
1761 for (
auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1763 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1766 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1768 switch (sleCur->getType())
1771 if (!jvObjects.
isMember(jss::offers))
1772 jvObjects[jss::offers] =
1775 jvObjects[jss::offers].
append(
1776 sleCur->getJson(JsonOptions::none));
1779 case ltRIPPLE_STATE:
1780 if (!jvObjects.
isMember(jss::ripple_lines))
1782 jvObjects[jss::ripple_lines] =
1786 jvObjects[jss::ripple_lines].
append(
1787 sleCur->getJson(JsonOptions::none));
1790 case ltACCOUNT_ROOT:
1794 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1800 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1804 sleNode = lpLedger->read(keylet::page(
root, uNodeDir));
1807 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1820NetworkOPsImp::isBlocked()
1822 return isAmendmentBlocked() || isUNLBlocked();
1826NetworkOPsImp::isAmendmentBlocked()
1828 return amendmentBlocked_;
1832NetworkOPsImp::setAmendmentBlocked()
1834 amendmentBlocked_ =
true;
1835 setMode(OperatingMode::CONNECTED);
1839NetworkOPsImp::isAmendmentWarned()
1841 return !amendmentBlocked_ && amendmentWarned_;
1845NetworkOPsImp::setAmendmentWarned()
1847 amendmentWarned_ =
true;
1851NetworkOPsImp::clearAmendmentWarned()
1853 amendmentWarned_ =
false;
1857NetworkOPsImp::isUNLBlocked()
1863NetworkOPsImp::setUNLBlocked()
1866 setMode(OperatingMode::CONNECTED);
1870NetworkOPsImp::clearUNLBlocked()
1872 unlBlocked_ =
false;
1876NetworkOPsImp::checkLastClosedLedger(
1885 JLOG(m_journal.trace()) <<
"NetworkOPsImp::checkLastClosedLedger";
1887 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1892 uint256 closedLedger = ourClosed->info().hash;
1893 uint256 prevClosedLedger = ourClosed->info().parentHash;
1894 JLOG(m_journal.trace()) <<
"OurClosed: " << closedLedger;
1895 JLOG(m_journal.trace()) <<
"PrevClosed: " << prevClosedLedger;
1900 auto& validations = app_.getValidations();
1901 JLOG(m_journal.debug())
1902 <<
"ValidationTrie " <<
Json::Compact(validations.getJsonTrie());
1906 peerCounts[closedLedger] = 0;
1907 if (mMode >= OperatingMode::TRACKING)
1908 peerCounts[closedLedger]++;
1910 for (
auto& peer : peerList)
1912 uint256 peerLedger = peer->getClosedLedgerHash();
1915 ++peerCounts[peerLedger];
1918 for (
auto const& it : peerCounts)
1919 JLOG(m_journal.debug()) <<
"L: " << it.first <<
" n=" << it.second;
1921 uint256 preferredLCL = validations.getPreferredLCL(
1923 m_ledgerMaster.getValidLedgerIndex(),
1926 bool switchLedgers = preferredLCL != closedLedger;
1928 closedLedger = preferredLCL;
1930 if (switchLedgers && (closedLedger == prevClosedLedger))
1933 JLOG(m_journal.info()) <<
"We won't switch to our own previous ledger";
1934 networkClosed = ourClosed->info().hash;
1935 switchLedgers =
false;
1938 networkClosed = closedLedger;
1943 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1946 consensus = app_.getInboundLedgers().acquire(
1947 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1950 (!m_ledgerMaster.canBeCurrent(consensus) ||
1951 !m_ledgerMaster.isCompatible(
1952 *consensus, m_journal.debug(),
"Not switching")))
1956 networkClosed = ourClosed->info().hash;
1960 JLOG(m_journal.warn()) <<
"We are not running on the consensus ledger";
1961 JLOG(m_journal.info()) <<
"Our LCL: " << ourClosed->info().hash
1963 JLOG(m_journal.info()) <<
"Net LCL " << closedLedger;
1965 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1967 setMode(OperatingMode::CONNECTED);
1975 switchLastClosedLedger(consensus);
1982NetworkOPsImp::switchLastClosedLedger(
1986 JLOG(m_journal.error())
1987 <<
"JUMP last closed ledger to " << newLCL->info().hash;
1989 clearNeedNetworkLedger();
1992 app_.getTxQ().processClosedLedger(app_, *newLCL,
true);
1999 auto retries = m_localTX->getTxSet();
2000 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
2005 rules.
emplace(app_.config().features);
2006 app_.openLedger().accept(
2017 return app_.getTxQ().accept(app_, view);
2021 m_ledgerMaster.switchLCL(newLCL);
2023 protocol::TMStatusChange s;
2024 s.set_newevent(protocol::neSWITCHED_LEDGER);
2025 s.set_ledgerseq(newLCL->info().seq);
2026 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
2027 s.set_ledgerhashprevious(
2028 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
2029 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
2031 app_.overlay().foreach(
2032 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
2036NetworkOPsImp::beginConsensus(
2042 "ripple::NetworkOPsImp::beginConsensus : nonzero input");
2044 auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
2046 JLOG(m_journal.info()) <<
"Consensus time for #" << closingInfo.seq
2047 <<
" with LCL " << closingInfo.parentHash;
2049 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
2054 if (mMode == OperatingMode::FULL)
2056 JLOG(m_journal.warn()) <<
"Don't have LCL, going to tracking";
2057 setMode(OperatingMode::TRACKING);
2058 CLOG(clog) <<
"beginConsensus Don't have LCL, going to tracking. ";
2061 CLOG(clog) <<
"beginConsensus no previous ledger. ";
2066 prevLedger->info().hash == closingInfo.parentHash,
2067 "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
2070 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
2071 "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
2074 if (prevLedger->rules().enabled(featureNegativeUNL))
2075 app_.validators().setNegativeUNL(prevLedger->negativeUNL());
2076 TrustChanges const changes = app_.validators().updateTrusted(
2077 app_.getValidations().getCurrentNodeIDs(),
2078 closingInfo.parentCloseTime,
2081 app_.getHashRouter());
2083 if (!changes.
added.empty() || !changes.
removed.empty())
2085 app_.getValidations().trustChanged(changes.
added, changes.
removed);
2087 app_.getAmendmentTable().trustChanged(
2088 app_.validators().getQuorumKeys().second);
2091 mConsensus.startRound(
2092 app_.timeKeeper().closeTime(),
2100 if (mLastConsensusPhase != currPhase)
2102 reportConsensusStateChange(currPhase);
2103 mLastConsensusPhase = currPhase;
2106 JLOG(m_journal.debug()) <<
"Initiating consensus engine";
2113 auto const& peerKey = peerPos.
publicKey();
2114 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
2125 JLOG(m_journal.error())
2126 <<
"Received a proposal signed by MY KEY from a peer. This may "
2127 "indicate a misconfiguration where another node has the same "
2128 "validator key, or may be caused by unusual message routing and "
2133 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
2144 protocol::TMHaveTransactionSet msg;
2145 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
2146 msg.set_status(protocol::tsHAVE);
2147 app_.overlay().foreach(
2148 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
2152 mConsensus.gotTxSet(app_.timeKeeper().closeTime(),
RCLTxSet{map});
2158 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
2160 for (
auto const& it : app_.overlay().getActivePeers())
2162 if (it && (it->getClosedLedgerHash() == deadLedger))
2164 JLOG(m_journal.trace()) <<
"Killing obsolete peer status";
2171 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
2173 if (networkClosed.
isZero())
2175 CLOG(clog) <<
"endConsensus last closed ledger is zero. ";
2185 if (((mMode == OperatingMode::CONNECTED) ||
2186 (mMode == OperatingMode::SYNCING)) &&
2192 if (!needNetworkLedger_)
2193 setMode(OperatingMode::TRACKING);
2196 if (((mMode == OperatingMode::CONNECTED) ||
2197 (mMode == OperatingMode::TRACKING)) &&
2203 auto current = m_ledgerMaster.getCurrentLedger();
2204 if (app_.timeKeeper().now() < (
current->info().parentCloseTime +
2205 2 *
current->info().closeTimeResolution))
2207 setMode(OperatingMode::FULL);
2211 beginConsensus(networkClosed, clog);
2215NetworkOPsImp::consensusViewChange()
2217 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2219 setMode(OperatingMode::CONNECTED);
2229 if (!mStreamMaps[sManifests].empty())
2233 jvObj[jss::type] =
"manifestReceived";
2236 jvObj[jss::signing_key] =
2240 jvObj[jss::signature] =
strHex(*sig);
2243 jvObj[jss::domain] = mo.
domain;
2246 for (
auto i = mStreamMaps[sManifests].begin();
2247 i != mStreamMaps[sManifests].end();)
2249 if (
auto p = i->second.lock())
2251 p->send(jvObj,
true);
2256 i = mStreamMaps[sManifests].erase(i);
2262NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
2266 : loadFactorServer{loadFeeTrack.getLoadFactor()}
2267 , loadBaseServer{loadFeeTrack.getLoadBase()}
2269 , em{
std::move(escalationMetrics)}
2279 em.has_value() != b.
em.has_value())
2285 em->minProcessingFeeLevel != b.
em->minProcessingFeeLevel ||
2286 em->openLedgerFeeLevel != b.
em->openLedgerFeeLevel ||
2287 em->referenceFeeLevel != b.
em->referenceFeeLevel);
2320 jvObj[jss::type] =
"serverStatus";
2322 jvObj[jss::load_base] = f.loadBaseServer;
2323 jvObj[jss::load_factor_server] = f.loadFactorServer;
2324 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2329 safe_cast<std::uint64_t>(f.loadFactorServer),
2331 f.em->openLedgerFeeLevel,
2333 f.em->referenceFeeLevel)
2336 jvObj[jss::load_factor] =
trunc32(loadFactor);
2337 jvObj[jss::load_factor_fee_escalation] =
2338 f.em->openLedgerFeeLevel.jsonClipped();
2339 jvObj[jss::load_factor_fee_queue] =
2340 f.em->minProcessingFeeLevel.jsonClipped();
2341 jvObj[jss::load_factor_fee_reference] =
2342 f.em->referenceFeeLevel.jsonClipped();
2345 jvObj[jss::load_factor] = f.loadFactorServer;
2359 p->send(jvObj,
true);
2376 if (!streamMap.empty())
2379 jvObj[jss::type] =
"consensusPhase";
2380 jvObj[jss::consensus] =
to_string(phase);
2382 for (
auto i = streamMap.begin(); i != streamMap.end();)
2384 if (
auto p = i->second.lock())
2386 p->send(jvObj,
true);
2391 i = streamMap.erase(i);
2407 auto const signerPublic = val->getSignerPublic();
2409 jvObj[jss::type] =
"validationReceived";
2410 jvObj[jss::validation_public_key] =
2412 jvObj[jss::ledger_hash] =
to_string(val->getLedgerHash());
2413 jvObj[jss::signature] =
strHex(val->getSignature());
2414 jvObj[jss::full] = val->isFull();
2415 jvObj[jss::flags] = val->getFlags();
2416 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2417 jvObj[jss::data] =
strHex(val->getSerializer().slice());
2419 if (
auto version = (*val)[~sfServerVersion])
2422 if (
auto cookie = (*val)[~sfCookie])
2425 if (
auto hash = (*val)[~sfValidatedHash])
2426 jvObj[jss::validated_hash] =
strHex(*hash);
2428 auto const masterKey =
2431 if (masterKey != signerPublic)
2436 if (
auto const seq = (*val)[~sfLedgerSequence])
2437 jvObj[jss::ledger_index] = *seq;
2439 if (val->isFieldPresent(sfAmendments))
2442 for (
auto const& amendment : val->getFieldV256(sfAmendments))
2446 if (
auto const closeTime = (*val)[~sfCloseTime])
2447 jvObj[jss::close_time] = *closeTime;
2449 if (
auto const loadFee = (*val)[~sfLoadFee])
2450 jvObj[jss::load_fee] = *loadFee;
2452 if (
auto const baseFee = val->at(~sfBaseFee))
2453 jvObj[jss::base_fee] =
static_cast<double>(*baseFee);
2455 if (
auto const reserveBase = val->at(~sfReserveBase))
2456 jvObj[jss::reserve_base] = *reserveBase;
2458 if (
auto const reserveInc = val->at(~sfReserveIncrement))
2459 jvObj[jss::reserve_inc] = *reserveInc;
2463 if (
auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2464 baseFeeXRP && baseFeeXRP->native())
2465 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2467 if (
auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2468 reserveBaseXRP && reserveBaseXRP->native())
2469 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2471 if (
auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2472 reserveIncXRP && reserveIncXRP->native())
2473 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2482 if (jvTx.
isMember(jss::ledger_index))
2484 jvTx[jss::ledger_index] =
2485 std::to_string(jvTx[jss::ledger_index].asUInt());
2492 if (
auto p = i->second.lock())
2496 [&](
Json::Value const& jv) { p->send(jv, true); });
2516 jvObj[jss::type] =
"peerStatusChange";
2525 p->send(jvObj,
true);
2539 using namespace std::chrono_literals;
2571 <<
"recvValidation " << val->getLedgerHash() <<
" from " << source;
2587 <<
"Exception thrown for handling new validation "
2588 << val->getLedgerHash() <<
": " << e.
what();
2593 <<
"Unknown exception thrown for handling new validation "
2594 << val->getLedgerHash();
2606 ss <<
"VALIDATION: " << val->render() <<
" master_key: ";
2643 "This server is amendment blocked, and must be updated to be "
2644 "able to stay in sync with the network.";
2651 "This server has an expired validator list. validators.txt "
2652 "may be incorrectly configured or some [validator_list_sites] "
2653 "may be unreachable.";
2660 "One or more unsupported amendments have reached majority. "
2661 "Upgrade to the latest version before they are activated "
2662 "to avoid being amendment blocked.";
2663 if (
auto const expected =
2667 d[jss::expected_date] = expected->time_since_epoch().count();
2668 d[jss::expected_date_UTC] =
to_string(*expected);
2672 if (warnings.size())
2673 info[jss::warnings] = std::move(warnings);
2688 info[jss::time] =
to_string(std::chrono::floor<std::chrono::microseconds>(
2692 info[jss::network_ledger] =
"waiting";
2694 info[jss::validation_quorum] =
2702 info[jss::node_size] =
"tiny";
2705 info[jss::node_size] =
"small";
2708 info[jss::node_size] =
"medium";
2711 info[jss::node_size] =
"large";
2714 info[jss::node_size] =
"huge";
2723 info[jss::validator_list_expires] =
2724 safe_cast<Json::UInt>(when->time_since_epoch().count());
2726 info[jss::validator_list_expires] = 0;
2736 if (*when == TimeKeeper::time_point::max())
2738 x[jss::expiration] =
"never";
2739 x[jss::status] =
"active";
2746 x[jss::status] =
"active";
2748 x[jss::status] =
"expired";
2753 x[jss::status] =
"unknown";
2754 x[jss::expiration] =
"unknown";
2758#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2761#ifdef GIT_COMMIT_HASH
2762 x[jss::hash] = GIT_COMMIT_HASH;
2765 x[jss::branch] = GIT_BRANCH;
2770 info[jss::io_latency_ms] =
2778 info[jss::pubkey_validator] =
2783 info[jss::pubkey_validator] =
"none";
2793 info[jss::counters][jss::nodestore] = nodestore;
2797 info[jss::pubkey_node] =
2803 info[jss::amendment_blocked] =
true;
2817 lastClose[jss::converge_time_s] =
2822 lastClose[jss::converge_time] =
2826 info[jss::last_close] = lastClose;
2834 info[jss::network_id] =
static_cast<Json::UInt>(*netid);
2836 auto const escalationMetrics =
2844 auto const loadFactorFeeEscalation =
2846 escalationMetrics.openLedgerFeeLevel,
2848 escalationMetrics.referenceFeeLevel)
2852 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2856 info[jss::load_base] = loadBaseServer;
2857 info[jss::load_factor] =
trunc32(loadFactor);
2858 info[jss::load_factor_server] = loadFactorServer;
2865 info[jss::load_factor_fee_escalation] =
2866 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2867 info[jss::load_factor_fee_queue] =
2868 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2869 info[jss::load_factor_fee_reference] =
2870 escalationMetrics.referenceFeeLevel.jsonClipped();
2874 info[jss::load_factor] =
2875 static_cast<double>(loadFactor) / loadBaseServer;
2877 if (loadFactorServer != loadFactor)
2878 info[jss::load_factor_server] =
2879 static_cast<double>(loadFactorServer) / loadBaseServer;
2884 if (fee != loadBaseServer)
2885 info[jss::load_factor_local] =
2886 static_cast<double>(fee) / loadBaseServer;
2888 if (fee != loadBaseServer)
2889 info[jss::load_factor_net] =
2890 static_cast<double>(fee) / loadBaseServer;
2892 if (fee != loadBaseServer)
2893 info[jss::load_factor_cluster] =
2894 static_cast<double>(fee) / loadBaseServer;
2896 if (escalationMetrics.openLedgerFeeLevel !=
2897 escalationMetrics.referenceFeeLevel &&
2898 (admin || loadFactorFeeEscalation != loadFactor))
2899 info[jss::load_factor_fee_escalation] =
2900 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2901 escalationMetrics.referenceFeeLevel);
2902 if (escalationMetrics.minProcessingFeeLevel !=
2903 escalationMetrics.referenceFeeLevel)
2904 info[jss::load_factor_fee_queue] =
2905 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2906 escalationMetrics.referenceFeeLevel);
2919 XRPAmount const baseFee = lpClosed->fees().base;
2921 l[jss::seq] =
Json::UInt(lpClosed->info().seq);
2922 l[jss::hash] =
to_string(lpClosed->info().hash);
2927 l[jss::reserve_base] =
2928 lpClosed->fees().accountReserve(0).jsonClipped();
2929 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2931 lpClosed->info().closeTime.time_since_epoch().count());
2936 l[jss::reserve_base_xrp] =
2937 lpClosed->fees().accountReserve(0).decimalXRP();
2938 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2941 std::abs(closeOffset.count()) >= 60)
2942 l[jss::close_time_offset] =
2950 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2954 auto lCloseTime = lpClosed->info().closeTime;
2956 if (lCloseTime <= closeTime)
2958 using namespace std::chrono_literals;
2959 auto age = closeTime - lCloseTime;
2961 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2967 info[jss::validated_ledger] = l;
2969 info[jss::closed_ledger] = l;
2973 info[jss::published_ledger] =
"none";
2974 else if (lpPublished->info().seq != lpClosed->info().seq)
2975 info[jss::published_ledger] = lpPublished->info().seq;
2980 info[jss::jq_trans_overflow] =
2982 info[jss::peer_disconnects] =
2984 info[jss::peer_disconnects_resources] =
2989 "http",
"https",
"peer",
"ws",
"ws2",
"wss",
"wss2"};
2997 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2998 port.admin_user.empty() && port.admin_password.empty()))
3012 for (
auto const& p : proto)
3013 jv[jss::protocol].append(p);
3020 auto const optPort = grpcSection.
get(
"port");
3021 if (optPort && grpcSection.get(
"ip"))
3024 jv[jss::port] = *optPort;
3026 jv[jss::protocol].append(
"grpc");
3029 info[jss::ports] = std::move(ports);
3055 ledger->rules().enabled(featureBatch))
3059 transJson(transaction, result,
false, ledger, std::nullopt);
3073 [&](
Json::Value const& jv) { p->send(jv, true); });
3096 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted,
app_);
3098 lpAccepted->info().hash, alpAccepted);
3102 alpAccepted->getLedger().
get() == lpAccepted.
get(),
3103 "ripple::NetworkOPsImp::pubLedger : accepted input");
3107 <<
"Publishing ledger " << lpAccepted->info().seq <<
" "
3108 << lpAccepted->info().hash;
3116 jvObj[jss::type] =
"ledgerClosed";
3117 jvObj[jss::ledger_index] = lpAccepted->info().seq;
3118 jvObj[jss::ledger_hash] =
to_string(lpAccepted->info().hash);
3120 lpAccepted->info().closeTime.time_since_epoch().count());
3122 if (!lpAccepted->rules().enabled(featureXRPFees))
3124 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
3125 jvObj[jss::reserve_base] =
3126 lpAccepted->fees().accountReserve(0).jsonClipped();
3127 jvObj[jss::reserve_inc] =
3128 lpAccepted->fees().increment.jsonClipped();
3130 jvObj[jss::txn_count] =
Json::UInt(alpAccepted->size());
3134 jvObj[jss::validated_ledgers] =
3144 p->send(jvObj,
true);
3162 p->send(jvObj,
true);
3171 static bool firstTime =
true;
3178 for (
auto& inner : outer.second)
3180 auto& subInfo = inner.second;
3181 if (subInfo.index_->separationLedgerSeq_ == 0)
3184 alpAccepted->getLedger(), subInfo);
3193 for (
auto const& accTx : *alpAccepted)
3197 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3224 "reportConsensusStateChange->pubConsensus",
3255 jvObj[jss::type] =
"transaction";
3259 jvObj[jss::transaction] =
3266 jvObj[jss::meta], *ledger, transaction, meta->
get());
3269 jvObj[jss::meta], transaction, meta->
get());
3273 if (
auto const& lookup = ledger->txRead(transaction->getTransactionID());
3274 lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3276 uint32_t
const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
3278 if (transaction->isFieldPresent(sfNetworkID))
3279 netID = transaction->getFieldU32(sfNetworkID);
3284 jvObj[jss::ctid] = *ctid;
3286 if (!ledger->open())
3287 jvObj[jss::ledger_hash] =
to_string(ledger->info().hash);
3291 jvObj[jss::ledger_index] = ledger->info().seq;
3292 jvObj[jss::transaction][jss::date] =
3293 ledger->info().closeTime.time_since_epoch().count();
3294 jvObj[jss::validated] =
true;
3295 jvObj[jss::close_time_iso] =
to_string_iso(ledger->info().closeTime);
3301 jvObj[jss::validated] =
false;
3302 jvObj[jss::ledger_current_index] = ledger->info().seq;
3305 jvObj[jss::status] = validated ?
"closed" :
"proposed";
3306 jvObj[jss::engine_result] = sToken;
3307 jvObj[jss::engine_result_code] = result;
3308 jvObj[jss::engine_result_message] = sHuman;
3310 if (transaction->getTxnType() == ttOFFER_CREATE)
3312 auto const account = transaction->getAccountID(sfAccount);
3313 auto const amount = transaction->getFieldAmount(sfTakerGets);
3316 if (account != amount.issue().account)
3324 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3332 [&]<
unsigned Version>(
3334 RPC::insertDeliverMax(
3335 jvTx[jss::transaction], transaction->getTxnType(), Version);
3337 if constexpr (Version > 1)
3339 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3340 jvTx[jss::hash] = hash;
3344 jvTx[jss::transaction][jss::hash] = hash;
3357 auto const& stTxn = transaction.
getTxn();
3361 auto const trResult = transaction.
getResult();
3376 [&](
Json::Value const& jv) { p->send(jv, true); });
3393 [&](
Json::Value const& jv) { p->send(jv, true); });
3418 auto const currLedgerSeq = ledger->seq();
3425 for (
auto const& affectedAccount : transaction.
getAffected())
3430 auto it = simiIt->second.begin();
3432 while (it != simiIt->second.end())
3443 it = simiIt->second.erase(it);
3450 auto it = simiIt->second.begin();
3451 while (it != simiIt->second.end())
3462 it = simiIt->second.erase(it);
3469 auto& subs = histoIt->second;
3470 auto it = subs.begin();
3471 while (it != subs.end())
3474 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3488 it = subs.erase(it);
3499 <<
"pubAccountTransaction: "
3500 <<
"proposed=" << iProposed <<
", accepted=" << iAccepted;
3502 if (!notify.
empty() || !accountHistoryNotify.
empty())
3504 auto const& stTxn = transaction.
getTxn();
3508 auto const trResult = transaction.
getResult();
3514 isrListener->getApiVersion(),
3515 [&](
Json::Value const& jv) { isrListener->send(jv, true); });
3519 jvObj.
set(jss::account_history_boundary,
true);
3522 jvObj.
isMember(jss::account_history_tx_stream) ==
3524 "ripple::NetworkOPsImp::pubAccountTransaction : "
3525 "account_history_tx_stream not set");
3526 for (
auto& info : accountHistoryNotify)
3528 auto& index = info.index_;
3529 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3530 jvObj.
set(jss::account_history_tx_first,
true);
3532 jvObj.
set(jss::account_history_tx_index, index->forwardTxIndex_++);
3535 info.sink_->getApiVersion(),
3536 [&](
Json::Value const& jv) { info.sink_->send(jv, true); });
3561 for (
auto const& affectedAccount : tx->getMentionedAccounts())
3566 auto it = simiIt->second.begin();
3568 while (it != simiIt->second.end())
3579 it = simiIt->second.erase(it);
3586 JLOG(
m_journal.
trace()) <<
"pubProposedAccountTransaction: " << iProposed;
3588 if (!notify.
empty() || !accountHistoryNotify.
empty())
3595 isrListener->getApiVersion(),
3596 [&](
Json::Value const& jv) { isrListener->send(jv, true); });
3599 jvObj.
isMember(jss::account_history_tx_stream) ==
3601 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3602 "account_history_tx_stream not set");
3603 for (
auto& info : accountHistoryNotify)
3605 auto& index = info.index_;
3606 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3607 jvObj.
set(jss::account_history_tx_first,
true);
3608 jvObj.
set(jss::account_history_tx_index, index->forwardTxIndex_++);
3610 info.sink_->getApiVersion(),
3611 [&](
Json::Value const& jv) { info.sink_->send(jv, true); });
3628 for (
auto const& naAccountID : vnaAccountIDs)
3631 <<
"subAccount: account: " <<
toBase58(naAccountID);
3633 isrListener->insertSubAccountInfo(naAccountID, rt);
3638 for (
auto const& naAccountID : vnaAccountIDs)
3640 auto simIterator = subMap.
find(naAccountID);
3641 if (simIterator == subMap.
end())
3645 usisElement[isrListener->getSeq()] = isrListener;
3647 subMap.
insert(simIterator, make_pair(naAccountID, usisElement));
3652 simIterator->second[isrListener->getSeq()] = isrListener;
3663 for (
auto const& naAccountID : vnaAccountIDs)
3666 isrListener->deleteSubAccountInfo(naAccountID, rt);
3683 for (
auto const& naAccountID : vnaAccountIDs)
3685 auto simIterator = subMap.
find(naAccountID);
3687 if (simIterator != subMap.
end())
3690 simIterator->second.erase(uSeq);
3692 if (simIterator->second.empty())
3695 subMap.
erase(simIterator);
3704 enum DatabaseType { Sqlite,
None };
3705 static auto const databaseType = [&]() -> DatabaseType {
3710 return DatabaseType::Sqlite;
3712 return DatabaseType::None;
3715 if (databaseType == DatabaseType::None)
3718 <<
"AccountHistory job for account "
3730 "AccountHistoryTxStream",
3731 [
this, dbType = databaseType, subInfo]() {
3732 auto const& accountId = subInfo.
index_->accountId_;
3733 auto& lastLedgerSeq = subInfo.
index_->historyLastLedgerSeq_;
3734 auto& txHistoryIndex = subInfo.
index_->historyTxIndex_;
3737 <<
"AccountHistory job for account " <<
toBase58(accountId)
3738 <<
" started. lastLedgerSeq=" << lastLedgerSeq;
3748 auto stx = tx->getSTransaction();
3749 if (stx->getAccountID(sfAccount) == accountId &&
3750 stx->getSeqValue() == 1)
3754 for (
auto& node : meta->getNodes())
3756 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3759 if (node.isFieldPresent(sfNewFields))
3761 if (
auto inner =
dynamic_cast<STObject const*
>(
3762 node.peekAtPField(sfNewFields));
3765 if (inner->isFieldPresent(sfAccount) &&
3766 inner->getAccountID(sfAccount) == accountId)
3778 bool unsubscribe) ->
bool {
3781 sptr->send(jvObj,
true);
3791 bool unsubscribe) ->
bool {
3795 sptr->getApiVersion(),
3796 [&](
Json::Value const& jv) { sptr->send(jv,
true); });
3819 accountId, minLedger, maxLedger, marker, 0,
true};
3820 return db->newestAccountTxPage(options);
3824 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3825 "getMoreTxns : invalid database type");
3834 while (lastLedgerSeq >= 2 && !subInfo.
index_->stopHistorical_)
3836 int feeChargeCount = 0;
3845 <<
"AccountHistory job for account "
3846 <<
toBase58(accountId) <<
" no InfoSub. Fee charged "
3847 << feeChargeCount <<
" times.";
3852 auto startLedgerSeq =
3853 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3855 <<
"AccountHistory job for account " <<
toBase58(accountId)
3856 <<
", working on ledger range [" << startLedgerSeq <<
","
3857 << lastLedgerSeq <<
"]";
3859 auto haveRange = [&]() ->
bool {
3862 auto haveSomeValidatedLedgers =
3864 validatedMin, validatedMax);
3866 return haveSomeValidatedLedgers &&
3867 validatedMin <= startLedgerSeq &&
3868 lastLedgerSeq <= validatedMax;
3874 <<
"AccountHistory reschedule job for account "
3875 <<
toBase58(accountId) <<
", incomplete ledger range ["
3876 << startLedgerSeq <<
"," << lastLedgerSeq <<
"]";
3882 while (!subInfo.
index_->stopHistorical_)
3885 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3889 <<
"AccountHistory job for account "
3890 <<
toBase58(accountId) <<
" getMoreTxns failed.";
3895 auto const& txns = dbResult->first;
3896 marker = dbResult->second;
3897 size_t num_txns = txns.size();
3898 for (
size_t i = 0; i < num_txns; ++i)
3900 auto const& [tx, meta] = txns[i];
3905 <<
"AccountHistory job for account "
3906 <<
toBase58(accountId) <<
" empty tx or meta.";
3916 <<
"AccountHistory job for account "
3917 <<
toBase58(accountId) <<
" no ledger.";
3922 tx->getSTransaction();
3926 <<
"AccountHistory job for account "
3928 <<
" getSTransaction failed.";
3934 auto const trR = meta->getResultTER();
3936 transJson(stTxn, trR,
true, curTxLedger, mRef);
3939 jss::account_history_tx_index, txHistoryIndex--);
3940 if (i + 1 == num_txns ||
3941 txns[i + 1].first->getLedger() != tx->getLedger())
3942 jvTx.
set(jss::account_history_boundary,
true);
3944 if (isFirstTx(tx, meta))
3946 jvTx.
set(jss::account_history_tx_first,
true);
3947 sendMultiApiJson(jvTx,
false);
3950 <<
"AccountHistory job for account "
3952 <<
" done, found last tx.";
3957 sendMultiApiJson(jvTx,
false);
3964 <<
"AccountHistory job for account "
3966 <<
" paging, marker=" << marker->ledgerSeq <<
":"
3975 if (!subInfo.
index_->stopHistorical_)
3977 lastLedgerSeq = startLedgerSeq - 1;
3978 if (lastLedgerSeq <= 1)
3981 <<
"AccountHistory job for account "
3983 <<
" done, reached genesis ledger.";
3996 subInfo.
index_->separationLedgerSeq_ = ledger->seq();
3997 auto const& accountId = subInfo.
index_->accountId_;
3999 if (!ledger->exists(accountKeylet))
4002 <<
"subAccountHistoryStart, no account " <<
toBase58(accountId)
4003 <<
", no need to add AccountHistory job.";
4008 if (
auto const sleAcct = ledger->read(accountKeylet); sleAcct)
4010 if (sleAcct->getFieldU32(sfSequence) == 1)
4013 <<
"subAccountHistoryStart, genesis account "
4015 <<
" does not have tx, no need to add AccountHistory job.";
4022 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
4023 "access genesis account");
4027 subInfo.
index_->historyLastLedgerSeq_ = ledger->seq();
4028 subInfo.
index_->haveHistorical_ =
true;
4031 <<
"subAccountHistoryStart, add AccountHistory job: accountId="
4032 <<
toBase58(accountId) <<
", currentLedgerSeq=" << ledger->seq();
4042 if (!isrListener->insertSubAccountHistory(accountId))
4045 <<
"subAccountHistory, already subscribed to account "
4052 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
4057 inner.
emplace(isrListener->getSeq(), ahi);
4063 simIterator->second.emplace(isrListener->getSeq(), ahi);
4077 <<
"subAccountHistory, no validated ledger yet, delay start";
4090 isrListener->deleteSubAccountHistory(account);
4104 auto& subInfoMap = simIterator->second;
4105 auto subInfoIter = subInfoMap.find(seq);
4106 if (subInfoIter != subInfoMap.end())
4108 subInfoIter->second.index_->stopHistorical_ =
true;
4113 simIterator->second.erase(seq);
4114 if (simIterator->second.empty())
4120 <<
"unsubAccountHistory, account " <<
toBase58(account)
4121 <<
", historyOnly = " << (historyOnly ?
"true" :
"false");
4129 listeners->addSubscriber(isrListener);
4131 UNREACHABLE(
"ripple::NetworkOPsImp::subBook : null book listeners");
4139 listeners->removeSubscriber(uSeq);
4151 m_standalone,
"ripple::NetworkOPsImp::acceptLedger : is standalone");
4154 Throw<std::runtime_error>(
4155 "Operation only possible in STANDALONE mode.");
4170 jvResult[jss::ledger_index] = lpClosed->info().seq;
4171 jvResult[jss::ledger_hash] =
to_string(lpClosed->info().hash);
4173 lpClosed->info().closeTime.time_since_epoch().count());
4174 if (!lpClosed->rules().enabled(featureXRPFees))
4176 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
4177 jvResult[jss::reserve_base] =
4178 lpClosed->fees().accountReserve(0).jsonClipped();
4179 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
4184 jvResult[jss::validated_ledgers] =
4190 .emplace(isrListener->getSeq(), isrListener)
4200 .emplace(isrListener->getSeq(), isrListener)
4226 .emplace(isrListener->getSeq(), isrListener)
4254 jvResult[jss::random] =
to_string(uRandom);
4256 jvResult[jss::load_base] = feeTrack.getLoadBase();
4257 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4258 jvResult[jss::hostid] =
getHostId(admin);
4259 jvResult[jss::pubkey_node] =
4264 .emplace(isrListener->getSeq(), isrListener)
4282 .emplace(isrListener->getSeq(), isrListener)
4300 .emplace(isrListener->getSeq(), isrListener)
4318 .emplace(isrListener->getSeq(), isrListener)
4342 .emplace(isrListener->getSeq(), isrListener)
4360 .emplace(isrListener->getSeq(), isrListener)
4408 if (map.find(pInfo->getSeq()) != map.end())
4415#ifndef USE_NEW_BOOK_PAGE
4426 unsigned int iLimit,
4436 uint256 uTipIndex = uBookBase;
4440 stream <<
"getBookPage:" << book;
4441 stream <<
"getBookPage: uBookBase=" << uBookBase;
4442 stream <<
"getBookPage: uBookEnd=" << uBookEnd;
4443 stream <<
"getBookPage: uTipIndex=" << uTipIndex;
4452 bool bDirectAdvance =
true;
4456 unsigned int uBookEntry;
4462 while (!bDone && iLimit-- > 0)
4466 bDirectAdvance =
false;
4470 auto const ledgerIndex = view.
succ(uTipIndex, uBookEnd);
4474 sleOfferDir.
reset();
4483 uTipIndex = sleOfferDir->key();
4486 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4489 <<
"getBookPage: uTipIndex=" << uTipIndex;
4491 <<
"getBookPage: offerIndex=" << offerIndex;
4501 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4502 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4503 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4505 bool firstOwnerOffer(
true);
4511 saOwnerFunds = saTakerGets;
4513 else if (bGlobalFreeze)
4521 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4522 if (umBalanceEntry != umBalance.
end())
4526 saOwnerFunds = umBalanceEntry->second;
4527 firstOwnerOffer =
false;
4541 if (saOwnerFunds < beast::zero)
4545 saOwnerFunds.
clear();
4553 STAmount saOwnerFundsLimit = saOwnerFunds;
4565 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4568 if (saOwnerFundsLimit >= saTakerGets)
4571 saTakerGetsFunded = saTakerGets;
4577 saTakerGetsFunded = saOwnerFundsLimit;
4579 saTakerGetsFunded.
setJson(jvOffer[jss::taker_gets_funded]);
4583 saTakerGetsFunded, saDirRate, saTakerPays.
issue()))
4584 .setJson(jvOffer[jss::taker_pays_funded]);
4590 saOwnerFunds,
multiply(saTakerGetsFunded, offerRate));
4592 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4596 jvOf[jss::quality] = saDirRate.
getText();
4598 if (firstOwnerOffer)
4599 jvOf[jss::owner_funds] = saOwnerFunds.
getText();
4606 if (!
cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4608 bDirectAdvance =
true;
4613 <<
"getBookPage: offerIndex=" << offerIndex;
4633 unsigned int iLimit,
4641 MetaView lesActive(lpLedger,
tapNONE,
true);
4642 OrderBookIterator obIterator(lesActive, book);
4646 bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.
out.
account) ||
4647 lesActive.isGlobalFrozen(book.
in.
account);
4649 while (iLimit-- > 0 && obIterator.nextOffer())
4654 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4655 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4656 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4657 STAmount saDirRate = obIterator.getCurrentRate();
4663 saOwnerFunds = saTakerGets;
4665 else if (bGlobalFreeze)
4673 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4675 if (umBalanceEntry != umBalance.
end())
4679 saOwnerFunds = umBalanceEntry->second;
4685 saOwnerFunds = lesActive.accountHolds(
4691 if (saOwnerFunds.isNegative())
4695 saOwnerFunds.zero();
4702 STAmount saTakerGetsFunded;
4703 STAmount saOwnerFundsLimit = saOwnerFunds;
4715 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4718 if (saOwnerFundsLimit >= saTakerGets)
4721 saTakerGetsFunded = saTakerGets;
4726 saTakerGetsFunded = saOwnerFundsLimit;
4728 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4734 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4735 .setJson(jvOffer[jss::taker_pays_funded]);
4738 STAmount saOwnerPays = (
parityRate == offerRate)
4741 saOwnerFunds,
multiply(saTakerGetsFunded, offerRate));
4743 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4745 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4749 jvOf[jss::quality] = saDirRate.
getText();
4764 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4804 ++counters_[
static_cast<std::size_t>(om)].transitions;
4806 counters_[
static_cast<std::size_t>(om)].transitions == 1)
4808 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4809 now - processStart_)
4813 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4822 auto [counters, mode, start, initialSync] = getCounterData();
4823 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4833 auto& state = obj[jss::state_accounting][
states_[i]];
4834 state[jss::transitions] =
std::to_string(counters[i].transitions);
4835 state[jss::duration_us] =
std::to_string(counters[i].dur.count());
4839 obj[jss::initial_sync_duration_us] =
std::to_string(initialSync);
4854 boost::asio::io_service& io_svc,
4858 return std::make_unique<NetworkOPsImp>(
T back_inserter(T... args)
Decorator for streaming out compact json.
Lightweight wrapper to tag static string.
Value & append(Value const &value)
Append value to array at the end.
bool isMember(char const *key) const
Return true if the object has a member named key.
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
A generic endpoint for log messages.
Stream trace() const
Severity stream access functions.
A metric for measuring an integral value.
void set(value_type value) const
Set the value on the gauge.
A reference to a handler for performing polled collection.
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
std::uint32_t getLoadFee() const
NetClock::time_point getReportTime() const
PublicKey const & identity() const
std::size_t size() const
The number of nodes in the cluster list.
std::string SERVER_DOMAIN
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
int RELAY_UNTRUSTED_VALIDATIONS
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
A pool of threads to perform work.
Json::Value getJson(int c=0)
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
std::uint32_t getClusterFee() const
std::uint32_t getLocalFee() const
std::uint32_t getLoadBase() const
std::uint32_t getRemoteFee() const
std::uint32_t getLoadFactor() const
void heartbeat()
Reset the stall detection timer.
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
std::uint64_t initialSyncUs_
CounterData getCounterData() const
std::chrono::steady_clock::time_point start_
static std::array< Json::StaticString const, 5 > const states_
std::chrono::steady_clock::time_point const processStart_
Transaction with input flags and results to be applied in batches.
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
std::shared_ptr< Transaction > const transaction
void processClusterTimer()
boost::asio::steady_timer accountHistoryTxTimer_
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
std::vector< TransactionStatus > mTransactions
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
std::optional< PublicKey > const validatorPK_
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
SubInfoMapType mSubAccount
std::optional< PublicKey > const validatorMasterPK_
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
std::atomic< bool > unlBlocked_
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
std::atomic< bool > needNetworkLedger_
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
void setMode(OperatingMode om) override
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
void clearNeedNetworkLedger() override
DispatchState mDispatchState
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
void reportFeeChange() override
void processHeartbeatTimer()
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
bool subTransactions(InfoSub::ref ispListener) override
std::atomic< bool > amendmentWarned_
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
void getCountsJson(Json::Value &obj)
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Writable ledger view that accumulates state and tx changes.
BookListeners::pointer getBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &alTx, MultiApiJson const &jvObj)
BookListeners::pointer makeBookListeners(Book const &)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Represents a set of transactions in RCLConsensus.
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Collects logging information.
std::unique_ptr< std::stringstream > const & ss()
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
std::string getText() const override
Issue const & issue() const
std::optional< T > get(std::string const &name) const
std::size_t size() const noexcept
void const * data() const noexcept
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
std::chrono::seconds closeOffset() const
time_point closeTime() const
Returns the predicted close time, in network time.
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Validator keys and manifest as set in configuration file.
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Json::Value jsonClipped() const
static constexpr std::size_t size()
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
T emplace_back(T... args)
@ arrayValue
array value (ordered list)
@ objectValue
object value (collection of name/value pairs).
void rngfill(void *const buffer, std::size_t const bytes, Generator &g)
std::string const & getVersionString()
Server version.
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
void insertNFTSyntheticInJson(Json::Value &, std::shared_ptr< STTx const > const &, TxMeta const &)
Adds common synthetic fields to transaction-related JSON responses.
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Keylet account(AccountID const &id) noexcept
AccountID root.
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Rate rate(Env &env, Account const &account, std::uint32_t const &seq)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
STAmount divide(STAmount const &amount, Rate const &rate)
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
std::uint64_t getQuality(uint256 const &uBase)
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
auto constexpr muldiv_max
std::unique_ptr< LocalTxs > make_LocalTxs()
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
STAmount amountFromQuality(std::uint64_t rate)
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
@ warnRPC_UNSUPPORTED_MAJORITY
@ warnRPC_AMENDMENT_BLOCKED
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
AccountID calcAccountID(PublicKey const &pk)
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Json::Value rpcError(int iError)
bool isTefFailure(TER x) noexcept
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
std::string strHex(FwdIt begin, FwdIt end)
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
void forAllApiVersions(Fn const &fn, Args &&... args)
bool isTerRetry(TER x) noexcept
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
uint256 getQualityNext(uint256 const &uBase)
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
bool isTesSuccess(TER x) noexcept
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
std::string to_string_iso(date::sys_time< Duration > tp)
std::string to_string(base_uint< Bits, Tag > const &a)
FeeSetup setup_FeeVote(Section const §ion)
bool isTemMalformed(TER x) noexcept
Number root(Number f, unsigned d)
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
constexpr std::size_t maxPoppedTransactions
bool transResultInfo(TER code, std::string &token, std::string &text)
bool isTelLocal(TER x) noexcept
uint256 getBookBase(Book const &book)
constexpr std::uint32_t tfInnerBatchTxn
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
static std::uint32_t trunc32(std::uint64_t v)
static auto const genesisAccountId
T set_intersection(T... args)
std::string serialized
The manifest in serialized form.
std::uint32_t sequence
The sequence number of this manifest.
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Server fees published on server subscription.
bool operator!=(ServerFeeSummary const &b) const
ServerFeeSummary()=default
std::optional< TxQ::Metrics > em
std::uint32_t loadFactorServer
bool operator==(ServerFeeSummary const &b) const
std::uint32_t loadBaseServer
decltype(initialSyncUs_) initialSyncUs
decltype(counters_) counters
std::uint64_t transitions
std::chrono::microseconds dur
beast::insight::Gauge full_transitions
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
beast::insight::Hook hook
beast::insight::Gauge connected_duration
beast::insight::Gauge tracking_duration
beast::insight::Gauge connected_transitions
beast::insight::Gauge disconnected_transitions
beast::insight::Gauge syncing_duration
beast::insight::Gauge tracking_transitions
beast::insight::Gauge full_duration
beast::insight::Gauge disconnected_duration
beast::insight::Gauge syncing_transitions
std::uint32_t historyLastLedgerSeq_
std::uint32_t separationLedgerSeq_
AccountID const accountId_
std::uint32_t forwardTxIndex_
std::atomic< bool > stopHistorical_
std::int32_t historyTxIndex_
SubAccountHistoryIndex(AccountID const &accountId)
std::shared_ptr< SubAccountHistoryIndex > index_
std::shared_ptr< SubAccountHistoryIndex > index_
Represents a transfer rate.
Data format for exchanging consumption information across peers.
std::vector< Item > items
Changes in trusted nodes after updating validator list.
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
IsMemberResult isMember(char const *key) const
void set(char const *key, auto const &v)
Select all peers (except optional excluded) that are in our cluster.
Sends a message to all peers.
T time_since_epoch(T... args)