20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/main/Tuning.h>
32#include <xrpld/app/misc/AmendmentTable.h>
33#include <xrpld/app/misc/DeliverMax.h>
34#include <xrpld/app/misc/HashRouter.h>
35#include <xrpld/app/misc/LoadFeeTrack.h>
36#include <xrpld/app/misc/NetworkOPs.h>
37#include <xrpld/app/misc/Transaction.h>
38#include <xrpld/app/misc/TxQ.h>
39#include <xrpld/app/misc/ValidatorKeys.h>
40#include <xrpld/app/misc/ValidatorList.h>
41#include <xrpld/app/misc/detail/AccountTxPaging.h>
42#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
43#include <xrpld/app/tx/apply.h>
44#include <xrpld/consensus/Consensus.h>
45#include <xrpld/consensus/ConsensusParms.h>
46#include <xrpld/overlay/Cluster.h>
47#include <xrpld/overlay/Overlay.h>
48#include <xrpld/overlay/predicates.h>
49#include <xrpld/perflog/PerfLog.h>
50#include <xrpld/rpc/BookChanges.h>
51#include <xrpld/rpc/CTID.h>
52#include <xrpld/rpc/DeliveredAmount.h>
53#include <xrpld/rpc/MPTokenIssuanceID.h>
54#include <xrpld/rpc/ServerHandler.h>
56#include <xrpl/basics/UptimeClock.h>
57#include <xrpl/basics/mulDiv.h>
58#include <xrpl/basics/safe_cast.h>
59#include <xrpl/basics/scope.h>
60#include <xrpl/beast/utility/rngfill.h>
61#include <xrpl/crypto/RFC1751.h>
62#include <xrpl/crypto/csprng.h>
63#include <xrpl/protocol/BuildInfo.h>
64#include <xrpl/protocol/Feature.h>
65#include <xrpl/protocol/MultiApiJson.h>
66#include <xrpl/protocol/NFTSyntheticSerializer.h>
67#include <xrpl/protocol/RPCErr.h>
68#include <xrpl/protocol/TxFlags.h>
69#include <xrpl/protocol/jss.h>
70#include <xrpl/resource/Fees.h>
71#include <xrpl/resource/ResourceManager.h>
73#include <boost/asio/ip/host_name.hpp>
74#include <boost/asio/steady_timer.hpp>
113 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
156 std::chrono::steady_clock::time_point
start_ =
217 return !(*
this != b);
236 boost::asio::io_service& io_svc,
250 app_.logs().journal(
"FeeVote")),
253 app.getInboundTransactions(),
254 beast::get_abstract_clock<
std::chrono::steady_clock>(),
256 app_.logs().journal(
"LedgerConsensus"))
258 validatorKeys.keys ? validatorKeys.keys->publicKey
261 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
456 getServerInfo(
bool human,
bool admin,
bool counters)
override;
483 TER result)
override;
517 bool historyOnly)
override;
523 bool historyOnly)
override;
591 boost::system::error_code ec;
596 <<
"NetworkOPs: heartbeatTimer cancel error: "
605 <<
"NetworkOPs: clusterTimer cancel error: "
614 <<
"NetworkOPs: accountHistoryTxTimer cancel error: "
619 using namespace std::chrono_literals;
629 boost::asio::steady_timer& timer,
812 template <
class Handler>
814 Handler
const& handler,
816 :
hook(collector->make_hook(handler))
819 "Disconnected_duration"))
822 "Connected_duration"))
824 collector->make_gauge(
"State_Accounting",
"Syncing_duration"))
827 "Tracking_duration"))
829 collector->make_gauge(
"State_Accounting",
"Full_duration"))
832 "Disconnected_transitions"))
835 "Connected_transitions"))
838 "Syncing_transitions"))
841 "Tracking_transitions"))
843 collector->make_gauge(
"State_Accounting",
"Full_transitions"))
872 {
"disconnected",
"connected",
"syncing",
"tracking",
"full"}};
934 static std::string const hostname = boost::asio::ip::host_name();
941 static std::string const shroudedHostId = [
this]() {
947 return shroudedHostId;
962 boost::asio::steady_timer& timer,
969 [
this, onExpire, onError](boost::system::error_code
const& e) {
970 if ((e.value() == boost::system::errc::success) &&
971 (!m_job_queue.isStopped()))
976 if (e.value() != boost::system::errc::success &&
977 e.value() != boost::asio::error::operation_aborted)
980 JLOG(m_journal.error())
981 <<
"Timer got error '" << e.message()
982 <<
"'. Restarting timer.";
987 timer.expires_from_now(expiry_time);
988 timer.async_wait(std::move(*optionalCountedHandler));
993NetworkOPsImp::setHeartbeatTimer()
997 mConsensus.parms().ledgerGRANULARITY,
999 m_job_queue.addJob(jtNETOP_TIMER,
"NetOPs.heartbeat", [this]() {
1000 processHeartbeatTimer();
1003 [
this]() { setHeartbeatTimer(); });
1007NetworkOPsImp::setClusterTimer()
1009 using namespace std::chrono_literals;
1016 processClusterTimer();
1019 [
this]() { setClusterTimer(); });
1025 JLOG(m_journal.debug()) <<
"Scheduling AccountHistory job for account "
1027 using namespace std::chrono_literals;
1029 accountHistoryTxTimer_,
1031 [
this, subInfo]() { addAccountHistoryJob(subInfo); },
1032 [
this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1036NetworkOPsImp::processHeartbeatTimer()
1039 "Heartbeat Timer", mConsensus.validating(), m_journal);
1047 std::size_t const numPeers = app_.overlay().size();
1050 if (numPeers < minPeerCount_)
1052 if (mMode != OperatingMode::DISCONNECTED)
1054 setMode(OperatingMode::DISCONNECTED);
1056 ss <<
"Node count (" << numPeers <<
") has fallen "
1057 <<
"below required minimum (" << minPeerCount_ <<
").";
1058 JLOG(m_journal.warn()) << ss.
str();
1059 CLOG(clog.
ss()) <<
"set mode to DISCONNECTED: " << ss.
str();
1064 <<
"already DISCONNECTED. too few peers (" << numPeers
1065 <<
"), need at least " << minPeerCount_;
1072 setHeartbeatTimer();
1077 if (mMode == OperatingMode::DISCONNECTED)
1079 setMode(OperatingMode::CONNECTED);
1080 JLOG(m_journal.info())
1081 <<
"Node count (" << numPeers <<
") is sufficient.";
1082 CLOG(clog.
ss()) <<
"setting mode to CONNECTED based on " << numPeers
1088 auto origMode = mMode.load();
1089 CLOG(clog.
ss()) <<
"mode: " << strOperatingMode(origMode,
true);
1090 if (mMode == OperatingMode::SYNCING)
1091 setMode(OperatingMode::SYNCING);
1092 else if (mMode == OperatingMode::CONNECTED)
1093 setMode(OperatingMode::CONNECTED);
1094 auto newMode = mMode.load();
1095 if (origMode != newMode)
1098 <<
", changing to " << strOperatingMode(newMode,
true);
1100 CLOG(clog.
ss()) <<
". ";
1103 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.
ss());
1105 CLOG(clog.
ss()) <<
"consensus phase " << to_string(mLastConsensusPhase);
1107 if (mLastConsensusPhase != currPhase)
1109 reportConsensusStateChange(currPhase);
1110 mLastConsensusPhase = currPhase;
1111 CLOG(clog.
ss()) <<
" changed to " << to_string(mLastConsensusPhase);
1113 CLOG(clog.
ss()) <<
". ";
1115 setHeartbeatTimer();
1119NetworkOPsImp::processClusterTimer()
1121 if (app_.cluster().size() == 0)
1124 using namespace std::chrono_literals;
1126 bool const update = app_.cluster().update(
1127 app_.nodeIdentity().first,
1129 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1130 ? app_.getFeeTrack().getLocalFee()
1132 app_.timeKeeper().now());
1136 JLOG(m_journal.debug()) <<
"Too soon to send cluster update";
1141 protocol::TMCluster cluster;
1142 app_.cluster().for_each([&cluster](
ClusterNode const& node) {
1143 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1148 n.set_nodename(node.
name());
1152 for (
auto& item : gossip.
items)
1154 protocol::TMLoadSource& node = *cluster.add_loadsources();
1155 node.set_name(to_string(item.address));
1156 node.set_cost(item.balance);
1158 app_.overlay().foreach(
send_if(
1159 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1170 if (mode == OperatingMode::FULL && admin)
1172 auto const consensusMode = mConsensus.mode();
1173 if (consensusMode != ConsensusMode::wrongLedger)
1175 if (consensusMode == ConsensusMode::proposing)
1178 if (mConsensus.validating())
1179 return "validating";
1189 if (isNeedNetworkLedger())
1197 m_ledgerMaster.getValidatedRules().enabled(featureBatch))
1199 JLOG(m_journal.error())
1200 <<
"Submitted transaction invalid: tfInnerBatchTxn flag present.";
1207 auto const txid = trans->getTransactionID();
1208 auto const flags = app_.getHashRouter().getFlags(txid);
1210 if ((flags & SF_BAD) != 0)
1212 JLOG(m_journal.warn()) <<
"Submitted transaction cached bad";
1219 app_.getHashRouter(),
1221 m_ledgerMaster.getValidatedRules(),
1224 if (validity != Validity::Valid)
1226 JLOG(m_journal.warn())
1227 <<
"Submitted transaction invalid: " << reason;
1233 JLOG(m_journal.warn())
1234 <<
"Exception checking transaction " << txid <<
": " << ex.
what();
1241 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1243 m_job_queue.addJob(
jtTRANSACTION,
"submitTxn", [
this, tx]() {
1245 processTransaction(t,
false,
false, FailHard::no);
1252 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1254 if ((newFlags & SF_BAD) != 0)
1257 JLOG(m_journal.warn()) << transaction->getID() <<
": cached bad!\n";
1258 transaction->setStatus(
INVALID);
1263 auto const view = m_ledgerMaster.getCurrentLedger();
1268 auto const sttx = *transaction->getSTransaction();
1269 if (sttx.isFlag(
tfInnerBatchTxn) && view->rules().enabled(featureBatch))
1271 transaction->setStatus(
INVALID);
1273 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1280 auto const [validity, reason] =
1281 checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config());
1283 validity == Validity::Valid,
1284 "ripple::NetworkOPsImp::processTransaction : valid validity");
1287 if (validity == Validity::SigBad)
1289 JLOG(m_journal.info()) <<
"Transaction has bad signature: " << reason;
1290 transaction->setStatus(
INVALID);
1292 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1297 app_.getMasterTransaction().canonicalize(&transaction);
1303NetworkOPsImp::processTransaction(
1309 auto ev = m_job_queue.makeLoadEvent(
jtTXN_PROC,
"ProcessTXN");
1312 if (!preProcessTransaction(transaction))
1316 doTransactionSync(transaction, bUnlimited, failType);
1318 doTransactionAsync(transaction, bUnlimited, failType);
1322NetworkOPsImp::doTransactionAsync(
1329 if (transaction->getApplying())
1332 mTransactions.push_back(
1334 transaction->setApplying();
1336 if (mDispatchState == DispatchState::none)
1338 if (m_job_queue.addJob(
1339 jtBATCH,
"transactionBatch", [
this]() { transactionBatch(); }))
1341 mDispatchState = DispatchState::scheduled;
1347NetworkOPsImp::doTransactionSync(
1354 if (!transaction->getApplying())
1356 mTransactions.push_back(
1358 transaction->setApplying();
1361 doTransactionSyncBatch(
1363 return transaction->getApplying();
1368NetworkOPsImp::doTransactionSyncBatch(
1374 if (mDispatchState == DispatchState::running)
1383 if (mTransactions.size())
1386 if (m_job_queue.addJob(
jtBATCH,
"transactionBatch", [
this]() {
1390 mDispatchState = DispatchState::scheduled;
1394 }
while (retryCallback(lock));
1400 auto ev = m_job_queue.makeLoadEvent(
jtTXN_PROC,
"ProcessTXNSet");
1403 for (
auto const& [_, tx] :
set)
1406 auto transaction = std::make_shared<Transaction>(tx, reason, app_);
1408 if (transaction->getStatus() ==
INVALID)
1410 if (!reason.
empty())
1412 JLOG(m_journal.trace())
1413 <<
"Exception checking transaction: " << reason;
1415 app_.getHashRouter().setFlags(tx->getTransactionID(), SF_BAD);
1420 if (!preProcessTransaction(transaction))
1431 for (
auto& transaction : candidates)
1433 if (!transaction->getApplying())
1435 transactions.
emplace_back(transaction,
false,
false, FailHard::no);
1436 transaction->setApplying();
1440 if (mTransactions.empty())
1441 mTransactions.swap(transactions);
1444 mTransactions.reserve(mTransactions.size() + transactions.
size());
1445 for (
auto& t : transactions)
1446 mTransactions.push_back(std::move(t));
1452 "ripple::NetworkOPsImp::processTransactionSet has lock");
1454 mTransactions.begin(), mTransactions.end(), [](
auto const& t) {
1455 return t.transaction->getApplying();
1461NetworkOPsImp::transactionBatch()
1465 if (mDispatchState == DispatchState::running)
1468 while (mTransactions.size())
1479 mTransactions.
swap(transactions);
1481 !transactions.
empty(),
1482 "ripple::NetworkOPsImp::apply : non-empty transactions");
1484 mDispatchState != DispatchState::running,
1485 "ripple::NetworkOPsImp::apply : is not running");
1487 mDispatchState = DispatchState::running;
1493 bool changed =
false;
1496 m_ledgerMaster.peekMutex(), std::defer_lock};
1507 if (e.failType == FailHard::yes)
1510 auto const result = app_.getTxQ().apply(
1511 app_, view, e.transaction->getSTransaction(), flags, j);
1512 e.result = result.ter;
1513 e.applied = result.applied;
1514 changed = changed || result.applied;
1523 if (
auto const l = m_ledgerMaster.getValidatedLedger())
1524 validatedLedgerIndex = l->info().seq;
1526 auto newOL = app_.openLedger().current();
1529 e.transaction->clearSubmitResult();
1533 pubProposedTransaction(
1534 newOL, e.transaction->getSTransaction(), e.result);
1535 e.transaction->setApplied();
1538 e.transaction->setResult(e.result);
1541 app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);
1550 JLOG(m_journal.info())
1551 <<
"TransactionResult: " << token <<
": " << human;
1556 bool addLocal = e.local;
1560 JLOG(m_journal.debug())
1561 <<
"Transaction is now included in open ledger";
1562 e.transaction->setStatus(
INCLUDED);
1567 auto const& txCur = e.transaction->getSTransaction();
1570 for (
auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
1572 txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
1578 auto t = std::make_shared<Transaction>(trans, reason, app_);
1579 if (t->getApplying())
1581 submit_held.
emplace_back(t,
false,
false, FailHard::no);
1590 JLOG(m_journal.info()) <<
"Transaction is obsolete";
1591 e.transaction->setStatus(
OBSOLETE);
1595 JLOG(m_journal.debug())
1596 <<
"Transaction is likely to claim a"
1597 <<
" fee, but is queued until fee drops";
1599 e.transaction->setStatus(
HELD);
1603 m_ledgerMaster.addHeldTransaction(e.transaction);
1604 e.transaction->setQueued();
1605 e.transaction->setKept();
1611 if (e.failType != FailHard::yes)
1613 auto const lastLedgerSeq =
1614 e.transaction->getSTransaction()->at(
1615 ~sfLastLedgerSequence);
1616 auto const ledgersLeft = lastLedgerSeq
1618 m_ledgerMaster.getCurrentLedgerIndex()
1636 (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
1637 app_.getHashRouter().setFlags(
1638 e.transaction->getID(), SF_HELD))
1641 JLOG(m_journal.debug())
1642 <<
"Transaction should be held: " << e.result;
1643 e.transaction->setStatus(
HELD);
1644 m_ledgerMaster.addHeldTransaction(e.transaction);
1645 e.transaction->setKept();
1648 JLOG(m_journal.debug())
1649 <<
"Not holding transaction "
1650 << e.transaction->getID() <<
": "
1651 << (e.local ?
"local" :
"network") <<
", "
1652 <<
"result: " << e.result <<
" ledgers left: "
1653 << (ledgersLeft ? to_string(*ledgersLeft)
1659 JLOG(m_journal.debug())
1660 <<
"Status other than success " << e.result;
1661 e.transaction->setStatus(
INVALID);
1664 auto const enforceFailHard =
1665 e.failType == FailHard::yes && !
isTesSuccess(e.result);
1667 if (addLocal && !enforceFailHard)
1669 m_localTX->push_back(
1670 m_ledgerMaster.getCurrentLedgerIndex(),
1671 e.transaction->getSTransaction());
1672 e.transaction->setKept();
1676 ((mMode != OperatingMode::FULL) &&
1677 (e.failType != FailHard::yes) && e.local) ||
1682 app_.getHashRouter().shouldRelay(e.transaction->getID());
1683 if (
auto const sttx = *(e.transaction->getSTransaction());
1688 newOL->rules().enabled(featureBatch)))
1690 protocol::TMTransaction tx;
1694 tx.set_rawtransaction(s.
data(), s.
size());
1695 tx.set_status(protocol::tsCURRENT);
1696 tx.set_receivetimestamp(
1697 app_.timeKeeper().now().time_since_epoch().count());
1700 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1701 e.transaction->setBroadcast();
1705 if (validatedLedgerIndex)
1707 auto [fee, accountSeq, availableSeq] =
1708 app_.getTxQ().getTxRequiredFeeAndSeq(
1709 *newOL, e.transaction->getSTransaction());
1710 e.transaction->setCurrentLedgerState(
1711 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1719 e.transaction->clearApplying();
1721 if (!submit_held.
empty())
1723 if (mTransactions.empty())
1724 mTransactions.swap(submit_held);
1727 mTransactions.reserve(mTransactions.size() + submit_held.
size());
1728 for (
auto& e : submit_held)
1729 mTransactions.push_back(std::move(e));
1735 mDispatchState = DispatchState::none;
1743NetworkOPsImp::getOwnerInfo(
1748 auto root = keylet::ownerDir(account);
1749 auto sleNode = lpLedger->read(keylet::page(
root));
1756 for (
auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1758 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1761 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1763 switch (sleCur->getType())
1766 if (!jvObjects.
isMember(jss::offers))
1767 jvObjects[jss::offers] =
1770 jvObjects[jss::offers].
append(
1771 sleCur->getJson(JsonOptions::none));
1774 case ltRIPPLE_STATE:
1775 if (!jvObjects.
isMember(jss::ripple_lines))
1777 jvObjects[jss::ripple_lines] =
1781 jvObjects[jss::ripple_lines].
append(
1782 sleCur->getJson(JsonOptions::none));
1785 case ltACCOUNT_ROOT:
1789 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1795 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1799 sleNode = lpLedger->read(keylet::page(
root, uNodeDir));
1802 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1815NetworkOPsImp::isBlocked()
1817 return isAmendmentBlocked() || isUNLBlocked();
1821NetworkOPsImp::isAmendmentBlocked()
1823 return amendmentBlocked_;
1827NetworkOPsImp::setAmendmentBlocked()
1829 amendmentBlocked_ =
true;
1830 setMode(OperatingMode::CONNECTED);
1834NetworkOPsImp::isAmendmentWarned()
1836 return !amendmentBlocked_ && amendmentWarned_;
1840NetworkOPsImp::setAmendmentWarned()
1842 amendmentWarned_ =
true;
1846NetworkOPsImp::clearAmendmentWarned()
1848 amendmentWarned_ =
false;
1852NetworkOPsImp::isUNLBlocked()
1858NetworkOPsImp::setUNLBlocked()
1861 setMode(OperatingMode::CONNECTED);
1865NetworkOPsImp::clearUNLBlocked()
1867 unlBlocked_ =
false;
1871NetworkOPsImp::checkLastClosedLedger(
1880 JLOG(m_journal.trace()) <<
"NetworkOPsImp::checkLastClosedLedger";
1882 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1887 uint256 closedLedger = ourClosed->info().hash;
1888 uint256 prevClosedLedger = ourClosed->info().parentHash;
1889 JLOG(m_journal.trace()) <<
"OurClosed: " << closedLedger;
1890 JLOG(m_journal.trace()) <<
"PrevClosed: " << prevClosedLedger;
1895 auto& validations = app_.getValidations();
1896 JLOG(m_journal.debug())
1897 <<
"ValidationTrie " <<
Json::Compact(validations.getJsonTrie());
1901 peerCounts[closedLedger] = 0;
1902 if (mMode >= OperatingMode::TRACKING)
1903 peerCounts[closedLedger]++;
1905 for (
auto& peer : peerList)
1907 uint256 peerLedger = peer->getClosedLedgerHash();
1910 ++peerCounts[peerLedger];
1913 for (
auto const& it : peerCounts)
1914 JLOG(m_journal.debug()) <<
"L: " << it.first <<
" n=" << it.second;
1916 uint256 preferredLCL = validations.getPreferredLCL(
1918 m_ledgerMaster.getValidLedgerIndex(),
1921 bool switchLedgers = preferredLCL != closedLedger;
1923 closedLedger = preferredLCL;
1925 if (switchLedgers && (closedLedger == prevClosedLedger))
1928 JLOG(m_journal.info()) <<
"We won't switch to our own previous ledger";
1929 networkClosed = ourClosed->info().hash;
1930 switchLedgers =
false;
1933 networkClosed = closedLedger;
1938 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1941 consensus = app_.getInboundLedgers().acquire(
1942 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1945 (!m_ledgerMaster.canBeCurrent(consensus) ||
1946 !m_ledgerMaster.isCompatible(
1947 *consensus, m_journal.debug(),
"Not switching")))
1951 networkClosed = ourClosed->info().hash;
1955 JLOG(m_journal.warn()) <<
"We are not running on the consensus ledger";
1956 JLOG(m_journal.info()) <<
"Our LCL: " << ourClosed->info().hash
1958 JLOG(m_journal.info()) <<
"Net LCL " << closedLedger;
1960 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1962 setMode(OperatingMode::CONNECTED);
1970 switchLastClosedLedger(consensus);
1977NetworkOPsImp::switchLastClosedLedger(
1981 JLOG(m_journal.error())
1982 <<
"JUMP last closed ledger to " << newLCL->info().hash;
1984 clearNeedNetworkLedger();
1987 app_.getTxQ().processClosedLedger(app_, *newLCL,
true);
1994 auto retries = m_localTX->getTxSet();
1995 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
2000 rules.
emplace(app_.config().features);
2001 app_.openLedger().accept(
2012 return app_.getTxQ().accept(app_, view);
2016 m_ledgerMaster.switchLCL(newLCL);
2018 protocol::TMStatusChange s;
2019 s.set_newevent(protocol::neSWITCHED_LEDGER);
2020 s.set_ledgerseq(newLCL->info().seq);
2021 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
2022 s.set_ledgerhashprevious(
2023 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
2024 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
2026 app_.overlay().foreach(
2027 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
2031NetworkOPsImp::beginConsensus(
2037 "ripple::NetworkOPsImp::beginConsensus : nonzero input");
2039 auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
2041 JLOG(m_journal.info()) <<
"Consensus time for #" << closingInfo.seq
2042 <<
" with LCL " << closingInfo.parentHash;
2044 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
2049 if (mMode == OperatingMode::FULL)
2051 JLOG(m_journal.warn()) <<
"Don't have LCL, going to tracking";
2052 setMode(OperatingMode::TRACKING);
2053 CLOG(clog) <<
"beginConsensus Don't have LCL, going to tracking. ";
2056 CLOG(clog) <<
"beginConsensus no previous ledger. ";
2061 prevLedger->info().hash == closingInfo.parentHash,
2062 "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
2065 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
2066 "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
2069 if (prevLedger->rules().enabled(featureNegativeUNL))
2070 app_.validators().setNegativeUNL(prevLedger->negativeUNL());
2071 TrustChanges const changes = app_.validators().updateTrusted(
2072 app_.getValidations().getCurrentNodeIDs(),
2073 closingInfo.parentCloseTime,
2076 app_.getHashRouter());
2078 if (!changes.
added.empty() || !changes.
removed.empty())
2080 app_.getValidations().trustChanged(changes.
added, changes.
removed);
2082 app_.getAmendmentTable().trustChanged(
2083 app_.validators().getQuorumKeys().second);
2086 mConsensus.startRound(
2087 app_.timeKeeper().closeTime(),
2095 if (mLastConsensusPhase != currPhase)
2097 reportConsensusStateChange(currPhase);
2098 mLastConsensusPhase = currPhase;
2101 JLOG(m_journal.debug()) <<
"Initiating consensus engine";
2108 auto const& peerKey = peerPos.
publicKey();
2109 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
2120 JLOG(m_journal.error())
2121 <<
"Received a proposal signed by MY KEY from a peer. This may "
2122 "indicate a misconfiguration where another node has the same "
2123 "validator key, or may be caused by unusual message routing and "
2128 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
2139 protocol::TMHaveTransactionSet msg;
2140 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
2141 msg.set_status(protocol::tsHAVE);
2142 app_.overlay().foreach(
2143 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
2147 mConsensus.gotTxSet(app_.timeKeeper().closeTime(),
RCLTxSet{map});
2153 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
2155 for (
auto const& it : app_.overlay().getActivePeers())
2157 if (it && (it->getClosedLedgerHash() == deadLedger))
2159 JLOG(m_journal.trace()) <<
"Killing obsolete peer status";
2166 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
2168 if (networkClosed.
isZero())
2170 CLOG(clog) <<
"endConsensus last closed ledger is zero. ";
2180 if (((mMode == OperatingMode::CONNECTED) ||
2181 (mMode == OperatingMode::SYNCING)) &&
2187 if (!needNetworkLedger_)
2188 setMode(OperatingMode::TRACKING);
2191 if (((mMode == OperatingMode::CONNECTED) ||
2192 (mMode == OperatingMode::TRACKING)) &&
2198 auto current = m_ledgerMaster.getCurrentLedger();
2199 if (app_.timeKeeper().now() < (
current->info().parentCloseTime +
2200 2 *
current->info().closeTimeResolution))
2202 setMode(OperatingMode::FULL);
2206 beginConsensus(networkClosed, clog);
2210NetworkOPsImp::consensusViewChange()
2212 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2214 setMode(OperatingMode::CONNECTED);
2224 if (!mStreamMaps[sManifests].empty())
2228 jvObj[jss::type] =
"manifestReceived";
2231 jvObj[jss::signing_key] =
2235 jvObj[jss::signature] =
strHex(*sig);
2238 jvObj[jss::domain] = mo.
domain;
2241 for (
auto i = mStreamMaps[sManifests].begin();
2242 i != mStreamMaps[sManifests].end();)
2244 if (
auto p = i->second.lock())
2246 p->send(jvObj,
true);
2251 i = mStreamMaps[sManifests].erase(i);
2257NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
2261 : loadFactorServer{loadFeeTrack.getLoadFactor()}
2262 , loadBaseServer{loadFeeTrack.getLoadBase()}
2264 , em{
std::move(escalationMetrics)}
2274 em.has_value() != b.
em.has_value())
2280 em->minProcessingFeeLevel != b.
em->minProcessingFeeLevel ||
2281 em->openLedgerFeeLevel != b.
em->openLedgerFeeLevel ||
2282 em->referenceFeeLevel != b.
em->referenceFeeLevel);
2315 jvObj[jss::type] =
"serverStatus";
2317 jvObj[jss::load_base] = f.loadBaseServer;
2318 jvObj[jss::load_factor_server] = f.loadFactorServer;
2319 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2324 safe_cast<std::uint64_t>(f.loadFactorServer),
2326 f.em->openLedgerFeeLevel,
2328 f.em->referenceFeeLevel)
2331 jvObj[jss::load_factor] =
trunc32(loadFactor);
2332 jvObj[jss::load_factor_fee_escalation] =
2333 f.em->openLedgerFeeLevel.jsonClipped();
2334 jvObj[jss::load_factor_fee_queue] =
2335 f.em->minProcessingFeeLevel.jsonClipped();
2336 jvObj[jss::load_factor_fee_reference] =
2337 f.em->referenceFeeLevel.jsonClipped();
2340 jvObj[jss::load_factor] = f.loadFactorServer;
2354 p->send(jvObj,
true);
2371 if (!streamMap.empty())
2374 jvObj[jss::type] =
"consensusPhase";
2375 jvObj[jss::consensus] =
to_string(phase);
2377 for (
auto i = streamMap.begin(); i != streamMap.end();)
2379 if (
auto p = i->second.lock())
2381 p->send(jvObj,
true);
2386 i = streamMap.erase(i);
2402 auto const signerPublic = val->getSignerPublic();
2404 jvObj[jss::type] =
"validationReceived";
2405 jvObj[jss::validation_public_key] =
2407 jvObj[jss::ledger_hash] =
to_string(val->getLedgerHash());
2408 jvObj[jss::signature] =
strHex(val->getSignature());
2409 jvObj[jss::full] = val->isFull();
2410 jvObj[jss::flags] = val->getFlags();
2411 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2412 jvObj[jss::data] =
strHex(val->getSerializer().slice());
2414 if (
auto version = (*val)[~sfServerVersion])
2417 if (
auto cookie = (*val)[~sfCookie])
2420 if (
auto hash = (*val)[~sfValidatedHash])
2421 jvObj[jss::validated_hash] =
strHex(*hash);
2423 auto const masterKey =
2426 if (masterKey != signerPublic)
2431 if (
auto const seq = (*val)[~sfLedgerSequence])
2432 jvObj[jss::ledger_index] = *seq;
2434 if (val->isFieldPresent(sfAmendments))
2437 for (
auto const& amendment : val->getFieldV256(sfAmendments))
2441 if (
auto const closeTime = (*val)[~sfCloseTime])
2442 jvObj[jss::close_time] = *closeTime;
2444 if (
auto const loadFee = (*val)[~sfLoadFee])
2445 jvObj[jss::load_fee] = *loadFee;
2447 if (
auto const baseFee = val->at(~sfBaseFee))
2448 jvObj[jss::base_fee] =
static_cast<double>(*baseFee);
2450 if (
auto const reserveBase = val->at(~sfReserveBase))
2451 jvObj[jss::reserve_base] = *reserveBase;
2453 if (
auto const reserveInc = val->at(~sfReserveIncrement))
2454 jvObj[jss::reserve_inc] = *reserveInc;
2458 if (
auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2459 baseFeeXRP && baseFeeXRP->native())
2460 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2462 if (
auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2463 reserveBaseXRP && reserveBaseXRP->native())
2464 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2466 if (
auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2467 reserveIncXRP && reserveIncXRP->native())
2468 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2477 if (jvTx.
isMember(jss::ledger_index))
2479 jvTx[jss::ledger_index] =
2480 std::to_string(jvTx[jss::ledger_index].asUInt());
2487 if (
auto p = i->second.lock())
2491 [&](
Json::Value const& jv) { p->send(jv, true); });
2511 jvObj[jss::type] =
"peerStatusChange";
2520 p->send(jvObj,
true);
2534 using namespace std::chrono_literals;
2566 <<
"recvValidation " << val->getLedgerHash() <<
" from " << source;
2582 <<
"Exception thrown for handling new validation "
2583 << val->getLedgerHash() <<
": " << e.
what();
2588 <<
"Unknown exception thrown for handling new validation "
2589 << val->getLedgerHash();
2601 ss <<
"VALIDATION: " << val->render() <<
" master_key: ";
2638 "This server is amendment blocked, and must be updated to be "
2639 "able to stay in sync with the network.";
2646 "This server has an expired validator list. validators.txt "
2647 "may be incorrectly configured or some [validator_list_sites] "
2648 "may be unreachable.";
2655 "One or more unsupported amendments have reached majority. "
2656 "Upgrade to the latest version before they are activated "
2657 "to avoid being amendment blocked.";
2658 if (
auto const expected =
2662 d[jss::expected_date] = expected->time_since_epoch().count();
2663 d[jss::expected_date_UTC] =
to_string(*expected);
2667 if (warnings.size())
2668 info[jss::warnings] = std::move(warnings);
2683 info[jss::time] =
to_string(std::chrono::floor<std::chrono::microseconds>(
2687 info[jss::network_ledger] =
"waiting";
2689 info[jss::validation_quorum] =
2697 info[jss::node_size] =
"tiny";
2700 info[jss::node_size] =
"small";
2703 info[jss::node_size] =
"medium";
2706 info[jss::node_size] =
"large";
2709 info[jss::node_size] =
"huge";
2718 info[jss::validator_list_expires] =
2719 safe_cast<Json::UInt>(when->time_since_epoch().count());
2721 info[jss::validator_list_expires] = 0;
2731 if (*when == TimeKeeper::time_point::max())
2733 x[jss::expiration] =
"never";
2734 x[jss::status] =
"active";
2741 x[jss::status] =
"active";
2743 x[jss::status] =
"expired";
2748 x[jss::status] =
"unknown";
2749 x[jss::expiration] =
"unknown";
2753#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2756#ifdef GIT_COMMIT_HASH
2757 x[jss::hash] = GIT_COMMIT_HASH;
2760 x[jss::branch] = GIT_BRANCH;
2765 info[jss::io_latency_ms] =
2773 info[jss::pubkey_validator] =
2778 info[jss::pubkey_validator] =
"none";
2788 info[jss::counters][jss::nodestore] = nodestore;
2792 info[jss::pubkey_node] =
2798 info[jss::amendment_blocked] =
true;
2812 lastClose[jss::converge_time_s] =
2817 lastClose[jss::converge_time] =
2821 info[jss::last_close] = lastClose;
2829 info[jss::network_id] =
static_cast<Json::UInt>(*netid);
2831 auto const escalationMetrics =
2839 auto const loadFactorFeeEscalation =
2841 escalationMetrics.openLedgerFeeLevel,
2843 escalationMetrics.referenceFeeLevel)
2847 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2851 info[jss::load_base] = loadBaseServer;
2852 info[jss::load_factor] =
trunc32(loadFactor);
2853 info[jss::load_factor_server] = loadFactorServer;
2860 info[jss::load_factor_fee_escalation] =
2861 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2862 info[jss::load_factor_fee_queue] =
2863 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2864 info[jss::load_factor_fee_reference] =
2865 escalationMetrics.referenceFeeLevel.jsonClipped();
2869 info[jss::load_factor] =
2870 static_cast<double>(loadFactor) / loadBaseServer;
2872 if (loadFactorServer != loadFactor)
2873 info[jss::load_factor_server] =
2874 static_cast<double>(loadFactorServer) / loadBaseServer;
2879 if (fee != loadBaseServer)
2880 info[jss::load_factor_local] =
2881 static_cast<double>(fee) / loadBaseServer;
2883 if (fee != loadBaseServer)
2884 info[jss::load_factor_net] =
2885 static_cast<double>(fee) / loadBaseServer;
2887 if (fee != loadBaseServer)
2888 info[jss::load_factor_cluster] =
2889 static_cast<double>(fee) / loadBaseServer;
2891 if (escalationMetrics.openLedgerFeeLevel !=
2892 escalationMetrics.referenceFeeLevel &&
2893 (admin || loadFactorFeeEscalation != loadFactor))
2894 info[jss::load_factor_fee_escalation] =
2895 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2896 escalationMetrics.referenceFeeLevel);
2897 if (escalationMetrics.minProcessingFeeLevel !=
2898 escalationMetrics.referenceFeeLevel)
2899 info[jss::load_factor_fee_queue] =
2900 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2901 escalationMetrics.referenceFeeLevel);
2914 XRPAmount const baseFee = lpClosed->fees().base;
2916 l[jss::seq] =
Json::UInt(lpClosed->info().seq);
2917 l[jss::hash] =
to_string(lpClosed->info().hash);
2922 l[jss::reserve_base] =
2923 lpClosed->fees().accountReserve(0).jsonClipped();
2924 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2926 lpClosed->info().closeTime.time_since_epoch().count());
2931 l[jss::reserve_base_xrp] =
2932 lpClosed->fees().accountReserve(0).decimalXRP();
2933 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2936 std::abs(closeOffset.count()) >= 60)
2937 l[jss::close_time_offset] =
2945 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2949 auto lCloseTime = lpClosed->info().closeTime;
2951 if (lCloseTime <= closeTime)
2953 using namespace std::chrono_literals;
2954 auto age = closeTime - lCloseTime;
2956 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2962 info[jss::validated_ledger] = l;
2964 info[jss::closed_ledger] = l;
2968 info[jss::published_ledger] =
"none";
2969 else if (lpPublished->info().seq != lpClosed->info().seq)
2970 info[jss::published_ledger] = lpPublished->info().seq;
2975 info[jss::jq_trans_overflow] =
2977 info[jss::peer_disconnects] =
2979 info[jss::peer_disconnects_resources] =
2984 "http",
"https",
"peer",
"ws",
"ws2",
"wss",
"wss2"};
2992 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2993 port.admin_user.empty() && port.admin_password.empty()))
3007 for (
auto const& p : proto)
3008 jv[jss::protocol].append(p);
3015 auto const optPort = grpcSection.
get(
"port");
3016 if (optPort && grpcSection.get(
"ip"))
3019 jv[jss::port] = *optPort;
3021 jv[jss::protocol].append(
"grpc");
3024 info[jss::ports] = std::move(ports);
3050 ledger->rules().enabled(featureBatch))
3054 transJson(transaction, result,
false, ledger, std::nullopt);
3068 [&](
Json::Value const& jv) { p->send(jv, true); });
3091 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted,
app_);
3093 lpAccepted->info().hash, alpAccepted);
3097 alpAccepted->getLedger().
get() == lpAccepted.
get(),
3098 "ripple::NetworkOPsImp::pubLedger : accepted input");
3102 <<
"Publishing ledger " << lpAccepted->info().seq <<
" "
3103 << lpAccepted->info().hash;
3111 jvObj[jss::type] =
"ledgerClosed";
3112 jvObj[jss::ledger_index] = lpAccepted->info().seq;
3113 jvObj[jss::ledger_hash] =
to_string(lpAccepted->info().hash);
3115 lpAccepted->info().closeTime.time_since_epoch().count());
3117 if (!lpAccepted->rules().enabled(featureXRPFees))
3119 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
3120 jvObj[jss::reserve_base] =
3121 lpAccepted->fees().accountReserve(0).jsonClipped();
3122 jvObj[jss::reserve_inc] =
3123 lpAccepted->fees().increment.jsonClipped();
3125 jvObj[jss::txn_count] =
Json::UInt(alpAccepted->size());
3129 jvObj[jss::validated_ledgers] =
3139 p->send(jvObj,
true);
3157 p->send(jvObj,
true);
3166 static bool firstTime =
true;
3173 for (
auto& inner : outer.second)
3175 auto& subInfo = inner.second;
3176 if (subInfo.index_->separationLedgerSeq_ == 0)
3179 alpAccepted->getLedger(), subInfo);
3188 for (
auto const& accTx : *alpAccepted)
3192 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3219 "reportConsensusStateChange->pubConsensus",
3250 jvObj[jss::type] =
"transaction";
3254 jvObj[jss::transaction] =
3261 jvObj[jss::meta], *ledger, transaction, meta->
get());
3264 jvObj[jss::meta], transaction, meta->
get());
3268 if (
auto const& lookup = ledger->txRead(transaction->getTransactionID());
3269 lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3271 uint32_t
const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
3273 if (transaction->isFieldPresent(sfNetworkID))
3274 netID = transaction->getFieldU32(sfNetworkID);
3279 jvObj[jss::ctid] = *ctid;
3281 if (!ledger->open())
3282 jvObj[jss::ledger_hash] =
to_string(ledger->info().hash);
3286 jvObj[jss::ledger_index] = ledger->info().seq;
3287 jvObj[jss::transaction][jss::date] =
3288 ledger->info().closeTime.time_since_epoch().count();
3289 jvObj[jss::validated] =
true;
3290 jvObj[jss::close_time_iso] =
to_string_iso(ledger->info().closeTime);
3296 jvObj[jss::validated] =
false;
3297 jvObj[jss::ledger_current_index] = ledger->info().seq;
3300 jvObj[jss::status] = validated ?
"closed" :
"proposed";
3301 jvObj[jss::engine_result] = sToken;
3302 jvObj[jss::engine_result_code] = result;
3303 jvObj[jss::engine_result_message] = sHuman;
3305 if (transaction->getTxnType() == ttOFFER_CREATE)
3307 auto const account = transaction->getAccountID(sfAccount);
3308 auto const amount = transaction->getFieldAmount(sfTakerGets);
3311 if (account != amount.issue().account)
3319 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3327 [&]<
unsigned Version>(
3329 RPC::insertDeliverMax(
3330 jvTx[jss::transaction], transaction->getTxnType(), Version);
3332 if constexpr (Version > 1)
3334 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3335 jvTx[jss::hash] = hash;
3339 jvTx[jss::transaction][jss::hash] = hash;
3352 auto const& stTxn = transaction.
getTxn();
3356 auto const trResult = transaction.
getResult();
3371 [&](
Json::Value const& jv) { p->send(jv, true); });
3388 [&](
Json::Value const& jv) { p->send(jv, true); });
3413 auto const currLedgerSeq = ledger->seq();
3420 for (
auto const& affectedAccount : transaction.
getAffected())
3425 auto it = simiIt->second.begin();
3427 while (it != simiIt->second.end())
3438 it = simiIt->second.erase(it);
3445 auto it = simiIt->second.begin();
3446 while (it != simiIt->second.end())
3457 it = simiIt->second.erase(it);
3464 auto& subs = histoIt->second;
3465 auto it = subs.begin();
3466 while (it != subs.end())
3469 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3483 it = subs.erase(it);
3494 <<
"pubAccountTransaction: "
3495 <<
"proposed=" << iProposed <<
", accepted=" << iAccepted;
3497 if (!notify.
empty() || !accountHistoryNotify.
empty())
3499 auto const& stTxn = transaction.
getTxn();
3503 auto const trResult = transaction.
getResult();
3509 isrListener->getApiVersion(),
3510 [&](
Json::Value const& jv) { isrListener->send(jv, true); });
3514 jvObj.
set(jss::account_history_boundary,
true);
3517 jvObj.
isMember(jss::account_history_tx_stream) ==
3519 "ripple::NetworkOPsImp::pubAccountTransaction : "
3520 "account_history_tx_stream not set");
3521 for (
auto& info : accountHistoryNotify)
3523 auto& index = info.index_;
3524 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3525 jvObj.
set(jss::account_history_tx_first,
true);
3527 jvObj.
set(jss::account_history_tx_index, index->forwardTxIndex_++);
3530 info.sink_->getApiVersion(),
3531 [&](
Json::Value const& jv) { info.sink_->send(jv, true); });
3556 for (
auto const& affectedAccount : tx->getMentionedAccounts())
3561 auto it = simiIt->second.begin();
3563 while (it != simiIt->second.end())
3574 it = simiIt->second.erase(it);
3581 JLOG(
m_journal.
trace()) <<
"pubProposedAccountTransaction: " << iProposed;
3583 if (!notify.
empty() || !accountHistoryNotify.
empty())
3590 isrListener->getApiVersion(),
3591 [&](
Json::Value const& jv) { isrListener->send(jv, true); });
3594 jvObj.
isMember(jss::account_history_tx_stream) ==
3596 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3597 "account_history_tx_stream not set");
3598 for (
auto& info : accountHistoryNotify)
3600 auto& index = info.index_;
3601 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3602 jvObj.
set(jss::account_history_tx_first,
true);
3603 jvObj.
set(jss::account_history_tx_index, index->forwardTxIndex_++);
3605 info.sink_->getApiVersion(),
3606 [&](
Json::Value const& jv) { info.sink_->send(jv, true); });
3623 for (
auto const& naAccountID : vnaAccountIDs)
3626 <<
"subAccount: account: " <<
toBase58(naAccountID);
3628 isrListener->insertSubAccountInfo(naAccountID, rt);
3633 for (
auto const& naAccountID : vnaAccountIDs)
3635 auto simIterator = subMap.
find(naAccountID);
3636 if (simIterator == subMap.
end())
3640 usisElement[isrListener->getSeq()] = isrListener;
3642 subMap.
insert(simIterator, make_pair(naAccountID, usisElement));
3647 simIterator->second[isrListener->getSeq()] = isrListener;
3658 for (
auto const& naAccountID : vnaAccountIDs)
3661 isrListener->deleteSubAccountInfo(naAccountID, rt);
3678 for (
auto const& naAccountID : vnaAccountIDs)
3680 auto simIterator = subMap.
find(naAccountID);
3682 if (simIterator != subMap.
end())
3685 simIterator->second.erase(uSeq);
3687 if (simIterator->second.empty())
3690 subMap.
erase(simIterator);
3699 enum DatabaseType { Sqlite,
None };
3700 static auto const databaseType = [&]() -> DatabaseType {
3705 return DatabaseType::Sqlite;
3707 return DatabaseType::None;
3710 if (databaseType == DatabaseType::None)
3713 <<
"AccountHistory job for account "
3725 "AccountHistoryTxStream",
3726 [
this, dbType = databaseType, subInfo]() {
3727 auto const& accountId = subInfo.
index_->accountId_;
3728 auto& lastLedgerSeq = subInfo.
index_->historyLastLedgerSeq_;
3729 auto& txHistoryIndex = subInfo.
index_->historyTxIndex_;
3732 <<
"AccountHistory job for account " <<
toBase58(accountId)
3733 <<
" started. lastLedgerSeq=" << lastLedgerSeq;
3743 auto stx = tx->getSTransaction();
3744 if (stx->getAccountID(sfAccount) == accountId &&
3745 stx->getSeqValue() == 1)
3749 for (
auto& node : meta->getNodes())
3751 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3754 if (node.isFieldPresent(sfNewFields))
3756 if (
auto inner =
dynamic_cast<STObject const*
>(
3757 node.peekAtPField(sfNewFields));
3760 if (inner->isFieldPresent(sfAccount) &&
3761 inner->getAccountID(sfAccount) == accountId)
3773 bool unsubscribe) ->
bool {
3776 sptr->send(jvObj,
true);
3786 bool unsubscribe) ->
bool {
3790 sptr->getApiVersion(),
3791 [&](
Json::Value const& jv) { sptr->send(jv,
true); });
3814 accountId, minLedger, maxLedger, marker, 0,
true};
3815 return db->newestAccountTxPage(options);
3819 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3820 "getMoreTxns : invalid database type");
3829 while (lastLedgerSeq >= 2 && !subInfo.
index_->stopHistorical_)
3831 int feeChargeCount = 0;
3840 <<
"AccountHistory job for account "
3841 <<
toBase58(accountId) <<
" no InfoSub. Fee charged "
3842 << feeChargeCount <<
" times.";
3847 auto startLedgerSeq =
3848 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3850 <<
"AccountHistory job for account " <<
toBase58(accountId)
3851 <<
", working on ledger range [" << startLedgerSeq <<
","
3852 << lastLedgerSeq <<
"]";
3854 auto haveRange = [&]() ->
bool {
3857 auto haveSomeValidatedLedgers =
3859 validatedMin, validatedMax);
3861 return haveSomeValidatedLedgers &&
3862 validatedMin <= startLedgerSeq &&
3863 lastLedgerSeq <= validatedMax;
3869 <<
"AccountHistory reschedule job for account "
3870 <<
toBase58(accountId) <<
", incomplete ledger range ["
3871 << startLedgerSeq <<
"," << lastLedgerSeq <<
"]";
3877 while (!subInfo.
index_->stopHistorical_)
3880 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3884 <<
"AccountHistory job for account "
3885 <<
toBase58(accountId) <<
" getMoreTxns failed.";
3890 auto const& txns = dbResult->first;
3891 marker = dbResult->second;
3892 size_t num_txns = txns.size();
3893 for (
size_t i = 0; i < num_txns; ++i)
3895 auto const& [tx, meta] = txns[i];
3900 <<
"AccountHistory job for account "
3901 <<
toBase58(accountId) <<
" empty tx or meta.";
3911 <<
"AccountHistory job for account "
3912 <<
toBase58(accountId) <<
" no ledger.";
3917 tx->getSTransaction();
3921 <<
"AccountHistory job for account "
3923 <<
" getSTransaction failed.";
3929 auto const trR = meta->getResultTER();
3931 transJson(stTxn, trR,
true, curTxLedger, mRef);
3934 jss::account_history_tx_index, txHistoryIndex--);
3935 if (i + 1 == num_txns ||
3936 txns[i + 1].first->getLedger() != tx->getLedger())
3937 jvTx.
set(jss::account_history_boundary,
true);
3939 if (isFirstTx(tx, meta))
3941 jvTx.
set(jss::account_history_tx_first,
true);
3942 sendMultiApiJson(jvTx,
false);
3945 <<
"AccountHistory job for account "
3947 <<
" done, found last tx.";
3952 sendMultiApiJson(jvTx,
false);
3959 <<
"AccountHistory job for account "
3961 <<
" paging, marker=" << marker->ledgerSeq <<
":"
3970 if (!subInfo.
index_->stopHistorical_)
3972 lastLedgerSeq = startLedgerSeq - 1;
3973 if (lastLedgerSeq <= 1)
3976 <<
"AccountHistory job for account "
3978 <<
" done, reached genesis ledger.";
3991 subInfo.
index_->separationLedgerSeq_ = ledger->seq();
3992 auto const& accountId = subInfo.
index_->accountId_;
3994 if (!ledger->exists(accountKeylet))
3997 <<
"subAccountHistoryStart, no account " <<
toBase58(accountId)
3998 <<
", no need to add AccountHistory job.";
4003 if (
auto const sleAcct = ledger->read(accountKeylet); sleAcct)
4005 if (sleAcct->getFieldU32(sfSequence) == 1)
4008 <<
"subAccountHistoryStart, genesis account "
4010 <<
" does not have tx, no need to add AccountHistory job.";
4017 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
4018 "access genesis account");
4022 subInfo.
index_->historyLastLedgerSeq_ = ledger->seq();
4023 subInfo.
index_->haveHistorical_ =
true;
4026 <<
"subAccountHistoryStart, add AccountHistory job: accountId="
4027 <<
toBase58(accountId) <<
", currentLedgerSeq=" << ledger->seq();
4037 if (!isrListener->insertSubAccountHistory(accountId))
4040 <<
"subAccountHistory, already subscribed to account "
4047 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
4052 inner.
emplace(isrListener->getSeq(), ahi);
4058 simIterator->second.emplace(isrListener->getSeq(), ahi);
4072 <<
"subAccountHistory, no validated ledger yet, delay start";
4085 isrListener->deleteSubAccountHistory(account);
4099 auto& subInfoMap = simIterator->second;
4100 auto subInfoIter = subInfoMap.find(seq);
4101 if (subInfoIter != subInfoMap.end())
4103 subInfoIter->second.index_->stopHistorical_ =
true;
4108 simIterator->second.erase(seq);
4109 if (simIterator->second.empty())
4115 <<
"unsubAccountHistory, account " <<
toBase58(account)
4116 <<
", historyOnly = " << (historyOnly ?
"true" :
"false");
4124 listeners->addSubscriber(isrListener);
4126 UNREACHABLE(
"ripple::NetworkOPsImp::subBook : null book listeners");
4134 listeners->removeSubscriber(uSeq);
4146 m_standalone,
"ripple::NetworkOPsImp::acceptLedger : is standalone");
4149 Throw<std::runtime_error>(
4150 "Operation only possible in STANDALONE mode.");
4165 jvResult[jss::ledger_index] = lpClosed->info().seq;
4166 jvResult[jss::ledger_hash] =
to_string(lpClosed->info().hash);
4168 lpClosed->info().closeTime.time_since_epoch().count());
4169 if (!lpClosed->rules().enabled(featureXRPFees))
4171 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
4172 jvResult[jss::reserve_base] =
4173 lpClosed->fees().accountReserve(0).jsonClipped();
4174 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
4179 jvResult[jss::validated_ledgers] =
4185 .emplace(isrListener->getSeq(), isrListener)
4195 .emplace(isrListener->getSeq(), isrListener)
4221 .emplace(isrListener->getSeq(), isrListener)
4249 jvResult[jss::random] =
to_string(uRandom);
4251 jvResult[jss::load_base] = feeTrack.getLoadBase();
4252 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4253 jvResult[jss::hostid] =
getHostId(admin);
4254 jvResult[jss::pubkey_node] =
4259 .emplace(isrListener->getSeq(), isrListener)
4277 .emplace(isrListener->getSeq(), isrListener)
4295 .emplace(isrListener->getSeq(), isrListener)
4313 .emplace(isrListener->getSeq(), isrListener)
4337 .emplace(isrListener->getSeq(), isrListener)
4355 .emplace(isrListener->getSeq(), isrListener)
4403 if (map.find(pInfo->getSeq()) != map.end())
4410#ifndef USE_NEW_BOOK_PAGE
4421 unsigned int iLimit,
4431 uint256 uTipIndex = uBookBase;
4435 stream <<
"getBookPage:" << book;
4436 stream <<
"getBookPage: uBookBase=" << uBookBase;
4437 stream <<
"getBookPage: uBookEnd=" << uBookEnd;
4438 stream <<
"getBookPage: uTipIndex=" << uTipIndex;
4447 bool bDirectAdvance =
true;
4451 unsigned int uBookEntry;
4457 while (!bDone && iLimit-- > 0)
4461 bDirectAdvance =
false;
4465 auto const ledgerIndex = view.
succ(uTipIndex, uBookEnd);
4469 sleOfferDir.
reset();
4478 uTipIndex = sleOfferDir->key();
4481 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4484 <<
"getBookPage: uTipIndex=" << uTipIndex;
4486 <<
"getBookPage: offerIndex=" << offerIndex;
4496 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4497 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4498 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4500 bool firstOwnerOffer(
true);
4506 saOwnerFunds = saTakerGets;
4508 else if (bGlobalFreeze)
4516 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4517 if (umBalanceEntry != umBalance.
end())
4521 saOwnerFunds = umBalanceEntry->second;
4522 firstOwnerOffer =
false;
4536 if (saOwnerFunds < beast::zero)
4540 saOwnerFunds.
clear();
4548 STAmount saOwnerFundsLimit = saOwnerFunds;
4560 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4563 if (saOwnerFundsLimit >= saTakerGets)
4566 saTakerGetsFunded = saTakerGets;
4572 saTakerGetsFunded = saOwnerFundsLimit;
4574 saTakerGetsFunded.
setJson(jvOffer[jss::taker_gets_funded]);
4578 saTakerGetsFunded, saDirRate, saTakerPays.
issue()))
4579 .setJson(jvOffer[jss::taker_pays_funded]);
4585 saOwnerFunds,
multiply(saTakerGetsFunded, offerRate));
4587 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4591 jvOf[jss::quality] = saDirRate.
getText();
4593 if (firstOwnerOffer)
4594 jvOf[jss::owner_funds] = saOwnerFunds.
getText();
4601 if (!
cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4603 bDirectAdvance =
true;
4608 <<
"getBookPage: offerIndex=" << offerIndex;
4628 unsigned int iLimit,
4636 MetaView lesActive(lpLedger,
tapNONE,
true);
4637 OrderBookIterator obIterator(lesActive, book);
4641 bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.
out.
account) ||
4642 lesActive.isGlobalFrozen(book.
in.
account);
4644 while (iLimit-- > 0 && obIterator.nextOffer())
4649 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4650 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4651 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4652 STAmount saDirRate = obIterator.getCurrentRate();
4658 saOwnerFunds = saTakerGets;
4660 else if (bGlobalFreeze)
4668 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4670 if (umBalanceEntry != umBalance.
end())
4674 saOwnerFunds = umBalanceEntry->second;
4680 saOwnerFunds = lesActive.accountHolds(
4686 if (saOwnerFunds.isNegative())
4690 saOwnerFunds.zero();
4697 STAmount saTakerGetsFunded;
4698 STAmount saOwnerFundsLimit = saOwnerFunds;
4710 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4713 if (saOwnerFundsLimit >= saTakerGets)
4716 saTakerGetsFunded = saTakerGets;
4721 saTakerGetsFunded = saOwnerFundsLimit;
4723 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4729 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4730 .setJson(jvOffer[jss::taker_pays_funded]);
4733 STAmount saOwnerPays = (
parityRate == offerRate)
4736 saOwnerFunds,
multiply(saTakerGetsFunded, offerRate));
4738 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4740 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4744 jvOf[jss::quality] = saDirRate.
getText();
4759 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4799 ++counters_[
static_cast<std::size_t>(om)].transitions;
4801 counters_[
static_cast<std::size_t>(om)].transitions == 1)
4803 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4804 now - processStart_)
4808 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4817 auto [counters, mode, start, initialSync] = getCounterData();
4818 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4828 auto& state = obj[jss::state_accounting][
states_[i]];
4829 state[jss::transitions] =
std::to_string(counters[i].transitions);
4830 state[jss::duration_us] =
std::to_string(counters[i].dur.count());
4834 obj[jss::initial_sync_duration_us] =
std::to_string(initialSync);
4849 boost::asio::io_service& io_svc,
4853 return std::make_unique<NetworkOPsImp>(
T back_inserter(T... args)
Decorator for streaming out compact json.
Lightweight wrapper to tag static string.
Value & append(Value const &value)
Append value to array at the end.
bool isMember(char const *key) const
Return true if the object has a member named key.
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
A generic endpoint for log messages.
Stream trace() const
Severity stream access functions.
A metric for measuring an integral value.
void set(value_type value) const
Set the value on the gauge.
A reference to a handler for performing polled collection.
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
std::uint32_t getLoadFee() const
NetClock::time_point getReportTime() const
PublicKey const & identity() const
std::size_t size() const
The number of nodes in the cluster list.
std::string SERVER_DOMAIN
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
int RELAY_UNTRUSTED_VALIDATIONS
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
A pool of threads to perform work.
Json::Value getJson(int c=0)
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
std::uint32_t getClusterFee() const
std::uint32_t getLocalFee() const
std::uint32_t getLoadBase() const
std::uint32_t getRemoteFee() const
std::uint32_t getLoadFactor() const
void heartbeat()
Reset the stall detection timer.
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
std::uint64_t initialSyncUs_
CounterData getCounterData() const
std::chrono::steady_clock::time_point start_
static std::array< Json::StaticString const, 5 > const states_
std::chrono::steady_clock::time_point const processStart_
Transaction with input flags and results to be applied in batches.
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
std::shared_ptr< Transaction > const transaction
void processClusterTimer()
boost::asio::steady_timer accountHistoryTxTimer_
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
std::vector< TransactionStatus > mTransactions
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
std::optional< PublicKey > const validatorPK_
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
SubInfoMapType mSubAccount
std::optional< PublicKey > const validatorMasterPK_
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
std::atomic< bool > unlBlocked_
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
std::atomic< bool > needNetworkLedger_
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
void setMode(OperatingMode om) override
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
void clearNeedNetworkLedger() override
DispatchState mDispatchState
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
void reportFeeChange() override
void processHeartbeatTimer()
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
bool subTransactions(InfoSub::ref ispListener) override
std::atomic< bool > amendmentWarned_
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
void getCountsJson(Json::Value &obj)
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Writable ledger view that accumulates state and tx changes.
BookListeners::pointer getBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &alTx, MultiApiJson const &jvObj)
BookListeners::pointer makeBookListeners(Book const &)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Represents a set of transactions in RCLConsensus.
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Collects logging information.
std::unique_ptr< std::stringstream > const & ss()
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
std::string getText() const override
Issue const & issue() const
std::optional< T > get(std::string const &name) const
std::size_t size() const noexcept
void const * data() const noexcept
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
std::chrono::seconds closeOffset() const
time_point closeTime() const
Returns the predicted close time, in network time.
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Validator keys and manifest as set in configuration file.
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Json::Value jsonClipped() const
static constexpr std::size_t size()
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
T emplace_back(T... args)
@ arrayValue
array value (ordered list)
@ objectValue
object value (collection of name/value pairs).
void rngfill(void *const buffer, std::size_t const bytes, Generator &g)
std::string const & getVersionString()
Server version.
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
void insertNFTSyntheticInJson(Json::Value &, std::shared_ptr< STTx const > const &, TxMeta const &)
Adds common synthetic fields to transaction-related JSON responses.
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Keylet account(AccountID const &id) noexcept
AccountID root.
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Rate rate(Env &env, Account const &account, std::uint32_t const &seq)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
STAmount divide(STAmount const &amount, Rate const &rate)
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
std::uint64_t getQuality(uint256 const &uBase)
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
auto constexpr muldiv_max
std::unique_ptr< LocalTxs > make_LocalTxs()
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
STAmount amountFromQuality(std::uint64_t rate)
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
@ warnRPC_UNSUPPORTED_MAJORITY
@ warnRPC_AMENDMENT_BLOCKED
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section. If the named value is not found or doesn't parse as a T,...
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
AccountID calcAccountID(PublicKey const &pk)
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Json::Value rpcError(int iError)
bool isTefFailure(TER x) noexcept
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
std::string strHex(FwdIt begin, FwdIt end)
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
void forAllApiVersions(Fn const &fn, Args &&... args)
bool isTerRetry(TER x) noexcept
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
uint256 getQualityNext(uint256 const &uBase)
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
bool isTesSuccess(TER x) noexcept
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
std::string to_string_iso(date::sys_time< Duration > tp)
std::string to_string(base_uint< Bits, Tag > const &a)
FeeSetup setup_FeeVote(Section const §ion)
bool isTemMalformed(TER x) noexcept
Number root(Number f, unsigned d)
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
constexpr std::size_t maxPoppedTransactions
bool transResultInfo(TER code, std::string &token, std::string &text)
bool isTelLocal(TER x) noexcept
uint256 getBookBase(Book const &book)
constexpr std::uint32_t tfInnerBatchTxn
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
static std::uint32_t trunc32(std::uint64_t v)
static auto const genesisAccountId
T set_intersection(T... args)
std::string serialized
The manifest in serialized form.
std::uint32_t sequence
The sequence number of this manifest.
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Server fees published on server subscription.
bool operator!=(ServerFeeSummary const &b) const
ServerFeeSummary()=default
std::optional< TxQ::Metrics > em
std::uint32_t loadFactorServer
bool operator==(ServerFeeSummary const &b) const
std::uint32_t loadBaseServer
decltype(initialSyncUs_) initialSyncUs
decltype(counters_) counters
std::uint64_t transitions
std::chrono::microseconds dur
beast::insight::Gauge full_transitions
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
beast::insight::Hook hook
beast::insight::Gauge connected_duration
beast::insight::Gauge tracking_duration
beast::insight::Gauge connected_transitions
beast::insight::Gauge disconnected_transitions
beast::insight::Gauge syncing_duration
beast::insight::Gauge tracking_transitions
beast::insight::Gauge full_duration
beast::insight::Gauge disconnected_duration
beast::insight::Gauge syncing_transitions
std::uint32_t historyLastLedgerSeq_
std::uint32_t separationLedgerSeq_
AccountID const accountId_
std::uint32_t forwardTxIndex_
std::atomic< bool > stopHistorical_
std::int32_t historyTxIndex_
SubAccountHistoryIndex(AccountID const &accountId)
std::shared_ptr< SubAccountHistoryIndex > index_
std::shared_ptr< SubAccountHistoryIndex > index_
Represents a transfer rate.
Data format for exchanging consumption information across peers.
std::vector< Item > items
Changes in trusted nodes after updating validator list.
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
IsMemberResult isMember(char const *key) const
void set(char const *key, auto const &v)
Select all peers (except optional excluded) that are in our cluster.
Sends a message to all peers.
T time_since_epoch(T... args)