#include <xrpld/app/consensus/RCLConsensus.h>
#include <xrpld/app/consensus/RCLValidations.h>
#include <xrpld/app/ledger/AcceptedLedger.h>
#include <xrpld/app/ledger/InboundLedgers.h>
#include <xrpld/app/ledger/LedgerMaster.h>
#include <xrpld/app/ledger/LedgerToJson.h>
#include <xrpld/app/ledger/LocalTxs.h>
#include <xrpld/app/ledger/OpenLedger.h>
#include <xrpld/app/ledger/OrderBookDB.h>
#include <xrpld/app/ledger/TransactionMaster.h>
#include <xrpld/app/main/LoadManager.h>
#include <xrpld/app/main/Tuning.h>
#include <xrpld/app/misc/AmendmentTable.h>
#include <xrpld/app/misc/DeliverMax.h>
#include <xrpld/app/misc/HashRouter.h>
#include <xrpld/app/misc/LoadFeeTrack.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/app/misc/Transaction.h>
#include <xrpld/app/misc/TxQ.h>
#include <xrpld/app/misc/ValidatorKeys.h>
#include <xrpld/app/misc/ValidatorList.h>
#include <xrpld/app/misc/detail/AccountTxPaging.h>
#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
#include <xrpld/app/tx/apply.h>
#include <xrpld/consensus/Consensus.h>
#include <xrpld/consensus/ConsensusParms.h>
#include <xrpld/overlay/Cluster.h>
#include <xrpld/overlay/Overlay.h>
#include <xrpld/overlay/predicates.h>
#include <xrpld/perflog/PerfLog.h>
#include <xrpld/rpc/BookChanges.h>
#include <xrpld/rpc/CTID.h>
#include <xrpld/rpc/DeliveredAmount.h>
#include <xrpld/rpc/MPTokenIssuanceID.h>
#include <xrpld/rpc/ServerHandler.h>

#include <xrpl/basics/UptimeClock.h>
#include <xrpl/basics/mulDiv.h>
#include <xrpl/basics/safe_cast.h>
#include <xrpl/basics/scope.h>
#include <xrpl/beast/utility/rngfill.h>
#include <xrpl/crypto/RFC1751.h>
#include <xrpl/crypto/csprng.h>
#include <xrpl/protocol/BuildInfo.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/MultiApiJson.h>
#include <xrpl/protocol/NFTSyntheticSerializer.h>
#include <xrpl/protocol/RPCErr.h>
#include <xrpl/protocol/TxFlags.h>
#include <xrpl/protocol/jss.h>
#include <xrpl/resource/Fees.h>
#include <xrpl/resource/ResourceManager.h>

#include <boost/asio/ip/host_name.hpp>
#include <boost/asio/steady_timer.hpp>
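// NetworkOPsImp maintains this server's view of the network: its operating
// mode (disconnected, connected, syncing, tracking, full), transaction
// submission and batch application, consensus round bookkeeping, server
// info reporting, and the client subscription streams published below.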
94 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
137 std::chrono::steady_clock::time_point
start_ =
198 return !(*
this != b);
        boost::asio::io_context& io_svc,
            app_.logs().journal("FeeVote")),
            app.getInboundTransactions(),
            beast::get_abstract_clock<std::chrono::steady_clock>(),
            app_.logs().journal("LedgerConsensus"))
          validatorKeys.keys ? validatorKeys.keys->publicKey
          validatorKeys.keys ? validatorKeys.keys->masterPublicKey

    getServerInfo(bool human, bool admin, bool counters) override;

        TER result) override;

        bool historyOnly) override;

        bool historyOnly) override;
    catch (boost::system::system_error const& e)
            << "NetworkOPs: heartbeatTimer cancel error: " << e.what();

    catch (boost::system::system_error const& e)
            << "NetworkOPs: clusterTimer cancel error: " << e.what();

    catch (boost::system::system_error const& e)
            << "NetworkOPs: accountHistoryTxTimer cancel error: " << e.what();

    using namespace std::chrono_literals;

        boost::asio::steady_timer& timer,
    template <class Handler>
        Handler const& handler,
        : hook(collector->make_hook(handler))
                  "Disconnected_duration"))
                  "Connected_duration"))
              collector->make_gauge("State_Accounting", "Syncing_duration"))
                  "Tracking_duration"))
              collector->make_gauge("State_Accounting", "Full_duration"))
                  "Disconnected_transitions"))
                  "Connected_transitions"))
                  "Syncing_transitions"))
                  "Tracking_transitions"))
              collector->make_gauge("State_Accounting", "Full_transitions"))
        {"disconnected", "connected", "syncing", "tracking", "full"}};

    static std::string const hostname = boost::asio::ip::host_name();

    static std::string const shroudedHostId = [this]() {

    return shroudedHostId;
    boost::asio::steady_timer& timer,
        [this, onExpire, onError](boost::system::error_code const& e) {
            if ((e.value() == boost::system::errc::success) &&
                (!m_job_queue.isStopped()))

            if (e.value() != boost::system::errc::success &&
                e.value() != boost::asio::error::operation_aborted)
                JLOG(m_journal.error())
                    << "Timer got error '" << e.message()
                    << "'. Restarting timer.";

    timer.expires_after(expiry_time);
    timer.async_wait(std::move(*optionalCountedHandler));
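// setTimer (above) arms a steady_timer with a counted handler
// (optionalCountedHandler) so in-flight waits can finish before shutdown;
// the helpers below use it for the heartbeat, cluster, and account-history
// timers.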
NetworkOPsImp::setHeartbeatTimer()
        mConsensus.parms().ledgerGRANULARITY,
            m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
                processHeartbeatTimer();
        [this]() { setHeartbeatTimer(); });

NetworkOPsImp::setClusterTimer()
    using namespace std::chrono_literals;
            processClusterTimer();
        [this]() { setClusterTimer(); });

    JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
    using namespace std::chrono_literals;
        accountHistoryTxTimer_,
        [this, subInfo]() { addAccountHistoryJob(subInfo); },
        [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
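// processHeartbeatTimer: enforce the minimum peer count (dropping to
// DISCONNECTED when peers are scarce, returning to CONNECTED when they come
// back), refresh the operating mode, then hand the current close time to
// the consensus engine via timerEntry() and re-arm the timer.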
NetworkOPsImp::processHeartbeatTimer()
        "Heartbeat Timer", mConsensus.validating(), m_journal);

        std::size_t const numPeers = app_.overlay().size();

        if (numPeers < minPeerCount_)
            if (mMode != OperatingMode::DISCONNECTED)
                setMode(OperatingMode::DISCONNECTED);
                ss << "Node count (" << numPeers << ") has fallen "
                   << "below required minimum (" << minPeerCount_ << ").";
                JLOG(m_journal.warn()) << ss.str();
                CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
                    << "already DISCONNECTED. too few peers (" << numPeers
                    << "), need at least " << minPeerCount_;

            setHeartbeatTimer();

        if (mMode == OperatingMode::DISCONNECTED)
            setMode(OperatingMode::CONNECTED);
            JLOG(m_journal.info())
                << "Node count (" << numPeers << ") is sufficient.";
            CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers

    auto origMode = mMode.load();
    CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
    if (mMode == OperatingMode::SYNCING)
        setMode(OperatingMode::SYNCING);
    else if (mMode == OperatingMode::CONNECTED)
        setMode(OperatingMode::CONNECTED);
    auto newMode = mMode.load();
    if (origMode != newMode)
            << ", changing to " << strOperatingMode(newMode, true);
    CLOG(clog.ss()) << ". ";

    mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());

    CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
    if (mLastConsensusPhase != currPhase)
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
        CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
    CLOG(clog.ss()) << ". ";

    setHeartbeatTimer();
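// processClusterTimer: a no-op outside a cluster; otherwise report our local
// load fee to the other cluster nodes and gossip resource-abusing endpoints.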
NetworkOPsImp::processClusterTimer()
    if (app_.cluster().size() == 0)

    using namespace std::chrono_literals;

    bool const update = app_.cluster().update(
        app_.nodeIdentity().first,
        (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
            ? app_.getFeeTrack().getLocalFee()
        app_.timeKeeper().now());

        JLOG(m_journal.debug()) << "Too soon to send cluster update";

    protocol::TMCluster cluster;
    app_.cluster().for_each([&cluster](ClusterNode const& node) {
        protocol::TMClusterNode& n = *cluster.add_clusternodes();
        n.set_nodename(node.name());

    for (auto& item : gossip.items)
        protocol::TMLoadSource& node = *cluster.add_loadsources();
        node.set_name(to_string(item.address));
        node.set_cost(item.balance);
    app_.overlay().foreach(send_if(
    if (mode == OperatingMode::FULL && admin)
        auto const consensusMode = mConsensus.mode();
        if (consensusMode != ConsensusMode::wrongLedger)
            if (consensusMode == ConsensusMode::proposing)
            if (mConsensus.validating())
                return "validating";
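// submitTransaction: client-facing entry point. Reject obviously bad
// submissions (cached-bad hash, stray tfInnerBatchTxn flag, invalid
// signature) before queueing a jtTRANSACTION job to process the rest.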
    if (isNeedNetworkLedger())

        m_ledgerMaster.getValidatedRules().enabled(featureBatch))
        JLOG(m_journal.error())
            << "Submitted transaction invalid: tfInnerBatchTxn flag present.";

    auto const txid = trans->getTransactionID();
    auto const flags = app_.getHashRouter().getFlags(txid);

    if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
        JLOG(m_journal.warn()) << "Submitted transaction cached bad";

            app_.getHashRouter(),
            m_ledgerMaster.getValidatedRules(),

        if (validity != Validity::Valid)
            JLOG(m_journal.warn())
                << "Submitted transaction invalid: " << reason;

        JLOG(m_journal.warn())
            << "Exception checking transaction " << txid << ": " << ex.what();

    m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
        processTransaction(t, false, false, FailHard::no);

    auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());

    if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
        JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
        transaction->setStatus(INVALID);

    auto const view = m_ledgerMaster.getCurrentLedger();

    auto const sttx = *transaction->getSTransaction();
    if (sttx.isFlag(tfInnerBatchTxn) && view->rules().enabled(featureBatch))
        transaction->setStatus(INVALID);
        app_.getHashRouter().setFlags(
            transaction->getID(), HashRouterFlags::BAD);

    auto const [validity, reason] =
        checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config());
        validity == Validity::Valid,
        "ripple::NetworkOPsImp::processTransaction : valid validity");

    if (validity == Validity::SigBad)
        JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
        transaction->setStatus(INVALID);
        app_.getHashRouter().setFlags(
            transaction->getID(), HashRouterFlags::BAD);

    app_.getMasterTransaction().canonicalize(&transaction);
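// processTransaction: shared preprocessing, then either synchronous batch
// application (local submissions) or asynchronous batching (network relay).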
NetworkOPsImp::processTransaction(
    auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");

    if (!preProcessTransaction(transaction))

        doTransactionSync(transaction, bUnlimited, failType);
        doTransactionAsync(transaction, bUnlimited, failType);

NetworkOPsImp::doTransactionAsync(
    if (transaction->getApplying())

    mTransactions.push_back(
    transaction->setApplying();

    if (mDispatchState == DispatchState::none)
        if (m_job_queue.addJob(
                jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
            mDispatchState = DispatchState::scheduled;

NetworkOPsImp::doTransactionSync(
    if (!transaction->getApplying())
        mTransactions.push_back(
        transaction->setApplying();

    doTransactionSyncBatch(
        return transaction->getApplying();

NetworkOPsImp::doTransactionSyncBatch(
    if (mDispatchState == DispatchState::running)

        if (mTransactions.size())
            if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
                mDispatchState = DispatchState::scheduled;
    } while (retryCallback(lock));
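// processTransactionSet: pre-validate every transaction in a canonical set,
// mark failures bad in the HashRouter, and apply the survivors as one batch.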
    auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXNSet");

    for (auto const& [_, tx] : set)
        if (transaction->getStatus() == INVALID)
            if (!reason.empty())
                JLOG(m_journal.trace())
                    << "Exception checking transaction: " << reason;
            app_.getHashRouter().setFlags(
                tx->getTransactionID(), HashRouterFlags::BAD);

        if (!preProcessTransaction(transaction))

    for (auto& transaction : candidates)
        if (!transaction->getApplying())
            transactions.emplace_back(transaction, false, false, FailHard::no);
            transaction->setApplying();

    if (mTransactions.empty())
        mTransactions.swap(transactions);
        mTransactions.reserve(mTransactions.size() + transactions.size());
        for (auto& t : transactions)
            mTransactions.push_back(std::move(t));

    if (mTransactions.empty())
        JLOG(m_journal.debug()) << "No transaction to process!";

        "ripple::NetworkOPsImp::processTransactionSet has lock");
        mTransactions.begin(), mTransactions.end(), [](auto const& t) {
            return t.transaction->getApplying();
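// transactionBatch/apply: drain mTransactions under the batch lock, run each
// entry through TxQ::apply against the open ledger, then publish results,
// relay to peers, and hold or retry what did not make it in.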
NetworkOPsImp::transactionBatch()
    if (mDispatchState == DispatchState::running)

    while (mTransactions.size())

    mTransactions.swap(transactions);
        !transactions.empty(),
        "ripple::NetworkOPsImp::apply : non-empty transactions");
        mDispatchState != DispatchState::running,
        "ripple::NetworkOPsImp::apply : is not running");

    mDispatchState = DispatchState::running;

        bool changed = false;

            if (e.failType == FailHard::yes)

            auto const result = app_.getTxQ().apply(
                app_, view, e.transaction->getSTransaction(), flags, j);
            e.result = result.ter;
            e.applied = result.applied;
            changed = changed || result.applied;

    if (auto const l = m_ledgerMaster.getValidatedLedger())
        validatedLedgerIndex = l->info().seq;

    auto newOL = app_.openLedger().current();
        e.transaction->clearSubmitResult();

            pubProposedTransaction(
                newOL, e.transaction->getSTransaction(), e.result);
            e.transaction->setApplied();

        e.transaction->setResult(e.result);

            app_.getHashRouter().setFlags(
                e.transaction->getID(), HashRouterFlags::BAD);

            JLOG(m_journal.info())
                << "TransactionResult: " << token << ": " << human;
        bool addLocal = e.local;

            JLOG(m_journal.debug())
                << "Transaction is now included in open ledger";
            e.transaction->setStatus(INCLUDED);

            auto const& txCur = e.transaction->getSTransaction();
            for (auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
                 txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
                if (t->getApplying())
                submit_held.emplace_back(t, false, false, FailHard::no);

            JLOG(m_journal.info()) << "Transaction is obsolete";
            e.transaction->setStatus(OBSOLETE);

            JLOG(m_journal.debug())
                << "Transaction is likely to claim a"
                << " fee, but is queued until fee drops";
            e.transaction->setStatus(HELD);
            m_ledgerMaster.addHeldTransaction(e.transaction);
            e.transaction->setQueued();
            e.transaction->setKept();

            if (e.failType != FailHard::yes)
                auto const lastLedgerSeq =
                    e.transaction->getSTransaction()->at(
                        ~sfLastLedgerSequence);
                auto const ledgersLeft = lastLedgerSeq
                        m_ledgerMaster.getCurrentLedgerIndex()

                    (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
                    app_.getHashRouter().setFlags(
                        e.transaction->getID(), HashRouterFlags::HELD))
                    JLOG(m_journal.debug())
                        << "Transaction should be held: " << e.result;
                    e.transaction->setStatus(HELD);
                    m_ledgerMaster.addHeldTransaction(e.transaction);
                    e.transaction->setKept();
                    JLOG(m_journal.debug())
                        << "Not holding transaction "
                        << e.transaction->getID() << ": "
                        << (e.local ? "local" : "network") << ", "
                        << "result: " << e.result << " ledgers left: "
                        << (ledgersLeft ? to_string(*ledgersLeft)

            JLOG(m_journal.debug())
                << "Status other than success " << e.result;
            e.transaction->setStatus(INVALID);

        auto const enforceFailHard =
            e.failType == FailHard::yes && !isTesSuccess(e.result);

        if (addLocal && !enforceFailHard)
            m_localTX->push_back(
                m_ledgerMaster.getCurrentLedgerIndex(),
                e.transaction->getSTransaction());
            e.transaction->setKept();

            ((mMode != OperatingMode::FULL) &&
             (e.failType != FailHard::yes) && e.local) ||
                app_.getHashRouter().shouldRelay(e.transaction->getID());

            if (auto const sttx = *(e.transaction->getSTransaction());
                  newOL->rules().enabled(featureBatch)))
                protocol::TMTransaction tx;
                tx.set_rawtransaction(s.data(), s.size());
                tx.set_status(protocol::tsCURRENT);
                tx.set_receivetimestamp(
                    app_.timeKeeper().now().time_since_epoch().count());
                app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
                e.transaction->setBroadcast();

        if (validatedLedgerIndex)
            auto [fee, accountSeq, availableSeq] =
                app_.getTxQ().getTxRequiredFeeAndSeq(
                    *newOL, e.transaction->getSTransaction());
            e.transaction->setCurrentLedgerState(
                *validatedLedgerIndex, fee, accountSeq, availableSeq);

        e.transaction->clearApplying();

    if (!submit_held.empty())
        if (mTransactions.empty())
            mTransactions.swap(submit_held);
            mTransactions.reserve(mTransactions.size() + submit_held.size());
            for (auto& e : submit_held)
                mTransactions.push_back(std::move(e));

    mDispatchState = DispatchState::none;
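// getOwnerInfo: walk the account's owner directory page by page, collecting
// offers and trust lines into a JSON object.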
NetworkOPsImp::getOwnerInfo(
    auto root = keylet::ownerDir(account);
    auto sleNode = lpLedger->read(keylet::page(root));

        for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
            auto sleCur = lpLedger->read(keylet::child(uDirEntry));
                "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");

            switch (sleCur->getType())
                    if (!jvObjects.isMember(jss::offers))
                        jvObjects[jss::offers] =

                    jvObjects[jss::offers].append(
                        sleCur->getJson(JsonOptions::none));

                case ltRIPPLE_STATE:
                    if (!jvObjects.isMember(jss::ripple_lines))
                        jvObjects[jss::ripple_lines] =

                    jvObjects[jss::ripple_lines].append(
                        sleCur->getJson(JsonOptions::none));

                case ltACCOUNT_ROOT:
                        "ripple::NetworkOPsImp::getOwnerInfo : invalid "

        uNodeDir = sleNode->getFieldU64(sfIndexNext);
            sleNode = lpLedger->read(keylet::page(root, uNodeDir));
                "ripple::NetworkOPsImp::getOwnerInfo : read next page");
NetworkOPsImp::isBlocked()
    return isAmendmentBlocked() || isUNLBlocked();

NetworkOPsImp::isAmendmentBlocked()
    return amendmentBlocked_;

NetworkOPsImp::setAmendmentBlocked()
    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED);

NetworkOPsImp::isAmendmentWarned()
    return !amendmentBlocked_ && amendmentWarned_;

NetworkOPsImp::setAmendmentWarned()
    amendmentWarned_ = true;

NetworkOPsImp::clearAmendmentWarned()
    amendmentWarned_ = false;

NetworkOPsImp::isUNLBlocked()

NetworkOPsImp::setUNLBlocked()
    setMode(OperatingMode::CONNECTED);

NetworkOPsImp::clearUNLBlocked()
    unlBlocked_ = false;
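// checkLastClosedLedger: compare our last closed ledger against what peers
// and trusted validations prefer; if the network is on a different ledger,
// acquire it and switch to it.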
NetworkOPsImp::checkLastClosedLedger(
    JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";

    auto const ourClosed = m_ledgerMaster.getClosedLedger();

    uint256 closedLedger = ourClosed->info().hash;
    uint256 prevClosedLedger = ourClosed->info().parentHash;
    JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
    JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;

    auto& validations = app_.getValidations();
    JLOG(m_journal.debug())
        << "ValidationTrie " << Json::Compact(validations.getJsonTrie());

    peerCounts[closedLedger] = 0;
    if (mMode >= OperatingMode::TRACKING)
        peerCounts[closedLedger]++;

    for (auto& peer : peerList)
        uint256 peerLedger = peer->getClosedLedgerHash();
            ++peerCounts[peerLedger];

    for (auto const& it : peerCounts)
        JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;

    uint256 preferredLCL = validations.getPreferredLCL(
        m_ledgerMaster.getValidLedgerIndex(),

    bool switchLedgers = preferredLCL != closedLedger;
        closedLedger = preferredLCL;

    if (switchLedgers && (closedLedger == prevClosedLedger))
        JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
        networkClosed = ourClosed->info().hash;
        switchLedgers = false;
        networkClosed = closedLedger;

    auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
        consensus = app_.getInboundLedgers().acquire(
            closedLedger, 0, InboundLedger::Reason::CONSENSUS);

        (!m_ledgerMaster.canBeCurrent(consensus) ||
         !m_ledgerMaster.isCompatible(
             *consensus, m_journal.debug(), "Not switching")))
        networkClosed = ourClosed->info().hash;

    JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
    JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
    JLOG(m_journal.info()) << "Net LCL " << closedLedger;

    if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
        setMode(OperatingMode::CONNECTED);

        switchLastClosedLedger(consensus);
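// switchLastClosedLedger: jump to the network's ledger, rebuild the open
// ledger on top of it from local and retriable transactions, and announce
// the switch to peers with a neSWITCHED_LEDGER status message.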
NetworkOPsImp::switchLastClosedLedger(
    JLOG(m_journal.error())
        << "JUMP last closed ledger to " << newLCL->info().hash;

    clearNeedNetworkLedger();

    app_.getTxQ().processClosedLedger(app_, *newLCL, true);

    auto retries = m_localTX->getTxSet();
    auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
        rules.emplace(app_.config().features);
    app_.openLedger().accept(

            return app_.getTxQ().accept(app_, view);

    m_ledgerMaster.switchLCL(newLCL);

    protocol::TMStatusChange s;
    s.set_newevent(protocol::neSWITCHED_LEDGER);
    s.set_ledgerseq(newLCL->info().seq);
    s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
    s.set_ledgerhashprevious(
        newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
    s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());

    app_.overlay().foreach(
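// beginConsensus: verify we hold the prior ledger, refresh the negative UNL
// and the trusted-validator set, then start a new round of the consensus
// engine.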
NetworkOPsImp::beginConsensus(
        "ripple::NetworkOPsImp::beginConsensus : nonzero input");

    auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();

    JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
                           << " with LCL " << closingInfo.parentHash;

    auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);

        if (mMode == OperatingMode::FULL)
            JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
            setMode(OperatingMode::TRACKING);
            CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";

        CLOG(clog) << "beginConsensus no previous ledger. ";

        prevLedger->info().hash == closingInfo.parentHash,
        "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
        closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
        "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "

    if (prevLedger->rules().enabled(featureNegativeUNL))
        app_.validators().setNegativeUNL(prevLedger->negativeUNL());
    TrustChanges const changes = app_.validators().updateTrusted(
        app_.getValidations().getCurrentNodeIDs(),
        closingInfo.parentCloseTime,
        app_.getHashRouter());

    if (!changes.added.empty() || !changes.removed.empty())
        app_.getValidations().trustChanged(changes.added, changes.removed);
        app_.getAmendmentTable().trustChanged(
            app_.validators().getQuorumKeys().second);

    mConsensus.startRound(
        app_.timeKeeper().closeTime(),

    if (mLastConsensusPhase != currPhase)
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;

    JLOG(m_journal.debug()) << "Initiating consensus engine";
    auto const& peerKey = peerPos.publicKey();
    if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
        JLOG(m_journal.error())
            << "Received a proposal signed by MY KEY from a peer. This may "
               "indicate a misconfiguration where another node has the same "
               "validator key, or may be caused by unusual message routing and "

    return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);

    protocol::TMHaveTransactionSet msg;
    msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
    msg.set_status(protocol::tsHAVE);
    app_.overlay().foreach(

    mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
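// endConsensus: after a round completes, discard peers stuck on an obsolete
// ledger, re-check the last closed ledger against the network, and promote
// the operating mode toward FULL when we are caught up.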
    uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;

    for (auto const& it : app_.overlay().getActivePeers())
        if (it && (it->getClosedLedgerHash() == deadLedger))
            JLOG(m_journal.trace()) << "Killing obsolete peer status";

    checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);

    if (networkClosed.isZero())
        CLOG(clog) << "endConsensus last closed ledger is zero. ";

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::SYNCING)) &&
        if (!needNetworkLedger_)
            setMode(OperatingMode::TRACKING);

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::TRACKING)) &&
        auto current = m_ledgerMaster.getCurrentLedger();
        if (app_.timeKeeper().now() < (current->info().parentCloseTime +
                                       2 * current->info().closeTimeResolution))
            setMode(OperatingMode::FULL);

    beginConsensus(networkClosed, clog);

NetworkOPsImp::consensusViewChange()
    if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
        setMode(OperatingMode::CONNECTED);
    if (!mStreamMaps[sManifests].empty())
        jvObj[jss::type] = "manifestReceived";
        jvObj[jss::signing_key] =
            jvObj[jss::signature] = strHex(*sig);
        jvObj[jss::domain] = mo.domain;

        for (auto i = mStreamMaps[sManifests].begin();
             i != mStreamMaps[sManifests].end();)
            if (auto p = i->second.lock())
                p->send(jvObj, true);
                i = mStreamMaps[sManifests].erase(i);
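// ServerFeeSummary snapshots fee and load state so pubServer() only emits a
// "serverStatus" message when something changed. From the fields assigned
// below, a published object looks roughly like (illustrative values only):
//   {"type":"serverStatus", "load_base":256, "load_factor_server":256,
//    "base_fee":10, "load_factor":256, ...}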
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , em{std::move(escalationMetrics)}

        em.has_value() != b.em.has_value())

            em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
            em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
            em->referenceFeeLevel != b.em->referenceFeeLevel);

    jvObj[jss::type] = "serverStatus";
    jvObj[jss::load_base] = f.loadBaseServer;
    jvObj[jss::load_factor_server] = f.loadFactorServer;
    jvObj[jss::base_fee] = f.baseFee.jsonClipped();

            safe_cast<std::uint64_t>(f.loadFactorServer),
            f.em->openLedgerFeeLevel,
            f.em->referenceFeeLevel)

        jvObj[jss::load_factor] = trunc32(loadFactor);
        jvObj[jss::load_factor_fee_escalation] =
            f.em->openLedgerFeeLevel.jsonClipped();
        jvObj[jss::load_factor_fee_queue] =
            f.em->minProcessingFeeLevel.jsonClipped();
        jvObj[jss::load_factor_fee_reference] =
            f.em->referenceFeeLevel.jsonClipped();

        jvObj[jss::load_factor] = f.loadFactorServer;

            p->send(jvObj, true);
    if (!streamMap.empty())
        jvObj[jss::type] = "consensusPhase";
        jvObj[jss::consensus] = to_string(phase);

        for (auto i = streamMap.begin(); i != streamMap.end();)
            if (auto p = i->second.lock())
                p->send(jvObj, true);
                i = streamMap.erase(i);
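// pubValidation: publish each received validation on the "validations"
// stream, expanding optional fields (server version, cookie, fee voting
// values, amendments) only when present in the STValidation.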
    auto const signerPublic = val->getSignerPublic();

    jvObj[jss::type] = "validationReceived";
    jvObj[jss::validation_public_key] =
    jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
    jvObj[jss::signature] = strHex(val->getSignature());
    jvObj[jss::full] = val->isFull();
    jvObj[jss::flags] = val->getFlags();
    jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
    jvObj[jss::data] = strHex(val->getSerializer().slice());

    if (auto version = (*val)[~sfServerVersion])

    if (auto cookie = (*val)[~sfCookie])

    if (auto hash = (*val)[~sfValidatedHash])
        jvObj[jss::validated_hash] = strHex(*hash);

    auto const masterKey =

    if (masterKey != signerPublic)

    if (auto const seq = (*val)[~sfLedgerSequence])
        jvObj[jss::ledger_index] = *seq;

    if (val->isFieldPresent(sfAmendments))
        for (auto const& amendment : val->getFieldV256(sfAmendments))
            jvObj[jss::amendments].append(to_string(amendment));

    if (auto const closeTime = (*val)[~sfCloseTime])
        jvObj[jss::close_time] = *closeTime;

    if (auto const loadFee = (*val)[~sfLoadFee])
        jvObj[jss::load_fee] = *loadFee;

    if (auto const baseFee = val->at(~sfBaseFee))
        jvObj[jss::base_fee] = static_cast<double>(*baseFee);

    if (auto const reserveBase = val->at(~sfReserveBase))
        jvObj[jss::reserve_base] = *reserveBase;

    if (auto const reserveInc = val->at(~sfReserveIncrement))
        jvObj[jss::reserve_inc] = *reserveInc;

    if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
        baseFeeXRP && baseFeeXRP->native())
        jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();

    if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
        reserveBaseXRP && reserveBaseXRP->native())
        jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();

    if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
        reserveIncXRP && reserveIncXRP->native())
        jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
    if (jvTx.isMember(jss::ledger_index))
        jvTx[jss::ledger_index] =

        if (auto p = i->second.lock())
                [&](Json::Value const& jv) { p->send(jv, true); });

    jvObj[jss::type] = "peerStatusChange";

            p->send(jvObj, true);
    using namespace std::chrono_literals;

        << "recvValidation " << val->getLedgerHash() << " from " << source;

            << "Exception thrown for handling new validation "
            << val->getLedgerHash() << ": " << e.what();

            << "Unknown exception thrown for handling new validation "
            << val->getLedgerHash();

        ss << "VALIDATION: " << val->render() << " master_key: ";
2636 "This server is amendment blocked, and must be updated to be "
2637 "able to stay in sync with the network.";
2644 "This server has an expired validator list. validators.txt "
2645 "may be incorrectly configured or some [validator_list_sites] "
2646 "may be unreachable.";
2653 "One or more unsupported amendments have reached majority. "
2654 "Upgrade to the latest version before they are activated "
2655 "to avoid being amendment blocked.";
2656 if (
auto const expected =
2660 d[jss::expected_date] = expected->time_since_epoch().count();
2661 d[jss::expected_date_UTC] =
to_string(*expected);
2665 if (warnings.size())
2666 info[jss::warnings] = std::move(warnings);
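// getServerInfo: assemble the server_info/server_state response, covering
// identity, sync status, fee and load factors, the last close, and per-port
// listener details; admin callers get extra diagnostics.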
    info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(

        info[jss::network_ledger] = "waiting";

    info[jss::validation_quorum] =

            info[jss::node_size] = "tiny";
            info[jss::node_size] = "small";
            info[jss::node_size] = "medium";
            info[jss::node_size] = "large";
            info[jss::node_size] = "huge";

            info[jss::validator_list_expires] =
                safe_cast<Json::UInt>(when->time_since_epoch().count());
            info[jss::validator_list_expires] = 0;

            if (*when == TimeKeeper::time_point::max())
                x[jss::expiration] = "never";
                x[jss::status] = "active";

                x[jss::status] = "active";
                x[jss::status] = "expired";

            x[jss::status] = "unknown";
            x[jss::expiration] = "unknown";

#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
#ifdef GIT_COMMIT_HASH
    x[jss::hash] = GIT_COMMIT_HASH;
    x[jss::branch] = GIT_BRANCH;

    info[jss::io_latency_ms] =

        info[jss::pubkey_validator] =
        info[jss::pubkey_validator] = "none";

        info[jss::counters][jss::nodestore] = nodestore;

    info[jss::pubkey_node] =

        info[jss::amendment_blocked] = true;

        lastClose[jss::converge_time_s] =
        lastClose[jss::converge_time] =

    info[jss::last_close] = lastClose;

        info[jss::network_id] = static_cast<Json::UInt>(*netid);

    auto const escalationMetrics =

    auto const loadFactorFeeEscalation =
            escalationMetrics.openLedgerFeeLevel,
            escalationMetrics.referenceFeeLevel)

        safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);

        info[jss::load_base] = loadBaseServer;
        info[jss::load_factor] = trunc32(loadFactor);
        info[jss::load_factor_server] = loadFactorServer;

        info[jss::load_factor_fee_escalation] =
            escalationMetrics.openLedgerFeeLevel.jsonClipped();
        info[jss::load_factor_fee_queue] =
            escalationMetrics.minProcessingFeeLevel.jsonClipped();
        info[jss::load_factor_fee_reference] =
            escalationMetrics.referenceFeeLevel.jsonClipped();

        info[jss::load_factor] =
            static_cast<double>(loadFactor) / loadBaseServer;

        if (loadFactorServer != loadFactor)
            info[jss::load_factor_server] =
                static_cast<double>(loadFactorServer) / loadBaseServer;

            if (fee != loadBaseServer)
                info[jss::load_factor_local] =
                    static_cast<double>(fee) / loadBaseServer;
            if (fee != loadBaseServer)
                info[jss::load_factor_net] =
                    static_cast<double>(fee) / loadBaseServer;
            if (fee != loadBaseServer)
                info[jss::load_factor_cluster] =
                    static_cast<double>(fee) / loadBaseServer;

        if (escalationMetrics.openLedgerFeeLevel !=
                escalationMetrics.referenceFeeLevel &&
            (admin || loadFactorFeeEscalation != loadFactor))
            info[jss::load_factor_fee_escalation] =
                escalationMetrics.openLedgerFeeLevel.decimalFromReference(
                    escalationMetrics.referenceFeeLevel);
        if (escalationMetrics.minProcessingFeeLevel !=
            escalationMetrics.referenceFeeLevel)
            info[jss::load_factor_fee_queue] =
                escalationMetrics.minProcessingFeeLevel.decimalFromReference(
                    escalationMetrics.referenceFeeLevel);
    XRPAmount const baseFee = lpClosed->fees().base;

    l[jss::seq] = Json::UInt(lpClosed->info().seq);
    l[jss::hash] = to_string(lpClosed->info().hash);

        l[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
        l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
            lpClosed->info().closeTime.time_since_epoch().count());

        l[jss::reserve_base_xrp] = lpClosed->fees().reserve.decimalXRP();
        l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();

        std::abs(closeOffset.count()) >= 60)
        l[jss::close_time_offset] =

            Json::UInt(age < highAgeThreshold ? age.count() : 0);

        auto lCloseTime = lpClosed->info().closeTime;
        if (lCloseTime <= closeTime)
            using namespace std::chrono_literals;
            auto age = closeTime - lCloseTime;
                Json::UInt(age < highAgeThreshold ? age.count() : 0);

        info[jss::validated_ledger] = l;
        info[jss::closed_ledger] = l;

        info[jss::published_ledger] = "none";
    else if (lpPublished->info().seq != lpClosed->info().seq)
        info[jss::published_ledger] = lpPublished->info().seq;

    info[jss::jq_trans_overflow] =
    info[jss::peer_disconnects] =
    info[jss::peer_disconnects_resources] =

        "http", "https", "peer", "ws", "ws2", "wss", "wss2"};

        !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
          port.admin_user.empty() && port.admin_password.empty()))

        for (auto const& p : proto)
            jv[jss::protocol].append(p);

        auto const optPort = grpcSection.get("port");
        if (optPort && grpcSection.get("ip"))
            jv[jss::port] = *optPort;
            jv[jss::protocol].append("grpc");

    info[jss::ports] = std::move(ports);
        ledger->rules().enabled(featureBatch))

            [&](Json::Value const& jv) { p->send(jv, true); });
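// pubLedger: when a ledger is fully accepted, publish a "ledgerClosed"
// message on the ledger stream (for example: {"type":"ledgerClosed",
// "ledger_index":..., "ledger_hash":..., "txn_count":...}), then fan out
// every transaction in the ledger to the transaction-level streams.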
        lpAccepted->info().hash, alpAccepted);
        alpAccepted->getLedger().get() == lpAccepted.get(),
        "ripple::NetworkOPsImp::pubLedger : accepted input");

        << "Publishing ledger " << lpAccepted->info().seq << " "
        << lpAccepted->info().hash;

            jvObj[jss::type] = "ledgerClosed";
            jvObj[jss::ledger_index] = lpAccepted->info().seq;
            jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
                lpAccepted->info().closeTime.time_since_epoch().count());

            if (!lpAccepted->rules().enabled(featureXRPFees))
                jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
                jvObj[jss::reserve_base] =
                    lpAccepted->fees().reserve.jsonClipped();
                jvObj[jss::reserve_inc] =
                    lpAccepted->fees().increment.jsonClipped();

            jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());

                jvObj[jss::validated_ledgers] =

                    p->send(jvObj, true);

                    p->send(jvObj, true);

    static bool firstTime = true;

        for (auto& inner : outer.second)
            auto& subInfo = inner.second;
            if (subInfo.index_->separationLedgerSeq_ == 0)
                    alpAccepted->getLedger(), subInfo);

    for (auto const& accTx : *alpAccepted)
            lpAccepted, *accTx, accTx == *(--alpAccepted->end()));

        "reportConsensusStateChange->pubConsensus",
    jvObj[jss::type] = "transaction";
    jvObj[jss::transaction] =

            jvObj[jss::meta], *ledger, transaction, meta->get());
            jvObj[jss::meta], transaction, meta->get());

    if (auto const& lookup = ledger->txRead(transaction->getTransactionID());
        lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
        uint32_t const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
        if (transaction->isFieldPresent(sfNetworkID))
            netID = transaction->getFieldU32(sfNetworkID);

            jvObj[jss::ctid] = *ctid;

    if (!ledger->open())
        jvObj[jss::ledger_hash] = to_string(ledger->info().hash);

        jvObj[jss::ledger_index] = ledger->info().seq;
        jvObj[jss::transaction][jss::date] =
            ledger->info().closeTime.time_since_epoch().count();
        jvObj[jss::validated] = true;
        jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);

        jvObj[jss::validated] = false;
        jvObj[jss::ledger_current_index] = ledger->info().seq;

    jvObj[jss::status] = validated ? "closed" : "proposed";
    jvObj[jss::engine_result] = sToken;
    jvObj[jss::engine_result_code] = result;
    jvObj[jss::engine_result_message] = sHuman;

    if (transaction->getTxnType() == ttOFFER_CREATE)
        auto const account = transaction->getAccountID(sfAccount);
        auto const amount = transaction->getFieldAmount(sfTakerGets);

        if (account != amount.issue().account)
                jvObj[jss::transaction][jss::owner_funds] =
                    ownerFunds.getText();

        [&]<unsigned Version>(
            RPC::insertDeliverMax(
                jvTx[jss::transaction], transaction->getTxnType(), Version);

            if constexpr (Version > 1)
                jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
                jvTx[jss::hash] = hash;
                jvTx[jss::transaction][jss::hash] = hash;
    auto const& stTxn = transaction.getTxn();
    auto const trResult = transaction.getResult();

            [&](Json::Value const& jv) { p->send(jv, true); });

            [&](Json::Value const& jv) { p->send(jv, true); });
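// pubAccountTransaction: route a validated transaction to real-time account
// subscribers and to account_history_tx_stream subscribers, pruning any
// listeners whose InfoSub has expired.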
    auto const currLedgerSeq = ledger->seq();

        for (auto const& affectedAccount : transaction.getAffected())
                auto it = simiIt->second.begin();
                while (it != simiIt->second.end())
                        it = simiIt->second.erase(it);

                auto it = simiIt->second.begin();
                while (it != simiIt->second.end())
                        it = simiIt->second.erase(it);

            auto& subs = histoIt->second;
            auto it = subs.begin();
            while (it != subs.end())
                    if (currLedgerSeq <= info.index_->separationLedgerSeq_)
                    it = subs.erase(it);

        << "pubAccountTransaction: "
        << "proposed=" << iProposed << ", accepted=" << iAccepted;

    if (!notify.empty() || !accountHistoryNotify.empty())
        auto const& stTxn = transaction.getTxn();
        auto const trResult = transaction.getResult();

                isrListener->getApiVersion(),
                [&](Json::Value const& jv) { isrListener->send(jv, true); });

            jvObj.set(jss::account_history_boundary, true);

            jvObj.isMember(jss::account_history_tx_stream) ==
            "ripple::NetworkOPsImp::pubAccountTransaction : "
            "account_history_tx_stream not set");
        for (auto& info : accountHistoryNotify)
            auto& index = info.index_;
            if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
                jvObj.set(jss::account_history_tx_first, true);
            jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
                info.sink_->getApiVersion(),
                [&](Json::Value const& jv) { info.sink_->send(jv, true); });
    for (auto const& affectedAccount : tx->getMentionedAccounts())
            auto it = simiIt->second.begin();
            while (it != simiIt->second.end())
                    it = simiIt->second.erase(it);

    JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;

    if (!notify.empty() || !accountHistoryNotify.empty())
                isrListener->getApiVersion(),
                [&](Json::Value const& jv) { isrListener->send(jv, true); });

            jvObj.isMember(jss::account_history_tx_stream) ==
            "ripple::NetworkOPs::pubProposedAccountTransaction : "
            "account_history_tx_stream not set");
        for (auto& info : accountHistoryNotify)
            auto& index = info.index_;
            if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
                jvObj.set(jss::account_history_tx_first, true);
            jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
                info.sink_->getApiVersion(),
                [&](Json::Value const& jv) { info.sink_->send(jv, true); });
    for (auto const& naAccountID : vnaAccountIDs)
            << "subAccount: account: " << toBase58(naAccountID);

        isrListener->insertSubAccountInfo(naAccountID, rt);

    for (auto const& naAccountID : vnaAccountIDs)
        auto simIterator = subMap.find(naAccountID);
        if (simIterator == subMap.end())
            usisElement[isrListener->getSeq()] = isrListener;
            subMap.insert(simIterator, make_pair(naAccountID, usisElement));

            simIterator->second[isrListener->getSeq()] = isrListener;

    for (auto const& naAccountID : vnaAccountIDs)
        isrListener->deleteSubAccountInfo(naAccountID, rt);

    for (auto const& naAccountID : vnaAccountIDs)
        auto simIterator = subMap.find(naAccountID);

        if (simIterator != subMap.end())
            simIterator->second.erase(uSeq);

            if (simIterator->second.empty())
                subMap.erase(simIterator);
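// addAccountHistoryJob: background job that pages backwards through
// validated ledgers (1024 at a time) via the SQLite relational database,
// streaming an account's historical transactions to
// account_history_tx_stream subscribers until it reaches the account's
// first transaction or the genesis ledger.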
    enum DatabaseType { Sqlite, None };
    static auto const databaseType = [&]() -> DatabaseType {
            return DatabaseType::Sqlite;
        return DatabaseType::None;

    if (databaseType == DatabaseType::None)
            "ripple::NetworkOPsImp::addAccountHistoryJob : no database");
            << "AccountHistory job for account "

        "AccountHistoryTxStream",
        [this, dbType = databaseType, subInfo]() {
            auto const& accountId = subInfo.index_->accountId_;
            auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
            auto& txHistoryIndex = subInfo.index_->historyTxIndex_;

                << "AccountHistory job for account " << toBase58(accountId)
                << " started. lastLedgerSeq=" << lastLedgerSeq;

                auto stx = tx->getSTransaction();
                if (stx->getAccountID(sfAccount) == accountId &&
                    stx->getSeqValue() == 1)

                for (auto& node : meta->getNodes())
                    if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)

                    if (node.isFieldPresent(sfNewFields))
                        if (auto inner = dynamic_cast<STObject const*>(
                                node.peekAtPField(sfNewFields));
                            if (inner->isFieldPresent(sfAccount) &&
                                inner->getAccountID(sfAccount) == accountId)

                bool unsubscribe) -> bool {
                    sptr->send(jvObj, true);

                bool unsubscribe) -> bool {
                        sptr->getApiVersion(),
                        [&](Json::Value const& jv) { sptr->send(jv, true); });

                        accountId, minLedger, maxLedger, marker, 0, true};
                    return db->newestAccountTxPage(options);

                        "ripple::NetworkOPsImp::addAccountHistoryJob : "
                        "getMoreTxns : invalid database type");
            while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
                int feeChargeCount = 0;

                        << "AccountHistory job for account "
                        << toBase58(accountId) << " no InfoSub. Fee charged "
                        << feeChargeCount << " times.";

                auto startLedgerSeq =
                    (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
                    << "AccountHistory job for account " << toBase58(accountId)
                    << ", working on ledger range [" << startLedgerSeq << ","
                    << lastLedgerSeq << "]";

                auto haveRange = [&]() -> bool {
                    auto haveSomeValidatedLedgers =
                            validatedMin, validatedMax);

                    return haveSomeValidatedLedgers &&
                        validatedMin <= startLedgerSeq &&
                        lastLedgerSeq <= validatedMax;

                        << "AccountHistory reschedule job for account "
                        << toBase58(accountId) << ", incomplete ledger range ["
                        << startLedgerSeq << "," << lastLedgerSeq << "]";

                while (!subInfo.index_->stopHistorical_)
                        getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
                            "ripple::NetworkOPsImp::addAccountHistoryJob : "
                            "getMoreTxns failed");
                            << "AccountHistory job for account "
                            << toBase58(accountId) << " getMoreTxns failed.";

                    auto const& txns = dbResult->first;
                    marker = dbResult->second;
                    size_t num_txns = txns.size();
                    for (size_t i = 0; i < num_txns; ++i)
                        auto const& [tx, meta] = txns[i];
                                << "AccountHistory job for account "
                                << toBase58(accountId) << " empty tx or meta.";

                                "ripple::NetworkOPsImp::addAccountHistoryJob : "
                                "getLedgerBySeq failed");
                                << "AccountHistory job for account "
                                << toBase58(accountId) << " no ledger.";

                            tx->getSTransaction();
                                "NetworkOPsImp::addAccountHistoryJob : "
                                "getSTransaction failed");
                                << "AccountHistory job for account "
                                << " getSTransaction failed.";

                        auto const trR = meta->getResultTER();
                            transJson(stTxn, trR, true, curTxLedger, mRef);

                            jss::account_history_tx_index, txHistoryIndex--);
                        if (i + 1 == num_txns ||
                            txns[i + 1].first->getLedger() != tx->getLedger())
                            jvTx.set(jss::account_history_boundary, true);

                        if (isFirstTx(tx, meta))
                            jvTx.set(jss::account_history_tx_first, true);
                            sendMultiApiJson(jvTx, false);

                                << "AccountHistory job for account "
                                << " done, found last tx.";

                            sendMultiApiJson(jvTx, false);

                            << "AccountHistory job for account "
                            << " paging, marker=" << marker->ledgerSeq << ":"

                if (!subInfo.index_->stopHistorical_)
                    lastLedgerSeq = startLedgerSeq - 1;
                    if (lastLedgerSeq <= 1)
                            << "AccountHistory job for account "
                            << " done, reached genesis ledger.";
    subInfo.index_->separationLedgerSeq_ = ledger->seq();
    auto const& accountId = subInfo.index_->accountId_;

    if (!ledger->exists(accountKeylet))
            << "subAccountHistoryStart, no account " << toBase58(accountId)
            << ", no need to add AccountHistory job.";

    if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
        if (sleAcct->getFieldU32(sfSequence) == 1)
                << "subAccountHistoryStart, genesis account "
                << " does not have tx, no need to add AccountHistory job.";

            "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
            "access genesis account");

    subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
    subInfo.index_->haveHistorical_ = true;

        << "subAccountHistoryStart, add AccountHistory job: accountId="
        << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
    if (!isrListener->insertSubAccountHistory(accountId))
            << "subAccountHistory, already subscribed to account "

        inner.emplace(isrListener->getSeq(), ahi);
        simIterator->second.emplace(isrListener->getSeq(), ahi);

            << "subAccountHistory, no validated ledger yet, delay start";

    isrListener->deleteSubAccountHistory(account);

        auto& subInfoMap = simIterator->second;
        auto subInfoIter = subInfoMap.find(seq);
        if (subInfoIter != subInfoMap.end())
            subInfoIter->second.index_->stopHistorical_ = true;

            simIterator->second.erase(seq);
            if (simIterator->second.empty())

        << "unsubAccountHistory, account " << toBase58(account)
        << ", historyOnly = " << (historyOnly ? "true" : "false");

        listeners->addSubscriber(isrListener);
        UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");

        listeners->removeSubscriber(uSeq);

        m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
        Throw<std::runtime_error>(
            "Operation only possible in STANDALONE mode.");
    jvResult[jss::ledger_index] = lpClosed->info().seq;
    jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
        lpClosed->info().closeTime.time_since_epoch().count());
    if (!lpClosed->rules().enabled(featureXRPFees))
        jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
        jvResult[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
        jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();

        jvResult[jss::validated_ledgers] =

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

    jvResult[jss::random] = to_string(uRandom);
    jvResult[jss::load_base] = feeTrack.getLoadBase();
    jvResult[jss::load_factor] = feeTrack.getLoadFactor();
    jvResult[jss::hostid] = getHostId(admin);
    jvResult[jss::pubkey_node] =

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

        .emplace(isrListener->getSeq(), isrListener)

    if (map.find(pInfo->getSeq()) != map.end())
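// getBookPage: walk an order book's directory pages, computing funded
// amounts per offer (caching each owner's remaining balance in umBalance)
// and emitting up to iLimit offers as JSON.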
#ifndef USE_NEW_BOOK_PAGE

    unsigned int iLimit,

    uint256 uTipIndex = uBookBase;

        stream << "getBookPage:" << book;
        stream << "getBookPage: uBookBase=" << uBookBase;
        stream << "getBookPage: uBookEnd=" << uBookEnd;
        stream << "getBookPage: uTipIndex=" << uTipIndex;

    bool bDirectAdvance = true;

    unsigned int uBookEntry;

    while (!bDone && iLimit-- > 0)
            bDirectAdvance = false;

            auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
                sleOfferDir.reset();

                uTipIndex = sleOfferDir->key();

                cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
                    << "getBookPage: uTipIndex=" << uTipIndex;
                    << "getBookPage: offerIndex=" << offerIndex;

            auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
            auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
            auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);

            bool firstOwnerOffer(true);

                saOwnerFunds = saTakerGets;
            else if (bGlobalFreeze)

                auto umBalanceEntry = umBalance.find(uOfferOwnerID);
                if (umBalanceEntry != umBalance.end())
                    saOwnerFunds = umBalanceEntry->second;
                    firstOwnerOffer = false;

                    if (saOwnerFunds < beast::zero)
                        saOwnerFunds.clear();

                STAmount saOwnerFundsLimit = saOwnerFunds;

                    saOwnerFundsLimit = divide(saOwnerFunds, offerRate);

                if (saOwnerFundsLimit >= saTakerGets)
                    saTakerGetsFunded = saTakerGets;
                    saTakerGetsFunded = saOwnerFundsLimit;
                    saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
                        saTakerGetsFunded, saDirRate, saTakerPays.issue()))
                        .setJson(jvOffer[jss::taker_pays_funded]);

                    saOwnerFunds, multiply(saTakerGetsFunded, offerRate));

                umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;

                    jvOf[jss::quality] = saDirRate.getText();
                    if (firstOwnerOffer)
                        jvOf[jss::owner_funds] = saOwnerFunds.getText();

            if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
                bDirectAdvance = true;
                    << "getBookPage: offerIndex=" << offerIndex;
    unsigned int iLimit,

    MetaView lesActive(lpLedger, tapNONE, true);
    OrderBookIterator obIterator(lesActive, book);

    bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
        lesActive.isGlobalFrozen(book.in.account);

    while (iLimit-- > 0 && obIterator.nextOffer())
            auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
            auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
            auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
            STAmount saDirRate = obIterator.getCurrentRate();

                saOwnerFunds = saTakerGets;
            else if (bGlobalFreeze)

                auto umBalanceEntry = umBalance.find(uOfferOwnerID);
                if (umBalanceEntry != umBalance.end())
                    saOwnerFunds = umBalanceEntry->second;

                    saOwnerFunds = lesActive.accountHolds(

                    if (saOwnerFunds.isNegative())
                        saOwnerFunds.zero();

            STAmount saTakerGetsFunded;
            STAmount saOwnerFundsLimit = saOwnerFunds;

                saOwnerFundsLimit = divide(saOwnerFunds, offerRate);

            if (saOwnerFundsLimit >= saTakerGets)
                saTakerGetsFunded = saTakerGets;
                saTakerGetsFunded = saOwnerFundsLimit;

            saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
                multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
                .setJson(jvOffer[jss::taker_pays_funded]);

            STAmount saOwnerPays = (parityRate == offerRate)
                    saOwnerFunds, multiply(saTakerGetsFunded, offerRate));

            umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;

            if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
                jvOf[jss::quality] = saDirRate.getText();
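// StateAccounting: tracks, per operating mode, the time spent in that mode
// and the number of transitions into it; initialSyncUs_ is recorded on a
// mode's first transition, measuring elapsed time since process start, and
// is reported as initial_sync_duration_us.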
    auto const current = std::chrono::duration_cast<std::chrono::microseconds>(

    ++counters_[static_cast<std::size_t>(om)].transitions;
        counters_[static_cast<std::size_t>(om)].transitions == 1)
        initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
                             now - processStart_)

        std::chrono::duration_cast<std::chrono::microseconds>(now - start_);

    auto [counters, mode, start, initialSync] = getCounterData();
    auto const current = std::chrono::duration_cast<std::chrono::microseconds>(

        auto& state = obj[jss::state_accounting][states_[i]];
        state[jss::transitions] = std::to_string(counters[i].transitions);
        state[jss::duration_us] = std::to_string(counters[i].dur.count());

    obj[jss::initial_sync_duration_us] = std::to_string(initialSync);

    boost::asio::io_context& io_svc,
T back_inserter(T... args)
Decorator for streaming out compact json.
Lightweight wrapper to tag static string.
Value & append(Value const &value)
Append value to array at the end.
bool isMember(char const *key) const
Return true if the object has a member named key.
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
A generic endpoint for log messages.
Stream trace() const
Severity stream access functions.
A metric for measuring an integral value.
void set(value_type value) const
Set the value on the gauge.
A reference to a handler for performing polled collection.
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
std::uint32_t getLoadFee() const
NetClock::time_point getReportTime() const
PublicKey const & identity() const
std::size_t size() const
The number of nodes in the cluster list.
std::string SERVER_DOMAIN
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
int RELAY_UNTRUSTED_VALIDATIONS
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
A pool of threads to perform work.
Json::Value getJson(int c=0)
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
std::uint32_t getClusterFee() const
std::uint32_t getLocalFee() const
std::uint32_t getLoadBase() const
std::uint32_t getRemoteFee() const
std::uint32_t getLoadFactor() const
void heartbeat()
Reset the stall detection timer.
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
std::uint64_t initialSyncUs_
CounterData getCounterData() const
std::chrono::steady_clock::time_point start_
static std::array< Json::StaticString const, 5 > const states_
std::chrono::steady_clock::time_point const processStart_
Transaction with input flags and results to be applied in batches.
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
std::shared_ptr< Transaction > const transaction
void processClusterTimer()
boost::asio::steady_timer accountHistoryTxTimer_
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
std::vector< TransactionStatus > mTransactions
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or as they are submitted by clients.
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, ensuring that they are processed in one batch.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
std::optional< PublicKey > const validatorPK_
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
SubInfoMapType mSubAccount
std::optional< PublicKey > const validatorMasterPK_
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
Unsubscribe from an account's transactions.
std::set< uint256 > pendingValidations_
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply the batch of transactions and wait for this transaction to complete.
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
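A sketch of arming a timer through this helper, matching the signature above; the one-second interval and both callbacks are illustrative assumptions, not the production tuning.
// Inside NetworkOPsImp (sketch): re-arm the heartbeat timer.
setTimer(
    heartbeatTimer_,
    std::chrono::milliseconds{1000},          // illustrative interval
    [this]() { processHeartbeatTimer(); },    // runs on expiry
    [this]() { /* timer cancelled, e.g. during shutdown */ });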
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
std::atomic< bool > unlBlocked_
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accept the current transaction tree and return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
std::atomic< bool > needNetworkLedger_
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
void setMode(OperatingMode om) override
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
void clearNeedNetworkLedger() override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
DispatchState mDispatchState
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
void reportFeeChange() override
void processHeartbeatTimer()
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
Subscribe to an account's new transactions and retrieve the account's historical transactions.
std::mutex validationsMutex_
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
bool subTransactions(InfoSub::ref ispListener) override
std::atomic< bool > amendmentWarned_
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
void getCountsJson(Json::Value &obj)
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Writable ledger view that accumulates state and tx changes.
BookListeners::pointer getBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &alTx, MultiApiJson const &jvObj)
BookListeners::pointer makeBookListeners(Book const &)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Represents a set of transactions in RCLConsensus.
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Collects logging information.
std::unique_ptr< std::stringstream > const & ss()
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
std::string getText() const override
Issue const & issue() const
std::optional< T > get(std::string const &name) const
std::size_t size() const noexcept
void const * data() const noexcept
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
std::chrono::seconds closeOffset() const
time_point closeTime() const
Returns the predicted close time, in network time.
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Validator keys and manifest as set in configuration file.
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
Returns the local validator public key, or std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Json::Value jsonClipped() const
static constexpr std::size_t size()
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and their durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
@ arrayValue
array value (ordered list)
@ objectValue
object value (collection of name/value pairs).
void rngfill(void *const buffer, std::size_t const bytes, Generator &g)
std::string const & getVersionString()
Server version.
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Encodes ledger sequence, transaction index, and network ID into a CTID string.
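A hedged usage sketch: the returned optional is empty when a field exceeds the compact encoding's range, so callers should test it. The ledger sequence, transaction index, and network ID below are arbitrary examples, and the RPC namespace is assumed from the include path.
// Sketch: CTID for the 5th transaction (index 4) in ledger 81000000
// on network 0.
if (auto const ctid = RPC::encodeCTID(81000000u, 4u, 0u))
    std::cout << *ctid << '\n';
else
    std::cout << "field out of range for CTID\n";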
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
void insertNFTSyntheticInJson(Json::Value &, std::shared_ptr< STTx const > const &, TxMeta const &)
Adds common synthetic fields to transaction-related JSON responses.
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Keylet account(AccountID const &id) noexcept
AccountID root.
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
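These keylet helpers pair naturally with the ReadView::read interface listed earlier; a sketch, assuming view and id are a valid ReadView and AccountID, with the offer sequence chosen arbitrarily.
// read() returns nullptr when the state item does not exist.
if (auto const acctSle = view.read(keylet::account(id)))
{
    // The account exists; look up a hypothetical offer with sequence 7.
    if (auto const offerSle = view.read(keylet::offer(id, 7)))
    {
        // ... inspect the offer's fields ...
    }
}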
Rate rate(Env &env, Account const &account, std::uint32_t const &seq)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
STAmount divide(STAmount const &amount, Rate const &rate)
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
std::uint64_t getQuality(uint256 const &uBase)
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
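A sketch combining this with generateSeed (listed further below) and calcAccountID; the well-known test pass phrase is used purely for illustration.
// Deterministic: the same pass phrase always yields the same keys.
auto const seed = generateSeed("masterpassphrase");
auto const [publicKey, secretKey] = generateKeyPair(KeyType::secp256k1, seed);
auto const accountId = calcAccountID(publicKey);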
auto constexpr muldiv_max
std::unique_ptr< LocalTxs > make_LocalTxs()
STAmount amountFromQuality(std::uint64_t rate)
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
@ warnRPC_EXPIRED_VALIDATOR_LIST
@ warnRPC_UNSUPPORTED_MAJORITY
@ warnRPC_AMENDMENT_BLOCKED
bool set(T &target, std::string const &name, Section const §ion)
Set a value from a configuration Section. If the named value is not found or doesn't parse as a T, the target is left unchanged.
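A sketch of the read-with-default pattern this enables; the section name, key, and default are hypothetical.
// target keeps its prior value if "relay_limit" is absent or fails to
// parse as an int; the return value reports which case occurred.
int target = 5;  // assumed default, for illustration
bool const found = set(target, "relay_limit", config.section("overlay"));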
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating; see the sketch following the enumerator list below.
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
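A sketch of how code can reason about these modes, assuming the enumerators are declared in increasing order of health (DISCONNECTED < CONNECTED < SYNCING < TRACKING < FULL), which is how the server's own comparisons treat them.
// Sketch: ordered comparison over OperatingMode, under the
// declaration-order assumption noted above.
bool isAtLeastTracking(OperatingMode m)
{
    return m >= OperatingMode::TRACKING;  // TRACKING or FULL
}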
STAmount multiply(STAmount const &amount, Rate const &rate)
AccountID calcAccountID(PublicKey const &pk)
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Json::Value rpcError(int iError)
bool isTefFailure(TER x) noexcept
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
std::string strHex(FwdIt begin, FwdIt end)
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
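A sketch tying this to the Rate arithmetic helpers listed above; view, issuer, and amount are assumed in scope.
// parityRate (1:1) means the issuer charges no transfer fee.
Rate const rate = transferRate(view, issuer);
if (rate != parityRate)
{
    // Gross up the amount by the issuer's transfer fee.
    STAmount const gross = multiply(amount, rate);
}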
void forAllApiVersions(Fn const &fn, Args &&... args)
bool isTerRetry(TER x) noexcept
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
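A sketch of the deduction this enables, using the peer_in_cluster predicate described near the end of this listing; overlay and message are assumed in scope.
// Relay message to every peer in our cluster; send_if deduces the
// predicate type, so no explicit template arguments are needed.
overlay.foreach(send_if(message, peer_in_cluster()));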
uint256 getQualityNext(uint256 const &uBase)
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const ¤cy, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
bool isTesSuccess(TER x) noexcept
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const ¤t)
std::string to_string_iso(date::sys_time< Duration > tp)
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
std::string to_string(base_uint< Bits, Tag > const &a)
FeeSetup setup_FeeVote(Section const §ion)
bool isTemMalformed(TER x) noexcept
Number root(Number f, unsigned d)
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
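A minimal sketch: the optional is empty only when the true value*mul/div would not fit in 64 bits, so scaling this way is safe against intermediate overflow.
// 10 * 256 / 100 == 25 (integer division), computed without the
// intermediate product overflowing.
if (auto const scaled = mulDiv(10, 256, 100))
    assert(*scaled == 25);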
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
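Together with cdirFirst above, this yields the canonical directory walk; a sketch, assuming view and rootKey identify a valid directory.
// Visit every entry keyed under the directory at rootKey.
std::shared_ptr<SLE const> page;
unsigned int index = 0;
uint256 entry;
if (cdirFirst(view, rootKey, page, index, entry))
{
    do
    {
        // ... entry is the key of one directory item ...
    } while (cdirNext(view, rootKey, page, index, entry));
}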
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
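A sketch of gating relay on these checks; app, stx, rules, and j are assumed in scope, and Validity::Valid denotes the fully-checked outcome.
auto const [validity, reason] =
    checkValidity(app.getHashRouter(), *stx, rules, app.config());
if (validity != Validity::Valid)
{
    JLOG(j.warn()) << "Transaction rejected: " << reason;
    return;
}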
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
constexpr std::size_t maxPoppedTransactions
bool transResultInfo(TER code, std::string &token, std::string &text)
bool isTelLocal(TER x) noexcept
uint256 getBookBase(Book const &book)
constexpr std::uint32_t tfInnerBatchTxn
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
static std::uint32_t trunc32(std::uint64_t v)
static auto const genesisAccountId
std::string serialized
The manifest in serialized form.
std::uint32_t sequence
The sequence number of this manifest.
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Server fees published on server subscription.
bool operator!=(ServerFeeSummary const &b) const
ServerFeeSummary()=default
std::optional< TxQ::Metrics > em
std::uint32_t loadFactorServer
bool operator==(ServerFeeSummary const &b) const
std::uint32_t loadBaseServer
decltype(initialSyncUs_) initialSyncUs
decltype(counters_) counters
std::uint64_t transitions
std::chrono::microseconds dur
beast::insight::Gauge full_transitions
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
beast::insight::Hook hook
beast::insight::Gauge connected_duration
beast::insight::Gauge tracking_duration
beast::insight::Gauge connected_transitions
beast::insight::Gauge disconnected_transitions
beast::insight::Gauge syncing_duration
beast::insight::Gauge tracking_transitions
beast::insight::Gauge full_duration
beast::insight::Gauge disconnected_duration
beast::insight::Gauge syncing_transitions
std::uint32_t historyLastLedgerSeq_
std::uint32_t separationLedgerSeq_
AccountID const accountId_
std::uint32_t forwardTxIndex_
std::atomic< bool > stopHistorical_
std::int32_t historyTxIndex_
SubAccountHistoryIndex(AccountID const &accountId)
std::shared_ptr< SubAccountHistoryIndex > index_
std::shared_ptr< SubAccountHistoryIndex > index_
Represents a transfer rate.
Data format for exchanging consumption information across peers.
std::vector< Item > items
Changes in trusted nodes after updating validator list.
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
IsMemberResult isMember(char const *key) const
void set(char const *key, auto const &v)
Select all peers (except an optionally excluded one) that are in our cluster.
Sends a message to all peers.