1#include <xrpld/app/consensus/RCLConsensus.h>
2#include <xrpld/app/consensus/RCLValidations.h>
3#include <xrpld/app/ledger/AcceptedLedger.h>
4#include <xrpld/app/ledger/InboundLedgers.h>
5#include <xrpld/app/ledger/LedgerMaster.h>
6#include <xrpld/app/ledger/LedgerToJson.h>
7#include <xrpld/app/ledger/LocalTxs.h>
8#include <xrpld/app/ledger/OpenLedger.h>
9#include <xrpld/app/ledger/OrderBookDB.h>
10#include <xrpld/app/ledger/TransactionMaster.h>
11#include <xrpld/app/main/LoadManager.h>
12#include <xrpld/app/main/Tuning.h>
13#include <xrpld/app/misc/AmendmentTable.h>
14#include <xrpld/app/misc/DeliverMax.h>
15#include <xrpld/app/misc/HashRouter.h>
16#include <xrpld/app/misc/LoadFeeTrack.h>
17#include <xrpld/app/misc/NetworkOPs.h>
18#include <xrpld/app/misc/Transaction.h>
19#include <xrpld/app/misc/TxQ.h>
20#include <xrpld/app/misc/ValidatorKeys.h>
21#include <xrpld/app/misc/ValidatorList.h>
22#include <xrpld/app/misc/detail/AccountTxPaging.h>
23#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
24#include <xrpld/app/tx/apply.h>
25#include <xrpld/consensus/Consensus.h>
26#include <xrpld/consensus/ConsensusParms.h>
27#include <xrpld/overlay/Cluster.h>
28#include <xrpld/overlay/Overlay.h>
29#include <xrpld/overlay/predicates.h>
30#include <xrpld/perflog/PerfLog.h>
31#include <xrpld/rpc/BookChanges.h>
32#include <xrpld/rpc/CTID.h>
33#include <xrpld/rpc/DeliveredAmount.h>
34#include <xrpld/rpc/MPTokenIssuanceID.h>
35#include <xrpld/rpc/ServerHandler.h>
37#include <xrpl/basics/UptimeClock.h>
38#include <xrpl/basics/mulDiv.h>
39#include <xrpl/basics/safe_cast.h>
40#include <xrpl/basics/scope.h>
41#include <xrpl/beast/utility/rngfill.h>
42#include <xrpl/crypto/RFC1751.h>
43#include <xrpl/crypto/csprng.h>
44#include <xrpl/protocol/BuildInfo.h>
45#include <xrpl/protocol/Feature.h>
46#include <xrpl/protocol/MultiApiJson.h>
47#include <xrpl/protocol/NFTSyntheticSerializer.h>
48#include <xrpl/protocol/RPCErr.h>
49#include <xrpl/protocol/TxFlags.h>
50#include <xrpl/protocol/jss.h>
51#include <xrpl/resource/Fees.h>
52#include <xrpl/resource/ResourceManager.h>
54#include <boost/asio/ip/host_name.hpp>
55#include <boost/asio/steady_timer.hpp>
94 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
137 std::chrono::steady_clock::time_point
start_ =
198 return !(*
this != b);
217 boost::asio::io_context& io_svc,
231 app_.logs().journal(
"FeeVote")),
234 app.getInboundTransactions(),
235 beast::get_abstract_clock<
std::chrono::steady_clock>(),
237 app_.logs().journal(
"LedgerConsensus"))
239 validatorKeys.keys ? validatorKeys.keys->publicKey
242 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
437 getServerInfo(
bool human,
bool admin,
bool counters)
override;
464 TER result)
override;
498 bool historyOnly)
override;
504 bool historyOnly)
override;
576 catch (boost::system::system_error
const& e)
579 <<
"NetworkOPs: heartbeatTimer cancel error: " << e.what();
586 catch (boost::system::system_error
const& e)
589 <<
"NetworkOPs: clusterTimer cancel error: " << e.what();
596 catch (boost::system::system_error
const& e)
599 <<
"NetworkOPs: accountHistoryTxTimer cancel error: "
604 using namespace std::chrono_literals;
614 boost::asio::steady_timer& timer,
797 template <
class Handler>
799 Handler
const& handler,
801 :
hook(collector->make_hook(handler))
804 "Disconnected_duration"))
807 "Connected_duration"))
809 collector->make_gauge(
"State_Accounting",
"Syncing_duration"))
812 "Tracking_duration"))
814 collector->make_gauge(
"State_Accounting",
"Full_duration"))
817 "Disconnected_transitions"))
820 "Connected_transitions"))
823 "Syncing_transitions"))
826 "Tracking_transitions"))
828 collector->make_gauge(
"State_Accounting",
"Full_transitions"))
857 {
"disconnected",
"connected",
"syncing",
"tracking",
"full"}};
919 static std::string const hostname = boost::asio::ip::host_name();
926 static std::string const shroudedHostId = [
this]() {
932 return shroudedHostId;
947 boost::asio::steady_timer& timer,
954 [
this, onExpire, onError](boost::system::error_code
const& e) {
955 if ((e.value() == boost::system::errc::success) &&
956 (!m_job_queue.isStopped()))
961 if (e.value() != boost::system::errc::success &&
962 e.value() != boost::asio::error::operation_aborted)
965 JLOG(m_journal.error())
966 <<
"Timer got error '" << e.message()
967 <<
"'. Restarting timer.";
972 timer.expires_after(expiry_time);
973 timer.async_wait(std::move(*optionalCountedHandler));
978NetworkOPsImp::setHeartbeatTimer()
982 mConsensus.parms().ledgerGRANULARITY,
984 m_job_queue.addJob(jtNETOP_TIMER,
"NetOPs.heartbeat", [this]() {
985 processHeartbeatTimer();
988 [
this]() { setHeartbeatTimer(); });
992NetworkOPsImp::setClusterTimer()
994 using namespace std::chrono_literals;
1001 processClusterTimer();
1004 [
this]() { setClusterTimer(); });
1010 JLOG(m_journal.debug()) <<
"Scheduling AccountHistory job for account "
1012 using namespace std::chrono_literals;
1014 accountHistoryTxTimer_,
1016 [
this, subInfo]() { addAccountHistoryJob(subInfo); },
1017 [
this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1021NetworkOPsImp::processHeartbeatTimer()
1024 "Heartbeat Timer", mConsensus.validating(), m_journal);
1032 std::size_t const numPeers = app_.overlay().size();
1035 if (numPeers < minPeerCount_)
1037 if (mMode != OperatingMode::DISCONNECTED)
1039 setMode(OperatingMode::DISCONNECTED);
1041 ss <<
"Node count (" << numPeers <<
") has fallen "
1042 <<
"below required minimum (" << minPeerCount_ <<
").";
1043 JLOG(m_journal.warn()) << ss.
str();
1044 CLOG(clog.
ss()) <<
"set mode to DISCONNECTED: " << ss.
str();
1049 <<
"already DISCONNECTED. too few peers (" << numPeers
1050 <<
"), need at least " << minPeerCount_;
1057 setHeartbeatTimer();
1062 if (mMode == OperatingMode::DISCONNECTED)
1064 setMode(OperatingMode::CONNECTED);
1065 JLOG(m_journal.info())
1066 <<
"Node count (" << numPeers <<
") is sufficient.";
1067 CLOG(clog.
ss()) <<
"setting mode to CONNECTED based on " << numPeers
1073 auto origMode = mMode.load();
1074 CLOG(clog.
ss()) <<
"mode: " << strOperatingMode(origMode,
true);
1075 if (mMode == OperatingMode::SYNCING)
1076 setMode(OperatingMode::SYNCING);
1077 else if (mMode == OperatingMode::CONNECTED)
1078 setMode(OperatingMode::CONNECTED);
1079 auto newMode = mMode.load();
1080 if (origMode != newMode)
1083 <<
", changing to " << strOperatingMode(newMode,
true);
1085 CLOG(clog.
ss()) <<
". ";
1088 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.
ss());
1090 CLOG(clog.
ss()) <<
"consensus phase " << to_string(mLastConsensusPhase);
1092 if (mLastConsensusPhase != currPhase)
1094 reportConsensusStateChange(currPhase);
1095 mLastConsensusPhase = currPhase;
1096 CLOG(clog.
ss()) <<
" changed to " << to_string(mLastConsensusPhase);
1098 CLOG(clog.
ss()) <<
". ";
1100 setHeartbeatTimer();
1104NetworkOPsImp::processClusterTimer()
1106 if (app_.cluster().size() == 0)
1109 using namespace std::chrono_literals;
1111 bool const update = app_.cluster().update(
1112 app_.nodeIdentity().first,
1114 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1115 ? app_.getFeeTrack().getLocalFee()
1117 app_.timeKeeper().now());
1121 JLOG(m_journal.debug()) <<
"Too soon to send cluster update";
1126 protocol::TMCluster cluster;
1127 app_.cluster().for_each([&cluster](
ClusterNode const& node) {
1128 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1133 n.set_nodename(node.
name());
1137 for (
auto& item : gossip.
items)
1139 protocol::TMLoadSource& node = *cluster.add_loadsources();
1140 node.set_name(to_string(item.address));
1141 node.set_cost(item.balance);
1143 app_.overlay().foreach(
send_if(
1155 if (mode == OperatingMode::FULL && admin)
1157 auto const consensusMode = mConsensus.mode();
1158 if (consensusMode != ConsensusMode::wrongLedger)
1160 if (consensusMode == ConsensusMode::proposing)
1163 if (mConsensus.validating())
1164 return "validating";
1174 if (isNeedNetworkLedger())
1182 m_ledgerMaster.getValidatedRules().enabled(featureBatch))
1184 JLOG(m_journal.error())
1185 <<
"Submitted transaction invalid: tfInnerBatchTxn flag present.";
1192 auto const txid = trans->getTransactionID();
1193 auto const flags = app_.getHashRouter().getFlags(txid);
1195 if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1197 JLOG(m_journal.warn()) <<
"Submitted transaction cached bad";
1204 app_.getHashRouter(),
1206 m_ledgerMaster.getValidatedRules(),
1209 if (validity != Validity::Valid)
1211 JLOG(m_journal.warn())
1212 <<
"Submitted transaction invalid: " << reason;
1218 JLOG(m_journal.warn())
1219 <<
"Exception checking transaction " << txid <<
": " << ex.
what();
1228 m_job_queue.addJob(
jtTRANSACTION,
"submitTxn", [
this, tx]() {
1230 processTransaction(t,
false,
false, FailHard::no);
1237 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1239 if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1242 JLOG(m_journal.warn()) << transaction->getID() <<
": cached bad!\n";
1243 transaction->setStatus(
INVALID);
1248 auto const view = m_ledgerMaster.getCurrentLedger();
1253 auto const sttx = *transaction->getSTransaction();
1254 if (sttx.isFlag(
tfInnerBatchTxn) && view->rules().enabled(featureBatch))
1256 transaction->setStatus(
INVALID);
1258 app_.getHashRouter().setFlags(
1259 transaction->getID(), HashRouterFlags::BAD);
1266 auto const [validity, reason] =
1267 checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config());
1269 validity == Validity::Valid,
1270 "ripple::NetworkOPsImp::processTransaction : valid validity");
1273 if (validity == Validity::SigBad)
1275 JLOG(m_journal.info()) <<
"Transaction has bad signature: " << reason;
1276 transaction->setStatus(
INVALID);
1278 app_.getHashRouter().setFlags(
1279 transaction->getID(), HashRouterFlags::BAD);
1284 app_.getMasterTransaction().canonicalize(&transaction);
1290NetworkOPsImp::processTransaction(
1296 auto ev = m_job_queue.makeLoadEvent(
jtTXN_PROC,
"ProcessTXN");
1299 if (!preProcessTransaction(transaction))
1303 doTransactionSync(transaction, bUnlimited, failType);
1305 doTransactionAsync(transaction, bUnlimited, failType);
1309NetworkOPsImp::doTransactionAsync(
1316 if (transaction->getApplying())
1319 mTransactions.push_back(
1321 transaction->setApplying();
1323 if (mDispatchState == DispatchState::none)
1325 if (m_job_queue.addJob(
1326 jtBATCH,
"transactionBatch", [
this]() { transactionBatch(); }))
1328 mDispatchState = DispatchState::scheduled;
1334NetworkOPsImp::doTransactionSync(
1341 if (!transaction->getApplying())
1343 mTransactions.push_back(
1345 transaction->setApplying();
1348 doTransactionSyncBatch(
1350 return transaction->getApplying();
1355NetworkOPsImp::doTransactionSyncBatch(
1361 if (mDispatchState == DispatchState::running)
1370 if (mTransactions.size())
1373 if (m_job_queue.addJob(
jtBATCH,
"transactionBatch", [
this]() {
1377 mDispatchState = DispatchState::scheduled;
1381 }
while (retryCallback(lock));
1387 auto ev = m_job_queue.makeLoadEvent(
jtTXN_PROC,
"ProcessTXNSet");
1390 for (
auto const& [_, tx] :
set)
1395 if (transaction->getStatus() ==
INVALID)
1397 if (!reason.
empty())
1399 JLOG(m_journal.trace())
1400 <<
"Exception checking transaction: " << reason;
1402 app_.getHashRouter().setFlags(
1403 tx->getTransactionID(), HashRouterFlags::BAD);
1408 if (!preProcessTransaction(transaction))
1419 for (
auto& transaction : candidates)
1421 if (!transaction->getApplying())
1423 transactions.
emplace_back(transaction,
false,
false, FailHard::no);
1424 transaction->setApplying();
1428 if (mTransactions.empty())
1429 mTransactions.swap(transactions);
1432 mTransactions.reserve(mTransactions.size() + transactions.
size());
1433 for (
auto& t : transactions)
1434 mTransactions.push_back(std::move(t));
1436 if (mTransactions.empty())
1438 JLOG(m_journal.debug()) <<
"No transaction to process!";
1445 "ripple::NetworkOPsImp::processTransactionSet has lock");
1447 mTransactions.begin(), mTransactions.end(), [](
auto const& t) {
1448 return t.transaction->getApplying();
1454NetworkOPsImp::transactionBatch()
1458 if (mDispatchState == DispatchState::running)
1461 while (mTransactions.size())
1472 mTransactions.
swap(transactions);
1474 !transactions.
empty(),
1475 "ripple::NetworkOPsImp::apply : non-empty transactions");
1477 mDispatchState != DispatchState::running,
1478 "ripple::NetworkOPsImp::apply : is not running");
1480 mDispatchState = DispatchState::running;
1486 bool changed =
false;
1500 if (e.failType == FailHard::yes)
1503 auto const result = app_.getTxQ().apply(
1504 app_, view, e.transaction->getSTransaction(), flags, j);
1505 e.result = result.ter;
1506 e.applied = result.applied;
1507 changed = changed || result.applied;
1516 if (
auto const l = m_ledgerMaster.getValidatedLedger())
1517 validatedLedgerIndex = l->info().seq;
1519 auto newOL = app_.openLedger().current();
1522 e.transaction->clearSubmitResult();
1526 pubProposedTransaction(
1527 newOL, e.transaction->getSTransaction(), e.result);
1528 e.transaction->setApplied();
1531 e.transaction->setResult(e.result);
1534 app_.getHashRouter().setFlags(
1535 e.transaction->getID(), HashRouterFlags::BAD);
1544 JLOG(m_journal.info())
1545 <<
"TransactionResult: " << token <<
": " << human;
1550 bool addLocal = e.local;
1554 JLOG(m_journal.debug())
1555 <<
"Transaction is now included in open ledger";
1556 e.transaction->setStatus(
INCLUDED);
1561 auto const& txCur = e.transaction->getSTransaction();
1564 for (
auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
1566 txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
1573 if (t->getApplying())
1575 submit_held.
emplace_back(t,
false,
false, FailHard::no);
1584 JLOG(m_journal.info()) <<
"Transaction is obsolete";
1585 e.transaction->setStatus(
OBSOLETE);
1589 JLOG(m_journal.debug())
1590 <<
"Transaction is likely to claim a"
1591 <<
" fee, but is queued until fee drops";
1593 e.transaction->setStatus(
HELD);
1597 m_ledgerMaster.addHeldTransaction(e.transaction);
1598 e.transaction->setQueued();
1599 e.transaction->setKept();
1605 if (e.failType != FailHard::yes)
1607 auto const lastLedgerSeq =
1608 e.transaction->getSTransaction()->at(
1609 ~sfLastLedgerSequence);
1610 auto const ledgersLeft = lastLedgerSeq
1612 m_ledgerMaster.getCurrentLedgerIndex()
1631 (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
1632 app_.getHashRouter().setFlags(
1633 e.transaction->getID(), HashRouterFlags::HELD))
1636 JLOG(m_journal.debug())
1637 <<
"Transaction should be held: " << e.result;
1638 e.transaction->setStatus(
HELD);
1639 m_ledgerMaster.addHeldTransaction(e.transaction);
1640 e.transaction->setKept();
1643 JLOG(m_journal.debug())
1644 <<
"Not holding transaction "
1645 << e.transaction->getID() <<
": "
1646 << (e.local ?
"local" :
"network") <<
", "
1647 <<
"result: " << e.result <<
" ledgers left: "
1648 << (ledgersLeft ? to_string(*ledgersLeft)
1654 JLOG(m_journal.debug())
1655 <<
"Status other than success " << e.result;
1656 e.transaction->setStatus(
INVALID);
1659 auto const enforceFailHard =
1660 e.failType == FailHard::yes && !
isTesSuccess(e.result);
1662 if (addLocal && !enforceFailHard)
1664 m_localTX->push_back(
1665 m_ledgerMaster.getCurrentLedgerIndex(),
1666 e.transaction->getSTransaction());
1667 e.transaction->setKept();
1671 ((mMode != OperatingMode::FULL) &&
1672 (e.failType != FailHard::yes) && e.local) ||
1677 app_.getHashRouter().shouldRelay(e.transaction->getID());
1678 if (
auto const sttx = *(e.transaction->getSTransaction());
1683 newOL->rules().enabled(featureBatch)))
1685 protocol::TMTransaction tx;
1689 tx.set_rawtransaction(s.
data(), s.
size());
1690 tx.set_status(protocol::tsCURRENT);
1691 tx.set_receivetimestamp(
1692 app_.timeKeeper().now().time_since_epoch().count());
1695 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1696 e.transaction->setBroadcast();
1700 if (validatedLedgerIndex)
1702 auto [fee, accountSeq, availableSeq] =
1703 app_.getTxQ().getTxRequiredFeeAndSeq(
1704 *newOL, e.transaction->getSTransaction());
1705 e.transaction->setCurrentLedgerState(
1706 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1714 e.transaction->clearApplying();
1716 if (!submit_held.
empty())
1718 if (mTransactions.empty())
1719 mTransactions.swap(submit_held);
1722 mTransactions.reserve(mTransactions.size() + submit_held.
size());
1723 for (
auto& e : submit_held)
1724 mTransactions.push_back(std::move(e));
1730 mDispatchState = DispatchState::none;
1738NetworkOPsImp::getOwnerInfo(
1743 auto root = keylet::ownerDir(account);
1744 auto sleNode = lpLedger->read(keylet::page(
root));
1751 for (
auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1753 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1756 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1758 switch (sleCur->getType())
1761 if (!jvObjects.
isMember(jss::offers))
1762 jvObjects[jss::offers] =
1765 jvObjects[jss::offers].
append(
1766 sleCur->getJson(JsonOptions::none));
1769 case ltRIPPLE_STATE:
1770 if (!jvObjects.
isMember(jss::ripple_lines))
1772 jvObjects[jss::ripple_lines] =
1776 jvObjects[jss::ripple_lines].
append(
1777 sleCur->getJson(JsonOptions::none));
1780 case ltACCOUNT_ROOT:
1785 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1792 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1796 sleNode = lpLedger->read(keylet::page(
root, uNodeDir));
1799 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1812NetworkOPsImp::isBlocked()
1814 return isAmendmentBlocked() || isUNLBlocked();
1818NetworkOPsImp::isAmendmentBlocked()
1820 return amendmentBlocked_;
1824NetworkOPsImp::setAmendmentBlocked()
1826 amendmentBlocked_ =
true;
1827 setMode(OperatingMode::CONNECTED);
1831NetworkOPsImp::isAmendmentWarned()
1833 return !amendmentBlocked_ && amendmentWarned_;
1837NetworkOPsImp::setAmendmentWarned()
1839 amendmentWarned_ =
true;
1843NetworkOPsImp::clearAmendmentWarned()
1845 amendmentWarned_ =
false;
1849NetworkOPsImp::isUNLBlocked()
1855NetworkOPsImp::setUNLBlocked()
1858 setMode(OperatingMode::CONNECTED);
1862NetworkOPsImp::clearUNLBlocked()
1864 unlBlocked_ =
false;
1868NetworkOPsImp::checkLastClosedLedger(
1877 JLOG(m_journal.trace()) <<
"NetworkOPsImp::checkLastClosedLedger";
1879 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1884 uint256 closedLedger = ourClosed->info().hash;
1885 uint256 prevClosedLedger = ourClosed->info().parentHash;
1886 JLOG(m_journal.trace()) <<
"OurClosed: " << closedLedger;
1887 JLOG(m_journal.trace()) <<
"PrevClosed: " << prevClosedLedger;
1892 auto& validations = app_.getValidations();
1893 JLOG(m_journal.debug())
1894 <<
"ValidationTrie " <<
Json::Compact(validations.getJsonTrie());
1898 peerCounts[closedLedger] = 0;
1899 if (mMode >= OperatingMode::TRACKING)
1900 peerCounts[closedLedger]++;
1902 for (
auto& peer : peerList)
1904 uint256 peerLedger = peer->getClosedLedgerHash();
1907 ++peerCounts[peerLedger];
1910 for (
auto const& it : peerCounts)
1911 JLOG(m_journal.debug()) <<
"L: " << it.first <<
" n=" << it.second;
1913 uint256 preferredLCL = validations.getPreferredLCL(
1915 m_ledgerMaster.getValidLedgerIndex(),
1918 bool switchLedgers = preferredLCL != closedLedger;
1920 closedLedger = preferredLCL;
1922 if (switchLedgers && (closedLedger == prevClosedLedger))
1925 JLOG(m_journal.info()) <<
"We won't switch to our own previous ledger";
1926 networkClosed = ourClosed->info().hash;
1927 switchLedgers =
false;
1930 networkClosed = closedLedger;
1935 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1938 consensus = app_.getInboundLedgers().acquire(
1939 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1942 (!m_ledgerMaster.canBeCurrent(consensus) ||
1943 !m_ledgerMaster.isCompatible(
1944 *consensus, m_journal.debug(),
"Not switching")))
1948 networkClosed = ourClosed->info().hash;
1952 JLOG(m_journal.warn()) <<
"We are not running on the consensus ledger";
1953 JLOG(m_journal.info()) <<
"Our LCL: " << ourClosed->info().hash
1955 JLOG(m_journal.info()) <<
"Net LCL " << closedLedger;
1957 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1959 setMode(OperatingMode::CONNECTED);
1967 switchLastClosedLedger(consensus);
1974NetworkOPsImp::switchLastClosedLedger(
1978 JLOG(m_journal.error())
1979 <<
"JUMP last closed ledger to " << newLCL->info().hash;
1981 clearNeedNetworkLedger();
1984 app_.getTxQ().processClosedLedger(app_, *newLCL,
true);
1991 auto retries = m_localTX->getTxSet();
1992 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1997 rules.
emplace(app_.config().features);
1998 app_.openLedger().accept(
2009 return app_.getTxQ().accept(app_, view);
2013 m_ledgerMaster.switchLCL(newLCL);
2015 protocol::TMStatusChange s;
2016 s.set_newevent(protocol::neSWITCHED_LEDGER);
2017 s.set_ledgerseq(newLCL->info().seq);
2018 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
2019 s.set_ledgerhashprevious(
2020 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
2021 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
2023 app_.overlay().foreach(
2028NetworkOPsImp::beginConsensus(
2034 "ripple::NetworkOPsImp::beginConsensus : nonzero input");
2036 auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
2038 JLOG(m_journal.info()) <<
"Consensus time for #" << closingInfo.seq
2039 <<
" with LCL " << closingInfo.parentHash;
2041 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
2046 if (mMode == OperatingMode::FULL)
2048 JLOG(m_journal.warn()) <<
"Don't have LCL, going to tracking";
2049 setMode(OperatingMode::TRACKING);
2050 CLOG(clog) <<
"beginConsensus Don't have LCL, going to tracking. ";
2053 CLOG(clog) <<
"beginConsensus no previous ledger. ";
2058 prevLedger->info().hash == closingInfo.parentHash,
2059 "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
2062 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
2063 "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
2066 app_.validators().setNegativeUNL(prevLedger->negativeUNL());
2067 TrustChanges const changes = app_.validators().updateTrusted(
2068 app_.getValidations().getCurrentNodeIDs(),
2069 closingInfo.parentCloseTime,
2072 app_.getHashRouter());
2074 if (!changes.
added.empty() || !changes.
removed.empty())
2076 app_.getValidations().trustChanged(changes.
added, changes.
removed);
2078 app_.getAmendmentTable().trustChanged(
2079 app_.validators().getQuorumKeys().second);
2082 mConsensus.startRound(
2083 app_.timeKeeper().closeTime(),
2091 if (mLastConsensusPhase != currPhase)
2093 reportConsensusStateChange(currPhase);
2094 mLastConsensusPhase = currPhase;
2097 JLOG(m_journal.debug()) <<
"Initiating consensus engine";
2104 auto const& peerKey = peerPos.
publicKey();
2105 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
2116 JLOG(m_journal.error())
2117 <<
"Received a proposal signed by MY KEY from a peer. This may "
2118 "indicate a misconfiguration where another node has the same "
2119 "validator key, or may be caused by unusual message routing and "
2124 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
2135 protocol::TMHaveTransactionSet msg;
2136 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
2137 msg.set_status(protocol::tsHAVE);
2138 app_.overlay().foreach(
2143 mConsensus.gotTxSet(app_.timeKeeper().closeTime(),
RCLTxSet{map});
2149 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
2151 for (
auto const& it : app_.overlay().getActivePeers())
2153 if (it && (it->getClosedLedgerHash() == deadLedger))
2155 JLOG(m_journal.trace()) <<
"Killing obsolete peer status";
2162 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
2164 if (networkClosed.
isZero())
2166 CLOG(clog) <<
"endConsensus last closed ledger is zero. ";
2176 if (((mMode == OperatingMode::CONNECTED) ||
2177 (mMode == OperatingMode::SYNCING)) &&
2183 if (!needNetworkLedger_)
2184 setMode(OperatingMode::TRACKING);
2187 if (((mMode == OperatingMode::CONNECTED) ||
2188 (mMode == OperatingMode::TRACKING)) &&
2194 auto current = m_ledgerMaster.getCurrentLedger();
2195 if (app_.timeKeeper().now() < (
current->info().parentCloseTime +
2196 2 *
current->info().closeTimeResolution))
2198 setMode(OperatingMode::FULL);
2202 beginConsensus(networkClosed, clog);
2206NetworkOPsImp::consensusViewChange()
2208 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2210 setMode(OperatingMode::CONNECTED);
2220 if (!mStreamMaps[sManifests].empty())
2224 jvObj[jss::type] =
"manifestReceived";
2227 jvObj[jss::signing_key] =
2231 jvObj[jss::signature] =
strHex(*sig);
2234 jvObj[jss::domain] = mo.
domain;
2237 for (
auto i = mStreamMaps[sManifests].begin();
2238 i != mStreamMaps[sManifests].end();)
2240 if (
auto p = i->second.lock())
2242 p->send(jvObj,
true);
2247 i = mStreamMaps[sManifests].erase(i);
2253NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
2257 : loadFactorServer{loadFeeTrack.getLoadFactor()}
2258 , loadBaseServer{loadFeeTrack.getLoadBase()}
2260 , em{
std::move(escalationMetrics)}
2270 em.has_value() != b.
em.has_value())
2276 em->minProcessingFeeLevel != b.
em->minProcessingFeeLevel ||
2277 em->openLedgerFeeLevel != b.
em->openLedgerFeeLevel ||
2278 em->referenceFeeLevel != b.
em->referenceFeeLevel);
2311 jvObj[jss::type] =
"serverStatus";
2313 jvObj[jss::load_base] = f.loadBaseServer;
2314 jvObj[jss::load_factor_server] = f.loadFactorServer;
2315 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2320 safe_cast<std::uint64_t>(f.loadFactorServer),
2322 f.em->openLedgerFeeLevel,
2324 f.em->referenceFeeLevel)
2327 jvObj[jss::load_factor] =
trunc32(loadFactor);
2328 jvObj[jss::load_factor_fee_escalation] =
2329 f.em->openLedgerFeeLevel.jsonClipped();
2330 jvObj[jss::load_factor_fee_queue] =
2331 f.em->minProcessingFeeLevel.jsonClipped();
2332 jvObj[jss::load_factor_fee_reference] =
2333 f.em->referenceFeeLevel.jsonClipped();
2336 jvObj[jss::load_factor] = f.loadFactorServer;
2350 p->send(jvObj,
true);
2367 if (!streamMap.empty())
2370 jvObj[jss::type] =
"consensusPhase";
2371 jvObj[jss::consensus] =
to_string(phase);
2373 for (
auto i = streamMap.begin(); i != streamMap.end();)
2375 if (
auto p = i->second.lock())
2377 p->send(jvObj,
true);
2382 i = streamMap.erase(i);
2398 auto const signerPublic = val->getSignerPublic();
2400 jvObj[jss::type] =
"validationReceived";
2401 jvObj[jss::validation_public_key] =
2403 jvObj[jss::ledger_hash] =
to_string(val->getLedgerHash());
2404 jvObj[jss::signature] =
strHex(val->getSignature());
2405 jvObj[jss::full] = val->isFull();
2406 jvObj[jss::flags] = val->getFlags();
2407 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2408 jvObj[jss::data] =
strHex(val->getSerializer().slice());
2411 if (
auto version = (*val)[~sfServerVersion])
2414 if (
auto cookie = (*val)[~sfCookie])
2417 if (
auto hash = (*val)[~sfValidatedHash])
2418 jvObj[jss::validated_hash] =
strHex(*hash);
2420 auto const masterKey =
2423 if (masterKey != signerPublic)
2428 if (
auto const seq = (*val)[~sfLedgerSequence])
2429 jvObj[jss::ledger_index] = *seq;
2431 if (val->isFieldPresent(sfAmendments))
2434 for (
auto const& amendment : val->getFieldV256(sfAmendments))
2435 jvObj[jss::amendments].append(
to_string(amendment));
2438 if (
auto const closeTime = (*val)[~sfCloseTime])
2439 jvObj[jss::close_time] = *closeTime;
2441 if (
auto const loadFee = (*val)[~sfLoadFee])
2442 jvObj[jss::load_fee] = *loadFee;
2444 if (
auto const baseFee = val->at(~sfBaseFee))
2445 jvObj[jss::base_fee] =
static_cast<double>(*baseFee);
2447 if (
auto const reserveBase = val->at(~sfReserveBase))
2448 jvObj[jss::reserve_base] = *reserveBase;
2450 if (
auto const reserveInc = val->at(~sfReserveIncrement))
2451 jvObj[jss::reserve_inc] = *reserveInc;
2455 if (
auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2456 baseFeeXRP && baseFeeXRP->native())
2457 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2459 if (
auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2460 reserveBaseXRP && reserveBaseXRP->native())
2461 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2463 if (
auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2464 reserveIncXRP && reserveIncXRP->native())
2465 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2474 if (jvTx.
isMember(jss::ledger_index))
2476 jvTx[jss::ledger_index] =
2484 if (
auto p = i->second.lock())
2488 [&](
Json::Value const& jv) { p->send(jv,
true); });
2508 jvObj[jss::type] =
"peerStatusChange";
2517 p->send(jvObj,
true);
2531 using namespace std::chrono_literals;
2563 <<
"recvValidation " << val->getLedgerHash() <<
" from " << source;
2579 <<
"Exception thrown for handling new validation "
2580 << val->getLedgerHash() <<
": " << e.
what();
2585 <<
"Unknown exception thrown for handling new validation "
2586 << val->getLedgerHash();
2598 ss <<
"VALIDATION: " << val->render() <<
" master_key: ";
2635 "This server is amendment blocked, and must be updated to be "
2636 "able to stay in sync with the network.";
2643 "This server has an expired validator list. validators.txt "
2644 "may be incorrectly configured or some [validator_list_sites] "
2645 "may be unreachable.";
2652 "One or more unsupported amendments have reached majority. "
2653 "Upgrade to the latest version before they are activated "
2654 "to avoid being amendment blocked.";
2655 if (
auto const expected =
2659 d[jss::expected_date] = expected->time_since_epoch().count();
2660 d[jss::expected_date_UTC] =
to_string(*expected);
2664 if (warnings.size())
2665 info[jss::warnings] = std::move(warnings);
2680 info[jss::time] =
to_string(std::chrono::floor<std::chrono::microseconds>(
2684 info[jss::network_ledger] =
"waiting";
2686 info[jss::validation_quorum] =
2694 info[jss::node_size] =
"tiny";
2697 info[jss::node_size] =
"small";
2700 info[jss::node_size] =
"medium";
2703 info[jss::node_size] =
"large";
2706 info[jss::node_size] =
"huge";
2715 info[jss::validator_list_expires] =
2716 safe_cast<Json::UInt>(when->time_since_epoch().count());
2718 info[jss::validator_list_expires] = 0;
2728 if (*when == TimeKeeper::time_point::max())
2730 x[jss::expiration] =
"never";
2731 x[jss::status] =
"active";
2738 x[jss::status] =
"active";
2740 x[jss::status] =
"expired";
2745 x[jss::status] =
"unknown";
2746 x[jss::expiration] =
"unknown";
2750#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2753#ifdef GIT_COMMIT_HASH
2754 x[jss::hash] = GIT_COMMIT_HASH;
2757 x[jss::branch] = GIT_BRANCH;
2762 info[jss::io_latency_ms] =
2770 info[jss::pubkey_validator] =
2775 info[jss::pubkey_validator] =
"none";
2785 info[jss::counters][jss::nodestore] = nodestore;
2789 info[jss::pubkey_node] =
2795 info[jss::amendment_blocked] =
true;
2809 lastClose[jss::converge_time_s] =
2814 lastClose[jss::converge_time] =
2818 info[jss::last_close] = lastClose;
2826 info[jss::network_id] =
static_cast<Json::UInt>(*netid);
2828 auto const escalationMetrics =
2836 auto const loadFactorFeeEscalation =
2838 escalationMetrics.openLedgerFeeLevel,
2840 escalationMetrics.referenceFeeLevel)
2844 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2848 info[jss::load_base] = loadBaseServer;
2849 info[jss::load_factor] =
trunc32(loadFactor);
2850 info[jss::load_factor_server] = loadFactorServer;
2857 info[jss::load_factor_fee_escalation] =
2858 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2859 info[jss::load_factor_fee_queue] =
2860 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2861 info[jss::load_factor_fee_reference] =
2862 escalationMetrics.referenceFeeLevel.jsonClipped();
2866 info[jss::load_factor] =
2867 static_cast<double>(loadFactor) / loadBaseServer;
2869 if (loadFactorServer != loadFactor)
2870 info[jss::load_factor_server] =
2871 static_cast<double>(loadFactorServer) / loadBaseServer;
2876 if (fee != loadBaseServer)
2877 info[jss::load_factor_local] =
2878 static_cast<double>(fee) / loadBaseServer;
2880 if (fee != loadBaseServer)
2881 info[jss::load_factor_net] =
2882 static_cast<double>(fee) / loadBaseServer;
2884 if (fee != loadBaseServer)
2885 info[jss::load_factor_cluster] =
2886 static_cast<double>(fee) / loadBaseServer;
2888 if (escalationMetrics.openLedgerFeeLevel !=
2889 escalationMetrics.referenceFeeLevel &&
2890 (admin || loadFactorFeeEscalation != loadFactor))
2891 info[jss::load_factor_fee_escalation] =
2892 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2893 escalationMetrics.referenceFeeLevel);
2894 if (escalationMetrics.minProcessingFeeLevel !=
2895 escalationMetrics.referenceFeeLevel)
2896 info[jss::load_factor_fee_queue] =
2897 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2898 escalationMetrics.referenceFeeLevel);
2911 XRPAmount const baseFee = lpClosed->fees().base;
2913 l[jss::seq] =
Json::UInt(lpClosed->info().seq);
2914 l[jss::hash] =
to_string(lpClosed->info().hash);
2919 l[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
2920 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2922 lpClosed->info().closeTime.time_since_epoch().count());
2927 l[jss::reserve_base_xrp] = lpClosed->fees().reserve.decimalXRP();
2928 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2931 std::abs(closeOffset.count()) >= 60)
2932 l[jss::close_time_offset] =
2940 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2944 auto lCloseTime = lpClosed->info().closeTime;
2946 if (lCloseTime <= closeTime)
2948 using namespace std::chrono_literals;
2949 auto age = closeTime - lCloseTime;
2951 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2957 info[jss::validated_ledger] = l;
2959 info[jss::closed_ledger] = l;
2963 info[jss::published_ledger] =
"none";
2964 else if (lpPublished->info().seq != lpClosed->info().seq)
2965 info[jss::published_ledger] = lpPublished->info().seq;
2970 info[jss::jq_trans_overflow] =
2972 info[jss::peer_disconnects] =
2974 info[jss::peer_disconnects_resources] =
2979 "http",
"https",
"peer",
"ws",
"ws2",
"wss",
"wss2"};
2987 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2988 port.admin_user.empty() && port.admin_password.empty()))
3002 for (
auto const& p : proto)
3003 jv[jss::protocol].append(p);
3010 auto const optPort = grpcSection.
get(
"port");
3011 if (optPort && grpcSection.get(
"ip"))
3014 jv[jss::port] = *optPort;
3016 jv[jss::protocol].
append(
"grpc");
3019 info[jss::ports] = std::move(ports);
3045 ledger->rules().enabled(featureBatch))
3063 [&](
Json::Value const& jv) { p->send(jv, true); });
3088 lpAccepted->info().hash, alpAccepted);
3092 alpAccepted->getLedger().
get() == lpAccepted.
get(),
3093 "ripple::NetworkOPsImp::pubLedger : accepted input");
3097 <<
"Publishing ledger " << lpAccepted->info().seq <<
" "
3098 << lpAccepted->info().hash;
3106 jvObj[jss::type] =
"ledgerClosed";
3107 jvObj[jss::ledger_index] = lpAccepted->info().seq;
3108 jvObj[jss::ledger_hash] =
to_string(lpAccepted->info().hash);
3110 lpAccepted->info().closeTime.time_since_epoch().count());
3114 if (!lpAccepted->rules().enabled(featureXRPFees))
3116 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
3117 jvObj[jss::reserve_base] = lpAccepted->fees().reserve.jsonClipped();
3118 jvObj[jss::reserve_inc] =
3119 lpAccepted->fees().increment.jsonClipped();
3121 jvObj[jss::txn_count] =
Json::UInt(alpAccepted->size());
3125 jvObj[jss::validated_ledgers] =
3135 p->send(jvObj,
true);
3153 p->send(jvObj,
true);
3162 static bool firstTime =
true;
3169 for (
auto& inner : outer.second)
3171 auto& subInfo = inner.second;
3172 if (subInfo.index_->separationLedgerSeq_ == 0)
3175 alpAccepted->getLedger(), subInfo);
3184 for (
auto const& accTx : *alpAccepted)
3188 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3215 "reportConsensusStateChange->pubConsensus",
3246 jvObj[jss::type] =
"transaction";
3250 jvObj[jss::transaction] =
3257 jvObj[jss::meta], *ledger, transaction, meta->
get());
3260 jvObj[jss::meta], transaction, meta->
get());
3264 if (
auto const& lookup = ledger->txRead(transaction->getTransactionID());
3265 lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3267 uint32_t
const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
3269 if (transaction->isFieldPresent(sfNetworkID))
3270 netID = transaction->getFieldU32(sfNetworkID);
3275 jvObj[jss::ctid] = *ctid;
3277 if (!ledger->open())
3278 jvObj[jss::ledger_hash] =
to_string(ledger->info().hash);
3282 jvObj[jss::ledger_index] = ledger->info().seq;
3283 jvObj[jss::transaction][jss::date] =
3284 ledger->info().closeTime.time_since_epoch().count();
3285 jvObj[jss::validated] =
true;
3286 jvObj[jss::close_time_iso] =
to_string_iso(ledger->info().closeTime);
3292 jvObj[jss::validated] =
false;
3293 jvObj[jss::ledger_current_index] = ledger->info().seq;
3296 jvObj[jss::status] = validated ?
"closed" :
"proposed";
3297 jvObj[jss::engine_result] = sToken;
3298 jvObj[jss::engine_result_code] = result;
3299 jvObj[jss::engine_result_message] = sHuman;
3301 if (transaction->getTxnType() == ttOFFER_CREATE)
3303 auto const account = transaction->getAccountID(sfAccount);
3304 auto const amount = transaction->getFieldAmount(sfTakerGets);
3307 if (account != amount.issue().account)
3315 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3323 [&]<
unsigned Version>(
3325 RPC::insertDeliverMax(
3326 jvTx[jss::transaction], transaction->getTxnType(), Version);
3328 if constexpr (Version > 1)
3330 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3331 jvTx[jss::hash] = hash;
3335 jvTx[jss::transaction][jss::hash] = hash;
3348 auto const& stTxn = transaction.
getTxn();
3352 auto const trResult = transaction.
getResult();
3367 [&](
Json::Value const& jv) { p->send(jv, true); });
3384 [&](
Json::Value const& jv) { p->send(jv, true); });
3409 auto const currLedgerSeq = ledger->seq();
3416 for (
auto const& affectedAccount : transaction.
getAffected())
3421 auto it = simiIt->second.begin();
3423 while (it != simiIt->second.end())
3434 it = simiIt->second.erase(it);
3441 auto it = simiIt->second.begin();
3442 while (it != simiIt->second.end())
3453 it = simiIt->second.erase(it);
3460 auto& subs = histoIt->second;
3461 auto it = subs.begin();
3462 while (it != subs.end())
3465 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3479 it = subs.erase(it);
3490 <<
"pubAccountTransaction: "
3491 <<
"proposed=" << iProposed <<
", accepted=" << iAccepted;
3493 if (!notify.
empty() || !accountHistoryNotify.
empty())
3495 auto const& stTxn = transaction.
getTxn();
3499 auto const trResult = transaction.
getResult();
3505 isrListener->getApiVersion(),
3506 [&](
Json::Value const& jv) { isrListener->send(jv,
true); });
3510 jvObj.
set(jss::account_history_boundary,
true);
3513 jvObj.
isMember(jss::account_history_tx_stream) ==
3515 "ripple::NetworkOPsImp::pubAccountTransaction : "
3516 "account_history_tx_stream not set");
3517 for (
auto& info : accountHistoryNotify)
3519 auto& index = info.index_;
3520 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3521 jvObj.
set(jss::account_history_tx_first,
true);
3523 jvObj.
set(jss::account_history_tx_index, index->forwardTxIndex_++);
3526 info.sink_->getApiVersion(),
3527 [&](
Json::Value const& jv) { info.sink_->send(jv,
true); });
3552 for (
auto const& affectedAccount : tx->getMentionedAccounts())
3557 auto it = simiIt->second.begin();
3559 while (it != simiIt->second.end())
3570 it = simiIt->second.erase(it);
3577 JLOG(
m_journal.
trace()) <<
"pubProposedAccountTransaction: " << iProposed;
3579 if (!notify.
empty() || !accountHistoryNotify.
empty())
3586 isrListener->getApiVersion(),
3587 [&](
Json::Value const& jv) { isrListener->send(jv,
true); });
3590 jvObj.
isMember(jss::account_history_tx_stream) ==
3592 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3593 "account_history_tx_stream not set");
3594 for (
auto& info : accountHistoryNotify)
3596 auto& index = info.index_;
3597 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3598 jvObj.
set(jss::account_history_tx_first,
true);
3599 jvObj.
set(jss::account_history_tx_index, index->forwardTxIndex_++);
3601 info.sink_->getApiVersion(),
3602 [&](
Json::Value const& jv) { info.sink_->send(jv,
true); });
3619 for (
auto const& naAccountID : vnaAccountIDs)
3622 <<
"subAccount: account: " <<
toBase58(naAccountID);
3624 isrListener->insertSubAccountInfo(naAccountID, rt);
3629 for (
auto const& naAccountID : vnaAccountIDs)
3631 auto simIterator = subMap.
find(naAccountID);
3632 if (simIterator == subMap.
end())
3636 usisElement[isrListener->getSeq()] = isrListener;
3638 subMap.
insert(simIterator, make_pair(naAccountID, usisElement));
3643 simIterator->second[isrListener->getSeq()] = isrListener;
3654 for (
auto const& naAccountID : vnaAccountIDs)
3657 isrListener->deleteSubAccountInfo(naAccountID, rt);
3674 for (
auto const& naAccountID : vnaAccountIDs)
3676 auto simIterator = subMap.
find(naAccountID);
3678 if (simIterator != subMap.
end())
3681 simIterator->second.erase(uSeq);
3683 if (simIterator->second.empty())
3686 subMap.
erase(simIterator);
3695 enum DatabaseType { Sqlite,
None };
3696 static auto const databaseType = [&]() -> DatabaseType {
3701 return DatabaseType::Sqlite;
3703 return DatabaseType::None;
3706 if (databaseType == DatabaseType::None)
3710 "ripple::NetworkOPsImp::addAccountHistoryJob : no database");
3712 <<
"AccountHistory job for account "
3725 "AccountHistoryTxStream",
3726 [
this, dbType = databaseType, subInfo]() {
3727 auto const& accountId = subInfo.
index_->accountId_;
3728 auto& lastLedgerSeq = subInfo.
index_->historyLastLedgerSeq_;
3729 auto& txHistoryIndex = subInfo.
index_->historyTxIndex_;
3732 <<
"AccountHistory job for account " <<
toBase58(accountId)
3733 <<
" started. lastLedgerSeq=" << lastLedgerSeq;
3743 auto stx = tx->getSTransaction();
3744 if (stx->getAccountID(sfAccount) == accountId &&
3745 stx->getSeqValue() == 1)
3749 for (
auto& node : meta->getNodes())
3751 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3754 if (node.isFieldPresent(sfNewFields))
3756 if (
auto inner =
dynamic_cast<STObject const*
>(
3757 node.peekAtPField(sfNewFields));
3760 if (inner->isFieldPresent(sfAccount) &&
3761 inner->getAccountID(sfAccount) == accountId)
3773 bool unsubscribe) ->
bool {
3776 sptr->send(jvObj,
true);
3786 bool unsubscribe) ->
bool {
3790 sptr->getApiVersion(),
3791 [&](
Json::Value const& jv) { sptr->send(jv,
true); });
3814 accountId, minLedger, maxLedger, marker, 0,
true};
3815 return db->newestAccountTxPage(options);
3820 "ripple::NetworkOPsImp::addAccountHistoryJob : "
3821 "getMoreTxns : invalid database type");
3831 while (lastLedgerSeq >= 2 && !subInfo.
index_->stopHistorical_)
3833 int feeChargeCount = 0;
3842 <<
"AccountHistory job for account "
3843 <<
toBase58(accountId) <<
" no InfoSub. Fee charged "
3844 << feeChargeCount <<
" times.";
3849 auto startLedgerSeq =
3850 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3852 <<
"AccountHistory job for account " <<
toBase58(accountId)
3853 <<
", working on ledger range [" << startLedgerSeq <<
","
3854 << lastLedgerSeq <<
"]";
3856 auto haveRange = [&]() ->
bool {
3859 auto haveSomeValidatedLedgers =
3861 validatedMin, validatedMax);
3863 return haveSomeValidatedLedgers &&
3864 validatedMin <= startLedgerSeq &&
3865 lastLedgerSeq <= validatedMax;
3871 <<
"AccountHistory reschedule job for account "
3872 <<
toBase58(accountId) <<
", incomplete ledger range ["
3873 << startLedgerSeq <<
"," << lastLedgerSeq <<
"]";
3879 while (!subInfo.
index_->stopHistorical_)
3882 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3887 "ripple::NetworkOPsImp::addAccountHistoryJob : "
3888 "getMoreTxns failed");
3890 <<
"AccountHistory job for account "
3891 <<
toBase58(accountId) <<
" getMoreTxns failed.";
3897 auto const& txns = dbResult->first;
3898 marker = dbResult->second;
3899 size_t num_txns = txns.size();
3900 for (
size_t i = 0; i < num_txns; ++i)
3902 auto const& [tx, meta] = txns[i];
3907 <<
"AccountHistory job for account "
3908 <<
toBase58(accountId) <<
" empty tx or meta.";
3919 "ripple::NetworkOPsImp::addAccountHistoryJob : "
3920 "getLedgerBySeq failed");
3922 <<
"AccountHistory job for account "
3923 <<
toBase58(accountId) <<
" no ledger.";
3929 tx->getSTransaction();
3934 "NetworkOPsImp::addAccountHistoryJob : "
3935 "getSTransaction failed");
3937 <<
"AccountHistory job for account "
3939 <<
" getSTransaction failed.";
3946 auto const trR = meta->getResultTER();
3948 transJson(stTxn, trR,
true, curTxLedger, mRef);
3951 jss::account_history_tx_index, txHistoryIndex--);
3952 if (i + 1 == num_txns ||
3953 txns[i + 1].first->getLedger() != tx->getLedger())
3954 jvTx.
set(jss::account_history_boundary,
true);
3956 if (isFirstTx(tx, meta))
3958 jvTx.
set(jss::account_history_tx_first,
true);
3959 sendMultiApiJson(jvTx,
false);
3962 <<
"AccountHistory job for account "
3964 <<
" done, found last tx.";
3969 sendMultiApiJson(jvTx,
false);
3976 <<
"AccountHistory job for account "
3978 <<
" paging, marker=" << marker->ledgerSeq <<
":"
3987 if (!subInfo.
index_->stopHistorical_)
3989 lastLedgerSeq = startLedgerSeq - 1;
3990 if (lastLedgerSeq <= 1)
3993 <<
"AccountHistory job for account "
3995 <<
" done, reached genesis ledger.";
4008 subInfo.
index_->separationLedgerSeq_ = ledger->seq();
4009 auto const& accountId = subInfo.
index_->accountId_;
4011 if (!ledger->exists(accountKeylet))
4014 <<
"subAccountHistoryStart, no account " <<
toBase58(accountId)
4015 <<
", no need to add AccountHistory job.";
4020 if (
auto const sleAcct = ledger->read(accountKeylet); sleAcct)
4022 if (sleAcct->getFieldU32(sfSequence) == 1)
4025 <<
"subAccountHistoryStart, genesis account "
4027 <<
" does not have tx, no need to add AccountHistory job.";
4035 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
4036 "access genesis account");
4041 subInfo.
index_->historyLastLedgerSeq_ = ledger->seq();
4042 subInfo.
index_->haveHistorical_ =
true;
4045 <<
"subAccountHistoryStart, add AccountHistory job: accountId="
4046 <<
toBase58(accountId) <<
", currentLedgerSeq=" << ledger->seq();
4056 if (!isrListener->insertSubAccountHistory(accountId))
4059 <<
"subAccountHistory, already subscribed to account "
4071 inner.
emplace(isrListener->getSeq(), ahi);
4077 simIterator->second.emplace(isrListener->getSeq(), ahi);
4091 <<
"subAccountHistory, no validated ledger yet, delay start";
4104 isrListener->deleteSubAccountHistory(account);
4118 auto& subInfoMap = simIterator->second;
4119 auto subInfoIter = subInfoMap.find(seq);
4120 if (subInfoIter != subInfoMap.end())
4122 subInfoIter->second.index_->stopHistorical_ =
true;
4127 simIterator->second.erase(seq);
4128 if (simIterator->second.empty())
4134 <<
"unsubAccountHistory, account " <<
toBase58(account)
4135 <<
", historyOnly = " << (historyOnly ?
"true" :
"false");
4143 listeners->addSubscriber(isrListener);
4147 UNREACHABLE(
"ripple::NetworkOPsImp::subBook : null book listeners");
4157 listeners->removeSubscriber(uSeq);
4169 m_standalone,
"ripple::NetworkOPsImp::acceptLedger : is standalone");
4172 Throw<std::runtime_error>(
4173 "Operation only possible in STANDALONE mode.");
4188 jvResult[jss::ledger_index] = lpClosed->info().seq;
4189 jvResult[jss::ledger_hash] =
to_string(lpClosed->info().hash);
4191 lpClosed->info().closeTime.time_since_epoch().count());
4192 if (!lpClosed->rules().enabled(featureXRPFees))
4194 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
4195 jvResult[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
4196 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
4202 jvResult[jss::validated_ledgers] =
4208 .emplace(isrListener->getSeq(), isrListener)
4218 .emplace(isrListener->getSeq(), isrListener)
4244 .emplace(isrListener->getSeq(), isrListener)
4272 jvResult[jss::random] =
to_string(uRandom);
4274 jvResult[jss::load_base] = feeTrack.getLoadBase();
4275 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4276 jvResult[jss::hostid] =
getHostId(admin);
4277 jvResult[jss::pubkey_node] =
4282 .emplace(isrListener->getSeq(), isrListener)
4300 .emplace(isrListener->getSeq(), isrListener)
4318 .emplace(isrListener->getSeq(), isrListener)
4336 .emplace(isrListener->getSeq(), isrListener)
4360 .emplace(isrListener->getSeq(), isrListener)
4378 .emplace(isrListener->getSeq(), isrListener)
4426 if (map.find(pInfo->getSeq()) != map.end())
4433#ifndef USE_NEW_BOOK_PAGE
4444 unsigned int iLimit,
4454 uint256 uTipIndex = uBookBase;
4458 stream <<
"getBookPage:" << book;
4459 stream <<
"getBookPage: uBookBase=" << uBookBase;
4460 stream <<
"getBookPage: uBookEnd=" << uBookEnd;
4461 stream <<
"getBookPage: uTipIndex=" << uTipIndex;
4470 bool bDirectAdvance =
true;
4474 unsigned int uBookEntry;
4480 while (!bDone && iLimit-- > 0)
4484 bDirectAdvance =
false;
4488 auto const ledgerIndex = view.
succ(uTipIndex, uBookEnd);
4492 sleOfferDir.
reset();
4501 uTipIndex = sleOfferDir->key();
4504 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4507 <<
"getBookPage: uTipIndex=" << uTipIndex;
4509 <<
"getBookPage: offerIndex=" << offerIndex;
4519 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4520 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4521 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4523 bool firstOwnerOffer(
true);
4529 saOwnerFunds = saTakerGets;
4531 else if (bGlobalFreeze)
4539 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4540 if (umBalanceEntry != umBalance.
end())
4544 saOwnerFunds = umBalanceEntry->second;
4545 firstOwnerOffer =
false;
4559 if (saOwnerFunds < beast::zero)
4563 saOwnerFunds.
clear();
4571 STAmount saOwnerFundsLimit = saOwnerFunds;
4583 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4586 if (saOwnerFundsLimit >= saTakerGets)
4589 saTakerGetsFunded = saTakerGets;
4595 saTakerGetsFunded = saOwnerFundsLimit;
4597 saTakerGetsFunded.
setJson(jvOffer[jss::taker_gets_funded]);
4601 saTakerGetsFunded, saDirRate, saTakerPays.
issue()))
4602 .setJson(jvOffer[jss::taker_pays_funded]);
4608 saOwnerFunds,
multiply(saTakerGetsFunded, offerRate));
4610 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4614 jvOf[jss::quality] = saDirRate.
getText();
4616 if (firstOwnerOffer)
4617 jvOf[jss::owner_funds] = saOwnerFunds.
getText();
4624 if (!
cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4626 bDirectAdvance =
true;
4631 <<
"getBookPage: offerIndex=" << offerIndex;
4651 unsigned int iLimit,
4659 MetaView lesActive(lpLedger,
tapNONE,
true);
4660 OrderBookIterator obIterator(lesActive, book);
4664 bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.
out.
account) ||
4665 lesActive.isGlobalFrozen(book.
in.
account);
4667 while (iLimit-- > 0 && obIterator.nextOffer())
4672 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4673 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4674 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4675 STAmount saDirRate = obIterator.getCurrentRate();
4681 saOwnerFunds = saTakerGets;
4683 else if (bGlobalFreeze)
4691 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4693 if (umBalanceEntry != umBalance.
end())
4697 saOwnerFunds = umBalanceEntry->second;
4703 saOwnerFunds = lesActive.accountHolds(
4709 if (saOwnerFunds.isNegative())
4713 saOwnerFunds.zero();
4720 STAmount saTakerGetsFunded;
4721 STAmount saOwnerFundsLimit = saOwnerFunds;
4733 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4736 if (saOwnerFundsLimit >= saTakerGets)
4739 saTakerGetsFunded = saTakerGets;
4744 saTakerGetsFunded = saOwnerFundsLimit;
4746 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4752 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4753 .setJson(jvOffer[jss::taker_pays_funded]);
4756 STAmount saOwnerPays = (
parityRate == offerRate)
4759 saOwnerFunds,
multiply(saTakerGetsFunded, offerRate));
4761 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4763 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4767 jvOf[jss::quality] = saDirRate.
getText();
4782 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4822 ++counters_[
static_cast<std::size_t>(om)].transitions;
4824 counters_[
static_cast<std::size_t>(om)].transitions == 1)
4826 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4827 now - processStart_)
4831 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4840 auto [counters, mode, start, initialSync] = getCounterData();
4841 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4851 auto& state = obj[jss::state_accounting][
states_[i]];
4852 state[jss::transitions] =
std::to_string(counters[i].transitions);
4853 state[jss::duration_us] =
std::to_string(counters[i].dur.count());
4857 obj[jss::initial_sync_duration_us] =
std::to_string(initialSync);
4872 boost::asio::io_context& io_svc,
T back_inserter(T... args)
Decorator for streaming out compact json.
Lightweight wrapper to tag static string.
Value & append(Value const &value)
Append value to array at the end.
bool isMember(char const *key) const
Return true if the object has a member named key.
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
A generic endpoint for log messages.
Stream trace() const
Severity stream access functions.
A metric for measuring an integral value.
void set(value_type value) const
Set the value on the gauge.
A reference to a handler for performing polled collection.
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
std::uint32_t getLoadFee() const
NetClock::time_point getReportTime() const
PublicKey const & identity() const
std::size_t size() const
The number of nodes in the cluster list.
std::string SERVER_DOMAIN
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
int RELAY_UNTRUSTED_VALIDATIONS
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
A pool of threads to perform work.
Json::Value getJson(int c=0)
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
std::uint32_t getClusterFee() const
std::uint32_t getLocalFee() const
std::uint32_t getLoadBase() const
std::uint32_t getRemoteFee() const
std::uint32_t getLoadFactor() const
void heartbeat()
Reset the stall detection timer.
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
std::uint64_t initialSyncUs_
CounterData getCounterData() const
std::chrono::steady_clock::time_point start_
static std::array< Json::StaticString const, 5 > const states_
std::chrono::steady_clock::time_point const processStart_
Transaction with input flags and results to be applied in batches.
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
std::shared_ptr< Transaction > const transaction
void processClusterTimer()
boost::asio::steady_timer accountHistoryTxTimer_
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
std::vector< TransactionStatus > mTransactions
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
std::optional< PublicKey > const validatorPK_
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
SubInfoMapType mSubAccount
std::optional< PublicKey > const validatorMasterPK_
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
std::atomic< bool > unlBlocked_
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
std::atomic< bool > needNetworkLedger_
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
void setMode(OperatingMode om) override
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
void clearNeedNetworkLedger() override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
DispatchState mDispatchState
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
void reportFeeChange() override
void processHeartbeatTimer()
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
bool subTransactions(InfoSub::ref ispListener) override
std::atomic< bool > amendmentWarned_
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
void getCountsJson(Json::Value &obj)
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Writable ledger view that accumulates state and tx changes.
BookListeners::pointer getBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &alTx, MultiApiJson const &jvObj)
BookListeners::pointer makeBookListeners(Book const &)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Represents a set of transactions in RCLConsensus.
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Collects logging information.
std::unique_ptr< std::stringstream > const & ss()
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
std::string getText() const override
Issue const & issue() const
std::optional< T > get(std::string const &name) const
std::size_t size() const noexcept
void const * data() const noexcept
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
std::chrono::seconds closeOffset() const
time_point closeTime() const
Returns the predicted close time, in network time.
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Validator keys and manifest as set in configuration file.
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Json::Value jsonClipped() const
static constexpr std::size_t size()
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
T emplace_back(T... args)
@ arrayValue
array value (ordered list)
@ objectValue
object value (collection of name/value pairs).
void rngfill(void *const buffer, std::size_t const bytes, Generator &g)
std::string const & getVersionString()
Server version.
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Encodes ledger sequence, transaction index, and network ID into a CTID string.
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
void insertNFTSyntheticInJson(Json::Value &, std::shared_ptr< STTx const > const &, TxMeta const &)
Adds common synthetic fields to transaction-related JSON responses.
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Keylet account(AccountID const &id) noexcept
AccountID root.
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Rate rate(Env &env, Account const &account, std::uint32_t const &seq)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
STAmount divide(STAmount const &amount, Rate const &rate)
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
std::uint64_t getQuality(uint256 const &uBase)
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
auto constexpr muldiv_max
std::unique_ptr< LocalTxs > make_LocalTxs()
STAmount amountFromQuality(std::uint64_t rate)
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
@ warnRPC_EXPIRED_VALIDATOR_LIST
@ warnRPC_UNSUPPORTED_MAJORITY
@ warnRPC_AMENDMENT_BLOCKED
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
AccountID calcAccountID(PublicKey const &pk)
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Json::Value rpcError(int iError)
bool isTefFailure(TER x) noexcept
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
std::string strHex(FwdIt begin, FwdIt end)
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
void forAllApiVersions(Fn const &fn, Args &&... args)
bool isTerRetry(TER x) noexcept
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
uint256 getQualityNext(uint256 const &uBase)
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
bool isTesSuccess(TER x) noexcept
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
std::string to_string_iso(date::sys_time< Duration > tp)
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
std::string to_string(base_uint< Bits, Tag > const &a)
FeeSetup setup_FeeVote(Section const &section)
bool isTemMalformed(TER x) noexcept
Number root(Number f, unsigned d)
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
constexpr std::size_t maxPoppedTransactions
bool transResultInfo(TER code, std::string &token, std::string &text)
bool isTelLocal(TER x) noexcept
uint256 getBookBase(Book const &book)
constexpr std::uint32_t tfInnerBatchTxn
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
static std::uint32_t trunc32(std::uint64_t v)
static auto const genesisAccountId
T set_intersection(T... args)
std::string serialized
The manifest in serialized form.
std::uint32_t sequence
The sequence number of this manifest.
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Server fees published on server subscription.
bool operator!=(ServerFeeSummary const &b) const
ServerFeeSummary()=default
std::optional< TxQ::Metrics > em
std::uint32_t loadFactorServer
bool operator==(ServerFeeSummary const &b) const
std::uint32_t loadBaseServer
decltype(initialSyncUs_) initialSyncUs
decltype(counters_) counters
std::uint64_t transitions
std::chrono::microseconds dur
beast::insight::Gauge full_transitions
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
beast::insight::Hook hook
beast::insight::Gauge connected_duration
beast::insight::Gauge tracking_duration
beast::insight::Gauge connected_transitions
beast::insight::Gauge disconnected_transitions
beast::insight::Gauge syncing_duration
beast::insight::Gauge tracking_transitions
beast::insight::Gauge full_duration
beast::insight::Gauge disconnected_duration
beast::insight::Gauge syncing_transitions
std::uint32_t historyLastLedgerSeq_
std::uint32_t separationLedgerSeq_
AccountID const accountId_
std::uint32_t forwardTxIndex_
std::atomic< bool > stopHistorical_
std::int32_t historyTxIndex_
SubAccountHistoryIndex(AccountID const &accountId)
std::shared_ptr< SubAccountHistoryIndex > index_
std::shared_ptr< SubAccountHistoryIndex > index_
Represents a transfer rate.
Data format for exchanging consumption information across peers.
std::vector< Item > items
Changes in trusted nodes after updating validator list.
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
IsMemberResult isMember(char const *key) const
void set(char const *key, auto const &v)
Select all peers (except optional excluded) that are in our cluster.
Sends a message to all peers.
T time_since_epoch(T... args)