#include <xrpld/app/consensus/RCLConsensus.h>
#include <xrpld/app/consensus/RCLValidations.h>
#include <xrpld/app/ledger/AcceptedLedger.h>
#include <xrpld/app/ledger/InboundLedgers.h>
#include <xrpld/app/ledger/LedgerMaster.h>
#include <xrpld/app/ledger/LedgerToJson.h>
#include <xrpld/app/ledger/LocalTxs.h>
#include <xrpld/app/ledger/OpenLedger.h>
#include <xrpld/app/ledger/OrderBookDB.h>
#include <xrpld/app/ledger/TransactionMaster.h>
#include <xrpld/app/main/LoadManager.h>
#include <xrpld/app/main/Tuning.h>
#include <xrpld/app/misc/AmendmentTable.h>
#include <xrpld/app/misc/DeliverMax.h>
#include <xrpld/app/misc/HashRouter.h>
#include <xrpld/app/misc/LoadFeeTrack.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/app/misc/Transaction.h>
#include <xrpld/app/misc/TxQ.h>
#include <xrpld/app/misc/ValidatorKeys.h>
#include <xrpld/app/misc/ValidatorList.h>
#include <xrpld/app/misc/detail/AccountTxPaging.h>
#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
#include <xrpld/app/tx/apply.h>
#include <xrpld/consensus/Consensus.h>
#include <xrpld/consensus/ConsensusParms.h>
#include <xrpld/overlay/Cluster.h>
#include <xrpld/overlay/Overlay.h>
#include <xrpld/overlay/predicates.h>
#include <xrpld/rpc/BookChanges.h>
#include <xrpld/rpc/CTID.h>
#include <xrpld/rpc/DeliveredAmount.h>
#include <xrpld/rpc/MPTokenIssuanceID.h>
#include <xrpld/rpc/ServerHandler.h>

#include <xrpl/basics/UptimeClock.h>
#include <xrpl/basics/mulDiv.h>
#include <xrpl/basics/safe_cast.h>
#include <xrpl/basics/scope.h>
#include <xrpl/beast/utility/rngfill.h>
#include <xrpl/core/PerfLog.h>
#include <xrpl/crypto/RFC1751.h>
#include <xrpl/crypto/csprng.h>
#include <xrpl/protocol/BuildInfo.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/MultiApiJson.h>
#include <xrpl/protocol/NFTSyntheticSerializer.h>
#include <xrpl/protocol/RPCErr.h>
#include <xrpl/protocol/TxFlags.h>
#include <xrpl/protocol/jss.h>
#include <xrpl/resource/Fees.h>
#include <xrpl/resource/ResourceManager.h>

#include <boost/asio/ip/host_name.hpp>
#include <boost/asio/steady_timer.hpp>
90 "xrpl::NetworkOPsImp::TransactionStatus::TransactionStatus : "
189 return !(*
this != b);
208 boost::asio::io_context& io_svc,
223 app.getInboundTransactions(),
224 beast::get_abstract_clock<
std::chrono::steady_clock>(),
226 app_.logs().journal(
"LedgerConsensus"))
407 getServerInfo(
bool human,
bool admin,
bool counters)
override;
433 TER result)
override;
528 catch (boost::system::system_error
const& e)
530 JLOG(
m_journal.
error()) <<
"NetworkOPs: heartbeatTimer cancel error: " << e.what();
537 catch (boost::system::system_error
const& e)
539 JLOG(
m_journal.
error()) <<
"NetworkOPs: clusterTimer cancel error: " << e.what();
546 catch (boost::system::system_error
const& e)
548 JLOG(
m_journal.
error()) <<
"NetworkOPs: accountHistoryTxTimer cancel error: " << e.what();
552 using namespace std::chrono_literals;
562 boost::asio::steady_timer& timer,
742 template <
class Handler>
744 :
hook(collector->make_hook(handler))
747 ,
syncing_duration(collector->make_gauge(
"State_Accounting",
"Syncing_duration"))
748 ,
tracking_duration(collector->make_gauge(
"State_Accounting",
"Tracking_duration"))
749 ,
full_duration(collector->make_gauge(
"State_Accounting",
"Full_duration"))
754 ,
full_transitions(collector->make_gauge(
"State_Accounting",
"Full_transitions"))
842 static std::string const hostname = boost::asio::ip::host_name();
849 static std::string const shroudedHostId = [
this]() {
855 return shroudedHostId;
870 boost::asio::steady_timer& timer,
876 if (
auto optionalCountedHandler =
878 if ((e.value() == boost::system::errc::success) && (!m_job_queue.isStopped()))
883 if (e.value() != boost::system::errc::success && e.value() != boost::asio::error::operation_aborted)
886 JLOG(m_journal.error()) <<
"Timer got error '" << e.message() <<
"'. Restarting timer.";
891 timer.expires_after(expiry_time);
892 timer.async_wait(std::move(*optionalCountedHandler));
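// Illustrative sketch (not part of this file): the generic re-arm pattern that
// setTimer() wraps, assuming a boost::asio::steady_timer `t`, a 100ms period and
// a `tick()` callback. On success the work runs and the timer is re-armed; an
// operation_aborted error (cancellation during shutdown) is swallowed; any other
// error is logged and the timer is restarted.
//
//   void arm(boost::asio::steady_timer& t, std::function<void()> tick)
//   {
//       t.expires_after(std::chrono::milliseconds(100));
//       t.async_wait([&t, tick](boost::system::error_code const& ec) {
//           if (ec == boost::asio::error::operation_aborted)
//               return;           // timer cancelled; do not re-arm
//           if (!ec)
//               tick();           // run the periodic work
//           arm(t, tick);         // schedule the next expiry either way
//       });
//   }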
NetworkOPsImp::setHeartbeatTimer()

mConsensus.parms().ledgerGRANULARITY,
[this]() { m_job_queue.addJob(jtNETOP_TIMER, "NetHeart", [this]() { processHeartbeatTimer(); }); },
[this]() { setHeartbeatTimer(); });

NetworkOPsImp::setClusterTimer()

using namespace std::chrono_literals;

[this]() { m_job_queue.addJob(jtNETOP_CLUSTER, "NetCluster", [this]() { processClusterTimer(); }); },
[this]() { setClusterTimer(); });

JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account " << toBase58(subInfo.index_->accountId_);
using namespace std::chrono_literals;

accountHistoryTxTimer_,
[this, subInfo]() { addAccountHistoryJob(subInfo); },
[this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
NetworkOPsImp::processHeartbeatTimer()

std::size_t const numPeers = app_.overlay().size();

if (numPeers < minPeerCount_)

if (mMode != OperatingMode::DISCONNECTED)

setMode(OperatingMode::DISCONNECTED);

ss << "Node count (" << numPeers << ") has fallen "
   << "below required minimum (" << minPeerCount_ << ").";
JLOG(m_journal.warn()) << ss.str();
CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();

CLOG(clog.ss()) << "already DISCONNECTED. too few peers (" << numPeers << "), need at least "

if (mMode == OperatingMode::DISCONNECTED)

setMode(OperatingMode::CONNECTED);
JLOG(m_journal.info()) << "Node count (" << numPeers << ") is sufficient.";
CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers << " peers. ";

auto origMode = mMode.load();
CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
if (mMode == OperatingMode::SYNCING)
    setMode(OperatingMode::SYNCING);
else if (mMode == OperatingMode::CONNECTED)
    setMode(OperatingMode::CONNECTED);
auto newMode = mMode.load();
if (origMode != newMode)

CLOG(clog.ss()) << ", changing to " << strOperatingMode(newMode, true);

CLOG(clog.ss()) << ". ";

mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());

CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);

if (mLastConsensusPhase != currPhase)

reportConsensusStateChange(currPhase);
mLastConsensusPhase = currPhase;
CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);

CLOG(clog.ss()) << ". ";

setHeartbeatTimer();
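// Context note (not part of this file): the heartbeat above walks the server's
// operating-mode ladder, which in NetworkOPs is ordered roughly
//
//   enum class OperatingMode { DISCONNECTED, CONNECTED, SYNCING, TRACKING, FULL };
//
// Dropping below minPeerCount_ forces DISCONNECTED, regaining peers restores
// CONNECTED, and consensus progress (see endConsensus further down) promotes the
// server toward TRACKING and FULL.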
NetworkOPsImp::processClusterTimer()

if (app_.cluster().size() == 0)

using namespace std::chrono_literals;

bool const update = app_.cluster().update(
    app_.nodeIdentity().first,
    (m_ledgerMaster.getValidatedLedgerAge() <= 4min) ? app_.getFeeTrack().getLocalFee() : 0,
    app_.timeKeeper().now());

JLOG(m_journal.debug()) << "Too soon to send cluster update";

protocol::TMCluster cluster;
app_.cluster().for_each([&cluster](ClusterNode const& node) {
    protocol::TMClusterNode& n = *cluster.add_clusternodes();

n.set_nodename(node.name());

for (auto& item : gossip.items)

protocol::TMLoadSource& node = *cluster.add_loadsources();
node.set_name(to_string(item.address));
node.set_cost(item.balance);
NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin) const

if (mode == OperatingMode::FULL && admin)

auto const consensusMode = mConsensus.mode();
if (consensusMode != ConsensusMode::wrongLedger)

if (consensusMode == ConsensusMode::proposing)

if (mConsensus.validating())
    return "validating";
if (isNeedNetworkLedger())

if (iTrans->isFlag(tfInnerBatchTxn) && m_ledgerMaster.getValidatedRules().enabled(featureBatch))

JLOG(m_journal.error()) << "Submitted transaction invalid: tfInnerBatchTxn flag present.";

auto const txid = trans->getTransactionID();
auto const flags = app_.getHashRouter().getFlags(txid);

if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)

JLOG(m_journal.warn()) << "Submitted transaction cached bad";

auto const [validity, reason] =
    checkValidity(app_.getHashRouter(), *trans, m_ledgerMaster.getValidatedRules(), app_.config());

if (validity != Validity::Valid)

JLOG(m_journal.warn()) << "Submitted transaction invalid: " << reason;

JLOG(m_journal.warn()) << "Exception checking transaction " << txid << ": " << ex.what();

m_job_queue.addJob(jtTRANSACTION, "SubmitTxn", [this, tx]() {

processTransaction(t, false, false, FailHard::no);

auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());

if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)

JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
transaction->setStatus(INVALID);

auto const view = m_ledgerMaster.getCurrentLedger();

auto const sttx = *transaction->getSTransaction();
if (sttx.isFlag(tfInnerBatchTxn) && view->rules().enabled(featureBatch))

transaction->setStatus(INVALID);

app_.getHashRouter().setFlags(transaction->getID(), HashRouterFlags::BAD);

auto const [validity, reason] = checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config());
XRPL_ASSERT(validity == Validity::Valid, "xrpl::NetworkOPsImp::processTransaction : valid validity");

if (validity == Validity::SigBad)

JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
transaction->setStatus(INVALID);

app_.getHashRouter().setFlags(transaction->getID(), HashRouterFlags::BAD);

app_.getMasterTransaction().canonicalize(&transaction);
NetworkOPsImp::processTransaction(

auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");

if (!preProcessTransaction(transaction))

doTransactionSync(transaction, bUnlimited, failType);

doTransactionAsync(transaction, bUnlimited, failType);

if (transaction->getApplying())

mTransactions.push_back(TransactionStatus(transaction, bUnlimited, false, failType));
transaction->setApplying();

if (mDispatchState == DispatchState::none)

if (m_job_queue.addJob(jtBATCH, "TxBatchAsync", [this]() { transactionBatch(); }))

mDispatchState = DispatchState::scheduled;
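// Illustrative sketch (not part of this file) of the dispatch pattern used by
// doTransactionAsync/doTransactionSync: submitters append to a shared vector
// under a mutex and schedule at most one batch job; the batch job later drains
// the vector. The names below (queue, mtx, scheduled, dispatch) are invented for
// the sketch.
//
//   std::vector<TransactionStatus> queue;   // guarded by mtx
//   std::mutex mtx;
//   bool scheduled = false;
//
//   void enqueue(TransactionStatus ts)
//   {
//       std::lock_guard lock(mtx);
//       queue.push_back(std::move(ts));
//       if (!scheduled && jobQueue.addJob(jtBATCH, "TxBatch", [] { dispatch(); }))
//           scheduled = true;                // keep a single batch job in flight
//   }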
if (!transaction->getApplying())

mTransactions.push_back(TransactionStatus(transaction, bUnlimited, true, failType));
transaction->setApplying();

doTransactionSyncBatch(

NetworkOPsImp::doTransactionSyncBatch(

if (mDispatchState == DispatchState::running)

if (mTransactions.size())

if (m_job_queue.addJob(jtBATCH, "TxBatchSync", [this]() { transactionBatch(); }))

mDispatchState = DispatchState::scheduled;

} while (retryCallback(lock));
auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXNSet");

for (auto const& [_, tx] : set)

if (transaction->getStatus() == INVALID)

if (!reason.empty())

JLOG(m_journal.trace()) << "Exception checking transaction: " << reason;

app_.getHashRouter().setFlags(tx->getTransactionID(), HashRouterFlags::BAD);

if (!preProcessTransaction(transaction))

for (auto& transaction : candidates)

if (!transaction->getApplying())

transactions.emplace_back(transaction, false, false, FailHard::no);
transaction->setApplying();

if (mTransactions.empty())
    mTransactions.swap(transactions);

mTransactions.reserve(mTransactions.size() + transactions.size());
for (auto& t : transactions)
    mTransactions.push_back(std::move(t));

if (mTransactions.empty())

JLOG(m_journal.debug()) << "No transaction to process!";

XRPL_ASSERT(lock.owns_lock(), "xrpl::NetworkOPsImp::processTransactionSet has lock");

mTransactions.begin(), mTransactions.end(), [](auto const& t) { return t.transaction->getApplying(); });
NetworkOPsImp::transactionBatch()

if (mDispatchState == DispatchState::running)

while (mTransactions.size())

mTransactions.swap(transactions);
XRPL_ASSERT(!transactions.empty(), "xrpl::NetworkOPsImp::apply : non-empty transactions");
XRPL_ASSERT(mDispatchState != DispatchState::running, "xrpl::NetworkOPsImp::apply : is not running");

mDispatchState = DispatchState::running;

bool changed = false;

if (e.failType == FailHard::yes)

auto const result = app_.getTxQ().apply(app_, view, e.transaction->getSTransaction(), flags, j);
e.result = result.ter;
e.applied = result.applied;
changed = changed || result.applied;

if (auto const l = m_ledgerMaster.getValidatedLedger())
    validatedLedgerIndex = l->header().seq;

auto newOL = app_.openLedger().current();

e.transaction->clearSubmitResult();

pubProposedTransaction(newOL, e.transaction->getSTransaction(), e.result);
e.transaction->setApplied();

e.transaction->setResult(e.result);

app_.getHashRouter().setFlags(e.transaction->getID(), HashRouterFlags::BAD);

JLOG(m_journal.info()) << "TransactionResult: " << token << ": " << human;

bool addLocal = e.local;

JLOG(m_journal.debug()) << "Transaction is now included in open ledger";
e.transaction->setStatus(INCLUDED);

auto const& txCur = e.transaction->getSTransaction();

for (auto txNext = m_ledgerMaster.popAcctTransaction(txCur); txNext && count < maxPoppedTransactions;
     txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)

if (t->getApplying())

submit_held.emplace_back(t, false, false, FailHard::no);

JLOG(m_journal.info()) << "Transaction is obsolete";
e.transaction->setStatus(OBSOLETE);

JLOG(m_journal.debug()) << "Transaction is likely to claim a"
                        << " fee, but is queued until fee drops";

e.transaction->setStatus(HELD);

m_ledgerMaster.addHeldTransaction(e.transaction);
e.transaction->setQueued();
e.transaction->setKept();

if (e.failType != FailHard::yes)

auto const lastLedgerSeq = e.transaction->getSTransaction()->at(~sfLastLedgerSequence);
auto const ledgersLeft = lastLedgerSeq ? *lastLedgerSeq - m_ledgerMaster.getCurrentLedgerIndex()

if (e.local || (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
    app_.getHashRouter().setFlags(e.transaction->getID(), HashRouterFlags::HELD))

JLOG(m_journal.debug()) << "Transaction should be held: " << e.result;
e.transaction->setStatus(HELD);
m_ledgerMaster.addHeldTransaction(e.transaction);
e.transaction->setKept();

JLOG(m_journal.debug())
    << "Not holding transaction " << e.transaction->getID() << ": "
    << (e.local ? "local" : "network") << ", "
    << "result: " << e.result
    << " ledgers left: " << (ledgersLeft ? to_string(*ledgersLeft) : "unspecified");

JLOG(m_journal.debug()) << "Status other than success " << e.result;
e.transaction->setStatus(INVALID);

auto const enforceFailHard = e.failType == FailHard::yes && !isTesSuccess(e.result);

if (addLocal && !enforceFailHard)

m_localTX->push_back(m_ledgerMaster.getCurrentLedgerIndex(), e.transaction->getSTransaction());
e.transaction->setKept();

if ((e.applied || ((mMode != OperatingMode::FULL) && (e.failType != FailHard::yes) && e.local) ||

auto const toSkip = app_.getHashRouter().shouldRelay(e.transaction->getID());
if (auto const sttx = *(e.transaction->getSTransaction()); toSkip &&

protocol::TMTransaction tx;

tx.set_rawtransaction(s.data(), s.size());
tx.set_status(protocol::tsCURRENT);
tx.set_receivetimestamp(app_.timeKeeper().now().time_since_epoch().count());

app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
e.transaction->setBroadcast();

if (validatedLedgerIndex)

auto [fee, accountSeq, availableSeq] =
    app_.getTxQ().getTxRequiredFeeAndSeq(*newOL, e.transaction->getSTransaction());
e.transaction->setCurrentLedgerState(*validatedLedgerIndex, fee, accountSeq, availableSeq);

e.transaction->clearApplying();

if (!submit_held.empty())

if (mTransactions.empty())
    mTransactions.swap(submit_held);

mTransactions.reserve(mTransactions.size() + submit_held.size());
for (auto& e : submit_held)
    mTransactions.push_back(std::move(e));

mDispatchState = DispatchState::none;
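// Context note (not part of this file): apply() drains one batch. For each
// TransactionStatus `e`, TxQ::apply runs against the open ledger and the TER
// outcome drives the bookkeeping seen above, roughly:
//
//   if (e.applied)                      e.transaction->setStatus(INCLUDED);
//   else if (result was terQUEUED)      e.transaction->setStatus(HELD);     // queued in TxQ
//   else if (result is obsolete)        e.transaction->setStatus(OBSOLETE);
//   else if (retryable and holdable)    e.transaction->setStatus(HELD);     // retried next ledger
//   else                                e.transaction->setStatus(INVALID);
//
// Locally submitted transactions are additionally kept in m_localTX so they can
// be re-applied to future open ledgers, and applied or relayable ones are
// broadcast to peers via the overlay.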
auto root = keylet::ownerDir(account);
auto sleNode = lpLedger->read(keylet::page(root));

for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))

auto sleCur = lpLedger->read(keylet::child(uDirEntry));
XRPL_ASSERT(sleCur, "xrpl::NetworkOPsImp::getOwnerInfo : non-null child SLE");

switch (sleCur->getType())

if (!jvObjects.isMember(jss::offers))

jvObjects[jss::offers].append(sleCur->getJson(JsonOptions::none));

case ltRIPPLE_STATE:
    if (!jvObjects.isMember(jss::ripple_lines))

jvObjects[jss::ripple_lines].append(sleCur->getJson(JsonOptions::none));

case ltACCOUNT_ROOT:

"xrpl::NetworkOPsImp::getOwnerInfo : invalid "

uNodeDir = sleNode->getFieldU64(sfIndexNext);

sleNode = lpLedger->read(keylet::page(root, uNodeDir));
XRPL_ASSERT(sleNode, "xrpl::NetworkOPsImp::getOwnerInfo : read next page");
NetworkOPsImp::isBlocked()

return isAmendmentBlocked() || isUNLBlocked();

NetworkOPsImp::isAmendmentBlocked()

return amendmentBlocked_;

NetworkOPsImp::setAmendmentBlocked()

amendmentBlocked_ = true;
setMode(OperatingMode::CONNECTED);

NetworkOPsImp::isAmendmentWarned()

return !amendmentBlocked_ && amendmentWarned_;

NetworkOPsImp::setAmendmentWarned()

amendmentWarned_ = true;

NetworkOPsImp::clearAmendmentWarned()

amendmentWarned_ = false;

NetworkOPsImp::isUNLBlocked()

NetworkOPsImp::setUNLBlocked()

setMode(OperatingMode::CONNECTED);

NetworkOPsImp::clearUNLBlocked()

unlBlocked_ = false;
JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";

auto const ourClosed = m_ledgerMaster.getClosedLedger();

uint256 closedLedger = ourClosed->header().hash;
uint256 prevClosedLedger = ourClosed->header().parentHash;
JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;

auto& validations = app_.getValidations();
JLOG(m_journal.debug()) << "ValidationTrie " << Json::Compact(validations.getJsonTrie());

peerCounts[closedLedger] = 0;
if (mMode >= OperatingMode::TRACKING)
    peerCounts[closedLedger]++;

for (auto& peer : peerList)

uint256 peerLedger = peer->getClosedLedgerHash();

++peerCounts[peerLedger];

for (auto const& it : peerCounts)
    JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;

uint256 preferredLCL = validations.getPreferredLCL(

m_ledgerMaster.getValidLedgerIndex(),

bool switchLedgers = preferredLCL != closedLedger;

closedLedger = preferredLCL;

if (switchLedgers && (closedLedger == prevClosedLedger))

JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
networkClosed = ourClosed->header().hash;
switchLedgers = false;

networkClosed = closedLedger;

auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);

consensus = app_.getInboundLedgers().acquire(closedLedger, 0, InboundLedger::Reason::CONSENSUS);

(!m_ledgerMaster.canBeCurrent(consensus) ||
 !m_ledgerMaster.isCompatible(*consensus, m_journal.debug(), "Not switching")))

networkClosed = ourClosed->header().hash;

JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
JLOG(m_journal.info()) << "Our LCL: " << ourClosed->header().hash << getJson({*ourClosed, {}});
JLOG(m_journal.info()) << "Net LCL " << closedLedger;

if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))

setMode(OperatingMode::CONNECTED);

switchLastClosedLedger(consensus);
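// Context note (not part of this file): checkLastClosedLedger() tallies which
// last-closed-ledger hash this node and its peers are on, then asks the
// validation trie (getPreferredLCL) which hash the network prefers. A sketch of
// the tally, assuming `peerCounts` is a hash_map<uint256, std::uint32_t>:
//
//   hash_map<uint256, std::uint32_t> peerCounts;
//   peerCounts[ourClosed] = (mMode >= OperatingMode::TRACKING) ? 1 : 0;
//   for (auto const& peer : peers)
//       ++peerCounts[peer->getClosedLedgerHash()];
//   // validations.getPreferredLCL(ourClosed, validSeq, peerCounts) picks the
//   // winner; switching back to our own parent ledger is explicitly refused.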
JLOG(m_journal.error()) << "JUMP last closed ledger to " << newLCL->header().hash;

clearNeedNetworkLedger();

app_.getTxQ().processClosedLedger(app_, *newLCL, true);

auto retries = m_localTX->getTxSet();
auto const lastVal = app_.getLedgerMaster().getValidatedLedger();

rules.emplace(app_.config().features);
app_.openLedger().accept(

return app_.getTxQ().accept(app_, view);

m_ledgerMaster.switchLCL(newLCL);

protocol::TMStatusChange s;
s.set_newevent(protocol::neSWITCHED_LEDGER);
s.set_ledgerseq(newLCL->header().seq);
s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
s.set_ledgerhashprevious(newLCL->header().parentHash.begin(), newLCL->header().parentHash.size());
s.set_ledgerhash(newLCL->header().hash.begin(), newLCL->header().hash.size());
XRPL_ASSERT(networkClosed.isNonZero(), "xrpl::NetworkOPsImp::beginConsensus : nonzero input");

auto closingInfo = m_ledgerMaster.getCurrentLedger()->header();

JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq << " with LCL " << closingInfo.parentHash;

auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);

if (mMode == OperatingMode::FULL)

JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
setMode(OperatingMode::TRACKING);
CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";

CLOG(clog) << "beginConsensus no previous ledger. ";

prevLedger->header().hash == closingInfo.parentHash,
"xrpl::NetworkOPsImp::beginConsensus : prevLedger hash matches "

closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->header().hash,
"xrpl::NetworkOPsImp::beginConsensus : closedLedger parent matches "

app_.validators().setNegativeUNL(prevLedger->negativeUNL());
TrustChanges const changes = app_.validators().updateTrusted(
    app_.getValidations().getCurrentNodeIDs(),
    closingInfo.parentCloseTime,

app_.getHashRouter());

if (!changes.added.empty() || !changes.removed.empty())

app_.getValidations().trustChanged(changes.added, changes.removed);

app_.getAmendmentTable().trustChanged(app_.validators().getQuorumKeys().second);

mConsensus.startRound(
    app_.timeKeeper().closeTime(), networkClosed, prevLedger, changes.removed, changes.added, clog);

if (mLastConsensusPhase != currPhase)

reportConsensusStateChange(currPhase);
mLastConsensusPhase = currPhase;

JLOG(m_journal.debug()) << "Initiating consensus engine";
auto const& peerKey = peerPos.publicKey();
if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)

JLOG(m_journal.error()) << "Received a proposal signed by MY KEY from a peer. This may "
                           "indicate a misconfiguration where another node has the same "
                           "validator key, or may be caused by unusual message routing and "

return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);

protocol::TMHaveTransactionSet msg;
msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
msg.set_status(protocol::tsHAVE);

mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
uint256 deadLedger = m_ledgerMaster.getClosedLedger()->header().parentHash;

for (auto const& it : app_.overlay().getActivePeers())

if (it && (it->getClosedLedgerHash() == deadLedger))

JLOG(m_journal.trace()) << "Killing obsolete peer status";

bool ledgerChange = checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);

if (networkClosed.isZero())

CLOG(clog) << "endConsensus last closed ledger is zero. ";

if (((mMode == OperatingMode::CONNECTED) || (mMode == OperatingMode::SYNCING)) && !ledgerChange)

if (!needNetworkLedger_)
    setMode(OperatingMode::TRACKING);

if (((mMode == OperatingMode::CONNECTED) || (mMode == OperatingMode::TRACKING)) && !ledgerChange)

auto current = m_ledgerMaster.getCurrentLedger();
if (app_.timeKeeper().now() < (current->header().parentCloseTime + 2 * current->header().closeTimeResolution))

setMode(OperatingMode::FULL);

beginConsensus(networkClosed, clog);

NetworkOPsImp::consensusViewChange()

if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))

setMode(OperatingMode::CONNECTED);
if (!mStreamMaps[sManifests].empty())

jvObj[jss::type] = "manifestReceived";

jvObj[jss::signature] = strHex(*sig);

jvObj[jss::domain] = mo.domain;

for (auto i = mStreamMaps[sManifests].begin(); i != mStreamMaps[sManifests].end();)

if (auto p = i->second.lock())

p->send(jvObj, true);

i = mStreamMaps[sManifests].erase(i);
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(

: loadFactorServer{loadFeeTrack.getLoadFactor()}
, loadBaseServer{loadFeeTrack.getLoadBase()}

, em{std::move(escalationMetrics)}

em.has_value() != b.em.has_value())

em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
em->openLedgerFeeLevel != b.em->openLedgerFeeLevel || em->referenceFeeLevel != b.em->referenceFeeLevel);

jvObj[jss::type] = "serverStatus";

jvObj[jss::load_base] = f.loadBaseServer;
jvObj[jss::load_factor_server] = f.loadFactorServer;
jvObj[jss::base_fee] = f.baseFee.jsonClipped();

safe_cast<std::uint64_t>(f.loadFactorServer),

jvObj[jss::load_factor] = trunc32(loadFactor);
jvObj[jss::load_factor_fee_escalation] = f.em->openLedgerFeeLevel.jsonClipped();
jvObj[jss::load_factor_fee_queue] = f.em->minProcessingFeeLevel.jsonClipped();
jvObj[jss::load_factor_fee_reference] = f.em->referenceFeeLevel.jsonClipped();

jvObj[jss::load_factor] = f.loadFactorServer;

p->send(jvObj, true);
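// Worked example (illustrative, not from this file): the "serverStatus" stream
// reports load_factor as the larger of the server's own load factor and the open
// ledger's escalated fee level rescaled to the load base, roughly
//
//   load_factor = max(loadFactorServer,
//                     mulDiv(openLedgerFeeLevel, loadBaseServer, referenceFeeLevel))
//
// With loadBaseServer = 256, referenceFeeLevel = 256, openLedgerFeeLevel = 1024 and
// loadFactorServer = 256, that yields max(256, 1024 * 256 / 256) = 1024, i.e.
// clients should expect roughly 4x the reference fee to get into the open ledger.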
if (!streamMap.empty())

jvObj[jss::type] = "consensusPhase";
jvObj[jss::consensus] = to_string(phase);

for (auto i = streamMap.begin(); i != streamMap.end();)

if (auto p = i->second.lock())

p->send(jvObj, true);

i = streamMap.erase(i);
auto const signerPublic = val->getSignerPublic();

jvObj[jss::type] = "validationReceived";

jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
jvObj[jss::signature] = strHex(val->getSignature());
jvObj[jss::full] = val->isFull();
jvObj[jss::flags] = val->getFlags();
jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
jvObj[jss::data] = strHex(val->getSerializer().slice());

if (auto version = (*val)[~sfServerVersion])

if (auto cookie = (*val)[~sfCookie])

if (auto hash = (*val)[~sfValidatedHash])
    jvObj[jss::validated_hash] = strHex(*hash);

if (masterKey != signerPublic)

if (auto const seq = (*val)[~sfLedgerSequence])
    jvObj[jss::ledger_index] = *seq;

if (val->isFieldPresent(sfAmendments))

for (auto const& amendment : val->getFieldV256(sfAmendments))
    jvObj[jss::amendments].append(to_string(amendment));

if (auto const closeTime = (*val)[~sfCloseTime])
    jvObj[jss::close_time] = *closeTime;

if (auto const loadFee = (*val)[~sfLoadFee])
    jvObj[jss::load_fee] = *loadFee;

if (auto const baseFee = val->at(~sfBaseFee))
    jvObj[jss::base_fee] = static_cast<double>(*baseFee);

if (auto const reserveBase = val->at(~sfReserveBase))
    jvObj[jss::reserve_base] = *reserveBase;

if (auto const reserveInc = val->at(~sfReserveIncrement))
    jvObj[jss::reserve_inc] = *reserveInc;

if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops); baseFeeXRP && baseFeeXRP->native())
    jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();

if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops); reserveBaseXRP && reserveBaseXRP->native())
    jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();

if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops); reserveIncXRP && reserveIncXRP->native())
    jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
if (jvTx.isMember(jss::ledger_index))

jvTx[jss::ledger_index] = std::to_string(jvTx[jss::ledger_index].asUInt());

if (auto p = i->second.lock())

[&](Json::Value const& jv) { p->send(jv, true); });

jvObj[jss::type] = "peerStatusChange";

p->send(jvObj, true);
using namespace std::chrono_literals;

JLOG(m_journal.trace()) << "recvValidation " << val->getLedgerHash() << " from " << source;

JLOG(m_journal.warn()) << "Exception thrown for handling new validation " << val->getLedgerHash() << ": "

JLOG(m_journal.warn()) << "Unknown exception thrown for handling new validation " << val->getLedgerHash();

ss << "VALIDATION: " << val->render() << " master_key: ";
2405 "This server is amendment blocked, and must be updated to be "
2406 "able to stay in sync with the network.";
2413 "This server has an expired validator list. validators.txt "
2414 "may be incorrectly configured or some [validator_list_sites] "
2415 "may be unreachable.";
2422 "One or more unsupported amendments have reached majority. "
2423 "Upgrade to the latest version before they are activated "
2424 "to avoid being amendment blocked.";
2428 d[jss::expected_date] = expected->time_since_epoch().count();
2429 d[jss::expected_date_UTC] =
to_string(*expected);
2433 if (warnings.size())
2434 info[jss::warnings] = std::move(warnings);
2452 info[jss::network_ledger] =
"waiting";
2461 info[jss::node_size] =
"tiny";
2464 info[jss::node_size] =
"small";
2467 info[jss::node_size] =
"medium";
2470 info[jss::node_size] =
"large";
2473 info[jss::node_size] =
"huge";
2482 info[jss::validator_list_expires] = safe_cast<Json::UInt>(when->time_since_epoch().count());
2484 info[jss::validator_list_expires] = 0;
2494 if (*when == TimeKeeper::time_point::max())
2496 x[jss::expiration] =
"never";
2497 x[jss::status] =
"active";
2504 x[jss::status] =
"active";
2506 x[jss::status] =
"expired";
2511 x[jss::status] =
"unknown";
2512 x[jss::expiration] =
"unknown";
2516#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2519#ifdef GIT_COMMIT_HASH
2520 x[jss::hash] = GIT_COMMIT_HASH;
2523 x[jss::branch] = GIT_BRANCH;
2538 info[jss::pubkey_validator] =
"none";
2548 info[jss::counters][jss::nodestore] = nodestore;
2557 info[jss::amendment_blocked] =
true;
2578 info[jss::last_close] = lastClose;
2586 info[jss::network_id] =
static_cast<Json::UInt>(*netid);
2595 auto const loadFactorFeeEscalation =
2596 mulDiv(escalationMetrics.openLedgerFeeLevel, loadBaseServer, escalationMetrics.referenceFeeLevel)
2599 auto const loadFactor =
std::max(safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2603 info[jss::load_base] = loadBaseServer;
2604 info[jss::load_factor] =
trunc32(loadFactor);
2605 info[jss::load_factor_server] = loadFactorServer;
2612 info[jss::load_factor_fee_escalation] = escalationMetrics.openLedgerFeeLevel.jsonClipped();
2613 info[jss::load_factor_fee_queue] = escalationMetrics.minProcessingFeeLevel.jsonClipped();
2614 info[jss::load_factor_fee_reference] = escalationMetrics.referenceFeeLevel.jsonClipped();
2618 info[jss::load_factor] =
static_cast<double>(loadFactor) / loadBaseServer;
2620 if (loadFactorServer != loadFactor)
2621 info[jss::load_factor_server] =
static_cast<double>(loadFactorServer) / loadBaseServer;
2626 if (fee != loadBaseServer)
2627 info[jss::load_factor_local] =
static_cast<double>(fee) / loadBaseServer;
2629 if (fee != loadBaseServer)
2630 info[jss::load_factor_net] =
static_cast<double>(fee) / loadBaseServer;
2632 if (fee != loadBaseServer)
2633 info[jss::load_factor_cluster] =
static_cast<double>(fee) / loadBaseServer;
2635 if (escalationMetrics.openLedgerFeeLevel != escalationMetrics.referenceFeeLevel &&
2636 (admin || loadFactorFeeEscalation != loadFactor))
2637 info[jss::load_factor_fee_escalation] =
2638 escalationMetrics.openLedgerFeeLevel.decimalFromReference(escalationMetrics.referenceFeeLevel);
2639 if (escalationMetrics.minProcessingFeeLevel != escalationMetrics.referenceFeeLevel)
2640 info[jss::load_factor_fee_queue] =
2641 escalationMetrics.minProcessingFeeLevel.decimalFromReference(escalationMetrics.referenceFeeLevel);
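// Worked example (illustrative, not from this file): in the admin view the load
// factors above are reported as decimals relative to load_base. With
// loadBaseServer = 256, a local fee of 512, and network and cluster fees of 256,
// only load_factor_local is emitted, as 512 / 256 = 2.0; the net and cluster
// entries are suppressed because they equal the base, per the
// `if (fee != loadBaseServer)` guards.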
XRPAmount const baseFee = lpClosed->fees().base;

l[jss::seq] = Json::UInt(lpClosed->header().seq);
l[jss::hash] = to_string(lpClosed->header().hash);

l[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
l[jss::close_time] = Json::Value::UInt(lpClosed->header().closeTime.time_since_epoch().count());

l[jss::reserve_base_xrp] = lpClosed->fees().reserve.decimalXRP();
l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();

l[jss::close_time_offset] = static_cast<std::uint32_t>(closeOffset.count());

l[jss::age] = Json::UInt(age < highAgeThreshold ? age.count() : 0);

auto lCloseTime = lpClosed->header().closeTime;

if (lCloseTime <= closeTime)

using namespace std::chrono_literals;
auto age = closeTime - lCloseTime;
l[jss::age] = Json::UInt(age < highAgeThreshold ? age.count() : 0);

info[jss::validated_ledger] = l;

info[jss::closed_ledger] = l;

info[jss::published_ledger] = "none";
else if (lpPublished->header().seq != lpClosed->header().seq)
    info[jss::published_ledger] = lpPublished->header().seq;

!(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() && port.admin_user.empty() &&
  port.admin_password.empty()))

for (auto const& p : proto)
    jv[jss::protocol].append(p);

auto const optPort = grpcSection.get("port");
if (optPort && grpcSection.get("ip"))

jv[jss::port] = *optPort;

jv[jss::protocol].append("grpc");

info[jss::ports] = std::move(ports);
[&](Json::Value const& jv) { p->send(jv, true); });

XRPL_ASSERT(alpAccepted->getLedger().get() == lpAccepted.get(),
    "xrpl::NetworkOPsImp::pubLedger : accepted input");

JLOG(m_journal.debug()) << "Publishing ledger " << lpAccepted->header().seq << " " << lpAccepted->header().hash;

jvObj[jss::type] = "ledgerClosed";
jvObj[jss::ledger_index] = lpAccepted->header().seq;
jvObj[jss::ledger_hash] = to_string(lpAccepted->header().hash);
jvObj[jss::ledger_time] = Json::Value::UInt(lpAccepted->header().closeTime.time_since_epoch().count());

if (!lpAccepted->rules().enabled(featureXRPFees))

jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
jvObj[jss::reserve_base] = lpAccepted->fees().reserve.jsonClipped();
jvObj[jss::reserve_inc] = lpAccepted->fees().increment.jsonClipped();

jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());

p->send(jvObj, true);

p->send(jvObj, true);
static bool firstTime = true;

for (auto& inner : outer.second)

auto& subInfo = inner.second;
if (subInfo.index_->separationLedgerSeq_ == 0)

for (auto const& accTx : *alpAccepted)

jvObj[jss::type] = "transaction";

if (auto const& lookup = ledger->txRead(transaction->getTransactionID());
    lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))

uint32_t const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);

if (transaction->isFieldPresent(sfNetworkID))
    netID = transaction->getFieldU32(sfNetworkID);

jvObj[jss::ctid] = *ctid;

if (!ledger->open())
    jvObj[jss::ledger_hash] = to_string(ledger->header().hash);

jvObj[jss::ledger_index] = ledger->header().seq;
jvObj[jss::transaction][jss::date] = ledger->header().closeTime.time_since_epoch().count();
jvObj[jss::validated] = true;
jvObj[jss::close_time_iso] = to_string_iso(ledger->header().closeTime);

jvObj[jss::validated] = false;
jvObj[jss::ledger_current_index] = ledger->header().seq;

jvObj[jss::status] = validated ? "closed" : "proposed";
jvObj[jss::engine_result] = sToken;
jvObj[jss::engine_result_code] = result;
jvObj[jss::engine_result_message] = sHuman;

if (transaction->getTxnType() == ttOFFER_CREATE)

auto const account = transaction->getAccountID(sfAccount);
auto const amount = transaction->getFieldAmount(sfTakerGets);

if (account != amount.issue().account)

jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();

RPC::insertDeliverMax(jvTx[jss::transaction], transaction->getTxnType(), Version);

if constexpr (Version > 1)

jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
jvTx[jss::hash] = hash;

jvTx[jss::transaction][jss::hash] = hash;
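// Context note (not part of this file): transJson() emits one JSON body per API
// version. Judging from the fragments above, for API v1 the serialized
// transaction stays under "transaction" with the hash nested inside it, while
// for API v2 and later the object is renamed to "tx_json", the hash moves to the
// top level, and validated results also carry "close_time_iso".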
auto const& stTxn = transaction.getTxn();

auto const trResult = transaction.getResult();

[&](Json::Value const& jv) { p->send(jv, true); });

[&](Json::Value const& jv) { p->send(jv, true); });
auto const currLedgerSeq = ledger->seq();

for (auto const& affectedAccount : transaction.getAffected())

auto it = simiIt->second.begin();

while (it != simiIt->second.end())

it = simiIt->second.erase(it);

auto it = simiIt->second.begin();
while (it != simiIt->second.end())

it = simiIt->second.erase(it);

auto& subs = historyIt->second;
auto it = subs.begin();
while (it != subs.end())

if (currLedgerSeq <= info.index_->separationLedgerSeq_)

it = subs.erase(it);

<< "proposed=" << iProposed << ", accepted=" << iAccepted;

if (!notify.empty() || !accountHistoryNotify.empty())

auto const& stTxn = transaction.getTxn();

auto const trResult = transaction.getResult();

isrListener->getApiVersion(),
[&](Json::Value const& jv) { isrListener->send(jv, true); });

jvObj.set(jss::account_history_boundary, true);

"xrpl::NetworkOPsImp::pubAccountTransaction : "
"account_history_tx_stream not set");
for (auto& info : accountHistoryNotify)

auto& index = info.index_;
if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
    jvObj.set(jss::account_history_tx_first, true);

jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);

info.sink_->getApiVersion(),
[&](Json::Value const& jv) { info.sink_->send(jv, true); });

for (auto const& affectedAccount : tx->getMentionedAccounts())

auto it = simiIt->second.begin();

while (it != simiIt->second.end())

it = simiIt->second.erase(it);

JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;

if (!notify.empty() || !accountHistoryNotify.empty())

isrListener->getApiVersion(),
[&](Json::Value const& jv) { isrListener->send(jv, true); });

"xrpl::NetworkOPs::pubProposedAccountTransaction : "
"account_history_tx_stream not set");
for (auto& info : accountHistoryNotify)

auto& index = info.index_;
if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
    jvObj.set(jss::account_history_tx_first, true);
jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);

info.sink_->getApiVersion(),
[&](Json::Value const& jv) { info.sink_->send(jv, true); });
for (auto const& naAccountID : vnaAccountIDs)

isrListener->insertSubAccountInfo(naAccountID, rt);

for (auto const& naAccountID : vnaAccountIDs)

auto simIterator = subMap.find(naAccountID);
if (simIterator == subMap.end())

usisElement[isrListener->getSeq()] = isrListener;

subMap.insert(simIterator, make_pair(naAccountID, usisElement));

simIterator->second[isrListener->getSeq()] = isrListener;

for (auto const& naAccountID : vnaAccountIDs)

isrListener->deleteSubAccountInfo(naAccountID, rt);

for (auto const& naAccountID : vnaAccountIDs)

auto simIterator = subMap.find(naAccountID);

if (simIterator != subMap.end())

simIterator->second.erase(uSeq);

if (simIterator->second.empty())

subMap.erase(simIterator);
enum DatabaseType { Sqlite, None };
static auto const databaseType = [&]() -> DatabaseType {

return DatabaseType::Sqlite;

return DatabaseType::None;

if (databaseType == DatabaseType::None)

UNREACHABLE("xrpl::NetworkOPsImp::addAccountHistoryJob : no database");

auto const& accountId = subInfo.index_->accountId_;
auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
auto& txHistoryIndex = subInfo.index_->historyTxIndex_;

<< " started. lastLedgerSeq=" << lastLedgerSeq;

auto stx = tx->getSTransaction();
if (stx->getAccountID(sfAccount) == accountId && stx->getSeqValue() == 1)

for (auto& node : meta->getNodes())

if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)

if (node.isFieldPresent(sfNewFields))

if (auto inner = dynamic_cast<STObject const*>(node.peekAtPField(sfNewFields)); inner)

if (inner->isFieldPresent(sfAccount) && inner->getAccountID(sfAccount) == accountId)

auto send = [&](Json::Value const& jvObj, bool unsubscribe) -> bool {

sptr->send(jvObj, true);

auto sendMultiApiJson = [&](MultiApiJson const& jvObj, bool unsubscribe) -> bool {

sptr->getApiVersion(),
[&](Json::Value const& jv) { sptr->send(jv, true); });

return db->newestAccountTxPage(options);

"xrpl::NetworkOPsImp::addAccountHistoryJob : "
"getMoreTxns : invalid database type");

while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)

int feeChargeCount = 0;

<< " no InfoSub. Fee charged " << feeChargeCount << " times.";

auto startLedgerSeq = (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);

<< ", working on ledger range [" << startLedgerSeq << "," << lastLedgerSeq << "]";

auto haveRange = [&]() -> bool {

return haveSomeValidatedLedgers && validatedMin <= startLedgerSeq && lastLedgerSeq <= validatedMax;

<< ", incomplete ledger range [" << startLedgerSeq << "," << lastLedgerSeq

while (!subInfo.index_->stopHistorical_)

auto dbResult = getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);

"xrpl::NetworkOPsImp::addAccountHistoryJob : "
"getMoreTxns failed");

<< "AccountHistory job for account " << toBase58(accountId) << " getMoreTxns failed.";

auto const& txns = dbResult->first;
marker = dbResult->second;
size_t num_txns = txns.size();
for (size_t i = 0; i < num_txns; ++i)

auto const& [tx, meta] = txns[i];

<< "AccountHistory job for account " << toBase58(accountId) << " empty tx or meta.";

"xrpl::NetworkOPsImp::addAccountHistoryJob : "
"getLedgerBySeq failed");

<< "AccountHistory job for account " << toBase58(accountId) << " no ledger.";

"NetworkOPsImp::addAccountHistoryJob : "
"getSTransaction failed");

<< "AccountHistory job for account " << toBase58(accountId) << " getSTransaction failed.";

auto const trR = meta->getResultTER();

jvTx.set(jss::account_history_tx_index, txHistoryIndex--);
if (i + 1 == num_txns || txns[i + 1].first->getLedger() != tx->getLedger())
    jvTx.set(jss::account_history_boundary, true);

if (isFirstTx(tx, meta))

jvTx.set(jss::account_history_tx_first, true);
sendMultiApiJson(jvTx, false);

<< "AccountHistory job for account " << toBase58(accountId) << " done, found last tx.";

sendMultiApiJson(jvTx, false);

<< " paging, marker=" << marker->ledgerSeq << ":" << marker->txnSeq;

if (!subInfo.index_->stopHistorical_)

lastLedgerSeq = startLedgerSeq - 1;
if (lastLedgerSeq <= 1)

<< "AccountHistory job for account " << toBase58(accountId) << " done, reached genesis ledger.";
subInfo.index_->separationLedgerSeq_ = ledger->seq();
auto const& accountId = subInfo.index_->accountId_;

if (!ledger->exists(accountKeylet))

<< ", no need to add AccountHistory job.";

if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)

if (sleAcct->getFieldU32(sfSequence) == 1)

<< " does not have tx, no need to add AccountHistory job.";

"xrpl::NetworkOPsImp::subAccountHistoryStart : failed to "
"access genesis account");

subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
subInfo.index_->haveHistorical_ = true;

JLOG(m_journal.debug()) << "subAccountHistoryStart, add AccountHistory job: accountId=" << toBase58(accountId)
                        << ", currentLedgerSeq=" << ledger->seq();

if (!isrListener->insertSubAccountHistory(accountId))

inner.emplace(isrListener->getSeq(), ahi);

simIterator->second.emplace(isrListener->getSeq(), ahi);

JLOG(m_journal.debug()) << "subAccountHistory, no validated ledger yet, delay start";

isrListener->deleteSubAccountHistory(account);

auto& subInfoMap = simIterator->second;
auto subInfoIter = subInfoMap.find(seq);
if (subInfoIter != subInfoMap.end())

subInfoIter->second.index_->stopHistorical_ = true;

simIterator->second.erase(seq);
if (simIterator->second.empty())

<< ", historyOnly = " << (historyOnly ? "true" : "false");

listeners->addSubscriber(isrListener);

UNREACHABLE("xrpl::NetworkOPsImp::subBook : null book listeners");

listeners->removeSubscriber(uSeq);

XRPL_ASSERT(m_standalone, "xrpl::NetworkOPsImp::acceptLedger : is standalone");

Throw<std::runtime_error>("Operation only possible in STANDALONE mode.");
jvResult[jss::ledger_index] = lpClosed->header().seq;
jvResult[jss::ledger_hash] = to_string(lpClosed->header().hash);
jvResult[jss::ledger_time] = Json::Value::UInt(lpClosed->header().closeTime.time_since_epoch().count());
if (!lpClosed->rules().enabled(featureXRPFees))

jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
jvResult[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();

jvResult[jss::random] = to_string(uRandom);

jvResult[jss::load_base] = feeTrack.getLoadBase();
jvResult[jss::load_factor] = feeTrack.getLoadFactor();
jvResult[jss::hostid] = getHostId(admin);
if (map.find(pInfo->getSeq()) != map.end())

#ifndef USE_NEW_BOOK_PAGE

unsigned int iLimit,

uint256 uTipIndex = uBookBase;

stream << "getBookPage:" << book;
stream << "getBookPage: uBookBase=" << uBookBase;
stream << "getBookPage: uBookEnd=" << uBookEnd;
stream << "getBookPage: uTipIndex=" << uTipIndex;

bool bDirectAdvance = true;

unsigned int uBookEntry;

while (!bDone && iLimit-- > 0)

bDirectAdvance = false;

auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);

sleOfferDir.reset();

uTipIndex = sleOfferDir->key();

cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);

JLOG(m_journal.trace()) << "getBookPage: uTipIndex=" << uTipIndex;
JLOG(m_journal.trace()) << "getBookPage: offerIndex=" << offerIndex;

auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);

bool firstOwnerOffer(true);

saOwnerFunds = saTakerGets;

else if (bGlobalFreeze)

auto umBalanceEntry = umBalance.find(uOfferOwnerID);
if (umBalanceEntry != umBalance.end())

saOwnerFunds = umBalanceEntry->second;
firstOwnerOffer = false;

if (saOwnerFunds < beast::zero)

saOwnerFunds.clear();

STAmount saOwnerFundsLimit = saOwnerFunds;

saOwnerFundsLimit = divide(saOwnerFunds, offerRate);

if (saOwnerFundsLimit >= saTakerGets)

saTakerGetsFunded = saTakerGets;

saTakerGetsFunded = saOwnerFundsLimit;

saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);

.setJson(jvOffer[jss::taker_pays_funded]);

umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;

jvOf[jss::quality] = saDirRate.getText();

if (firstOwnerOffer)
    jvOf[jss::owner_funds] = saOwnerFunds.getText();

if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))

bDirectAdvance = true;

JLOG(m_journal.trace()) << "getBookPage: offerIndex=" << offerIndex;
unsigned int iLimit,

MetaView lesActive(lpLedger, tapNONE, true);
OrderBookIterator obIterator(lesActive, book);

bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) || lesActive.isGlobalFrozen(book.in.account);

while (iLimit-- > 0 && obIterator.nextOffer())

auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
STAmount saDirRate = obIterator.getCurrentRate();

saOwnerFunds = saTakerGets;

else if (bGlobalFreeze)

auto umBalanceEntry = umBalance.find(uOfferOwnerID);

if (umBalanceEntry != umBalance.end())

saOwnerFunds = umBalanceEntry->second;

if (saOwnerFunds.isNegative())

saOwnerFunds.zero();

STAmount saTakerGetsFunded;
STAmount saOwnerFundsLimit = saOwnerFunds;

saOwnerFundsLimit = divide(saOwnerFunds, offerRate);

if (saOwnerFundsLimit >= saTakerGets)

saTakerGetsFunded = saTakerGets;

saTakerGetsFunded = saOwnerFundsLimit;

saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);

std::min(saTakerPays, multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
    .setJson(jvOffer[jss::taker_pays_funded]);

STAmount saOwnerPays = (parityRate == offerRate)
    : std::min(saOwnerFunds, multiply(saTakerGetsFunded, offerRate));

umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;

if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)

jvOf[jss::quality] = saDirRate.getText();
++counters_[static_cast<std::size_t>(om)].transitions;

initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(now - processStart_).count();

std::chrono::duration_cast<std::chrono::microseconds>(now - start_);

auto [counters, mode, start, initialSync] = getCounterData();

auto& state = obj[jss::state_accounting][states_[i]];
state[jss::transitions] = std::to_string(counters[i].transitions);
state[jss::duration_us] = std::to_string(counters[i].dur.count());

obj[jss::initial_sync_duration_us] = std::to_string(initialSync);

boost::asio::io_context& io_svc,
T back_inserter(T... args)
Decorator for streaming out compact json.
Lightweight wrapper to tag static string.
Value & append(Value const &value)
Append value to array at the end.
bool isMember(char const *key) const
Return true if the object has a member named key.
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
A generic endpoint for log messages.
Stream trace() const
Severity stream access functions.
A metric for measuring an integral value.
void set(value_type value) const
Set the value on the gauge.
A reference to a handler for performing polled collection.
A transaction that is in a closed ledger.
TxMeta const & getMeta() const
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual perf::PerfLog & getPerfLog()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual Cluster & cluster()=0
virtual Config & config()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual LedgerMaster & getLedgerMaster()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual TimeKeeper & timeKeeper()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual OpenLedger & openLedger()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual Overlay & overlay()=0
virtual JobQueue & getJobQueue()=0
virtual ManifestCache & validatorManifests()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
virtual ValidatorList & validators()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::uint32_t getLoadFee() const
NetClock::time_point getReportTime() const
PublicKey const & identity() const
std::string const & name() const
std::size_t size() const
The number of nodes in the cluster list.
std::string SERVER_DOMAIN
int RELAY_UNTRUSTED_VALIDATIONS
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
virtual Json::Value getInfo()=0
virtual void clearFailures()=0
std::shared_ptr< InfoSub > pointer
A pool of threads to perform work.
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Json::Value getJson(int c=0)
std::chrono::seconds getValidatedLedgerAge()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
bool haveValidated()
Whether we have ever fully validated a ledger.
std::size_t getFetchPackCacheSize() const
std::shared_ptr< Ledger const > getClosedLedger()
std::string getCompleteLedgers()
std::shared_ptr< Ledger const > getValidatedLedger()
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< ReadView const > getCurrentLedger()
Manages the current fee schedule.
std::uint32_t getClusterFee() const
std::uint32_t getLocalFee() const
std::uint32_t getRemoteFee() const
std::uint32_t getLoadFactor() const
std::uint32_t getLoadBase() const
void heartbeat()
Reset the stall detection timer.
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
void json(Json::Value &obj) const
Output state counters in JSON format.
std::chrono::steady_clock::time_point const processStart_
static std::array< Json::StaticString const, 5 > const states_
CounterData getCounterData() const
std::uint64_t initialSyncUs_
std::array< Counters, 5 > counters_
void mode(OperatingMode om)
Record state transition.
std::chrono::steady_clock::time_point start_
Transaction with input flags and results to be applied in batches.
std::shared_ptr< Transaction > const transaction
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
std::string getHostId(bool forAdmin)
void reportConsensusStateChange(ConsensusPhase phase)
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void clearNeedNetworkLedger() override
ServerFeeSummary mLastFeeSummary
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
DispatchState mDispatchState
std::size_t const minPeerCount_
static std::array< char const *, 5 > const states_
std::set< uint256 > pendingValidations_
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
bool unsubManifests(std::uint64_t uListener) override
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool subManifests(InfoSub::ref ispListener) override
void stateAccounting(Json::Value &obj) override
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
SubInfoMapType mSubRTAccount
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
void transactionBatch()
Apply transactions in batches.
bool unsubRTTransactions(std::uint64_t uListener) override
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
bool processTrustedProposal(RCLCxPeerPos proposal) override
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
void pubValidation(std::shared_ptr< STValidation > const &val) override
bool subBook(InfoSub::ref ispListener, Book const &) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
std::atomic< OperatingMode > mMode
void setMode(OperatingMode om) override
void setAmendmentBlocked() override
void pubConsensus(ConsensusPhase phase)
std::recursive_mutex mSubLock
bool isNeedNetworkLedger() override
DispatchState
Synchronization states for transaction batches.
std::atomic< bool > needNetworkLedger_
boost::asio::steady_timer heartbeatTimer_
bool subConsensus(InfoSub::ref ispListener) override
bool unsubBook(std::uint64_t uListener, Book const &) override
bool unsubLedger(std::uint64_t uListener) override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
std::optional< PublicKey > const validatorPK_
std::atomic< bool > amendmentBlocked_
void clearAmendmentWarned() override
void updateLocalTx(ReadView const &view) override
void clearLedgerFetch() override
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool isAmendmentBlocked() override
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::unique_ptr< LocalTxs > m_localTX
void setStandAlone() override
void setNeedNetworkLedger() override
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
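A hedged sketch of how a helper with setTimer's signature could arm a boost::asio::steady_timer; the real member also wraps the wait handler in waitHandlerCounter_, which is omitted here.

#include <boost/asio/error.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/system/error_code.hpp>
#include <chrono>
#include <functional>

void
setTimerSketch(
    boost::asio::steady_timer& timer,
    std::chrono::milliseconds const& expiryTime,
    std::function<void()> onExpire,
    std::function<void()> onError)
{
    timer.expires_after(expiryTime);
    timer.async_wait([onExpire, onError](boost::system::error_code const& ec) {
        if (ec == boost::asio::error::operation_aborted)
            return;  // timer was cancelled, e.g. during shutdown
        if (ec)
            onError();
        else
            onExpire();
    });
}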
bool unsubServer(std::uint64_t uListener) override
SubAccountHistoryMapType mSubAccountHistory
void processClusterTimer()
bool unsubConsensus(std::uint64_t uListener) override
std::condition_variable mCond
void pubManifest(Manifest const &) override
void consensusViewChange() override
boost::asio::steady_timer accountHistoryTxTimer_
Json::Value getConsensusInfo() override
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void setUNLBlocked() override
bool unsubValidations(std::uint64_t uListener) override
bool subPeerStatus(InfoSub::ref ispListener) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
ConsensusPhase mLastConsensusPhase
OperatingMode getOperatingMode() const override
std::optional< PublicKey > const validatorMasterPK_
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
std::vector< TransactionStatus > mTransactions
bool tryRemoveRpcSub(std::string const &strUrl) override
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void processHeartbeatTimer()
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply a batch of transactions and wait for this transaction to be processed.
void submitTransaction(std::shared_ptr< STTx const > const &) override
void setAmendmentWarned() override
LedgerMaster & m_ledgerMaster
Json::Value getServerInfo(bool human, bool admin, bool counters) override
StateAccounting accounting_
bool subValidations(InfoSub::ref ispListener) override
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool subRTTransactions(InfoSub::ref ispListener) override
std::atomic< bool > unlBlocked_
bool unsubBookChanges(std::uint64_t uListener) override
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
Unsubscribe from an account's transactions.
void setStateTimer() override
Called to initially start our timers.
std::size_t getLocalTxCount() override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or are submitted by clients.
bool unsubTransactions(std::uint64_t uListener) override
bool isAmendmentWarned() override
bool subTransactions(InfoSub::ref ispListener) override
std::mutex validationsMutex_
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree and returns the new ledger's sequence.
SubInfoMapType mSubAccount
void clearUNLBlocked() override
bool isUNLBlocked() override
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
std::atomic< bool > amendmentWarned_
boost::asio::steady_timer clusterTimer_
bool unsubPeerStatus(std::uint64_t uListener) override
void reportFeeChange() override
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, ensuring that they are processed in one batch.
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
bool isBlocked() override
~NetworkOPsImp() override
Json::Value getLedgerFetchInfo() override
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool subBookChanges(InfoSub::ref ispListener) override
Provides server functionality for clients.
void getCountsJson(Json::Value &obj)
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Writable ledger view that accumulates state and tx changes.
BookListeners::pointer getBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &alTx, MultiApiJson const &jvObj)
BookListeners::pointer makeBookListeners(Book const &)
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnectCharges() const =0
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
Manages the generic consensus algorithm for use by the RCL.
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
Json::Value getJson(bool full) const
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
A peer's signed, proposed position for use in RCLConsensus.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Represents a set of transactions in RCLConsensus.
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Collects logging information.
std::unique_ptr< std::stringstream > const & ss()
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
Issue const & issue() const
std::string getText() const override
void setJson(Json::Value &) const
std::optional< T > get(std::string const &name) const
std::size_t size() const noexcept
void const * data() const noexcept
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
std::chrono::seconds closeOffset() const
time_point closeTime() const
Returns the predicted close time, in network time.
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Validator keys and manifest as set in the configuration file.
std::optional< PublicKey > localPublicKey() const
Returns the local validator public key, or std::nullopt if none is configured.
std::size_t quorum() const
Get quorum value for current trusted key set.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
Json::Value jsonClipped() const
constexpr double decimalXRP() const
static constexpr std::size_t size()
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls, with their durations, in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
T emplace_back(T... args)
@ arrayValue
array value (ordered list)
@ objectValue
object value (collection of name/value pairs).
void rngfill(void *const buffer, std::size_t const bytes, Generator &g)
std::string const & getVersionString()
Server version.
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Encodes ledger sequence, transaction index, and network ID into a CTID string.
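A sketch of the CTID bit layout this helper implies, following the XLS-37 concise transaction ID format (the 4/28/16/16-bit field widths are an assumption taken from that specification, not from this file):

#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>

std::optional<std::string>
encodeCTIDSketch(std::uint32_t ledgerSeq, std::uint32_t txnIndex, std::uint32_t networkID)
{
    // Each field must fit its assumed bit width: 28/16/16 bits respectively.
    if (ledgerSeq > 0x0FFF'FFFF || txnIndex > 0xFFFF || networkID > 0xFFFF)
        return std::nullopt;

    // Top nibble 0xC, then ledger sequence, transaction index, network ID.
    std::uint64_t const ctid = ((0xC000'0000ULL + ledgerSeq) << 32) |
        (static_cast<std::uint64_t>(txnIndex) << 16) | networkID;

    char buf[17];
    std::snprintf(buf, sizeof(buf), "%016llX", static_cast<unsigned long long>(ctid));
    return std::string(buf);
}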
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
void insertNFTSyntheticInJson(Json::Value &, std::shared_ptr< STTx const > const &, TxMeta const &)
Adds common synthetic fields to transaction-related JSON responses.
Charge const feeMediumBurdenRPC
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Keylet account(AccountID const &id) noexcept
AccountID root.
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
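An illustrative (hypothetical) lookup combining the keylet helpers above with ReadView::read() from this index, assuming the helpers live in the keylet namespace and that sfSequence is available from the protocol headers:

std::uint32_t
accountSequenceSketch(ReadView const& view, AccountID const& id)
{
    // Read the account root SLE; a null pointer means the account does not
    // exist in this ledger.
    if (auto const sle = view.read(keylet::account(id)))
        return sle->getFieldU32(sfSequence);
    return 0;
}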
Rate rate(Env &env, Account const &account, std::uint32_t const &seq)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
STAmount divide(STAmount const &amount, Rate const &rate)
bool set(T &target, std::string const &name, Section const §ion)
Set a value from a configuration Section. If the named value is not found or doesn't parse as a T, the target is left unchanged and the function returns false.
bool isTerRetry(TER x) noexcept
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
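A sketch of the contract implied by the declaration above: compute value * mul / div without losing precision in the intermediate product, returning std::nullopt when div is zero or the result does not fit in 64 bits. The unsigned __int128 intermediate is an assumption (a GCC/Clang extension); the real helper may be implemented differently.

#include <cstdint>
#include <limits>
#include <optional>

std::optional<std::uint64_t>
mulDivSketch(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
{
    if (div == 0)
        return std::nullopt;
    // Widen before multiplying so the product cannot overflow.
    unsigned __int128 const product = static_cast<unsigned __int128>(value) * mul;
    unsigned __int128 const result = product / div;
    if (result > std::numeric_limits<std::uint64_t>::max())
        return std::nullopt;
    return static_cast<std::uint64_t>(result);
}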
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
constexpr std::uint32_t tfInnerBatchTxn
std::string to_string(base_uint< Bits, Tag > const &a)
std::string strHex(FwdIt begin, FwdIt end)
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const ¤t)
std::uint64_t getQuality(uint256 const &uBase)
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
FeeSetup setup_FeeVote(Section const §ion)
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const ¤cy, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j, SpendableHandling includeFullBalance=shSIMPLE_BALANCE)
Number root(Number f, unsigned d)
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
bool transResultInfo(TER code, std::string &token, std::string &text)
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
STAmount multiply(STAmount const &amount, Rate const &rate)
static auto const genesisAccountId
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
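A hedged sketch of walking a ledger directory with cdirFirst/cdirNext as declared above; the visitor receives each entry key in directory order.

template <class Visit>
void
forEachDirEntrySketch(ReadView const& view, uint256 const& dirRoot, Visit&& visit)
{
    std::shared_ptr<SLE const> page;
    unsigned int index = 0;
    uint256 entry;
    if (cdirFirst(view, dirRoot, page, index, entry))
    {
        do
        {
            // `entry` is the key of the next object in the directory.
            visit(entry);
        } while (cdirNext(view, dirRoot, page, index, entry));
    }
}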
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
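An illustrative use of the deterministic key helpers declared above; the passphrase and function name are hypothetical.

AccountID
exampleAccountFromPassphrase()
{
    Seed const seed = generateSeed("masterpassphrase");
    // generateKeyPair returns a {PublicKey, SecretKey} pair.
    auto const keys = generateKeyPair(KeyType::secp256k1, seed);
    return calcAccountID(keys.first);  // calcAccountID also appears in this index
}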
@ current
This was a new validation and was added.
constexpr std::size_t maxPoppedTransactions
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
STAmount amountFromQuality(std::uint64_t rate)
bool isTefFailure(TER x) noexcept
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
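A hedged sketch combining transferRate, parityRate, and multiply from this index: compute the gross amount an issuer's transfer fee requires so that `amount` arrives at the destination (names are illustrative).

STAmount
grossAmountSketch(ReadView const& view, AccountID const& issuer, STAmount const& amount)
{
    // parityRate means the issuer charges no transfer fee.
    Rate const rate = transferRate(view, issuer);
    return (rate == parityRate) ? amount : multiply(amount, rate);
}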
auto constexpr muldiv_max
uint256 getQualityNext(uint256 const &uBase)
ConsensusPhase
Phases of consensus for a single ledger round.
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
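A hedged sketch of the intended use of send_if: broadcast a message only to peers satisfying a predicate. Overlay::foreach is assumed to exist as in the surrounding code; the predicate here is purely illustrative.

void
broadcastToSomeSketch(Overlay& overlay, std::shared_ptr<Message> const& m)
{
    // send_if wraps the message and predicate; foreach applies the result to
    // every connected peer.
    overlay.foreach(send_if(m, [](auto const& peer) { return peer != nullptr; }));
}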
void forAllApiVersions(Fn const &fn, Args &&... args)
AccountID calcAccountID(PublicKey const &pk)
uint256 getBookBase(Book const &book)
Json::Value rpcError(error_code_i iError)
std::string to_string_iso(date::sys_time< Duration > tp)
std::unique_ptr< LocalTxs > make_LocalTxs()
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
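A hedged usage sketch of checkValidity as declared above; Validity::Valid is assumed to be the success enumerator.

bool
isLocallyValidSketch(HashRouter& router, STTx const& tx, Rules const& rules, Config const& config)
{
    // .first is the Validity verdict; .second carries a human-readable
    // reason on failure.
    return checkValidity(router, tx, rules, config).first == Validity::Valid;
}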
bool isTelLocal(TER x) noexcept
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
bool isTesSuccess(TER x) noexcept
static std::uint32_t trunc32(std::uint64_t v)
static std::array< char const *, 5 > const stateNames
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
OperatingMode
Specifies the mode under which the server believes it's operating; a sketch of the enum follows the enumerator briefs below.
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
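The listing above suggests the following shape for the enum; the exact enumerator order is an assumption, while the per-value comments repeat the briefs shown here.

enum class OperatingMode
{
    DISCONNECTED,  // not ready to process requests
    CONNECTED,     // convinced we are talking to the network
    SYNCING,       // fallen slightly behind
    TRACKING,      // convinced we agree with the network
    FULL           // we have the ledger and can even validate
};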
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
bool isTemMalformed(TER x) noexcept
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
@ warnRPC_AMENDMENT_BLOCKED
@ warnRPC_UNSUPPORTED_MAJORITY
@ warnRPC_EXPIRED_VALIDATOR_LIST
T set_intersection(T... args)
PublicKey masterKey
The master key associated with this manifest.
std::string serialized
The manifest in serialized form.
Blob getMasterSignature() const
Returns manifest master key signature.
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
std::uint32_t sequence
The sequence number of this manifest.
Server fees published on server subscription.
std::optional< TxQ::Metrics > em
bool operator!=(ServerFeeSummary const &b) const
bool operator==(ServerFeeSummary const &b) const
std::uint32_t loadBaseServer
ServerFeeSummary()=default
std::uint32_t loadFactorServer
decltype(initialSyncUs_) initialSyncUs
decltype(counters_) counters
std::chrono::microseconds dur
std::uint64_t transitions
beast::insight::Gauge syncing_duration
beast::insight::Gauge tracking_duration
beast::insight::Gauge connected_duration
beast::insight::Gauge tracking_transitions
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
beast::insight::Gauge connected_transitions
beast::insight::Gauge full_transitions
beast::insight::Gauge disconnected_duration
beast::insight::Gauge syncing_transitions
beast::insight::Gauge disconnected_transitions
beast::insight::Gauge full_duration
beast::insight::Hook hook
std::int32_t historyTxIndex_
AccountID const accountId_
std::uint32_t forwardTxIndex_
std::uint32_t separationLedgerSeq_
std::uint32_t historyLastLedgerSeq_
SubAccountHistoryIndex(AccountID const &accountId)
std::atomic< bool > stopHistorical_
std::shared_ptr< SubAccountHistoryIndex > index_
std::shared_ptr< SubAccountHistoryIndex > index_
Represents a transfer rate.
Data format for exchanging consumption information across peers.
std::vector< Item > items
Changes in trusted nodes after updating validator list.
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
void set(char const *key, auto const &v)
IsMemberResult isMember(char const *key) const
Select all peers (except an optional excluded peer) that are in our cluster.
Sends a message to all peers.
T time_since_epoch(T... args)