1#include <xrpld/app/consensus/RCLConsensus.h>
2#include <xrpld/app/consensus/RCLCxPeerPos.h>
3#include <xrpld/app/consensus/RCLValidations.h>
4#include <xrpld/app/ledger/AcceptedLedger.h>
5#include <xrpld/app/ledger/InboundLedgers.h>
6#include <xrpld/app/ledger/LedgerMaster.h>
7#include <xrpld/app/ledger/LedgerToJson.h>
8#include <xrpld/app/ledger/LocalTxs.h>
9#include <xrpld/app/ledger/OpenLedger.h>
10#include <xrpld/app/ledger/TransactionMaster.h>
11#include <xrpld/app/main/LoadManager.h>
12#include <xrpld/app/main/Tuning.h>
13#include <xrpld/app/misc/DeliverMax.h>
14#include <xrpld/app/misc/Transaction.h>
15#include <xrpld/app/misc/TxQ.h>
16#include <xrpld/app/misc/ValidatorKeys.h>
17#include <xrpld/app/misc/ValidatorList.h>
18#include <xrpld/app/misc/detail/AccountTxPaging.h>
19#include <xrpld/app/misc/make_NetworkOPs.h>
20#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
21#include <xrpld/consensus/Consensus.h>
22#include <xrpld/consensus/ConsensusParms.h>
23#include <xrpld/core/ConfigSections.h>
24#include <xrpld/overlay/Cluster.h>
25#include <xrpld/overlay/Overlay.h>
26#include <xrpld/overlay/predicates.h>
27#include <xrpld/rpc/BookChanges.h>
28#include <xrpld/rpc/CTID.h>
29#include <xrpld/rpc/DeliveredAmount.h>
30#include <xrpld/rpc/MPTokenIssuanceID.h>
31#include <xrpld/rpc/ServerHandler.h>
33#include <xrpl/basics/UptimeClock.h>
34#include <xrpl/basics/mulDiv.h>
35#include <xrpl/basics/safe_cast.h>
36#include <xrpl/basics/scope.h>
37#include <xrpl/beast/utility/rngfill.h>
38#include <xrpl/core/HashRouter.h>
39#include <xrpl/core/NetworkIDService.h>
40#include <xrpl/core/PerfLog.h>
41#include <xrpl/crypto/RFC1751.h>
42#include <xrpl/crypto/csprng.h>
43#include <xrpl/ledger/AmendmentTable.h>
44#include <xrpl/ledger/OrderBookDB.h>
45#include <xrpl/protocol/BuildInfo.h>
46#include <xrpl/protocol/Feature.h>
47#include <xrpl/protocol/MultiApiJson.h>
48#include <xrpl/protocol/NFTSyntheticSerializer.h>
49#include <xrpl/protocol/RPCErr.h>
50#include <xrpl/protocol/TxFlags.h>
51#include <xrpl/protocol/jss.h>
52#include <xrpl/resource/Fees.h>
53#include <xrpl/resource/ResourceManager.h>
54#include <xrpl/server/LoadFeeTrack.h>
55#include <xrpl/tx/apply.h>
57#include <boost/asio/ip/host_name.hpp>
58#include <boost/asio/steady_timer.hpp>
93 "xrpl::NetworkOPsImp::TransactionStatus::TransactionStatus : "
195 return !(*
this != b);
214 boost::asio::io_context& io_svc,
231 registry.getInboundTransactions(),
232 beast::get_abstract_clock<
std::chrono::steady_clock>(),
234 registry_.logs().journal(
"LedgerConsensus"))
236 validatorKeys.keys ? validatorKeys.keys->publicKey : decltype(
validatorPK_){})
238 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
425 getServerInfo(
bool human,
bool admin,
bool counters)
override;
451 TER result)
override;
551 catch (boost::system::system_error
const& e)
553 JLOG(
m_journal.
error()) <<
"NetworkOPs: heartbeatTimer cancel error: " << e.what();
560 catch (boost::system::system_error
const& e)
562 JLOG(
m_journal.
error()) <<
"NetworkOPs: clusterTimer cancel error: " << e.what();
569 catch (boost::system::system_error
const& e)
572 <<
"NetworkOPs: accountHistoryTxTimer cancel error: " << e.what();
576 using namespace std::chrono_literals;
586 boost::asio::steady_timer& timer,
769 template <
class Handler>
771 :
hook(collector->make_hook(handler))
773 collector->make_gauge(
"State_Accounting",
"Disconnected_duration"))
775 ,
syncing_duration(collector->make_gauge(
"State_Accounting",
"Syncing_duration"))
776 ,
tracking_duration(collector->make_gauge(
"State_Accounting",
"Tracking_duration"))
777 ,
full_duration(collector->make_gauge(
"State_Accounting",
"Full_duration"))
779 collector->make_gauge(
"State_Accounting",
"Disconnected_transitions"))
781 collector->make_gauge(
"State_Accounting",
"Connected_transitions"))
784 collector->make_gauge(
"State_Accounting",
"Tracking_transitions"))
785 ,
full_transitions(collector->make_gauge(
"State_Accounting",
"Full_transitions"))
814 {
"disconnected",
"connected",
"syncing",
"tracking",
"full"}};
874 static std::string const hostname = boost::asio::ip::host_name();
881 static std::string const shroudedHostId = [
this]() {
887 return shroudedHostId;
902 boost::asio::steady_timer& timer,
908 if (
auto optionalCountedHandler =
910 if ((e.value() == boost::system::errc::success) && (!m_job_queue.isStopped()))
915 if (e.value() != boost::system::errc::success &&
916 e.value() != boost::asio::error::operation_aborted)
919 JLOG(m_journal.error())
920 <<
"Timer got error '" << e.message() <<
"'. Restarting timer.";
925 timer.expires_after(expiry_time);
926 timer.async_wait(std::move(*optionalCountedHandler));
931NetworkOPsImp::setHeartbeatTimer()
935 mConsensus.parms().ledgerGRANULARITY,
937 m_job_queue.addJob(jtNETOP_TIMER,
"NetHeart", [this]() { processHeartbeatTimer(); });
939 [
this]() { setHeartbeatTimer(); });
943NetworkOPsImp::setClusterTimer()
945 using namespace std::chrono_literals;
951 m_job_queue.addJob(
jtNETOP_CLUSTER,
"NetCluster", [
this]() { processClusterTimer(); });
953 [
this]() { setClusterTimer(); });
959 JLOG(m_journal.debug()) <<
"Scheduling AccountHistory job for account "
961 using namespace std::chrono_literals;
963 accountHistoryTxTimer_,
965 [
this, subInfo]() { addAccountHistoryJob(subInfo); },
966 [
this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
970NetworkOPsImp::processHeartbeatTimer()
980 std::size_t const numPeers = registry_.overlay().size();
983 if (numPeers < minPeerCount_)
985 if (mMode != OperatingMode::DISCONNECTED)
987 setMode(OperatingMode::DISCONNECTED);
989 ss <<
"Node count (" << numPeers <<
") has fallen "
990 <<
"below required minimum (" << minPeerCount_ <<
").";
991 JLOG(m_journal.warn()) << ss.
str();
992 CLOG(clog.
ss()) <<
"set mode to DISCONNECTED: " << ss.
str();
996 CLOG(clog.
ss()) <<
"already DISCONNECTED. too few peers (" << numPeers
997 <<
"), need at least " << minPeerCount_;
1004 setHeartbeatTimer();
1009 if (mMode == OperatingMode::DISCONNECTED)
1011 setMode(OperatingMode::CONNECTED);
1012 JLOG(m_journal.info()) <<
"Node count (" << numPeers <<
") is sufficient.";
1013 CLOG(clog.
ss()) <<
"setting mode to CONNECTED based on " << numPeers <<
" peers. ";
1018 auto origMode = mMode.load();
1019 CLOG(clog.
ss()) <<
"mode: " << strOperatingMode(origMode,
true);
1020 if (mMode == OperatingMode::SYNCING)
1021 setMode(OperatingMode::SYNCING);
1022 else if (mMode == OperatingMode::CONNECTED)
1023 setMode(OperatingMode::CONNECTED);
1024 auto newMode = mMode.load();
1025 if (origMode != newMode)
1027 CLOG(clog.
ss()) <<
", changing to " << strOperatingMode(newMode,
true);
1029 CLOG(clog.
ss()) <<
". ";
1032 mConsensus.timerEntry(registry_.timeKeeper().closeTime(), clog.
ss());
1034 CLOG(clog.
ss()) <<
"consensus phase " << to_string(mLastConsensusPhase);
1036 if (mLastConsensusPhase != currPhase)
1038 reportConsensusStateChange(currPhase);
1039 mLastConsensusPhase = currPhase;
1040 CLOG(clog.
ss()) <<
" changed to " << to_string(mLastConsensusPhase);
1042 CLOG(clog.
ss()) <<
". ";
1044 setHeartbeatTimer();
1048NetworkOPsImp::processClusterTimer()
1050 if (registry_.cluster().size() == 0)
1053 using namespace std::chrono_literals;
1055 bool const update = registry_.cluster().update(
1056 registry_.app().nodeIdentity().first,
1058 (m_ledgerMaster.getValidatedLedgerAge() <= 4min) ? registry_.getFeeTrack().getLocalFee()
1060 registry_.timeKeeper().now());
1064 JLOG(m_journal.debug()) <<
"Too soon to send cluster update";
1069 protocol::TMCluster cluster;
1070 registry_.cluster().for_each([&cluster](
ClusterNode const& node) {
1071 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1076 n.set_nodename(node.
name());
1079 Resource::Gossip gossip = registry_.getResourceManager().exportConsumers();
1080 for (
auto& item : gossip.
items)
1082 protocol::TMLoadSource& node = *cluster.add_loadsources();
1083 node.set_name(to_string(item.address));
1084 node.set_cost(item.balance);
1086 registry_.overlay().foreach(
1094NetworkOPsImp::strOperatingMode(
OperatingMode const mode,
bool const admin)
const
1096 if (mode == OperatingMode::FULL && admin)
1098 auto const consensusMode = mConsensus.mode();
1099 if (consensusMode != ConsensusMode::wrongLedger)
1101 if (consensusMode == ConsensusMode::proposing)
1104 if (mConsensus.validating())
1105 return "validating";
1115 if (isNeedNetworkLedger())
1122 if (iTrans->isFlag(
tfInnerBatchTxn) && m_ledgerMaster.getValidatedRules().enabled(featureBatch))
1124 JLOG(m_journal.error()) <<
"Submitted transaction invalid: tfInnerBatchTxn flag present.";
1131 auto const txid = trans->getTransactionID();
1132 auto const flags = registry_.getHashRouter().getFlags(txid);
1134 if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1136 JLOG(m_journal.warn()) <<
"Submitted transaction cached bad";
1142 auto const [validity, reason] =
1143 checkValidity(registry_.getHashRouter(), *trans, m_ledgerMaster.getValidatedRules());
1145 if (validity != Validity::Valid)
1147 JLOG(m_journal.warn()) <<
"Submitted transaction invalid: " << reason;
1153 JLOG(m_journal.warn()) <<
"Exception checking transaction " << txid <<
": " << ex.
what();
1162 m_job_queue.addJob(
jtTRANSACTION,
"SubmitTxn", [
this, tx]() {
1164 processTransaction(t,
false,
false, FailHard::no);
1171 auto const newFlags = registry_.getHashRouter().getFlags(transaction->getID());
1173 if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1176 JLOG(m_journal.warn()) << transaction->getID() <<
": cached bad!\n";
1177 transaction->setStatus(
INVALID);
1182 auto const view = m_ledgerMaster.getCurrentLedger();
1187 auto const sttx = *transaction->getSTransaction();
1188 if (sttx.isFlag(
tfInnerBatchTxn) && view->rules().enabled(featureBatch))
1190 transaction->setStatus(
INVALID);
1192 registry_.getHashRouter().setFlags(transaction->getID(), HashRouterFlags::BAD);
1199 auto const [validity, reason] =
checkValidity(registry_.getHashRouter(), sttx, view->rules());
1201 validity == Validity::Valid,
"xrpl::NetworkOPsImp::processTransaction : valid validity");
1204 if (validity == Validity::SigBad)
1206 JLOG(m_journal.info()) <<
"Transaction has bad signature: " << reason;
1207 transaction->setStatus(
INVALID);
1209 registry_.getHashRouter().setFlags(transaction->getID(), HashRouterFlags::BAD);
1214 registry_.getMasterTransaction().canonicalize(&transaction);
1220NetworkOPsImp::processTransaction(
1226 auto ev = m_job_queue.makeLoadEvent(
jtTXN_PROC,
"ProcessTXN");
1229 if (!preProcessTransaction(transaction))
1233 doTransactionSync(transaction, bUnlimited, failType);
1235 doTransactionAsync(transaction, bUnlimited, failType);
1239NetworkOPsImp::doTransactionAsync(
1246 if (transaction->getApplying())
1249 mTransactions.push_back(
TransactionStatus(transaction, bUnlimited,
false, failType));
1250 transaction->setApplying();
1252 if (mDispatchState == DispatchState::none)
1254 if (m_job_queue.addJob(
jtBATCH,
"TxBatchAsync", [
this]() { transactionBatch(); }))
1256 mDispatchState = DispatchState::scheduled;
1262NetworkOPsImp::doTransactionSync(
1269 if (!transaction->getApplying())
1271 mTransactions.push_back(
TransactionStatus(transaction, bUnlimited,
true, failType));
1272 transaction->setApplying();
1276 return transaction->getApplying();
1281NetworkOPsImp::doTransactionSyncBatch(
1287 if (mDispatchState == DispatchState::running)
1296 if (mTransactions.size())
1299 if (m_job_queue.addJob(
jtBATCH,
"TxBatchSync", [
this]() { transactionBatch(); }))
1301 mDispatchState = DispatchState::scheduled;
1305 }
while (retryCallback(lock));
1311 auto ev = m_job_queue.makeLoadEvent(
jtTXN_PROC,
"ProcessTXNSet");
1314 for (
auto const& [_, tx] :
set)
1319 if (transaction->getStatus() ==
INVALID)
1321 if (!reason.
empty())
1323 JLOG(m_journal.trace()) <<
"Exception checking transaction: " << reason;
1325 registry_.getHashRouter().setFlags(tx->getTransactionID(), HashRouterFlags::BAD);
1330 if (!preProcessTransaction(transaction))
1341 for (
auto& transaction : candidates)
1343 if (!transaction->getApplying())
1345 transactions.
emplace_back(transaction,
false,
false, FailHard::no);
1346 transaction->setApplying();
1350 if (mTransactions.empty())
1351 mTransactions.swap(transactions);
1354 mTransactions.reserve(mTransactions.size() + transactions.
size());
1355 for (
auto& t : transactions)
1356 mTransactions.push_back(std::move(t));
1358 if (mTransactions.empty())
1360 JLOG(m_journal.debug()) <<
"No transaction to process!";
1365 XRPL_ASSERT(lock.owns_lock(),
"xrpl::NetworkOPsImp::processTransactionSet has lock");
1366 return std::any_of(mTransactions.begin(), mTransactions.end(), [](
auto const& t) {
1367 return t.transaction->getApplying();
1373NetworkOPsImp::transactionBatch()
1377 if (mDispatchState == DispatchState::running)
1380 while (mTransactions.size())
1391 mTransactions.
swap(transactions);
1392 XRPL_ASSERT(!transactions.
empty(),
"xrpl::NetworkOPsImp::apply : non-empty transactions");
1394 mDispatchState != DispatchState::running,
"xrpl::NetworkOPsImp::apply : is not running");
1396 mDispatchState = DispatchState::running;
1402 bool changed =
false;
1415 if (e.failType == FailHard::yes)
1418 auto const result = registry_.getTxQ().apply(
1419 registry_.app(), view, e.transaction->getSTransaction(), flags, j);
1420 e.result = result.ter;
1421 e.applied = result.applied;
1422 changed = changed || result.applied;
1431 if (
auto const l = m_ledgerMaster.getValidatedLedger())
1432 validatedLedgerIndex = l->header().seq;
1434 auto newOL = registry_.openLedger().current();
1437 e.transaction->clearSubmitResult();
1441 pubProposedTransaction(newOL, e.transaction->getSTransaction(), e.result);
1442 e.transaction->setApplied();
1445 e.transaction->setResult(e.result);
1448 registry_.getHashRouter().setFlags(e.transaction->getID(), HashRouterFlags::BAD);
1457 JLOG(m_journal.info()) <<
"TransactionResult: " << token <<
": " << human;
1462 bool addLocal = e.local;
1466 JLOG(m_journal.debug()) <<
"Transaction is now included in open ledger";
1467 e.transaction->setStatus(
INCLUDED);
1472 auto const& txCur = e.transaction->getSTransaction();
1475 for (
auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
1477 txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
1484 if (t->getApplying())
1486 submit_held.
emplace_back(t,
false,
false, FailHard::no);
1495 JLOG(m_journal.info()) <<
"Transaction is obsolete";
1496 e.transaction->setStatus(
OBSOLETE);
1500 JLOG(m_journal.debug()) <<
"Transaction is likely to claim a"
1501 <<
" fee, but is queued until fee drops";
1503 e.transaction->setStatus(
HELD);
1507 m_ledgerMaster.addHeldTransaction(e.transaction);
1508 e.transaction->setQueued();
1509 e.transaction->setKept();
1513 if (e.failType != FailHard::yes)
1515 auto const lastLedgerSeq =
1516 e.transaction->getSTransaction()->at(~sfLastLedgerSequence);
1517 auto const ledgersLeft = lastLedgerSeq
1518 ? *lastLedgerSeq - m_ledgerMaster.getCurrentLedgerIndex()
1536 if (e.local || (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
1537 registry_.getHashRouter().setFlags(
1538 e.transaction->getID(), HashRouterFlags::HELD))
1541 JLOG(m_journal.debug()) <<
"Transaction should be held: " << e.result;
1542 e.transaction->setStatus(
HELD);
1543 m_ledgerMaster.addHeldTransaction(e.transaction);
1544 e.transaction->setKept();
1547 JLOG(m_journal.debug())
1548 <<
"Not holding transaction " << e.transaction->getID() <<
": "
1549 << (e.local ?
"local" :
"network") <<
", "
1550 <<
"result: " << e.result <<
" ledgers left: "
1551 << (ledgersLeft ? to_string(*ledgersLeft) :
"unspecified");
1556 JLOG(m_journal.debug()) <<
"Status other than success " << e.result;
1557 e.transaction->setStatus(
INVALID);
1560 auto const enforceFailHard = e.failType == FailHard::yes && !
isTesSuccess(e.result);
1562 if (addLocal && !enforceFailHard)
1564 m_localTX->push_back(
1565 m_ledgerMaster.getCurrentLedgerIndex(), e.transaction->getSTransaction());
1566 e.transaction->setKept();
1570 ((mMode != OperatingMode::FULL) && (e.failType != FailHard::yes) && e.local) ||
1574 auto const toSkip = registry_.getHashRouter().shouldRelay(e.transaction->getID());
1575 if (
auto const sttx = *(e.transaction->getSTransaction()); toSkip &&
1582 protocol::TMTransaction tx;
1586 tx.set_rawtransaction(s.
data(), s.
size());
1587 tx.set_status(protocol::tsCURRENT);
1588 tx.set_receivetimestamp(
1589 registry_.timeKeeper().now().time_since_epoch().count());
1592 registry_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1593 e.transaction->setBroadcast();
1597 if (validatedLedgerIndex)
1599 auto [fee, accountSeq, availableSeq] = registry_.getTxQ().getTxRequiredFeeAndSeq(
1600 *newOL, e.transaction->getSTransaction());
1601 e.transaction->setCurrentLedgerState(
1602 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1610 e.transaction->clearApplying();
1612 if (!submit_held.
empty())
1614 if (mTransactions.empty())
1615 mTransactions.swap(submit_held);
1618 mTransactions.reserve(mTransactions.size() + submit_held.
size());
1619 for (
auto& e : submit_held)
1620 mTransactions.push_back(std::move(e));
1626 mDispatchState = DispatchState::none;
1637 auto root = keylet::ownerDir(account);
1638 auto sleNode = lpLedger->read(keylet::page(
root));
1645 for (
auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1647 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1648 XRPL_ASSERT(sleCur,
"xrpl::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1650 switch (sleCur->getType())
1653 if (!jvObjects.
isMember(jss::offers))
1656 jvObjects[jss::offers].
append(sleCur->getJson(JsonOptions::none));
1659 case ltRIPPLE_STATE:
1660 if (!jvObjects.
isMember(jss::ripple_lines))
1665 jvObjects[jss::ripple_lines].
append(sleCur->getJson(JsonOptions::none));
1668 case ltACCOUNT_ROOT:
1673 "xrpl::NetworkOPsImp::getOwnerInfo : invalid "
1680 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1684 sleNode = lpLedger->read(keylet::page(
root, uNodeDir));
1685 XRPL_ASSERT(sleNode,
"xrpl::NetworkOPsImp::getOwnerInfo : read next page");
1698NetworkOPsImp::isBlocked()
1700 return isAmendmentBlocked() || isUNLBlocked();
1704NetworkOPsImp::isAmendmentBlocked()
1706 return amendmentBlocked_;
1710NetworkOPsImp::setAmendmentBlocked()
1712 amendmentBlocked_ =
true;
1713 setMode(OperatingMode::CONNECTED);
1717NetworkOPsImp::isAmendmentWarned()
1719 return !amendmentBlocked_ && amendmentWarned_;
1723NetworkOPsImp::setAmendmentWarned()
1725 amendmentWarned_ =
true;
1729NetworkOPsImp::clearAmendmentWarned()
1731 amendmentWarned_ =
false;
1735NetworkOPsImp::isUNLBlocked()
1741NetworkOPsImp::setUNLBlocked()
1744 setMode(OperatingMode::CONNECTED);
1748NetworkOPsImp::clearUNLBlocked()
1750 unlBlocked_ =
false;
1761 JLOG(m_journal.trace()) <<
"NetworkOPsImp::checkLastClosedLedger";
1763 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1768 uint256 closedLedger = ourClosed->header().hash;
1769 uint256 prevClosedLedger = ourClosed->header().parentHash;
1770 JLOG(m_journal.trace()) <<
"OurClosed: " << closedLedger;
1771 JLOG(m_journal.trace()) <<
"PrevClosed: " << prevClosedLedger;
1776 auto& validations = registry_.getValidations();
1777 JLOG(m_journal.debug()) <<
"ValidationTrie " <<
Json::Compact(validations.getJsonTrie());
1781 peerCounts[closedLedger] = 0;
1782 if (mMode >= OperatingMode::TRACKING)
1783 peerCounts[closedLedger]++;
1785 for (
auto& peer : peerList)
1787 uint256 peerLedger = peer->getClosedLedgerHash();
1790 ++peerCounts[peerLedger];
1793 for (
auto const& it : peerCounts)
1794 JLOG(m_journal.debug()) <<
"L: " << it.first <<
" n=" << it.second;
1796 uint256 preferredLCL = validations.getPreferredLCL(
1798 m_ledgerMaster.getValidLedgerIndex(),
1801 bool switchLedgers = preferredLCL != closedLedger;
1803 closedLedger = preferredLCL;
1805 if (switchLedgers && (closedLedger == prevClosedLedger))
1808 JLOG(m_journal.info()) <<
"We won't switch to our own previous ledger";
1809 networkClosed = ourClosed->header().hash;
1810 switchLedgers =
false;
1813 networkClosed = closedLedger;
1818 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1821 consensus = registry_.getInboundLedgers().acquire(
1822 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1825 (!m_ledgerMaster.canBeCurrent(consensus) ||
1826 !m_ledgerMaster.isCompatible(*consensus, m_journal.debug(),
"Not switching")))
1830 networkClosed = ourClosed->header().hash;
1834 JLOG(m_journal.warn()) <<
"We are not running on the consensus ledger";
1835 JLOG(m_journal.info()) <<
"Our LCL: " << ourClosed->header().hash <<
getJson({*ourClosed, {}});
1836 JLOG(m_journal.info()) <<
"Net LCL " << closedLedger;
1838 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1840 setMode(OperatingMode::CONNECTED);
1848 switchLastClosedLedger(consensus);
1858 JLOG(m_journal.error()) <<
"JUMP last closed ledger to " << newLCL->header().hash;
1860 clearNeedNetworkLedger();
1863 registry_.getTxQ().processClosedLedger(registry_.app(), *newLCL,
true);
1870 auto retries = m_localTX->getTxSet();
1871 auto const lastVal = registry_.getLedgerMaster().getValidatedLedger();
1876 rules.
emplace(registry_.app().config().features);
1877 registry_.openLedger().accept(
1888 return registry_.getTxQ().accept(registry_.app(), view);
1892 m_ledgerMaster.switchLCL(newLCL);
1894 protocol::TMStatusChange s;
1895 s.set_newevent(protocol::neSWITCHED_LEDGER);
1896 s.set_ledgerseq(newLCL->header().seq);
1897 s.set_networktime(registry_.timeKeeper().now().time_since_epoch().count());
1898 s.set_ledgerhashprevious(
1899 newLCL->header().parentHash.begin(), newLCL->header().parentHash.size());
1900 s.set_ledgerhash(newLCL->header().hash.begin(), newLCL->header().hash.size());
1902 registry_.overlay().foreach(
1907NetworkOPsImp::beginConsensus(
1911 XRPL_ASSERT(networkClosed.
isNonZero(),
"xrpl::NetworkOPsImp::beginConsensus : nonzero input");
1913 auto closingInfo = m_ledgerMaster.getCurrentLedger()->header();
1915 JLOG(m_journal.info()) <<
"Consensus time for #" << closingInfo.seq <<
" with LCL "
1916 << closingInfo.parentHash;
1918 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
1923 if (mMode == OperatingMode::FULL)
1925 JLOG(m_journal.warn()) <<
"Don't have LCL, going to tracking";
1926 setMode(OperatingMode::TRACKING);
1927 CLOG(clog) <<
"beginConsensus Don't have LCL, going to tracking. ";
1930 CLOG(clog) <<
"beginConsensus no previous ledger. ";
1935 prevLedger->header().hash == closingInfo.parentHash,
1936 "xrpl::NetworkOPsImp::beginConsensus : prevLedger hash matches "
1939 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->header().hash,
1940 "xrpl::NetworkOPsImp::beginConsensus : closedLedger parent matches "
1943 registry_.validators().setNegativeUNL(prevLedger->negativeUNL());
1944 TrustChanges const changes = registry_.validators().updateTrusted(
1945 registry_.getValidations().getCurrentNodeIDs(),
1946 closingInfo.parentCloseTime,
1948 registry_.overlay(),
1949 registry_.getHashRouter());
1951 if (!changes.
added.empty() || !changes.
removed.empty())
1953 registry_.getValidations().trustChanged(changes.
added, changes.
removed);
1955 registry_.getAmendmentTable().trustChanged(registry_.validators().getQuorumKeys().second);
1958 mConsensus.startRound(
1959 registry_.timeKeeper().closeTime(),
1967 if (mLastConsensusPhase != currPhase)
1969 reportConsensusStateChange(currPhase);
1970 mLastConsensusPhase = currPhase;
1973 JLOG(m_journal.debug()) <<
"Initiating consensus engine";
1980 auto const& peerKey = peerPos.
publicKey();
1981 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
1992 JLOG(m_journal.error()) <<
"Received a proposal signed by MY KEY from a peer. This may "
1993 "indicate a misconfiguration where another node has the same "
1994 "validator key, or may be caused by unusual message routing and "
1999 return mConsensus.peerProposal(registry_.timeKeeper().closeTime(), peerPos);
2010 protocol::TMHaveTransactionSet msg;
2011 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
2012 msg.set_status(protocol::tsHAVE);
2017 mConsensus.gotTxSet(registry_.timeKeeper().closeTime(),
RCLTxSet{map});
2023 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->header().parentHash;
2025 for (
auto const& it : registry_.overlay().getActivePeers())
2027 if (it && (it->getClosedLedgerHash() == deadLedger))
2029 JLOG(m_journal.trace()) <<
"Killing obsolete peer status";
2035 bool ledgerChange = checkLastClosedLedger(registry_.overlay().getActivePeers(), networkClosed);
2037 if (networkClosed.
isZero())
2039 CLOG(clog) <<
"endConsensus last closed ledger is zero. ";
2049 if (((mMode == OperatingMode::CONNECTED) || (mMode == OperatingMode::SYNCING)) && !ledgerChange)
2054 if (!needNetworkLedger_)
2055 setMode(OperatingMode::TRACKING);
2058 if (((mMode == OperatingMode::CONNECTED) || (mMode == OperatingMode::TRACKING)) &&
2064 auto current = m_ledgerMaster.getCurrentLedger();
2065 if (registry_.timeKeeper().now() <
2066 (
current->header().parentCloseTime + 2 *
current->header().closeTimeResolution))
2068 setMode(OperatingMode::FULL);
2072 beginConsensus(networkClosed, clog);
2076NetworkOPsImp::consensusViewChange()
2078 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2080 setMode(OperatingMode::CONNECTED);
2090 if (!mStreamMaps[sManifests].empty())
2094 jvObj[jss::type] =
"manifestReceived";
2100 jvObj[jss::signature] =
strHex(*sig);
2103 jvObj[jss::domain] = mo.
domain;
2106 for (
auto i = mStreamMaps[sManifests].begin(); i != mStreamMaps[sManifests].end();)
2108 if (
auto p = i->second.lock())
2110 p->send(jvObj,
true);
2115 i = mStreamMaps[sManifests].erase(i);
2121NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
2125 : loadFactorServer{loadFeeTrack.getLoadFactor()}
2126 , loadBaseServer{loadFeeTrack.getLoadBase()}
2128 , em{
std::move(escalationMetrics)}
2136 baseFee != b.
baseFee || em.has_value() != b.
em.has_value())
2142 em->minProcessingFeeLevel != b.
em->minProcessingFeeLevel ||
2143 em->openLedgerFeeLevel != b.
em->openLedgerFeeLevel ||
2144 em->referenceFeeLevel != b.
em->referenceFeeLevel);
2177 jvObj[jss::type] =
"serverStatus";
2179 jvObj[jss::load_base] = f.loadBaseServer;
2180 jvObj[jss::load_factor_server] = f.loadFactorServer;
2181 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2186 safe_cast<std::uint64_t>(f.loadFactorServer),
2187 mulDiv(f.em->openLedgerFeeLevel, f.loadBaseServer, f.em->referenceFeeLevel)
2190 jvObj[jss::load_factor] =
trunc32(loadFactor);
2191 jvObj[jss::load_factor_fee_escalation] = f.em->openLedgerFeeLevel.jsonClipped();
2192 jvObj[jss::load_factor_fee_queue] = f.em->minProcessingFeeLevel.jsonClipped();
2193 jvObj[jss::load_factor_fee_reference] = f.em->referenceFeeLevel.jsonClipped();
2196 jvObj[jss::load_factor] = f.loadFactorServer;
2209 p->send(jvObj,
true);
2226 if (!streamMap.empty())
2229 jvObj[jss::type] =
"consensusPhase";
2230 jvObj[jss::consensus] =
to_string(phase);
2232 for (
auto i = streamMap.begin(); i != streamMap.end();)
2234 if (
auto p = i->second.lock())
2236 p->send(jvObj,
true);
2241 i = streamMap.erase(i);
2257 auto const signerPublic = val->getSignerPublic();
2259 jvObj[jss::type] =
"validationReceived";
2261 jvObj[jss::ledger_hash] =
to_string(val->getLedgerHash());
2262 jvObj[jss::signature] =
strHex(val->getSignature());
2263 jvObj[jss::full] = val->isFull();
2264 jvObj[jss::flags] = val->getFlags();
2265 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2266 jvObj[jss::data] =
strHex(val->getSerializer().slice());
2269 if (
auto version = (*val)[~sfServerVersion])
2272 if (
auto cookie = (*val)[~sfCookie])
2275 if (
auto hash = (*val)[~sfValidatedHash])
2276 jvObj[jss::validated_hash] =
strHex(*hash);
2280 if (masterKey != signerPublic)
2285 if (
auto const seq = (*val)[~sfLedgerSequence])
2286 jvObj[jss::ledger_index] = *seq;
2288 if (val->isFieldPresent(sfAmendments))
2291 for (
auto const& amendment : val->getFieldV256(sfAmendments))
2292 jvObj[jss::amendments].append(
to_string(amendment));
2295 if (
auto const closeTime = (*val)[~sfCloseTime])
2296 jvObj[jss::close_time] = *closeTime;
2298 if (
auto const loadFee = (*val)[~sfLoadFee])
2299 jvObj[jss::load_fee] = *loadFee;
2301 if (
auto const baseFee = val->at(~sfBaseFee))
2302 jvObj[jss::base_fee] =
static_cast<double>(*baseFee);
2304 if (
auto const reserveBase = val->at(~sfReserveBase))
2305 jvObj[jss::reserve_base] = *reserveBase;
2307 if (
auto const reserveInc = val->at(~sfReserveIncrement))
2308 jvObj[jss::reserve_inc] = *reserveInc;
2312 if (
auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops); baseFeeXRP && baseFeeXRP->native())
2313 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2315 if (
auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2316 reserveBaseXRP && reserveBaseXRP->native())
2317 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2319 if (
auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2320 reserveIncXRP && reserveIncXRP->native())
2321 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2330 if (jvTx.
isMember(jss::ledger_index))
2332 jvTx[jss::ledger_index] =
std::to_string(jvTx[jss::ledger_index].asUInt());
2338 if (
auto p = i->second.lock())
2342 [&](
Json::Value const& jv) { p->send(jv,
true); });
2362 jvObj[jss::type] =
"peerStatusChange";
2370 p->send(jvObj,
true);
2384 using namespace std::chrono_literals;
2413 JLOG(
m_journal.
trace()) <<
"recvValidation " << val->getLedgerHash() <<
" from " << source;
2428 JLOG(
m_journal.
warn()) <<
"Exception thrown for handling new validation "
2429 << val->getLedgerHash() <<
": " << e.
what();
2433 JLOG(
m_journal.
warn()) <<
"Unknown exception thrown for handling new validation "
2434 << val->getLedgerHash();
2446 ss <<
"VALIDATION: " << val->render() <<
" master_key: ";
2483 "This server is amendment blocked, and must be updated to be "
2484 "able to stay in sync with the network.";
2491 "This server has an expired validator list. validators.txt "
2492 "may be incorrectly configured or some [validator_list_sites] "
2493 "may be unreachable.";
2500 "One or more unsupported amendments have reached majority. "
2501 "Upgrade to the latest version before they are activated "
2502 "to avoid being amendment blocked.";
2506 d[jss::expected_date] = expected->time_since_epoch().count();
2507 d[jss::expected_date_UTC] =
to_string(*expected);
2511 if (warnings.size())
2512 info[jss::warnings] = std::move(warnings);
2531 info[jss::network_ledger] =
"waiting";
2540 info[jss::node_size] =
"tiny";
2543 info[jss::node_size] =
"small";
2546 info[jss::node_size] =
"medium";
2549 info[jss::node_size] =
"large";
2552 info[jss::node_size] =
"huge";
2561 info[jss::validator_list_expires] =
2562 safe_cast<Json::UInt>(when->time_since_epoch().count());
2564 info[jss::validator_list_expires] = 0;
2574 if (*when == TimeKeeper::time_point::max())
2576 x[jss::expiration] =
"never";
2577 x[jss::status] =
"active";
2584 x[jss::status] =
"active";
2586 x[jss::status] =
"expired";
2591 x[jss::status] =
"unknown";
2592 x[jss::expiration] =
"unknown";
2596#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2599#ifdef GIT_COMMIT_HASH
2600 x[jss::hash] = GIT_COMMIT_HASH;
2603 x[jss::branch] = GIT_BRANCH;
2619 info[jss::pubkey_validator] =
"none";
2629 info[jss::counters][jss::nodestore] = nodestore;
2638 info[jss::amendment_blocked] =
true;
2652 lastClose[jss::converge_time_s] =
2660 info[jss::last_close] = lastClose;
2668 info[jss::network_id] =
static_cast<Json::UInt>(*netid);
2677 auto const loadFactorFeeEscalation =
mulDiv(
2678 escalationMetrics.openLedgerFeeLevel,
2680 escalationMetrics.referenceFeeLevel)
2683 auto const loadFactor =
2684 std::max(safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2688 info[jss::load_base] = loadBaseServer;
2689 info[jss::load_factor] =
trunc32(loadFactor);
2690 info[jss::load_factor_server] = loadFactorServer;
2697 info[jss::load_factor_fee_escalation] = escalationMetrics.openLedgerFeeLevel.jsonClipped();
2698 info[jss::load_factor_fee_queue] = escalationMetrics.minProcessingFeeLevel.jsonClipped();
2699 info[jss::load_factor_fee_reference] = escalationMetrics.referenceFeeLevel.jsonClipped();
2703 info[jss::load_factor] =
static_cast<double>(loadFactor) / loadBaseServer;
2705 if (loadFactorServer != loadFactor)
2706 info[jss::load_factor_server] =
static_cast<double>(loadFactorServer) / loadBaseServer;
2711 if (fee != loadBaseServer)
2712 info[jss::load_factor_local] =
static_cast<double>(fee) / loadBaseServer;
2714 if (fee != loadBaseServer)
2715 info[jss::load_factor_net] =
static_cast<double>(fee) / loadBaseServer;
2717 if (fee != loadBaseServer)
2718 info[jss::load_factor_cluster] =
static_cast<double>(fee) / loadBaseServer;
2720 if (escalationMetrics.openLedgerFeeLevel != escalationMetrics.referenceFeeLevel &&
2721 (admin || loadFactorFeeEscalation != loadFactor))
2722 info[jss::load_factor_fee_escalation] =
2723 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2724 escalationMetrics.referenceFeeLevel);
2725 if (escalationMetrics.minProcessingFeeLevel != escalationMetrics.referenceFeeLevel)
2726 info[jss::load_factor_fee_queue] =
2727 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2728 escalationMetrics.referenceFeeLevel);
2741 XRPAmount const baseFee = lpClosed->fees().base;
2743 l[jss::seq] =
Json::UInt(lpClosed->header().seq);
2744 l[jss::hash] =
to_string(lpClosed->header().hash);
2749 l[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
2750 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2751 l[jss::close_time] =
2757 l[jss::reserve_base_xrp] = lpClosed->fees().reserve.decimalXRP();
2758 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2761 std::abs(closeOffset.count()) >= 60)
2762 l[jss::close_time_offset] =
static_cast<std::uint32_t>(closeOffset.count());
2768 l[jss::age] =
Json::UInt(age < highAgeThreshold ? age.count() : 0);
2772 auto lCloseTime = lpClosed->header().closeTime;
2774 if (lCloseTime <= closeTime)
2776 using namespace std::chrono_literals;
2777 auto age = closeTime - lCloseTime;
2778 l[jss::age] =
Json::UInt(age < highAgeThreshold ? age.count() : 0);
2784 info[jss::validated_ledger] = l;
2786 info[jss::closed_ledger] = l;
2790 info[jss::published_ledger] =
"none";
2791 else if (lpPublished->header().seq != lpClosed->header().seq)
2792 info[jss::published_ledger] = lpPublished->header().seq;
2799 info[jss::peer_disconnects_resources] =
2804 "http",
"https",
"peer",
"ws",
"ws2",
"wss",
"wss2"};
2812 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2813 port.admin_user.empty() && port.admin_password.empty()))
2827 for (
auto const& p : proto)
2828 jv[jss::protocol].append(p);
2835 auto const optPort = grpcSection.
get(
"port");
2836 if (optPort && grpcSection.get(
"ip"))
2839 jv[jss::port] = *optPort;
2841 jv[jss::protocol].
append(
"grpc");
2844 info[jss::ports] = std::move(ports);
2889 [&](
Json::Value const& jv) { p->send(jv, true); });
2914 lpAccepted->header().hash, alpAccepted);
2918 alpAccepted->getLedger().
get() == lpAccepted.
get(),
2919 "xrpl::NetworkOPsImp::pubLedger : accepted input");
2922 JLOG(
m_journal.
debug()) <<
"Publishing ledger " << lpAccepted->header().seq <<
" "
2923 << lpAccepted->header().hash;
2931 jvObj[jss::type] =
"ledgerClosed";
2932 jvObj[jss::ledger_index] = lpAccepted->header().seq;
2933 jvObj[jss::ledger_hash] =
to_string(lpAccepted->header().hash);
2934 jvObj[jss::ledger_time] =
2939 if (!lpAccepted->rules().enabled(featureXRPFees))
2941 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
2942 jvObj[jss::reserve_base] = lpAccepted->fees().reserve.jsonClipped();
2943 jvObj[jss::reserve_inc] = lpAccepted->fees().increment.jsonClipped();
2945 jvObj[jss::txn_count] =
Json::UInt(alpAccepted->size());
2958 p->send(jvObj,
true);
2976 p->send(jvObj,
true);
2985 static bool firstTime =
true;
2992 for (
auto& inner : outer.second)
2994 auto& subInfo = inner.second;
2995 if (subInfo.index_->separationLedgerSeq_ == 0)
3006 for (
auto const& accTx : *alpAccepted)
3061 jvObj[jss::type] =
"transaction";
3076 if (
auto const& lookup = ledger->txRead(transaction->getTransactionID());
3077 lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3079 uint32_t
const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
3081 if (transaction->isFieldPresent(sfNetworkID))
3082 netID = transaction->getFieldU32(sfNetworkID);
3086 jvObj[jss::ctid] = *ctid;
3088 if (!ledger->open())
3089 jvObj[jss::ledger_hash] =
to_string(ledger->header().hash);
3093 jvObj[jss::ledger_index] = ledger->header().seq;
3094 jvObj[jss::transaction][jss::date] = ledger->header().closeTime.time_since_epoch().count();
3095 jvObj[jss::validated] =
true;
3096 jvObj[jss::close_time_iso] =
to_string_iso(ledger->header().closeTime);
3102 jvObj[jss::validated] =
false;
3103 jvObj[jss::ledger_current_index] = ledger->header().seq;
3106 jvObj[jss::status] = validated ?
"closed" :
"proposed";
3107 jvObj[jss::engine_result] = sToken;
3108 jvObj[jss::engine_result_code] = result;
3109 jvObj[jss::engine_result_message] = sHuman;
3111 if (transaction->getTxnType() == ttOFFER_CREATE)
3113 auto const account = transaction->getAccountID(sfAccount);
3114 auto const amount = transaction->getFieldAmount(sfTakerGets);
3117 if (account != amount.issue().account)
3119 auto const ownerFunds =
3121 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3130 RPC::insertDeliverMax(jvTx[jss::transaction], transaction->getTxnType(), Version);
3132 if constexpr (Version > 1)
3134 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3135 jvTx[jss::hash] = hash;
3139 jvTx[jss::transaction][jss::hash] = hash;
3152 auto const& stTxn = transaction.
getTxn();
3156 auto const trResult = transaction.
getResult();
3171 [&](
Json::Value const& jv) { p->send(jv, true); });
3188 [&](
Json::Value const& jv) { p->send(jv, true); });
3213 auto const currLedgerSeq = ledger->seq();
3219 for (
auto const& affectedAccount : transaction.
getAffected())
3224 auto it = simiIt->second.begin();
3226 while (it != simiIt->second.end())
3237 it = simiIt->second.erase(it);
3243 auto it = simiIt->second.begin();
3244 while (it != simiIt->second.end())
3255 it = simiIt->second.erase(it);
3262 auto& subs = historyIt->second;
3263 auto it = subs.begin();
3264 while (it != subs.end())
3267 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3281 it = subs.erase(it);
3292 <<
"proposed=" << iProposed <<
", accepted=" << iAccepted;
3294 if (!notify.
empty() || !accountHistoryNotify.
empty())
3296 auto const& stTxn = transaction.
getTxn();
3300 auto const trResult = transaction.
getResult();
3306 isrListener->getApiVersion(),
3307 [&](
Json::Value const& jv) { isrListener->send(jv,
true); });
3311 jvObj.
set(jss::account_history_boundary,
true);
3315 "xrpl::NetworkOPsImp::pubAccountTransaction : "
3316 "account_history_tx_stream not set");
3317 for (
auto& info : accountHistoryNotify)
3319 auto& index = info.index_;
3320 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3321 jvObj.
set(jss::account_history_tx_first,
true);
3323 jvObj.
set(jss::account_history_tx_index, index->forwardTxIndex_++);
3326 info.sink_->getApiVersion(),
3327 [&](
Json::Value const& jv) { info.sink_->send(jv,
true); });
3351 for (
auto const& affectedAccount : tx->getMentionedAccounts())
3356 auto it = simiIt->second.begin();
3358 while (it != simiIt->second.end())
3369 it = simiIt->second.erase(it);
3376 JLOG(
m_journal.
trace()) <<
"pubProposedAccountTransaction: " << iProposed;
3378 if (!notify.
empty() || !accountHistoryNotify.
empty())
3385 isrListener->getApiVersion(),
3386 [&](
Json::Value const& jv) { isrListener->send(jv,
true); });
3390 "xrpl::NetworkOPs::pubProposedAccountTransaction : "
3391 "account_history_tx_stream not set");
3392 for (
auto& info : accountHistoryNotify)
3394 auto& index = info.index_;
3395 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3396 jvObj.
set(jss::account_history_tx_first,
true);
3397 jvObj.
set(jss::account_history_tx_index, index->forwardTxIndex_++);
3399 info.sink_->getApiVersion(),
3400 [&](
Json::Value const& jv) { info.sink_->send(jv,
true); });
3417 for (
auto const& naAccountID : vnaAccountIDs)
3421 isrListener->insertSubAccountInfo(naAccountID, rt);
3426 for (
auto const& naAccountID : vnaAccountIDs)
3428 auto simIterator = subMap.
find(naAccountID);
3429 if (simIterator == subMap.
end())
3433 usisElement[isrListener->getSeq()] = isrListener;
3435 subMap.
insert(simIterator, make_pair(naAccountID, usisElement));
3440 simIterator->second[isrListener->getSeq()] = isrListener;
3451 for (
auto const& naAccountID : vnaAccountIDs)
3454 isrListener->deleteSubAccountInfo(naAccountID, rt);
3471 for (
auto const& naAccountID : vnaAccountIDs)
3473 auto simIterator = subMap.
find(naAccountID);
3475 if (simIterator != subMap.
end())
3478 simIterator->second.erase(uSeq);
3480 if (simIterator->second.empty())
3483 subMap.
erase(simIterator);
3492 enum DatabaseType { Sqlite,
None };
3493 static auto const databaseType = [&]() -> DatabaseType {
3498 return DatabaseType::Sqlite;
3500 return DatabaseType::None;
3503 if (databaseType == DatabaseType::None)
3506 UNREACHABLE(
"xrpl::NetworkOPsImp::addAccountHistoryJob : no database");
3520 auto const& accountId = subInfo.
index_->accountId_;
3521 auto& lastLedgerSeq = subInfo.
index_->historyLastLedgerSeq_;
3522 auto& txHistoryIndex = subInfo.
index_->historyTxIndex_;
3525 <<
" started. lastLedgerSeq=" << lastLedgerSeq;
3535 auto stx = tx->getSTransaction();
3536 if (stx->getAccountID(sfAccount) == accountId && stx->getSeqValue() == 1)
3540 for (
auto& node : meta->getNodes())
3542 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3545 if (node.isFieldPresent(sfNewFields))
3548 dynamic_cast<STObject const*
>(node.peekAtPField(sfNewFields));
3551 if (inner->isFieldPresent(sfAccount) &&
3552 inner->getAccountID(sfAccount) == accountId)
3563 auto send = [&](
Json::Value const& jvObj,
bool unsubscribe) ->
bool {
3566 sptr->send(jvObj,
true);
3575 auto sendMultiApiJson = [&](
MultiApiJson const& jvObj,
bool unsubscribe) ->
bool {
3579 sptr->getApiVersion(),
3580 [&](
Json::Value const& jv) { sptr->send(jv,
true); });
3601 accountId, minLedger, maxLedger, marker, 0,
true};
3602 return db->newestAccountTxPage(options);
3607 "xrpl::NetworkOPsImp::addAccountHistoryJob : "
3608 "getMoreTxns : invalid database type");
3618 while (lastLedgerSeq >= 2 && !subInfo.
index_->stopHistorical_)
3620 int feeChargeCount = 0;
3629 <<
"AccountHistory job for account " <<
toBase58(accountId)
3630 <<
" no InfoSub. Fee charged " << feeChargeCount <<
" times.";
3635 auto startLedgerSeq = (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3637 <<
", working on ledger range [" << startLedgerSeq <<
","
3638 << lastLedgerSeq <<
"]";
3640 auto haveRange = [&]() ->
bool {
3643 auto haveSomeValidatedLedgers =
3646 return haveSomeValidatedLedgers && validatedMin <= startLedgerSeq &&
3647 lastLedgerSeq <= validatedMax;
3652 JLOG(
m_journal.
debug()) <<
"AccountHistory reschedule job for account "
3653 <<
toBase58(accountId) <<
", incomplete ledger range ["
3654 << startLedgerSeq <<
"," << lastLedgerSeq <<
"]";
3660 while (!subInfo.
index_->stopHistorical_)
3662 auto dbResult = getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3667 "xrpl::NetworkOPsImp::addAccountHistoryJob : "
3668 "getMoreTxns failed");
3670 <<
toBase58(accountId) <<
" getMoreTxns failed.";
3676 auto const& txns = dbResult->first;
3677 marker = dbResult->second;
3678 size_t num_txns = txns.size();
3679 for (
size_t i = 0; i < num_txns; ++i)
3681 auto const& [tx, meta] = txns[i];
3686 <<
toBase58(accountId) <<
" empty tx or meta.";
3696 "xrpl::NetworkOPsImp::addAccountHistoryJob : "
3697 "getLedgerBySeq failed");
3699 <<
toBase58(accountId) <<
" no ledger.";
3709 "NetworkOPsImp::addAccountHistoryJob : "
3710 "getSTransaction failed");
3712 <<
"AccountHistory job for account " <<
toBase58(accountId)
3713 <<
" getSTransaction failed.";
3720 auto const trR = meta->getResultTER();
3723 jvTx.
set(jss::account_history_tx_index, txHistoryIndex--);
3724 if (i + 1 == num_txns || txns[i + 1].first->getLedger() != tx->getLedger())
3725 jvTx.
set(jss::account_history_boundary,
true);
3727 if (isFirstTx(tx, meta))
3729 jvTx.
set(jss::account_history_tx_first,
true);
3730 sendMultiApiJson(jvTx,
false);
3733 <<
"AccountHistory job for account " <<
toBase58(accountId)
3734 <<
" done, found last tx.";
3739 sendMultiApiJson(jvTx,
false);
3746 <<
"AccountHistory job for account " <<
toBase58(accountId)
3747 <<
" paging, marker=" << marker->ledgerSeq <<
":" << marker->txnSeq;
3755 if (!subInfo.
index_->stopHistorical_)
3757 lastLedgerSeq = startLedgerSeq - 1;
3758 if (lastLedgerSeq <= 1)
3761 <<
"AccountHistory job for account " <<
toBase58(accountId)
3762 <<
" done, reached genesis ledger.";
3775 subInfo.
index_->separationLedgerSeq_ = ledger->seq();
3776 auto const& accountId = subInfo.
index_->accountId_;
3778 if (!ledger->exists(accountKeylet))
3781 <<
", no need to add AccountHistory job.";
3786 if (
auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3788 if (sleAcct->getFieldU32(sfSequence) == 1)
3791 <<
"subAccountHistoryStart, genesis account " <<
toBase58(accountId)
3792 <<
" does not have tx, no need to add AccountHistory job.";
3800 "xrpl::NetworkOPsImp::subAccountHistoryStart : failed to "
3801 "access genesis account");
3806 subInfo.
index_->historyLastLedgerSeq_ = ledger->seq();
3807 subInfo.
index_->haveHistorical_ =
true;
3809 JLOG(
m_journal.
debug()) <<
"subAccountHistoryStart, add AccountHistory job: accountId="
3810 <<
toBase58(accountId) <<
", currentLedgerSeq=" << ledger->seq();
3818 if (!isrListener->insertSubAccountHistory(accountId))
3820 JLOG(
m_journal.
debug()) <<
"subAccountHistory, already subscribed to account "
3831 inner.
emplace(isrListener->getSeq(), ahi);
3836 simIterator->second.emplace(isrListener->getSeq(), ahi);
3849 JLOG(
m_journal.
debug()) <<
"subAccountHistory, no validated ledger yet, delay start";
3862 isrListener->deleteSubAccountHistory(account);
3876 auto& subInfoMap = simIterator->second;
3877 auto subInfoIter = subInfoMap.find(seq);
3878 if (subInfoIter != subInfoMap.end())
3880 subInfoIter->second.index_->stopHistorical_ =
true;
3885 simIterator->second.erase(seq);
3886 if (simIterator->second.empty())
3892 <<
", historyOnly = " << (historyOnly ?
"true" :
"false");
3900 listeners->addSubscriber(isrListener);
3904 UNREACHABLE(
"xrpl::NetworkOPsImp::subBook : null book listeners");
3914 listeners->removeSubscriber(uSeq);
3924 XRPL_ASSERT(
m_standalone,
"xrpl::NetworkOPsImp::acceptLedger : is standalone");
3927 Throw<std::runtime_error>(
"Operation only possible in STANDALONE mode.");
3942 jvResult[jss::ledger_index] = lpClosed->header().seq;
3943 jvResult[jss::ledger_hash] =
to_string(lpClosed->header().hash);
3944 jvResult[jss::ledger_time] =
3946 if (!lpClosed->rules().enabled(featureXRPFees))
3948 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
3949 jvResult[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
3950 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
4016 jvResult[jss::random] =
to_string(uRandom);
4018 jvResult[jss::load_base] = feeTrack.getLoadBase();
4019 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4020 jvResult[jss::hostid] =
getHostId(admin);
4021 jvResult[jss::pubkey_node] =
4158 if (map.find(pInfo->getSeq()) != map.end())
4165#ifndef USE_NEW_BOOK_PAGE
4176 unsigned int iLimit,
4185 uint256 uTipIndex = uBookBase;
4189 stream <<
"getBookPage:" << book;
4190 stream <<
"getBookPage: uBookBase=" << uBookBase;
4191 stream <<
"getBookPage: uBookEnd=" << uBookEnd;
4192 stream <<
"getBookPage: uTipIndex=" << uTipIndex;
4197 bool const bGlobalFreeze =
4201 bool bDirectAdvance =
true;
4205 unsigned int uBookEntry;
4211 while (!bDone && iLimit-- > 0)
4215 bDirectAdvance =
false;
4219 auto const ledgerIndex = view.
succ(uTipIndex, uBookEnd);
4223 sleOfferDir.
reset();
4232 uTipIndex = sleOfferDir->key();
4235 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4237 JLOG(
m_journal.
trace()) <<
"getBookPage: uTipIndex=" << uTipIndex;
4238 JLOG(
m_journal.
trace()) <<
"getBookPage: offerIndex=" << offerIndex;
4248 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4249 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4250 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4252 bool firstOwnerOffer(
true);
4258 saOwnerFunds = saTakerGets;
4260 else if (bGlobalFreeze)
4268 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4269 if (umBalanceEntry != umBalance.
end())
4273 saOwnerFunds = umBalanceEntry->second;
4274 firstOwnerOffer =
false;
4288 if (saOwnerFunds < beast::zero)
4292 saOwnerFunds.
clear();
4300 STAmount saOwnerFundsLimit = saOwnerFunds;
4312 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4315 if (saOwnerFundsLimit >= saTakerGets)
4318 saTakerGetsFunded = saTakerGets;
4324 saTakerGetsFunded = saOwnerFundsLimit;
4326 saTakerGetsFunded.
setJson(jvOffer[jss::taker_gets_funded]);
4328 saTakerPays,
multiply(saTakerGetsFunded, saDirRate, saTakerPays.
issue()))
4329 .setJson(jvOffer[jss::taker_pays_funded]);
4336 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4340 jvOf[jss::quality] = saDirRate.
getText();
4342 if (firstOwnerOffer)
4343 jvOf[jss::owner_funds] = saOwnerFunds.
getText();
4350 if (!
cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4352 bDirectAdvance =
true;
4356 JLOG(
m_journal.
trace()) <<
"getBookPage: offerIndex=" << offerIndex;
4376 unsigned int iLimit,
4384 MetaView lesActive(lpLedger,
tapNONE,
true);
4385 OrderBookIterator obIterator(lesActive, book);
4389 bool const bGlobalFreeze =
4392 while (iLimit-- > 0 && obIterator.nextOffer())
4397 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4398 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4399 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4400 STAmount saDirRate = obIterator.getCurrentRate();
4406 saOwnerFunds = saTakerGets;
4408 else if (bGlobalFreeze)
4416 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4418 if (umBalanceEntry != umBalance.
end())
4422 saOwnerFunds = umBalanceEntry->second;
4428 saOwnerFunds = lesActive.accountHolds(
4431 if (saOwnerFunds.isNegative())
4435 saOwnerFunds.zero();
4442 STAmount saTakerGetsFunded;
4443 STAmount saOwnerFundsLimit = saOwnerFunds;
4455 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4458 if (saOwnerFundsLimit >= saTakerGets)
4461 saTakerGetsFunded = saTakerGets;
4466 saTakerGetsFunded = saOwnerFundsLimit;
4468 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4472 std::min(saTakerPays,
multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4473 .setJson(jvOffer[jss::taker_pays_funded]);
4476 STAmount saOwnerPays = (
parityRate == offerRate)
4478 :
std::
min(saOwnerFunds,
multiply(saTakerGetsFunded, offerRate));
4480 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4482 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4486 jvOf[jss::quality] = saDirRate.
getText();
4501 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4534 ++counters_[
static_cast<std::size_t>(om)].transitions;
4538 std::chrono::duration_cast<std::chrono::microseconds>(now - processStart_).count();
4541 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4550 auto [counters, mode, start, initialSync] = getCounterData();
4551 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4561 auto& state = obj[jss::state_accounting][
states_[i]];
4562 state[jss::transitions] =
std::to_string(counters[i].transitions);
4563 state[jss::duration_us] =
std::to_string(counters[i].dur.count());
4567 obj[jss::initial_sync_duration_us] =
std::to_string(initialSync);
4582 boost::asio::io_context& io_svc,
T back_inserter(T... args)
Decorator for streaming out compact json.
Lightweight wrapper to tag static string.
Value & append(Value const &value)
Append value to array at the end.
bool isMember(char const *key) const
Return true if the object has a member named key.
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
A generic endpoint for log messages.
Stream trace() const
Severity stream access functions.
A metric for measuring an integral value.
void set(value_type value) const
Set the value on the gauge.
A reference to a handler for performing polled collection.
A transaction that is in a closed ledger.
TxMeta const & getMeta() const
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual std::chrono::milliseconds getIOLatency()=0
virtual Config & config()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::uint32_t getLoadFee() const
NetClock::time_point getReportTime() const
PublicKey const & identity() const
std::string const & name() const
std::size_t size() const
The number of nodes in the cluster list.
std::string SERVER_DOMAIN
int RELAY_UNTRUSTED_VALIDATIONS
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
virtual Json::Value getInfo()=0
virtual void clearFailures()=0
std::shared_ptr< InfoSub > pointer
A pool of threads to perform work.
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Json::Value getJson(int c=0)
std::chrono::seconds getValidatedLedgerAge()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
bool haveValidated()
Whether we have ever fully validated a ledger.
std::size_t getFetchPackCacheSize() const
std::shared_ptr< Ledger const > getClosedLedger()
std::string getCompleteLedgers()
std::shared_ptr< Ledger const > getValidatedLedger()
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< ReadView const > getCurrentLedger()
Manages the current fee schedule.
std::uint32_t getClusterFee() const
std::uint32_t getLocalFee() const
std::uint32_t getRemoteFee() const
std::uint32_t getLoadFactor() const
std::uint32_t getLoadBase() const
void heartbeat()
Reset the stall detection timer.
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
virtual std::uint32_t getNetworkID() const noexcept=0
Get the configured network ID.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
void json(Json::Value &obj) const
Output state counters in JSON format.
std::chrono::steady_clock::time_point const processStart_
static std::array< Json::StaticString const, 5 > const states_
CounterData getCounterData() const
std::uint64_t initialSyncUs_
std::array< Counters, 5 > counters_
void mode(OperatingMode om)
Record state transition.
std::chrono::steady_clock::time_point start_
Transaction with input flags and results to be applied in batches.
std::shared_ptr< Transaction > const transaction
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
std::string getHostId(bool forAdmin)
void reportConsensusStateChange(ConsensusPhase phase)
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void clearNeedNetworkLedger() override
ServerFeeSummary mLastFeeSummary
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
DispatchState mDispatchState
std::size_t const minPeerCount_
static std::array< char const *, 5 > const states_
std::set< uint256 > pendingValidations_
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
bool unsubManifests(std::uint64_t uListener) override
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool subManifests(InfoSub::ref ispListener) override
void stateAccounting(Json::Value &obj) override
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
SubInfoMapType mSubRTAccount
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
void transactionBatch()
Apply transactions in batches.
bool unsubRTTransactions(std::uint64_t uListener) override
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
bool processTrustedProposal(RCLCxPeerPos proposal) override
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
void pubValidation(std::shared_ptr< STValidation > const &val) override
bool subBook(InfoSub::ref ispListener, Book const &) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
std::atomic< OperatingMode > mMode
void setMode(OperatingMode om) override
void setAmendmentBlocked() override
void pubConsensus(ConsensusPhase phase)
std::recursive_mutex mSubLock
bool isNeedNetworkLedger() override
DispatchState
Synchronization states for transaction batches.
std::atomic< bool > needNetworkLedger_
boost::asio::steady_timer heartbeatTimer_
bool subConsensus(InfoSub::ref ispListener) override
bool unsubBook(std::uint64_t uListener, Book const &) override
bool unsubLedger(std::uint64_t uListener) override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
std::optional< PublicKey > const validatorPK_
std::atomic< bool > amendmentBlocked_
void clearAmendmentWarned() override
void updateLocalTx(ReadView const &view) override
void clearLedgerFetch() override
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool isAmendmentBlocked() override
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::unique_ptr< LocalTxs > m_localTX
void setStandAlone() override
void setNeedNetworkLedger() override
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
bool unsubServer(std::uint64_t uListener) override
SubAccountHistoryMapType mSubAccountHistory
void processClusterTimer()
bool unsubConsensus(std::uint64_t uListener) override
std::condition_variable mCond
void pubManifest(Manifest const &) override
void consensusViewChange() override
ServiceRegistry & registry_
boost::asio::steady_timer accountHistoryTxTimer_
Json::Value getConsensusInfo() override
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void setUNLBlocked() override
bool unsubValidations(std::uint64_t uListener) override
bool subPeerStatus(InfoSub::ref ispListener) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
ConsensusPhase mLastConsensusPhase
OperatingMode getOperatingMode() const override
std::optional< PublicKey > const validatorMasterPK_
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
std::vector< TransactionStatus > mTransactions
bool tryRemoveRpcSub(std::string const &strUrl) override
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void processHeartbeatTimer()
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void submitTransaction(std::shared_ptr< STTx const > const &) override
void setAmendmentWarned() override
LedgerMaster & m_ledgerMaster
Json::Value getServerInfo(bool human, bool admin, bool counters) override
StateAccounting accounting_
bool subValidations(InfoSub::ref ispListener) override
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool subRTTransactions(InfoSub::ref ispListener) override
std::atomic< bool > unlBlocked_
bool unsubBookChanges(std::uint64_t uListener) override
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
void setStateTimer() override
Called to initially start our timers.
std::size_t getLocalTxCount() override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
bool unsubTransactions(std::uint64_t uListener) override
bool isAmendmentWarned() override
bool subTransactions(InfoSub::ref ispListener) override
std::mutex validationsMutex_
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
SubInfoMapType mSubAccount
void clearUNLBlocked() override
bool isUNLBlocked() override
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
std::atomic< bool > amendmentWarned_
boost::asio::steady_timer clusterTimer_
NetworkOPsImp(ServiceRegistry ®istry, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
bool unsubPeerStatus(std::uint64_t uListener) override
void reportFeeChange() override
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
bool isBlocked() override
~NetworkOPsImp() override
Json::Value getLedgerFetchInfo() override
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool subBookChanges(InfoSub::ref ispListener) override
Provides server functionality for clients.
void getCountsJson(Json::Value &obj)
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Writable ledger view that accumulates state and tx changes.
virtual void processTxn(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &alTx, MultiApiJson const &jvObj)=0
virtual BookListeners::pointer makeBookListeners(Book const &)=0
virtual BookListeners::pointer getBookListeners(Book const &)=0
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnectCharges() const =0
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
Manages the generic consensus algorithm for use by the RCL.
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
Json::Value getJson(bool full) const
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
A peer's signed, proposed position for use in RCLConsensus.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Represents a set of transactions in RCLConsensus.
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Collects logging information.
std::unique_ptr< std::stringstream > const & ss()
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
Issue const & issue() const
std::string getText() const override
void setJson(Json::Value &) const
std::optional< T > get(std::string const &name) const
std::size_t size() const noexcept
void const * data() const noexcept
void setup(Setup const &setup, beast::Journal journal)
Service registry for dependency injection.
virtual perf::PerfLog & getPerfLog()=0
virtual JobQueue & getJobQueue()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual ValidatorList & validators()=0
virtual Overlay & overlay()=0
virtual NodeStore::Database & getNodeStore()=0
virtual NetworkIDService & getNetworkIDService()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual ServerHandler & getServerHandler()=0
virtual OpenLedger & openLedger()=0
virtual Cluster & cluster()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual LedgerMaster & getLedgerMaster()=0
virtual TimeKeeper & timeKeeper()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual ManifestCache & validatorManifests()=0
virtual beast::Journal journal(std::string const &name)=0
virtual Application & app()=0
time_point now() const override
Returns the current time, using the server's clock.
std::chrono::seconds closeOffset() const
time_point closeTime() const
Returns the predicted close time, in network time.
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Validator keys and manifest as set in configuration file.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::size_t quorum() const
Get quorum value for current trusted key set.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
Json::Value jsonClipped() const
constexpr double decimalXRP() const
static constexpr std::size_t size()
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
T emplace_back(T... args)
@ arrayValue
array value (ordered list)
@ objectValue
object value (collection of name/value pairs).
void rngfill(void *const buffer, std::size_t const bytes, Generator &g)
std::string const & getVersionString()
Server version.
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Encodes ledger sequence, transaction index, and network ID into a CTID string.
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
void insertNFTSyntheticInJson(Json::Value &, std::shared_ptr< STTx const > const &, TxMeta const &)
Adds common synthetic fields to transaction-related JSON responses.
Charge const feeMediumBurdenRPC
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Keylet account(AccountID const &id) noexcept
AccountID root.
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Rate rate(Env &env, Account const &account, std::uint32_t const &seq)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
STAmount divide(STAmount const &amount, Rate const &rate)
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
bool isTerRetry(TER x) noexcept
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
constexpr std::uint32_t tfInnerBatchTxn
std::string to_string(base_uint< Bits, Tag > const &a)
std::string strHex(FwdIt begin, FwdIt end)
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules)
Checks transaction signature and local checks.
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
std::uint64_t getQuality(uint256 const &uBase)
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
FeeSetup setup_FeeVote(Section const &section)
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j, SpendableHandling includeFullBalance=shSIMPLE_BALANCE)
Number root(Number f, unsigned d)
bool transResultInfo(TER code, std::string &token, std::string &text)
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
STAmount multiply(STAmount const &amount, Rate const &rate)
static auto const genesisAccountId
void forAllApiVersions(Fn const &fn, Args &&... args)
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
std::unique_ptr< NetworkOPs > make_NetworkOPs(ServiceRegistry &registry, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ current
This was a new validation and was added.
constexpr std::size_t maxPoppedTransactions
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
STAmount amountFromQuality(std::uint64_t rate)
bool isTefFailure(TER x) noexcept
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
auto constexpr muldiv_max
uint256 getQualityNext(uint256 const &uBase)
ConsensusPhase
Phases of consensus for a single ledger round.
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
AccountID calcAccountID(PublicKey const &pk)
uint256 getBookBase(Book const &book)
Json::Value rpcError(error_code_i iError)
std::string to_string_iso(date::sys_time< Duration > tp)
std::unique_ptr< LocalTxs > make_LocalTxs()
bool isTelLocal(TER x) noexcept
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
bool isTesSuccess(TER x) noexcept
static std::uint32_t trunc32(std::uint64_t v)
static std::array< char const *, 5 > const stateNames
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
OperatingMode
Specifies the mode under which the server believes it's operating.
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
bool isTemMalformed(TER x) noexcept
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
@ warnRPC_AMENDMENT_BLOCKED
@ warnRPC_UNSUPPORTED_MAJORITY
@ warnRPC_EXPIRED_VALIDATOR_LIST
T set_intersection(T... args)
PublicKey masterKey
The master key associated with this manifest.
std::string serialized
The manifest in serialized form.
Blob getMasterSignature() const
Returns manifest master key signature.
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
std::uint32_t sequence
The sequence number of this manifest.
Server fees published on server subscription.
std::optional< TxQ::Metrics > em
bool operator!=(ServerFeeSummary const &b) const
bool operator==(ServerFeeSummary const &b) const
std::uint32_t loadBaseServer
ServerFeeSummary()=default
std::uint32_t loadFactorServer
decltype(initialSyncUs_) initialSyncUs
decltype(counters_) counters
std::chrono::microseconds dur
std::uint64_t transitions
beast::insight::Gauge syncing_duration
beast::insight::Gauge tracking_duration
beast::insight::Gauge connected_duration
beast::insight::Gauge tracking_transitions
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
beast::insight::Gauge connected_transitions
beast::insight::Gauge full_transitions
beast::insight::Gauge disconnected_duration
beast::insight::Gauge syncing_transitions
beast::insight::Gauge disconnected_transitions
beast::insight::Gauge full_duration
beast::insight::Hook hook
std::int32_t historyTxIndex_
AccountID const accountId_
std::uint32_t forwardTxIndex_
std::uint32_t separationLedgerSeq_
std::uint32_t historyLastLedgerSeq_
SubAccountHistoryIndex(AccountID const &accountId)
std::atomic< bool > stopHistorical_
std::shared_ptr< SubAccountHistoryIndex > index_
std::shared_ptr< SubAccountHistoryIndex > index_
Represents a transfer rate.
Data format for exchanging consumption information across peers.
std::vector< Item > items
Changes in trusted nodes after updating validator list.
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
void set(char const *key, auto const &v)
IsMemberResult isMember(char const *key) const
Select all peers (except optional excluded) that are in our cluster.
Sends a message to all peers.
T time_since_epoch(T... args)