20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/main/Tuning.h>
32#include <xrpld/app/misc/AmendmentTable.h>
33#include <xrpld/app/misc/DeliverMax.h>
34#include <xrpld/app/misc/HashRouter.h>
35#include <xrpld/app/misc/LoadFeeTrack.h>
36#include <xrpld/app/misc/NetworkOPs.h>
37#include <xrpld/app/misc/Transaction.h>
38#include <xrpld/app/misc/TxQ.h>
39#include <xrpld/app/misc/ValidatorKeys.h>
40#include <xrpld/app/misc/ValidatorList.h>
41#include <xrpld/app/misc/detail/AccountTxPaging.h>
42#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
43#include <xrpld/app/tx/apply.h>
44#include <xrpld/consensus/Consensus.h>
45#include <xrpld/consensus/ConsensusParms.h>
46#include <xrpld/overlay/Cluster.h>
47#include <xrpld/overlay/Overlay.h>
48#include <xrpld/overlay/predicates.h>
49#include <xrpld/perflog/PerfLog.h>
50#include <xrpld/rpc/BookChanges.h>
51#include <xrpld/rpc/CTID.h>
52#include <xrpld/rpc/DeliveredAmount.h>
53#include <xrpld/rpc/MPTokenIssuanceID.h>
54#include <xrpld/rpc/ServerHandler.h>
56#include <xrpl/basics/UptimeClock.h>
57#include <xrpl/basics/mulDiv.h>
58#include <xrpl/basics/safe_cast.h>
59#include <xrpl/basics/scope.h>
60#include <xrpl/beast/utility/rngfill.h>
61#include <xrpl/crypto/RFC1751.h>
62#include <xrpl/crypto/csprng.h>
63#include <xrpl/protocol/BuildInfo.h>
64#include <xrpl/protocol/Feature.h>
65#include <xrpl/protocol/MultiApiJson.h>
66#include <xrpl/protocol/RPCErr.h>
67#include <xrpl/protocol/TxFlags.h>
68#include <xrpl/protocol/jss.h>
69#include <xrpl/resource/Fees.h>
70#include <xrpl/resource/ResourceManager.h>
72#include <boost/asio/ip/host_name.hpp>
73#include <boost/asio/steady_timer.hpp>
112 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
155 std::chrono::steady_clock::time_point
start_ =
216 return !(*
this != b);
235 boost::asio::io_service& io_svc,
249 app_.logs().journal(
"FeeVote")),
252 app.getInboundTransactions(),
253 beast::get_abstract_clock<
std::chrono::steady_clock>(),
255 app_.logs().journal(
"LedgerConsensus"))
257 validatorKeys.keys ? validatorKeys.keys->publicKey
260 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
455 getServerInfo(
bool human,
bool admin,
bool counters)
override;
482 TER result)
override;
516 bool historyOnly)
override;
522 bool historyOnly)
override;
590 boost::system::error_code ec;
595 <<
"NetworkOPs: heartbeatTimer cancel error: "
604 <<
"NetworkOPs: clusterTimer cancel error: "
613 <<
"NetworkOPs: accountHistoryTxTimer cancel error: "
618 using namespace std::chrono_literals;
628 boost::asio::steady_timer& timer,
811 template <
class Handler>
813 Handler
const& handler,
815 :
hook(collector->make_hook(handler))
818 "Disconnected_duration"))
821 "Connected_duration"))
823 collector->make_gauge(
"State_Accounting",
"Syncing_duration"))
826 "Tracking_duration"))
828 collector->make_gauge(
"State_Accounting",
"Full_duration"))
831 "Disconnected_transitions"))
834 "Connected_transitions"))
837 "Syncing_transitions"))
840 "Tracking_transitions"))
842 collector->make_gauge(
"State_Accounting",
"Full_transitions"))
871 {
"disconnected",
"connected",
"syncing",
"tracking",
"full"}};
933 static std::string const hostname = boost::asio::ip::host_name();
940 static std::string const shroudedHostId = [
this]() {
946 return shroudedHostId;
961 boost::asio::steady_timer& timer,
968 [
this, onExpire, onError](boost::system::error_code
const& e) {
969 if ((e.value() == boost::system::errc::success) &&
970 (!m_job_queue.isStopped()))
975 if (e.value() != boost::system::errc::success &&
976 e.value() != boost::asio::error::operation_aborted)
979 JLOG(m_journal.error())
980 <<
"Timer got error '" << e.message()
981 <<
"'. Restarting timer.";
986 timer.expires_from_now(expiry_time);
987 timer.async_wait(std::move(*optionalCountedHandler));
992NetworkOPsImp::setHeartbeatTimer()
996 mConsensus.parms().ledgerGRANULARITY,
998 m_job_queue.addJob(jtNETOP_TIMER,
"NetOPs.heartbeat", [this]() {
999 processHeartbeatTimer();
1002 [
this]() { setHeartbeatTimer(); });
1006NetworkOPsImp::setClusterTimer()
1008 using namespace std::chrono_literals;
1015 processClusterTimer();
1018 [
this]() { setClusterTimer(); });
1024 JLOG(m_journal.debug()) <<
"Scheduling AccountHistory job for account "
1026 using namespace std::chrono_literals;
1028 accountHistoryTxTimer_,
1030 [
this, subInfo]() { addAccountHistoryJob(subInfo); },
1031 [
this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1035NetworkOPsImp::processHeartbeatTimer()
1038 "Heartbeat Timer", mConsensus.validating(), m_journal);
1046 std::size_t const numPeers = app_.overlay().size();
1049 if (numPeers < minPeerCount_)
1051 if (mMode != OperatingMode::DISCONNECTED)
1053 setMode(OperatingMode::DISCONNECTED);
1055 ss <<
"Node count (" << numPeers <<
") has fallen "
1056 <<
"below required minimum (" << minPeerCount_ <<
").";
1057 JLOG(m_journal.warn()) << ss.
str();
1058 CLOG(clog.
ss()) <<
"set mode to DISCONNECTED: " << ss.
str();
1063 <<
"already DISCONNECTED. too few peers (" << numPeers
1064 <<
"), need at least " << minPeerCount_;
1071 setHeartbeatTimer();
1076 if (mMode == OperatingMode::DISCONNECTED)
1078 setMode(OperatingMode::CONNECTED);
1079 JLOG(m_journal.info())
1080 <<
"Node count (" << numPeers <<
") is sufficient.";
1081 CLOG(clog.
ss()) <<
"setting mode to CONNECTED based on " << numPeers
1087 auto origMode = mMode.load();
1088 CLOG(clog.
ss()) <<
"mode: " << strOperatingMode(origMode,
true);
1089 if (mMode == OperatingMode::SYNCING)
1090 setMode(OperatingMode::SYNCING);
1091 else if (mMode == OperatingMode::CONNECTED)
1092 setMode(OperatingMode::CONNECTED);
1093 auto newMode = mMode.load();
1094 if (origMode != newMode)
1097 <<
", changing to " << strOperatingMode(newMode,
true);
1099 CLOG(clog.
ss()) <<
". ";
1102 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.
ss());
1104 CLOG(clog.
ss()) <<
"consensus phase " << to_string(mLastConsensusPhase);
1106 if (mLastConsensusPhase != currPhase)
1108 reportConsensusStateChange(currPhase);
1109 mLastConsensusPhase = currPhase;
1110 CLOG(clog.
ss()) <<
" changed to " << to_string(mLastConsensusPhase);
1112 CLOG(clog.
ss()) <<
". ";
1114 setHeartbeatTimer();
1118NetworkOPsImp::processClusterTimer()
1120 if (app_.cluster().size() == 0)
1123 using namespace std::chrono_literals;
1125 bool const update = app_.cluster().update(
1126 app_.nodeIdentity().first,
1128 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1129 ? app_.getFeeTrack().getLocalFee()
1131 app_.timeKeeper().now());
1135 JLOG(m_journal.debug()) <<
"Too soon to send cluster update";
1140 protocol::TMCluster cluster;
1141 app_.cluster().for_each([&cluster](
ClusterNode const& node) {
1142 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1147 n.set_nodename(node.
name());
1151 for (
auto& item : gossip.
items)
1153 protocol::TMLoadSource& node = *cluster.add_loadsources();
1154 node.set_name(to_string(item.address));
1155 node.set_cost(item.balance);
1157 app_.overlay().foreach(
send_if(
1158 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1169 if (mode == OperatingMode::FULL && admin)
1171 auto const consensusMode = mConsensus.mode();
1172 if (consensusMode != ConsensusMode::wrongLedger)
1174 if (consensusMode == ConsensusMode::proposing)
1177 if (mConsensus.validating())
1178 return "validating";
1188 if (isNeedNetworkLedger())
1196 m_ledgerMaster.getValidatedRules().enabled(featureBatch))
1198 JLOG(m_journal.error())
1199 <<
"Submitted transaction invalid: tfInnerBatchTxn flag present.";
1206 auto const txid = trans->getTransactionID();
1207 auto const flags = app_.getHashRouter().getFlags(txid);
1209 if ((flags & SF_BAD) != 0)
1211 JLOG(m_journal.warn()) <<
"Submitted transaction cached bad";
1218 app_.getHashRouter(),
1220 m_ledgerMaster.getValidatedRules(),
1223 if (validity != Validity::Valid)
1225 JLOG(m_journal.warn())
1226 <<
"Submitted transaction invalid: " << reason;
1232 JLOG(m_journal.warn())
1233 <<
"Exception checking transaction " << txid <<
": " << ex.
what();
1240 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1242 m_job_queue.addJob(
jtTRANSACTION,
"submitTxn", [
this, tx]() {
1244 processTransaction(t,
false,
false, FailHard::no);
1251 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1253 if ((newFlags & SF_BAD) != 0)
1256 JLOG(m_journal.warn()) << transaction->getID() <<
": cached bad!\n";
1257 transaction->setStatus(
INVALID);
1262 auto const view = m_ledgerMaster.getCurrentLedger();
1267 auto const sttx = *transaction->getSTransaction();
1268 if (sttx.isFlag(
tfInnerBatchTxn) && view->rules().enabled(featureBatch))
1270 transaction->setStatus(
INVALID);
1272 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1279 auto const [validity, reason] =
1280 checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config());
1282 validity == Validity::Valid,
1283 "ripple::NetworkOPsImp::processTransaction : valid validity");
1286 if (validity == Validity::SigBad)
1288 JLOG(m_journal.info()) <<
"Transaction has bad signature: " << reason;
1289 transaction->setStatus(
INVALID);
1291 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1296 app_.getMasterTransaction().canonicalize(&transaction);
1302NetworkOPsImp::processTransaction(
1308 auto ev = m_job_queue.makeLoadEvent(
jtTXN_PROC,
"ProcessTXN");
1311 if (!preProcessTransaction(transaction))
1315 doTransactionSync(transaction, bUnlimited, failType);
1317 doTransactionAsync(transaction, bUnlimited, failType);
1321NetworkOPsImp::doTransactionAsync(
1328 if (transaction->getApplying())
1331 mTransactions.push_back(
1333 transaction->setApplying();
1335 if (mDispatchState == DispatchState::none)
1337 if (m_job_queue.addJob(
1338 jtBATCH,
"transactionBatch", [
this]() { transactionBatch(); }))
1340 mDispatchState = DispatchState::scheduled;
1346NetworkOPsImp::doTransactionSync(
1353 if (!transaction->getApplying())
1355 mTransactions.push_back(
1357 transaction->setApplying();
1360 doTransactionSyncBatch(
1362 return transaction->getApplying();
1367NetworkOPsImp::doTransactionSyncBatch(
1373 if (mDispatchState == DispatchState::running)
1382 if (mTransactions.size())
1385 if (m_job_queue.addJob(
jtBATCH,
"transactionBatch", [
this]() {
1389 mDispatchState = DispatchState::scheduled;
1393 }
while (retryCallback(lock));
1399 auto ev = m_job_queue.makeLoadEvent(
jtTXN_PROC,
"ProcessTXNSet");
1402 for (
auto const& [_, tx] :
set)
1405 auto transaction = std::make_shared<Transaction>(tx, reason, app_);
1407 if (transaction->getStatus() ==
INVALID)
1409 if (!reason.
empty())
1411 JLOG(m_journal.trace())
1412 <<
"Exception checking transaction: " << reason;
1414 app_.getHashRouter().setFlags(tx->getTransactionID(), SF_BAD);
1419 if (!preProcessTransaction(transaction))
1430 for (
auto& transaction : candidates)
1432 if (!transaction->getApplying())
1434 transactions.
emplace_back(transaction,
false,
false, FailHard::no);
1435 transaction->setApplying();
1439 if (mTransactions.empty())
1440 mTransactions.swap(transactions);
1443 mTransactions.reserve(mTransactions.size() + transactions.
size());
1444 for (
auto& t : transactions)
1445 mTransactions.push_back(std::move(t));
1451 "ripple::NetworkOPsImp::processTransactionSet has lock");
1453 mTransactions.begin(), mTransactions.end(), [](
auto const& t) {
1454 return t.transaction->getApplying();
1460NetworkOPsImp::transactionBatch()
1464 if (mDispatchState == DispatchState::running)
1467 while (mTransactions.size())
1478 mTransactions.
swap(transactions);
1480 !transactions.
empty(),
1481 "ripple::NetworkOPsImp::apply : non-empty transactions");
1483 mDispatchState != DispatchState::running,
1484 "ripple::NetworkOPsImp::apply : is not running");
1486 mDispatchState = DispatchState::running;
1492 bool changed =
false;
1495 m_ledgerMaster.peekMutex(), std::defer_lock};
1506 if (e.failType == FailHard::yes)
1509 auto const result = app_.getTxQ().apply(
1510 app_, view, e.transaction->getSTransaction(), flags, j);
1511 e.result = result.ter;
1512 e.applied = result.applied;
1513 changed = changed || result.applied;
1522 if (
auto const l = m_ledgerMaster.getValidatedLedger())
1523 validatedLedgerIndex = l->info().seq;
1525 auto newOL = app_.openLedger().current();
1528 e.transaction->clearSubmitResult();
1532 pubProposedTransaction(
1533 newOL, e.transaction->getSTransaction(), e.result);
1534 e.transaction->setApplied();
1537 e.transaction->setResult(e.result);
1540 app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);
1549 JLOG(m_journal.info())
1550 <<
"TransactionResult: " << token <<
": " << human;
1555 bool addLocal = e.local;
1559 JLOG(m_journal.debug())
1560 <<
"Transaction is now included in open ledger";
1561 e.transaction->setStatus(
INCLUDED);
1566 auto const& txCur = e.transaction->getSTransaction();
1569 for (
auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
1571 txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
1577 auto t = std::make_shared<Transaction>(trans, reason, app_);
1578 if (t->getApplying())
1580 submit_held.
emplace_back(t,
false,
false, FailHard::no);
1589 JLOG(m_journal.info()) <<
"Transaction is obsolete";
1590 e.transaction->setStatus(
OBSOLETE);
1594 JLOG(m_journal.debug())
1595 <<
"Transaction is likely to claim a"
1596 <<
" fee, but is queued until fee drops";
1598 e.transaction->setStatus(
HELD);
1602 m_ledgerMaster.addHeldTransaction(e.transaction);
1603 e.transaction->setQueued();
1604 e.transaction->setKept();
1610 if (e.failType != FailHard::yes)
1612 auto const lastLedgerSeq =
1613 e.transaction->getSTransaction()->at(
1614 ~sfLastLedgerSequence);
1615 auto const ledgersLeft = lastLedgerSeq
1617 m_ledgerMaster.getCurrentLedgerIndex()
1635 (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
1636 app_.getHashRouter().setFlags(
1637 e.transaction->getID(), SF_HELD))
1640 JLOG(m_journal.debug())
1641 <<
"Transaction should be held: " << e.result;
1642 e.transaction->setStatus(
HELD);
1643 m_ledgerMaster.addHeldTransaction(e.transaction);
1644 e.transaction->setKept();
1647 JLOG(m_journal.debug())
1648 <<
"Not holding transaction "
1649 << e.transaction->getID() <<
": "
1650 << (e.local ?
"local" :
"network") <<
", "
1651 <<
"result: " << e.result <<
" ledgers left: "
1652 << (ledgersLeft ? to_string(*ledgersLeft)
1658 JLOG(m_journal.debug())
1659 <<
"Status other than success " << e.result;
1660 e.transaction->setStatus(
INVALID);
1663 auto const enforceFailHard =
1664 e.failType == FailHard::yes && !
isTesSuccess(e.result);
1666 if (addLocal && !enforceFailHard)
1668 m_localTX->push_back(
1669 m_ledgerMaster.getCurrentLedgerIndex(),
1670 e.transaction->getSTransaction());
1671 e.transaction->setKept();
1675 ((mMode != OperatingMode::FULL) &&
1676 (e.failType != FailHard::yes) && e.local) ||
1681 app_.getHashRouter().shouldRelay(e.transaction->getID());
1682 if (
auto const sttx = *(e.transaction->getSTransaction());
1687 newOL->rules().enabled(featureBatch)))
1689 protocol::TMTransaction tx;
1693 tx.set_rawtransaction(s.
data(), s.
size());
1694 tx.set_status(protocol::tsCURRENT);
1695 tx.set_receivetimestamp(
1696 app_.timeKeeper().now().time_since_epoch().count());
1699 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1700 e.transaction->setBroadcast();
1704 if (validatedLedgerIndex)
1706 auto [fee, accountSeq, availableSeq] =
1707 app_.getTxQ().getTxRequiredFeeAndSeq(
1708 *newOL, e.transaction->getSTransaction());
1709 e.transaction->setCurrentLedgerState(
1710 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1718 e.transaction->clearApplying();
1720 if (!submit_held.
empty())
1722 if (mTransactions.empty())
1723 mTransactions.swap(submit_held);
1726 mTransactions.reserve(mTransactions.size() + submit_held.
size());
1727 for (
auto& e : submit_held)
1728 mTransactions.push_back(std::move(e));
1734 mDispatchState = DispatchState::none;
1742NetworkOPsImp::getOwnerInfo(
1747 auto root = keylet::ownerDir(account);
1748 auto sleNode = lpLedger->read(keylet::page(
root));
1755 for (
auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1757 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1760 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1762 switch (sleCur->getType())
1765 if (!jvObjects.
isMember(jss::offers))
1766 jvObjects[jss::offers] =
1769 jvObjects[jss::offers].
append(
1770 sleCur->getJson(JsonOptions::none));
1773 case ltRIPPLE_STATE:
1774 if (!jvObjects.
isMember(jss::ripple_lines))
1776 jvObjects[jss::ripple_lines] =
1780 jvObjects[jss::ripple_lines].
append(
1781 sleCur->getJson(JsonOptions::none));
1784 case ltACCOUNT_ROOT:
1788 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1794 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1798 sleNode = lpLedger->read(keylet::page(
root, uNodeDir));
1801 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1814NetworkOPsImp::isBlocked()
1816 return isAmendmentBlocked() || isUNLBlocked();
1820NetworkOPsImp::isAmendmentBlocked()
1822 return amendmentBlocked_;
1826NetworkOPsImp::setAmendmentBlocked()
1828 amendmentBlocked_ =
true;
1829 setMode(OperatingMode::CONNECTED);
1833NetworkOPsImp::isAmendmentWarned()
1835 return !amendmentBlocked_ && amendmentWarned_;
1839NetworkOPsImp::setAmendmentWarned()
1841 amendmentWarned_ =
true;
1845NetworkOPsImp::clearAmendmentWarned()
1847 amendmentWarned_ =
false;
1851NetworkOPsImp::isUNLBlocked()
1857NetworkOPsImp::setUNLBlocked()
1860 setMode(OperatingMode::CONNECTED);
1864NetworkOPsImp::clearUNLBlocked()
1866 unlBlocked_ =
false;
1870NetworkOPsImp::checkLastClosedLedger(
1879 JLOG(m_journal.trace()) <<
"NetworkOPsImp::checkLastClosedLedger";
1881 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1886 uint256 closedLedger = ourClosed->info().hash;
1887 uint256 prevClosedLedger = ourClosed->info().parentHash;
1888 JLOG(m_journal.trace()) <<
"OurClosed: " << closedLedger;
1889 JLOG(m_journal.trace()) <<
"PrevClosed: " << prevClosedLedger;
1894 auto& validations = app_.getValidations();
1895 JLOG(m_journal.debug())
1896 <<
"ValidationTrie " <<
Json::Compact(validations.getJsonTrie());
1900 peerCounts[closedLedger] = 0;
1901 if (mMode >= OperatingMode::TRACKING)
1902 peerCounts[closedLedger]++;
1904 for (
auto& peer : peerList)
1906 uint256 peerLedger = peer->getClosedLedgerHash();
1909 ++peerCounts[peerLedger];
1912 for (
auto const& it : peerCounts)
1913 JLOG(m_journal.debug()) <<
"L: " << it.first <<
" n=" << it.second;
1915 uint256 preferredLCL = validations.getPreferredLCL(
1917 m_ledgerMaster.getValidLedgerIndex(),
1920 bool switchLedgers = preferredLCL != closedLedger;
1922 closedLedger = preferredLCL;
1924 if (switchLedgers && (closedLedger == prevClosedLedger))
1927 JLOG(m_journal.info()) <<
"We won't switch to our own previous ledger";
1928 networkClosed = ourClosed->info().hash;
1929 switchLedgers =
false;
1932 networkClosed = closedLedger;
1937 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1940 consensus = app_.getInboundLedgers().acquire(
1941 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1944 (!m_ledgerMaster.canBeCurrent(consensus) ||
1945 !m_ledgerMaster.isCompatible(
1946 *consensus, m_journal.debug(),
"Not switching")))
1950 networkClosed = ourClosed->info().hash;
1954 JLOG(m_journal.warn()) <<
"We are not running on the consensus ledger";
1955 JLOG(m_journal.info()) <<
"Our LCL: " << ourClosed->info().hash
1957 JLOG(m_journal.info()) <<
"Net LCL " << closedLedger;
1959 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1961 setMode(OperatingMode::CONNECTED);
1969 switchLastClosedLedger(consensus);
1976NetworkOPsImp::switchLastClosedLedger(
1980 JLOG(m_journal.error())
1981 <<
"JUMP last closed ledger to " << newLCL->info().hash;
1983 clearNeedNetworkLedger();
1986 app_.getTxQ().processClosedLedger(app_, *newLCL,
true);
1993 auto retries = m_localTX->getTxSet();
1994 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1999 rules.
emplace(app_.config().features);
2000 app_.openLedger().accept(
2011 return app_.getTxQ().accept(app_, view);
2015 m_ledgerMaster.switchLCL(newLCL);
2017 protocol::TMStatusChange s;
2018 s.set_newevent(protocol::neSWITCHED_LEDGER);
2019 s.set_ledgerseq(newLCL->info().seq);
2020 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
2021 s.set_ledgerhashprevious(
2022 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
2023 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
2025 app_.overlay().foreach(
2026 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
2030NetworkOPsImp::beginConsensus(
2036 "ripple::NetworkOPsImp::beginConsensus : nonzero input");
2038 auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
2040 JLOG(m_journal.info()) <<
"Consensus time for #" << closingInfo.seq
2041 <<
" with LCL " << closingInfo.parentHash;
2043 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
2048 if (mMode == OperatingMode::FULL)
2050 JLOG(m_journal.warn()) <<
"Don't have LCL, going to tracking";
2051 setMode(OperatingMode::TRACKING);
2052 CLOG(clog) <<
"beginConsensus Don't have LCL, going to tracking. ";
2055 CLOG(clog) <<
"beginConsensus no previous ledger. ";
2060 prevLedger->info().hash == closingInfo.parentHash,
2061 "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
2064 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
2065 "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
2068 if (prevLedger->rules().enabled(featureNegativeUNL))
2069 app_.validators().setNegativeUNL(prevLedger->negativeUNL());
2070 TrustChanges const changes = app_.validators().updateTrusted(
2071 app_.getValidations().getCurrentNodeIDs(),
2072 closingInfo.parentCloseTime,
2075 app_.getHashRouter());
2077 if (!changes.
added.empty() || !changes.
removed.empty())
2079 app_.getValidations().trustChanged(changes.
added, changes.
removed);
2081 app_.getAmendmentTable().trustChanged(
2082 app_.validators().getQuorumKeys().second);
2085 mConsensus.startRound(
2086 app_.timeKeeper().closeTime(),
2094 if (mLastConsensusPhase != currPhase)
2096 reportConsensusStateChange(currPhase);
2097 mLastConsensusPhase = currPhase;
2100 JLOG(m_journal.debug()) <<
"Initiating consensus engine";
2107 auto const& peerKey = peerPos.
publicKey();
2108 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
2119 JLOG(m_journal.error())
2120 <<
"Received a proposal signed by MY KEY from a peer. This may "
2121 "indicate a misconfiguration where another node has the same "
2122 "validator key, or may be caused by unusual message routing and "
2127 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
2138 protocol::TMHaveTransactionSet msg;
2139 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
2140 msg.set_status(protocol::tsHAVE);
2141 app_.overlay().foreach(
2142 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
2146 mConsensus.gotTxSet(app_.timeKeeper().closeTime(),
RCLTxSet{map});
2152 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
2154 for (
auto const& it : app_.overlay().getActivePeers())
2156 if (it && (it->getClosedLedgerHash() == deadLedger))
2158 JLOG(m_journal.trace()) <<
"Killing obsolete peer status";
2165 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
2167 if (networkClosed.
isZero())
2169 CLOG(clog) <<
"endConsensus last closed ledger is zero. ";
2179 if (((mMode == OperatingMode::CONNECTED) ||
2180 (mMode == OperatingMode::SYNCING)) &&
2186 if (!needNetworkLedger_)
2187 setMode(OperatingMode::TRACKING);
2190 if (((mMode == OperatingMode::CONNECTED) ||
2191 (mMode == OperatingMode::TRACKING)) &&
2197 auto current = m_ledgerMaster.getCurrentLedger();
2198 if (app_.timeKeeper().now() < (
current->info().parentCloseTime +
2199 2 *
current->info().closeTimeResolution))
2201 setMode(OperatingMode::FULL);
2205 beginConsensus(networkClosed, clog);
2209NetworkOPsImp::consensusViewChange()
2211 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2213 setMode(OperatingMode::CONNECTED);
2223 if (!mStreamMaps[sManifests].empty())
2227 jvObj[jss::type] =
"manifestReceived";
2230 jvObj[jss::signing_key] =
2234 jvObj[jss::signature] =
strHex(*sig);
2237 jvObj[jss::domain] = mo.
domain;
2240 for (
auto i = mStreamMaps[sManifests].begin();
2241 i != mStreamMaps[sManifests].end();)
2243 if (
auto p = i->second.lock())
2245 p->send(jvObj,
true);
2250 i = mStreamMaps[sManifests].erase(i);
2256NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
2260 : loadFactorServer{loadFeeTrack.getLoadFactor()}
2261 , loadBaseServer{loadFeeTrack.getLoadBase()}
2263 , em{
std::move(escalationMetrics)}
2273 em.has_value() != b.
em.has_value())
2279 em->minProcessingFeeLevel != b.
em->minProcessingFeeLevel ||
2280 em->openLedgerFeeLevel != b.
em->openLedgerFeeLevel ||
2281 em->referenceFeeLevel != b.
em->referenceFeeLevel);
2314 jvObj[jss::type] =
"serverStatus";
2316 jvObj[jss::load_base] = f.loadBaseServer;
2317 jvObj[jss::load_factor_server] = f.loadFactorServer;
2318 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2323 safe_cast<std::uint64_t>(f.loadFactorServer),
2325 f.em->openLedgerFeeLevel,
2327 f.em->referenceFeeLevel)
2330 jvObj[jss::load_factor] =
trunc32(loadFactor);
2331 jvObj[jss::load_factor_fee_escalation] =
2332 f.em->openLedgerFeeLevel.jsonClipped();
2333 jvObj[jss::load_factor_fee_queue] =
2334 f.em->minProcessingFeeLevel.jsonClipped();
2335 jvObj[jss::load_factor_fee_reference] =
2336 f.em->referenceFeeLevel.jsonClipped();
2339 jvObj[jss::load_factor] = f.loadFactorServer;
2353 p->send(jvObj,
true);
2370 if (!streamMap.empty())
2373 jvObj[jss::type] =
"consensusPhase";
2374 jvObj[jss::consensus] =
to_string(phase);
2376 for (
auto i = streamMap.begin(); i != streamMap.end();)
2378 if (
auto p = i->second.lock())
2380 p->send(jvObj,
true);
2385 i = streamMap.erase(i);
2401 auto const signerPublic = val->getSignerPublic();
2403 jvObj[jss::type] =
"validationReceived";
2404 jvObj[jss::validation_public_key] =
2406 jvObj[jss::ledger_hash] =
to_string(val->getLedgerHash());
2407 jvObj[jss::signature] =
strHex(val->getSignature());
2408 jvObj[jss::full] = val->isFull();
2409 jvObj[jss::flags] = val->getFlags();
2410 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2411 jvObj[jss::data] =
strHex(val->getSerializer().slice());
2413 if (
auto version = (*val)[~sfServerVersion])
2416 if (
auto cookie = (*val)[~sfCookie])
2419 if (
auto hash = (*val)[~sfValidatedHash])
2420 jvObj[jss::validated_hash] =
strHex(*hash);
2422 auto const masterKey =
2425 if (masterKey != signerPublic)
2430 if (
auto const seq = (*val)[~sfLedgerSequence])
2431 jvObj[jss::ledger_index] = *seq;
2433 if (val->isFieldPresent(sfAmendments))
2436 for (
auto const& amendment : val->getFieldV256(sfAmendments))
2440 if (
auto const closeTime = (*val)[~sfCloseTime])
2441 jvObj[jss::close_time] = *closeTime;
2443 if (
auto const loadFee = (*val)[~sfLoadFee])
2444 jvObj[jss::load_fee] = *loadFee;
2446 if (
auto const baseFee = val->at(~sfBaseFee))
2447 jvObj[jss::base_fee] =
static_cast<double>(*baseFee);
2449 if (
auto const reserveBase = val->at(~sfReserveBase))
2450 jvObj[jss::reserve_base] = *reserveBase;
2452 if (
auto const reserveInc = val->at(~sfReserveIncrement))
2453 jvObj[jss::reserve_inc] = *reserveInc;
2457 if (
auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2458 baseFeeXRP && baseFeeXRP->native())
2459 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2461 if (
auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2462 reserveBaseXRP && reserveBaseXRP->native())
2463 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2465 if (
auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2466 reserveIncXRP && reserveIncXRP->native())
2467 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2476 if (jvTx.
isMember(jss::ledger_index))
2478 jvTx[jss::ledger_index] =
2479 std::to_string(jvTx[jss::ledger_index].asUInt());
2486 if (
auto p = i->second.lock())
2490 [&](
Json::Value const& jv) { p->send(jv, true); });
2510 jvObj[jss::type] =
"peerStatusChange";
2519 p->send(jvObj,
true);
2533 using namespace std::chrono_literals;
2565 <<
"recvValidation " << val->getLedgerHash() <<
" from " << source;
2581 <<
"Exception thrown for handling new validation "
2582 << val->getLedgerHash() <<
": " << e.
what();
2587 <<
"Unknown exception thrown for handling new validation "
2588 << val->getLedgerHash();
2600 ss <<
"VALIDATION: " << val->render() <<
" master_key: ";
2637 "This server is amendment blocked, and must be updated to be "
2638 "able to stay in sync with the network.";
2645 "This server has an expired validator list. validators.txt "
2646 "may be incorrectly configured or some [validator_list_sites] "
2647 "may be unreachable.";
2654 "One or more unsupported amendments have reached majority. "
2655 "Upgrade to the latest version before they are activated "
2656 "to avoid being amendment blocked.";
2657 if (
auto const expected =
2661 d[jss::expected_date] = expected->time_since_epoch().count();
2662 d[jss::expected_date_UTC] =
to_string(*expected);
2666 if (warnings.size())
2667 info[jss::warnings] = std::move(warnings);
2682 info[jss::time] =
to_string(std::chrono::floor<std::chrono::microseconds>(
2686 info[jss::network_ledger] =
"waiting";
2688 info[jss::validation_quorum] =
2696 info[jss::node_size] =
"tiny";
2699 info[jss::node_size] =
"small";
2702 info[jss::node_size] =
"medium";
2705 info[jss::node_size] =
"large";
2708 info[jss::node_size] =
"huge";
2717 info[jss::validator_list_expires] =
2718 safe_cast<Json::UInt>(when->time_since_epoch().count());
2720 info[jss::validator_list_expires] = 0;
2730 if (*when == TimeKeeper::time_point::max())
2732 x[jss::expiration] =
"never";
2733 x[jss::status] =
"active";
2740 x[jss::status] =
"active";
2742 x[jss::status] =
"expired";
2747 x[jss::status] =
"unknown";
2748 x[jss::expiration] =
"unknown";
2752#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2755#ifdef GIT_COMMIT_HASH
2756 x[jss::hash] = GIT_COMMIT_HASH;
2759 x[jss::branch] = GIT_BRANCH;
2764 info[jss::io_latency_ms] =
2772 info[jss::pubkey_validator] =
2777 info[jss::pubkey_validator] =
"none";
2787 info[jss::counters][jss::nodestore] = nodestore;
2791 info[jss::pubkey_node] =
2797 info[jss::amendment_blocked] =
true;
2811 lastClose[jss::converge_time_s] =
2816 lastClose[jss::converge_time] =
2820 info[jss::last_close] = lastClose;
2828 info[jss::network_id] =
static_cast<Json::UInt>(*netid);
2830 auto const escalationMetrics =
2838 auto const loadFactorFeeEscalation =
2840 escalationMetrics.openLedgerFeeLevel,
2842 escalationMetrics.referenceFeeLevel)
2846 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2850 info[jss::load_base] = loadBaseServer;
2851 info[jss::load_factor] =
trunc32(loadFactor);
2852 info[jss::load_factor_server] = loadFactorServer;
2859 info[jss::load_factor_fee_escalation] =
2860 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2861 info[jss::load_factor_fee_queue] =
2862 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2863 info[jss::load_factor_fee_reference] =
2864 escalationMetrics.referenceFeeLevel.jsonClipped();
2868 info[jss::load_factor] =
2869 static_cast<double>(loadFactor) / loadBaseServer;
2871 if (loadFactorServer != loadFactor)
2872 info[jss::load_factor_server] =
2873 static_cast<double>(loadFactorServer) / loadBaseServer;
2878 if (fee != loadBaseServer)
2879 info[jss::load_factor_local] =
2880 static_cast<double>(fee) / loadBaseServer;
2882 if (fee != loadBaseServer)
2883 info[jss::load_factor_net] =
2884 static_cast<double>(fee) / loadBaseServer;
2886 if (fee != loadBaseServer)
2887 info[jss::load_factor_cluster] =
2888 static_cast<double>(fee) / loadBaseServer;
2890 if (escalationMetrics.openLedgerFeeLevel !=
2891 escalationMetrics.referenceFeeLevel &&
2892 (admin || loadFactorFeeEscalation != loadFactor))
2893 info[jss::load_factor_fee_escalation] =
2894 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2895 escalationMetrics.referenceFeeLevel);
2896 if (escalationMetrics.minProcessingFeeLevel !=
2897 escalationMetrics.referenceFeeLevel)
2898 info[jss::load_factor_fee_queue] =
2899 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2900 escalationMetrics.referenceFeeLevel);
2913 XRPAmount const baseFee = lpClosed->fees().base;
2915 l[jss::seq] =
Json::UInt(lpClosed->info().seq);
2916 l[jss::hash] =
to_string(lpClosed->info().hash);
2921 l[jss::reserve_base] =
2922 lpClosed->fees().accountReserve(0).jsonClipped();
2923 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2925 lpClosed->info().closeTime.time_since_epoch().count());
2930 l[jss::reserve_base_xrp] =
2931 lpClosed->fees().accountReserve(0).decimalXRP();
2932 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2935 std::abs(closeOffset.count()) >= 60)
2936 l[jss::close_time_offset] =
2944 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2948 auto lCloseTime = lpClosed->info().closeTime;
2950 if (lCloseTime <= closeTime)
2952 using namespace std::chrono_literals;
2953 auto age = closeTime - lCloseTime;
2955 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2961 info[jss::validated_ledger] = l;
2963 info[jss::closed_ledger] = l;
2967 info[jss::published_ledger] =
"none";
2968 else if (lpPublished->info().seq != lpClosed->info().seq)
2969 info[jss::published_ledger] = lpPublished->info().seq;
2974 info[jss::jq_trans_overflow] =
2976 info[jss::peer_disconnects] =
2978 info[jss::peer_disconnects_resources] =
2983 "http",
"https",
"peer",
"ws",
"ws2",
"wss",
"wss2"};
2991 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2992 port.admin_user.empty() && port.admin_password.empty()))
3006 for (
auto const& p : proto)
3007 jv[jss::protocol].append(p);
3014 auto const optPort = grpcSection.
get(
"port");
3015 if (optPort && grpcSection.get(
"ip"))
3018 jv[jss::port] = *optPort;
3020 jv[jss::protocol].append(
"grpc");
3023 info[jss::ports] = std::move(ports);
3049 ledger->rules().enabled(featureBatch))
3053 transJson(transaction, result,
false, ledger, std::nullopt);
3067 [&](
Json::Value const& jv) { p->send(jv, true); });
3090 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted,
app_);
3092 lpAccepted->info().hash, alpAccepted);
3096 alpAccepted->getLedger().
get() == lpAccepted.
get(),
3097 "ripple::NetworkOPsImp::pubLedger : accepted input");
3101 <<
"Publishing ledger " << lpAccepted->info().seq <<
" "
3102 << lpAccepted->info().hash;
3110 jvObj[jss::type] =
"ledgerClosed";
3111 jvObj[jss::ledger_index] = lpAccepted->info().seq;
3112 jvObj[jss::ledger_hash] =
to_string(lpAccepted->info().hash);
3114 lpAccepted->info().closeTime.time_since_epoch().count());
3116 if (!lpAccepted->rules().enabled(featureXRPFees))
3118 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
3119 jvObj[jss::reserve_base] =
3120 lpAccepted->fees().accountReserve(0).jsonClipped();
3121 jvObj[jss::reserve_inc] =
3122 lpAccepted->fees().increment.jsonClipped();
3124 jvObj[jss::txn_count] =
Json::UInt(alpAccepted->size());
3128 jvObj[jss::validated_ledgers] =
3138 p->send(jvObj,
true);
3156 p->send(jvObj,
true);
3165 static bool firstTime =
true;
3172 for (
auto& inner : outer.second)
3174 auto& subInfo = inner.second;
3175 if (subInfo.index_->separationLedgerSeq_ == 0)
3178 alpAccepted->getLedger(), subInfo);
3187 for (
auto const& accTx : *alpAccepted)
3191 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3218 "reportConsensusStateChange->pubConsensus",
3249 jvObj[jss::type] =
"transaction";
3253 jvObj[jss::transaction] =
3260 jvObj[jss::meta], *ledger, transaction, meta->
get());
3262 jvObj[jss::meta], transaction, meta->
get());
3266 if (
auto const& lookup = ledger->txRead(transaction->getTransactionID());
3267 lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3269 uint32_t
const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
3271 if (transaction->isFieldPresent(sfNetworkID))
3272 netID = transaction->getFieldU32(sfNetworkID);
3277 jvObj[jss::ctid] = *ctid;
3279 if (!ledger->open())
3280 jvObj[jss::ledger_hash] =
to_string(ledger->info().hash);
3284 jvObj[jss::ledger_index] = ledger->info().seq;
3285 jvObj[jss::transaction][jss::date] =
3286 ledger->info().closeTime.time_since_epoch().count();
3287 jvObj[jss::validated] =
true;
3288 jvObj[jss::close_time_iso] =
to_string_iso(ledger->info().closeTime);
3294 jvObj[jss::validated] =
false;
3295 jvObj[jss::ledger_current_index] = ledger->info().seq;
3298 jvObj[jss::status] = validated ?
"closed" :
"proposed";
3299 jvObj[jss::engine_result] = sToken;
3300 jvObj[jss::engine_result_code] = result;
3301 jvObj[jss::engine_result_message] = sHuman;
3303 if (transaction->getTxnType() == ttOFFER_CREATE)
3305 auto const account = transaction->getAccountID(sfAccount);
3306 auto const amount = transaction->getFieldAmount(sfTakerGets);
3309 if (account != amount.issue().account)
3317 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3325 [&]<
unsigned Version>(
3327 RPC::insertDeliverMax(
3328 jvTx[jss::transaction], transaction->getTxnType(), Version);
3330 if constexpr (Version > 1)
3332 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3333 jvTx[jss::hash] = hash;
3337 jvTx[jss::transaction][jss::hash] = hash;
3350 auto const& stTxn = transaction.
getTxn();
3354 auto const trResult = transaction.
getResult();
3369 [&](
Json::Value const& jv) { p->send(jv, true); });
3386 [&](
Json::Value const& jv) { p->send(jv, true); });
3411 auto const currLedgerSeq = ledger->seq();
3418 for (
auto const& affectedAccount : transaction.
getAffected())
3423 auto it = simiIt->second.begin();
3425 while (it != simiIt->second.end())
3436 it = simiIt->second.erase(it);
3443 auto it = simiIt->second.begin();
3444 while (it != simiIt->second.end())
3455 it = simiIt->second.erase(it);
3462 auto& subs = histoIt->second;
3463 auto it = subs.begin();
3464 while (it != subs.end())
3467 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3481 it = subs.erase(it);
3492 <<
"pubAccountTransaction: "
3493 <<
"proposed=" << iProposed <<
", accepted=" << iAccepted;
3495 if (!notify.
empty() || !accountHistoryNotify.
empty())
3497 auto const& stTxn = transaction.
getTxn();
3501 auto const trResult = transaction.
getResult();
3507 isrListener->getApiVersion(),
3508 [&](
Json::Value const& jv) { isrListener->send(jv, true); });
3512 jvObj.
set(jss::account_history_boundary,
true);
3515 jvObj.
isMember(jss::account_history_tx_stream) ==
3517 "ripple::NetworkOPsImp::pubAccountTransaction : "
3518 "account_history_tx_stream not set");
3519 for (
auto& info : accountHistoryNotify)
3521 auto& index = info.index_;
3522 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3523 jvObj.
set(jss::account_history_tx_first,
true);
3525 jvObj.
set(jss::account_history_tx_index, index->forwardTxIndex_++);
3528 info.sink_->getApiVersion(),
3529 [&](
Json::Value const& jv) { info.sink_->send(jv, true); });
3554 for (
auto const& affectedAccount : tx->getMentionedAccounts())
3559 auto it = simiIt->second.begin();
3561 while (it != simiIt->second.end())
3572 it = simiIt->second.erase(it);
3579 JLOG(
m_journal.
trace()) <<
"pubProposedAccountTransaction: " << iProposed;
3581 if (!notify.
empty() || !accountHistoryNotify.
empty())
3588 isrListener->getApiVersion(),
3589 [&](
Json::Value const& jv) { isrListener->send(jv, true); });
3592 jvObj.
isMember(jss::account_history_tx_stream) ==
3594 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3595 "account_history_tx_stream not set");
3596 for (
auto& info : accountHistoryNotify)
3598 auto& index = info.index_;
3599 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3600 jvObj.
set(jss::account_history_tx_first,
true);
3601 jvObj.
set(jss::account_history_tx_index, index->forwardTxIndex_++);
3603 info.sink_->getApiVersion(),
3604 [&](
Json::Value const& jv) { info.sink_->send(jv, true); });
3621 for (
auto const& naAccountID : vnaAccountIDs)
3624 <<
"subAccount: account: " <<
toBase58(naAccountID);
3626 isrListener->insertSubAccountInfo(naAccountID, rt);
3631 for (
auto const& naAccountID : vnaAccountIDs)
3633 auto simIterator = subMap.
find(naAccountID);
3634 if (simIterator == subMap.
end())
3638 usisElement[isrListener->getSeq()] = isrListener;
3640 subMap.
insert(simIterator, make_pair(naAccountID, usisElement));
3645 simIterator->second[isrListener->getSeq()] = isrListener;
3656 for (
auto const& naAccountID : vnaAccountIDs)
3659 isrListener->deleteSubAccountInfo(naAccountID, rt);
3676 for (
auto const& naAccountID : vnaAccountIDs)
3678 auto simIterator = subMap.
find(naAccountID);
3680 if (simIterator != subMap.
end())
3683 simIterator->second.erase(uSeq);
3685 if (simIterator->second.empty())
3688 subMap.
erase(simIterator);
3697 enum DatabaseType { Sqlite,
None };
3698 static auto const databaseType = [&]() -> DatabaseType {
3703 return DatabaseType::Sqlite;
3705 return DatabaseType::None;
3708 if (databaseType == DatabaseType::None)
3711 <<
"AccountHistory job for account "
3723 "AccountHistoryTxStream",
3724 [
this, dbType = databaseType, subInfo]() {
3725 auto const& accountId = subInfo.
index_->accountId_;
3726 auto& lastLedgerSeq = subInfo.
index_->historyLastLedgerSeq_;
3727 auto& txHistoryIndex = subInfo.
index_->historyTxIndex_;
3730 <<
"AccountHistory job for account " <<
toBase58(accountId)
3731 <<
" started. lastLedgerSeq=" << lastLedgerSeq;
3741 auto stx = tx->getSTransaction();
3742 if (stx->getAccountID(sfAccount) == accountId &&
3743 stx->getSeqValue() == 1)
3747 for (
auto& node : meta->getNodes())
3749 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3752 if (node.isFieldPresent(sfNewFields))
3754 if (
auto inner =
dynamic_cast<STObject const*
>(
3755 node.peekAtPField(sfNewFields));
3758 if (inner->isFieldPresent(sfAccount) &&
3759 inner->getAccountID(sfAccount) == accountId)
3771 bool unsubscribe) ->
bool {
3774 sptr->send(jvObj,
true);
3784 bool unsubscribe) ->
bool {
3788 sptr->getApiVersion(),
3789 [&](
Json::Value const& jv) { sptr->send(jv,
true); });
3812 accountId, minLedger, maxLedger, marker, 0,
true};
3813 return db->newestAccountTxPage(options);
3817 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3818 "getMoreTxns : invalid database type");
3827 while (lastLedgerSeq >= 2 && !subInfo.
index_->stopHistorical_)
3829 int feeChargeCount = 0;
3838 <<
"AccountHistory job for account "
3839 <<
toBase58(accountId) <<
" no InfoSub. Fee charged "
3840 << feeChargeCount <<
" times.";
3845 auto startLedgerSeq =
3846 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3848 <<
"AccountHistory job for account " <<
toBase58(accountId)
3849 <<
", working on ledger range [" << startLedgerSeq <<
","
3850 << lastLedgerSeq <<
"]";
3852 auto haveRange = [&]() ->
bool {
3855 auto haveSomeValidatedLedgers =
3857 validatedMin, validatedMax);
3859 return haveSomeValidatedLedgers &&
3860 validatedMin <= startLedgerSeq &&
3861 lastLedgerSeq <= validatedMax;
3867 <<
"AccountHistory reschedule job for account "
3868 <<
toBase58(accountId) <<
", incomplete ledger range ["
3869 << startLedgerSeq <<
"," << lastLedgerSeq <<
"]";
3875 while (!subInfo.
index_->stopHistorical_)
3878 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3882 <<
"AccountHistory job for account "
3883 <<
toBase58(accountId) <<
" getMoreTxns failed.";
3888 auto const& txns = dbResult->first;
3889 marker = dbResult->second;
3890 size_t num_txns = txns.size();
3891 for (
size_t i = 0; i < num_txns; ++i)
3893 auto const& [tx, meta] = txns[i];
3898 <<
"AccountHistory job for account "
3899 <<
toBase58(accountId) <<
" empty tx or meta.";
3909 <<
"AccountHistory job for account "
3910 <<
toBase58(accountId) <<
" no ledger.";
3915 tx->getSTransaction();
3919 <<
"AccountHistory job for account "
3921 <<
" getSTransaction failed.";
3927 auto const trR = meta->getResultTER();
3929 transJson(stTxn, trR,
true, curTxLedger, mRef);
3932 jss::account_history_tx_index, txHistoryIndex--);
3933 if (i + 1 == num_txns ||
3934 txns[i + 1].first->getLedger() != tx->getLedger())
3935 jvTx.
set(jss::account_history_boundary,
true);
3937 if (isFirstTx(tx, meta))
3939 jvTx.
set(jss::account_history_tx_first,
true);
3940 sendMultiApiJson(jvTx,
false);
3943 <<
"AccountHistory job for account "
3945 <<
" done, found last tx.";
3950 sendMultiApiJson(jvTx,
false);
3957 <<
"AccountHistory job for account "
3959 <<
" paging, marker=" << marker->ledgerSeq <<
":"
3968 if (!subInfo.
index_->stopHistorical_)
3970 lastLedgerSeq = startLedgerSeq - 1;
3971 if (lastLedgerSeq <= 1)
3974 <<
"AccountHistory job for account "
3976 <<
" done, reached genesis ledger.";
3989 subInfo.
index_->separationLedgerSeq_ = ledger->seq();
3990 auto const& accountId = subInfo.
index_->accountId_;
3992 if (!ledger->exists(accountKeylet))
3995 <<
"subAccountHistoryStart, no account " <<
toBase58(accountId)
3996 <<
", no need to add AccountHistory job.";
4001 if (
auto const sleAcct = ledger->read(accountKeylet); sleAcct)
4003 if (sleAcct->getFieldU32(sfSequence) == 1)
4006 <<
"subAccountHistoryStart, genesis account "
4008 <<
" does not have tx, no need to add AccountHistory job.";
4015 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
4016 "access genesis account");
4020 subInfo.
index_->historyLastLedgerSeq_ = ledger->seq();
4021 subInfo.
index_->haveHistorical_ =
true;
4024 <<
"subAccountHistoryStart, add AccountHistory job: accountId="
4025 <<
toBase58(accountId) <<
", currentLedgerSeq=" << ledger->seq();
4035 if (!isrListener->insertSubAccountHistory(accountId))
4038 <<
"subAccountHistory, already subscribed to account "
4045 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
4050 inner.
emplace(isrListener->getSeq(), ahi);
4056 simIterator->second.emplace(isrListener->getSeq(), ahi);
4070 <<
"subAccountHistory, no validated ledger yet, delay start";
4083 isrListener->deleteSubAccountHistory(account);
4097 auto& subInfoMap = simIterator->second;
4098 auto subInfoIter = subInfoMap.find(seq);
4099 if (subInfoIter != subInfoMap.end())
4101 subInfoIter->second.index_->stopHistorical_ =
true;
4106 simIterator->second.erase(seq);
4107 if (simIterator->second.empty())
4113 <<
"unsubAccountHistory, account " <<
toBase58(account)
4114 <<
", historyOnly = " << (historyOnly ?
"true" :
"false");
4122 listeners->addSubscriber(isrListener);
4124 UNREACHABLE(
"ripple::NetworkOPsImp::subBook : null book listeners");
4132 listeners->removeSubscriber(uSeq);
4144 m_standalone,
"ripple::NetworkOPsImp::acceptLedger : is standalone");
4147 Throw<std::runtime_error>(
4148 "Operation only possible in STANDALONE mode.");
4163 jvResult[jss::ledger_index] = lpClosed->info().seq;
4164 jvResult[jss::ledger_hash] =
to_string(lpClosed->info().hash);
4166 lpClosed->info().closeTime.time_since_epoch().count());
4167 if (!lpClosed->rules().enabled(featureXRPFees))
4169 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
4170 jvResult[jss::reserve_base] =
4171 lpClosed->fees().accountReserve(0).jsonClipped();
4172 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
4177 jvResult[jss::validated_ledgers] =
4183 .emplace(isrListener->getSeq(), isrListener)
4193 .emplace(isrListener->getSeq(), isrListener)
4219 .emplace(isrListener->getSeq(), isrListener)
4247 jvResult[jss::random] =
to_string(uRandom);
4249 jvResult[jss::load_base] = feeTrack.getLoadBase();
4250 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4251 jvResult[jss::hostid] =
getHostId(admin);
4252 jvResult[jss::pubkey_node] =
4257 .emplace(isrListener->getSeq(), isrListener)
4275 .emplace(isrListener->getSeq(), isrListener)
4293 .emplace(isrListener->getSeq(), isrListener)
4311 .emplace(isrListener->getSeq(), isrListener)
4335 .emplace(isrListener->getSeq(), isrListener)
4353 .emplace(isrListener->getSeq(), isrListener)
4401 if (map.find(pInfo->getSeq()) != map.end())
4408#ifndef USE_NEW_BOOK_PAGE
4419 unsigned int iLimit,
4429 uint256 uTipIndex = uBookBase;
4433 stream <<
"getBookPage:" << book;
4434 stream <<
"getBookPage: uBookBase=" << uBookBase;
4435 stream <<
"getBookPage: uBookEnd=" << uBookEnd;
4436 stream <<
"getBookPage: uTipIndex=" << uTipIndex;
4445 bool bDirectAdvance =
true;
4449 unsigned int uBookEntry;
4455 while (!bDone && iLimit-- > 0)
4459 bDirectAdvance =
false;
4463 auto const ledgerIndex = view.
succ(uTipIndex, uBookEnd);
4467 sleOfferDir.
reset();
4476 uTipIndex = sleOfferDir->key();
4479 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4482 <<
"getBookPage: uTipIndex=" << uTipIndex;
4484 <<
"getBookPage: offerIndex=" << offerIndex;
4494 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4495 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4496 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4498 bool firstOwnerOffer(
true);
4504 saOwnerFunds = saTakerGets;
4506 else if (bGlobalFreeze)
4514 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4515 if (umBalanceEntry != umBalance.
end())
4519 saOwnerFunds = umBalanceEntry->second;
4520 firstOwnerOffer =
false;
4534 if (saOwnerFunds < beast::zero)
4538 saOwnerFunds.
clear();
4546 STAmount saOwnerFundsLimit = saOwnerFunds;
4558 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4561 if (saOwnerFundsLimit >= saTakerGets)
4564 saTakerGetsFunded = saTakerGets;
4570 saTakerGetsFunded = saOwnerFundsLimit;
4572 saTakerGetsFunded.
setJson(jvOffer[jss::taker_gets_funded]);
4576 saTakerGetsFunded, saDirRate, saTakerPays.
issue()))
4577 .setJson(jvOffer[jss::taker_pays_funded]);
4583 saOwnerFunds,
multiply(saTakerGetsFunded, offerRate));
4585 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4589 jvOf[jss::quality] = saDirRate.
getText();
4591 if (firstOwnerOffer)
4592 jvOf[jss::owner_funds] = saOwnerFunds.
getText();
4599 if (!
cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4601 bDirectAdvance =
true;
4606 <<
"getBookPage: offerIndex=" << offerIndex;
4626 unsigned int iLimit,
4634 MetaView lesActive(lpLedger,
tapNONE,
true);
4635 OrderBookIterator obIterator(lesActive, book);
4639 bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.
out.
account) ||
4640 lesActive.isGlobalFrozen(book.
in.
account);
4642 while (iLimit-- > 0 && obIterator.nextOffer())
4647 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4648 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4649 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4650 STAmount saDirRate = obIterator.getCurrentRate();
4656 saOwnerFunds = saTakerGets;
4658 else if (bGlobalFreeze)
4666 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4668 if (umBalanceEntry != umBalance.
end())
4672 saOwnerFunds = umBalanceEntry->second;
4678 saOwnerFunds = lesActive.accountHolds(
4684 if (saOwnerFunds.isNegative())
4688 saOwnerFunds.zero();
4695 STAmount saTakerGetsFunded;
4696 STAmount saOwnerFundsLimit = saOwnerFunds;
4708 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4711 if (saOwnerFundsLimit >= saTakerGets)
4714 saTakerGetsFunded = saTakerGets;
4719 saTakerGetsFunded = saOwnerFundsLimit;
4721 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4727 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4728 .setJson(jvOffer[jss::taker_pays_funded]);
4731 STAmount saOwnerPays = (
parityRate == offerRate)
4734 saOwnerFunds,
multiply(saTakerGetsFunded, offerRate));
4736 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4738 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4742 jvOf[jss::quality] = saDirRate.
getText();
4757 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4797 ++counters_[
static_cast<std::size_t>(om)].transitions;
4799 counters_[
static_cast<std::size_t>(om)].transitions == 1)
4801 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4802 now - processStart_)
4806 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4815 auto [counters, mode, start, initialSync] = getCounterData();
4816 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4826 auto& state = obj[jss::state_accounting][
states_[i]];
4827 state[jss::transitions] =
std::to_string(counters[i].transitions);
4828 state[jss::duration_us] =
std::to_string(counters[i].dur.count());
4832 obj[jss::initial_sync_duration_us] =
std::to_string(initialSync);
4847 boost::asio::io_service& io_svc,
4851 return std::make_unique<NetworkOPsImp>(
T back_inserter(T... args)
Decorator for streaming out compact json.
Lightweight wrapper to tag static string.
Value & append(Value const &value)
Append value to array at the end.
bool isMember(char const *key) const
Return true if the object has a member named key.
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
A generic endpoint for log messages.
Stream trace() const
Severity stream access functions.
A metric for measuring an integral value.
void set(value_type value) const
Set the value on the gauge.
A reference to a handler for performing polled collection.
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
std::uint32_t getLoadFee() const
NetClock::time_point getReportTime() const
PublicKey const & identity() const
std::size_t size() const
The number of nodes in the cluster list.
std::string SERVER_DOMAIN
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
int RELAY_UNTRUSTED_VALIDATIONS
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
A pool of threads to perform work.
Json::Value getJson(int c=0)
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
std::uint32_t getClusterFee() const
std::uint32_t getLocalFee() const
std::uint32_t getLoadBase() const
std::uint32_t getRemoteFee() const
std::uint32_t getLoadFactor() const
void heartbeat()
Reset the stall detection timer.
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
std::uint64_t initialSyncUs_
CounterData getCounterData() const
std::chrono::steady_clock::time_point start_
static std::array< Json::StaticString const, 5 > const states_
std::chrono::steady_clock::time_point const processStart_
Transaction with input flags and results to be applied in batches.
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
std::shared_ptr< Transaction > const transaction
void processClusterTimer()
boost::asio::steady_timer accountHistoryTxTimer_
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
std::vector< TransactionStatus > mTransactions
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
std::optional< PublicKey > const validatorPK_
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
SubInfoMapType mSubAccount
std::optional< PublicKey > const validatorMasterPK_
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
std::atomic< bool > unlBlocked_
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
std::atomic< bool > needNetworkLedger_
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
void setMode(OperatingMode om) override
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
void clearNeedNetworkLedger() override
DispatchState mDispatchState
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
void reportFeeChange() override
void processHeartbeatTimer()
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
bool subTransactions(InfoSub::ref ispListener) override
std::atomic< bool > amendmentWarned_
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
void getCountsJson(Json::Value &obj)
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Writable ledger view that accumulates state and tx changes.
BookListeners::pointer getBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &alTx, MultiApiJson const &jvObj)
BookListeners::pointer makeBookListeners(Book const &)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Represents a set of transactions in RCLConsensus.
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Collects logging information.
std::unique_ptr< std::stringstream > const & ss()
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
std::string getText() const override
Issue const & issue() const
std::optional< T > get(std::string const &name) const
std::size_t size() const noexcept
void const * data() const noexcept
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
std::chrono::seconds closeOffset() const
time_point closeTime() const
Returns the predicted close time, in network time.
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Validator keys and manifest as set in configuration file.
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Json::Value jsonClipped() const
static constexpr std::size_t size()
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
T emplace_back(T... args)
@ arrayValue
array value (ordered list)
@ objectValue
object value (collection of name/value pairs).
void rngfill(void *buffer, std::size_t bytes, Generator &g)
std::string const & getVersionString()
Server version.
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Rate rate(Env &env, Account const &account, std::uint32_t const &seq)
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
STAmount divide(STAmount const &amount, Rate const &rate)
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
std::uint64_t getQuality(uint256 const &uBase)
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
auto constexpr muldiv_max
std::unique_ptr< LocalTxs > make_LocalTxs()
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
STAmount amountFromQuality(std::uint64_t rate)
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
@ warnRPC_UNSUPPORTED_MAJORITY
@ warnRPC_AMENDMENT_BLOCKED
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
AccountID calcAccountID(PublicKey const &pk)
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Json::Value rpcError(int iError)
bool isTefFailure(TER x) noexcept
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
std::string strHex(FwdIt begin, FwdIt end)
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
void forAllApiVersions(Fn const &fn, Args &&... args)
bool isTerRetry(TER x) noexcept
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
uint256 getQualityNext(uint256 const &uBase)
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
bool isTesSuccess(TER x) noexcept
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
std::string to_string_iso(date::sys_time< Duration > tp)
std::string to_string(base_uint< Bits, Tag > const &a)
FeeSetup setup_FeeVote(Section const &section)
bool isTemMalformed(TER x) noexcept
Number root(Number f, unsigned d)
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
constexpr std::size_t maxPoppedTransactions
bool transResultInfo(TER code, std::string &token, std::string &text)
bool isTelLocal(TER x) noexcept
uint256 getBookBase(Book const &book)
constexpr std::uint32_t tfInnerBatchTxn
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
static std::uint32_t trunc32(std::uint64_t v)
static auto const genesisAccountId
T set_intersection(T... args)
std::string serialized
The manifest in serialized form.
std::uint32_t sequence
The sequence number of this manifest.
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Server fees published on server subscription.
bool operator!=(ServerFeeSummary const &b) const
ServerFeeSummary()=default
std::optional< TxQ::Metrics > em
std::uint32_t loadFactorServer
bool operator==(ServerFeeSummary const &b) const
std::uint32_t loadBaseServer
decltype(initialSyncUs_) initialSyncUs
decltype(counters_) counters
std::uint64_t transitions
std::chrono::microseconds dur
beast::insight::Gauge full_transitions
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
beast::insight::Hook hook
beast::insight::Gauge connected_duration
beast::insight::Gauge tracking_duration
beast::insight::Gauge connected_transitions
beast::insight::Gauge disconnected_transitions
beast::insight::Gauge syncing_duration
beast::insight::Gauge tracking_transitions
beast::insight::Gauge full_duration
beast::insight::Gauge disconnected_duration
beast::insight::Gauge syncing_transitions
std::uint32_t historyLastLedgerSeq_
std::uint32_t separationLedgerSeq_
AccountID const accountId_
std::uint32_t forwardTxIndex_
std::atomic< bool > stopHistorical_
std::int32_t historyTxIndex_
SubAccountHistoryIndex(AccountID const &accountId)
std::shared_ptr< SubAccountHistoryIndex > index_
std::shared_ptr< SubAccountHistoryIndex > index_
Represents a transfer rate.
Data format for exchanging consumption information across peers.
std::vector< Item > items
Changes in trusted nodes after updating validator list.
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
IsMemberResult isMember(char const *key) const
void set(char const *key, auto const &v)
Select all peers (except optional excluded) that are in our cluster.
Sends a message to all peers.
T time_since_epoch(T... args)