20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/misc/AmendmentTable.h>
32#include <xrpld/app/misc/DeliverMax.h>
33#include <xrpld/app/misc/HashRouter.h>
34#include <xrpld/app/misc/LoadFeeTrack.h>
35#include <xrpld/app/misc/NetworkOPs.h>
36#include <xrpld/app/misc/Transaction.h>
37#include <xrpld/app/misc/TxQ.h>
38#include <xrpld/app/misc/ValidatorKeys.h>
39#include <xrpld/app/misc/ValidatorList.h>
40#include <xrpld/app/misc/detail/AccountTxPaging.h>
41#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
42#include <xrpld/app/tx/apply.h>
43#include <xrpld/consensus/Consensus.h>
44#include <xrpld/consensus/ConsensusParms.h>
45#include <xrpld/overlay/Cluster.h>
46#include <xrpld/overlay/Overlay.h>
47#include <xrpld/overlay/predicates.h>
48#include <xrpld/perflog/PerfLog.h>
49#include <xrpld/rpc/BookChanges.h>
50#include <xrpld/rpc/DeliveredAmount.h>
51#include <xrpld/rpc/MPTokenIssuanceID.h>
52#include <xrpld/rpc/ServerHandler.h>
54#include <xrpl/basics/UptimeClock.h>
55#include <xrpl/basics/mulDiv.h>
56#include <xrpl/basics/safe_cast.h>
57#include <xrpl/basics/scope.h>
58#include <xrpl/beast/utility/rngfill.h>
59#include <xrpl/crypto/RFC1751.h>
60#include <xrpl/crypto/csprng.h>
61#include <xrpl/protocol/BuildInfo.h>
62#include <xrpl/protocol/Feature.h>
63#include <xrpl/protocol/MultiApiJson.h>
64#include <xrpl/protocol/RPCErr.h>
65#include <xrpl/protocol/jss.h>
66#include <xrpl/resource/Fees.h>
67#include <xrpl/resource/ResourceManager.h>
69#include <boost/asio/ip/host_name.hpp>
70#include <boost/asio/steady_timer.hpp>
109 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
152 std::chrono::steady_clock::time_point
start_ =
213 return !(*
this != b);
232 boost::asio::io_service& io_svc,
246 app_.logs().journal(
"FeeVote")),
249 app.getInboundTransactions(),
250 beast::get_abstract_clock<
std::chrono::steady_clock>(),
252 app_.logs().journal(
"LedgerConsensus"))
254 validatorKeys.keys ? validatorKeys.keys->publicKey
257 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
439 getServerInfo(
bool human,
bool admin,
bool counters)
override;
466 TER result)
override;
500 bool historyOnly)
override;
506 bool historyOnly)
override;
574 boost::system::error_code ec;
579 <<
"NetworkOPs: heartbeatTimer cancel error: "
588 <<
"NetworkOPs: clusterTimer cancel error: "
597 <<
"NetworkOPs: accountHistoryTxTimer cancel error: "
602 using namespace std::chrono_literals;
612 boost::asio::steady_timer& timer,
795 template <
class Handler>
797 Handler
const& handler,
799 :
hook(collector->make_hook(handler))
802 "Disconnected_duration"))
805 "Connected_duration"))
807 collector->make_gauge(
"State_Accounting",
"Syncing_duration"))
810 "Tracking_duration"))
812 collector->make_gauge(
"State_Accounting",
"Full_duration"))
815 "Disconnected_transitions"))
818 "Connected_transitions"))
821 "Syncing_transitions"))
824 "Tracking_transitions"))
826 collector->make_gauge(
"State_Accounting",
"Full_transitions"))
855 {
"disconnected",
"connected",
"syncing",
"tracking",
"full"}};
917 static std::string const hostname = boost::asio::ip::host_name();
924 static std::string const shroudedHostId = [
this]() {
930 return shroudedHostId;
945 boost::asio::steady_timer& timer,
952 [
this, onExpire, onError](boost::system::error_code
const& e) {
953 if ((e.value() == boost::system::errc::success) &&
954 (!m_job_queue.isStopped()))
959 if (e.value() != boost::system::errc::success &&
960 e.value() != boost::asio::error::operation_aborted)
963 JLOG(m_journal.error())
964 <<
"Timer got error '" << e.message()
965 <<
"'. Restarting timer.";
970 timer.expires_from_now(expiry_time);
971 timer.async_wait(std::move(*optionalCountedHandler));
976NetworkOPsImp::setHeartbeatTimer()
980 mConsensus.parms().ledgerGRANULARITY,
982 m_job_queue.addJob(jtNETOP_TIMER,
"NetOPs.heartbeat", [this]() {
983 processHeartbeatTimer();
986 [
this]() { setHeartbeatTimer(); });
990NetworkOPsImp::setClusterTimer()
992 using namespace std::chrono_literals;
999 processClusterTimer();
1002 [
this]() { setClusterTimer(); });
1008 JLOG(m_journal.debug()) <<
"Scheduling AccountHistory job for account "
1010 using namespace std::chrono_literals;
1012 accountHistoryTxTimer_,
1014 [
this, subInfo]() { addAccountHistoryJob(subInfo); },
1015 [
this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1019NetworkOPsImp::processHeartbeatTimer()
1022 "Heartbeat Timer", mConsensus.validating(), m_journal);
1030 std::size_t const numPeers = app_.overlay().size();
1033 if (numPeers < minPeerCount_)
1035 if (mMode != OperatingMode::DISCONNECTED)
1037 setMode(OperatingMode::DISCONNECTED);
1039 ss <<
"Node count (" << numPeers <<
") has fallen "
1040 <<
"below required minimum (" << minPeerCount_ <<
").";
1041 JLOG(m_journal.warn()) << ss.
str();
1042 CLOG(clog.
ss()) <<
"set mode to DISCONNECTED: " << ss.
str();
1047 <<
"already DISCONNECTED. too few peers (" << numPeers
1048 <<
"), need at least " << minPeerCount_;
1055 setHeartbeatTimer();
1060 if (mMode == OperatingMode::DISCONNECTED)
1062 setMode(OperatingMode::CONNECTED);
1063 JLOG(m_journal.info())
1064 <<
"Node count (" << numPeers <<
") is sufficient.";
1065 CLOG(clog.
ss()) <<
"setting mode to CONNECTED based on " << numPeers
1071 auto origMode = mMode.load();
1072 CLOG(clog.
ss()) <<
"mode: " << strOperatingMode(origMode,
true);
1073 if (mMode == OperatingMode::SYNCING)
1074 setMode(OperatingMode::SYNCING);
1075 else if (mMode == OperatingMode::CONNECTED)
1076 setMode(OperatingMode::CONNECTED);
1077 auto newMode = mMode.load();
1078 if (origMode != newMode)
1081 <<
", changing to " << strOperatingMode(newMode,
true);
1083 CLOG(clog.
ss()) <<
". ";
1086 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.
ss());
1088 CLOG(clog.
ss()) <<
"consensus phase " << to_string(mLastConsensusPhase);
1090 if (mLastConsensusPhase != currPhase)
1092 reportConsensusStateChange(currPhase);
1093 mLastConsensusPhase = currPhase;
1094 CLOG(clog.
ss()) <<
" changed to " << to_string(mLastConsensusPhase);
1096 CLOG(clog.
ss()) <<
". ";
1098 setHeartbeatTimer();
1102NetworkOPsImp::processClusterTimer()
1104 if (app_.cluster().size() == 0)
1107 using namespace std::chrono_literals;
1109 bool const update = app_.cluster().update(
1110 app_.nodeIdentity().first,
1112 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1113 ? app_.getFeeTrack().getLocalFee()
1115 app_.timeKeeper().now());
1119 JLOG(m_journal.debug()) <<
"Too soon to send cluster update";
1124 protocol::TMCluster cluster;
1125 app_.cluster().for_each([&cluster](
ClusterNode const& node) {
1126 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1131 n.set_nodename(node.
name());
1135 for (
auto& item : gossip.
items)
1137 protocol::TMLoadSource& node = *cluster.add_loadsources();
1138 node.set_name(to_string(item.address));
1139 node.set_cost(item.balance);
1141 app_.overlay().foreach(
send_if(
1142 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1153 if (mode == OperatingMode::FULL && admin)
1155 auto const consensusMode = mConsensus.mode();
1156 if (consensusMode != ConsensusMode::wrongLedger)
1158 if (consensusMode == ConsensusMode::proposing)
1161 if (mConsensus.validating())
1162 return "validating";
1172 if (isNeedNetworkLedger())
1181 auto const txid = trans->getTransactionID();
1182 auto const flags = app_.getHashRouter().getFlags(txid);
1184 if ((flags & SF_BAD) != 0)
1186 JLOG(m_journal.warn()) <<
"Submitted transaction cached bad";
1193 app_.getHashRouter(),
1195 m_ledgerMaster.getValidatedRules(),
1198 if (validity != Validity::Valid)
1200 JLOG(m_journal.warn())
1201 <<
"Submitted transaction invalid: " << reason;
1207 JLOG(m_journal.warn())
1208 <<
"Exception checking transaction " << txid <<
": " << ex.
what();
1215 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1217 m_job_queue.addJob(
jtTRANSACTION,
"submitTxn", [
this, tx]() {
1219 processTransaction(t,
false,
false, FailHard::no);
1224NetworkOPsImp::processTransaction(
1230 auto ev = m_job_queue.makeLoadEvent(
jtTXN_PROC,
"ProcessTXN");
1231 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1233 if ((newFlags & SF_BAD) != 0)
1236 JLOG(m_journal.warn()) << transaction->getID() <<
": cached bad!\n";
1237 transaction->setStatus(
INVALID);
1245 auto const view = m_ledgerMaster.getCurrentLedger();
1247 app_.getHashRouter(),
1248 *transaction->getSTransaction(),
1252 validity == Validity::Valid,
1253 "ripple::NetworkOPsImp::processTransaction : valid validity");
1256 if (validity == Validity::SigBad)
1258 JLOG(m_journal.info()) <<
"Transaction has bad signature: " << reason;
1259 transaction->setStatus(
INVALID);
1261 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1266 app_.getMasterTransaction().canonicalize(&transaction);
1269 doTransactionSync(transaction, bUnlimited, failType);
1271 doTransactionAsync(transaction, bUnlimited, failType);
1275NetworkOPsImp::doTransactionAsync(
1282 if (transaction->getApplying())
1285 mTransactions.push_back(
1287 transaction->setApplying();
1289 if (mDispatchState == DispatchState::none)
1291 if (m_job_queue.addJob(
1292 jtBATCH,
"transactionBatch", [
this]() { transactionBatch(); }))
1294 mDispatchState = DispatchState::scheduled;
1300NetworkOPsImp::doTransactionSync(
1307 if (!transaction->getApplying())
1309 mTransactions.push_back(
1311 transaction->setApplying();
1316 if (mDispatchState == DispatchState::running)
1325 if (mTransactions.size())
1328 if (m_job_queue.addJob(
jtBATCH,
"transactionBatch", [
this]() {
1332 mDispatchState = DispatchState::scheduled;
1336 }
while (transaction->getApplying());
1340NetworkOPsImp::transactionBatch()
1344 if (mDispatchState == DispatchState::running)
1347 while (mTransactions.size())
1358 mTransactions.
swap(transactions);
1360 !transactions.
empty(),
1361 "ripple::NetworkOPsImp::apply : non-empty transactions");
1363 mDispatchState != DispatchState::running,
1364 "ripple::NetworkOPsImp::apply : is not running");
1366 mDispatchState = DispatchState::running;
1372 bool changed =
false;
1375 m_ledgerMaster.peekMutex(), std::defer_lock};
1386 if (e.failType == FailHard::yes)
1389 auto const result = app_.getTxQ().apply(
1390 app_, view, e.transaction->getSTransaction(), flags, j);
1391 e.result = result.ter;
1392 e.applied = result.applied;
1393 changed = changed || result.applied;
1402 if (
auto const l = m_ledgerMaster.getValidatedLedger())
1403 validatedLedgerIndex = l->info().seq;
1405 auto newOL = app_.openLedger().current();
1408 e.transaction->clearSubmitResult();
1412 pubProposedTransaction(
1413 newOL, e.transaction->getSTransaction(), e.result);
1414 e.transaction->setApplied();
1417 e.transaction->setResult(e.result);
1420 app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);
1429 JLOG(m_journal.info())
1430 <<
"TransactionResult: " << token <<
": " << human;
1435 bool addLocal = e.local;
1439 JLOG(m_journal.debug())
1440 <<
"Transaction is now included in open ledger";
1441 e.transaction->setStatus(
INCLUDED);
1443 auto const& txCur = e.transaction->getSTransaction();
1444 auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);
1449 auto t = std::make_shared<Transaction>(trans, reason, app_);
1450 submit_held.
emplace_back(t,
false,
false, FailHard::no);
1457 JLOG(m_journal.info()) <<
"Transaction is obsolete";
1458 e.transaction->setStatus(
OBSOLETE);
1462 JLOG(m_journal.debug())
1463 <<
"Transaction is likely to claim a"
1464 <<
" fee, but is queued until fee drops";
1466 e.transaction->setStatus(
HELD);
1470 m_ledgerMaster.addHeldTransaction(e.transaction);
1471 e.transaction->setQueued();
1472 e.transaction->setKept();
1476 if (e.failType != FailHard::yes)
1479 JLOG(m_journal.debug())
1480 <<
"Transaction should be held: " << e.result;
1481 e.transaction->setStatus(
HELD);
1482 m_ledgerMaster.addHeldTransaction(e.transaction);
1483 e.transaction->setKept();
1488 JLOG(m_journal.debug())
1489 <<
"Status other than success " << e.result;
1490 e.transaction->setStatus(
INVALID);
1493 auto const enforceFailHard =
1494 e.failType == FailHard::yes && !
isTesSuccess(e.result);
1496 if (addLocal && !enforceFailHard)
1498 m_localTX->push_back(
1499 m_ledgerMaster.getCurrentLedgerIndex(),
1500 e.transaction->getSTransaction());
1501 e.transaction->setKept();
1505 ((mMode != OperatingMode::FULL) &&
1506 (e.failType != FailHard::yes) && e.local) ||
1511 app_.getHashRouter().shouldRelay(e.transaction->getID());
1515 protocol::TMTransaction tx;
1518 e.transaction->getSTransaction()->add(s);
1519 tx.set_rawtransaction(s.
data(), s.
size());
1520 tx.set_status(protocol::tsCURRENT);
1521 tx.set_receivetimestamp(
1522 app_.timeKeeper().now().time_since_epoch().count());
1525 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1526 e.transaction->setBroadcast();
1530 if (validatedLedgerIndex)
1532 auto [fee, accountSeq, availableSeq] =
1533 app_.getTxQ().getTxRequiredFeeAndSeq(
1534 *newOL, e.transaction->getSTransaction());
1535 e.transaction->setCurrentLedgerState(
1536 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1544 e.transaction->clearApplying();
1546 if (!submit_held.
empty())
1548 if (mTransactions.empty())
1549 mTransactions.swap(submit_held);
1551 for (
auto& e : submit_held)
1552 mTransactions.push_back(std::move(e));
1557 mDispatchState = DispatchState::none;
1565NetworkOPsImp::getOwnerInfo(
1570 auto root = keylet::ownerDir(account);
1571 auto sleNode = lpLedger->read(keylet::page(
root));
1578 for (
auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1580 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1583 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1585 switch (sleCur->getType())
1588 if (!jvObjects.
isMember(jss::offers))
1589 jvObjects[jss::offers] =
1592 jvObjects[jss::offers].
append(
1593 sleCur->getJson(JsonOptions::none));
1596 case ltRIPPLE_STATE:
1597 if (!jvObjects.
isMember(jss::ripple_lines))
1599 jvObjects[jss::ripple_lines] =
1603 jvObjects[jss::ripple_lines].
append(
1604 sleCur->getJson(JsonOptions::none));
1607 case ltACCOUNT_ROOT:
1611 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1617 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1621 sleNode = lpLedger->read(keylet::page(
root, uNodeDir));
1624 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1637NetworkOPsImp::isBlocked()
1639 return isAmendmentBlocked() || isUNLBlocked();
1643NetworkOPsImp::isAmendmentBlocked()
1645 return amendmentBlocked_;
1649NetworkOPsImp::setAmendmentBlocked()
1651 amendmentBlocked_ =
true;
1652 setMode(OperatingMode::CONNECTED);
1656NetworkOPsImp::isAmendmentWarned()
1658 return !amendmentBlocked_ && amendmentWarned_;
1662NetworkOPsImp::setAmendmentWarned()
1664 amendmentWarned_ =
true;
1668NetworkOPsImp::clearAmendmentWarned()
1670 amendmentWarned_ =
false;
1674NetworkOPsImp::isUNLBlocked()
1680NetworkOPsImp::setUNLBlocked()
1683 setMode(OperatingMode::CONNECTED);
1687NetworkOPsImp::clearUNLBlocked()
1689 unlBlocked_ =
false;
1693NetworkOPsImp::checkLastClosedLedger(
1702 JLOG(m_journal.trace()) <<
"NetworkOPsImp::checkLastClosedLedger";
1704 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1709 uint256 closedLedger = ourClosed->info().hash;
1710 uint256 prevClosedLedger = ourClosed->info().parentHash;
1711 JLOG(m_journal.trace()) <<
"OurClosed: " << closedLedger;
1712 JLOG(m_journal.trace()) <<
"PrevClosed: " << prevClosedLedger;
1717 auto& validations = app_.getValidations();
1718 JLOG(m_journal.debug())
1719 <<
"ValidationTrie " <<
Json::Compact(validations.getJsonTrie());
1723 peerCounts[closedLedger] = 0;
1724 if (mMode >= OperatingMode::TRACKING)
1725 peerCounts[closedLedger]++;
1727 for (
auto& peer : peerList)
1729 uint256 peerLedger = peer->getClosedLedgerHash();
1732 ++peerCounts[peerLedger];
1735 for (
auto const& it : peerCounts)
1736 JLOG(m_journal.debug()) <<
"L: " << it.first <<
" n=" << it.second;
1738 uint256 preferredLCL = validations.getPreferredLCL(
1740 m_ledgerMaster.getValidLedgerIndex(),
1743 bool switchLedgers = preferredLCL != closedLedger;
1745 closedLedger = preferredLCL;
1747 if (switchLedgers && (closedLedger == prevClosedLedger))
1750 JLOG(m_journal.info()) <<
"We won't switch to our own previous ledger";
1751 networkClosed = ourClosed->info().hash;
1752 switchLedgers =
false;
1755 networkClosed = closedLedger;
1760 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1763 consensus = app_.getInboundLedgers().acquire(
1764 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1767 (!m_ledgerMaster.canBeCurrent(consensus) ||
1768 !m_ledgerMaster.isCompatible(
1769 *consensus, m_journal.debug(),
"Not switching")))
1773 networkClosed = ourClosed->info().hash;
1777 JLOG(m_journal.warn()) <<
"We are not running on the consensus ledger";
1778 JLOG(m_journal.info()) <<
"Our LCL: " << ourClosed->info().hash
1780 JLOG(m_journal.info()) <<
"Net LCL " << closedLedger;
1782 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1784 setMode(OperatingMode::CONNECTED);
1792 switchLastClosedLedger(consensus);
1799NetworkOPsImp::switchLastClosedLedger(
1803 JLOG(m_journal.error())
1804 <<
"JUMP last closed ledger to " << newLCL->info().hash;
1806 clearNeedNetworkLedger();
1809 app_.getTxQ().processClosedLedger(app_, *newLCL,
true);
1816 auto retries = m_localTX->getTxSet();
1817 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1822 rules.
emplace(app_.config().features);
1823 app_.openLedger().accept(
1834 return app_.getTxQ().accept(app_, view);
1838 m_ledgerMaster.switchLCL(newLCL);
1840 protocol::TMStatusChange s;
1841 s.set_newevent(protocol::neSWITCHED_LEDGER);
1842 s.set_ledgerseq(newLCL->info().seq);
1843 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1844 s.set_ledgerhashprevious(
1845 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
1846 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
1848 app_.overlay().foreach(
1849 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
1853NetworkOPsImp::beginConsensus(
1859 "ripple::NetworkOPsImp::beginConsensus : nonzero input");
1861 auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
1863 JLOG(m_journal.info()) <<
"Consensus time for #" << closingInfo.seq
1864 <<
" with LCL " << closingInfo.parentHash;
1866 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
1871 if (mMode == OperatingMode::FULL)
1873 JLOG(m_journal.warn()) <<
"Don't have LCL, going to tracking";
1874 setMode(OperatingMode::TRACKING);
1875 CLOG(clog) <<
"beginConsensus Don't have LCL, going to tracking. ";
1878 CLOG(clog) <<
"beginConsensus no previous ledger. ";
1883 prevLedger->info().hash == closingInfo.parentHash,
1884 "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
1887 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
1888 "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
1891 if (prevLedger->rules().enabled(featureNegativeUNL))
1892 app_.validators().setNegativeUNL(prevLedger->negativeUNL());
1893 TrustChanges const changes = app_.validators().updateTrusted(
1894 app_.getValidations().getCurrentNodeIDs(),
1895 closingInfo.parentCloseTime,
1898 app_.getHashRouter());
1900 if (!changes.
added.empty() || !changes.
removed.empty())
1902 app_.getValidations().trustChanged(changes.
added, changes.
removed);
1904 app_.getAmendmentTable().trustChanged(
1905 app_.validators().getQuorumKeys().second);
1908 mConsensus.startRound(
1909 app_.timeKeeper().closeTime(),
1917 if (mLastConsensusPhase != currPhase)
1919 reportConsensusStateChange(currPhase);
1920 mLastConsensusPhase = currPhase;
1923 JLOG(m_journal.debug()) <<
"Initiating consensus engine";
1930 auto const& peerKey = peerPos.
publicKey();
1931 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
1940 "ripple::NetworkOPsImp::processTrustedProposal : received own "
1942 JLOG(m_journal.error())
1943 <<
"Received a TRUSTED proposal signed with my key from a peer";
1947 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
1958 protocol::TMHaveTransactionSet msg;
1959 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
1960 msg.set_status(protocol::tsHAVE);
1961 app_.overlay().foreach(
1962 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
1966 mConsensus.gotTxSet(app_.timeKeeper().closeTime(),
RCLTxSet{map});
1972 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
1974 for (
auto const& it : app_.overlay().getActivePeers())
1976 if (it && (it->getClosedLedgerHash() == deadLedger))
1978 JLOG(m_journal.trace()) <<
"Killing obsolete peer status";
1985 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
1987 if (networkClosed.
isZero())
1989 CLOG(clog) <<
"endConsensus last closed ledger is zero. ";
1999 if (((mMode == OperatingMode::CONNECTED) ||
2000 (mMode == OperatingMode::SYNCING)) &&
2006 if (!needNetworkLedger_)
2007 setMode(OperatingMode::TRACKING);
2010 if (((mMode == OperatingMode::CONNECTED) ||
2011 (mMode == OperatingMode::TRACKING)) &&
2017 auto current = m_ledgerMaster.getCurrentLedger();
2018 if (app_.timeKeeper().now() < (
current->info().parentCloseTime +
2019 2 *
current->info().closeTimeResolution))
2021 setMode(OperatingMode::FULL);
2025 beginConsensus(networkClosed, clog);
2029NetworkOPsImp::consensusViewChange()
2031 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2033 setMode(OperatingMode::CONNECTED);
2043 if (!mStreamMaps[sManifests].empty())
2047 jvObj[jss::type] =
"manifestReceived";
2050 jvObj[jss::signing_key] =
2054 jvObj[jss::signature] =
strHex(*sig);
2057 jvObj[jss::domain] = mo.
domain;
2060 for (
auto i = mStreamMaps[sManifests].begin();
2061 i != mStreamMaps[sManifests].end();)
2063 if (
auto p = i->second.lock())
2065 p->send(jvObj,
true);
2070 i = mStreamMaps[sManifests].erase(i);
2076NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
2080 : loadFactorServer{loadFeeTrack.getLoadFactor()}
2081 , loadBaseServer{loadFeeTrack.getLoadBase()}
2083 , em{
std::move(escalationMetrics)}
2093 em.has_value() != b.
em.has_value())
2099 em->minProcessingFeeLevel != b.
em->minProcessingFeeLevel ||
2100 em->openLedgerFeeLevel != b.
em->openLedgerFeeLevel ||
2101 em->referenceFeeLevel != b.
em->referenceFeeLevel);
2134 jvObj[jss::type] =
"serverStatus";
2136 jvObj[jss::load_base] = f.loadBaseServer;
2137 jvObj[jss::load_factor_server] = f.loadFactorServer;
2138 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2143 safe_cast<std::uint64_t>(f.loadFactorServer),
2145 f.em->openLedgerFeeLevel,
2147 f.em->referenceFeeLevel)
2150 jvObj[jss::load_factor] =
trunc32(loadFactor);
2151 jvObj[jss::load_factor_fee_escalation] =
2152 f.em->openLedgerFeeLevel.jsonClipped();
2153 jvObj[jss::load_factor_fee_queue] =
2154 f.em->minProcessingFeeLevel.jsonClipped();
2155 jvObj[jss::load_factor_fee_reference] =
2156 f.em->referenceFeeLevel.jsonClipped();
2159 jvObj[jss::load_factor] = f.loadFactorServer;
2173 p->send(jvObj,
true);
2190 if (!streamMap.empty())
2193 jvObj[jss::type] =
"consensusPhase";
2194 jvObj[jss::consensus] =
to_string(phase);
2196 for (
auto i = streamMap.begin(); i != streamMap.end();)
2198 if (
auto p = i->second.lock())
2200 p->send(jvObj,
true);
2205 i = streamMap.erase(i);
2221 auto const signerPublic = val->getSignerPublic();
2223 jvObj[jss::type] =
"validationReceived";
2224 jvObj[jss::validation_public_key] =
2226 jvObj[jss::ledger_hash] =
to_string(val->getLedgerHash());
2227 jvObj[jss::signature] =
strHex(val->getSignature());
2228 jvObj[jss::full] = val->isFull();
2229 jvObj[jss::flags] = val->getFlags();
2230 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2231 jvObj[jss::data] =
strHex(val->getSerializer().slice());
2233 if (
auto version = (*val)[~sfServerVersion])
2236 if (
auto cookie = (*val)[~sfCookie])
2239 if (
auto hash = (*val)[~sfValidatedHash])
2240 jvObj[jss::validated_hash] =
strHex(*hash);
2242 auto const masterKey =
2245 if (masterKey != signerPublic)
2250 if (
auto const seq = (*val)[~sfLedgerSequence])
2251 jvObj[jss::ledger_index] = *seq;
2253 if (val->isFieldPresent(sfAmendments))
2256 for (
auto const& amendment : val->getFieldV256(sfAmendments))
2260 if (
auto const closeTime = (*val)[~sfCloseTime])
2261 jvObj[jss::close_time] = *closeTime;
2263 if (
auto const loadFee = (*val)[~sfLoadFee])
2264 jvObj[jss::load_fee] = *loadFee;
2266 if (
auto const baseFee = val->at(~sfBaseFee))
2267 jvObj[jss::base_fee] =
static_cast<double>(*baseFee);
2269 if (
auto const reserveBase = val->at(~sfReserveBase))
2270 jvObj[jss::reserve_base] = *reserveBase;
2272 if (
auto const reserveInc = val->at(~sfReserveIncrement))
2273 jvObj[jss::reserve_inc] = *reserveInc;
2277 if (
auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2278 baseFeeXRP && baseFeeXRP->native())
2279 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2281 if (
auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2282 reserveBaseXRP && reserveBaseXRP->native())
2283 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2285 if (
auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2286 reserveIncXRP && reserveIncXRP->native())
2287 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2296 if (jvTx.
isMember(jss::ledger_index))
2298 jvTx[jss::ledger_index] =
2299 std::to_string(jvTx[jss::ledger_index].asUInt());
2306 if (
auto p = i->second.lock())
2310 [&](
Json::Value const& jv) { p->send(jv, true); });
2330 jvObj[jss::type] =
"peerStatusChange";
2339 p->send(jvObj,
true);
2353 using namespace std::chrono_literals;
2385 <<
"recvValidation " << val->getLedgerHash() <<
" from " << source;
2401 <<
"Exception thrown for handling new validation "
2402 << val->getLedgerHash() <<
": " << e.
what();
2407 <<
"Unknown exception thrown for handling new validation "
2408 << val->getLedgerHash();
2420 ss <<
"VALIDATION: " << val->render() <<
" master_key: ";
2457 "This server is amendment blocked, and must be updated to be "
2458 "able to stay in sync with the network.";
2465 "This server has an expired validator list. validators.txt "
2466 "may be incorrectly configured or some [validator_list_sites] "
2467 "may be unreachable.";
2474 "One or more unsupported amendments have reached majority. "
2475 "Upgrade to the latest version before they are activated "
2476 "to avoid being amendment blocked.";
2477 if (
auto const expected =
2481 d[jss::expected_date] = expected->time_since_epoch().count();
2482 d[jss::expected_date_UTC] =
to_string(*expected);
2486 if (warnings.size())
2487 info[jss::warnings] = std::move(warnings);
2502 info[jss::time] =
to_string(std::chrono::floor<std::chrono::microseconds>(
2506 info[jss::network_ledger] =
"waiting";
2508 info[jss::validation_quorum] =
2516 info[jss::node_size] =
"tiny";
2519 info[jss::node_size] =
"small";
2522 info[jss::node_size] =
"medium";
2525 info[jss::node_size] =
"large";
2528 info[jss::node_size] =
"huge";
2537 info[jss::validator_list_expires] =
2538 safe_cast<Json::UInt>(when->time_since_epoch().count());
2540 info[jss::validator_list_expires] = 0;
2550 if (*when == TimeKeeper::time_point::max())
2552 x[jss::expiration] =
"never";
2553 x[jss::status] =
"active";
2560 x[jss::status] =
"active";
2562 x[jss::status] =
"expired";
2567 x[jss::status] =
"unknown";
2568 x[jss::expiration] =
"unknown";
2572#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2575#ifdef GIT_COMMIT_HASH
2576 x[jss::hash] = GIT_COMMIT_HASH;
2579 x[jss::branch] = GIT_BRANCH;
2584 info[jss::io_latency_ms] =
2592 info[jss::pubkey_validator] =
2597 info[jss::pubkey_validator] =
"none";
2607 info[jss::counters][jss::nodestore] = nodestore;
2611 info[jss::pubkey_node] =
2617 info[jss::amendment_blocked] =
true;
2631 lastClose[jss::converge_time_s] =
2636 lastClose[jss::converge_time] =
2640 info[jss::last_close] = lastClose;
2648 info[jss::network_id] =
static_cast<Json::UInt>(*netid);
2650 auto const escalationMetrics =
2658 auto const loadFactorFeeEscalation =
2660 escalationMetrics.openLedgerFeeLevel,
2662 escalationMetrics.referenceFeeLevel)
2666 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2670 info[jss::load_base] = loadBaseServer;
2671 info[jss::load_factor] =
trunc32(loadFactor);
2672 info[jss::load_factor_server] = loadFactorServer;
2679 info[jss::load_factor_fee_escalation] =
2680 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2681 info[jss::load_factor_fee_queue] =
2682 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2683 info[jss::load_factor_fee_reference] =
2684 escalationMetrics.referenceFeeLevel.jsonClipped();
2688 info[jss::load_factor] =
2689 static_cast<double>(loadFactor) / loadBaseServer;
2691 if (loadFactorServer != loadFactor)
2692 info[jss::load_factor_server] =
2693 static_cast<double>(loadFactorServer) / loadBaseServer;
2698 if (fee != loadBaseServer)
2699 info[jss::load_factor_local] =
2700 static_cast<double>(fee) / loadBaseServer;
2702 if (fee != loadBaseServer)
2703 info[jss::load_factor_net] =
2704 static_cast<double>(fee) / loadBaseServer;
2706 if (fee != loadBaseServer)
2707 info[jss::load_factor_cluster] =
2708 static_cast<double>(fee) / loadBaseServer;
2710 if (escalationMetrics.openLedgerFeeLevel !=
2711 escalationMetrics.referenceFeeLevel &&
2712 (admin || loadFactorFeeEscalation != loadFactor))
2713 info[jss::load_factor_fee_escalation] =
2714 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2715 escalationMetrics.referenceFeeLevel);
2716 if (escalationMetrics.minProcessingFeeLevel !=
2717 escalationMetrics.referenceFeeLevel)
2718 info[jss::load_factor_fee_queue] =
2719 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2720 escalationMetrics.referenceFeeLevel);
2733 XRPAmount const baseFee = lpClosed->fees().base;
2735 l[jss::seq] =
Json::UInt(lpClosed->info().seq);
2736 l[jss::hash] =
to_string(lpClosed->info().hash);
2741 l[jss::reserve_base] =
2742 lpClosed->fees().accountReserve(0).jsonClipped();
2743 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2745 lpClosed->info().closeTime.time_since_epoch().count());
2750 l[jss::reserve_base_xrp] =
2751 lpClosed->fees().accountReserve(0).decimalXRP();
2752 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2755 std::abs(closeOffset.count()) >= 60)
2756 l[jss::close_time_offset] =
2764 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2768 auto lCloseTime = lpClosed->info().closeTime;
2770 if (lCloseTime <= closeTime)
2772 using namespace std::chrono_literals;
2773 auto age = closeTime - lCloseTime;
2775 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2781 info[jss::validated_ledger] = l;
2783 info[jss::closed_ledger] = l;
2787 info[jss::published_ledger] =
"none";
2788 else if (lpPublished->info().seq != lpClosed->info().seq)
2789 info[jss::published_ledger] = lpPublished->info().seq;
2794 info[jss::jq_trans_overflow] =
2796 info[jss::peer_disconnects] =
2798 info[jss::peer_disconnects_resources] =
2803 "http",
"https",
"peer",
"ws",
"ws2",
"wss",
"wss2"};
2811 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2812 port.admin_user.empty() && port.admin_password.empty()))
2826 for (
auto const& p : proto)
2827 jv[jss::protocol].append(p);
2834 auto const optPort = grpcSection.
get(
"port");
2835 if (optPort && grpcSection.get(
"ip"))
2838 jv[jss::port] = *optPort;
2840 jv[jss::protocol].append(
"grpc");
2843 info[jss::ports] = std::move(ports);
2868 transJson(transaction, result,
false, ledger, std::nullopt);
2882 [&](
Json::Value const& jv) { p->send(jv, true); });
2905 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted,
app_);
2907 lpAccepted->info().hash, alpAccepted);
2911 alpAccepted->getLedger().
get() == lpAccepted.
get(),
2912 "ripple::NetworkOPsImp::pubLedger : accepted input");
2916 <<
"Publishing ledger " << lpAccepted->info().seq <<
" "
2917 << lpAccepted->info().hash;
2925 jvObj[jss::type] =
"ledgerClosed";
2926 jvObj[jss::ledger_index] = lpAccepted->info().seq;
2927 jvObj[jss::ledger_hash] =
to_string(lpAccepted->info().hash);
2929 lpAccepted->info().closeTime.time_since_epoch().count());
2931 if (!lpAccepted->rules().enabled(featureXRPFees))
2933 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
2934 jvObj[jss::reserve_base] =
2935 lpAccepted->fees().accountReserve(0).jsonClipped();
2936 jvObj[jss::reserve_inc] =
2937 lpAccepted->fees().increment.jsonClipped();
2939 jvObj[jss::txn_count] =
Json::UInt(alpAccepted->size());
2943 jvObj[jss::validated_ledgers] =
2953 p->send(jvObj,
true);
2971 p->send(jvObj,
true);
2980 static bool firstTime =
true;
2987 for (
auto& inner : outer.second)
2989 auto& subInfo = inner.second;
2990 if (subInfo.index_->separationLedgerSeq_ == 0)
2993 alpAccepted->getLedger(), subInfo);
3002 for (
auto const& accTx : *alpAccepted)
3006 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3033 "reportConsensusStateChange->pubConsensus",
3064 jvObj[jss::type] =
"transaction";
3068 jvObj[jss::transaction] =
3075 jvObj[jss::meta], *ledger, transaction, meta->
get());
3077 jvObj[jss::meta], transaction, meta->
get());
3080 if (!ledger->open())
3081 jvObj[jss::ledger_hash] =
to_string(ledger->info().hash);
3085 jvObj[jss::ledger_index] = ledger->info().seq;
3086 jvObj[jss::transaction][jss::date] =
3087 ledger->info().closeTime.time_since_epoch().count();
3088 jvObj[jss::validated] =
true;
3089 jvObj[jss::close_time_iso] =
to_string_iso(ledger->info().closeTime);
3095 jvObj[jss::validated] =
false;
3096 jvObj[jss::ledger_current_index] = ledger->info().seq;
3099 jvObj[jss::status] = validated ?
"closed" :
"proposed";
3100 jvObj[jss::engine_result] = sToken;
3101 jvObj[jss::engine_result_code] = result;
3102 jvObj[jss::engine_result_message] = sHuman;
3104 if (transaction->getTxnType() == ttOFFER_CREATE)
3106 auto const account = transaction->getAccountID(sfAccount);
3107 auto const amount = transaction->getFieldAmount(sfTakerGets);
3110 if (account != amount.issue().account)
3118 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3126 [&]<
unsigned Version>(
3128 RPC::insertDeliverMax(
3129 jvTx[jss::transaction], transaction->getTxnType(), Version);
3131 if constexpr (Version > 1)
3133 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3134 jvTx[jss::hash] = hash;
3138 jvTx[jss::transaction][jss::hash] = hash;
3151 auto const& stTxn = transaction.
getTxn();
3155 auto const trResult = transaction.
getResult();
3170 [&](
Json::Value const& jv) { p->send(jv, true); });
3187 [&](
Json::Value const& jv) { p->send(jv, true); });
3212 auto const currLedgerSeq = ledger->seq();
3219 for (
auto const& affectedAccount : transaction.
getAffected())
3224 auto it = simiIt->second.begin();
3226 while (it != simiIt->second.end())
3237 it = simiIt->second.erase(it);
3244 auto it = simiIt->second.begin();
3245 while (it != simiIt->second.end())
3256 it = simiIt->second.erase(it);
3263 auto& subs = histoIt->second;
3264 auto it = subs.begin();
3265 while (it != subs.end())
3268 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3282 it = subs.erase(it);
3293 <<
"pubAccountTransaction: " <<
"proposed=" << iProposed
3294 <<
", accepted=" << iAccepted;
3296 if (!notify.
empty() || !accountHistoryNotify.
empty())
3298 auto const& stTxn = transaction.
getTxn();
3302 auto const trResult = transaction.
getResult();
3308 isrListener->getApiVersion(),
3309 [&](
Json::Value const& jv) { isrListener->send(jv, true); });
3313 jvObj.
set(jss::account_history_boundary,
true);
3316 jvObj.
isMember(jss::account_history_tx_stream) ==
3318 "ripple::NetworkOPsImp::pubAccountTransaction : "
3319 "account_history_tx_stream not set");
3320 for (
auto& info : accountHistoryNotify)
3322 auto& index = info.index_;
3323 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3324 jvObj.
set(jss::account_history_tx_first,
true);
3326 jvObj.
set(jss::account_history_tx_index, index->forwardTxIndex_++);
3329 info.sink_->getApiVersion(),
3330 [&](
Json::Value const& jv) { info.sink_->send(jv, true); });
3355 for (
auto const& affectedAccount : tx->getMentionedAccounts())
3360 auto it = simiIt->second.begin();
3362 while (it != simiIt->second.end())
3373 it = simiIt->second.erase(it);
3380 JLOG(
m_journal.
trace()) <<
"pubProposedAccountTransaction: " << iProposed;
3382 if (!notify.
empty() || !accountHistoryNotify.
empty())
3389 isrListener->getApiVersion(),
3390 [&](
Json::Value const& jv) { isrListener->send(jv, true); });
3393 jvObj.
isMember(jss::account_history_tx_stream) ==
3395 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3396 "account_history_tx_stream not set");
3397 for (
auto& info : accountHistoryNotify)
3399 auto& index = info.index_;
3400 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3401 jvObj.
set(jss::account_history_tx_first,
true);
3402 jvObj.
set(jss::account_history_tx_index, index->forwardTxIndex_++);
3404 info.sink_->getApiVersion(),
3405 [&](
Json::Value const& jv) { info.sink_->send(jv, true); });
3422 for (
auto const& naAccountID : vnaAccountIDs)
3425 <<
"subAccount: account: " <<
toBase58(naAccountID);
3427 isrListener->insertSubAccountInfo(naAccountID, rt);
3432 for (
auto const& naAccountID : vnaAccountIDs)
3434 auto simIterator = subMap.
find(naAccountID);
3435 if (simIterator == subMap.
end())
3439 usisElement[isrListener->getSeq()] = isrListener;
3441 subMap.
insert(simIterator, make_pair(naAccountID, usisElement));
3446 simIterator->second[isrListener->getSeq()] = isrListener;
3457 for (
auto const& naAccountID : vnaAccountIDs)
3460 isrListener->deleteSubAccountInfo(naAccountID, rt);
3477 for (
auto const& naAccountID : vnaAccountIDs)
3479 auto simIterator = subMap.
find(naAccountID);
3481 if (simIterator != subMap.
end())
3484 simIterator->second.erase(uSeq);
3486 if (simIterator->second.empty())
3489 subMap.
erase(simIterator);
3498 enum DatabaseType { Sqlite,
None };
3499 static const auto databaseType = [&]() -> DatabaseType {
3504 return DatabaseType::Sqlite;
3506 return DatabaseType::None;
3509 if (databaseType == DatabaseType::None)
3512 <<
"AccountHistory job for account "
3524 "AccountHistoryTxStream",
3525 [
this, dbType = databaseType, subInfo]() {
3526 auto const& accountId = subInfo.
index_->accountId_;
3527 auto& lastLedgerSeq = subInfo.
index_->historyLastLedgerSeq_;
3528 auto& txHistoryIndex = subInfo.
index_->historyTxIndex_;
3531 <<
"AccountHistory job for account " <<
toBase58(accountId)
3532 <<
" started. lastLedgerSeq=" << lastLedgerSeq;
3542 auto stx = tx->getSTransaction();
3543 if (stx->getAccountID(sfAccount) == accountId &&
3544 stx->getSeqProxy().value() == 1)
3548 for (
auto& node : meta->getNodes())
3550 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3553 if (node.isFieldPresent(sfNewFields))
3555 if (
auto inner =
dynamic_cast<const STObject*
>(
3556 node.peekAtPField(sfNewFields));
3559 if (inner->isFieldPresent(sfAccount) &&
3560 inner->getAccountID(sfAccount) == accountId)
3572 bool unsubscribe) ->
bool {
3575 sptr->send(jvObj,
true);
3585 bool unsubscribe) ->
bool {
3589 sptr->getApiVersion(),
3590 [&](
Json::Value const& jv) { sptr->send(jv,
true); });
3613 accountId, minLedger, maxLedger, marker, 0,
true};
3614 return db->newestAccountTxPage(options);
3618 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3619 "getMoreTxns : invalid database type");
3628 while (lastLedgerSeq >= 2 && !subInfo.
index_->stopHistorical_)
3630 int feeChargeCount = 0;
3639 <<
"AccountHistory job for account "
3640 <<
toBase58(accountId) <<
" no InfoSub. Fee charged "
3641 << feeChargeCount <<
" times.";
3646 auto startLedgerSeq =
3647 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3649 <<
"AccountHistory job for account " <<
toBase58(accountId)
3650 <<
", working on ledger range [" << startLedgerSeq <<
","
3651 << lastLedgerSeq <<
"]";
3653 auto haveRange = [&]() ->
bool {
3656 auto haveSomeValidatedLedgers =
3658 validatedMin, validatedMax);
3660 return haveSomeValidatedLedgers &&
3661 validatedMin <= startLedgerSeq &&
3662 lastLedgerSeq <= validatedMax;
3668 <<
"AccountHistory reschedule job for account "
3669 <<
toBase58(accountId) <<
", incomplete ledger range ["
3670 << startLedgerSeq <<
"," << lastLedgerSeq <<
"]";
3676 while (!subInfo.
index_->stopHistorical_)
3679 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3683 <<
"AccountHistory job for account "
3684 <<
toBase58(accountId) <<
" getMoreTxns failed.";
3689 auto const& txns = dbResult->first;
3690 marker = dbResult->second;
3691 size_t num_txns = txns.size();
3692 for (
size_t i = 0; i < num_txns; ++i)
3694 auto const& [tx, meta] = txns[i];
3699 <<
"AccountHistory job for account "
3700 <<
toBase58(accountId) <<
" empty tx or meta.";
3710 <<
"AccountHistory job for account "
3711 <<
toBase58(accountId) <<
" no ledger.";
3716 tx->getSTransaction();
3720 <<
"AccountHistory job for account "
3722 <<
" getSTransaction failed.";
3728 auto const trR = meta->getResultTER();
3730 transJson(stTxn, trR,
true, curTxLedger, mRef);
3733 jss::account_history_tx_index, txHistoryIndex--);
3734 if (i + 1 == num_txns ||
3735 txns[i + 1].first->getLedger() != tx->getLedger())
3736 jvTx.
set(jss::account_history_boundary,
true);
3738 if (isFirstTx(tx, meta))
3740 jvTx.
set(jss::account_history_tx_first,
true);
3741 sendMultiApiJson(jvTx,
false);
3744 <<
"AccountHistory job for account "
3746 <<
" done, found last tx.";
3751 sendMultiApiJson(jvTx,
false);
3758 <<
"AccountHistory job for account "
3760 <<
" paging, marker=" << marker->ledgerSeq <<
":"
3769 if (!subInfo.
index_->stopHistorical_)
3771 lastLedgerSeq = startLedgerSeq - 1;
3772 if (lastLedgerSeq <= 1)
3775 <<
"AccountHistory job for account "
3777 <<
" done, reached genesis ledger.";
3790 subInfo.
index_->separationLedgerSeq_ = ledger->seq();
3791 auto const& accountId = subInfo.
index_->accountId_;
3793 if (!ledger->exists(accountKeylet))
3796 <<
"subAccountHistoryStart, no account " <<
toBase58(accountId)
3797 <<
", no need to add AccountHistory job.";
3802 if (
auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3804 if (sleAcct->getFieldU32(sfSequence) == 1)
3807 <<
"subAccountHistoryStart, genesis account "
3809 <<
" does not have tx, no need to add AccountHistory job.";
3816 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
3817 "access genesis account");
3821 subInfo.
index_->historyLastLedgerSeq_ = ledger->seq();
3822 subInfo.
index_->haveHistorical_ =
true;
3825 <<
"subAccountHistoryStart, add AccountHistory job: accountId="
3826 <<
toBase58(accountId) <<
", currentLedgerSeq=" << ledger->seq();
3836 if (!isrListener->insertSubAccountHistory(accountId))
3839 <<
"subAccountHistory, already subscribed to account "
3846 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
3851 inner.
emplace(isrListener->getSeq(), ahi);
3857 simIterator->second.emplace(isrListener->getSeq(), ahi);
3871 <<
"subAccountHistory, no validated ledger yet, delay start";
3884 isrListener->deleteSubAccountHistory(account);
3898 auto& subInfoMap = simIterator->second;
3899 auto subInfoIter = subInfoMap.find(seq);
3900 if (subInfoIter != subInfoMap.end())
3902 subInfoIter->second.index_->stopHistorical_ =
true;
3907 simIterator->second.erase(seq);
3908 if (simIterator->second.empty())
3914 <<
"unsubAccountHistory, account " <<
toBase58(account)
3915 <<
", historyOnly = " << (historyOnly ?
"true" :
"false");
3923 listeners->addSubscriber(isrListener);
3925 UNREACHABLE(
"ripple::NetworkOPsImp::subBook : null book listeners");
3933 listeners->removeSubscriber(uSeq);
3945 m_standalone,
"ripple::NetworkOPsImp::acceptLedger : is standalone");
3948 Throw<std::runtime_error>(
3949 "Operation only possible in STANDALONE mode.");
3964 jvResult[jss::ledger_index] = lpClosed->info().seq;
3965 jvResult[jss::ledger_hash] =
to_string(lpClosed->info().hash);
3967 lpClosed->info().closeTime.time_since_epoch().count());
3968 if (!lpClosed->rules().enabled(featureXRPFees))
3970 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
3971 jvResult[jss::reserve_base] =
3972 lpClosed->fees().accountReserve(0).jsonClipped();
3973 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
3978 jvResult[jss::validated_ledgers] =
3984 .emplace(isrListener->getSeq(), isrListener)
3994 .emplace(isrListener->getSeq(), isrListener)
4020 .emplace(isrListener->getSeq(), isrListener)
4048 jvResult[jss::random] =
to_string(uRandom);
4050 jvResult[jss::load_base] = feeTrack.getLoadBase();
4051 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4052 jvResult[jss::hostid] =
getHostId(admin);
4053 jvResult[jss::pubkey_node] =
4058 .emplace(isrListener->getSeq(), isrListener)
4076 .emplace(isrListener->getSeq(), isrListener)
4094 .emplace(isrListener->getSeq(), isrListener)
4112 .emplace(isrListener->getSeq(), isrListener)
4136 .emplace(isrListener->getSeq(), isrListener)
4154 .emplace(isrListener->getSeq(), isrListener)
4202 if (map.find(pInfo->getSeq()) != map.end())
4209#ifndef USE_NEW_BOOK_PAGE
4220 unsigned int iLimit,
4230 uint256 uTipIndex = uBookBase;
4234 stream <<
"getBookPage:" << book;
4235 stream <<
"getBookPage: uBookBase=" << uBookBase;
4236 stream <<
"getBookPage: uBookEnd=" << uBookEnd;
4237 stream <<
"getBookPage: uTipIndex=" << uTipIndex;
4246 bool bDirectAdvance =
true;
4250 unsigned int uBookEntry;
4256 while (!bDone && iLimit-- > 0)
4260 bDirectAdvance =
false;
4264 auto const ledgerIndex = view.
succ(uTipIndex, uBookEnd);
4268 sleOfferDir.
reset();
4277 uTipIndex = sleOfferDir->key();
4280 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4283 <<
"getBookPage: uTipIndex=" << uTipIndex;
4285 <<
"getBookPage: offerIndex=" << offerIndex;
4295 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4296 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4297 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4299 bool firstOwnerOffer(
true);
4305 saOwnerFunds = saTakerGets;
4307 else if (bGlobalFreeze)
4315 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4316 if (umBalanceEntry != umBalance.
end())
4320 saOwnerFunds = umBalanceEntry->second;
4321 firstOwnerOffer =
false;
4335 if (saOwnerFunds < beast::zero)
4339 saOwnerFunds.
clear();
4347 STAmount saOwnerFundsLimit = saOwnerFunds;
4359 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4362 if (saOwnerFundsLimit >= saTakerGets)
4365 saTakerGetsFunded = saTakerGets;
4371 saTakerGetsFunded = saOwnerFundsLimit;
4373 saTakerGetsFunded.
setJson(jvOffer[jss::taker_gets_funded]);
4377 saTakerGetsFunded, saDirRate, saTakerPays.
issue()))
4378 .setJson(jvOffer[jss::taker_pays_funded]);
4384 saOwnerFunds,
multiply(saTakerGetsFunded, offerRate));
4386 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4390 jvOf[jss::quality] = saDirRate.
getText();
4392 if (firstOwnerOffer)
4393 jvOf[jss::owner_funds] = saOwnerFunds.
getText();
4400 if (!
cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4402 bDirectAdvance =
true;
4407 <<
"getBookPage: offerIndex=" << offerIndex;
4427 unsigned int iLimit,
4435 MetaView lesActive(lpLedger,
tapNONE,
true);
4436 OrderBookIterator obIterator(lesActive, book);
4440 const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.
out.
account) ||
4441 lesActive.isGlobalFrozen(book.
in.
account);
4443 while (iLimit-- > 0 && obIterator.nextOffer())
4448 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4449 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4450 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4451 STAmount saDirRate = obIterator.getCurrentRate();
4457 saOwnerFunds = saTakerGets;
4459 else if (bGlobalFreeze)
4467 auto umBalanceEntry = umBalance.
find(uOfferOwnerID);
4469 if (umBalanceEntry != umBalance.
end())
4473 saOwnerFunds = umBalanceEntry->second;
4479 saOwnerFunds = lesActive.accountHolds(
4485 if (saOwnerFunds.isNegative())
4489 saOwnerFunds.zero();
4496 STAmount saTakerGetsFunded;
4497 STAmount saOwnerFundsLimit = saOwnerFunds;
4509 saOwnerFundsLimit =
divide(saOwnerFunds, offerRate);
4512 if (saOwnerFundsLimit >= saTakerGets)
4515 saTakerGetsFunded = saTakerGets;
4520 saTakerGetsFunded = saOwnerFundsLimit;
4522 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4528 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4529 .setJson(jvOffer[jss::taker_pays_funded]);
4532 STAmount saOwnerPays = (
parityRate == offerRate)
4535 saOwnerFunds,
multiply(saTakerGetsFunded, offerRate));
4537 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4539 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4543 jvOf[jss::quality] = saDirRate.
getText();
4558 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4598 ++counters_[
static_cast<std::size_t>(om)].transitions;
4600 counters_[
static_cast<std::size_t>(om)].transitions == 1)
4602 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4603 now - processStart_)
4607 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4616 auto [counters, mode, start, initialSync] = getCounterData();
4617 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4627 auto& state = obj[jss::state_accounting][
states_[i]];
4628 state[jss::transitions] =
std::to_string(counters[i].transitions);
4629 state[jss::duration_us] =
std::to_string(counters[i].dur.count());
4633 obj[jss::initial_sync_duration_us] =
std::to_string(initialSync);
4648 boost::asio::io_service& io_svc,
4652 return std::make_unique<NetworkOPsImp>(
T back_inserter(T... args)
Decorator for streaming out compact json.
Lightweight wrapper to tag static string.
Value get(UInt index, const Value &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Value & append(const Value &value)
Append value to array at the end.
bool isMember(const char *key) const
Return true if the object has a member named key.
A generic endpoint for log messages.
Stream trace() const
Severity stream access functions.
A metric for measuring an integral value.
void set(value_type value) const
Set the value on the gauge.
A reference to a handler for performing polled collection.
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
std::uint32_t getLoadFee() const
NetClock::time_point getReportTime() const
PublicKey const & identity() const
std::size_t size() const
The number of nodes in the cluster list.
std::string SERVER_DOMAIN
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
int RELAY_UNTRUSTED_VALIDATIONS
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
A pool of threads to perform work.
Json::Value getJson(int c=0)
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
std::uint32_t getClusterFee() const
std::uint32_t getLocalFee() const
std::uint32_t getLoadBase() const
std::uint32_t getRemoteFee() const
std::uint32_t getLoadFactor() const
void heartbeat()
Reset the stall detection timer.
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
std::uint64_t initialSyncUs_
CounterData getCounterData() const
std::chrono::steady_clock::time_point start_
static std::array< Json::StaticString const, 5 > const states_
std::chrono::steady_clock::time_point const processStart_
Transaction with input flags and results to be applied in batches.
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
std::shared_ptr< Transaction > const transaction
void processClusterTimer()
boost::asio::steady_timer accountHistoryTxTimer_
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::vector< TransactionStatus > mTransactions
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
std::optional< PublicKey > const validatorPK_
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, const bool bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
SubInfoMapType mSubAccount
std::optional< PublicKey > const validatorMasterPK_
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
std::atomic< bool > unlBlocked_
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
std::atomic< bool > needNetworkLedger_
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
void setMode(OperatingMode om) override
void clearNeedNetworkLedger() override
DispatchState mDispatchState
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
bool checkLastClosedLedger(const Overlay::PeerSequence &, uint256 &networkClosed)
void reportFeeChange() override
void processHeartbeatTimer()
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
bool subTransactions(InfoSub::ref ispListener) override
std::atomic< bool > amendmentWarned_
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
void getCountsJson(Json::Value &obj)
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Writable ledger view that accumulates state and tx changes.
BookListeners::pointer getBookListeners(Book const &)
BookListeners::pointer makeBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, const AcceptedLedgerTx &alTx, MultiApiJson const &jvObj)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Represents a set of transactions in RCLConsensus.
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Collects logging information.
std::unique_ptr< std::stringstream > const & ss()
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
std::string getText() const override
Issue const & issue() const
std::optional< T > get(std::string const &name) const
std::size_t size() const noexcept
void const * data() const noexcept
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
std::chrono::seconds closeOffset() const
time_point closeTime() const
Returns the predicted close time, in network time.
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Validator keys and manifest as set in configuration file.
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key, or std::nullopt if none is configured.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Json::Value jsonClipped() const
static constexpr std::size_t size()
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
T emplace_back(T... args)
@ arrayValue
array value (ordered list)
@ objectValue
object value (collection of name/value pairs).
void rngfill(void *buffer, std::size_t bytes, Generator &g)
std::string const & getVersionString()
Server version.
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
STAmount divide(STAmount const &amount, Rate const &rate)
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
std::uint64_t getQuality(uint256 const &uBase)
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
auto constexpr muldiv_max
std::unique_ptr< LocalTxs > make_LocalTxs()
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
STAmount amountFromQuality(std::uint64_t rate)
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
@ warnRPC_UNSUPPORTED_MAJORITY
@ warnRPC_AMENDMENT_BLOCKED
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
AccountID calcAccountID(PublicKey const &pk)
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Json::Value rpcError(int iError)
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
std::string strHex(FwdIt begin, FwdIt end)
bool isTemMalformed(TER x)
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
void forAllApiVersions(Fn const &fn, Args &&... args)
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
uint256 getQualityNext(uint256 const &uBase)
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
std::string to_string_iso(date::sys_time< Duration > tp)
std::string to_string(base_uint< Bits, Tag > const &a)
FeeSetup setup_FeeVote(Section const &section)
Number root(Number f, unsigned d)
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
bool transResultInfo(TER code, std::string &token, std::string &text)
uint256 getBookBase(Book const &book)
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
static std::uint32_t trunc32(std::uint64_t v)
static auto const genesisAccountId
T set_intersection(T... args)
std::string serialized
The manifest in serialized form.
std::uint32_t sequence
The sequence number of this manifest.
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Server fees published on server subscription.
bool operator!=(ServerFeeSummary const &b) const
ServerFeeSummary()=default
std::optional< TxQ::Metrics > em
std::uint32_t loadFactorServer
bool operator==(ServerFeeSummary const &b) const
std::uint32_t loadBaseServer
decltype(initialSyncUs_) initialSyncUs
decltype(counters_) counters
std::uint64_t transitions
std::chrono::microseconds dur
beast::insight::Gauge full_transitions
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
beast::insight::Hook hook
beast::insight::Gauge connected_duration
beast::insight::Gauge tracking_duration
beast::insight::Gauge connected_transitions
beast::insight::Gauge disconnected_transitions
beast::insight::Gauge syncing_duration
beast::insight::Gauge tracking_transitions
beast::insight::Gauge full_duration
beast::insight::Gauge disconnected_duration
beast::insight::Gauge syncing_transitions
std::uint32_t historyLastLedgerSeq_
std::uint32_t separationLedgerSeq_
AccountID const accountId_
std::uint32_t forwardTxIndex_
std::atomic< bool > stopHistorical_
std::int32_t historyTxIndex_
SubAccountHistoryIndex(AccountID const &accountId)
std::shared_ptr< SubAccountHistoryIndex > index_
std::shared_ptr< SubAccountHistoryIndex > index_
Represents a transfer rate.
Data format for exchanging consumption information across peers.
std::vector< Item > items
Changes in trusted nodes after updating validator list.
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
void set(const char *key, auto const &v)
IsMemberResult isMember(const char *key) const
Select all peers (except optional excluded) that are in our cluster.
Sends a message to all peers.
T time_since_epoch(T... args)