#include <xrpld/app/consensus/RCLConsensus.h>
#include <xrpld/app/consensus/RCLValidations.h>
#include <xrpld/app/ledger/AcceptedLedger.h>
#include <xrpld/app/ledger/InboundLedgers.h>
#include <xrpld/app/ledger/LedgerMaster.h>
#include <xrpld/app/ledger/LedgerToJson.h>
#include <xrpld/app/ledger/LocalTxs.h>
#include <xrpld/app/ledger/OpenLedger.h>
#include <xrpld/app/ledger/OrderBookDB.h>
#include <xrpld/app/ledger/TransactionMaster.h>
#include <xrpld/app/main/LoadManager.h>
#include <xrpld/app/misc/AmendmentTable.h>
#include <xrpld/app/misc/DeliverMax.h>
#include <xrpld/app/misc/HashRouter.h>
#include <xrpld/app/misc/LoadFeeTrack.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/app/misc/Transaction.h>
#include <xrpld/app/misc/TxQ.h>
#include <xrpld/app/misc/ValidatorKeys.h>
#include <xrpld/app/misc/ValidatorList.h>
#include <xrpld/app/misc/detail/AccountTxPaging.h>
#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
#include <xrpld/app/tx/apply.h>
#include <xrpld/consensus/Consensus.h>
#include <xrpld/consensus/ConsensusParms.h>
#include <xrpld/overlay/Cluster.h>
#include <xrpld/overlay/Overlay.h>
#include <xrpld/overlay/predicates.h>
#include <xrpld/perflog/PerfLog.h>
#include <xrpld/rpc/BookChanges.h>
#include <xrpld/rpc/DeliveredAmount.h>
#include <xrpld/rpc/MPTokenIssuanceID.h>
#include <xrpld/rpc/ServerHandler.h>
#include <xrpl/basics/CanProcess.h>
#include <xrpl/basics/UptimeClock.h>
#include <xrpl/basics/mulDiv.h>
#include <xrpl/basics/safe_cast.h>
#include <xrpl/beast/rfc2616.h>
#include <xrpl/beast/utility/rngfill.h>
#include <xrpl/crypto/RFC1751.h>
#include <xrpl/crypto/csprng.h>
#include <xrpl/json/to_string.h>
#include <xrpl/protocol/BuildInfo.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/MultiApiJson.h>
#include <xrpl/protocol/RPCErr.h>
#include <xrpl/protocol/STParsedJSON.h>
#include <xrpl/protocol/jss.h>
#include <xrpl/resource/Fees.h>
#include <xrpl/resource/ResourceManager.h>
#include <boost/asio/ip/host_name.hpp>
#include <boost/asio/steady_timer.hpp>
110 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
153 std::chrono::steady_clock::time_point
start_ =
214 return !(*
this != b);
233 boost::asio::io_service& io_svc,
247 app_.logs().journal(
"FeeVote")),
250 app.getInboundTransactions(),
251 beast::get_abstract_clock<
std::chrono::steady_clock>(),
253 app_.logs().journal(
"LedgerConsensus"))
432 getServerInfo(
bool human,
bool admin,
bool counters)
override;
459 TER result)
override;
493 bool historyOnly)
override;
499 bool historyOnly)
override;
567 boost::system::error_code ec;
572 <<
"NetworkOPs: heartbeatTimer cancel error: "
581 <<
"NetworkOPs: clusterTimer cancel error: "
590 <<
"NetworkOPs: accountHistoryTxTimer cancel error: "
595 using namespace std::chrono_literals;
605 boost::asio::steady_timer& timer,
785 template <
class Handler>
787 Handler
const& handler,
789 :
hook(collector->make_hook(handler))
792 "Disconnected_duration"))
795 "Connected_duration"))
797 collector->make_gauge(
"State_Accounting",
"Syncing_duration"))
800 "Tracking_duration"))
802 collector->make_gauge(
"State_Accounting",
"Full_duration"))
805 "Disconnected_transitions"))
808 "Connected_transitions"))
811 "Syncing_transitions"))
814 "Tracking_transitions"))
816 collector->make_gauge(
"State_Accounting",
"Full_transitions"))
845 {
"disconnected",
"connected",
"syncing",
"tracking",
"full"}};
907 static std::string const hostname = boost::asio::ip::host_name();
914 static std::string const shroudedHostId = [
this]() {
920 return shroudedHostId;
935 boost::asio::steady_timer& timer,
942 [
this, onExpire, onError](boost::system::error_code
const& e) {
943 if ((e.value() == boost::system::errc::success) &&
944 (!m_job_queue.isStopped()))
949 if (e.value() != boost::system::errc::success &&
950 e.value() != boost::asio::error::operation_aborted)
953 JLOG(m_journal.error())
954 <<
"Timer got error '" << e.message()
955 <<
"'. Restarting timer.";
960 timer.expires_from_now(expiry_time);
961 timer.async_wait(std::move(*optionalCountedHandler));
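// Editor's note: the setTimer fragment above arms a boost::asio steady timer and
// re-arms it after each expiry, logging and restarting when the wait completes
// with an unexpected error. Below is a minimal, self-contained sketch of that
// pattern; the names (scheduleEvery, onExpire) are illustrative, not rippled's API.
#include <boost/asio.hpp>
#include <chrono>
#include <functional>
#include <iostream>

void
scheduleEvery(
    boost::asio::steady_timer& timer,
    std::chrono::milliseconds interval,
    std::function<void()> onExpire)
{
    timer.expires_after(interval);
    timer.async_wait([&timer, interval, onExpire](
                         boost::system::error_code const& ec) {
        if (ec == boost::asio::error::operation_aborted)
            return;  // timer cancelled (e.g. during shutdown)
        if (ec)
            std::cerr << "Timer got error '" << ec.message()
                      << "'. Restarting timer.\n";
        else
            onExpire();
        // Re-arm the timer for the next tick, mirroring the restart above.
        scheduleEvery(timer, interval, onExpire);
    });
}

int
main()
{
    boost::asio::io_context ctx;
    boost::asio::steady_timer timer{ctx};
    scheduleEvery(timer, std::chrono::milliseconds{250}, [] {
        std::cout << "heartbeat\n";
    });
    ctx.run_for(std::chrono::seconds{1});
}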
NetworkOPsImp::setHeartbeatTimer()
mConsensus.parms().ledgerGRANULARITY,
m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
    processHeartbeatTimer();
[this]() { setHeartbeatTimer(); });
NetworkOPsImp::setClusterTimer()
using namespace std::chrono_literals;
processClusterTimer();
[this]() { setClusterTimer(); });
JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
using namespace std::chrono_literals;
accountHistoryTxTimer_,
[this, subInfo]() { addAccountHistoryJob(subInfo); },
[this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
NetworkOPsImp::processHeartbeatTimer()
std::size_t const numPeers = app_.overlay().size();
if (numPeers < minPeerCount_)
if (mMode != OperatingMode::DISCONNECTED)
    OperatingMode::DISCONNECTED, "Heartbeat: insufficient peers");
JLOG(m_journal.warn())
    << "Node count (" << numPeers << ") has fallen "
    << "below required minimum (" << minPeerCount_ << ").";
setHeartbeatTimer();
if (mMode == OperatingMode::DISCONNECTED)
    setMode(OperatingMode::CONNECTED, "Heartbeat: sufficient peers");
JLOG(m_journal.info()) << "Node count (" << numPeers << ") is sufficient.";
if (mMode == OperatingMode::SYNCING)
    setMode(OperatingMode::SYNCING, "Heartbeat: check syncing");
else if (mMode == OperatingMode::CONNECTED)
    setMode(OperatingMode::CONNECTED, "Heartbeat: check connected");
mConsensus.timerEntry(app_.timeKeeper().closeTime());
if (mLastConsensusPhase != currPhase)
    reportConsensusStateChange(currPhase);
    mLastConsensusPhase = currPhase;
setHeartbeatTimer();
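// Editor's note: the heartbeat handler above drops the server to DISCONNECTED
// when the connected peer count falls below the configured minimum and promotes
// it back to CONNECTED once enough peers return. A compact sketch of that
// decision follows; the enum and function names here are hypothetical.
#include <cstddef>

enum class OperatingModeSketch { DISCONNECTED, CONNECTED, SYNCING, TRACKING, FULL };

OperatingModeSketch
nextMode(
    OperatingModeSketch current,
    std::size_t numPeers,
    std::size_t minPeerCount)
{
    if (numPeers < minPeerCount)
        return OperatingModeSketch::DISCONNECTED;  // insufficient peers
    if (current == OperatingModeSketch::DISCONNECTED)
        return OperatingModeSketch::CONNECTED;     // peers are back; resync from CONNECTED
    return current;                                // otherwise leave the mode alone
}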
NetworkOPsImp::processClusterTimer()
if (app_.cluster().size() == 0)
using namespace std::chrono_literals;
bool const update = app_.cluster().update(
    app_.nodeIdentity().first,
    (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
        ? app_.getFeeTrack().getLocalFee()
    app_.timeKeeper().now());
JLOG(m_journal.debug()) << "Too soon to send cluster update";
protocol::TMCluster cluster;
app_.cluster().for_each([&cluster](ClusterNode const& node) {
    protocol::TMClusterNode& n = *cluster.add_clusternodes();
    n.set_nodename(node.name());
for (auto& item : gossip.items)
    protocol::TMLoadSource& node = *cluster.add_loadsources();
    node.set_name(to_string(item.address));
    node.set_cost(item.balance);
app_.overlay().foreach(send_if(
    std::make_shared<Message>(cluster, protocol::mtCLUSTER),
if (mode == OperatingMode::FULL && admin)
auto const consensusMode = mConsensus.mode();
if (consensusMode != ConsensusMode::wrongLedger)
if (consensusMode == ConsensusMode::proposing)
if (mConsensus.validating())
    return "validating";
if (isNeedNetworkLedger())
auto const txid = trans->getTransactionID();
auto const flags = app_.getHashRouter().getFlags(txid);
if ((flags & SF_BAD) != 0)
JLOG(m_journal.warn()) << "Submitted transaction cached bad";
app_.getHashRouter(),
m_ledgerMaster.getValidatedRules(),
if (validity != Validity::Valid)
JLOG(m_journal.warn()) << "Submitted transaction invalid: " << reason;
JLOG(m_journal.warn())
    << "Exception checking transaction " << txid << ": " << ex.what();
auto tx = std::make_shared<Transaction>(trans, reason, app_);
m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
    processTransaction(t, false, false, FailHard::no);
NetworkOPsImp::processTransaction(
auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
if ((newFlags & SF_BAD) != 0)
JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
transaction->setStatus(INVALID);
auto const view = m_ledgerMaster.getCurrentLedger();
app_.getHashRouter(),
*transaction->getSTransaction(),
validity == Validity::Valid,
"ripple::NetworkOPsImp::processTransaction : valid validity");
if (validity == Validity::SigBad)
JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
transaction->setStatus(INVALID);
app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
app_.getMasterTransaction().canonicalize(&transaction);
doTransactionSync(transaction, bUnlimited, failType);
doTransactionAsync(transaction, bUnlimited, failType);
NetworkOPsImp::doTransactionAsync(
if (transaction->getApplying())
mTransactions.push_back(
transaction->setApplying();
if (mDispatchState == DispatchState::none)
if (m_job_queue.addJob(
        jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
    mDispatchState = DispatchState::scheduled;
NetworkOPsImp::doTransactionSync(
if (!transaction->getApplying())
mTransactions.push_back(
transaction->setApplying();
if (mDispatchState == DispatchState::running)
if (mTransactions.size())
if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
    mDispatchState = DispatchState::scheduled;
} while (transaction->getApplying());
NetworkOPsImp::transactionBatch()
if (mDispatchState == DispatchState::running)
while (mTransactions.size())
mTransactions.swap(transactions);
!transactions.empty(),
"ripple::NetworkOPsImp::apply : non-empty transactions");
mDispatchState != DispatchState::running,
"ripple::NetworkOPsImp::apply : is not running");
mDispatchState = DispatchState::running;
bool changed = false;
m_ledgerMaster.peekMutex(), std::defer_lock};
if (e.failType == FailHard::yes)
auto const result = app_.getTxQ().apply(
    app_, view, e.transaction->getSTransaction(), flags, j);
e.result = result.ter;
e.applied = result.applied;
changed = changed || result.applied;
if (auto const l = m_ledgerMaster.getValidatedLedger())
    validatedLedgerIndex = l->info().seq;
auto newOL = app_.openLedger().current();
e.transaction->clearSubmitResult();
pubProposedTransaction(
    newOL, e.transaction->getSTransaction(), e.result);
e.transaction->setApplied();
e.transaction->setResult(e.result);
app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);
JLOG(m_journal.info()) << "TransactionResult: " << token << ": " << human;
bool addLocal = e.local;
JLOG(m_journal.debug()) << "Transaction is now included in open ledger";
e.transaction->setStatus(INCLUDED);
auto const& txCur = e.transaction->getSTransaction();
auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);
auto t = std::make_shared<Transaction>(trans, reason, app_);
submit_held.emplace_back(t, false, false, FailHard::no);
JLOG(m_journal.info()) << "Transaction is obsolete";
e.transaction->setStatus(OBSOLETE);
JLOG(m_journal.debug())
    << "Transaction is likely to claim a"
    << " fee, but is queued until fee drops";
e.transaction->setStatus(HELD);
m_ledgerMaster.addHeldTransaction(e.transaction);
e.transaction->setQueued();
e.transaction->setKept();
if (e.failType != FailHard::yes)
JLOG(m_journal.debug()) << "Transaction should be held: " << e.result;
e.transaction->setStatus(HELD);
m_ledgerMaster.addHeldTransaction(e.transaction);
e.transaction->setKept();
JLOG(m_journal.debug()) << "Status other than success " << e.result;
e.transaction->setStatus(INVALID);
auto const enforceFailHard =
    e.failType == FailHard::yes && !isTesSuccess(e.result);
if (addLocal && !enforceFailHard)
m_localTX->push_back(
    m_ledgerMaster.getCurrentLedgerIndex(),
    e.transaction->getSTransaction());
e.transaction->setKept();
((mMode != OperatingMode::FULL) &&
 (e.failType != FailHard::yes) && e.local) ||
app_.getHashRouter().shouldRelay(e.transaction->getID());
protocol::TMTransaction tx;
e.transaction->getSTransaction()->add(s);
tx.set_rawtransaction(s.data(), s.size());
tx.set_status(protocol::tsCURRENT);
tx.set_receivetimestamp(
    app_.timeKeeper().now().time_since_epoch().count());
app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
e.transaction->setBroadcast();
if (validatedLedgerIndex)
auto [fee, accountSeq, availableSeq] =
    app_.getTxQ().getTxRequiredFeeAndSeq(
        *newOL, e.transaction->getSTransaction());
e.transaction->setCurrentLedgerState(
    *validatedLedgerIndex, fee, accountSeq, availableSeq);
e.transaction->clearApplying();
if (!submit_held.empty())
if (mTransactions.empty())
    mTransactions.swap(submit_held);
for (auto& e : submit_held)
    mTransactions.push_back(std::move(e));
mDispatchState = DispatchState::none;
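// Editor's note: the batching code above collects incoming transactions under a
// mutex and lets a single worker drain them, tracking whether a drain is already
// scheduled or running (DispatchState). Below is a simplified, self-contained
// sketch of that dispatch pattern; the types and names are illustrative only.
#include <condition_variable>
#include <mutex>
#include <vector>

struct BatchDispatcher
{
    enum class DispatchState { none, scheduled, running };

    std::mutex mutex;
    std::condition_variable cv;
    DispatchState state = DispatchState::none;
    std::vector<int> pending;  // stand-in for queued transactions

    // Called by producers; returns true if the caller should schedule a drain job.
    bool
    enqueue(int tx)
    {
        std::lock_guard lock{mutex};
        pending.push_back(tx);
        if (state == DispatchState::none)
        {
            state = DispatchState::scheduled;
            return true;
        }
        return false;
    }

    // Called by the single drain job: apply everything queued, batch by batch.
    template <class Apply>
    void
    drain(Apply&& apply)
    {
        std::unique_lock lock{mutex};
        while (!pending.empty())
        {
            std::vector<int> batch;
            batch.swap(pending);
            state = DispatchState::running;
            lock.unlock();
            for (int tx : batch)
                apply(tx);  // the actual work happens outside the lock
            lock.lock();
            state = DispatchState::scheduled;
        }
        state = DispatchState::none;
        cv.notify_all();  // wake any synchronous submitter waiting on its result
    }
};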
NetworkOPsImp::getOwnerInfo(
auto root = keylet::ownerDir(account);
auto sleNode = lpLedger->read(keylet::page(root));
for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
auto sleCur = lpLedger->read(keylet::child(uDirEntry));
"ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
switch (sleCur->getType())
if (!jvObjects.isMember(jss::offers))
    jvObjects[jss::offers] =
jvObjects[jss::offers].append(sleCur->getJson(JsonOptions::none));
case ltRIPPLE_STATE:
if (!jvObjects.isMember(jss::ripple_lines))
    jvObjects[jss::ripple_lines] =
jvObjects[jss::ripple_lines].append(sleCur->getJson(JsonOptions::none));
case ltACCOUNT_ROOT:
"ripple::NetworkOPsImp::getOwnerInfo : invalid "
uNodeDir = sleNode->getFieldU64(sfIndexNext);
sleNode = lpLedger->read(keylet::page(root, uNodeDir));
"ripple::NetworkOPsImp::getOwnerInfo : read next page");
NetworkOPsImp::isBlocked()
return isAmendmentBlocked() || isUNLBlocked();
NetworkOPsImp::isAmendmentBlocked()
return amendmentBlocked_;
NetworkOPsImp::setAmendmentBlocked()
amendmentBlocked_ = true;
setMode(OperatingMode::CONNECTED, "setAmendmentBlocked");
NetworkOPsImp::isAmendmentWarned()
return !amendmentBlocked_ && amendmentWarned_;
NetworkOPsImp::setAmendmentWarned()
amendmentWarned_ = true;
NetworkOPsImp::clearAmendmentWarned()
amendmentWarned_ = false;
NetworkOPsImp::isUNLBlocked()
NetworkOPsImp::setUNLBlocked()
setMode(OperatingMode::CONNECTED, "setUNLBlocked");
NetworkOPsImp::clearUNLBlocked()
unlBlocked_ = false;
NetworkOPsImp::checkLastClosedLedger(
JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";
auto const ourClosed = m_ledgerMaster.getClosedLedger();
uint256 closedLedger = ourClosed->info().hash;
uint256 prevClosedLedger = ourClosed->info().parentHash;
JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;
auto& validations = app_.getValidations();
JLOG(m_journal.debug())
    << "ValidationTrie " << Json::Compact(validations.getJsonTrie());
peerCounts[closedLedger] = 0;
if (mMode >= OperatingMode::TRACKING)
    peerCounts[closedLedger]++;
for (auto& peer : peerList)
uint256 peerLedger = peer->getClosedLedgerHash();
++peerCounts[peerLedger];
for (auto const& it : peerCounts)
    JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;
uint256 preferredLCL = validations.getPreferredLCL(
m_ledgerMaster.getValidLedgerIndex(),
bool switchLedgers = preferredLCL != closedLedger;
closedLedger = preferredLCL;
if (switchLedgers && (closedLedger == prevClosedLedger))
JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
networkClosed = ourClosed->info().hash;
switchLedgers = false;
networkClosed = closedLedger;
auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
consensus = app_.getInboundLedgers().acquire(
    closedLedger, 0, InboundLedger::Reason::CONSENSUS);
(!m_ledgerMaster.canBeCurrent(consensus) ||
 !m_ledgerMaster.isCompatible(*consensus, m_journal.debug(), "Not switching")))
networkClosed = ourClosed->info().hash;
JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
JLOG(m_journal.info()) << "Net LCL " << closedLedger;
if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
    setMode(OperatingMode::CONNECTED, "check LCL: not on consensus ledger");
switchLastClosedLedger(consensus);
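// Editor's note: checkLastClosedLedger above tallies which closed-ledger hash
// each peer reports, then lets the validation logic pick the preferred LCL and
// only switches when that differs from our own (and is not simply our previous
// ledger). The sketch below shows only the tallying step with hypothetical
// types; the real preference comes from the validation trie, not a plain count.
#include <cstdint>
#include <map>
#include <vector>

using Hash = std::uint64_t;  // stand-in for uint256

Hash
mostReportedLedger(Hash ourClosed, std::vector<Hash> const& peerClosed)
{
    std::map<Hash, int> counts;
    counts[ourClosed] = 1;  // count ourselves once
    for (Hash const& h : peerClosed)
        ++counts[h];
    Hash best = ourClosed;
    for (auto const& [hash, n] : counts)
        if (n > counts[best])
            best = hash;
    return best;
}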
NetworkOPsImp::switchLastClosedLedger(
JLOG(m_journal.error()) << "JUMP last closed ledger to " << newLCL->info().hash;
clearNeedNetworkLedger();
app_.getTxQ().processClosedLedger(app_, *newLCL, true);
auto retries = m_localTX->getTxSet();
auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
rules.emplace(app_.config().features);
app_.openLedger().accept(
return app_.getTxQ().accept(app_, view);
m_ledgerMaster.switchLCL(newLCL);
protocol::TMStatusChange s;
s.set_newevent(protocol::neSWITCHED_LEDGER);
s.set_ledgerseq(newLCL->info().seq);
s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
s.set_ledgerhashprevious(
    newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
app_.overlay().foreach(
    send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
NetworkOPsImp::beginConsensus(uint256 const& networkClosed)
"ripple::NetworkOPsImp::beginConsensus : nonzero input");
auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
                       << " with LCL " << closingInfo.parentHash;
auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
if (mMode == OperatingMode::FULL)
JLOG(m_journal.warn()) << "beginConsensus Don't have LCL, going to tracking";
setMode(OperatingMode::TRACKING, "beginConsensus: No LCL");
prevLedger->info().hash == closingInfo.parentHash,
"ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
"ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
if (prevLedger->rules().enabled(featureNegativeUNL))
    app_.validators().setNegativeUNL(prevLedger->negativeUNL());
TrustChanges const changes = app_.validators().updateTrusted(
    app_.getValidations().getCurrentNodeIDs(),
    closingInfo.parentCloseTime,
    app_.getHashRouter());
if (!changes.added.empty() || !changes.removed.empty())
app_.getValidations().trustChanged(changes.added, changes.removed);
app_.getAmendmentTable().trustChanged(
    app_.validators().getQuorumKeys().second);
mConsensus.startRound(
    app_.timeKeeper().closeTime(),
if (mLastConsensusPhase != currPhase)
    reportConsensusStateChange(currPhase);
    mLastConsensusPhase = currPhase;
JLOG(m_journal.debug()) << "Initiating consensus engine";
return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
protocol::TMHaveTransactionSet msg;
msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
msg.set_status(protocol::tsHAVE);
app_.overlay().foreach(
    send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
NetworkOPsImp::endConsensus()
uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
for (auto const& it : app_.overlay().getActivePeers())
if (it && (it->getClosedLedgerHash() == deadLedger))
JLOG(m_journal.trace()) << "Killing obsolete peer status";
checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
if (networkClosed.isZero())
if (((mMode == OperatingMode::CONNECTED) ||
     (mMode == OperatingMode::SYNCING)) &&
if (!needNetworkLedger_)
    setMode(OperatingMode::TRACKING, "endConsensus: check tracking");
if (((mMode == OperatingMode::CONNECTED) ||
     (mMode == OperatingMode::TRACKING)) &&
auto current = m_ledgerMaster.getCurrentLedger();
if (app_.timeKeeper().now() < (current->info().parentCloseTime +
                               2 * current->info().closeTimeResolution))
    setMode(OperatingMode::FULL, "endConsensus: check full");
beginConsensus(networkClosed);
NetworkOPsImp::consensusViewChange()
if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
    setMode(OperatingMode::CONNECTED, "consensusViewChange");
if (!mStreamMaps[sManifests].empty())
jvObj[jss::type] = "manifestReceived";
jvObj[jss::signing_key] =
jvObj[jss::signature] = strHex(*sig);
jvObj[jss::domain] = mo.domain;
for (auto i = mStreamMaps[sManifests].begin();
     i != mStreamMaps[sManifests].end();)
if (auto p = i->second.lock())
    p->send(jvObj, true);
i = mStreamMaps[sManifests].erase(i);
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
: loadFactorServer{loadFeeTrack.getLoadFactor()}
, loadBaseServer{loadFeeTrack.getLoadBase()}
, em{std::move(escalationMetrics)}
em.has_value() != b.em.has_value())
em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
em->referenceFeeLevel != b.em->referenceFeeLevel);
jvObj[jss::type] = "serverStatus";
jvObj[jss::load_base] = f.loadBaseServer;
jvObj[jss::load_factor_server] = f.loadFactorServer;
jvObj[jss::base_fee] = f.baseFee.jsonClipped();
safe_cast<std::uint64_t>(f.loadFactorServer),
f.em->openLedgerFeeLevel,
f.em->referenceFeeLevel)
jvObj[jss::load_factor] = trunc32(loadFactor);
jvObj[jss::load_factor_fee_escalation] =
    f.em->openLedgerFeeLevel.jsonClipped();
jvObj[jss::load_factor_fee_queue] =
    f.em->minProcessingFeeLevel.jsonClipped();
jvObj[jss::load_factor_fee_reference] =
    f.em->referenceFeeLevel.jsonClipped();
jvObj[jss::load_factor] = f.loadFactorServer;
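// Editor's note: the serverStatus fields above publish a single load_factor that
// is the larger of the server's own load factor and the open-ledger fee
// escalation level rescaled into load-base units. A sketch of that arithmetic
// follows, using plain integers instead of fee-level types; the real code uses
// mulDiv to avoid overflow, which this simplified version does not.
#include <algorithm>
#include <cstdint>

std::uint64_t
combinedLoadFactor(
    std::uint32_t loadFactorServer,
    std::uint32_t loadBaseServer,
    std::uint64_t openLedgerFeeLevel,
    std::uint64_t referenceFeeLevel)
{
    // escalated fee level expressed on the same scale as the server load factor
    std::uint64_t const escalated = openLedgerFeeLevel * loadBaseServer /
        std::max<std::uint64_t>(1, referenceFeeLevel);
    return std::max<std::uint64_t>(loadFactorServer, escalated);
}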
p->send(jvObj, true);
if (!streamMap.empty())
jvObj[jss::type] = "consensusPhase";
jvObj[jss::consensus] = to_string(phase);
for (auto i = streamMap.begin(); i != streamMap.end();)
if (auto p = i->second.lock())
    p->send(jvObj, true);
i = streamMap.erase(i);
auto const signerPublic = val->getSignerPublic();
jvObj[jss::type] = "validationReceived";
jvObj[jss::validation_public_key] =
jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
jvObj[jss::signature] = strHex(val->getSignature());
jvObj[jss::full] = val->isFull();
jvObj[jss::flags] = val->getFlags();
jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
jvObj[jss::data] = strHex(val->getSerializer().slice());
if (auto version = (*val)[~sfServerVersion])
if (auto cookie = (*val)[~sfCookie])
if (auto hash = (*val)[~sfValidatedHash])
    jvObj[jss::validated_hash] = strHex(*hash);
auto const masterKey =
if (masterKey != signerPublic)
if (auto const seq = (*val)[~sfLedgerSequence])
    jvObj[jss::ledger_index] = *seq;
if (val->isFieldPresent(sfAmendments))
for (auto const& amendment : val->getFieldV256(sfAmendments))
if (auto const closeTime = (*val)[~sfCloseTime])
    jvObj[jss::close_time] = *closeTime;
if (auto const loadFee = (*val)[~sfLoadFee])
    jvObj[jss::load_fee] = *loadFee;
if (auto const baseFee = val->at(~sfBaseFee))
    jvObj[jss::base_fee] = static_cast<double>(*baseFee);
if (auto const reserveBase = val->at(~sfReserveBase))
    jvObj[jss::reserve_base] = *reserveBase;
if (auto const reserveInc = val->at(~sfReserveIncrement))
    jvObj[jss::reserve_inc] = *reserveInc;
if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
    baseFeeXRP && baseFeeXRP->native())
    jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
    reserveBaseXRP && reserveBaseXRP->native())
    jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
    reserveIncXRP && reserveIncXRP->native())
    jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
if (jvTx.isMember(jss::ledger_index))
jvTx[jss::ledger_index] =
    std::to_string(jvTx[jss::ledger_index].asUInt());
if (auto p = i->second.lock())
[&](Json::Value const& jv) { p->send(jv, true); });
jvObj[jss::type] = "peerStatusChange";
p->send(jvObj, true);
using namespace std::chrono_literals;
<< "recvValidation " << val->getLedgerHash() << " from " << source;
<< "Exception thrown for handling new validation "
<< val->getLedgerHash() << ": " << e.what();
<< "Unknown exception thrown for handling new validation "
<< val->getLedgerHash();
"This server is amendment blocked, and must be updated to be "
"able to stay in sync with the network.";
"This server has an expired validator list. validators.txt "
"may be incorrectly configured or some [validator_list_sites] "
"may be unreachable.";
"One or more unsupported amendments have reached majority. "
"Upgrade to the latest version before they are activated "
"to avoid being amendment blocked.";
if (auto const expected =
d[jss::expected_date] = expected->time_since_epoch().count();
d[jss::expected_date_UTC] = to_string(*expected);
if (warnings.size())
    info[jss::warnings] = std::move(warnings);
info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
info[jss::network_ledger] = "waiting";
info[jss::validation_quorum] =
info[jss::node_size] = "tiny";
info[jss::node_size] = "small";
info[jss::node_size] = "medium";
info[jss::node_size] = "large";
info[jss::node_size] = "huge";
info[jss::validator_list_expires] =
    safe_cast<Json::UInt>(when->time_since_epoch().count());
info[jss::validator_list_expires] = 0;
if (*when == TimeKeeper::time_point::max())
x[jss::expiration] = "never";
x[jss::status] = "active";
x[jss::status] = "active";
x[jss::status] = "expired";
x[jss::status] = "unknown";
x[jss::expiration] = "unknown";
#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
#ifdef GIT_COMMIT_HASH
x[jss::hash] = GIT_COMMIT_HASH;
x[jss::branch] = GIT_BRANCH;
info[jss::io_latency_ms] =
info[jss::pubkey_validator] =
info[jss::pubkey_validator] = "none";
info[jss::counters][jss::nodestore] = nodestore;
info[jss::pubkey_node] =
info[jss::amendment_blocked] = true;
lastClose[jss::converge_time_s] =
lastClose[jss::converge_time] =
info[jss::last_close] = lastClose;
info[jss::network_id] = static_cast<Json::UInt>(*netid);
auto const escalationMetrics =
auto const loadFactorFeeEscalation =
    escalationMetrics.openLedgerFeeLevel,
    escalationMetrics.referenceFeeLevel)
safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
info[jss::load_base] = loadBaseServer;
info[jss::load_factor] = trunc32(loadFactor);
info[jss::load_factor_server] = loadFactorServer;
info[jss::load_factor_fee_escalation] =
    escalationMetrics.openLedgerFeeLevel.jsonClipped();
info[jss::load_factor_fee_queue] =
    escalationMetrics.minProcessingFeeLevel.jsonClipped();
info[jss::load_factor_fee_reference] =
    escalationMetrics.referenceFeeLevel.jsonClipped();
info[jss::load_factor] =
    static_cast<double>(loadFactor) / loadBaseServer;
if (loadFactorServer != loadFactor)
    info[jss::load_factor_server] =
        static_cast<double>(loadFactorServer) / loadBaseServer;
if (fee != loadBaseServer)
    info[jss::load_factor_local] =
        static_cast<double>(fee) / loadBaseServer;
if (fee != loadBaseServer)
    info[jss::load_factor_net] =
        static_cast<double>(fee) / loadBaseServer;
if (fee != loadBaseServer)
    info[jss::load_factor_cluster] =
        static_cast<double>(fee) / loadBaseServer;
if (escalationMetrics.openLedgerFeeLevel !=
        escalationMetrics.referenceFeeLevel &&
    (admin || loadFactorFeeEscalation != loadFactor))
    info[jss::load_factor_fee_escalation] =
        escalationMetrics.openLedgerFeeLevel.decimalFromReference(
            escalationMetrics.referenceFeeLevel);
if (escalationMetrics.minProcessingFeeLevel !=
    escalationMetrics.referenceFeeLevel)
    info[jss::load_factor_fee_queue] =
        escalationMetrics.minProcessingFeeLevel.decimalFromReference(
            escalationMetrics.referenceFeeLevel);
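// Editor's note: in the human-readable branch above the various load factors are
// reported as decimal multiples of the load base rather than raw integers. A
// tiny sketch of that conversion (hypothetical helper name):
#include <cstdint>

double
loadFactorAsMultiple(std::uint64_t loadFactor, std::uint32_t loadBaseServer)
{
    // e.g. loadFactor == loadBaseServer reports as 1.0 (no fee escalation)
    return static_cast<double>(loadFactor) / loadBaseServer;
}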
XRPAmount const baseFee = lpClosed->fees().base;
l[jss::seq] = Json::UInt(lpClosed->info().seq);
l[jss::hash] = to_string(lpClosed->info().hash);
l[jss::reserve_base] =
    lpClosed->fees().accountReserve(0).jsonClipped();
l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
lpClosed->info().closeTime.time_since_epoch().count());
l[jss::reserve_base_xrp] =
    lpClosed->fees().accountReserve(0).decimalXRP();
l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
std::abs(closeOffset.count()) >= 60)
l[jss::close_time_offset] =
Json::UInt(age < highAgeThreshold ? age.count() : 0);
auto lCloseTime = lpClosed->info().closeTime;
if (lCloseTime <= closeTime)
using namespace std::chrono_literals;
auto age = closeTime - lCloseTime;
Json::UInt(age < highAgeThreshold ? age.count() : 0);
info[jss::validated_ledger] = l;
info[jss::closed_ledger] = l;
info[jss::published_ledger] = "none";
else if (lpPublished->info().seq != lpClosed->info().seq)
    info[jss::published_ledger] = lpPublished->info().seq;
info[jss::jq_trans_overflow] =
info[jss::peer_disconnects] =
info[jss::peer_disconnects_resources] =
"http", "https", "peer", "ws", "ws2", "wss", "wss2"};
!(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
  port.admin_user.empty() && port.admin_password.empty()))
for (auto const& p : proto)
    jv[jss::protocol].append(p);
auto const optPort = grpcSection.get("port");
if (optPort && grpcSection.get("ip"))
jv[jss::port] = *optPort;
jv[jss::protocol].append("grpc");
info[jss::ports] = std::move(ports);
transJson(transaction, result, false, ledger, std::nullopt);
[&](Json::Value const& jv) { p->send(jv, true); });
alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
lpAccepted->info().hash, alpAccepted);
alpAccepted->getLedger().get() == lpAccepted.get(),
"ripple::NetworkOPsImp::pubLedger : accepted input");
<< "Publishing ledger " << lpAccepted->info().seq << " "
<< lpAccepted->info().hash;
jvObj[jss::type] = "ledgerClosed";
jvObj[jss::ledger_index] = lpAccepted->info().seq;
jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
lpAccepted->info().closeTime.time_since_epoch().count());
if (!lpAccepted->rules().enabled(featureXRPFees))
jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
jvObj[jss::reserve_base] =
    lpAccepted->fees().accountReserve(0).jsonClipped();
jvObj[jss::reserve_inc] =
    lpAccepted->fees().increment.jsonClipped();
jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
jvObj[jss::validated_ledgers] =
p->send(jvObj, true);
p->send(jvObj, true);
static bool firstTime = true;
for (auto& inner : outer.second)
auto& subInfo = inner.second;
if (subInfo.index_->separationLedgerSeq_ == 0)
alpAccepted->getLedger(), subInfo);
for (auto const& accTx : *alpAccepted)
lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
2956 "reportConsensusStateChange->pubConsensus",
2987 jvObj[jss::type] =
"transaction";
2991 jvObj[jss::transaction] =
2998 jvObj[jss::meta], *ledger, transaction, meta->
get());
3000 jvObj[jss::meta], transaction, meta->
get());
3003 if (!ledger->open())
3004 jvObj[jss::ledger_hash] =
to_string(ledger->info().hash);
3008 jvObj[jss::ledger_index] = ledger->info().seq;
3009 jvObj[jss::transaction][jss::date] =
3010 ledger->info().closeTime.time_since_epoch().count();
3011 jvObj[jss::validated] =
true;
3012 jvObj[jss::close_time_iso] =
to_string_iso(ledger->info().closeTime);
3018 jvObj[jss::validated] =
false;
3019 jvObj[jss::ledger_current_index] = ledger->info().seq;
3022 jvObj[jss::status] = validated ?
"closed" :
"proposed";
3023 jvObj[jss::engine_result] = sToken;
3024 jvObj[jss::engine_result_code] = result;
3025 jvObj[jss::engine_result_message] = sHuman;
3027 if (transaction->getTxnType() == ttOFFER_CREATE)
3029 auto const account = transaction->getAccountID(sfAccount);
3030 auto const amount = transaction->getFieldAmount(sfTakerGets);
3033 if (account != amount.issue().account)
3041 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3049 [&]<
unsigned Version>(
3051 RPC::insertDeliverMax(
3052 jvTx[jss::transaction], transaction->getTxnType(), Version);
3054 if constexpr (Version > 1)
3056 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3057 jvTx[jss::hash] = hash;
3061 jvTx[jss::transaction][jss::hash] = hash;
3074 auto const& stTxn = transaction.
getTxn();
3078 auto const trResult = transaction.
getResult();
3093 [&](
Json::Value const& jv) { p->send(jv, true); });
3110 [&](
Json::Value const& jv) { p->send(jv, true); });
auto const currLedgerSeq = ledger->seq();
for (auto const& affectedAccount : transaction.getAffected())
auto it = simiIt->second.begin();
while (it != simiIt->second.end())
it = simiIt->second.erase(it);
auto it = simiIt->second.begin();
while (it != simiIt->second.end())
it = simiIt->second.erase(it);
auto& subs = histoIt->second;
auto it = subs.begin();
while (it != subs.end())
if (currLedgerSeq <= info.index_->separationLedgerSeq_)
it = subs.erase(it);
<< "pubAccountTransaction: " << "proposed=" << iProposed
<< ", accepted=" << iAccepted;
if (!notify.empty() || !accountHistoryNotify.empty())
auto const& stTxn = transaction.getTxn();
auto const trResult = transaction.getResult();
isrListener->getApiVersion(),
[&](Json::Value const& jv) { isrListener->send(jv, true); });
jvObj.set(jss::account_history_boundary, true);
jvObj.isMember(jss::account_history_tx_stream) ==
"ripple::NetworkOPsImp::pubAccountTransaction : "
"account_history_tx_stream not set");
for (auto& info : accountHistoryNotify)
auto& index = info.index_;
if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
    jvObj.set(jss::account_history_tx_first, true);
jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
info.sink_->getApiVersion(),
[&](Json::Value const& jv) { info.sink_->send(jv, true); });
for (auto const& affectedAccount : tx->getMentionedAccounts())
auto it = simiIt->second.begin();
while (it != simiIt->second.end())
it = simiIt->second.erase(it);
JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
if (!notify.empty() || !accountHistoryNotify.empty())
isrListener->getApiVersion(),
[&](Json::Value const& jv) { isrListener->send(jv, true); });
jvObj.isMember(jss::account_history_tx_stream) ==
"ripple::NetworkOPs::pubProposedAccountTransaction : "
"account_history_tx_stream not set");
for (auto& info : accountHistoryNotify)
auto& index = info.index_;
if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
    jvObj.set(jss::account_history_tx_first, true);
jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
info.sink_->getApiVersion(),
[&](Json::Value const& jv) { info.sink_->send(jv, true); });
for (auto const& naAccountID : vnaAccountIDs)
<< "subAccount: account: " << toBase58(naAccountID);
isrListener->insertSubAccountInfo(naAccountID, rt);
for (auto const& naAccountID : vnaAccountIDs)
auto simIterator = subMap.find(naAccountID);
if (simIterator == subMap.end())
usisElement[isrListener->getSeq()] = isrListener;
subMap.insert(simIterator, make_pair(naAccountID, usisElement));
simIterator->second[isrListener->getSeq()] = isrListener;
for (auto const& naAccountID : vnaAccountIDs)
isrListener->deleteSubAccountInfo(naAccountID, rt);
for (auto const& naAccountID : vnaAccountIDs)
auto simIterator = subMap.find(naAccountID);
if (simIterator != subMap.end())
simIterator->second.erase(uSeq);
if (simIterator->second.empty())
    subMap.erase(simIterator);
enum DatabaseType { Sqlite, None };
static const auto databaseType = [&]() -> DatabaseType {
    return DatabaseType::Sqlite;
    return DatabaseType::None;
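// Editor's note: the fragment above picks the transaction-history backend once,
// via an immediately-invoked lambda cached in a static. A minimal sketch of that
// idiom follows; the predicate is a placeholder, not rippled's actual check.
#include <cstdlib>

enum class DatabaseTypeSketch { Sqlite, None };

DatabaseTypeSketch
selectedDatabase()
{
    static DatabaseTypeSketch const databaseType = []() -> DatabaseTypeSketch {
        if (std::getenv("USE_SQLITE") != nullptr)  // placeholder condition
            return DatabaseTypeSketch::Sqlite;
        return DatabaseTypeSketch::None;
    }();
    return databaseType;
}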
if (databaseType == DatabaseType::None)
<< "AccountHistory job for account "
"AccountHistoryTxStream",
[this, dbType = databaseType, subInfo]() {
auto const& accountId = subInfo.index_->accountId_;
auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
<< "AccountHistory job for account " << toBase58(accountId)
<< " started. lastLedgerSeq=" << lastLedgerSeq;
auto stx = tx->getSTransaction();
if (stx->getAccountID(sfAccount) == accountId &&
    stx->getSeqProxy().value() == 1)
for (auto& node : meta->getNodes())
if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
if (node.isFieldPresent(sfNewFields))
if (auto inner = dynamic_cast<const STObject*>(
        node.peekAtPField(sfNewFields));
if (inner->isFieldPresent(sfAccount) &&
    inner->getAccountID(sfAccount) == accountId)
bool unsubscribe) -> bool {
sptr->send(jvObj, true);
bool unsubscribe) -> bool {
sptr->getApiVersion(),
[&](Json::Value const& jv) { sptr->send(jv, true); });
accountId, minLedger, maxLedger, marker, 0, true};
return db->newestAccountTxPage(options);
"ripple::NetworkOPsImp::addAccountHistoryJob::"
"getMoreTxns : invalid database type");
while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
int feeChargeCount = 0;
<< "AccountHistory job for account "
<< toBase58(accountId) << " no InfoSub. Fee charged "
<< feeChargeCount << " times.";
auto startLedgerSeq =
    (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
<< "AccountHistory job for account " << toBase58(accountId)
<< ", working on ledger range [" << startLedgerSeq << ","
<< lastLedgerSeq << "]";
auto haveRange = [&]() -> bool {
auto haveSomeValidatedLedgers =
    validatedMin, validatedMax);
return haveSomeValidatedLedgers &&
    validatedMin <= startLedgerSeq &&
    lastLedgerSeq <= validatedMax;
<< "AccountHistory reschedule job for account "
<< toBase58(accountId) << ", incomplete ledger range ["
<< startLedgerSeq << "," << lastLedgerSeq << "]";
while (!subInfo.index_->stopHistorical_)
getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
<< "AccountHistory job for account "
<< toBase58(accountId) << " getMoreTxns failed.";
auto const& txns = dbResult->first;
marker = dbResult->second;
size_t num_txns = txns.size();
for (size_t i = 0; i < num_txns; ++i)
auto const& [tx, meta] = txns[i];
<< "AccountHistory job for account "
<< toBase58(accountId) << " empty tx or meta.";
<< "AccountHistory job for account "
<< toBase58(accountId) << " no ledger.";
tx->getSTransaction();
<< "AccountHistory job for account "
<< " getSTransaction failed.";
auto const trR = meta->getResultTER();
transJson(stTxn, trR, true, curTxLedger, mRef);
jss::account_history_tx_index, txHistoryIndex--);
if (i + 1 == num_txns ||
    txns[i + 1].first->getLedger() != tx->getLedger())
    jvTx.set(jss::account_history_boundary, true);
if (isFirstTx(tx, meta))
    jvTx.set(jss::account_history_tx_first, true);
    sendMultiApiJson(jvTx, false);
<< "AccountHistory job for account "
<< " done, found last tx.";
sendMultiApiJson(jvTx, false);
<< "AccountHistory job for account "
<< " paging, marker=" << marker->ledgerSeq << ":"
if (!subInfo.index_->stopHistorical_)
lastLedgerSeq = startLedgerSeq - 1;
if (lastLedgerSeq <= 1)
<< "AccountHistory job for account "
<< " done, reached genesis ledger.";
subInfo.index_->separationLedgerSeq_ = ledger->seq();
auto const& accountId = subInfo.index_->accountId_;
if (!ledger->exists(accountKeylet))
<< "subAccountHistoryStart, no account " << toBase58(accountId)
<< ", no need to add AccountHistory job.";
if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
if (sleAcct->getFieldU32(sfSequence) == 1)
<< "subAccountHistoryStart, genesis account "
<< " does not have tx, no need to add AccountHistory job.";
"ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
"access genesis account");
subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
subInfo.index_->haveHistorical_ = true;
<< "subAccountHistoryStart, add AccountHistory job: accountId="
<< toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
if (!isrListener->insertSubAccountHistory(accountId))
<< "subAccountHistory, already subscribed to account "
isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
inner.emplace(isrListener->getSeq(), ahi);
simIterator->second.emplace(isrListener->getSeq(), ahi);
<< "subAccountHistory, no validated ledger yet, delay start";
isrListener->deleteSubAccountHistory(account);
auto& subInfoMap = simIterator->second;
auto subInfoIter = subInfoMap.find(seq);
if (subInfoIter != subInfoMap.end())
    subInfoIter->second.index_->stopHistorical_ = true;
simIterator->second.erase(seq);
if (simIterator->second.empty())
<< "unsubAccountHistory, account " << toBase58(account)
<< ", historyOnly = " << (historyOnly ? "true" : "false");
listeners->addSubscriber(isrListener);
UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
listeners->removeSubscriber(uSeq);
m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
Throw<std::runtime_error>("Operation only possible in STANDALONE mode.");
jvResult[jss::ledger_index] = lpClosed->info().seq;
jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
lpClosed->info().closeTime.time_since_epoch().count());
if (!lpClosed->rules().enabled(featureXRPFees))
jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
jvResult[jss::reserve_base] =
    lpClosed->fees().accountReserve(0).jsonClipped();
jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
jvResult[jss::validated_ledgers] =
.emplace(isrListener->getSeq(), isrListener)
.emplace(isrListener->getSeq(), isrListener)
.emplace(isrListener->getSeq(), isrListener)
jvResult[jss::random] = to_string(uRandom);
jvResult[jss::load_base] = feeTrack.getLoadBase();
jvResult[jss::load_factor] = feeTrack.getLoadFactor();
jvResult[jss::hostid] = getHostId(admin);
jvResult[jss::pubkey_node] =
.emplace(isrListener->getSeq(), isrListener)
.emplace(isrListener->getSeq(), isrListener)
.emplace(isrListener->getSeq(), isrListener)
.emplace(isrListener->getSeq(), isrListener)
.emplace(isrListener->getSeq(), isrListener)
.emplace(isrListener->getSeq(), isrListener)
if (map.find(pInfo->getSeq()) != map.end())
#ifndef USE_NEW_BOOK_PAGE
unsigned int iLimit,
uint256 uTipIndex = uBookBase;
stream << "getBookPage:" << book;
stream << "getBookPage: uBookBase=" << uBookBase;
stream << "getBookPage: uBookEnd=" << uBookEnd;
stream << "getBookPage: uTipIndex=" << uTipIndex;
bool bDirectAdvance = true;
unsigned int uBookEntry;
while (!bDone && iLimit-- > 0)
bDirectAdvance = false;
auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
sleOfferDir.reset();
uTipIndex = sleOfferDir->key();
cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
<< "getBookPage: uTipIndex=" << uTipIndex;
<< "getBookPage: offerIndex=" << offerIndex;
auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
bool firstOwnerOffer(true);
saOwnerFunds = saTakerGets;
else if (bGlobalFreeze)
auto umBalanceEntry = umBalance.find(uOfferOwnerID);
if (umBalanceEntry != umBalance.end())
saOwnerFunds = umBalanceEntry->second;
firstOwnerOffer = false;
if (saOwnerFunds < beast::zero)
saOwnerFunds.clear();
STAmount saOwnerFundsLimit = saOwnerFunds;
saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
if (saOwnerFundsLimit >= saTakerGets)
saTakerGetsFunded = saTakerGets;
saTakerGetsFunded = saOwnerFundsLimit;
saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
saTakerGetsFunded, saDirRate, saTakerPays.issue()))
    .setJson(jvOffer[jss::taker_pays_funded]);
saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
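// Editor's note: the order-book code above caps each offer's deliverable amount
// by what the owner can actually fund after the issuer's transfer fee, and
// remembers the remaining balance per owner so later offers from the same owner
// see the reduced amount. A sketch with plain doubles standing in for STAmount;
// all names here are illustrative.
#include <algorithm>
#include <map>

struct FundedOffer
{
    double takerGetsFunded;  // what the offer can really deliver
    double ownerRemaining;   // owner funds left for subsequent offers
};

FundedOffer
fundOffer(
    std::map<int, double>& ownerBalance,  // per-owner remaining funds, by owner id
    int ownerId,
    double ownerFunds,    // funds available before this offer
    double takerGets,     // amount the offer promises to deliver
    double transferRate)  // e.g. 1.002 for a 0.2% issuer transfer fee
{
    double const fundsLimit = ownerFunds / transferRate;
    double const funded = std::min(fundsLimit, takerGets);
    double const remaining = ownerFunds - funded * transferRate;
    ownerBalance[ownerId] = std::max(0.0, remaining);
    return {funded, ownerBalance[ownerId]};
}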
jvOf[jss::quality] = saDirRate.getText();
if (firstOwnerOffer)
    jvOf[jss::owner_funds] = saOwnerFunds.getText();
if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
    bDirectAdvance = true;
<< "getBookPage: offerIndex=" << offerIndex;
unsigned int iLimit,
MetaView lesActive(lpLedger, tapNONE, true);
OrderBookIterator obIterator(lesActive, book);
const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
    lesActive.isGlobalFrozen(book.in.account);
while (iLimit-- > 0 && obIterator.nextOffer())
auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
STAmount saDirRate = obIterator.getCurrentRate();
saOwnerFunds = saTakerGets;
else if (bGlobalFreeze)
auto umBalanceEntry = umBalance.find(uOfferOwnerID);
if (umBalanceEntry != umBalance.end())
saOwnerFunds = umBalanceEntry->second;
saOwnerFunds = lesActive.accountHolds(
if (saOwnerFunds.isNegative())
saOwnerFunds.zero();
STAmount saTakerGetsFunded;
STAmount saOwnerFundsLimit = saOwnerFunds;
saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
if (saOwnerFundsLimit >= saTakerGets)
saTakerGetsFunded = saTakerGets;
saTakerGetsFunded = saOwnerFundsLimit;
saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
    .setJson(jvOffer[jss::taker_pays_funded]);
STAmount saOwnerPays = (parityRate == offerRate)
    saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
jvOf[jss::quality] = saDirRate.getText();
auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
++counters_[static_cast<std::size_t>(om)].transitions;
counters_[static_cast<std::size_t>(om)].transitions == 1)
initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
    now - processStart_)
std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
auto [counters, mode, start, initialSync] = getCounterData();
auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
auto& state = obj[jss::state_accounting][states_[i]];
state[jss::transitions] = std::to_string(counters[i].transitions);
state[jss::duration_us] = std::to_string(counters[i].dur.count());
obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
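// Editor's note: StateAccounting above keeps, per operating mode, a transition
// count and the accumulated time spent in that mode, then reports both as
// strings in the state_accounting JSON object. A reduced sketch of the
// bookkeeping; the class and member names here are illustrative.
#include <array>
#include <chrono>
#include <cstdint>

struct ModeCounters
{
    std::uint32_t transitions = 0;
    std::chrono::microseconds dur{0};
};

class StateAccountingSketch
{
    std::array<ModeCounters, 5> counters_{};
    std::size_t current_ = 0;  // index of the current operating mode
    std::chrono::steady_clock::time_point start_ = std::chrono::steady_clock::now();

public:
    void
    mode(std::size_t om)
    {
        if (om == current_)
            return;
        auto const now = std::chrono::steady_clock::now();
        // close out the time spent in the mode we are leaving
        counters_[current_].dur +=
            std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
        current_ = om;
        ++counters_[om].transitions;
        start_ = now;
    }
};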
4571 boost::asio::io_service& io_svc,
4575 return std::make_unique<NetworkOPsImp>(
T back_inserter(T... args)
RAII class to check if an Item is already being processed on another thread, as indicated by it's pre...
Decorator for streaming out compact json.
Lightweight wrapper to tag static string.
Value get(UInt index, const Value &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Value & append(const Value &value)
Append value to array at the end.
bool isMember(const char *key) const
Return true if the object has a member named key.
A generic endpoint for log messages.
Stream trace() const
Severity stream access functions.
A metric for measuring an integral value.
void set(value_type value) const
Set the value on the gauge.
A reference to a handler for performing polled collection.
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
std::uint32_t getLoadFee() const
NetClock::time_point getReportTime() const
PublicKey const & identity() const
std::size_t size() const
The number of nodes in the cluster list.
std::string SERVER_DOMAIN
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
int RELAY_UNTRUSTED_VALIDATIONS
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
A pool of threads to perform work.
Json::Value getJson(int c=0)
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
std::uint32_t getClusterFee() const
std::uint32_t getLocalFee() const
std::uint32_t getLoadBase() const
std::uint32_t getRemoteFee() const
std::uint32_t getLoadFactor() const
void resetDeadlockDetector()
Reset the deadlock detection timer.
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
std::uint64_t initialSyncUs_
CounterData getCounterData() const
std::chrono::steady_clock::time_point start_
static std::array< Json::StaticString const, 5 > const states_
std::chrono::steady_clock::time_point const processStart_
Transaction with input flags and results to be applied in batches.
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
std::shared_ptr< Transaction > const transaction
void processClusterTimer()
boost::asio::steady_timer accountHistoryTxTimer_
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::vector< TransactionStatus > mTransactions
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, const bool bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
SubInfoMapType mSubAccount
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
Unsubscribe from an account's transactions.
std::set< uint256 > pendingValidations_
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply the batch of transactions and wait for this transaction to complete.
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
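setTimer bundles an expiry, an on-expire callback, and an on-error callback around a boost::asio::steady_timer. The self-contained sketch below shows the plain Asio pattern such a helper typically wraps; scheduleTimer and the io_context setup are illustrative, not taken from this file.

// Minimal sketch of the timer pattern: schedule a steady_timer and dispatch
// either onExpire or onError from the completion handler.
#include <boost/asio/error.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/steady_timer.hpp>
#include <chrono>
#include <functional>
#include <iostream>

void
scheduleTimer(
    boost::asio::steady_timer& timer,
    std::chrono::milliseconds expiry,
    std::function<void()> onExpire,
    std::function<void()> onError)
{
    timer.expires_after(expiry);
    timer.async_wait([onExpire, onError](boost::system::error_code const& ec) {
        if (ec == boost::asio::error::operation_aborted)
            return;      // timer was cancelled; nothing to do
        if (ec)
            onError();
        else
            onExpire();
    });
}

int
main()
{
    boost::asio::io_context io;
    boost::asio::steady_timer timer(io);
    scheduleTimer(
        timer,
        std::chrono::milliseconds(100),
        [] { std::cout << "expired\n"; },
        [] { std::cout << "error\n"; });
    io.run();
}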
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
std::atomic< bool > unlBlocked_
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree and returns the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
std::atomic< bool > needNetworkLedger_
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void endConsensus() override
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
void clearNeedNetworkLedger() override
DispatchState mDispatchState
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
bool checkLastClosedLedger(const Overlay::PeerSequence &, uint256 &networkClosed)
void reportFeeChange() override
void processHeartbeatTimer()
void setMode(OperatingMode om, const char *reason) override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
Subscribe to an account's new transactions and retrieve the account's historical transactions.
std::mutex validationsMutex_
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
bool subTransactions(InfoSub::ref ispListener) override
std::atomic< bool > amendmentWarned_
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
bool beginConsensus(uint256 const &networkClosed) override
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
void getCountsJson(Json::Value &obj)
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Writable ledger view that accumulates state and tx changes.
BookListeners::pointer getBookListeners(Book const &)
BookListeners::pointer makeBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, const AcceptedLedgerTx &alTx, MultiApiJson const &jvObj)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
Represents a set of transactions in RCLConsensus.
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
std::string getText() const override
Issue const & issue() const
std::optional< T > get(std::string const &name) const
std::size_t size() const noexcept
void const * data() const noexcept
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
std::chrono::seconds closeOffset() const
time_point closeTime() const
Returns the predicted close time, in network time.
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Validator keys and manifest as set in configuration file.
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > localPublicKey() const
Returns the local validator public key, or std::nullopt if one is not configured.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Json::Value jsonClipped() const
static constexpr std::size_t size()
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
@ arrayValue
array value (ordered list)
@ objectValue
object value (collection of name/value pairs).
void rngfill(void *buffer, std::size_t bytes, Generator &g)
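rngfill's contract, as declared above, is to fill an arbitrary buffer from whatever generator it is handed. The stand-alone helper below (fillWithRandom is a hypothetical name, not the library routine) shows the same block-at-a-time idea using a standard engine.

// Illustrative only: copy whole blocks of engine output into a raw buffer.
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <random>

template <class Generator>
void
fillWithRandom(void* buffer, std::size_t bytes, Generator& g)
{
    auto* out = static_cast<std::uint8_t*>(buffer);
    while (bytes > 0)
    {
        auto const v = g();                        // one block of engine output
        auto const n = std::min(bytes, sizeof(v));
        std::memcpy(out, &v, n);
        out += n;
        bytes -= n;
    }
}

int
main()
{
    std::mt19937_64 engine{42};
    std::array<std::uint8_t, 32> key{};
    fillWithRandom(key.data(), key.size(), engine);
}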
std::string const & getVersionString()
Server version.
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
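These keylet helpers pair with the ReadView::read and succ accessors listed earlier: build the key of the entry you want, then ask the view for it. The fragment below is a hedged sketch of that lookup for an account root and one of its offers; the header paths and the sfTakerPays field access are assumptions, not quotes from this file.

// Hedged sketch (not code from this file): resolve an account root and one
// of the account's offers through keylet helpers and ReadView::read.
#include <xrpl/protocol/Indexes.h>   // keylet::account, keylet::offer (path assumed)
#include <xrpl/protocol/SField.h>    // sfTakerPays (path assumed)
#include <xrpld/ledger/ReadView.h>   // may live under xrpl/ledger/ in newer layouts

#include <cstdint>
#include <optional>

namespace ripple {

std::optional<STAmount>
offerTakerPays(ReadView const& view, AccountID const& owner, std::uint32_t seq)
{
    // No account root means the account does not exist in this ledger.
    if (!view.read(keylet::account(owner)))
        return std::nullopt;

    // The offer keyed by (owner, seq); absent if never created or already consumed.
    if (auto const sle = view.read(keylet::offer(owner, seq)))
        return sle->getFieldAmount(sfTakerPays);

    return std::nullopt;
}

}  // namespace ripple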
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
STAmount divide(STAmount const &amount, Rate const &rate)
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
std::uint64_t getQuality(uint256 const &uBase)
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
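Because generateKeyPair is deterministic in its seed, chaining it with generateSeed, calcAccountID, and toBase58 (all listed in this index) yields a stable address from a passphrase. A hedged sketch, with header paths assumed:

// Sketch only: derive a reproducible account address from a passphrase.
#include <xrpl/protocol/AccountID.h>   // calcAccountID, toBase58 (path assumed)
#include <xrpl/protocol/KeyType.h>     // KeyType (path assumed)
#include <xrpl/protocol/SecretKey.h>   // generateSeed, generateKeyPair (path assumed)

#include <string>

namespace ripple {

std::string
addressFromPassphrase(std::string const& passPhrase)
{
    Seed const seed = generateSeed(passPhrase);
    auto const [publicKey, secretKey] =
        generateKeyPair(KeyType::secp256k1, seed);
    (void)secretKey;  // not needed for the address
    return toBase58(calcAccountID(publicKey));
}

}  // namespace ripple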
auto constexpr muldiv_max
std::unique_ptr< LocalTxs > make_LocalTxs()
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
STAmount amountFromQuality(std::uint64_t rate)
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
@ warnRPC_UNSUPPORTED_MAJORITY
@ warnRPC_AMENDMENT_BLOCKED
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ SYNCING
fallen slightly behind
@ TRACKING
convinced we agree with the network
@ FULL
we have the ledger and can even validate
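The enumerators above form a progression from least to most synchronized (matching the state names used for state accounting), so code can treat "later" modes as stronger guarantees. The stand-alone mirror below illustrates that ordering; Mode and name() are local stand-ins, not the real OperatingMode type.

// Illustrative only: a local mirror of the operating-mode progression.
#include <cstdint>
#include <iostream>

enum class Mode : std::uint8_t {
    DISCONNECTED,  // not ready to process requests
    CONNECTED,     // convinced we are talking to the network
    SYNCING,       // fallen slightly behind
    TRACKING,      // convinced we agree with the network
    FULL           // we have the ledger and can even validate
};

constexpr char const*
name(Mode m)
{
    switch (m)
    {
        case Mode::DISCONNECTED: return "disconnected";
        case Mode::CONNECTED:    return "connected";
        case Mode::SYNCING:      return "syncing";
        case Mode::TRACKING:     return "tracking";
        case Mode::FULL:         return "full";
    }
    return "unknown";
}

int
main()
{
    Mode const m = Mode::TRACKING;
    // Modes at or past TRACKING imply agreement with the network.
    std::cout << name(m) << (m >= Mode::TRACKING ? " (agrees)" : "") << '\n';
}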
STAmount multiply(STAmount const &amount, Rate const &rate)
AccountID calcAccountID(PublicKey const &pk)
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Json::Value rpcError(int iError)
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
std::string strHex(FwdIt begin, FwdIt end)
bool isTemMalformed(TER x)
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
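cdirFirst and cdirNext share the same out-parameters (page, index, entry), so a directory walk is a first/next loop over its root key. A hedged sketch, with the header path assumed:

// Sketch only: count the entries of a directory by walking it with the
// cdirFirst/cdirNext pair documented above.
#include <xrpld/ledger/View.h>   // cdirFirst, cdirNext (path assumed)

#include <cstddef>
#include <memory>

namespace ripple {

std::size_t
countDirectoryEntries(ReadView const& view, uint256 const& root)
{
    std::shared_ptr<SLE const> page;
    unsigned int index = 0;
    uint256 entry;
    std::size_t count = 0;

    if (cdirFirst(view, root, page, index, entry))
    {
        do
        {
            ++count;  // 'entry' holds the key of one item in the directory
        } while (cdirNext(view, root, page, index, entry));
    }
    return count;
}

}  // namespace ripple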
void forAllApiVersions(Fn const &fn, Args &&... args)
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
uint256 getQualityNext(uint256 const &uBase)
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const ¤cy, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const ¤t)
std::string to_string_iso(date::sys_time< Duration > tp)
std::string to_string(base_uint< Bits, Tag > const &a)
FeeSetup setup_FeeVote(Section const §ion)
Number root(Number f, unsigned d)
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
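The optional return type lets callers notice when value*mul/div cannot be computed exactly; the sketch below saturates in that case. Treating a disengaged result as overflow is an assumption drawn from the signature, not a statement from this file.

// Sketch only: scale a fee by loadFactor/loadBase, saturating on overflow.
#include <xrpl/basics/mulDiv.h>   // path assumed

#include <cstdint>
#include <limits>

namespace ripple {

std::uint64_t
scaleFee(std::uint64_t fee, std::uint64_t loadFactor, std::uint64_t loadBase)
{
    if (auto const scaled = mulDiv(fee, loadFactor, loadBase))
        return *scaled;
    return std::numeric_limits<std::uint64_t>::max();  // saturate on overflow
}

}  // namespace ripple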
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
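checkValidity returns a (Validity, reason) pair; the sketch below shows the typical gate before processing a transaction further. Validity::Valid and the JLOG journal macro are assumptions here rather than items quoted from this index, and transitively included types (Rules, Config) are assumed available.

// Sketch only: reject a transaction whose signature or local checks fail.
#include <xrpl/beast/utility/Journal.h>   // JLOG (path assumed)
#include <xrpld/app/misc/HashRouter.h>
#include <xrpld/app/tx/apply.h>           // checkValidity

namespace ripple {

bool
passesLocalChecks(
    HashRouter& router,
    STTx const& tx,
    Rules const& rules,
    Config const& config,
    beast::Journal j)
{
    auto const [validity, reason] = checkValidity(router, tx, rules, config);
    if (validity != Validity::Valid)
    {
        JLOG(j.info()) << "Transaction rejected: " << reason;
        return false;
    }
    return true;
}

}  // namespace ripple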
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
bool transResultInfo(TER code, std::string &token, std::string &text)
uint256 getBookBase(Book const &book)
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
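transferRate, multiply, and parityRate (all listed here) combine into the usual pattern for charging an issuer transfer fee: compare against the 1:1 rate and only scale when a fee actually applies. A hedged sketch, with header paths assumed:

// Sketch only: gross-up an IOU amount by the issuer's transfer fee so the
// recipient receives 'amount' net; parityRate means no fee is charged.
#include <xrpl/protocol/Rate.h>   // multiply, parityRate (path assumed)
#include <xrpld/ledger/View.h>    // transferRate (path assumed)

namespace ripple {

STAmount
grossAmountToSend(
    ReadView const& view,
    AccountID const& issuer,
    STAmount const& amount)
{
    Rate const rate = transferRate(view, issuer);
    if (rate == parityRate)
        return amount;              // 1:1 exchange, nothing to add
    return multiply(amount, rate);  // sender must deliver amount scaled by the rate
}

}  // namespace ripple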
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
static std::uint32_t trunc32(std::uint64_t v)
static auto const genesisAccountId
std::string serialized
The manifest in serialized form.
std::uint32_t sequence
The sequence number of this manifest.
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Server fees published on server subscription.
bool operator!=(ServerFeeSummary const &b) const
ServerFeeSummary()=default
std::optional< TxQ::Metrics > em
std::uint32_t loadFactorServer
bool operator==(ServerFeeSummary const &b) const
std::uint32_t loadBaseServer
decltype(initialSyncUs_) initialSyncUs
decltype(counters_) counters
std::uint64_t transitions
std::chrono::microseconds dur
beast::insight::Gauge full_transitions
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
beast::insight::Hook hook
beast::insight::Gauge connected_duration
beast::insight::Gauge tracking_duration
beast::insight::Gauge connected_transitions
beast::insight::Gauge disconnected_transitions
beast::insight::Gauge syncing_duration
beast::insight::Gauge tracking_transitions
beast::insight::Gauge full_duration
beast::insight::Gauge disconnected_duration
beast::insight::Gauge syncing_transitions
std::uint32_t historyLastLedgerSeq_
std::uint32_t separationLedgerSeq_
AccountID const accountId_
std::uint32_t forwardTxIndex_
std::atomic< bool > stopHistorical_
std::int32_t historyTxIndex_
SubAccountHistoryIndex(AccountID const &accountId)
std::shared_ptr< SubAccountHistoryIndex > index_
Represents a transfer rate.
Data format for exchanging consumption information across peers.
std::vector< Item > items
Changes in trusted nodes after updating validator list.
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
void set(const char *key, auto const &v)
IsMemberResult isMember(const char *key) const
Select all peers (except those optionally excluded) that are in our cluster.
Sends a message to all peers.