rippled
Loading...
Searching...
No Matches
NetworkOPs.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/main/Tuning.h>
32#include <xrpld/app/misc/AmendmentTable.h>
33#include <xrpld/app/misc/DeliverMax.h>
34#include <xrpld/app/misc/HashRouter.h>
35#include <xrpld/app/misc/LoadFeeTrack.h>
36#include <xrpld/app/misc/NetworkOPs.h>
37#include <xrpld/app/misc/Transaction.h>
38#include <xrpld/app/misc/TxQ.h>
39#include <xrpld/app/misc/ValidatorKeys.h>
40#include <xrpld/app/misc/ValidatorList.h>
41#include <xrpld/app/misc/detail/AccountTxPaging.h>
42#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
43#include <xrpld/app/tx/apply.h>
44#include <xrpld/consensus/Consensus.h>
45#include <xrpld/consensus/ConsensusParms.h>
46#include <xrpld/overlay/Cluster.h>
47#include <xrpld/overlay/Overlay.h>
48#include <xrpld/overlay/predicates.h>
49#include <xrpld/perflog/PerfLog.h>
50#include <xrpld/rpc/BookChanges.h>
51#include <xrpld/rpc/CTID.h>
52#include <xrpld/rpc/DeliveredAmount.h>
53#include <xrpld/rpc/MPTokenIssuanceID.h>
54#include <xrpld/rpc/ServerHandler.h>
55
56#include <xrpl/basics/UptimeClock.h>
57#include <xrpl/basics/mulDiv.h>
58#include <xrpl/basics/safe_cast.h>
59#include <xrpl/basics/scope.h>
60#include <xrpl/beast/utility/rngfill.h>
61#include <xrpl/crypto/RFC1751.h>
62#include <xrpl/crypto/csprng.h>
63#include <xrpl/protocol/BuildInfo.h>
64#include <xrpl/protocol/Feature.h>
65#include <xrpl/protocol/MultiApiJson.h>
66#include <xrpl/protocol/NFTSyntheticSerializer.h>
67#include <xrpl/protocol/RPCErr.h>
68#include <xrpl/protocol/TxFlags.h>
69#include <xrpl/protocol/jss.h>
70#include <xrpl/resource/Fees.h>
71#include <xrpl/resource/ResourceManager.h>
72
73#include <boost/asio/ip/host_name.hpp>
74#include <boost/asio/steady_timer.hpp>
75
76#include <algorithm>
77#include <exception>
78#include <mutex>
79#include <optional>
80#include <set>
81#include <sstream>
82#include <string>
83#include <tuple>
84#include <unordered_map>
85
86namespace ripple {
87
88class NetworkOPsImp final : public NetworkOPs
89{
95 {
96 public:
98 bool const admin;
99 bool const local;
101 bool applied = false;
103
106 bool a,
107 bool l,
108 FailHard f)
109 : transaction(t), admin(a), local(l), failType(f)
110 {
111 XRPL_ASSERT(
113 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
114 "valid inputs");
115 }
116 };
117
121 enum class DispatchState : unsigned char {
122 none,
123 scheduled,
124 running,
125 };
126
128
144 {
152
156 std::chrono::steady_clock::time_point start_ =
158 std::chrono::steady_clock::time_point const processStart_ = start_;
161
162 public:
164 {
166 .transitions = 1;
167 }
168
175 void
177
183 void
184 json(Json::Value& obj) const;
185
187 {
189 decltype(mode_) mode;
190 decltype(start_) start;
192 };
193
196 {
199 }
200 };
201
204 {
205 ServerFeeSummary() = default;
206
208 XRPAmount fee,
209 TxQ::Metrics&& escalationMetrics,
210 LoadFeeTrack const& loadFeeTrack);
211 bool
212 operator!=(ServerFeeSummary const& b) const;
213
214 bool
216 {
217 return !(*this != b);
218 }
219
224 };
225
226public:
228 Application& app,
230 bool standalone,
231 std::size_t minPeerCount,
232 bool start_valid,
233 JobQueue& job_queue,
235 ValidatorKeys const& validatorKeys,
236 boost::asio::io_service& io_svc,
237 beast::Journal journal,
238 beast::insight::Collector::ptr const& collector)
239 : app_(app)
240 , m_journal(journal)
243 , heartbeatTimer_(io_svc)
244 , clusterTimer_(io_svc)
245 , accountHistoryTxTimer_(io_svc)
246 , mConsensus(
247 app,
249 setup_FeeVote(app_.config().section("voting")),
250 app_.logs().journal("FeeVote")),
252 *m_localTX,
253 app.getInboundTransactions(),
254 beast::get_abstract_clock<std::chrono::steady_clock>(),
255 validatorKeys,
256 app_.logs().journal("LedgerConsensus"))
257 , validatorPK_(
258 validatorKeys.keys ? validatorKeys.keys->publicKey
259 : decltype(validatorPK_){})
261 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
262 : decltype(validatorMasterPK_){})
264 , m_job_queue(job_queue)
265 , m_standalone(standalone)
266 , minPeerCount_(start_valid ? 0 : minPeerCount)
267 , m_stats(std::bind(&NetworkOPsImp::collect_metrics, this), collector)
268 {
269 }
270
271 ~NetworkOPsImp() override
272 {
273 // This clear() is necessary to ensure the shared_ptrs in this map get
274 // destroyed NOW because the objects in this map invoke methods on this
275 // class when they are destroyed
277 }
278
279public:
281 getOperatingMode() const override;
282
284 strOperatingMode(OperatingMode const mode, bool const admin) const override;
285
287 strOperatingMode(bool const admin = false) const override;
288
289 //
290 // Transaction operations.
291 //
292
293 // Must complete immediately.
294 void
296
297 void
299 std::shared_ptr<Transaction>& transaction,
300 bool bUnlimited,
301 bool bLocal,
302 FailHard failType) override;
303
304 void
305 processTransactionSet(CanonicalTXSet const& set) override;
306
315 void
318 bool bUnlimited,
319 FailHard failType);
320
330 void
333 bool bUnlimited,
334 FailHard failtype);
335
336private:
337 bool
339
340 void
343 std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback);
344
345public:
349 void
351
357 void
359
360 //
361 // Owner functions.
362 //
363
367 AccountID const& account) override;
368
369 //
370 // Book functions.
371 //
372
373 void
376 Book const&,
377 AccountID const& uTakerID,
378 bool const bProof,
379 unsigned int iLimit,
380 Json::Value const& jvMarker,
381 Json::Value& jvResult) override;
382
383 // Ledger proposal/close functions.
384 bool
386
387 bool
390 std::string const& source) override;
391
392 void
393 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
394
395 // Network state machine.
396
397 // Used for the "jump" case.
398private:
399 void
401 bool
403
404public:
405 bool
407 uint256 const& networkClosed,
408 std::unique_ptr<std::stringstream> const& clog) override;
409 void
411 void
412 setStandAlone() override;
413
417 void
418 setStateTimer() override;
419
420 void
421 setNeedNetworkLedger() override;
422 void
423 clearNeedNetworkLedger() override;
424 bool
425 isNeedNetworkLedger() override;
426 bool
427 isFull() override;
428
429 void
430 setMode(OperatingMode om) override;
431
432 bool
433 isBlocked() override;
434 bool
435 isAmendmentBlocked() override;
436 void
437 setAmendmentBlocked() override;
438 bool
439 isAmendmentWarned() override;
440 void
441 setAmendmentWarned() override;
442 void
443 clearAmendmentWarned() override;
444 bool
445 isUNLBlocked() override;
446 void
447 setUNLBlocked() override;
448 void
449 clearUNLBlocked() override;
450 void
451 consensusViewChange() override;
452
454 getConsensusInfo() override;
456 getServerInfo(bool human, bool admin, bool counters) override;
457 void
458 clearLedgerFetch() override;
460 getLedgerFetchInfo() override;
463 std::optional<std::chrono::milliseconds> consensusDelay) override;
464 void
465 reportFeeChange() override;
466 void
468
469 void
470 updateLocalTx(ReadView const& view) override;
472 getLocalTxCount() override;
473
474 //
475 // Monitoring: publisher side.
476 //
477 void
478 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
479 void
482 std::shared_ptr<STTx const> const& transaction,
483 TER result) override;
484 void
485 pubValidation(std::shared_ptr<STValidation> const& val) override;
486
487 //--------------------------------------------------------------------------
488 //
489 // InfoSub::Source.
490 //
491 void
493 InfoSub::ref ispListener,
494 hash_set<AccountID> const& vnaAccountIDs,
495 bool rt) override;
496 void
498 InfoSub::ref ispListener,
499 hash_set<AccountID> const& vnaAccountIDs,
500 bool rt) override;
501
502 // Just remove the subscription from the tracking
503 // not from the InfoSub. Needed for InfoSub destruction
504 void
506 std::uint64_t seq,
507 hash_set<AccountID> const& vnaAccountIDs,
508 bool rt) override;
509
511 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
512 override;
513 void
515 InfoSub::ref ispListener,
516 AccountID const& account,
517 bool historyOnly) override;
518
519 void
521 std::uint64_t seq,
522 AccountID const& account,
523 bool historyOnly) override;
524
525 bool
526 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
527 bool
528 unsubLedger(std::uint64_t uListener) override;
529
530 bool
531 subBookChanges(InfoSub::ref ispListener) override;
532 bool
533 unsubBookChanges(std::uint64_t uListener) override;
534
535 bool
536 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
537 override;
538 bool
539 unsubServer(std::uint64_t uListener) override;
540
541 bool
542 subBook(InfoSub::ref ispListener, Book const&) override;
543 bool
544 unsubBook(std::uint64_t uListener, Book const&) override;
545
546 bool
547 subManifests(InfoSub::ref ispListener) override;
548 bool
549 unsubManifests(std::uint64_t uListener) override;
550 void
551 pubManifest(Manifest const&) override;
552
553 bool
554 subTransactions(InfoSub::ref ispListener) override;
555 bool
556 unsubTransactions(std::uint64_t uListener) override;
557
558 bool
559 subRTTransactions(InfoSub::ref ispListener) override;
560 bool
561 unsubRTTransactions(std::uint64_t uListener) override;
562
563 bool
564 subValidations(InfoSub::ref ispListener) override;
565 bool
566 unsubValidations(std::uint64_t uListener) override;
567
568 bool
569 subPeerStatus(InfoSub::ref ispListener) override;
570 bool
571 unsubPeerStatus(std::uint64_t uListener) override;
572 void
573 pubPeerStatus(std::function<Json::Value(void)> const&) override;
574
575 bool
576 subConsensus(InfoSub::ref ispListener) override;
577 bool
578 unsubConsensus(std::uint64_t uListener) override;
579
581 findRpcSub(std::string const& strUrl) override;
583 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
584 bool
585 tryRemoveRpcSub(std::string const& strUrl) override;
586
587 void
588 stop() override
589 {
590 {
591 boost::system::error_code ec;
592 heartbeatTimer_.cancel(ec);
593 if (ec)
594 {
595 JLOG(m_journal.error())
596 << "NetworkOPs: heartbeatTimer cancel error: "
597 << ec.message();
598 }
599
600 ec.clear();
601 clusterTimer_.cancel(ec);
602 if (ec)
603 {
604 JLOG(m_journal.error())
605 << "NetworkOPs: clusterTimer cancel error: "
606 << ec.message();
607 }
608
609 ec.clear();
610 accountHistoryTxTimer_.cancel(ec);
611 if (ec)
612 {
613 JLOG(m_journal.error())
614 << "NetworkOPs: accountHistoryTxTimer cancel error: "
615 << ec.message();
616 }
617 }
618 // Make sure that any waitHandlers pending in our timers are done.
619 using namespace std::chrono_literals;
620 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
621 }
622
623 void
624 stateAccounting(Json::Value& obj) override;
625
626private:
627 void
628 setTimer(
629 boost::asio::steady_timer& timer,
630 std::chrono::milliseconds const& expiry_time,
631 std::function<void()> onExpire,
632 std::function<void()> onError);
633 void
635 void
637 void
639 void
641
643 transJson(
644 std::shared_ptr<STTx const> const& transaction,
645 TER result,
646 bool validated,
649
650 void
653 AcceptedLedgerTx const& transaction,
654 bool last);
655
656 void
659 AcceptedLedgerTx const& transaction,
660 bool last);
661
662 void
665 std::shared_ptr<STTx const> const& transaction,
666 TER result);
667
668 void
669 pubServer();
670 void
672
674 getHostId(bool forAdmin);
675
676private:
680
681 /*
682 * With a validated ledger to separate history and future, the node
683 * streams historical txns with negative indexes starting from -1,
684 * and streams future txns starting from index 0.
685 * The SubAccountHistoryIndex struct maintains these indexes.
686 * It also has a flag stopHistorical_ for stopping streaming
687 * the historical txns.
688 */
725
729 void
733 void
735 void
737
740
742
744
746
751
753 boost::asio::steady_timer heartbeatTimer_;
754 boost::asio::steady_timer clusterTimer_;
755 boost::asio::steady_timer accountHistoryTxTimer_;
756
758
761
763
765
768
770
772
773 enum SubTypes {
774 sLedger, // Accepted ledgers.
775 sManifests, // Received validator manifests.
776 sServer, // When server changes connectivity state.
777 sTransactions, // All accepted transactions.
778 sRTTransactions, // All proposed and accepted transactions.
779 sValidations, // Received validations.
780 sPeerStatus, // Peer status changes.
781 sConsensusPhase, // Consensus phase
782 sBookChanges, // Per-ledger order book changes
783 sLastEntry // Any new entry must be ADDED ABOVE this one
784 };
785
787
789
791
792 // Whether we are in standalone mode.
793 bool const m_standalone;
794
795 // The number of nodes that we need to consider ourselves connected.
797
798 // Transaction batching.
803
805
808
809private:
810 struct Stats
811 {
812 template <class Handler>
814 Handler const& handler,
815 beast::insight::Collector::ptr const& collector)
816 : hook(collector->make_hook(handler))
817 , disconnected_duration(collector->make_gauge(
818 "State_Accounting",
819 "Disconnected_duration"))
820 , connected_duration(collector->make_gauge(
821 "State_Accounting",
822 "Connected_duration"))
824 collector->make_gauge("State_Accounting", "Syncing_duration"))
825 , tracking_duration(collector->make_gauge(
826 "State_Accounting",
827 "Tracking_duration"))
829 collector->make_gauge("State_Accounting", "Full_duration"))
830 , disconnected_transitions(collector->make_gauge(
831 "State_Accounting",
832 "Disconnected_transitions"))
833 , connected_transitions(collector->make_gauge(
834 "State_Accounting",
835 "Connected_transitions"))
836 , syncing_transitions(collector->make_gauge(
837 "State_Accounting",
838 "Syncing_transitions"))
839 , tracking_transitions(collector->make_gauge(
840 "State_Accounting",
841 "Tracking_transitions"))
843 collector->make_gauge("State_Accounting", "Full_transitions"))
844 {
845 }
846
853
859 };
860
861 std::mutex m_statsMutex; // Mutex to lock m_stats
863
864private:
865 void
867};
868
869//------------------------------------------------------------------------------
870
872 {"disconnected", "connected", "syncing", "tracking", "full"}};
873
875
883
884static auto const genesisAccountId = calcAccountID(
886 .first);
887
888//------------------------------------------------------------------------------
889inline OperatingMode
891{
892 return mMode;
893}
894
895inline std::string
896NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
897{
898 return strOperatingMode(mMode, admin);
899}
900
901inline void
906
907inline void
912
913inline void
918
919inline bool
924
925inline bool
930
933{
934 static std::string const hostname = boost::asio::ip::host_name();
935
936 if (forAdmin)
937 return hostname;
938
939 // For non-admin uses hash the node public key into a
940 // single RFC1751 word:
941 static std::string const shroudedHostId = [this]() {
942 auto const& id = app_.nodeIdentity();
943
944 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
945 }();
946
947 return shroudedHostId;
948}
949
950void
952{
954
955 // Only do this work if a cluster is configured
956 if (app_.cluster().size() != 0)
958}
959
960void
962 boost::asio::steady_timer& timer,
963 std::chrono::milliseconds const& expiry_time,
964 std::function<void()> onExpire,
965 std::function<void()> onError)
966{
967 // Only start the timer if waitHandlerCounter_ is not yet joined.
968 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
969 [this, onExpire, onError](boost::system::error_code const& e) {
970 if ((e.value() == boost::system::errc::success) &&
971 (!m_job_queue.isStopped()))
972 {
973 onExpire();
974 }
975 // Recover as best we can if an unexpected error occurs.
976 if (e.value() != boost::system::errc::success &&
977 e.value() != boost::asio::error::operation_aborted)
978 {
979 // Try again later and hope for the best.
980 JLOG(m_journal.error())
981 << "Timer got error '" << e.message()
982 << "'. Restarting timer.";
983 onError();
984 }
985 }))
986 {
987 timer.expires_from_now(expiry_time);
988 timer.async_wait(std::move(*optionalCountedHandler));
989 }
990}
991
992void
993NetworkOPsImp::setHeartbeatTimer()
994{
995 setTimer(
996 heartbeatTimer_,
997 mConsensus.parms().ledgerGRANULARITY,
998 [this]() {
999 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
1000 processHeartbeatTimer();
1001 });
1002 },
1003 [this]() { setHeartbeatTimer(); });
1004}
1005
1006void
1007NetworkOPsImp::setClusterTimer()
1008{
1009 using namespace std::chrono_literals;
1010
1011 setTimer(
1012 clusterTimer_,
1013 10s,
1014 [this]() {
1015 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
1016 processClusterTimer();
1017 });
1018 },
1019 [this]() { setClusterTimer(); });
1020}
1021
1022void
1023NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
1024{
1025 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
1026 << toBase58(subInfo.index_->accountId_);
1027 using namespace std::chrono_literals;
1028 setTimer(
1029 accountHistoryTxTimer_,
1030 4s,
1031 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1032 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1033}
1034
1035void
1036NetworkOPsImp::processHeartbeatTimer()
1037{
1038 RclConsensusLogger clog(
1039 "Heartbeat Timer", mConsensus.validating(), m_journal);
1040 {
1041 std::unique_lock lock{app_.getMasterMutex()};
1042
1043 // VFALCO NOTE This is for diagnosing a crash on exit
1044 LoadManager& mgr(app_.getLoadManager());
1045 mgr.heartbeat();
1046
1047 std::size_t const numPeers = app_.overlay().size();
1048
1049 // do we have sufficient peers? If not, we are disconnected.
1050 if (numPeers < minPeerCount_)
1051 {
1052 if (mMode != OperatingMode::DISCONNECTED)
1053 {
1054 setMode(OperatingMode::DISCONNECTED);
1056 ss << "Node count (" << numPeers << ") has fallen "
1057 << "below required minimum (" << minPeerCount_ << ").";
1058 JLOG(m_journal.warn()) << ss.str();
1059 CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
1060 }
1061 else
1062 {
1063 CLOG(clog.ss())
1064 << "already DISCONNECTED. too few peers (" << numPeers
1065 << "), need at least " << minPeerCount_;
1066 }
1067
1068 // MasterMutex lock need not be held to call setHeartbeatTimer()
1069 lock.unlock();
1070 // We do not call mConsensus.timerEntry until there are enough
1071 // peers providing meaningful inputs to consensus
1072 setHeartbeatTimer();
1073
1074 return;
1075 }
1076
1077 if (mMode == OperatingMode::DISCONNECTED)
1078 {
1079 setMode(OperatingMode::CONNECTED);
1080 JLOG(m_journal.info())
1081 << "Node count (" << numPeers << ") is sufficient.";
1082 CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
1083 << " peers. ";
1084 }
1085
1086 // Check if the last validated ledger forces a change between these
1087 // states.
1088 auto origMode = mMode.load();
1089 CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
1090 if (mMode == OperatingMode::SYNCING)
1091 setMode(OperatingMode::SYNCING);
1092 else if (mMode == OperatingMode::CONNECTED)
1093 setMode(OperatingMode::CONNECTED);
1094 auto newMode = mMode.load();
1095 if (origMode != newMode)
1096 {
1097 CLOG(clog.ss())
1098 << ", changing to " << strOperatingMode(newMode, true);
1099 }
1100 CLOG(clog.ss()) << ". ";
1101 }
1102
1103 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());
1104
1105 CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
1106 ConsensusPhase const currPhase = mConsensus.phase();
1107 if (mLastConsensusPhase != currPhase)
1108 {
1109 reportConsensusStateChange(currPhase);
1110 mLastConsensusPhase = currPhase;
1111 CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
1112 }
1113 CLOG(clog.ss()) << ". ";
1114
1115 setHeartbeatTimer();
1116}
1117
1118void
1119NetworkOPsImp::processClusterTimer()
1120{
1121 if (app_.cluster().size() == 0)
1122 return;
1123
1124 using namespace std::chrono_literals;
1125
1126 bool const update = app_.cluster().update(
1127 app_.nodeIdentity().first,
1128 "",
1129 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1130 ? app_.getFeeTrack().getLocalFee()
1131 : 0,
1132 app_.timeKeeper().now());
1133
1134 if (!update)
1135 {
1136 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1137 setClusterTimer();
1138 return;
1139 }
1140
1141 protocol::TMCluster cluster;
1142 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1143 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1144 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1145 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1146 n.set_nodeload(node.getLoadFee());
1147 if (!node.name().empty())
1148 n.set_nodename(node.name());
1149 });
1150
1151 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1152 for (auto& item : gossip.items)
1153 {
1154 protocol::TMLoadSource& node = *cluster.add_loadsources();
1155 node.set_name(to_string(item.address));
1156 node.set_cost(item.balance);
1157 }
1158 app_.overlay().foreach(send_if(
1159 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1160 peer_in_cluster()));
1161 setClusterTimer();
1162}
1163
1164//------------------------------------------------------------------------------
1165
1167NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
1168 const
1169{
1170 if (mode == OperatingMode::FULL && admin)
1171 {
1172 auto const consensusMode = mConsensus.mode();
1173 if (consensusMode != ConsensusMode::wrongLedger)
1174 {
1175 if (consensusMode == ConsensusMode::proposing)
1176 return "proposing";
1177
1178 if (mConsensus.validating())
1179 return "validating";
1180 }
1181 }
1182
1183 return states_[static_cast<std::size_t>(mode)];
1184}
1185
1186void
1187NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1188{
1189 if (isNeedNetworkLedger())
1190 {
1191 // Nothing we can do if we've never been in sync
1192 return;
1193 }
1194
1195 // Enforce Network bar for batch txn
1196 if (iTrans->isFlag(tfInnerBatchTxn) &&
1197 m_ledgerMaster.getValidatedRules().enabled(featureBatch))
1198 {
1199 JLOG(m_journal.error())
1200 << "Submitted transaction invalid: tfInnerBatchTxn flag present.";
1201 return;
1202 }
1203
1204 // this is an asynchronous interface
1205 auto const trans = sterilize(*iTrans);
1206
1207 auto const txid = trans->getTransactionID();
1208 auto const flags = app_.getHashRouter().getFlags(txid);
1209
1210 if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1211 {
1212 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1213 return;
1214 }
1215
1216 try
1217 {
1218 auto const [validity, reason] = checkValidity(
1219 app_.getHashRouter(),
1220 *trans,
1221 m_ledgerMaster.getValidatedRules(),
1222 app_.config());
1223
1224 if (validity != Validity::Valid)
1225 {
1226 JLOG(m_journal.warn())
1227 << "Submitted transaction invalid: " << reason;
1228 return;
1229 }
1230 }
1231 catch (std::exception const& ex)
1232 {
1233 JLOG(m_journal.warn())
1234 << "Exception checking transaction " << txid << ": " << ex.what();
1235
1236 return;
1237 }
1238
1239 std::string reason;
1240
1241 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1242
1243 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1244 auto t = tx;
1245 processTransaction(t, false, false, FailHard::no);
1246 });
1247}
1248
1249bool
1250NetworkOPsImp::preProcessTransaction(std::shared_ptr<Transaction>& transaction)
1251{
1252 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1253
1254 if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1255 {
1256 // cached bad
1257 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1258 transaction->setStatus(INVALID);
1259 transaction->setResult(temBAD_SIGNATURE);
1260 return false;
1261 }
1262
1263 auto const view = m_ledgerMaster.getCurrentLedger();
1264
1265 // This function is called by several different parts of the codebase
1266 // under no circumstances will we ever accept an inner txn within a batch
1267 // txn from the network.
1268 auto const sttx = *transaction->getSTransaction();
1269 if (sttx.isFlag(tfInnerBatchTxn) && view->rules().enabled(featureBatch))
1270 {
1271 transaction->setStatus(INVALID);
1272 transaction->setResult(temINVALID_FLAG);
1273 app_.getHashRouter().setFlags(
1274 transaction->getID(), HashRouterFlags::BAD);
1275 return false;
1276 }
1277
1278 // NOTE eahennis - I think this check is redundant,
1279 // but I'm not 100% sure yet.
1280 // If so, only cost is looking up HashRouter flags.
1281 auto const [validity, reason] =
1282 checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config());
1283 XRPL_ASSERT(
1284 validity == Validity::Valid,
1285 "ripple::NetworkOPsImp::processTransaction : valid validity");
1286
1287 // Not concerned with local checks at this point.
1288 if (validity == Validity::SigBad)
1289 {
1290 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1291 transaction->setStatus(INVALID);
1292 transaction->setResult(temBAD_SIGNATURE);
1293 app_.getHashRouter().setFlags(
1294 transaction->getID(), HashRouterFlags::BAD);
1295 return false;
1296 }
1297
1298 // canonicalize can change our pointer
1299 app_.getMasterTransaction().canonicalize(&transaction);
1300
1301 return true;
1302}
1303
1304void
1305NetworkOPsImp::processTransaction(
1306 std::shared_ptr<Transaction>& transaction,
1307 bool bUnlimited,
1308 bool bLocal,
1309 FailHard failType)
1310{
1311 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1312
1313 // preProcessTransaction can change our pointer
1314 if (!preProcessTransaction(transaction))
1315 return;
1316
1317 if (bLocal)
1318 doTransactionSync(transaction, bUnlimited, failType);
1319 else
1320 doTransactionAsync(transaction, bUnlimited, failType);
1321}
1322
1323void
1324NetworkOPsImp::doTransactionAsync(
1325 std::shared_ptr<Transaction> transaction,
1326 bool bUnlimited,
1327 FailHard failType)
1328{
1329 std::lock_guard lock(mMutex);
1330
1331 if (transaction->getApplying())
1332 return;
1333
1334 mTransactions.push_back(
1335 TransactionStatus(transaction, bUnlimited, false, failType));
1336 transaction->setApplying();
1337
1338 if (mDispatchState == DispatchState::none)
1339 {
1340 if (m_job_queue.addJob(
1341 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1342 {
1343 mDispatchState = DispatchState::scheduled;
1344 }
1345 }
1346}
1347
1348void
1349NetworkOPsImp::doTransactionSync(
1350 std::shared_ptr<Transaction> transaction,
1351 bool bUnlimited,
1352 FailHard failType)
1353{
1354 std::unique_lock<std::mutex> lock(mMutex);
1355
1356 if (!transaction->getApplying())
1357 {
1358 mTransactions.push_back(
1359 TransactionStatus(transaction, bUnlimited, true, failType));
1360 transaction->setApplying();
1361 }
1362
1363 doTransactionSyncBatch(
1364 lock, [&transaction](std::unique_lock<std::mutex> const&) {
1365 return transaction->getApplying();
1366 });
1367}
1368
1369void
1370NetworkOPsImp::doTransactionSyncBatch(
1372 std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback)
1373{
1374 do
1375 {
1376 if (mDispatchState == DispatchState::running)
1377 {
1378 // A batch processing job is already running, so wait.
1379 mCond.wait(lock);
1380 }
1381 else
1382 {
1383 apply(lock);
1384
1385 if (mTransactions.size())
1386 {
1387 // More transactions need to be applied, but by another job.
1388 if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
1389 transactionBatch();
1390 }))
1391 {
1392 mDispatchState = DispatchState::scheduled;
1393 }
1394 }
1395 }
1396 } while (retryCallback(lock));
1397}
1398
1399void
1400NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set)
1401{
1402 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXNSet");
1404 candidates.reserve(set.size());
1405 for (auto const& [_, tx] : set)
1406 {
1407 std::string reason;
1408 auto transaction = std::make_shared<Transaction>(tx, reason, app_);
1409
1410 if (transaction->getStatus() == INVALID)
1411 {
1412 if (!reason.empty())
1413 {
1414 JLOG(m_journal.trace())
1415 << "Exception checking transaction: " << reason;
1416 }
1417 app_.getHashRouter().setFlags(
1418 tx->getTransactionID(), HashRouterFlags::BAD);
1419 continue;
1420 }
1421
1422 // preProcessTransaction can change our pointer
1423 if (!preProcessTransaction(transaction))
1424 continue;
1425
1426 candidates.emplace_back(transaction);
1427 }
1428
1429 std::vector<TransactionStatus> transactions;
1430 transactions.reserve(candidates.size());
1431
1432 std::unique_lock lock(mMutex);
1433
1434 for (auto& transaction : candidates)
1435 {
1436 if (!transaction->getApplying())
1437 {
1438 transactions.emplace_back(transaction, false, false, FailHard::no);
1439 transaction->setApplying();
1440 }
1441 }
1442
1443 if (mTransactions.empty())
1444 mTransactions.swap(transactions);
1445 else
1446 {
1447 mTransactions.reserve(mTransactions.size() + transactions.size());
1448 for (auto& t : transactions)
1449 mTransactions.push_back(std::move(t));
1450 }
1451
1452 doTransactionSyncBatch(lock, [&](std::unique_lock<std::mutex> const&) {
1453 XRPL_ASSERT(
1454 lock.owns_lock(),
1455 "ripple::NetworkOPsImp::processTransactionSet has lock");
1456 return std::any_of(
1457 mTransactions.begin(), mTransactions.end(), [](auto const& t) {
1458 return t.transaction->getApplying();
1459 });
1460 });
1461}
1462
1463void
1464NetworkOPsImp::transactionBatch()
1465{
1466 std::unique_lock<std::mutex> lock(mMutex);
1467
1468 if (mDispatchState == DispatchState::running)
1469 return;
1470
1471 while (mTransactions.size())
1472 {
1473 apply(lock);
1474 }
1475}
1476
1477void
1478NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
1479{
1481 std::vector<TransactionStatus> transactions;
1482 mTransactions.swap(transactions);
1483 XRPL_ASSERT(
1484 !transactions.empty(),
1485 "ripple::NetworkOPsImp::apply : non-empty transactions");
1486 XRPL_ASSERT(
1487 mDispatchState != DispatchState::running,
1488 "ripple::NetworkOPsImp::apply : is not running");
1489
1490 mDispatchState = DispatchState::running;
1491
1492 batchLock.unlock();
1493
1494 {
1495 std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
1496 bool changed = false;
1497 {
1498 std::unique_lock ledgerLock{
1499 m_ledgerMaster.peekMutex(), std::defer_lock};
1500 std::lock(masterLock, ledgerLock);
1501
1502 app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
1503 for (TransactionStatus& e : transactions)
1504 {
1505 // we check before adding to the batch
1506 ApplyFlags flags = tapNONE;
1507 if (e.admin)
1508 flags |= tapUNLIMITED;
1509
1510 if (e.failType == FailHard::yes)
1511 flags |= tapFAIL_HARD;
1512
1513 auto const result = app_.getTxQ().apply(
1514 app_, view, e.transaction->getSTransaction(), flags, j);
1515 e.result = result.ter;
1516 e.applied = result.applied;
1517 changed = changed || result.applied;
1518 }
1519 return changed;
1520 });
1521 }
1522 if (changed)
1523 reportFeeChange();
1524
1525 std::optional<LedgerIndex> validatedLedgerIndex;
1526 if (auto const l = m_ledgerMaster.getValidatedLedger())
1527 validatedLedgerIndex = l->info().seq;
1528
1529 auto newOL = app_.openLedger().current();
1530 for (TransactionStatus& e : transactions)
1531 {
1532 e.transaction->clearSubmitResult();
1533
1534 if (e.applied)
1535 {
1536 pubProposedTransaction(
1537 newOL, e.transaction->getSTransaction(), e.result);
1538 e.transaction->setApplied();
1539 }
1540
1541 e.transaction->setResult(e.result);
1542
1543 if (isTemMalformed(e.result))
1544 app_.getHashRouter().setFlags(
1545 e.transaction->getID(), HashRouterFlags::BAD);
1546
1547#ifdef DEBUG
1548 if (e.result != tesSUCCESS)
1549 {
1550 std::string token, human;
1551
1552 if (transResultInfo(e.result, token, human))
1553 {
1554 JLOG(m_journal.info())
1555 << "TransactionResult: " << token << ": " << human;
1556 }
1557 }
1558#endif
1559
1560 bool addLocal = e.local;
1561
1562 if (e.result == tesSUCCESS)
1563 {
1564 JLOG(m_journal.debug())
1565 << "Transaction is now included in open ledger";
1566 e.transaction->setStatus(INCLUDED);
1567
1568 // Pop as many "reasonable" transactions for this account as
1569 // possible. "Reasonable" means they have sequential sequence
1570 // numbers, or use tickets.
1571 auto const& txCur = e.transaction->getSTransaction();
1572
1573 std::size_t count = 0;
1574 for (auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
1575 txNext && count < maxPoppedTransactions;
1576 txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
1577 {
1578 if (!batchLock.owns_lock())
1579 batchLock.lock();
1580 std::string reason;
1581 auto const trans = sterilize(*txNext);
1582 auto t = std::make_shared<Transaction>(trans, reason, app_);
1583 if (t->getApplying())
1584 break;
1585 submit_held.emplace_back(t, false, false, FailHard::no);
1586 t->setApplying();
1587 }
1588 if (batchLock.owns_lock())
1589 batchLock.unlock();
1590 }
1591 else if (e.result == tefPAST_SEQ)
1592 {
1593 // duplicate or conflict
1594 JLOG(m_journal.info()) << "Transaction is obsolete";
1595 e.transaction->setStatus(OBSOLETE);
1596 }
1597 else if (e.result == terQUEUED)
1598 {
1599 JLOG(m_journal.debug())
1600 << "Transaction is likely to claim a"
1601 << " fee, but is queued until fee drops";
1602
1603 e.transaction->setStatus(HELD);
1604 // Add to held transactions, because it could get
1605 // kicked out of the queue, and this will try to
1606 // put it back.
1607 m_ledgerMaster.addHeldTransaction(e.transaction);
1608 e.transaction->setQueued();
1609 e.transaction->setKept();
1610 }
1611 else if (
1612 isTerRetry(e.result) || isTelLocal(e.result) ||
1613 isTefFailure(e.result))
1614 {
1615 if (e.failType != FailHard::yes)
1616 {
1617 auto const lastLedgerSeq =
1618 e.transaction->getSTransaction()->at(
1619 ~sfLastLedgerSequence);
1620 auto const ledgersLeft = lastLedgerSeq
1621 ? *lastLedgerSeq -
1622 m_ledgerMaster.getCurrentLedgerIndex()
1624 // If any of these conditions are met, the transaction can
1625 // be held:
1626 // 1. It was submitted locally. (Note that this flag is only
1627 // true on the initial submission.)
1628 // 2. The transaction has a LastLedgerSequence, and the
1629 // LastLedgerSequence is fewer than LocalTxs::holdLedgers
1630 // (5) ledgers into the future. (Remember that an
1631 // unseated optional compares as less than all seated
1632 // values, so it has to be checked explicitly first.)
1633 // 3. The HashRouterFlags::BAD flag is not set on the txID.
1634 // (setFlags
1635 // checks before setting. If the flag is set, it returns
1636 // false, which means it's been held once without one of
1637 // the other conditions, so don't hold it again. Time's
1638 // up!)
1639 //
1640 if (e.local ||
1641 (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
1642 app_.getHashRouter().setFlags(
1643 e.transaction->getID(), HashRouterFlags::HELD))
1644 {
1645 // transaction should be held
1646 JLOG(m_journal.debug())
1647 << "Transaction should be held: " << e.result;
1648 e.transaction->setStatus(HELD);
1649 m_ledgerMaster.addHeldTransaction(e.transaction);
1650 e.transaction->setKept();
1651 }
1652 else
1653 JLOG(m_journal.debug())
1654 << "Not holding transaction "
1655 << e.transaction->getID() << ": "
1656 << (e.local ? "local" : "network") << ", "
1657 << "result: " << e.result << " ledgers left: "
1658 << (ledgersLeft ? to_string(*ledgersLeft)
1659 : "unspecified");
1660 }
1661 }
1662 else
1663 {
1664 JLOG(m_journal.debug())
1665 << "Status other than success " << e.result;
1666 e.transaction->setStatus(INVALID);
1667 }
1668
1669 auto const enforceFailHard =
1670 e.failType == FailHard::yes && !isTesSuccess(e.result);
1671
1672 if (addLocal && !enforceFailHard)
1673 {
1674 m_localTX->push_back(
1675 m_ledgerMaster.getCurrentLedgerIndex(),
1676 e.transaction->getSTransaction());
1677 e.transaction->setKept();
1678 }
1679
1680 if ((e.applied ||
1681 ((mMode != OperatingMode::FULL) &&
1682 (e.failType != FailHard::yes) && e.local) ||
1683 (e.result == terQUEUED)) &&
1684 !enforceFailHard)
1685 {
1686 auto const toSkip =
1687 app_.getHashRouter().shouldRelay(e.transaction->getID());
1688 if (auto const sttx = *(e.transaction->getSTransaction());
1689 toSkip &&
1690 // Skip relaying if it's an inner batch txn and batch
1691 // feature is enabled
1692 !(sttx.isFlag(tfInnerBatchTxn) &&
1693 newOL->rules().enabled(featureBatch)))
1694 {
1695 protocol::TMTransaction tx;
1696 Serializer s;
1697
1698 sttx.add(s);
1699 tx.set_rawtransaction(s.data(), s.size());
1700 tx.set_status(protocol::tsCURRENT);
1701 tx.set_receivetimestamp(
1702 app_.timeKeeper().now().time_since_epoch().count());
1703 tx.set_deferred(e.result == terQUEUED);
1704 // FIXME: This should be when we received it
1705 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1706 e.transaction->setBroadcast();
1707 }
1708 }
1709
1710 if (validatedLedgerIndex)
1711 {
1712 auto [fee, accountSeq, availableSeq] =
1713 app_.getTxQ().getTxRequiredFeeAndSeq(
1714 *newOL, e.transaction->getSTransaction());
1715 e.transaction->setCurrentLedgerState(
1716 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1717 }
1718 }
1719 }
1720
1721 batchLock.lock();
1722
1723 for (TransactionStatus& e : transactions)
1724 e.transaction->clearApplying();
1725
1726 if (!submit_held.empty())
1727 {
1728 if (mTransactions.empty())
1729 mTransactions.swap(submit_held);
1730 else
1731 {
1732 mTransactions.reserve(mTransactions.size() + submit_held.size());
1733 for (auto& e : submit_held)
1734 mTransactions.push_back(std::move(e));
1735 }
1736 }
1737
1738 mCond.notify_all();
1739
1740 mDispatchState = DispatchState::none;
1741}
1742
1743//
1744// Owner functions
1745//
1746
1748NetworkOPsImp::getOwnerInfo(
1750 AccountID const& account)
1751{
1752 Json::Value jvObjects(Json::objectValue);
1753 auto root = keylet::ownerDir(account);
1754 auto sleNode = lpLedger->read(keylet::page(root));
1755 if (sleNode)
1756 {
1757 std::uint64_t uNodeDir;
1758
1759 do
1760 {
1761 for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1762 {
1763 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1764 XRPL_ASSERT(
1765 sleCur,
1766 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1767
1768 switch (sleCur->getType())
1769 {
1770 case ltOFFER:
1771 if (!jvObjects.isMember(jss::offers))
1772 jvObjects[jss::offers] =
1774
1775 jvObjects[jss::offers].append(
1776 sleCur->getJson(JsonOptions::none));
1777 break;
1778
1779 case ltRIPPLE_STATE:
1780 if (!jvObjects.isMember(jss::ripple_lines))
1781 {
1782 jvObjects[jss::ripple_lines] =
1784 }
1785
1786 jvObjects[jss::ripple_lines].append(
1787 sleCur->getJson(JsonOptions::none));
1788 break;
1789
1790 case ltACCOUNT_ROOT:
1791 case ltDIR_NODE:
1792 default:
1793 UNREACHABLE(
1794 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1795 "type");
1796 break;
1797 }
1798 }
1799
1800 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1801
1802 if (uNodeDir)
1803 {
1804 sleNode = lpLedger->read(keylet::page(root, uNodeDir));
1805 XRPL_ASSERT(
1806 sleNode,
1807 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1808 }
1809 } while (uNodeDir);
1810 }
1811
1812 return jvObjects;
1813}
1814
1815//
1816// Other
1817//
1818
1819inline bool
1820NetworkOPsImp::isBlocked()
1821{
1822 return isAmendmentBlocked() || isUNLBlocked();
1823}
1824
1825inline bool
1826NetworkOPsImp::isAmendmentBlocked()
1827{
1828 return amendmentBlocked_;
1829}
1830
1831void
1832NetworkOPsImp::setAmendmentBlocked()
1833{
1834 amendmentBlocked_ = true;
1835 setMode(OperatingMode::CONNECTED);
1836}
1837
1838inline bool
1839NetworkOPsImp::isAmendmentWarned()
1840{
1841 return !amendmentBlocked_ && amendmentWarned_;
1842}
1843
1844inline void
1845NetworkOPsImp::setAmendmentWarned()
1846{
1847 amendmentWarned_ = true;
1848}
1849
1850inline void
1851NetworkOPsImp::clearAmendmentWarned()
1852{
1853 amendmentWarned_ = false;
1854}
1855
1856inline bool
1857NetworkOPsImp::isUNLBlocked()
1858{
1859 return unlBlocked_;
1860}
1861
1862void
1863NetworkOPsImp::setUNLBlocked()
1864{
1865 unlBlocked_ = true;
1866 setMode(OperatingMode::CONNECTED);
1867}
1868
1869inline void
1870NetworkOPsImp::clearUNLBlocked()
1871{
1872 unlBlocked_ = false;
1873}
1874
1875bool
1876NetworkOPsImp::checkLastClosedLedger(
1877 Overlay::PeerSequence const& peerList,
1878 uint256& networkClosed)
1879{
1880 // Returns true if there's an *abnormal* ledger issue, normal changing in
1881 // TRACKING mode should return false. Do we have sufficient validations for
1882 // our last closed ledger? Or do sufficient nodes agree? And do we have no
1883 // better ledger available? If so, we are either tracking or full.
1884
1885 JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";
1886
1887 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1888
1889 if (!ourClosed)
1890 return false;
1891
1892 uint256 closedLedger = ourClosed->info().hash;
1893 uint256 prevClosedLedger = ourClosed->info().parentHash;
1894 JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
1895 JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;
1896
1897 //-------------------------------------------------------------------------
1898 // Determine preferred last closed ledger
1899
1900 auto& validations = app_.getValidations();
1901 JLOG(m_journal.debug())
1902 << "ValidationTrie " << Json::Compact(validations.getJsonTrie());
1903
1904 // Will rely on peer LCL if no trusted validations exist
1906 peerCounts[closedLedger] = 0;
1907 if (mMode >= OperatingMode::TRACKING)
1908 peerCounts[closedLedger]++;
1909
1910 for (auto& peer : peerList)
1911 {
1912 uint256 peerLedger = peer->getClosedLedgerHash();
1913
1914 if (peerLedger.isNonZero())
1915 ++peerCounts[peerLedger];
1916 }
1917
1918 for (auto const& it : peerCounts)
1919 JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;
1920
1921 uint256 preferredLCL = validations.getPreferredLCL(
1922 RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
1923 m_ledgerMaster.getValidLedgerIndex(),
1924 peerCounts);
1925
1926 bool switchLedgers = preferredLCL != closedLedger;
1927 if (switchLedgers)
1928 closedLedger = preferredLCL;
1929 //-------------------------------------------------------------------------
1930 if (switchLedgers && (closedLedger == prevClosedLedger))
1931 {
1932 // don't switch to our own previous ledger
1933 JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
1934 networkClosed = ourClosed->info().hash;
1935 switchLedgers = false;
1936 }
1937 else
1938 networkClosed = closedLedger;
1939
1940 if (!switchLedgers)
1941 return false;
1942
1943 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1944
1945 if (!consensus)
1946 consensus = app_.getInboundLedgers().acquire(
1947 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1948
1949 if (consensus &&
1950 (!m_ledgerMaster.canBeCurrent(consensus) ||
1951 !m_ledgerMaster.isCompatible(
1952 *consensus, m_journal.debug(), "Not switching")))
1953 {
1954 // Don't switch to a ledger not on the validated chain
1955 // or with an invalid close time or sequence
1956 networkClosed = ourClosed->info().hash;
1957 return false;
1958 }
1959
1960 JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
1961 JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
1962 << getJson({*ourClosed, {}});
1963 JLOG(m_journal.info()) << "Net LCL " << closedLedger;
1964
1965 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1966 {
1967 setMode(OperatingMode::CONNECTED);
1968 }
1969
1970 if (consensus)
1971 {
1972 // FIXME: If this rewinds the ledger sequence, or has the same
1973 // sequence, we should update the status on any stored transactions
1974 // in the invalidated ledgers.
1975 switchLastClosedLedger(consensus);
1976 }
1977
1978 return true;
1979}
1980
1981void
1982NetworkOPsImp::switchLastClosedLedger(
1983 std::shared_ptr<Ledger const> const& newLCL)
1984{
1985 // set the newLCL as our last closed ledger -- this is abnormal code
1986 JLOG(m_journal.error())
1987 << "JUMP last closed ledger to " << newLCL->info().hash;
1988
1989 clearNeedNetworkLedger();
1990
1991 // Update fee computations.
1992 app_.getTxQ().processClosedLedger(app_, *newLCL, true);
1993
1994 // Caller must own master lock
1995 {
1996 // Apply tx in old open ledger to new
1997 // open ledger. Then apply local tx.
1998
1999 auto retries = m_localTX->getTxSet();
2000 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
2002 if (lastVal)
2003 rules = makeRulesGivenLedger(*lastVal, app_.config().features);
2004 else
2005 rules.emplace(app_.config().features);
2006 app_.openLedger().accept(
2007 app_,
2008 *rules,
2009 newLCL,
2010 OrderedTxs({}),
2011 false,
2012 retries,
2013 tapNONE,
2014 "jump",
2015 [&](OpenView& view, beast::Journal j) {
2016 // Stuff the ledger with transactions from the queue.
2017 return app_.getTxQ().accept(app_, view);
2018 });
2019 }
2020
2021 m_ledgerMaster.switchLCL(newLCL);
2022
2023 protocol::TMStatusChange s;
2024 s.set_newevent(protocol::neSWITCHED_LEDGER);
2025 s.set_ledgerseq(newLCL->info().seq);
2026 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
2027 s.set_ledgerhashprevious(
2028 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
2029 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
2030
2031 app_.overlay().foreach(
2032 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
2033}
2034
2035bool
2036NetworkOPsImp::beginConsensus(
2037 uint256 const& networkClosed,
2039{
2040 XRPL_ASSERT(
2041 networkClosed.isNonZero(),
2042 "ripple::NetworkOPsImp::beginConsensus : nonzero input");
2043
2044 auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
2045
2046 JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
2047 << " with LCL " << closingInfo.parentHash;
2048
2049 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
2050
2051 if (!prevLedger)
2052 {
2053 // this shouldn't happen unless we jump ledgers
2054 if (mMode == OperatingMode::FULL)
2055 {
2056 JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
2057 setMode(OperatingMode::TRACKING);
2058 CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
2059 }
2060
2061 CLOG(clog) << "beginConsensus no previous ledger. ";
2062 return false;
2063 }
2064
2065 XRPL_ASSERT(
2066 prevLedger->info().hash == closingInfo.parentHash,
2067 "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
2068 "parent");
2069 XRPL_ASSERT(
2070 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
2071 "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
2072 "hash");
2073
2074 if (prevLedger->rules().enabled(featureNegativeUNL))
2075 app_.validators().setNegativeUNL(prevLedger->negativeUNL());
2076 TrustChanges const changes = app_.validators().updateTrusted(
2077 app_.getValidations().getCurrentNodeIDs(),
2078 closingInfo.parentCloseTime,
2079 *this,
2080 app_.overlay(),
2081 app_.getHashRouter());
2082
2083 if (!changes.added.empty() || !changes.removed.empty())
2084 {
2085 app_.getValidations().trustChanged(changes.added, changes.removed);
2086 // Update the AmendmentTable so it tracks the current validators.
2087 app_.getAmendmentTable().trustChanged(
2088 app_.validators().getQuorumKeys().second);
2089 }
2090
2091 mConsensus.startRound(
2092 app_.timeKeeper().closeTime(),
2093 networkClosed,
2094 prevLedger,
2095 changes.removed,
2096 changes.added,
2097 clog);
2098
2099 ConsensusPhase const currPhase = mConsensus.phase();
2100 if (mLastConsensusPhase != currPhase)
2101 {
2102 reportConsensusStateChange(currPhase);
2103 mLastConsensusPhase = currPhase;
2104 }
2105
2106 JLOG(m_journal.debug()) << "Initiating consensus engine";
2107 return true;
2108}
2109
2110bool
2111NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
2112{
2113 auto const& peerKey = peerPos.publicKey();
2114 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
2115 {
2116 // Could indicate a operator misconfiguration where two nodes are
2117 // running with the same validator key configured, so this isn't fatal,
2118 // and it doesn't necessarily indicate peer misbehavior. But since this
2119 // is a trusted message, it could be a very big deal. Either way, we
2120 // don't want to relay the proposal. Note that the byzantine behavior
2121 // detection in handleNewValidation will notify other peers.
2122 //
2123 // Another, innocuous explanation is unusual message routing and delays,
2124 // causing this node to receive its own messages back.
2125 JLOG(m_journal.error())
2126 << "Received a proposal signed by MY KEY from a peer. This may "
2127 "indicate a misconfiguration where another node has the same "
2128 "validator key, or may be caused by unusual message routing and "
2129 "delays.";
2130 return false;
2131 }
2132
2133 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
2134}
2135
2136void
2137NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
2138{
2139 // We now have an additional transaction set
2140 // either created locally during the consensus process
2141 // or acquired from a peer
2142
2143 // Inform peers we have this set
2144 protocol::TMHaveTransactionSet msg;
2145 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
2146 msg.set_status(protocol::tsHAVE);
2147 app_.overlay().foreach(
2148 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
2149
2150 // We acquired it because consensus asked us to
2151 if (fromAcquire)
2152 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
2153}
2154
2155void
2156NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
2157{
2158 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
2159
2160 for (auto const& it : app_.overlay().getActivePeers())
2161 {
2162 if (it && (it->getClosedLedgerHash() == deadLedger))
2163 {
2164 JLOG(m_journal.trace()) << "Killing obsolete peer status";
2165 it->cycleStatus();
2166 }
2167 }
2168
2169 uint256 networkClosed;
2170 bool ledgerChange =
2171 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
2172
2173 if (networkClosed.isZero())
2174 {
2175 CLOG(clog) << "endConsensus last closed ledger is zero. ";
2176 return;
2177 }
2178
2179 // WRITEME: Unless we are in FULL and in the process of doing a consensus,
2180 // we must count how many nodes share our LCL, how many nodes disagree with
2181 // our LCL, and how many validations our LCL has. We also want to check
2182 // timing to make sure there shouldn't be a newer LCL. We need this
2183 // information to do the next three tests.
2184
2185 if (((mMode == OperatingMode::CONNECTED) ||
2186 (mMode == OperatingMode::SYNCING)) &&
2187 !ledgerChange)
2188 {
2189 // Count number of peers that agree with us and UNL nodes whose
2190 // validations we have for LCL. If the ledger is good enough, go to
2191 // TRACKING - TODO
2192 if (!needNetworkLedger_)
2193 setMode(OperatingMode::TRACKING);
2194 }
2195
2196 if (((mMode == OperatingMode::CONNECTED) ||
2197 (mMode == OperatingMode::TRACKING)) &&
2198 !ledgerChange)
2199 {
2200 // check if the ledger is good enough to go to FULL
2201 // Note: Do not go to FULL if we don't have the previous ledger
2202 // check if the ledger is bad enough to go to CONNECTE D -- TODO
2203 auto current = m_ledgerMaster.getCurrentLedger();
2204 if (app_.timeKeeper().now() < (current->info().parentCloseTime +
2205 2 * current->info().closeTimeResolution))
2206 {
2207 setMode(OperatingMode::FULL);
2208 }
2209 }
2210
2211 beginConsensus(networkClosed, clog);
2212}
2213
2214void
2215NetworkOPsImp::consensusViewChange()
2216{
2217 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2218 {
2219 setMode(OperatingMode::CONNECTED);
2220 }
2221}
2222
2223void
2224NetworkOPsImp::pubManifest(Manifest const& mo)
2225{
2226 // VFALCO consider std::shared_mutex
2227 std::lock_guard sl(mSubLock);
2228
2229 if (!mStreamMaps[sManifests].empty())
2230 {
2232
2233 jvObj[jss::type] = "manifestReceived";
2234 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
2235 if (mo.signingKey)
2236 jvObj[jss::signing_key] =
2237 toBase58(TokenType::NodePublic, *mo.signingKey);
2238 jvObj[jss::seq] = Json::UInt(mo.sequence);
2239 if (auto sig = mo.getSignature())
2240 jvObj[jss::signature] = strHex(*sig);
2241 jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
2242 if (!mo.domain.empty())
2243 jvObj[jss::domain] = mo.domain;
2244 jvObj[jss::manifest] = strHex(mo.serialized);
2245
2246 for (auto i = mStreamMaps[sManifests].begin();
2247 i != mStreamMaps[sManifests].end();)
2248 {
2249 if (auto p = i->second.lock())
2250 {
2251 p->send(jvObj, true);
2252 ++i;
2253 }
2254 else
2255 {
2256 i = mStreamMaps[sManifests].erase(i);
2257 }
2258 }
2259 }
2260}
2261
2262NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
2263 XRPAmount fee,
2264 TxQ::Metrics&& escalationMetrics,
2265 LoadFeeTrack const& loadFeeTrack)
2266 : loadFactorServer{loadFeeTrack.getLoadFactor()}
2267 , loadBaseServer{loadFeeTrack.getLoadBase()}
2268 , baseFee{fee}
2269 , em{std::move(escalationMetrics)}
2270{
2271}
2272
2273bool
2275 NetworkOPsImp::ServerFeeSummary const& b) const
2276{
2277 if (loadFactorServer != b.loadFactorServer ||
2278 loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
2279 em.has_value() != b.em.has_value())
2280 return true;
2281
2282 if (em && b.em)
2283 {
2284 return (
2285 em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
2286 em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
2287 em->referenceFeeLevel != b.em->referenceFeeLevel);
2288 }
2289
2290 return false;
2291}
2292
2293// Need to cap to uint64 to uint32 due to JSON limitations
2294static std::uint32_t
2296{
2298
2299 return std::min(max32, v);
2300};
2301
2302void
2304{
2305 // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
2306 // list into a local array while holding the lock then release
2307 // the lock and call send on everyone.
2308 //
2310
2311 if (!mStreamMaps[sServer].empty())
2312 {
2314
2316 app_.openLedger().current()->fees().base,
2318 app_.getFeeTrack()};
2319
2320 jvObj[jss::type] = "serverStatus";
2321 jvObj[jss::server_status] = strOperatingMode();
2322 jvObj[jss::load_base] = f.loadBaseServer;
2323 jvObj[jss::load_factor_server] = f.loadFactorServer;
2324 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2325
2326 if (f.em)
2327 {
2328 auto const loadFactor = std::max(
2329 safe_cast<std::uint64_t>(f.loadFactorServer),
2330 mulDiv(
2331 f.em->openLedgerFeeLevel,
2332 f.loadBaseServer,
2333 f.em->referenceFeeLevel)
2335
2336 jvObj[jss::load_factor] = trunc32(loadFactor);
2337 jvObj[jss::load_factor_fee_escalation] =
2338 f.em->openLedgerFeeLevel.jsonClipped();
2339 jvObj[jss::load_factor_fee_queue] =
2340 f.em->minProcessingFeeLevel.jsonClipped();
2341 jvObj[jss::load_factor_fee_reference] =
2342 f.em->referenceFeeLevel.jsonClipped();
2343 }
2344 else
2345 jvObj[jss::load_factor] = f.loadFactorServer;
2346
2347 mLastFeeSummary = f;
2348
2349 for (auto i = mStreamMaps[sServer].begin();
2350 i != mStreamMaps[sServer].end();)
2351 {
2352 InfoSub::pointer p = i->second.lock();
2353
2354 // VFALCO TODO research the possibility of using thread queues and
2355 // linearizing the deletion of subscribers with the
2356 // sending of JSON data.
2357 if (p)
2358 {
2359 p->send(jvObj, true);
2360 ++i;
2361 }
2362 else
2363 {
2364 i = mStreamMaps[sServer].erase(i);
2365 }
2366 }
2367 }
2368}
2369
2370void
2372{
2374
2375 auto& streamMap = mStreamMaps[sConsensusPhase];
2376 if (!streamMap.empty())
2377 {
2379 jvObj[jss::type] = "consensusPhase";
2380 jvObj[jss::consensus] = to_string(phase);
2381
2382 for (auto i = streamMap.begin(); i != streamMap.end();)
2383 {
2384 if (auto p = i->second.lock())
2385 {
2386 p->send(jvObj, true);
2387 ++i;
2388 }
2389 else
2390 {
2391 i = streamMap.erase(i);
2392 }
2393 }
2394 }
2395}
2396
2397void
2399{
2400 // VFALCO consider std::shared_mutex
2402
2403 if (!mStreamMaps[sValidations].empty())
2404 {
2406
2407 auto const signerPublic = val->getSignerPublic();
2408
2409 jvObj[jss::type] = "validationReceived";
2410 jvObj[jss::validation_public_key] =
2411 toBase58(TokenType::NodePublic, signerPublic);
2412 jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
2413 jvObj[jss::signature] = strHex(val->getSignature());
2414 jvObj[jss::full] = val->isFull();
2415 jvObj[jss::flags] = val->getFlags();
2416 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2417 jvObj[jss::data] = strHex(val->getSerializer().slice());
2418 jvObj[jss::network_id] = app_.config().NETWORK_ID;
2419
2420 if (auto version = (*val)[~sfServerVersion])
2421 jvObj[jss::server_version] = std::to_string(*version);
2422
2423 if (auto cookie = (*val)[~sfCookie])
2424 jvObj[jss::cookie] = std::to_string(*cookie);
2425
2426 if (auto hash = (*val)[~sfValidatedHash])
2427 jvObj[jss::validated_hash] = strHex(*hash);
2428
2429 auto const masterKey =
2430 app_.validatorManifests().getMasterKey(signerPublic);
2431
2432 if (masterKey != signerPublic)
2433 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
2434
2435 // NOTE *seq is a number, but old API versions used string. We replace
2436 // number with a string using MultiApiJson near end of this function
2437 if (auto const seq = (*val)[~sfLedgerSequence])
2438 jvObj[jss::ledger_index] = *seq;
2439
2440 if (val->isFieldPresent(sfAmendments))
2441 {
2442 jvObj[jss::amendments] = Json::Value(Json::arrayValue);
2443 for (auto const& amendment : val->getFieldV256(sfAmendments))
2444 jvObj[jss::amendments].append(to_string(amendment));
2445 }
2446
2447 if (auto const closeTime = (*val)[~sfCloseTime])
2448 jvObj[jss::close_time] = *closeTime;
2449
2450 if (auto const loadFee = (*val)[~sfLoadFee])
2451 jvObj[jss::load_fee] = *loadFee;
2452
2453 if (auto const baseFee = val->at(~sfBaseFee))
2454 jvObj[jss::base_fee] = static_cast<double>(*baseFee);
2455
2456 if (auto const reserveBase = val->at(~sfReserveBase))
2457 jvObj[jss::reserve_base] = *reserveBase;
2458
2459 if (auto const reserveInc = val->at(~sfReserveIncrement))
2460 jvObj[jss::reserve_inc] = *reserveInc;
2461
2462 // (The ~ operator converts the Proxy to a std::optional, which
2463 // simplifies later operations)
2464 if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2465 baseFeeXRP && baseFeeXRP->native())
2466 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2467
2468 if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2469 reserveBaseXRP && reserveBaseXRP->native())
2470 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2471
2472 if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2473 reserveIncXRP && reserveIncXRP->native())
2474 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2475
2476 // NOTE Use MultiApiJson to publish two slightly different JSON objects
2477 // for consumers supporting different API versions
2478 MultiApiJson multiObj{jvObj};
2479 multiObj.visit(
2480 RPC::apiVersion<1>, //
2481 [](Json::Value& jvTx) {
2482 // Type conversion for older API versions to string
2483 if (jvTx.isMember(jss::ledger_index))
2484 {
2485 jvTx[jss::ledger_index] =
2486 std::to_string(jvTx[jss::ledger_index].asUInt());
2487 }
2488 });
2489
2490 for (auto i = mStreamMaps[sValidations].begin();
2491 i != mStreamMaps[sValidations].end();)
2492 {
2493 if (auto p = i->second.lock())
2494 {
2495 multiObj.visit(
2496 p->getApiVersion(), //
2497 [&](Json::Value const& jv) { p->send(jv, true); });
2498 ++i;
2499 }
2500 else
2501 {
2502 i = mStreamMaps[sValidations].erase(i);
2503 }
2504 }
2505 }
2506}
2507
2508void
2510{
2512
2513 if (!mStreamMaps[sPeerStatus].empty())
2514 {
2515 Json::Value jvObj(func());
2516
2517 jvObj[jss::type] = "peerStatusChange";
2518
2519 for (auto i = mStreamMaps[sPeerStatus].begin();
2520 i != mStreamMaps[sPeerStatus].end();)
2521 {
2522 InfoSub::pointer p = i->second.lock();
2523
2524 if (p)
2525 {
2526 p->send(jvObj, true);
2527 ++i;
2528 }
2529 else
2530 {
2531 i = mStreamMaps[sPeerStatus].erase(i);
2532 }
2533 }
2534 }
2535}
2536
2537void
2539{
2540 using namespace std::chrono_literals;
2541 if (om == OperatingMode::CONNECTED)
2542 {
2545 }
2546 else if (om == OperatingMode::SYNCING)
2547 {
2550 }
2551
2552 if ((om > OperatingMode::CONNECTED) && isBlocked())
2554
2555 if (mMode == om)
2556 return;
2557
2558 mMode = om;
2559
2560 accounting_.mode(om);
2561
2562 JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
2563 pubServer();
2564}
2565
2566bool
2569 std::string const& source)
2570{
2571 JLOG(m_journal.trace())
2572 << "recvValidation " << val->getLedgerHash() << " from " << source;
2573
2575 BypassAccept bypassAccept = BypassAccept::no;
2576 try
2577 {
2578 if (pendingValidations_.contains(val->getLedgerHash()))
2579 bypassAccept = BypassAccept::yes;
2580 else
2581 pendingValidations_.insert(val->getLedgerHash());
2582 scope_unlock unlock(lock);
2583 handleNewValidation(app_, val, source, bypassAccept, m_journal);
2584 }
2585 catch (std::exception const& e)
2586 {
2587 JLOG(m_journal.warn())
2588 << "Exception thrown for handling new validation "
2589 << val->getLedgerHash() << ": " << e.what();
2590 }
2591 catch (...)
2592 {
2593 JLOG(m_journal.warn())
2594 << "Unknown exception thrown for handling new validation "
2595 << val->getLedgerHash();
2596 }
2597 if (bypassAccept == BypassAccept::no)
2598 {
2599 pendingValidations_.erase(val->getLedgerHash());
2600 }
2601 lock.unlock();
2602
2603 pubValidation(val);
2604
2605 JLOG(m_journal.debug()) << [this, &val]() -> auto {
2607 ss << "VALIDATION: " << val->render() << " master_key: ";
2608 auto master = app_.validators().getTrustedKey(val->getSignerPublic());
2609 if (master)
2610 {
2611 ss << toBase58(TokenType::NodePublic, *master);
2612 }
2613 else
2614 {
2615 ss << "none";
2616 }
2617 return ss.str();
2618 }();
2619
2620 // We will always relay trusted validations; if configured, we will
2621 // also relay all untrusted validations.
2622 return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2623}
2624
2627{
2628 return mConsensus.getJson(true);
2629}
2630
2632NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2633{
2635
2636 // System-level warnings
2637 {
2638 Json::Value warnings{Json::arrayValue};
2639 if (isAmendmentBlocked())
2640 {
2641 Json::Value& w = warnings.append(Json::objectValue);
2642 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2643 w[jss::message] =
2644 "This server is amendment blocked, and must be updated to be "
2645 "able to stay in sync with the network.";
2646 }
2647 if (isUNLBlocked())
2648 {
2649 Json::Value& w = warnings.append(Json::objectValue);
2650 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2651 w[jss::message] =
2652 "This server has an expired validator list. validators.txt "
2653 "may be incorrectly configured or some [validator_list_sites] "
2654 "may be unreachable.";
2655 }
2656 if (admin && isAmendmentWarned())
2657 {
2658 Json::Value& w = warnings.append(Json::objectValue);
2659 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2660 w[jss::message] =
2661 "One or more unsupported amendments have reached majority. "
2662 "Upgrade to the latest version before they are activated "
2663 "to avoid being amendment blocked.";
2664 if (auto const expected =
2666 {
2667 auto& d = w[jss::details] = Json::objectValue;
2668 d[jss::expected_date] = expected->time_since_epoch().count();
2669 d[jss::expected_date_UTC] = to_string(*expected);
2670 }
2671 }
2672
2673 if (warnings.size())
2674 info[jss::warnings] = std::move(warnings);
2675 }
2676
2677 // hostid: unique string describing the machine
2678 if (human)
2679 info[jss::hostid] = getHostId(admin);
2680
2681 // domain: if configured with a domain, report it:
2682 if (!app_.config().SERVER_DOMAIN.empty())
2683 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2684
2685 info[jss::build_version] = BuildInfo::getVersionString();
2686
2687 info[jss::server_state] = strOperatingMode(admin);
2688
2689 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2691
2693 info[jss::network_ledger] = "waiting";
2694
2695 info[jss::validation_quorum] =
2696 static_cast<Json::UInt>(app_.validators().quorum());
2697
2698 if (admin)
2699 {
2700 switch (app_.config().NODE_SIZE)
2701 {
2702 case 0:
2703 info[jss::node_size] = "tiny";
2704 break;
2705 case 1:
2706 info[jss::node_size] = "small";
2707 break;
2708 case 2:
2709 info[jss::node_size] = "medium";
2710 break;
2711 case 3:
2712 info[jss::node_size] = "large";
2713 break;
2714 case 4:
2715 info[jss::node_size] = "huge";
2716 break;
2717 }
2718
2719 auto when = app_.validators().expires();
2720
2721 if (!human)
2722 {
2723 if (when)
2724 info[jss::validator_list_expires] =
2725 safe_cast<Json::UInt>(when->time_since_epoch().count());
2726 else
2727 info[jss::validator_list_expires] = 0;
2728 }
2729 else
2730 {
2731 auto& x = (info[jss::validator_list] = Json::objectValue);
2732
2733 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2734
2735 if (when)
2736 {
2737 if (*when == TimeKeeper::time_point::max())
2738 {
2739 x[jss::expiration] = "never";
2740 x[jss::status] = "active";
2741 }
2742 else
2743 {
2744 x[jss::expiration] = to_string(*when);
2745
2746 if (*when > app_.timeKeeper().now())
2747 x[jss::status] = "active";
2748 else
2749 x[jss::status] = "expired";
2750 }
2751 }
2752 else
2753 {
2754 x[jss::status] = "unknown";
2755 x[jss::expiration] = "unknown";
2756 }
2757 }
2758
2759#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2760 {
2761 auto& x = (info[jss::git] = Json::objectValue);
2762#ifdef GIT_COMMIT_HASH
2763 x[jss::hash] = GIT_COMMIT_HASH;
2764#endif
2765#ifdef GIT_BRANCH
2766 x[jss::branch] = GIT_BRANCH;
2767#endif
2768 }
2769#endif
2770 }
2771 info[jss::io_latency_ms] =
2772 static_cast<Json::UInt>(app_.getIOLatency().count());
2773
2774 if (admin)
2775 {
2776 if (auto const localPubKey = app_.validators().localPublicKey();
2777 localPubKey && app_.getValidationPublicKey())
2778 {
2779 info[jss::pubkey_validator] =
2780 toBase58(TokenType::NodePublic, localPubKey.value());
2781 }
2782 else
2783 {
2784 info[jss::pubkey_validator] = "none";
2785 }
2786 }
2787
2788 if (counters)
2789 {
2790 info[jss::counters] = app_.getPerfLog().countersJson();
2791
2792 Json::Value nodestore(Json::objectValue);
2793 app_.getNodeStore().getCountsJson(nodestore);
2794 info[jss::counters][jss::nodestore] = nodestore;
2795 info[jss::current_activities] = app_.getPerfLog().currentJson();
2796 }
2797
2798 info[jss::pubkey_node] =
2800
2801 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2802
2804 info[jss::amendment_blocked] = true;
2805
2806 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2807
2808 if (fp != 0)
2809 info[jss::fetch_pack] = Json::UInt(fp);
2810
2811 info[jss::peers] = Json::UInt(app_.overlay().size());
2812
2813 Json::Value lastClose = Json::objectValue;
2814 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2815
2816 if (human)
2817 {
2818 lastClose[jss::converge_time_s] =
2820 }
2821 else
2822 {
2823 lastClose[jss::converge_time] =
2825 }
2826
2827 info[jss::last_close] = lastClose;
2828
2829 // info[jss::consensus] = mConsensus.getJson();
2830
2831 if (admin)
2832 info[jss::load] = m_job_queue.getJson();
2833
2834 if (auto const netid = app_.overlay().networkID())
2835 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2836
2837 auto const escalationMetrics =
2839
2840 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2841 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2842 /* Scale the escalated fee level to unitless "load factor".
2843 In practice, this just strips the units, but it will continue
2844 to work correctly if either base value ever changes. */
2845 auto const loadFactorFeeEscalation =
2846 mulDiv(
2847 escalationMetrics.openLedgerFeeLevel,
2848 loadBaseServer,
2849 escalationMetrics.referenceFeeLevel)
2851
2852 auto const loadFactor = std::max(
2853 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2854
2855 if (!human)
2856 {
2857 info[jss::load_base] = loadBaseServer;
2858 info[jss::load_factor] = trunc32(loadFactor);
2859 info[jss::load_factor_server] = loadFactorServer;
2860
2861 /* Json::Value doesn't support uint64, so clamp to max
2862 uint32 value. This is mostly theoretical, since there
2863 probably isn't enough extant XRP to drive the factor
2864 that high.
2865 */
2866 info[jss::load_factor_fee_escalation] =
2867 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2868 info[jss::load_factor_fee_queue] =
2869 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2870 info[jss::load_factor_fee_reference] =
2871 escalationMetrics.referenceFeeLevel.jsonClipped();
2872 }
2873 else
2874 {
2875 info[jss::load_factor] =
2876 static_cast<double>(loadFactor) / loadBaseServer;
2877
2878 if (loadFactorServer != loadFactor)
2879 info[jss::load_factor_server] =
2880 static_cast<double>(loadFactorServer) / loadBaseServer;
2881
2882 if (admin)
2883 {
2885 if (fee != loadBaseServer)
2886 info[jss::load_factor_local] =
2887 static_cast<double>(fee) / loadBaseServer;
2888 fee = app_.getFeeTrack().getRemoteFee();
2889 if (fee != loadBaseServer)
2890 info[jss::load_factor_net] =
2891 static_cast<double>(fee) / loadBaseServer;
2892 fee = app_.getFeeTrack().getClusterFee();
2893 if (fee != loadBaseServer)
2894 info[jss::load_factor_cluster] =
2895 static_cast<double>(fee) / loadBaseServer;
2896 }
2897 if (escalationMetrics.openLedgerFeeLevel !=
2898 escalationMetrics.referenceFeeLevel &&
2899 (admin || loadFactorFeeEscalation != loadFactor))
2900 info[jss::load_factor_fee_escalation] =
2901 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2902 escalationMetrics.referenceFeeLevel);
2903 if (escalationMetrics.minProcessingFeeLevel !=
2904 escalationMetrics.referenceFeeLevel)
2905 info[jss::load_factor_fee_queue] =
2906 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2907 escalationMetrics.referenceFeeLevel);
2908 }
2909
2910 bool valid = false;
2911 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2912
2913 if (lpClosed)
2914 valid = true;
2915 else
2916 lpClosed = m_ledgerMaster.getClosedLedger();
2917
2918 if (lpClosed)
2919 {
2920 XRPAmount const baseFee = lpClosed->fees().base;
2922 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2923 l[jss::hash] = to_string(lpClosed->info().hash);
2924
2925 if (!human)
2926 {
2927 l[jss::base_fee] = baseFee.jsonClipped();
2928 l[jss::reserve_base] =
2929 lpClosed->fees().accountReserve(0).jsonClipped();
2930 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2931 l[jss::close_time] = Json::Value::UInt(
2932 lpClosed->info().closeTime.time_since_epoch().count());
2933 }
2934 else
2935 {
2936 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2937 l[jss::reserve_base_xrp] =
2938 lpClosed->fees().accountReserve(0).decimalXRP();
2939 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2940
2941 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2942 std::abs(closeOffset.count()) >= 60)
2943 l[jss::close_time_offset] =
2944 static_cast<std::uint32_t>(closeOffset.count());
2945
2946 constexpr std::chrono::seconds highAgeThreshold{1000000};
2948 {
2949 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2950 l[jss::age] =
2951 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2952 }
2953 else
2954 {
2955 auto lCloseTime = lpClosed->info().closeTime;
2956 auto closeTime = app_.timeKeeper().closeTime();
2957 if (lCloseTime <= closeTime)
2958 {
2959 using namespace std::chrono_literals;
2960 auto age = closeTime - lCloseTime;
2961 l[jss::age] =
2962 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2963 }
2964 }
2965 }
2966
2967 if (valid)
2968 info[jss::validated_ledger] = l;
2969 else
2970 info[jss::closed_ledger] = l;
2971
2972 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2973 if (!lpPublished)
2974 info[jss::published_ledger] = "none";
2975 else if (lpPublished->info().seq != lpClosed->info().seq)
2976 info[jss::published_ledger] = lpPublished->info().seq;
2977 }
2978
2979 accounting_.json(info);
2980 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2981 info[jss::jq_trans_overflow] =
2983 info[jss::peer_disconnects] =
2985 info[jss::peer_disconnects_resources] =
2987
2988 // This array must be sorted in increasing order.
2989 static constexpr std::array<std::string_view, 7> protocols{
2990 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2991 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2992 {
2994 for (auto const& port : app_.getServerHandler().setup().ports)
2995 {
2996 // Don't publish admin ports for non-admin users
2997 if (!admin &&
2998 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2999 port.admin_user.empty() && port.admin_password.empty()))
3000 continue;
3003 std::begin(port.protocol),
3004 std::end(port.protocol),
3005 std::begin(protocols),
3006 std::end(protocols),
3007 std::back_inserter(proto));
3008 if (!proto.empty())
3009 {
3010 auto& jv = ports.append(Json::Value(Json::objectValue));
3011 jv[jss::port] = std::to_string(port.port);
3012 jv[jss::protocol] = Json::Value{Json::arrayValue};
3013 for (auto const& p : proto)
3014 jv[jss::protocol].append(p);
3015 }
3016 }
3017
3018 if (app_.config().exists(SECTION_PORT_GRPC))
3019 {
3020 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
3021 auto const optPort = grpcSection.get("port");
3022 if (optPort && grpcSection.get("ip"))
3023 {
3024 auto& jv = ports.append(Json::Value(Json::objectValue));
3025 jv[jss::port] = *optPort;
3026 jv[jss::protocol] = Json::Value{Json::arrayValue};
3027 jv[jss::protocol].append("grpc");
3028 }
3029 }
3030 info[jss::ports] = std::move(ports);
3031 }
3032
3033 return info;
3034}
3035
3036void
3041
3047
3048void
3050 std::shared_ptr<ReadView const> const& ledger,
3051 std::shared_ptr<STTx const> const& transaction,
3052 TER result)
3053{
3054 // never publish an inner txn inside a batch txn
3055 if (transaction->isFlag(tfInnerBatchTxn) &&
3056 ledger->rules().enabled(featureBatch))
3057 return;
3058
3059 MultiApiJson jvObj =
3060 transJson(transaction, result, false, ledger, std::nullopt);
3061
3062 {
3064
3065 auto it = mStreamMaps[sRTTransactions].begin();
3066 while (it != mStreamMaps[sRTTransactions].end())
3067 {
3068 InfoSub::pointer p = it->second.lock();
3069
3070 if (p)
3071 {
3072 jvObj.visit(
3073 p->getApiVersion(), //
3074 [&](Json::Value const& jv) { p->send(jv, true); });
3075 ++it;
3076 }
3077 else
3078 {
3079 it = mStreamMaps[sRTTransactions].erase(it);
3080 }
3081 }
3082 }
3083
3084 pubProposedAccountTransaction(ledger, transaction, result);
3085}
3086
3087void
3089{
3090 // Ledgers are published only when they acquire sufficient validations
3091 // Holes are filled across connection loss or other catastrophe
3092
3094 app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
3095 if (!alpAccepted)
3096 {
3097 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
3098 app_.getAcceptedLedgerCache().canonicalize_replace_client(
3099 lpAccepted->info().hash, alpAccepted);
3100 }
3101
3102 XRPL_ASSERT(
3103 alpAccepted->getLedger().get() == lpAccepted.get(),
3104 "ripple::NetworkOPsImp::pubLedger : accepted input");
3105
3106 {
3107 JLOG(m_journal.debug())
3108 << "Publishing ledger " << lpAccepted->info().seq << " "
3109 << lpAccepted->info().hash;
3110
3112
3113 if (!mStreamMaps[sLedger].empty())
3114 {
3116
3117 jvObj[jss::type] = "ledgerClosed";
3118 jvObj[jss::ledger_index] = lpAccepted->info().seq;
3119 jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
3120 jvObj[jss::ledger_time] = Json::Value::UInt(
3121 lpAccepted->info().closeTime.time_since_epoch().count());
3122
3123 jvObj[jss::network_id] = app_.config().NETWORK_ID;
3124
3125 if (!lpAccepted->rules().enabled(featureXRPFees))
3126 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3127 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
3128 jvObj[jss::reserve_base] =
3129 lpAccepted->fees().accountReserve(0).jsonClipped();
3130 jvObj[jss::reserve_inc] =
3131 lpAccepted->fees().increment.jsonClipped();
3132
3133 jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
3134
3136 {
3137 jvObj[jss::validated_ledgers] =
3139 }
3140
3141 auto it = mStreamMaps[sLedger].begin();
3142 while (it != mStreamMaps[sLedger].end())
3143 {
3144 InfoSub::pointer p = it->second.lock();
3145 if (p)
3146 {
3147 p->send(jvObj, true);
3148 ++it;
3149 }
3150 else
3151 it = mStreamMaps[sLedger].erase(it);
3152 }
3153 }
3154
3155 if (!mStreamMaps[sBookChanges].empty())
3156 {
3157 Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
3158
3159 auto it = mStreamMaps[sBookChanges].begin();
3160 while (it != mStreamMaps[sBookChanges].end())
3161 {
3162 InfoSub::pointer p = it->second.lock();
3163 if (p)
3164 {
3165 p->send(jvObj, true);
3166 ++it;
3167 }
3168 else
3169 it = mStreamMaps[sBookChanges].erase(it);
3170 }
3171 }
3172
3173 {
3174 static bool firstTime = true;
3175 if (firstTime)
3176 {
3177 // First validated ledger, start delayed SubAccountHistory
3178 firstTime = false;
3179 for (auto& outer : mSubAccountHistory)
3180 {
3181 for (auto& inner : outer.second)
3182 {
3183 auto& subInfo = inner.second;
3184 if (subInfo.index_->separationLedgerSeq_ == 0)
3185 {
3187 alpAccepted->getLedger(), subInfo);
3188 }
3189 }
3190 }
3191 }
3192 }
3193 }
3194
3195 // Don't lock since pubAcceptedTransaction is locking.
3196 for (auto const& accTx : *alpAccepted)
3197 {
3198 JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
3200 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3201 }
3202}
3203
3204void
3206{
3208 app_.openLedger().current()->fees().base,
3210 app_.getFeeTrack()};
3211
3212 // only schedule the job if something has changed
3213 if (f != mLastFeeSummary)
3214 {
3216 jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
3217 pubServer();
3218 });
3219 }
3220}
3221
3222void
3224{
3227 "reportConsensusStateChange->pubConsensus",
3228 [this, phase]() { pubConsensus(phase); });
3229}
3230
3231inline void
3233{
3234 m_localTX->sweep(view);
3235}
3236inline std::size_t
3238{
3239 return m_localTX->size();
3240}
3241
3242// This routine should only be used to publish accepted or validated
3243// transactions.
3246 std::shared_ptr<STTx const> const& transaction,
3247 TER result,
3248 bool validated,
3249 std::shared_ptr<ReadView const> const& ledger,
3251{
3253 std::string sToken;
3254 std::string sHuman;
3255
3256 transResultInfo(result, sToken, sHuman);
3257
3258 jvObj[jss::type] = "transaction";
3259 // NOTE jvObj is not a finished object for either API version. After
3260 // it's populated, we need to finish it for a specific API version. This is
3261 // done in a loop, near the end of this function.
3262 jvObj[jss::transaction] =
3263 transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3264
3265 if (meta)
3266 {
3267 jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
3269 jvObj[jss::meta], *ledger, transaction, meta->get());
3270 RPC::insertNFTSyntheticInJson(jvObj, transaction, meta->get());
3272 jvObj[jss::meta], transaction, meta->get());
3273 }
3274
3275 // add CTID where the needed data for it exists
3276 if (auto const& lookup = ledger->txRead(transaction->getTransactionID());
3277 lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3278 {
3279 uint32_t const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
3280 uint32_t netID = app_.config().NETWORK_ID;
3281 if (transaction->isFieldPresent(sfNetworkID))
3282 netID = transaction->getFieldU32(sfNetworkID);
3283
3285 RPC::encodeCTID(ledger->info().seq, txnSeq, netID);
3286 ctid)
3287 jvObj[jss::ctid] = *ctid;
3288 }
3289 if (!ledger->open())
3290 jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3291
3292 if (validated)
3293 {
3294 jvObj[jss::ledger_index] = ledger->info().seq;
3295 jvObj[jss::transaction][jss::date] =
3296 ledger->info().closeTime.time_since_epoch().count();
3297 jvObj[jss::validated] = true;
3298 jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3299
3300 // WRITEME: Put the account next seq here
3301 }
3302 else
3303 {
3304 jvObj[jss::validated] = false;
3305 jvObj[jss::ledger_current_index] = ledger->info().seq;
3306 }
3307
3308 jvObj[jss::status] = validated ? "closed" : "proposed";
3309 jvObj[jss::engine_result] = sToken;
3310 jvObj[jss::engine_result_code] = result;
3311 jvObj[jss::engine_result_message] = sHuman;
3312
3313 if (transaction->getTxnType() == ttOFFER_CREATE)
3314 {
3315 auto const account = transaction->getAccountID(sfAccount);
3316 auto const amount = transaction->getFieldAmount(sfTakerGets);
3317
3318 // If the offer create is not self funded then add the owner balance
3319 if (account != amount.issue().account)
3320 {
3321 auto const ownerFunds = accountFunds(
3322 *ledger,
3323 account,
3324 amount,
3326 app_.journal("View"));
3327 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3328 }
3329 }
3330
3331 std::string const hash = to_string(transaction->getTransactionID());
3332 MultiApiJson multiObj{jvObj};
3334 multiObj.visit(), //
3335 [&]<unsigned Version>(
3337 RPC::insertDeliverMax(
3338 jvTx[jss::transaction], transaction->getTxnType(), Version);
3339
3340 if constexpr (Version > 1)
3341 {
3342 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3343 jvTx[jss::hash] = hash;
3344 }
3345 else
3346 {
3347 jvTx[jss::transaction][jss::hash] = hash;
3348 }
3349 });
3350
3351 return multiObj;
3352}
3353
3354void
3356 std::shared_ptr<ReadView const> const& ledger,
3357 AcceptedLedgerTx const& transaction,
3358 bool last)
3359{
3360 auto const& stTxn = transaction.getTxn();
3361
3362 // Create two different Json objects, for different API versions
3363 auto const metaRef = std::ref(transaction.getMeta());
3364 auto const trResult = transaction.getResult();
3365 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3366
3367 {
3369
3370 auto it = mStreamMaps[sTransactions].begin();
3371 while (it != mStreamMaps[sTransactions].end())
3372 {
3373 InfoSub::pointer p = it->second.lock();
3374
3375 if (p)
3376 {
3377 jvObj.visit(
3378 p->getApiVersion(), //
3379 [&](Json::Value const& jv) { p->send(jv, true); });
3380 ++it;
3381 }
3382 else
3383 it = mStreamMaps[sTransactions].erase(it);
3384 }
3385
3386 it = mStreamMaps[sRTTransactions].begin();
3387
3388 while (it != mStreamMaps[sRTTransactions].end())
3389 {
3390 InfoSub::pointer p = it->second.lock();
3391
3392 if (p)
3393 {
3394 jvObj.visit(
3395 p->getApiVersion(), //
3396 [&](Json::Value const& jv) { p->send(jv, true); });
3397 ++it;
3398 }
3399 else
3400 it = mStreamMaps[sRTTransactions].erase(it);
3401 }
3402 }
3403
3404 if (transaction.getResult() == tesSUCCESS)
3405 app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3406
3407 pubAccountTransaction(ledger, transaction, last);
3408}
3409
3410void
3412 std::shared_ptr<ReadView const> const& ledger,
3413 AcceptedLedgerTx const& transaction,
3414 bool last)
3415{
3417 int iProposed = 0;
3418 int iAccepted = 0;
3419
3420 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3421 auto const currLedgerSeq = ledger->seq();
3422 {
3424
3425 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3427 {
3428 for (auto const& affectedAccount : transaction.getAffected())
3429 {
3430 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3431 simiIt != mSubRTAccount.end())
3432 {
3433 auto it = simiIt->second.begin();
3434
3435 while (it != simiIt->second.end())
3436 {
3437 InfoSub::pointer p = it->second.lock();
3438
3439 if (p)
3440 {
3441 notify.insert(p);
3442 ++it;
3443 ++iProposed;
3444 }
3445 else
3446 it = simiIt->second.erase(it);
3447 }
3448 }
3449
3450 if (auto simiIt = mSubAccount.find(affectedAccount);
3451 simiIt != mSubAccount.end())
3452 {
3453 auto it = simiIt->second.begin();
3454 while (it != simiIt->second.end())
3455 {
3456 InfoSub::pointer p = it->second.lock();
3457
3458 if (p)
3459 {
3460 notify.insert(p);
3461 ++it;
3462 ++iAccepted;
3463 }
3464 else
3465 it = simiIt->second.erase(it);
3466 }
3467 }
3468
3469 if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3470 histoIt != mSubAccountHistory.end())
3471 {
3472 auto& subs = histoIt->second;
3473 auto it = subs.begin();
3474 while (it != subs.end())
3475 {
3476 SubAccountHistoryInfoWeak const& info = it->second;
3477 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3478 {
3479 ++it;
3480 continue;
3481 }
3482
3483 if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3484 {
3485 accountHistoryNotify.emplace_back(
3486 SubAccountHistoryInfo{isSptr, info.index_});
3487 ++it;
3488 }
3489 else
3490 {
3491 it = subs.erase(it);
3492 }
3493 }
3494 if (subs.empty())
3495 mSubAccountHistory.erase(histoIt);
3496 }
3497 }
3498 }
3499 }
3500
3501 JLOG(m_journal.trace())
3502 << "pubAccountTransaction: "
3503 << "proposed=" << iProposed << ", accepted=" << iAccepted;
3504
3505 if (!notify.empty() || !accountHistoryNotify.empty())
3506 {
3507 auto const& stTxn = transaction.getTxn();
3508
3509 // Create two different Json objects, for different API versions
3510 auto const metaRef = std::ref(transaction.getMeta());
3511 auto const trResult = transaction.getResult();
3512 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3513
3514 for (InfoSub::ref isrListener : notify)
3515 {
3516 jvObj.visit(
3517 isrListener->getApiVersion(), //
3518 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3519 }
3520
3521 if (last)
3522 jvObj.set(jss::account_history_boundary, true);
3523
3524 XRPL_ASSERT(
3525 jvObj.isMember(jss::account_history_tx_stream) ==
3527 "ripple::NetworkOPsImp::pubAccountTransaction : "
3528 "account_history_tx_stream not set");
3529 for (auto& info : accountHistoryNotify)
3530 {
3531 auto& index = info.index_;
3532 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3533 jvObj.set(jss::account_history_tx_first, true);
3534
3535 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3536
3537 jvObj.visit(
3538 info.sink_->getApiVersion(), //
3539 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3540 }
3541 }
3542}
3543
3544void
3546 std::shared_ptr<ReadView const> const& ledger,
3548 TER result)
3549{
3551 int iProposed = 0;
3552
3553 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3554
3555 {
3557
3558 if (mSubRTAccount.empty())
3559 return;
3560
3561 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3563 {
3564 for (auto const& affectedAccount : tx->getMentionedAccounts())
3565 {
3566 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3567 simiIt != mSubRTAccount.end())
3568 {
3569 auto it = simiIt->second.begin();
3570
3571 while (it != simiIt->second.end())
3572 {
3573 InfoSub::pointer p = it->second.lock();
3574
3575 if (p)
3576 {
3577 notify.insert(p);
3578 ++it;
3579 ++iProposed;
3580 }
3581 else
3582 it = simiIt->second.erase(it);
3583 }
3584 }
3585 }
3586 }
3587 }
3588
3589 JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3590
3591 if (!notify.empty() || !accountHistoryNotify.empty())
3592 {
3593 // Create two different Json objects, for different API versions
3594 MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3595
3596 for (InfoSub::ref isrListener : notify)
3597 jvObj.visit(
3598 isrListener->getApiVersion(), //
3599 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3600
3601 XRPL_ASSERT(
3602 jvObj.isMember(jss::account_history_tx_stream) ==
3604 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3605 "account_history_tx_stream not set");
3606 for (auto& info : accountHistoryNotify)
3607 {
3608 auto& index = info.index_;
3609 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3610 jvObj.set(jss::account_history_tx_first, true);
3611 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3612 jvObj.visit(
3613 info.sink_->getApiVersion(), //
3614 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3615 }
3616 }
3617}
3618
3619//
3620// Monitoring
3621//
3622
3623void
3625 InfoSub::ref isrListener,
3626 hash_set<AccountID> const& vnaAccountIDs,
3627 bool rt)
3628{
3629 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3630
3631 for (auto const& naAccountID : vnaAccountIDs)
3632 {
3633 JLOG(m_journal.trace())
3634 << "subAccount: account: " << toBase58(naAccountID);
3635
3636 isrListener->insertSubAccountInfo(naAccountID, rt);
3637 }
3638
3640
3641 for (auto const& naAccountID : vnaAccountIDs)
3642 {
3643 auto simIterator = subMap.find(naAccountID);
3644 if (simIterator == subMap.end())
3645 {
3646 // Not found, note that account has a new single listner.
3647 SubMapType usisElement;
3648 usisElement[isrListener->getSeq()] = isrListener;
3649 // VFALCO NOTE This is making a needless copy of naAccountID
3650 subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3651 }
3652 else
3653 {
3654 // Found, note that the account has another listener.
3655 simIterator->second[isrListener->getSeq()] = isrListener;
3656 }
3657 }
3658}
3659
3660void
3662 InfoSub::ref isrListener,
3663 hash_set<AccountID> const& vnaAccountIDs,
3664 bool rt)
3665{
3666 for (auto const& naAccountID : vnaAccountIDs)
3667 {
3668 // Remove from the InfoSub
3669 isrListener->deleteSubAccountInfo(naAccountID, rt);
3670 }
3671
3672 // Remove from the server
3673 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3674}
3675
3676void
3678 std::uint64_t uSeq,
3679 hash_set<AccountID> const& vnaAccountIDs,
3680 bool rt)
3681{
3683
3684 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3685
3686 for (auto const& naAccountID : vnaAccountIDs)
3687 {
3688 auto simIterator = subMap.find(naAccountID);
3689
3690 if (simIterator != subMap.end())
3691 {
3692 // Found
3693 simIterator->second.erase(uSeq);
3694
3695 if (simIterator->second.empty())
3696 {
3697 // Don't need hash entry.
3698 subMap.erase(simIterator);
3699 }
3700 }
3701 }
3702}
3703
3704void
3706{
3707 enum DatabaseType { Sqlite, None };
3708 static auto const databaseType = [&]() -> DatabaseType {
3709 // Use a dynamic_cast to return DatabaseType::None
3710 // on failure.
3711 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3712 {
3713 return DatabaseType::Sqlite;
3714 }
3715 return DatabaseType::None;
3716 }();
3717
3718 if (databaseType == DatabaseType::None)
3719 {
3720 JLOG(m_journal.error())
3721 << "AccountHistory job for account "
3722 << toBase58(subInfo.index_->accountId_) << " no database";
3723 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3724 {
3725 sptr->send(rpcError(rpcINTERNAL), true);
3726 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3727 }
3728 return;
3729 }
3730
3733 "AccountHistoryTxStream",
3734 [this, dbType = databaseType, subInfo]() {
3735 auto const& accountId = subInfo.index_->accountId_;
3736 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3737 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3738
3739 JLOG(m_journal.trace())
3740 << "AccountHistory job for account " << toBase58(accountId)
3741 << " started. lastLedgerSeq=" << lastLedgerSeq;
3742
3743 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3744 std::shared_ptr<TxMeta> const& meta) -> bool {
3745 /*
3746 * genesis account: first tx is the one with seq 1
3747 * other account: first tx is the one created the account
3748 */
3749 if (accountId == genesisAccountId)
3750 {
3751 auto stx = tx->getSTransaction();
3752 if (stx->getAccountID(sfAccount) == accountId &&
3753 stx->getSeqValue() == 1)
3754 return true;
3755 }
3756
3757 for (auto& node : meta->getNodes())
3758 {
3759 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3760 continue;
3761
3762 if (node.isFieldPresent(sfNewFields))
3763 {
3764 if (auto inner = dynamic_cast<STObject const*>(
3765 node.peekAtPField(sfNewFields));
3766 inner)
3767 {
3768 if (inner->isFieldPresent(sfAccount) &&
3769 inner->getAccountID(sfAccount) == accountId)
3770 {
3771 return true;
3772 }
3773 }
3774 }
3775 }
3776
3777 return false;
3778 };
3779
3780 auto send = [&](Json::Value const& jvObj,
3781 bool unsubscribe) -> bool {
3782 if (auto sptr = subInfo.sinkWptr_.lock())
3783 {
3784 sptr->send(jvObj, true);
3785 if (unsubscribe)
3786 unsubAccountHistory(sptr, accountId, false);
3787 return true;
3788 }
3789
3790 return false;
3791 };
3792
3793 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3794 bool unsubscribe) -> bool {
3795 if (auto sptr = subInfo.sinkWptr_.lock())
3796 {
3797 jvObj.visit(
3798 sptr->getApiVersion(), //
3799 [&](Json::Value const& jv) { sptr->send(jv, true); });
3800
3801 if (unsubscribe)
3802 unsubAccountHistory(sptr, accountId, false);
3803 return true;
3804 }
3805
3806 return false;
3807 };
3808
3809 auto getMoreTxns =
3810 [&](std::uint32_t minLedger,
3811 std::uint32_t maxLedger,
3816 switch (dbType)
3817 {
3818 case Sqlite: {
3819 auto db = static_cast<SQLiteDatabase*>(
3822 accountId, minLedger, maxLedger, marker, 0, true};
3823 return db->newestAccountTxPage(options);
3824 }
3825 default: {
3826 UNREACHABLE(
3827 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3828 "getMoreTxns : invalid database type");
3829 return {};
3830 }
3831 }
3832 };
3833
3834 /*
3835 * search backward until the genesis ledger or asked to stop
3836 */
3837 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3838 {
3839 int feeChargeCount = 0;
3840 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3841 {
3842 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3843 ++feeChargeCount;
3844 }
3845 else
3846 {
3847 JLOG(m_journal.trace())
3848 << "AccountHistory job for account "
3849 << toBase58(accountId) << " no InfoSub. Fee charged "
3850 << feeChargeCount << " times.";
3851 return;
3852 }
3853
3854 // try to search in 1024 ledgers till reaching genesis ledgers
3855 auto startLedgerSeq =
3856 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3857 JLOG(m_journal.trace())
3858 << "AccountHistory job for account " << toBase58(accountId)
3859 << ", working on ledger range [" << startLedgerSeq << ","
3860 << lastLedgerSeq << "]";
3861
3862 auto haveRange = [&]() -> bool {
3863 std::uint32_t validatedMin = UINT_MAX;
3864 std::uint32_t validatedMax = 0;
3865 auto haveSomeValidatedLedgers =
3867 validatedMin, validatedMax);
3868
3869 return haveSomeValidatedLedgers &&
3870 validatedMin <= startLedgerSeq &&
3871 lastLedgerSeq <= validatedMax;
3872 }();
3873
3874 if (!haveRange)
3875 {
3876 JLOG(m_journal.debug())
3877 << "AccountHistory reschedule job for account "
3878 << toBase58(accountId) << ", incomplete ledger range ["
3879 << startLedgerSeq << "," << lastLedgerSeq << "]";
3881 return;
3882 }
3883
3885 while (!subInfo.index_->stopHistorical_)
3886 {
3887 auto dbResult =
3888 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3889 if (!dbResult)
3890 {
3891 JLOG(m_journal.debug())
3892 << "AccountHistory job for account "
3893 << toBase58(accountId) << " getMoreTxns failed.";
3894 send(rpcError(rpcINTERNAL), true);
3895 return;
3896 }
3897
3898 auto const& txns = dbResult->first;
3899 marker = dbResult->second;
3900 size_t num_txns = txns.size();
3901 for (size_t i = 0; i < num_txns; ++i)
3902 {
3903 auto const& [tx, meta] = txns[i];
3904
3905 if (!tx || !meta)
3906 {
3907 JLOG(m_journal.debug())
3908 << "AccountHistory job for account "
3909 << toBase58(accountId) << " empty tx or meta.";
3910 send(rpcError(rpcINTERNAL), true);
3911 return;
3912 }
3913 auto curTxLedger =
3915 tx->getLedger());
3916 if (!curTxLedger)
3917 {
3918 JLOG(m_journal.debug())
3919 << "AccountHistory job for account "
3920 << toBase58(accountId) << " no ledger.";
3921 send(rpcError(rpcINTERNAL), true);
3922 return;
3923 }
3925 tx->getSTransaction();
3926 if (!stTxn)
3927 {
3928 JLOG(m_journal.debug())
3929 << "AccountHistory job for account "
3930 << toBase58(accountId)
3931 << " getSTransaction failed.";
3932 send(rpcError(rpcINTERNAL), true);
3933 return;
3934 }
3935
3936 auto const mRef = std::ref(*meta);
3937 auto const trR = meta->getResultTER();
3938 MultiApiJson jvTx =
3939 transJson(stTxn, trR, true, curTxLedger, mRef);
3940
3941 jvTx.set(
3942 jss::account_history_tx_index, txHistoryIndex--);
3943 if (i + 1 == num_txns ||
3944 txns[i + 1].first->getLedger() != tx->getLedger())
3945 jvTx.set(jss::account_history_boundary, true);
3946
3947 if (isFirstTx(tx, meta))
3948 {
3949 jvTx.set(jss::account_history_tx_first, true);
3950 sendMultiApiJson(jvTx, false);
3951
3952 JLOG(m_journal.trace())
3953 << "AccountHistory job for account "
3954 << toBase58(accountId)
3955 << " done, found last tx.";
3956 return;
3957 }
3958 else
3959 {
3960 sendMultiApiJson(jvTx, false);
3961 }
3962 }
3963
3964 if (marker)
3965 {
3966 JLOG(m_journal.trace())
3967 << "AccountHistory job for account "
3968 << toBase58(accountId)
3969 << " paging, marker=" << marker->ledgerSeq << ":"
3970 << marker->txnSeq;
3971 }
3972 else
3973 {
3974 break;
3975 }
3976 }
3977
3978 if (!subInfo.index_->stopHistorical_)
3979 {
3980 lastLedgerSeq = startLedgerSeq - 1;
3981 if (lastLedgerSeq <= 1)
3982 {
3983 JLOG(m_journal.trace())
3984 << "AccountHistory job for account "
3985 << toBase58(accountId)
3986 << " done, reached genesis ledger.";
3987 return;
3988 }
3989 }
3990 }
3991 });
3992}
3993
3994void
3996 std::shared_ptr<ReadView const> const& ledger,
3998{
3999 subInfo.index_->separationLedgerSeq_ = ledger->seq();
4000 auto const& accountId = subInfo.index_->accountId_;
4001 auto const accountKeylet = keylet::account(accountId);
4002 if (!ledger->exists(accountKeylet))
4003 {
4004 JLOG(m_journal.debug())
4005 << "subAccountHistoryStart, no account " << toBase58(accountId)
4006 << ", no need to add AccountHistory job.";
4007 return;
4008 }
4009 if (accountId == genesisAccountId)
4010 {
4011 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
4012 {
4013 if (sleAcct->getFieldU32(sfSequence) == 1)
4014 {
4015 JLOG(m_journal.debug())
4016 << "subAccountHistoryStart, genesis account "
4017 << toBase58(accountId)
4018 << " does not have tx, no need to add AccountHistory job.";
4019 return;
4020 }
4021 }
4022 else
4023 {
4024 UNREACHABLE(
4025 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
4026 "access genesis account");
4027 return;
4028 }
4029 }
4030 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
4031 subInfo.index_->haveHistorical_ = true;
4032
4033 JLOG(m_journal.debug())
4034 << "subAccountHistoryStart, add AccountHistory job: accountId="
4035 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
4036
4037 addAccountHistoryJob(subInfo);
4038}
4039
4042 InfoSub::ref isrListener,
4043 AccountID const& accountId)
4044{
4045 if (!isrListener->insertSubAccountHistory(accountId))
4046 {
4047 JLOG(m_journal.debug())
4048 << "subAccountHistory, already subscribed to account "
4049 << toBase58(accountId);
4050 return rpcINVALID_PARAMS;
4051 }
4052
4055 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
4056 auto simIterator = mSubAccountHistory.find(accountId);
4057 if (simIterator == mSubAccountHistory.end())
4058 {
4060 inner.emplace(isrListener->getSeq(), ahi);
4062 simIterator, std::make_pair(accountId, inner));
4063 }
4064 else
4065 {
4066 simIterator->second.emplace(isrListener->getSeq(), ahi);
4067 }
4068
4069 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
4070 if (ledger)
4071 {
4072 subAccountHistoryStart(ledger, ahi);
4073 }
4074 else
4075 {
4076 // The node does not have validated ledgers, so wait for
4077 // one before start streaming.
4078 // In this case, the subscription is also considered successful.
4079 JLOG(m_journal.debug())
4080 << "subAccountHistory, no validated ledger yet, delay start";
4081 }
4082
4083 return rpcSUCCESS;
4084}
4085
4086void
4088 InfoSub::ref isrListener,
4089 AccountID const& account,
4090 bool historyOnly)
4091{
4092 if (!historyOnly)
4093 isrListener->deleteSubAccountHistory(account);
4094 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
4095}
4096
4097void
4099 std::uint64_t seq,
4100 AccountID const& account,
4101 bool historyOnly)
4102{
4104 auto simIterator = mSubAccountHistory.find(account);
4105 if (simIterator != mSubAccountHistory.end())
4106 {
4107 auto& subInfoMap = simIterator->second;
4108 auto subInfoIter = subInfoMap.find(seq);
4109 if (subInfoIter != subInfoMap.end())
4110 {
4111 subInfoIter->second.index_->stopHistorical_ = true;
4112 }
4113
4114 if (!historyOnly)
4115 {
4116 simIterator->second.erase(seq);
4117 if (simIterator->second.empty())
4118 {
4119 mSubAccountHistory.erase(simIterator);
4120 }
4121 }
4122 JLOG(m_journal.debug())
4123 << "unsubAccountHistory, account " << toBase58(account)
4124 << ", historyOnly = " << (historyOnly ? "true" : "false");
4125 }
4126}
4127
4128bool
4130{
4131 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
4132 listeners->addSubscriber(isrListener);
4133 else
4134 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
4135 return true;
4136}
4137
4138bool
4140{
4141 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
4142 listeners->removeSubscriber(uSeq);
4143
4144 return true;
4145}
4146
4150{
4151 // This code-path is exclusively used when the server is in standalone
4152 // mode via `ledger_accept`
4153 XRPL_ASSERT(
4154 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
4155
4156 if (!m_standalone)
4157 Throw<std::runtime_error>(
4158 "Operation only possible in STANDALONE mode.");
4159
4160 // FIXME Could we improve on this and remove the need for a specialized
4161 // API in Consensus?
4162 beginConsensus(m_ledgerMaster.getClosedLedger()->info().hash, {});
4163 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
4164 return m_ledgerMaster.getCurrentLedger()->info().seq;
4165}
4166
4167// <-- bool: true=added, false=already there
4168bool
4170{
4171 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
4172 {
4173 jvResult[jss::ledger_index] = lpClosed->info().seq;
4174 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
4175 jvResult[jss::ledger_time] = Json::Value::UInt(
4176 lpClosed->info().closeTime.time_since_epoch().count());
4177 if (!lpClosed->rules().enabled(featureXRPFees))
4178 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
4179 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
4180 jvResult[jss::reserve_base] =
4181 lpClosed->fees().accountReserve(0).jsonClipped();
4182 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
4183 jvResult[jss::network_id] = app_.config().NETWORK_ID;
4184 }
4185
4187 {
4188 jvResult[jss::validated_ledgers] =
4190 }
4191
4193 return mStreamMaps[sLedger]
4194 .emplace(isrListener->getSeq(), isrListener)
4195 .second;
4196}
4197
4198// <-- bool: true=added, false=already there
4199bool
4201{
4204 .emplace(isrListener->getSeq(), isrListener)
4205 .second;
4206}
4207
4208// <-- bool: true=erased, false=was not there
4209bool
4211{
4213 return mStreamMaps[sLedger].erase(uSeq);
4214}
4215
4216// <-- bool: true=erased, false=was not there
4217bool
4223
4224// <-- bool: true=added, false=already there
4225bool
4227{
4229 return mStreamMaps[sManifests]
4230 .emplace(isrListener->getSeq(), isrListener)
4231 .second;
4232}
4233
4234// <-- bool: true=erased, false=was not there
4235bool
4241
4242// <-- bool: true=added, false=already there
4243bool
4245 InfoSub::ref isrListener,
4246 Json::Value& jvResult,
4247 bool admin)
4248{
4249 uint256 uRandom;
4250
4251 if (m_standalone)
4252 jvResult[jss::stand_alone] = m_standalone;
4253
4254 // CHECKME: is it necessary to provide a random number here?
4255 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
4256
4257 auto const& feeTrack = app_.getFeeTrack();
4258 jvResult[jss::random] = to_string(uRandom);
4259 jvResult[jss::server_status] = strOperatingMode(admin);
4260 jvResult[jss::load_base] = feeTrack.getLoadBase();
4261 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4262 jvResult[jss::hostid] = getHostId(admin);
4263 jvResult[jss::pubkey_node] =
4265
4267 return mStreamMaps[sServer]
4268 .emplace(isrListener->getSeq(), isrListener)
4269 .second;
4270}
4271
4272// <-- bool: true=erased, false=was not there
4273bool
4275{
4277 return mStreamMaps[sServer].erase(uSeq);
4278}
4279
4280// <-- bool: true=added, false=already there
4281bool
4283{
4286 .emplace(isrListener->getSeq(), isrListener)
4287 .second;
4288}
4289
4290// <-- bool: true=erased, false=was not there
4291bool
4297
4298// <-- bool: true=added, false=already there
4299bool
4301{
4304 .emplace(isrListener->getSeq(), isrListener)
4305 .second;
4306}
4307
4308// <-- bool: true=erased, false=was not there
4309bool
4315
4316// <-- bool: true=added, false=already there
4317bool
4319{
4322 .emplace(isrListener->getSeq(), isrListener)
4323 .second;
4324}
4325
4326void
4331
4332// <-- bool: true=erased, false=was not there
4333bool
4339
4340// <-- bool: true=added, false=already there
4341bool
4343{
4345 return mStreamMaps[sPeerStatus]
4346 .emplace(isrListener->getSeq(), isrListener)
4347 .second;
4348}
4349
4350// <-- bool: true=erased, false=was not there
4351bool
4357
4358// <-- bool: true=added, false=already there
4359bool
4361{
4364 .emplace(isrListener->getSeq(), isrListener)
4365 .second;
4366}
4367
4368// <-- bool: true=erased, false=was not there
4369bool
4375
4378{
4380
4381 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4382
4383 if (it != mRpcSubMap.end())
4384 return it->second;
4385
4386 return InfoSub::pointer();
4387}
4388
4391{
4393
4394 mRpcSubMap.emplace(strUrl, rspEntry);
4395
4396 return rspEntry;
4397}
4398
4399bool
4401{
4403 auto pInfo = findRpcSub(strUrl);
4404
4405 if (!pInfo)
4406 return false;
4407
4408 // check to see if any of the stream maps still hold a weak reference to
4409 // this entry before removing
4410 for (SubMapType const& map : mStreamMaps)
4411 {
4412 if (map.find(pInfo->getSeq()) != map.end())
4413 return false;
4414 }
4415 mRpcSubMap.erase(strUrl);
4416 return true;
4417}
4418
4419#ifndef USE_NEW_BOOK_PAGE
4420
4421// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4422// work, but it demonstrated poor performance.
4423//
4424void
4427 Book const& book,
4428 AccountID const& uTakerID,
4429 bool const bProof,
4430 unsigned int iLimit,
4431 Json::Value const& jvMarker,
4432 Json::Value& jvResult)
4433{ // CAUTION: This is the old get book page logic
4434 Json::Value& jvOffers =
4435 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4436
4438 uint256 const uBookBase = getBookBase(book);
4439 uint256 const uBookEnd = getQualityNext(uBookBase);
4440 uint256 uTipIndex = uBookBase;
4441
4442 if (auto stream = m_journal.trace())
4443 {
4444 stream << "getBookPage:" << book;
4445 stream << "getBookPage: uBookBase=" << uBookBase;
4446 stream << "getBookPage: uBookEnd=" << uBookEnd;
4447 stream << "getBookPage: uTipIndex=" << uTipIndex;
4448 }
4449
4450 ReadView const& view = *lpLedger;
4451
4452 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4453 isGlobalFrozen(view, book.in.account);
4454
4455 bool bDone = false;
4456 bool bDirectAdvance = true;
4457
4458 std::shared_ptr<SLE const> sleOfferDir;
4459 uint256 offerIndex;
4460 unsigned int uBookEntry;
4461 STAmount saDirRate;
4462
4463 auto const rate = transferRate(view, book.out.account);
4464 auto viewJ = app_.journal("View");
4465
4466 while (!bDone && iLimit-- > 0)
4467 {
4468 if (bDirectAdvance)
4469 {
4470 bDirectAdvance = false;
4471
4472 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4473
4474 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4475 if (ledgerIndex)
4476 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4477 else
4478 sleOfferDir.reset();
4479
4480 if (!sleOfferDir)
4481 {
4482 JLOG(m_journal.trace()) << "getBookPage: bDone";
4483 bDone = true;
4484 }
4485 else
4486 {
4487 uTipIndex = sleOfferDir->key();
4488 saDirRate = amountFromQuality(getQuality(uTipIndex));
4489
4490 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4491
4492 JLOG(m_journal.trace())
4493 << "getBookPage: uTipIndex=" << uTipIndex;
4494 JLOG(m_journal.trace())
4495 << "getBookPage: offerIndex=" << offerIndex;
4496 }
4497 }
4498
4499 if (!bDone)
4500 {
4501 auto sleOffer = view.read(keylet::offer(offerIndex));
4502
4503 if (sleOffer)
4504 {
4505 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4506 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4507 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4508 STAmount saOwnerFunds;
4509 bool firstOwnerOffer(true);
4510
4511 if (book.out.account == uOfferOwnerID)
4512 {
4513 // If an offer is selling issuer's own IOUs, it is fully
4514 // funded.
4515 saOwnerFunds = saTakerGets;
4516 }
4517 else if (bGlobalFreeze)
4518 {
4519 // If either asset is globally frozen, consider all offers
4520 // that aren't ours to be totally unfunded
4521 saOwnerFunds.clear(book.out);
4522 }
4523 else
4524 {
4525 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4526 if (umBalanceEntry != umBalance.end())
4527 {
4528 // Found in running balance table.
4529
4530 saOwnerFunds = umBalanceEntry->second;
4531 firstOwnerOffer = false;
4532 }
4533 else
4534 {
4535 // Did not find balance in table.
4536
4537 saOwnerFunds = accountHolds(
4538 view,
4539 uOfferOwnerID,
4540 book.out.currency,
4541 book.out.account,
4543 viewJ);
4544
4545 if (saOwnerFunds < beast::zero)
4546 {
4547 // Treat negative funds as zero.
4548
4549 saOwnerFunds.clear();
4550 }
4551 }
4552 }
4553
4554 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4555
4556 STAmount saTakerGetsFunded;
4557 STAmount saOwnerFundsLimit = saOwnerFunds;
4558 Rate offerRate = parityRate;
4559
4560 if (rate != parityRate
4561 // Have a tranfer fee.
4562 && uTakerID != book.out.account
4563 // Not taking offers of own IOUs.
4564 && book.out.account != uOfferOwnerID)
4565 // Offer owner not issuing ownfunds
4566 {
4567 // Need to charge a transfer fee to offer owner.
4568 offerRate = rate;
4569 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4570 }
4571
4572 if (saOwnerFundsLimit >= saTakerGets)
4573 {
4574 // Sufficient funds no shenanigans.
4575 saTakerGetsFunded = saTakerGets;
4576 }
4577 else
4578 {
4579 // Only provide, if not fully funded.
4580
4581 saTakerGetsFunded = saOwnerFundsLimit;
4582
4583 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4584 std::min(
4585 saTakerPays,
4586 multiply(
4587 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4588 .setJson(jvOffer[jss::taker_pays_funded]);
4589 }
4590
4591 STAmount saOwnerPays = (parityRate == offerRate)
4592 ? saTakerGetsFunded
4593 : std::min(
4594 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4595
4596 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4597
4598 // Include all offers funded and unfunded
4599 Json::Value& jvOf = jvOffers.append(jvOffer);
4600 jvOf[jss::quality] = saDirRate.getText();
4601
4602 if (firstOwnerOffer)
4603 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4604 }
4605 else
4606 {
4607 JLOG(m_journal.warn()) << "Missing offer";
4608 }
4609
4610 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4611 {
4612 bDirectAdvance = true;
4613 }
4614 else
4615 {
4616 JLOG(m_journal.trace())
4617 << "getBookPage: offerIndex=" << offerIndex;
4618 }
4619 }
4620 }
4621
4622 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4623 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4624}
4625
4626#else
4627
4628// This is the new code that uses the book iterators
4629// It has temporarily been disabled
4630
4631void
4634 Book const& book,
4635 AccountID const& uTakerID,
4636 bool const bProof,
4637 unsigned int iLimit,
4638 Json::Value const& jvMarker,
4639 Json::Value& jvResult)
4640{
4641 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4642
4644
4645 MetaView lesActive(lpLedger, tapNONE, true);
4646 OrderBookIterator obIterator(lesActive, book);
4647
4648 auto const rate = transferRate(lesActive, book.out.account);
4649
4650 bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4651 lesActive.isGlobalFrozen(book.in.account);
4652
4653 while (iLimit-- > 0 && obIterator.nextOffer())
4654 {
4655 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4656 if (sleOffer)
4657 {
4658 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4659 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4660 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4661 STAmount saDirRate = obIterator.getCurrentRate();
4662 STAmount saOwnerFunds;
4663
4664 if (book.out.account == uOfferOwnerID)
4665 {
4666 // If offer is selling issuer's own IOUs, it is fully funded.
4667 saOwnerFunds = saTakerGets;
4668 }
4669 else if (bGlobalFreeze)
4670 {
4671 // If either asset is globally frozen, consider all offers
4672 // that aren't ours to be totally unfunded
4673 saOwnerFunds.clear(book.out);
4674 }
4675 else
4676 {
4677 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4678
4679 if (umBalanceEntry != umBalance.end())
4680 {
4681 // Found in running balance table.
4682
4683 saOwnerFunds = umBalanceEntry->second;
4684 }
4685 else
4686 {
4687 // Did not find balance in table.
4688
4689 saOwnerFunds = lesActive.accountHolds(
4690 uOfferOwnerID,
4691 book.out.currency,
4692 book.out.account,
4694
4695 if (saOwnerFunds.isNegative())
4696 {
4697 // Treat negative funds as zero.
4698
4699 saOwnerFunds.zero();
4700 }
4701 }
4702 }
4703
4704 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4705
4706 STAmount saTakerGetsFunded;
4707 STAmount saOwnerFundsLimit = saOwnerFunds;
4708 Rate offerRate = parityRate;
4709
4710 if (rate != parityRate
4711 // Have a tranfer fee.
4712 && uTakerID != book.out.account
4713 // Not taking offers of own IOUs.
4714 && book.out.account != uOfferOwnerID)
4715 // Offer owner not issuing ownfunds
4716 {
4717 // Need to charge a transfer fee to offer owner.
4718 offerRate = rate;
4719 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4720 }
4721
4722 if (saOwnerFundsLimit >= saTakerGets)
4723 {
4724 // Sufficient funds no shenanigans.
4725 saTakerGetsFunded = saTakerGets;
4726 }
4727 else
4728 {
4729 // Only provide, if not fully funded.
4730 saTakerGetsFunded = saOwnerFundsLimit;
4731
4732 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4733
4734 // TOOD(tom): The result of this expression is not used - what's
4735 // going on here?
4736 std::min(
4737 saTakerPays,
4738 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4739 .setJson(jvOffer[jss::taker_pays_funded]);
4740 }
4741
4742 STAmount saOwnerPays = (parityRate == offerRate)
4743 ? saTakerGetsFunded
4744 : std::min(
4745 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4746
4747 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4748
4749 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4750 {
4751 // Only provide funded offers and offers of the taker.
4752 Json::Value& jvOf = jvOffers.append(jvOffer);
4753 jvOf[jss::quality] = saDirRate.getText();
4754 }
4755 }
4756 }
4757
4758 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4759 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4760}
4761
4762#endif
4763
4764inline void
4766{
4767 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4768 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4770 counters[static_cast<std::size_t>(mode)].dur += current;
4771
4774 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4775 .dur.count());
4777 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4778 .dur.count());
4780 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4782 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4783 .dur.count());
4785 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4786
4788 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4789 .transitions);
4791 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4792 .transitions);
4794 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4796 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4797 .transitions);
4799 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4800}
4801
4802void
4804{
4805 auto now = std::chrono::steady_clock::now();
4806
4807 std::lock_guard lock(mutex_);
4808 ++counters_[static_cast<std::size_t>(om)].transitions;
4809 if (om == OperatingMode::FULL &&
4810 counters_[static_cast<std::size_t>(om)].transitions == 1)
4811 {
4812 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4813 now - processStart_)
4814 .count();
4815 }
4816 counters_[static_cast<std::size_t>(mode_)].dur +=
4817 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4818
4819 mode_ = om;
4820 start_ = now;
4821}
4822
4823void
4825{
4826 auto [counters, mode, start, initialSync] = getCounterData();
4827 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4829 counters[static_cast<std::size_t>(mode)].dur += current;
4830
4831 obj[jss::state_accounting] = Json::objectValue;
4833 i <= static_cast<std::size_t>(OperatingMode::FULL);
4834 ++i)
4835 {
4836 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4837 auto& state = obj[jss::state_accounting][states_[i]];
4838 state[jss::transitions] = std::to_string(counters[i].transitions);
4839 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4840 }
4841 obj[jss::server_state_duration_us] = std::to_string(current.count());
4842 if (initialSync)
4843 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4844}
4845
4846//------------------------------------------------------------------------------
4847
4850 Application& app,
4852 bool standalone,
4853 std::size_t minPeerCount,
4854 bool startvalid,
4855 JobQueue& job_queue,
4857 ValidatorKeys const& validatorKeys,
4858 boost::asio::io_service& io_svc,
4859 beast::Journal journal,
4860 beast::insight::Collector::ptr const& collector)
4861{
4863 app,
4864 clock,
4865 standalone,
4866 minPeerCount,
4867 startvalid,
4868 job_queue,
4870 validatorKeys,
4871 io_svc,
4872 journal,
4873 collector);
4874}
4875
4876} // namespace ripple
T any_of(T... args)
T back_inserter(T... args)
T begin(T... args)
Decorator for streaming out compact json.
Lightweight wrapper to tag static string.
Definition json_value.h:63
Represents a JSON value.
Definition json_value.h:149
Json::UInt UInt
Definition json_value.h:156
Value & append(Value const &value)
Append value to array at the end.
bool isMember(char const *key) const
Return true if the object has a member named key.
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
A generic endpoint for log messages.
Definition Journal.h:60
Stream error() const
Definition Journal.h:346
Stream debug() const
Definition Journal.h:328
Stream info() const
Definition Journal.h:334
Stream trace() const
Severity stream access functions.
Definition Journal.h:322
Stream warn() const
Definition Journal.h:340
A metric for measuring an integral value.
Definition Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition Gauge.h:68
A reference to a handler for performing polled collection.
Definition Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition Book.h:36
Issue in
Definition Book.h:38
Issue out
Definition Book.h:39
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition ClusterNode.h:46
std::uint32_t getLoadFee() const
Definition ClusterNode.h:52
NetClock::time_point getReportTime() const
Definition ClusterNode.h:58
PublicKey const & identity() const
Definition ClusterNode.h:64
std::size_t size() const
The number of nodes in the cluster list.
Definition Cluster.cpp:49
uint32_t NETWORK_ID
Definition Config.h:156
std::string SERVER_DOMAIN
Definition Config.h:278
std::size_t NODE_SIZE
Definition Config.h:213
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition Config.h:160
int RELAY_UNTRUSTED_VALIDATIONS
Definition Config.h:169
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition InfoSub.h:54
AccountID account
Definition Issue.h:36
Currency currency
Definition Issue.h:35
A pool of threads to perform work.
Definition JobQueue.h:58
Json::Value getJson(int c=0)
Definition JobQueue.cpp:214
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition JobQueue.h:168
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
std::uint32_t getClusterFee() const
std::uint32_t getLocalFee() const
std::uint32_t getLoadBase() const
std::uint32_t getRemoteFee() const
std::uint32_t getLoadFactor() const
Manages load sources.
Definition LoadManager.h:46
void heartbeat()
Reset the stall detection timer.
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
Definition Manifest.cpp:323
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
std::chrono::steady_clock::time_point start_
static std::array< Json::StaticString const, 5 > const states_
std::chrono::steady_clock::time_point const processStart_
Transaction with input flags and results to be applied in batches.
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
std::shared_ptr< Transaction > const transaction
boost::asio::steady_timer accountHistoryTxTimer_
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
std::vector< TransactionStatus > mTransactions
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
std::optional< PublicKey > const validatorPK_
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
beast::Journal m_journal
SubInfoMapType mSubAccount
std::optional< PublicKey > const validatorMasterPK_
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
std::atomic< bool > unlBlocked_
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
std::atomic< bool > needNetworkLedger_
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
void setMode(OperatingMode om) override
void stop() override
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
void clearNeedNetworkLedger() override
DispatchState mDispatchState
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
void reportFeeChange() override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
std::atomic< bool > amendmentWarned_
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition NetworkOPs.h:89
void getCountsJson(Json::Value &obj)
Definition Database.cpp:268
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Writable ledger view that accumulates state and tx changes.
Definition OpenView.h:66
BookListeners::pointer getBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &alTx, MultiApiJson const &jvObj)
BookListeners::pointer makeBookListeners(Book const &)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Represents a set of transactions in RCLConsensus.
Definition RCLCxTx.h:63
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition RFC1751.cpp:507
Collects logging information.
std::unique_ptr< std::stringstream > const & ss()
A view into a ledger.
Definition ReadView.h:52
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition STAmount.cpp:666
std::string getText() const override
Definition STAmount.cpp:706
Issue const & issue() const
Definition STAmount.h:496
std::optional< T > get(std::string const &name) const
std::size_t size() const noexcept
Definition Serializer.h:72
void const * data() const noexcept
Definition Serializer.h:78
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition TxQ.cpp:1778
static time_point now()
Validator keys and manifest as set in configuration file.
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition XRPAmount.h:262
Json::Value jsonClipped() const
Definition XRPAmount.h:218
iterator begin()
Definition base_uint.h:136
static constexpr std::size_t size()
Definition base_uint.h:526
bool isZero() const
Definition base_uint.h:540
bool isNonZero() const
Definition base_uint.h:545
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition scope.h:231
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_same_v
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition json_value.h:44
@ objectValue
object value (collection of name/value pairs).
Definition json_value.h:45
int Int
unsigned int UInt
void rngfill(void *const buffer, std::size_t const bytes, Generator &g)
Definition rngfill.h:34
std::string const & getVersionString()
Server version.
Definition BuildInfo.cpp:68
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Definition CTID.h:43
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition BookChanges.h:47
void insertNFTSyntheticInJson(Json::Value &, std::shared_ptr< STTx const > const &, TxMeta const &)
Adds common synthetic fields to transaction-related JSON responses.
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition Indexes.cpp:184
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition Indexes.cpp:380
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition Indexes.cpp:274
Rate rate(Env &env, Account const &account, std::uint32_t const &seq)
Definition escrow.cpp:69
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:25
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
STAmount divide(STAmount const &amount, Rate const &rate)
Definition Rate2.cpp:93
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition STTx.cpp:811
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition View.cpp:552
@ fhZERO_IF_FROZEN
Definition View.h:78
@ fhIGNORE_FREEZE
Definition View.h:78
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition View.cpp:146
std::uint64_t getQuality(uint256 const &uBase)
Definition Indexes.cpp:149
@ rpcSUCCESS
Definition ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition ErrorCodes.h:84
@ rpcINTERNAL
Definition ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
auto constexpr muldiv_max
Definition mulDiv.h:28
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition LocalTxs.cpp:192
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition View.cpp:760
STAmount amountFromQuality(std::uint64_t rate)
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition ErrorCodes.h:170
@ warnRPC_UNSUPPORTED_MAJORITY
Definition ErrorCodes.h:168
@ warnRPC_AMENDMENT_BLOCKED
Definition ErrorCodes.h:169
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition NetworkOPs.h:68
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition Rate2.cpp:53
AccountID calcAccountID(PublicKey const &pk)
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Json::Value rpcError(int iError)
Definition RPCErr.cpp:31
@ tefPAST_SEQ
Definition TER.h:175
bool isTefFailure(TER x) noexcept
Definition TER.h:662
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
std::string strHex(FwdIt begin, FwdIt end)
Definition strHex.h:30
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition View.cpp:157
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition ApiVersion.h:101
bool isTerRetry(TER x) noexcept
Definition TER.h:668
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition predicates.h:75
@ tesSUCCESS
Definition TER.h:244
uint256 getQualityNext(uint256 const &uBase)
Definition Indexes.cpp:141
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition View.cpp:385
bool isTesSuccess(TER x) noexcept
Definition TER.h:674
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition chrono.h:92
std::string to_string(base_uint< Bits, Tag > const &a)
Definition base_uint.h:630
FeeSetup setup_FeeVote(Section const &section)
Definition Config.cpp:1129
bool isTemMalformed(TER x) noexcept
Definition TER.h:656
Number root(Number f, unsigned d)
Definition Number.cpp:636
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
@ tapFAIL_HARD
Definition ApplyView.h:36
@ tapUNLIMITED
Definition ApplyView.h:43
@ tapNONE
Definition ApplyView.h:32
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition apply.cpp:44
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition Seed.cpp:76
constexpr std::size_t maxPoppedTransactions
@ terQUEUED
Definition TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition TER.cpp:249
@ jtNETOP_CLUSTER
Definition Job.h:75
@ jtCLIENT_FEE_CHANGE
Definition Job.h:47
@ jtTRANSACTION
Definition Job.h:62
@ jtTXN_PROC
Definition Job.h:82
@ jtCLIENT_CONSENSUS
Definition Job.h:48
@ jtBATCH
Definition Job.h:65
@ jtCLIENT_ACCT_HIST
Definition Job.h:49
bool isTelLocal(TER x) noexcept
Definition TER.h:650
uint256 getBookBase(Book const &book)
Definition Indexes.cpp:115
constexpr std::uint32_t tfInnerBatchTxn
Definition TxFlags.h:61
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition View.cpp:183
static std::uint32_t trunc32(std::uint64_t v)
@ temINVALID_FLAG
Definition TER.h:111
@ temBAD_SIGNATURE
Definition TER.h:105
static auto const genesisAccountId
STL namespace.
T owns_lock(T... args)
T ref(T... args)
T reserve(T... args)
T reset(T... args)
T set_intersection(T... args)
T size(T... args)
T str(T... args)
std::string serialized
The manifest in serialized form.
Definition Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
Definition Manifest.cpp:244
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
Definition Manifest.cpp:255
PublicKey masterKey
The master key associated with this manifest.
Definition Manifest.h:86
Server fees published on server subscription.
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
bool operator==(ServerFeeSummary const &b) const
beast::insight::Gauge full_transitions
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
beast::insight::Hook hook
beast::insight::Gauge connected_duration
beast::insight::Gauge tracking_duration
beast::insight::Gauge connected_transitions
beast::insight::Gauge disconnected_transitions
beast::insight::Gauge syncing_duration
beast::insight::Gauge tracking_transitions
beast::insight::Gauge full_duration
beast::insight::Gauge disconnected_duration
beast::insight::Gauge syncing_transitions
SubAccountHistoryIndex(AccountID const &accountId)
std::shared_ptr< SubAccountHistoryIndex > index_
std::shared_ptr< SubAccountHistoryIndex > index_
Represents a transfer rate.
Definition Rate.h:40
Data format for exchanging consumption information across peers.
Definition Gossip.h:32
std::vector< Item > items
Definition Gossip.h:44
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition TxQ.h:165
IsMemberResult isMember(char const *key) const
void set(char const *key, auto const &v)
Select all peers (except optional excluded) that are in our cluster.
Definition predicates.h:137
Sends a message to all peers.
Definition predicates.h:32
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)