NetworkOPs.cpp
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2012, 2013 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <xrpld/app/consensus/RCLConsensus.h>
#include <xrpld/app/consensus/RCLValidations.h>
#include <xrpld/app/ledger/AcceptedLedger.h>
#include <xrpld/app/ledger/InboundLedgers.h>
#include <xrpld/app/ledger/LedgerMaster.h>
#include <xrpld/app/ledger/LedgerToJson.h>
#include <xrpld/app/ledger/LocalTxs.h>
#include <xrpld/app/ledger/OpenLedger.h>
#include <xrpld/app/ledger/OrderBookDB.h>
#include <xrpld/app/ledger/TransactionMaster.h>
#include <xrpld/app/main/LoadManager.h>
#include <xrpld/app/main/Tuning.h>
#include <xrpld/app/misc/AmendmentTable.h>
#include <xrpld/app/misc/DeliverMax.h>
#include <xrpld/app/misc/HashRouter.h>
#include <xrpld/app/misc/LoadFeeTrack.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/app/misc/Transaction.h>
#include <xrpld/app/misc/TxQ.h>
#include <xrpld/app/misc/ValidatorKeys.h>
#include <xrpld/app/misc/ValidatorList.h>
#include <xrpld/app/misc/detail/AccountTxPaging.h>
#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
#include <xrpld/app/tx/apply.h>
#include <xrpld/consensus/Consensus.h>
#include <xrpld/consensus/ConsensusParms.h>
#include <xrpld/overlay/Cluster.h>
#include <xrpld/overlay/Overlay.h>
#include <xrpld/overlay/predicates.h>
#include <xrpld/perflog/PerfLog.h>
#include <xrpld/rpc/BookChanges.h>
#include <xrpld/rpc/CTID.h>
#include <xrpld/rpc/DeliveredAmount.h>
#include <xrpld/rpc/MPTokenIssuanceID.h>
#include <xrpld/rpc/ServerHandler.h>

#include <xrpl/basics/UptimeClock.h>
#include <xrpl/basics/mulDiv.h>
#include <xrpl/basics/safe_cast.h>
#include <xrpl/basics/scope.h>
#include <xrpl/beast/utility/rngfill.h>
#include <xrpl/crypto/RFC1751.h>
#include <xrpl/crypto/csprng.h>
#include <xrpl/protocol/BuildInfo.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/MultiApiJson.h>
#include <xrpl/protocol/NFTSyntheticSerializer.h>
#include <xrpl/protocol/RPCErr.h>
#include <xrpl/protocol/TxFlags.h>
#include <xrpl/protocol/jss.h>
#include <xrpl/resource/Fees.h>
#include <xrpl/resource/ResourceManager.h>

#include <boost/asio/ip/host_name.hpp>
#include <boost/asio/steady_timer.hpp>

#include <algorithm>
#include <exception>
#include <mutex>
#include <optional>
#include <set>
#include <sstream>
#include <string>
#include <tuple>
#include <unordered_map>

namespace ripple {

class NetworkOPsImp final : public NetworkOPs
{
    /**
     * Transaction with input flags and results to be applied in batches.
     */

    class TransactionStatus
    {
    public:
        std::shared_ptr<Transaction> const transaction;
        bool const admin;
        bool const local;
        FailHard const failType;
        bool applied = false;
        TER result;

        TransactionStatus(
            std::shared_ptr<Transaction> t,
            bool a,
            bool l,
            FailHard f)
            : transaction(t), admin(a), local(l), failType(f)
        {
            XRPL_ASSERT(
                transaction,
                "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
                "valid inputs");
        }
    };

    /**
     * Synchronization states for transaction batches.
     */
    enum class DispatchState : unsigned char {
        none,
        scheduled,
        running,
    };

    static std::array<char const*, 5> const states_;

    /**
     * State accounting records two attributes for each possible server state:
     * 1) Amount of time spent in each state (in microseconds). This value is
     *    updated upon each state transition.
     * 2) Number of transitions to each state.
     *
     * This data can be polled through server_info and represented by
     * monitoring systems.
     */
    class StateAccounting
    {
        struct Counters
        {
            explicit Counters() = default;

            std::uint64_t transitions = 0;
            std::chrono::microseconds dur = std::chrono::microseconds(0);
        };

        OperatingMode mode_ = OperatingMode::DISCONNECTED;
        std::array<Counters, 5> counters_;
        mutable std::mutex mutex_;

        std::chrono::steady_clock::time_point start_ =
            std::chrono::steady_clock::now();
        std::chrono::steady_clock::time_point const processStart_ = start_;

        std::uint64_t initialSyncUs_{0};

    public:
        explicit StateAccounting()
        {
            counters_[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
                .transitions = 1;
        }

        /**
         * Record state transition. Update duration spent in previous
         * state.
         *
         * @param om New state.
         */
        void
        mode(OperatingMode om);

        /**
         * Output state counters in JSON format.
         *
         * @param obj Json object to which to add state accounting data.
         */
        void
        json(Json::Value& obj) const;

        struct CounterData
        {
            decltype(counters_) counters;
            decltype(mode_) mode;
            decltype(start_) start;
            decltype(initialSyncUs_) initialSync;
        };

        CounterData
        getCounterData() const
        {
            std::lock_guard lock(mutex_);
            return {counters_, mode_, start_, initialSyncUs_};
        }
    };

    /** Server fees published on the server subscription stream. */
    struct ServerFeeSummary
    {
        ServerFeeSummary() = default;

        ServerFeeSummary(
            XRPAmount fee,
            TxQ::Metrics&& escalationMetrics,
            LoadFeeTrack const& loadFeeTrack);
        bool
        operator!=(ServerFeeSummary const& b) const;

        bool
        operator==(ServerFeeSummary const& b) const
        {
            return !(*this != b);
        }

        std::uint32_t loadFactorServer = 256;
        std::uint32_t loadBaseServer = 256;
        XRPAmount baseFee{10};
        std::optional<TxQ::Metrics> em = std::nullopt;
    };

public:
    NetworkOPsImp(
        Application& app,
        NetworkOPs::clock_type& clock,
        bool standalone,
        std::size_t minPeerCount,
        bool start_valid,
        JobQueue& job_queue,
        LedgerMaster& ledgerMaster,
        ValidatorKeys const& validatorKeys,
        boost::asio::io_context& io_svc,
        beast::Journal journal,
        beast::insight::Collector::ptr const& collector)
        : app_(app)
        , m_journal(journal)
        , m_localTX(make_LocalTxs())
        , mMode(start_valid ? OperatingMode::FULL : OperatingMode::DISCONNECTED)
        , heartbeatTimer_(io_svc)
        , clusterTimer_(io_svc)
        , accountHistoryTxTimer_(io_svc)
        , mConsensus(
              app,
              make_FeeVote(
                  setup_FeeVote(app_.config().section("voting")),
                  app_.logs().journal("FeeVote")),
              ledgerMaster,
              *m_localTX,
              app.getInboundTransactions(),
              beast::get_abstract_clock<std::chrono::steady_clock>(),
              validatorKeys,
              app_.logs().journal("LedgerConsensus"))
        , validatorPK_(
              validatorKeys.keys ? validatorKeys.keys->publicKey
                                 : decltype(validatorPK_){})
        , validatorMasterPK_(
              validatorKeys.keys ? validatorKeys.keys->masterPublicKey
                                 : decltype(validatorMasterPK_){})
        , m_ledgerMaster(ledgerMaster)
        , m_job_queue(job_queue)
        , m_standalone(standalone)
        , minPeerCount_(start_valid ? 0 : minPeerCount)
        , m_stats(std::bind(&NetworkOPsImp::collect_metrics, this), collector)
    {
    }

    ~NetworkOPsImp() override
    {
        // This clear() is necessary to ensure the shared_ptrs in this map get
        // destroyed NOW because the objects in this map invoke methods on this
        // class when they are destroyed
        mRpcSubMap.clear();
    }

public:
    OperatingMode
    getOperatingMode() const override;

    std::string
    strOperatingMode(OperatingMode const mode, bool const admin) const override;

    std::string
    strOperatingMode(bool const admin = false) const override;

    //
    // Transaction operations.
    //

    // Must complete immediately.
    void
    submitTransaction(std::shared_ptr<STTx const> const&) override;

    void
    processTransaction(
        std::shared_ptr<Transaction>& transaction,
        bool bUnlimited,
        bool bLocal,
        FailHard failType) override;

    void
    processTransactionSet(CanonicalTXSet const& set) override;

    /**
     * For transactions submitted directly by a client, apply batch of
     * transactions and wait for this transaction to complete.
     *
     * @param transaction Transaction object.
     * @param bUnlimited Whether a privileged client connection submitted it.
     * @param failType fail_hard setting from transaction submission.
     */
    void
    doTransactionSync(
        std::shared_ptr<Transaction> transaction,
        bool bUnlimited,
        FailHard failType);

    /**
     * For transactions not submitted by a locally connected client, fire and
     * forget. Add to batch and trigger it to be processed if there's no
     * batch currently being applied.
     *
     * @param transaction Transaction object.
     * @param bUnlimited Whether a privileged client connection submitted it.
     * @param failtype fail_hard setting from transaction submission.
     */
    void
    doTransactionAsync(
        std::shared_ptr<Transaction> transaction,
        bool bUnlimited,
        FailHard failtype);

private:
    bool
    preProcessTransaction(std::shared_ptr<Transaction>& transaction);

    void
    doTransactionSyncBatch(
        std::unique_lock<std::mutex>& lock,
        std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback);

public:
    /**
     * Apply transactions in batches. Continue until none are queued.
     */
    void
    transactionBatch();

    /**
     * Attempt to apply transactions and post-process based on the results.
     *
     * @param batchLock Lock that protects the transaction batching.
     */
    void
    apply(std::unique_lock<std::mutex>& batchLock);

    //
    // Owner functions.
    //

    Json::Value
    getOwnerInfo(
        std::shared_ptr<ReadView const> lpLedger,
        AccountID const& account) override;

    //
    // Book functions.
    //

    void
    getBookPage(
        std::shared_ptr<ReadView const>& lpLedger,
        Book const&,
        AccountID const& uTakerID,
        bool const bProof,
        unsigned int iLimit,
        Json::Value const& jvMarker,
        Json::Value& jvResult) override;

    // Ledger proposal/close functions.
    bool
    processTrustedProposal(RCLCxPeerPos proposal) override;

    bool
    recvValidation(
        std::shared_ptr<STValidation> const& val,
        std::string const& source) override;

    void
    mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;

    // Network state machine.

    // Used for the "jump" case.
private:
    void
    switchLastClosedLedger(std::shared_ptr<Ledger const> const& newLCL);
    bool
    checkLastClosedLedger(Overlay::PeerSequence const&, uint256& networkClosed);

public:
    bool
    beginConsensus(
        uint256 const& networkClosed,
        std::unique_ptr<std::stringstream> const& clog) override;
    void
    endConsensus(std::unique_ptr<std::stringstream> const& clog) override;
    void
    setStandAlone() override;

    /**
     * Setup timers for regular scheduled activities.
     */
    void
    setStateTimer() override;

    void
    setNeedNetworkLedger() override;
    void
    clearNeedNetworkLedger() override;
    bool
    isNeedNetworkLedger() override;
    bool
    isFull() override;

    void
    setMode(OperatingMode om) override;

    bool
    isBlocked() override;
    bool
    isAmendmentBlocked() override;
    void
    setAmendmentBlocked() override;
    bool
    isAmendmentWarned() override;
    void
    setAmendmentWarned() override;
    void
    clearAmendmentWarned() override;
    bool
    isUNLBlocked() override;
    void
    setUNLBlocked() override;
    void
    clearUNLBlocked() override;
    void
    consensusViewChange() override;

    Json::Value
    getConsensusInfo() override;
    Json::Value
    getServerInfo(bool human, bool admin, bool counters) override;
    void
    clearLedgerFetch() override;
    Json::Value
    getLedgerFetchInfo() override;
    std::uint32_t
    acceptLedger(
        std::optional<std::chrono::milliseconds> consensusDelay) override;
    void
    reportFeeChange() override;
    void
    reportConsensusStateChange(ConsensusPhase phase);

    void
    updateLocalTx(ReadView const& view) override;
    std::size_t
    getLocalTxCount() override;

    //
    // Monitoring: publisher side.
    //
    void
    pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
    void
    pubProposedTransaction(
        std::shared_ptr<ReadView const> const& ledger,
        std::shared_ptr<STTx const> const& transaction,
        TER result) override;
    void
    pubValidation(std::shared_ptr<STValidation> const& val) override;

    //--------------------------------------------------------------------------
    //
    // InfoSub::Source.
    //
    void
    subAccount(
        InfoSub::ref ispListener,
        hash_set<AccountID> const& vnaAccountIDs,
        bool rt) override;
    void
    unsubAccount(
        InfoSub::ref ispListener,
        hash_set<AccountID> const& vnaAccountIDs,
        bool rt) override;

    // Just remove the subscription from the tracking
    // not from the InfoSub. Needed for InfoSub destruction
    void
    unsubAccountInternal(
        std::uint64_t seq,
        hash_set<AccountID> const& vnaAccountIDs,
        bool rt) override;

    error_code_i
    subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
        override;
    void
    unsubAccountHistory(
        InfoSub::ref ispListener,
        AccountID const& account,
        bool historyOnly) override;

    void
    unsubAccountHistoryInternal(
        std::uint64_t seq,
        AccountID const& account,
        bool historyOnly) override;

    bool
    subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
    bool
    unsubLedger(std::uint64_t uListener) override;

    bool
    subBookChanges(InfoSub::ref ispListener) override;
    bool
    unsubBookChanges(std::uint64_t uListener) override;

    bool
    subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
        override;
    bool
    unsubServer(std::uint64_t uListener) override;

    bool
    subBook(InfoSub::ref ispListener, Book const&) override;
    bool
    unsubBook(std::uint64_t uListener, Book const&) override;

    bool
    subManifests(InfoSub::ref ispListener) override;
    bool
    unsubManifests(std::uint64_t uListener) override;
    void
    pubManifest(Manifest const&) override;

    bool
    subTransactions(InfoSub::ref ispListener) override;
    bool
    unsubTransactions(std::uint64_t uListener) override;

    bool
    subRTTransactions(InfoSub::ref ispListener) override;
    bool
    unsubRTTransactions(std::uint64_t uListener) override;

    bool
    subValidations(InfoSub::ref ispListener) override;
    bool
    unsubValidations(std::uint64_t uListener) override;

    bool
    subPeerStatus(InfoSub::ref ispListener) override;
    bool
    unsubPeerStatus(std::uint64_t uListener) override;
    void
    pubPeerStatus(std::function<Json::Value(void)> const&) override;

    bool
    subConsensus(InfoSub::ref ispListener) override;
    bool
    unsubConsensus(std::uint64_t uListener) override;

    InfoSub::pointer
    findRpcSub(std::string const& strUrl) override;
    InfoSub::pointer
    addRpcSub(std::string const& strUrl, InfoSub::ref) override;
    bool
    tryRemoveRpcSub(std::string const& strUrl) override;

    void
    stop() override
    {
        {
            try
            {
                heartbeatTimer_.cancel();
            }
            catch (boost::system::system_error const& e)
            {
                JLOG(m_journal.error())
                    << "NetworkOPs: heartbeatTimer cancel error: " << e.what();
            }

            try
            {
                clusterTimer_.cancel();
            }
            catch (boost::system::system_error const& e)
            {
                JLOG(m_journal.error())
                    << "NetworkOPs: clusterTimer cancel error: " << e.what();
            }

            try
            {
                accountHistoryTxTimer_.cancel();
            }
            catch (boost::system::system_error const& e)
            {
                JLOG(m_journal.error())
                    << "NetworkOPs: accountHistoryTxTimer cancel error: "
                    << e.what();
            }
        }
        // Make sure that any waitHandlers pending in our timers are done.
        using namespace std::chrono_literals;
        waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
    }
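
    // Note: stop() cancels each timer inside its own try/catch so a failure
    // cancelling one timer does not prevent cancelling the others, and the
    // join on waitHandlerCounter_ blocks until any completion handlers that
    // are already in flight have finished, so no timer callback can outlive
    // this object.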

    void
    stateAccounting(Json::Value& obj) override;

private:
    void
    setTimer(
        boost::asio::steady_timer& timer,
        std::chrono::milliseconds const& expiry_time,
        std::function<void()> onExpire,
        std::function<void()> onError);
    void
    setHeartbeatTimer();
    void
    setClusterTimer();
    void
    processHeartbeatTimer();
    void
    processClusterTimer();

    MultiApiJson
    transJson(
        std::shared_ptr<STTx const> const& transaction,
        TER result,
        bool validated,
        std::shared_ptr<ReadView const> const& ledger,
        std::optional<std::reference_wrapper<TxMeta const>> meta);

    void
    pubValidatedTransaction(
        std::shared_ptr<ReadView const> const& ledger,
        AcceptedLedgerTx const& transaction,
        bool last);

    void
    pubAccountTransaction(
        std::shared_ptr<ReadView const> const& ledger,
        AcceptedLedgerTx const& transaction,
        bool last);

    void
    pubProposedAccountTransaction(
        std::shared_ptr<ReadView const> const& ledger,
        std::shared_ptr<STTx const> const& transaction,
        TER result);

    void
    pubServer();
    void
    pubConsensus(ConsensusPhase phase);

    std::string
    getHostId(bool forAdmin);
private:
    using SubMapType = hash_map<std::uint64_t, InfoSub::wptr>;
    using SubInfoMapType = hash_map<AccountID, SubMapType>;
    using subRpcMapType = hash_map<std::string, InfoSub::pointer>;

    /*
     * With a validated ledger to separate history and future, the node
     * streams historical txns with negative indexes starting from -1,
     * and streams future txns starting from index 0.
     * The SubAccountHistoryIndex struct maintains these indexes.
     * It also has a flag stopHistorical_ for stopping streaming
     * the historical txns.
     */
    struct SubAccountHistoryIndex
    {
        AccountID const accountId_;
        // index of the next future txn to stream, counting up from 0
        std::uint32_t forwardTxIndex_{0};
        // index of the next historical txn to stream, counting down from -1
        std::int32_t historyTxIndex_{-1};
        // stop streaming the historical txns
        std::atomic<bool> stopHistorical_{false};

        explicit SubAccountHistoryIndex(AccountID const& accountId)
            : accountId_(accountId)
        {
        }
    };
    struct SubAccountHistoryInfo
    {
        InfoSub::pointer sink_;
        std::shared_ptr<SubAccountHistoryIndex> index_;
    };
    struct SubAccountHistoryInfoWeak
    {
        InfoSub::wptr sinkWptr_;
        std::shared_ptr<SubAccountHistoryIndex> index_;
    };
    using SubAccountHistoryMapType =
        hash_map<AccountID, SubAccountHistoryInfoWeak>;

    void
    subAccountHistoryStart(
        std::shared_ptr<ReadView const> const& ledger,
        SubAccountHistoryInfoWeak& subInfo);
    void
    addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo);
    void
    setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo);

    Application& app_;
    beast::Journal m_journal;

    std::unique_ptr<LocalTxs> m_localTX;

    std::recursive_mutex mSubLock;

    std::atomic<OperatingMode> mMode;

    std::atomic<bool> needNetworkLedger_{false};
    std::atomic<bool> amendmentBlocked_{false};
    std::atomic<bool> amendmentWarned_{false};
    std::atomic<bool> unlBlocked_{false};

    ClosureCounter<void, boost::system::error_code const&> waitHandlerCounter_;

    boost::asio::steady_timer heartbeatTimer_;
    boost::asio::steady_timer clusterTimer_;
    boost::asio::steady_timer accountHistoryTxTimer_;

    RCLConsensus mConsensus;

    ConsensusPhase mLastConsensusPhase;

    std::optional<PublicKey> const validatorPK_;
    std::optional<PublicKey> const validatorMasterPK_;

    LedgerMaster& m_ledgerMaster;

    SubInfoMapType mSubAccount;
    SubInfoMapType mSubRTAccount;

    subRpcMapType mRpcSubMap;

    SubAccountHistoryMapType mSubAccountHistory;  // account history only

    enum SubTypes {
        sLedger,          // Accepted ledgers.
        sManifests,       // Received validator manifests.
        sServer,          // When server changes connectivity state.
        sTransactions,    // All accepted transactions.
        sRTTransactions,  // All proposed and accepted transactions.
        sValidations,     // Received validations.
        sPeerStatus,      // Peer status changes.
        sConsensusPhase,  // Consensus phase
        sBookChanges,     // Per-ledger order book changes
        sLastEntry        // Any new entry must be ADDED ABOVE this one
    };

    std::array<SubMapType, SubTypes::sLastEntry> mStreamMaps;

    ServerFeeSummary mLastFeeSummary;

    JobQueue& m_job_queue;

    // Whether we are in standalone mode.
    bool const m_standalone;

    // The number of nodes that we need to consider ourselves connected.
    std::size_t const minPeerCount_;

    // Transaction batching.
    std::condition_variable mCond;
    std::mutex mMutex;
    DispatchState mDispatchState = DispatchState::none;
    std::vector<TransactionStatus> mTransactions;

    StateAccounting accounting_{};

private:
    struct Stats
    {
        template <class Handler>
        Stats(
            Handler const& handler,
            beast::insight::Collector::ptr const& collector)
            : hook(collector->make_hook(handler))
            , disconnected_duration(collector->make_gauge(
                  "State_Accounting",
                  "Disconnected_duration"))
            , connected_duration(collector->make_gauge(
                  "State_Accounting",
                  "Connected_duration"))
            , syncing_duration(
                  collector->make_gauge("State_Accounting", "Syncing_duration"))
            , tracking_duration(collector->make_gauge(
                  "State_Accounting",
                  "Tracking_duration"))
            , full_duration(
                  collector->make_gauge("State_Accounting", "Full_duration"))
            , disconnected_transitions(collector->make_gauge(
                  "State_Accounting",
                  "Disconnected_transitions"))
            , connected_transitions(collector->make_gauge(
                  "State_Accounting",
                  "Connected_transitions"))
            , syncing_transitions(collector->make_gauge(
                  "State_Accounting",
                  "Syncing_transitions"))
            , tracking_transitions(collector->make_gauge(
                  "State_Accounting",
                  "Tracking_transitions"))
            , full_transitions(
                  collector->make_gauge("State_Accounting", "Full_transitions"))
        {
        }

        beast::insight::Hook hook;
        beast::insight::Gauge disconnected_duration;
        beast::insight::Gauge connected_duration;
        beast::insight::Gauge syncing_duration;
        beast::insight::Gauge tracking_duration;
        beast::insight::Gauge full_duration;

        beast::insight::Gauge disconnected_transitions;
        beast::insight::Gauge connected_transitions;
        beast::insight::Gauge syncing_transitions;
        beast::insight::Gauge tracking_transitions;
        beast::insight::Gauge full_transitions;
    };

    std::mutex m_statsMutex;  // Mutex to lock m_stats
    Stats m_stats;

private:
    void
    collect_metrics();
};

//------------------------------------------------------------------------------

std::array<char const*, 5> const NetworkOPsImp::states_{
    {"disconnected", "connected", "syncing", "tracking", "full"}};

static auto const genesisAccountId = calcAccountID(
    generateKeyPair(KeyType::secp256k1, generateSeed("masterpassphrase"))
        .first);

//------------------------------------------------------------------------------
inline OperatingMode
NetworkOPsImp::getOperatingMode() const
{
    return mMode;
}

inline std::string
NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
{
    return strOperatingMode(mMode, admin);
}

inline void
NetworkOPsImp::setStandAlone()
{
    setMode(OperatingMode::FULL);
}

inline void
NetworkOPsImp::setNeedNetworkLedger()
{
    needNetworkLedger_ = true;
}

inline void
NetworkOPsImp::clearNeedNetworkLedger()
{
    needNetworkLedger_ = false;
}

inline bool
NetworkOPsImp::isNeedNetworkLedger()
{
    return needNetworkLedger_;
}

inline bool
NetworkOPsImp::isFull()
{
    return !needNetworkLedger_ && (mMode == OperatingMode::FULL);
}

std::string
NetworkOPsImp::getHostId(bool forAdmin)
{
    static std::string const hostname = boost::asio::ip::host_name();

    if (forAdmin)
        return hostname;

    // For non-admin uses hash the node public key into a
    // single RFC1751 word:
    static std::string const shroudedHostId = [this]() {
        auto const& id = app_.nodeIdentity();

        return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
    }();

    return shroudedHostId;
}

void
NetworkOPsImp::setStateTimer()
{
    setHeartbeatTimer();

    // Only do this work if a cluster is configured
    if (app_.cluster().size() != 0)
        setClusterTimer();
}

void
NetworkOPsImp::setTimer(
    boost::asio::steady_timer& timer,
    std::chrono::milliseconds const& expiry_time,
    std::function<void()> onExpire,
    std::function<void()> onError)
{
    // Only start the timer if waitHandlerCounter_ is not yet joined.
    if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
            [this, onExpire, onError](boost::system::error_code const& e) {
                if ((e.value() == boost::system::errc::success) &&
                    (!m_job_queue.isStopped()))
                {
                    onExpire();
                }
                // Recover as best we can if an unexpected error occurs.
                if (e.value() != boost::system::errc::success &&
                    e.value() != boost::asio::error::operation_aborted)
                {
                    // Try again later and hope for the best.
                    JLOG(m_journal.error())
                        << "Timer got error '" << e.message()
                        << "'. Restarting timer.";
                    onError();
                }
            }))
    {
        timer.expires_after(expiry_time);
        timer.async_wait(std::move(*optionalCountedHandler));
    }
}
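
// Note: setTimer() wraps the completion handler in waitHandlerCounter_; once
// stop() has joined that counter, wrap() returns nothing and no new timer is
// started. Any error other than success or operation_aborted is logged and
// handed to the caller-supplied onError callback, which each caller uses to
// re-arm its own timer.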

void
NetworkOPsImp::setHeartbeatTimer()
{
    setTimer(
        heartbeatTimer_,
        mConsensus.parms().ledgerGRANULARITY,
        [this]() {
            m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
                processHeartbeatTimer();
            });
        },
        [this]() { setHeartbeatTimer(); });
}

void
NetworkOPsImp::setClusterTimer()
{
    using namespace std::chrono_literals;

    setTimer(
        clusterTimer_,
        10s,
        [this]() {
            m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
                processClusterTimer();
            });
        },
        [this]() { setClusterTimer(); });
}

void
NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
{
    JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
                            << toBase58(subInfo.index_->accountId_);
    using namespace std::chrono_literals;
    setTimer(
        accountHistoryTxTimer_,
        4s,
        [this, subInfo]() { addAccountHistoryJob(subInfo); },
        [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
}

void
NetworkOPsImp::processHeartbeatTimer()
{
    RclConsensusLogger clog(
        "Heartbeat Timer", mConsensus.validating(), m_journal);
    {
        std::unique_lock lock{app_.getMasterMutex()};

        // VFALCO NOTE This is for diagnosing a crash on exit
        LoadManager& mgr(app_.getLoadManager());
        mgr.heartbeat();

        std::size_t const numPeers = app_.overlay().size();

        // do we have sufficient peers? If not, we are disconnected.
        if (numPeers < minPeerCount_)
        {
            if (mMode != OperatingMode::DISCONNECTED)
            {
                setMode(OperatingMode::DISCONNECTED);
                std::stringstream ss;
                ss << "Node count (" << numPeers << ") has fallen "
                   << "below required minimum (" << minPeerCount_ << ").";
                JLOG(m_journal.warn()) << ss.str();
                CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
            }
            else
            {
                CLOG(clog.ss())
                    << "already DISCONNECTED. too few peers (" << numPeers
                    << "), need at least " << minPeerCount_;
            }

            // MasterMutex lock need not be held to call setHeartbeatTimer()
            lock.unlock();
            // We do not call mConsensus.timerEntry until there are enough
            // peers providing meaningful inputs to consensus
            setHeartbeatTimer();

            return;
        }

        if (mMode == OperatingMode::DISCONNECTED)
        {
            setMode(OperatingMode::CONNECTED);
            JLOG(m_journal.info())
                << "Node count (" << numPeers << ") is sufficient.";
            CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
                            << " peers. ";
        }

        // Check if the last validated ledger forces a change between these
        // states.
        auto origMode = mMode.load();
        CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
        if (mMode == OperatingMode::SYNCING)
            setMode(OperatingMode::SYNCING);
        else if (mMode == OperatingMode::CONNECTED)
            setMode(OperatingMode::CONNECTED);
        auto newMode = mMode.load();
        if (origMode != newMode)
        {
            CLOG(clog.ss())
                << ", changing to " << strOperatingMode(newMode, true);
        }
        CLOG(clog.ss()) << ". ";
    }

    mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());

    CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
    ConsensusPhase const currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
        CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
    }
    CLOG(clog.ss()) << ". ";

    setHeartbeatTimer();
}

void
NetworkOPsImp::processClusterTimer()
{
    if (app_.cluster().size() == 0)
        return;

    using namespace std::chrono_literals;

    bool const update = app_.cluster().update(
        app_.nodeIdentity().first,
        "",
        (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
            ? app_.getFeeTrack().getLocalFee()
            : 0,
        app_.timeKeeper().now());

    if (!update)
    {
        JLOG(m_journal.debug()) << "Too soon to send cluster update";
        setClusterTimer();
        return;
    }

    protocol::TMCluster cluster;
    app_.cluster().for_each([&cluster](ClusterNode const& node) {
        protocol::TMClusterNode& n = *cluster.add_clusternodes();
        n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
        n.set_reporttime(node.getReportTime().time_since_epoch().count());
        n.set_nodeload(node.getLoadFee());
        if (!node.name().empty())
            n.set_nodename(node.name());
    });

    Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
    for (auto& item : gossip.items)
    {
        protocol::TMLoadSource& node = *cluster.add_loadsources();
        node.set_name(to_string(item.address));
        node.set_cost(item.balance);
    }
    app_.overlay().foreach(send_if(
        std::make_shared<Message>(cluster, protocol::mtCLUSTER),
        peer_in_cluster()));
    setClusterTimer();
}

//------------------------------------------------------------------------------

std::string
NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
    const
{
    if (mode == OperatingMode::FULL && admin)
    {
        auto const consensusMode = mConsensus.mode();
        if (consensusMode != ConsensusMode::wrongLedger)
        {
            if (consensusMode == ConsensusMode::proposing)
                return "proposing";

            if (mConsensus.validating())
                return "validating";
        }
    }

    return states_[static_cast<std::size_t>(mode)];
}

void
NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
{
    if (isNeedNetworkLedger())
    {
        // Nothing we can do if we've never been in sync
        return;
    }

    // Enforce Network bar for batch txn
    if (iTrans->isFlag(tfInnerBatchTxn) &&
        m_ledgerMaster.getValidatedRules().enabled(featureBatch))
    {
        JLOG(m_journal.error())
            << "Submitted transaction invalid: tfInnerBatchTxn flag present.";
        return;
    }

    // this is an asynchronous interface
    auto const trans = sterilize(*iTrans);

    auto const txid = trans->getTransactionID();
    auto const flags = app_.getHashRouter().getFlags(txid);

    if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
    {
        JLOG(m_journal.warn()) << "Submitted transaction cached bad";
        return;
    }

    try
    {
        auto const [validity, reason] = checkValidity(
            app_.getHashRouter(),
            *trans,
            m_ledgerMaster.getValidatedRules(),
            app_.config());

        if (validity != Validity::Valid)
        {
            JLOG(m_journal.warn())
                << "Submitted transaction invalid: " << reason;
            return;
        }
    }
    catch (std::exception const& ex)
    {
        JLOG(m_journal.warn())
            << "Exception checking transaction " << txid << ": " << ex.what();

        return;
    }

    std::string reason;

    auto tx = std::make_shared<Transaction>(trans, reason, app_);

    m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
        auto t = tx;
        processTransaction(t, false, false, FailHard::no);
    });
}
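
// Note: submitTransaction() is the entry point for locally submitted
// transactions: it re-serializes the STTx via sterilize() so the caller
// cannot share state with the network path, rejects anything HashRouter has
// already marked bad, checks validity on the calling thread, and then hands
// the transaction to a jtTRANSACTION job for asynchronous processing.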

bool
NetworkOPsImp::preProcessTransaction(std::shared_ptr<Transaction>& transaction)
{
    auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());

    if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
    {
        // cached bad
        JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
        transaction->setStatus(INVALID);
        transaction->setResult(temBAD_SIGNATURE);
        return false;
    }

    auto const view = m_ledgerMaster.getCurrentLedger();

    // This function is called by several different parts of the codebase
    // under no circumstances will we ever accept an inner txn within a batch
    // txn from the network.
    auto const sttx = *transaction->getSTransaction();
    if (sttx.isFlag(tfInnerBatchTxn) && view->rules().enabled(featureBatch))
    {
        transaction->setStatus(INVALID);
        transaction->setResult(temINVALID_FLAG);
        app_.getHashRouter().setFlags(
            transaction->getID(), HashRouterFlags::BAD);
        return false;
    }

    // NOTE eahennis - I think this check is redundant,
    // but I'm not 100% sure yet.
    // If so, only cost is looking up HashRouter flags.
    auto const [validity, reason] =
        checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config());
    XRPL_ASSERT(
        validity == Validity::Valid,
        "ripple::NetworkOPsImp::processTransaction : valid validity");

    // Not concerned with local checks at this point.
    if (validity == Validity::SigBad)
    {
        JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
        transaction->setStatus(INVALID);
        transaction->setResult(temBAD_SIGNATURE);
        app_.getHashRouter().setFlags(
            transaction->getID(), HashRouterFlags::BAD);
        return false;
    }

    // canonicalize can change our pointer
    app_.getMasterTransaction().canonicalize(&transaction);

    return true;
}

void
NetworkOPsImp::processTransaction(
    std::shared_ptr<Transaction>& transaction,
    bool bUnlimited,
    bool bLocal,
    FailHard failType)
{
    auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");

    // preProcessTransaction can change our pointer
    if (!preProcessTransaction(transaction))
        return;

    if (bLocal)
        doTransactionSync(transaction, bUnlimited, failType);
    else
        doTransactionAsync(transaction, bUnlimited, failType);
}

void
NetworkOPsImp::doTransactionAsync(
    std::shared_ptr<Transaction> transaction,
    bool bUnlimited,
    FailHard failType)
{
    std::lock_guard lock(mMutex);

    if (transaction->getApplying())
        return;

    mTransactions.push_back(
        TransactionStatus(transaction, bUnlimited, false, failType));
    transaction->setApplying();

    if (mDispatchState == DispatchState::none)
    {
        if (m_job_queue.addJob(
                jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
        {
            mDispatchState = DispatchState::scheduled;
        }
    }
}
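
// Note: the async path is fire-and-forget. The transaction is appended to
// the shared batch under mMutex, the applying flag prevents the same
// transaction from being queued twice, and at most one jtBATCH job is
// scheduled at a time (tracked by mDispatchState).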

void
NetworkOPsImp::doTransactionSync(
    std::shared_ptr<Transaction> transaction,
    bool bUnlimited,
    FailHard failType)
{
    std::unique_lock<std::mutex> lock(mMutex);

    if (!transaction->getApplying())
    {
        mTransactions.push_back(
            TransactionStatus(transaction, bUnlimited, true, failType));
        transaction->setApplying();
    }

    doTransactionSyncBatch(
        lock, [&transaction](std::unique_lock<std::mutex> const&) {
            return transaction->getApplying();
        });
}

void
NetworkOPsImp::doTransactionSyncBatch(
    std::unique_lock<std::mutex>& lock,
    std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback)
{
    do
    {
        if (mDispatchState == DispatchState::running)
        {
            // A batch processing job is already running, so wait.
            mCond.wait(lock);
        }
        else
        {
            apply(lock);

            if (mTransactions.size())
            {
                // More transactions need to be applied, but by another job.
                if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
                        transactionBatch();
                    }))
                {
                    mDispatchState = DispatchState::scheduled;
                }
            }
        }
    } while (retryCallback(lock));
}
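
// Note: the synchronous path blocks the submitter until its transaction is
// no longer marked as applying: if a batch job is already running it waits
// on mCond (signalled at the end of apply()), otherwise it applies the batch
// on the calling thread and schedules a follow-up jtBATCH job for any
// transactions that remain.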

void
NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set)
{
    auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXNSet");
    std::vector<std::shared_ptr<Transaction>> candidates;
    candidates.reserve(set.size());
    for (auto const& [_, tx] : set)
    {
        std::string reason;
        auto transaction = std::make_shared<Transaction>(tx, reason, app_);

        if (transaction->getStatus() == INVALID)
        {
            if (!reason.empty())
            {
                JLOG(m_journal.trace())
                    << "Exception checking transaction: " << reason;
            }
            app_.getHashRouter().setFlags(
                tx->getTransactionID(), HashRouterFlags::BAD);
            continue;
        }

        // preProcessTransaction can change our pointer
        if (!preProcessTransaction(transaction))
            continue;

        candidates.emplace_back(transaction);
    }

    std::vector<TransactionStatus> transactions;
    transactions.reserve(candidates.size());

    std::unique_lock lock(mMutex);

    for (auto& transaction : candidates)
    {
        if (!transaction->getApplying())
        {
            transactions.emplace_back(transaction, false, false, FailHard::no);
            transaction->setApplying();
        }
    }

    if (mTransactions.empty())
        mTransactions.swap(transactions);
    else
    {
        mTransactions.reserve(mTransactions.size() + transactions.size());
        for (auto& t : transactions)
            mTransactions.push_back(std::move(t));
    }

    doTransactionSyncBatch(lock, [&](std::unique_lock<std::mutex> const&) {
        XRPL_ASSERT(
            lock.owns_lock(),
            "ripple::NetworkOPsImp::processTransactionSet has lock");
        return std::any_of(
            mTransactions.begin(), mTransactions.end(), [](auto const& t) {
                return t.transaction->getApplying();
            });
    });
}
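
// Note: processTransactionSet() handles whole transaction sets, such as
// those acquired during consensus. Candidates are vetted individually first;
// they are then marked as applying under a single lock acquisition so the
// batch cannot race with the other submission paths.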

void
NetworkOPsImp::transactionBatch()
{
    std::unique_lock<std::mutex> lock(mMutex);

    if (mDispatchState == DispatchState::running)
        return;

    while (mTransactions.size())
    {
        apply(lock);
    }
}

void
NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
{
    std::vector<TransactionStatus> submit_held;
    std::vector<TransactionStatus> transactions;
    mTransactions.swap(transactions);
    XRPL_ASSERT(
        !transactions.empty(),
        "ripple::NetworkOPsImp::apply : non-empty transactions");
    XRPL_ASSERT(
        mDispatchState != DispatchState::running,
        "ripple::NetworkOPsImp::apply : is not running");

    mDispatchState = DispatchState::running;

    batchLock.unlock();

    {
        std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
        bool changed = false;
        {
            std::unique_lock ledgerLock{
                m_ledgerMaster.peekMutex(), std::defer_lock};
            std::lock(masterLock, ledgerLock);

            app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
                for (TransactionStatus& e : transactions)
                {
                    // we check before adding to the batch
                    ApplyFlags flags = tapNONE;
                    if (e.admin)
                        flags |= tapUNLIMITED;

                    if (e.failType == FailHard::yes)
                        flags |= tapFAIL_HARD;

                    auto const result = app_.getTxQ().apply(
                        app_, view, e.transaction->getSTransaction(), flags, j);
                    e.result = result.ter;
                    e.applied = result.applied;
                    changed = changed || result.applied;
                }
                return changed;
            });
        }
        if (changed)
            reportFeeChange();

        std::optional<LedgerIndex> validatedLedgerIndex;
        if (auto const l = m_ledgerMaster.getValidatedLedger())
            validatedLedgerIndex = l->info().seq;

        auto newOL = app_.openLedger().current();
        for (TransactionStatus& e : transactions)
        {
            e.transaction->clearSubmitResult();

            if (e.applied)
            {
                pubProposedTransaction(
                    newOL, e.transaction->getSTransaction(), e.result);
                e.transaction->setApplied();
            }

            e.transaction->setResult(e.result);

            if (isTemMalformed(e.result))
                app_.getHashRouter().setFlags(
                    e.transaction->getID(), HashRouterFlags::BAD);

#ifdef DEBUG
            if (e.result != tesSUCCESS)
            {
                std::string token, human;

                if (transResultInfo(e.result, token, human))
                {
                    JLOG(m_journal.info())
                        << "TransactionResult: " << token << ": " << human;
                }
            }
#endif

            bool addLocal = e.local;

            if (e.result == tesSUCCESS)
            {
                JLOG(m_journal.debug())
                    << "Transaction is now included in open ledger";
                e.transaction->setStatus(INCLUDED);

                // Pop as many "reasonable" transactions for this account as
                // possible. "Reasonable" means they have sequential sequence
                // numbers, or use tickets.
                auto const& txCur = e.transaction->getSTransaction();

                std::size_t count = 0;
                for (auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
                     txNext && count < maxPoppedTransactions;
                     txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
                {
                    if (!batchLock.owns_lock())
                        batchLock.lock();
                    std::string reason;
                    auto const trans = sterilize(*txNext);
                    auto t = std::make_shared<Transaction>(trans, reason, app_);
                    if (t->getApplying())
                        break;
                    submit_held.emplace_back(t, false, false, FailHard::no);
                    t->setApplying();
                }
                if (batchLock.owns_lock())
                    batchLock.unlock();
            }
            else if (e.result == tefPAST_SEQ)
            {
                // duplicate or conflict
                JLOG(m_journal.info()) << "Transaction is obsolete";
                e.transaction->setStatus(OBSOLETE);
            }
            else if (e.result == terQUEUED)
            {
                JLOG(m_journal.debug())
                    << "Transaction is likely to claim a"
                    << " fee, but is queued until fee drops";

                e.transaction->setStatus(HELD);
                // Add to held transactions, because it could get
                // kicked out of the queue, and this will try to
                // put it back.
                m_ledgerMaster.addHeldTransaction(e.transaction);
                e.transaction->setQueued();
                e.transaction->setKept();
            }
            else if (
                isTerRetry(e.result) || isTelLocal(e.result) ||
                isTefFailure(e.result))
            {
                if (e.failType != FailHard::yes)
                {
                    auto const lastLedgerSeq =
                        e.transaction->getSTransaction()->at(
                            ~sfLastLedgerSequence);
                    auto const ledgersLeft = lastLedgerSeq
                        ? *lastLedgerSeq -
                            m_ledgerMaster.getCurrentLedgerIndex()
                        : std::optional<LedgerIndex>{};
                    // If any of these conditions are met, the transaction can
                    // be held:
                    // 1. It was submitted locally. (Note that this flag is only
                    //    true on the initial submission.)
                    // 2. The transaction has a LastLedgerSequence, and the
                    //    LastLedgerSequence is fewer than LocalTxs::holdLedgers
                    //    (5) ledgers into the future. (Remember that an
                    //    unseated optional compares as less than all seated
                    //    values, so it has to be checked explicitly first.)
                    // 3. The HashRouterFlags::BAD flag is not set on the txID.
                    //    (setFlags checks before setting. If the flag is set,
                    //    it returns false, which means it's been held once
                    //    without one of the other conditions, so don't hold
                    //    it again. Time's up!)
                    //
                    if (e.local ||
                        (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
                        app_.getHashRouter().setFlags(
                            e.transaction->getID(), HashRouterFlags::HELD))
                    {
                        // transaction should be held
                        JLOG(m_journal.debug())
                            << "Transaction should be held: " << e.result;
                        e.transaction->setStatus(HELD);
                        m_ledgerMaster.addHeldTransaction(e.transaction);
                        e.transaction->setKept();
                    }
                    else
                        JLOG(m_journal.debug())
                            << "Not holding transaction "
                            << e.transaction->getID() << ": "
                            << (e.local ? "local" : "network") << ", "
                            << "result: " << e.result << " ledgers left: "
                            << (ledgersLeft ? to_string(*ledgersLeft)
                                            : "unspecified");
                }
            }
            else
            {
                JLOG(m_journal.debug())
                    << "Status other than success " << e.result;
                e.transaction->setStatus(INVALID);
            }

            auto const enforceFailHard =
                e.failType == FailHard::yes && !isTesSuccess(e.result);

            if (addLocal && !enforceFailHard)
            {
                m_localTX->push_back(
                    m_ledgerMaster.getCurrentLedgerIndex(),
                    e.transaction->getSTransaction());
                e.transaction->setKept();
            }

            if ((e.applied ||
                 ((mMode != OperatingMode::FULL) &&
                  (e.failType != FailHard::yes) && e.local) ||
                 (e.result == terQUEUED)) &&
                !enforceFailHard)
            {
                auto const toSkip =
                    app_.getHashRouter().shouldRelay(e.transaction->getID());
                if (auto const sttx = *(e.transaction->getSTransaction());
                    toSkip &&
                    // Skip relaying if it's an inner batch txn and batch
                    // feature is enabled
                    !(sttx.isFlag(tfInnerBatchTxn) &&
                      newOL->rules().enabled(featureBatch)))
                {
                    protocol::TMTransaction tx;
                    Serializer s;

                    sttx.add(s);
                    tx.set_rawtransaction(s.data(), s.size());
                    tx.set_status(protocol::tsCURRENT);
                    tx.set_receivetimestamp(
                        app_.timeKeeper().now().time_since_epoch().count());
                    tx.set_deferred(e.result == terQUEUED);
                    // FIXME: This should be when we received it
                    app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
                    e.transaction->setBroadcast();
                }
            }

            if (validatedLedgerIndex)
            {
                auto [fee, accountSeq, availableSeq] =
                    app_.getTxQ().getTxRequiredFeeAndSeq(
                        *newOL, e.transaction->getSTransaction());
                e.transaction->setCurrentLedgerState(
                    *validatedLedgerIndex, fee, accountSeq, availableSeq);
            }
        }
    }

    batchLock.lock();

    for (TransactionStatus& e : transactions)
        e.transaction->clearApplying();

    if (!submit_held.empty())
    {
        if (mTransactions.empty())
            mTransactions.swap(submit_held);
        else
        {
            mTransactions.reserve(mTransactions.size() + submit_held.size());
            for (auto& e : submit_held)
                mTransactions.push_back(std::move(e));
        }
    }

    mCond.notify_all();

    mDispatchState = DispatchState::none;
}
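
// Note on locking in apply(): the batch lock is released while the batch is
// applied under the master and ledger mutexes, briefly reacquired while
// popping held transactions for the same account, and reacquired at the end
// to clear the applying flags and wake synchronous submitters via mCond.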

//
// Owner functions
//

Json::Value
NetworkOPsImp::getOwnerInfo(
    std::shared_ptr<ReadView const> lpLedger,
    AccountID const& account)
{
    Json::Value jvObjects(Json::objectValue);
    auto root = keylet::ownerDir(account);
    auto sleNode = lpLedger->read(keylet::page(root));
    if (sleNode)
    {
        std::uint64_t uNodeDir;

        do
        {
            for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
            {
                auto sleCur = lpLedger->read(keylet::child(uDirEntry));
                XRPL_ASSERT(
                    sleCur,
                    "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");

                switch (sleCur->getType())
                {
                    case ltOFFER:
                        if (!jvObjects.isMember(jss::offers))
                            jvObjects[jss::offers] =
                                Json::Value(Json::arrayValue);

                        jvObjects[jss::offers].append(
                            sleCur->getJson(JsonOptions::none));
                        break;

                    case ltRIPPLE_STATE:
                        if (!jvObjects.isMember(jss::ripple_lines))
                        {
                            jvObjects[jss::ripple_lines] =
                                Json::Value(Json::arrayValue);
                        }

                        jvObjects[jss::ripple_lines].append(
                            sleCur->getJson(JsonOptions::none));
                        break;

                    case ltACCOUNT_ROOT:
                    case ltDIR_NODE:
                    default:
                        UNREACHABLE(
                            "ripple::NetworkOPsImp::getOwnerInfo : invalid "
                            "type");
                        break;
                }
            }

            uNodeDir = sleNode->getFieldU64(sfIndexNext);

            if (uNodeDir)
            {
                sleNode = lpLedger->read(keylet::page(root, uNodeDir));
                XRPL_ASSERT(
                    sleNode,
                    "ripple::NetworkOPsImp::getOwnerInfo : read next page");
            }
        } while (uNodeDir);
    }

    return jvObjects;
}

//
// Other
//

inline bool
NetworkOPsImp::isBlocked()
{
    return isAmendmentBlocked() || isUNLBlocked();
}

inline bool
NetworkOPsImp::isAmendmentBlocked()
{
    return amendmentBlocked_;
}

void
NetworkOPsImp::setAmendmentBlocked()
{
    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}

inline bool
NetworkOPsImp::isAmendmentWarned()
{
    return !amendmentBlocked_ && amendmentWarned_;
}

inline void
NetworkOPsImp::setAmendmentWarned()
{
    amendmentWarned_ = true;
}

inline void
NetworkOPsImp::clearAmendmentWarned()
{
    amendmentWarned_ = false;
}

inline bool
NetworkOPsImp::isUNLBlocked()
{
    return unlBlocked_;
}

void
NetworkOPsImp::setUNLBlocked()
{
    unlBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}

inline void
NetworkOPsImp::clearUNLBlocked()
{
    unlBlocked_ = false;
}

bool
NetworkOPsImp::checkLastClosedLedger(
    Overlay::PeerSequence const& peerList,
    uint256& networkClosed)
{
    // Returns true if there's an *abnormal* ledger issue; a normal change
    // while in TRACKING mode should return false. Do we have sufficient
    // validations for our last closed ledger? Or do sufficient nodes agree?
    // And do we have no better ledger available? If so, we are either
    // tracking or full.

    JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";

    auto const ourClosed = m_ledgerMaster.getClosedLedger();

    if (!ourClosed)
        return false;

    uint256 closedLedger = ourClosed->info().hash;
    uint256 prevClosedLedger = ourClosed->info().parentHash;
    JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
    JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;

    //-------------------------------------------------------------------------
    // Determine preferred last closed ledger

    auto& validations = app_.getValidations();
    JLOG(m_journal.debug())
        << "ValidationTrie " << Json::Compact(validations.getJsonTrie());

    // Will rely on peer LCL if no trusted validations exist
    hash_map<uint256, std::uint32_t> peerCounts;
    peerCounts[closedLedger] = 0;
    if (mMode >= OperatingMode::TRACKING)
        peerCounts[closedLedger]++;

    for (auto& peer : peerList)
    {
        uint256 peerLedger = peer->getClosedLedgerHash();

        if (peerLedger.isNonZero())
            ++peerCounts[peerLedger];
    }

    for (auto const& it : peerCounts)
        JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;

    uint256 preferredLCL = validations.getPreferredLCL(
        RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
        m_ledgerMaster.getValidLedgerIndex(),
        peerCounts);

    bool switchLedgers = preferredLCL != closedLedger;
    if (switchLedgers)
        closedLedger = preferredLCL;
    //-------------------------------------------------------------------------
    if (switchLedgers && (closedLedger == prevClosedLedger))
    {
        // don't switch to our own previous ledger
        JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
        networkClosed = ourClosed->info().hash;
        switchLedgers = false;
    }
    else
        networkClosed = closedLedger;

    if (!switchLedgers)
        return false;

    auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);

    if (!consensus)
        consensus = app_.getInboundLedgers().acquire(
            closedLedger, 0, InboundLedger::Reason::CONSENSUS);

    if (consensus &&
        (!m_ledgerMaster.canBeCurrent(consensus) ||
         !m_ledgerMaster.isCompatible(
             *consensus, m_journal.debug(), "Not switching")))
    {
        // Don't switch to a ledger not on the validated chain
        // or with an invalid close time or sequence
        networkClosed = ourClosed->info().hash;
        return false;
    }

    JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
    JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
                           << getJson({*ourClosed, {}});
    JLOG(m_journal.info()) << "Net LCL " << closedLedger;

    if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
    {
        setMode(OperatingMode::CONNECTED);
    }

    if (consensus)
    {
        // FIXME: If this rewinds the ledger sequence, or has the same
        // sequence, we should update the status on any stored transactions
        // in the invalidated ledgers.
        switchLastClosedLedger(consensus);
    }

    return true;
}
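
// Note: checkLastClosedLedger() combines our own closed ledger with peer
// reports and trusted validations to pick a preferred last closed ledger.
// It refuses to switch back to our own parent ledger or onto a ledger that
// is incompatible with the validated chain, and a genuine switch drops the
// server from TRACKING/FULL back to CONNECTED.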

void
NetworkOPsImp::switchLastClosedLedger(
    std::shared_ptr<Ledger const> const& newLCL)
{
    // set the newLCL as our last closed ledger -- this is abnormal code
    JLOG(m_journal.error())
        << "JUMP last closed ledger to " << newLCL->info().hash;

    clearNeedNetworkLedger();

    // Update fee computations.
    app_.getTxQ().processClosedLedger(app_, *newLCL, true);

    // Caller must own master lock
    {
        // Apply tx in old open ledger to new
        // open ledger. Then apply local tx.

        auto retries = m_localTX->getTxSet();
        auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
        std::optional<Rules> rules;
        if (lastVal)
            rules = makeRulesGivenLedger(*lastVal, app_.config().features);
        else
            rules.emplace(app_.config().features);
        app_.openLedger().accept(
            app_,
            *rules,
            newLCL,
            OrderedTxs({}),
            false,
            retries,
            tapNONE,
            "jump",
            [&](OpenView& view, beast::Journal j) {
                // Stuff the ledger with transactions from the queue.
                return app_.getTxQ().accept(app_, view);
            });
    }

    m_ledgerMaster.switchLCL(newLCL);

    protocol::TMStatusChange s;
    s.set_newevent(protocol::neSWITCHED_LEDGER);
    s.set_ledgerseq(newLCL->info().seq);
    s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
    s.set_ledgerhashprevious(
        newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
    s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());

    app_.overlay().foreach(
        send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
}

bool
NetworkOPsImp::beginConsensus(
    uint256 const& networkClosed,
    std::unique_ptr<std::stringstream> const& clog)
{
    XRPL_ASSERT(
        networkClosed.isNonZero(),
        "ripple::NetworkOPsImp::beginConsensus : nonzero input");

    auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();

    JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
                           << " with LCL " << closingInfo.parentHash;

    auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);

    if (!prevLedger)
    {
        // this shouldn't happen unless we jump ledgers
        if (mMode == OperatingMode::FULL)
        {
            JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
            setMode(OperatingMode::TRACKING);
            CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
        }

        CLOG(clog) << "beginConsensus no previous ledger. ";
        return false;
    }

    XRPL_ASSERT(
        prevLedger->info().hash == closingInfo.parentHash,
        "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
        "parent");
    XRPL_ASSERT(
        closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
        "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
        "hash");

    if (prevLedger->rules().enabled(featureNegativeUNL))
        app_.validators().setNegativeUNL(prevLedger->negativeUNL());
    TrustChanges const changes = app_.validators().updateTrusted(
        app_.getValidations().getCurrentNodeIDs(),
        closingInfo.parentCloseTime,
        *this,
        app_.overlay(),
        app_.getHashRouter());

    if (!changes.added.empty() || !changes.removed.empty())
    {
        app_.getValidations().trustChanged(changes.added, changes.removed);
        // Update the AmendmentTable so it tracks the current validators.
        app_.getAmendmentTable().trustChanged(
            app_.validators().getQuorumKeys().second);
    }

    mConsensus.startRound(
        app_.timeKeeper().closeTime(),
        networkClosed,
        prevLedger,
        changes.removed,
        changes.added,
        clog);

    ConsensusPhase const currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
    }

    JLOG(m_journal.debug()) << "Initiating consensus engine";
    return true;
}

bool
NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
{
    auto const& peerKey = peerPos.publicKey();
    if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
    {
        // Could indicate an operator misconfiguration where two nodes are
        // running with the same validator key configured, so this isn't fatal,
        // and it doesn't necessarily indicate peer misbehavior. But since this
        // is a trusted message, it could be a very big deal. Either way, we
        // don't want to relay the proposal. Note that the byzantine behavior
        // detection in handleNewValidation will notify other peers.
        //
        // Another, innocuous explanation is unusual message routing and delays,
        // causing this node to receive its own messages back.
        JLOG(m_journal.error())
            << "Received a proposal signed by MY KEY from a peer. This may "
               "indicate a misconfiguration where another node has the same "
               "validator key, or may be caused by unusual message routing and "
               "delays.";
        return false;
    }

    return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
}

void
NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
{
    // We now have an additional transaction set
    // either created locally during the consensus process
    // or acquired from a peer

    // Inform peers we have this set
    protocol::TMHaveTransactionSet msg;
    msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
    msg.set_status(protocol::tsHAVE);
    app_.overlay().foreach(
        send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));

    // We acquired it because consensus asked us to
    if (fromAcquire)
        mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
}

void
NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
{
    uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;

    for (auto const& it : app_.overlay().getActivePeers())
    {
        if (it && (it->getClosedLedgerHash() == deadLedger))
        {
            JLOG(m_journal.trace()) << "Killing obsolete peer status";
            it->cycleStatus();
        }
    }

    uint256 networkClosed;
    bool ledgerChange =
        checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);

    if (networkClosed.isZero())
    {
        CLOG(clog) << "endConsensus last closed ledger is zero. ";
        return;
    }

    // WRITEME: Unless we are in FULL and in the process of doing a consensus,
    // we must count how many nodes share our LCL, how many nodes disagree with
    // our LCL, and how many validations our LCL has. We also want to check
    // timing to make sure there shouldn't be a newer LCL. We need this
    // information to do the next three tests.

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::SYNCING)) &&
        !ledgerChange)
    {
        // Count number of peers that agree with us and UNL nodes whose
        // validations we have for LCL. If the ledger is good enough, go to
        // TRACKING - TODO
        if (!needNetworkLedger_)
            setMode(OperatingMode::TRACKING);
    }

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::TRACKING)) &&
        !ledgerChange)
    {
        // check if the ledger is good enough to go to FULL
        // Note: Do not go to FULL if we don't have the previous ledger
        // check if the ledger is bad enough to go to CONNECTED -- TODO
        auto current = m_ledgerMaster.getCurrentLedger();
        if (app_.timeKeeper().now() < (current->info().parentCloseTime +
                                       2 * current->info().closeTimeResolution))
        {
            setMode(OperatingMode::FULL);
        }
    }

    beginConsensus(networkClosed, clog);
}

void
NetworkOPsImp::consensusViewChange()
{
    if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
    {
        setMode(OperatingMode::CONNECTED);
    }
}

void
NetworkOPsImp::pubManifest(Manifest const& mo)
{
    // VFALCO consider std::shared_mutex
    std::lock_guard sl(mSubLock);

    if (!mStreamMaps[sManifests].empty())
    {
        Json::Value jvObj(Json::objectValue);

        jvObj[jss::type] = "manifestReceived";
        jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
        if (mo.signingKey)
            jvObj[jss::signing_key] =
                toBase58(TokenType::NodePublic, *mo.signingKey);
        jvObj[jss::seq] = Json::UInt(mo.sequence);
        if (auto sig = mo.getSignature())
            jvObj[jss::signature] = strHex(*sig);
        jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
        if (!mo.domain.empty())
            jvObj[jss::domain] = mo.domain;
        jvObj[jss::manifest] = strHex(mo.serialized);

        for (auto i = mStreamMaps[sManifests].begin();
             i != mStreamMaps[sManifests].end();)
        {
            if (auto p = i->second.lock())
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sManifests].erase(i);
            }
        }
    }
}

NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    XRPAmount fee,
    TxQ::Metrics&& escalationMetrics,
    LoadFeeTrack const& loadFeeTrack)
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , baseFee{fee}
    , em{std::move(escalationMetrics)}
{
}

bool
NetworkOPsImp::ServerFeeSummary::operator!=(
    NetworkOPsImp::ServerFeeSummary const& b) const
{
    if (loadFactorServer != b.loadFactorServer ||
        loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
        em.has_value() != b.em.has_value())
        return true;

    if (em && b.em)
    {
        return (
            em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
            em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
            em->referenceFeeLevel != b.em->referenceFeeLevel);
    }

    return false;
}

// Need to cap uint64 values to uint32 due to JSON limitations
static std::uint32_t
trunc32(std::uint64_t v)
{
    constexpr std::uint64_t max32 = std::numeric_limits<std::uint32_t>::max();

    return std::min(max32, v);
};
2305
2306void
2308{
2309 // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
2310 // list into a local array while holding the lock then release
2311 // the lock and call send on everyone.
2312 //
2314
2315 if (!mStreamMaps[sServer].empty())
2316 {
2318
2320 app_.openLedger().current()->fees().base,
2322 app_.getFeeTrack()};
2323
2324 jvObj[jss::type] = "serverStatus";
2325 jvObj[jss::server_status] = strOperatingMode();
2326 jvObj[jss::load_base] = f.loadBaseServer;
2327 jvObj[jss::load_factor_server] = f.loadFactorServer;
2328 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2329
2330 if (f.em)
2331 {
2332 auto const loadFactor = std::max(
2333 safe_cast<std::uint64_t>(f.loadFactorServer),
2334 mulDiv(
2335 f.em->openLedgerFeeLevel,
2336 f.loadBaseServer,
2337 f.em->referenceFeeLevel)
2339
2340 jvObj[jss::load_factor] = trunc32(loadFactor);
2341 jvObj[jss::load_factor_fee_escalation] =
2342 f.em->openLedgerFeeLevel.jsonClipped();
2343 jvObj[jss::load_factor_fee_queue] =
2344 f.em->minProcessingFeeLevel.jsonClipped();
2345 jvObj[jss::load_factor_fee_reference] =
2346 f.em->referenceFeeLevel.jsonClipped();
2347 }
2348 else
2349 jvObj[jss::load_factor] = f.loadFactorServer;
2350
2351 mLastFeeSummary = f;
2352
2353 for (auto i = mStreamMaps[sServer].begin();
2354 i != mStreamMaps[sServer].end();)
2355 {
2356 InfoSub::pointer p = i->second.lock();
2357
2358 // VFALCO TODO research the possibility of using thread queues and
2359 // linearizing the deletion of subscribers with the
2360 // sending of JSON data.
2361 if (p)
2362 {
2363 p->send(jvObj, true);
2364 ++i;
2365 }
2366 else
2367 {
2368 i = mStreamMaps[sServer].erase(i);
2369 }
2370 }
2371 }
2372}
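// Editor's sketch (illustrative, made-up values): a "server" stream
// subscriber sees messages shaped like the jvObj assembled above, e.g.
//
//     {
//         "type": "serverStatus",
//         "server_status": "full",
//         "load_base": 256,
//         "load_factor": 256,
//         "load_factor_server": 256,
//         "base_fee": 10
//     }
//
// When escalation metrics are present, load_factor is instead
// max(loadFactorServer, openLedgerFeeLevel * loadBaseServer /
// referenceFeeLevel), exactly as computed above.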
2373
2374void
2376{
2378
2379 auto& streamMap = mStreamMaps[sConsensusPhase];
2380 if (!streamMap.empty())
2381 {
2382 Json::Value jvObj(Json::objectValue);
2383 jvObj[jss::type] = "consensusPhase";
2384 jvObj[jss::consensus] = to_string(phase);
2385
2386 for (auto i = streamMap.begin(); i != streamMap.end();)
2387 {
2388 if (auto p = i->second.lock())
2389 {
2390 p->send(jvObj, true);
2391 ++i;
2392 }
2393 else
2394 {
2395 i = streamMap.erase(i);
2396 }
2397 }
2398 }
2399}
2400
2401void
2403{
2404 // VFALCO consider std::shared_mutex
2406
2407 if (!mStreamMaps[sValidations].empty())
2408 {
2409 Json::Value jvObj(Json::objectValue);
2410
2411 auto const signerPublic = val->getSignerPublic();
2412
2413 jvObj[jss::type] = "validationReceived";
2414 jvObj[jss::validation_public_key] =
2415 toBase58(TokenType::NodePublic, signerPublic);
2416 jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
2417 jvObj[jss::signature] = strHex(val->getSignature());
2418 jvObj[jss::full] = val->isFull();
2419 jvObj[jss::flags] = val->getFlags();
2420 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2421 jvObj[jss::data] = strHex(val->getSerializer().slice());
2422 jvObj[jss::network_id] = app_.config().NETWORK_ID;
2423
2424 if (auto version = (*val)[~sfServerVersion])
2425 jvObj[jss::server_version] = std::to_string(*version);
2426
2427 if (auto cookie = (*val)[~sfCookie])
2428 jvObj[jss::cookie] = std::to_string(*cookie);
2429
2430 if (auto hash = (*val)[~sfValidatedHash])
2431 jvObj[jss::validated_hash] = strHex(*hash);
2432
2433 auto const masterKey =
2434 app_.validatorManifests().getMasterKey(signerPublic);
2435
2436 if (masterKey != signerPublic)
2437 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
2438
2439 // NOTE *seq is a number, but old API versions used a string. We replace
2440 // the number with a string using MultiApiJson near the end of this function
2441 if (auto const seq = (*val)[~sfLedgerSequence])
2442 jvObj[jss::ledger_index] = *seq;
2443
2444 if (val->isFieldPresent(sfAmendments))
2445 {
2446 jvObj[jss::amendments] = Json::Value(Json::arrayValue);
2447 for (auto const& amendment : val->getFieldV256(sfAmendments))
2448 jvObj[jss::amendments].append(to_string(amendment));
2449 }
2450
2451 if (auto const closeTime = (*val)[~sfCloseTime])
2452 jvObj[jss::close_time] = *closeTime;
2453
2454 if (auto const loadFee = (*val)[~sfLoadFee])
2455 jvObj[jss::load_fee] = *loadFee;
2456
2457 if (auto const baseFee = val->at(~sfBaseFee))
2458 jvObj[jss::base_fee] = static_cast<double>(*baseFee);
2459
2460 if (auto const reserveBase = val->at(~sfReserveBase))
2461 jvObj[jss::reserve_base] = *reserveBase;
2462
2463 if (auto const reserveInc = val->at(~sfReserveIncrement))
2464 jvObj[jss::reserve_inc] = *reserveInc;
2465
2466 // (The ~ operator converts the Proxy to a std::optional, which
2467 // simplifies later operations)
2468 if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2469 baseFeeXRP && baseFeeXRP->native())
2470 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2471
2472 if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2473 reserveBaseXRP && reserveBaseXRP->native())
2474 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2475
2476 if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2477 reserveIncXRP && reserveIncXRP->native())
2478 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2479
2480 // NOTE Use MultiApiJson to publish two slightly different JSON objects
2481 // for consumers supporting different API versions
2482 MultiApiJson multiObj{jvObj};
2483 multiObj.visit(
2484 RPC::apiVersion<1>, //
2485 [](Json::Value& jvTx) {
2486 // Type conversion for older API versions to string
2487 if (jvTx.isMember(jss::ledger_index))
2488 {
2489 jvTx[jss::ledger_index] =
2490 std::to_string(jvTx[jss::ledger_index].asUInt());
2491 }
2492 });
2493
2494 for (auto i = mStreamMaps[sValidations].begin();
2495 i != mStreamMaps[sValidations].end();)
2496 {
2497 if (auto p = i->second.lock())
2498 {
2499 multiObj.visit(
2500 p->getApiVersion(), //
2501 [&](Json::Value const& jv) { p->send(jv, true); });
2502 ++i;
2503 }
2504 else
2505 {
2506 i = mStreamMaps[sValidations].erase(i);
2507 }
2508 }
2509 }
2510}
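// Editor's note (illustrative): the MultiApiJson above lets one event serve
// every API version. For API version 1 the ledger_index is re-typed to a
// string before sending; later versions keep the number, e.g.
//
//     v1:  { "type": "validationReceived", "ledger_index": "95234567", ... }
//     v2+: { "type": "validationReceived", "ledger_index": 95234567, ... }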
2511
2512void
2514{
2516
2517 if (!mStreamMaps[sPeerStatus].empty())
2518 {
2519 Json::Value jvObj(func());
2520
2521 jvObj[jss::type] = "peerStatusChange";
2522
2523 for (auto i = mStreamMaps[sPeerStatus].begin();
2524 i != mStreamMaps[sPeerStatus].end();)
2525 {
2526 InfoSub::pointer p = i->second.lock();
2527
2528 if (p)
2529 {
2530 p->send(jvObj, true);
2531 ++i;
2532 }
2533 else
2534 {
2535 i = mStreamMaps[sPeerStatus].erase(i);
2536 }
2537 }
2538 }
2539}
2540
2541void
2543{
2544 using namespace std::chrono_literals;
2545 if (om == OperatingMode::CONNECTED)
2546 {
2547 if (app_.getLedgerMaster().getValidatedLedgerAge() < 1min)
2548 om = OperatingMode::SYNCING;
2549 }
2550 else if (om == OperatingMode::SYNCING)
2551 {
2552 if (app_.getLedgerMaster().getValidatedLedgerAge() >= 1min)
2553 om = OperatingMode::CONNECTED;
2554 }
2555
2556 if ((om > OperatingMode::CONNECTED) && isBlocked())
2557 om = OperatingMode::CONNECTED;
2558
2559 if (mMode == om)
2560 return;
2561
2562 mMode = om;
2563
2564 accounting_.mode(om);
2565
2566 JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
2567 pubServer();
2568}
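// Editor's note (illustrative): OperatingMode is ordered DISCONNECTED <
// CONNECTED < SYNCING < TRACKING < FULL, so the (om > OperatingMode::CONNECTED)
// test above demotes a blocked server from any of the synced states back to
// CONNECTED before the assignment takes effect.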
2569
2570bool
2571 NetworkOPsImp::recvValidation(
2572 std::shared_ptr<STValidation> const& val,
2573 std::string const& source)
2574{
2575 JLOG(m_journal.trace())
2576 << "recvValidation " << val->getLedgerHash() << " from " << source;
2577
2578 std::unique_lock lock(validationsMutex_);
2579 BypassAccept bypassAccept = BypassAccept::no;
2580 try
2581 {
2582 if (pendingValidations_.contains(val->getLedgerHash()))
2583 bypassAccept = BypassAccept::yes;
2584 else
2585 pendingValidations_.insert(val->getLedgerHash());
2586 scope_unlock unlock(lock);
2587 handleNewValidation(app_, val, source, bypassAccept, m_journal);
2588 }
2589 catch (std::exception const& e)
2590 {
2591 JLOG(m_journal.warn())
2592 << "Exception thrown for handling new validation "
2593 << val->getLedgerHash() << ": " << e.what();
2594 }
2595 catch (...)
2596 {
2597 JLOG(m_journal.warn())
2598 << "Unknown exception thrown for handling new validation "
2599 << val->getLedgerHash();
2600 }
2601 if (bypassAccept == BypassAccept::no)
2602 {
2603 pendingValidations_.erase(val->getLedgerHash());
2604 }
2605 lock.unlock();
2606
2607 pubValidation(val);
2608
2609 JLOG(m_journal.debug()) << [this, &val]() -> auto {
2610 std::stringstream ss;
2611 ss << "VALIDATION: " << val->render() << " master_key: ";
2612 auto master = app_.validators().getTrustedKey(val->getSignerPublic());
2613 if (master)
2614 {
2615 ss << toBase58(TokenType::NodePublic, *master);
2616 }
2617 else
2618 {
2619 ss << "none";
2620 }
2621 return ss.str();
2622 }();
2623
2624 // We will always relay trusted validations; if configured, we will
2625 // also relay all untrusted validations.
2626 return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2627}
2628
2631{
2632 return mConsensus.getJson(true);
2633}
2634
2635 Json::Value
2636 NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2637{
2638 Json::Value info = Json::objectValue;
2639
2640 // System-level warnings
2641 {
2642 Json::Value warnings{Json::arrayValue};
2643 if (isAmendmentBlocked())
2644 {
2645 Json::Value& w = warnings.append(Json::objectValue);
2646 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2647 w[jss::message] =
2648 "This server is amendment blocked, and must be updated to be "
2649 "able to stay in sync with the network.";
2650 }
2651 if (isUNLBlocked())
2652 {
2653 Json::Value& w = warnings.append(Json::objectValue);
2654 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2655 w[jss::message] =
2656 "This server has an expired validator list. validators.txt "
2657 "may be incorrectly configured or some [validator_list_sites] "
2658 "may be unreachable.";
2659 }
2660 if (admin && isAmendmentWarned())
2661 {
2662 Json::Value& w = warnings.append(Json::objectValue);
2663 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2664 w[jss::message] =
2665 "One or more unsupported amendments have reached majority. "
2666 "Upgrade to the latest version before they are activated "
2667 "to avoid being amendment blocked.";
2668 if (auto const expected =
2669 app_.getAmendmentTable().firstUnsupportedExpected())
2670 {
2671 auto& d = w[jss::details] = Json::objectValue;
2672 d[jss::expected_date] = expected->time_since_epoch().count();
2673 d[jss::expected_date_UTC] = to_string(*expected);
2674 }
2675 }
2676
2677 if (warnings.size())
2678 info[jss::warnings] = std::move(warnings);
2679 }
2680
2681 // hostid: unique string describing the machine
2682 if (human)
2683 info[jss::hostid] = getHostId(admin);
2684
2685 // domain: if configured with a domain, report it:
2686 if (!app_.config().SERVER_DOMAIN.empty())
2687 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2688
2689 info[jss::build_version] = BuildInfo::getVersionString();
2690
2691 info[jss::server_state] = strOperatingMode(admin);
2692
2693 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2694 std::chrono::system_clock::now()));
2695
2696 if (needNetworkLedger_)
2697 info[jss::network_ledger] = "waiting";
2698
2699 info[jss::validation_quorum] =
2700 static_cast<Json::UInt>(app_.validators().quorum());
2701
2702 if (admin)
2703 {
2704 switch (app_.config().NODE_SIZE)
2705 {
2706 case 0:
2707 info[jss::node_size] = "tiny";
2708 break;
2709 case 1:
2710 info[jss::node_size] = "small";
2711 break;
2712 case 2:
2713 info[jss::node_size] = "medium";
2714 break;
2715 case 3:
2716 info[jss::node_size] = "large";
2717 break;
2718 case 4:
2719 info[jss::node_size] = "huge";
2720 break;
2721 }
2722
2723 auto when = app_.validators().expires();
2724
2725 if (!human)
2726 {
2727 if (when)
2728 info[jss::validator_list_expires] =
2729 safe_cast<Json::UInt>(when->time_since_epoch().count());
2730 else
2731 info[jss::validator_list_expires] = 0;
2732 }
2733 else
2734 {
2735 auto& x = (info[jss::validator_list] = Json::objectValue);
2736
2737 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2738
2739 if (when)
2740 {
2741 if (*when == TimeKeeper::time_point::max())
2742 {
2743 x[jss::expiration] = "never";
2744 x[jss::status] = "active";
2745 }
2746 else
2747 {
2748 x[jss::expiration] = to_string(*when);
2749
2750 if (*when > app_.timeKeeper().now())
2751 x[jss::status] = "active";
2752 else
2753 x[jss::status] = "expired";
2754 }
2755 }
2756 else
2757 {
2758 x[jss::status] = "unknown";
2759 x[jss::expiration] = "unknown";
2760 }
2761 }
2762
2763#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2764 {
2765 auto& x = (info[jss::git] = Json::objectValue);
2766#ifdef GIT_COMMIT_HASH
2767 x[jss::hash] = GIT_COMMIT_HASH;
2768#endif
2769#ifdef GIT_BRANCH
2770 x[jss::branch] = GIT_BRANCH;
2771#endif
2772 }
2773#endif
2774 }
2775 info[jss::io_latency_ms] =
2776 static_cast<Json::UInt>(app_.getIOLatency().count());
2777
2778 if (admin)
2779 {
2780 if (auto const localPubKey = app_.validators().localPublicKey();
2781 localPubKey && app_.getValidationPublicKey())
2782 {
2783 info[jss::pubkey_validator] =
2784 toBase58(TokenType::NodePublic, localPubKey.value());
2785 }
2786 else
2787 {
2788 info[jss::pubkey_validator] = "none";
2789 }
2790 }
2791
2792 if (counters)
2793 {
2794 info[jss::counters] = app_.getPerfLog().countersJson();
2795
2796 Json::Value nodestore(Json::objectValue);
2797 app_.getNodeStore().getCountsJson(nodestore);
2798 info[jss::counters][jss::nodestore] = nodestore;
2799 info[jss::current_activities] = app_.getPerfLog().currentJson();
2800 }
2801
2802 info[jss::pubkey_node] =
2803 toBase58(TokenType::NodePublic, app_.nodeIdentity().first);
2804
2805 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2806
2807 if (amendmentBlocked_)
2808 info[jss::amendment_blocked] = true;
2809
2810 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2811
2812 if (fp != 0)
2813 info[jss::fetch_pack] = Json::UInt(fp);
2814
2815 info[jss::peers] = Json::UInt(app_.overlay().size());
2816
2817 Json::Value lastClose = Json::objectValue;
2818 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2819
2820 if (human)
2821 {
2822 lastClose[jss::converge_time_s] =
2823 std::chrono::duration<double>{mConsensus.prevRoundTime()}.count();
2824 }
2825 else
2826 {
2827 lastClose[jss::converge_time] =
2828 Json::Int(mConsensus.prevRoundTime().count());
2829 }
2830
2831 info[jss::last_close] = lastClose;
2832
2833 // info[jss::consensus] = mConsensus.getJson();
2834
2835 if (admin)
2836 info[jss::load] = m_job_queue.getJson();
2837
2838 if (auto const netid = app_.overlay().networkID())
2839 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2840
2841 auto const escalationMetrics =
2842 app_.getTxQ().getMetrics(*app_.openLedger().current());
2843
2844 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2845 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2846 /* Scale the escalated fee level to unitless "load factor".
2847 In practice, this just strips the units, but it will continue
2848 to work correctly if either base value ever changes. */
2849 auto const loadFactorFeeEscalation =
2850 mulDiv(
2851 escalationMetrics.openLedgerFeeLevel,
2852 loadBaseServer,
2853 escalationMetrics.referenceFeeLevel)
2854 .value_or(ripple::muldiv_max);
2855
2856 auto const loadFactor = std::max(
2857 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2858
2859 if (!human)
2860 {
2861 info[jss::load_base] = loadBaseServer;
2862 info[jss::load_factor] = trunc32(loadFactor);
2863 info[jss::load_factor_server] = loadFactorServer;
2864
2865 /* Json::Value doesn't support uint64, so clamp to max
2866 uint32 value. This is mostly theoretical, since there
2867 probably isn't enough extant XRP to drive the factor
2868 that high.
2869 */
2870 info[jss::load_factor_fee_escalation] =
2871 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2872 info[jss::load_factor_fee_queue] =
2873 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2874 info[jss::load_factor_fee_reference] =
2875 escalationMetrics.referenceFeeLevel.jsonClipped();
2876 }
2877 else
2878 {
2879 info[jss::load_factor] =
2880 static_cast<double>(loadFactor) / loadBaseServer;
2881
2882 if (loadFactorServer != loadFactor)
2883 info[jss::load_factor_server] =
2884 static_cast<double>(loadFactorServer) / loadBaseServer;
2885
2886 if (admin)
2887 {
2888 auto fee = app_.getFeeTrack().getLocalFee();
2889 if (fee != loadBaseServer)
2890 info[jss::load_factor_local] =
2891 static_cast<double>(fee) / loadBaseServer;
2892 fee = app_.getFeeTrack().getRemoteFee();
2893 if (fee != loadBaseServer)
2894 info[jss::load_factor_net] =
2895 static_cast<double>(fee) / loadBaseServer;
2896 fee = app_.getFeeTrack().getClusterFee();
2897 if (fee != loadBaseServer)
2898 info[jss::load_factor_cluster] =
2899 static_cast<double>(fee) / loadBaseServer;
2900 }
2901 if (escalationMetrics.openLedgerFeeLevel !=
2902 escalationMetrics.referenceFeeLevel &&
2903 (admin || loadFactorFeeEscalation != loadFactor))
2904 info[jss::load_factor_fee_escalation] =
2905 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2906 escalationMetrics.referenceFeeLevel);
2907 if (escalationMetrics.minProcessingFeeLevel !=
2908 escalationMetrics.referenceFeeLevel)
2909 info[jss::load_factor_fee_queue] =
2910 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2911 escalationMetrics.referenceFeeLevel);
2912 }
2913
2914 bool valid = false;
2915 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2916
2917 if (lpClosed)
2918 valid = true;
2919 else
2920 lpClosed = m_ledgerMaster.getClosedLedger();
2921
2922 if (lpClosed)
2923 {
2924 XRPAmount const baseFee = lpClosed->fees().base;
2925 Json::Value l(Json::objectValue);
2926 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2927 l[jss::hash] = to_string(lpClosed->info().hash);
2928
2929 if (!human)
2930 {
2931 l[jss::base_fee] = baseFee.jsonClipped();
2932 l[jss::reserve_base] =
2933 lpClosed->fees().accountReserve(0).jsonClipped();
2934 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2935 l[jss::close_time] = Json::Value::UInt(
2936 lpClosed->info().closeTime.time_since_epoch().count());
2937 }
2938 else
2939 {
2940 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2941 l[jss::reserve_base_xrp] =
2942 lpClosed->fees().accountReserve(0).decimalXRP();
2943 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2944
2945 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2946 std::abs(closeOffset.count()) >= 60)
2947 l[jss::close_time_offset] =
2948 static_cast<std::uint32_t>(closeOffset.count());
2949
2950 constexpr std::chrono::seconds highAgeThreshold{1000000};
2951 if (m_ledgerMaster.haveValidated())
2952 {
2953 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2954 l[jss::age] =
2955 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2956 }
2957 else
2958 {
2959 auto lCloseTime = lpClosed->info().closeTime;
2960 auto closeTime = app_.timeKeeper().closeTime();
2961 if (lCloseTime <= closeTime)
2962 {
2963 using namespace std::chrono_literals;
2964 auto age = closeTime - lCloseTime;
2965 l[jss::age] =
2966 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2967 }
2968 }
2969 }
2970
2971 if (valid)
2972 info[jss::validated_ledger] = l;
2973 else
2974 info[jss::closed_ledger] = l;
2975
2976 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2977 if (!lpPublished)
2978 info[jss::published_ledger] = "none";
2979 else if (lpPublished->info().seq != lpClosed->info().seq)
2980 info[jss::published_ledger] = lpPublished->info().seq;
2981 }
2982
2983 accounting_.json(info);
2984 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2985 info[jss::jq_trans_overflow] =
2986 std::to_string(app_.overlay().getJqTransOverflow());
2987 info[jss::peer_disconnects] =
2988 std::to_string(app_.overlay().getPeerDisconnect());
2989 info[jss::peer_disconnects_resources] =
2990 std::to_string(app_.overlay().getPeerDisconnectCharges());
2991
2992 // This array must be sorted in increasing order.
2993 static constexpr std::array<std::string_view, 7> protocols{
2994 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2995 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2996 {
2997 Json::Value ports{Json::arrayValue};
2998 for (auto const& port : app_.getServerHandler().setup().ports)
2999 {
3000 // Don't publish admin ports for non-admin users
3001 if (!admin &&
3002 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
3003 port.admin_user.empty() && port.admin_password.empty()))
3004 continue;
3005 std::vector<std::string> proto;
3006 std::set_intersection(
3007 std::begin(port.protocol),
3008 std::end(port.protocol),
3009 std::begin(protocols),
3010 std::end(protocols),
3011 std::back_inserter(proto));
3012 if (!proto.empty())
3013 {
3014 auto& jv = ports.append(Json::Value(Json::objectValue));
3015 jv[jss::port] = std::to_string(port.port);
3016 jv[jss::protocol] = Json::Value{Json::arrayValue};
3017 for (auto const& p : proto)
3018 jv[jss::protocol].append(p);
3019 }
3020 }
3021
3022 if (app_.config().exists(SECTION_PORT_GRPC))
3023 {
3024 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
3025 auto const optPort = grpcSection.get("port");
3026 if (optPort && grpcSection.get("ip"))
3027 {
3028 auto& jv = ports.append(Json::Value(Json::objectValue));
3029 jv[jss::port] = *optPort;
3030 jv[jss::protocol] = Json::Value{Json::arrayValue};
3031 jv[jss::protocol].append("grpc");
3032 }
3033 }
3034 info[jss::ports] = std::move(ports);
3035 }
3036
3037 return info;
3038}
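// Editor's worked example (made-up numbers): with loadBaseServer = 256,
// loadFactorServer = 256, openLedgerFeeLevel = 2560 and referenceFeeLevel =
// 256, the scaled escalation factor is 2560 * 256 / 256 = 2560, so
//
//     load_factor (machine form) = max(256, 2560) = 2560
//     "load_factor" (human form) = 2560 / 256.0   = 10.0
//
// i.e. entering the open ledger currently costs roughly 10x the reference
// fee level.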
3039
3040 void
3041 NetworkOPsImp::clearLedgerFetch()
3042 {
3043 app_.getInboundLedgers().clearFailures();
3044 }
3045
3046 Json::Value
3047 NetworkOPsImp::getLedgerFetchInfo()
3048 {
3049 return app_.getInboundLedgers().getInfo();
3050 }
3051
3052void
3053 NetworkOPsImp::pubProposedTransaction(
3054 std::shared_ptr<ReadView const> const& ledger,
3055 std::shared_ptr<STTx const> const& transaction,
3056 TER result)
3057{
3058 // never publish an inner txn inside a batch txn
3059 if (transaction->isFlag(tfInnerBatchTxn) &&
3060 ledger->rules().enabled(featureBatch))
3061 return;
3062
3063 MultiApiJson jvObj =
3064 transJson(transaction, result, false, ledger, std::nullopt);
3065
3066 {
3067 std::lock_guard sl(mSubLock);
3068
3069 auto it = mStreamMaps[sRTTransactions].begin();
3070 while (it != mStreamMaps[sRTTransactions].end())
3071 {
3072 InfoSub::pointer p = it->second.lock();
3073
3074 if (p)
3075 {
3076 jvObj.visit(
3077 p->getApiVersion(), //
3078 [&](Json::Value const& jv) { p->send(jv, true); });
3079 ++it;
3080 }
3081 else
3082 {
3083 it = mStreamMaps[sRTTransactions].erase(it);
3084 }
3085 }
3086 }
3087
3088 pubProposedAccountTransaction(ledger, transaction, result);
3089}
3090
3091void
3093{
3094 // Ledgers are published only when they acquire sufficient validations
3095 // Holes are filled across connection loss or other catastrophe
3096
3097 auto alpAccepted =
3098 app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
3099 if (!alpAccepted)
3100 {
3101 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
3102 app_.getAcceptedLedgerCache().canonicalize_replace_client(
3103 lpAccepted->info().hash, alpAccepted);
3104 }
3105
3106 XRPL_ASSERT(
3107 alpAccepted->getLedger().get() == lpAccepted.get(),
3108 "ripple::NetworkOPsImp::pubLedger : accepted input");
3109
3110 {
3111 JLOG(m_journal.debug())
3112 << "Publishing ledger " << lpAccepted->info().seq << " "
3113 << lpAccepted->info().hash;
3114
3115 std::lock_guard sl(mSubLock);
3116
3117 if (!mStreamMaps[sLedger].empty())
3118 {
3119 Json::Value jvObj(Json::objectValue);
3120
3121 jvObj[jss::type] = "ledgerClosed";
3122 jvObj[jss::ledger_index] = lpAccepted->info().seq;
3123 jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
3124 jvObj[jss::ledger_time] = Json::Value::UInt(
3125 lpAccepted->info().closeTime.time_since_epoch().count());
3126
3127 jvObj[jss::network_id] = app_.config().NETWORK_ID;
3128
3129 if (!lpAccepted->rules().enabled(featureXRPFees))
3130 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3131 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
3132 jvObj[jss::reserve_base] =
3133 lpAccepted->fees().accountReserve(0).jsonClipped();
3134 jvObj[jss::reserve_inc] =
3135 lpAccepted->fees().increment.jsonClipped();
3136
3137 jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
3138
3139 if (mMode >= OperatingMode::SYNCING)
3140 {
3141 jvObj[jss::validated_ledgers] =
3142 app_.getLedgerMaster().getCompleteLedgers();
3143 }
3144
3145 auto it = mStreamMaps[sLedger].begin();
3146 while (it != mStreamMaps[sLedger].end())
3147 {
3148 InfoSub::pointer p = it->second.lock();
3149 if (p)
3150 {
3151 p->send(jvObj, true);
3152 ++it;
3153 }
3154 else
3155 it = mStreamMaps[sLedger].erase(it);
3156 }
3157 }
3158
3159 if (!mStreamMaps[sBookChanges].empty())
3160 {
3161 Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
3162
3163 auto it = mStreamMaps[sBookChanges].begin();
3164 while (it != mStreamMaps[sBookChanges].end())
3165 {
3166 InfoSub::pointer p = it->second.lock();
3167 if (p)
3168 {
3169 p->send(jvObj, true);
3170 ++it;
3171 }
3172 else
3173 it = mStreamMaps[sBookChanges].erase(it);
3174 }
3175 }
3176
3177 {
3178 static bool firstTime = true;
3179 if (firstTime)
3180 {
3181 // First validated ledger, start delayed SubAccountHistory
3182 firstTime = false;
3183 for (auto& outer : mSubAccountHistory)
3184 {
3185 for (auto& inner : outer.second)
3186 {
3187 auto& subInfo = inner.second;
3188 if (subInfo.index_->separationLedgerSeq_ == 0)
3189 {
3190 subAccountHistoryStart(
3191 alpAccepted->getLedger(), subInfo);
3192 }
3193 }
3194 }
3195 }
3196 }
3197 }
3198
3199 // Don't lock since pubValidatedTransaction is locking.
3200 for (auto const& accTx : *alpAccepted)
3201 {
3202 JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
3203 pubValidatedTransaction(
3204 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3205 }
3206}
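// Editor's sketch (illustrative, made-up values): each "ledger" stream
// subscriber receives the jvObj built above, e.g.
//
//     {
//         "type": "ledgerClosed",
//         "ledger_index": 95234567,
//         "ledger_hash": "F2DA...",
//         "ledger_time": 772482521,
//         "fee_base": 10,
//         "reserve_base": 1000000,
//         "reserve_inc": 200000,
//         "txn_count": 42,
//         "validated_ledgers": "32570-95234567"
//     }
//
// after which every transaction in the accepted ledger is fanned out through
// pubValidatedTransaction above.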
3207
3208void
3210{
3212 app_.openLedger().current()->fees().base,
3214 app_.getFeeTrack()};
3215
3216 // only schedule the job if something has changed
3217 if (f != mLastFeeSummary)
3218 {
3219 m_job_queue.addJob(
3220 jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
3221 pubServer();
3222 });
3223 }
3224}
3225
3226 void
3227 NetworkOPsImp::reportConsensusStateChange(ConsensusPhase phase)
3228 {
3229 m_job_queue.addJob(
3230 jtCLIENT_CONSENSUS,
3231 "reportConsensusStateChange->pubConsensus",
3232 [this, phase]() { pubConsensus(phase); });
3233 }
3234
3235inline void
3236 NetworkOPsImp::sweepLocalTxs(ReadView const& view)
3237 {
3238 m_localTX->sweep(view);
3239}
3240inline std::size_t
3242{
3243 return m_localTX->size();
3244}
3245
3246// This routine should only be used to publish accepted or validated
3247// transactions.
3248 MultiApiJson
3249 NetworkOPsImp::transJson(
3250 std::shared_ptr<STTx const> const& transaction,
3251 TER result,
3252 bool validated,
3253 std::shared_ptr<ReadView const> const& ledger,
3255{
3257 std::string sToken;
3258 std::string sHuman;
3259
3260 transResultInfo(result, sToken, sHuman);
3261
3262 jvObj[jss::type] = "transaction";
3263 // NOTE jvObj is not a finished object for either API version. After
3264 // it's populated, we need to finish it for a specific API version. This is
3265 // done in a loop, near the end of this function.
3266 jvObj[jss::transaction] =
3267 transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3268
3269 if (meta)
3270 {
3271 jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
3272 RPC::insertDeliveredAmount(
3273 jvObj[jss::meta], *ledger, transaction, meta->get());
3274 RPC::insertNFTSyntheticInJson(jvObj, transaction, meta->get());
3275 RPC::insertMPTokenIssuanceID(
3276 jvObj[jss::meta], transaction, meta->get());
3277 }
3278
3279 // add CTID where the needed data for it exists
3280 if (auto const& lookup = ledger->txRead(transaction->getTransactionID());
3281 lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3282 {
3283 uint32_t const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
3284 uint32_t netID = app_.config().NETWORK_ID;
3285 if (transaction->isFieldPresent(sfNetworkID))
3286 netID = transaction->getFieldU32(sfNetworkID);
3287
3288 if (auto const ctid =
3289 RPC::encodeCTID(ledger->info().seq, txnSeq, netID);
3290 ctid)
3291 jvObj[jss::ctid] = *ctid;
3292 }
3293 if (!ledger->open())
3294 jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3295
3296 if (validated)
3297 {
3298 jvObj[jss::ledger_index] = ledger->info().seq;
3299 jvObj[jss::transaction][jss::date] =
3300 ledger->info().closeTime.time_since_epoch().count();
3301 jvObj[jss::validated] = true;
3302 jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3303
3304 // WRITEME: Put the account next seq here
3305 }
3306 else
3307 {
3308 jvObj[jss::validated] = false;
3309 jvObj[jss::ledger_current_index] = ledger->info().seq;
3310 }
3311
3312 jvObj[jss::status] = validated ? "closed" : "proposed";
3313 jvObj[jss::engine_result] = sToken;
3314 jvObj[jss::engine_result_code] = result;
3315 jvObj[jss::engine_result_message] = sHuman;
3316
3317 if (transaction->getTxnType() == ttOFFER_CREATE)
3318 {
3319 auto const account = transaction->getAccountID(sfAccount);
3320 auto const amount = transaction->getFieldAmount(sfTakerGets);
3321
3322 // If the offer create is not self funded then add the owner balance
3323 if (account != amount.issue().account)
3324 {
3325 auto const ownerFunds = accountFunds(
3326 *ledger,
3327 account,
3328 amount,
3329 fhIGNORE_FREEZE,
3330 app_.journal("View"));
3331 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3332 }
3333 }
3334
3335 std::string const hash = to_string(transaction->getTransactionID());
3336 MultiApiJson multiObj{jvObj};
3337 forAllApiVersions(
3338 multiObj.visit(), //
3339 [&]<unsigned Version>(
3340 Json::Value& jvTx, std::integral_constant<unsigned, Version>) {
3341 RPC::insertDeliverMax(
3342 jvTx[jss::transaction], transaction->getTxnType(), Version);
3343
3344 if constexpr (Version > 1)
3345 {
3346 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3347 jvTx[jss::hash] = hash;
3348 }
3349 else
3350 {
3351 jvTx[jss::transaction][jss::hash] = hash;
3352 }
3353 });
3354
3355 return multiObj;
3356}
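// Editor's note (illustrative): the per-version fixup above means the same
// event is delivered in two shapes:
//
//     v1:  { "type": "transaction",
//            "transaction": { ..., "hash": "C0FF..." }, ... }
//     v2+: { "type": "transaction",
//            "tx_json": { ... }, "hash": "C0FF...", ... }
//
// with RPC::insertDeliverMax adjusting the Payment amount fields as
// appropriate for each API version.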
3357
3358void
3360 std::shared_ptr<ReadView const> const& ledger,
3361 AcceptedLedgerTx const& transaction,
3362 bool last)
3363{
3364 auto const& stTxn = transaction.getTxn();
3365
3366 // Create two different Json objects, for different API versions
3367 auto const metaRef = std::ref(transaction.getMeta());
3368 auto const trResult = transaction.getResult();
3369 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3370
3371 {
3372 std::lock_guard sl(mSubLock);
3373
3374 auto it = mStreamMaps[sTransactions].begin();
3375 while (it != mStreamMaps[sTransactions].end())
3376 {
3377 InfoSub::pointer p = it->second.lock();
3378
3379 if (p)
3380 {
3381 jvObj.visit(
3382 p->getApiVersion(), //
3383 [&](Json::Value const& jv) { p->send(jv, true); });
3384 ++it;
3385 }
3386 else
3387 it = mStreamMaps[sTransactions].erase(it);
3388 }
3389
3390 it = mStreamMaps[sRTTransactions].begin();
3391
3392 while (it != mStreamMaps[sRTTransactions].end())
3393 {
3394 InfoSub::pointer p = it->second.lock();
3395
3396 if (p)
3397 {
3398 jvObj.visit(
3399 p->getApiVersion(), //
3400 [&](Json::Value const& jv) { p->send(jv, true); });
3401 ++it;
3402 }
3403 else
3404 it = mStreamMaps[sRTTransactions].erase(it);
3405 }
3406 }
3407
3408 if (transaction.getResult() == tesSUCCESS)
3409 app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3410
3411 pubAccountTransaction(ledger, transaction, last);
3412}
3413
3414void
3416 std::shared_ptr<ReadView const> const& ledger,
3417 AcceptedLedgerTx const& transaction,
3418 bool last)
3419{
3420 hash_set<InfoSub::pointer> notify;
3421 int iProposed = 0;
3422 int iAccepted = 0;
3423
3424 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3425 auto const currLedgerSeq = ledger->seq();
3426 {
3427 std::lock_guard sl(mSubLock);
3428
3429 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3430 !mSubAccountHistory.empty())
3431 {
3432 for (auto const& affectedAccount : transaction.getAffected())
3433 {
3434 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3435 simiIt != mSubRTAccount.end())
3436 {
3437 auto it = simiIt->second.begin();
3438
3439 while (it != simiIt->second.end())
3440 {
3441 InfoSub::pointer p = it->second.lock();
3442
3443 if (p)
3444 {
3445 notify.insert(p);
3446 ++it;
3447 ++iProposed;
3448 }
3449 else
3450 it = simiIt->second.erase(it);
3451 }
3452 }
3453
3454 if (auto simiIt = mSubAccount.find(affectedAccount);
3455 simiIt != mSubAccount.end())
3456 {
3457 auto it = simiIt->second.begin();
3458 while (it != simiIt->second.end())
3459 {
3460 InfoSub::pointer p = it->second.lock();
3461
3462 if (p)
3463 {
3464 notify.insert(p);
3465 ++it;
3466 ++iAccepted;
3467 }
3468 else
3469 it = simiIt->second.erase(it);
3470 }
3471 }
3472
3473 if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3474 histoIt != mSubAccountHistory.end())
3475 {
3476 auto& subs = histoIt->second;
3477 auto it = subs.begin();
3478 while (it != subs.end())
3479 {
3480 SubAccountHistoryInfoWeak const& info = it->second;
3481 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3482 {
3483 ++it;
3484 continue;
3485 }
3486
3487 if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3488 {
3489 accountHistoryNotify.emplace_back(
3490 SubAccountHistoryInfo{isSptr, info.index_});
3491 ++it;
3492 }
3493 else
3494 {
3495 it = subs.erase(it);
3496 }
3497 }
3498 if (subs.empty())
3499 mSubAccountHistory.erase(histoIt);
3500 }
3501 }
3502 }
3503 }
3504
3505 JLOG(m_journal.trace())
3506 << "pubAccountTransaction: "
3507 << "proposed=" << iProposed << ", accepted=" << iAccepted;
3508
3509 if (!notify.empty() || !accountHistoryNotify.empty())
3510 {
3511 auto const& stTxn = transaction.getTxn();
3512
3513 // Create two different Json objects, for different API versions
3514 auto const metaRef = std::ref(transaction.getMeta());
3515 auto const trResult = transaction.getResult();
3516 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3517
3518 for (InfoSub::ref isrListener : notify)
3519 {
3520 jvObj.visit(
3521 isrListener->getApiVersion(), //
3522 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3523 }
3524
3525 if (last)
3526 jvObj.set(jss::account_history_boundary, true);
3527
3528 XRPL_ASSERT(
3529 jvObj.isMember(jss::account_history_tx_stream) ==
3530 MultiApiJson::none,
3531 "ripple::NetworkOPsImp::pubAccountTransaction : "
3532 "account_history_tx_stream not set");
3533 for (auto& info : accountHistoryNotify)
3534 {
3535 auto& index = info.index_;
3536 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3537 jvObj.set(jss::account_history_tx_first, true);
3538
3539 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3540
3541 jvObj.visit(
3542 info.sink_->getApiVersion(), //
3543 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3544 }
3545 }
3546}
3547
3548void
3549 NetworkOPsImp::pubProposedAccountTransaction(
3550 std::shared_ptr<ReadView const> const& ledger,
3551 std::shared_ptr<STTx const> const& tx,
3552 TER result)
3553{
3554 hash_set<InfoSub::pointer> notify;
3555 int iProposed = 0;
3556
3557 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3558
3559 {
3560 std::lock_guard sl(mSubLock);
3561
3562 if (mSubRTAccount.empty())
3563 return;
3564
3565 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3566 !mSubAccountHistory.empty())
3567 {
3568 for (auto const& affectedAccount : tx->getMentionedAccounts())
3569 {
3570 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3571 simiIt != mSubRTAccount.end())
3572 {
3573 auto it = simiIt->second.begin();
3574
3575 while (it != simiIt->second.end())
3576 {
3577 InfoSub::pointer p = it->second.lock();
3578
3579 if (p)
3580 {
3581 notify.insert(p);
3582 ++it;
3583 ++iProposed;
3584 }
3585 else
3586 it = simiIt->second.erase(it);
3587 }
3588 }
3589 }
3590 }
3591 }
3592
3593 JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3594
3595 if (!notify.empty() || !accountHistoryNotify.empty())
3596 {
3597 // Create two different Json objects, for different API versions
3598 MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3599
3600 for (InfoSub::ref isrListener : notify)
3601 jvObj.visit(
3602 isrListener->getApiVersion(), //
3603 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3604
3605 XRPL_ASSERT(
3606 jvObj.isMember(jss::account_history_tx_stream) ==
3607 MultiApiJson::none,
3608 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3609 "account_history_tx_stream not set");
3610 for (auto& info : accountHistoryNotify)
3611 {
3612 auto& index = info.index_;
3613 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3614 jvObj.set(jss::account_history_tx_first, true);
3615 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3616 jvObj.visit(
3617 info.sink_->getApiVersion(), //
3618 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3619 }
3620 }
3621}
3622
3623//
3624// Monitoring
3625//
3626
3627void
3628 NetworkOPsImp::subAccount(
3629 InfoSub::ref isrListener,
3630 hash_set<AccountID> const& vnaAccountIDs,
3631 bool rt)
3632{
3633 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3634
3635 for (auto const& naAccountID : vnaAccountIDs)
3636 {
3637 JLOG(m_journal.trace())
3638 << "subAccount: account: " << toBase58(naAccountID);
3639
3640 isrListener->insertSubAccountInfo(naAccountID, rt);
3641 }
3642
3644
3645 for (auto const& naAccountID : vnaAccountIDs)
3646 {
3647 auto simIterator = subMap.find(naAccountID);
3648 if (simIterator == subMap.end())
3649 {
3650 // Not found, note that the account has a new single listener.
3651 SubMapType usisElement;
3652 usisElement[isrListener->getSeq()] = isrListener;
3653 // VFALCO NOTE This is making a needless copy of naAccountID
3654 subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3655 }
3656 else
3657 {
3658 // Found, note that the account has another listener.
3659 simIterator->second[isrListener->getSeq()] = isrListener;
3660 }
3661 }
3662}
3663
3664void
3666 InfoSub::ref isrListener,
3667 hash_set<AccountID> const& vnaAccountIDs,
3668 bool rt)
3669{
3670 for (auto const& naAccountID : vnaAccountIDs)
3671 {
3672 // Remove from the InfoSub
3673 isrListener->deleteSubAccountInfo(naAccountID, rt);
3674 }
3675
3676 // Remove from the server
3677 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3678}
3679
3680void
3682 std::uint64_t uSeq,
3683 hash_set<AccountID> const& vnaAccountIDs,
3684 bool rt)
3685{
3686 std::lock_guard sl(mSubLock);
3687
3688 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3689
3690 for (auto const& naAccountID : vnaAccountIDs)
3691 {
3692 auto simIterator = subMap.find(naAccountID);
3693
3694 if (simIterator != subMap.end())
3695 {
3696 // Found
3697 simIterator->second.erase(uSeq);
3698
3699 if (simIterator->second.empty())
3700 {
3701 // Don't need hash entry.
3702 subMap.erase(simIterator);
3703 }
3704 }
3705 }
3706}
3707
3708void
3710{
3711 enum DatabaseType { Sqlite, None };
3712 static auto const databaseType = [&]() -> DatabaseType {
3713 // Use a dynamic_cast to return DatabaseType::None
3714 // on failure.
3715 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3716 {
3717 return DatabaseType::Sqlite;
3718 }
3719 return DatabaseType::None;
3720 }();
3721
3722 if (databaseType == DatabaseType::None)
3723 {
3724 JLOG(m_journal.error())
3725 << "AccountHistory job for account "
3726 << toBase58(subInfo.index_->accountId_) << " no database";
3727 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3728 {
3729 sptr->send(rpcError(rpcINTERNAL), true);
3730 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3731 }
3732 return;
3733 }
3734
3735 app_.getJobQueue().addJob(
3736 jtCLIENT_ACCT_HIST,
3737 "AccountHistoryTxStream",
3738 [this, dbType = databaseType, subInfo]() {
3739 auto const& accountId = subInfo.index_->accountId_;
3740 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3741 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3742
3743 JLOG(m_journal.trace())
3744 << "AccountHistory job for account " << toBase58(accountId)
3745 << " started. lastLedgerSeq=" << lastLedgerSeq;
3746
3747 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3748 std::shared_ptr<TxMeta> const& meta) -> bool {
3749 /*
3750 * genesis account: first tx is the one with seq 1
3751 * other account: first tx is the one that created the account
3752 */
3753 if (accountId == genesisAccountId)
3754 {
3755 auto stx = tx->getSTransaction();
3756 if (stx->getAccountID(sfAccount) == accountId &&
3757 stx->getSeqValue() == 1)
3758 return true;
3759 }
3760
3761 for (auto& node : meta->getNodes())
3762 {
3763 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3764 continue;
3765
3766 if (node.isFieldPresent(sfNewFields))
3767 {
3768 if (auto inner = dynamic_cast<STObject const*>(
3769 node.peekAtPField(sfNewFields));
3770 inner)
3771 {
3772 if (inner->isFieldPresent(sfAccount) &&
3773 inner->getAccountID(sfAccount) == accountId)
3774 {
3775 return true;
3776 }
3777 }
3778 }
3779 }
3780
3781 return false;
3782 };
3783
3784 auto send = [&](Json::Value const& jvObj,
3785 bool unsubscribe) -> bool {
3786 if (auto sptr = subInfo.sinkWptr_.lock())
3787 {
3788 sptr->send(jvObj, true);
3789 if (unsubscribe)
3790 unsubAccountHistory(sptr, accountId, false);
3791 return true;
3792 }
3793
3794 return false;
3795 };
3796
3797 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3798 bool unsubscribe) -> bool {
3799 if (auto sptr = subInfo.sinkWptr_.lock())
3800 {
3801 jvObj.visit(
3802 sptr->getApiVersion(), //
3803 [&](Json::Value const& jv) { sptr->send(jv, true); });
3804
3805 if (unsubscribe)
3806 unsubAccountHistory(sptr, accountId, false);
3807 return true;
3808 }
3809
3810 return false;
3811 };
3812
3813 auto getMoreTxns =
3814 [&](std::uint32_t minLedger,
3815 std::uint32_t maxLedger,
3816 std::optional<RelationalDatabase::AccountTxMarker> marker)
3817 -> std::optional<std::pair<
3818 RelationalDatabase::AccountTxs,
3819 std::optional<RelationalDatabase::AccountTxMarker>>> {
3820 switch (dbType)
3821 {
3822 case Sqlite: {
3823 auto db = static_cast<SQLiteDatabase*>(
3824 &app_.getRelationalDatabase());
3825 RelationalDatabase::AccountTxPageOptions options{
3826 accountId, minLedger, maxLedger, marker, 0, true};
3827 return db->newestAccountTxPage(options);
3828 }
3829 default: {
3830 UNREACHABLE(
3831 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3832 "getMoreTxns : invalid database type");
3833 return {};
3834 }
3835 }
3836 };
3837
3838 /*
3839 * search backward until the genesis ledger or asked to stop
3840 */
3841 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3842 {
3843 int feeChargeCount = 0;
3844 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3845 {
3846 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3847 ++feeChargeCount;
3848 }
3849 else
3850 {
3851 JLOG(m_journal.trace())
3852 << "AccountHistory job for account "
3853 << toBase58(accountId) << " no InfoSub. Fee charged "
3854 << feeChargeCount << " times.";
3855 return;
3856 }
3857
3858 // search 1024 ledgers at a time until reaching the genesis ledger
3859 auto startLedgerSeq =
3860 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3861 JLOG(m_journal.trace())
3862 << "AccountHistory job for account " << toBase58(accountId)
3863 << ", working on ledger range [" << startLedgerSeq << ","
3864 << lastLedgerSeq << "]";
3865
3866 auto haveRange = [&]() -> bool {
3867 std::uint32_t validatedMin = UINT_MAX;
3868 std::uint32_t validatedMax = 0;
3869 auto haveSomeValidatedLedgers =
3870 app_.getLedgerMaster().getValidatedRange(
3871 validatedMin, validatedMax);
3872
3873 return haveSomeValidatedLedgers &&
3874 validatedMin <= startLedgerSeq &&
3875 lastLedgerSeq <= validatedMax;
3876 }();
3877
3878 if (!haveRange)
3879 {
3880 JLOG(m_journal.debug())
3881 << "AccountHistory reschedule job for account "
3882 << toBase58(accountId) << ", incomplete ledger range ["
3883 << startLedgerSeq << "," << lastLedgerSeq << "]";
3884 setAccountHistoryJobTimer(subInfo);
3885 return;
3886 }
3887
3888 std::optional<RelationalDatabase::AccountTxMarker> marker{};
3889 while (!subInfo.index_->stopHistorical_)
3890 {
3891 auto dbResult =
3892 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3893 if (!dbResult)
3894 {
3895 JLOG(m_journal.debug())
3896 << "AccountHistory job for account "
3897 << toBase58(accountId) << " getMoreTxns failed.";
3898 send(rpcError(rpcINTERNAL), true);
3899 return;
3900 }
3901
3902 auto const& txns = dbResult->first;
3903 marker = dbResult->second;
3904 size_t num_txns = txns.size();
3905 for (size_t i = 0; i < num_txns; ++i)
3906 {
3907 auto const& [tx, meta] = txns[i];
3908
3909 if (!tx || !meta)
3910 {
3911 JLOG(m_journal.debug())
3912 << "AccountHistory job for account "
3913 << toBase58(accountId) << " empty tx or meta.";
3914 send(rpcError(rpcINTERNAL), true);
3915 return;
3916 }
3917 auto curTxLedger =
3918 app_.getLedgerMaster().getLedgerBySeq(
3919 tx->getLedger());
3920 if (!curTxLedger)
3921 {
3922 JLOG(m_journal.debug())
3923 << "AccountHistory job for account "
3924 << toBase58(accountId) << " no ledger.";
3925 send(rpcError(rpcINTERNAL), true);
3926 return;
3927 }
3928 std::shared_ptr<STTx const> stTxn =
3929 tx->getSTransaction();
3930 if (!stTxn)
3931 {
3932 JLOG(m_journal.debug())
3933 << "AccountHistory job for account "
3934 << toBase58(accountId)
3935 << " getSTransaction failed.";
3936 send(rpcError(rpcINTERNAL), true);
3937 return;
3938 }
3939
3940 auto const mRef = std::ref(*meta);
3941 auto const trR = meta->getResultTER();
3942 MultiApiJson jvTx =
3943 transJson(stTxn, trR, true, curTxLedger, mRef);
3944
3945 jvTx.set(
3946 jss::account_history_tx_index, txHistoryIndex--);
3947 if (i + 1 == num_txns ||
3948 txns[i + 1].first->getLedger() != tx->getLedger())
3949 jvTx.set(jss::account_history_boundary, true);
3950
3951 if (isFirstTx(tx, meta))
3952 {
3953 jvTx.set(jss::account_history_tx_first, true);
3954 sendMultiApiJson(jvTx, false);
3955
3956 JLOG(m_journal.trace())
3957 << "AccountHistory job for account "
3958 << toBase58(accountId)
3959 << " done, found last tx.";
3960 return;
3961 }
3962 else
3963 {
3964 sendMultiApiJson(jvTx, false);
3965 }
3966 }
3967
3968 if (marker)
3969 {
3970 JLOG(m_journal.trace())
3971 << "AccountHistory job for account "
3972 << toBase58(accountId)
3973 << " paging, marker=" << marker->ledgerSeq << ":"
3974 << marker->txnSeq;
3975 }
3976 else
3977 {
3978 break;
3979 }
3980 }
3981
3982 if (!subInfo.index_->stopHistorical_)
3983 {
3984 lastLedgerSeq = startLedgerSeq - 1;
3985 if (lastLedgerSeq <= 1)
3986 {
3987 JLOG(m_journal.trace())
3988 << "AccountHistory job for account "
3989 << toBase58(accountId)
3990 << " done, reached genesis ledger.";
3991 return;
3992 }
3993 }
3994 }
3995 });
3996}
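// Editor's note (illustrative): account_history_tx_index orders the stream
// around the subscription point. Live transactions count up from 0 via
// forwardTxIndex_ (see pubAccountTransaction), while this backfill job hands
// out decreasing txHistoryIndex values for historical transactions, e.g.
// ..., -3, -2, -1, with account_history_boundary marking ledger boundaries.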
3997
3998void
3999 NetworkOPsImp::subAccountHistoryStart(
4000 std::shared_ptr<ReadView const> const& ledger,
4001 SubAccountHistoryInfoWeak& subInfo)
4002 {
4003 subInfo.index_->separationLedgerSeq_ = ledger->seq();
4004 auto const& accountId = subInfo.index_->accountId_;
4005 auto const accountKeylet = keylet::account(accountId);
4006 if (!ledger->exists(accountKeylet))
4007 {
4008 JLOG(m_journal.debug())
4009 << "subAccountHistoryStart, no account " << toBase58(accountId)
4010 << ", no need to add AccountHistory job.";
4011 return;
4012 }
4013 if (accountId == genesisAccountId)
4014 {
4015 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
4016 {
4017 if (sleAcct->getFieldU32(sfSequence) == 1)
4018 {
4019 JLOG(m_journal.debug())
4020 << "subAccountHistoryStart, genesis account "
4021 << toBase58(accountId)
4022 << " does not have tx, no need to add AccountHistory job.";
4023 return;
4024 }
4025 }
4026 else
4027 {
4028 UNREACHABLE(
4029 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
4030 "access genesis account");
4031 return;
4032 }
4033 }
4034 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
4035 subInfo.index_->haveHistorical_ = true;
4036
4037 JLOG(m_journal.debug())
4038 << "subAccountHistoryStart, add AccountHistory job: accountId="
4039 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
4040
4041 addAccountHistoryJob(subInfo);
4042}
4043
4044 error_code_i
4045 NetworkOPsImp::subAccountHistory(
4046 InfoSub::ref isrListener,
4047 AccountID const& accountId)
4048{
4049 if (!isrListener->insertSubAccountHistory(accountId))
4050 {
4051 JLOG(m_journal.debug())
4052 << "subAccountHistory, already subscribed to account "
4053 << toBase58(accountId);
4054 return rpcINVALID_PARAMS;
4055 }
4056
4057 std::lock_guard sl(mSubLock);
4058 SubAccountHistoryInfoWeak ahi{
4059 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
4060 auto simIterator = mSubAccountHistory.find(accountId);
4061 if (simIterator == mSubAccountHistory.end())
4062 {
4063 hash_map<std::uint64_t, SubAccountHistoryInfoWeak> inner;
4064 inner.emplace(isrListener->getSeq(), ahi);
4065 mSubAccountHistory.insert(
4066 simIterator, std::make_pair(accountId, inner));
4067 }
4068 else
4069 {
4070 simIterator->second.emplace(isrListener->getSeq(), ahi);
4071 }
4072
4073 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
4074 if (ledger)
4075 {
4076 subAccountHistoryStart(ledger, ahi);
4077 }
4078 else
4079 {
4080 // The node does not have validated ledgers, so wait for
4081 // one before starting to stream.
4082 // In this case, the subscription is also considered successful.
4083 JLOG(m_journal.debug())
4084 << "subAccountHistory, no validated ledger yet, delay start";
4085 }
4086
4087 return rpcSUCCESS;
4088}
4089
4090void
4092 InfoSub::ref isrListener,
4093 AccountID const& account,
4094 bool historyOnly)
4095{
4096 if (!historyOnly)
4097 isrListener->deleteSubAccountHistory(account);
4098 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
4099}
4100
4101void
4102 NetworkOPsImp::unsubAccountHistoryInternal(
4103 std::uint64_t seq,
4104 AccountID const& account,
4105 bool historyOnly)
4106{
4107 std::lock_guard sl(mSubLock);
4108 auto simIterator = mSubAccountHistory.find(account);
4109 if (simIterator != mSubAccountHistory.end())
4110 {
4111 auto& subInfoMap = simIterator->second;
4112 auto subInfoIter = subInfoMap.find(seq);
4113 if (subInfoIter != subInfoMap.end())
4114 {
4115 subInfoIter->second.index_->stopHistorical_ = true;
4116 }
4117
4118 if (!historyOnly)
4119 {
4120 simIterator->second.erase(seq);
4121 if (simIterator->second.empty())
4122 {
4123 mSubAccountHistory.erase(simIterator);
4124 }
4125 }
4126 JLOG(m_journal.debug())
4127 << "unsubAccountHistory, account " << toBase58(account)
4128 << ", historyOnly = " << (historyOnly ? "true" : "false");
4129 }
4130}
4131
4132bool
4134{
4135 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
4136 listeners->addSubscriber(isrListener);
4137 else
4138 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
4139 return true;
4140}
4141
4142bool
4144{
4145 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
4146 listeners->removeSubscriber(uSeq);
4147
4148 return true;
4149}
4150
4151 std::uint32_t
4152 NetworkOPsImp::acceptLedger(
4153 std::optional<std::chrono::milliseconds> consensusDelay)
4154 {
4155 // This code-path is exclusively used when the server is in standalone
4156 // mode via `ledger_accept`
4157 XRPL_ASSERT(
4158 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
4159
4160 if (!m_standalone)
4161 Throw<std::runtime_error>(
4162 "Operation only possible in STANDALONE mode.");
4163
4164 // FIXME Could we improve on this and remove the need for a specialized
4165 // API in Consensus?
4166 beginConsensus(m_ledgerMaster.getClosedLedger()->info().hash, {});
4167 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
4168 return m_ledgerMaster.getCurrentLedger()->info().seq;
4169}
4170
4171// <-- bool: true=added, false=already there
4172bool
4174{
4175 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
4176 {
4177 jvResult[jss::ledger_index] = lpClosed->info().seq;
4178 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
4179 jvResult[jss::ledger_time] = Json::Value::UInt(
4180 lpClosed->info().closeTime.time_since_epoch().count());
4181 if (!lpClosed->rules().enabled(featureXRPFees))
4182 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
4183 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
4184 jvResult[jss::reserve_base] =
4185 lpClosed->fees().accountReserve(0).jsonClipped();
4186 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
4187 jvResult[jss::network_id] = app_.config().NETWORK_ID;
4188 }
4189
4190 if (mMode >= OperatingMode::SYNCING)
4191 {
4192 jvResult[jss::validated_ledgers] =
4193 app_.getLedgerMaster().getCompleteLedgers();
4194 }
4195
4196 std::lock_guard sl(mSubLock);
4197 return mStreamMaps[sLedger]
4198 .emplace(isrListener->getSeq(), isrListener)
4199 .second;
4200}
4201
4202// <-- bool: true=added, false=already there
4203bool
4205{
4208 .emplace(isrListener->getSeq(), isrListener)
4209 .second;
4210}
4211
4212// <-- bool: true=erased, false=was not there
4213bool
4215{
4217 return mStreamMaps[sLedger].erase(uSeq);
4218}
4219
4220// <-- bool: true=erased, false=was not there
4221 bool
4222 NetworkOPsImp::unsubBookChanges(std::uint64_t uSeq)
4223 {
4224 std::lock_guard sl(mSubLock);
4225 return mStreamMaps[sBookChanges].erase(uSeq);
4226 }
4227
4228// <-- bool: true=added, false=already there
4229bool
4231{
4233 return mStreamMaps[sManifests]
4234 .emplace(isrListener->getSeq(), isrListener)
4235 .second;
4236}
4237
4238// <-- bool: true=erased, false=was not there
4239 bool
4240 NetworkOPsImp::unsubManifests(std::uint64_t uSeq)
4241 {
4242 std::lock_guard sl(mSubLock);
4243 return mStreamMaps[sManifests].erase(uSeq);
4244 }
4245
4246// <-- bool: true=added, false=already there
4247bool
4248 NetworkOPsImp::subServer(
4249 InfoSub::ref isrListener,
4250 Json::Value& jvResult,
4251 bool admin)
4252{
4253 uint256 uRandom;
4254
4255 if (m_standalone)
4256 jvResult[jss::stand_alone] = m_standalone;
4257
4258 // CHECKME: is it necessary to provide a random number here?
4259 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
4260
4261 auto const& feeTrack = app_.getFeeTrack();
4262 jvResult[jss::random] = to_string(uRandom);
4263 jvResult[jss::server_status] = strOperatingMode(admin);
4264 jvResult[jss::load_base] = feeTrack.getLoadBase();
4265 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4266 jvResult[jss::hostid] = getHostId(admin);
4267 jvResult[jss::pubkey_node] =
4268 toBase58(TokenType::NodePublic, app_.nodeIdentity().first);
4269
4270 std::lock_guard sl(mSubLock);
4271 return mStreamMaps[sServer]
4272 .emplace(isrListener->getSeq(), isrListener)
4273 .second;
4274}
4275
4276// <-- bool: true=erased, false=was not there
4277bool
4279{
4281 return mStreamMaps[sServer].erase(uSeq);
4282}
4283
4284// <-- bool: true=added, false=already there
4285bool
4287{
4290 .emplace(isrListener->getSeq(), isrListener)
4291 .second;
4292}
4293
4294// <-- bool: true=erased, false=was not there
4295 bool
4296 NetworkOPsImp::unsubTransactions(std::uint64_t uSeq)
4297 {
4298 std::lock_guard sl(mSubLock);
4299 return mStreamMaps[sTransactions].erase(uSeq);
4300 }
4301
4302// <-- bool: true=added, false=already there
4303bool
4305{
4308 .emplace(isrListener->getSeq(), isrListener)
4309 .second;
4310}
4311
4312// <-- bool: true=erased, false=was not there
4313 bool
4314 NetworkOPsImp::unsubRTTransactions(std::uint64_t uSeq)
4315 {
4316 std::lock_guard sl(mSubLock);
4317 return mStreamMaps[sRTTransactions].erase(uSeq);
4318 }
4319
4320// <-- bool: true=added, false=already there
4321bool
4323{
4326 .emplace(isrListener->getSeq(), isrListener)
4327 .second;
4328}
4329
4330void
4335
4336// <-- bool: true=erased, false=was not there
4337 bool
4338 NetworkOPsImp::unsubValidations(std::uint64_t uSeq)
4339 {
4340 std::lock_guard sl(mSubLock);
4341 return mStreamMaps[sValidations].erase(uSeq);
4342 }
4343
4344// <-- bool: true=added, false=already there
4345bool
4347{
4349 return mStreamMaps[sPeerStatus]
4350 .emplace(isrListener->getSeq(), isrListener)
4351 .second;
4352}
4353
4354// <-- bool: true=erased, false=was not there
4355 bool
4356 NetworkOPsImp::unsubPeerStatus(std::uint64_t uSeq)
4357 {
4358 std::lock_guard sl(mSubLock);
4359 return mStreamMaps[sPeerStatus].erase(uSeq);
4360 }
4361
4362// <-- bool: true=added, false=already there
4363bool
4365{
4368 .emplace(isrListener->getSeq(), isrListener)
4369 .second;
4370}
4371
4372// <-- bool: true=erased, false=was not there
4373 bool
4374 NetworkOPsImp::unsubConsensus(std::uint64_t uSeq)
4375 {
4376 std::lock_guard sl(mSubLock);
4377 return mStreamMaps[sConsensusPhase].erase(uSeq);
4378 }
4379
4380 InfoSub::pointer
4381 NetworkOPsImp::findRpcSub(std::string const& strUrl)
4382 {
4383 std::lock_guard sl(mSubLock);
4384
4385 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4386
4387 if (it != mRpcSubMap.end())
4388 return it->second;
4389
4390 return InfoSub::pointer();
4391}
4392
4393 InfoSub::pointer
4394 NetworkOPsImp::addRpcSub(std::string const& strUrl, InfoSub::ref rspEntry)
4395 {
4396 std::lock_guard sl(mSubLock);
4397
4398 mRpcSubMap.emplace(strUrl, rspEntry);
4399
4400 return rspEntry;
4401}
4402
4403bool
4405{
4407 auto pInfo = findRpcSub(strUrl);
4408
4409 if (!pInfo)
4410 return false;
4411
4412 // check to see if any of the stream maps still hold a weak reference to
4413 // this entry before removing
4414 for (SubMapType const& map : mStreamMaps)
4415 {
4416 if (map.find(pInfo->getSeq()) != map.end())
4417 return false;
4418 }
4419 mRpcSubMap.erase(strUrl);
4420 return true;
4421}
4422
4423#ifndef USE_NEW_BOOK_PAGE
4424
4425// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4426// work, but it demonstrated poor performance.
4427//
4428 void
4429 NetworkOPsImp::getBookPage(
4430 std::shared_ptr<ReadView const>& lpLedger,
4431 Book const& book,
4432 AccountID const& uTakerID,
4433 bool const bProof,
4434 unsigned int iLimit,
4435 Json::Value const& jvMarker,
4436 Json::Value& jvResult)
4437{ // CAUTION: This is the old get book page logic
4438 Json::Value& jvOffers =
4439 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4440
4441 hash_map<AccountID, STAmount> umBalance;
4442 uint256 const uBookBase = getBookBase(book);
4443 uint256 const uBookEnd = getQualityNext(uBookBase);
4444 uint256 uTipIndex = uBookBase;
4445
4446 if (auto stream = m_journal.trace())
4447 {
4448 stream << "getBookPage:" << book;
4449 stream << "getBookPage: uBookBase=" << uBookBase;
4450 stream << "getBookPage: uBookEnd=" << uBookEnd;
4451 stream << "getBookPage: uTipIndex=" << uTipIndex;
4452 }
4453
4454 ReadView const& view = *lpLedger;
4455
4456 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4457 isGlobalFrozen(view, book.in.account);
4458
4459 bool bDone = false;
4460 bool bDirectAdvance = true;
4461
4462 std::shared_ptr<SLE const> sleOfferDir;
4463 uint256 offerIndex;
4464 unsigned int uBookEntry;
4465 STAmount saDirRate;
4466
4467 auto const rate = transferRate(view, book.out.account);
4468 auto viewJ = app_.journal("View");
4469
4470 while (!bDone && iLimit-- > 0)
4471 {
4472 if (bDirectAdvance)
4473 {
4474 bDirectAdvance = false;
4475
4476 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4477
4478 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4479 if (ledgerIndex)
4480 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4481 else
4482 sleOfferDir.reset();
4483
4484 if (!sleOfferDir)
4485 {
4486 JLOG(m_journal.trace()) << "getBookPage: bDone";
4487 bDone = true;
4488 }
4489 else
4490 {
4491 uTipIndex = sleOfferDir->key();
4492 saDirRate = amountFromQuality(getQuality(uTipIndex));
4493
4494 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4495
4496 JLOG(m_journal.trace())
4497 << "getBookPage: uTipIndex=" << uTipIndex;
4498 JLOG(m_journal.trace())
4499 << "getBookPage: offerIndex=" << offerIndex;
4500 }
4501 }
4502
4503 if (!bDone)
4504 {
4505 auto sleOffer = view.read(keylet::offer(offerIndex));
4506
4507 if (sleOffer)
4508 {
4509 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4510 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4511 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4512 STAmount saOwnerFunds;
4513 bool firstOwnerOffer(true);
4514
4515 if (book.out.account == uOfferOwnerID)
4516 {
4517 // If an offer is selling issuer's own IOUs, it is fully
4518 // funded.
4519 saOwnerFunds = saTakerGets;
4520 }
4521 else if (bGlobalFreeze)
4522 {
4523 // If either asset is globally frozen, consider all offers
4524 // that aren't ours to be totally unfunded
4525 saOwnerFunds.clear(book.out);
4526 }
4527 else
4528 {
4529 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4530 if (umBalanceEntry != umBalance.end())
4531 {
4532 // Found in running balance table.
4533
4534 saOwnerFunds = umBalanceEntry->second;
4535 firstOwnerOffer = false;
4536 }
4537 else
4538 {
4539 // Did not find balance in table.
4540
4541 saOwnerFunds = accountHolds(
4542 view,
4543 uOfferOwnerID,
4544 book.out.currency,
4545 book.out.account,
4546 fhZERO_IF_FROZEN,
4547 viewJ);
4548
4549 if (saOwnerFunds < beast::zero)
4550 {
4551 // Treat negative funds as zero.
4552
4553 saOwnerFunds.clear();
4554 }
4555 }
4556 }
4557
4558 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4559
4560 STAmount saTakerGetsFunded;
4561 STAmount saOwnerFundsLimit = saOwnerFunds;
4562 Rate offerRate = parityRate;
4563
4564 if (rate != parityRate
4565 // Have a transfer fee.
4566 && uTakerID != book.out.account
4567 // Not taking offers of own IOUs.
4568 && book.out.account != uOfferOwnerID)
4569 // Offer owner not issuing own funds.
4570 {
4571 // Need to charge a transfer fee to offer owner.
4572 offerRate = rate;
4573 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4574 }
4575
4576 if (saOwnerFundsLimit >= saTakerGets)
4577 {
4578 // Sufficient funds; no shenanigans.
4579 saTakerGetsFunded = saTakerGets;
4580 }
4581 else
4582 {
4583 // Only provide the funded amount if not fully funded.
4584
4585 saTakerGetsFunded = saOwnerFundsLimit;
4586
4587 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4588 std::min(
4589 saTakerPays,
4590 multiply(
4591 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4592 .setJson(jvOffer[jss::taker_pays_funded]);
4593 }
4594
4595 STAmount saOwnerPays = (parityRate == offerRate)
4596 ? saTakerGetsFunded
4597 : std::min(
4598 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4599
4600 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4601
4602 // Include all offers funded and unfunded
4603 Json::Value& jvOf = jvOffers.append(jvOffer);
4604 jvOf[jss::quality] = saDirRate.getText();
4605
4606 if (firstOwnerOffer)
4607 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4608 }
4609 else
4610 {
4611 JLOG(m_journal.warn()) << "Missing offer";
4612 }
4613
4614 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4615 {
4616 bDirectAdvance = true;
4617 }
4618 else
4619 {
4620 JLOG(m_journal.trace())
4621 << "getBookPage: offerIndex=" << offerIndex;
4622 }
4623 }
4624 }
4625
4626 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4627 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4628}
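// Editor's worked example (made-up numbers): suppose an offer has
// TakerGets = 100 USD, TakerPays = 50 XRP (saDirRate = 0.5), the issuer
// charges a 0.2% transfer fee (rate = 1.002), and the owner holds only
// 60 USD. Then, per the arithmetic above:
//
//     saOwnerFundsLimit = 60 / 1.002            ~= 59.88 USD
//     taker_gets_funded                         ~= 59.88 USD
//     taker_pays_funded = min(50, 59.88 * 0.5)  ~= 29.94 XRP
//     owner pays = min(60, 59.88 * 1.002)        = 60 USD
//
// so the running balance for that owner drops to ~0 and later offers from
// the same owner in this book page show as unfunded.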
4629
4630#else
4631
4632// This is the new code that uses the book iterators
4633// It has temporarily been disabled
4634
4635 void
4636 NetworkOPsImp::getBookPage(
4637 std::shared_ptr<ReadView const>& lpLedger,
4638 Book const& book,
4639 AccountID const& uTakerID,
4640 bool const bProof,
4641 unsigned int iLimit,
4642 Json::Value const& jvMarker,
4643 Json::Value& jvResult)
4644{
4645 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4646
4647 std::map<AccountID, STAmount> umBalance;
4648
4649 MetaView lesActive(lpLedger, tapNONE, true);
4650 OrderBookIterator obIterator(lesActive, book);
4651
4652 auto const rate = transferRate(lesActive, book.out.account);
4653
4654 bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4655 lesActive.isGlobalFrozen(book.in.account);
4656
4657 while (iLimit-- > 0 && obIterator.nextOffer())
4658 {
4659 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4660 if (sleOffer)
4661 {
4662 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4663 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4664 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4665 STAmount saDirRate = obIterator.getCurrentRate();
4666 STAmount saOwnerFunds;
4667
4668 if (book.out.account == uOfferOwnerID)
4669 {
4670 // If offer is selling issuer's own IOUs, it is fully funded.
4671 saOwnerFunds = saTakerGets;
4672 }
4673 else if (bGlobalFreeze)
4674 {
4675 // If either asset is globally frozen, consider all offers
4676 // that aren't ours to be totally unfunded
4677 saOwnerFunds.clear(book.out);
4678 }
4679 else
4680 {
4681 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4682
4683 if (umBalanceEntry != umBalance.end())
4684 {
4685 // Found in running balance table.
4686
4687 saOwnerFunds = umBalanceEntry->second;
4688 }
4689 else
4690 {
4691 // Did not find balance in table.
4692
4693 saOwnerFunds = lesActive.accountHolds(
4694 uOfferOwnerID,
4695 book.out.currency,
4696 book.out.account,
4697 fhZERO_IF_FROZEN);
4698
4699 if (saOwnerFunds.isNegative())
4700 {
4701 // Treat negative funds as zero.
4702
4703 saOwnerFunds.zero();
4704 }
4705 }
4706 }
4707
4708 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4709
4710 STAmount saTakerGetsFunded;
4711 STAmount saOwnerFundsLimit = saOwnerFunds;
4712 Rate offerRate = parityRate;
4713
4714 if (rate != parityRate
4715 // Have a transfer fee.
4716 && uTakerID != book.out.account
4717 // Not taking offers of own IOUs.
4718 && book.out.account != uOfferOwnerID)
4719 // Offer owner is not issuing its own funds.
4720 {
4721 // Need to charge a transfer fee to offer owner.
4722 offerRate = rate;
4723 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4724 }
4725
4726 if (saOwnerFundsLimit >= saTakerGets)
4727 {
4728 // Sufficient funds; no shenanigans.
4729 saTakerGetsFunded = saTakerGets;
4730 }
4731 else
4732 {
4733 // Only report the funded amounts if not fully funded.
4734 saTakerGetsFunded = saOwnerFundsLimit;
4735
4736 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4737
4738 // Note: the result of this std::min expression is consumed by the
4739 // chained setJson call, which fills in taker_pays_funded.
4740 std::min(
4741 saTakerPays,
4742 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4743 .setJson(jvOffer[jss::taker_pays_funded]);
4744 }
4745
4746 STAmount saOwnerPays = (parityRate == offerRate)
4747 ? saTakerGetsFunded
4748 : std::min(
4749 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4750
4751 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4752
4753 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4754 {
4755 // Only provide funded offers and offers of the taker.
4756 Json::Value& jvOf = jvOffers.append(jvOffer);
4757 jvOf[jss::quality] = saDirRate.getText();
4758 }
4759 }
4760 }
4761
4762 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4763 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4764}
4765
4766#endif
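Both branches above share the same funded-amount arithmetic: the owner's deliverable funds are capped by the issuer's transfer fee, the offer is reported as funded only up to that cap, and the owner is never charged more than the funds actually held. A minimal self-contained sketch of that arithmetic, using plain doubles and made-up values rather than STAmount and Rate:

#include <algorithm>
#include <iostream>

int main()
{
    double const ownerFunds = 150.0;   // what the offer owner actually holds
    double const takerGets = 200.0;    // what the offer promises to deliver
    double const transferRate = 1.005; // issuer charges a 0.5% transfer fee

    // The owner also owes the issuer's transfer fee, so deliverable funds
    // are capped at ownerFunds / transferRate (saOwnerFundsLimit above).
    double const ownerFundsLimit = ownerFunds / transferRate;

    // A partially funded offer can deliver no more than that cap
    // (saTakerGetsFunded above).
    double const takerGetsFunded = std::min(ownerFundsLimit, takerGets);

    // The owner parts with the funded amount plus the fee, but never more
    // than the funds held (saOwnerPays above).
    double const ownerPays =
        std::min(ownerFunds, takerGetsFunded * transferRate);

    std::cout << "taker_gets_funded = " << takerGetsFunded << '\n'
              << "owner_pays        = " << ownerPays << '\n';
}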
4767
4768inline void
4769NetworkOPsImp::collect_metrics()
4770{
4771 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4772 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4773 std::chrono::steady_clock::now() - start);
4774 counters[static_cast<std::size_t>(mode)].dur += current;
4775
4776 std::lock_guard lock(m_statsMutex);
4777 m_stats.disconnected_duration.set(
4778 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4779 .dur.count());
4780 m_stats.connected_duration.set(
4781 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4782 .dur.count());
4783 m_stats.syncing_duration.set(
4784 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4785 m_stats.tracking_duration.set(
4786 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4787 .dur.count());
4788 m_stats.full_duration.set(
4789 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4790
4791 m_stats.disconnected_transitions.set(
4792 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4793 .transitions);
4794 m_stats.connected_transitions.set(
4795 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4796 .transitions);
4797 m_stats.syncing_transitions.set(
4798 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4799 m_stats.tracking_transitions.set(
4800 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4801 .transitions);
4802 m_stats.full_transitions.set(
4803 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4804}
4805
4806void
4807NetworkOPsImp::StateAccounting::mode(OperatingMode om)
4808{
4809 auto now = std::chrono::steady_clock::now();
4810
4811 std::lock_guard lock(mutex_);
4812 ++counters_[static_cast<std::size_t>(om)].transitions;
4813 if (om == OperatingMode::FULL &&
4814 counters_[static_cast<std::size_t>(om)].transitions == 1)
4815 {
4816 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4817 now - processStart_)
4818 .count();
4819 }
4820 counters_[static_cast<std::size_t>(mode_)].dur +=
4821 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4822
4823 mode_ = om;
4824 start_ = now;
4825}
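StateAccounting::mode() charges the elapsed wall-clock interval to the mode being left, counts the transition into the new mode, and on the first transition into FULL records the initial-sync duration. A minimal stand-alone sketch of the core bookkeeping; Mode, Counters, and Accounting here are stand-ins, not the rippled types:

#include <array>
#include <chrono>
#include <cstddef>
#include <cstdint>

enum class Mode : std::size_t { Disconnected, Connected, Syncing, Tracking, Full };

struct Counters
{
    std::chrono::microseconds dur{0};  // total time spent in this mode
    std::uint64_t transitions = 0;     // number of entries into this mode
};

class Accounting
{
    std::array<Counters, 5> counters_;
    Mode mode_ = Mode::Disconnected;
    std::chrono::steady_clock::time_point start_ =
        std::chrono::steady_clock::now();

public:
    void
    setMode(Mode om)
    {
        auto const now = std::chrono::steady_clock::now();
        ++counters_[static_cast<std::size_t>(om)].transitions;
        // Charge the elapsed interval to the outgoing mode, then restart
        // the clock for the incoming one.
        counters_[static_cast<std::size_t>(mode_)].dur +=
            std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
        mode_ = om;
        start_ = now;
    }
};

int main()
{
    Accounting a;
    a.setMode(Mode::Connected);
    a.setMode(Mode::Full);
}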
4826
4827void
4828NetworkOPsImp::StateAccounting::json(Json::Value& obj) const
4829{
4830 auto [counters, mode, start, initialSync] = getCounterData();
4831 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4832 std::chrono::steady_clock::now() - start);
4833 counters[static_cast<std::size_t>(mode)].dur += current;
4834
4835 obj[jss::state_accounting] = Json::objectValue;
4836 for (std::size_t i = static_cast<std::size_t>(OperatingMode::DISCONNECTED);
4837 i <= static_cast<std::size_t>(OperatingMode::FULL);
4838 ++i)
4839 {
4840 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4841 auto& state = obj[jss::state_accounting][states_[i]];
4842 state[jss::transitions] = std::to_string(counters[i].transitions);
4843 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4844 }
4845 obj[jss::server_state_duration_us] = std::to_string(current.count());
4846 if (initialSync)
4847 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4848}
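The object assembled here is what appears under state_accounting in server_info responses; both counters are serialized as strings via std::to_string. An illustrative shape, with made-up values:

"state_accounting": {
    "disconnected": { "transitions": "1", "duration_us": "1234" },
    "connected":    { "transitions": "1", "duration_us": "5678" },
    "syncing":      { "transitions": "1", "duration_us": "2345" },
    "tracking":     { "transitions": "1", "duration_us": "3456" },
    "full":         { "transitions": "1", "duration_us": "4567" }
},
"server_state_duration_us": "4567",
"initial_sync_duration_us": "60000000"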
4849
4850//------------------------------------------------------------------------------
4851
4852std::unique_ptr<NetworkOPs>
4853make_NetworkOPs(
4854 Application& app,
4855 NetworkOPs::clock_type& clock,
4856 bool standalone,
4857 std::size_t minPeerCount,
4858 bool startvalid,
4859 JobQueue& job_queue,
4860 LedgerMaster& ledgerMaster,
4861 ValidatorKeys const& validatorKeys,
4862 boost::asio::io_context& io_svc,
4863 beast::Journal journal,
4864 beast::insight::Collector::ptr const& collector)
4865{
4866 return std::make_unique<NetworkOPsImp>(
4867 app,
4868 clock,
4869 standalone,
4870 minPeerCount,
4871 startvalid,
4872 job_queue,
4873 ledgerMaster,
4874 validatorKeys,
4875 io_svc,
4876 journal,
4877 collector);
4878}
4879
4880} // namespace ripple