// NetworkOPs.cpp — rippled network operations implementation.
1#include <xrpld/app/consensus/RCLConsensus.h>
2#include <xrpld/app/consensus/RCLValidations.h>
3#include <xrpld/app/ledger/AcceptedLedger.h>
4#include <xrpld/app/ledger/InboundLedgers.h>
5#include <xrpld/app/ledger/LedgerMaster.h>
6#include <xrpld/app/ledger/LedgerToJson.h>
7#include <xrpld/app/ledger/LocalTxs.h>
8#include <xrpld/app/ledger/OpenLedger.h>
9#include <xrpld/app/ledger/OrderBookDB.h>
10#include <xrpld/app/ledger/TransactionMaster.h>
11#include <xrpld/app/main/LoadManager.h>
12#include <xrpld/app/main/Tuning.h>
13#include <xrpld/app/misc/AmendmentTable.h>
14#include <xrpld/app/misc/DeliverMax.h>
15#include <xrpld/app/misc/HashRouter.h>
16#include <xrpld/app/misc/LoadFeeTrack.h>
17#include <xrpld/app/misc/NetworkOPs.h>
18#include <xrpld/app/misc/Transaction.h>
19#include <xrpld/app/misc/TxQ.h>
20#include <xrpld/app/misc/ValidatorKeys.h>
21#include <xrpld/app/misc/ValidatorList.h>
22#include <xrpld/app/misc/detail/AccountTxPaging.h>
23#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
24#include <xrpld/app/tx/apply.h>
25#include <xrpld/consensus/Consensus.h>
26#include <xrpld/consensus/ConsensusParms.h>
27#include <xrpld/overlay/Cluster.h>
28#include <xrpld/overlay/Overlay.h>
29#include <xrpld/overlay/predicates.h>
30#include <xrpld/perflog/PerfLog.h>
31#include <xrpld/rpc/BookChanges.h>
32#include <xrpld/rpc/CTID.h>
33#include <xrpld/rpc/DeliveredAmount.h>
34#include <xrpld/rpc/MPTokenIssuanceID.h>
35#include <xrpld/rpc/ServerHandler.h>
36
37#include <xrpl/basics/UptimeClock.h>
38#include <xrpl/basics/mulDiv.h>
39#include <xrpl/basics/safe_cast.h>
40#include <xrpl/basics/scope.h>
41#include <xrpl/beast/utility/rngfill.h>
42#include <xrpl/crypto/RFC1751.h>
43#include <xrpl/crypto/csprng.h>
44#include <xrpl/protocol/BuildInfo.h>
45#include <xrpl/protocol/Feature.h>
46#include <xrpl/protocol/MultiApiJson.h>
47#include <xrpl/protocol/NFTSyntheticSerializer.h>
48#include <xrpl/protocol/RPCErr.h>
49#include <xrpl/protocol/TxFlags.h>
50#include <xrpl/protocol/jss.h>
51#include <xrpl/resource/Fees.h>
52#include <xrpl/resource/ResourceManager.h>
53
54#include <boost/asio/ip/host_name.hpp>
55#include <boost/asio/steady_timer.hpp>
56
57#include <algorithm>
58#include <exception>
59#include <mutex>
60#include <optional>
61#include <set>
62#include <sstream>
63#include <string>
64#include <tuple>
65#include <unordered_map>
66
67namespace ripple {
68
69class NetworkOPsImp final : public NetworkOPs
70{
76 {
77 public:
79 bool const admin;
80 bool const local;
82 bool applied = false;
84
87 bool a,
88 bool l,
89 FailHard f)
90 : transaction(t), admin(a), local(l), failType(f)
91 {
92 XRPL_ASSERT(
94 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
95 "valid inputs");
96 }
97 };
98
102 enum class DispatchState : unsigned char {
103 none,
104 scheduled,
105 running,
106 };
107
109
125 {
133
137 std::chrono::steady_clock::time_point start_ =
139 std::chrono::steady_clock::time_point const processStart_ = start_;
142
143 public:
145 {
147 .transitions = 1;
148 }
149
156 void
158
164 void
165 json(Json::Value& obj) const;
166
168 {
170 decltype(mode_) mode;
171 decltype(start_) start;
173 };
174
177 {
180 }
181 };
182
185 {
186 ServerFeeSummary() = default;
187
189 XRPAmount fee,
190 TxQ::Metrics&& escalationMetrics,
191 LoadFeeTrack const& loadFeeTrack);
192 bool
193 operator!=(ServerFeeSummary const& b) const;
194
195 bool
197 {
198 return !(*this != b);
199 }
200
205 };
206
207public:
209 Application& app,
211 bool standalone,
212 std::size_t minPeerCount,
213 bool start_valid,
214 JobQueue& job_queue,
216 ValidatorKeys const& validatorKeys,
217 boost::asio::io_context& io_svc,
218 beast::Journal journal,
219 beast::insight::Collector::ptr const& collector)
220 : app_(app)
221 , m_journal(journal)
224 , heartbeatTimer_(io_svc)
225 , clusterTimer_(io_svc)
226 , accountHistoryTxTimer_(io_svc)
227 , mConsensus(
228 app,
230 setup_FeeVote(app_.config().section("voting")),
231 app_.logs().journal("FeeVote")),
233 *m_localTX,
234 app.getInboundTransactions(),
235 beast::get_abstract_clock<std::chrono::steady_clock>(),
236 validatorKeys,
237 app_.logs().journal("LedgerConsensus"))
238 , validatorPK_(
239 validatorKeys.keys ? validatorKeys.keys->publicKey
240 : decltype(validatorPK_){})
242 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
243 : decltype(validatorMasterPK_){})
245 , m_job_queue(job_queue)
246 , m_standalone(standalone)
247 , minPeerCount_(start_valid ? 0 : minPeerCount)
248 , m_stats(std::bind(&NetworkOPsImp::collect_metrics, this), collector)
249 {
250 }
251
252 ~NetworkOPsImp() override
253 {
254 // This clear() is necessary to ensure the shared_ptrs in this map get
255 // destroyed NOW because the objects in this map invoke methods on this
256 // class when they are destroyed
258 }
259
260public:
262 getOperatingMode() const override;
263
265 strOperatingMode(OperatingMode const mode, bool const admin) const override;
266
268 strOperatingMode(bool const admin = false) const override;
269
270 //
271 // Transaction operations.
272 //
273
274 // Must complete immediately.
275 void
277
278 void
280 std::shared_ptr<Transaction>& transaction,
281 bool bUnlimited,
282 bool bLocal,
283 FailHard failType) override;
284
285 void
286 processTransactionSet(CanonicalTXSet const& set) override;
287
296 void
299 bool bUnlimited,
300 FailHard failType);
301
311 void
314 bool bUnlimited,
315 FailHard failtype);
316
317private:
318 bool
320
321 void
324 std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback);
325
326public:
330 void
332
338 void
340
341 //
342 // Owner functions.
343 //
344
348 AccountID const& account) override;
349
350 //
351 // Book functions.
352 //
353
354 void
357 Book const&,
358 AccountID const& uTakerID,
359 bool const bProof,
360 unsigned int iLimit,
361 Json::Value const& jvMarker,
362 Json::Value& jvResult) override;
363
364 // Ledger proposal/close functions.
365 bool
367
368 bool
371 std::string const& source) override;
372
373 void
374 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
375
376 // Network state machine.
377
378 // Used for the "jump" case.
379private:
380 void
382 bool
384
385public:
386 bool
388 uint256 const& networkClosed,
389 std::unique_ptr<std::stringstream> const& clog) override;
390 void
392 void
393 setStandAlone() override;
394
398 void
399 setStateTimer() override;
400
401 void
402 setNeedNetworkLedger() override;
403 void
404 clearNeedNetworkLedger() override;
405 bool
406 isNeedNetworkLedger() override;
407 bool
408 isFull() override;
409
410 void
411 setMode(OperatingMode om) override;
412
413 bool
414 isBlocked() override;
415 bool
416 isAmendmentBlocked() override;
417 void
418 setAmendmentBlocked() override;
419 bool
420 isAmendmentWarned() override;
421 void
422 setAmendmentWarned() override;
423 void
424 clearAmendmentWarned() override;
425 bool
426 isUNLBlocked() override;
427 void
428 setUNLBlocked() override;
429 void
430 clearUNLBlocked() override;
431 void
432 consensusViewChange() override;
433
435 getConsensusInfo() override;
437 getServerInfo(bool human, bool admin, bool counters) override;
438 void
439 clearLedgerFetch() override;
441 getLedgerFetchInfo() override;
444 std::optional<std::chrono::milliseconds> consensusDelay) override;
445 void
446 reportFeeChange() override;
447 void
449
450 void
451 updateLocalTx(ReadView const& view) override;
453 getLocalTxCount() override;
454
455 //
456 // Monitoring: publisher side.
457 //
458 void
459 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
460 void
463 std::shared_ptr<STTx const> const& transaction,
464 TER result) override;
465 void
466 pubValidation(std::shared_ptr<STValidation> const& val) override;
467
468 //--------------------------------------------------------------------------
469 //
470 // InfoSub::Source.
471 //
472 void
474 InfoSub::ref ispListener,
475 hash_set<AccountID> const& vnaAccountIDs,
476 bool rt) override;
477 void
479 InfoSub::ref ispListener,
480 hash_set<AccountID> const& vnaAccountIDs,
481 bool rt) override;
482
483 // Just remove the subscription from the tracking
484 // not from the InfoSub. Needed for InfoSub destruction
485 void
487 std::uint64_t seq,
488 hash_set<AccountID> const& vnaAccountIDs,
489 bool rt) override;
490
492 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
493 override;
494 void
496 InfoSub::ref ispListener,
497 AccountID const& account,
498 bool historyOnly) override;
499
500 void
502 std::uint64_t seq,
503 AccountID const& account,
504 bool historyOnly) override;
505
506 bool
507 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
508 bool
509 unsubLedger(std::uint64_t uListener) override;
510
511 bool
512 subBookChanges(InfoSub::ref ispListener) override;
513 bool
514 unsubBookChanges(std::uint64_t uListener) override;
515
516 bool
517 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
518 override;
519 bool
520 unsubServer(std::uint64_t uListener) override;
521
522 bool
523 subBook(InfoSub::ref ispListener, Book const&) override;
524 bool
525 unsubBook(std::uint64_t uListener, Book const&) override;
526
527 bool
528 subManifests(InfoSub::ref ispListener) override;
529 bool
530 unsubManifests(std::uint64_t uListener) override;
531 void
532 pubManifest(Manifest const&) override;
533
534 bool
535 subTransactions(InfoSub::ref ispListener) override;
536 bool
537 unsubTransactions(std::uint64_t uListener) override;
538
539 bool
540 subRTTransactions(InfoSub::ref ispListener) override;
541 bool
542 unsubRTTransactions(std::uint64_t uListener) override;
543
544 bool
545 subValidations(InfoSub::ref ispListener) override;
546 bool
547 unsubValidations(std::uint64_t uListener) override;
548
549 bool
550 subPeerStatus(InfoSub::ref ispListener) override;
551 bool
552 unsubPeerStatus(std::uint64_t uListener) override;
553 void
554 pubPeerStatus(std::function<Json::Value(void)> const&) override;
555
556 bool
557 subConsensus(InfoSub::ref ispListener) override;
558 bool
559 unsubConsensus(std::uint64_t uListener) override;
560
562 findRpcSub(std::string const& strUrl) override;
564 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
565 bool
566 tryRemoveRpcSub(std::string const& strUrl) override;
567
568 void
569 stop() override
570 {
571 {
572 try
573 {
574 heartbeatTimer_.cancel();
575 }
576 catch (boost::system::system_error const& e)
577 {
578 JLOG(m_journal.error())
579 << "NetworkOPs: heartbeatTimer cancel error: " << e.what();
580 }
581
582 try
583 {
584 clusterTimer_.cancel();
585 }
586 catch (boost::system::system_error const& e)
587 {
588 JLOG(m_journal.error())
589 << "NetworkOPs: clusterTimer cancel error: " << e.what();
590 }
591
592 try
593 {
594 accountHistoryTxTimer_.cancel();
595 }
596 catch (boost::system::system_error const& e)
597 {
598 JLOG(m_journal.error())
599 << "NetworkOPs: accountHistoryTxTimer cancel error: "
600 << e.what();
601 }
602 }
603 // Make sure that any waitHandlers pending in our timers are done.
604 using namespace std::chrono_literals;
605 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
606 }
607
608 void
609 stateAccounting(Json::Value& obj) override;
610
611private:
612 void
613 setTimer(
614 boost::asio::steady_timer& timer,
615 std::chrono::milliseconds const& expiry_time,
616 std::function<void()> onExpire,
617 std::function<void()> onError);
618 void
620 void
622 void
624 void
626
628 transJson(
629 std::shared_ptr<STTx const> const& transaction,
630 TER result,
631 bool validated,
634
635 void
638 AcceptedLedgerTx const& transaction,
639 bool last);
640
641 void
644 AcceptedLedgerTx const& transaction,
645 bool last);
646
647 void
650 std::shared_ptr<STTx const> const& transaction,
651 TER result);
652
653 void
654 pubServer();
655 void
657
659 getHostId(bool forAdmin);
660
661private:
665
666 /*
667 * With a validated ledger to separate history and future, the node
668 * streams historical txns with negative indexes starting from -1,
669 * and streams future txns starting from index 0.
670 * The SubAccountHistoryIndex struct maintains these indexes.
671 * It also has a flag stopHistorical_ for stopping streaming
672 * the historical txns.
673 */
710
714 void
718 void
720 void
722
725
727
729
731
736
738 boost::asio::steady_timer heartbeatTimer_;
739 boost::asio::steady_timer clusterTimer_;
740 boost::asio::steady_timer accountHistoryTxTimer_;
741
743
746
748
750
753
755
757
758 enum SubTypes {
759 sLedger, // Accepted ledgers.
760 sManifests, // Received validator manifests.
761 sServer, // When server changes connectivity state.
762 sTransactions, // All accepted transactions.
763 sRTTransactions, // All proposed and accepted transactions.
764 sValidations, // Received validations.
765 sPeerStatus, // Peer status changes.
766 sConsensusPhase, // Consensus phase
767 sBookChanges, // Per-ledger order book changes
768 sLastEntry // Any new entry must be ADDED ABOVE this one
769 };
770
772
774
776
777 // Whether we are in standalone mode.
778 bool const m_standalone;
779
780 // The number of nodes that we need to consider ourselves connected.
782
783 // Transaction batching.
788
790
793
794private:
795 struct Stats
796 {
797 template <class Handler>
799 Handler const& handler,
800 beast::insight::Collector::ptr const& collector)
801 : hook(collector->make_hook(handler))
802 , disconnected_duration(collector->make_gauge(
803 "State_Accounting",
804 "Disconnected_duration"))
805 , connected_duration(collector->make_gauge(
806 "State_Accounting",
807 "Connected_duration"))
809 collector->make_gauge("State_Accounting", "Syncing_duration"))
810 , tracking_duration(collector->make_gauge(
811 "State_Accounting",
812 "Tracking_duration"))
814 collector->make_gauge("State_Accounting", "Full_duration"))
815 , disconnected_transitions(collector->make_gauge(
816 "State_Accounting",
817 "Disconnected_transitions"))
818 , connected_transitions(collector->make_gauge(
819 "State_Accounting",
820 "Connected_transitions"))
821 , syncing_transitions(collector->make_gauge(
822 "State_Accounting",
823 "Syncing_transitions"))
824 , tracking_transitions(collector->make_gauge(
825 "State_Accounting",
826 "Tracking_transitions"))
828 collector->make_gauge("State_Accounting", "Full_transitions"))
829 {
830 }
831
838
844 };
845
846 std::mutex m_statsMutex; // Mutex to lock m_stats
848
849private:
850 void
852};
853
854//------------------------------------------------------------------------------
855
857 {"disconnected", "connected", "syncing", "tracking", "full"}};
858
860
868
869static auto const genesisAccountId = calcAccountID(
871 .first);
872
873//------------------------------------------------------------------------------
874inline OperatingMode
876{
877 return mMode;
878}
879
880inline std::string
881NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
882{
883 return strOperatingMode(mMode, admin);
884}
885
886inline void
891
892inline void
897
898inline void
903
904inline bool
909
910inline bool
915
918{
919 static std::string const hostname = boost::asio::ip::host_name();
920
921 if (forAdmin)
922 return hostname;
923
924 // For non-admin uses hash the node public key into a
925 // single RFC1751 word:
926 static std::string const shroudedHostId = [this]() {
927 auto const& id = app_.nodeIdentity();
928
929 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
930 }();
931
932 return shroudedHostId;
933}
934
935void
937{
939
940 // Only do this work if a cluster is configured
941 if (app_.cluster().size() != 0)
943}
944
945void
947 boost::asio::steady_timer& timer,
948 std::chrono::milliseconds const& expiry_time,
949 std::function<void()> onExpire,
950 std::function<void()> onError)
951{
952 // Only start the timer if waitHandlerCounter_ is not yet joined.
953 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
954 [this, onExpire, onError](boost::system::error_code const& e) {
955 if ((e.value() == boost::system::errc::success) &&
956 (!m_job_queue.isStopped()))
957 {
958 onExpire();
959 }
960 // Recover as best we can if an unexpected error occurs.
961 if (e.value() != boost::system::errc::success &&
962 e.value() != boost::asio::error::operation_aborted)
963 {
964 // Try again later and hope for the best.
965 JLOG(m_journal.error())
966 << "Timer got error '" << e.message()
967 << "'. Restarting timer.";
968 onError();
969 }
970 }))
971 {
972 timer.expires_after(expiry_time);
973 timer.async_wait(std::move(*optionalCountedHandler));
974 }
975}
976
977void
978NetworkOPsImp::setHeartbeatTimer()
979{
980 setTimer(
981 heartbeatTimer_,
982 mConsensus.parms().ledgerGRANULARITY,
983 [this]() {
984 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
985 processHeartbeatTimer();
986 });
987 },
988 [this]() { setHeartbeatTimer(); });
989}
990
991void
992NetworkOPsImp::setClusterTimer()
993{
994 using namespace std::chrono_literals;
995
996 setTimer(
997 clusterTimer_,
998 10s,
999 [this]() {
1000 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
1001 processClusterTimer();
1002 });
1003 },
1004 [this]() { setClusterTimer(); });
1005}
1006
1007void
1008NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
1009{
1010 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
1011 << toBase58(subInfo.index_->accountId_);
1012 using namespace std::chrono_literals;
1013 setTimer(
1014 accountHistoryTxTimer_,
1015 4s,
1016 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1017 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1018}
1019
1020void
1021NetworkOPsImp::processHeartbeatTimer()
1022{
1023 RclConsensusLogger clog(
1024 "Heartbeat Timer", mConsensus.validating(), m_journal);
1025 {
1026 std::unique_lock lock{app_.getMasterMutex()};
1027
1028 // VFALCO NOTE This is for diagnosing a crash on exit
1029 LoadManager& mgr(app_.getLoadManager());
1030 mgr.heartbeat();
1031
1032 std::size_t const numPeers = app_.overlay().size();
1033
1034 // do we have sufficient peers? If not, we are disconnected.
1035 if (numPeers < minPeerCount_)
1036 {
1037 if (mMode != OperatingMode::DISCONNECTED)
1038 {
1039 setMode(OperatingMode::DISCONNECTED);
1041 ss << "Node count (" << numPeers << ") has fallen "
1042 << "below required minimum (" << minPeerCount_ << ").";
1043 JLOG(m_journal.warn()) << ss.str();
1044 CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
1045 }
1046 else
1047 {
1048 CLOG(clog.ss())
1049 << "already DISCONNECTED. too few peers (" << numPeers
1050 << "), need at least " << minPeerCount_;
1051 }
1052
1053 // MasterMutex lock need not be held to call setHeartbeatTimer()
1054 lock.unlock();
1055 // We do not call mConsensus.timerEntry until there are enough
1056 // peers providing meaningful inputs to consensus
1057 setHeartbeatTimer();
1058
1059 return;
1060 }
1061
1062 if (mMode == OperatingMode::DISCONNECTED)
1063 {
1064 setMode(OperatingMode::CONNECTED);
1065 JLOG(m_journal.info())
1066 << "Node count (" << numPeers << ") is sufficient.";
1067 CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
1068 << " peers. ";
1069 }
1070
1071 // Check if the last validated ledger forces a change between these
1072 // states.
1073 auto origMode = mMode.load();
1074 CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
1075 if (mMode == OperatingMode::SYNCING)
1076 setMode(OperatingMode::SYNCING);
1077 else if (mMode == OperatingMode::CONNECTED)
1078 setMode(OperatingMode::CONNECTED);
1079 auto newMode = mMode.load();
1080 if (origMode != newMode)
1081 {
1082 CLOG(clog.ss())
1083 << ", changing to " << strOperatingMode(newMode, true);
1084 }
1085 CLOG(clog.ss()) << ". ";
1086 }
1087
1088 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());
1089
1090 CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
1091 ConsensusPhase const currPhase = mConsensus.phase();
1092 if (mLastConsensusPhase != currPhase)
1093 {
1094 reportConsensusStateChange(currPhase);
1095 mLastConsensusPhase = currPhase;
1096 CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
1097 }
1098 CLOG(clog.ss()) << ". ";
1099
1100 setHeartbeatTimer();
1101}
1102
1103void
1104NetworkOPsImp::processClusterTimer()
1105{
1106 if (app_.cluster().size() == 0)
1107 return;
1108
1109 using namespace std::chrono_literals;
1110
1111 bool const update = app_.cluster().update(
1112 app_.nodeIdentity().first,
1113 "",
1114 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1115 ? app_.getFeeTrack().getLocalFee()
1116 : 0,
1117 app_.timeKeeper().now());
1118
1119 if (!update)
1120 {
1121 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1122 setClusterTimer();
1123 return;
1124 }
1125
1126 protocol::TMCluster cluster;
1127 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1128 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1129 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1130 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1131 n.set_nodeload(node.getLoadFee());
1132 if (!node.name().empty())
1133 n.set_nodename(node.name());
1134 });
1135
1136 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1137 for (auto& item : gossip.items)
1138 {
1139 protocol::TMLoadSource& node = *cluster.add_loadsources();
1140 node.set_name(to_string(item.address));
1141 node.set_cost(item.balance);
1142 }
1143 app_.overlay().foreach(send_if(
1144 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1145 peer_in_cluster()));
1146 setClusterTimer();
1147}
1148
1149//------------------------------------------------------------------------------
1150
1152NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
1153 const
1154{
1155 if (mode == OperatingMode::FULL && admin)
1156 {
1157 auto const consensusMode = mConsensus.mode();
1158 if (consensusMode != ConsensusMode::wrongLedger)
1159 {
1160 if (consensusMode == ConsensusMode::proposing)
1161 return "proposing";
1162
1163 if (mConsensus.validating())
1164 return "validating";
1165 }
1166 }
1167
1168 return states_[static_cast<std::size_t>(mode)];
1169}
1170
1171void
1172NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1173{
1174 if (isNeedNetworkLedger())
1175 {
1176 // Nothing we can do if we've never been in sync
1177 return;
1178 }
1179
1180 // Enforce Network bar for batch txn
1181 if (iTrans->isFlag(tfInnerBatchTxn) &&
1182 m_ledgerMaster.getValidatedRules().enabled(featureBatch))
1183 {
1184 JLOG(m_journal.error())
1185 << "Submitted transaction invalid: tfInnerBatchTxn flag present.";
1186 return;
1187 }
1188
1189 // this is an asynchronous interface
1190 auto const trans = sterilize(*iTrans);
1191
1192 auto const txid = trans->getTransactionID();
1193 auto const flags = app_.getHashRouter().getFlags(txid);
1194
1195 if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1196 {
1197 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1198 return;
1199 }
1200
1201 try
1202 {
1203 auto const [validity, reason] = checkValidity(
1204 app_.getHashRouter(),
1205 *trans,
1206 m_ledgerMaster.getValidatedRules(),
1207 app_.config());
1208
1209 if (validity != Validity::Valid)
1210 {
1211 JLOG(m_journal.warn())
1212 << "Submitted transaction invalid: " << reason;
1213 return;
1214 }
1215 }
1216 catch (std::exception const& ex)
1217 {
1218 JLOG(m_journal.warn())
1219 << "Exception checking transaction " << txid << ": " << ex.what();
1220
1221 return;
1222 }
1223
1224 std::string reason;
1225
1226 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1227
1228 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1229 auto t = tx;
1230 processTransaction(t, false, false, FailHard::no);
1231 });
1232}
1233
1234bool
1235NetworkOPsImp::preProcessTransaction(std::shared_ptr<Transaction>& transaction)
1236{
1237 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1238
1239 if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1240 {
1241 // cached bad
1242 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1243 transaction->setStatus(INVALID);
1244 transaction->setResult(temBAD_SIGNATURE);
1245 return false;
1246 }
1247
1248 auto const view = m_ledgerMaster.getCurrentLedger();
1249
1250 // This function is called by several different parts of the codebase
1251 // under no circumstances will we ever accept an inner txn within a batch
1252 // txn from the network.
1253 auto const sttx = *transaction->getSTransaction();
1254 if (sttx.isFlag(tfInnerBatchTxn) && view->rules().enabled(featureBatch))
1255 {
1256 transaction->setStatus(INVALID);
1257 transaction->setResult(temINVALID_FLAG);
1258 app_.getHashRouter().setFlags(
1259 transaction->getID(), HashRouterFlags::BAD);
1260 return false;
1261 }
1262
1263 // NOTE eahennis - I think this check is redundant,
1264 // but I'm not 100% sure yet.
1265 // If so, only cost is looking up HashRouter flags.
1266 auto const [validity, reason] =
1267 checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config());
1268 XRPL_ASSERT(
1269 validity == Validity::Valid,
1270 "ripple::NetworkOPsImp::processTransaction : valid validity");
1271
1272 // Not concerned with local checks at this point.
1273 if (validity == Validity::SigBad)
1274 {
1275 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1276 transaction->setStatus(INVALID);
1277 transaction->setResult(temBAD_SIGNATURE);
1278 app_.getHashRouter().setFlags(
1279 transaction->getID(), HashRouterFlags::BAD);
1280 return false;
1281 }
1282
1283 // canonicalize can change our pointer
1284 app_.getMasterTransaction().canonicalize(&transaction);
1285
1286 return true;
1287}
1288
1289void
1290NetworkOPsImp::processTransaction(
1291 std::shared_ptr<Transaction>& transaction,
1292 bool bUnlimited,
1293 bool bLocal,
1294 FailHard failType)
1295{
1296 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1297
1298 // preProcessTransaction can change our pointer
1299 if (!preProcessTransaction(transaction))
1300 return;
1301
1302 if (bLocal)
1303 doTransactionSync(transaction, bUnlimited, failType);
1304 else
1305 doTransactionAsync(transaction, bUnlimited, failType);
1306}
1307
1308void
1309NetworkOPsImp::doTransactionAsync(
1310 std::shared_ptr<Transaction> transaction,
1311 bool bUnlimited,
1312 FailHard failType)
1313{
1314 std::lock_guard lock(mMutex);
1315
1316 if (transaction->getApplying())
1317 return;
1318
1319 mTransactions.push_back(
1320 TransactionStatus(transaction, bUnlimited, false, failType));
1321 transaction->setApplying();
1322
1323 if (mDispatchState == DispatchState::none)
1324 {
1325 if (m_job_queue.addJob(
1326 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1327 {
1328 mDispatchState = DispatchState::scheduled;
1329 }
1330 }
1331}
1332
1333void
1334NetworkOPsImp::doTransactionSync(
1335 std::shared_ptr<Transaction> transaction,
1336 bool bUnlimited,
1337 FailHard failType)
1338{
1339 std::unique_lock<std::mutex> lock(mMutex);
1340
1341 if (!transaction->getApplying())
1342 {
1343 mTransactions.push_back(
1344 TransactionStatus(transaction, bUnlimited, true, failType));
1345 transaction->setApplying();
1346 }
1347
1348 doTransactionSyncBatch(
1349 lock, [&transaction](std::unique_lock<std::mutex> const&) {
1350 return transaction->getApplying();
1351 });
1352}
1353
1354void
1355NetworkOPsImp::doTransactionSyncBatch(
1357 std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback)
1358{
1359 do
1360 {
1361 if (mDispatchState == DispatchState::running)
1362 {
1363 // A batch processing job is already running, so wait.
1364 mCond.wait(lock);
1365 }
1366 else
1367 {
1368 apply(lock);
1369
1370 if (mTransactions.size())
1371 {
1372 // More transactions need to be applied, but by another job.
1373 if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
1374 transactionBatch();
1375 }))
1376 {
1377 mDispatchState = DispatchState::scheduled;
1378 }
1379 }
1380 }
1381 } while (retryCallback(lock));
1382}
1383
1384void
1385NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set)
1386{
1387 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXNSet");
1389 candidates.reserve(set.size());
1390 for (auto const& [_, tx] : set)
1391 {
1392 std::string reason;
1393 auto transaction = std::make_shared<Transaction>(tx, reason, app_);
1394
1395 if (transaction->getStatus() == INVALID)
1396 {
1397 if (!reason.empty())
1398 {
1399 JLOG(m_journal.trace())
1400 << "Exception checking transaction: " << reason;
1401 }
1402 app_.getHashRouter().setFlags(
1403 tx->getTransactionID(), HashRouterFlags::BAD);
1404 continue;
1405 }
1406
1407 // preProcessTransaction can change our pointer
1408 if (!preProcessTransaction(transaction))
1409 continue;
1410
1411 candidates.emplace_back(transaction);
1412 }
1413
1414 std::vector<TransactionStatus> transactions;
1415 transactions.reserve(candidates.size());
1416
1417 std::unique_lock lock(mMutex);
1418
1419 for (auto& transaction : candidates)
1420 {
1421 if (!transaction->getApplying())
1422 {
1423 transactions.emplace_back(transaction, false, false, FailHard::no);
1424 transaction->setApplying();
1425 }
1426 }
1427
1428 if (mTransactions.empty())
1429 mTransactions.swap(transactions);
1430 else
1431 {
1432 mTransactions.reserve(mTransactions.size() + transactions.size());
1433 for (auto& t : transactions)
1434 mTransactions.push_back(std::move(t));
1435 }
1436 if (mTransactions.empty())
1437 {
1438 JLOG(m_journal.debug()) << "No transaction to process!";
1439 return;
1440 }
1441
1442 doTransactionSyncBatch(lock, [&](std::unique_lock<std::mutex> const&) {
1443 XRPL_ASSERT(
1444 lock.owns_lock(),
1445 "ripple::NetworkOPsImp::processTransactionSet has lock");
1446 return std::any_of(
1447 mTransactions.begin(), mTransactions.end(), [](auto const& t) {
1448 return t.transaction->getApplying();
1449 });
1450 });
1451}
1452
1453void
1454NetworkOPsImp::transactionBatch()
1455{
1456 std::unique_lock<std::mutex> lock(mMutex);
1457
1458 if (mDispatchState == DispatchState::running)
1459 return;
1460
1461 while (mTransactions.size())
1462 {
1463 apply(lock);
1464 }
1465}
1466
void
NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
{
    // Take ownership of the queued batch, apply each transaction to the
    // open ledger via the TxQ, then post-process each result: publish,
    // relay, hold, or discard. Called with batchLock held; the lock is
    // released during the heavy work and re-acquired at the end.
    //
    // NOTE(review): the declaration of `submit_held` (a
    // std::vector<TransactionStatus> used near the bottom of this function)
    // appears to have been lost in extraction — confirm against the
    // original file.
    std::vector<TransactionStatus> transactions;
    mTransactions.swap(transactions);
    XRPL_ASSERT(
        !transactions.empty(),
        "ripple::NetworkOPsImp::apply : non-empty transactions");
    XRPL_ASSERT(
        mDispatchState != DispatchState::running,
        "ripple::NetworkOPsImp::apply : is not running");

    mDispatchState = DispatchState::running;

    batchLock.unlock();

    {
        // Acquire the master and ledger mutexes together (std::lock) to
        // avoid lock-order deadlock, then apply the whole batch inside a
        // single openLedger().modify() call.
        std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
        bool changed = false;
        {
            std::unique_lock ledgerLock{
                m_ledgerMaster.peekMutex(), std::defer_lock};
            std::lock(masterLock, ledgerLock);

            app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
                for (TransactionStatus& e : transactions)
                {
                    // we check before adding to the batch
                    ApplyFlags flags = tapNONE;
                    if (e.admin)
                        flags |= tapUNLIMITED;

                    if (e.failType == FailHard::yes)
                        flags |= tapFAIL_HARD;

                    auto const result = app_.getTxQ().apply(
                        app_, view, e.transaction->getSTransaction(), flags, j);
                    e.result = result.ter;
                    e.applied = result.applied;
                    changed = changed || result.applied;
                }
                return changed;
            });
        }
        // Applying transactions can alter the fee state of the open ledger.
        if (changed)
            reportFeeChange();

        std::optional<LedgerIndex> validatedLedgerIndex;
        if (auto const l = m_ledgerMaster.getValidatedLedger())
            validatedLedgerIndex = l->info().seq;

        auto newOL = app_.openLedger().current();
        for (TransactionStatus& e : transactions)
        {
            e.transaction->clearSubmitResult();

            if (e.applied)
            {
                pubProposedTransaction(
                    newOL, e.transaction->getSTransaction(), e.result);
                e.transaction->setApplied();
            }

            e.transaction->setResult(e.result);

            // A malformed transaction is permanently bad; remember that in
            // the HashRouter so it is not held or relayed again.
            if (isTemMalformed(e.result))
                app_.getHashRouter().setFlags(
                    e.transaction->getID(), HashRouterFlags::BAD);

#ifdef DEBUG
            if (e.result != tesSUCCESS)
            {
                std::string token, human;

                if (transResultInfo(e.result, token, human))
                {
                    JLOG(m_journal.info())
                        << "TransactionResult: " << token << ": " << human;
                }
            }
#endif

            bool addLocal = e.local;

            if (e.result == tesSUCCESS)
            {
                JLOG(m_journal.debug())
                    << "Transaction is now included in open ledger";
                e.transaction->setStatus(INCLUDED);

                // Pop as many "reasonable" transactions for this account as
                // possible. "Reasonable" means they have sequential sequence
                // numbers, or use tickets.
                auto const& txCur = e.transaction->getSTransaction();

                std::size_t count = 0;
                for (auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
                     txNext && count < maxPoppedTransactions;
                     txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
                {
                    // submit_held is shared state; re-take the batch lock
                    // while queueing follow-on transactions.
                    if (!batchLock.owns_lock())
                        batchLock.lock();
                    std::string reason;
                    auto const trans = sterilize(*txNext);
                    auto t = std::make_shared<Transaction>(trans, reason, app_);
                    if (t->getApplying())
                        break;
                    submit_held.emplace_back(t, false, false, FailHard::no);
                    t->setApplying();
                }
                if (batchLock.owns_lock())
                    batchLock.unlock();
            }
            else if (e.result == tefPAST_SEQ)
            {
                // duplicate or conflict
                JLOG(m_journal.info()) << "Transaction is obsolete";
                e.transaction->setStatus(OBSOLETE);
            }
            else if (e.result == terQUEUED)
            {
                JLOG(m_journal.debug())
                    << "Transaction is likely to claim a"
                    << " fee, but is queued until fee drops";

                e.transaction->setStatus(HELD);
                // Add to held transactions, because it could get
                // kicked out of the queue, and this will try to
                // put it back.
                m_ledgerMaster.addHeldTransaction(e.transaction);
                e.transaction->setQueued();
                e.transaction->setKept();
            }
            else if (
                isTerRetry(e.result) || isTelLocal(e.result) ||
                isTefFailure(e.result))
            {
                if (e.failType != FailHard::yes)
                {
                    auto const lastLedgerSeq =
                        e.transaction->getSTransaction()->at(
                            ~sfLastLedgerSequence);
                    auto const ledgersLeft = lastLedgerSeq
                        ? *lastLedgerSeq -
                            m_ledgerMaster.getCurrentLedgerIndex()
                    // NOTE(review): the else-branch of this conditional
                    // expression (presumably ": std::optional<...>{}")
                    // appears to have been lost in extraction.
                    // If any of these conditions are met, the transaction can
                    // be held:
                    // 1. It was submitted locally. (Note that this flag is only
                    //    true on the initial submission.)
                    // 2. The transaction has a LastLedgerSequence, and the
                    //    LastLedgerSequence is fewer than LocalTxs::holdLedgers
                    //    (5) ledgers into the future. (Remember that an
                    //    unseated optional compares as less than all seated
                    //    values, so it has to be checked explicitly first.)
                    // 3. The HashRouterFlags::BAD flag is not set on the txID.
                    //    (setFlags
                    //    checks before setting. If the flag is set, it returns
                    //    false, which means it's been held once without one of
                    //    the other conditions, so don't hold it again. Time's
                    //    up!)
                    //
                    if (e.local ||
                        (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
                        app_.getHashRouter().setFlags(
                            e.transaction->getID(), HashRouterFlags::HELD))
                    {
                        // transaction should be held
                        JLOG(m_journal.debug())
                            << "Transaction should be held: " << e.result;
                        e.transaction->setStatus(HELD);
                        m_ledgerMaster.addHeldTransaction(e.transaction);
                        e.transaction->setKept();
                    }
                    else
                        JLOG(m_journal.debug())
                            << "Not holding transaction "
                            << e.transaction->getID() << ": "
                            << (e.local ? "local" : "network") << ", "
                            << "result: " << e.result << " ledgers left: "
                            << (ledgersLeft ? to_string(*ledgersLeft)
                                            : "unspecified");
                }
            }
            else
            {
                JLOG(m_journal.debug())
                    << "Status other than success " << e.result;
                e.transaction->setStatus(INVALID);
            }

            // fail_hard submissions that did not succeed are neither kept
            // locally nor relayed.
            auto const enforceFailHard =
                e.failType == FailHard::yes && !isTesSuccess(e.result);

            if (addLocal && !enforceFailHard)
            {
                m_localTX->push_back(
                    m_ledgerMaster.getCurrentLedgerIndex(),
                    e.transaction->getSTransaction());
                e.transaction->setKept();
            }

            if ((e.applied ||
                 ((mMode != OperatingMode::FULL) &&
                  (e.failType != FailHard::yes) && e.local) ||
                 (e.result == terQUEUED)) &&
                !enforceFailHard)
            {
                auto const toSkip =
                    app_.getHashRouter().shouldRelay(e.transaction->getID());
                if (auto const sttx = *(e.transaction->getSTransaction());
                    toSkip &&
                    // Skip relaying if it's an inner batch txn. The flag should
                    // only be set if the Batch feature is enabled. If Batch is
                    // not enabled, the flag is always invalid, so don't relay
                    // it regardless.
                    !sttx.isFlag(tfInnerBatchTxn))
                {
                    protocol::TMTransaction tx;
                    Serializer s;

                    sttx.add(s);
                    tx.set_rawtransaction(s.data(), s.size());
                    tx.set_status(protocol::tsCURRENT);
                    tx.set_receivetimestamp(
                        app_.timeKeeper().now().time_since_epoch().count());
                    tx.set_deferred(e.result == terQUEUED);
                    // FIXME: This should be when we received it
                    app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
                    e.transaction->setBroadcast();
                }
            }

            // Record the fee/sequence context against the latest validated
            // ledger so RPC clients can reason about resubmission.
            if (validatedLedgerIndex)
            {
                auto [fee, accountSeq, availableSeq] =
                    app_.getTxQ().getTxRequiredFeeAndSeq(
                        *newOL, e.transaction->getSTransaction());
                e.transaction->setCurrentLedgerState(
                    *validatedLedgerIndex, fee, accountSeq, availableSeq);
            }
        }
    }

    batchLock.lock();

    for (TransactionStatus& e : transactions)
        e.transaction->clearApplying();

    // Queue any follow-on transactions popped above for the next batch.
    if (!submit_held.empty())
    {
        if (mTransactions.empty())
            mTransactions.swap(submit_held);
        else
        {
            mTransactions.reserve(mTransactions.size() + submit_held.size());
            for (auto& e : submit_held)
                mTransactions.push_back(std::move(e));
        }
    }

    mCond.notify_all();

    mDispatchState = DispatchState::none;
}
1733
1734//
1735// Owner functions
1736//
1737
// Walk an account's owner directory and collect its owned ledger entries
// (offers and trust lines) as a JSON object.
// NOTE(review): the return type line (presumably Json::Value) and the
// ledger parameter line (the body reads through `lpLedger`) appear to have
// been lost in extraction — confirm against the original file.
NetworkOPsImp::getOwnerInfo(
    AccountID const& account)
{
    Json::Value jvObjects(Json::objectValue);
    auto root = keylet::ownerDir(account);
    auto sleNode = lpLedger->read(keylet::page(root));
    if (sleNode)
    {
        std::uint64_t uNodeDir;

        // Follow the directory's page chain until sfIndexNext is zero.
        do
        {
            for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
            {
                auto sleCur = lpLedger->read(keylet::child(uDirEntry));
                XRPL_ASSERT(
                    sleCur,
                    "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");

                switch (sleCur->getType())
                {
                    case ltOFFER:
                        if (!jvObjects.isMember(jss::offers))
                            jvObjects[jss::offers] =
                        // NOTE(review): the array initializer (presumably
                        // Json::Value(Json::arrayValue);) appears to have
                        // been lost in extraction.

                        jvObjects[jss::offers].append(
                            sleCur->getJson(JsonOptions::none));
                        break;

                    case ltRIPPLE_STATE:
                        if (!jvObjects.isMember(jss::ripple_lines))
                        {
                            jvObjects[jss::ripple_lines] =
                        // NOTE(review): the array initializer appears to
                        // have been lost in extraction here as well.
                        }

                        jvObjects[jss::ripple_lines].append(
                            sleCur->getJson(JsonOptions::none));
                        break;

                    case ltACCOUNT_ROOT:
                    case ltDIR_NODE:
                    // LCOV_EXCL_START
                    default:
                        // Owner directories only hold offers and trust
                        // lines here; anything else is a protocol error.
                        UNREACHABLE(
                            "ripple::NetworkOPsImp::getOwnerInfo : invalid "
                            "type");
                        break;
                        // LCOV_EXCL_STOP
                }
            }

            uNodeDir = sleNode->getFieldU64(sfIndexNext);

            if (uNodeDir)
            {
                sleNode = lpLedger->read(keylet::page(root, uNodeDir));
                XRPL_ASSERT(
                    sleNode,
                    "ripple::NetworkOPsImp::getOwnerInfo : read next page");
            }
        } while (uNodeDir);
    }

    return jvObjects;
}
1807
1808//
1809// Other
1810//
1811
1812inline bool
1813NetworkOPsImp::isBlocked()
1814{
1815 return isAmendmentBlocked() || isUNLBlocked();
1816}
1817
inline bool
NetworkOPsImp::isAmendmentBlocked()
{
    // True once setAmendmentBlocked() has been called; nothing in this
    // file clears the flag.
    return amendmentBlocked_;
}
1823
void
NetworkOPsImp::setAmendmentBlocked()
{
    // An amendment-blocked server must not claim to be tracking or fully
    // synced, so fall back to CONNECTED (setMode also refuses to raise the
    // mode while blocked).
    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1830
1831inline bool
1832NetworkOPsImp::isAmendmentWarned()
1833{
1834 return !amendmentBlocked_ && amendmentWarned_;
1835}
1836
inline void
NetworkOPsImp::setAmendmentWarned()
{
    // Surfaced to admins via getServerInfo()'s "unsupported majority"
    // warning while not amendment blocked.
    amendmentWarned_ = true;
}
1842
inline void
NetworkOPsImp::clearAmendmentWarned()
{
    // Clear the warning (e.g. once no unsupported amendment holds majority
    // — presumably set/cleared by the amendment table logic; not visible
    // here).
    amendmentWarned_ = false;
}
1848
inline bool
NetworkOPsImp::isUNLBlocked()
{
    // True while the validator list is considered expired/unusable.
    return unlBlocked_;
}
1854
void
NetworkOPsImp::setUNLBlocked()
{
    // As with amendment blocking, a UNL-blocked server drops back to
    // CONNECTED and setMode refuses to raise the mode while blocked.
    unlBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1861
inline void
NetworkOPsImp::clearUNLBlocked()
{
    // Called once a usable validator list is available again.
    unlBlocked_ = false;
}
1867
bool
NetworkOPsImp::checkLastClosedLedger(
    Overlay::PeerSequence const& peerList,
    uint256& networkClosed)
{
    // Returns true if there's an *abnormal* ledger issue, normal changing in
    // TRACKING mode should return false. Do we have sufficient validations for
    // our last closed ledger? Or do sufficient nodes agree? And do we have no
    // better ledger available? If so, we are either tracking or full.
    //
    // On return, networkClosed is set to the hash of the network's agreed
    // last closed ledger (or our own when we refuse to switch).

    JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";

    auto const ourClosed = m_ledgerMaster.getClosedLedger();

    if (!ourClosed)
        return false;

    uint256 closedLedger = ourClosed->info().hash;
    uint256 prevClosedLedger = ourClosed->info().parentHash;
    JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
    JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;

    //-------------------------------------------------------------------------
    // Determine preferred last closed ledger

    auto& validations = app_.getValidations();
    JLOG(m_journal.debug())
        << "ValidationTrie " << Json::Compact(validations.getJsonTrie());

    // Will rely on peer LCL if no trusted validations exist
    // NOTE(review): the declaration of `peerCounts` (a hash->count map)
    // appears to have been lost in extraction — confirm against the
    // original file.
    peerCounts[closedLedger] = 0;
    // Our own vote for our LCL only counts if we are at least TRACKING.
    if (mMode >= OperatingMode::TRACKING)
        peerCounts[closedLedger]++;

    for (auto& peer : peerList)
    {
        uint256 peerLedger = peer->getClosedLedgerHash();

        if (peerLedger.isNonZero())
            ++peerCounts[peerLedger];
    }

    for (auto const& it : peerCounts)
        JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;

    uint256 preferredLCL = validations.getPreferredLCL(
        RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
        m_ledgerMaster.getValidLedgerIndex(),
        peerCounts);

    bool switchLedgers = preferredLCL != closedLedger;
    if (switchLedgers)
        closedLedger = preferredLCL;
    //-------------------------------------------------------------------------
    if (switchLedgers && (closedLedger == prevClosedLedger))
    {
        // don't switch to our own previous ledger
        JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
        networkClosed = ourClosed->info().hash;
        switchLedgers = false;
    }
    else
        networkClosed = closedLedger;

    if (!switchLedgers)
        return false;

    // We prefer a different ledger; fetch it if we don't already have it.
    auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);

    if (!consensus)
        consensus = app_.getInboundLedgers().acquire(
            closedLedger, 0, InboundLedger::Reason::CONSENSUS);

    if (consensus &&
        (!m_ledgerMaster.canBeCurrent(consensus) ||
         !m_ledgerMaster.isCompatible(
             *consensus, m_journal.debug(), "Not switching")))
    {
        // Don't switch to a ledger not on the validated chain
        // or with an invalid close time or sequence
        networkClosed = ourClosed->info().hash;
        return false;
    }

    JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
    JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
                           << getJson({*ourClosed, {}});
    JLOG(m_journal.info()) << "Net LCL " << closedLedger;

    // Being off the consensus ledger means we cannot be tracking/full.
    if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
    {
        setMode(OperatingMode::CONNECTED);
    }

    if (consensus)
    {
        // FIXME: If this rewinds the ledger sequence, or has the same
        // sequence, we should update the status on any stored transactions
        // in the invalidated ledgers.
        switchLastClosedLedger(consensus);
    }

    return true;
}
1973
void
NetworkOPsImp::switchLastClosedLedger(
    std::shared_ptr<Ledger const> const& newLCL)
{
    // set the newLCL as our last closed ledger -- this is abnormal code
    JLOG(m_journal.error())
        << "JUMP last closed ledger to " << newLCL->info().hash;

    clearNeedNetworkLedger();

    // Update fee computations.
    app_.getTxQ().processClosedLedger(app_, *newLCL, true);

    // Caller must own master lock
    {
        // Apply tx in old open ledger to new
        // open ledger. Then apply local tx.

        auto retries = m_localTX->getTxSet();
        auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
        // NOTE(review): the declaration of `rules` (presumably
        // std::optional<Rules>) appears to have been lost in extraction —
        // confirm against the original file.
        if (lastVal)
            rules = makeRulesGivenLedger(*lastVal, app_.config().features);
        else
            rules.emplace(app_.config().features);
        app_.openLedger().accept(
            app_,
            *rules,
            newLCL,
            OrderedTxs({}),
            false,
            retries,
            tapNONE,
            "jump",
            [&](OpenView& view, beast::Journal j) {
                // Stuff the ledger with transactions from the queue.
                return app_.getTxQ().accept(app_, view);
            });
    }

    m_ledgerMaster.switchLCL(newLCL);

    // Announce the ledger switch to all peers.
    protocol::TMStatusChange s;
    s.set_newevent(protocol::neSWITCHED_LEDGER);
    s.set_ledgerseq(newLCL->info().seq);
    s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
    s.set_ledgerhashprevious(
        newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
    s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());

    app_.overlay().foreach(
        send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
}
2027
// Start a consensus round on top of our current closed ledger. Returns
// false (without starting) when the previous ledger is unavailable.
bool
NetworkOPsImp::beginConsensus(
    uint256 const& networkClosed,
    // NOTE(review): a trailing parameter line appears to have been lost in
    // extraction — the body writes to `clog`, presumably
    // std::unique_ptr<std::stringstream> const& as in endConsensus below.
{
    XRPL_ASSERT(
        networkClosed.isNonZero(),
        "ripple::NetworkOPsImp::beginConsensus : nonzero input");

    auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();

    JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
                           << " with LCL " << closingInfo.parentHash;

    auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);

    if (!prevLedger)
    {
        // this shouldn't happen unless we jump ledgers
        if (mMode == OperatingMode::FULL)
        {
            JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
            setMode(OperatingMode::TRACKING);
            CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
        }

        CLOG(clog) << "beginConsensus no previous ledger. ";
        return false;
    }

    XRPL_ASSERT(
        prevLedger->info().hash == closingInfo.parentHash,
        "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
        "parent");
    XRPL_ASSERT(
        closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
        "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
        "hash");

    // Refresh the negative UNL and trusted validator set before the round.
    app_.validators().setNegativeUNL(prevLedger->negativeUNL());
    TrustChanges const changes = app_.validators().updateTrusted(
        app_.getValidations().getCurrentNodeIDs(),
        closingInfo.parentCloseTime,
        *this,
        app_.overlay(),
        app_.getHashRouter());

    if (!changes.added.empty() || !changes.removed.empty())
    {
        app_.getValidations().trustChanged(changes.added, changes.removed);
        // Update the AmendmentTable so it tracks the current validators.
        app_.getAmendmentTable().trustChanged(
            app_.validators().getQuorumKeys().second);
    }

    mConsensus.startRound(
        app_.timeKeeper().closeTime(),
        networkClosed,
        prevLedger,
        changes.removed,
        changes.added,
        clog);

    // Publish a consensus phase change if starting the round moved phases.
    ConsensusPhase const currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
    }

    JLOG(m_journal.debug()) << "Initiating consensus engine";
    return true;
}
2101
2102bool
2103NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
2104{
2105 auto const& peerKey = peerPos.publicKey();
2106 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
2107 {
2108 // Could indicate a operator misconfiguration where two nodes are
2109 // running with the same validator key configured, so this isn't fatal,
2110 // and it doesn't necessarily indicate peer misbehavior. But since this
2111 // is a trusted message, it could be a very big deal. Either way, we
2112 // don't want to relay the proposal. Note that the byzantine behavior
2113 // detection in handleNewValidation will notify other peers.
2114 //
2115 // Another, innocuous explanation is unusual message routing and delays,
2116 // causing this node to receive its own messages back.
2117 JLOG(m_journal.error())
2118 << "Received a proposal signed by MY KEY from a peer. This may "
2119 "indicate a misconfiguration where another node has the same "
2120 "validator key, or may be caused by unusual message routing and "
2121 "delays.";
2122 return false;
2123 }
2124
2125 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
2126}
2127
2128void
2129NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
2130{
2131 // We now have an additional transaction set
2132 // either created locally during the consensus process
2133 // or acquired from a peer
2134
2135 // Inform peers we have this set
2136 protocol::TMHaveTransactionSet msg;
2137 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
2138 msg.set_status(protocol::tsHAVE);
2139 app_.overlay().foreach(
2140 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
2141
2142 // We acquired it because consensus asked us to
2143 if (fromAcquire)
2144 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
2145}
2146
// Wrap up a consensus round: retire stale peer LCL status, reconcile our
// last closed ledger with the network's, possibly adjust the operating
// mode, and kick off the next round.
void
NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
{
    uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;

    // Peers still reporting our previous closed ledger are out of date.
    for (auto const& it : app_.overlay().getActivePeers())
    {
        if (it && (it->getClosedLedgerHash() == deadLedger))
        {
            JLOG(m_journal.trace()) << "Killing obsolete peer status";
            it->cycleStatus();
        }
    }

    uint256 networkClosed;
    bool ledgerChange =
        checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);

    if (networkClosed.isZero())
    {
        CLOG(clog) << "endConsensus last closed ledger is zero. ";
        return;
    }

    // WRITEME: Unless we are in FULL and in the process of doing a consensus,
    // we must count how many nodes share our LCL, how many nodes disagree with
    // our LCL, and how many validations our LCL has. We also want to check
    // timing to make sure there shouldn't be a newer LCL. We need this
    // information to do the next three tests.

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::SYNCING)) &&
        !ledgerChange)
    {
        // Count number of peers that agree with us and UNL nodes whose
        // validations we have for LCL. If the ledger is good enough, go to
        // TRACKING - TODO
        if (!needNetworkLedger_)
            setMode(OperatingMode::TRACKING);
    }

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::TRACKING)) &&
        !ledgerChange)
    {
        // check if the ledger is good enough to go to FULL
        // Note: Do not go to FULL if we don't have the previous ledger
        // check if the ledger is bad enough to go to CONNECTED -- TODO
        auto current = m_ledgerMaster.getCurrentLedger();
        // Only claim FULL if our clock is within two close-time resolutions
        // of the current ledger's parent close time.
        if (app_.timeKeeper().now() < (current->info().parentCloseTime +
                                       2 * current->info().closeTimeResolution))
        {
            setMode(OperatingMode::FULL);
        }
    }

    beginConsensus(networkClosed, clog);
}
2205
2206void
2207NetworkOPsImp::consensusViewChange()
2208{
2209 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2210 {
2211 setMode(OperatingMode::CONNECTED);
2212 }
2213}
2214
// Publish a received validator manifest to "manifests" stream subscribers,
// pruning any subscribers that have gone away.
void
NetworkOPsImp::pubManifest(Manifest const& mo)
{
    // VFALCO consider std::shared_mutex
    std::lock_guard sl(mSubLock);

    if (!mStreamMaps[sManifests].empty())
    {
        // NOTE(review): the declaration of `jvObj` (presumably
        // Json::Value jvObj(Json::objectValue);) appears to have been lost
        // in extraction — confirm against the original file.

        jvObj[jss::type] = "manifestReceived";
        jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
        if (mo.signingKey)
            jvObj[jss::signing_key] =
                toBase58(TokenType::NodePublic, *mo.signingKey);
        jvObj[jss::seq] = Json::UInt(mo.sequence);
        if (auto sig = mo.getSignature())
            jvObj[jss::signature] = strHex(*sig);
        jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
        if (!mo.domain.empty())
            jvObj[jss::domain] = mo.domain;
        jvObj[jss::manifest] = strHex(mo.serialized);

        // Send to live subscribers; erase entries whose weak_ptr expired.
        for (auto i = mStreamMaps[sManifests].begin();
             i != mStreamMaps[sManifests].end();)
        {
            if (auto p = i->second.lock())
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sManifests].erase(i);
            }
        }
    }
}
2253
// Snapshot of the fee environment used to decide whether a "serverStatus"
// update needs to be published: server load factor/base from the
// LoadFeeTrack, the open ledger's base fee, and the TxQ escalation metrics.
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    XRPAmount fee,
    TxQ::Metrics&& escalationMetrics,
    LoadFeeTrack const& loadFeeTrack)
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , baseFee{fee}
    , em{std::move(escalationMetrics)}
{
}
2264
// Inequality for fee summaries: any difference in the server load values,
// base fee, presence of escalation metrics, or (when both present) the
// escalation levels themselves counts as a change worth publishing.
// NOTE(review): the line naming the operator (presumably
// NetworkOPsImp::ServerFeeSummary::operator!=( ) appears to have been lost
// in extraction — confirm against the original file.
bool
    NetworkOPsImp::ServerFeeSummary const& b) const
{
    if (loadFactorServer != b.loadFactorServer ||
        loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
        em.has_value() != b.em.has_value())
        return true;

    if (em && b.em)
    {
        return (
            em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
            em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
            em->referenceFeeLevel != b.em->referenceFeeLevel);
    }

    // Both summaries lack metrics and all scalar fields matched.
    return false;
}
2284
// Need to cap to uint64 to uint32 due to JSON limitations
// Saturating narrow: values above numeric_limits<uint32_t>::max() clamp to
// that maximum instead of wrapping. (The parameter list and the max32
// constant were garbled in extraction; reconstructed here, with an explicit
// cast so the narrowing is intentional rather than implicit.)
static std::uint32_t
trunc32(std::uint64_t v)
{
    constexpr std::uint64_t max32 = std::numeric_limits<std::uint32_t>::max();

    return static_cast<std::uint32_t>(std::min(max32, v));
};
2293
// Publish a "serverStatus" message (operating mode, load factors, fees) to
// "server" stream subscribers, pruning dead subscribers.
// NOTE(review): the signature line (presumably NetworkOPsImp::pubServer())
// and several interior lines — the subscription lock, the Json::Value
// jvObj declaration, the start of the ServerFeeSummary construction, and
// the tail of the mulDiv expression — appear to have been lost in
// extraction; confirm against the original file.
void
{
    // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
    // list into a local array while holding the lock then release
    // the lock and call send on everyone.
    //

    if (!mStreamMaps[sServer].empty())
    {

            app_.openLedger().current()->fees().base,
            app_.getFeeTrack()};

        jvObj[jss::type] = "serverStatus";
        jvObj[jss::server_status] = strOperatingMode();
        jvObj[jss::load_base] = f.loadBaseServer;
        jvObj[jss::load_factor_server] = f.loadFactorServer;
        jvObj[jss::base_fee] = f.baseFee.jsonClipped();

        if (f.em)
        {
            // Report the larger of the server's load factor and the TxQ's
            // open-ledger escalation level scaled to the load base.
            auto const loadFactor = std::max(
                safe_cast<std::uint64_t>(f.loadFactorServer),
                mulDiv(
                    f.em->openLedgerFeeLevel,
                    f.loadBaseServer,
                    f.em->referenceFeeLevel)

            jvObj[jss::load_factor] = trunc32(loadFactor);
            jvObj[jss::load_factor_fee_escalation] =
                f.em->openLedgerFeeLevel.jsonClipped();
            jvObj[jss::load_factor_fee_queue] =
                f.em->minProcessingFeeLevel.jsonClipped();
            jvObj[jss::load_factor_fee_reference] =
                f.em->referenceFeeLevel.jsonClipped();
        }
        else
            jvObj[jss::load_factor] = f.loadFactorServer;

        // Remember this summary so the next fee change can be diffed.
        mLastFeeSummary = f;

        for (auto i = mStreamMaps[sServer].begin();
             i != mStreamMaps[sServer].end();)
        {
            InfoSub::pointer p = i->second.lock();

            // VFALCO TODO research the possibility of using thread queues and
            //             linearizing the deletion of subscribers with the
            //             sending of JSON data.
            if (p)
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sServer].erase(i);
            }
        }
    }
}
2361
// Publish a consensus phase change to "consensus" stream subscribers,
// pruning dead subscribers.
// NOTE(review): the signature line (a phase-taking member, the body reads
// `phase`), the subscription lock, and the Json::Value jvObj declaration
// appear to have been lost in extraction — confirm against the original.
void
{
    auto& streamMap = mStreamMaps[sConsensusPhase];
    if (!streamMap.empty())
    {
        jvObj[jss::type] = "consensusPhase";
        jvObj[jss::consensus] = to_string(phase);

        for (auto i = streamMap.begin(); i != streamMap.end();)
        {
            if (auto p = i->second.lock())
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = streamMap.erase(i);
            }
        }
    }
}
2388
// Publish a received validation to "validations" stream subscribers,
// rendering the STValidation's fields to JSON (with per-API-version
// adjustments) and pruning dead subscribers.
// NOTE(review): the signature line (the body reads through `val`,
// presumably std::shared_ptr<STValidation> const&), the subscription lock,
// and the Json::Value jvObj declaration appear to have been lost in
// extraction — confirm against the original file.
void
{
    // VFALCO consider std::shared_mutex

    if (!mStreamMaps[sValidations].empty())
    {

        auto const signerPublic = val->getSignerPublic();

        jvObj[jss::type] = "validationReceived";
        jvObj[jss::validation_public_key] =
            toBase58(TokenType::NodePublic, signerPublic);
        jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
        jvObj[jss::signature] = strHex(val->getSignature());
        jvObj[jss::full] = val->isFull();
        jvObj[jss::flags] = val->getFlags();
        jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
        jvObj[jss::data] = strHex(val->getSerializer().slice());
        jvObj[jss::network_id] = app_.config().NETWORK_ID;

        // Optional fields: only emitted when present in the validation.
        if (auto version = (*val)[~sfServerVersion])
            jvObj[jss::server_version] = std::to_string(*version);

        if (auto cookie = (*val)[~sfCookie])
            jvObj[jss::cookie] = std::to_string(*cookie);

        if (auto hash = (*val)[~sfValidatedHash])
            jvObj[jss::validated_hash] = strHex(*hash);

        // Report the master key too when the signer key is ephemeral.
        auto const masterKey =
            app_.validatorManifests().getMasterKey(signerPublic);

        if (masterKey != signerPublic)
            jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);

        // NOTE *seq is a number, but old API versions used string. We replace
        // number with a string using MultiApiJson near end of this function
        if (auto const seq = (*val)[~sfLedgerSequence])
            jvObj[jss::ledger_index] = *seq;

        if (val->isFieldPresent(sfAmendments))
        {
            jvObj[jss::amendments] = Json::Value(Json::arrayValue);
            for (auto const& amendment : val->getFieldV256(sfAmendments))
                jvObj[jss::amendments].append(to_string(amendment));
        }

        if (auto const closeTime = (*val)[~sfCloseTime])
            jvObj[jss::close_time] = *closeTime;

        if (auto const loadFee = (*val)[~sfLoadFee])
            jvObj[jss::load_fee] = *loadFee;

        if (auto const baseFee = val->at(~sfBaseFee))
            jvObj[jss::base_fee] = static_cast<double>(*baseFee);

        if (auto const reserveBase = val->at(~sfReserveBase))
            jvObj[jss::reserve_base] = *reserveBase;

        if (auto const reserveInc = val->at(~sfReserveIncrement))
            jvObj[jss::reserve_inc] = *reserveInc;

        // (The ~ operator converts the Proxy to a std::optional, which
        // simplifies later operations)
        // The XRP-denominated fee/reserve fields overwrite the legacy
        // values above when present and native.
        if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
            baseFeeXRP && baseFeeXRP->native())
            jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();

        if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
            reserveBaseXRP && reserveBaseXRP->native())
            jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();

        if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
            reserveIncXRP && reserveIncXRP->native())
            jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();

        // NOTE Use MultiApiJson to publish two slightly different JSON objects
        // for consumers supporting different API versions
        MultiApiJson multiObj{jvObj};
        multiObj.visit(
            RPC::apiVersion<1>, //
            [](Json::Value& jvTx) {
                // Type conversion for older API versions to string
                if (jvTx.isMember(jss::ledger_index))
                {
                    jvTx[jss::ledger_index] =
                        std::to_string(jvTx[jss::ledger_index].asUInt());
                }
            });

        // Send each subscriber the variant matching its API version; prune
        // subscribers whose weak_ptr expired.
        for (auto i = mStreamMaps[sValidations].begin();
             i != mStreamMaps[sValidations].end();)
        {
            if (auto p = i->second.lock())
            {
                multiObj.visit(
                    p->getApiVersion(), //
                    [&](Json::Value const& jv) { p->send(jv, true); });
                ++i;
            }
            else
            {
                i = mStreamMaps[sValidations].erase(i);
            }
        }
    }
}
2499
// Publish a "peerStatusChange" message to "peer_status" stream
// subscribers. The payload is produced lazily by `func` so callers don't
// build JSON when nobody is subscribed.
// NOTE(review): the signature line (the body calls `func()`, presumably a
// std::function<Json::Value(void)> const& parameter) and the subscription
// lock appear to have been lost in extraction — confirm against the
// original file.
void
{

    if (!mStreamMaps[sPeerStatus].empty())
    {
        Json::Value jvObj(func());

        jvObj[jss::type] = "peerStatusChange";

        for (auto i = mStreamMaps[sPeerStatus].begin();
             i != mStreamMaps[sPeerStatus].end();)
        {
            InfoSub::pointer p = i->second.lock();

            if (p)
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sPeerStatus].erase(i);
            }
        }
    }
}
2528
// Change the server's operating mode, logging and publishing the change.
// NOTE(review): the signature line (the body reads `om`, presumably
// NetworkOPsImp::setMode(OperatingMode om)), the bodies of the two timer
// branches below, and the statement demoting `om` when blocked appear to
// have been lost in extraction — confirm against the original file.
void
{
    using namespace std::chrono_literals;
    if (om == OperatingMode::CONNECTED)
    {
    }
    else if (om == OperatingMode::SYNCING)
    {
    }

    // A blocked server (amendment or UNL) may never rise above CONNECTED.
    if ((om > OperatingMode::CONNECTED) && isBlocked())

    if (mMode == om)
        return;

    mMode = om;

    // Track time spent in each mode for server_info accounting.
    accounting_.mode(om);

    JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
    pubServer();
}
2557
// Handle a validation received from `source`: run it through
// handleNewValidation (deduplicating concurrent arrivals for the same
// ledger), publish it to subscribers, and return whether it should be
// relayed.
// NOTE(review): the signature lines (the body reads `val`, presumably
// std::shared_ptr<STValidation> const&) and the line acquiring `lock`
// appear to have been lost in extraction — confirm against the original.
bool
    std::string const& source)
{
    JLOG(m_journal.trace())
        << "recvValidation " << val->getLedgerHash() << " from " << source;

    // If another thread is already accepting this ledger's validation,
    // bypass the accept step instead of repeating it.
    BypassAccept bypassAccept = BypassAccept::no;
    try
    {
        if (pendingValidations_.contains(val->getLedgerHash()))
            bypassAccept = BypassAccept::yes;
        else
            pendingValidations_.insert(val->getLedgerHash());
        // Release the lock while the handler runs; it is re-acquired when
        // scope_unlock is destroyed.
        scope_unlock unlock(lock);
        handleNewValidation(app_, val, source, bypassAccept, m_journal);
    }
    catch (std::exception const& e)
    {
        JLOG(m_journal.warn())
            << "Exception thrown for handling new validation "
            << val->getLedgerHash() << ": " << e.what();
    }
    catch (...)
    {
        JLOG(m_journal.warn())
            << "Unknown exception thrown for handling new validation "
            << val->getLedgerHash();
    }
    // Only the thread that inserted the pending entry removes it.
    if (bypassAccept == BypassAccept::no)
    {
        pendingValidations_.erase(val->getLedgerHash());
    }
    lock.unlock();

    pubValidation(val);

    JLOG(m_journal.debug()) << [this, &val]() -> auto {
        // NOTE(review): the declaration of `ss` (presumably
        // std::stringstream) appears to have been lost in extraction.
        ss << "VALIDATION: " << val->render() << " master_key: ";
        auto master = app_.validators().getTrustedKey(val->getSignerPublic());
        if (master)
        {
            ss << toBase58(TokenType::NodePublic, *master);
        }
        else
        {
            ss << "none";
        }
        return ss.str();
    }();

    // We will always relay trusted validations; if configured, we will
    // also relay all untrusted validations.
    return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
}
2616
// NOTE(review): the return type and name lines (presumably
// Json::Value NetworkOPsImp::getConsensusInfo()) appear to have been lost
// in extraction — confirm against the original file.
{
    // Full (verbose) JSON dump of the consensus engine's state.
    return mConsensus.getJson(true);
}
2622
// Assemble the JSON body shared by the server_info (human == true) and
// server_state (human == false) RPC responses.
//
// @param human    Report values in human-friendly units (decimal XRP,
//                 fractional load factors) instead of raw integer units.
// @param admin    Caller has admin rights: include extra fields such as
//                 node_size, validator-list details, local fee factors,
//                 job-queue load and git build info.
// @param counters Include perf-log counters and nodestore statistics.
// @return The populated info object (not wrapped in a "result" envelope).
Json::Value
NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
{
    Json::Value info = Json::objectValue;

    // System-level warnings
    {
        Json::Value warnings{Json::arrayValue};
        if (isAmendmentBlocked())
        {
            Json::Value& w = warnings.append(Json::objectValue);
            w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
            w[jss::message] =
                "This server is amendment blocked, and must be updated to be "
                "able to stay in sync with the network.";
        }
        if (isUNLBlocked())
        {
            Json::Value& w = warnings.append(Json::objectValue);
            w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
            w[jss::message] =
                "This server has an expired validator list. validators.txt "
                "may be incorrectly configured or some [validator_list_sites] "
                "may be unreachable.";
        }
        if (admin && isAmendmentWarned())
        {
            // Only admins are warned about unsupported amendments gaining
            // majority, since only an operator can act on it.
            Json::Value& w = warnings.append(Json::objectValue);
            w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
            w[jss::message] =
                "One or more unsupported amendments have reached majority. "
                "Upgrade to the latest version before they are activated "
                "to avoid being amendment blocked.";
            if (auto const expected =
                    app_.getAmendmentTable().firstUnsupportedExpected())
            {
                auto& d = w[jss::details] = Json::objectValue;
                d[jss::expected_date] = expected->time_since_epoch().count();
                d[jss::expected_date_UTC] = to_string(*expected);
            }
        }

        if (warnings.size())
            info[jss::warnings] = std::move(warnings);
    }

    // hostid: unique string describing the machine
    if (human)
        info[jss::hostid] = getHostId(admin);

    // domain: if configured with a domain, report it:
    if (!app_.config().SERVER_DOMAIN.empty())
        info[jss::server_domain] = app_.config().SERVER_DOMAIN;

    info[jss::build_version] = BuildInfo::getVersionString();

    info[jss::server_state] = strOperatingMode(admin);

    info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
        std::chrono::system_clock::now()));

    if (needNetworkLedger_)
        info[jss::network_ledger] = "waiting";

    info[jss::validation_quorum] =
        static_cast<Json::UInt>(app_.validators().quorum());

    if (admin)
    {
        switch (app_.config().NODE_SIZE)
        {
            case 0:
                info[jss::node_size] = "tiny";
                break;
            case 1:
                info[jss::node_size] = "small";
                break;
            case 2:
                info[jss::node_size] = "medium";
                break;
            case 3:
                info[jss::node_size] = "large";
                break;
            case 4:
                info[jss::node_size] = "huge";
                break;
        }

        auto when = app_.validators().expires();

        if (!human)
        {
            if (when)
                info[jss::validator_list_expires] =
                    safe_cast<Json::UInt>(when->time_since_epoch().count());
            else
                info[jss::validator_list_expires] = 0;
        }
        else
        {
            auto& x = (info[jss::validator_list] = Json::objectValue);

            x[jss::count] = static_cast<Json::UInt>(app_.validators().count());

            if (when)
            {
                if (*when == TimeKeeper::time_point::max())
                {
                    // A validator list with no expiration (e.g. local static
                    // list) is always considered active.
                    x[jss::expiration] = "never";
                    x[jss::status] = "active";
                }
                else
                {
                    x[jss::expiration] = to_string(*when);

                    if (*when > app_.timeKeeper().now())
                        x[jss::status] = "active";
                    else
                        x[jss::status] = "expired";
                }
            }
            else
            {
                x[jss::status] = "unknown";
                x[jss::expiration] = "unknown";
            }
        }

#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
        {
            auto& x = (info[jss::git] = Json::objectValue);
#ifdef GIT_COMMIT_HASH
            x[jss::hash] = GIT_COMMIT_HASH;
#endif
#ifdef GIT_BRANCH
            x[jss::branch] = GIT_BRANCH;
#endif
        }
#endif
    }
    info[jss::io_latency_ms] =
        static_cast<Json::UInt>(app_.getIOLatency().count());

    if (admin)
    {
        if (auto const localPubKey = app_.validators().localPublicKey();
            localPubKey && app_.getValidationPublicKey())
        {
            info[jss::pubkey_validator] =
                toBase58(TokenType::NodePublic, localPubKey.value());
        }
        else
        {
            info[jss::pubkey_validator] = "none";
        }
    }

    if (counters)
    {
        info[jss::counters] = app_.getPerfLog().countersJson();

        Json::Value nodestore(Json::objectValue);
        app_.getNodeStore().getCountsJson(nodestore);
        info[jss::counters][jss::nodestore] = nodestore;
        info[jss::current_activities] = app_.getPerfLog().currentJson();
    }

    info[jss::pubkey_node] =
        toBase58(TokenType::NodePublic, app_.nodeIdentity().first);

    info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();

    if (amendmentBlocked_)
        info[jss::amendment_blocked] = true;

    auto const fp = m_ledgerMaster.getFetchPackCacheSize();

    if (fp != 0)
        info[jss::fetch_pack] = Json::UInt(fp);

    info[jss::peers] = Json::UInt(app_.overlay().size());

    Json::Value lastClose = Json::objectValue;
    lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());

    if (human)
    {
        // Human form: seconds as floating point.
        lastClose[jss::converge_time_s] =
            std::chrono::duration<double>{mConsensus.prevRoundTime()}.count();
    }
    else
    {
        // Machine form: raw milliseconds.
        lastClose[jss::converge_time] =
            Json::Int(mConsensus.prevRoundTime().count());
    }

    info[jss::last_close] = lastClose;

    // info[jss::consensus] = mConsensus.getJson();

    if (admin)
        info[jss::load] = m_job_queue.getJson();

    if (auto const netid = app_.overlay().networkID())
        info[jss::network_id] = static_cast<Json::UInt>(*netid);

    auto const escalationMetrics =
        app_.getTxQ().getMetrics(*app_.openLedger().current());

    auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
    auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
    /* Scale the escalated fee level to unitless "load factor".
       In practice, this just strips the units, but it will continue
       to work correctly if either base value ever changes. */
    auto const loadFactorFeeEscalation =
        mulDiv(
            escalationMetrics.openLedgerFeeLevel,
            loadBaseServer,
            escalationMetrics.referenceFeeLevel)
            .value_or(ripple::muldiv_max);

    auto const loadFactor = std::max(
        safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);

    if (!human)
    {
        info[jss::load_base] = loadBaseServer;
        info[jss::load_factor] = trunc32(loadFactor);
        info[jss::load_factor_server] = loadFactorServer;

        /* Json::Value doesn't support uint64, so clamp to max
           uint32 value. This is mostly theoretical, since there
           probably isn't enough extant XRP to drive the factor
           that high.
        */
        info[jss::load_factor_fee_escalation] =
            escalationMetrics.openLedgerFeeLevel.jsonClipped();
        info[jss::load_factor_fee_queue] =
            escalationMetrics.minProcessingFeeLevel.jsonClipped();
        info[jss::load_factor_fee_reference] =
            escalationMetrics.referenceFeeLevel.jsonClipped();
    }
    else
    {
        info[jss::load_factor] =
            static_cast<double>(loadFactor) / loadBaseServer;

        if (loadFactorServer != loadFactor)
            info[jss::load_factor_server] =
                static_cast<double>(loadFactorServer) / loadBaseServer;

        if (admin)
        {
            auto fee = app_.getFeeTrack().getLocalFee();
            if (fee != loadBaseServer)
                info[jss::load_factor_local] =
                    static_cast<double>(fee) / loadBaseServer;
            fee = app_.getFeeTrack().getRemoteFee();
            if (fee != loadBaseServer)
                info[jss::load_factor_net] =
                    static_cast<double>(fee) / loadBaseServer;
            fee = app_.getFeeTrack().getClusterFee();
            if (fee != loadBaseServer)
                info[jss::load_factor_cluster] =
                    static_cast<double>(fee) / loadBaseServer;
        }
        if (escalationMetrics.openLedgerFeeLevel !=
                escalationMetrics.referenceFeeLevel &&
            (admin || loadFactorFeeEscalation != loadFactor))
            info[jss::load_factor_fee_escalation] =
                escalationMetrics.openLedgerFeeLevel.decimalFromReference(
                    escalationMetrics.referenceFeeLevel);
        if (escalationMetrics.minProcessingFeeLevel !=
            escalationMetrics.referenceFeeLevel)
            info[jss::load_factor_fee_queue] =
                escalationMetrics.minProcessingFeeLevel.decimalFromReference(
                    escalationMetrics.referenceFeeLevel);
    }

    // Prefer the validated ledger; fall back to the last closed ledger and
    // report it under "closed_ledger" instead of "validated_ledger".
    bool valid = false;
    auto lpClosed = m_ledgerMaster.getValidatedLedger();

    if (lpClosed)
        valid = true;
    else
        lpClosed = m_ledgerMaster.getClosedLedger();

    if (lpClosed)
    {
        XRPAmount const baseFee = lpClosed->fees().base;
        Json::Value l(Json::objectValue);
        l[jss::seq] = Json::UInt(lpClosed->info().seq);
        l[jss::hash] = to_string(lpClosed->info().hash);

        if (!human)
        {
            l[jss::base_fee] = baseFee.jsonClipped();
            l[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
            l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
            l[jss::close_time] = Json::Value::UInt(
                lpClosed->info().closeTime.time_since_epoch().count());
        }
        else
        {
            l[jss::base_fee_xrp] = baseFee.decimalXRP();
            l[jss::reserve_base_xrp] = lpClosed->fees().reserve.decimalXRP();
            l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();

            if (auto const closeOffset = app_.timeKeeper().closeOffset();
                std::abs(closeOffset.count()) >= 60)
                l[jss::close_time_offset] =
                    static_cast<std::uint32_t>(closeOffset.count());

            // Suppress nonsensical ages (e.g. clock skew / very stale data).
            constexpr std::chrono::seconds highAgeThreshold{1000000};
            if (m_ledgerMaster.haveValidated())
            {
                auto const age = m_ledgerMaster.getValidatedLedgerAge();
                l[jss::age] =
                    Json::UInt(age < highAgeThreshold ? age.count() : 0);
            }
            else
            {
                auto lCloseTime = lpClosed->info().closeTime;
                auto closeTime = app_.timeKeeper().closeTime();
                if (lCloseTime <= closeTime)
                {
                    using namespace std::chrono_literals;
                    auto age = closeTime - lCloseTime;
                    l[jss::age] =
                        Json::UInt(age < highAgeThreshold ? age.count() : 0);
                }
            }
        }

        if (valid)
            info[jss::validated_ledger] = l;
        else
            info[jss::closed_ledger] = l;

        auto lpPublished = m_ledgerMaster.getPublishedLedger();
        if (!lpPublished)
            info[jss::published_ledger] = "none";
        else if (lpPublished->info().seq != lpClosed->info().seq)
            info[jss::published_ledger] = lpPublished->info().seq;
    }

    accounting_.json(info);
    info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
    info[jss::jq_trans_overflow] =
        std::to_string(app_.overlay().getJqTransOverflow());
    info[jss::peer_disconnects] =
        std::to_string(app_.overlay().getPeerDisconnect());
    info[jss::peer_disconnects_resources] =
        std::to_string(app_.overlay().getPeerDisconnectCharges());

    // This array must be sorted in increasing order.
    static constexpr std::array<std::string_view, 7> protocols{
        "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
    static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
    {
        Json::Value ports{Json::arrayValue};
        for (auto const& port : app_.getServerHandler().setup().ports)
        {
            // Don't publish admin ports for non-admin users
            if (!admin &&
                !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
                  port.admin_user.empty() && port.admin_password.empty()))
                continue;
            std::vector<std::string> proto;
            std::set_intersection(
                std::begin(port.protocol),
                std::end(port.protocol),
                std::begin(protocols),
                std::end(protocols),
                std::back_inserter(proto));
            if (!proto.empty())
            {
                auto& jv = ports.append(Json::Value(Json::objectValue));
                jv[jss::port] = std::to_string(port.port);
                jv[jss::protocol] = Json::Value{Json::arrayValue};
                for (auto const& p : proto)
                    jv[jss::protocol].append(p);
            }
        }

        if (app_.config().exists(SECTION_PORT_GRPC))
        {
            auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
            auto const optPort = grpcSection.get("port");
            if (optPort && grpcSection.get("ip"))
            {
                auto& jv = ports.append(Json::Value(Json::objectValue));
                jv[jss::port] = *optPort;
                jv[jss::protocol] = Json::Value{Json::arrayValue};
                jv[jss::protocol].append("grpc");
            }
        }
        info[jss::ports] = std::move(ports);
    }

    return info;
}
3025
3026void
3031
3037
// Publish a proposed (not yet validated) transaction to all subscribers of
// the rt_transactions stream, then fan out to per-account real-time
// subscribers via pubProposedAccountTransaction.
//
// @param ledger      The (open) ledger the transaction was applied against.
// @param transaction The proposed transaction.
// @param result      The preliminary engine result.
void
NetworkOPsImp::pubProposedTransaction(
    std::shared_ptr<ReadView const> const& ledger,
    std::shared_ptr<STTx const> const& transaction,
    TER result)
{
    // never publish an inner txn inside a batch txn. The flag should
    // only be set if the Batch feature is enabled. If Batch is not
    // enabled, the flag is always invalid, so don't publish it
    // regardless.
    if (transaction->isFlag(tfInnerBatchTxn))
        return;

    MultiApiJson jvObj =
        transJson(transaction, result, false, ledger, std::nullopt);

    {
        std::lock_guard sl(mSubLock);

        // Iterate the rt_transactions subscribers, dropping any whose
        // InfoSub has expired (weak_ptr no longer locks).
        auto it = mStreamMaps[sRTTransactions].begin();
        while (it != mStreamMaps[sRTTransactions].end())
        {
            InfoSub::pointer p = it->second.lock();

            if (p)
            {
                jvObj.visit(
                    p->getApiVersion(), //
                    [&](Json::Value const& jv) { p->send(jv, true); });
                ++it;
            }
            else
            {
                it = mStreamMaps[sRTTransactions].erase(it);
            }
        }
    }

    pubProposedAccountTransaction(ledger, transaction, result);
}
3078
// Publish a newly accepted (validated) ledger: notify the ledger and
// book_changes streams, kick off any pending account-history subscriptions,
// and publish every transaction in the ledger.
//
// @param lpAccepted The accepted ledger to publish.
void
NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
{
    // Ledgers are published only when they acquire sufficient validations
    // Holes are filled across connection loss or other catastrophe

    std::shared_ptr<AcceptedLedger> alpAccepted =
        app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
    if (!alpAccepted)
    {
        alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
        app_.getAcceptedLedgerCache().canonicalize_replace_client(
            lpAccepted->info().hash, alpAccepted);
    }

    XRPL_ASSERT(
        alpAccepted->getLedger().get() == lpAccepted.get(),
        "ripple::NetworkOPsImp::pubLedger : accepted input");

    {
        JLOG(m_journal.debug())
            << "Publishing ledger " << lpAccepted->info().seq << " "
            << lpAccepted->info().hash;

        std::lock_guard sl(mSubLock);

        if (!mStreamMaps[sLedger].empty())
        {
            Json::Value jvObj(Json::objectValue);

            jvObj[jss::type] = "ledgerClosed";
            jvObj[jss::ledger_index] = lpAccepted->info().seq;
            jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
            jvObj[jss::ledger_time] = Json::Value::UInt(
                lpAccepted->info().closeTime.time_since_epoch().count());

            jvObj[jss::network_id] = app_.config().NETWORK_ID;

            // fee_ref is deprecated and only reported pre-XRPFees.
            if (!lpAccepted->rules().enabled(featureXRPFees))
                jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
            jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
            jvObj[jss::reserve_base] = lpAccepted->fees().reserve.jsonClipped();
            jvObj[jss::reserve_inc] =
                lpAccepted->fees().increment.jsonClipped();

            jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());

            if (mMode >= OperatingMode::SYNCING)
            {
                jvObj[jss::validated_ledgers] =
                    app_.getLedgerMaster().getCompleteLedgers();
            }

            // Send to each live subscriber; prune dead weak_ptr entries.
            auto it = mStreamMaps[sLedger].begin();
            while (it != mStreamMaps[sLedger].end())
            {
                InfoSub::pointer p = it->second.lock();
                if (p)
                {
                    p->send(jvObj, true);
                    ++it;
                }
                else
                    it = mStreamMaps[sLedger].erase(it);
            }
        }

        if (!mStreamMaps[sBookChanges].empty())
        {
            Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);

            auto it = mStreamMaps[sBookChanges].begin();
            while (it != mStreamMaps[sBookChanges].end())
            {
                InfoSub::pointer p = it->second.lock();
                if (p)
                {
                    p->send(jvObj, true);
                    ++it;
                }
                else
                    it = mStreamMaps[sBookChanges].erase(it);
            }
        }

        {
            static bool firstTime = true;
            if (firstTime)
            {
                // First validated ledger, start delayed SubAccountHistory
                firstTime = false;
                for (auto& outer : mSubAccountHistory)
                {
                    for (auto& inner : outer.second)
                    {
                        auto& subInfo = inner.second;
                        if (subInfo.index_->separationLedgerSeq_ == 0)
                        {
                            subAccountHistoryStart(
                                alpAccepted->getLedger(), subInfo);
                        }
                    }
                }
            }
        }
    }

    // Don't lock since pubAcceptedTransaction is locking.
    for (auto const& accTx : *alpAccepted)
    {
        JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
        pubValidatedTransaction(
            lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
    }
}
3194
// Detect fee changes (base fee, TxQ metrics, or load fee) and, if anything
// changed since the last report, schedule a job to publish the new fees to
// the server stream.
void
NetworkOPsImp::reportFeeChange()
{
    ServerFeeSummary f{
        app_.openLedger().current()->fees().base,
        app_.getTxQ().getMetrics(*app_.openLedger().current()),
        app_.getFeeTrack()};

    // only schedule the job if something has changed
    if (f != mLastFeeSummary)
    {
        m_job_queue.addJob(
            jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
                pubServer();
            });
    }
}
3212
// Schedule a job to publish a consensus-phase change to subscribers of the
// consensus stream.
//
// @param phase The consensus phase just entered.
void
NetworkOPsImp::reportConsensusStateChange(ConsensusPhase phase)
{
    m_job_queue.addJob(
        jtCLIENT_CONSENSUS,
        "reportConsensusStateChange->pubConsensus",
        [this, phase]() { pubConsensus(phase); });
}
3221
// Sweep the locally-submitted transaction set against the given view,
// discarding entries that no longer need to be retained.
// NOTE(review): function name/parameter reconstructed from the call to
// m_localTX->sweep(view) — confirm against NetworkOPs.h.
inline void
NetworkOPsImp::sweepLocalTxs(ReadView const& view)
{
    m_localTX->sweep(view);
}
// Number of transactions currently held in the local-transaction set.
// NOTE(review): function name reconstructed from m_localTX->size() —
// confirm against NetworkOPs.h.
inline std::size_t
NetworkOPsImp::getLocalTxCount()
{
    return m_localTX->size();
}
3232
// This routine should only be used to publish accepted or validated
// transactions.
//
// Build the per-API-version JSON representation of a transaction for the
// subscription streams (transactions, rt_transactions, account streams).
//
// @param transaction The transaction to serialize.
// @param result      Engine result (TER) to report.
// @param validated   true if the transaction is in a validated ledger.
// @param ledger      Ledger (open or closed) the transaction applies to.
// @param meta        Metadata; present only for accepted/validated txns.
// @return MultiApiJson holding one finished object per supported API version.
MultiApiJson
NetworkOPsImp::transJson(
    std::shared_ptr<STTx const> const& transaction,
    TER result,
    bool validated,
    std::shared_ptr<ReadView const> const& ledger,
    std::optional<std::reference_wrapper<TxMeta const>> meta)
{
    Json::Value jvObj(Json::objectValue);
    std::string sToken;
    std::string sHuman;

    transResultInfo(result, sToken, sHuman);

    jvObj[jss::type] = "transaction";
    // NOTE jvObj is not a finished object for either API version. After
    // it's populated, we need to finish it for a specific API version. This is
    // done in a loop, near the end of this function.
    jvObj[jss::transaction] =
        transaction->getJson(JsonOptions::disable_API_prior_V2, false);

    if (meta)
    {
        jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
        RPC::insertDeliveredAmount(
            jvObj[jss::meta], *ledger, transaction, meta->get());
        RPC::insertNFTSyntheticInJson(jvObj, transaction, meta->get());
        RPC::insertMPTokenIssuanceID(
            jvObj[jss::meta], transaction, meta->get());
    }

    // add CTID where the needed data for it exists
    if (auto const& lookup = ledger->txRead(transaction->getTransactionID());
        lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
    {
        uint32_t const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
        // Prefer the transaction's own NetworkID field when present.
        uint32_t netID = app_.config().NETWORK_ID;
        if (transaction->isFieldPresent(sfNetworkID))
            netID = transaction->getFieldU32(sfNetworkID);

        if (auto const ctid =
                RPC::encodeCTID(ledger->info().seq, txnSeq, netID);
            ctid)
            jvObj[jss::ctid] = *ctid;
    }
    if (!ledger->open())
        jvObj[jss::ledger_hash] = to_string(ledger->info().hash);

    if (validated)
    {
        jvObj[jss::ledger_index] = ledger->info().seq;
        jvObj[jss::transaction][jss::date] =
            ledger->info().closeTime.time_since_epoch().count();
        jvObj[jss::validated] = true;
        jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);

        // WRITEME: Put the account next seq here
    }
    else
    {
        jvObj[jss::validated] = false;
        jvObj[jss::ledger_current_index] = ledger->info().seq;
    }

    jvObj[jss::status] = validated ? "closed" : "proposed";
    jvObj[jss::engine_result] = sToken;
    jvObj[jss::engine_result_code] = result;
    jvObj[jss::engine_result_message] = sHuman;

    if (transaction->getTxnType() == ttOFFER_CREATE)
    {
        auto const account = transaction->getAccountID(sfAccount);
        auto const amount = transaction->getFieldAmount(sfTakerGets);

        // If the offer create is not self funded then add the owner balance
        if (account != amount.issue().account)
        {
            auto const ownerFunds = accountFunds(
                *ledger,
                account,
                amount,
                fhIGNORE_FREEZE,
                app_.journal("View"));
            jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
        }
    }

    // Finish the object for each API version: v2+ renames "transaction" to
    // "tx_json" and hoists the hash to the top level.
    std::string const hash = to_string(transaction->getTransactionID());
    MultiApiJson multiObj{jvObj};
    forAllApiVersions(
        multiObj.visit(), //
        [&]<unsigned Version>(
            Json::Value& jvTx, std::integral_constant<unsigned, Version>) {
            RPC::insertDeliverMax(
                jvTx[jss::transaction], transaction->getTxnType(), Version);

            if constexpr (Version > 1)
            {
                jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
                jvTx[jss::hash] = hash;
            }
            else
            {
                jvTx[jss::transaction][jss::hash] = hash;
            }
        });

    return multiObj;
}
3344
// Publish a validated transaction to subscribers of the transactions and
// rt_transactions streams, feed it to the order book DB on success, and fan
// out to per-account subscribers.
//
// @param ledger      The validated ledger containing the transaction.
// @param transaction The accepted transaction (with metadata and result).
// @param last        true if this is the final transaction of the ledger.
void
NetworkOPsImp::pubValidatedTransaction(
    std::shared_ptr<ReadView const> const& ledger,
    AcceptedLedgerTx const& transaction,
    bool last)
{
    auto const& stTxn = transaction.getTxn();

    // Create two different Json objects, for different API versions
    auto const metaRef = std::ref(transaction.getMeta());
    auto const trResult = transaction.getResult();
    MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);

    {
        std::lock_guard sl(mSubLock);

        // transactions stream; prune dead subscribers as we go.
        auto it = mStreamMaps[sTransactions].begin();
        while (it != mStreamMaps[sTransactions].end())
        {
            InfoSub::pointer p = it->second.lock();

            if (p)
            {
                jvObj.visit(
                    p->getApiVersion(), //
                    [&](Json::Value const& jv) { p->send(jv, true); });
                ++it;
            }
            else
                it = mStreamMaps[sTransactions].erase(it);
        }

        // rt_transactions stream also receives validated transactions.
        it = mStreamMaps[sRTTransactions].begin();

        while (it != mStreamMaps[sRTTransactions].end())
        {
            InfoSub::pointer p = it->second.lock();

            if (p)
            {
                jvObj.visit(
                    p->getApiVersion(), //
                    [&](Json::Value const& jv) { p->send(jv, true); });
                ++it;
            }
            else
                it = mStreamMaps[sRTTransactions].erase(it);
        }
    }

    if (transaction.getResult() == tesSUCCESS)
        app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);

    pubAccountTransaction(ledger, transaction, last);
}
3400
// Publish a validated transaction to per-account subscribers: the accounts_
// (accepted), accounts_rt (real-time), and account_history streams for every
// account the transaction affected.
//
// @param ledger      The validated ledger containing the transaction.
// @param transaction The accepted transaction (with metadata and result).
// @param last        true if this is the final transaction of the ledger
//                    (sets account_history_boundary for history streams).
void
NetworkOPsImp::pubAccountTransaction(
    std::shared_ptr<ReadView const> const& ledger,
    AcceptedLedgerTx const& transaction,
    bool last)
{
    hash_set<InfoSub::pointer> notify;
    int iProposed = 0;
    int iAccepted = 0;

    std::vector<SubAccountHistoryInfo> accountHistoryNotify;
    auto const currLedgerSeq = ledger->seq();
    {
        std::lock_guard sl(mSubLock);

        if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
            !mSubAccountHistory.empty())
        {
            for (auto const& affectedAccount : transaction.getAffected())
            {
                // Real-time subscribers; drop expired weak_ptr entries.
                if (auto simiIt = mSubRTAccount.find(affectedAccount);
                    simiIt != mSubRTAccount.end())
                {
                    auto it = simiIt->second.begin();

                    while (it != simiIt->second.end())
                    {
                        InfoSub::pointer p = it->second.lock();

                        if (p)
                        {
                            notify.insert(p);
                            ++it;
                            ++iProposed;
                        }
                        else
                            it = simiIt->second.erase(it);
                    }
                }

                // Accepted-transaction subscribers.
                if (auto simiIt = mSubAccount.find(affectedAccount);
                    simiIt != mSubAccount.end())
                {
                    auto it = simiIt->second.begin();
                    while (it != simiIt->second.end())
                    {
                        InfoSub::pointer p = it->second.lock();

                        if (p)
                        {
                            notify.insert(p);
                            ++it;
                            ++iAccepted;
                        }
                        else
                            it = simiIt->second.erase(it);
                    }
                }

                // Account-history subscribers: skip ledgers at or before the
                // separation point (those are delivered by the history job).
                if (auto histoIt = mSubAccountHistory.find(affectedAccount);
                    histoIt != mSubAccountHistory.end())
                {
                    auto& subs = histoIt->second;
                    auto it = subs.begin();
                    while (it != subs.end())
                    {
                        SubAccountHistoryInfoWeak const& info = it->second;
                        if (currLedgerSeq <= info.index_->separationLedgerSeq_)
                        {
                            ++it;
                            continue;
                        }

                        if (auto isSptr = info.sinkWptr_.lock(); isSptr)
                        {
                            accountHistoryNotify.emplace_back(
                                SubAccountHistoryInfo{isSptr, info.index_});
                            ++it;
                        }
                        else
                        {
                            it = subs.erase(it);
                        }
                    }
                    if (subs.empty())
                        mSubAccountHistory.erase(histoIt);
                }
            }
        }
    }

    JLOG(m_journal.trace())
        << "pubAccountTransaction: "
        << "proposed=" << iProposed << ", accepted=" << iAccepted;

    if (!notify.empty() || !accountHistoryNotify.empty())
    {
        auto const& stTxn = transaction.getTxn();

        // Create two different Json objects, for different API versions
        auto const metaRef = std::ref(transaction.getMeta());
        auto const trResult = transaction.getResult();
        MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);

        for (InfoSub::ref isrListener : notify)
        {
            jvObj.visit(
                isrListener->getApiVersion(), //
                [&](Json::Value const& jv) { isrListener->send(jv, true); });
        }

        if (last)
            jvObj.set(jss::account_history_boundary, true);

        XRPL_ASSERT(
            jvObj.isMember(jss::account_history_tx_stream) ==
                MultiApiJson::none,
            "ripple::NetworkOPsImp::pubAccountTransaction : "
            "account_history_tx_stream not set");
        for (auto& info : accountHistoryNotify)
        {
            auto& index = info.index_;
            if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
                jvObj.set(jss::account_history_tx_first, true);

            jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);

            jvObj.visit(
                info.sink_->getApiVersion(), //
                [&](Json::Value const& jv) { info.sink_->send(jv, true); });
        }
    }
}
3534
// Publish a proposed (not yet validated) transaction to the real-time
// per-account subscribers of every account the transaction mentions.
//
// @param ledger The (open) ledger the transaction was applied against.
// @param tx     The proposed transaction.
// @param result The preliminary engine result.
void
NetworkOPsImp::pubProposedAccountTransaction(
    std::shared_ptr<ReadView const> const& ledger,
    std::shared_ptr<STTx const> const& tx,
    TER result)
{
    hash_set<InfoSub::pointer> notify;
    int iProposed = 0;

    std::vector<SubAccountHistoryInfo> accountHistoryNotify;

    {
        std::lock_guard sl(mSubLock);

        // Fast exit: proposed transactions only go to real-time subscribers.
        if (mSubRTAccount.empty())
            return;

        if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
            !mSubAccountHistory.empty())
        {
            for (auto const& affectedAccount : tx->getMentionedAccounts())
            {
                if (auto simiIt = mSubRTAccount.find(affectedAccount);
                    simiIt != mSubRTAccount.end())
                {
                    auto it = simiIt->second.begin();

                    while (it != simiIt->second.end())
                    {
                        InfoSub::pointer p = it->second.lock();

                        if (p)
                        {
                            notify.insert(p);
                            ++it;
                            ++iProposed;
                        }
                        else
                            it = simiIt->second.erase(it);
                    }
                }
            }
        }
    }

    JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;

    if (!notify.empty() || !accountHistoryNotify.empty())
    {
        // Create two different Json objects, for different API versions
        MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);

        for (InfoSub::ref isrListener : notify)
            jvObj.visit(
                isrListener->getApiVersion(), //
                [&](Json::Value const& jv) { isrListener->send(jv, true); });

        XRPL_ASSERT(
            jvObj.isMember(jss::account_history_tx_stream) ==
                MultiApiJson::none,
            "ripple::NetworkOPs::pubProposedAccountTransaction : "
            "account_history_tx_stream not set");
        for (auto& info : accountHistoryNotify)
        {
            auto& index = info.index_;
            if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
                jvObj.set(jss::account_history_tx_first, true);
            jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
            jvObj.visit(
                info.sink_->getApiVersion(), //
                [&](Json::Value const& jv) { info.sink_->send(jv, true); });
        }
    }
}
3609
3610//
3611// Monitoring
3612//
3613
// Subscribe a client to transaction notifications for a set of accounts.
//
// @param isrListener   The subscribing client.
// @param vnaAccountIDs Accounts to subscribe to.
// @param rt            true for the real-time (proposed) map, false for the
//                      accepted-transaction map.
void
NetworkOPsImp::subAccount(
    InfoSub::ref isrListener,
    hash_set<AccountID> const& vnaAccountIDs,
    bool rt)
{
    SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;

    for (auto const& naAccountID : vnaAccountIDs)
    {
        JLOG(m_journal.trace())
            << "subAccount: account: " << toBase58(naAccountID);

        isrListener->insertSubAccountInfo(naAccountID, rt);
    }

    std::lock_guard sl(mSubLock);

    for (auto const& naAccountID : vnaAccountIDs)
    {
        auto simIterator = subMap.find(naAccountID);
        if (simIterator == subMap.end())
        {
            // Not found, note that account has a new single listener.
            SubMapType usisElement;
            usisElement[isrListener->getSeq()] = isrListener;
            // VFALCO NOTE This is making a needless copy of naAccountID
            subMap.insert(simIterator, make_pair(naAccountID, usisElement));
        }
        else
        {
            // Found, note that the account has another listener.
            simIterator->second[isrListener->getSeq()] = isrListener;
        }
    }
}
3650
3651void
3653 InfoSub::ref isrListener,
3654 hash_set<AccountID> const& vnaAccountIDs,
3655 bool rt)
3656{
3657 for (auto const& naAccountID : vnaAccountIDs)
3658 {
3659 // Remove from the InfoSub
3660 isrListener->deleteSubAccountInfo(naAccountID, rt);
3661 }
3662
3663 // Remove from the server
3664 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3665}
3666
// Remove a subscriber (identified by its sequence number) from the
// server-side per-account subscription map, discarding any account entry
// that becomes empty.
//
// @param uSeq          The subscriber's unique sequence number.
// @param vnaAccountIDs Accounts to remove the subscriber from.
// @param rt            true for the real-time map, false for accepted.
void
NetworkOPsImp::unsubAccountInternal(
    std::uint64_t uSeq,
    hash_set<AccountID> const& vnaAccountIDs,
    bool rt)
{
    std::lock_guard sl(mSubLock);

    SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;

    for (auto const& naAccountID : vnaAccountIDs)
    {
        auto simIterator = subMap.find(naAccountID);

        if (simIterator != subMap.end())
        {
            // Found
            simIterator->second.erase(uSeq);

            if (simIterator->second.empty())
            {
                // Don't need hash entry.
                subMap.erase(simIterator);
            }
        }
    }
}
3694
3695void
3697{
3698 enum DatabaseType { Sqlite, None };
3699 static auto const databaseType = [&]() -> DatabaseType {
3700 // Use a dynamic_cast to return DatabaseType::None
3701 // on failure.
3702 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3703 {
3704 return DatabaseType::Sqlite;
3705 }
3706 return DatabaseType::None;
3707 }();
3708
3709 if (databaseType == DatabaseType::None)
3710 {
3711 // LCOV_EXCL_START
3712 UNREACHABLE(
3713 "ripple::NetworkOPsImp::addAccountHistoryJob : no database");
3714 JLOG(m_journal.error())
3715 << "AccountHistory job for account "
3716 << toBase58(subInfo.index_->accountId_) << " no database";
3717 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3718 {
3719 sptr->send(rpcError(rpcINTERNAL), true);
3720 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3721 }
3722 return;
3723 // LCOV_EXCL_STOP
3724 }
3725
3728 "AccountHistoryTxStream",
3729 [this, dbType = databaseType, subInfo]() {
3730 auto const& accountId = subInfo.index_->accountId_;
3731 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3732 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3733
3734 JLOG(m_journal.trace())
3735 << "AccountHistory job for account " << toBase58(accountId)
3736 << " started. lastLedgerSeq=" << lastLedgerSeq;
3737
3738 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3739 std::shared_ptr<TxMeta> const& meta) -> bool {
3740 /*
3741 * genesis account: first tx is the one with seq 1
3742 * other account: first tx is the one created the account
3743 */
3744 if (accountId == genesisAccountId)
3745 {
3746 auto stx = tx->getSTransaction();
3747 if (stx->getAccountID(sfAccount) == accountId &&
3748 stx->getSeqValue() == 1)
3749 return true;
3750 }
3751
3752 for (auto& node : meta->getNodes())
3753 {
3754 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3755 continue;
3756
3757 if (node.isFieldPresent(sfNewFields))
3758 {
3759 if (auto inner = dynamic_cast<STObject const*>(
3760 node.peekAtPField(sfNewFields));
3761 inner)
3762 {
3763 if (inner->isFieldPresent(sfAccount) &&
3764 inner->getAccountID(sfAccount) == accountId)
3765 {
3766 return true;
3767 }
3768 }
3769 }
3770 }
3771
3772 return false;
3773 };
3774
3775 auto send = [&](Json::Value const& jvObj,
3776 bool unsubscribe) -> bool {
3777 if (auto sptr = subInfo.sinkWptr_.lock())
3778 {
3779 sptr->send(jvObj, true);
3780 if (unsubscribe)
3781 unsubAccountHistory(sptr, accountId, false);
3782 return true;
3783 }
3784
3785 return false;
3786 };
3787
3788 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3789 bool unsubscribe) -> bool {
3790 if (auto sptr = subInfo.sinkWptr_.lock())
3791 {
3792 jvObj.visit(
3793 sptr->getApiVersion(), //
3794 [&](Json::Value const& jv) { sptr->send(jv, true); });
3795
3796 if (unsubscribe)
3797 unsubAccountHistory(sptr, accountId, false);
3798 return true;
3799 }
3800
3801 return false;
3802 };
3803
3804 auto getMoreTxns =
3805 [&](std::uint32_t minLedger,
3806 std::uint32_t maxLedger,
3811 switch (dbType)
3812 {
3813 case Sqlite: {
3814 auto db = static_cast<SQLiteDatabase*>(
3817 accountId, minLedger, maxLedger, marker, 0, true};
3818 return db->newestAccountTxPage(options);
3819 }
3820 // LCOV_EXCL_START
3821 default: {
3822 UNREACHABLE(
3823 "ripple::NetworkOPsImp::addAccountHistoryJob : "
3824 "getMoreTxns : invalid database type");
3825 return {};
3826 }
3827 // LCOV_EXCL_STOP
3828 }
3829 };
3830
3831 /*
3832 * search backward until the genesis ledger or asked to stop
3833 */
3834 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3835 {
3836 int feeChargeCount = 0;
3837 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3838 {
3839 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3840 ++feeChargeCount;
3841 }
3842 else
3843 {
3844 JLOG(m_journal.trace())
3845 << "AccountHistory job for account "
3846 << toBase58(accountId) << " no InfoSub. Fee charged "
3847 << feeChargeCount << " times.";
3848 return;
3849 }
3850
3851 // try to search in 1024 ledgers till reaching genesis ledgers
3852 auto startLedgerSeq =
3853 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3854 JLOG(m_journal.trace())
3855 << "AccountHistory job for account " << toBase58(accountId)
3856 << ", working on ledger range [" << startLedgerSeq << ","
3857 << lastLedgerSeq << "]";
3858
3859 auto haveRange = [&]() -> bool {
3860 std::uint32_t validatedMin = UINT_MAX;
3861 std::uint32_t validatedMax = 0;
3862 auto haveSomeValidatedLedgers =
3864 validatedMin, validatedMax);
3865
3866 return haveSomeValidatedLedgers &&
3867 validatedMin <= startLedgerSeq &&
3868 lastLedgerSeq <= validatedMax;
3869 }();
3870
3871 if (!haveRange)
3872 {
3873 JLOG(m_journal.debug())
3874 << "AccountHistory reschedule job for account "
3875 << toBase58(accountId) << ", incomplete ledger range ["
3876 << startLedgerSeq << "," << lastLedgerSeq << "]";
3878 return;
3879 }
3880
3882 while (!subInfo.index_->stopHistorical_)
3883 {
3884 auto dbResult =
3885 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3886 if (!dbResult)
3887 {
3888 // LCOV_EXCL_START
3889 UNREACHABLE(
3890 "ripple::NetworkOPsImp::addAccountHistoryJob : "
3891 "getMoreTxns failed");
3892 JLOG(m_journal.debug())
3893 << "AccountHistory job for account "
3894 << toBase58(accountId) << " getMoreTxns failed.";
3895 send(rpcError(rpcINTERNAL), true);
3896 return;
3897 // LCOV_EXCL_STOP
3898 }
3899
3900 auto const& txns = dbResult->first;
3901 marker = dbResult->second;
3902 size_t num_txns = txns.size();
3903 for (size_t i = 0; i < num_txns; ++i)
3904 {
3905 auto const& [tx, meta] = txns[i];
3906
3907 if (!tx || !meta)
3908 {
3909 JLOG(m_journal.debug())
3910 << "AccountHistory job for account "
3911 << toBase58(accountId) << " empty tx or meta.";
3912 send(rpcError(rpcINTERNAL), true);
3913 return;
3914 }
3915 auto curTxLedger =
3917 tx->getLedger());
3918 if (!curTxLedger)
3919 {
3920 // LCOV_EXCL_START
3921 UNREACHABLE(
3922 "ripple::NetworkOPsImp::addAccountHistoryJob : "
3923 "getLedgerBySeq failed");
3924 JLOG(m_journal.debug())
3925 << "AccountHistory job for account "
3926 << toBase58(accountId) << " no ledger.";
3927 send(rpcError(rpcINTERNAL), true);
3928 return;
3929 // LCOV_EXCL_STOP
3930 }
3932 tx->getSTransaction();
3933 if (!stTxn)
3934 {
3935 // LCOV_EXCL_START
3936 UNREACHABLE(
3937 "NetworkOPsImp::addAccountHistoryJob : "
3938 "getSTransaction failed");
3939 JLOG(m_journal.debug())
3940 << "AccountHistory job for account "
3941 << toBase58(accountId)
3942 << " getSTransaction failed.";
3943 send(rpcError(rpcINTERNAL), true);
3944 return;
3945 // LCOV_EXCL_STOP
3946 }
3947
3948 auto const mRef = std::ref(*meta);
3949 auto const trR = meta->getResultTER();
3950 MultiApiJson jvTx =
3951 transJson(stTxn, trR, true, curTxLedger, mRef);
3952
3953 jvTx.set(
3954 jss::account_history_tx_index, txHistoryIndex--);
3955 if (i + 1 == num_txns ||
3956 txns[i + 1].first->getLedger() != tx->getLedger())
3957 jvTx.set(jss::account_history_boundary, true);
3958
3959 if (isFirstTx(tx, meta))
3960 {
3961 jvTx.set(jss::account_history_tx_first, true);
3962 sendMultiApiJson(jvTx, false);
3963
3964 JLOG(m_journal.trace())
3965 << "AccountHistory job for account "
3966 << toBase58(accountId)
3967 << " done, found last tx.";
3968 return;
3969 }
3970 else
3971 {
3972 sendMultiApiJson(jvTx, false);
3973 }
3974 }
3975
3976 if (marker)
3977 {
3978 JLOG(m_journal.trace())
3979 << "AccountHistory job for account "
3980 << toBase58(accountId)
3981 << " paging, marker=" << marker->ledgerSeq << ":"
3982 << marker->txnSeq;
3983 }
3984 else
3985 {
3986 break;
3987 }
3988 }
3989
3990 if (!subInfo.index_->stopHistorical_)
3991 {
3992 lastLedgerSeq = startLedgerSeq - 1;
3993 if (lastLedgerSeq <= 1)
3994 {
3995 JLOG(m_journal.trace())
3996 << "AccountHistory job for account "
3997 << toBase58(accountId)
3998 << " done, reached genesis ledger.";
3999 return;
4000 }
4001 }
4002 }
4003 });
4004}
4005
4006void
4008 std::shared_ptr<ReadView const> const& ledger,
4010{
4011 subInfo.index_->separationLedgerSeq_ = ledger->seq();
4012 auto const& accountId = subInfo.index_->accountId_;
4013 auto const accountKeylet = keylet::account(accountId);
4014 if (!ledger->exists(accountKeylet))
4015 {
4016 JLOG(m_journal.debug())
4017 << "subAccountHistoryStart, no account " << toBase58(accountId)
4018 << ", no need to add AccountHistory job.";
4019 return;
4020 }
4021 if (accountId == genesisAccountId)
4022 {
4023 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
4024 {
4025 if (sleAcct->getFieldU32(sfSequence) == 1)
4026 {
4027 JLOG(m_journal.debug())
4028 << "subAccountHistoryStart, genesis account "
4029 << toBase58(accountId)
4030 << " does not have tx, no need to add AccountHistory job.";
4031 return;
4032 }
4033 }
4034 else
4035 {
4036 // LCOV_EXCL_START
4037 UNREACHABLE(
4038 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
4039 "access genesis account");
4040 return;
4041 // LCOV_EXCL_STOP
4042 }
4043 }
4044 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
4045 subInfo.index_->haveHistorical_ = true;
4046
4047 JLOG(m_journal.debug())
4048 << "subAccountHistoryStart, add AccountHistory job: accountId="
4049 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
4050
4051 addAccountHistoryJob(subInfo);
4052}
4053
4056 InfoSub::ref isrListener,
4057 AccountID const& accountId)
4058{
4059 if (!isrListener->insertSubAccountHistory(accountId))
4060 {
4061 JLOG(m_journal.debug())
4062 << "subAccountHistory, already subscribed to account "
4063 << toBase58(accountId);
4064 return rpcINVALID_PARAMS;
4065 }
4066
4069 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
4070 auto simIterator = mSubAccountHistory.find(accountId);
4071 if (simIterator == mSubAccountHistory.end())
4072 {
4074 inner.emplace(isrListener->getSeq(), ahi);
4076 simIterator, std::make_pair(accountId, inner));
4077 }
4078 else
4079 {
4080 simIterator->second.emplace(isrListener->getSeq(), ahi);
4081 }
4082
4083 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
4084 if (ledger)
4085 {
4086 subAccountHistoryStart(ledger, ahi);
4087 }
4088 else
4089 {
4090 // The node does not have validated ledgers, so wait for
4091 // one before start streaming.
4092 // In this case, the subscription is also considered successful.
4093 JLOG(m_journal.debug())
4094 << "subAccountHistory, no validated ledger yet, delay start";
4095 }
4096
4097 return rpcSUCCESS;
4098}
4099
4100void
4102 InfoSub::ref isrListener,
4103 AccountID const& account,
4104 bool historyOnly)
4105{
4106 if (!historyOnly)
4107 isrListener->deleteSubAccountHistory(account);
4108 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
4109}
4110
4111void
4113 std::uint64_t seq,
4114 AccountID const& account,
4115 bool historyOnly)
4116{
4118 auto simIterator = mSubAccountHistory.find(account);
4119 if (simIterator != mSubAccountHistory.end())
4120 {
4121 auto& subInfoMap = simIterator->second;
4122 auto subInfoIter = subInfoMap.find(seq);
4123 if (subInfoIter != subInfoMap.end())
4124 {
4125 subInfoIter->second.index_->stopHistorical_ = true;
4126 }
4127
4128 if (!historyOnly)
4129 {
4130 simIterator->second.erase(seq);
4131 if (simIterator->second.empty())
4132 {
4133 mSubAccountHistory.erase(simIterator);
4134 }
4135 }
4136 JLOG(m_journal.debug())
4137 << "unsubAccountHistory, account " << toBase58(account)
4138 << ", historyOnly = " << (historyOnly ? "true" : "false");
4139 }
4140}
4141
4142bool
4144{
4145 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
4146 listeners->addSubscriber(isrListener);
4147 else
4148 {
4149 // LCOV_EXCL_START
4150 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
4151 // LCOV_EXCL_STOP
4152 }
4153 return true;
4154}
4155
4156bool
4158{
4159 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
4160 listeners->removeSubscriber(uSeq);
4161
4162 return true;
4163}
4164
4168{
4169 // This code-path is exclusively used when the server is in standalone
4170 // mode via `ledger_accept`
4171 XRPL_ASSERT(
4172 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
4173
4174 if (!m_standalone)
4175 Throw<std::runtime_error>(
4176 "Operation only possible in STANDALONE mode.");
4177
4178 // FIXME Could we improve on this and remove the need for a specialized
4179 // API in Consensus?
4180 beginConsensus(m_ledgerMaster.getClosedLedger()->info().hash, {});
4181 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
4182 return m_ledgerMaster.getCurrentLedger()->info().seq;
4183}
4184
4185// <-- bool: true=added, false=already there
4186bool
4188{
4189 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
4190 {
4191 jvResult[jss::ledger_index] = lpClosed->info().seq;
4192 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
4193 jvResult[jss::ledger_time] = Json::Value::UInt(
4194 lpClosed->info().closeTime.time_since_epoch().count());
4195 if (!lpClosed->rules().enabled(featureXRPFees))
4196 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
4197 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
4198 jvResult[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
4199 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
4200 jvResult[jss::network_id] = app_.config().NETWORK_ID;
4201 }
4202
4204 {
4205 jvResult[jss::validated_ledgers] =
4207 }
4208
4210 return mStreamMaps[sLedger]
4211 .emplace(isrListener->getSeq(), isrListener)
4212 .second;
4213}
4214
4215// <-- bool: true=added, false=already there
4216bool
4218{
4221 .emplace(isrListener->getSeq(), isrListener)
4222 .second;
4223}
4224
4225// <-- bool: true=erased, false=was not there
4226bool
4228{
4230 return mStreamMaps[sLedger].erase(uSeq);
4231}
4232
4233// <-- bool: true=erased, false=was not there
4234bool
4240
4241// <-- bool: true=added, false=already there
4242bool
4244{
4246 return mStreamMaps[sManifests]
4247 .emplace(isrListener->getSeq(), isrListener)
4248 .second;
4249}
4250
4251// <-- bool: true=erased, false=was not there
4252bool
4258
4259// <-- bool: true=added, false=already there
4260bool
4262 InfoSub::ref isrListener,
4263 Json::Value& jvResult,
4264 bool admin)
4265{
4266 uint256 uRandom;
4267
4268 if (m_standalone)
4269 jvResult[jss::stand_alone] = m_standalone;
4270
4271 // CHECKME: is it necessary to provide a random number here?
4272 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
4273
4274 auto const& feeTrack = app_.getFeeTrack();
4275 jvResult[jss::random] = to_string(uRandom);
4276 jvResult[jss::server_status] = strOperatingMode(admin);
4277 jvResult[jss::load_base] = feeTrack.getLoadBase();
4278 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4279 jvResult[jss::hostid] = getHostId(admin);
4280 jvResult[jss::pubkey_node] =
4282
4284 return mStreamMaps[sServer]
4285 .emplace(isrListener->getSeq(), isrListener)
4286 .second;
4287}
4288
4289// <-- bool: true=erased, false=was not there
4290bool
4292{
4294 return mStreamMaps[sServer].erase(uSeq);
4295}
4296
4297// <-- bool: true=added, false=already there
4298bool
4300{
4303 .emplace(isrListener->getSeq(), isrListener)
4304 .second;
4305}
4306
4307// <-- bool: true=erased, false=was not there
4308bool
4314
4315// <-- bool: true=added, false=already there
4316bool
4318{
4321 .emplace(isrListener->getSeq(), isrListener)
4322 .second;
4323}
4324
4325// <-- bool: true=erased, false=was not there
4326bool
4332
4333// <-- bool: true=added, false=already there
4334bool
4336{
4339 .emplace(isrListener->getSeq(), isrListener)
4340 .second;
4341}
4342
4343void
4348
4349// <-- bool: true=erased, false=was not there
4350bool
4356
4357// <-- bool: true=added, false=already there
4358bool
4360{
4362 return mStreamMaps[sPeerStatus]
4363 .emplace(isrListener->getSeq(), isrListener)
4364 .second;
4365}
4366
4367// <-- bool: true=erased, false=was not there
4368bool
4374
4375// <-- bool: true=added, false=already there
4376bool
4378{
4381 .emplace(isrListener->getSeq(), isrListener)
4382 .second;
4383}
4384
4385// <-- bool: true=erased, false=was not there
4386bool
4392
4395{
4397
4398 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4399
4400 if (it != mRpcSubMap.end())
4401 return it->second;
4402
4403 return InfoSub::pointer();
4404}
4405
4408{
4410
4411 mRpcSubMap.emplace(strUrl, rspEntry);
4412
4413 return rspEntry;
4414}
4415
4416bool
4418{
4420 auto pInfo = findRpcSub(strUrl);
4421
4422 if (!pInfo)
4423 return false;
4424
4425 // check to see if any of the stream maps still hold a weak reference to
4426 // this entry before removing
4427 for (SubMapType const& map : mStreamMaps)
4428 {
4429 if (map.find(pInfo->getSeq()) != map.end())
4430 return false;
4431 }
4432 mRpcSubMap.erase(strUrl);
4433 return true;
4434}
4435
4436#ifndef USE_NEW_BOOK_PAGE
4437
4438// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4439// work, but it demonstrated poor performance.
4440//
4441void
4444 Book const& book,
4445 AccountID const& uTakerID,
4446 bool const bProof,
4447 unsigned int iLimit,
4448 Json::Value const& jvMarker,
4449 Json::Value& jvResult)
4450{ // CAUTION: This is the old get book page logic
4451 Json::Value& jvOffers =
4452 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4453
4455 uint256 const uBookBase = getBookBase(book);
4456 uint256 const uBookEnd = getQualityNext(uBookBase);
4457 uint256 uTipIndex = uBookBase;
4458
4459 if (auto stream = m_journal.trace())
4460 {
4461 stream << "getBookPage:" << book;
4462 stream << "getBookPage: uBookBase=" << uBookBase;
4463 stream << "getBookPage: uBookEnd=" << uBookEnd;
4464 stream << "getBookPage: uTipIndex=" << uTipIndex;
4465 }
4466
4467 ReadView const& view = *lpLedger;
4468
4469 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4470 isGlobalFrozen(view, book.in.account);
4471
4472 bool bDone = false;
4473 bool bDirectAdvance = true;
4474
4475 std::shared_ptr<SLE const> sleOfferDir;
4476 uint256 offerIndex;
4477 unsigned int uBookEntry;
4478 STAmount saDirRate;
4479
4480 auto const rate = transferRate(view, book.out.account);
4481 auto viewJ = app_.journal("View");
4482
4483 while (!bDone && iLimit-- > 0)
4484 {
4485 if (bDirectAdvance)
4486 {
4487 bDirectAdvance = false;
4488
4489 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4490
4491 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4492 if (ledgerIndex)
4493 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4494 else
4495 sleOfferDir.reset();
4496
4497 if (!sleOfferDir)
4498 {
4499 JLOG(m_journal.trace()) << "getBookPage: bDone";
4500 bDone = true;
4501 }
4502 else
4503 {
4504 uTipIndex = sleOfferDir->key();
4505 saDirRate = amountFromQuality(getQuality(uTipIndex));
4506
4507 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4508
4509 JLOG(m_journal.trace())
4510 << "getBookPage: uTipIndex=" << uTipIndex;
4511 JLOG(m_journal.trace())
4512 << "getBookPage: offerIndex=" << offerIndex;
4513 }
4514 }
4515
4516 if (!bDone)
4517 {
4518 auto sleOffer = view.read(keylet::offer(offerIndex));
4519
4520 if (sleOffer)
4521 {
4522 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4523 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4524 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4525 STAmount saOwnerFunds;
4526 bool firstOwnerOffer(true);
4527
4528 if (book.out.account == uOfferOwnerID)
4529 {
4530 // If an offer is selling issuer's own IOUs, it is fully
4531 // funded.
4532 saOwnerFunds = saTakerGets;
4533 }
4534 else if (bGlobalFreeze)
4535 {
4536 // If either asset is globally frozen, consider all offers
4537 // that aren't ours to be totally unfunded
4538 saOwnerFunds.clear(book.out);
4539 }
4540 else
4541 {
4542 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4543 if (umBalanceEntry != umBalance.end())
4544 {
4545 // Found in running balance table.
4546
4547 saOwnerFunds = umBalanceEntry->second;
4548 firstOwnerOffer = false;
4549 }
4550 else
4551 {
4552 // Did not find balance in table.
4553
4554 saOwnerFunds = accountHolds(
4555 view,
4556 uOfferOwnerID,
4557 book.out.currency,
4558 book.out.account,
4560 viewJ);
4561
4562 if (saOwnerFunds < beast::zero)
4563 {
4564 // Treat negative funds as zero.
4565
4566 saOwnerFunds.clear();
4567 }
4568 }
4569 }
4570
4571 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4572
4573 STAmount saTakerGetsFunded;
4574 STAmount saOwnerFundsLimit = saOwnerFunds;
4575 Rate offerRate = parityRate;
4576
4577 if (rate != parityRate
4578                     // Have a transfer fee.
4579 && uTakerID != book.out.account
4580 // Not taking offers of own IOUs.
4581 && book.out.account != uOfferOwnerID)
4582                     // Offer owner is not issuing its own funds
4583 {
4584 // Need to charge a transfer fee to offer owner.
4585 offerRate = rate;
4586 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4587 }
4588
4589 if (saOwnerFundsLimit >= saTakerGets)
4590 {
4591                     // Sufficient funds; no shenanigans.
4592 saTakerGetsFunded = saTakerGets;
4593 }
4594 else
4595 {
4596                 // Only provide funded amounts when the offer is not fully funded.
4597
4598 saTakerGetsFunded = saOwnerFundsLimit;
4599
4600 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4601 std::min(
4602 saTakerPays,
4603 multiply(
4604 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4605 .setJson(jvOffer[jss::taker_pays_funded]);
4606 }
4607
4608 STAmount saOwnerPays = (parityRate == offerRate)
4609 ? saTakerGetsFunded
4610 : std::min(
4611 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4612
4613 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4614
4615 // Include all offers funded and unfunded
4616 Json::Value& jvOf = jvOffers.append(jvOffer);
4617 jvOf[jss::quality] = saDirRate.getText();
4618
4619 if (firstOwnerOffer)
4620 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4621 }
4622 else
4623 {
4624 JLOG(m_journal.warn()) << "Missing offer";
4625 }
4626
4627 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4628 {
4629 bDirectAdvance = true;
4630 }
4631 else
4632 {
4633 JLOG(m_journal.trace())
4634 << "getBookPage: offerIndex=" << offerIndex;
4635 }
4636 }
4637 }
4638
4639 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4640 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4641}
4642
4643#else
4644
4645// This is the new code that uses the book iterators
4646// It has temporarily been disabled
4647
4648void
4651 Book const& book,
4652 AccountID const& uTakerID,
4653 bool const bProof,
4654 unsigned int iLimit,
4655 Json::Value const& jvMarker,
4656 Json::Value& jvResult)
4657{
4658 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4659
4661
4662 MetaView lesActive(lpLedger, tapNONE, true);
4663 OrderBookIterator obIterator(lesActive, book);
4664
4665 auto const rate = transferRate(lesActive, book.out.account);
4666
4667 bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4668 lesActive.isGlobalFrozen(book.in.account);
4669
4670 while (iLimit-- > 0 && obIterator.nextOffer())
4671 {
4672 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4673 if (sleOffer)
4674 {
4675 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4676 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4677 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4678 STAmount saDirRate = obIterator.getCurrentRate();
4679 STAmount saOwnerFunds;
4680
4681 if (book.out.account == uOfferOwnerID)
4682 {
4683 // If offer is selling issuer's own IOUs, it is fully funded.
4684 saOwnerFunds = saTakerGets;
4685 }
4686 else if (bGlobalFreeze)
4687 {
4688 // If either asset is globally frozen, consider all offers
4689 // that aren't ours to be totally unfunded
4690 saOwnerFunds.clear(book.out);
4691 }
4692 else
4693 {
4694 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4695
4696 if (umBalanceEntry != umBalance.end())
4697 {
4698 // Found in running balance table.
4699
4700 saOwnerFunds = umBalanceEntry->second;
4701 }
4702 else
4703 {
4704 // Did not find balance in table.
4705
4706 saOwnerFunds = lesActive.accountHolds(
4707 uOfferOwnerID,
4708 book.out.currency,
4709 book.out.account,
4711
4712 if (saOwnerFunds.isNegative())
4713 {
4714 // Treat negative funds as zero.
4715
4716 saOwnerFunds.zero();
4717 }
4718 }
4719 }
4720
4721 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4722
4723 STAmount saTakerGetsFunded;
4724 STAmount saOwnerFundsLimit = saOwnerFunds;
4725 Rate offerRate = parityRate;
4726
4727 if (rate != parityRate
4728                 // Have a transfer fee.
4729 && uTakerID != book.out.account
4730 // Not taking offers of own IOUs.
4731 && book.out.account != uOfferOwnerID)
4732                 // Offer owner is not issuing its own funds
4733 {
4734 // Need to charge a transfer fee to offer owner.
4735 offerRate = rate;
4736 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4737 }
4738
4739 if (saOwnerFundsLimit >= saTakerGets)
4740 {
4741                 // Sufficient funds; no shenanigans.
4742 saTakerGetsFunded = saTakerGets;
4743 }
4744 else
4745 {
4746                 // Only provide funded amounts when the offer is not fully funded.
4747 saTakerGetsFunded = saOwnerFundsLimit;
4748
4749 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4750
4751             // TODO(tom): The result of this expression is not used - what's
4752 // going on here?
4753 std::min(
4754 saTakerPays,
4755 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4756 .setJson(jvOffer[jss::taker_pays_funded]);
4757 }
4758
4759 STAmount saOwnerPays = (parityRate == offerRate)
4760 ? saTakerGetsFunded
4761 : std::min(
4762 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4763
4764 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4765
4766 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4767 {
4768 // Only provide funded offers and offers of the taker.
4769 Json::Value& jvOf = jvOffers.append(jvOffer);
4770 jvOf[jss::quality] = saDirRate.getText();
4771 }
4772 }
4773 }
4774
4775 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4776 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4777}
4778
4779#endif
4780
4781inline void
4783{
4784 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4785 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4787 counters[static_cast<std::size_t>(mode)].dur += current;
4788
4791 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4792 .dur.count());
4794 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4795 .dur.count());
4797 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4799 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4800 .dur.count());
4802 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4803
4805 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4806 .transitions);
4808 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4809 .transitions);
4811 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4813 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4814 .transitions);
4816 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4817}
4818
4819void
4821{
4822 auto now = std::chrono::steady_clock::now();
4823
4824 std::lock_guard lock(mutex_);
4825 ++counters_[static_cast<std::size_t>(om)].transitions;
4826 if (om == OperatingMode::FULL &&
4827 counters_[static_cast<std::size_t>(om)].transitions == 1)
4828 {
4829 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4830 now - processStart_)
4831 .count();
4832 }
4833 counters_[static_cast<std::size_t>(mode_)].dur +=
4834 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4835
4836 mode_ = om;
4837 start_ = now;
4838}
4839
4840void
4842{
4843 auto [counters, mode, start, initialSync] = getCounterData();
4844 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4846 counters[static_cast<std::size_t>(mode)].dur += current;
4847
4848 obj[jss::state_accounting] = Json::objectValue;
4850 i <= static_cast<std::size_t>(OperatingMode::FULL);
4851 ++i)
4852 {
4853 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4854 auto& state = obj[jss::state_accounting][states_[i]];
4855 state[jss::transitions] = std::to_string(counters[i].transitions);
4856 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4857 }
4858 obj[jss::server_state_duration_us] = std::to_string(current.count());
4859 if (initialSync)
4860 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4861}
4862
4863//------------------------------------------------------------------------------
4864
4867 Application& app,
4869 bool standalone,
4870 std::size_t minPeerCount,
4871 bool startvalid,
4872 JobQueue& job_queue,
4874 ValidatorKeys const& validatorKeys,
4875 boost::asio::io_context& io_svc,
4876 beast::Journal journal,
4877 beast::insight::Collector::ptr const& collector)
4878{
4880 app,
4881 clock,
4882 standalone,
4883 minPeerCount,
4884 startvalid,
4885 job_queue,
4887 validatorKeys,
4888 io_svc,
4889 journal,
4890 collector);
4891}
4892
4893} // namespace ripple
T any_of(T... args)
T back_inserter(T... args)
T begin(T... args)
Decorator for streaming out compact json.
Lightweight wrapper to tag static string.
Definition json_value.h:45
Represents a JSON value.
Definition json_value.h:131
Json::UInt UInt
Definition json_value.h:138
Value & append(Value const &value)
Append value to array at the end.
bool isMember(char const *key) const
Return true if the object has a member named key.
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
A generic endpoint for log messages.
Definition Journal.h:41
Stream error() const
Definition Journal.h:327
Stream debug() const
Definition Journal.h:309
Stream info() const
Definition Journal.h:315
Stream trace() const
Severity stream access functions.
Definition Journal.h:303
Stream warn() const
Definition Journal.h:321
A metric for measuring an integral value.
Definition Gauge.h:21
void set(value_type value) const
Set the value on the gauge.
Definition Gauge.h:49
A reference to a handler for performing polled collection.
Definition Hook.h:13
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition Book.h:17
Issue in
Definition Book.h:19
Issue out
Definition Book.h:20
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition ClusterNode.h:27
std::uint32_t getLoadFee() const
Definition ClusterNode.h:33
NetClock::time_point getReportTime() const
Definition ClusterNode.h:39
PublicKey const & identity() const
Definition ClusterNode.h:45
std::size_t size() const
The number of nodes in the cluster list.
Definition Cluster.cpp:30
uint32_t NETWORK_ID
Definition Config.h:137
std::string SERVER_DOMAIN
Definition Config.h:259
std::size_t NODE_SIZE
Definition Config.h:194
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition Config.h:141
int RELAY_UNTRUSTED_VALIDATIONS
Definition Config.h:150
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition InfoSub.h:35
AccountID account
Definition Issue.h:17
Currency currency
Definition Issue.h:16
A pool of threads to perform work.
Definition JobQueue.h:39
Json::Value getJson(int c=0)
Definition JobQueue.cpp:195
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition JobQueue.h:149
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
std::uint32_t getClusterFee() const
std::uint32_t getLocalFee() const
std::uint32_t getLoadBase() const
std::uint32_t getRemoteFee() const
std::uint32_t getLoadFactor() const
Manages load sources.
Definition LoadManager.h:27
void heartbeat()
Reset the stall detection timer.
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
Definition Manifest.cpp:304
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
std::chrono::steady_clock::time_point start_
static std::array< Json::StaticString const, 5 > const states_
std::chrono::steady_clock::time_point const processStart_
Transaction with input flags and results to be applied in batches.
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
std::shared_ptr< Transaction > const transaction
boost::asio::steady_timer accountHistoryTxTimer_
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
std::vector< TransactionStatus > mTransactions
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
std::optional< PublicKey > const validatorPK_
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
beast::Journal m_journal
SubInfoMapType mSubAccount
std::optional< PublicKey > const validatorMasterPK_
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
std::atomic< bool > unlBlocked_
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
std::atomic< bool > needNetworkLedger_
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
void setMode(OperatingMode om) override
void stop() override
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
void clearNeedNetworkLedger() override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
DispatchState mDispatchState
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
void reportFeeChange() override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
std::atomic< bool > amendmentWarned_
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition NetworkOPs.h:70
void getCountsJson(Json::Value &obj)
Definition Database.cpp:248
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Writable ledger view that accumulates state and tx changes.
Definition OpenView.h:46
BookListeners::pointer getBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &alTx, MultiApiJson const &jvObj)
BookListeners::pointer makeBookListeners(Book const &)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Represents a set of transactions in RCLConsensus.
Definition RCLCxTx.h:44
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition RFC1751.cpp:488
Collects logging information.
std::unique_ptr< std::stringstream > const & ss()
A view into a ledger.
Definition ReadView.h:32
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition STAmount.cpp:624
std::string getText() const override
Definition STAmount.cpp:664
Issue const & issue() const
Definition STAmount.h:488
std::optional< T > get(std::string const &name) const
std::size_t size() const noexcept
Definition Serializer.h:53
void const * data() const noexcept
Definition Serializer.h:59
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition TimeKeeper.h:45
std::chrono::seconds closeOffset() const
Definition TimeKeeper.h:64
time_point closeTime() const
Returns the predicted close time, in network time.
Definition TimeKeeper.h:57
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition TxQ.cpp:1757
static time_point now()
Validator keys and manifest as set in configuration file.
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition XRPAmount.h:243
Json::Value jsonClipped() const
Definition XRPAmount.h:199
iterator begin()
Definition base_uint.h:117
static constexpr std::size_t size()
Definition base_uint.h:507
bool isZero() const
Definition base_uint.h:521
bool isNonZero() const
Definition base_uint.h:526
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition scope.h:212
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_same_v
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition json_value.h:26
@ objectValue
object value (collection of name/value pairs).
Definition json_value.h:27
int Int
unsigned int UInt
void rngfill(void *const buffer, std::size_t const bytes, Generator &g)
Definition rngfill.h:15
std::string const & getVersionString()
Server version.
Definition BuildInfo.cpp:49
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Encodes ledger sequence, transaction index, and network ID into a CTID string.
Definition CTID.h:34
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition BookChanges.h:28
void insertNFTSyntheticInJson(Json::Value &, std::shared_ptr< STTx const > const &, TxMeta const &)
Adds common synthetic fields to transaction-related JSON responses.
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition Indexes.cpp:167
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition Indexes.cpp:363
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition Indexes.cpp:257
Rate rate(Env &env, Account const &account, std::uint32_t const &seq)
Definition escrow.cpp:50
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:6
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition AccountID.cpp:95
STAmount divide(STAmount const &amount, Rate const &rate)
Definition Rate2.cpp:74
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition STTx.cpp:801
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition View.cpp:657
@ fhZERO_IF_FROZEN
Definition View.h:59
@ fhIGNORE_FREEZE
Definition View.h:59
std::uint64_t getQuality(uint256 const &uBase)
Definition Indexes.cpp:132
@ rpcSUCCESS
Definition ErrorCodes.h:25
@ rpcINVALID_PARAMS
Definition ErrorCodes.h:65
@ rpcINTERNAL
Definition ErrorCodes.h:111
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
auto constexpr muldiv_max
Definition mulDiv.h:9
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition LocalTxs.cpp:173
STAmount amountFromQuality(std::uint64_t rate)
Definition STAmount.cpp:967
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition ErrorCodes.h:156
@ warnRPC_UNSUPPORTED_MAJORITY
Definition ErrorCodes.h:154
@ warnRPC_AMENDMENT_BLOCKED
Definition ErrorCodes.h:155
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition NetworkOPs.h:49
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition Rate2.cpp:34
AccountID calcAccountID(PublicKey const &pk)
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Json::Value rpcError(int iError)
Definition RPCErr.cpp:12
@ tefPAST_SEQ
Definition TER.h:156
bool isTefFailure(TER x) noexcept
Definition TER.h:647
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
std::string strHex(FwdIt begin, FwdIt end)
Definition strHex.h:11
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition View.cpp:865
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition ApiVersion.h:158
bool isTerRetry(TER x) noexcept
Definition TER.h:653
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition predicates.h:56
@ tesSUCCESS
Definition TER.h:226
uint256 getQualityNext(uint256 const &uBase)
Definition Indexes.cpp:124
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition View.cpp:461
bool isTesSuccess(TER x) noexcept
Definition TER.h:659
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition ReadView.cpp:50
std::string to_string_iso(date::sys_time< Duration > tp)
Definition chrono.h:73
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition View.cpp:126
std::string to_string(base_uint< Bits, Tag > const &a)
Definition base_uint.h:611
FeeSetup setup_FeeVote(Section const &section)
Definition Config.cpp:1110
bool isTemMalformed(TER x) noexcept
Definition TER.h:641
Number root(Number f, unsigned d)
Definition Number.cpp:645
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
@ tapFAIL_HARD
Definition ApplyView.h:16
@ tapUNLIMITED
Definition ApplyView.h:23
@ tapNONE
Definition ApplyView.h:12
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition View.cpp:137
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition apply.cpp:25
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition Seed.cpp:57
constexpr std::size_t maxPoppedTransactions
@ terQUEUED
Definition TER.h:206
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition TER.cpp:230
@ jtNETOP_CLUSTER
Definition Job.h:56
@ jtCLIENT_FEE_CHANGE
Definition Job.h:28
@ jtTRANSACTION
Definition Job.h:43
@ jtTXN_PROC
Definition Job.h:63
@ jtCLIENT_CONSENSUS
Definition Job.h:29
@ jtBATCH
Definition Job.h:46
@ jtCLIENT_ACCT_HIST
Definition Job.h:30
bool isTelLocal(TER x) noexcept
Definition TER.h:635
uint256 getBookBase(Book const &book)
Definition Indexes.cpp:98
constexpr std::uint32_t tfInnerBatchTxn
Definition TxFlags.h:42
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition View.cpp:163
static std::uint32_t trunc32(std::uint64_t v)
@ temINVALID_FLAG
Definition TER.h:92
@ temBAD_SIGNATURE
Definition TER.h:86
static auto const genesisAccountId
STL namespace.
T owns_lock(T... args)
T ref(T... args)
T reserve(T... args)
T reset(T... args)
T set_intersection(T... args)
T size(T... args)
T str(T... args)
std::string serialized
The manifest in serialized form.
Definition Manifest.h:64
std::uint32_t sequence
The sequence number of this manifest.
Definition Manifest.h:76
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition Manifest.h:79
std::optional< Blob > getSignature() const
Returns manifest signature.
Definition Manifest.cpp:225
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition Manifest.h:73
Blob getMasterSignature() const
Returns manifest master key signature.
Definition Manifest.cpp:236
PublicKey masterKey
The master key associated with this manifest.
Definition Manifest.h:67
Server fees published on server subscription.
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
bool operator==(ServerFeeSummary const &b) const
beast::insight::Gauge full_transitions
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
beast::insight::Hook hook
beast::insight::Gauge connected_duration
beast::insight::Gauge tracking_duration
beast::insight::Gauge connected_transitions
beast::insight::Gauge disconnected_transitions
beast::insight::Gauge syncing_duration
beast::insight::Gauge tracking_transitions
beast::insight::Gauge full_duration
beast::insight::Gauge disconnected_duration
beast::insight::Gauge syncing_transitions
SubAccountHistoryIndex(AccountID const &accountId)
std::shared_ptr< SubAccountHistoryIndex > index_
std::shared_ptr< SubAccountHistoryIndex > index_
Represents a transfer rate.
Definition Rate.h:21
Data format for exchanging consumption information across peers.
Definition Gossip.h:13
std::vector< Item > items
Definition Gossip.h:25
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition TxQ.h:146
IsMemberResult isMember(char const *key) const
void set(char const *key, auto const &v)
Select all peers (except optional excluded) that are in our cluster.
Definition predicates.h:118
Sends a message to all peers.
Definition predicates.h:13
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)