rippled
Loading...
Searching...
No Matches
NetworkOPs.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/main/Tuning.h>
32#include <xrpld/app/misc/AmendmentTable.h>
33#include <xrpld/app/misc/DeliverMax.h>
34#include <xrpld/app/misc/HashRouter.h>
35#include <xrpld/app/misc/LoadFeeTrack.h>
36#include <xrpld/app/misc/NetworkOPs.h>
37#include <xrpld/app/misc/Transaction.h>
38#include <xrpld/app/misc/TxQ.h>
39#include <xrpld/app/misc/ValidatorKeys.h>
40#include <xrpld/app/misc/ValidatorList.h>
41#include <xrpld/app/misc/detail/AccountTxPaging.h>
42#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
43#include <xrpld/app/tx/apply.h>
44#include <xrpld/consensus/Consensus.h>
45#include <xrpld/consensus/ConsensusParms.h>
46#include <xrpld/overlay/Cluster.h>
47#include <xrpld/overlay/Overlay.h>
48#include <xrpld/overlay/predicates.h>
49#include <xrpld/perflog/PerfLog.h>
50#include <xrpld/rpc/BookChanges.h>
51#include <xrpld/rpc/CTID.h>
52#include <xrpld/rpc/DeliveredAmount.h>
53#include <xrpld/rpc/MPTokenIssuanceID.h>
54#include <xrpld/rpc/ServerHandler.h>
55
56#include <xrpl/basics/UptimeClock.h>
57#include <xrpl/basics/mulDiv.h>
58#include <xrpl/basics/safe_cast.h>
59#include <xrpl/basics/scope.h>
60#include <xrpl/beast/utility/rngfill.h>
61#include <xrpl/crypto/RFC1751.h>
62#include <xrpl/crypto/csprng.h>
63#include <xrpl/protocol/BuildInfo.h>
64#include <xrpl/protocol/Feature.h>
65#include <xrpl/protocol/MultiApiJson.h>
66#include <xrpl/protocol/NFTSyntheticSerializer.h>
67#include <xrpl/protocol/RPCErr.h>
68#include <xrpl/protocol/TxFlags.h>
69#include <xrpl/protocol/jss.h>
70#include <xrpl/resource/Fees.h>
71#include <xrpl/resource/ResourceManager.h>
72
73#include <boost/asio/ip/host_name.hpp>
74#include <boost/asio/steady_timer.hpp>
75
76#include <algorithm>
77#include <exception>
78#include <mutex>
79#include <optional>
80#include <set>
81#include <sstream>
82#include <string>
83#include <tuple>
84#include <unordered_map>
85
86namespace ripple {
87
88class NetworkOPsImp final : public NetworkOPs
89{
95 {
96 public:
98 bool const admin;
99 bool const local;
101 bool applied = false;
103
106 bool a,
107 bool l,
108 FailHard f)
109 : transaction(t), admin(a), local(l), failType(f)
110 {
111 XRPL_ASSERT(
113 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
114 "valid inputs");
115 }
116 };
117
121 enum class DispatchState : unsigned char {
122 none,
123 scheduled,
124 running,
125 };
126
128
144 {
152
156 std::chrono::steady_clock::time_point start_ =
158 std::chrono::steady_clock::time_point const processStart_ = start_;
161
162 public:
164 {
166 .transitions = 1;
167 }
168
175 void
177
183 void
184 json(Json::Value& obj) const;
185
187 {
189 decltype(mode_) mode;
190 decltype(start_) start;
192 };
193
196 {
199 }
200 };
201
204 {
205 ServerFeeSummary() = default;
206
208 XRPAmount fee,
209 TxQ::Metrics&& escalationMetrics,
210 LoadFeeTrack const& loadFeeTrack);
211 bool
212 operator!=(ServerFeeSummary const& b) const;
213
214 bool
216 {
217 return !(*this != b);
218 }
219
224 };
225
226public:
228 Application& app,
230 bool standalone,
231 std::size_t minPeerCount,
232 bool start_valid,
233 JobQueue& job_queue,
235 ValidatorKeys const& validatorKeys,
236 boost::asio::io_context& io_svc,
237 beast::Journal journal,
238 beast::insight::Collector::ptr const& collector)
239 : app_(app)
240 , m_journal(journal)
243 , heartbeatTimer_(io_svc)
244 , clusterTimer_(io_svc)
245 , accountHistoryTxTimer_(io_svc)
246 , mConsensus(
247 app,
249 setup_FeeVote(app_.config().section("voting")),
250 app_.logs().journal("FeeVote")),
252 *m_localTX,
253 app.getInboundTransactions(),
254 beast::get_abstract_clock<std::chrono::steady_clock>(),
255 validatorKeys,
256 app_.logs().journal("LedgerConsensus"))
257 , validatorPK_(
258 validatorKeys.keys ? validatorKeys.keys->publicKey
259 : decltype(validatorPK_){})
261 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
262 : decltype(validatorMasterPK_){})
264 , m_job_queue(job_queue)
265 , m_standalone(standalone)
266 , minPeerCount_(start_valid ? 0 : minPeerCount)
267 , m_stats(std::bind(&NetworkOPsImp::collect_metrics, this), collector)
268 {
269 }
270
271 ~NetworkOPsImp() override
272 {
273 // This clear() is necessary to ensure the shared_ptrs in this map get
274 // destroyed NOW because the objects in this map invoke methods on this
275 // class when they are destroyed
277 }
278
279public:
281 getOperatingMode() const override;
282
284 strOperatingMode(OperatingMode const mode, bool const admin) const override;
285
287 strOperatingMode(bool const admin = false) const override;
288
289 //
290 // Transaction operations.
291 //
292
293 // Must complete immediately.
294 void
296
297 void
299 std::shared_ptr<Transaction>& transaction,
300 bool bUnlimited,
301 bool bLocal,
302 FailHard failType) override;
303
304 void
305 processTransactionSet(CanonicalTXSet const& set) override;
306
315 void
318 bool bUnlimited,
319 FailHard failType);
320
330 void
333 bool bUnlimited,
334 FailHard failtype);
335
336private:
337 bool
339
340 void
343 std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback);
344
345public:
349 void
351
357 void
359
360 //
361 // Owner functions.
362 //
363
367 AccountID const& account) override;
368
369 //
370 // Book functions.
371 //
372
373 void
376 Book const&,
377 AccountID const& uTakerID,
378 bool const bProof,
379 unsigned int iLimit,
380 Json::Value const& jvMarker,
381 Json::Value& jvResult) override;
382
383 // Ledger proposal/close functions.
384 bool
386
387 bool
390 std::string const& source) override;
391
392 void
393 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
394
395 // Network state machine.
396
397 // Used for the "jump" case.
398private:
399 void
401 bool
403
404public:
405 bool
407 uint256 const& networkClosed,
408 std::unique_ptr<std::stringstream> const& clog) override;
409 void
411 void
412 setStandAlone() override;
413
417 void
418 setStateTimer() override;
419
420 void
421 setNeedNetworkLedger() override;
422 void
423 clearNeedNetworkLedger() override;
424 bool
425 isNeedNetworkLedger() override;
426 bool
427 isFull() override;
428
429 void
430 setMode(OperatingMode om) override;
431
432 bool
433 isBlocked() override;
434 bool
435 isAmendmentBlocked() override;
436 void
437 setAmendmentBlocked() override;
438 bool
439 isAmendmentWarned() override;
440 void
441 setAmendmentWarned() override;
442 void
443 clearAmendmentWarned() override;
444 bool
445 isUNLBlocked() override;
446 void
447 setUNLBlocked() override;
448 void
449 clearUNLBlocked() override;
450 void
451 consensusViewChange() override;
452
454 getConsensusInfo() override;
456 getServerInfo(bool human, bool admin, bool counters) override;
457 void
458 clearLedgerFetch() override;
460 getLedgerFetchInfo() override;
463 std::optional<std::chrono::milliseconds> consensusDelay) override;
464 void
465 reportFeeChange() override;
466 void
468
469 void
470 updateLocalTx(ReadView const& view) override;
472 getLocalTxCount() override;
473
474 //
475 // Monitoring: publisher side.
476 //
477 void
478 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
479 void
482 std::shared_ptr<STTx const> const& transaction,
483 TER result) override;
484 void
485 pubValidation(std::shared_ptr<STValidation> const& val) override;
486
487 //--------------------------------------------------------------------------
488 //
489 // InfoSub::Source.
490 //
491 void
493 InfoSub::ref ispListener,
494 hash_set<AccountID> const& vnaAccountIDs,
495 bool rt) override;
496 void
498 InfoSub::ref ispListener,
499 hash_set<AccountID> const& vnaAccountIDs,
500 bool rt) override;
501
502 // Just remove the subscription from the tracking
503 // not from the InfoSub. Needed for InfoSub destruction
504 void
506 std::uint64_t seq,
507 hash_set<AccountID> const& vnaAccountIDs,
508 bool rt) override;
509
511 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
512 override;
513 void
515 InfoSub::ref ispListener,
516 AccountID const& account,
517 bool historyOnly) override;
518
519 void
521 std::uint64_t seq,
522 AccountID const& account,
523 bool historyOnly) override;
524
525 bool
526 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
527 bool
528 unsubLedger(std::uint64_t uListener) override;
529
530 bool
531 subBookChanges(InfoSub::ref ispListener) override;
532 bool
533 unsubBookChanges(std::uint64_t uListener) override;
534
535 bool
536 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
537 override;
538 bool
539 unsubServer(std::uint64_t uListener) override;
540
541 bool
542 subBook(InfoSub::ref ispListener, Book const&) override;
543 bool
544 unsubBook(std::uint64_t uListener, Book const&) override;
545
546 bool
547 subManifests(InfoSub::ref ispListener) override;
548 bool
549 unsubManifests(std::uint64_t uListener) override;
550 void
551 pubManifest(Manifest const&) override;
552
553 bool
554 subTransactions(InfoSub::ref ispListener) override;
555 bool
556 unsubTransactions(std::uint64_t uListener) override;
557
558 bool
559 subRTTransactions(InfoSub::ref ispListener) override;
560 bool
561 unsubRTTransactions(std::uint64_t uListener) override;
562
563 bool
564 subValidations(InfoSub::ref ispListener) override;
565 bool
566 unsubValidations(std::uint64_t uListener) override;
567
568 bool
569 subPeerStatus(InfoSub::ref ispListener) override;
570 bool
571 unsubPeerStatus(std::uint64_t uListener) override;
572 void
573 pubPeerStatus(std::function<Json::Value(void)> const&) override;
574
575 bool
576 subConsensus(InfoSub::ref ispListener) override;
577 bool
578 unsubConsensus(std::uint64_t uListener) override;
579
581 findRpcSub(std::string const& strUrl) override;
583 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
584 bool
585 tryRemoveRpcSub(std::string const& strUrl) override;
586
587 void
588 stop() override
589 {
590 {
591 try
592 {
593 heartbeatTimer_.cancel();
594 }
595 catch (boost::system::system_error const& e)
596 {
597 JLOG(m_journal.error())
598 << "NetworkOPs: heartbeatTimer cancel error: " << e.what();
599 }
600
601 try
602 {
603 clusterTimer_.cancel();
604 }
605 catch (boost::system::system_error const& e)
606 {
607 JLOG(m_journal.error())
608 << "NetworkOPs: clusterTimer cancel error: " << e.what();
609 }
610
611 try
612 {
613 accountHistoryTxTimer_.cancel();
614 }
615 catch (boost::system::system_error const& e)
616 {
617 JLOG(m_journal.error())
618 << "NetworkOPs: accountHistoryTxTimer cancel error: "
619 << e.what();
620 }
621 }
622 // Make sure that any waitHandlers pending in our timers are done.
623 using namespace std::chrono_literals;
624 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
625 }
626
627 void
628 stateAccounting(Json::Value& obj) override;
629
630private:
631 void
632 setTimer(
633 boost::asio::steady_timer& timer,
634 std::chrono::milliseconds const& expiry_time,
635 std::function<void()> onExpire,
636 std::function<void()> onError);
637 void
639 void
641 void
643 void
645
647 transJson(
648 std::shared_ptr<STTx const> const& transaction,
649 TER result,
650 bool validated,
653
654 void
657 AcceptedLedgerTx const& transaction,
658 bool last);
659
660 void
663 AcceptedLedgerTx const& transaction,
664 bool last);
665
666 void
669 std::shared_ptr<STTx const> const& transaction,
670 TER result);
671
672 void
673 pubServer();
674 void
676
678 getHostId(bool forAdmin);
679
680private:
684
685 /*
686 * With a validated ledger to separate history and future, the node
687 * streams historical txns with negative indexes starting from -1,
688 * and streams future txns starting from index 0.
689 * The SubAccountHistoryIndex struct maintains these indexes.
690 * It also has a flag stopHistorical_ for stopping streaming
691 * the historical txns.
692 */
729
733 void
737 void
739 void
741
744
746
748
750
755
757 boost::asio::steady_timer heartbeatTimer_;
758 boost::asio::steady_timer clusterTimer_;
759 boost::asio::steady_timer accountHistoryTxTimer_;
760
762
765
767
769
772
774
776
777 enum SubTypes {
778 sLedger, // Accepted ledgers.
779 sManifests, // Received validator manifests.
780 sServer, // When server changes connectivity state.
781 sTransactions, // All accepted transactions.
782 sRTTransactions, // All proposed and accepted transactions.
783 sValidations, // Received validations.
784 sPeerStatus, // Peer status changes.
785 sConsensusPhase, // Consensus phase
786 sBookChanges, // Per-ledger order book changes
787 sLastEntry // Any new entry must be ADDED ABOVE this one
788 };
789
791
793
795
796 // Whether we are in standalone mode.
797 bool const m_standalone;
798
799 // The number of nodes that we need to consider ourselves connected.
801
802 // Transaction batching.
807
809
812
813private:
814 struct Stats
815 {
816 template <class Handler>
818 Handler const& handler,
819 beast::insight::Collector::ptr const& collector)
820 : hook(collector->make_hook(handler))
821 , disconnected_duration(collector->make_gauge(
822 "State_Accounting",
823 "Disconnected_duration"))
824 , connected_duration(collector->make_gauge(
825 "State_Accounting",
826 "Connected_duration"))
828 collector->make_gauge("State_Accounting", "Syncing_duration"))
829 , tracking_duration(collector->make_gauge(
830 "State_Accounting",
831 "Tracking_duration"))
833 collector->make_gauge("State_Accounting", "Full_duration"))
834 , disconnected_transitions(collector->make_gauge(
835 "State_Accounting",
836 "Disconnected_transitions"))
837 , connected_transitions(collector->make_gauge(
838 "State_Accounting",
839 "Connected_transitions"))
840 , syncing_transitions(collector->make_gauge(
841 "State_Accounting",
842 "Syncing_transitions"))
843 , tracking_transitions(collector->make_gauge(
844 "State_Accounting",
845 "Tracking_transitions"))
847 collector->make_gauge("State_Accounting", "Full_transitions"))
848 {
849 }
850
857
863 };
864
865 std::mutex m_statsMutex; // Mutex to lock m_stats
867
868private:
869 void
871};
872
873//------------------------------------------------------------------------------
874
876 {"disconnected", "connected", "syncing", "tracking", "full"}};
877
879
887
888static auto const genesisAccountId = calcAccountID(
890 .first);
891
892//------------------------------------------------------------------------------
893inline OperatingMode
895{
896 return mMode;
897}
898
// Human-readable operating mode for the currently observed state; delegates
// to the two-argument overload with the present mode.
inline std::string
NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
{
    return strOperatingMode(mMode, admin);
}
904
905inline void
910
911inline void
916
917inline void
922
923inline bool
928
929inline bool
934
937{
938 static std::string const hostname = boost::asio::ip::host_name();
939
940 if (forAdmin)
941 return hostname;
942
943 // For non-admin uses hash the node public key into a
944 // single RFC1751 word:
945 static std::string const shroudedHostId = [this]() {
946 auto const& id = app_.nodeIdentity();
947
948 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
949 }();
950
951 return shroudedHostId;
952}
953
954void
956{
958
959 // Only do this work if a cluster is configured
960 if (app_.cluster().size() != 0)
962}
963
964void
966 boost::asio::steady_timer& timer,
967 std::chrono::milliseconds const& expiry_time,
968 std::function<void()> onExpire,
969 std::function<void()> onError)
970{
971 // Only start the timer if waitHandlerCounter_ is not yet joined.
972 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
973 [this, onExpire, onError](boost::system::error_code const& e) {
974 if ((e.value() == boost::system::errc::success) &&
975 (!m_job_queue.isStopped()))
976 {
977 onExpire();
978 }
979 // Recover as best we can if an unexpected error occurs.
980 if (e.value() != boost::system::errc::success &&
981 e.value() != boost::asio::error::operation_aborted)
982 {
983 // Try again later and hope for the best.
984 JLOG(m_journal.error())
985 << "Timer got error '" << e.message()
986 << "'. Restarting timer.";
987 onError();
988 }
989 }))
990 {
991 timer.expires_after(expiry_time);
992 timer.async_wait(std::move(*optionalCountedHandler));
993 }
994}
995
996void
997NetworkOPsImp::setHeartbeatTimer()
998{
999 setTimer(
1000 heartbeatTimer_,
1001 mConsensus.parms().ledgerGRANULARITY,
1002 [this]() {
1003 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
1004 processHeartbeatTimer();
1005 });
1006 },
1007 [this]() { setHeartbeatTimer(); });
1008}
1009
1010void
1011NetworkOPsImp::setClusterTimer()
1012{
1013 using namespace std::chrono_literals;
1014
1015 setTimer(
1016 clusterTimer_,
1017 10s,
1018 [this]() {
1019 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
1020 processClusterTimer();
1021 });
1022 },
1023 [this]() { setClusterTimer(); });
1024}
1025
1026void
1027NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
1028{
1029 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
1030 << toBase58(subInfo.index_->accountId_);
1031 using namespace std::chrono_literals;
1032 setTimer(
1033 accountHistoryTxTimer_,
1034 4s,
1035 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1036 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1037}
1038
1039void
1040NetworkOPsImp::processHeartbeatTimer()
1041{
1042 RclConsensusLogger clog(
1043 "Heartbeat Timer", mConsensus.validating(), m_journal);
1044 {
1045 std::unique_lock lock{app_.getMasterMutex()};
1046
1047 // VFALCO NOTE This is for diagnosing a crash on exit
1048 LoadManager& mgr(app_.getLoadManager());
1049 mgr.heartbeat();
1050
1051 std::size_t const numPeers = app_.overlay().size();
1052
1053 // do we have sufficient peers? If not, we are disconnected.
1054 if (numPeers < minPeerCount_)
1055 {
1056 if (mMode != OperatingMode::DISCONNECTED)
1057 {
1058 setMode(OperatingMode::DISCONNECTED);
1060 ss << "Node count (" << numPeers << ") has fallen "
1061 << "below required minimum (" << minPeerCount_ << ").";
1062 JLOG(m_journal.warn()) << ss.str();
1063 CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
1064 }
1065 else
1066 {
1067 CLOG(clog.ss())
1068 << "already DISCONNECTED. too few peers (" << numPeers
1069 << "), need at least " << minPeerCount_;
1070 }
1071
1072 // MasterMutex lock need not be held to call setHeartbeatTimer()
1073 lock.unlock();
1074 // We do not call mConsensus.timerEntry until there are enough
1075 // peers providing meaningful inputs to consensus
1076 setHeartbeatTimer();
1077
1078 return;
1079 }
1080
1081 if (mMode == OperatingMode::DISCONNECTED)
1082 {
1083 setMode(OperatingMode::CONNECTED);
1084 JLOG(m_journal.info())
1085 << "Node count (" << numPeers << ") is sufficient.";
1086 CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
1087 << " peers. ";
1088 }
1089
1090 // Check if the last validated ledger forces a change between these
1091 // states.
1092 auto origMode = mMode.load();
1093 CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
1094 if (mMode == OperatingMode::SYNCING)
1095 setMode(OperatingMode::SYNCING);
1096 else if (mMode == OperatingMode::CONNECTED)
1097 setMode(OperatingMode::CONNECTED);
1098 auto newMode = mMode.load();
1099 if (origMode != newMode)
1100 {
1101 CLOG(clog.ss())
1102 << ", changing to " << strOperatingMode(newMode, true);
1103 }
1104 CLOG(clog.ss()) << ". ";
1105 }
1106
1107 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());
1108
1109 CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
1110 ConsensusPhase const currPhase = mConsensus.phase();
1111 if (mLastConsensusPhase != currPhase)
1112 {
1113 reportConsensusStateChange(currPhase);
1114 mLastConsensusPhase = currPhase;
1115 CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
1116 }
1117 CLOG(clog.ss()) << ". ";
1118
1119 setHeartbeatTimer();
1120}
1121
1122void
1123NetworkOPsImp::processClusterTimer()
1124{
1125 if (app_.cluster().size() == 0)
1126 return;
1127
1128 using namespace std::chrono_literals;
1129
1130 bool const update = app_.cluster().update(
1131 app_.nodeIdentity().first,
1132 "",
1133 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1134 ? app_.getFeeTrack().getLocalFee()
1135 : 0,
1136 app_.timeKeeper().now());
1137
1138 if (!update)
1139 {
1140 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1141 setClusterTimer();
1142 return;
1143 }
1144
1145 protocol::TMCluster cluster;
1146 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1147 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1148 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1149 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1150 n.set_nodeload(node.getLoadFee());
1151 if (!node.name().empty())
1152 n.set_nodename(node.name());
1153 });
1154
1155 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1156 for (auto& item : gossip.items)
1157 {
1158 protocol::TMLoadSource& node = *cluster.add_loadsources();
1159 node.set_name(to_string(item.address));
1160 node.set_cost(item.balance);
1161 }
1162 app_.overlay().foreach(send_if(
1163 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1164 peer_in_cluster()));
1165 setClusterTimer();
1166}
1167
1168//------------------------------------------------------------------------------
1169
1171NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
1172 const
1173{
1174 if (mode == OperatingMode::FULL && admin)
1175 {
1176 auto const consensusMode = mConsensus.mode();
1177 if (consensusMode != ConsensusMode::wrongLedger)
1178 {
1179 if (consensusMode == ConsensusMode::proposing)
1180 return "proposing";
1181
1182 if (mConsensus.validating())
1183 return "validating";
1184 }
1185 }
1186
1187 return states_[static_cast<std::size_t>(mode)];
1188}
1189
1190void
1191NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1192{
1193 if (isNeedNetworkLedger())
1194 {
1195 // Nothing we can do if we've never been in sync
1196 return;
1197 }
1198
1199 // Enforce Network bar for batch txn
1200 if (iTrans->isFlag(tfInnerBatchTxn) &&
1201 m_ledgerMaster.getValidatedRules().enabled(featureBatch))
1202 {
1203 JLOG(m_journal.error())
1204 << "Submitted transaction invalid: tfInnerBatchTxn flag present.";
1205 return;
1206 }
1207
1208 // this is an asynchronous interface
1209 auto const trans = sterilize(*iTrans);
1210
1211 auto const txid = trans->getTransactionID();
1212 auto const flags = app_.getHashRouter().getFlags(txid);
1213
1214 if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1215 {
1216 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1217 return;
1218 }
1219
1220 try
1221 {
1222 auto const [validity, reason] = checkValidity(
1223 app_.getHashRouter(),
1224 *trans,
1225 m_ledgerMaster.getValidatedRules(),
1226 app_.config());
1227
1228 if (validity != Validity::Valid)
1229 {
1230 JLOG(m_journal.warn())
1231 << "Submitted transaction invalid: " << reason;
1232 return;
1233 }
1234 }
1235 catch (std::exception const& ex)
1236 {
1237 JLOG(m_journal.warn())
1238 << "Exception checking transaction " << txid << ": " << ex.what();
1239
1240 return;
1241 }
1242
1243 std::string reason;
1244
1245 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1246
1247 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1248 auto t = tx;
1249 processTransaction(t, false, false, FailHard::no);
1250 });
1251}
1252
1253bool
1254NetworkOPsImp::preProcessTransaction(std::shared_ptr<Transaction>& transaction)
1255{
1256 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1257
1258 if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1259 {
1260 // cached bad
1261 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1262 transaction->setStatus(INVALID);
1263 transaction->setResult(temBAD_SIGNATURE);
1264 return false;
1265 }
1266
1267 auto const view = m_ledgerMaster.getCurrentLedger();
1268
1269 // This function is called by several different parts of the codebase
1270 // under no circumstances will we ever accept an inner txn within a batch
1271 // txn from the network.
1272 auto const sttx = *transaction->getSTransaction();
1273 if (sttx.isFlag(tfInnerBatchTxn) && view->rules().enabled(featureBatch))
1274 {
1275 transaction->setStatus(INVALID);
1276 transaction->setResult(temINVALID_FLAG);
1277 app_.getHashRouter().setFlags(
1278 transaction->getID(), HashRouterFlags::BAD);
1279 return false;
1280 }
1281
1282 // NOTE eahennis - I think this check is redundant,
1283 // but I'm not 100% sure yet.
1284 // If so, only cost is looking up HashRouter flags.
1285 auto const [validity, reason] =
1286 checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config());
1287 XRPL_ASSERT(
1288 validity == Validity::Valid,
1289 "ripple::NetworkOPsImp::processTransaction : valid validity");
1290
1291 // Not concerned with local checks at this point.
1292 if (validity == Validity::SigBad)
1293 {
1294 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1295 transaction->setStatus(INVALID);
1296 transaction->setResult(temBAD_SIGNATURE);
1297 app_.getHashRouter().setFlags(
1298 transaction->getID(), HashRouterFlags::BAD);
1299 return false;
1300 }
1301
1302 // canonicalize can change our pointer
1303 app_.getMasterTransaction().canonicalize(&transaction);
1304
1305 return true;
1306}
1307
1308void
1309NetworkOPsImp::processTransaction(
1310 std::shared_ptr<Transaction>& transaction,
1311 bool bUnlimited,
1312 bool bLocal,
1313 FailHard failType)
1314{
1315 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1316
1317 // preProcessTransaction can change our pointer
1318 if (!preProcessTransaction(transaction))
1319 return;
1320
1321 if (bLocal)
1322 doTransactionSync(transaction, bUnlimited, failType);
1323 else
1324 doTransactionAsync(transaction, bUnlimited, failType);
1325}
1326
1327void
1328NetworkOPsImp::doTransactionAsync(
1329 std::shared_ptr<Transaction> transaction,
1330 bool bUnlimited,
1331 FailHard failType)
1332{
1333 std::lock_guard lock(mMutex);
1334
1335 if (transaction->getApplying())
1336 return;
1337
1338 mTransactions.push_back(
1339 TransactionStatus(transaction, bUnlimited, false, failType));
1340 transaction->setApplying();
1341
1342 if (mDispatchState == DispatchState::none)
1343 {
1344 if (m_job_queue.addJob(
1345 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1346 {
1347 mDispatchState = DispatchState::scheduled;
1348 }
1349 }
1350}
1351
1352void
1353NetworkOPsImp::doTransactionSync(
1354 std::shared_ptr<Transaction> transaction,
1355 bool bUnlimited,
1356 FailHard failType)
1357{
1358 std::unique_lock<std::mutex> lock(mMutex);
1359
1360 if (!transaction->getApplying())
1361 {
1362 mTransactions.push_back(
1363 TransactionStatus(transaction, bUnlimited, true, failType));
1364 transaction->setApplying();
1365 }
1366
1367 doTransactionSyncBatch(
1368 lock, [&transaction](std::unique_lock<std::mutex> const&) {
1369 return transaction->getApplying();
1370 });
1371}
1372
1373void
1374NetworkOPsImp::doTransactionSyncBatch(
1376 std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback)
1377{
1378 do
1379 {
1380 if (mDispatchState == DispatchState::running)
1381 {
1382 // A batch processing job is already running, so wait.
1383 mCond.wait(lock);
1384 }
1385 else
1386 {
1387 apply(lock);
1388
1389 if (mTransactions.size())
1390 {
1391 // More transactions need to be applied, but by another job.
1392 if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
1393 transactionBatch();
1394 }))
1395 {
1396 mDispatchState = DispatchState::scheduled;
1397 }
1398 }
1399 }
1400 } while (retryCallback(lock));
1401}
1402
1403void
1404NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set)
1405{
1406 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXNSet");
1408 candidates.reserve(set.size());
1409 for (auto const& [_, tx] : set)
1410 {
1411 std::string reason;
1412 auto transaction = std::make_shared<Transaction>(tx, reason, app_);
1413
1414 if (transaction->getStatus() == INVALID)
1415 {
1416 if (!reason.empty())
1417 {
1418 JLOG(m_journal.trace())
1419 << "Exception checking transaction: " << reason;
1420 }
1421 app_.getHashRouter().setFlags(
1422 tx->getTransactionID(), HashRouterFlags::BAD);
1423 continue;
1424 }
1425
1426 // preProcessTransaction can change our pointer
1427 if (!preProcessTransaction(transaction))
1428 continue;
1429
1430 candidates.emplace_back(transaction);
1431 }
1432
1433 std::vector<TransactionStatus> transactions;
1434 transactions.reserve(candidates.size());
1435
1436 std::unique_lock lock(mMutex);
1437
1438 for (auto& transaction : candidates)
1439 {
1440 if (!transaction->getApplying())
1441 {
1442 transactions.emplace_back(transaction, false, false, FailHard::no);
1443 transaction->setApplying();
1444 }
1445 }
1446
1447 if (mTransactions.empty())
1448 mTransactions.swap(transactions);
1449 else
1450 {
1451 mTransactions.reserve(mTransactions.size() + transactions.size());
1452 for (auto& t : transactions)
1453 mTransactions.push_back(std::move(t));
1454 }
1455 if (mTransactions.empty())
1456 {
1457 JLOG(m_journal.debug()) << "No transaction to process!";
1458 return;
1459 }
1460
1461 doTransactionSyncBatch(lock, [&](std::unique_lock<std::mutex> const&) {
1462 XRPL_ASSERT(
1463 lock.owns_lock(),
1464 "ripple::NetworkOPsImp::processTransactionSet has lock");
1465 return std::any_of(
1466 mTransactions.begin(), mTransactions.end(), [](auto const& t) {
1467 return t.transaction->getApplying();
1468 });
1469 });
1470}
1471
1472void
1473NetworkOPsImp::transactionBatch()
1474{
1475 std::unique_lock<std::mutex> lock(mMutex);
1476
1477 if (mDispatchState == DispatchState::running)
1478 return;
1479
1480 while (mTransactions.size())
1481 {
1482 apply(lock);
1483 }
1484}
1485
1486void
1487NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
1488{
1490 std::vector<TransactionStatus> transactions;
1491 mTransactions.swap(transactions);
1492 XRPL_ASSERT(
1493 !transactions.empty(),
1494 "ripple::NetworkOPsImp::apply : non-empty transactions");
1495 XRPL_ASSERT(
1496 mDispatchState != DispatchState::running,
1497 "ripple::NetworkOPsImp::apply : is not running");
1498
1499 mDispatchState = DispatchState::running;
1500
1501 batchLock.unlock();
1502
1503 {
1504 std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
1505 bool changed = false;
1506 {
1507 std::unique_lock ledgerLock{
1508 m_ledgerMaster.peekMutex(), std::defer_lock};
1509 std::lock(masterLock, ledgerLock);
1510
1511 app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
1512 for (TransactionStatus& e : transactions)
1513 {
1514 // we check before adding to the batch
1515 ApplyFlags flags = tapNONE;
1516 if (e.admin)
1517 flags |= tapUNLIMITED;
1518
1519 if (e.failType == FailHard::yes)
1520 flags |= tapFAIL_HARD;
1521
1522 auto const result = app_.getTxQ().apply(
1523 app_, view, e.transaction->getSTransaction(), flags, j);
1524 e.result = result.ter;
1525 e.applied = result.applied;
1526 changed = changed || result.applied;
1527 }
1528 return changed;
1529 });
1530 }
1531 if (changed)
1532 reportFeeChange();
1533
1534 std::optional<LedgerIndex> validatedLedgerIndex;
1535 if (auto const l = m_ledgerMaster.getValidatedLedger())
1536 validatedLedgerIndex = l->info().seq;
1537
1538 auto newOL = app_.openLedger().current();
1539 for (TransactionStatus& e : transactions)
1540 {
1541 e.transaction->clearSubmitResult();
1542
1543 if (e.applied)
1544 {
1545 pubProposedTransaction(
1546 newOL, e.transaction->getSTransaction(), e.result);
1547 e.transaction->setApplied();
1548 }
1549
1550 e.transaction->setResult(e.result);
1551
1552 if (isTemMalformed(e.result))
1553 app_.getHashRouter().setFlags(
1554 e.transaction->getID(), HashRouterFlags::BAD);
1555
1556#ifdef DEBUG
1557 if (e.result != tesSUCCESS)
1558 {
1559 std::string token, human;
1560
1561 if (transResultInfo(e.result, token, human))
1562 {
1563 JLOG(m_journal.info())
1564 << "TransactionResult: " << token << ": " << human;
1565 }
1566 }
1567#endif
1568
1569 bool addLocal = e.local;
1570
1571 if (e.result == tesSUCCESS)
1572 {
1573 JLOG(m_journal.debug())
1574 << "Transaction is now included in open ledger";
1575 e.transaction->setStatus(INCLUDED);
1576
1577 // Pop as many "reasonable" transactions for this account as
1578 // possible. "Reasonable" means they have sequential sequence
1579 // numbers, or use tickets.
1580 auto const& txCur = e.transaction->getSTransaction();
1581
1582 std::size_t count = 0;
1583 for (auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
1584 txNext && count < maxPoppedTransactions;
1585 txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
1586 {
1587 if (!batchLock.owns_lock())
1588 batchLock.lock();
1589 std::string reason;
1590 auto const trans = sterilize(*txNext);
1591 auto t = std::make_shared<Transaction>(trans, reason, app_);
1592 if (t->getApplying())
1593 break;
1594 submit_held.emplace_back(t, false, false, FailHard::no);
1595 t->setApplying();
1596 }
1597 if (batchLock.owns_lock())
1598 batchLock.unlock();
1599 }
1600 else if (e.result == tefPAST_SEQ)
1601 {
1602 // duplicate or conflict
1603 JLOG(m_journal.info()) << "Transaction is obsolete";
1604 e.transaction->setStatus(OBSOLETE);
1605 }
1606 else if (e.result == terQUEUED)
1607 {
1608 JLOG(m_journal.debug())
1609 << "Transaction is likely to claim a"
1610 << " fee, but is queued until fee drops";
1611
1612 e.transaction->setStatus(HELD);
1613 // Add to held transactions, because it could get
1614 // kicked out of the queue, and this will try to
1615 // put it back.
1616 m_ledgerMaster.addHeldTransaction(e.transaction);
1617 e.transaction->setQueued();
1618 e.transaction->setKept();
1619 }
1620 else if (
1621 isTerRetry(e.result) || isTelLocal(e.result) ||
1622 isTefFailure(e.result))
1623 {
1624 if (e.failType != FailHard::yes)
1625 {
1626 auto const lastLedgerSeq =
1627 e.transaction->getSTransaction()->at(
1628 ~sfLastLedgerSequence);
1629 auto const ledgersLeft = lastLedgerSeq
1630 ? *lastLedgerSeq -
1631 m_ledgerMaster.getCurrentLedgerIndex()
1633 // If any of these conditions are met, the transaction can
1634 // be held:
1635 // 1. It was submitted locally. (Note that this flag is only
1636 // true on the initial submission.)
1637 // 2. The transaction has a LastLedgerSequence, and the
1638 // LastLedgerSequence is fewer than LocalTxs::holdLedgers
1639 // (5) ledgers into the future. (Remember that an
1640 // unseated optional compares as less than all seated
1641 // values, so it has to be checked explicitly first.)
1642 // 3. The HashRouterFlags::BAD flag is not set on the txID.
1643 // (setFlags
1644 // checks before setting. If the flag is set, it returns
1645 // false, which means it's been held once without one of
1646 // the other conditions, so don't hold it again. Time's
1647 // up!)
1648 //
1649 if (e.local ||
1650 (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
1651 app_.getHashRouter().setFlags(
1652 e.transaction->getID(), HashRouterFlags::HELD))
1653 {
1654 // transaction should be held
1655 JLOG(m_journal.debug())
1656 << "Transaction should be held: " << e.result;
1657 e.transaction->setStatus(HELD);
1658 m_ledgerMaster.addHeldTransaction(e.transaction);
1659 e.transaction->setKept();
1660 }
1661 else
1662 JLOG(m_journal.debug())
1663 << "Not holding transaction "
1664 << e.transaction->getID() << ": "
1665 << (e.local ? "local" : "network") << ", "
1666 << "result: " << e.result << " ledgers left: "
1667 << (ledgersLeft ? to_string(*ledgersLeft)
1668 : "unspecified");
1669 }
1670 }
1671 else
1672 {
1673 JLOG(m_journal.debug())
1674 << "Status other than success " << e.result;
1675 e.transaction->setStatus(INVALID);
1676 }
1677
1678 auto const enforceFailHard =
1679 e.failType == FailHard::yes && !isTesSuccess(e.result);
1680
1681 if (addLocal && !enforceFailHard)
1682 {
1683 m_localTX->push_back(
1684 m_ledgerMaster.getCurrentLedgerIndex(),
1685 e.transaction->getSTransaction());
1686 e.transaction->setKept();
1687 }
1688
1689 if ((e.applied ||
1690 ((mMode != OperatingMode::FULL) &&
1691 (e.failType != FailHard::yes) && e.local) ||
1692 (e.result == terQUEUED)) &&
1693 !enforceFailHard)
1694 {
1695 auto const toSkip =
1696 app_.getHashRouter().shouldRelay(e.transaction->getID());
1697 if (auto const sttx = *(e.transaction->getSTransaction());
1698 toSkip &&
1699 // Skip relaying if it's an inner batch txn and batch
1700 // feature is enabled
1701 !(sttx.isFlag(tfInnerBatchTxn) &&
1702 newOL->rules().enabled(featureBatch)))
1703 {
1704 protocol::TMTransaction tx;
1705 Serializer s;
1706
1707 sttx.add(s);
1708 tx.set_rawtransaction(s.data(), s.size());
1709 tx.set_status(protocol::tsCURRENT);
1710 tx.set_receivetimestamp(
1711 app_.timeKeeper().now().time_since_epoch().count());
1712 tx.set_deferred(e.result == terQUEUED);
1713 // FIXME: This should be when we received it
1714 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1715 e.transaction->setBroadcast();
1716 }
1717 }
1718
1719 if (validatedLedgerIndex)
1720 {
1721 auto [fee, accountSeq, availableSeq] =
1722 app_.getTxQ().getTxRequiredFeeAndSeq(
1723 *newOL, e.transaction->getSTransaction());
1724 e.transaction->setCurrentLedgerState(
1725 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1726 }
1727 }
1728 }
1729
1730 batchLock.lock();
1731
1732 for (TransactionStatus& e : transactions)
1733 e.transaction->clearApplying();
1734
1735 if (!submit_held.empty())
1736 {
1737 if (mTransactions.empty())
1738 mTransactions.swap(submit_held);
1739 else
1740 {
1741 mTransactions.reserve(mTransactions.size() + submit_held.size());
1742 for (auto& e : submit_held)
1743 mTransactions.push_back(std::move(e));
1744 }
1745 }
1746
1747 mCond.notify_all();
1748
1749 mDispatchState = DispatchState::none;
1750}
1751
1752//
1753// Owner functions
1754//
1755
1757NetworkOPsImp::getOwnerInfo(
1759 AccountID const& account)
1760{
1761 Json::Value jvObjects(Json::objectValue);
1762 auto root = keylet::ownerDir(account);
1763 auto sleNode = lpLedger->read(keylet::page(root));
1764 if (sleNode)
1765 {
1766 std::uint64_t uNodeDir;
1767
1768 do
1769 {
1770 for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1771 {
1772 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1773 XRPL_ASSERT(
1774 sleCur,
1775 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1776
1777 switch (sleCur->getType())
1778 {
1779 case ltOFFER:
1780 if (!jvObjects.isMember(jss::offers))
1781 jvObjects[jss::offers] =
1783
1784 jvObjects[jss::offers].append(
1785 sleCur->getJson(JsonOptions::none));
1786 break;
1787
1788 case ltRIPPLE_STATE:
1789 if (!jvObjects.isMember(jss::ripple_lines))
1790 {
1791 jvObjects[jss::ripple_lines] =
1793 }
1794
1795 jvObjects[jss::ripple_lines].append(
1796 sleCur->getJson(JsonOptions::none));
1797 break;
1798
1799 case ltACCOUNT_ROOT:
1800 case ltDIR_NODE:
1801 default:
1802 UNREACHABLE(
1803 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1804 "type");
1805 break;
1806 }
1807 }
1808
1809 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1810
1811 if (uNodeDir)
1812 {
1813 sleNode = lpLedger->read(keylet::page(root, uNodeDir));
1814 XRPL_ASSERT(
1815 sleNode,
1816 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1817 }
1818 } while (uNodeDir);
1819 }
1820
1821 return jvObjects;
1822}
1823
1824//
1825// Other
1826//
1827
1828inline bool
1829NetworkOPsImp::isBlocked()
1830{
1831 return isAmendmentBlocked() || isUNLBlocked();
1832}
1833
// Whether this server is amendment blocked (see setAmendmentBlocked).
inline bool
NetworkOPsImp::isAmendmentBlocked()
{
    return amendmentBlocked_;
}
1839
// Mark the server amendment blocked and drop to CONNECTED, since a
// blocked server must not track or fully participate in the network.
void
NetworkOPsImp::setAmendmentBlocked()
{
    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1846
// Warned state is only reported while not already amendment blocked.
inline bool
NetworkOPsImp::isAmendmentWarned()
{
    return !amendmentBlocked_ && amendmentWarned_;
}
1852
// Raise the amendment warning flag (cleared by clearAmendmentWarned).
inline void
NetworkOPsImp::setAmendmentWarned()
{
    amendmentWarned_ = true;
}
1858
// Clear the amendment warning flag.
inline void
NetworkOPsImp::clearAmendmentWarned()
{
    amendmentWarned_ = false;
}
1864
// Whether this server is blocked for lack of a usable validator list.
inline bool
NetworkOPsImp::isUNLBlocked()
{
    return unlBlocked_;
}
1870
// Mark the server UNL blocked and drop to CONNECTED, since a blocked
// server must not track or fully participate in the network.
void
NetworkOPsImp::setUNLBlocked()
{
    unlBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1877
// Clear the UNL-blocked flag (mode recovery happens elsewhere).
inline void
NetworkOPsImp::clearUNLBlocked()
{
    unlBlocked_ = false;
}
1883
1884bool
1885NetworkOPsImp::checkLastClosedLedger(
1886 Overlay::PeerSequence const& peerList,
1887 uint256& networkClosed)
1888{
1889 // Returns true if there's an *abnormal* ledger issue, normal changing in
1890 // TRACKING mode should return false. Do we have sufficient validations for
1891 // our last closed ledger? Or do sufficient nodes agree? And do we have no
1892 // better ledger available? If so, we are either tracking or full.
1893
1894 JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";
1895
1896 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1897
1898 if (!ourClosed)
1899 return false;
1900
1901 uint256 closedLedger = ourClosed->info().hash;
1902 uint256 prevClosedLedger = ourClosed->info().parentHash;
1903 JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
1904 JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;
1905
1906 //-------------------------------------------------------------------------
1907 // Determine preferred last closed ledger
1908
1909 auto& validations = app_.getValidations();
1910 JLOG(m_journal.debug())
1911 << "ValidationTrie " << Json::Compact(validations.getJsonTrie());
1912
1913 // Will rely on peer LCL if no trusted validations exist
1915 peerCounts[closedLedger] = 0;
1916 if (mMode >= OperatingMode::TRACKING)
1917 peerCounts[closedLedger]++;
1918
1919 for (auto& peer : peerList)
1920 {
1921 uint256 peerLedger = peer->getClosedLedgerHash();
1922
1923 if (peerLedger.isNonZero())
1924 ++peerCounts[peerLedger];
1925 }
1926
1927 for (auto const& it : peerCounts)
1928 JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;
1929
1930 uint256 preferredLCL = validations.getPreferredLCL(
1931 RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
1932 m_ledgerMaster.getValidLedgerIndex(),
1933 peerCounts);
1934
1935 bool switchLedgers = preferredLCL != closedLedger;
1936 if (switchLedgers)
1937 closedLedger = preferredLCL;
1938 //-------------------------------------------------------------------------
1939 if (switchLedgers && (closedLedger == prevClosedLedger))
1940 {
1941 // don't switch to our own previous ledger
1942 JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
1943 networkClosed = ourClosed->info().hash;
1944 switchLedgers = false;
1945 }
1946 else
1947 networkClosed = closedLedger;
1948
1949 if (!switchLedgers)
1950 return false;
1951
1952 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1953
1954 if (!consensus)
1955 consensus = app_.getInboundLedgers().acquire(
1956 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1957
1958 if (consensus &&
1959 (!m_ledgerMaster.canBeCurrent(consensus) ||
1960 !m_ledgerMaster.isCompatible(
1961 *consensus, m_journal.debug(), "Not switching")))
1962 {
1963 // Don't switch to a ledger not on the validated chain
1964 // or with an invalid close time or sequence
1965 networkClosed = ourClosed->info().hash;
1966 return false;
1967 }
1968
1969 JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
1970 JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
1971 << getJson({*ourClosed, {}});
1972 JLOG(m_journal.info()) << "Net LCL " << closedLedger;
1973
1974 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1975 {
1976 setMode(OperatingMode::CONNECTED);
1977 }
1978
1979 if (consensus)
1980 {
1981 // FIXME: If this rewinds the ledger sequence, or has the same
1982 // sequence, we should update the status on any stored transactions
1983 // in the invalidated ledgers.
1984 switchLastClosedLedger(consensus);
1985 }
1986
1987 return true;
1988}
1989
1990void
1991NetworkOPsImp::switchLastClosedLedger(
1992 std::shared_ptr<Ledger const> const& newLCL)
1993{
1994 // set the newLCL as our last closed ledger -- this is abnormal code
1995 JLOG(m_journal.error())
1996 << "JUMP last closed ledger to " << newLCL->info().hash;
1997
1998 clearNeedNetworkLedger();
1999
2000 // Update fee computations.
2001 app_.getTxQ().processClosedLedger(app_, *newLCL, true);
2002
2003 // Caller must own master lock
2004 {
2005 // Apply tx in old open ledger to new
2006 // open ledger. Then apply local tx.
2007
2008 auto retries = m_localTX->getTxSet();
2009 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
2011 if (lastVal)
2012 rules = makeRulesGivenLedger(*lastVal, app_.config().features);
2013 else
2014 rules.emplace(app_.config().features);
2015 app_.openLedger().accept(
2016 app_,
2017 *rules,
2018 newLCL,
2019 OrderedTxs({}),
2020 false,
2021 retries,
2022 tapNONE,
2023 "jump",
2024 [&](OpenView& view, beast::Journal j) {
2025 // Stuff the ledger with transactions from the queue.
2026 return app_.getTxQ().accept(app_, view);
2027 });
2028 }
2029
2030 m_ledgerMaster.switchLCL(newLCL);
2031
2032 protocol::TMStatusChange s;
2033 s.set_newevent(protocol::neSWITCHED_LEDGER);
2034 s.set_ledgerseq(newLCL->info().seq);
2035 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
2036 s.set_ledgerhashprevious(
2037 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
2038 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
2039
2040 app_.overlay().foreach(
2041 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
2042}
2043
2044bool
2045NetworkOPsImp::beginConsensus(
2046 uint256 const& networkClosed,
2048{
2049 XRPL_ASSERT(
2050 networkClosed.isNonZero(),
2051 "ripple::NetworkOPsImp::beginConsensus : nonzero input");
2052
2053 auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
2054
2055 JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
2056 << " with LCL " << closingInfo.parentHash;
2057
2058 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
2059
2060 if (!prevLedger)
2061 {
2062 // this shouldn't happen unless we jump ledgers
2063 if (mMode == OperatingMode::FULL)
2064 {
2065 JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
2066 setMode(OperatingMode::TRACKING);
2067 CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
2068 }
2069
2070 CLOG(clog) << "beginConsensus no previous ledger. ";
2071 return false;
2072 }
2073
2074 XRPL_ASSERT(
2075 prevLedger->info().hash == closingInfo.parentHash,
2076 "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
2077 "parent");
2078 XRPL_ASSERT(
2079 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
2080 "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
2081 "hash");
2082
2083 if (prevLedger->rules().enabled(featureNegativeUNL))
2084 app_.validators().setNegativeUNL(prevLedger->negativeUNL());
2085 TrustChanges const changes = app_.validators().updateTrusted(
2086 app_.getValidations().getCurrentNodeIDs(),
2087 closingInfo.parentCloseTime,
2088 *this,
2089 app_.overlay(),
2090 app_.getHashRouter());
2091
2092 if (!changes.added.empty() || !changes.removed.empty())
2093 {
2094 app_.getValidations().trustChanged(changes.added, changes.removed);
2095 // Update the AmendmentTable so it tracks the current validators.
2096 app_.getAmendmentTable().trustChanged(
2097 app_.validators().getQuorumKeys().second);
2098 }
2099
2100 mConsensus.startRound(
2101 app_.timeKeeper().closeTime(),
2102 networkClosed,
2103 prevLedger,
2104 changes.removed,
2105 changes.added,
2106 clog);
2107
2108 ConsensusPhase const currPhase = mConsensus.phase();
2109 if (mLastConsensusPhase != currPhase)
2110 {
2111 reportConsensusStateChange(currPhase);
2112 mLastConsensusPhase = currPhase;
2113 }
2114
2115 JLOG(m_journal.debug()) << "Initiating consensus engine";
2116 return true;
2117}
2118
2119bool
2120NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
2121{
2122 auto const& peerKey = peerPos.publicKey();
2123 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
2124 {
2125 // Could indicate a operator misconfiguration where two nodes are
2126 // running with the same validator key configured, so this isn't fatal,
2127 // and it doesn't necessarily indicate peer misbehavior. But since this
2128 // is a trusted message, it could be a very big deal. Either way, we
2129 // don't want to relay the proposal. Note that the byzantine behavior
2130 // detection in handleNewValidation will notify other peers.
2131 //
2132 // Another, innocuous explanation is unusual message routing and delays,
2133 // causing this node to receive its own messages back.
2134 JLOG(m_journal.error())
2135 << "Received a proposal signed by MY KEY from a peer. This may "
2136 "indicate a misconfiguration where another node has the same "
2137 "validator key, or may be caused by unusual message routing and "
2138 "delays.";
2139 return false;
2140 }
2141
2142 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
2143}
2144
2145void
2146NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
2147{
2148 // We now have an additional transaction set
2149 // either created locally during the consensus process
2150 // or acquired from a peer
2151
2152 // Inform peers we have this set
2153 protocol::TMHaveTransactionSet msg;
2154 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
2155 msg.set_status(protocol::tsHAVE);
2156 app_.overlay().foreach(
2157 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
2158
2159 // We acquired it because consensus asked us to
2160 if (fromAcquire)
2161 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
2162}
2163
// Called when a consensus round completes: retire stale peer status,
// re-evaluate the network's last closed ledger, possibly promote our
// operating mode, and start the next round.
void
NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
{
    uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;

    // Any peer still advertising our previous ledger as closed is stale.
    for (auto const& it : app_.overlay().getActivePeers())
    {
        if (it && (it->getClosedLedgerHash() == deadLedger))
        {
            JLOG(m_journal.trace()) << "Killing obsolete peer status";
            it->cycleStatus();
        }
    }

    uint256 networkClosed;
    bool ledgerChange =
        checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);

    if (networkClosed.isZero())
    {
        CLOG(clog) << "endConsensus last closed ledger is zero. ";
        return;
    }

    // WRITEME: Unless we are in FULL and in the process of doing a consensus,
    // we must count how many nodes share our LCL, how many nodes disagree with
    // our LCL, and how many validations our LCL has. We also want to check
    // timing to make sure there shouldn't be a newer LCL. We need this
    // information to do the next three tests.

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::SYNCING)) &&
        !ledgerChange)
    {
        // Count number of peers that agree with us and UNL nodes whose
        // validations we have for LCL. If the ledger is good enough, go to
        // TRACKING - TODO
        if (!needNetworkLedger_)
            setMode(OperatingMode::TRACKING);
    }

    // NOTE: setMode(TRACKING) above may already have changed mMode, and this
    // condition deliberately re-reads it.
    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::TRACKING)) &&
        !ledgerChange)
    {
        // check if the ledger is good enough to go to FULL
        // Note: Do not go to FULL if we don't have the previous ledger
        // check if the ledger is bad enough to go to CONNECTE D -- TODO
        auto current = m_ledgerMaster.getCurrentLedger();
        if (app_.timeKeeper().now() < (current->info().parentCloseTime +
                                       2 * current->info().closeTimeResolution))
        {
            setMode(OperatingMode::FULL);
        }
    }

    beginConsensus(networkClosed, clog);
}
2222
2223void
2224NetworkOPsImp::consensusViewChange()
2225{
2226 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2227 {
2228 setMode(OperatingMode::CONNECTED);
2229 }
2230}
2231
2232void
2233NetworkOPsImp::pubManifest(Manifest const& mo)
2234{
2235 // VFALCO consider std::shared_mutex
2236 std::lock_guard sl(mSubLock);
2237
2238 if (!mStreamMaps[sManifests].empty())
2239 {
2241
2242 jvObj[jss::type] = "manifestReceived";
2243 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
2244 if (mo.signingKey)
2245 jvObj[jss::signing_key] =
2246 toBase58(TokenType::NodePublic, *mo.signingKey);
2247 jvObj[jss::seq] = Json::UInt(mo.sequence);
2248 if (auto sig = mo.getSignature())
2249 jvObj[jss::signature] = strHex(*sig);
2250 jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
2251 if (!mo.domain.empty())
2252 jvObj[jss::domain] = mo.domain;
2253 jvObj[jss::manifest] = strHex(mo.serialized);
2254
2255 for (auto i = mStreamMaps[sManifests].begin();
2256 i != mStreamMaps[sManifests].end();)
2257 {
2258 if (auto p = i->second.lock())
2259 {
2260 p->send(jvObj, true);
2261 ++i;
2262 }
2263 else
2264 {
2265 i = mStreamMaps[sManifests].erase(i);
2266 }
2267 }
2268 }
2269}
2270
// Snapshot of the current fee environment: server load fee factors, the
// open ledger's base fee, and the TxQ's escalation metrics.
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    XRPAmount fee,
    TxQ::Metrics&& escalationMetrics,
    LoadFeeTrack const& loadFeeTrack)
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , baseFee{fee}
    , em{std::move(escalationMetrics)}
{
}
2281
2282bool
2284 NetworkOPsImp::ServerFeeSummary const& b) const
2285{
2286 if (loadFactorServer != b.loadFactorServer ||
2287 loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
2288 em.has_value() != b.em.has_value())
2289 return true;
2290
2291 if (em && b.em)
2292 {
2293 return (
2294 em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
2295 em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
2296 em->referenceFeeLevel != b.em->referenceFeeLevel);
2297 }
2298
2299 return false;
2300}
2301
// Need to cap to uint64 to uint32 due to JSON limitations
static std::uint32_t
trunc32(std::uint64_t v)
{
    // Saturate at the largest value representable in 32 bits.
    constexpr std::uint64_t max32 = std::numeric_limits<std::uint32_t>::max();

    return std::min(max32, v);
};
2310
2311void
2313{
2314 // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
2315 // list into a local array while holding the lock then release
2316 // the lock and call send on everyone.
2317 //
2319
2320 if (!mStreamMaps[sServer].empty())
2321 {
2323
2325 app_.openLedger().current()->fees().base,
2327 app_.getFeeTrack()};
2328
2329 jvObj[jss::type] = "serverStatus";
2330 jvObj[jss::server_status] = strOperatingMode();
2331 jvObj[jss::load_base] = f.loadBaseServer;
2332 jvObj[jss::load_factor_server] = f.loadFactorServer;
2333 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2334
2335 if (f.em)
2336 {
2337 auto const loadFactor = std::max(
2338 safe_cast<std::uint64_t>(f.loadFactorServer),
2339 mulDiv(
2340 f.em->openLedgerFeeLevel,
2341 f.loadBaseServer,
2342 f.em->referenceFeeLevel)
2344
2345 jvObj[jss::load_factor] = trunc32(loadFactor);
2346 jvObj[jss::load_factor_fee_escalation] =
2347 f.em->openLedgerFeeLevel.jsonClipped();
2348 jvObj[jss::load_factor_fee_queue] =
2349 f.em->minProcessingFeeLevel.jsonClipped();
2350 jvObj[jss::load_factor_fee_reference] =
2351 f.em->referenceFeeLevel.jsonClipped();
2352 }
2353 else
2354 jvObj[jss::load_factor] = f.loadFactorServer;
2355
2356 mLastFeeSummary = f;
2357
2358 for (auto i = mStreamMaps[sServer].begin();
2359 i != mStreamMaps[sServer].end();)
2360 {
2361 InfoSub::pointer p = i->second.lock();
2362
2363 // VFALCO TODO research the possibility of using thread queues and
2364 // linearizing the deletion of subscribers with the
2365 // sending of JSON data.
2366 if (p)
2367 {
2368 p->send(jvObj, true);
2369 ++i;
2370 }
2371 else
2372 {
2373 i = mStreamMaps[sServer].erase(i);
2374 }
2375 }
2376 }
2377}
2378
2379void
2381{
2383
2384 auto& streamMap = mStreamMaps[sConsensusPhase];
2385 if (!streamMap.empty())
2386 {
2388 jvObj[jss::type] = "consensusPhase";
2389 jvObj[jss::consensus] = to_string(phase);
2390
2391 for (auto i = streamMap.begin(); i != streamMap.end();)
2392 {
2393 if (auto p = i->second.lock())
2394 {
2395 p->send(jvObj, true);
2396 ++i;
2397 }
2398 else
2399 {
2400 i = streamMap.erase(i);
2401 }
2402 }
2403 }
2404}
2405
2406void
2408{
2409 // VFALCO consider std::shared_mutex
2411
2412 if (!mStreamMaps[sValidations].empty())
2413 {
2415
2416 auto const signerPublic = val->getSignerPublic();
2417
2418 jvObj[jss::type] = "validationReceived";
2419 jvObj[jss::validation_public_key] =
2420 toBase58(TokenType::NodePublic, signerPublic);
2421 jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
2422 jvObj[jss::signature] = strHex(val->getSignature());
2423 jvObj[jss::full] = val->isFull();
2424 jvObj[jss::flags] = val->getFlags();
2425 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2426 jvObj[jss::data] = strHex(val->getSerializer().slice());
2427 jvObj[jss::network_id] = app_.config().NETWORK_ID;
2428
2429 if (auto version = (*val)[~sfServerVersion])
2430 jvObj[jss::server_version] = std::to_string(*version);
2431
2432 if (auto cookie = (*val)[~sfCookie])
2433 jvObj[jss::cookie] = std::to_string(*cookie);
2434
2435 if (auto hash = (*val)[~sfValidatedHash])
2436 jvObj[jss::validated_hash] = strHex(*hash);
2437
2438 auto const masterKey =
2439 app_.validatorManifests().getMasterKey(signerPublic);
2440
2441 if (masterKey != signerPublic)
2442 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
2443
2444 // NOTE *seq is a number, but old API versions used string. We replace
2445 // number with a string using MultiApiJson near end of this function
2446 if (auto const seq = (*val)[~sfLedgerSequence])
2447 jvObj[jss::ledger_index] = *seq;
2448
2449 if (val->isFieldPresent(sfAmendments))
2450 {
2451 jvObj[jss::amendments] = Json::Value(Json::arrayValue);
2452 for (auto const& amendment : val->getFieldV256(sfAmendments))
2453 jvObj[jss::amendments].append(to_string(amendment));
2454 }
2455
2456 if (auto const closeTime = (*val)[~sfCloseTime])
2457 jvObj[jss::close_time] = *closeTime;
2458
2459 if (auto const loadFee = (*val)[~sfLoadFee])
2460 jvObj[jss::load_fee] = *loadFee;
2461
2462 if (auto const baseFee = val->at(~sfBaseFee))
2463 jvObj[jss::base_fee] = static_cast<double>(*baseFee);
2464
2465 if (auto const reserveBase = val->at(~sfReserveBase))
2466 jvObj[jss::reserve_base] = *reserveBase;
2467
2468 if (auto const reserveInc = val->at(~sfReserveIncrement))
2469 jvObj[jss::reserve_inc] = *reserveInc;
2470
2471 // (The ~ operator converts the Proxy to a std::optional, which
2472 // simplifies later operations)
2473 if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2474 baseFeeXRP && baseFeeXRP->native())
2475 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2476
2477 if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2478 reserveBaseXRP && reserveBaseXRP->native())
2479 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2480
2481 if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2482 reserveIncXRP && reserveIncXRP->native())
2483 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2484
2485 // NOTE Use MultiApiJson to publish two slightly different JSON objects
2486 // for consumers supporting different API versions
2487 MultiApiJson multiObj{jvObj};
2488 multiObj.visit(
2489 RPC::apiVersion<1>, //
2490 [](Json::Value& jvTx) {
2491 // Type conversion for older API versions to string
2492 if (jvTx.isMember(jss::ledger_index))
2493 {
2494 jvTx[jss::ledger_index] =
2495 std::to_string(jvTx[jss::ledger_index].asUInt());
2496 }
2497 });
2498
2499 for (auto i = mStreamMaps[sValidations].begin();
2500 i != mStreamMaps[sValidations].end();)
2501 {
2502 if (auto p = i->second.lock())
2503 {
2504 multiObj.visit(
2505 p->getApiVersion(), //
2506 [&](Json::Value const& jv) { p->send(jv, true); });
2507 ++i;
2508 }
2509 else
2510 {
2511 i = mStreamMaps[sValidations].erase(i);
2512 }
2513 }
2514 }
2515}
2516
2517void
2519{
2521
2522 if (!mStreamMaps[sPeerStatus].empty())
2523 {
2524 Json::Value jvObj(func());
2525
2526 jvObj[jss::type] = "peerStatusChange";
2527
2528 for (auto i = mStreamMaps[sPeerStatus].begin();
2529 i != mStreamMaps[sPeerStatus].end();)
2530 {
2531 InfoSub::pointer p = i->second.lock();
2532
2533 if (p)
2534 {
2535 p->send(jvObj, true);
2536 ++i;
2537 }
2538 else
2539 {
2540 i = mStreamMaps[sPeerStatus].erase(i);
2541 }
2542 }
2543 }
2544}
2545
2546void
2548{
2549 using namespace std::chrono_literals;
2550 if (om == OperatingMode::CONNECTED)
2551 {
2554 }
2555 else if (om == OperatingMode::SYNCING)
2556 {
2559 }
2560
2561 if ((om > OperatingMode::CONNECTED) && isBlocked())
2563
2564 if (mMode == om)
2565 return;
2566
2567 mMode = om;
2568
2569 accounting_.mode(om);
2570
2571 JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
2572 pubServer();
2573}
2574
// Process a validation received from a peer (or ourselves).
// Deduplicates concurrent handling of the same ledger hash via
// pendingValidations_, hands the validation to handleNewValidation()
// with the mutex released, publishes it to "validations" stream
// subscribers, and returns whether the caller should relay it.
// NOTE(review): the signature line and the acquisition of the lock
// referenced below were lost in extraction -- confirm against the
// full source.
2575bool
2578    std::string const& source)
2579{
2580    JLOG(m_journal.trace())
2581        << "recvValidation " << val->getLedgerHash() << " from " << source;
2582
2584    BypassAccept bypassAccept = BypassAccept::no;
2585    try
2586    {
        // If this ledger hash is already being handled on another
        // thread, skip the (expensive) accept path for this copy.
2587        if (pendingValidations_.contains(val->getLedgerHash()))
2588            bypassAccept = BypassAccept::yes;
2589        else
2590            pendingValidations_.insert(val->getLedgerHash());
        // Release the lock while running the handler; it is re-acquired
        // when `unlock` goes out of scope (scope_unlock is an inverse
        // RAII guard).
2591        scope_unlock unlock(lock);
2592        handleNewValidation(app_, val, source, bypassAccept, m_journal);
2593    }
    // Exceptions are logged and swallowed: a bad validation must not
    // take down validation processing.
2594    catch (std::exception const& e)
2595    {
2596        JLOG(m_journal.warn())
2597            << "Exception thrown for handling new validation "
2598            << val->getLedgerHash() << ": " << e.what();
2599    }
2600    catch (...)
2601    {
2602        JLOG(m_journal.warn())
2603            << "Unknown exception thrown for handling new validation "
2604            << val->getLedgerHash();
2605    }
    // Only the thread that inserted the pending marker removes it.
2606    if (bypassAccept == BypassAccept::no)
2607    {
2608        pendingValidations_.erase(val->getLedgerHash());
2609    }
2610    lock.unlock();
2611
2612    pubValidation(val);
2613
    // Lazily build the debug message only if debug logging is active.
2614    JLOG(m_journal.debug()) << [this, &val]() -> auto {
2616        ss << "VALIDATION: " << val->render() << " master_key: ";
2617        auto master = app_.validators().getTrustedKey(val->getSignerPublic());
2618        if (master)
2619        {
2620            ss << toBase58(TokenType::NodePublic, *master);
2621        }
2622        else
2623        {
2624            ss << "none";
2625        }
2626        return ss.str();
2627    }();
2628
2629    // We will always relay trusted validations; if configured, we will
2630    // also relay all untrusted validations.
2631    return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2632}
2633
// Return the consensus engine's state as JSON (the `true` argument
// requests the full/verbose form). Signature line lost in extraction;
// this is NetworkOPsImp::getConsensusInfo() -- TODO confirm.
2636{
2637    return mConsensus.getJson(true);
2638}
2639
// Build the JSON body served by the server_info / server_state RPCs.
//   human    - human-readable units (server_info) vs raw integers
//              (server_state)
//   admin    - include admin-only fields (node size, validator list
//              details, load, local fee factors)
//   counters - include perf-log and nodestore counters
// NOTE(review): several interior lines (e.g. the declaration of `info`,
// some value expressions) were lost in extraction; the visible
// structure is unchanged.
2641NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2642{
2644
2645    // System-level warnings
2646    {
2647        Json::Value warnings{Json::arrayValue};
2648        if (isAmendmentBlocked())
2649        {
2650            Json::Value& w = warnings.append(Json::objectValue);
2651            w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2652            w[jss::message] =
2653                "This server is amendment blocked, and must be updated to be "
2654                "able to stay in sync with the network.";
2655        }
2656        if (isUNLBlocked())
2657        {
2658            Json::Value& w = warnings.append(Json::objectValue);
2659            w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2660            w[jss::message] =
2661                "This server has an expired validator list. validators.txt "
2662                "may be incorrectly configured or some [validator_list_sites] "
2663                "may be unreachable.";
2664        }
        // Only admins are warned about unsupported amendments gaining
        // majority (upgrade before activation to avoid being blocked).
2665        if (admin && isAmendmentWarned())
2666        {
2667            Json::Value& w = warnings.append(Json::objectValue);
2668            w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2669            w[jss::message] =
2670                "One or more unsupported amendments have reached majority. "
2671                "Upgrade to the latest version before they are activated "
2672                "to avoid being amendment blocked.";
2673            if (auto const expected =
2675            {
2676                auto& d = w[jss::details] = Json::objectValue;
2677                d[jss::expected_date] = expected->time_since_epoch().count();
2678                d[jss::expected_date_UTC] = to_string(*expected);
2679            }
2680        }
2681
2682        if (warnings.size())
2683            info[jss::warnings] = std::move(warnings);
2684    }
2685
2686    // hostid: unique string describing the machine
2687    if (human)
2688        info[jss::hostid] = getHostId(admin);
2689
2690    // domain: if configured with a domain, report it:
2691    if (!app_.config().SERVER_DOMAIN.empty())
2692        info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2693
2694    info[jss::build_version] = BuildInfo::getVersionString();
2695
2696    info[jss::server_state] = strOperatingMode(admin);
2697
2698    info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2700
    // NOTE(review): the guard for this assignment is missing from this
    // view -- presumably "still waiting for a network ledger".
2702        info[jss::network_ledger] = "waiting";
2703
2704    info[jss::validation_quorum] =
2705        static_cast<Json::UInt>(app_.validators().quorum());
2706
2707    if (admin)
2708    {
        // Translate the configured node size to its human name.
2709        switch (app_.config().NODE_SIZE)
2710        {
2711            case 0:
2712                info[jss::node_size] = "tiny";
2713                break;
2714            case 1:
2715                info[jss::node_size] = "small";
2716                break;
2717            case 2:
2718                info[jss::node_size] = "medium";
2719                break;
2720            case 3:
2721                info[jss::node_size] = "large";
2722                break;
2723            case 4:
2724                info[jss::node_size] = "huge";
2725                break;
2726        }
2727
2728        auto when = app_.validators().expires();
2729
2730        if (!human)
2731        {
2732            if (when)
2733                info[jss::validator_list_expires] =
2734                    safe_cast<Json::UInt>(when->time_since_epoch().count());
2735            else
2736                info[jss::validator_list_expires] = 0;
2737        }
2738        else
2739        {
2740            auto& x = (info[jss::validator_list] = Json::objectValue);
2741
2742            x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2743
2744            if (when)
2745            {
                // time_point::max() denotes a list that never expires.
2746                if (*when == TimeKeeper::time_point::max())
2747                {
2748                    x[jss::expiration] = "never";
2749                    x[jss::status] = "active";
2750                }
2751                else
2752                {
2753                    x[jss::expiration] = to_string(*when);
2754
2755                    if (*when > app_.timeKeeper().now())
2756                        x[jss::status] = "active";
2757                    else
2758                        x[jss::status] = "expired";
2759                }
2760            }
2761            else
2762            {
2763                x[jss::status] = "unknown";
2764                x[jss::expiration] = "unknown";
2765            }
2766        }
2767
2768#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2769        {
2770            auto& x = (info[jss::git] = Json::objectValue);
2771#ifdef GIT_COMMIT_HASH
2772            x[jss::hash] = GIT_COMMIT_HASH;
2773#endif
2774#ifdef GIT_BRANCH
2775            x[jss::branch] = GIT_BRANCH;
2776#endif
2777        }
2778#endif
2779    }
2780    info[jss::io_latency_ms] =
2781        static_cast<Json::UInt>(app_.getIOLatency().count());
2782
2783    if (admin)
2784    {
2785        if (auto const localPubKey = app_.validators().localPublicKey();
2786            localPubKey && app_.getValidationPublicKey())
2787        {
2788            info[jss::pubkey_validator] =
2789                toBase58(TokenType::NodePublic, localPubKey.value());
2790        }
2791        else
2792        {
2793            info[jss::pubkey_validator] = "none";
2794        }
2795    }
2796
2797    if (counters)
2798    {
2799        info[jss::counters] = app_.getPerfLog().countersJson();
2800
2801        Json::Value nodestore(Json::objectValue);
2802        app_.getNodeStore().getCountsJson(nodestore);
2803        info[jss::counters][jss::nodestore] = nodestore;
2804        info[jss::current_activities] = app_.getPerfLog().currentJson();
2805    }
2806
2807    info[jss::pubkey_node] =
2809
2810    info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2811
    // NOTE(review): the guard for this assignment is missing from this
    // view -- presumably isAmendmentBlocked().
2813        info[jss::amendment_blocked] = true;
2814
2815    auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2816
2817    if (fp != 0)
2818        info[jss::fetch_pack] = Json::UInt(fp);
2819
2820    info[jss::peers] = Json::UInt(app_.overlay().size());
2821
2822    Json::Value lastClose = Json::objectValue;
2823    lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2824
2825    if (human)
2826    {
2827        lastClose[jss::converge_time_s] =
2829    }
2830    else
2831    {
2832        lastClose[jss::converge_time] =
2834    }
2835
2836    info[jss::last_close] = lastClose;
2837
2838    // info[jss::consensus] = mConsensus.getJson();
2839
2840    if (admin)
2841        info[jss::load] = m_job_queue.getJson();
2842
2843    if (auto const netid = app_.overlay().networkID())
2844        info[jss::network_id] = static_cast<Json::UInt>(*netid);
2845
2846    auto const escalationMetrics =
2848
2849    auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2850    auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2851    /* Scale the escalated fee level to unitless "load factor".
2852       In practice, this just strips the units, but it will continue
2853       to work correctly if either base value ever changes. */
2854    auto const loadFactorFeeEscalation =
2855        mulDiv(
2856            escalationMetrics.openLedgerFeeLevel,
2857            loadBaseServer,
2858            escalationMetrics.referenceFeeLevel)
2860
    // Report the larger of the server load factor and the fee-escalation
    // factor.
2861    auto const loadFactor = std::max(
2862        safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2863
2864    if (!human)
2865    {
2866        info[jss::load_base] = loadBaseServer;
2867        info[jss::load_factor] = trunc32(loadFactor);
2868        info[jss::load_factor_server] = loadFactorServer;
2869
2870        /* Json::Value doesn't support uint64, so clamp to max
2871           uint32 value. This is mostly theoretical, since there
2872           probably isn't enough extant XRP to drive the factor
2873           that high.
2874        */
2875        info[jss::load_factor_fee_escalation] =
2876            escalationMetrics.openLedgerFeeLevel.jsonClipped();
2877        info[jss::load_factor_fee_queue] =
2878            escalationMetrics.minProcessingFeeLevel.jsonClipped();
2879        info[jss::load_factor_fee_reference] =
2880            escalationMetrics.referenceFeeLevel.jsonClipped();
2881    }
2882    else
2883    {
2884        info[jss::load_factor] =
2885            static_cast<double>(loadFactor) / loadBaseServer;
2886
2887        if (loadFactorServer != loadFactor)
2888            info[jss::load_factor_server] =
2889                static_cast<double>(loadFactorServer) / loadBaseServer;
2890
2891        if (admin)
2892        {
            // NOTE(review): `fee`'s declaration (local fee fetch) is
            // missing from this view.
2894            if (fee != loadBaseServer)
2895                info[jss::load_factor_local] =
2896                    static_cast<double>(fee) / loadBaseServer;
2897            fee = app_.getFeeTrack().getRemoteFee();
2898            if (fee != loadBaseServer)
2899                info[jss::load_factor_net] =
2900                    static_cast<double>(fee) / loadBaseServer;
2901            fee = app_.getFeeTrack().getClusterFee();
2902            if (fee != loadBaseServer)
2903                info[jss::load_factor_cluster] =
2904                    static_cast<double>(fee) / loadBaseServer;
2905        }
2906        if (escalationMetrics.openLedgerFeeLevel !=
2907                escalationMetrics.referenceFeeLevel &&
2908            (admin || loadFactorFeeEscalation != loadFactor))
2909            info[jss::load_factor_fee_escalation] =
2910                escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2911                    escalationMetrics.referenceFeeLevel);
2912        if (escalationMetrics.minProcessingFeeLevel !=
2913            escalationMetrics.referenceFeeLevel)
2914            info[jss::load_factor_fee_queue] =
2915                escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2916                    escalationMetrics.referenceFeeLevel);
2917    }
2918
    // Prefer the validated ledger; fall back to the last closed ledger.
2919    bool valid = false;
2920    auto lpClosed = m_ledgerMaster.getValidatedLedger();
2921
2922    if (lpClosed)
2923        valid = true;
2924    else
2925        lpClosed = m_ledgerMaster.getClosedLedger();
2926
2927    if (lpClosed)
2928    {
2929        XRPAmount const baseFee = lpClosed->fees().base;
2931        l[jss::seq] = Json::UInt(lpClosed->info().seq);
2932        l[jss::hash] = to_string(lpClosed->info().hash);
2933
2934        if (!human)
2935        {
2936            l[jss::base_fee] = baseFee.jsonClipped();
2937            l[jss::reserve_base] =
2938                lpClosed->fees().accountReserve(0).jsonClipped();
2939            l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2940            l[jss::close_time] = Json::Value::UInt(
2941                lpClosed->info().closeTime.time_since_epoch().count());
2942        }
2943        else
2944        {
2945            l[jss::base_fee_xrp] = baseFee.decimalXRP();
2946            l[jss::reserve_base_xrp] =
2947                lpClosed->fees().accountReserve(0).decimalXRP();
2948            l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2949
            // Only report a close-time offset of a minute or more.
2950            if (auto const closeOffset = app_.timeKeeper().closeOffset();
2951                std::abs(closeOffset.count()) >= 60)
2952                l[jss::close_time_offset] =
2953                    static_cast<std::uint32_t>(closeOffset.count());
2954
            // Ages at or beyond this threshold are reported as 0 (treated
            // as nonsense values).
2955            constexpr std::chrono::seconds highAgeThreshold{1000000};
2957            {
2958                auto const age = m_ledgerMaster.getValidatedLedgerAge();
2959                l[jss::age] =
2960                    Json::UInt(age < highAgeThreshold ? age.count() : 0);
2961            }
2962            else
2963            {
2964                auto lCloseTime = lpClosed->info().closeTime;
2965                auto closeTime = app_.timeKeeper().closeTime();
2966                if (lCloseTime <= closeTime)
2967                {
2968                    using namespace std::chrono_literals;
2969                    auto age = closeTime - lCloseTime;
2970                    l[jss::age] =
2971                        Json::UInt(age < highAgeThreshold ? age.count() : 0);
2972                }
2973            }
2974        }
2975
2976        if (valid)
2977            info[jss::validated_ledger] = l;
2978        else
2979            info[jss::closed_ledger] = l;
2980
2981        auto lpPublished = m_ledgerMaster.getPublishedLedger();
2982        if (!lpPublished)
2983            info[jss::published_ledger] = "none";
2984        else if (lpPublished->info().seq != lpClosed->info().seq)
2985            info[jss::published_ledger] = lpPublished->info().seq;
2986    }
2987
2988    accounting_.json(info);
2989    info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2990    info[jss::jq_trans_overflow] =
2992    info[jss::peer_disconnects] =
2994    info[jss::peer_disconnects_resources] =
2996
2997    // This array must be sorted in increasing order.
2998    static constexpr std::array<std::string_view, 7> protocols{
2999        "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
3000    static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
3001    {
3003        for (auto const& port : app_.getServerHandler().setup().ports)
3004        {
3005            // Don't publish admin ports for non-admin users
3006            if (!admin &&
3007                !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
3008                  port.admin_user.empty() && port.admin_password.empty()))
3009                continue;
            // Keep only this port's protocols that appear in the known,
            // publishable `protocols` list (set-intersection on sorted
            // ranges; the declaration of `proto` is missing from this
            // view).
3012                std::begin(port.protocol),
3013                std::end(port.protocol),
3014                std::begin(protocols),
3015                std::end(protocols),
3016                std::back_inserter(proto));
3017            if (!proto.empty())
3018            {
3019                auto& jv = ports.append(Json::Value(Json::objectValue));
3020                jv[jss::port] = std::to_string(port.port);
3021                jv[jss::protocol] = Json::Value{Json::arrayValue};
3022                for (auto const& p : proto)
3023                    jv[jss::protocol].append(p);
3024            }
3025        }
3026
        // Report the gRPC port (configured separately from HTTP/WS ports).
3027        if (app_.config().exists(SECTION_PORT_GRPC))
3028        {
3029            auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
3030            auto const optPort = grpcSection.get("port");
3031            if (optPort && grpcSection.get("ip"))
3032            {
3033                auto& jv = ports.append(Json::Value(Json::objectValue));
3034                jv[jss::port] = *optPort;
3035                jv[jss::protocol] = Json::Value{Json::arrayValue};
3036                jv[jss::protocol].append("grpc");
3037            }
3038        }
3039        info[jss::ports] = std::move(ports);
3040    }
3041
3042    return info;
3043}
3044
3045void
3050
3056
// Publish a proposed (not yet validated) transaction to subscribers of
// the real-time transactions stream, then fan out to per-account
// real-time subscribers. Inner transactions of a Batch are never
// published when the Batch amendment is enabled.
// NOTE(review): the signature line and the stream-map lock acquisition
// were lost in extraction -- confirm against the full source.
3057void
3059    std::shared_ptr<ReadView const> const& ledger,
3060    std::shared_ptr<STTx const> const& transaction,
3061    TER result)
3062{
3063    // never publish an inner txn inside a batch txn
3064    if (transaction->isFlag(tfInnerBatchTxn) &&
3065        ledger->rules().enabled(featureBatch))
3066        return;
3067
    // Build the API-version-dependent JSON forms once, up front.
3068    MultiApiJson jvObj =
3069        transJson(transaction, result, false, ledger, std::nullopt);
3070
3071    {
3073
        // Send to each live subscriber; prune entries whose weak_ptr
        // has expired.
3074        auto it = mStreamMaps[sRTTransactions].begin();
3075        while (it != mStreamMaps[sRTTransactions].end())
3076        {
3077            InfoSub::pointer p = it->second.lock();
3078
3079            if (p)
3080            {
3081                jvObj.visit(
3082                    p->getApiVersion(),  //
3083                    [&](Json::Value const& jv) { p->send(jv, true); });
3084                ++it;
3085            }
3086            else
3087            {
3088                it = mStreamMaps[sRTTransactions].erase(it);
3089            }
3090        }
3091    }
3092
3093    pubProposedAccountTransaction(ledger, transaction, result);
3094}
3095
// Publish a closed/validated ledger: the "ledger" stream header, the
// "book_changes" stream, and every transaction in the ledger (via
// pubAcceptedTransaction). Also starts any delayed account-history
// subscriptions on the first ledger published.
// NOTE(review): the signature line, the AcceptedLedger fetch assignment
// and the lock acquisitions were lost in extraction -- confirm against
// the full source.
3096void
3098{
3099    // Ledgers are published only when they acquire sufficient validations
3100    // Holes are filled across connection loss or other catastrophe
3101
    // Reuse a cached AcceptedLedger if available; otherwise build and
    // canonicalize one.
3103        app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
3104    if (!alpAccepted)
3105    {
3106        alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
3107        app_.getAcceptedLedgerCache().canonicalize_replace_client(
3108            lpAccepted->info().hash, alpAccepted);
3109    }
3110
3111    XRPL_ASSERT(
3112        alpAccepted->getLedger().get() == lpAccepted.get(),
3113        "ripple::NetworkOPsImp::pubLedger : accepted input");
3114
3115    {
3116        JLOG(m_journal.debug())
3117            << "Publishing ledger " << lpAccepted->info().seq << " "
3118            << lpAccepted->info().hash;
3119
3121
3122        if (!mStreamMaps[sLedger].empty())
3123        {
            // Build the single "ledgerClosed" message sent to every
            // ledger-stream subscriber.
3125
3126            jvObj[jss::type] = "ledgerClosed";
3127            jvObj[jss::ledger_index] = lpAccepted->info().seq;
3128            jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
3129            jvObj[jss::ledger_time] = Json::Value::UInt(
3130                lpAccepted->info().closeTime.time_since_epoch().count());
3131
3132            jvObj[jss::network_id] = app_.config().NETWORK_ID;
3133
            // fee_ref is deprecated; only emitted pre-XRPFees.
3134            if (!lpAccepted->rules().enabled(featureXRPFees))
3135                jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3136            jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
3137            jvObj[jss::reserve_base] =
3138                lpAccepted->fees().accountReserve(0).jsonClipped();
3139            jvObj[jss::reserve_inc] =
3140                lpAccepted->fees().increment.jsonClipped();
3141
3142            jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
3143
3145            {
3146                jvObj[jss::validated_ledgers] =
3148            }
3149
            // Send to live subscribers; drop expired weak_ptr entries.
3150            auto it = mStreamMaps[sLedger].begin();
3151            while (it != mStreamMaps[sLedger].end())
3152            {
3153                InfoSub::pointer p = it->second.lock();
3154                if (p)
3155                {
3156                    p->send(jvObj, true);
3157                    ++it;
3158                }
3159                else
3160                    it = mStreamMaps[sLedger].erase(it);
3161            }
3162        }
3163
3164        if (!mStreamMaps[sBookChanges].empty())
3165        {
3166            Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
3167
3168            auto it = mStreamMaps[sBookChanges].begin();
3169            while (it != mStreamMaps[sBookChanges].end())
3170            {
3171                InfoSub::pointer p = it->second.lock();
3172                if (p)
3173                {
3174                    p->send(jvObj, true);
3175                    ++it;
3176                }
3177                else
3178                    it = mStreamMaps[sBookChanges].erase(it);
3179            }
3180        }
3181
3182        {
            // One-time setup: on the first published ledger, start any
            // account-history subscriptions that were deferred until a
            // validated ledger existed.
3183            static bool firstTime = true;
3184            if (firstTime)
3185            {
3186                // First validated ledger, start delayed SubAccountHistory
3187                firstTime = false;
3188                for (auto& outer : mSubAccountHistory)
3189                {
3190                    for (auto& inner : outer.second)
3191                    {
3192                        auto& subInfo = inner.second;
3193                        if (subInfo.index_->separationLedgerSeq_ == 0)
3194                        {
3196                                alpAccepted->getLedger(), subInfo);
3197                        }
3198                    }
3199                }
3200            }
3201        }
3202    }
3203
3204    // Don't lock since pubAcceptedTransaction is locking.
3205    for (auto const& accTx : *alpAccepted)
3206    {
3207        JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
        // The final argument marks the last transaction of the ledger
        // (account_history_boundary).
3209            lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3210    }
3211}
3212
// Check whether the published fee summary has changed and, if so,
// schedule a client job that re-publishes server state via pubServer().
// NOTE(review): the signature line and the beginning of the
// ServerFeeSummary construction were lost in extraction.
3213void
3215{
3217        app_.openLedger().current()->fees().base,
3219        app_.getFeeTrack()};
3220
3221    // only schedule the job if something has changed
3222    if (f != mLastFeeSummary)
3223    {
3225            jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
3226                pubServer();
3227            });
3228    }
3229}
3230
// Schedule a client job to publish a consensus-phase change to
// "consensus" stream subscribers.
// NOTE(review): the signature line and the start of the addJob call
// were lost in extraction.
3231void
3233{
3236        "reportConsensusStateChange->pubConsensus",
3237        [this, phase]() { pubConsensus(phase); });
3238}
3239
// Sweep the held local-transaction set against the given view,
// discarding entries that can no longer apply. (Signature line lost in
// extraction.)
3240inline void
3242{
3243    m_localTX->sweep(view);
3244}
// Number of transactions currently held in the local-transaction set.
// (Signature line lost in extraction.)
3245inline std::size_t
3247{
3248    return m_localTX->size();
3249}
3250
// This routine should only be used to publish accepted or validated
// transactions.
//
// Builds the API-version-dependent JSON representations of a transaction
// for the subscription streams: base transaction JSON, optional metadata
// (with delivered_amount / NFT / MPT synthetic fields), CTID, ledger
// linkage, engine result, and (for offers) owner_funds. The per-version
// differences (tx_json vs transaction, hash placement, DeliverMax) are
// applied in the visit() at the end.
// NOTE(review): the signature lines and a few declarations (jvObj, the
// ctid assignment) were lost in extraction.
3255    std::shared_ptr<STTx const> const& transaction,
3256    TER result,
3257    bool validated,
3258    std::shared_ptr<ReadView const> const& ledger,
3260{
3262    std::string sToken;
3263    std::string sHuman;
3264
3265    transResultInfo(result, sToken, sHuman);
3266
3267    jvObj[jss::type] = "transaction";
3268    // NOTE jvObj is not a finished object for either API version. After
3269    // it's populated, we need to finish it for a specific API version. This is
3270    // done in a loop, near the end of this function.
3271    jvObj[jss::transaction] =
3272        transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3273
3274    if (meta)
3275    {
3276        jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
3278            jvObj[jss::meta], *ledger, transaction, meta->get());
3279        RPC::insertNFTSyntheticInJson(jvObj, transaction, meta->get());
3281            jvObj[jss::meta], transaction, meta->get());
3282    }
3283
3284    // add CTID where the needed data for it exists
3285    if (auto const& lookup = ledger->txRead(transaction->getTransactionID());
3286        lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3287    {
3288        uint32_t const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
        // Prefer the transaction's own NetworkID field when present.
3289        uint32_t netID = app_.config().NETWORK_ID;
3290        if (transaction->isFieldPresent(sfNetworkID))
3291            netID = transaction->getFieldU32(sfNetworkID);
3292
3294                RPC::encodeCTID(ledger->info().seq, txnSeq, netID);
3295            ctid)
3296            jvObj[jss::ctid] = *ctid;
3297    }
3298    if (!ledger->open())
3299        jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3300
3301    if (validated)
3302    {
3303        jvObj[jss::ledger_index] = ledger->info().seq;
3304        jvObj[jss::transaction][jss::date] =
3305            ledger->info().closeTime.time_since_epoch().count();
3306        jvObj[jss::validated] = true;
3307        jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3308
3309        // WRITEME: Put the account next seq here
3310    }
3311    else
3312    {
3313        jvObj[jss::validated] = false;
3314        jvObj[jss::ledger_current_index] = ledger->info().seq;
3315    }
3316
3317    jvObj[jss::status] = validated ? "closed" : "proposed";
3318    jvObj[jss::engine_result] = sToken;
3319    jvObj[jss::engine_result_code] = result;
3320    jvObj[jss::engine_result_message] = sHuman;
3321
3322    if (transaction->getTxnType() == ttOFFER_CREATE)
3323    {
3324        auto const account = transaction->getAccountID(sfAccount);
3325        auto const amount = transaction->getFieldAmount(sfTakerGets);
3326
3327        // If the offer create is not self funded then add the owner balance
3328        if (account != amount.issue().account)
3329        {
3330            auto const ownerFunds = accountFunds(
3331                *ledger,
3332                account,
3333                amount,
3335                app_.journal("View"));
3336            jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3337        }
3338    }
3339
    // Finish the object per API version: v2+ renames "transaction" to
    // "tx_json" and hoists the hash to top level.
3340    std::string const hash = to_string(transaction->getTransactionID());
3341    MultiApiJson multiObj{jvObj};
3343        multiObj.visit(),  //
3344        [&]<unsigned Version>(
3346            RPC::insertDeliverMax(
3347                jvTx[jss::transaction], transaction->getTxnType(), Version);
3348
3349            if constexpr (Version > 1)
3350            {
3351                jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3352                jvTx[jss::hash] = hash;
3353            }
3354            else
3355            {
3356                jvTx[jss::transaction][jss::hash] = hash;
3357            }
3358        });
3359
3360    return multiObj;
3361}
3362
// Publish an accepted (validated) transaction to the "transactions" and
// real-time transactions streams, feed it to the order book processor on
// success, and fan out to per-account subscribers.
// NOTE(review): the signature line and the stream-map lock acquisition
// were lost in extraction.
3363void
3365    std::shared_ptr<ReadView const> const& ledger,
3366    AcceptedLedgerTx const& transaction,
3367    bool last)
3368{
3369    auto const& stTxn = transaction.getTxn();
3370
3371    // Create two different Json objects, for different API versions
3372    auto const metaRef = std::ref(transaction.getMeta());
3373    auto const trResult = transaction.getResult();
3374    MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3375
3376    {
3378
        // "transactions" stream: send to live subscribers, prune expired
        // weak_ptr entries.
3379        auto it = mStreamMaps[sTransactions].begin();
3380        while (it != mStreamMaps[sTransactions].end())
3381        {
3382            InfoSub::pointer p = it->second.lock();
3383
3384            if (p)
3385            {
3386                jvObj.visit(
3387                    p->getApiVersion(),  //
3388                    [&](Json::Value const& jv) { p->send(jv, true); });
3389                ++it;
3390            }
3391            else
3392                it = mStreamMaps[sTransactions].erase(it);
3393        }
3394
        // Real-time stream subscribers also receive validated
        // transactions.
3395        it = mStreamMaps[sRTTransactions].begin();
3396
3397        while (it != mStreamMaps[sRTTransactions].end())
3398        {
3399            InfoSub::pointer p = it->second.lock();
3400
3401            if (p)
3402            {
3403                jvObj.visit(
3404                    p->getApiVersion(),  //
3405                    [&](Json::Value const& jv) { p->send(jv, true); });
3406                ++it;
3407            }
3408            else
3409                it = mStreamMaps[sRTTransactions].erase(it);
3410        }
3411    }
3412
    // Only successful transactions affect order books.
3413    if (transaction.getResult() == tesSUCCESS)
3414        app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3415
3416    pubAccountTransaction(ledger, transaction, last);
3417}
3418
// Fan an accepted transaction out to per-account subscribers: the
// "accounts" and real-time per-account maps, plus account_history
// subscriptions. Collects live sinks under the lock, then sends outside
// it. `last` marks the final transaction of the ledger
// (account_history_boundary).
// NOTE(review): the signature line, the declaration of the `notify`
// set, and the lock acquisition were lost in extraction.
3419void
3421    std::shared_ptr<ReadView const> const& ledger,
3422    AcceptedLedgerTx const& transaction,
3423    bool last)
3424{
3426    int iProposed = 0;
3427    int iAccepted = 0;
3428
3429    std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3430    auto const currLedgerSeq = ledger->seq();
3431    {
3433
3434        if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3436        {
3437            for (auto const& affectedAccount : transaction.getAffected())
3438            {
                // Real-time per-account subscribers.
3439                if (auto simiIt = mSubRTAccount.find(affectedAccount);
3440                    simiIt != mSubRTAccount.end())
3441                {
3442                    auto it = simiIt->second.begin();
3443
3444                    while (it != simiIt->second.end())
3445                    {
3446                        InfoSub::pointer p = it->second.lock();
3447
3448                        if (p)
3449                        {
3450                            notify.insert(p);
3451                            ++it;
3452                            ++iProposed;
3453                        }
3454                        else
3455                            it = simiIt->second.erase(it);
3456                    }
3457                }
3458
                // Standard per-account subscribers.
3459                if (auto simiIt = mSubAccount.find(affectedAccount);
3460                    simiIt != mSubAccount.end())
3461                {
3462                    auto it = simiIt->second.begin();
3463                    while (it != simiIt->second.end())
3464                    {
3465                        InfoSub::pointer p = it->second.lock();
3466
3467                        if (p)
3468                        {
3469                            notify.insert(p);
3470                            ++it;
3471                            ++iAccepted;
3472                        }
3473                        else
3474                            it = simiIt->second.erase(it);
3475                    }
3476                }
3477
                // account_history subscribers: only forward ledgers past
                // the subscription's separation point; earlier ledgers
                // are delivered by the backfill job.
3478                if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3479                    histoIt != mSubAccountHistory.end())
3480                {
3481                    auto& subs = histoIt->second;
3482                    auto it = subs.begin();
3483                    while (it != subs.end())
3484                    {
3485                        SubAccountHistoryInfoWeak const& info = it->second;
3486                        if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3487                        {
3488                            ++it;
3489                            continue;
3490                        }
3491
3492                        if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3493                        {
3494                            accountHistoryNotify.emplace_back(
3495                                SubAccountHistoryInfo{isSptr, info.index_});
3496                            ++it;
3497                        }
3498                        else
3499                        {
3500                            it = subs.erase(it);
3501                        }
3502                    }
3503                    if (subs.empty())
3504                        mSubAccountHistory.erase(histoIt);
3505                }
3506            }
3507        }
3508    }
3509
3510    JLOG(m_journal.trace())
3511        << "pubAccountTransaction: "
3512        << "proposed=" << iProposed << ", accepted=" << iAccepted;
3513
3514    if (!notify.empty() || !accountHistoryNotify.empty())
3515    {
3516        auto const& stTxn = transaction.getTxn();
3517
3518        // Create two different Json objects, for different API versions
3519        auto const metaRef = std::ref(transaction.getMeta());
3520        auto const trResult = transaction.getResult();
3521        MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3522
3523        for (InfoSub::ref isrListener : notify)
3524        {
3525            jvObj.visit(
3526                isrListener->getApiVersion(),  //
3527                [&](Json::Value const& jv) { isrListener->send(jv, true); });
3528        }
3529
        // Boundary/first/index fields are only added for the
        // account_history deliveries below.
3530        if (last)
3531            jvObj.set(jss::account_history_boundary, true);
3532
3533        XRPL_ASSERT(
3534            jvObj.isMember(jss::account_history_tx_stream) ==
3536            "ripple::NetworkOPsImp::pubAccountTransaction : "
3537            "account_history_tx_stream not set");
3538        for (auto& info : accountHistoryNotify)
3539        {
3540            auto& index = info.index_;
3541            if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3542                jvObj.set(jss::account_history_tx_first, true);
3543
            // Forward (live) deliveries use a monotonically increasing
            // positive index.
3544            jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3545
3546            jvObj.visit(
3547                info.sink_->getApiVersion(),  //
3548                [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3549        }
3550    }
3551}
3552
// Fan a proposed (unvalidated) transaction out to real-time per-account
// subscribers. Mirrors pubAccountTransaction but only consults
// mSubRTAccount, and returns early when it is empty.
// NOTE(review): the signature lines, the declaration of the `notify`
// set, and the lock acquisition were lost in extraction.
3553void
3555    std::shared_ptr<ReadView const> const& ledger,
3557    TER result)
3558{
3560    int iProposed = 0;
3561
3562    std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3563
3564    {
3566
3567        if (mSubRTAccount.empty())
3568            return;
3569
3570        if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3572        {
3573            for (auto const& affectedAccount : tx->getMentionedAccounts())
3574            {
3575                if (auto simiIt = mSubRTAccount.find(affectedAccount);
3576                    simiIt != mSubRTAccount.end())
3577                {
3578                    auto it = simiIt->second.begin();
3579
                    // Send later (outside the lock); prune expired
                    // subscriptions now.
3580                    while (it != simiIt->second.end())
3581                    {
3582                        InfoSub::pointer p = it->second.lock();
3583
3584                        if (p)
3585                        {
3586                            notify.insert(p);
3587                            ++it;
3588                            ++iProposed;
3589                        }
3590                        else
3591                            it = simiIt->second.erase(it);
3592                    }
3593                }
3594            }
3595        }
3596    }
3597
3598    JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3599
3600    if (!notify.empty() || !accountHistoryNotify.empty())
3601    {
3602        // Create two different Json objects, for different API versions
3603        MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3604
3605        for (InfoSub::ref isrListener : notify)
3606            jvObj.visit(
3607                isrListener->getApiVersion(),  //
3608                [&](Json::Value const& jv) { isrListener->send(jv, true); });
3609
3610        XRPL_ASSERT(
3611            jvObj.isMember(jss::account_history_tx_stream) ==
3613            "ripple::NetworkOPs::pubProposedAccountTransaction : "
3614            "account_history_tx_stream not set");
        // NOTE(review): accountHistoryNotify is never populated in this
        // function, so this loop appears to be dead code kept for
        // symmetry with pubAccountTransaction -- confirm.
3615        for (auto& info : accountHistoryNotify)
3616        {
3617            auto& index = info.index_;
3618            if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3619                jvObj.set(jss::account_history_tx_first, true);
3620            jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3621            jvObj.visit(
3622                info.sink_->getApiVersion(),  //
3623                [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3624        }
3625    }
3626}
3627
3628//
3629// Monitoring
3630//
3631
// Subscribe a listener to transaction notifications for a set of
// accounts. `rt` selects the real-time (proposed) map versus the
// accepted map. Registration is recorded both on the InfoSub itself and
// in the server-side map keyed by account, then by subscriber sequence.
// NOTE(review): the signature line and the mSubLock acquisition were
// lost in extraction.
3632void
3634    InfoSub::ref isrListener,
3635    hash_set<AccountID> const& vnaAccountIDs,
3636    bool rt)
3637{
3638    SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3639
3640    for (auto const& naAccountID : vnaAccountIDs)
3641    {
3642        JLOG(m_journal.trace())
3643            << "subAccount: account: " << toBase58(naAccountID);
3644
3645        isrListener->insertSubAccountInfo(naAccountID, rt);
3646    }
3647
3649
3650    for (auto const& naAccountID : vnaAccountIDs)
3651    {
3652        auto simIterator = subMap.find(naAccountID);
3653        if (simIterator == subMap.end())
3654        {
3655            // Not found, note that account has a new single listener.
3656            SubMapType usisElement;
3657            usisElement[isrListener->getSeq()] = isrListener;
3658            // VFALCO NOTE This is making a needless copy of naAccountID
3659            subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3660        }
3661        else
3662        {
3663            // Found, note that the account has another listener.
3664            simIterator->second[isrListener->getSeq()] = isrListener;
3665        }
3666    }
3667}
3668
// Unsubscribe a listener from account notifications: removes the
// registrations from the InfoSub itself, then from the server-side map
// via unsubAccountInternal. (Signature line lost in extraction.)
3669void
3671    InfoSub::ref isrListener,
3672    hash_set<AccountID> const& vnaAccountIDs,
3673    bool rt)
3674{
3675    for (auto const& naAccountID : vnaAccountIDs)
3676    {
3677        // Remove from the InfoSub
3678        isrListener->deleteSubAccountInfo(naAccountID, rt);
3679    }
3680
3681    // Remove from the server
3682    unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3683}
3684
// Remove a subscriber (identified by its sequence number) from the
// server-side per-account map, dropping the whole account entry when no
// subscribers remain. (Signature line and the mSubLock acquisition were
// lost in extraction.)
3685void
3687    std::uint64_t uSeq,
3688    hash_set<AccountID> const& vnaAccountIDs,
3689    bool rt)
3690{
3692
3693    SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3694
3695    for (auto const& naAccountID : vnaAccountIDs)
3696    {
3697        auto simIterator = subMap.find(naAccountID);
3698
3699        if (simIterator != subMap.end())
3700        {
3701            // Found
3702            simIterator->second.erase(uSeq);
3703
3704            if (simIterator->second.empty())
3705            {
3706                // Don't need hash entry.
3707                subMap.erase(simIterator);
3708            }
3709        }
3710    }
3711}
3712
3713void
3715{
3716 enum DatabaseType { Sqlite, None };
3717 static auto const databaseType = [&]() -> DatabaseType {
3718 // Use a dynamic_cast to return DatabaseType::None
3719 // on failure.
3720 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3721 {
3722 return DatabaseType::Sqlite;
3723 }
3724 return DatabaseType::None;
3725 }();
3726
3727 if (databaseType == DatabaseType::None)
3728 {
3729 JLOG(m_journal.error())
3730 << "AccountHistory job for account "
3731 << toBase58(subInfo.index_->accountId_) << " no database";
3732 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3733 {
3734 sptr->send(rpcError(rpcINTERNAL), true);
3735 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3736 }
3737 return;
3738 }
3739
3742 "AccountHistoryTxStream",
3743 [this, dbType = databaseType, subInfo]() {
3744 auto const& accountId = subInfo.index_->accountId_;
3745 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3746 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3747
3748 JLOG(m_journal.trace())
3749 << "AccountHistory job for account " << toBase58(accountId)
3750 << " started. lastLedgerSeq=" << lastLedgerSeq;
3751
3752 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3753 std::shared_ptr<TxMeta> const& meta) -> bool {
3754 /*
3755 * genesis account: first tx is the one with seq 1
3756 * other account: first tx is the one created the account
3757 */
3758 if (accountId == genesisAccountId)
3759 {
3760 auto stx = tx->getSTransaction();
3761 if (stx->getAccountID(sfAccount) == accountId &&
3762 stx->getSeqValue() == 1)
3763 return true;
3764 }
3765
3766 for (auto& node : meta->getNodes())
3767 {
3768 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3769 continue;
3770
3771 if (node.isFieldPresent(sfNewFields))
3772 {
3773 if (auto inner = dynamic_cast<STObject const*>(
3774 node.peekAtPField(sfNewFields));
3775 inner)
3776 {
3777 if (inner->isFieldPresent(sfAccount) &&
3778 inner->getAccountID(sfAccount) == accountId)
3779 {
3780 return true;
3781 }
3782 }
3783 }
3784 }
3785
3786 return false;
3787 };
3788
3789 auto send = [&](Json::Value const& jvObj,
3790 bool unsubscribe) -> bool {
3791 if (auto sptr = subInfo.sinkWptr_.lock())
3792 {
3793 sptr->send(jvObj, true);
3794 if (unsubscribe)
3795 unsubAccountHistory(sptr, accountId, false);
3796 return true;
3797 }
3798
3799 return false;
3800 };
3801
3802 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3803 bool unsubscribe) -> bool {
3804 if (auto sptr = subInfo.sinkWptr_.lock())
3805 {
3806 jvObj.visit(
3807 sptr->getApiVersion(), //
3808 [&](Json::Value const& jv) { sptr->send(jv, true); });
3809
3810 if (unsubscribe)
3811 unsubAccountHistory(sptr, accountId, false);
3812 return true;
3813 }
3814
3815 return false;
3816 };
3817
3818 auto getMoreTxns =
3819 [&](std::uint32_t minLedger,
3820 std::uint32_t maxLedger,
3825 switch (dbType)
3826 {
3827 case Sqlite: {
3828 auto db = static_cast<SQLiteDatabase*>(
3831 accountId, minLedger, maxLedger, marker, 0, true};
3832 return db->newestAccountTxPage(options);
3833 }
3834 default: {
3835 UNREACHABLE(
3836 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3837 "getMoreTxns : invalid database type");
3838 return {};
3839 }
3840 }
3841 };
3842
3843 /*
3844 * search backward until the genesis ledger or asked to stop
3845 */
3846 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3847 {
3848 int feeChargeCount = 0;
3849 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3850 {
3851 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3852 ++feeChargeCount;
3853 }
3854 else
3855 {
3856 JLOG(m_journal.trace())
3857 << "AccountHistory job for account "
3858 << toBase58(accountId) << " no InfoSub. Fee charged "
3859 << feeChargeCount << " times.";
3860 return;
3861 }
3862
3863 // try to search in 1024 ledgers till reaching genesis ledgers
3864 auto startLedgerSeq =
3865 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3866 JLOG(m_journal.trace())
3867 << "AccountHistory job for account " << toBase58(accountId)
3868 << ", working on ledger range [" << startLedgerSeq << ","
3869 << lastLedgerSeq << "]";
3870
3871 auto haveRange = [&]() -> bool {
3872 std::uint32_t validatedMin = UINT_MAX;
3873 std::uint32_t validatedMax = 0;
3874 auto haveSomeValidatedLedgers =
3876 validatedMin, validatedMax);
3877
3878 return haveSomeValidatedLedgers &&
3879 validatedMin <= startLedgerSeq &&
3880 lastLedgerSeq <= validatedMax;
3881 }();
3882
3883 if (!haveRange)
3884 {
3885 JLOG(m_journal.debug())
3886 << "AccountHistory reschedule job for account "
3887 << toBase58(accountId) << ", incomplete ledger range ["
3888 << startLedgerSeq << "," << lastLedgerSeq << "]";
3890 return;
3891 }
3892
3894 while (!subInfo.index_->stopHistorical_)
3895 {
3896 auto dbResult =
3897 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3898 if (!dbResult)
3899 {
3900 JLOG(m_journal.debug())
3901 << "AccountHistory job for account "
3902 << toBase58(accountId) << " getMoreTxns failed.";
3903 send(rpcError(rpcINTERNAL), true);
3904 return;
3905 }
3906
3907 auto const& txns = dbResult->first;
3908 marker = dbResult->second;
3909 size_t num_txns = txns.size();
3910 for (size_t i = 0; i < num_txns; ++i)
3911 {
3912 auto const& [tx, meta] = txns[i];
3913
3914 if (!tx || !meta)
3915 {
3916 JLOG(m_journal.debug())
3917 << "AccountHistory job for account "
3918 << toBase58(accountId) << " empty tx or meta.";
3919 send(rpcError(rpcINTERNAL), true);
3920 return;
3921 }
3922 auto curTxLedger =
3924 tx->getLedger());
3925 if (!curTxLedger)
3926 {
3927 JLOG(m_journal.debug())
3928 << "AccountHistory job for account "
3929 << toBase58(accountId) << " no ledger.";
3930 send(rpcError(rpcINTERNAL), true);
3931 return;
3932 }
3934 tx->getSTransaction();
3935 if (!stTxn)
3936 {
3937 JLOG(m_journal.debug())
3938 << "AccountHistory job for account "
3939 << toBase58(accountId)
3940 << " getSTransaction failed.";
3941 send(rpcError(rpcINTERNAL), true);
3942 return;
3943 }
3944
3945 auto const mRef = std::ref(*meta);
3946 auto const trR = meta->getResultTER();
3947 MultiApiJson jvTx =
3948 transJson(stTxn, trR, true, curTxLedger, mRef);
3949
3950 jvTx.set(
3951 jss::account_history_tx_index, txHistoryIndex--);
3952 if (i + 1 == num_txns ||
3953 txns[i + 1].first->getLedger() != tx->getLedger())
3954 jvTx.set(jss::account_history_boundary, true);
3955
3956 if (isFirstTx(tx, meta))
3957 {
3958 jvTx.set(jss::account_history_tx_first, true);
3959 sendMultiApiJson(jvTx, false);
3960
3961 JLOG(m_journal.trace())
3962 << "AccountHistory job for account "
3963 << toBase58(accountId)
3964 << " done, found last tx.";
3965 return;
3966 }
3967 else
3968 {
3969 sendMultiApiJson(jvTx, false);
3970 }
3971 }
3972
3973 if (marker)
3974 {
3975 JLOG(m_journal.trace())
3976 << "AccountHistory job for account "
3977 << toBase58(accountId)
3978 << " paging, marker=" << marker->ledgerSeq << ":"
3979 << marker->txnSeq;
3980 }
3981 else
3982 {
3983 break;
3984 }
3985 }
3986
3987 if (!subInfo.index_->stopHistorical_)
3988 {
3989 lastLedgerSeq = startLedgerSeq - 1;
3990 if (lastLedgerSeq <= 1)
3991 {
3992 JLOG(m_journal.trace())
3993 << "AccountHistory job for account "
3994 << toBase58(accountId)
3995 << " done, reached genesis ledger.";
3996 return;
3997 }
3998 }
3999 }
4000 });
4001}
4002
4003 void
// Begin historical streaming for one account_history subscriber.
// Records the current validated ledger as the separation point between
// "live" and "historical" transactions, then schedules the backfill job.
// NOTE(review): rendered listing — the signature lines (original 4004/4006,
// function name and subInfo parameter) are elided in this view.
4005     std::shared_ptr<ReadView const> const& ledger,
4007 {
4008     subInfo.index_->separationLedgerSeq_ = ledger->seq();
4009     auto const& accountId = subInfo.index_->accountId_;
4010     auto const accountKeylet = keylet::account(accountId);
     // If the account does not exist in this ledger there is no history to
     // replay; leave the subscription in place but start no job.
4011     if (!ledger->exists(accountKeylet))
4012     {
4013         JLOG(m_journal.debug())
4014             << "subAccountHistoryStart, no account " << toBase58(accountId)
4015             << ", no need to add AccountHistory job.";
4016         return;
4017     }
     // Special-case the genesis account: a Sequence of 1 means it has never
     // sent a transaction, so there is nothing to backfill.
4018     if (accountId == genesisAccountId)
4019     {
4020         if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
4021         {
4022             if (sleAcct->getFieldU32(sfSequence) == 1)
4023             {
4024                 JLOG(m_journal.debug())
4025                     << "subAccountHistoryStart, genesis account "
4026                     << toBase58(accountId)
4027                     << " does not have tx, no need to add AccountHistory job.";
4028                 return;
4029             }
4030         }
4031         else
4032         {
             // exists() succeeded above, so read() failing is a logic error.
4033             UNREACHABLE(
4034                 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
4035                 "access genesis account");
4036             return;
4037         }
4038     }
4039     subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
4040     subInfo.index_->haveHistorical_ = true;
4041 
4042     JLOG(m_journal.debug())
4043         << "subAccountHistoryStart, add AccountHistory job: accountId="
4044         << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
4045 
4046     addAccountHistoryJob(subInfo);
4047 }
4048
// Subscribe a listener to an account's transaction history stream.
// Returns rpcINVALID_PARAMS if this listener is already subscribed to the
// account, rpcSUCCESS otherwise. Streaming starts immediately when a
// validated ledger exists, else it is deferred until one arrives.
// NOTE(review): rendered listing — the return type/name (4049-4050), the
// SubAccountHistoryInfoWeak declaration (4062-4063) and the map-insertion
// lines (4068/4070) are elided in this view.
4051     InfoSub::ref isrListener,
4052     AccountID const& accountId)
4053 {
     // Per-listener bookkeeping; a second subscribe to the same account
     // from the same listener is rejected.
4054     if (!isrListener->insertSubAccountHistory(accountId))
4055     {
4056         JLOG(m_journal.debug())
4057             << "subAccountHistory, already subscribed to account "
4058             << toBase58(accountId);
4059         return rpcINVALID_PARAMS;
4060     }
4061 
4064         isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
     // Register the listener in mSubAccountHistory, keyed account -> (seq -> info).
4065     auto simIterator = mSubAccountHistory.find(accountId);
4066     if (simIterator == mSubAccountHistory.end())
4067     {
4069         inner.emplace(isrListener->getSeq(), ahi);
4071             simIterator, std::make_pair(accountId, inner));
4072     }
4073     else
4074     {
4075         simIterator->second.emplace(isrListener->getSeq(), ahi);
4076     }
4077 
4078     auto const ledger = app_.getLedgerMaster().getValidatedLedger();
4079     if (ledger)
4080     {
4081         subAccountHistoryStart(ledger, ahi);
4082     }
4083     else
4084     {
4085         // The node does not have validated ledgers, so wait for
4086         // one before start streaming.
4087         // In this case, the subscription is also considered successful.
4088         JLOG(m_journal.debug())
4089             << "subAccountHistory, no validated ledger yet, delay start";
4090     }
4091 
4092     return rpcSUCCESS;
4093 }
4094
4095 void
// Unsubscribe a listener from an account's history stream.
// historyOnly == true stops only the historical backfill while keeping the
// live subscription; historyOnly == false removes the subscription entirely.
// NOTE(review): rendered listing — the function-name line (4096) is elided.
4097     InfoSub::ref isrListener,
4098     AccountID const& account,
4099     bool historyOnly)
4100 {
4101     if (!historyOnly)
4102         isrListener->deleteSubAccountHistory(account);
     // Shared teardown keyed by the listener's sequence number.
4103     unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
4104 }
4105
4106 void
// Teardown for an account_history subscription identified by listener seq.
// Always signals the historical backfill job (if any) to stop; additionally
// erases the subscription record when historyOnly is false, pruning the
// per-account map entry once it becomes empty.
// NOTE(review): rendered listing — the function-name line (4107) and a line
// before the map lookup (4112, presumably a lock guard — confirm against the
// real source) are elided in this view.
4108     std::uint64_t seq,
4109     AccountID const& account,
4110     bool historyOnly)
4111 {
4113     auto simIterator = mSubAccountHistory.find(account);
4114     if (simIterator != mSubAccountHistory.end())
4115     {
4116         auto& subInfoMap = simIterator->second;
4117         auto subInfoIter = subInfoMap.find(seq);
4118         if (subInfoIter != subInfoMap.end())
4119         {
             // Flag checked by the running AccountHistory job; it exits
             // cooperatively rather than being cancelled here.
4120             subInfoIter->second.index_->stopHistorical_ = true;
4121         }
4122 
4123         if (!historyOnly)
4124         {
4125             simIterator->second.erase(seq);
4126             if (simIterator->second.empty())
4127             {
4128                 mSubAccountHistory.erase(simIterator);
4129             }
4130         }
4131         JLOG(m_journal.debug())
4132             << "unsubAccountHistory, account " << toBase58(account)
4133             << ", historyOnly = " << (historyOnly ? "true" : "false");
4134     }
4135 }
4136
4137 bool
// Subscribe a listener to an order book's update stream.
// Always reports success; a null listener group from makeBookListeners is
// treated as a logic error (UNREACHABLE), not a caller-visible failure.
// NOTE(review): rendered listing — the signature line (4138) is elided.
4139 {
4140     if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
4141         listeners->addSubscriber(isrListener);
4142     else
4143         UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
4144     return true;
4145 }
4146
4147 bool
// Remove a listener (by sequence number) from an order book's stream.
// Succeeds even if the book currently has no listener group.
// NOTE(review): rendered listing — the signature line (4148) is elided.
4149 {
4150     if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
4151         listeners->removeSubscriber(uSeq);
4152 
4153     return true;
4154 }
4155
4159{
4160 // This code-path is exclusively used when the server is in standalone
4161 // mode via `ledger_accept`
4162 XRPL_ASSERT(
4163 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
4164
4165 if (!m_standalone)
4166 Throw<std::runtime_error>(
4167 "Operation only possible in STANDALONE mode.");
4168
4169 // FIXME Could we improve on this and remove the need for a specialized
4170 // API in Consensus?
4171 beginConsensus(m_ledgerMaster.getClosedLedger()->info().hash, {});
4172 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
4173 return m_ledgerMaster.getCurrentLedger()->info().seq;
4174}
4175
4176 // <-- bool: true=added, false=already there
4177 bool
// Subscribe a listener to the "ledger" stream. Seeds jvResult with a
// snapshot of the latest validated ledger (index, hash, close time, fees,
// reserves, network id) when one exists, then registers the listener.
// NOTE(review): rendered listing — the signature (4178), the haveValidated()
// condition (4195), the getCompleteLedgers() call (4198) and a line before
// the return (4201, presumably a lock guard) are elided in this view.
4179 {
4180     if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
4181     {
4182         jvResult[jss::ledger_index] = lpClosed->info().seq;
4183         jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
4184         jvResult[jss::ledger_time] = Json::Value::UInt(
4185             lpClosed->info().closeTime.time_since_epoch().count());
         // Pre-XRPFees networks still report the deprecated fee_ref unit.
4186         if (!lpClosed->rules().enabled(featureXRPFees))
4187             jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
4188         jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
4189         jvResult[jss::reserve_base] =
4190             lpClosed->fees().accountReserve(0).jsonClipped();
4191         jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
4192         jvResult[jss::network_id] = app_.config().NETWORK_ID;
4193     }
4194 
4196     {
4197         jvResult[jss::validated_ledgers] =
4199     }
4200 
     // emplace().second: true when newly added, false if already subscribed.
4202     return mStreamMaps[sLedger]
4203         .emplace(isrListener->getSeq(), isrListener)
4204         .second;
4205 }
4206
4207// <-- bool: true=added, false=already there
4208bool
4210{
4213 .emplace(isrListener->getSeq(), isrListener)
4214 .second;
4215}
4216
4217// <-- bool: true=erased, false=was not there
4218bool
4220{
4222 return mStreamMaps[sLedger].erase(uSeq);
4223}
4224
4225// <-- bool: true=erased, false=was not there
4226bool
4232
4233// <-- bool: true=added, false=already there
4234bool
4236{
4238 return mStreamMaps[sManifests]
4239 .emplace(isrListener->getSeq(), isrListener)
4240 .second;
4241}
4242
4243// <-- bool: true=erased, false=was not there
4244bool
4250
4251// <-- bool: true=added, false=already there
4252bool
4254 InfoSub::ref isrListener,
4255 Json::Value& jvResult,
4256 bool admin)
4257{
4258 uint256 uRandom;
4259
4260 if (m_standalone)
4261 jvResult[jss::stand_alone] = m_standalone;
4262
4263 // CHECKME: is it necessary to provide a random number here?
4264 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
4265
4266 auto const& feeTrack = app_.getFeeTrack();
4267 jvResult[jss::random] = to_string(uRandom);
4268 jvResult[jss::server_status] = strOperatingMode(admin);
4269 jvResult[jss::load_base] = feeTrack.getLoadBase();
4270 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4271 jvResult[jss::hostid] = getHostId(admin);
4272 jvResult[jss::pubkey_node] =
4274
4276 return mStreamMaps[sServer]
4277 .emplace(isrListener->getSeq(), isrListener)
4278 .second;
4279}
4280
4281// <-- bool: true=erased, false=was not there
4282bool
4284{
4286 return mStreamMaps[sServer].erase(uSeq);
4287}
4288
4289// <-- bool: true=added, false=already there
4290bool
4292{
4295 .emplace(isrListener->getSeq(), isrListener)
4296 .second;
4297}
4298
4299// <-- bool: true=erased, false=was not there
4300bool
4306
4307// <-- bool: true=added, false=already there
4308bool
4310{
4313 .emplace(isrListener->getSeq(), isrListener)
4314 .second;
4315}
4316
4317// <-- bool: true=erased, false=was not there
4318bool
4324
4325// <-- bool: true=added, false=already there
4326bool
4328{
4331 .emplace(isrListener->getSeq(), isrListener)
4332 .second;
4333}
4334
4335void
4340
4341// <-- bool: true=erased, false=was not there
4342bool
4348
4349// <-- bool: true=added, false=already there
4350bool
4352{
4354 return mStreamMaps[sPeerStatus]
4355 .emplace(isrListener->getSeq(), isrListener)
4356 .second;
4357}
4358
4359// <-- bool: true=erased, false=was not there
4360bool
4366
4367// <-- bool: true=added, false=already there
4368bool
4370{
4373 .emplace(isrListener->getSeq(), isrListener)
4374 .second;
4375}
4376
4377// <-- bool: true=erased, false=was not there
4378bool
4384
4387{
4389
4390 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4391
4392 if (it != mRpcSubMap.end())
4393 return it->second;
4394
4395 return InfoSub::pointer();
4396}
4397
4400{
4402
4403 mRpcSubMap.emplace(strUrl, rspEntry);
4404
4405 return rspEntry;
4406}
4407
4408 bool
// Remove an RPC subscription entry by URL, but only when no stream map
// still references it. Returns true on removal, false when the URL is
// unknown or the entry is still in use by some stream.
// NOTE(review): rendered listing — the signature (4409) and a line before
// the lookup (4411, presumably a lock guard) are elided in this view.
4410 {
4412     auto pInfo = findRpcSub(strUrl);
4413 
4414     if (!pInfo)
4415         return false;
4416 
4417     // check to see if any of the stream maps still hold a weak reference to
4418     // this entry before removing
4419     for (SubMapType const& map : mStreamMaps)
4420     {
4421         if (map.find(pInfo->getSeq()) != map.end())
4422             return false;
4423     }
4424     mRpcSubMap.erase(strUrl);
4425     return true;
4426 }
4427
4428#ifndef USE_NEW_BOOK_PAGE
4429
4430// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4431// work, but it demonstrated poor performance.
4432//
4433void
4436 Book const& book,
4437 AccountID const& uTakerID,
4438 bool const bProof,
4439 unsigned int iLimit,
4440 Json::Value const& jvMarker,
4441 Json::Value& jvResult)
4442{ // CAUTION: This is the old get book page logic
4443 Json::Value& jvOffers =
4444 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4445
4447 uint256 const uBookBase = getBookBase(book);
4448 uint256 const uBookEnd = getQualityNext(uBookBase);
4449 uint256 uTipIndex = uBookBase;
4450
4451 if (auto stream = m_journal.trace())
4452 {
4453 stream << "getBookPage:" << book;
4454 stream << "getBookPage: uBookBase=" << uBookBase;
4455 stream << "getBookPage: uBookEnd=" << uBookEnd;
4456 stream << "getBookPage: uTipIndex=" << uTipIndex;
4457 }
4458
4459 ReadView const& view = *lpLedger;
4460
4461 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4462 isGlobalFrozen(view, book.in.account);
4463
4464 bool bDone = false;
4465 bool bDirectAdvance = true;
4466
4467 std::shared_ptr<SLE const> sleOfferDir;
4468 uint256 offerIndex;
4469 unsigned int uBookEntry;
4470 STAmount saDirRate;
4471
4472 auto const rate = transferRate(view, book.out.account);
4473 auto viewJ = app_.journal("View");
4474
4475 while (!bDone && iLimit-- > 0)
4476 {
4477 if (bDirectAdvance)
4478 {
4479 bDirectAdvance = false;
4480
4481 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4482
4483 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4484 if (ledgerIndex)
4485 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4486 else
4487 sleOfferDir.reset();
4488
4489 if (!sleOfferDir)
4490 {
4491 JLOG(m_journal.trace()) << "getBookPage: bDone";
4492 bDone = true;
4493 }
4494 else
4495 {
4496 uTipIndex = sleOfferDir->key();
4497 saDirRate = amountFromQuality(getQuality(uTipIndex));
4498
4499 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4500
4501 JLOG(m_journal.trace())
4502 << "getBookPage: uTipIndex=" << uTipIndex;
4503 JLOG(m_journal.trace())
4504 << "getBookPage: offerIndex=" << offerIndex;
4505 }
4506 }
4507
4508 if (!bDone)
4509 {
4510 auto sleOffer = view.read(keylet::offer(offerIndex));
4511
4512 if (sleOffer)
4513 {
4514 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4515 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4516 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4517 STAmount saOwnerFunds;
4518 bool firstOwnerOffer(true);
4519
4520 if (book.out.account == uOfferOwnerID)
4521 {
4522 // If an offer is selling issuer's own IOUs, it is fully
4523 // funded.
4524 saOwnerFunds = saTakerGets;
4525 }
4526 else if (bGlobalFreeze)
4527 {
4528 // If either asset is globally frozen, consider all offers
4529 // that aren't ours to be totally unfunded
4530 saOwnerFunds.clear(book.out);
4531 }
4532 else
4533 {
4534 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4535 if (umBalanceEntry != umBalance.end())
4536 {
4537 // Found in running balance table.
4538
4539 saOwnerFunds = umBalanceEntry->second;
4540 firstOwnerOffer = false;
4541 }
4542 else
4543 {
4544 // Did not find balance in table.
4545
4546 saOwnerFunds = accountHolds(
4547 view,
4548 uOfferOwnerID,
4549 book.out.currency,
4550 book.out.account,
4552 viewJ);
4553
4554 if (saOwnerFunds < beast::zero)
4555 {
4556 // Treat negative funds as zero.
4557
4558 saOwnerFunds.clear();
4559 }
4560 }
4561 }
4562
4563 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4564
4565 STAmount saTakerGetsFunded;
4566 STAmount saOwnerFundsLimit = saOwnerFunds;
4567 Rate offerRate = parityRate;
4568
4569 if (rate != parityRate
4570                 // Have a transfer fee.
4571 && uTakerID != book.out.account
4572 // Not taking offers of own IOUs.
4573 && book.out.account != uOfferOwnerID)
4574 // Offer owner not issuing ownfunds
4575 {
4576 // Need to charge a transfer fee to offer owner.
4577 offerRate = rate;
4578 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4579 }
4580
4581 if (saOwnerFundsLimit >= saTakerGets)
4582 {
4583 // Sufficient funds no shenanigans.
4584 saTakerGetsFunded = saTakerGets;
4585 }
4586 else
4587 {
4588 // Only provide, if not fully funded.
4589
4590 saTakerGetsFunded = saOwnerFundsLimit;
4591
4592 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4593 std::min(
4594 saTakerPays,
4595 multiply(
4596 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4597 .setJson(jvOffer[jss::taker_pays_funded]);
4598 }
4599
4600 STAmount saOwnerPays = (parityRate == offerRate)
4601 ? saTakerGetsFunded
4602 : std::min(
4603 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4604
4605 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4606
4607 // Include all offers funded and unfunded
4608 Json::Value& jvOf = jvOffers.append(jvOffer);
4609 jvOf[jss::quality] = saDirRate.getText();
4610
4611 if (firstOwnerOffer)
4612 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4613 }
4614 else
4615 {
4616 JLOG(m_journal.warn()) << "Missing offer";
4617 }
4618
4619 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4620 {
4621 bDirectAdvance = true;
4622 }
4623 else
4624 {
4625 JLOG(m_journal.trace())
4626 << "getBookPage: offerIndex=" << offerIndex;
4627 }
4628 }
4629 }
4630
4631 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4632 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4633}
4634
4635#else
4636
4637// This is the new code that uses the book iterators
4638// It has temporarily been disabled
4639
4640void
4643 Book const& book,
4644 AccountID const& uTakerID,
4645 bool const bProof,
4646 unsigned int iLimit,
4647 Json::Value const& jvMarker,
4648 Json::Value& jvResult)
4649{
4650 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4651
4653
4654 MetaView lesActive(lpLedger, tapNONE, true);
4655 OrderBookIterator obIterator(lesActive, book);
4656
4657 auto const rate = transferRate(lesActive, book.out.account);
4658
4659 bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4660 lesActive.isGlobalFrozen(book.in.account);
4661
4662 while (iLimit-- > 0 && obIterator.nextOffer())
4663 {
4664 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4665 if (sleOffer)
4666 {
4667 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4668 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4669 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4670 STAmount saDirRate = obIterator.getCurrentRate();
4671 STAmount saOwnerFunds;
4672
4673 if (book.out.account == uOfferOwnerID)
4674 {
4675 // If offer is selling issuer's own IOUs, it is fully funded.
4676 saOwnerFunds = saTakerGets;
4677 }
4678 else if (bGlobalFreeze)
4679 {
4680 // If either asset is globally frozen, consider all offers
4681 // that aren't ours to be totally unfunded
4682 saOwnerFunds.clear(book.out);
4683 }
4684 else
4685 {
4686 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4687
4688 if (umBalanceEntry != umBalance.end())
4689 {
4690 // Found in running balance table.
4691
4692 saOwnerFunds = umBalanceEntry->second;
4693 }
4694 else
4695 {
4696 // Did not find balance in table.
4697
4698 saOwnerFunds = lesActive.accountHolds(
4699 uOfferOwnerID,
4700 book.out.currency,
4701 book.out.account,
4703
4704 if (saOwnerFunds.isNegative())
4705 {
4706 // Treat negative funds as zero.
4707
4708 saOwnerFunds.zero();
4709 }
4710 }
4711 }
4712
4713 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4714
4715 STAmount saTakerGetsFunded;
4716 STAmount saOwnerFundsLimit = saOwnerFunds;
4717 Rate offerRate = parityRate;
4718
4719 if (rate != parityRate
4720                 // Have a transfer fee.
4721 && uTakerID != book.out.account
4722 // Not taking offers of own IOUs.
4723 && book.out.account != uOfferOwnerID)
4724 // Offer owner not issuing ownfunds
4725 {
4726 // Need to charge a transfer fee to offer owner.
4727 offerRate = rate;
4728 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4729 }
4730
4731 if (saOwnerFundsLimit >= saTakerGets)
4732 {
4733 // Sufficient funds no shenanigans.
4734 saTakerGetsFunded = saTakerGets;
4735 }
4736 else
4737 {
4738 // Only provide, if not fully funded.
4739 saTakerGetsFunded = saOwnerFundsLimit;
4740
4741 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4742
4743             // TODO(tom): The result of this expression is not used - what's
4744 // going on here?
4745 std::min(
4746 saTakerPays,
4747 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4748 .setJson(jvOffer[jss::taker_pays_funded]);
4749 }
4750
4751 STAmount saOwnerPays = (parityRate == offerRate)
4752 ? saTakerGetsFunded
4753 : std::min(
4754 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4755
4756 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4757
4758 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4759 {
4760 // Only provide funded offers and offers of the taker.
4761 Json::Value& jvOf = jvOffers.append(jvOffer);
4762 jvOf[jss::quality] = saDirRate.getText();
4763 }
4764 }
4765 }
4766
4767 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4768 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4769}
4770
4771#endif
4772
4773inline void
4775{
4776 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4777 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4779 counters[static_cast<std::size_t>(mode)].dur += current;
4780
4783 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4784 .dur.count());
4786 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4787 .dur.count());
4789 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4791 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4792 .dur.count());
4794 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4795
4797 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4798 .transitions);
4800 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4801 .transitions);
4803 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4805 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4806 .transitions);
4808 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4809}
4810
4811 void
// Record an operating-mode transition: bump the target mode's transition
// counter, accumulate the elapsed time into the outgoing mode's duration,
// and restart the timer. The first-ever transition into FULL also captures
// the initial sync duration (microseconds since process start).
// NOTE(review): rendered listing — the signature line (4812) is elided.
4813 {
4814     auto now = std::chrono::steady_clock::now();
4815 
     // counters_, mode_, start_ and initialSyncUs_ are all guarded by mutex_.
4816     std::lock_guard lock(mutex_);
4817     ++counters_[static_cast<std::size_t>(om)].transitions;
4818     if (om == OperatingMode::FULL &&
4819         counters_[static_cast<std::size_t>(om)].transitions == 1)
4820     {
4821         initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4822                              now - processStart_)
4823                              .count();
4824     }
     // Attribute the time spent in the mode we are leaving.
4825     counters_[static_cast<std::size_t>(mode_)].dur +=
4826         std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4827 
4828     mode_ = om;
4829     start_ = now;
4830 }
4831
4832 void
// Emit per-mode accounting (transition counts and cumulative duration in
// microseconds) as the "state_accounting" JSON object, plus the duration of
// the current state and, when known, the initial sync duration.
// NOTE(review): rendered listing — the signature (4833), the `now - start`
// operand of the duration_cast (4837) and the for-loop init (4841) are
// elided in this view.
4834 {
     // Snapshot under the lock inside getCounterData(), then fold the
     // still-running current interval into the active mode's total.
4835     auto [counters, mode, start, initialSync] = getCounterData();
4836     auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4838     counters[static_cast<std::size_t>(mode)].dur += current;
4839 
4840     obj[jss::state_accounting] = Json::objectValue;
4842          i <= static_cast<std::size_t>(OperatingMode::FULL);
4843          ++i)
4844     {
4845         obj[jss::state_accounting][states_[i]] = Json::objectValue;
4846         auto& state = obj[jss::state_accounting][states_[i]];
         // Counts/durations are serialized as strings, not JSON numbers.
4847         state[jss::transitions] = std::to_string(counters[i].transitions);
4848         state[jss::duration_us] = std::to_string(counters[i].dur.count());
4849     }
4850     obj[jss::server_state_duration_us] = std::to_string(current.count());
     // Zero means the server has never reached FULL; omit the field then.
4851     if (initialSync)
4852         obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4853 }
4854
4855//------------------------------------------------------------------------------
4856
4859 Application& app,
4861 bool standalone,
4862 std::size_t minPeerCount,
4863 bool startvalid,
4864 JobQueue& job_queue,
4866 ValidatorKeys const& validatorKeys,
4867 boost::asio::io_context& io_svc,
4868 beast::Journal journal,
4869 beast::insight::Collector::ptr const& collector)
4870{
4872 app,
4873 clock,
4874 standalone,
4875 minPeerCount,
4876 startvalid,
4877 job_queue,
4879 validatorKeys,
4880 io_svc,
4881 journal,
4882 collector);
4883}
4884
4885} // namespace ripple
T any_of(T... args)
T back_inserter(T... args)
T begin(T... args)
Decorator for streaming out compact json.
Lightweight wrapper to tag static string.
Definition json_value.h:63
Represents a JSON value.
Definition json_value.h:149
Json::UInt UInt
Definition json_value.h:156
Value & append(Value const &value)
Append value to array at the end.
bool isMember(char const *key) const
Return true if the object has a member named key.
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
A generic endpoint for log messages.
Definition Journal.h:60
Stream error() const
Definition Journal.h:346
Stream debug() const
Definition Journal.h:328
Stream info() const
Definition Journal.h:334
Stream trace() const
Severity stream access functions.
Definition Journal.h:322
Stream warn() const
Definition Journal.h:340
A metric for measuring an integral value.
Definition Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition Gauge.h:68
A reference to a handler for performing polled collection.
Definition Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition Book.h:36
Issue in
Definition Book.h:38
Issue out
Definition Book.h:39
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition ClusterNode.h:46
std::uint32_t getLoadFee() const
Definition ClusterNode.h:52
NetClock::time_point getReportTime() const
Definition ClusterNode.h:58
PublicKey const & identity() const
Definition ClusterNode.h:64
std::size_t size() const
The number of nodes in the cluster list.
Definition Cluster.cpp:49
uint32_t NETWORK_ID
Definition Config.h:156
std::string SERVER_DOMAIN
Definition Config.h:278
std::size_t NODE_SIZE
Definition Config.h:213
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition Config.h:160
int RELAY_UNTRUSTED_VALIDATIONS
Definition Config.h:169
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition InfoSub.h:54
AccountID account
Definition Issue.h:36
Currency currency
Definition Issue.h:35
A pool of threads to perform work.
Definition JobQueue.h:58
Json::Value getJson(int c=0)
Definition JobQueue.cpp:214
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition JobQueue.h:168
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
std::uint32_t getClusterFee() const
std::uint32_t getLocalFee() const
std::uint32_t getLoadBase() const
std::uint32_t getRemoteFee() const
std::uint32_t getLoadFactor() const
Manages load sources.
Definition LoadManager.h:46
void heartbeat()
Reset the stall detection timer.
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
Definition Manifest.cpp:323
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
std::chrono::steady_clock::time_point start_
static std::array< Json::StaticString const, 5 > const states_
std::chrono::steady_clock::time_point const processStart_
Transaction with input flags and results to be applied in batches.
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
std::shared_ptr< Transaction > const transaction
boost::asio::steady_timer accountHistoryTxTimer_
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
std::vector< TransactionStatus > mTransactions
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
std::optional< PublicKey > const validatorPK_
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
beast::Journal m_journal
SubInfoMapType mSubAccount
std::optional< PublicKey > const validatorMasterPK_
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
std::atomic< bool > unlBlocked_
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
std::atomic< bool > needNetworkLedger_
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
void setMode(OperatingMode om) override
void stop() override
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
void clearNeedNetworkLedger() override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
DispatchState mDispatchState
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
void reportFeeChange() override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
std::atomic< bool > amendmentWarned_
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition NetworkOPs.h:89
void getCountsJson(Json::Value &obj)
Definition Database.cpp:268
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Writable ledger view that accumulates state and tx changes.
Definition OpenView.h:65
BookListeners::pointer getBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &alTx, MultiApiJson const &jvObj)
BookListeners::pointer makeBookListeners(Book const &)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Represents a set of transactions in RCLConsensus.
Definition RCLCxTx.h:63
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition RFC1751.cpp:507
Collects logging information.
std::unique_ptr< std::stringstream > const & ss()
A view into a ledger.
Definition ReadView.h:51
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition STAmount.cpp:666
std::string getText() const override
Definition STAmount.cpp:706
Issue const & issue() const
Definition STAmount.h:496
std::optional< T > get(std::string const &name) const
std::size_t size() const noexcept
Definition Serializer.h:72
void const * data() const noexcept
Definition Serializer.h:78
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition TxQ.cpp:1778
static time_point now()
Validator keys and manifest as set in configuration file.
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition XRPAmount.h:262
Json::Value jsonClipped() const
Definition XRPAmount.h:218
iterator begin()
Definition base_uint.h:136
static constexpr std::size_t size()
Definition base_uint.h:526
bool isZero() const
Definition base_uint.h:540
bool isNonZero() const
Definition base_uint.h:545
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition scope.h:231
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_same_v
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition json_value.h:44
@ objectValue
object value (collection of name/value pairs).
Definition json_value.h:45
int Int
unsigned int UInt
void rngfill(void *const buffer, std::size_t const bytes, Generator &g)
Definition rngfill.h:34
std::string const & getVersionString()
Server version.
Definition BuildInfo.cpp:68
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Encodes ledger sequence, transaction index, and network ID into a CTID string.
Definition CTID.h:53
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition BookChanges.h:47
void insertNFTSyntheticInJson(Json::Value &, std::shared_ptr< STTx const > const &, TxMeta const &)
Adds common synthetic fields to transaction-related JSON responses.
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition Indexes.cpp:184
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition Indexes.cpp:380
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition Indexes.cpp:274
Rate rate(Env &env, Account const &account, std::uint32_t const &seq)
Definition escrow.cpp:69
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:25
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
STAmount divide(STAmount const &amount, Rate const &rate)
Definition Rate2.cpp:93
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition STTx.cpp:811
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition View.cpp:551
@ fhZERO_IF_FROZEN
Definition View.h:77
@ fhIGNORE_FREEZE
Definition View.h:77
std::uint64_t getQuality(uint256 const &uBase)
Definition Indexes.cpp:149
@ rpcSUCCESS
Definition ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition ErrorCodes.h:84
@ rpcINTERNAL
Definition ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
auto constexpr muldiv_max
Definition mulDiv.h:28
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition LocalTxs.cpp:192
STAmount amountFromQuality(std::uint64_t rate)
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition ErrorCodes.h:175
@ warnRPC_UNSUPPORTED_MAJORITY
Definition ErrorCodes.h:173
@ warnRPC_AMENDMENT_BLOCKED
Definition ErrorCodes.h:174
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition NetworkOPs.h:68
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition Rate2.cpp:53
AccountID calcAccountID(PublicKey const &pk)
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Json::Value rpcError(int iError)
Definition RPCErr.cpp:31
@ tefPAST_SEQ
Definition TER.h:175
bool isTefFailure(TER x) noexcept
Definition TER.h:662
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
std::string strHex(FwdIt begin, FwdIt end)
Definition strHex.h:30
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition View.cpp:759
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition ApiVersion.h:101
bool isTerRetry(TER x) noexcept
Definition TER.h:668
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition predicates.h:75
@ tesSUCCESS
Definition TER.h:244
uint256 getQualityNext(uint256 const &uBase)
Definition Indexes.cpp:141
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition View.cpp:384
bool isTesSuccess(TER x) noexcept
Definition TER.h:674
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition chrono.h:92
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition View.cpp:145
std::string to_string(base_uint< Bits, Tag > const &a)
Definition base_uint.h:630
FeeSetup setup_FeeVote(Section const &section)
Definition Config.cpp:1129
bool isTemMalformed(TER x) noexcept
Definition TER.h:656
Number root(Number f, unsigned d)
Definition Number.cpp:636
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
@ tapFAIL_HARD
Definition ApplyView.h:35
@ tapUNLIMITED
Definition ApplyView.h:42
@ tapNONE
Definition ApplyView.h:31
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition View.cpp:156
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition apply.cpp:44
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition Seed.cpp:76
constexpr std::size_t maxPoppedTransactions
@ terQUEUED
Definition TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition TER.cpp:249
@ jtNETOP_CLUSTER
Definition Job.h:75
@ jtCLIENT_FEE_CHANGE
Definition Job.h:47
@ jtTRANSACTION
Definition Job.h:62
@ jtTXN_PROC
Definition Job.h:82
@ jtCLIENT_CONSENSUS
Definition Job.h:48
@ jtBATCH
Definition Job.h:65
@ jtCLIENT_ACCT_HIST
Definition Job.h:49
bool isTelLocal(TER x) noexcept
Definition TER.h:650
uint256 getBookBase(Book const &book)
Definition Indexes.cpp:115
constexpr std::uint32_t tfInnerBatchTxn
Definition TxFlags.h:61
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition View.cpp:182
static std::uint32_t trunc32(std::uint64_t v)
@ temINVALID_FLAG
Definition TER.h:111
@ temBAD_SIGNATURE
Definition TER.h:105
static auto const genesisAccountId
STL namespace.
T owns_lock(T... args)
T ref(T... args)
T reserve(T... args)
T reset(T... args)
T set_intersection(T... args)
T size(T... args)
T str(T... args)
std::string serialized
The manifest in serialized form.
Definition Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
Definition Manifest.cpp:244
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
Definition Manifest.cpp:255
PublicKey masterKey
The master key associated with this manifest.
Definition Manifest.h:86
Server fees published on server subscription.
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
bool operator==(ServerFeeSummary const &b) const
beast::insight::Gauge full_transitions
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
beast::insight::Hook hook
beast::insight::Gauge connected_duration
beast::insight::Gauge tracking_duration
beast::insight::Gauge connected_transitions
beast::insight::Gauge disconnected_transitions
beast::insight::Gauge syncing_duration
beast::insight::Gauge tracking_transitions
beast::insight::Gauge full_duration
beast::insight::Gauge disconnected_duration
beast::insight::Gauge syncing_transitions
SubAccountHistoryIndex(AccountID const &accountId)
std::shared_ptr< SubAccountHistoryIndex > index_
std::shared_ptr< SubAccountHistoryIndex > index_
Represents a transfer rate.
Definition Rate.h:40
Data format for exchanging consumption information across peers.
Definition Gossip.h:32
std::vector< Item > items
Definition Gossip.h:44
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition TxQ.h:165
IsMemberResult isMember(char const *key) const
void set(char const *key, auto const &v)
Select all peers (except optional excluded) that are in our cluster.
Definition predicates.h:137
Sends a message to all peers.
Definition predicates.h:32
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)