// NetworkOPs.cpp (rippled) -- extraction viewer residue removed
1#include <xrpld/app/consensus/RCLConsensus.h>
2#include <xrpld/app/consensus/RCLCxPeerPos.h>
3#include <xrpld/app/consensus/RCLValidations.h>
4#include <xrpld/app/ledger/AcceptedLedger.h>
5#include <xrpld/app/ledger/InboundLedgers.h>
6#include <xrpld/app/ledger/LedgerMaster.h>
7#include <xrpld/app/ledger/LedgerToJson.h>
8#include <xrpld/app/ledger/LocalTxs.h>
9#include <xrpld/app/ledger/OpenLedger.h>
10#include <xrpld/app/ledger/TransactionMaster.h>
11#include <xrpld/app/main/LoadManager.h>
12#include <xrpld/app/main/Tuning.h>
13#include <xrpld/app/misc/DeliverMax.h>
14#include <xrpld/app/misc/Transaction.h>
15#include <xrpld/app/misc/TxQ.h>
16#include <xrpld/app/misc/ValidatorKeys.h>
17#include <xrpld/app/misc/ValidatorList.h>
18#include <xrpld/app/misc/detail/AccountTxPaging.h>
19#include <xrpld/app/misc/make_NetworkOPs.h>
20#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
21#include <xrpld/consensus/Consensus.h>
22#include <xrpld/consensus/ConsensusParms.h>
23#include <xrpld/core/ConfigSections.h>
24#include <xrpld/overlay/Cluster.h>
25#include <xrpld/overlay/Overlay.h>
26#include <xrpld/overlay/predicates.h>
27#include <xrpld/rpc/BookChanges.h>
28#include <xrpld/rpc/CTID.h>
29#include <xrpld/rpc/DeliveredAmount.h>
30#include <xrpld/rpc/MPTokenIssuanceID.h>
31#include <xrpld/rpc/ServerHandler.h>
32
33#include <xrpl/basics/UptimeClock.h>
34#include <xrpl/basics/mulDiv.h>
35#include <xrpl/basics/safe_cast.h>
36#include <xrpl/basics/scope.h>
37#include <xrpl/beast/utility/rngfill.h>
38#include <xrpl/core/HashRouter.h>
39#include <xrpl/core/NetworkIDService.h>
40#include <xrpl/core/PerfLog.h>
41#include <xrpl/crypto/RFC1751.h>
42#include <xrpl/crypto/csprng.h>
43#include <xrpl/ledger/AmendmentTable.h>
44#include <xrpl/ledger/OrderBookDB.h>
45#include <xrpl/protocol/BuildInfo.h>
46#include <xrpl/protocol/Feature.h>
47#include <xrpl/protocol/MultiApiJson.h>
48#include <xrpl/protocol/NFTSyntheticSerializer.h>
49#include <xrpl/protocol/RPCErr.h>
50#include <xrpl/protocol/TxFlags.h>
51#include <xrpl/protocol/jss.h>
52#include <xrpl/resource/Fees.h>
53#include <xrpl/resource/ResourceManager.h>
54#include <xrpl/server/LoadFeeTrack.h>
55#include <xrpl/tx/apply.h>
56
57#include <boost/asio/ip/host_name.hpp>
58#include <boost/asio/steady_timer.hpp>
59
60#include <algorithm>
61#include <exception>
62#include <mutex>
63#include <optional>
64#include <set>
65#include <sstream>
66#include <string>
67#include <tuple>
68#include <unordered_map>
69
70namespace xrpl {
71
72class NetworkOPsImp final : public NetworkOPs
73{
79 {
80 public:
82 bool const admin;
83 bool const local;
85 bool applied = false;
87
89 : transaction(t), admin(a), local(l), failType(f)
90 {
91 XRPL_ASSERT(
93 "xrpl::NetworkOPsImp::TransactionStatus::TransactionStatus : "
94 "valid inputs");
95 }
96 };
97
101 enum class DispatchState : unsigned char {
102 none,
103 scheduled,
104 running,
105 };
106
108
124 {
132
136 std::chrono::steady_clock::time_point start_ = std::chrono::steady_clock::now();
137 std::chrono::steady_clock::time_point const processStart_ = start_;
140
141 public:
143 {
144 counters_[static_cast<std::size_t>(OperatingMode::DISCONNECTED)].transitions = 1;
145 }
146
153 void
155
161 void
162 json(Json::Value& obj) const;
163
165 {
167 decltype(mode_) mode;
168 decltype(start_) start;
170 };
171
174 {
177 }
178 };
179
182 {
183 ServerFeeSummary() = default;
184
186 XRPAmount fee,
187 TxQ::Metrics&& escalationMetrics,
188 LoadFeeTrack const& loadFeeTrack);
189 bool
190 operator!=(ServerFeeSummary const& b) const;
191
192 bool
194 {
195 return !(*this != b);
196 }
197
202 };
203
204public:
206 ServiceRegistry& registry,
208 bool standalone,
209 std::size_t minPeerCount,
210 bool start_valid,
211 JobQueue& job_queue,
213 ValidatorKeys const& validatorKeys,
214 boost::asio::io_context& io_svc,
215 beast::Journal journal,
216 beast::insight::Collector::ptr const& collector)
217 : registry_(registry)
218 , m_journal(journal)
221 , heartbeatTimer_(io_svc)
222 , clusterTimer_(io_svc)
223 , accountHistoryTxTimer_(io_svc)
224 , mConsensus(
225 registry_.app(),
227 setup_FeeVote(registry_.app().config().section("voting")),
228 registry_.logs().journal("FeeVote")),
230 *m_localTX,
231 registry.getInboundTransactions(),
232 beast::get_abstract_clock<std::chrono::steady_clock>(),
233 validatorKeys,
234 registry_.logs().journal("LedgerConsensus"))
235 , validatorPK_(
236 validatorKeys.keys ? validatorKeys.keys->publicKey : decltype(validatorPK_){})
238 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
239 : decltype(validatorMasterPK_){})
241 , m_job_queue(job_queue)
242 , m_standalone(standalone)
243 , minPeerCount_(start_valid ? 0 : minPeerCount)
244 , m_stats(std::bind(&NetworkOPsImp::collect_metrics, this), collector)
245 {
246 }
247
248 ~NetworkOPsImp() override
249 {
250 // This clear() is necessary to ensure the shared_ptrs in this map get
251 // destroyed NOW because the objects in this map invoke methods on this
252 // class when they are destroyed
254 }
255
256public:
258 getOperatingMode() const override;
259
261 strOperatingMode(OperatingMode const mode, bool const admin) const override;
262
264 strOperatingMode(bool const admin = false) const override;
265
266 //
267 // Transaction operations.
268 //
269
270 // Must complete immediately.
271 void
273
274 void
276 std::shared_ptr<Transaction>& transaction,
277 bool bUnlimited,
278 bool bLocal,
279 FailHard failType) override;
280
281 void
282 processTransactionSet(CanonicalTXSet const& set) override;
283
292 void
293 doTransactionSync(std::shared_ptr<Transaction> transaction, bool bUnlimited, FailHard failType);
294
304 void
307 bool bUnlimited,
308 FailHard failtype);
309
310private:
311 bool
313
314 void
317 std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback);
318
319public:
323 void
325
331 void
333
334 //
335 // Owner functions.
336 //
337
339 getOwnerInfo(std::shared_ptr<ReadView const> lpLedger, AccountID const& account) override;
340
341 //
342 // Book functions.
343 //
344
345 void
348 Book const&,
349 AccountID const& uTakerID,
350 bool const bProof,
351 unsigned int iLimit,
352 Json::Value const& jvMarker,
353 Json::Value& jvResult) override;
354
355 // Ledger proposal/close functions.
356 bool
358
359 bool
360 recvValidation(std::shared_ptr<STValidation> const& val, std::string const& source) override;
361
362 void
363 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
364
365 // Network state machine.
366
367 // Used for the "jump" case.
368private:
369 void
371 bool
373
374public:
375 bool
376 beginConsensus(uint256 const& networkClosed, std::unique_ptr<std::stringstream> const& clog)
377 override;
378 void
380 void
381 setStandAlone() override;
382
386 void
387 setStateTimer() override;
388
389 void
390 setNeedNetworkLedger() override;
391 void
392 clearNeedNetworkLedger() override;
393 bool
394 isNeedNetworkLedger() override;
395 bool
396 isFull() override;
397
398 void
399 setMode(OperatingMode om) override;
400
401 bool
402 isBlocked() override;
403 bool
404 isAmendmentBlocked() override;
405 void
406 setAmendmentBlocked() override;
407 bool
408 isAmendmentWarned() override;
409 void
410 setAmendmentWarned() override;
411 void
412 clearAmendmentWarned() override;
413 bool
414 isUNLBlocked() override;
415 void
416 setUNLBlocked() override;
417 void
418 clearUNLBlocked() override;
419 void
420 consensusViewChange() override;
421
423 getConsensusInfo() override;
425 getServerInfo(bool human, bool admin, bool counters) override;
426 void
427 clearLedgerFetch() override;
429 getLedgerFetchInfo() override;
432 void
433 reportFeeChange() override;
434 void
436
437 void
438 updateLocalTx(ReadView const& view) override;
440 getLocalTxCount() override;
441
442 //
443 // Monitoring: publisher side.
444 //
445 void
446 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
447 void
450 std::shared_ptr<STTx const> const& transaction,
451 TER result) override;
452 void
453 pubValidation(std::shared_ptr<STValidation> const& val) override;
454
455 //--------------------------------------------------------------------------
456 //
457 // InfoSub::Source.
458 //
459 void
460 subAccount(InfoSub::ref ispListener, hash_set<AccountID> const& vnaAccountIDs, bool rt)
461 override;
462 void
463 unsubAccount(InfoSub::ref ispListener, hash_set<AccountID> const& vnaAccountIDs, bool rt)
464 override;
465
466 // Just remove the subscription from the tracking
467 // not from the InfoSub. Needed for InfoSub destruction
468 void
469 unsubAccountInternal(std::uint64_t seq, hash_set<AccountID> const& vnaAccountIDs, bool rt)
470 override;
471
473 subAccountHistory(InfoSub::ref ispListener, AccountID const& account) override;
474 void
475 unsubAccountHistory(InfoSub::ref ispListener, AccountID const& account, bool historyOnly)
476 override;
477
478 void
479 unsubAccountHistoryInternal(std::uint64_t seq, AccountID const& account, bool historyOnly)
480 override;
481
482 bool
483 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
484 bool
485 unsubLedger(std::uint64_t uListener) override;
486
487 bool
488 subBookChanges(InfoSub::ref ispListener) override;
489 bool
490 unsubBookChanges(std::uint64_t uListener) override;
491
492 bool
493 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin) override;
494 bool
495 unsubServer(std::uint64_t uListener) override;
496
497 bool
498 subBook(InfoSub::ref ispListener, Book const&) override;
499 bool
500 unsubBook(std::uint64_t uListener, Book const&) override;
501
502 bool
503 subManifests(InfoSub::ref ispListener) override;
504 bool
505 unsubManifests(std::uint64_t uListener) override;
506 void
507 pubManifest(Manifest const&) override;
508
509 bool
510 subTransactions(InfoSub::ref ispListener) override;
511 bool
512 unsubTransactions(std::uint64_t uListener) override;
513
514 bool
515 subRTTransactions(InfoSub::ref ispListener) override;
516 bool
517 unsubRTTransactions(std::uint64_t uListener) override;
518
519 bool
520 subValidations(InfoSub::ref ispListener) override;
521 bool
522 unsubValidations(std::uint64_t uListener) override;
523
524 bool
525 subPeerStatus(InfoSub::ref ispListener) override;
526 bool
527 unsubPeerStatus(std::uint64_t uListener) override;
528 void
529 pubPeerStatus(std::function<Json::Value(void)> const&) override;
530
531 bool
532 subConsensus(InfoSub::ref ispListener) override;
533 bool
534 unsubConsensus(std::uint64_t uListener) override;
535
537 findRpcSub(std::string const& strUrl) override;
539 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
540 bool
541 tryRemoveRpcSub(std::string const& strUrl) override;
542
543 void
544 stop() override
545 {
546 {
547 try
548 {
549 heartbeatTimer_.cancel();
550 }
551 catch (boost::system::system_error const& e)
552 {
553 JLOG(m_journal.error()) << "NetworkOPs: heartbeatTimer cancel error: " << e.what();
554 }
555
556 try
557 {
558 clusterTimer_.cancel();
559 }
560 catch (boost::system::system_error const& e)
561 {
562 JLOG(m_journal.error()) << "NetworkOPs: clusterTimer cancel error: " << e.what();
563 }
564
565 try
566 {
567 accountHistoryTxTimer_.cancel();
568 }
569 catch (boost::system::system_error const& e)
570 {
571 JLOG(m_journal.error())
572 << "NetworkOPs: accountHistoryTxTimer cancel error: " << e.what();
573 }
574 }
575 // Make sure that any waitHandlers pending in our timers are done.
576 using namespace std::chrono_literals;
577 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
578 }
579
580 void
581 stateAccounting(Json::Value& obj) override;
582
583private:
584 void
585 setTimer(
586 boost::asio::steady_timer& timer,
587 std::chrono::milliseconds const& expiry_time,
588 std::function<void()> onExpire,
589 std::function<void()> onError);
590 void
592 void
594 void
596 void
598
600 transJson(
601 std::shared_ptr<STTx const> const& transaction,
602 TER result,
603 bool validated,
606
607 void
610 AcceptedLedgerTx const& transaction,
611 bool last);
612
613 void
616 AcceptedLedgerTx const& transaction,
617 bool last);
618
619 void
622 std::shared_ptr<STTx const> const& transaction,
623 TER result);
624
625 void
626 pubServer();
627 void
629
631 getHostId(bool forAdmin);
632
633private:
637
638 /*
639 * With a validated ledger to separate history and future, the node
640 * streams historical txns with negative indexes starting from -1,
641 * and streams future txns starting from index 0.
642 * The SubAccountHistoryIndex struct maintains these indexes.
643 * It also has a flag stopHistorical_ for stopping streaming
644 * the historical txns.
645 */
682
686 void
690 void
692 void
694
697
699
701
703
708
710 boost::asio::steady_timer heartbeatTimer_;
711 boost::asio::steady_timer clusterTimer_;
712 boost::asio::steady_timer accountHistoryTxTimer_;
713
715
718
720
722
725
727
729
730 enum SubTypes {
731 sLedger, // Accepted ledgers.
732 sManifests, // Received validator manifests.
733 sServer, // When server changes connectivity state.
734 sTransactions, // All accepted transactions.
735 sRTTransactions, // All proposed and accepted transactions.
736 sValidations, // Received validations.
737 sPeerStatus, // Peer status changes.
738 sConsensusPhase, // Consensus phase
739 sBookChanges, // Per-ledger order book changes
740 sLastEntry // Any new entry must be ADDED ABOVE this one
741 };
742
744
746
748
749 // Whether we are in standalone mode.
750 bool const m_standalone;
751
752 // The number of nodes that we need to consider ourselves connected.
754
755 // Transaction batching.
760
762
765
766private:
767 struct Stats
768 {
769 template <class Handler>
770 Stats(Handler const& handler, beast::insight::Collector::ptr const& collector)
771 : hook(collector->make_hook(handler))
773 collector->make_gauge("State_Accounting", "Disconnected_duration"))
774 , connected_duration(collector->make_gauge("State_Accounting", "Connected_duration"))
775 , syncing_duration(collector->make_gauge("State_Accounting", "Syncing_duration"))
776 , tracking_duration(collector->make_gauge("State_Accounting", "Tracking_duration"))
777 , full_duration(collector->make_gauge("State_Accounting", "Full_duration"))
779 collector->make_gauge("State_Accounting", "Disconnected_transitions"))
781 collector->make_gauge("State_Accounting", "Connected_transitions"))
782 , syncing_transitions(collector->make_gauge("State_Accounting", "Syncing_transitions"))
784 collector->make_gauge("State_Accounting", "Tracking_transitions"))
785 , full_transitions(collector->make_gauge("State_Accounting", "Full_transitions"))
786 {
787 }
788
795
801 };
802
803 std::mutex m_statsMutex; // Mutex to lock m_stats
805
806private:
807 void
809};
810
811//------------------------------------------------------------------------------
812
814 {"disconnected", "connected", "syncing", "tracking", "full"}};
815
817
824
825static auto const genesisAccountId =
827
828//------------------------------------------------------------------------------
829inline OperatingMode
831{
832 return mMode;
833}
834
// Convenience overload: report the server's *current* operating mode,
// formatted for either admin or non-admin consumers.
inline std::string
NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
{
    return strOperatingMode(mMode, admin);
}
840
841inline void
846
847inline void
852
853inline void
858
859inline bool
864
865inline bool
870
873{
874 static std::string const hostname = boost::asio::ip::host_name();
875
876 if (forAdmin)
877 return hostname;
878
879 // For non-admin uses hash the node public key into a
880 // single RFC1751 word:
881 static std::string const shroudedHostId = [this]() {
882 auto const& id = registry_.app().nodeIdentity();
883
884 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
885 }();
886
887 return shroudedHostId;
888}
889
890void
892{
894
895 // Only do this work if a cluster is configured
896 if (registry_.cluster().size() != 0)
898}
899
900void
902 boost::asio::steady_timer& timer,
903 std::chrono::milliseconds const& expiry_time,
904 std::function<void()> onExpire,
905 std::function<void()> onError)
906{
907 // Only start the timer if waitHandlerCounter_ is not yet joined.
908 if (auto optionalCountedHandler =
909 waitHandlerCounter_.wrap([this, onExpire, onError](boost::system::error_code const& e) {
910 if ((e.value() == boost::system::errc::success) && (!m_job_queue.isStopped()))
911 {
912 onExpire();
913 }
914 // Recover as best we can if an unexpected error occurs.
915 if (e.value() != boost::system::errc::success &&
916 e.value() != boost::asio::error::operation_aborted)
917 {
918 // Try again later and hope for the best.
919 JLOG(m_journal.error())
920 << "Timer got error '" << e.message() << "'. Restarting timer.";
921 onError();
922 }
923 }))
924 {
925 timer.expires_after(expiry_time);
926 timer.async_wait(std::move(*optionalCountedHandler));
927 }
928}
929
930void
931NetworkOPsImp::setHeartbeatTimer()
932{
933 setTimer(
934 heartbeatTimer_,
935 mConsensus.parms().ledgerGRANULARITY,
936 [this]() {
937 m_job_queue.addJob(jtNETOP_TIMER, "NetHeart", [this]() { processHeartbeatTimer(); });
938 },
939 [this]() { setHeartbeatTimer(); });
940}
941
942void
943NetworkOPsImp::setClusterTimer()
944{
945 using namespace std::chrono_literals;
946
947 setTimer(
948 clusterTimer_,
949 10s,
950 [this]() {
951 m_job_queue.addJob(jtNETOP_CLUSTER, "NetCluster", [this]() { processClusterTimer(); });
952 },
953 [this]() { setClusterTimer(); });
954}
955
// Schedule (or re-schedule) the historical-transaction streaming job for an
// account-history subscriber. Fires in 4 seconds; on timer error it simply
// re-arms itself with the same subscription info.
void
NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
{
    JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
                            << toBase58(subInfo.index_->accountId_);
    using namespace std::chrono_literals;
    setTimer(
        accountHistoryTxTimer_,
        4s,
        [this, subInfo]() { addAccountHistoryJob(subInfo); },
        [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
}
968
969void
970NetworkOPsImp::processHeartbeatTimer()
971{
972 RclConsensusLogger clog("Heartbeat Timer", mConsensus.validating(), m_journal);
973 {
974 std::unique_lock lock{registry_.app().getMasterMutex()};
975
976 // VFALCO NOTE This is for diagnosing a crash on exit
977 LoadManager& mgr(registry_.getLoadManager());
978 mgr.heartbeat();
979
980 std::size_t const numPeers = registry_.overlay().size();
981
982 // do we have sufficient peers? If not, we are disconnected.
983 if (numPeers < minPeerCount_)
984 {
985 if (mMode != OperatingMode::DISCONNECTED)
986 {
987 setMode(OperatingMode::DISCONNECTED);
989 ss << "Node count (" << numPeers << ") has fallen "
990 << "below required minimum (" << minPeerCount_ << ").";
991 JLOG(m_journal.warn()) << ss.str();
992 CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
993 }
994 else
995 {
996 CLOG(clog.ss()) << "already DISCONNECTED. too few peers (" << numPeers
997 << "), need at least " << minPeerCount_;
998 }
999
1000 // MasterMutex lock need not be held to call setHeartbeatTimer()
1001 lock.unlock();
1002 // We do not call mConsensus.timerEntry until there are enough
1003 // peers providing meaningful inputs to consensus
1004 setHeartbeatTimer();
1005
1006 return;
1007 }
1008
1009 if (mMode == OperatingMode::DISCONNECTED)
1010 {
1011 setMode(OperatingMode::CONNECTED);
1012 JLOG(m_journal.info()) << "Node count (" << numPeers << ") is sufficient.";
1013 CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers << " peers. ";
1014 }
1015
1016 // Check if the last validated ledger forces a change between these
1017 // states.
1018 auto origMode = mMode.load();
1019 CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
1020 if (mMode == OperatingMode::SYNCING)
1021 setMode(OperatingMode::SYNCING);
1022 else if (mMode == OperatingMode::CONNECTED)
1023 setMode(OperatingMode::CONNECTED);
1024 auto newMode = mMode.load();
1025 if (origMode != newMode)
1026 {
1027 CLOG(clog.ss()) << ", changing to " << strOperatingMode(newMode, true);
1028 }
1029 CLOG(clog.ss()) << ". ";
1030 }
1031
1032 mConsensus.timerEntry(registry_.timeKeeper().closeTime(), clog.ss());
1033
1034 CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
1035 ConsensusPhase const currPhase = mConsensus.phase();
1036 if (mLastConsensusPhase != currPhase)
1037 {
1038 reportConsensusStateChange(currPhase);
1039 mLastConsensusPhase = currPhase;
1040 CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
1041 }
1042 CLOG(clog.ss()) << ". ";
1043
1044 setHeartbeatTimer();
1045}
1046
1047void
1048NetworkOPsImp::processClusterTimer()
1049{
1050 if (registry_.cluster().size() == 0)
1051 return;
1052
1053 using namespace std::chrono_literals;
1054
1055 bool const update = registry_.cluster().update(
1056 registry_.app().nodeIdentity().first,
1057 "",
1058 (m_ledgerMaster.getValidatedLedgerAge() <= 4min) ? registry_.getFeeTrack().getLocalFee()
1059 : 0,
1060 registry_.timeKeeper().now());
1061
1062 if (!update)
1063 {
1064 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1065 setClusterTimer();
1066 return;
1067 }
1068
1069 protocol::TMCluster cluster;
1070 registry_.cluster().for_each([&cluster](ClusterNode const& node) {
1071 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1072 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1073 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1074 n.set_nodeload(node.getLoadFee());
1075 if (!node.name().empty())
1076 n.set_nodename(node.name());
1077 });
1078
1079 Resource::Gossip gossip = registry_.getResourceManager().exportConsumers();
1080 for (auto& item : gossip.items)
1081 {
1082 protocol::TMLoadSource& node = *cluster.add_loadsources();
1083 node.set_name(to_string(item.address));
1084 node.set_cost(item.balance);
1085 }
1086 registry_.overlay().foreach(
1087 send_if(std::make_shared<Message>(cluster, protocol::mtCLUSTER), peer_in_cluster()));
1088 setClusterTimer();
1089}
1090
1091//------------------------------------------------------------------------------
1092
1094NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin) const
1095{
1096 if (mode == OperatingMode::FULL && admin)
1097 {
1098 auto const consensusMode = mConsensus.mode();
1099 if (consensusMode != ConsensusMode::wrongLedger)
1100 {
1101 if (consensusMode == ConsensusMode::proposing)
1102 return "proposing";
1103
1104 if (mConsensus.validating())
1105 return "validating";
1106 }
1107 }
1108
1109 return states_[static_cast<std::size_t>(mode)];
1110}
1111
// Entry point for transactions originated by this server itself. Performs
// cheap synchronous checks (sync state, batch flag, cached-bad cache,
// signature/validity), then defers full processing to a job-queue task.
void
NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
{
    if (isNeedNetworkLedger())
    {
        // Nothing we can do if we've never been in sync
        return;
    }

    // Enforce Network bar for batch txn
    if (iTrans->isFlag(tfInnerBatchTxn) && m_ledgerMaster.getValidatedRules().enabled(featureBatch))
    {
        JLOG(m_journal.error()) << "Submitted transaction invalid: tfInnerBatchTxn flag present.";
        return;
    }

    // this is an asynchronous interface
    // NOTE(review): sterilize() presumably re-serializes to produce an
    // independent STTx copy -- confirm against its definition.
    auto const trans = sterilize(*iTrans);

    auto const txid = trans->getTransactionID();
    auto const flags = registry_.getHashRouter().getFlags(txid);

    // Drop transactions the HashRouter already knows to be bad.
    if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
    {
        JLOG(m_journal.warn()) << "Submitted transaction cached bad";
        return;
    }

    try
    {
        auto const [validity, reason] =
            checkValidity(registry_.getHashRouter(), *trans, m_ledgerMaster.getValidatedRules());

        if (validity != Validity::Valid)
        {
            JLOG(m_journal.warn()) << "Submitted transaction invalid: " << reason;
            return;
        }
    }
    catch (std::exception const& ex)
    {
        JLOG(m_journal.warn()) << "Exception checking transaction " << txid << ": " << ex.what();

        return;
    }

    std::string reason;

    auto tx = std::make_shared<Transaction>(trans, reason, registry_.app());

    // Full processing (apply/relay) happens asynchronously on the job queue.
    m_job_queue.addJob(jtTRANSACTION, "SubmitTxn", [this, tx]() {
        auto t = tx;
        processTransaction(t, false, false, FailHard::no);
    });
}
1167
1168bool
1169NetworkOPsImp::preProcessTransaction(std::shared_ptr<Transaction>& transaction)
1170{
1171 auto const newFlags = registry_.getHashRouter().getFlags(transaction->getID());
1172
1173 if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1174 {
1175 // cached bad
1176 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1177 transaction->setStatus(INVALID);
1178 transaction->setResult(temBAD_SIGNATURE);
1179 return false;
1180 }
1181
1182 auto const view = m_ledgerMaster.getCurrentLedger();
1183
1184 // This function is called by several different parts of the codebase
1185 // under no circumstances will we ever accept an inner txn within a batch
1186 // txn from the network.
1187 auto const sttx = *transaction->getSTransaction();
1188 if (sttx.isFlag(tfInnerBatchTxn) && view->rules().enabled(featureBatch))
1189 {
1190 transaction->setStatus(INVALID);
1191 transaction->setResult(temINVALID_FLAG);
1192 registry_.getHashRouter().setFlags(transaction->getID(), HashRouterFlags::BAD);
1193 return false;
1194 }
1195
1196 // NOTE ximinez - I think this check is redundant,
1197 // but I'm not 100% sure yet.
1198 // If so, only cost is looking up HashRouter flags.
1199 auto const [validity, reason] = checkValidity(registry_.getHashRouter(), sttx, view->rules());
1200 XRPL_ASSERT(
1201 validity == Validity::Valid, "xrpl::NetworkOPsImp::processTransaction : valid validity");
1202
1203 // Not concerned with local checks at this point.
1204 if (validity == Validity::SigBad)
1205 {
1206 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1207 transaction->setStatus(INVALID);
1208 transaction->setResult(temBAD_SIGNATURE);
1209 registry_.getHashRouter().setFlags(transaction->getID(), HashRouterFlags::BAD);
1210 return false;
1211 }
1212
1213 // canonicalize can change our pointer
1214 registry_.getMasterTransaction().canonicalize(&transaction);
1215
1216 return true;
1217}
1218
1219void
1220NetworkOPsImp::processTransaction(
1221 std::shared_ptr<Transaction>& transaction,
1222 bool bUnlimited,
1223 bool bLocal,
1224 FailHard failType)
1225{
1226 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1227
1228 // preProcessTransaction can change our pointer
1229 if (!preProcessTransaction(transaction))
1230 return;
1231
1232 if (bLocal)
1233 doTransactionSync(transaction, bUnlimited, failType);
1234 else
1235 doTransactionAsync(transaction, bUnlimited, failType);
1236}
1237
1238void
1239NetworkOPsImp::doTransactionAsync(
1240 std::shared_ptr<Transaction> transaction,
1241 bool bUnlimited,
1242 FailHard failType)
1243{
1244 std::lock_guard lock(mMutex);
1245
1246 if (transaction->getApplying())
1247 return;
1248
1249 mTransactions.push_back(TransactionStatus(transaction, bUnlimited, false, failType));
1250 transaction->setApplying();
1251
1252 if (mDispatchState == DispatchState::none)
1253 {
1254 if (m_job_queue.addJob(jtBATCH, "TxBatchAsync", [this]() { transactionBatch(); }))
1255 {
1256 mDispatchState = DispatchState::scheduled;
1257 }
1258 }
1259}
1260
1261void
1262NetworkOPsImp::doTransactionSync(
1263 std::shared_ptr<Transaction> transaction,
1264 bool bUnlimited,
1265 FailHard failType)
1266{
1267 std::unique_lock<std::mutex> lock(mMutex);
1268
1269 if (!transaction->getApplying())
1270 {
1271 mTransactions.push_back(TransactionStatus(transaction, bUnlimited, true, failType));
1272 transaction->setApplying();
1273 }
1274
1275 doTransactionSyncBatch(lock, [&transaction](std::unique_lock<std::mutex> const&) {
1276 return transaction->getApplying();
1277 });
1278}
1279
1280void
1281NetworkOPsImp::doTransactionSyncBatch(
1283 std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback)
1284{
1285 do
1286 {
1287 if (mDispatchState == DispatchState::running)
1288 {
1289 // A batch processing job is already running, so wait.
1290 mCond.wait(lock);
1291 }
1292 else
1293 {
1294 apply(lock);
1295
1296 if (mTransactions.size())
1297 {
1298 // More transactions need to be applied, but by another job.
1299 if (m_job_queue.addJob(jtBATCH, "TxBatchSync", [this]() { transactionBatch(); }))
1300 {
1301 mDispatchState = DispatchState::scheduled;
1302 }
1303 }
1304 }
1305 } while (retryCallback(lock));
1306}
1307
1308void
1309NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set)
1310{
1311 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXNSet");
1313 candidates.reserve(set.size());
1314 for (auto const& [_, tx] : set)
1315 {
1316 std::string reason;
1317 auto transaction = std::make_shared<Transaction>(tx, reason, registry_.app());
1318
1319 if (transaction->getStatus() == INVALID)
1320 {
1321 if (!reason.empty())
1322 {
1323 JLOG(m_journal.trace()) << "Exception checking transaction: " << reason;
1324 }
1325 registry_.getHashRouter().setFlags(tx->getTransactionID(), HashRouterFlags::BAD);
1326 continue;
1327 }
1328
1329 // preProcessTransaction can change our pointer
1330 if (!preProcessTransaction(transaction))
1331 continue;
1332
1333 candidates.emplace_back(transaction);
1334 }
1335
1336 std::vector<TransactionStatus> transactions;
1337 transactions.reserve(candidates.size());
1338
1339 std::unique_lock lock(mMutex);
1340
1341 for (auto& transaction : candidates)
1342 {
1343 if (!transaction->getApplying())
1344 {
1345 transactions.emplace_back(transaction, false, false, FailHard::no);
1346 transaction->setApplying();
1347 }
1348 }
1349
1350 if (mTransactions.empty())
1351 mTransactions.swap(transactions);
1352 else
1353 {
1354 mTransactions.reserve(mTransactions.size() + transactions.size());
1355 for (auto& t : transactions)
1356 mTransactions.push_back(std::move(t));
1357 }
1358 if (mTransactions.empty())
1359 {
1360 JLOG(m_journal.debug()) << "No transaction to process!";
1361 return;
1362 }
1363
1364 doTransactionSyncBatch(lock, [&](std::unique_lock<std::mutex> const&) {
1365 XRPL_ASSERT(lock.owns_lock(), "xrpl::NetworkOPsImp::processTransactionSet has lock");
1366 return std::any_of(mTransactions.begin(), mTransactions.end(), [](auto const& t) {
1367 return t.transaction->getApplying();
1368 });
1369 });
1370}
1371
1372void
1373NetworkOPsImp::transactionBatch()
1374{
1375 std::unique_lock<std::mutex> lock(mMutex);
1376
1377 if (mDispatchState == DispatchState::running)
1378 return;
1379
1380 while (mTransactions.size())
1381 {
1382 apply(lock);
1383 }
1384}
1385
// Apply the currently queued batch of submitted transactions to the open
// ledger, then classify, relay, and possibly re-queue each one.
//
// Called with `batchLock` (guarding mTransactions / mDispatchState) held.
// The lock is released while the ledger work is done and re-acquired before
// the queue and dispatch state are updated at the end.
void
NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
{
    // Take ownership of the queued work so the queue can refill while we run.
    std::vector<TransactionStatus> transactions;
    mTransactions.swap(transactions);
    XRPL_ASSERT(!transactions.empty(), "xrpl::NetworkOPsImp::apply : non-empty transactions");
    XRPL_ASSERT(
        mDispatchState != DispatchState::running, "xrpl::NetworkOPsImp::apply : is not running");

    mDispatchState = DispatchState::running;

    batchLock.unlock();

    {
        // Acquire the master and ledger mutexes together (std::lock avoids
        // deadlock by locking both atomically).
        std::unique_lock masterLock{registry_.app().getMasterMutex(), std::defer_lock};
        bool changed = false;
        {
            std::unique_lock ledgerLock{m_ledgerMaster.peekMutex(), std::defer_lock};
            std::lock(masterLock, ledgerLock);

            // Run each transaction through the transaction queue against the
            // open ledger, recording per-transaction result and applied flag.
            registry_.openLedger().modify([&](OpenView& view, beast::Journal j) {
                for (TransactionStatus& e : transactions)
                {
                    // we check before adding to the batch
                    ApplyFlags flags = tapNONE;
                    if (e.admin)
                        flags |= tapUNLIMITED;

                    if (e.failType == FailHard::yes)
                        flags |= tapFAIL_HARD;

                    auto const result = registry_.getTxQ().apply(
                        registry_.app(), view, e.transaction->getSTransaction(), flags, j);
                    e.result = result.ter;
                    e.applied = result.applied;
                    changed = changed || result.applied;
                }
                return changed;
            });
        }
        // Anything applied may have shifted the open-ledger fee metrics.
        if (changed)
            reportFeeChange();

        std::optional<LedgerIndex> validatedLedgerIndex;
        if (auto const l = m_ledgerMaster.getValidatedLedger())
            validatedLedgerIndex = l->header().seq;

        auto newOL = registry_.openLedger().current();
        for (TransactionStatus& e : transactions)
        {
            e.transaction->clearSubmitResult();

            if (e.applied)
            {
                // Notify proposed-transaction stream subscribers.
                pubProposedTransaction(newOL, e.transaction->getSTransaction(), e.result);
                e.transaction->setApplied();
            }

            e.transaction->setResult(e.result);

            // Malformed transactions are flagged so they are not held/retried.
            if (isTemMalformed(e.result))
                registry_.getHashRouter().setFlags(e.transaction->getID(), HashRouterFlags::BAD);

#ifdef DEBUG
            if (e.result != tesSUCCESS)
            {
                std::string token, human;

                if (transResultInfo(e.result, token, human))
                {
                    JLOG(m_journal.info()) << "TransactionResult: " << token << ": " << human;
                }
            }
#endif

            bool addLocal = e.local;

            if (e.result == tesSUCCESS)
            {
                JLOG(m_journal.debug()) << "Transaction is now included in open ledger";
                e.transaction->setStatus(INCLUDED);

                // Pop as many "reasonable" transactions for this account as
                // possible. "Reasonable" means they have sequential sequence
                // numbers, or use tickets.
                auto const& txCur = e.transaction->getSTransaction();

                std::size_t count = 0;
                for (auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
                     txNext && count < maxPoppedTransactions;
                     txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
                {
                    // Re-acquire the batch lock while touching shared state.
                    if (!batchLock.owns_lock())
                        batchLock.lock();
                    std::string reason;
                    auto const trans = sterilize(*txNext);
                    auto t = std::make_shared<Transaction>(trans, reason, registry_.app());
                    if (t->getApplying())
                        break;
                    // Queue the popped transaction for the next dispatch pass.
                    submit_held.emplace_back(t, false, false, FailHard::no);
                    t->setApplying();
                }
                if (batchLock.owns_lock())
                    batchLock.unlock();
            }
            else if (e.result == tefPAST_SEQ)
            {
                // duplicate or conflict
                JLOG(m_journal.info()) << "Transaction is obsolete";
                e.transaction->setStatus(OBSOLETE);
            }
            else if (e.result == terQUEUED)
            {
                JLOG(m_journal.debug()) << "Transaction is likely to claim a"
                                        << " fee, but is queued until fee drops";

                e.transaction->setStatus(HELD);
                // Add to held transactions, because it could get
                // kicked out of the queue, and this will try to
                // put it back.
                m_ledgerMaster.addHeldTransaction(e.transaction);
                e.transaction->setQueued();
                e.transaction->setKept();
            }
            else if (isTerRetry(e.result) || isTelLocal(e.result) || isTefFailure(e.result))
            {
                if (e.failType != FailHard::yes)
                {
                    auto const lastLedgerSeq =
                        e.transaction->getSTransaction()->at(~sfLastLedgerSequence);
                    auto const ledgersLeft = lastLedgerSeq
                        ? *lastLedgerSeq - m_ledgerMaster.getCurrentLedgerIndex()
                    // If any of these conditions are met, the transaction can
                    // be held:
                    // 1. It was submitted locally. (Note that this flag is only
                    //    true on the initial submission.)
                    // 2. The transaction has a LastLedgerSequence, and the
                    //    LastLedgerSequence is fewer than LocalTxs::holdLedgers
                    //    (5) ledgers into the future. (Remember that an
                    //    unseated optional compares as less than all seated
                    //    values, so it has to be checked explicitly first.)
                    // 3. The HashRouterFlags::BAD flag is not set on the txID.
                    //    (setFlags
                    //    checks before setting. If the flag is set, it returns
                    //    false, which means it's been held once without one of
                    //    the other conditions, so don't hold it again. Time's
                    //    up!)
                    //
                    if (e.local || (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
                        registry_.getHashRouter().setFlags(
                            e.transaction->getID(), HashRouterFlags::HELD))
                    {
                        // transaction should be held
                        JLOG(m_journal.debug()) << "Transaction should be held: " << e.result;
                        e.transaction->setStatus(HELD);
                        m_ledgerMaster.addHeldTransaction(e.transaction);
                        e.transaction->setKept();
                    }
                    else
                        JLOG(m_journal.debug())
                            << "Not holding transaction " << e.transaction->getID() << ": "
                            << (e.local ? "local" : "network") << ", "
                            << "result: " << e.result << " ledgers left: "
                            << (ledgersLeft ? to_string(*ledgersLeft) : "unspecified");
                }
            }
            else
            {
                JLOG(m_journal.debug()) << "Status other than success " << e.result;
                e.transaction->setStatus(INVALID);
            }

            // fail_hard requests that failing transactions are not kept/relayed.
            auto const enforceFailHard = e.failType == FailHard::yes && !isTesSuccess(e.result);

            if (addLocal && !enforceFailHard)
            {
                // Remember locally-submitted transactions so they survive
                // ledger switches.
                m_localTX->push_back(
                    m_ledgerMaster.getCurrentLedgerIndex(), e.transaction->getSTransaction());
                e.transaction->setKept();
            }

            // Relay the transaction to peers when it was applied, queued, or
            // locally submitted while not fully synced.
            if ((e.applied ||
                 ((mMode != OperatingMode::FULL) && (e.failType != FailHard::yes) && e.local) ||
                 (e.result == terQUEUED)) &&
                !enforceFailHard)
            {
                auto const toSkip = registry_.getHashRouter().shouldRelay(e.transaction->getID());
                if (auto const sttx = *(e.transaction->getSTransaction()); toSkip &&
                    // Skip relaying if it's an inner batch txn. The flag should
                    // only be set if the Batch feature is enabled. If Batch is
                    // not enabled, the flag is always invalid, so don't relay
                    // it regardless.
                    !(sttx.isFlag(tfInnerBatchTxn)))
                {
                    protocol::TMTransaction tx;
                    Serializer s;

                    sttx.add(s);
                    tx.set_rawtransaction(s.data(), s.size());
                    tx.set_status(protocol::tsCURRENT);
                    tx.set_receivetimestamp(
                        registry_.timeKeeper().now().time_since_epoch().count());
                    tx.set_deferred(e.result == terQUEUED);
                    // FIXME: This should be when we received it
                    registry_.overlay().relay(e.transaction->getID(), tx, *toSkip);
                    e.transaction->setBroadcast();
                }
            }

            if (validatedLedgerIndex)
            {
                // Record the fee/sequence context so submitters can be told
                // what the transaction now requires.
                auto [fee, accountSeq, availableSeq] = registry_.getTxQ().getTxRequiredFeeAndSeq(
                    *newOL, e.transaction->getSTransaction());
                e.transaction->setCurrentLedgerState(
                    *validatedLedgerIndex, fee, accountSeq, availableSeq);
            }
        }
    }

    batchLock.lock();

    for (TransactionStatus& e : transactions)
        e.transaction->clearApplying();

    // Merge any follow-on transactions popped above into the shared queue.
    if (!submit_held.empty())
    {
        if (mTransactions.empty())
            mTransactions.swap(submit_held);
        else
        {
            mTransactions.reserve(mTransactions.size() + submit_held.size());
            for (auto& e : submit_held)
                mTransactions.push_back(std::move(e));
        }
    }

    mCond.notify_all();

    mDispatchState = DispatchState::none;
}
1628
1629//
1630// Owner functions
1631//
1632
NetworkOPsImp::getOwnerInfo(std::shared_ptr<ReadView const> lpLedger, AccountID const& account)
{
    // Collect the ledger entries owned by `account` (offers and trust
    // lines) by walking the account's owner directory page by page.
    Json::Value jvObjects(Json::objectValue);
    auto root = keylet::ownerDir(account);
    auto sleNode = lpLedger->read(keylet::page(root));
    if (sleNode)
    {
        std::uint64_t uNodeDir;

        do
        {
            // Each directory page holds a vector of owned-entry indexes.
            for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
            {
                auto sleCur = lpLedger->read(keylet::child(uDirEntry));
                XRPL_ASSERT(sleCur, "xrpl::NetworkOPsImp::getOwnerInfo : non-null child SLE");

                switch (sleCur->getType())
                {
                    case ltOFFER:
                        if (!jvObjects.isMember(jss::offers))
                            jvObjects[jss::offers] = Json::Value(Json::arrayValue);

                        jvObjects[jss::offers].append(sleCur->getJson(JsonOptions::none));
                        break;

                    case ltRIPPLE_STATE:
                        if (!jvObjects.isMember(jss::ripple_lines))
                        {
                            jvObjects[jss::ripple_lines] = Json::Value(Json::arrayValue);
                        }

                        jvObjects[jss::ripple_lines].append(sleCur->getJson(JsonOptions::none));
                        break;

                    case ltACCOUNT_ROOT:
                    case ltDIR_NODE:
                    // LCOV_EXCL_START
                    default:
                        // Owner directories should only reference offers and
                        // trust lines.
                        UNREACHABLE(
                            "xrpl::NetworkOPsImp::getOwnerInfo : invalid "
                            "type");
                        break;
                        // LCOV_EXCL_STOP
                }
            }

            // Follow the chain to the next directory page (0 means done).
            uNodeDir = sleNode->getFieldU64(sfIndexNext);

            if (uNodeDir)
            {
                sleNode = lpLedger->read(keylet::page(root, uNodeDir));
                XRPL_ASSERT(sleNode, "xrpl::NetworkOPsImp::getOwnerInfo : read next page");
            }
        } while (uNodeDir);
    }

    return jvObjects;
}
1692
1693//
1694// Other
1695//
1696
1697inline bool
1698NetworkOPsImp::isBlocked()
1699{
1700 return isAmendmentBlocked() || isUNLBlocked();
1701}
1702
1703inline bool
1704NetworkOPsImp::isAmendmentBlocked()
1705{
1706 return amendmentBlocked_;
1707}
1708
void
NetworkOPsImp::setAmendmentBlocked()
{
    // Latch the amendment-blocked flag and drop back to CONNECTED; a
    // blocked server must not track or validate the network.
    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1715
1716inline bool
1717NetworkOPsImp::isAmendmentWarned()
1718{
1719 return !amendmentBlocked_ && amendmentWarned_;
1720}
1721
inline void
NetworkOPsImp::setAmendmentWarned()
{
    // Flag that one or more unsupported amendments have reached majority
    // (surfaced via server_info warnings).
    amendmentWarned_ = true;
}
1727
inline void
NetworkOPsImp::clearAmendmentWarned()
{
    // Reset the unsupported-amendment majority warning.
    amendmentWarned_ = false;
}
1733
1734inline bool
1735NetworkOPsImp::isUNLBlocked()
1736{
1737 return unlBlocked_;
1738}
1739
void
NetworkOPsImp::setUNLBlocked()
{
    // Latch the UNL-blocked flag (expired validator list) and fall back to
    // CONNECTED, mirroring setAmendmentBlocked().
    unlBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1746
inline void
NetworkOPsImp::clearUNLBlocked()
{
    // Clear the UNL-blocked latch.
    unlBlocked_ = false;
}
1752
bool
NetworkOPsImp::checkLastClosedLedger(Overlay::PeerSequence const& peerList, uint256& networkClosed)
{
    // Returns true if there's an *abnormal* ledger issue, normal changing in
    // TRACKING mode should return false. Do we have sufficient validations for
    // our last closed ledger? Or do sufficient nodes agree? And do we have no
    // better ledger available? If so, we are either tracking or full.
    //
    // On return, `networkClosed` is set to the hash of the network's agreed
    // last closed ledger (or our own when we decline to switch).

    JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";

    auto const ourClosed = m_ledgerMaster.getClosedLedger();

    if (!ourClosed)
        return false;

    uint256 closedLedger = ourClosed->header().hash;
    uint256 prevClosedLedger = ourClosed->header().parentHash;
    JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
    JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;

    //-------------------------------------------------------------------------
    // Determine preferred last closed ledger

    auto& validations = registry_.getValidations();
    JLOG(m_journal.debug()) << "ValidationTrie " << Json::Compact(validations.getJsonTrie());

    // Will rely on peer LCL if no trusted validations exist
    // Count our own vote only if we are at least TRACKING.
    peerCounts[closedLedger] = 0;
    if (mMode >= OperatingMode::TRACKING)
        peerCounts[closedLedger]++;

    // Tally each peer's reported last closed ledger.
    for (auto& peer : peerList)
    {
        uint256 peerLedger = peer->getClosedLedgerHash();

        if (peerLedger.isNonZero())
            ++peerCounts[peerLedger];
    }

    for (auto const& it : peerCounts)
        JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;

    // Combine trusted validations with peer counts to pick the preferred LCL.
    uint256 preferredLCL = validations.getPreferredLCL(
        RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
        m_ledgerMaster.getValidLedgerIndex(),
        peerCounts);

    bool switchLedgers = preferredLCL != closedLedger;
    if (switchLedgers)
        closedLedger = preferredLCL;
    //-------------------------------------------------------------------------
    if (switchLedgers && (closedLedger == prevClosedLedger))
    {
        // don't switch to our own previous ledger
        JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
        networkClosed = ourClosed->header().hash;
        switchLedgers = false;
    }
    else
        networkClosed = closedLedger;

    if (!switchLedgers)
        return false;

    // We prefer a different ledger: fetch it if we don't already have it.
    auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);

    if (!consensus)
        consensus = registry_.getInboundLedgers().acquire(
            closedLedger, 0, InboundLedger::Reason::CONSENSUS);

    if (consensus &&
        (!m_ledgerMaster.canBeCurrent(consensus) ||
         !m_ledgerMaster.isCompatible(*consensus, m_journal.debug(), "Not switching")))
    {
        // Don't switch to a ledger not on the validated chain
        // or with an invalid close time or sequence
        networkClosed = ourClosed->header().hash;
        return false;
    }

    JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
    JLOG(m_journal.info()) << "Our LCL: " << ourClosed->header().hash << getJson({*ourClosed, {}});
    JLOG(m_journal.info()) << "Net LCL " << closedLedger;

    // We are off the network's ledger: downgrade from TRACKING/FULL.
    if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
    {
        setMode(OperatingMode::CONNECTED);
    }

    if (consensus)
    {
        // FIXME: If this rewinds the ledger sequence, or has the same
        // sequence, we should update the status on any stored transactions
        // in the invalidated ledgers.
        switchLastClosedLedger(consensus);
    }

    return true;
}
1853
void
NetworkOPsImp::switchLastClosedLedger(std::shared_ptr<Ledger const> const& newLCL)
{
    // set the newLCL as our last closed ledger -- this is abnormal code
    JLOG(m_journal.error()) << "JUMP last closed ledger to " << newLCL->header().hash;

    clearNeedNetworkLedger();

    // Update fee computations.
    registry_.getTxQ().processClosedLedger(registry_.app(), *newLCL, true);

    // Caller must own master lock
    {
        // Apply tx in old open ledger to new
        // open ledger. Then apply local tx.

        auto retries = m_localTX->getTxSet();
        auto const lastVal = registry_.getLedgerMaster().getValidatedLedger();
        // Choose the amendment rules from the last validated ledger when
        // available, otherwise fall back to the configured feature set.
        if (lastVal)
            rules = makeRulesGivenLedger(*lastVal, registry_.app().config().features);
        else
            rules.emplace(registry_.app().config().features);
        registry_.openLedger().accept(
            registry_.app(),
            *rules,
            newLCL,
            OrderedTxs({}),
            false,
            retries,
            tapNONE,
            "jump",
            [&](OpenView& view, beast::Journal j) {
                // Stuff the ledger with transactions from the queue.
                return registry_.getTxQ().accept(registry_.app(), view);
            });
    }

    m_ledgerMaster.switchLCL(newLCL);

    // Tell peers we jumped to a different last closed ledger.
    protocol::TMStatusChange s;
    s.set_newevent(protocol::neSWITCHED_LEDGER);
    s.set_ledgerseq(newLCL->header().seq);
    s.set_networktime(registry_.timeKeeper().now().time_since_epoch().count());
    s.set_ledgerhashprevious(
        newLCL->header().parentHash.begin(), newLCL->header().parentHash.size());
    s.set_ledgerhash(newLCL->header().hash.begin(), newLCL->header().hash.size());

    registry_.overlay().foreach(
        send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
}
1905
// Start a new consensus round on top of the current open ledger's parent.
// Returns false when the previous ledger is unavailable.
bool
NetworkOPsImp::beginConsensus(
    uint256 const& networkClosed,
{
    XRPL_ASSERT(networkClosed.isNonZero(), "xrpl::NetworkOPsImp::beginConsensus : nonzero input");

    auto closingInfo = m_ledgerMaster.getCurrentLedger()->header();

    JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq << " with LCL "
                           << closingInfo.parentHash;

    auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);

    if (!prevLedger)
    {
        // this shouldn't happen unless we jump ledgers
        if (mMode == OperatingMode::FULL)
        {
            JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
            setMode(OperatingMode::TRACKING);
            CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
        }

        CLOG(clog) << "beginConsensus no previous ledger. ";
        return false;
    }

    XRPL_ASSERT(
        prevLedger->header().hash == closingInfo.parentHash,
        "xrpl::NetworkOPsImp::beginConsensus : prevLedger hash matches "
        "parent");
    XRPL_ASSERT(
        closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->header().hash,
        "xrpl::NetworkOPsImp::beginConsensus : closedLedger parent matches "
        "hash");

    // Refresh the trusted validator set for this round, honoring the
    // previous ledger's negative UNL.
    registry_.validators().setNegativeUNL(prevLedger->negativeUNL());
    TrustChanges const changes = registry_.validators().updateTrusted(
        registry_.getValidations().getCurrentNodeIDs(),
        closingInfo.parentCloseTime,
        *this,
        registry_.overlay(),
        registry_.getHashRouter());

    if (!changes.added.empty() || !changes.removed.empty())
    {
        registry_.getValidations().trustChanged(changes.added, changes.removed);
        // Update the AmendmentTable so it tracks the current validators.
        registry_.getAmendmentTable().trustChanged(registry_.validators().getQuorumKeys().second);
    }

    mConsensus.startRound(
        registry_.timeKeeper().closeTime(),
        networkClosed,
        prevLedger,
        changes.removed,
        changes.added,
        clog);

    // Report a phase change to subscribers if starting the round moved us.
    ConsensusPhase const currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
    }

    JLOG(m_journal.debug()) << "Initiating consensus engine";
    return true;
}
1976
1977bool
1978NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
1979{
1980 auto const& peerKey = peerPos.publicKey();
1981 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
1982 {
1983 // Could indicate a operator misconfiguration where two nodes are
1984 // running with the same validator key configured, so this isn't fatal,
1985 // and it doesn't necessarily indicate peer misbehavior. But since this
1986 // is a trusted message, it could be a very big deal. Either way, we
1987 // don't want to relay the proposal. Note that the byzantine behavior
1988 // detection in handleNewValidation will notify other peers.
1989 //
1990 // Another, innocuous explanation is unusual message routing and delays,
1991 // causing this node to receive its own messages back.
1992 JLOG(m_journal.error()) << "Received a proposal signed by MY KEY from a peer. This may "
1993 "indicate a misconfiguration where another node has the same "
1994 "validator key, or may be caused by unusual message routing and "
1995 "delays.";
1996 return false;
1997 }
1998
1999 return mConsensus.peerProposal(registry_.timeKeeper().closeTime(), peerPos);
2000}
2001
2002void
2003NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
2004{
2005 // We now have an additional transaction set
2006 // either created locally during the consensus process
2007 // or acquired from a peer
2008
2009 // Inform peers we have this set
2010 protocol::TMHaveTransactionSet msg;
2011 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
2012 msg.set_status(protocol::tsHAVE);
2013 registry_.overlay().foreach(send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
2014
2015 // We acquired it because consensus asked us to
2016 if (fromAcquire)
2017 mConsensus.gotTxSet(registry_.timeKeeper().closeTime(), RCLTxSet{map});
2018}
2019
// Wrap up a consensus round: retire stale peer status, re-evaluate which
// last closed ledger the network agrees on, adjust our operating mode, and
// kick off the next round.
void
NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
{
    uint256 deadLedger = m_ledgerMaster.getClosedLedger()->header().parentHash;

    // Peers still reporting our now-superseded parent ledger are stale.
    for (auto const& it : registry_.overlay().getActivePeers())
    {
        if (it && (it->getClosedLedgerHash() == deadLedger))
        {
            JLOG(m_journal.trace()) << "Killing obsolete peer status";
            it->cycleStatus();
        }
    }

    uint256 networkClosed;
    bool ledgerChange = checkLastClosedLedger(registry_.overlay().getActivePeers(), networkClosed);

    if (networkClosed.isZero())
    {
        CLOG(clog) << "endConsensus last closed ledger is zero. ";
        return;
    }

    // WRITEME: Unless we are in FULL and in the process of doing a consensus,
    // we must count how many nodes share our LCL, how many nodes disagree with
    // our LCL, and how many validations our LCL has. We also want to check
    // timing to make sure there shouldn't be a newer LCL. We need this
    // information to do the next three tests.

    if (((mMode == OperatingMode::CONNECTED) || (mMode == OperatingMode::SYNCING)) && !ledgerChange)
    {
        // Count number of peers that agree with us and UNL nodes whose
        // validations we have for LCL. If the ledger is good enough, go to
        // TRACKING - TODO
        if (!needNetworkLedger_)
            setMode(OperatingMode::TRACKING);
    }

    if (((mMode == OperatingMode::CONNECTED) || (mMode == OperatingMode::TRACKING)) &&
        !ledgerChange)
    {
        // check if the ledger is good enough to go to FULL
        // Note: Do not go to FULL if we don't have the previous ledger
        // check if the ledger is bad enough to go to CONNECTED -- TODO
        auto current = m_ledgerMaster.getCurrentLedger();
        // Only promote to FULL while the current ledger is still fresh
        // (within twice the close-time resolution of its parent's close).
        if (registry_.timeKeeper().now() <
            (current->header().parentCloseTime + 2 * current->header().closeTimeResolution))
        {
            setMode(OperatingMode::FULL);
        }
    }

    beginConsensus(networkClosed, clog);
}
2074
2075void
2076NetworkOPsImp::consensusViewChange()
2077{
2078 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2079 {
2080 setMode(OperatingMode::CONNECTED);
2081 }
2082}
2083
// Publish a "manifestReceived" message to manifest-stream subscribers,
// pruning any subscriptions whose InfoSub has expired.
void
NetworkOPsImp::pubManifest(Manifest const& mo)
{
    // VFALCO consider std::shared_mutex
    std::lock_guard sl(mSubLock);

    if (!mStreamMaps[sManifests].empty())
    {

        jvObj[jss::type] = "manifestReceived";
        jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
        if (mo.signingKey)
            jvObj[jss::signing_key] = toBase58(TokenType::NodePublic, *mo.signingKey);
        jvObj[jss::seq] = Json::UInt(mo.sequence);
        if (auto sig = mo.getSignature())
            jvObj[jss::signature] = strHex(*sig);
        jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
        if (!mo.domain.empty())
            jvObj[jss::domain] = mo.domain;
        jvObj[jss::manifest] = strHex(mo.serialized);

        // Send to each live subscriber; erase entries whose weak_ptr died.
        for (auto i = mStreamMaps[sManifests].begin(); i != mStreamMaps[sManifests].end();)
        {
            if (auto p = i->second.lock())
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sManifests].erase(i);
            }
        }
    }
}
2120
// Snapshot of the server's fee state: base fee, load fee factors, and the
// transaction queue's escalation metrics. Used to detect fee changes worth
// publishing to the "server" stream.
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    XRPAmount fee,
    TxQ::Metrics&& escalationMetrics,
    LoadFeeTrack const& loadFeeTrack)
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , baseFee{fee}
    , em{std::move(escalationMetrics)}
{
}
2131
// Inequality for ServerFeeSummary: true when any scalar component, the
// presence of escalation metrics, or (when both present) any fee level
// differs.
bool
{
    // A difference in any scalar field, or in whether metrics exist at
    // all, settles the comparison immediately.
    if (loadFactorServer != b.loadFactorServer || loadBaseServer != b.loadBaseServer ||
        baseFee != b.baseFee || em.has_value() != b.em.has_value())
        return true;

    // Both sides carry metrics: compare the individual fee levels.
    if (em && b.em)
    {
        return (
            em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
            em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
            em->referenceFeeLevel != b.em->referenceFeeLevel);
    }

    // Neither side has metrics and every scalar matched.
    return false;
}
2149
// Need to cap uint64 values to uint32 due to JSON limitations
static std::uint32_t
{

    return std::min(max32, v);
};
2158
// Publish a "serverStatus" message (operating mode, load and fee state) to
// server-stream subscribers, pruning dead subscriptions as we go.
void
{
    // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
    //             list into a local array while holding the lock then release
    //             the lock and call send on everyone.
    //

    if (!mStreamMaps[sServer].empty())
    {

            registry_.openLedger().current()->fees().base,

        jvObj[jss::type] = "serverStatus";
        jvObj[jss::server_status] = strOperatingMode();
        jvObj[jss::load_base] = f.loadBaseServer;
        jvObj[jss::load_factor_server] = f.loadFactorServer;
        jvObj[jss::base_fee] = f.baseFee.jsonClipped();

        if (f.em)
        {
            // Effective load factor is the larger of the server load factor
            // and the open-ledger fee escalation level scaled to load units.
            auto const loadFactor = std::max(
                safe_cast<std::uint64_t>(f.loadFactorServer),
                mulDiv(f.em->openLedgerFeeLevel, f.loadBaseServer, f.em->referenceFeeLevel)

            jvObj[jss::load_factor] = trunc32(loadFactor);
            jvObj[jss::load_factor_fee_escalation] = f.em->openLedgerFeeLevel.jsonClipped();
            jvObj[jss::load_factor_fee_queue] = f.em->minProcessingFeeLevel.jsonClipped();
            jvObj[jss::load_factor_fee_reference] = f.em->referenceFeeLevel.jsonClipped();
        }
        else
            jvObj[jss::load_factor] = f.loadFactorServer;

        // Remember what we published so reportFeeChange can detect changes.
        mLastFeeSummary = f;

        for (auto i = mStreamMaps[sServer].begin(); i != mStreamMaps[sServer].end();)
        {
            InfoSub::pointer p = i->second.lock();

            // VFALCO TODO research the possibility of using thread queues and
            //             linearizing the deletion of subscribers with the
            //             sending of JSON data.
            if (p)
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sServer].erase(i);
            }
        }
    }
}
2219
// Publish a "consensusPhase" message to consensus-stream subscribers,
// pruning dead subscriptions as we go.
void
{

    auto& streamMap = mStreamMaps[sConsensusPhase];
    if (!streamMap.empty())
    {
        jvObj[jss::type] = "consensusPhase";
        jvObj[jss::consensus] = to_string(phase);

        for (auto i = streamMap.begin(); i != streamMap.end();)
        {
            if (auto p = i->second.lock())
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = streamMap.erase(i);
            }
        }
    }
}
2246
// Publish a "validationReceived" message to validations-stream subscribers,
// pruning dead subscriptions. Fee/reserve fields are reported in whichever
// units the validation carried (fee levels or XRP drops).
void
{
    // VFALCO consider std::shared_mutex

    if (!mStreamMaps[sValidations].empty())
    {

        auto const signerPublic = val->getSignerPublic();

        jvObj[jss::type] = "validationReceived";
        jvObj[jss::validation_public_key] = toBase58(TokenType::NodePublic, signerPublic);
        jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
        jvObj[jss::signature] = strHex(val->getSignature());
        jvObj[jss::full] = val->isFull();
        jvObj[jss::flags] = val->getFlags();
        jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
        jvObj[jss::data] = strHex(val->getSerializer().slice());
        jvObj[jss::network_id] = registry_.getNetworkIDService().getNetworkID();

        if (auto version = (*val)[~sfServerVersion])
            jvObj[jss::server_version] = std::to_string(*version);

        if (auto cookie = (*val)[~sfCookie])
            jvObj[jss::cookie] = std::to_string(*cookie);

        if (auto hash = (*val)[~sfValidatedHash])
            jvObj[jss::validated_hash] = strHex(*hash);

        // Report the master key too when the signer is an ephemeral key.
        auto const masterKey = registry_.validatorManifests().getMasterKey(signerPublic);

        if (masterKey != signerPublic)
            jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);

        // NOTE *seq is a number, but old API versions used string. We replace
        // number with a string using MultiApiJson near end of this function
        if (auto const seq = (*val)[~sfLedgerSequence])
            jvObj[jss::ledger_index] = *seq;

        if (val->isFieldPresent(sfAmendments))
        {
            jvObj[jss::amendments] = Json::Value(Json::arrayValue);
            for (auto const& amendment : val->getFieldV256(sfAmendments))
                jvObj[jss::amendments].append(to_string(amendment));
        }

        if (auto const closeTime = (*val)[~sfCloseTime])
            jvObj[jss::close_time] = *closeTime;

        if (auto const loadFee = (*val)[~sfLoadFee])
            jvObj[jss::load_fee] = *loadFee;

        if (auto const baseFee = val->at(~sfBaseFee))
            jvObj[jss::base_fee] = static_cast<double>(*baseFee);

        if (auto const reserveBase = val->at(~sfReserveBase))
            jvObj[jss::reserve_base] = *reserveBase;

        if (auto const reserveInc = val->at(~sfReserveIncrement))
            jvObj[jss::reserve_inc] = *reserveInc;

        // (The ~ operator converts the Proxy to a std::optional, which
        // simplifies later operations)
        if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops); baseFeeXRP && baseFeeXRP->native())
            jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();

        if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
            reserveBaseXRP && reserveBaseXRP->native())
            jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();

        if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
            reserveIncXRP && reserveIncXRP->native())
            jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();

        // NOTE Use MultiApiJson to publish two slightly different JSON objects
        // for consumers supporting different API versions
        MultiApiJson multiObj{jvObj};
        multiObj.visit(
            RPC::apiVersion<1>, //
            [](Json::Value& jvTx) {
                // Type conversion for older API versions to string
                if (jvTx.isMember(jss::ledger_index))
                {
                    jvTx[jss::ledger_index] = std::to_string(jvTx[jss::ledger_index].asUInt());
                }
            });

        // Send each subscriber the variant matching its API version.
        for (auto i = mStreamMaps[sValidations].begin(); i != mStreamMaps[sValidations].end();)
        {
            if (auto p = i->second.lock())
            {
                multiObj.visit(
                    p->getApiVersion(), //
                    [&](Json::Value const& jv) { p->send(jv, true); });
                ++i;
            }
            else
            {
                i = mStreamMaps[sValidations].erase(i);
            }
        }
    }
}
2352
// Publish a "peerStatusChange" message to peer-status subscribers. The
// payload is produced lazily by `func` only when subscribers exist; dead
// subscriptions are pruned while iterating.
void
{

    if (!mStreamMaps[sPeerStatus].empty())
    {
        Json::Value jvObj(func());

        jvObj[jss::type] = "peerStatusChange";

        for (auto i = mStreamMaps[sPeerStatus].begin(); i != mStreamMaps[sPeerStatus].end();)
        {
            InfoSub::pointer p = i->second.lock();

            if (p)
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sPeerStatus].erase(i);
            }
        }
    }
}
2380
// Transition the server's operating mode, notifying accounting and the
// server stream when the mode actually changes.
void
{
    using namespace std::chrono_literals;
    if (om == OperatingMode::CONNECTED)
    {
    }
    else if (om == OperatingMode::SYNCING)
    {
    }

    // A blocked server (amendment or UNL) is never allowed above CONNECTED.
    if ((om > OperatingMode::CONNECTED) && isBlocked())

    if (mMode == om)
        return;

    mMode = om;

    // Track time spent in each mode and announce the new state.
    accounting_.mode(om);

    JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
    pubServer();
}
2409
// Process a validation received from `source`. Returns true when the
// validation should be relayed to peers (always for trusted validations;
// untrusted only when RELAY_UNTRUSTED_VALIDATIONS is configured).
bool
{
    JLOG(m_journal.trace()) << "recvValidation " << val->getLedgerHash() << " from " << source;

    BypassAccept bypassAccept = BypassAccept::no;
    try
    {
        // If another thread is already handling a validation for this
        // ledger hash, skip the (expensive) accept path for this one.
        if (pendingValidations_.contains(val->getLedgerHash()))
            bypassAccept = BypassAccept::yes;
        else
            pendingValidations_.insert(val->getLedgerHash());
        // Drop the lock while handleNewValidation runs; scope_unlock
        // re-acquires it when this scope exits.
        scope_unlock unlock(lock);
        handleNewValidation(registry_.app(), val, source, bypassAccept, m_journal);
    }
    catch (std::exception const& e)
    {
        JLOG(m_journal.warn()) << "Exception thrown for handling new validation "
                               << val->getLedgerHash() << ": " << e.what();
    }
    catch (...)
    {
        JLOG(m_journal.warn()) << "Unknown exception thrown for handling new validation "
                               << val->getLedgerHash();
    }
    // Only the thread that inserted the pending entry removes it.
    if (bypassAccept == BypassAccept::no)
    {
        pendingValidations_.erase(val->getLedgerHash());
    }
    lock.unlock();

    pubValidation(val);

    JLOG(m_journal.debug()) << [this, &val]() -> auto {
        ss << "VALIDATION: " << val->render() << " master_key: ";
        auto master = registry_.validators().getTrustedKey(val->getSignerPublic());
        if (master)
        {
            ss << toBase58(TokenType::NodePublic, *master);
        }
        else
        {
            ss << "none";
        }
        return ss.str();
    }();

    // We will always relay trusted validations; if configured, we will
    // also relay all untrusted validations.
    return registry_.app().config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
}
2463
2466{
2467 return mConsensus.getJson(true);
2468}
2469
// Build the JSON body of the `server_info` / `server_state` RPC response.
//   human    - when true, emit human-readable fields (decimal XRP, ratios);
//              when false, emit raw machine-readable integers.
//   admin    - when true, include admin-only fields (node_size, validator
//              list details, local load factors, job-queue load, hostid).
//   counters - when true, include perf-log counters and current activities.
// NOTE(review): this listing elides several original lines (the embedded
// numbering jumps); elisions are flagged inline below. The declaration of
// the result object `info` (original line ~2473) is one of them.
2471NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2472{
2474
2475 // System-level warnings
2476 {
2477 Json::Value warnings{Json::arrayValue};
2478 if (isAmendmentBlocked())
2479 {
2480 Json::Value& w = warnings.append(Json::objectValue);
2481 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2482 w[jss::message] =
2483 "This server is amendment blocked, and must be updated to be "
2484 "able to stay in sync with the network.";
2485 }
2486 if (isUNLBlocked())
2487 {
2488 Json::Value& w = warnings.append(Json::objectValue);
2489 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2490 w[jss::message] =
2491 "This server has an expired validator list. validators.txt "
2492 "may be incorrectly configured or some [validator_list_sites] "
2493 "may be unreachable.";
2494 }
2495 if (admin && isAmendmentWarned())
2496 {
2497 Json::Value& w = warnings.append(Json::objectValue);
2498 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2499 w[jss::message] =
2500 "One or more unsupported amendments have reached majority. "
2501 "Upgrade to the latest version before they are activated "
2502 "to avoid being amendment blocked.";
2503 if (auto const expected = registry_.getAmendmentTable().firstUnsupportedExpected())
2504 {
2505 auto& d = w[jss::details] = Json::objectValue;
2506 d[jss::expected_date] = expected->time_since_epoch().count();
2507 d[jss::expected_date_UTC] = to_string(*expected);
2508 }
2509 }
2510
2511 if (warnings.size())
2512 info[jss::warnings] = std::move(warnings);
2513 }
2514
2515 // hostid: unique string describing the machine
2516 if (human)
2517 info[jss::hostid] = getHostId(admin);
2518
2519 // domain: if configured with a domain, report it:
2521 info[jss::server_domain] = registry_.app().config().SERVER_DOMAIN;
2522
2523 info[jss::build_version] = BuildInfo::getVersionString();
2524
2525 info[jss::server_state] = strOperatingMode(admin);
2526
2527 info[jss::time] =
2528 to_string(std::chrono::floor<std::chrono::microseconds>(std::chrono::system_clock::now()))
2529
// NOTE(review): line 2530 elided — presumably the guard condition (e.g.
// "if (needNetworkLedger_)") that makes network_ledger conditional; confirm.
2531 info[jss::network_ledger] = "waiting";
2532
2533 info[jss::validation_quorum] = static_cast<Json::UInt>(registry_.validators().quorum());
2534
2535 if (admin)
2536 {
// Map the configured NODE_SIZE integer to its human-readable name.
2537 switch (registry_.app().config().NODE_SIZE)
2538 {
2539 case 0:
2540 info[jss::node_size] = "tiny";
2541 break;
2542 case 1:
2543 info[jss::node_size] = "small";
2544 break;
2545 case 2:
2546 info[jss::node_size] = "medium";
2547 break;
2548 case 3:
2549 info[jss::node_size] = "large";
2550 break;
2551 case 4:
2552 info[jss::node_size] = "huge";
2553 break;
2554 }
2555
2556 auto when = registry_.validators().expires();
2557
2558 if (!human)
2559 {
// Machine form: expiration as raw epoch count (0 if unknown).
2560 if (when)
2561 info[jss::validator_list_expires] =
2562 safe_cast<Json::UInt>(when->time_since_epoch().count());
2563 else
2564 info[jss::validator_list_expires] = 0;
2565 }
2566 else
2567 {
// Human form: nested validator_list object with count/expiration/status.
2568 auto& x = (info[jss::validator_list] = Json::objectValue);
2569
2570 x[jss::count] = static_cast<Json::UInt>(registry_.validators().count());
2571
2572 if (when)
2573 {
2574 if (*when == TimeKeeper::time_point::max())
2575 {
2576 x[jss::expiration] = "never";
2577 x[jss::status] = "active";
2578 }
2579 else
2580 {
2581 x[jss::expiration] = to_string(*when);
2582
2583 if (*when > registry_.timeKeeper().now())
2584 x[jss::status] = "active";
2585 else
2586 x[jss::status] = "expired";
2587 }
2588 }
2589 else
2590 {
2591 x[jss::status] = "unknown";
2592 x[jss::expiration] = "unknown";
2593 }
2594 }
2595
// Embed build-time git metadata when the build system defines it.
2596#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2597 {
2598 auto& x = (info[jss::git] = Json::objectValue);
2599#ifdef GIT_COMMIT_HASH
2600 x[jss::hash] = GIT_COMMIT_HASH;
2601#endif
2602#ifdef GIT_BRANCH
2603 x[jss::branch] = GIT_BRANCH;
2604#endif
2605 }
2606#endif
2607 }
2608 info[jss::io_latency_ms] = static_cast<Json::UInt>(registry_.app().getIOLatency().count());
2609
2610 if (admin)
2611 {
2612 if (auto const localPubKey = registry_.validators().localPublicKey();
2613 localPubKey && registry_.app().getValidationPublicKey())
2614 {
2615 info[jss::pubkey_validator] = toBase58(TokenType::NodePublic, localPubKey.value());
2616 }
2617 else
2618 {
2619 info[jss::pubkey_validator] = "none";
2620 }
2621 }
2622
2623 if (counters)
2624 {
2625 info[jss::counters] = registry_.getPerfLog().countersJson();
2626
// NOTE(review): line 2628 elided — presumably populates `nodestore`
// (e.g. getNodeStore().getCountsJson(nodestore)); confirm.
2627 Json::Value nodestore(Json::objectValue);
2629 info[jss::counters][jss::nodestore] = nodestore;
2630 info[jss::current_activities] = registry_.getPerfLog().currentJson();
2631 }
2632
2633 info[jss::pubkey_node] = toBase58(TokenType::NodePublic, registry_.app().nodeIdentity().first);
2634
2635 info[jss::complete_ledgers] = registry_.getLedgerMaster().getCompleteLedgers();
2636
// NOTE(review): line 2637 elided — presumably "if (isAmendmentBlocked())"
// guarding the amendment_blocked field; confirm.
2638 info[jss::amendment_blocked] = true;
2639
2640 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2641
2642 if (fp != 0)
2643 info[jss::fetch_pack] = Json::UInt(fp);
2644
2645 info[jss::peers] = Json::UInt(registry_.overlay().size());
2646
2647 Json::Value lastClose = Json::objectValue;
2648 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2649
2650 if (human)
2651 {
// NOTE(review): line 2653 elided — the value assigned to converge_time_s
// (presumably prevRoundTime scaled to decimal seconds) is not visible.
2652 lastClose[jss::converge_time_s] =
2654 }
2655 else
2656 {
2657 lastClose[jss::converge_time] = Json::Int(mConsensus.prevRoundTime().count());
2658 }
2659
2660 info[jss::last_close] = lastClose;
2661
2662 // info[jss::consensus] = mConsensus.getJson();
2663
2664 if (admin)
2665 info[jss::load] = m_job_queue.getJson();
2666
2667 if (auto const netid = registry_.overlay().networkID())
2668 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2669
2670 auto const escalationMetrics = registry_.getTxQ().getMetrics(*registry_.openLedger().current());
2671
2672 auto const loadFactorServer = registry_.getFeeTrack().getLoadFactor();
2673 auto const loadBaseServer = registry_.getFeeTrack().getLoadBase();
2674 /* Scale the escalated fee level to unitless "load factor".
2675 In practice, this just strips the units, but it will continue
2676 to work correctly if either base value ever changes. */
// NOTE(review): line 2681 elided — presumably the handling of mulDiv's
// optional result (e.g. .value_or of a clamped maximum); confirm.
2677 auto const loadFactorFeeEscalation = mulDiv(
2678 escalationMetrics.openLedgerFeeLevel,
2679 loadBaseServer,
2680 escalationMetrics.referenceFeeLevel)
2682
2683 auto const loadFactor =
2684 std::max(safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2685
2686 if (!human)
2687 {
2688 info[jss::load_base] = loadBaseServer;
2689 info[jss::load_factor] = trunc32(loadFactor);
2690 info[jss::load_factor_server] = loadFactorServer;
2691
2692 /* Json::Value doesn't support uint64, so clamp to max
2693 uint32 value. This is mostly theoretical, since there
2694 probably isn't enough extant XRP to drive the factor
2695 that high.
2696 */
2697 info[jss::load_factor_fee_escalation] = escalationMetrics.openLedgerFeeLevel.jsonClipped();
2698 info[jss::load_factor_fee_queue] = escalationMetrics.minProcessingFeeLevel.jsonClipped();
2699 info[jss::load_factor_fee_reference] = escalationMetrics.referenceFeeLevel.jsonClipped();
2700 }
2701 else
2702 {
2703 info[jss::load_factor] = static_cast<double>(loadFactor) / loadBaseServer;
2704
2705 if (loadFactorServer != loadFactor)
2706 info[jss::load_factor_server] = static_cast<double>(loadFactorServer) / loadBaseServer;
2707
2708 if (admin)
2709 {
// NOTE(review): lines 2710/2713/2716 elided — each block presumably
// reassigns `fee` from a different fee-track source (local/remote/cluster)
// before the comparison; confirm against the full source.
2711 if (fee != loadBaseServer)
2712 info[jss::load_factor_local] = static_cast<double>(fee) / loadBaseServer;
2714 if (fee != loadBaseServer)
2715 info[jss::load_factor_net] = static_cast<double>(fee) / loadBaseServer;
2717 if (fee != loadBaseServer)
2718 info[jss::load_factor_cluster] = static_cast<double>(fee) / loadBaseServer;
2719 }
2720 if (escalationMetrics.openLedgerFeeLevel != escalationMetrics.referenceFeeLevel &&
2721 (admin || loadFactorFeeEscalation != loadFactor))
2722 info[jss::load_factor_fee_escalation] =
2723 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2724 escalationMetrics.referenceFeeLevel);
2725 if (escalationMetrics.minProcessingFeeLevel != escalationMetrics.referenceFeeLevel)
2726 info[jss::load_factor_fee_queue] =
2727 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2728 escalationMetrics.referenceFeeLevel);
2729 }
2730
// Prefer the validated ledger; fall back to the closed ledger and report
// it under closed_ledger instead of validated_ledger.
2731 bool valid = false;
2732 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2733
2734 if (lpClosed)
2735 valid = true;
2736 else
2737 lpClosed = m_ledgerMaster.getClosedLedger();
2738
2739 if (lpClosed)
2740 {
// NOTE(review): line 2742 elided — the declaration of the ledger-summary
// object `l` (presumably Json::Value l(Json::objectValue)) is not visible.
2741 XRPAmount const baseFee = lpClosed->fees().base;
2743 l[jss::seq] = Json::UInt(lpClosed->header().seq);
2744 l[jss::hash] = to_string(lpClosed->header().hash);
2745
2746 if (!human)
2747 {
2748 l[jss::base_fee] = baseFee.jsonClipped();
2749 l[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
2750 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2751 l[jss::close_time] =
2752 Json::Value::UInt(lpClosed->header().closeTime.time_since_epoch().count());
2753 }
2754 else
2755 {
2756 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2757 l[jss::reserve_base_xrp] = lpClosed->fees().reserve.decimalXRP();
2758 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2759
// Only report a close-time offset when it is at least a minute.
2760 if (auto const closeOffset = registry_.timeKeeper().closeOffset();
2761 std::abs(closeOffset.count()) >= 60)
2762 l[jss::close_time_offset] = static_cast<std::uint32_t>(closeOffset.count());
2763
// NOTE(review): line 2765 elided — the condition selecting between the
// validated-ledger-age branch and the computed-age branch (presumably
// "if (valid)") is not visible; confirm.
2764 constexpr std::chrono::seconds highAgeThreshold{1000000};
2766 {
2767 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2768 l[jss::age] = Json::UInt(age < highAgeThreshold ? age.count() : 0);
2769 }
2770 else
2771 {
2772 auto lCloseTime = lpClosed->header().closeTime;
2773 auto closeTime = registry_.timeKeeper().closeTime();
2774 if (lCloseTime <= closeTime)
2775 {
2776 using namespace std::chrono_literals;
2777 auto age = closeTime - lCloseTime;
2778 l[jss::age] = Json::UInt(age < highAgeThreshold ? age.count() : 0);
2779 }
2780 }
2781 }
2782
2783 if (valid)
2784 info[jss::validated_ledger] = l;
2785 else
2786 info[jss::closed_ledger] = l;
2787
2788 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2789 if (!lpPublished)
2790 info[jss::published_ledger] = "none";
2791 else if (lpPublished->header().seq != lpClosed->header().seq)
2792 info[jss::published_ledger] = lpPublished->header().seq;
2793 }
2794
2795 accounting_.json(info);
2796 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2797 info[jss::jq_trans_overflow] = std::to_string(registry_.overlay().getJqTransOverflow());
2798 info[jss::peer_disconnects] = std::to_string(registry_.overlay().getPeerDisconnect());
// NOTE(review): line 2800 elided — the value assigned to
// peer_disconnects_resources (presumably overlay().getPeerDisconnectCharges())
// is not visible.
2799 info[jss::peer_disconnects_resources] =
2801
2802 // This array must be sorted in increasing order.
2803 static constexpr std::array<std::string_view, 7> protocols{
2804 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2805 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2806 {
// NOTE(review): line 2807 elided — the declaration of `ports` (presumably
// Json::Value ports{Json::arrayValue}) is not visible.
2808 for (auto const& port : registry_.getServerHandler().setup().ports)
2809 {
2810 // Don't publish admin ports for non-admin users
2811 if (!admin &&
2812 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2813 port.admin_user.empty() && port.admin_password.empty()))
2814 continue;
// NOTE(review): lines 2815-2816 elided — presumably the declaration of
// `proto` and the start of a std::set_intersection call whose argument
// list follows; confirm.
2817 std::begin(port.protocol),
2818 std::end(port.protocol),
2819 std::begin(protocols),
2820 std::end(protocols),
2821 std::back_inserter(proto));
2822 if (!proto.empty())
2823 {
2824 auto& jv = ports.append(Json::Value(Json::objectValue));
2825 jv[jss::port] = std::to_string(port.port);
2826 jv[jss::protocol] = Json::Value{Json::arrayValue};
2827 for (auto const& p : proto)
2828 jv[jss::protocol].append(p);
2829 }
2830 }
2831
// Also advertise the gRPC port when [port_grpc] is configured with ip+port.
2832 if (registry_.app().config().exists(SECTION_PORT_GRPC))
2833 {
2834 auto const& grpcSection = registry_.app().config().section(SECTION_PORT_GRPC);
2835 auto const optPort = grpcSection.get("port");
2836 if (optPort && grpcSection.get("ip"))
2837 {
2838 auto& jv = ports.append(Json::Value(Json::objectValue));
2839 jv[jss::port] = *optPort;
2840 jv[jss::protocol] = Json::Value{Json::arrayValue};
2841 jv[jss::protocol].append("grpc");
2842 }
2843 }
2844 info[jss::ports] = std::move(ports);
2845 }
2846
2847 return info;
2848}
2849
2850void
2855
2861
// Publish a proposed (not yet validated) transaction to all subscribers of
// the real-time transactions stream, then fan out to per-account real-time
// subscribers via pubProposedAccountTransaction.
//   ledger      - the open ledger the transaction was applied against
//   transaction - the proposed transaction
//   result      - the provisional engine result (TER)
// NOTE(review): the function-name line (original 2863) and the stream-map
// lock acquisition (original 2878) are elided in this listing.
2862void
2864 std::shared_ptr<ReadView const> const& ledger,
2865 std::shared_ptr<STTx const> const& transaction,
2866 TER result)
2867{
2868 // never publish an inner txn inside a batch txn. The flag should
2869 // only be set if the Batch feature is enabled. If Batch is not
2870 // enabled, the flag is always invalid, so don't publish it
2871 // regardless.
2872 if (transaction->isFlag(tfInnerBatchTxn))
2873 return;
2874
2875 MultiApiJson jvObj = transJson(transaction, result, false, ledger, std::nullopt);
2876
2877 {
2879
// Walk the real-time subscriber map; send to live subscribers (in their
// own API version) and prune entries whose weak_ptr has expired.
2880 auto it = mStreamMaps[sRTTransactions].begin();
2881 while (it != mStreamMaps[sRTTransactions].end())
2882 {
2883 InfoSub::pointer p = it->second.lock();
2884
2885 if (p)
2886 {
2887 jvObj.visit(
2888 p->getApiVersion(), //
2889 [&](Json::Value const& jv) { p->send(jv, true); });
2890 ++it;
2891 }
2892 else
2893 {
2894 it = mStreamMaps[sRTTransactions].erase(it);
2895 }
2896 }
2897 }
2898
2899 pubProposedAccountTransaction(ledger, transaction, result);
2900}
2901
// Publish a newly accepted/validated ledger: emit a "ledgerClosed" message
// to ledger-stream subscribers, book changes to book-changes subscribers,
// kick off any pending account-history subscriptions, and finally publish
// each transaction in the ledger via pubValidatedTransaction.
// NOTE(review): the signature line (original 2903), the declaration of
// `alpAccepted` (original 2908), the stream-map lock (original 2925), the
// declaration of `jvObj` (original 2929), and a validated-range condition
// (original 2947) are elided in this listing.
2902void
2904{
2905 // Ledgers are published only when they acquire sufficient validations
2906 // Holes are filled across connection loss or other catastrophe
2907
// Fetch (or build and cache) the AcceptedLedger wrapper for this ledger.
2909 registry_.getAcceptedLedgerCache().fetch(lpAccepted->header().hash);
2910 if (!alpAccepted)
2911 {
2912 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted);
2913 registry_.getAcceptedLedgerCache().canonicalize_replace_client(
2914 lpAccepted->header().hash, alpAccepted);
2915 }
2916
2917 XRPL_ASSERT(
2918 alpAccepted->getLedger().get() == lpAccepted.get(),
2919 "xrpl::NetworkOPsImp::pubLedger : accepted input");
2920
2921 {
2922 JLOG(m_journal.debug()) << "Publishing ledger " << lpAccepted->header().seq << " "
2923 << lpAccepted->header().hash;
2924
2926
2927 if (!mStreamMaps[sLedger].empty())
2928 {
2930
// Assemble the ledgerClosed notification for ledger-stream subscribers.
2931 jvObj[jss::type] = "ledgerClosed";
2932 jvObj[jss::ledger_index] = lpAccepted->header().seq;
2933 jvObj[jss::ledger_hash] = to_string(lpAccepted->header().hash);
2934 jvObj[jss::ledger_time] =
2935 Json::Value::UInt(lpAccepted->header().closeTime.time_since_epoch().count());
2936
2937 jvObj[jss::network_id] = registry_.getNetworkIDService().getNetworkID();
2938
2939 if (!lpAccepted->rules().enabled(featureXRPFees))
2940 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
2941 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
2942 jvObj[jss::reserve_base] = lpAccepted->fees().reserve.jsonClipped();
2943 jvObj[jss::reserve_inc] = lpAccepted->fees().increment.jsonClipped();
2944
2945 jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
2946
// NOTE(review): the condition on original line 2947 is elided —
// presumably it gates validated_ledgers on the operating mode; confirm.
2948 {
2949 jvObj[jss::validated_ledgers] = registry_.getLedgerMaster().getCompleteLedgers();
2950 }
2951
// Send to live ledger-stream subscribers; prune expired weak_ptr entries.
2952 auto it = mStreamMaps[sLedger].begin();
2953 while (it != mStreamMaps[sLedger].end())
2954 {
2955 InfoSub::pointer p = it->second.lock();
2956 if (p)
2957 {
2958 p->send(jvObj, true);
2959 ++it;
2960 }
2961 else
2962 it = mStreamMaps[sLedger].erase(it);
2963 }
2964 }
2965
2966 if (!mStreamMaps[sBookChanges].empty())
2967 {
2968 Json::Value jvObj = xrpl::RPC::computeBookChanges(lpAccepted);
2969
// Same live/expired walk for book-changes subscribers.
2970 auto it = mStreamMaps[sBookChanges].begin();
2971 while (it != mStreamMaps[sBookChanges].end())
2972 {
2973 InfoSub::pointer p = it->second.lock();
2974 if (p)
2975 {
2976 p->send(jvObj, true);
2977 ++it;
2978 }
2979 else
2980 it = mStreamMaps[sBookChanges].erase(it);
2981 }
2982 }
2983
2984 {
2985 static bool firstTime = true;
2986 if (firstTime)
2987 {
2988 // First validated ledger, start delayed SubAccountHistory
2989 firstTime = false;
2990 for (auto& outer : mSubAccountHistory)
2991 {
2992 for (auto& inner : outer.second)
2993 {
2994 auto& subInfo = inner.second;
2995 if (subInfo.index_->separationLedgerSeq_ == 0)
2996 {
2997 subAccountHistoryStart(alpAccepted->getLedger(), subInfo);
2998 }
2999 }
3000 }
3001 }
3002 }
3003 }
3004
3005 // Don't lock since pubAcceptedTransaction is locking.
3006 for (auto const& accTx : *alpAccepted)
3007 {
3008 JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
3009 pubValidatedTransaction(lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3010 }
3011}
3012
// Schedule a server-fee publication job when the fee summary has changed
// since the last publication.
// NOTE(review): the signature line (original 3014) and the construction of
// the fee summary `f` (originals 3016, 3018-3019) are elided; only the base
// fee of the current open ledger is visible as one of its inputs.
3013void
3015{
3017 registry_.openLedger().current()->fees().base,
3020
3021 // only schedule the job if something has changed
3022 if (f != mLastFeeSummary)
3023 {
3024 m_job_queue.addJob(jtCLIENT_FEE_CHANGE, "PubFee", [this]() { pubServer(); });
3025 }
3026}
3027
// Queue a job that publishes the new consensus phase to subscribers.
// NOTE(review): the signature line (original 3029) is elided — the body
// captures a `phase` parameter and forwards it to pubConsensus.
3028void
3030{
3031 m_job_queue.addJob(jtCLIENT_CONSENSUS, "PubCons", [this, phase]() { pubConsensus(phase); });
3032}
3033
// Sweep expired local transactions against the given view.
// NOTE(review): the signature line (original 3035) is elided — the body
// takes a `view` parameter forwarded to LocalTxs::sweep.
3034inline void
3036{
3037 m_localTX->sweep(view);
3038}
// Number of local transactions currently held. (Signature line, original
// 3040, is elided in this listing.)
3039inline std::size_t
3041{
3042 return m_localTX->size();
3043}
3044
3045// This routine should only be used to publish accepted or validated
3046// transactions.
// Build the multi-API-version JSON notification for a transaction:
//   transaction - the STTx being published
//   result      - engine result (TER)
//   validated   - true for validated ledgers ("closed"), false for proposed
//   ledger      - the ledger the transaction belongs to
//   (elided)    - optional metadata reference, used when present
// Returns a MultiApiJson holding one finished object per API version
// (v1 keeps jss::transaction; v2+ renames it to jss::tx_json and hoists
// the hash to the top level).
// NOTE(review): the return-type/name lines (originals 3047-3048), the meta
// parameter line (3053), the declaration of `jvObj` (3055), and the
// forAllApiVersions wrapper line (3127) are elided in this listing.
3049 std::shared_ptr<STTx const> const& transaction,
3050 TER result,
3051 bool validated,
3052 std::shared_ptr<ReadView const> const& ledger,
3054{
3056 std::string sToken;
3057 std::string sHuman;
3058
3059 transResultInfo(result, sToken, sHuman);
3060
3061 jvObj[jss::type] = "transaction";
3062 // NOTE jvObj is not a finished object for either API version. After
3063 // it's populated, we need to finish it for a specific API version. This is
3064 // done in a loop, near the end of this function.
3065 jvObj[jss::transaction] = transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3066
3067 if (meta)
3068 {
3069 jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
3070 RPC::insertDeliveredAmount(jvObj[jss::meta], *ledger, transaction, meta->get());
3071 RPC::insertNFTSyntheticInJson(jvObj, transaction, meta->get());
3072 RPC::insertMPTokenIssuanceID(jvObj[jss::meta], transaction, meta->get());
3073 }
3074
3075 // add CTID where the needed data for it exists
3076 if (auto const& lookup = ledger->txRead(transaction->getTransactionID());
3077 lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3078 {
3079 uint32_t const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
3080 uint32_t netID = registry_.getNetworkIDService().getNetworkID();
// A transaction's explicit NetworkID overrides the server's network ID.
3081 if (transaction->isFieldPresent(sfNetworkID))
3082 netID = transaction->getFieldU32(sfNetworkID);
3083
3084 if (std::optional<std::string> ctid = RPC::encodeCTID(ledger->header().seq, txnSeq, netID);
3085 ctid)
3086 jvObj[jss::ctid] = *ctid;
3087 }
3088 if (!ledger->open())
3089 jvObj[jss::ledger_hash] = to_string(ledger->header().hash);
3090
3091 if (validated)
3092 {
3093 jvObj[jss::ledger_index] = ledger->header().seq;
3094 jvObj[jss::transaction][jss::date] = ledger->header().closeTime.time_since_epoch().count();
3095 jvObj[jss::validated] = true;
3096 jvObj[jss::close_time_iso] = to_string_iso(ledger->header().closeTime);
3097
3098 // WRITEME: Put the account next seq here
3099 }
3100 else
3101 {
3102 jvObj[jss::validated] = false;
3103 jvObj[jss::ledger_current_index] = ledger->header().seq;
3104 }
3105
3106 jvObj[jss::status] = validated ? "closed" : "proposed";
3107 jvObj[jss::engine_result] = sToken;
3108 jvObj[jss::engine_result_code] = result;
3109 jvObj[jss::engine_result_message] = sHuman;
3110
3111 if (transaction->getTxnType() == ttOFFER_CREATE)
3112 {
3113 auto const account = transaction->getAccountID(sfAccount);
3114 auto const amount = transaction->getFieldAmount(sfTakerGets);
3115
3116 // If the offer create is not self funded then add the owner balance
3117 if (account != amount.issue().account)
3118 {
3119 auto const ownerFunds =
3120 accountFunds(*ledger, account, amount, fhIGNORE_FREEZE, registry_.journal("View"));
3121 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3122 }
3123 }
3124
// Finish the object per API version: insert DeliverMax, and for v2+ move
// jss::transaction to jss::tx_json with the hash at top level.
3125 std::string const hash = to_string(transaction->getTransactionID());
3126 MultiApiJson multiObj{jvObj};
3128 multiObj.visit(), //
3129 [&]<unsigned Version>(Json::Value& jvTx, std::integral_constant<unsigned, Version>) {
3130 RPC::insertDeliverMax(jvTx[jss::transaction], transaction->getTxnType(), Version);
3131
3132 if constexpr (Version > 1)
3133 {
3134 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3135 jvTx[jss::hash] = hash;
3136 }
3137 else
3138 {
3139 jvTx[jss::transaction][jss::hash] = hash;
3140 }
3141 });
3142
3143 return multiObj;
3144}
3145
// Publish a validated transaction: send it (per subscriber API version) to
// both the transactions and real-time-transactions streams, feed successful
// transactions to the order book DB, then fan out per-account notifications.
//   ledger      - the validated ledger containing the transaction
//   transaction - the accepted ledger transaction (tx + meta + result)
//   last        - true when this is the final transaction of the ledger
// NOTE(review): the signature line (original 3147) and the stream-map lock
// (original 3160) are elided in this listing.
3146void
3148 std::shared_ptr<ReadView const> const& ledger,
3149 AcceptedLedgerTx const& transaction,
3150 bool last)
3151{
3152 auto const& stTxn = transaction.getTxn();
3153
3154 // Create two different Json objects, for different API versions
3155 auto const metaRef = std::ref(transaction.getMeta());
3156 auto const trResult = transaction.getResult();
3157 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3158
3159 {
3161
// Transactions stream: send to live subscribers, prune dead weak_ptrs.
3162 auto it = mStreamMaps[sTransactions].begin();
3163 while (it != mStreamMaps[sTransactions].end())
3164 {
3165 InfoSub::pointer p = it->second.lock();
3166
3167 if (p)
3168 {
3169 jvObj.visit(
3170 p->getApiVersion(), //
3171 [&](Json::Value const& jv) { p->send(jv, true); });
3172 ++it;
3173 }
3174 else
3175 it = mStreamMaps[sTransactions].erase(it);
3176 }
3177
// Real-time stream also receives validated transactions.
3178 it = mStreamMaps[sRTTransactions].begin();
3179
3180 while (it != mStreamMaps[sRTTransactions].end())
3181 {
3182 InfoSub::pointer p = it->second.lock();
3183
3184 if (p)
3185 {
3186 jvObj.visit(
3187 p->getApiVersion(), //
3188 [&](Json::Value const& jv) { p->send(jv, true); });
3189 ++it;
3190 }
3191 else
3192 it = mStreamMaps[sRTTransactions].erase(it);
3193 }
3194 }
3195
3196 if (transaction.getResult() == tesSUCCESS)
3197 registry_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3198
3199 pubAccountTransaction(ledger, transaction, last);
3200}
3201
// Notify per-account subscribers (real-time, accepted, and account-history)
// about a validated transaction affecting their accounts.
//   ledger      - the validated ledger
//   transaction - the accepted transaction (affected accounts come from it)
//   last        - true for the ledger's final transaction; sets
//                 account_history_boundary on the notification
// NOTE(review): the signature line (original 3203), the declaration of
// `notify` (3208), the lock (3215), and an enclosing condition (3217) are
// elided in this listing.
3202void
3204 std::shared_ptr<ReadView const> const& ledger,
3205 AcceptedLedgerTx const& transaction,
3206 bool last)
3207{
3209 int iProposed = 0;
3210 int iAccepted = 0;
3211
3212 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3213 auto const currLedgerSeq = ledger->seq();
3214 {
3216
3218 {
3219 for (auto const& affectedAccount : transaction.getAffected())
3220 {
// Real-time per-account subscribers: collect live ones, prune expired.
3221 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3222 simiIt != mSubRTAccount.end())
3223 {
3224 auto it = simiIt->second.begin();
3225
3226 while (it != simiIt->second.end())
3227 {
3228 InfoSub::pointer p = it->second.lock();
3229
3230 if (p)
3231 {
3232 notify.insert(p);
3233 ++it;
3234 ++iProposed;
3235 }
3236 else
3237 it = simiIt->second.erase(it);
3238 }
3239 }
3240
// Accepted-transaction per-account subscribers: same collect/prune walk.
3241 if (auto simiIt = mSubAccount.find(affectedAccount); simiIt != mSubAccount.end())
3242 {
3243 auto it = simiIt->second.begin();
3244 while (it != simiIt->second.end())
3245 {
3246 InfoSub::pointer p = it->second.lock();
3247
3248 if (p)
3249 {
3250 notify.insert(p);
3251 ++it;
3252 ++iAccepted;
3253 }
3254 else
3255 it = simiIt->second.erase(it);
3256 }
3257 }
3258
// Account-history subscribers: skip those whose separation ledger has
// not been passed yet; drop expired sinks and empty submaps.
3259 if (auto historyIt = mSubAccountHistory.find(affectedAccount);
3260 historyIt != mSubAccountHistory.end())
3261 {
3262 auto& subs = historyIt->second;
3263 auto it = subs.begin();
3264 while (it != subs.end())
3265 {
3266 SubAccountHistoryInfoWeak const& info = it->second;
3267 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3268 {
3269 ++it;
3270 continue;
3271 }
3272
3273 if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3274 {
3275 accountHistoryNotify.emplace_back(
3276 SubAccountHistoryInfo{isSptr, info.index_});
3277 ++it;
3278 }
3279 else
3280 {
3281 it = subs.erase(it);
3282 }
3283 }
3284 if (subs.empty())
3285 mSubAccountHistory.erase(historyIt);
3286 }
3287 }
3288 }
3289 }
3290
3291 JLOG(m_journal.trace()) << "pubAccountTransaction: "
3292 << "proposed=" << iProposed << ", accepted=" << iAccepted;
3293
3294 if (!notify.empty() || !accountHistoryNotify.empty())
3295 {
3296 auto const& stTxn = transaction.getTxn();
3297
3298 // Create two different Json objects, for different API versions
3299 auto const metaRef = std::ref(transaction.getMeta());
3300 auto const trResult = transaction.getResult();
3301 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3302
3303 for (InfoSub::ref isrListener : notify)
3304 {
3305 jvObj.visit(
3306 isrListener->getApiVersion(), //
3307 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3308 }
3309
3310 if (last)
3311 jvObj.set(jss::account_history_boundary, true);
3312
3313 XRPL_ASSERT(
3314 jvObj.isMember(jss::account_history_tx_stream) == MultiApiJson::none,
3315 "xrpl::NetworkOPsImp::pubAccountTransaction : "
3316 "account_history_tx_stream not set");
// History subscribers additionally get a forward index (and a first-tx
// marker when the forward stream starts with no historical backfill).
3317 for (auto& info : accountHistoryNotify)
3318 {
3319 auto& index = info.index_;
3320 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3321 jvObj.set(jss::account_history_tx_first, true);
3322
3323 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3324
3325 jvObj.visit(
3326 info.sink_->getApiVersion(), //
3327 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3328 }
3329 }
3330}
3331
// Notify real-time per-account subscribers about a proposed (unvalidated)
// transaction mentioning their accounts. Returns early when there are no
// real-time account subscriptions at all.
// NOTE(review): the signature line (original 3333), the tx parameter line
// (3335), the declaration of `notify` (3338), the lock (3344), and an
// enclosing condition (3349) are elided in this listing.
3332void
3334 std::shared_ptr<ReadView const> const& ledger,
3336 TER result)
3337{
3339 int iProposed = 0;
3340
3341 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3342
3343 {
3345
3346 if (mSubRTAccount.empty())
3347 return;
3348
3350 {
3351 for (auto const& affectedAccount : tx->getMentionedAccounts())
3352 {
// Collect live real-time subscribers for each mentioned account; prune
// entries whose weak_ptr has expired.
3353 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3354 simiIt != mSubRTAccount.end())
3355 {
3356 auto it = simiIt->second.begin();
3357
3358 while (it != simiIt->second.end())
3359 {
3360 InfoSub::pointer p = it->second.lock();
3361
3362 if (p)
3363 {
3364 notify.insert(p);
3365 ++it;
3366 ++iProposed;
3367 }
3368 else
3369 it = simiIt->second.erase(it);
3370 }
3371 }
3372 }
3373 }
3374 }
3375
3376 JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3377
3378 if (!notify.empty() || !accountHistoryNotify.empty())
3379 {
3380 // Create two different Json objects, for different API versions
3381 MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3382
3383 for (InfoSub::ref isrListener : notify)
3384 jvObj.visit(
3385 isrListener->getApiVersion(), //
3386 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3387
3388 XRPL_ASSERT(
3389 jvObj.isMember(jss::account_history_tx_stream) == MultiApiJson::none,
3390 "xrpl::NetworkOPs::pubProposedAccountTransaction : "
3391 "account_history_tx_stream not set");
// NOTE(review): accountHistoryNotify is never populated in the visible
// code of this function, so this loop appears to be dead here — unless an
// elided line fills it; confirm against the full source.
3392 for (auto& info : accountHistoryNotify)
3393 {
3394 auto& index = info.index_;
3395 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3396 jvObj.set(jss::account_history_tx_first, true);
3397 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3398 jvObj.visit(
3399 info.sink_->getApiVersion(), //
3400 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3401 }
3402 }
3403}
3404
3405//
3406// Monitoring
3407//
3408
// Subscribe a listener to notifications for a set of accounts.
//   isrListener   - the subscriber
//   vnaAccountIDs - accounts to watch
//   rt            - true to register on the real-time (proposed) map,
//                   false for the accepted-transaction map
// NOTE(review): the signature line (original 3410) and the lock protecting
// the subscription maps (original 3424) are elided in this listing.
3409void
3411 InfoSub::ref isrListener,
3412 hash_set<AccountID> const& vnaAccountIDs,
3413 bool rt)
3414{
3415 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3416
// First record the subscription on the InfoSub itself.
3417 for (auto const& naAccountID : vnaAccountIDs)
3418 {
3419 JLOG(m_journal.trace()) << "subAccount: account: " << toBase58(naAccountID);
3420
3421 isrListener->insertSubAccountInfo(naAccountID, rt);
3422 }
3423
3425
// Then record it in the server-side per-account map, keyed by the
// listener's sequence number.
3426 for (auto const& naAccountID : vnaAccountIDs)
3427 {
3428 auto simIterator = subMap.find(naAccountID);
3429 if (simIterator == subMap.end())
3430 {
3431 // Not found, note that account has a new single listener.
3432 SubMapType usisElement;
3433 usisElement[isrListener->getSeq()] = isrListener;
3434 // VFALCO NOTE This is making a needless copy of naAccountID
3435 subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3436 }
3437 else
3438 {
3439 // Found, note that the account has another listener.
3440 simIterator->second[isrListener->getSeq()] = isrListener;
3441 }
3442 }
3443}
3444
// Unsubscribe a listener from a set of accounts: remove the records from
// the InfoSub itself, then from the server-side maps via
// unsubAccountInternal. `rt` selects the real-time vs accepted map.
// NOTE(review): the signature line (original 3446) is elided.
3445void
3447 InfoSub::ref isrListener,
3448 hash_set<AccountID> const& vnaAccountIDs,
3449 bool rt)
3450{
3451 for (auto const& naAccountID : vnaAccountIDs)
3452 {
3453 // Remove from the InfoSub
3454 isrListener->deleteSubAccountInfo(naAccountID, rt);
3455 }
3456
3457 // Remove from the server
3458 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3459}
3460
// Remove a subscriber (identified by its InfoSub sequence number) from the
// server-side per-account subscription map, erasing the per-account entry
// entirely when its last listener is removed.
// NOTE(review): the signature line (original 3462) and the lock (original
// 3467) are elided in this listing.
3461void
3463 std::uint64_t uSeq,
3464 hash_set<AccountID> const& vnaAccountIDs,
3465 bool rt)
3466{
3468
3469 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3470
3471 for (auto const& naAccountID : vnaAccountIDs)
3472 {
3473 auto simIterator = subMap.find(naAccountID);
3474
3475 if (simIterator != subMap.end())
3476 {
3477 // Found
3478 simIterator->second.erase(uSeq);
3479
3480 if (simIterator->second.empty())
3481 {
3482 // Don't need hash entry.
3483 subMap.erase(simIterator);
3484 }
3485 }
3486 }
3487}
3488
3489void
3491{
3492 enum DatabaseType { Sqlite, None };
3493 static auto const databaseType = [&]() -> DatabaseType {
3494 // Use a dynamic_cast to return DatabaseType::None
3495 // on failure.
3496 if (dynamic_cast<SQLiteDatabase*>(&registry_.getRelationalDatabase()))
3497 {
3498 return DatabaseType::Sqlite;
3499 }
3500 return DatabaseType::None;
3501 }();
3502
3503 if (databaseType == DatabaseType::None)
3504 {
3505 // LCOV_EXCL_START
3506 UNREACHABLE("xrpl::NetworkOPsImp::addAccountHistoryJob : no database");
3507 JLOG(m_journal.error()) << "AccountHistory job for account "
3508 << toBase58(subInfo.index_->accountId_) << " no database";
3509 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3510 {
3511 sptr->send(rpcError(rpcINTERNAL), true);
3512 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3513 }
3514 return;
3515 // LCOV_EXCL_STOP
3516 }
3517
3519 jtCLIENT_ACCT_HIST, "HistTxStream", [this, dbType = databaseType, subInfo]() {
3520 auto const& accountId = subInfo.index_->accountId_;
3521 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3522 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3523
3524 JLOG(m_journal.trace()) << "AccountHistory job for account " << toBase58(accountId)
3525 << " started. lastLedgerSeq=" << lastLedgerSeq;
3526
3527 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3528 std::shared_ptr<TxMeta> const& meta) -> bool {
3529 /*
3530 * genesis account: first tx is the one with seq 1
3531 * other account: first tx is the one created the account
3532 */
3533 if (accountId == genesisAccountId)
3534 {
3535 auto stx = tx->getSTransaction();
3536 if (stx->getAccountID(sfAccount) == accountId && stx->getSeqValue() == 1)
3537 return true;
3538 }
3539
3540 for (auto& node : meta->getNodes())
3541 {
3542 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3543 continue;
3544
3545 if (node.isFieldPresent(sfNewFields))
3546 {
3547 if (auto inner =
3548 dynamic_cast<STObject const*>(node.peekAtPField(sfNewFields));
3549 inner)
3550 {
3551 if (inner->isFieldPresent(sfAccount) &&
3552 inner->getAccountID(sfAccount) == accountId)
3553 {
3554 return true;
3555 }
3556 }
3557 }
3558 }
3559
3560 return false;
3561 };
3562
// NOTE(review): this is a rendered listing — several original source lines
// are elided (see gaps in the embedded line numbers, e.g. the
// AccountTxPageOptions initializer before 3601). Code is reproduced as-is.
//
// Body of the AccountHistory backfill job: walks the validated ledger range
// backwards in 1024-ledger windows, streaming the account's historical
// transactions (newest first) to the subscriber, until the genesis ledger,
// the account's first transaction, or a stop/unsubscribe request.
//
// Deliver one JSON object to the subscriber if it is still alive; when
// 'unsubscribe' is set, also tear down the history subscription.
// Returns false when the InfoSub weak pointer has expired.
3563 auto send = [&](Json::Value const& jvObj, bool unsubscribe) -> bool {
3564 if (auto sptr = subInfo.sinkWptr_.lock())
3565 {
3566 sptr->send(jvObj, true);
3567 if (unsubscribe)
3568 unsubAccountHistory(sptr, accountId, false);
3569 return true;
3570 }
3571
3572 return false;
3573 };
3574
// Same as 'send', but for API-version-dependent payloads: visit() selects
// the JSON representation matching the subscriber's API version.
3575 auto sendMultiApiJson = [&](MultiApiJson const& jvObj, bool unsubscribe) -> bool {
3576 if (auto sptr = subInfo.sinkWptr_.lock())
3577 {
3578 jvObj.visit(
3579 sptr->getApiVersion(), //
3580 [&](Json::Value const& jv) { sptr->send(jv, true); });
3581
3582 if (unsubscribe)
3583 unsubAccountHistory(sptr, accountId, false);
3584 return true;
3585 }
3586
3587 return false;
3588 };
3589
// Fetch the next page of the account's transactions in
// [minLedger, maxLedger] from the relational database, newest first.
// Returns an empty optional on an unsupported database type.
3590 auto getMoreTxns = [&](std::uint32_t minLedger,
3591 std::uint32_t maxLedger,
3596 switch (dbType)
3597 {
3598 case Sqlite: {
3599 auto db = static_cast<SQLiteDatabase*>(&registry_.getRelationalDatabase());
3601 accountId, minLedger, maxLedger, marker, 0, true};
3602 return db->newestAccountTxPage(options);
3603 }
3604 // LCOV_EXCL_START
3605 default: {
3606 UNREACHABLE(
3607 "xrpl::NetworkOPsImp::addAccountHistoryJob : "
3608 "getMoreTxns : invalid database type");
3609 return {};
3610 }
3611 // LCOV_EXCL_STOP
3612 }
3613 };
3614
3615 /*
3616 * search backward until the genesis ledger or asked to stop
3617 */
3618 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3619 {
// Charge the subscriber for each window of DB work; stop the job entirely
// if the subscriber has gone away.
// NOTE(review): feeChargeCount is reset every outer iteration, so the log
// below can only ever report 0 — presumably it was meant to accumulate
// across iterations; verify intent.
3620 int feeChargeCount = 0;
3621 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3622 {
3623 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3624 ++feeChargeCount;
3625 }
3626 else
3627 {
3628 JLOG(m_journal.trace())
3629 << "AccountHistory job for account " << toBase58(accountId)
3630 << " no InfoSub. Fee charged " << feeChargeCount << " times.";
3631 return;
3632 }
3633
3634 // try to search in 1024 ledgers till reaching genesis ledgers
3635 auto startLedgerSeq = (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3636 JLOG(m_journal.trace()) << "AccountHistory job for account " << toBase58(accountId)
3637 << ", working on ledger range [" << startLedgerSeq << ","
3638 << lastLedgerSeq << "]";
3639
// Only proceed when the whole [startLedgerSeq, lastLedgerSeq] window lies
// inside the locally validated ledger range; otherwise give up (an elided
// line before 3656 presumably reschedules — verify against full source).
3640 auto haveRange = [&]() -> bool {
3641 std::uint32_t validatedMin = UINT_MAX;
3642 std::uint32_t validatedMax = 0;
3643 auto haveSomeValidatedLedgers =
3644 registry_.getLedgerMaster().getValidatedRange(validatedMin, validatedMax);
3645
3646 return haveSomeValidatedLedgers && validatedMin <= startLedgerSeq &&
3647 lastLedgerSeq <= validatedMax;
3648 }();
3649
3650 if (!haveRange)
3651 {
3652 JLOG(m_journal.debug()) << "AccountHistory reschedule job for account "
3653 << toBase58(accountId) << ", incomplete ledger range ["
3654 << startLedgerSeq << "," << lastLedgerSeq << "]";
3656 return;
3657 }
3658
// Drain every page of transactions in the current window.
3660 while (!subInfo.index_->stopHistorical_)
3661 {
3662 auto dbResult = getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3663 if (!dbResult)
3664 {
3665 // LCOV_EXCL_START
3666 UNREACHABLE(
3667 "xrpl::NetworkOPsImp::addAccountHistoryJob : "
3668 "getMoreTxns failed");
3669 JLOG(m_journal.debug()) << "AccountHistory job for account "
3670 << toBase58(accountId) << " getMoreTxns failed.";
3671 send(rpcError(rpcINTERNAL), true);
3672 return;
3673 // LCOV_EXCL_STOP
3674 }
3675
3676 auto const& txns = dbResult->first;
3677 marker = dbResult->second;
3678 size_t num_txns = txns.size();
3679 for (size_t i = 0; i < num_txns; ++i)
3680 {
3681 auto const& [tx, meta] = txns[i];
3682
3683 if (!tx || !meta)
3684 {
3685 JLOG(m_journal.debug()) << "AccountHistory job for account "
3686 << toBase58(accountId) << " empty tx or meta.";
3687 send(rpcError(rpcINTERNAL), true);
3688 return;
3689 }
3690 auto curTxLedger =
3691 registry_.getLedgerMaster().getLedgerBySeq(tx->getLedger());
3692 if (!curTxLedger)
3693 {
3694 // LCOV_EXCL_START
3695 UNREACHABLE(
3696 "xrpl::NetworkOPsImp::addAccountHistoryJob : "
3697 "getLedgerBySeq failed");
3698 JLOG(m_journal.debug()) << "AccountHistory job for account "
3699 << toBase58(accountId) << " no ledger.";
3700 send(rpcError(rpcINTERNAL), true);
3701 return;
3702 // LCOV_EXCL_STOP
3703 }
3704 std::shared_ptr<STTx const> stTxn = tx->getSTransaction();
3705 if (!stTxn)
3706 {
3707 // LCOV_EXCL_START
3708 UNREACHABLE(
3709 "NetworkOPsImp::addAccountHistoryJob : "
3710 "getSTransaction failed");
3711 JLOG(m_journal.debug())
3712 << "AccountHistory job for account " << toBase58(accountId)
3713 << " getSTransaction failed.";
3714 send(rpcError(rpcINTERNAL), true);
3715 return;
3716 // LCOV_EXCL_STOP
3717 }
3718
// Build the (validated) transaction message once per API version.
3719 auto const mRef = std::ref(*meta);
3720 auto const trR = meta->getResultTER();
3721 MultiApiJson jvTx = transJson(stTxn, trR, true, curTxLedger, mRef);
3722
// Indices count down since we stream newest-first; mark the last tx
// streamed for each ledger so clients can detect ledger boundaries.
3723 jvTx.set(jss::account_history_tx_index, txHistoryIndex--);
3724 if (i + 1 == num_txns || txns[i + 1].first->getLedger() != tx->getLedger())
3725 jvTx.set(jss::account_history_boundary, true);
3726
// Reached the account's very first transaction: flag it and finish.
3727 if (isFirstTx(tx, meta))
3728 {
3729 jvTx.set(jss::account_history_tx_first, true);
3730 sendMultiApiJson(jvTx, false);
3731
3732 JLOG(m_journal.trace())
3733 << "AccountHistory job for account " << toBase58(accountId)
3734 << " done, found last tx.";
3735 return;
3736 }
3737 else
3738 {
3739 sendMultiApiJson(jvTx, false);
3740 }
3741 }
3742
// A marker means more pages remain in this window; otherwise move on.
3743 if (marker)
3744 {
3745 JLOG(m_journal.trace())
3746 << "AccountHistory job for account " << toBase58(accountId)
3747 << " paging, marker=" << marker->ledgerSeq << ":" << marker->txnSeq;
3748 }
3749 else
3750 {
3751 break;
3752 }
3753 }
3754
// Slide the window down; stop once we've reached the genesis ledger.
3755 if (!subInfo.index_->stopHistorical_)
3756 {
3757 lastLedgerSeq = startLedgerSeq - 1;
3758 if (lastLedgerSeq <= 1)
3759 {
3760 JLOG(m_journal.trace())
3761 << "AccountHistory job for account " << toBase58(accountId)
3762 << " done, reached genesis ledger.";
3763 return;
3764 }
3765 }
3766 }
3767 });
3768}
3769
// Kick off historical streaming for a new AccountHistory subscription:
// record the separation point (the given validated ledger), verify the
// account exists in that ledger, and schedule the backfill job.
// (Rendered listing: the function-name line and first parameter line are
// elided here; code reproduced as-is.)
3770void
3772 std::shared_ptr<ReadView const> const& ledger,
3774{
3775 subInfo.index_->separationLedgerSeq_ = ledger->seq();
3776 auto const& accountId = subInfo.index_->accountId_;
3777 auto const accountKeylet = keylet::account(accountId);
// No account in the validated ledger => nothing to backfill.
3778 if (!ledger->exists(accountKeylet))
3779 {
3780 JLOG(m_journal.debug()) << "subAccountHistoryStart, no account " << toBase58(accountId)
3781 << ", no need to add AccountHistory job.";
3782 return;
3783 }
// Special case: a genesis account whose Sequence is still 1 has never
// submitted a transaction, so there is no history to stream.
3784 if (accountId == genesisAccountId)
3785 {
3786 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3787 {
3788 if (sleAcct->getFieldU32(sfSequence) == 1)
3789 {
3790 JLOG(m_journal.debug())
3791 << "subAccountHistoryStart, genesis account " << toBase58(accountId)
3792 << " does not have tx, no need to add AccountHistory job.";
3793 return;
3794 }
3795 }
3796 else
3797 {
3798 // LCOV_EXCL_START
3799 UNREACHABLE(
3800 "xrpl::NetworkOPsImp::subAccountHistoryStart : failed to "
3801 "access genesis account");
3802 return;
3803 // LCOV_EXCL_STOP
3804 }
3805 }
// Record where backfill starts and mark the subscription as historical.
3806 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
3807 subInfo.index_->haveHistorical_ = true;
3808
3809 JLOG(m_journal.debug()) << "subAccountHistoryStart, add AccountHistory job: accountId="
3810 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
3811
3812 addAccountHistoryJob(subInfo);
3813}
3814
// Subscribe an InfoSub to an account's transaction stream plus historical
// backfill. Returns rpcINVALID_PARAMS if this listener is already
// subscribed to the account, rpcSUCCESS otherwise.
// (Rendered listing: the signature, the subscription-lock line, and the
// line constructing 'ahi' / 'inner' are elided; code reproduced as-is.)
3817{
3818 if (!isrListener->insertSubAccountHistory(accountId))
3819 {
3820 JLOG(m_journal.debug()) << "subAccountHistory, already subscribed to account "
3821 << toBase58(accountId);
3822 return rpcINVALID_PARAMS;
3823 }
3824
// Register (accountId -> listener seq -> sub info) in the history map,
// creating the per-account inner map on first subscription.
3827 auto simIterator = mSubAccountHistory.find(accountId);
3828 if (simIterator == mSubAccountHistory.end())
3829 {
3831 inner.emplace(isrListener->getSeq(), ahi);
3832 mSubAccountHistory.insert(simIterator, std::make_pair(accountId, inner));
3833 }
3834 else
3835 {
3836 simIterator->second.emplace(isrListener->getSeq(), ahi);
3837 }
3838
// Start backfilling immediately if a validated ledger is available.
3839 auto const ledger = registry_.getLedgerMaster().getValidatedLedger();
3840 if (ledger)
3841 {
3842 subAccountHistoryStart(ledger, ahi);
3843 }
3844 else
3845 {
3846 // The node does not have validated ledgers, so wait for
3847 // one before start streaming.
3848 // In this case, the subscription is also considered successful.
3849 JLOG(m_journal.debug()) << "subAccountHistory, no validated ledger yet, delay start";
3850 }
3851
3852 return rpcSUCCESS;
3853}
3854
// Public unsubscribe entry point: unless 'historyOnly', drop the listener's
// own record of the subscription, then perform the shared map cleanup.
3855void
3857 InfoSub::ref isrListener,
3858 AccountID const& account,
3859 bool historyOnly)
3860{
3861 if (!historyOnly)
3862 isrListener->deleteSubAccountHistory(account);
3863 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
3864}
3865
// Shared cleanup for account-history unsubscription. Always sets the
// stopHistorical_ flag (cancels any running backfill job); when
// 'historyOnly' is false, also erases the bookkeeping entry, pruning the
// per-account map when it becomes empty.
// (Rendered listing: the name line and the lock line are elided.)
3866void
3868 std::uint64_t seq,
3869 AccountID const& account,
3870 bool historyOnly)
3871{
3873 auto simIterator = mSubAccountHistory.find(account);
3874 if (simIterator != mSubAccountHistory.end())
3875 {
3876 auto& subInfoMap = simIterator->second;
3877 auto subInfoIter = subInfoMap.find(seq);
3878 if (subInfoIter != subInfoMap.end())
3879 {
// Signal the background backfill job to stop at its next check.
3880 subInfoIter->second.index_->stopHistorical_ = true;
3881 }
3882
3883 if (!historyOnly)
3884 {
3885 simIterator->second.erase(seq);
3886 if (simIterator->second.empty())
3887 {
3888 mSubAccountHistory.erase(simIterator);
3889 }
3890 }
3891 JLOG(m_journal.debug()) << "unsubAccountHistory, account " << toBase58(account)
3892 << ", historyOnly = " << (historyOnly ? "true" : "false");
3893 }
3894}
3895
// Subscribe a listener to an order book's change notifications. Always
// returns true; a null listeners object is treated as unreachable.
3896bool
3898{
3899 if (auto listeners = registry_.getOrderBookDB().makeBookListeners(book))
3900 listeners->addSubscriber(isrListener);
3901 else
3902 {
3903 // LCOV_EXCL_START
3904 UNREACHABLE("xrpl::NetworkOPsImp::subBook : null book listeners");
3905 // LCOV_EXCL_STOP
3906 }
3907 return true;
3908}
3909
// Remove a listener (by sequence number) from an order book's listener
// list, if the book has one. Always returns true.
3910bool
3912{
3913 if (auto listeners = registry_.getOrderBookDB().getBookListeners(book))
3914 listeners->removeSubscriber(uSeq);
3915
3916 return true;
3917}
3918
// Force-close the current ledger (standalone mode only, driven by the
// `ledger_accept` RPC): runs one simulated consensus round and returns the
// sequence of the new current ledger. Throws outside standalone mode.
3921{
3922 // This code-path is exclusively used when the server is in standalone
3923 // mode via `ledger_accept`
3924 XRPL_ASSERT(m_standalone, "xrpl::NetworkOPsImp::acceptLedger : is standalone");
3925
3926 if (!m_standalone)
3927 Throw<std::runtime_error>("Operation only possible in STANDALONE mode.");
3928
3929 // FIXME Could we improve on this and remove the need for a specialized
3930 // API in Consensus?
3931 beginConsensus(m_ledgerMaster.getClosedLedger()->header().hash, {});
3932 mConsensus.simulate(registry_.timeKeeper().closeTime(), consensusDelay);
3933 return m_ledgerMaster.getCurrentLedger()->header().seq;
3934}
3935
3936// <-- bool: true=added, false=already there
// Subscribe to the "ledger" stream. Seeds jvResult with a summary of the
// latest validated ledger (index/hash/time, fees, reserves, network id),
// then registers the listener. Returns true if newly added.
// (Rendered listing: the signature, an `if` condition before 3955, and the
// lock line are elided; code reproduced as-is.)
3937bool
3939{
3940 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
3941 {
3942 jvResult[jss::ledger_index] = lpClosed->header().seq;
3943 jvResult[jss::ledger_hash] = to_string(lpClosed->header().hash);
3944 jvResult[jss::ledger_time] =
3945 Json::Value::UInt(lpClosed->header().closeTime.time_since_epoch().count());
// fee_ref is only reported on networks without the XRPFees amendment.
3946 if (!lpClosed->rules().enabled(featureXRPFees))
3947 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3948 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
3949 jvResult[jss::reserve_base] = lpClosed->fees().reserve.jsonClipped();
3950 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
3951 jvResult[jss::network_id] = registry_.getNetworkIDService().getNetworkID();
3952 }
3953
3955 {
3956 jvResult[jss::validated_ledgers] = registry_.getLedgerMaster().getCompleteLedgers();
3957 }
3958
3960 return mStreamMaps[sLedger].emplace(isrListener->getSeq(), isrListener).second;
3961}
3962
3963// <-- bool: true=added, false=already there
// Subscribe to the "book_changes" stream; true if newly added.
3964bool
3966{
3968 return mStreamMaps[sBookChanges].emplace(isrListener->getSeq(), isrListener).second;
3969}
3970
3971// <-- bool: true=erased, false=was not there
// Unsubscribe from the "ledger" stream; true if an entry was erased.
3972bool
3974{
3976 return mStreamMaps[sLedger].erase(uSeq);
3977}
3978
3979// <-- bool: true=erased, false=was not there
3980bool
3986
3987// <-- bool: true=added, false=already there
// Subscribe to the "manifests" stream; true if newly added.
3988bool
3990{
3992 return mStreamMaps[sManifests].emplace(isrListener->getSeq(), isrListener).second;
3993}
3994
3995// <-- bool: true=erased, false=was not there
3996bool
4002
4003// <-- bool: true=added, false=already there
// Subscribe to the "server" status stream. Fills jvResult with a snapshot
// of server state (random nonce, operating mode, load fee figures, host id,
// node public key). Returns true if the listener was newly added.
// (Rendered listing: the pubkey_node value line and the lock line are
// elided; code reproduced as-is.)
4004bool
4005NetworkOPsImp::subServer(InfoSub::ref isrListener, Json::Value& jvResult, bool admin)
4006{
4007 uint256 uRandom;
4008
4009 if (m_standalone)
4010 jvResult[jss::stand_alone] = m_standalone;
4011
4012 // CHECKME: is it necessary to provide a random number here?
4013 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
4014
4015 auto const& feeTrack = registry_.getFeeTrack();
4016 jvResult[jss::random] = to_string(uRandom);
4017 jvResult[jss::server_status] = strOperatingMode(admin);
4018 jvResult[jss::load_base] = feeTrack.getLoadBase();
4019 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4020 jvResult[jss::hostid] = getHostId(admin);
4021 jvResult[jss::pubkey_node] =
4023
4025 return mStreamMaps[sServer].emplace(isrListener->getSeq(), isrListener).second;
4026}
4027
4028// <-- bool: true=erased, false=was not there
// Unsubscribe from the "server" stream; true if an entry was erased.
4029bool
4031{
4033 return mStreamMaps[sServer].erase(uSeq);
4034}
4035
4036// <-- bool: true=added, false=already there
// Subscribe to the validated "transactions" stream; true if newly added.
4037bool
4039{
4041 return mStreamMaps[sTransactions].emplace(isrListener->getSeq(), isrListener).second;
4042}
4043
4044// <-- bool: true=erased, false=was not there
4045bool
4051
4052// <-- bool: true=added, false=already there
// Subscribe to the real-time (proposed) transactions stream; true if newly
// added.
4053bool
4055{
4057 return mStreamMaps[sRTTransactions].emplace(isrListener->getSeq(), isrListener).second;
4058}
4059
4060// <-- bool: true=erased, false=was not there
4061bool
4067
4068// <-- bool: true=added, false=already there
// Subscribe to the "validations" stream; true if newly added.
4069bool
4071{
4073 return mStreamMaps[sValidations].emplace(isrListener->getSeq(), isrListener).second;
4074}
4075
4076void
4081
4082// <-- bool: true=erased, false=was not there
4083bool
4089
4090// <-- bool: true=added, false=already there
// Subscribe to the "peer_status" stream; true if newly added.
4091bool
4093{
4095 return mStreamMaps[sPeerStatus].emplace(isrListener->getSeq(), isrListener).second;
4096}
4097
4098// <-- bool: true=erased, false=was not there
4099bool
4105
4106// <-- bool: true=added, false=already there
// Subscribe to the consensus-phase stream; true if newly added.
4107bool
4109{
4111 return mStreamMaps[sConsensusPhase].emplace(isrListener->getSeq(), isrListener).second;
4112}
4113
4114// <-- bool: true=erased, false=was not there
4115bool
4121
// Look up an RPC subscription entry by URL; returns a null pointer when
// the URL has no registered subscriber.
// (Rendered listing: the signature and lock lines are elided.)
4124{
4126
4127 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4128
4129 if (it != mRpcSubMap.end())
4130 return it->second;
4131
4132 return InfoSub::pointer();
4133}
4134
// Register an RPC subscription entry under the given URL and echo the
// entry back to the caller.
// (Rendered listing: the signature and lock lines are elided.)
4137{
4139
4140 mRpcSubMap.emplace(strUrl, rspEntry);
4141
4142 return rspEntry;
4143}
4144
// Remove an RPC subscription by URL, but only if no stream map still
// references the subscriber. Returns true when the entry was erased,
// false when the URL is unknown or the subscriber is still in use.
4145bool
4147{
4149 auto pInfo = findRpcSub(strUrl);
4150
4151 if (!pInfo)
4152 return false;
4153
4154 // check to see if any of the stream maps still hold a weak reference to
4155 // this entry before removing
4156 for (SubMapType const& map : mStreamMaps)
4157 {
4158 if (map.find(pInfo->getSeq()) != map.end())
4159 return false;
4160 }
4161 mRpcSubMap.erase(strUrl);
4162 return true;
4163}
4164
4165#ifndef USE_NEW_BOOK_PAGE
4166
4167// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4168// work, but it demonstrated poor performance.
4169//
// Build the "offers" array for a book-page request by walking the order
// book's directory pages from best quality to worst (the old,
// directory-walking implementation; the #else branch holds the disabled
// iterator-based rewrite). Tracks a running owner-balance table so that
// successive offers from the same owner are funded against what remains.
// (Rendered listing: the function-name line and one accountHolds()
// argument line — presumably the freeze-handling flag — are elided;
// code reproduced as-is.)
4170void
4173 Book const& book,
4174 AccountID const& uTakerID,
4175 bool const bProof,
4176 unsigned int iLimit,
4177 Json::Value const& jvMarker,
4178 Json::Value& jvResult)
4179{ // CAUTION: This is the old get book page logic
4180 Json::Value& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4181
// The book occupies the key range [uBookBase, uBookEnd); uTipIndex tracks
// the directory page currently being walked.
4183 uint256 const uBookBase = getBookBase(book);
4184 uint256 const uBookEnd = getQualityNext(uBookBase);
4185 uint256 uTipIndex = uBookBase;
4186
4187 if (auto stream = m_journal.trace())
4188 {
4189 stream << "getBookPage:" << book;
4190 stream << "getBookPage: uBookBase=" << uBookBase;
4191 stream << "getBookPage: uBookEnd=" << uBookEnd;
4192 stream << "getBookPage: uTipIndex=" << uTipIndex;
4193 }
4194
4195 ReadView const& view = *lpLedger;
4196
4197 bool const bGlobalFreeze =
4198 isGlobalFrozen(view, book.out.account) || isGlobalFrozen(view, book.in.account);
4199
4200 bool bDone = false;
4201 bool bDirectAdvance = true;
4202
4203 std::shared_ptr<SLE const> sleOfferDir;
4204 uint256 offerIndex;
4205 unsigned int uBookEntry;
4206 STAmount saDirRate;
4207
4208 auto const rate = transferRate(view, book.out.account);
4209 auto viewJ = registry_.journal("View");
4210
// Walk directory pages until the offer limit is exhausted or the book ends.
4211 while (!bDone && iLimit-- > 0)
4212 {
// Advance to the next quality directory when the current one is drained.
4213 if (bDirectAdvance)
4214 {
4215 bDirectAdvance = false;
4216
4217 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4218
4219 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4220 if (ledgerIndex)
4221 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4222 else
4223 sleOfferDir.reset();
4224
4225 if (!sleOfferDir)
4226 {
4227 JLOG(m_journal.trace()) << "getBookPage: bDone";
4228 bDone = true;
4229 }
4230 else
4231 {
4232 uTipIndex = sleOfferDir->key();
4233 saDirRate = amountFromQuality(getQuality(uTipIndex));
4234
4235 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4236
4237 JLOG(m_journal.trace()) << "getBookPage: uTipIndex=" << uTipIndex;
4238 JLOG(m_journal.trace()) << "getBookPage: offerIndex=" << offerIndex;
4239 }
4240 }
4241
4242 if (!bDone)
4243 {
4244 auto sleOffer = view.read(keylet::offer(offerIndex));
4245
4246 if (sleOffer)
4247 {
4248 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4249 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4250 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4251 STAmount saOwnerFunds;
4252 bool firstOwnerOffer(true);
4253
4254 if (book.out.account == uOfferOwnerID)
4255 {
4256 // If an offer is selling issuer's own IOUs, it is fully
4257 // funded.
4258 saOwnerFunds = saTakerGets;
4259 }
4260 else if (bGlobalFreeze)
4261 {
4262 // If either asset is globally frozen, consider all offers
4263 // that aren't ours to be totally unfunded
4264 saOwnerFunds.clear(book.out);
4265 }
4266 else
4267 {
4268 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4269 if (umBalanceEntry != umBalance.end())
4270 {
4271 // Found in running balance table.
4272
4273 saOwnerFunds = umBalanceEntry->second;
4274 firstOwnerOffer = false;
4275 }
4276 else
4277 {
4278 // Did not find balance in table.
4279
4280 saOwnerFunds = accountHolds(
4281 view,
4282 uOfferOwnerID,
4283 book.out.currency,
4284 book.out.account,
4286 viewJ);
4287
4288 if (saOwnerFunds < beast::zero)
4289 {
4290 // Treat negative funds as zero.
4291
4292 saOwnerFunds.clear();
4293 }
4294 }
4295 }
4296
4297 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4298
4299 STAmount saTakerGetsFunded;
4300 STAmount saOwnerFundsLimit = saOwnerFunds;
4301 Rate offerRate = parityRate;
4302
4303 if (rate != parityRate
4304 // Have a transfer fee.
4305 && uTakerID != book.out.account
4306 // Not taking offers of own IOUs.
4307 && book.out.account != uOfferOwnerID)
4308 // Offer owner not issuing ownfunds
4309 {
4310 // Need to charge a transfer fee to offer owner.
4311 offerRate = rate;
4312 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4313 }
4314
4315 if (saOwnerFundsLimit >= saTakerGets)
4316 {
4317 // Sufficient funds no shenanigans.
4318 saTakerGetsFunded = saTakerGets;
4319 }
4320 else
4321 {
// Partially funded: report the funded portions of both sides.
4322 // Only provide, if not fully funded.
4323
4324 saTakerGetsFunded = saOwnerFundsLimit;
4325
4326 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4327 std::min(
4328 saTakerPays, multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4329 .setJson(jvOffer[jss::taker_pays_funded]);
4330 }
4331
// Deduct what the owner would pay (including transfer fee) from the
// running balance so later offers by the same owner see reduced funds.
4332 STAmount saOwnerPays = (parityRate == offerRate)
4333 ? saTakerGetsFunded
4334 : std::min(saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4335
4336 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4337
4338 // Include all offers funded and unfunded
4339 Json::Value& jvOf = jvOffers.append(jvOffer);
4340 jvOf[jss::quality] = saDirRate.getText();
4341
// owner_funds is only attached to the owner's first offer in the page.
4342 if (firstOwnerOffer)
4343 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4344 }
4345 else
4346 {
4347 JLOG(m_journal.warn()) << "Missing offer";
4348 }
4349
4350 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4351 {
4352 bDirectAdvance = true;
4353 }
4354 else
4355 {
4356 JLOG(m_journal.trace()) << "getBookPage: offerIndex=" << offerIndex;
4357 }
4358 }
4359 }
4360
4361 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4362 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4363}
4364
4365#else
4366
4367// This is the new code that uses the book iterators
4368// It has temporarily been disabled
4369
// Iterator-based book-page builder (currently disabled behind
// USE_NEW_BOOK_PAGE). Same funding logic as the old implementation, but
// drives an OrderBookIterator instead of walking directory pages by hand,
// and omits unfunded offers that don't belong to the taker.
// (Rendered listing: the function-name line and the umBalance declaration
// line are elided; code reproduced as-is.)
4370void
4373 Book const& book,
4374 AccountID const& uTakerID,
4375 bool const bProof,
4376 unsigned int iLimit,
4377 Json::Value const& jvMarker,
4378 Json::Value& jvResult)
4379{
4380 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4381
4383
4384 MetaView lesActive(lpLedger, tapNONE, true);
4385 OrderBookIterator obIterator(lesActive, book);
4386
4387 auto const rate = transferRate(lesActive, book.out.account);
4388
4389 bool const bGlobalFreeze =
4390 lesActive.isGlobalFrozen(book.out.account) || lesActive.isGlobalFrozen(book.in.account);
4391
4392 while (iLimit-- > 0 && obIterator.nextOffer())
4393 {
4394 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4395 if (sleOffer)
4396 {
4397 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4398 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4399 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4400 STAmount saDirRate = obIterator.getCurrentRate();
4401 STAmount saOwnerFunds;
4402
4403 if (book.out.account == uOfferOwnerID)
4404 {
4405 // If offer is selling issuer's own IOUs, it is fully funded.
4406 saOwnerFunds = saTakerGets;
4407 }
4408 else if (bGlobalFreeze)
4409 {
4410 // If either asset is globally frozen, consider all offers
4411 // that aren't ours to be totally unfunded
4412 saOwnerFunds.clear(book.out);
4413 }
4414 else
4415 {
4416 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4417
4418 if (umBalanceEntry != umBalance.end())
4419 {
4420 // Found in running balance table.
4421
4422 saOwnerFunds = umBalanceEntry->second;
4423 }
4424 else
4425 {
4426 // Did not find balance in table.
4427
4428 saOwnerFunds = lesActive.accountHolds(
4429 uOfferOwnerID, book.out.currency, book.out.account, fhZERO_IF_FROZEN);
4430
4431 if (saOwnerFunds.isNegative())
4432 {
4433 // Treat negative funds as zero.
4434
4435 saOwnerFunds.zero();
4436 }
4437 }
4438 }
4439
4440 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4441
4442 STAmount saTakerGetsFunded;
4443 STAmount saOwnerFundsLimit = saOwnerFunds;
4444 Rate offerRate = parityRate;
4445
4446 if (rate != parityRate
4447 // Have a transfer fee.
4448 && uTakerID != book.out.account
4449 // Not taking offers of own IOUs.
4450 && book.out.account != uOfferOwnerID)
4451 // Offer owner not issuing ownfunds
4452 {
4453 // Need to charge a transfer fee to offer owner.
4454 offerRate = rate;
4455 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4456 }
4457
4458 if (saOwnerFundsLimit >= saTakerGets)
4459 {
4460 // Sufficient funds no shenanigans.
4461 saTakerGetsFunded = saTakerGets;
4462 }
4463 else
4464 {
4465 // Only provide, if not fully funded.
4466 saTakerGetsFunded = saOwnerFundsLimit;
4467
4468 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4469
4470 // TODO(tom): The result of this expression is not used - what's
4471 // going on here?
4472 std::min(saTakerPays, multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4473 .setJson(jvOffer[jss::taker_pays_funded]);
4474 }
4475
// Deduct the owner's outlay (plus transfer fee, if any) from the running
// balance used for this owner's subsequent offers.
4476 STAmount saOwnerPays = (parityRate == offerRate)
4477 ? saTakerGetsFunded
4478 : std::min(saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4479
4480 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4481
4482 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4483 {
4484 // Only provide funded offers and offers of the taker.
4485 Json::Value& jvOf = jvOffers.append(jvOffer);
4486 jvOf[jss::quality] = saDirRate.getText();
4487 }
4488 }
4489 }
4490
4491 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4492 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4493}
4494
4495#endif
4496
// Publish per-operating-mode duration and transition counters from the
// state-accounting data into the m_stats gauges, after folding the time
// spent in the current mode into its counter.
// (Rendered listing: the function-name line and most gauge-name lines —
// the .set(...) targets — are elided; only the argument lines remain.)
4497inline void
4499{
4500 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4501 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4503 counters[static_cast<std::size_t>(mode)].dur += current;
4504
4507 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)].dur.count());
4509 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)].dur.count());
4511 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4513 counters[static_cast<std::size_t>(OperatingMode::TRACKING)].dur.count());
4514 m_stats.full_duration.set(counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4515
4517 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)].transitions);
4519 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)].transitions);
4521 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4523 counters[static_cast<std::size_t>(OperatingMode::TRACKING)].transitions);
4525 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4526}
4527
// Record an operating-mode transition: bump the target mode's transition
// count, capture the initial-sync duration on the first-ever entry into
// FULL, charge the elapsed time to the outgoing mode, then switch over.
4528void
4530{
4531 auto now = std::chrono::steady_clock::now();
4532
4533 std::lock_guard lock(mutex_);
4534 ++counters_[static_cast<std::size_t>(om)].transitions;
// First time reaching FULL == initial sync complete; remember how long
// it took from process start.
4535 if (om == OperatingMode::FULL && counters_[static_cast<std::size_t>(om)].transitions == 1)
4536 {
4537 initialSyncUs_ =
4538 std::chrono::duration_cast<std::chrono::microseconds>(now - processStart_).count();
4539 }
4540 counters_[static_cast<std::size_t>(mode_)].dur +=
4541 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4542
4543 mode_ = om;
4544 start_ = now;
4545}
4546
// Render the state-accounting counters as JSON: one object per operating
// mode (transitions + cumulative duration in microseconds), plus the time
// spent in the current mode and, if known, the initial-sync duration.
// (Rendered listing: the 'for' loop header line and part of the duration
// expression are elided; code reproduced as-is.)
4547void
4549{
4550 auto [counters, mode, start, initialSync] = getCounterData();
// Fold the still-running current-mode interval into its counter so the
// report reflects time up to now.
4551 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4553 counters[static_cast<std::size_t>(mode)].dur += current;
4554
4555 obj[jss::state_accounting] = Json::objectValue;
4557 i <= static_cast<std::size_t>(OperatingMode::FULL);
4558 ++i)
4559 {
4560 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4561 auto& state = obj[jss::state_accounting][states_[i]];
4562 state[jss::transitions] = std::to_string(counters[i].transitions);
4563 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4564 }
4565 obj[jss::server_state_duration_us] = std::to_string(current.count());
4566 if (initialSync)
4567 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4568}
4569
4570//------------------------------------------------------------------------------
4571
// Factory: construct the concrete NetworkOPs implementation, forwarding
// every dependency straight through.
// (Rendered listing: the function-name/return-type lines, the
// construction call line, and two parameter lines are elided; the
// remaining argument lists are reproduced as-is.)
4574 ServiceRegistry& registry,
4576 bool standalone,
4577 std::size_t minPeerCount,
4578 bool startvalid,
4579 JobQueue& job_queue,
4581 ValidatorKeys const& validatorKeys,
4582 boost::asio::io_context& io_svc,
4583 beast::Journal journal,
4584 beast::insight::Collector::ptr const& collector)
4585{
4587 registry,
4588 clock,
4589 standalone,
4590 minPeerCount,
4591 startvalid,
4592 job_queue,
4594 validatorKeys,
4595 io_svc,
4596 journal,
4597 collector);
4598}
4599
4600} // namespace xrpl
T any_of(T... args)
T back_inserter(T... args)
T begin(T... args)
Decorator for streaming out compact json.
Lightweight wrapper to tag static string.
Definition json_value.h:44
Represents a JSON value.
Definition json_value.h:130
Json::UInt UInt
Definition json_value.h:137
Value & append(Value const &value)
Append value to array at the end.
bool isMember(char const *key) const
Return true if the object has a member named key.
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
A generic endpoint for log messages.
Definition Journal.h:40
Stream error() const
Definition Journal.h:319
Stream debug() const
Definition Journal.h:301
Stream info() const
Definition Journal.h:307
Stream trace() const
Severity stream access functions.
Definition Journal.h:295
Stream warn() const
Definition Journal.h:313
A metric for measuring an integral value.
Definition Gauge.h:20
void set(value_type value) const
Set the value on the gauge.
Definition Gauge.h:48
A reference to a handler for performing polled collection.
Definition Hook.h:12
A transaction that is in a closed ledger.
TxMeta const & getMeta() const
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual std::chrono::milliseconds getIOLatency()=0
virtual Config & config()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition Book.h:16
Issue in
Definition Book.h:18
Issue out
Definition Book.h:19
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::uint32_t getLoadFee() const
Definition ClusterNode.h:32
NetClock::time_point getReportTime() const
Definition ClusterNode.h:38
PublicKey const & identity() const
Definition ClusterNode.h:44
std::string const & name() const
Definition ClusterNode.h:26
std::size_t size() const
The number of nodes in the cluster list.
Definition Cluster.cpp:30
std::string SERVER_DOMAIN
Definition Config.h:259
int RELAY_UNTRUSTED_VALIDATIONS
Definition Config.h:151
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition Config.h:142
std::size_t NODE_SIZE
Definition Config.h:194
virtual Json::Value getInfo()=0
virtual void clearFailures()=0
std::shared_ptr< InfoSub > pointer
Definition InfoSub.h:33
Currency currency
Definition Issue.h:15
AccountID account
Definition Issue.h:16
A pool of threads to perform work.
Definition JobQueue.h:37
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition JobQueue.h:146
Json::Value getJson(int c=0)
Definition JobQueue.cpp:176
std::chrono::seconds getValidatedLedgerAge()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
bool haveValidated()
Whether we have ever fully validated a ledger.
std::size_t getFetchPackCacheSize() const
std::shared_ptr< Ledger const > getClosedLedger()
std::string getCompleteLedgers()
std::shared_ptr< Ledger const > getValidatedLedger()
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< ReadView const > getCurrentLedger()
Manages the current fee schedule.
std::uint32_t getClusterFee() const
std::uint32_t getLocalFee() const
std::uint32_t getRemoteFee() const
std::uint32_t getLoadFactor() const
std::uint32_t getLoadBase() const
Manages load sources.
Definition LoadManager.h:26
void heartbeat()
Reset the stall detection timer.
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
Definition Manifest.cpp:292
virtual std::uint32_t getNetworkID() const noexcept=0
Get the configured network ID.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
void json(Json::Value &obj) const
Output state counters in JSON format.
std::chrono::steady_clock::time_point const processStart_
static std::array< Json::StaticString const, 5 > const states_
std::array< Counters, 5 > counters_
void mode(OperatingMode om)
Record state transition.
std::chrono::steady_clock::time_point start_
Transaction with input flags and results to be applied in batches.
std::shared_ptr< Transaction > const transaction
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
std::string getHostId(bool forAdmin)
void reportConsensusStateChange(ConsensusPhase phase)
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void clearNeedNetworkLedger() override
ServerFeeSummary mLastFeeSummary
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
DispatchState mDispatchState
std::size_t const minPeerCount_
beast::Journal m_journal
static std::array< char const *, 5 > const states_
std::set< uint256 > pendingValidations_
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
bool unsubManifests(std::uint64_t uListener) override
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool subManifests(InfoSub::ref ispListener) override
void stateAccounting(Json::Value &obj) override
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void stop() override
SubInfoMapType mSubRTAccount
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
void transactionBatch()
Apply transactions in batches.
bool unsubRTTransactions(std::uint64_t uListener) override
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
bool processTrustedProposal(RCLCxPeerPos proposal) override
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
void pubValidation(std::shared_ptr< STValidation > const &val) override
bool subBook(InfoSub::ref ispListener, Book const &) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
std::atomic< OperatingMode > mMode
void setMode(OperatingMode om) override
void setAmendmentBlocked() override
void pubConsensus(ConsensusPhase phase)
bool const m_standalone
std::recursive_mutex mSubLock
bool isNeedNetworkLedger() override
DispatchState
Synchronization states for transaction batches.
std::atomic< bool > needNetworkLedger_
boost::asio::steady_timer heartbeatTimer_
bool subConsensus(InfoSub::ref ispListener) override
bool unsubBook(std::uint64_t uListener, Book const &) override
bool unsubLedger(std::uint64_t uListener) override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
std::optional< PublicKey > const validatorPK_
std::atomic< bool > amendmentBlocked_
bool isFull() override
void clearAmendmentWarned() override
void updateLocalTx(ReadView const &view) override
void clearLedgerFetch() override
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool isAmendmentBlocked() override
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::unique_ptr< LocalTxs > m_localTX
void setStandAlone() override
void setNeedNetworkLedger() override
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
bool unsubServer(std::uint64_t uListener) override
SubAccountHistoryMapType mSubAccountHistory
bool unsubConsensus(std::uint64_t uListener) override
std::condition_variable mCond
void pubManifest(Manifest const &) override
RCLConsensus mConsensus
void consensusViewChange() override
ServiceRegistry & registry_
boost::asio::steady_timer accountHistoryTxTimer_
Json::Value getConsensusInfo() override
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void setUNLBlocked() override
bool unsubValidations(std::uint64_t uListener) override
bool subPeerStatus(InfoSub::ref ispListener) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
ConsensusPhase mLastConsensusPhase
OperatingMode getOperatingMode() const override
std::optional< PublicKey > const validatorMasterPK_
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
std::vector< TransactionStatus > mTransactions
bool tryRemoveRpcSub(std::string const &strUrl) override
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void submitTransaction(std::shared_ptr< STTx const > const &) override
void setAmendmentWarned() override
LedgerMaster & m_ledgerMaster
Json::Value getServerInfo(bool human, bool admin, bool counters) override
StateAccounting accounting_
bool subValidations(InfoSub::ref ispListener) override
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool subRTTransactions(InfoSub::ref ispListener) override
std::atomic< bool > unlBlocked_
subRpcMapType mRpcSubMap
bool unsubBookChanges(std::uint64_t uListener) override
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
void setStateTimer() override
Called to initially start our timers.
std::size_t getLocalTxCount() override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
bool unsubTransactions(std::uint64_t uListener) override
bool isAmendmentWarned() override
bool subTransactions(InfoSub::ref ispListener) override
std::mutex m_statsMutex
std::mutex validationsMutex_
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
SubInfoMapType mSubAccount
void clearUNLBlocked() override
bool isUNLBlocked() override
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
std::atomic< bool > amendmentWarned_
boost::asio::steady_timer clusterTimer_
NetworkOPsImp(ServiceRegistry &registry, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
bool unsubPeerStatus(std::uint64_t uListener) override
void reportFeeChange() override
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
bool isBlocked() override
~NetworkOPsImp() override
Json::Value getLedgerFetchInfo() override
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool subBookChanges(InfoSub::ref ispListener) override
Provides server functionality for clients.
Definition NetworkOPs.h:71
void getCountsJson(Json::Value &obj)
Definition Database.cpp:236
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Writable ledger view that accumulates state and tx changes.
Definition OpenView.h:45
virtual void processTxn(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &alTx, MultiApiJson const &jvObj)=0
virtual BookListeners::pointer makeBookListeners(Book const &)=0
virtual BookListeners::pointer getBookListeners(Book const &)=0
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnectCharges() const =0
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
Manages the generic consensus algorithm for use by the RCL.
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
Json::Value getJson(bool full) const
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
A peer's signed, proposed position for use in RCLConsensus.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Represents a set of transactions in RCLConsensus.
Definition RCLCxTx.h:43
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition RFC1751.cpp:422
Collects logging information.
std::unique_ptr< std::stringstream > const & ss()
A view into a ledger.
Definition ReadView.h:31
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
Issue const & issue() const
Definition STAmount.h:470
std::string getText() const override
Definition STAmount.cpp:655
void setJson(Json::Value &) const
Definition STAmount.cpp:615
std::optional< T > get(std::string const &name) const
std::size_t size() const noexcept
Definition Serializer.h:50
void const * data() const noexcept
Definition Serializer.h:56
void setup(Setup const &setup, beast::Journal journal)
Service registry for dependency injection.
virtual perf::PerfLog & getPerfLog()=0
virtual JobQueue & getJobQueue()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual ValidatorList & validators()=0
virtual TxQ & getTxQ()=0
virtual Overlay & overlay()=0
virtual NodeStore::Database & getNodeStore()=0
virtual NetworkIDService & getNetworkIDService()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual ServerHandler & getServerHandler()=0
virtual OpenLedger & openLedger()=0
virtual Cluster & cluster()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual LedgerMaster & getLedgerMaster()=0
virtual TimeKeeper & timeKeeper()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual ManifestCache & validatorManifests()=0
virtual beast::Journal journal(std::string const &name)=0
virtual Application & app()=0
time_point now() const override
Returns the current time, using the server's clock.
Definition TimeKeeper.h:44
std::chrono::seconds closeOffset() const
Definition TimeKeeper.h:63
time_point closeTime() const
Returns the predicted close time, in network time.
Definition TimeKeeper.h:56
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition TxQ.cpp:1658
static time_point now()
Validator keys and manifest as set in configuration file.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::size_t quorum() const
Get quorum value for current trusted key set.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
Json::Value jsonClipped() const
Definition XRPAmount.h:197
constexpr double decimalXRP() const
Definition XRPAmount.h:241
iterator begin()
Definition base_uint.h:112
bool isZero() const
Definition base_uint.h:511
static constexpr std::size_t size()
Definition base_uint.h:497
bool isNonZero() const
Definition base_uint.h:516
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition scope.h:202
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_same_v
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition json_value.h:25
@ objectValue
object value (collection of name/value pairs).
Definition json_value.h:26
int Int
unsigned int UInt
void rngfill(void *const buffer, std::size_t const bytes, Generator &g)
Definition rngfill.h:14
STL namespace.
std::string const & getVersionString()
Server version.
Definition BuildInfo.cpp:51
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Encodes ledger sequence, transaction index, and network ID into a CTID string.
Definition CTID.h:33
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition BookChanges.h:27
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
void insertNFTSyntheticInJson(Json::Value &, std::shared_ptr< STTx const > const &, TxMeta const &)
Adds common synthetic fields to transaction-related JSON responses.
Charge const feeMediumBurdenRPC
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition Indexes.cpp:243
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition Indexes.cpp:165
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition Indexes.cpp:342
Rate rate(Env &env, Account const &account, std::uint32_t const &seq)
Definition escrow.cpp:50
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition algorithm.h:5
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
STAmount divide(STAmount const &amount, Rate const &rate)
Definition Rate2.cpp:69
@ terQUEUED
Definition TER.h:205
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
bool isTerRetry(TER x) noexcept
Definition TER.h:645
@ fhZERO_IF_FROZEN
Definition View.h:58
@ fhIGNORE_FREEZE
Definition View.h:58
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ INVALID
Definition Transaction.h:29
@ OBSOLETE
Definition Transaction.h:35
@ INCLUDED
Definition Transaction.h:30
constexpr std::uint32_t tfInnerBatchTxn
Definition TxFlags.h:41
std::string to_string(base_uint< Bits, Tag > const &a)
Definition base_uint.h:600
std::string strHex(FwdIt begin, FwdIt end)
Definition strHex.h:10
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules)
Checks transaction signature and local checks.
Definition apply.cpp:21
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition ReadView.cpp:50
@ tefPAST_SEQ
Definition TER.h:155
std::uint64_t getQuality(uint256 const &uBase)
Definition Indexes.cpp:131
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition AccountID.cpp:92
FeeSetup setup_FeeVote(Section const &section)
Definition Config.cpp:1050
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j, SpendableHandling includeFullBalance=shSIMPLE_BALANCE)
Definition View.cpp:443
Number root(Number f, unsigned d)
Definition Number.cpp:956
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition TER.cpp:228
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition View.cpp:134
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition Rate2.cpp:34
static auto const genesisAccountId
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition ApiVersion.h:149
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition Seed.cpp:57
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition View.cpp:123
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
std::unique_ptr< NetworkOPs > make_NetworkOPs(ServiceRegistry &registry, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_context &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ current
This was a new validation and was added.
constexpr std::size_t maxPoppedTransactions
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition View.cpp:574
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition View.cpp:160
STAmount amountFromQuality(std::uint64_t rate)
Definition STAmount.cpp:946
@ jtNETOP_CLUSTER
Definition Job.h:54
@ jtCLIENT_CONSENSUS
Definition Job.h:27
@ jtTXN_PROC
Definition Job.h:61
@ jtCLIENT_ACCT_HIST
Definition Job.h:28
@ jtTRANSACTION
Definition Job.h:41
@ jtCLIENT_FEE_CHANGE
Definition Job.h:26
@ jtBATCH
Definition Job.h:44
bool isTefFailure(TER x) noexcept
Definition TER.h:639
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition View.cpp:765
auto constexpr muldiv_max
Definition mulDiv.h:8
uint256 getQualityNext(uint256 const &uBase)
Definition Indexes.cpp:123
ConsensusPhase
Phases of consensus for a single ledger round.
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition predicates.h:54
AccountID calcAccountID(PublicKey const &pk)
uint256 getBookBase(Book const &book)
Definition Indexes.cpp:98
Json::Value rpcError(error_code_i iError)
Definition RPCErr.cpp:12
std::string to_string_iso(date::sys_time< Duration > tp)
Definition chrono.h:68
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition LocalTxs.cpp:170
ApplyFlags
Definition ApplyView.h:10
@ tapNONE
Definition ApplyView.h:11
@ tapFAIL_HARD
Definition ApplyView.h:15
@ tapUNLIMITED
Definition ApplyView.h:22
bool isTelLocal(TER x) noexcept
Definition TER.h:627
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
@ temINVALID_FLAG
Definition TER.h:91
@ temBAD_SIGNATURE
Definition TER.h:85
bool isTesSuccess(TER x) noexcept
Definition TER.h:651
static std::uint32_t trunc32(std::uint64_t v)
static std::array< char const *, 5 > const stateNames
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition NetworkOPs.h:50
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition STTx.cpp:782
bool isTemMalformed(TER x) noexcept
Definition TER.h:633
@ tesSUCCESS
Definition TER.h:225
error_code_i
Definition ErrorCodes.h:20
@ rpcINTERNAL
Definition ErrorCodes.h:110
@ rpcINVALID_PARAMS
Definition ErrorCodes.h:64
@ rpcSUCCESS
Definition ErrorCodes.h:24
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
@ warnRPC_AMENDMENT_BLOCKED
Definition ErrorCodes.h:153
@ warnRPC_UNSUPPORTED_MAJORITY
Definition ErrorCodes.h:152
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition ErrorCodes.h:154
T owns_lock(T... args)
T ref(T... args)
T reserve(T... args)
T reset(T... args)
T set_intersection(T... args)
T size(T... args)
T str(T... args)
PublicKey masterKey
The master key associated with this manifest.
Definition Manifest.h:66
std::string serialized
The manifest in serialized form.
Definition Manifest.h:63
Blob getMasterSignature() const
Returns manifest master key signature.
Definition Manifest.cpp:226
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition Manifest.h:78
std::optional< Blob > getSignature() const
Returns manifest signature.
Definition Manifest.cpp:215
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition Manifest.h:72
std::uint32_t sequence
The sequence number of this manifest.
Definition Manifest.h:75
Server fees published on server subscription.
std::optional< TxQ::Metrics > em
bool operator!=(ServerFeeSummary const &b) const
bool operator==(ServerFeeSummary const &b) const
beast::insight::Gauge syncing_duration
beast::insight::Gauge tracking_duration
beast::insight::Gauge connected_duration
beast::insight::Gauge tracking_transitions
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
beast::insight::Gauge connected_transitions
beast::insight::Gauge full_transitions
beast::insight::Gauge disconnected_duration
beast::insight::Gauge syncing_transitions
beast::insight::Gauge disconnected_transitions
beast::insight::Gauge full_duration
beast::insight::Hook hook
SubAccountHistoryIndex(AccountID const &accountId)
std::shared_ptr< SubAccountHistoryIndex > index_
std::shared_ptr< SubAccountHistoryIndex > index_
Represents a transfer rate.
Definition Rate.h:20
Data format for exchanging consumption information across peers.
Definition Gossip.h:12
std::vector< Item > items
Definition Gossip.h:24
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition TxQ.h:144
void set(char const *key, auto const &v)
IsMemberResult isMember(char const *key) const
Select all peers (except optional excluded) that are in our cluster.
Definition predicates.h:115
Sends a message to all peers.
Definition predicates.h:12
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)