rippled
Loading...
Searching...
No Matches
NetworkOPs.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/main/Tuning.h>
32#include <xrpld/app/misc/AmendmentTable.h>
33#include <xrpld/app/misc/DeliverMax.h>
34#include <xrpld/app/misc/HashRouter.h>
35#include <xrpld/app/misc/LoadFeeTrack.h>
36#include <xrpld/app/misc/NetworkOPs.h>
37#include <xrpld/app/misc/Transaction.h>
38#include <xrpld/app/misc/TxQ.h>
39#include <xrpld/app/misc/ValidatorKeys.h>
40#include <xrpld/app/misc/ValidatorList.h>
41#include <xrpld/app/misc/detail/AccountTxPaging.h>
42#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
43#include <xrpld/app/tx/apply.h>
44#include <xrpld/consensus/Consensus.h>
45#include <xrpld/consensus/ConsensusParms.h>
46#include <xrpld/overlay/Cluster.h>
47#include <xrpld/overlay/Overlay.h>
48#include <xrpld/overlay/predicates.h>
49#include <xrpld/perflog/PerfLog.h>
50#include <xrpld/rpc/BookChanges.h>
51#include <xrpld/rpc/CTID.h>
52#include <xrpld/rpc/DeliveredAmount.h>
53#include <xrpld/rpc/MPTokenIssuanceID.h>
54#include <xrpld/rpc/ServerHandler.h>
55
56#include <xrpl/basics/UptimeClock.h>
57#include <xrpl/basics/mulDiv.h>
58#include <xrpl/basics/safe_cast.h>
59#include <xrpl/basics/scope.h>
60#include <xrpl/beast/utility/rngfill.h>
61#include <xrpl/crypto/RFC1751.h>
62#include <xrpl/crypto/csprng.h>
63#include <xrpl/protocol/BuildInfo.h>
64#include <xrpl/protocol/Feature.h>
65#include <xrpl/protocol/MultiApiJson.h>
66#include <xrpl/protocol/RPCErr.h>
67#include <xrpl/protocol/jss.h>
68#include <xrpl/resource/Fees.h>
69#include <xrpl/resource/ResourceManager.h>
70
71#include <boost/asio/ip/host_name.hpp>
72#include <boost/asio/steady_timer.hpp>
73
74#include <algorithm>
75#include <exception>
76#include <mutex>
77#include <optional>
78#include <set>
79#include <sstream>
80#include <string>
81#include <tuple>
82#include <unordered_map>
83
84namespace ripple {
85
86class NetworkOPsImp final : public NetworkOPs
87{
93 {
94 public:
96 bool const admin;
97 bool const local;
99 bool applied = false;
101
104 bool a,
105 bool l,
106 FailHard f)
107 : transaction(t), admin(a), local(l), failType(f)
108 {
109 XRPL_ASSERT(
111 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
112 "valid inputs");
113 }
114 };
115
119 enum class DispatchState : unsigned char {
120 none,
121 scheduled,
122 running,
123 };
124
126
142 {
143 struct Counters
144 {
145 explicit Counters() = default;
146
149 };
150
154 std::chrono::steady_clock::time_point start_ =
156 std::chrono::steady_clock::time_point const processStart_ = start_;
159
160 public:
162 {
164 .transitions = 1;
165 }
166
173 void
175
181 void
182 json(Json::Value& obj) const;
183
185 {
187 decltype(mode_) mode;
188 decltype(start_) start;
190 };
191
194 {
197 }
198 };
199
202 {
203 ServerFeeSummary() = default;
204
207 TxQ::Metrics&& escalationMetrics,
208 LoadFeeTrack const& loadFeeTrack);
209 bool
210 operator!=(ServerFeeSummary const& b) const;
211
212 bool
214 {
215 return !(*this != b);
216 }
217
222 };
223
224public:
226 Application& app,
228 bool standalone,
229 std::size_t minPeerCount,
230 bool start_valid,
231 JobQueue& job_queue,
233 ValidatorKeys const& validatorKeys,
234 boost::asio::io_service& io_svc,
235 beast::Journal journal,
236 beast::insight::Collector::ptr const& collector)
237 : app_(app)
238 , m_journal(journal)
241 , heartbeatTimer_(io_svc)
242 , clusterTimer_(io_svc)
243 , accountHistoryTxTimer_(io_svc)
244 , mConsensus(
245 app,
247 setup_FeeVote(app_.config().section("voting")),
248 app_.logs().journal("FeeVote")),
250 *m_localTX,
251 app.getInboundTransactions(),
252 beast::get_abstract_clock<std::chrono::steady_clock>(),
253 validatorKeys,
254 app_.logs().journal("LedgerConsensus"))
255 , validatorPK_(
256 validatorKeys.keys ? validatorKeys.keys->publicKey
257 : decltype(validatorPK_){})
259 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
260 : decltype(validatorMasterPK_){})
262 , m_job_queue(job_queue)
263 , m_standalone(standalone)
264 , minPeerCount_(start_valid ? 0 : minPeerCount)
266 {
267 }
268
269 ~NetworkOPsImp() override
270 {
271 // This clear() is necessary to ensure the shared_ptrs in this map get
272 // destroyed NOW because the objects in this map invoke methods on this
273 // class when they are destroyed
275 }
276
277public:
279 getOperatingMode() const override;
280
282 strOperatingMode(OperatingMode const mode, bool const admin) const override;
283
285 strOperatingMode(bool const admin = false) const override;
286
287 //
288 // Transaction operations.
289 //
290
291 // Must complete immediately.
292 void
294
295 void
297 std::shared_ptr<Transaction>& transaction,
298 bool bUnlimited,
299 bool bLocal,
300 FailHard failType) override;
301
302 void
303 processTransactionSet(CanonicalTXSet const& set) override;
304
313 void
316 bool bUnlimited,
317 FailHard failType);
318
328 void
331 bool bUnlimited,
332 FailHard failtype);
333
334private:
335 bool
337
338 void
341 std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback);
342
343public:
347 void
349
355 void
357
358 //
359 // Owner functions.
360 //
361
365 AccountID const& account) override;
366
367 //
368 // Book functions.
369 //
370
371 void
374 Book const&,
375 AccountID const& uTakerID,
376 bool const bProof,
377 unsigned int iLimit,
378 Json::Value const& jvMarker,
379 Json::Value& jvResult) override;
380
381 // Ledger proposal/close functions.
382 bool
384
385 bool
388 std::string const& source) override;
389
390 void
391 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
392
393 // Network state machine.
394
395 // Used for the "jump" case.
396private:
397 void
399 bool
401
402public:
403 bool
405 uint256 const& networkClosed,
406 std::unique_ptr<std::stringstream> const& clog) override;
407 void
409 void
410 setStandAlone() override;
411
415 void
416 setStateTimer() override;
417
418 void
419 setNeedNetworkLedger() override;
420 void
421 clearNeedNetworkLedger() override;
422 bool
423 isNeedNetworkLedger() override;
424 bool
425 isFull() override;
426
427 void
428 setMode(OperatingMode om) override;
429
430 bool
431 isBlocked() override;
432 bool
433 isAmendmentBlocked() override;
434 void
435 setAmendmentBlocked() override;
436 bool
437 isAmendmentWarned() override;
438 void
439 setAmendmentWarned() override;
440 void
441 clearAmendmentWarned() override;
442 bool
443 isUNLBlocked() override;
444 void
445 setUNLBlocked() override;
446 void
447 clearUNLBlocked() override;
448 void
449 consensusViewChange() override;
450
452 getConsensusInfo() override;
454 getServerInfo(bool human, bool admin, bool counters) override;
455 void
456 clearLedgerFetch() override;
458 getLedgerFetchInfo() override;
461 std::optional<std::chrono::milliseconds> consensusDelay) override;
462 void
463 reportFeeChange() override;
464 void
466
467 void
468 updateLocalTx(ReadView const& view) override;
470 getLocalTxCount() override;
471
472 //
473 // Monitoring: publisher side.
474 //
475 void
476 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
477 void
480 std::shared_ptr<STTx const> const& transaction,
481 TER result) override;
482 void
483 pubValidation(std::shared_ptr<STValidation> const& val) override;
484
485 //--------------------------------------------------------------------------
486 //
487 // InfoSub::Source.
488 //
489 void
491 InfoSub::ref ispListener,
492 hash_set<AccountID> const& vnaAccountIDs,
493 bool rt) override;
494 void
496 InfoSub::ref ispListener,
497 hash_set<AccountID> const& vnaAccountIDs,
498 bool rt) override;
499
500 // Just remove the subscription from the tracking
501 // not from the InfoSub. Needed for InfoSub destruction
502 void
505 hash_set<AccountID> const& vnaAccountIDs,
506 bool rt) override;
507
509 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
510 override;
511 void
513 InfoSub::ref ispListener,
514 AccountID const& account,
515 bool historyOnly) override;
516
517 void
520 AccountID const& account,
521 bool historyOnly) override;
522
523 bool
524 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
525 bool
526 unsubLedger(std::uint64_t uListener) override;
527
528 bool
529 subBookChanges(InfoSub::ref ispListener) override;
530 bool
531 unsubBookChanges(std::uint64_t uListener) override;
532
533 bool
534 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
535 override;
536 bool
537 unsubServer(std::uint64_t uListener) override;
538
539 bool
540 subBook(InfoSub::ref ispListener, Book const&) override;
541 bool
542 unsubBook(std::uint64_t uListener, Book const&) override;
543
544 bool
545 subManifests(InfoSub::ref ispListener) override;
546 bool
547 unsubManifests(std::uint64_t uListener) override;
548 void
549 pubManifest(Manifest const&) override;
550
551 bool
552 subTransactions(InfoSub::ref ispListener) override;
553 bool
554 unsubTransactions(std::uint64_t uListener) override;
555
556 bool
557 subRTTransactions(InfoSub::ref ispListener) override;
558 bool
559 unsubRTTransactions(std::uint64_t uListener) override;
560
561 bool
562 subValidations(InfoSub::ref ispListener) override;
563 bool
564 unsubValidations(std::uint64_t uListener) override;
565
566 bool
567 subPeerStatus(InfoSub::ref ispListener) override;
568 bool
569 unsubPeerStatus(std::uint64_t uListener) override;
570 void
571 pubPeerStatus(std::function<Json::Value(void)> const&) override;
572
573 bool
574 subConsensus(InfoSub::ref ispListener) override;
575 bool
576 unsubConsensus(std::uint64_t uListener) override;
577
579 findRpcSub(std::string const& strUrl) override;
581 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
582 bool
583 tryRemoveRpcSub(std::string const& strUrl) override;
584
void
stop() override
{
    {
        // Cancel all three repeating timers. A failed cancellation is
        // only logged; shutdown proceeds regardless.
        auto const cancelTimer = [this](
                                     boost::asio::steady_timer& timer,
                                     char const* name) {
            boost::system::error_code ec;
            timer.cancel(ec);
            if (ec)
            {
                JLOG(m_journal.error()) << "NetworkOPs: " << name
                                        << " cancel error: " << ec.message();
            }
        };

        cancelTimer(heartbeatTimer_, "heartbeatTimer");
        cancelTimer(clusterTimer_, "clusterTimer");
        cancelTimer(accountHistoryTxTimer_, "accountHistoryTxTimer");
    }
    // Make sure that any waitHandlers pending in our timers are done.
    using namespace std::chrono_literals;
    waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
}
620
621 void
622 stateAccounting(Json::Value& obj) override;
623
624private:
625 void
626 setTimer(
627 boost::asio::steady_timer& timer,
628 std::chrono::milliseconds const& expiry_time,
629 std::function<void()> onExpire,
630 std::function<void()> onError);
631 void
633 void
635 void
637 void
639
641 transJson(
642 std::shared_ptr<STTx const> const& transaction,
643 TER result,
644 bool validated,
647
648 void
651 AcceptedLedgerTx const& transaction,
652 bool last);
653
654 void
657 AcceptedLedgerTx const& transaction,
658 bool last);
659
660 void
663 std::shared_ptr<STTx const> const& transaction,
664 TER result);
665
666 void
667 pubServer();
668 void
670
672 getHostId(bool forAdmin);
673
674private:
678
679 /*
680 * With a validated ledger to separate history and future, the node
681 * streams historical txns with negative indexes starting from -1,
682 * and streams future txns starting from index 0.
683 * The SubAccountHistoryIndex struct maintains these indexes.
684 * It also has a flag stopHistorical_ for stopping streaming
685 * the historical txns.
686 */
688 {
690 // forward
692 // separate backward and forward
694 // history, backward
699
701 : accountId_(accountId)
702 , forwardTxIndex_(0)
705 , historyTxIndex_(-1)
706 , haveHistorical_(false)
707 , stopHistorical_(false)
708 {
709 }
710 };
712 {
715 };
717 {
720 };
723
727 void
731 void
733 void
735
738
740
742
744
749
751 boost::asio::steady_timer heartbeatTimer_;
752 boost::asio::steady_timer clusterTimer_;
753 boost::asio::steady_timer accountHistoryTxTimer_;
754
756
759
761
763
766
768
770
771 enum SubTypes {
772 sLedger, // Accepted ledgers.
773 sManifests, // Received validator manifests.
774 sServer, // When server changes connectivity state.
775 sTransactions, // All accepted transactions.
776 sRTTransactions, // All proposed and accepted transactions.
777 sValidations, // Received validations.
778 sPeerStatus, // Peer status changes.
779 sConsensusPhase, // Consensus phase
780 sBookChanges, // Per-ledger order book changes
781 sLastEntry // Any new entry must be ADDED ABOVE this one
782 };
783
785
787
789
790 // Whether we are in standalone mode.
791 bool const m_standalone;
792
793 // The number of nodes that we need to consider ourselves connected.
795
796 // Transaction batching.
801
803
806
807private:
808 struct Stats
809 {
810 template <class Handler>
812 Handler const& handler,
813 beast::insight::Collector::ptr const& collector)
814 : hook(collector->make_hook(handler))
815 , disconnected_duration(collector->make_gauge(
816 "State_Accounting",
817 "Disconnected_duration"))
818 , connected_duration(collector->make_gauge(
819 "State_Accounting",
820 "Connected_duration"))
822 collector->make_gauge("State_Accounting", "Syncing_duration"))
823 , tracking_duration(collector->make_gauge(
824 "State_Accounting",
825 "Tracking_duration"))
827 collector->make_gauge("State_Accounting", "Full_duration"))
828 , disconnected_transitions(collector->make_gauge(
829 "State_Accounting",
830 "Disconnected_transitions"))
831 , connected_transitions(collector->make_gauge(
832 "State_Accounting",
833 "Connected_transitions"))
834 , syncing_transitions(collector->make_gauge(
835 "State_Accounting",
836 "Syncing_transitions"))
837 , tracking_transitions(collector->make_gauge(
838 "State_Accounting",
839 "Tracking_transitions"))
841 collector->make_gauge("State_Accounting", "Full_transitions"))
842 {
843 }
844
851
857 };
858
859 std::mutex m_statsMutex; // Mutex to lock m_stats
861
862private:
863 void
865};
866
867//------------------------------------------------------------------------------
868
870 {"disconnected", "connected", "syncing", "tracking", "full"}};
871
873
881
882static auto const genesisAccountId = calcAccountID(
884 .first);
885
886//------------------------------------------------------------------------------
887inline OperatingMode
889{
890 return mMode;
891}
892
893inline std::string
894NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
895{
896 return strOperatingMode(mMode, admin);
897}
898
899inline void
901{
903}
904
905inline void
907{
908 needNetworkLedger_ = true;
909}
910
911inline void
913{
914 needNetworkLedger_ = false;
915}
916
917inline bool
919{
920 return needNetworkLedger_;
921}
922
923inline bool
925{
927}
928
931{
932 static std::string const hostname = boost::asio::ip::host_name();
933
934 if (forAdmin)
935 return hostname;
936
937 // For non-admin uses hash the node public key into a
938 // single RFC1751 word:
939 static std::string const shroudedHostId = [this]() {
940 auto const& id = app_.nodeIdentity();
941
942 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
943 }();
944
945 return shroudedHostId;
946}
947
948void
950{
952
953 // Only do this work if a cluster is configured
954 if (app_.cluster().size() != 0)
956}
957
958void
960 boost::asio::steady_timer& timer,
961 std::chrono::milliseconds const& expiry_time,
962 std::function<void()> onExpire,
963 std::function<void()> onError)
964{
965 // Only start the timer if waitHandlerCounter_ is not yet joined.
966 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
967 [this, onExpire, onError](boost::system::error_code const& e) {
968 if ((e.value() == boost::system::errc::success) &&
969 (!m_job_queue.isStopped()))
970 {
971 onExpire();
972 }
973 // Recover as best we can if an unexpected error occurs.
974 if (e.value() != boost::system::errc::success &&
975 e.value() != boost::asio::error::operation_aborted)
976 {
977 // Try again later and hope for the best.
978 JLOG(m_journal.error())
979 << "Timer got error '" << e.message()
980 << "'. Restarting timer.";
981 onError();
982 }
983 }))
984 {
985 timer.expires_from_now(expiry_time);
986 timer.async_wait(std::move(*optionalCountedHandler));
987 }
988}
989
990void
991NetworkOPsImp::setHeartbeatTimer()
992{
993 setTimer(
994 heartbeatTimer_,
995 mConsensus.parms().ledgerGRANULARITY,
996 [this]() {
997 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
998 processHeartbeatTimer();
999 });
1000 },
1001 [this]() { setHeartbeatTimer(); });
1002}
1003
1004void
1005NetworkOPsImp::setClusterTimer()
1006{
1007 using namespace std::chrono_literals;
1008
1009 setTimer(
1010 clusterTimer_,
1011 10s,
1012 [this]() {
1013 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
1014 processClusterTimer();
1015 });
1016 },
1017 [this]() { setClusterTimer(); });
1018}
1019
1020void
1021NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
1022{
1023 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
1024 << toBase58(subInfo.index_->accountId_);
1025 using namespace std::chrono_literals;
1026 setTimer(
1027 accountHistoryTxTimer_,
1028 4s,
1029 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1030 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1031}
1032
// Periodic heartbeat: checks peer connectivity, adjusts the operating mode,
// drives the consensus timer, and re-arms itself via setHeartbeatTimer().
// NOTE(review): this scraped listing is missing one original line (internal
// 1053) between "setMode(...DISCONNECTED)" and the "ss <<" lines — presumably
// the declaration of the stringstream `ss`; confirm against upstream.
1033void
1034NetworkOPsImp::processHeartbeatTimer()
1035{
1036 RclConsensusLogger clog(
1037 "Heartbeat Timer", mConsensus.validating(), m_journal);
1038 {
1039 std::unique_lock lock{app_.getMasterMutex()};
1040
1041 // VFALCO NOTE This is for diagnosing a crash on exit
1042 LoadManager& mgr(app_.getLoadManager());
1043 mgr.heartbeat();
1044
1045 std::size_t const numPeers = app_.overlay().size();
1046
1047 // do we have sufficient peers? If not, we are disconnected.
1048 if (numPeers < minPeerCount_)
1049 {
1050 if (mMode != OperatingMode::DISCONNECTED)
1051 {
1052 setMode(OperatingMode::DISCONNECTED);
1054 ss << "Node count (" << numPeers << ") has fallen "
1055 << "below required minimum (" << minPeerCount_ << ").";
1056 JLOG(m_journal.warn()) << ss.str();
1057 CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
1058 }
1059 else
1060 {
1061 CLOG(clog.ss())
1062 << "already DISCONNECTED. too few peers (" << numPeers
1063 << "), need at least " << minPeerCount_;
1064 }
1065
1066 // MasterMutex lock need not be held to call setHeartbeatTimer()
1067 lock.unlock();
1068 // We do not call mConsensus.timerEntry until there are enough
1069 // peers providing meaningful inputs to consensus
1070 setHeartbeatTimer();
1071
1072 return;
1073 }
1074
1075 if (mMode == OperatingMode::DISCONNECTED)
1076 {
1077 setMode(OperatingMode::CONNECTED);
1078 JLOG(m_journal.info())
1079 << "Node count (" << numPeers << ") is sufficient.";
1080 CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
1081 << " peers. ";
1082 }
1083
1084 // Check if the last validated ledger forces a change between these
1085 // states.
// NOTE(review): re-entering the current mode looks like a no-op, but
// presumably setMode() applies ledger-age-based transitions internally —
// confirm against setMode's implementation before "simplifying" this.
1086 auto origMode = mMode.load();
1087 CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
1088 if (mMode == OperatingMode::SYNCING)
1089 setMode(OperatingMode::SYNCING);
1090 else if (mMode == OperatingMode::CONNECTED)
1091 setMode(OperatingMode::CONNECTED);
1092 auto newMode = mMode.load();
1093 if (origMode != newMode)
1094 {
1095 CLOG(clog.ss())
1096 << ", changing to " << strOperatingMode(newMode, true);
1097 }
1098 CLOG(clog.ss()) << ". ";
1099 }
1100
// Advance consensus with the current close time, then report any phase
// change to subscribers before re-arming the heartbeat.
1101 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());
1102
1103 CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
1104 ConsensusPhase const currPhase = mConsensus.phase();
1105 if (mLastConsensusPhase != currPhase)
1106 {
1107 reportConsensusStateChange(currPhase);
1108 mLastConsensusPhase = currPhase;
1109 CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
1110 }
1111 CLOG(clog.ss()) << ". ";
1112
1113 setHeartbeatTimer();
1114}
1115
1116void
1117NetworkOPsImp::processClusterTimer()
1118{
1119 if (app_.cluster().size() == 0)
1120 return;
1121
1122 using namespace std::chrono_literals;
1123
1124 bool const update = app_.cluster().update(
1125 app_.nodeIdentity().first,
1126 "",
1127 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1128 ? app_.getFeeTrack().getLocalFee()
1129 : 0,
1130 app_.timeKeeper().now());
1131
1132 if (!update)
1133 {
1134 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1135 setClusterTimer();
1136 return;
1137 }
1138
1139 protocol::TMCluster cluster;
1140 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1141 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1142 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1143 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1144 n.set_nodeload(node.getLoadFee());
1145 if (!node.name().empty())
1146 n.set_nodename(node.name());
1147 });
1148
1149 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1150 for (auto& item : gossip.items)
1151 {
1152 protocol::TMLoadSource& node = *cluster.add_loadsources();
1153 node.set_name(to_string(item.address));
1154 node.set_cost(item.balance);
1155 }
1156 app_.overlay().foreach(send_if(
1157 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1158 peer_in_cluster()));
1159 setClusterTimer();
1160}
1161
1162//------------------------------------------------------------------------------
1163
1165NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
1166 const
1167{
1168 if (mode == OperatingMode::FULL && admin)
1169 {
1170 auto const consensusMode = mConsensus.mode();
1171 if (consensusMode != ConsensusMode::wrongLedger)
1172 {
1173 if (consensusMode == ConsensusMode::proposing)
1174 return "proposing";
1175
1176 if (mConsensus.validating())
1177 return "validating";
1178 }
1179 }
1180
1181 return states_[static_cast<std::size_t>(mode)];
1182}
1183
1184void
1185NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1186{
1187 if (isNeedNetworkLedger())
1188 {
1189 // Nothing we can do if we've never been in sync
1190 return;
1191 }
1192
1193 // this is an asynchronous interface
1194 auto const trans = sterilize(*iTrans);
1195
1196 auto const txid = trans->getTransactionID();
1197 auto const flags = app_.getHashRouter().getFlags(txid);
1198
1199 if ((flags & SF_BAD) != 0)
1200 {
1201 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1202 return;
1203 }
1204
1205 try
1206 {
1207 auto const [validity, reason] = checkValidity(
1208 app_.getHashRouter(),
1209 *trans,
1210 m_ledgerMaster.getValidatedRules(),
1211 app_.config());
1212
1213 if (validity != Validity::Valid)
1214 {
1215 JLOG(m_journal.warn())
1216 << "Submitted transaction invalid: " << reason;
1217 return;
1218 }
1219 }
1220 catch (std::exception const& ex)
1221 {
1222 JLOG(m_journal.warn())
1223 << "Exception checking transaction " << txid << ": " << ex.what();
1224
1225 return;
1226 }
1227
1228 std::string reason;
1229
1230 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1231
1232 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1233 auto t = tx;
1234 processTransaction(t, false, false, FailHard::no);
1235 });
1236}
1237
1238bool
1239NetworkOPsImp::preProcessTransaction(std::shared_ptr<Transaction>& transaction)
1240{
1241 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1242
1243 if ((newFlags & SF_BAD) != 0)
1244 {
1245 // cached bad
1246 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1247 transaction->setStatus(INVALID);
1248 transaction->setResult(temBAD_SIGNATURE);
1249 return false;
1250 }
1251
1252 // NOTE eahennis - I think this check is redundant,
1253 // but I'm not 100% sure yet.
1254 // If so, only cost is looking up HashRouter flags.
1255 auto const view = m_ledgerMaster.getCurrentLedger();
1256 auto const [validity, reason] = checkValidity(
1257 app_.getHashRouter(),
1258 *transaction->getSTransaction(),
1259 view->rules(),
1260 app_.config());
1261 XRPL_ASSERT(
1262 validity == Validity::Valid,
1263 "ripple::NetworkOPsImp::processTransaction : valid validity");
1264
1265 // Not concerned with local checks at this point.
1266 if (validity == Validity::SigBad)
1267 {
1268 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1269 transaction->setStatus(INVALID);
1270 transaction->setResult(temBAD_SIGNATURE);
1271 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1272 return false;
1273 }
1274
1275 // canonicalize can change our pointer
1276 app_.getMasterTransaction().canonicalize(&transaction);
1277
1278 return true;
1279}
1280
1281void
1282NetworkOPsImp::processTransaction(
1283 std::shared_ptr<Transaction>& transaction,
1284 bool bUnlimited,
1285 bool bLocal,
1286 FailHard failType)
1287{
1288 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1289
1290 // preProcessTransaction can change our pointer
1291 if (!preProcessTransaction(transaction))
1292 return;
1293
1294 if (bLocal)
1295 doTransactionSync(transaction, bUnlimited, failType);
1296 else
1297 doTransactionAsync(transaction, bUnlimited, failType);
1298}
1299
1300void
1301NetworkOPsImp::doTransactionAsync(
1302 std::shared_ptr<Transaction> transaction,
1303 bool bUnlimited,
1304 FailHard failType)
1305{
1306 std::lock_guard lock(mMutex);
1307
1308 if (transaction->getApplying())
1309 return;
1310
1311 mTransactions.push_back(
1312 TransactionStatus(transaction, bUnlimited, false, failType));
1313 transaction->setApplying();
1314
1315 if (mDispatchState == DispatchState::none)
1316 {
1317 if (m_job_queue.addJob(
1318 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1319 {
1320 mDispatchState = DispatchState::scheduled;
1321 }
1322 }
1323}
1324
1325void
1326NetworkOPsImp::doTransactionSync(
1327 std::shared_ptr<Transaction> transaction,
1328 bool bUnlimited,
1329 FailHard failType)
1330{
1331 std::unique_lock<std::mutex> lock(mMutex);
1332
1333 if (!transaction->getApplying())
1334 {
1335 mTransactions.push_back(
1336 TransactionStatus(transaction, bUnlimited, true, failType));
1337 transaction->setApplying();
1338 }
1339
1340 doTransactionSyncBatch(
1341 lock, [&transaction](std::unique_lock<std::mutex> const&) {
1342 return transaction->getApplying();
1343 });
1344}
1345
// Drain queued transactions on the caller's thread, cooperating with any
// batch job: wait on the condition variable while a batch is running,
// otherwise apply() directly; if work remains afterwards, schedule a jtBATCH
// job. Loops until retryCallback (invoked with the lock held) returns false.
// NOTE(review): this scraped listing is missing the original's first
// parameter line (internal 1348) — presumably the
// std::unique_lock<std::mutex>& named `lock` used below; confirm upstream.
1346void
1347NetworkOPsImp::doTransactionSyncBatch(
1349 std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback)
1350{
1351 do
1352 {
1353 if (mDispatchState == DispatchState::running)
1354 {
1355 // A batch processing job is already running, so wait.
1356 mCond.wait(lock);
1357 }
1358 else
1359 {
1360 apply(lock);
1361
1362 if (mTransactions.size())
1363 {
1364 // More transactions need to be applied, but by another job.
1365 if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
1366 transactionBatch();
1367 }))
1368 {
1369 mDispatchState = DispatchState::scheduled;
1370 }
1371 }
1372 }
1373 } while (retryCallback(lock));
1374}
1375
// Vet an entire canonical transaction set, queue the survivors for batch
// application, and synchronously drain the queue until every queued entry
// has been applied.
// NOTE(review): this scraped listing is missing one original line (internal
// 1380) — presumably the declaration of `candidates` (a vector of
// std::shared_ptr<Transaction>) reserved and filled below; confirm upstream.
1376void
1377NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set)
1378{
1379 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXNSet");
1381 candidates.reserve(set.size());
1382 for (auto const& [_, tx] : set)
1383 {
1384 std::string reason;
1385 auto transaction = std::make_shared<Transaction>(tx, reason, app_);
1386
1387 if (transaction->getStatus() == INVALID)
1388 {
1389 if (!reason.empty())
1390 {
1391 JLOG(m_journal.trace())
1392 << "Exception checking transaction: " << reason;
1393 }
1394 app_.getHashRouter().setFlags(tx->getTransactionID(), SF_BAD);
1395 continue;
1396 }
1397
1398 // preProcessTransaction can change our pointer
1399 if (!preProcessTransaction(transaction))
1400 continue;
1401
1402 candidates.emplace_back(transaction);
1403 }
1404
// Mark each candidate as applying (unless already pending) and move them
// into the shared queue under the batch mutex.
1405 std::vector<TransactionStatus> transactions;
1406 transactions.reserve(candidates.size());
1407
1408 std::unique_lock lock(mMutex);
1409
1410 for (auto& transaction : candidates)
1411 {
1412 if (!transaction->getApplying())
1413 {
1414 transactions.emplace_back(transaction, false, false, FailHard::no);
1415 transaction->setApplying();
1416 }
1417 }
1418
1419 if (mTransactions.empty())
1420 mTransactions.swap(transactions);
1421 else
1422 {
1423 mTransactions.reserve(mTransactions.size() + transactions.size());
1424 for (auto& t : transactions)
1425 mTransactions.push_back(std::move(t));
1426 }
1427
// Drain until no queued transaction is still marked as applying.
1428 doTransactionSyncBatch(lock, [&](std::unique_lock<std::mutex> const&) {
1429 XRPL_ASSERT(
1430 lock.owns_lock(),
1431 "ripple::NetworkOPsImp::processTransactionSet has lock");
1432 return std::any_of(
1433 mTransactions.begin(), mTransactions.end(), [](auto const& t) {
1434 return t.transaction->getApplying();
1435 });
1436 });
1437}
1438
1439void
1440NetworkOPsImp::transactionBatch()
1441{
1442 std::unique_lock<std::mutex> lock(mMutex);
1443
1444 if (mDispatchState == DispatchState::running)
1445 return;
1446
1447 while (mTransactions.size())
1448 {
1449 apply(lock);
1450 }
1451}
1452
void
NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
{
    // Drain the shared queue into a local batch so the batch lock can be
    // released while the transactions are applied to the open ledger.
    // NOTE(review): the declaration of submit_held (used below to collect
    // follow-on account transactions) appears to be missing from this
    // listing — confirm against the original source.
    std::vector<TransactionStatus> transactions;
    mTransactions.swap(transactions);
    XRPL_ASSERT(
        !transactions.empty(),
        "ripple::NetworkOPsImp::apply : non-empty transactions");
    XRPL_ASSERT(
        mDispatchState != DispatchState::running,
        "ripple::NetworkOPsImp::apply : is not running");

    mDispatchState = DispatchState::running;

    // Do not hold the batch lock while applying; reacquired near the end.
    batchLock.unlock();

    {
        // Acquire master and ledger mutexes together via std::lock to
        // avoid deadlock from inconsistent lock ordering.
        std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
        bool changed = false;
        {
            std::unique_lock ledgerLock{
                m_ledgerMaster.peekMutex(), std::defer_lock};
            std::lock(masterLock, ledgerLock);

            app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
                for (TransactionStatus& e : transactions)
                {
                    // we check before adding to the batch
                    // NOTE(review): the ApplyFlags setup ('flags' passed to
                    // TxQ::apply below) appears to be missing from this
                    // listing — confirm against the original source.
                    if (e.admin)

                    if (e.failType == FailHard::yes)

                    auto const result = app_.getTxQ().apply(
                        app_, view, e.transaction->getSTransaction(), flags, j);
                    e.result = result.ter;
                    e.applied = result.applied;
                    changed = changed || result.applied;
                }
                return changed;
            });
        }
        // Any applied transaction may have moved the open-ledger fee level.
        if (changed)
            reportFeeChange();

        std::optional<LedgerIndex> validatedLedgerIndex;
        if (auto const l = m_ledgerMaster.getValidatedLedger())
            validatedLedgerIndex = l->info().seq;

        auto newOL = app_.openLedger().current();
        for (TransactionStatus& e : transactions)
        {
            e.transaction->clearSubmitResult();

            if (e.applied)
            {
                pubProposedTransaction(
                    newOL, e.transaction->getSTransaction(), e.result);
                e.transaction->setApplied();
            }

            e.transaction->setResult(e.result);

            // Malformed transactions are permanently bad; remember that.
            if (isTemMalformed(e.result))
                app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);

#ifdef DEBUG
            if (e.result != tesSUCCESS)
            {
                std::string token, human;

                if (transResultInfo(e.result, token, human))
                {
                    JLOG(m_journal.info())
                        << "TransactionResult: " << token << ": " << human;
                }
            }
#endif

            bool addLocal = e.local;

            if (e.result == tesSUCCESS)
            {
                JLOG(m_journal.debug())
                    << "Transaction is now included in open ledger";
                e.transaction->setStatus(INCLUDED);

                // Pop as many "reasonable" transactions for this account as
                // possible. "Reasonable" means they have sequential sequence
                // numbers, or use tickets.
                auto const& txCur = e.transaction->getSTransaction();

                std::size_t count = 0;
                for (auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
                     txNext && count < maxPoppedTransactions;
                     txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
                {
                    // Popped transactions are appended under the batch lock.
                    if (!batchLock.owns_lock())
                        batchLock.lock();
                    std::string reason;
                    auto const trans = sterilize(*txNext);
                    auto t = std::make_shared<Transaction>(trans, reason, app_);
                    if (t->getApplying())
                        break;
                    submit_held.emplace_back(t, false, false, FailHard::no);
                    t->setApplying();
                }
                if (batchLock.owns_lock())
                    batchLock.unlock();
            }
            else if (e.result == tefPAST_SEQ)
            {
                // duplicate or conflict
                JLOG(m_journal.info()) << "Transaction is obsolete";
                e.transaction->setStatus(OBSOLETE);
            }
            else if (e.result == terQUEUED)
            {
                JLOG(m_journal.debug())
                    << "Transaction is likely to claim a"
                    << " fee, but is queued until fee drops";

                e.transaction->setStatus(HELD);
                // Add to held transactions, because it could get
                // kicked out of the queue, and this will try to
                // put it back.
                m_ledgerMaster.addHeldTransaction(e.transaction);
                e.transaction->setQueued();
                e.transaction->setKept();
            }
            else if (
                isTerRetry(e.result) || isTelLocal(e.result) ||
                isTefFailure(e.result))
            {
                if (e.failType != FailHard::yes)
                {
                    auto const lastLedgerSeq =
                        e.transaction->getSTransaction()->at(
                            ~sfLastLedgerSequence);
                    // NOTE(review): the false-branch of this conditional
                    // expression (the value when lastLedgerSeq is unseated)
                    // appears to be missing from this listing — confirm
                    // against the original source.
                    auto const ledgersLeft = lastLedgerSeq
                        ? *lastLedgerSeq -
                            m_ledgerMaster.getCurrentLedgerIndex()
                    // If any of these conditions are met, the transaction can
                    // be held:
                    // 1. It was submitted locally. (Note that this flag is only
                    //    true on the initial submission.)
                    // 2. The transaction has a LastLedgerSequence, and the
                    //    LastLedgerSequence is fewer than LocalTxs::holdLedgers
                    //    (5) ledgers into the future. (Remember that an
                    //    unseated optional compares as less than all seated
                    //    values, so it has to be checked explicitly first.)
                    // 3. The SF_HELD flag is not set on the txID. (setFlags
                    //    checks before setting. If the flag is set, it returns
                    //    false, which means it's been held once without one of
                    //    the other conditions, so don't hold it again. Time's
                    //    up!)
                    //
                    if (e.local ||
                        (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
                        app_.getHashRouter().setFlags(
                            e.transaction->getID(), SF_HELD))
                    {
                        // transaction should be held
                        JLOG(m_journal.debug())
                            << "Transaction should be held: " << e.result;
                        e.transaction->setStatus(HELD);
                        m_ledgerMaster.addHeldTransaction(e.transaction);
                        e.transaction->setKept();
                    }
                    else
                        JLOG(m_journal.debug())
                            << "Not holding transaction "
                            << e.transaction->getID() << ": "
                            << (e.local ? "local" : "network") << ", "
                            << "result: " << e.result << " ledgers left: "
                            << (ledgersLeft ? to_string(*ledgersLeft)
                                            : "unspecified");
                }
            }
            else
            {
                JLOG(m_journal.debug())
                    << "Status other than success " << e.result;
                e.transaction->setStatus(INVALID);
            }

            // fail_hard submissions that didn't succeed are neither kept
            // locally nor relayed.
            auto const enforceFailHard =
                e.failType == FailHard::yes && !isTesSuccess(e.result);

            if (addLocal && !enforceFailHard)
            {
                m_localTX->push_back(
                    m_ledgerMaster.getCurrentLedgerIndex(),
                    e.transaction->getSTransaction());
                e.transaction->setKept();
            }

            if ((e.applied ||
                 ((mMode != OperatingMode::FULL) &&
                  (e.failType != FailHard::yes) && e.local) ||
                 (e.result == terQUEUED)) &&
                !enforceFailHard)
            {
                // HashRouter decides which peers haven't seen this tx yet.
                auto const toSkip =
                    app_.getHashRouter().shouldRelay(e.transaction->getID());

                if (toSkip)
                {
                    protocol::TMTransaction tx;
                    Serializer s;

                    e.transaction->getSTransaction()->add(s);
                    tx.set_rawtransaction(s.data(), s.size());
                    tx.set_status(protocol::tsCURRENT);
                    tx.set_receivetimestamp(
                        app_.timeKeeper().now().time_since_epoch().count());
                    tx.set_deferred(e.result == terQUEUED);
                    // FIXME: This should be when we received it
                    app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
                    e.transaction->setBroadcast();
                }
            }

            if (validatedLedgerIndex)
            {
                auto [fee, accountSeq, availableSeq] =
                    app_.getTxQ().getTxRequiredFeeAndSeq(
                        *newOL, e.transaction->getSTransaction());
                e.transaction->setCurrentLedgerState(
                    *validatedLedgerIndex, fee, accountSeq, availableSeq);
            }
        }
    }

    batchLock.lock();

    for (TransactionStatus& e : transactions)
        e.transaction->clearApplying();

    // Re-queue any follow-on transactions popped above so the next batch
    // run picks them up.
    if (!submit_held.empty())
    {
        if (mTransactions.empty())
            mTransactions.swap(submit_held);
        else
        {
            mTransactions.reserve(mTransactions.size() + submit_held.size());
            for (auto& e : submit_held)
                mTransactions.push_back(std::move(e));
        }
    }

    mCond.notify_all();

    mDispatchState = DispatchState::none;
}
1712
1713//
1714// Owner functions
1715//
1716
// Walks the account's owner directory and returns a JSON object grouping
// the owned ledger entries (offers, trust lines) by type.
// NOTE(review): the return type line (Json::Value?) and the ledger
// parameter ('lpLedger', read throughout the body) appear to be missing
// from this listing — confirm against the original source.
NetworkOPsImp::getOwnerInfo(
    AccountID const& account)
{
    Json::Value jvObjects(Json::objectValue);
    auto root = keylet::ownerDir(account);
    auto sleNode = lpLedger->read(keylet::page(root));
    if (sleNode)
    {
        std::uint64_t uNodeDir;

        // Iterate every page of the owner directory.
        do
        {
            for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
            {
                auto sleCur = lpLedger->read(keylet::child(uDirEntry));
                XRPL_ASSERT(
                    sleCur,
                    "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");

                switch (sleCur->getType())
                {
                    case ltOFFER:
                        if (!jvObjects.isMember(jss::offers))
                            jvObjects[jss::offers] =
                        // NOTE(review): the array initializer appears to be
                        // truncated here — confirm against the original.

                        jvObjects[jss::offers].append(
                            sleCur->getJson(JsonOptions::none));
                        break;

                    case ltRIPPLE_STATE:
                        if (!jvObjects.isMember(jss::ripple_lines))
                        {
                            jvObjects[jss::ripple_lines] =
                        // NOTE(review): the array initializer appears to be
                        // truncated here — confirm against the original.
                        }

                        jvObjects[jss::ripple_lines].append(
                            sleCur->getJson(JsonOptions::none));
                        break;

                    case ltACCOUNT_ROOT:
                    case ltDIR_NODE:
                    default:
                        // Owner directories should only reference offers and
                        // trust lines here.
                        UNREACHABLE(
                            "ripple::NetworkOPsImp::getOwnerInfo : invalid "
                            "type");
                        break;
                }
            }

            uNodeDir = sleNode->getFieldU64(sfIndexNext);

            if (uNodeDir)
            {
                sleNode = lpLedger->read(keylet::page(root, uNodeDir));
                XRPL_ASSERT(
                    sleNode,
                    "ripple::NetworkOPsImp::getOwnerInfo : read next page");
            }
        } while (uNodeDir);
    }

    return jvObjects;
}
1784
1785//
1786// Other
1787//
1788
1789inline bool
1790NetworkOPsImp::isBlocked()
1791{
1792 return isAmendmentBlocked() || isUNLBlocked();
1793}
1794
1795inline bool
1796NetworkOPsImp::isAmendmentBlocked()
1797{
1798 return amendmentBlocked_;
1799}
1800
void
NetworkOPsImp::setAmendmentBlocked()
{
    // Set the flag before changing mode so any observer triggered by the
    // mode change sees the blocked state.
    amendmentBlocked_ = true;
    // A blocked server drops back to CONNECTED.
    setMode(OperatingMode::CONNECTED);
}
1807
1808inline bool
1809NetworkOPsImp::isAmendmentWarned()
1810{
1811 return !amendmentBlocked_ && amendmentWarned_;
1812}
1813
inline void
NetworkOPsImp::setAmendmentWarned()
{
    // Raise the amendment-warning flag (cleared by clearAmendmentWarned).
    amendmentWarned_ = true;
}
1819
inline void
NetworkOPsImp::clearAmendmentWarned()
{
    // Reset the amendment-warning flag.
    amendmentWarned_ = false;
}
1825
1826inline bool
1827NetworkOPsImp::isUNLBlocked()
1828{
1829 return unlBlocked_;
1830}
1831
void
NetworkOPsImp::setUNLBlocked()
{
    // Set the flag before changing mode so any observer triggered by the
    // mode change sees the blocked state.
    unlBlocked_ = true;
    // A blocked server drops back to CONNECTED.
    setMode(OperatingMode::CONNECTED);
}
1838
inline void
NetworkOPsImp::clearUNLBlocked()
{
    // Reset the UNL-blocked flag.
    unlBlocked_ = false;
}
1844
bool
NetworkOPsImp::checkLastClosedLedger(
    Overlay::PeerSequence const& peerList,
    uint256& networkClosed)
{
    // Returns true if there's an *abnormal* ledger issue, normal changing in
    // TRACKING mode should return false. Do we have sufficient validations for
    // our last closed ledger? Or do sufficient nodes agree? And do we have no
    // better ledger available? If so, we are either tracking or full.

    JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";

    auto const ourClosed = m_ledgerMaster.getClosedLedger();

    if (!ourClosed)
        return false;

    uint256 closedLedger = ourClosed->info().hash;
    uint256 prevClosedLedger = ourClosed->info().parentHash;
    JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
    JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;

    //-------------------------------------------------------------------------
    // Determine preferred last closed ledger

    auto& validations = app_.getValidations();
    JLOG(m_journal.debug())
        << "ValidationTrie " << Json::Compact(validations.getJsonTrie());

    // Will rely on peer LCL if no trusted validations exist
    // NOTE(review): the declaration of peerCounts (a per-ledger-hash count
    // map passed to getPreferredLCL below) appears to be missing from this
    // listing — confirm against the original source.
    peerCounts[closedLedger] = 0;
    // Our own vote for our LCL counts once we are at least TRACKING.
    if (mMode >= OperatingMode::TRACKING)
        peerCounts[closedLedger]++;

    // Tally each peer's reported last closed ledger.
    for (auto& peer : peerList)
    {
        uint256 peerLedger = peer->getClosedLedgerHash();

        if (peerLedger.isNonZero())
            ++peerCounts[peerLedger];
    }

    for (auto const& it : peerCounts)
        JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;

    uint256 preferredLCL = validations.getPreferredLCL(
        RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
        m_ledgerMaster.getValidLedgerIndex(),
        peerCounts);

    bool switchLedgers = preferredLCL != closedLedger;
    if (switchLedgers)
        closedLedger = preferredLCL;
    //-------------------------------------------------------------------------
    if (switchLedgers && (closedLedger == prevClosedLedger))
    {
        // don't switch to our own previous ledger
        JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
        networkClosed = ourClosed->info().hash;
        switchLedgers = false;
    }
    else
        networkClosed = closedLedger;

    if (!switchLedgers)
        return false;

    // Try to obtain the preferred ledger locally, else start acquiring it.
    auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);

    if (!consensus)
        consensus = app_.getInboundLedgers().acquire(
            closedLedger, 0, InboundLedger::Reason::CONSENSUS);

    if (consensus &&
        (!m_ledgerMaster.canBeCurrent(consensus) ||
         !m_ledgerMaster.isCompatible(
             *consensus, m_journal.debug(), "Not switching")))
    {
        // Don't switch to a ledger not on the validated chain
        // or with an invalid close time or sequence
        networkClosed = ourClosed->info().hash;
        return false;
    }

    JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
    JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
                           << getJson({*ourClosed, {}});
    JLOG(m_journal.info()) << "Net LCL " << closedLedger;

    // We disagree with the network, so we can no longer claim to be synced.
    if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
    {
        setMode(OperatingMode::CONNECTED);
    }

    if (consensus)
    {
        // FIXME: If this rewinds the ledger sequence, or has the same
        // sequence, we should update the status on any stored transactions
        // in the invalidated ledgers.
        switchLastClosedLedger(consensus);
    }

    return true;
}
1950
void
NetworkOPsImp::switchLastClosedLedger(
    std::shared_ptr<Ledger const> const& newLCL)
{
    // set the newLCL as our last closed ledger -- this is abnormal code
    JLOG(m_journal.error())
        << "JUMP last closed ledger to " << newLCL->info().hash;

    clearNeedNetworkLedger();

    // Update fee computations.
    app_.getTxQ().processClosedLedger(app_, *newLCL, true);

    // Caller must own master lock
    {
        // Apply tx in old open ledger to new
        // open ledger. Then apply local tx.

        auto retries = m_localTX->getTxSet();
        auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
        // NOTE(review): the declaration of 'rules' (an optional Rules,
        // assigned below) appears to be missing from this listing — confirm
        // against the original source.
        if (lastVal)
            rules = makeRulesGivenLedger(*lastVal, app_.config().features);
        else
            rules.emplace(app_.config().features);
        app_.openLedger().accept(
            app_,
            *rules,
            newLCL,
            OrderedTxs({}),
            false,
            retries,
            tapNONE,
            "jump",
            [&](OpenView& view, beast::Journal j) {
                // Stuff the ledger with transactions from the queue.
                return app_.getTxQ().accept(app_, view);
            });
    }

    m_ledgerMaster.switchLCL(newLCL);

    // Tell peers we switched ledgers.
    protocol::TMStatusChange s;
    s.set_newevent(protocol::neSWITCHED_LEDGER);
    s.set_ledgerseq(newLCL->info().seq);
    s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
    s.set_ledgerhashprevious(
        newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
    s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());

    app_.overlay().foreach(
        send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
}
2004
bool
NetworkOPsImp::beginConsensus(
    uint256 const& networkClosed,
    // NOTE(review): a trailing parameter (the 'clog' stream used in the
    // CLOG calls and passed to startRound below) appears to be missing
    // from this signature in this listing — confirm against the original.
{
    XRPL_ASSERT(
        networkClosed.isNonZero(),
        "ripple::NetworkOPsImp::beginConsensus : nonzero input");

    auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();

    JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
                           << " with LCL " << closingInfo.parentHash;

    auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);

    if (!prevLedger)
    {
        // this shouldn't happen unless we jump ledgers
        if (mMode == OperatingMode::FULL)
        {
            JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
            setMode(OperatingMode::TRACKING);
            CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
        }

        CLOG(clog) << "beginConsensus no previous ledger. ";
        return false;
    }

    XRPL_ASSERT(
        prevLedger->info().hash == closingInfo.parentHash,
        "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
        "parent");
    XRPL_ASSERT(
        closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
        "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
        "hash");

    // Refresh the trusted validator set before starting the round.
    if (prevLedger->rules().enabled(featureNegativeUNL))
        app_.validators().setNegativeUNL(prevLedger->negativeUNL());
    TrustChanges const changes = app_.validators().updateTrusted(
        app_.getValidations().getCurrentNodeIDs(),
        closingInfo.parentCloseTime,
        *this,
        app_.overlay(),
        app_.getHashRouter());

    if (!changes.added.empty() || !changes.removed.empty())
    {
        app_.getValidations().trustChanged(changes.added, changes.removed);
        // Update the AmendmentTable so it tracks the current validators.
        app_.getAmendmentTable().trustChanged(
            app_.validators().getQuorumKeys().second);
    }

    mConsensus.startRound(
        app_.timeKeeper().closeTime(),
        networkClosed,
        prevLedger,
        changes.removed,
        changes.added,
        clog);

    // Report a phase change to subscribers if the engine moved phases.
    ConsensusPhase const currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
    }

    JLOG(m_journal.debug()) << "Initiating consensus engine";
    return true;
}
2079
2080bool
2081NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
2082{
2083 auto const& peerKey = peerPos.publicKey();
2084 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
2085 {
2086 // Could indicate a operator misconfiguration where two nodes are
2087 // running with the same validator key configured, so this isn't fatal,
2088 // and it doesn't necessarily indicate peer misbehavior. But since this
2089 // is a trusted message, it could be a very big deal. Either way, we
2090 // don't want to relay the proposal. Note that the byzantine behavior
2091 // detection in handleNewValidation will notify other peers.
2092 //
2093 // Another, innocuous explanation is unusual message routing and delays,
2094 // causing this node to receive its own messages back.
2095 JLOG(m_journal.error())
2096 << "Received a proposal signed by MY KEY from a peer. This may "
2097 "indicate a misconfiguration where another node has the same "
2098 "validator key, or may be caused by unusual message routing and "
2099 "delays.";
2100 return false;
2101 }
2102
2103 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
2104}
2105
2106void
2107NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
2108{
2109 // We now have an additional transaction set
2110 // either created locally during the consensus process
2111 // or acquired from a peer
2112
2113 // Inform peers we have this set
2114 protocol::TMHaveTransactionSet msg;
2115 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
2116 msg.set_status(protocol::tsHAVE);
2117 app_.overlay().foreach(
2118 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
2119
2120 // We acquired it because consensus asked us to
2121 if (fromAcquire)
2122 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
2123}
2124
2125void
2126NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
2127{
2128 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
2129
2130 for (auto const& it : app_.overlay().getActivePeers())
2131 {
2132 if (it && (it->getClosedLedgerHash() == deadLedger))
2133 {
2134 JLOG(m_journal.trace()) << "Killing obsolete peer status";
2135 it->cycleStatus();
2136 }
2137 }
2138
2139 uint256 networkClosed;
2140 bool ledgerChange =
2141 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
2142
2143 if (networkClosed.isZero())
2144 {
2145 CLOG(clog) << "endConsensus last closed ledger is zero. ";
2146 return;
2147 }
2148
2149 // WRITEME: Unless we are in FULL and in the process of doing a consensus,
2150 // we must count how many nodes share our LCL, how many nodes disagree with
2151 // our LCL, and how many validations our LCL has. We also want to check
2152 // timing to make sure there shouldn't be a newer LCL. We need this
2153 // information to do the next three tests.
2154
2155 if (((mMode == OperatingMode::CONNECTED) ||
2156 (mMode == OperatingMode::SYNCING)) &&
2157 !ledgerChange)
2158 {
2159 // Count number of peers that agree with us and UNL nodes whose
2160 // validations we have for LCL. If the ledger is good enough, go to
2161 // TRACKING - TODO
2162 if (!needNetworkLedger_)
2163 setMode(OperatingMode::TRACKING);
2164 }
2165
2166 if (((mMode == OperatingMode::CONNECTED) ||
2167 (mMode == OperatingMode::TRACKING)) &&
2168 !ledgerChange)
2169 {
2170 // check if the ledger is good enough to go to FULL
2171 // Note: Do not go to FULL if we don't have the previous ledger
2172 // check if the ledger is bad enough to go to CONNECTE D -- TODO
2173 auto current = m_ledgerMaster.getCurrentLedger();
2174 if (app_.timeKeeper().now() < (current->info().parentCloseTime +
2175 2 * current->info().closeTimeResolution))
2176 {
2177 setMode(OperatingMode::FULL);
2178 }
2179 }
2180
2181 beginConsensus(networkClosed, clog);
2182}
2183
2184void
2185NetworkOPsImp::consensusViewChange()
2186{
2187 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2188 {
2189 setMode(OperatingMode::CONNECTED);
2190 }
2191}
2192
void
NetworkOPsImp::pubManifest(Manifest const& mo)
{
    // VFALCO consider std::shared_mutex
    std::lock_guard sl(mSubLock);

    if (!mStreamMaps[sManifests].empty())
    {
        // NOTE(review): the declaration of jvObj (a Json::Value object
        // populated below) appears to be missing from this listing —
        // confirm against the original source.

        jvObj[jss::type] = "manifestReceived";
        jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
        if (mo.signingKey)
            jvObj[jss::signing_key] =
                toBase58(TokenType::NodePublic, *mo.signingKey);
        jvObj[jss::seq] = Json::UInt(mo.sequence);
        if (auto sig = mo.getSignature())
            jvObj[jss::signature] = strHex(*sig);
        jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
        if (!mo.domain.empty())
            jvObj[jss::domain] = mo.domain;
        jvObj[jss::manifest] = strHex(mo.serialized);

        // Send to every live subscriber; drop entries whose weak_ptr has
        // expired.
        for (auto i = mStreamMaps[sManifests].begin();
             i != mStreamMaps[sManifests].end();)
        {
            if (auto p = i->second.lock())
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sManifests].erase(i);
            }
        }
    }
}
2231
// Snapshot of the current fee state: server load factor and base from the
// load fee tracker, the base fee supplied by the caller, and the TxQ
// escalation metrics (moved in).
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    XRPAmount fee,
    TxQ::Metrics&& escalationMetrics,
    LoadFeeTrack const& loadFeeTrack)
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , baseFee{fee}
    , em{std::move(escalationMetrics)}
{
}
2242
bool
// NOTE(review): the function header line (presumably
// NetworkOPsImp::ServerFeeSummary::operator!=( ) appears to be missing
// from this listing — confirm against the original source.
    NetworkOPsImp::ServerFeeSummary const& b) const
{
    // Two summaries differ if any scalar fee field differs, or if only one
    // side carries escalation metrics.
    if (loadFactorServer != b.loadFactorServer ||
        loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
        em.has_value() != b.em.has_value())
        return true;

    // Both sides have metrics: compare the individual fee levels.
    if (em && b.em)
    {
        return (
            em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
            em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
            em->referenceFeeLevel != b.em->referenceFeeLevel);
    }

    return false;
}
2262
// Need to cap uint64 to uint32 due to JSON limitations
static std::uint32_t
// NOTE(review): the parameter list line (a std::uint64_t 'v'?) and the
// declaration of 'max32' appear to be missing from this listing — confirm
// against the original source.
{

    return std::min(max32, v);
};
2271
void
// NOTE(review): the function header line (NetworkOPsImp::pubServer()?) and
// the subscription-lock acquisition appear to be missing from this listing
// — confirm against the original source.
{
    // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
    // list into a local array while holding the lock then release
    // the lock and call send on everyone.
    //

    if (!mStreamMaps[sServer].empty())
    {

        // NOTE(review): the declarations of the outgoing JSON object and of
        // the ServerFeeSummary 'f' (whose initializer these lines belong
        // to) appear to be truncated here — confirm against the original.
            app_.openLedger().current()->fees().base,
            app_.getFeeTrack()};

        jvObj[jss::type] = "serverStatus";
        jvObj[jss::server_status] = strOperatingMode();
        jvObj[jss::load_base] = f.loadBaseServer;
        jvObj[jss::load_factor_server] = f.loadFactorServer;
        jvObj[jss::base_fee] = f.baseFee.jsonClipped();

        if (f.em)
        {
            // Overall load factor is the larger of the server load and the
            // escalated open-ledger fee level (scaled to the load base).
            auto const loadFactor = std::max(
                safe_cast<std::uint64_t>(f.loadFactorServer),
                mulDiv(
                    f.em->openLedgerFeeLevel,
                    f.loadBaseServer,
                    f.em->referenceFeeLevel)
            // NOTE(review): the end of this expression (a fallback for the
            // mulDiv result?) appears to be missing from this listing.

            jvObj[jss::load_factor] = trunc32(loadFactor);
            jvObj[jss::load_factor_fee_escalation] =
                f.em->openLedgerFeeLevel.jsonClipped();
            jvObj[jss::load_factor_fee_queue] =
                f.em->minProcessingFeeLevel.jsonClipped();
            jvObj[jss::load_factor_fee_reference] =
                f.em->referenceFeeLevel.jsonClipped();
        }
        else
            jvObj[jss::load_factor] = f.loadFactorServer;

        mLastFeeSummary = f;

        // Send to every live subscriber; drop expired weak_ptr entries.
        for (auto i = mStreamMaps[sServer].begin();
             i != mStreamMaps[sServer].end();)
        {
            InfoSub::pointer p = i->second.lock();

            // VFALCO TODO research the possibility of using thread queues and
            // linearizing the deletion of subscribers with the
            // sending of JSON data.
            if (p)
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sServer].erase(i);
            }
        }
    }
}
2339
void
// NOTE(review): the function header line (presumably
// NetworkOPsImp::reportConsensusStateChange(ConsensusPhase phase)) and the
// subscription-lock acquisition appear to be missing from this listing —
// confirm against the original source.
{

    auto& streamMap = mStreamMaps[sConsensusPhase];
    if (!streamMap.empty())
    {
        // NOTE(review): the declaration of jvObj appears to be missing here.
        jvObj[jss::type] = "consensusPhase";
        jvObj[jss::consensus] = to_string(phase);

        // Send to every live subscriber; drop expired weak_ptr entries.
        for (auto i = streamMap.begin(); i != streamMap.end();)
        {
            if (auto p = i->second.lock())
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = streamMap.erase(i);
            }
        }
    }
}
2366
void
// NOTE(review): the function header line (presumably
// NetworkOPsImp::pubValidation taking a validation 'val') and the
// subscription-lock acquisition appear to be missing from this listing —
// confirm against the original source.
{
    // VFALCO consider std::shared_mutex

    if (!mStreamMaps[sValidations].empty())
    {
        // NOTE(review): the declaration of jvObj appears to be missing here.

        auto const signerPublic = val->getSignerPublic();

        jvObj[jss::type] = "validationReceived";
        jvObj[jss::validation_public_key] =
            toBase58(TokenType::NodePublic, signerPublic);
        jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
        jvObj[jss::signature] = strHex(val->getSignature());
        jvObj[jss::full] = val->isFull();
        jvObj[jss::flags] = val->getFlags();
        jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
        jvObj[jss::data] = strHex(val->getSerializer().slice());

        if (auto version = (*val)[~sfServerVersion])
            jvObj[jss::server_version] = std::to_string(*version);

        if (auto cookie = (*val)[~sfCookie])
            jvObj[jss::cookie] = std::to_string(*cookie);

        if (auto hash = (*val)[~sfValidatedHash])
            jvObj[jss::validated_hash] = strHex(*hash);

        // Only report a master key if it differs from the signing key.
        auto const masterKey =
            app_.validatorManifests().getMasterKey(signerPublic);

        if (masterKey != signerPublic)
            jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);

        // NOTE *seq is a number, but old API versions used string. We replace
        // number with a string using MultiApiJson near end of this function
        if (auto const seq = (*val)[~sfLedgerSequence])
            jvObj[jss::ledger_index] = *seq;

        if (val->isFieldPresent(sfAmendments))
        {
            jvObj[jss::amendments] = Json::Value(Json::arrayValue);
            for (auto const& amendment : val->getFieldV256(sfAmendments))
                jvObj[jss::amendments].append(to_string(amendment));
        }

        if (auto const closeTime = (*val)[~sfCloseTime])
            jvObj[jss::close_time] = *closeTime;

        if (auto const loadFee = (*val)[~sfLoadFee])
            jvObj[jss::load_fee] = *loadFee;

        if (auto const baseFee = val->at(~sfBaseFee))
            jvObj[jss::base_fee] = static_cast<double>(*baseFee);

        if (auto const reserveBase = val->at(~sfReserveBase))
            jvObj[jss::reserve_base] = *reserveBase;

        if (auto const reserveInc = val->at(~sfReserveIncrement))
            jvObj[jss::reserve_inc] = *reserveInc;

        // (The ~ operator converts the Proxy to a std::optional, which
        // simplifies later operations)
        if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
            baseFeeXRP && baseFeeXRP->native())
            jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();

        if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
            reserveBaseXRP && reserveBaseXRP->native())
            jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();

        if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
            reserveIncXRP && reserveIncXRP->native())
            jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();

        // NOTE Use MultiApiJson to publish two slightly different JSON objects
        // for consumers supporting different API versions
        MultiApiJson multiObj{jvObj};
        multiObj.visit(
            RPC::apiVersion<1>, //
            [](Json::Value& jvTx) {
                // Type conversion for older API versions to string
                if (jvTx.isMember(jss::ledger_index))
                {
                    jvTx[jss::ledger_index] =
                        std::to_string(jvTx[jss::ledger_index].asUInt());
                }
            });

        // Send the version-appropriate form to each live subscriber; drop
        // expired weak_ptr entries.
        for (auto i = mStreamMaps[sValidations].begin();
             i != mStreamMaps[sValidations].end();)
        {
            if (auto p = i->second.lock())
            {
                multiObj.visit(
                    p->getApiVersion(), //
                    [&](Json::Value const& jv) { p->send(jv, true); });
                ++i;
            }
            else
            {
                i = mStreamMaps[sValidations].erase(i);
            }
        }
    }
}
2476
void
// NOTE(review): the function header line (presumably
// NetworkOPsImp::pubPeerStatus taking the 'func' JSON producer invoked
// below) and the subscription-lock acquisition appear to be missing from
// this listing — confirm against the original source.
{

    if (!mStreamMaps[sPeerStatus].empty())
    {
        // Build the event payload lazily — only when subscribers exist.
        Json::Value jvObj(func());

        jvObj[jss::type] = "peerStatusChange";

        // Send to every live subscriber; drop expired weak_ptr entries.
        for (auto i = mStreamMaps[sPeerStatus].begin();
             i != mStreamMaps[sPeerStatus].end();)
        {
            InfoSub::pointer p = i->second.lock();

            if (p)
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sPeerStatus].erase(i);
            }
        }
    }
}
2505
void
// NOTE(review): the function header line (presumably
// NetworkOPsImp::setMode(OperatingMode om)) appears to be missing from
// this listing, as do the statements inside the two conditional blocks
// below and the body of the isBlocked() check — confirm against the
// original source.
{
    using namespace std::chrono_literals;
    if (om == OperatingMode::CONNECTED)
    {
    }
    else if (om == OperatingMode::SYNCING)
    {
    }

    if ((om > OperatingMode::CONNECTED) && isBlocked())

    // No transition: nothing to record or publish.
    if (mMode == om)
        return;

    mMode = om;

    // Track time spent in each operating mode for reporting.
    accounting_.mode(om);

    JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
    // Notify "server" stream subscribers of the state change.
    pubServer();
}
2534
bool
// NOTE(review): the function header line (presumably
// NetworkOPsImp::recvValidation) and its first parameter (the validation
// 'val' used throughout) appear to be missing from this listing — confirm
// against the original source.
    std::string const& source)
{
    JLOG(m_journal.trace())
        << "recvValidation " << val->getLedgerHash() << " from " << source;

    // NOTE(review): the acquisition of 'lock' (unlocked via scope_unlock
    // and lock.unlock() below) appears to be missing from this listing.
    BypassAccept bypassAccept = BypassAccept::no;
    try
    {
        // If this ledger hash is already being processed, skip the accept
        // work for this duplicate arrival.
        if (pendingValidations_.contains(val->getLedgerHash()))
            bypassAccept = BypassAccept::yes;
        else
            pendingValidations_.insert(val->getLedgerHash());
        scope_unlock unlock(lock);
        handleNewValidation(app_, val, source, bypassAccept, m_journal);
    }
    catch (std::exception const& e)
    {
        JLOG(m_journal.warn())
            << "Exception thrown for handling new validation "
            << val->getLedgerHash() << ": " << e.what();
    }
    catch (...)
    {
        JLOG(m_journal.warn())
            << "Unknown exception thrown for handling new validation "
            << val->getLedgerHash();
    }
    // Only the arrival that inserted the hash removes it again.
    if (bypassAccept == BypassAccept::no)
    {
        pendingValidations_.erase(val->getLedgerHash());
    }
    lock.unlock();

    pubValidation(val);

    JLOG(m_journal.debug()) << [this, &val]() -> auto {
        // NOTE(review): the declaration of 'ss' (a string stream) appears
        // to be missing from this listing.
        ss << "VALIDATION: " << val->render() << " master_key: ";
        auto master = app_.validators().getTrustedKey(val->getSignerPublic());
        if (master)
        {
            ss << toBase58(TokenType::NodePublic, *master);
        }
        else
        {
            ss << "none";
        }
        return ss.str();
    }();

    // We will always relay trusted validations; if configured, we will
    // also relay all untrusted validations.
    return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
}
2593
// NOTE(review): the signature lines (presumably Json::Value
// NetworkOPsImp::getConsensusInfo()) appear to be missing from this
// listing — confirm against the original source.
{
    // Return the consensus engine's full state dump.
    return mConsensus.getJson(true);
}
2599
2601NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2602{
2604
2605 // System-level warnings
2606 {
2607 Json::Value warnings{Json::arrayValue};
2608 if (isAmendmentBlocked())
2609 {
2610 Json::Value& w = warnings.append(Json::objectValue);
2611 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2612 w[jss::message] =
2613 "This server is amendment blocked, and must be updated to be "
2614 "able to stay in sync with the network.";
2615 }
2616 if (isUNLBlocked())
2617 {
2618 Json::Value& w = warnings.append(Json::objectValue);
2619 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2620 w[jss::message] =
2621 "This server has an expired validator list. validators.txt "
2622 "may be incorrectly configured or some [validator_list_sites] "
2623 "may be unreachable.";
2624 }
2625 if (admin && isAmendmentWarned())
2626 {
2627 Json::Value& w = warnings.append(Json::objectValue);
2628 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2629 w[jss::message] =
2630 "One or more unsupported amendments have reached majority. "
2631 "Upgrade to the latest version before they are activated "
2632 "to avoid being amendment blocked.";
2633 if (auto const expected =
2635 {
2636 auto& d = w[jss::details] = Json::objectValue;
2637 d[jss::expected_date] = expected->time_since_epoch().count();
2638 d[jss::expected_date_UTC] = to_string(*expected);
2639 }
2640 }
2641
2642 if (warnings.size())
2643 info[jss::warnings] = std::move(warnings);
2644 }
2645
2646 // hostid: unique string describing the machine
2647 if (human)
2648 info[jss::hostid] = getHostId(admin);
2649
2650 // domain: if configured with a domain, report it:
2651 if (!app_.config().SERVER_DOMAIN.empty())
2652 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2653
2654 info[jss::build_version] = BuildInfo::getVersionString();
2655
2656 info[jss::server_state] = strOperatingMode(admin);
2657
2658 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2660
2662 info[jss::network_ledger] = "waiting";
2663
2664 info[jss::validation_quorum] =
2665 static_cast<Json::UInt>(app_.validators().quorum());
2666
2667 if (admin)
2668 {
2669 switch (app_.config().NODE_SIZE)
2670 {
2671 case 0:
2672 info[jss::node_size] = "tiny";
2673 break;
2674 case 1:
2675 info[jss::node_size] = "small";
2676 break;
2677 case 2:
2678 info[jss::node_size] = "medium";
2679 break;
2680 case 3:
2681 info[jss::node_size] = "large";
2682 break;
2683 case 4:
2684 info[jss::node_size] = "huge";
2685 break;
2686 }
2687
2688 auto when = app_.validators().expires();
2689
2690 if (!human)
2691 {
2692 if (when)
2693 info[jss::validator_list_expires] =
2694 safe_cast<Json::UInt>(when->time_since_epoch().count());
2695 else
2696 info[jss::validator_list_expires] = 0;
2697 }
2698 else
2699 {
2700 auto& x = (info[jss::validator_list] = Json::objectValue);
2701
2702 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2703
2704 if (when)
2705 {
2706 if (*when == TimeKeeper::time_point::max())
2707 {
2708 x[jss::expiration] = "never";
2709 x[jss::status] = "active";
2710 }
2711 else
2712 {
2713 x[jss::expiration] = to_string(*when);
2714
2715 if (*when > app_.timeKeeper().now())
2716 x[jss::status] = "active";
2717 else
2718 x[jss::status] = "expired";
2719 }
2720 }
2721 else
2722 {
2723 x[jss::status] = "unknown";
2724 x[jss::expiration] = "unknown";
2725 }
2726 }
2727
2728#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2729 {
2730 auto& x = (info[jss::git] = Json::objectValue);
2731#ifdef GIT_COMMIT_HASH
2732 x[jss::hash] = GIT_COMMIT_HASH;
2733#endif
2734#ifdef GIT_BRANCH
2735 x[jss::branch] = GIT_BRANCH;
2736#endif
2737 }
2738#endif
2739 }
2740 info[jss::io_latency_ms] =
2741 static_cast<Json::UInt>(app_.getIOLatency().count());
2742
2743 if (admin)
2744 {
2745 if (auto const localPubKey = app_.validators().localPublicKey();
2746 localPubKey && app_.getValidationPublicKey())
2747 {
2748 info[jss::pubkey_validator] =
2749 toBase58(TokenType::NodePublic, localPubKey.value());
2750 }
2751 else
2752 {
2753 info[jss::pubkey_validator] = "none";
2754 }
2755 }
2756
2757 if (counters)
2758 {
2759 info[jss::counters] = app_.getPerfLog().countersJson();
2760
2761 Json::Value nodestore(Json::objectValue);
2762 app_.getNodeStore().getCountsJson(nodestore);
2763 info[jss::counters][jss::nodestore] = nodestore;
2764 info[jss::current_activities] = app_.getPerfLog().currentJson();
2765 }
2766
2767 info[jss::pubkey_node] =
2769
2770 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2771
2773 info[jss::amendment_blocked] = true;
2774
2775 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2776
2777 if (fp != 0)
2778 info[jss::fetch_pack] = Json::UInt(fp);
2779
2780 info[jss::peers] = Json::UInt(app_.overlay().size());
2781
2782 Json::Value lastClose = Json::objectValue;
2783 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2784
2785 if (human)
2786 {
2787 lastClose[jss::converge_time_s] =
2789 }
2790 else
2791 {
2792 lastClose[jss::converge_time] =
2794 }
2795
2796 info[jss::last_close] = lastClose;
2797
2798 // info[jss::consensus] = mConsensus.getJson();
2799
2800 if (admin)
2801 info[jss::load] = m_job_queue.getJson();
2802
2803 if (auto const netid = app_.overlay().networkID())
2804 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2805
2806 auto const escalationMetrics =
2808
2809 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2810 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2811 /* Scale the escalated fee level to unitless "load factor".
2812 In practice, this just strips the units, but it will continue
2813 to work correctly if either base value ever changes. */
2814 auto const loadFactorFeeEscalation =
2815 mulDiv(
2816 escalationMetrics.openLedgerFeeLevel,
2817 loadBaseServer,
2818 escalationMetrics.referenceFeeLevel)
2820
2821 auto const loadFactor = std::max(
2822 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2823
2824 if (!human)
2825 {
2826 info[jss::load_base] = loadBaseServer;
2827 info[jss::load_factor] = trunc32(loadFactor);
2828 info[jss::load_factor_server] = loadFactorServer;
2829
2830 /* Json::Value doesn't support uint64, so clamp to max
2831 uint32 value. This is mostly theoretical, since there
2832 probably isn't enough extant XRP to drive the factor
2833 that high.
2834 */
2835 info[jss::load_factor_fee_escalation] =
2836 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2837 info[jss::load_factor_fee_queue] =
2838 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2839 info[jss::load_factor_fee_reference] =
2840 escalationMetrics.referenceFeeLevel.jsonClipped();
2841 }
2842 else
2843 {
2844 info[jss::load_factor] =
2845 static_cast<double>(loadFactor) / loadBaseServer;
2846
2847 if (loadFactorServer != loadFactor)
2848 info[jss::load_factor_server] =
2849 static_cast<double>(loadFactorServer) / loadBaseServer;
2850
2851 if (admin)
2852 {
2854 if (fee != loadBaseServer)
2855 info[jss::load_factor_local] =
2856 static_cast<double>(fee) / loadBaseServer;
2858 if (fee != loadBaseServer)
2859 info[jss::load_factor_net] =
2860 static_cast<double>(fee) / loadBaseServer;
2862 if (fee != loadBaseServer)
2863 info[jss::load_factor_cluster] =
2864 static_cast<double>(fee) / loadBaseServer;
2865 }
2866 if (escalationMetrics.openLedgerFeeLevel !=
2867 escalationMetrics.referenceFeeLevel &&
2868 (admin || loadFactorFeeEscalation != loadFactor))
2869 info[jss::load_factor_fee_escalation] =
2870 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2871 escalationMetrics.referenceFeeLevel);
2872 if (escalationMetrics.minProcessingFeeLevel !=
2873 escalationMetrics.referenceFeeLevel)
2874 info[jss::load_factor_fee_queue] =
2875 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2876 escalationMetrics.referenceFeeLevel);
2877 }
2878
2879 bool valid = false;
2880 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2881
2882 if (lpClosed)
2883 valid = true;
2884 else
2885 lpClosed = m_ledgerMaster.getClosedLedger();
2886
2887 if (lpClosed)
2888 {
2889 XRPAmount const baseFee = lpClosed->fees().base;
2891 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2892 l[jss::hash] = to_string(lpClosed->info().hash);
2893
2894 if (!human)
2895 {
2896 l[jss::base_fee] = baseFee.jsonClipped();
2897 l[jss::reserve_base] =
2898 lpClosed->fees().accountReserve(0).jsonClipped();
2899 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2900 l[jss::close_time] = Json::Value::UInt(
2901 lpClosed->info().closeTime.time_since_epoch().count());
2902 }
2903 else
2904 {
2905 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2906 l[jss::reserve_base_xrp] =
2907 lpClosed->fees().accountReserve(0).decimalXRP();
2908 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2909
2910 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2911 std::abs(closeOffset.count()) >= 60)
2912 l[jss::close_time_offset] =
2913 static_cast<std::uint32_t>(closeOffset.count());
2914
2915 constexpr std::chrono::seconds highAgeThreshold{1000000};
2917 {
2918 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2919 l[jss::age] =
2920 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2921 }
2922 else
2923 {
2924 auto lCloseTime = lpClosed->info().closeTime;
2925 auto closeTime = app_.timeKeeper().closeTime();
2926 if (lCloseTime <= closeTime)
2927 {
2928 using namespace std::chrono_literals;
2929 auto age = closeTime - lCloseTime;
2930 l[jss::age] =
2931 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2932 }
2933 }
2934 }
2935
2936 if (valid)
2937 info[jss::validated_ledger] = l;
2938 else
2939 info[jss::closed_ledger] = l;
2940
2941 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2942 if (!lpPublished)
2943 info[jss::published_ledger] = "none";
2944 else if (lpPublished->info().seq != lpClosed->info().seq)
2945 info[jss::published_ledger] = lpPublished->info().seq;
2946 }
2947
2948 accounting_.json(info);
2949 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2950 info[jss::jq_trans_overflow] =
2952 info[jss::peer_disconnects] =
2954 info[jss::peer_disconnects_resources] =
2956
2957 // This array must be sorted in increasing order.
2958 static constexpr std::array<std::string_view, 7> protocols{
2959 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2960 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2961 {
2963 for (auto const& port : app_.getServerHandler().setup().ports)
2964 {
2965 // Don't publish admin ports for non-admin users
2966 if (!admin &&
2967 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2968 port.admin_user.empty() && port.admin_password.empty()))
2969 continue;
2972 std::begin(port.protocol),
2973 std::end(port.protocol),
2974 std::begin(protocols),
2975 std::end(protocols),
2976 std::back_inserter(proto));
2977 if (!proto.empty())
2978 {
2979 auto& jv = ports.append(Json::Value(Json::objectValue));
2980 jv[jss::port] = std::to_string(port.port);
2981 jv[jss::protocol] = Json::Value{Json::arrayValue};
2982 for (auto const& p : proto)
2983 jv[jss::protocol].append(p);
2984 }
2985 }
2986
2987 if (app_.config().exists(SECTION_PORT_GRPC))
2988 {
2989 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
2990 auto const optPort = grpcSection.get("port");
2991 if (optPort && grpcSection.get("ip"))
2992 {
2993 auto& jv = ports.append(Json::Value(Json::objectValue));
2994 jv[jss::port] = *optPort;
2995 jv[jss::protocol] = Json::Value{Json::arrayValue};
2996 jv[jss::protocol].append("grpc");
2997 }
2998 }
2999 info[jss::ports] = std::move(ports);
3000 }
3001
3002 return info;
3003}
3004
3005void
3007{
3009}
3010
3013{
3014 return app_.getInboundLedgers().getInfo();
3015}
3016
3017void
3019 std::shared_ptr<ReadView const> const& ledger,
3020 std::shared_ptr<STTx const> const& transaction,
3021 TER result)
3022{
3023 MultiApiJson jvObj =
3024 transJson(transaction, result, false, ledger, std::nullopt);
3025
3026 {
3028
3029 auto it = mStreamMaps[sRTTransactions].begin();
3030 while (it != mStreamMaps[sRTTransactions].end())
3031 {
3032 InfoSub::pointer p = it->second.lock();
3033
3034 if (p)
3035 {
3036 jvObj.visit(
3037 p->getApiVersion(), //
3038 [&](Json::Value const& jv) { p->send(jv, true); });
3039 ++it;
3040 }
3041 else
3042 {
3043 it = mStreamMaps[sRTTransactions].erase(it);
3044 }
3045 }
3046 }
3047
3048 pubProposedAccountTransaction(ledger, transaction, result);
3049}
3050
3051void
3053{
3054 // Ledgers are published only when they acquire sufficient validations
3055 // Holes are filled across connection loss or other catastrophe
3056
3058 app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
3059 if (!alpAccepted)
3060 {
3061 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
3062 app_.getAcceptedLedgerCache().canonicalize_replace_client(
3063 lpAccepted->info().hash, alpAccepted);
3064 }
3065
3066 XRPL_ASSERT(
3067 alpAccepted->getLedger().get() == lpAccepted.get(),
3068 "ripple::NetworkOPsImp::pubLedger : accepted input");
3069
3070 {
3071 JLOG(m_journal.debug())
3072 << "Publishing ledger " << lpAccepted->info().seq << " "
3073 << lpAccepted->info().hash;
3074
3076
3077 if (!mStreamMaps[sLedger].empty())
3078 {
3080
3081 jvObj[jss::type] = "ledgerClosed";
3082 jvObj[jss::ledger_index] = lpAccepted->info().seq;
3083 jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
3084 jvObj[jss::ledger_time] = Json::Value::UInt(
3085 lpAccepted->info().closeTime.time_since_epoch().count());
3086
3087 if (!lpAccepted->rules().enabled(featureXRPFees))
3088 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3089 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
3090 jvObj[jss::reserve_base] =
3091 lpAccepted->fees().accountReserve(0).jsonClipped();
3092 jvObj[jss::reserve_inc] =
3093 lpAccepted->fees().increment.jsonClipped();
3094
3095 jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
3096
3098 {
3099 jvObj[jss::validated_ledgers] =
3101 }
3102
3103 auto it = mStreamMaps[sLedger].begin();
3104 while (it != mStreamMaps[sLedger].end())
3105 {
3106 InfoSub::pointer p = it->second.lock();
3107 if (p)
3108 {
3109 p->send(jvObj, true);
3110 ++it;
3111 }
3112 else
3113 it = mStreamMaps[sLedger].erase(it);
3114 }
3115 }
3116
3117 if (!mStreamMaps[sBookChanges].empty())
3118 {
3119 Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
3120
3121 auto it = mStreamMaps[sBookChanges].begin();
3122 while (it != mStreamMaps[sBookChanges].end())
3123 {
3124 InfoSub::pointer p = it->second.lock();
3125 if (p)
3126 {
3127 p->send(jvObj, true);
3128 ++it;
3129 }
3130 else
3131 it = mStreamMaps[sBookChanges].erase(it);
3132 }
3133 }
3134
3135 {
3136 static bool firstTime = true;
3137 if (firstTime)
3138 {
3139 // First validated ledger, start delayed SubAccountHistory
3140 firstTime = false;
3141 for (auto& outer : mSubAccountHistory)
3142 {
3143 for (auto& inner : outer.second)
3144 {
3145 auto& subInfo = inner.second;
3146 if (subInfo.index_->separationLedgerSeq_ == 0)
3147 {
3149 alpAccepted->getLedger(), subInfo);
3150 }
3151 }
3152 }
3153 }
3154 }
3155 }
3156
3157 // Don't lock since pubAcceptedTransaction is locking.
3158 for (auto const& accTx : *alpAccepted)
3159 {
3160 JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
3162 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3163 }
3164}
3165
3166void
3168{
3170 app_.openLedger().current()->fees().base,
3172 app_.getFeeTrack()};
3173
3174 // only schedule the job if something has changed
3175 if (f != mLastFeeSummary)
3176 {
3178 jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
3179 pubServer();
3180 });
3181 }
3182}
3183
3184void
3186{
3189 "reportConsensusStateChange->pubConsensus",
3190 [this, phase]() { pubConsensus(phase); });
3191}
3192
3193inline void
3195{
3196 m_localTX->sweep(view);
3197}
3198inline std::size_t
3200{
3201 return m_localTX->size();
3202}
3203
3204// This routine should only be used to publish accepted or validated
3205// transactions.
3208 std::shared_ptr<STTx const> const& transaction,
3209 TER result,
3210 bool validated,
3211 std::shared_ptr<ReadView const> const& ledger,
3213{
3215 std::string sToken;
3216 std::string sHuman;
3217
3218 transResultInfo(result, sToken, sHuman);
3219
3220 jvObj[jss::type] = "transaction";
3221 // NOTE jvObj is not a finished object for either API version. After
3222 // it's populated, we need to finish it for a specific API version. This is
3223 // done in a loop, near the end of this function.
3224 jvObj[jss::transaction] =
3225 transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3226
3227 if (meta)
3228 {
3229 jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
3231 jvObj[jss::meta], *ledger, transaction, meta->get());
3233 jvObj[jss::meta], transaction, meta->get());
3234 }
3235
3236 // add CTID where the needed data for it exists
3237 if (auto const& lookup = ledger->txRead(transaction->getTransactionID());
3238 lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3239 {
3240 uint32_t const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
3241 uint32_t netID = app_.config().NETWORK_ID;
3242 if (transaction->isFieldPresent(sfNetworkID))
3243 netID = transaction->getFieldU32(sfNetworkID);
3244
3246 RPC::encodeCTID(ledger->info().seq, txnSeq, netID);
3247 ctid)
3248 jvObj[jss::ctid] = *ctid;
3249 }
3250 if (!ledger->open())
3251 jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3252
3253 if (validated)
3254 {
3255 jvObj[jss::ledger_index] = ledger->info().seq;
3256 jvObj[jss::transaction][jss::date] =
3257 ledger->info().closeTime.time_since_epoch().count();
3258 jvObj[jss::validated] = true;
3259 jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3260
3261 // WRITEME: Put the account next seq here
3262 }
3263 else
3264 {
3265 jvObj[jss::validated] = false;
3266 jvObj[jss::ledger_current_index] = ledger->info().seq;
3267 }
3268
3269 jvObj[jss::status] = validated ? "closed" : "proposed";
3270 jvObj[jss::engine_result] = sToken;
3271 jvObj[jss::engine_result_code] = result;
3272 jvObj[jss::engine_result_message] = sHuman;
3273
3274 if (transaction->getTxnType() == ttOFFER_CREATE)
3275 {
3276 auto const account = transaction->getAccountID(sfAccount);
3277 auto const amount = transaction->getFieldAmount(sfTakerGets);
3278
3279 // If the offer create is not self funded then add the owner balance
3280 if (account != amount.issue().account)
3281 {
3282 auto const ownerFunds = accountFunds(
3283 *ledger,
3284 account,
3285 amount,
3287 app_.journal("View"));
3288 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3289 }
3290 }
3291
3292 std::string const hash = to_string(transaction->getTransactionID());
3293 MultiApiJson multiObj{jvObj};
3295 multiObj.visit(), //
3296 [&]<unsigned Version>(
3298 RPC::insertDeliverMax(
3299 jvTx[jss::transaction], transaction->getTxnType(), Version);
3300
3301 if constexpr (Version > 1)
3302 {
3303 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3304 jvTx[jss::hash] = hash;
3305 }
3306 else
3307 {
3308 jvTx[jss::transaction][jss::hash] = hash;
3309 }
3310 });
3311
3312 return multiObj;
3313}
3314
3315void
3317 std::shared_ptr<ReadView const> const& ledger,
3318 AcceptedLedgerTx const& transaction,
3319 bool last)
3320{
3321 auto const& stTxn = transaction.getTxn();
3322
3323 // Create two different Json objects, for different API versions
3324 auto const metaRef = std::ref(transaction.getMeta());
3325 auto const trResult = transaction.getResult();
3326 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3327
3328 {
3330
3331 auto it = mStreamMaps[sTransactions].begin();
3332 while (it != mStreamMaps[sTransactions].end())
3333 {
3334 InfoSub::pointer p = it->second.lock();
3335
3336 if (p)
3337 {
3338 jvObj.visit(
3339 p->getApiVersion(), //
3340 [&](Json::Value const& jv) { p->send(jv, true); });
3341 ++it;
3342 }
3343 else
3344 it = mStreamMaps[sTransactions].erase(it);
3345 }
3346
3347 it = mStreamMaps[sRTTransactions].begin();
3348
3349 while (it != mStreamMaps[sRTTransactions].end())
3350 {
3351 InfoSub::pointer p = it->second.lock();
3352
3353 if (p)
3354 {
3355 jvObj.visit(
3356 p->getApiVersion(), //
3357 [&](Json::Value const& jv) { p->send(jv, true); });
3358 ++it;
3359 }
3360 else
3361 it = mStreamMaps[sRTTransactions].erase(it);
3362 }
3363 }
3364
3365 if (transaction.getResult() == tesSUCCESS)
3366 app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3367
3368 pubAccountTransaction(ledger, transaction, last);
3369}
3370
3371void
3373 std::shared_ptr<ReadView const> const& ledger,
3374 AcceptedLedgerTx const& transaction,
3375 bool last)
3376{
3378 int iProposed = 0;
3379 int iAccepted = 0;
3380
3381 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3382 auto const currLedgerSeq = ledger->seq();
3383 {
3385
3386 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3388 {
3389 for (auto const& affectedAccount : transaction.getAffected())
3390 {
3391 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3392 simiIt != mSubRTAccount.end())
3393 {
3394 auto it = simiIt->second.begin();
3395
3396 while (it != simiIt->second.end())
3397 {
3398 InfoSub::pointer p = it->second.lock();
3399
3400 if (p)
3401 {
3402 notify.insert(p);
3403 ++it;
3404 ++iProposed;
3405 }
3406 else
3407 it = simiIt->second.erase(it);
3408 }
3409 }
3410
3411 if (auto simiIt = mSubAccount.find(affectedAccount);
3412 simiIt != mSubAccount.end())
3413 {
3414 auto it = simiIt->second.begin();
3415 while (it != simiIt->second.end())
3416 {
3417 InfoSub::pointer p = it->second.lock();
3418
3419 if (p)
3420 {
3421 notify.insert(p);
3422 ++it;
3423 ++iAccepted;
3424 }
3425 else
3426 it = simiIt->second.erase(it);
3427 }
3428 }
3429
3430 if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3431 histoIt != mSubAccountHistory.end())
3432 {
3433 auto& subs = histoIt->second;
3434 auto it = subs.begin();
3435 while (it != subs.end())
3436 {
3437 SubAccountHistoryInfoWeak const& info = it->second;
3438 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3439 {
3440 ++it;
3441 continue;
3442 }
3443
3444 if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3445 {
3446 accountHistoryNotify.emplace_back(
3447 SubAccountHistoryInfo{isSptr, info.index_});
3448 ++it;
3449 }
3450 else
3451 {
3452 it = subs.erase(it);
3453 }
3454 }
3455 if (subs.empty())
3456 mSubAccountHistory.erase(histoIt);
3457 }
3458 }
3459 }
3460 }
3461
3462 JLOG(m_journal.trace())
3463 << "pubAccountTransaction: "
3464 << "proposed=" << iProposed << ", accepted=" << iAccepted;
3465
3466 if (!notify.empty() || !accountHistoryNotify.empty())
3467 {
3468 auto const& stTxn = transaction.getTxn();
3469
3470 // Create two different Json objects, for different API versions
3471 auto const metaRef = std::ref(transaction.getMeta());
3472 auto const trResult = transaction.getResult();
3473 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3474
3475 for (InfoSub::ref isrListener : notify)
3476 {
3477 jvObj.visit(
3478 isrListener->getApiVersion(), //
3479 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3480 }
3481
3482 if (last)
3483 jvObj.set(jss::account_history_boundary, true);
3484
3485 XRPL_ASSERT(
3486 jvObj.isMember(jss::account_history_tx_stream) ==
3488 "ripple::NetworkOPsImp::pubAccountTransaction : "
3489 "account_history_tx_stream not set");
3490 for (auto& info : accountHistoryNotify)
3491 {
3492 auto& index = info.index_;
3493 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3494 jvObj.set(jss::account_history_tx_first, true);
3495
3496 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3497
3498 jvObj.visit(
3499 info.sink_->getApiVersion(), //
3500 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3501 }
3502 }
3503}
3504
3505void
3507 std::shared_ptr<ReadView const> const& ledger,
3509 TER result)
3510{
3512 int iProposed = 0;
3513
3514 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3515
3516 {
3518
3519 if (mSubRTAccount.empty())
3520 return;
3521
3522 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3524 {
3525 for (auto const& affectedAccount : tx->getMentionedAccounts())
3526 {
3527 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3528 simiIt != mSubRTAccount.end())
3529 {
3530 auto it = simiIt->second.begin();
3531
3532 while (it != simiIt->second.end())
3533 {
3534 InfoSub::pointer p = it->second.lock();
3535
3536 if (p)
3537 {
3538 notify.insert(p);
3539 ++it;
3540 ++iProposed;
3541 }
3542 else
3543 it = simiIt->second.erase(it);
3544 }
3545 }
3546 }
3547 }
3548 }
3549
3550 JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3551
3552 if (!notify.empty() || !accountHistoryNotify.empty())
3553 {
3554 // Create two different Json objects, for different API versions
3555 MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3556
3557 for (InfoSub::ref isrListener : notify)
3558 jvObj.visit(
3559 isrListener->getApiVersion(), //
3560 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3561
3562 XRPL_ASSERT(
3563 jvObj.isMember(jss::account_history_tx_stream) ==
3565 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3566 "account_history_tx_stream not set");
3567 for (auto& info : accountHistoryNotify)
3568 {
3569 auto& index = info.index_;
3570 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3571 jvObj.set(jss::account_history_tx_first, true);
3572 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3573 jvObj.visit(
3574 info.sink_->getApiVersion(), //
3575 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3576 }
3577 }
3578}
3579
3580//
3581// Monitoring
3582//
3583
3584void
3586 InfoSub::ref isrListener,
3587 hash_set<AccountID> const& vnaAccountIDs,
3588 bool rt)
3589{
3590 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3591
3592 for (auto const& naAccountID : vnaAccountIDs)
3593 {
3594 JLOG(m_journal.trace())
3595 << "subAccount: account: " << toBase58(naAccountID);
3596
3597 isrListener->insertSubAccountInfo(naAccountID, rt);
3598 }
3599
3601
3602 for (auto const& naAccountID : vnaAccountIDs)
3603 {
3604 auto simIterator = subMap.find(naAccountID);
3605 if (simIterator == subMap.end())
3606 {
3607 // Not found, note that account has a new single listner.
3608 SubMapType usisElement;
3609 usisElement[isrListener->getSeq()] = isrListener;
3610 // VFALCO NOTE This is making a needless copy of naAccountID
3611 subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3612 }
3613 else
3614 {
3615 // Found, note that the account has another listener.
3616 simIterator->second[isrListener->getSeq()] = isrListener;
3617 }
3618 }
3619}
3620
3621void
3623 InfoSub::ref isrListener,
3624 hash_set<AccountID> const& vnaAccountIDs,
3625 bool rt)
3626{
3627 for (auto const& naAccountID : vnaAccountIDs)
3628 {
3629 // Remove from the InfoSub
3630 isrListener->deleteSubAccountInfo(naAccountID, rt);
3631 }
3632
3633 // Remove from the server
3634 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3635}
3636
3637void
3639 std::uint64_t uSeq,
3640 hash_set<AccountID> const& vnaAccountIDs,
3641 bool rt)
3642{
3644
3645 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3646
3647 for (auto const& naAccountID : vnaAccountIDs)
3648 {
3649 auto simIterator = subMap.find(naAccountID);
3650
3651 if (simIterator != subMap.end())
3652 {
3653 // Found
3654 simIterator->second.erase(uSeq);
3655
3656 if (simIterator->second.empty())
3657 {
3658 // Don't need hash entry.
3659 subMap.erase(simIterator);
3660 }
3661 }
3662 }
3663}
3664
3665void
3667{
3668 enum DatabaseType { Sqlite, None };
3669 static auto const databaseType = [&]() -> DatabaseType {
3670 // Use a dynamic_cast to return DatabaseType::None
3671 // on failure.
3672 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3673 {
3674 return DatabaseType::Sqlite;
3675 }
3676 return DatabaseType::None;
3677 }();
3678
3679 if (databaseType == DatabaseType::None)
3680 {
3681 JLOG(m_journal.error())
3682 << "AccountHistory job for account "
3683 << toBase58(subInfo.index_->accountId_) << " no database";
3684 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3685 {
3686 sptr->send(rpcError(rpcINTERNAL), true);
3687 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3688 }
3689 return;
3690 }
3691
3694 "AccountHistoryTxStream",
3695 [this, dbType = databaseType, subInfo]() {
3696 auto const& accountId = subInfo.index_->accountId_;
3697 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3698 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3699
3700 JLOG(m_journal.trace())
3701 << "AccountHistory job for account " << toBase58(accountId)
3702 << " started. lastLedgerSeq=" << lastLedgerSeq;
3703
3704 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3705 std::shared_ptr<TxMeta> const& meta) -> bool {
3706 /*
3707 * genesis account: first tx is the one with seq 1
3708 * other account: first tx is the one created the account
3709 */
3710 if (accountId == genesisAccountId)
3711 {
3712 auto stx = tx->getSTransaction();
3713 if (stx->getAccountID(sfAccount) == accountId &&
3714 stx->getSeqValue() == 1)
3715 return true;
3716 }
3717
3718 for (auto& node : meta->getNodes())
3719 {
3720 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3721 continue;
3722
3723 if (node.isFieldPresent(sfNewFields))
3724 {
3725 if (auto inner = dynamic_cast<STObject const*>(
3726 node.peekAtPField(sfNewFields));
3727 inner)
3728 {
3729 if (inner->isFieldPresent(sfAccount) &&
3730 inner->getAccountID(sfAccount) == accountId)
3731 {
3732 return true;
3733 }
3734 }
3735 }
3736 }
3737
3738 return false;
3739 };
3740
3741 auto send = [&](Json::Value const& jvObj,
3742 bool unsubscribe) -> bool {
3743 if (auto sptr = subInfo.sinkWptr_.lock())
3744 {
3745 sptr->send(jvObj, true);
3746 if (unsubscribe)
3747 unsubAccountHistory(sptr, accountId, false);
3748 return true;
3749 }
3750
3751 return false;
3752 };
3753
3754 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3755 bool unsubscribe) -> bool {
3756 if (auto sptr = subInfo.sinkWptr_.lock())
3757 {
3758 jvObj.visit(
3759 sptr->getApiVersion(), //
3760 [&](Json::Value const& jv) { sptr->send(jv, true); });
3761
3762 if (unsubscribe)
3763 unsubAccountHistory(sptr, accountId, false);
3764 return true;
3765 }
3766
3767 return false;
3768 };
3769
3770 auto getMoreTxns =
3771 [&](std::uint32_t minLedger,
3772 std::uint32_t maxLedger,
3777 switch (dbType)
3778 {
3779 case Sqlite: {
3780 auto db = static_cast<SQLiteDatabase*>(
3783 accountId, minLedger, maxLedger, marker, 0, true};
3784 return db->newestAccountTxPage(options);
3785 }
3786 default: {
3787 UNREACHABLE(
3788 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3789 "getMoreTxns : invalid database type");
3790 return {};
3791 }
3792 }
3793 };
3794
3795 /*
3796 * search backward until the genesis ledger or asked to stop
3797 */
3798 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3799 {
3800 int feeChargeCount = 0;
3801 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3802 {
3803 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3804 ++feeChargeCount;
3805 }
3806 else
3807 {
3808 JLOG(m_journal.trace())
3809 << "AccountHistory job for account "
3810 << toBase58(accountId) << " no InfoSub. Fee charged "
3811 << feeChargeCount << " times.";
3812 return;
3813 }
3814
3815 // try to search in 1024 ledgers till reaching genesis ledgers
3816 auto startLedgerSeq =
3817 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3818 JLOG(m_journal.trace())
3819 << "AccountHistory job for account " << toBase58(accountId)
3820 << ", working on ledger range [" << startLedgerSeq << ","
3821 << lastLedgerSeq << "]";
3822
3823 auto haveRange = [&]() -> bool {
3824 std::uint32_t validatedMin = UINT_MAX;
3825 std::uint32_t validatedMax = 0;
3826 auto haveSomeValidatedLedgers =
3828 validatedMin, validatedMax);
3829
3830 return haveSomeValidatedLedgers &&
3831 validatedMin <= startLedgerSeq &&
3832 lastLedgerSeq <= validatedMax;
3833 }();
3834
3835 if (!haveRange)
3836 {
3837 JLOG(m_journal.debug())
3838 << "AccountHistory reschedule job for account "
3839 << toBase58(accountId) << ", incomplete ledger range ["
3840 << startLedgerSeq << "," << lastLedgerSeq << "]";
3842 return;
3843 }
3844
3846 while (!subInfo.index_->stopHistorical_)
3847 {
3848 auto dbResult =
3849 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3850 if (!dbResult)
3851 {
3852 JLOG(m_journal.debug())
3853 << "AccountHistory job for account "
3854 << toBase58(accountId) << " getMoreTxns failed.";
3855 send(rpcError(rpcINTERNAL), true);
3856 return;
3857 }
3858
3859 auto const& txns = dbResult->first;
3860 marker = dbResult->second;
3861 size_t num_txns = txns.size();
3862 for (size_t i = 0; i < num_txns; ++i)
3863 {
3864 auto const& [tx, meta] = txns[i];
3865
3866 if (!tx || !meta)
3867 {
3868 JLOG(m_journal.debug())
3869 << "AccountHistory job for account "
3870 << toBase58(accountId) << " empty tx or meta.";
3871 send(rpcError(rpcINTERNAL), true);
3872 return;
3873 }
3874 auto curTxLedger =
3876 tx->getLedger());
3877 if (!curTxLedger)
3878 {
3879 JLOG(m_journal.debug())
3880 << "AccountHistory job for account "
3881 << toBase58(accountId) << " no ledger.";
3882 send(rpcError(rpcINTERNAL), true);
3883 return;
3884 }
3886 tx->getSTransaction();
3887 if (!stTxn)
3888 {
3889 JLOG(m_journal.debug())
3890 << "AccountHistory job for account "
3891 << toBase58(accountId)
3892 << " getSTransaction failed.";
3893 send(rpcError(rpcINTERNAL), true);
3894 return;
3895 }
3896
3897 auto const mRef = std::ref(*meta);
3898 auto const trR = meta->getResultTER();
3899 MultiApiJson jvTx =
3900 transJson(stTxn, trR, true, curTxLedger, mRef);
3901
3902 jvTx.set(
3903 jss::account_history_tx_index, txHistoryIndex--);
3904 if (i + 1 == num_txns ||
3905 txns[i + 1].first->getLedger() != tx->getLedger())
3906 jvTx.set(jss::account_history_boundary, true);
3907
3908 if (isFirstTx(tx, meta))
3909 {
3910 jvTx.set(jss::account_history_tx_first, true);
3911 sendMultiApiJson(jvTx, false);
3912
3913 JLOG(m_journal.trace())
3914 << "AccountHistory job for account "
3915 << toBase58(accountId)
3916 << " done, found last tx.";
3917 return;
3918 }
3919 else
3920 {
3921 sendMultiApiJson(jvTx, false);
3922 }
3923 }
3924
3925 if (marker)
3926 {
3927 JLOG(m_journal.trace())
3928 << "AccountHistory job for account "
3929 << toBase58(accountId)
3930 << " paging, marker=" << marker->ledgerSeq << ":"
3931 << marker->txnSeq;
3932 }
3933 else
3934 {
3935 break;
3936 }
3937 }
3938
3939 if (!subInfo.index_->stopHistorical_)
3940 {
3941 lastLedgerSeq = startLedgerSeq - 1;
3942 if (lastLedgerSeq <= 1)
3943 {
3944 JLOG(m_journal.trace())
3945 << "AccountHistory job for account "
3946 << toBase58(accountId)
3947 << " done, reached genesis ledger.";
3948 return;
3949 }
3950 }
3951 }
3952 });
3953}
3954
// Prepare and launch the historical-transaction streaming job for one
// account_history subscription, using `ledger` (a validated ledger) as the
// separation point between "historical" and "live" transactions.
// NOTE(review): the HTML/doxygen extraction dropped the declarator line(s)
// (function name and the SubAccountHistoryInfoWeak& parameter) between the
// numbered lines 3955/3957/3959 below — restore from the upstream file.
3955void
3957 std::shared_ptr<ReadView const> const& ledger,
3959{
// Everything at or below this sequence is delivered by the history job;
// newer transactions come from the live stream.
3960 subInfo.index_->separationLedgerSeq_ = ledger->seq();
3961 auto const& accountId = subInfo.index_->accountId_;
3962 auto const accountKeylet = keylet::account(accountId);
// Account does not exist in this validated ledger: nothing to stream.
3963 if (!ledger->exists(accountKeylet))
3964 {
3965 JLOG(m_journal.debug())
3966 << "subAccountHistoryStart, no account " << toBase58(accountId)
3967 << ", no need to add AccountHistory job.";
3968 return;
3969 }
// Special-case the genesis account: Sequence == 1 means it has never
// sent a transaction, so there is no history to fetch.
3970 if (accountId == genesisAccountId)
3971 {
3972 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3973 {
3974 if (sleAcct->getFieldU32(sfSequence) == 1)
3975 {
3976 JLOG(m_journal.debug())
3977 << "subAccountHistoryStart, genesis account "
3978 << toBase58(accountId)
3979 << " does not have tx, no need to add AccountHistory job.";
3980 return;
3981 }
3982 }
3983 else
3984 {
// exists() succeeded above, so read() failing is an invariant violation.
3985 UNREACHABLE(
3986 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
3987 "access genesis account");
3988 return;
3989 }
3990 }
// Record where the backward walk starts and mark the subscription as
// having a historical component before scheduling the job.
3991 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
3992 subInfo.index_->haveHistorical_ = true;
3993
3994 JLOG(m_journal.debug())
3995 << "subAccountHistoryStart, add AccountHistory job: accountId="
3996 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
3997
3998 addAccountHistoryJob(subInfo);
3999}
4000
// Subscribe `isrListener` to an account's transaction history stream.
// Returns rpcINVALID_PARAMS on a duplicate subscription, rpcSUCCESS
// otherwise (even if streaming is delayed until a validated ledger exists).
// NOTE(review): the extraction dropped the declarator lines (4001-4002),
// the mSubLock guard and the SubAccountHistoryInfoWeak construction
// (4014-4015), plus lines 4020/4022 — restore from the upstream file.
4003 InfoSub::ref isrListener,
4004 AccountID const& accountId)
4005{
// The listener tracks its own set; a second insert for the same account
// is rejected as a duplicate subscription.
4006 if (!isrListener->insertSubAccountHistory(accountId))
4007 {
4008 JLOG(m_journal.debug())
4009 << "subAccountHistory, already subscribed to account "
4010 << toBase58(accountId);
4011 return rpcINVALID_PARAMS;
4012 }
4013
4016 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
// Register the subscription in the per-account map, keyed by the
// listener's sequence number.
4017 auto simIterator = mSubAccountHistory.find(accountId);
4018 if (simIterator == mSubAccountHistory.end())
4019 {
4021 inner.emplace(isrListener->getSeq(), ahi);
4023 simIterator, std::make_pair(accountId, inner));
4024 }
4025 else
4026 {
4027 simIterator->second.emplace(isrListener->getSeq(), ahi);
4028 }
4029
// Start streaming immediately only if we already have a validated ledger.
4030 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
4031 if (ledger)
4032 {
4033 subAccountHistoryStart(ledger, ahi);
4034 }
4035 else
4036 {
4037 // The node does not have validated ledgers, so wait for
4038 // one before start streaming.
4039 // In this case, the subscription is also considered successful.
4040 JLOG(m_journal.debug())
4041 << "subAccountHistory, no validated ledger yet, delay start";
4042 }
4043
4044 return rpcSUCCESS;
4045}
4046
// Public unsubscribe entry point: detach the account from the listener
// (unless only the historical part is being stopped) and delegate the
// bookkeeping to unsubAccountHistoryInternal.
// NOTE(review): extraction dropped the declarator line (4048).
4047void
4049 InfoSub::ref isrListener,
4050 AccountID const& account,
4051 bool historyOnly)
4052{
// historyOnly == true keeps the live subscription; only the historical
// streaming is stopped, so the listener's own record is preserved.
4053 if (!historyOnly)
4054 isrListener->deleteSubAccountHistory(account);
4055 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
4056}
4057
// Stop (and optionally remove) one account_history subscription identified
// by listener sequence number `seq`.
// NOTE(review): extraction dropped the declarator/parameter lines
// (4059-4060, including the `seq` parameter) and the mSubLock guard (4064).
4058void
4061 AccountID const& account,
4062 bool historyOnly)
4063{
4065 auto simIterator = mSubAccountHistory.find(account);
4066 if (simIterator != mSubAccountHistory.end())
4067 {
4068 auto& subInfoMap = simIterator->second;
4069 auto subInfoIter = subInfoMap.find(seq);
4070 if (subInfoIter != subInfoMap.end())
4071 {
// Signal the background AccountHistory job to stop; the job polls
// stopHistorical_ between batches.
4072 subInfoIter->second.index_->stopHistorical_ = true;
4073 }
4074
// Full unsubscribe also removes the map entry, and drops the whole
// per-account map once its last listener is gone.
4075 if (!historyOnly)
4076 {
4077 simIterator->second.erase(seq);
4078 if (simIterator->second.empty())
4079 {
4080 mSubAccountHistory.erase(simIterator);
4081 }
4082 }
4083 JLOG(m_journal.debug())
4084 << "unsubAccountHistory, account " << toBase58(account)
4085 << ", historyOnly = " << (historyOnly ? "true" : "false");
4086 }
4087}
4088
// Subscribe a listener to an order book's update stream. makeBookListeners
// creates the listener group on demand, so a null result is unreachable.
// NOTE(review): extraction dropped the declarator line (4090).
4089bool
4091{
4092 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
4093 listeners->addSubscriber(isrListener);
4094 else
4095 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
4096 return true;
4097}
4098
// Remove a subscriber (by sequence number) from an order book's listener
// group, if that group exists. Always reports success.
// NOTE(review): extraction dropped the declarator line (4100).
4099bool
4101{
4102 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
4103 listeners->removeSubscriber(uSeq);
4104
4105 return true;
4106}
4107
// Manually close and accept the current ledger (standalone mode only, via
// the `ledger_accept` RPC); returns the new current ledger's sequence.
// NOTE(review): extraction dropped the return-type/declarator lines
// (4108-4110, including the consensusDelay parameter).
4111{
4112 // This code-path is exclusively used when the server is in standalone
4113 // mode via `ledger_accept`
4114 XRPL_ASSERT(
4115 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
4116
4117 if (!m_standalone)
4118 Throw<std::runtime_error>(
4119 "Operation only possible in STANDALONE mode.");
4120
4121 // FIXME Could we improve on this and remove the need for a specialized
4122 // API in Consensus?
4123 beginConsensus(m_ledgerMaster.getClosedLedger()->info().hash, {});
4124 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
4125 return m_ledgerMaster.getCurrentLedger()->info().seq;
4126}
4127
// Subscribe to the "ledger" stream. Seeds jvResult with a snapshot of the
// latest validated ledger (index/hash/close time, fees and reserves) before
// adding the listener to the sLedger stream map.
// NOTE(review): extraction dropped the declarator line (4130) and the
// lines around 4146-4152 (validated-range check and stream-map lock).
4128// <-- bool: true=added, false=already there
4129bool
4131{
4132 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
4133 {
4134 jvResult[jss::ledger_index] = lpClosed->info().seq;
4135 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
4136 jvResult[jss::ledger_time] = Json::Value::UInt(
4137 lpClosed->info().closeTime.time_since_epoch().count());
// fee_ref is only reported pre-XRPFees; it is deprecated.
4138 if (!lpClosed->rules().enabled(featureXRPFees))
4139 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
4140 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
4141 jvResult[jss::reserve_base] =
4142 lpClosed->fees().accountReserve(0).jsonClipped();
4143 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
4144 }
4145
4147 {
4148 jvResult[jss::validated_ledgers] =
4150 }
4151
// emplace returns false in .second if this listener was already present.
4153 return mStreamMaps[sLedger]
4154 .emplace(isrListener->getSeq(), isrListener)
4155 .second;
4156}
4157
// Subscribe to the "book_changes" stream.
// NOTE(review): extraction dropped the declarator (4160) and the lock /
// `return mStreamMaps[sBookChanges]` lines (4162-4163).
4158// <-- bool: true=added, false=already there
4159bool
4161{
4164 .emplace(isrListener->getSeq(), isrListener)
4165 .second;
4166}
4167
// Unsubscribe from the "ledger" stream; erase() reports whether the
// listener was present.
// NOTE(review): extraction dropped the declarator (4170) and lock (4172).
4168// <-- bool: true=erased, false=was not there
4169bool
4171{
4173 return mStreamMaps[sLedger].erase(uSeq);
4174}
4175
// Unsubscribe from the "book_changes" stream.
// NOTE(review): extraction dropped the declarator (4178) and lock (4180).
4176// <-- bool: true=erased, false=was not there
4177bool
4179{
4181 return mStreamMaps[sBookChanges].erase(uSeq);
4182}
4183
// Subscribe to the "manifests" stream.
// NOTE(review): extraction dropped the declarator (4186) and lock (4188).
4184// <-- bool: true=added, false=already there
4185bool
4187{
4189 return mStreamMaps[sManifests]
4190 .emplace(isrListener->getSeq(), isrListener)
4191 .second;
4192}
4193
// Unsubscribe from the "manifests" stream.
// NOTE(review): extraction dropped the declarator (4196) and lock (4198).
4194// <-- bool: true=erased, false=was not there
4195bool
4197{
4199 return mStreamMaps[sManifests].erase(uSeq);
4200}
4201
// Subscribe to the "server" status stream. Seeds jvResult with the current
// server status (operating mode, load, hostid, node public key, a random
// value) before adding the listener to the sServer stream map.
// NOTE(review): extraction dropped the declarator line (4204) and the
// lines around 4224-4226 (node pubkey expression and stream-map lock).
4202// <-- bool: true=added, false=already there
4203bool
4205 InfoSub::ref isrListener,
4206 Json::Value& jvResult,
4207 bool admin)
4208{
4209 uint256 uRandom;
4210
4211 if (m_standalone)
4212 jvResult[jss::stand_alone] = m_standalone;
4213
4214 // CHECKME: is it necessary to provide a random number here?
4215 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
4216
4217 auto const& feeTrack = app_.getFeeTrack();
4218 jvResult[jss::random] = to_string(uRandom);
// Operating mode string and hostid are admin-sensitive; `admin` gates
// how much detail is reported.
4219 jvResult[jss::server_status] = strOperatingMode(admin);
4220 jvResult[jss::load_base] = feeTrack.getLoadBase();
4221 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4222 jvResult[jss::hostid] = getHostId(admin);
4223 jvResult[jss::pubkey_node] =
4225
4227 return mStreamMaps[sServer]
4228 .emplace(isrListener->getSeq(), isrListener)
4229 .second;
4230}
4231
// Unsubscribe from the "server" stream.
// NOTE(review): extraction dropped the declarator (4234) and lock (4236).
4232// <-- bool: true=erased, false=was not there
4233bool
4235{
4237 return mStreamMaps[sServer].erase(uSeq);
4238}
4239
// Subscribe to the "transactions" stream (all validated transactions).
// NOTE(review): extraction dropped the declarator (4242), lock (4244) and
// the `return mStreamMaps[sTransactions]` line (4245).
4240// <-- bool: true=added, false=already there
4241bool
4243{
4246 .emplace(isrListener->getSeq(), isrListener)
4247 .second;
4248}
4249
// Unsubscribe from the "transactions" stream.
// NOTE(review): extraction dropped the declarator (4252) and lock (4254).
4250// <-- bool: true=erased, false=was not there
4251bool
4253{
4255 return mStreamMaps[sTransactions].erase(uSeq);
4256}
4257
// Subscribe to the real-time (proposed) transactions stream.
// NOTE(review): extraction dropped the declarator (4260), lock (4262) and
// the `return mStreamMaps[sRTTransactions]` line (4263).
4258// <-- bool: true=added, false=already there
4259bool
4261{
4264 .emplace(isrListener->getSeq(), isrListener)
4265 .second;
4266}
4267
// Unsubscribe from the real-time transactions stream.
// NOTE(review): extraction dropped the declarator (4270) and lock (4272).
4268// <-- bool: true=erased, false=was not there
4269bool
4271{
4273 return mStreamMaps[sRTTransactions].erase(uSeq);
4274}
4275
// Subscribe to the "validations" stream.
// NOTE(review): extraction dropped the declarator (4278), lock (4280) and
// the `return mStreamMaps[sValidations]` line (4281).
4276// <-- bool: true=added, false=already there
4277bool
4279{
4282 .emplace(isrListener->getSeq(), isrListener)
4283 .second;
4284}
4285
// Report per-operating-mode state-accounting counters into `obj` by
// delegating to the StateAccounting helper.
// NOTE(review): extraction dropped the declarator line (4287).
4286void
4288{
4289 accounting_.json(obj);
4290}
4291
// Unsubscribe from the "validations" stream.
// NOTE(review): extraction dropped the declarator (4294) and lock (4296).
4292// <-- bool: true=erased, false=was not there
4293bool
4295{
4297 return mStreamMaps[sValidations].erase(uSeq);
4298}
4299
// Subscribe to the "peer_status" stream.
// NOTE(review): extraction dropped the declarator (4302) and lock (4304).
4300// <-- bool: true=added, false=already there
4301bool
4303{
4305 return mStreamMaps[sPeerStatus]
4306 .emplace(isrListener->getSeq(), isrListener)
4307 .second;
4308}
4309
// Unsubscribe from the "peer_status" stream.
// NOTE(review): extraction dropped the declarator (4312) and lock (4314).
4310// <-- bool: true=erased, false=was not there
4311bool
4313{
4315 return mStreamMaps[sPeerStatus].erase(uSeq);
4316}
4317
// Subscribe to the consensus-phase stream.
// NOTE(review): extraction dropped the declarator (4320), lock (4322) and
// the `return mStreamMaps[sConsensusPhase]` line (4323).
4318// <-- bool: true=added, false=already there
4319bool
4321{
4324 .emplace(isrListener->getSeq(), isrListener)
4325 .second;
4326}
4327
// Unsubscribe from the consensus-phase stream.
// NOTE(review): extraction dropped the declarator (4330) and lock (4332).
4328// <-- bool: true=erased, false=was not there
4329bool
4331{
4333 return mStreamMaps[sConsensusPhase].erase(uSeq);
4334}
4335
// Look up an RPC subscription entry by its URL; returns an empty pointer
// when no entry exists.
// NOTE(review): extraction dropped the return-type/declarator lines
// (4336-4337) and the lock line (4339).
4338{
4340
4341 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4342
4343 if (it != mRpcSubMap.end())
4344 return it->second;
4345
// No match: return a default-constructed (null) InfoSub::pointer.
4346 return InfoSub::pointer();
4347}
4348
// Register an RPC subscription entry under its URL and return the entry.
// NOTE(review): extraction dropped the return-type/declarator lines
// (4349-4350) and the lock line (4352).
4351{
4353
4354 mRpcSubMap.emplace(strUrl, rspEntry);
4355
4356 return rspEntry;
4357}
4358
// Remove an RPC subscription by URL, but only when no stream map still
// references its listener sequence; returns true on removal.
// NOTE(review): extraction dropped the declarator (4360) and lock (4362).
4359bool
4361{
4363 auto pInfo = findRpcSub(strUrl);
4364
4365 if (!pInfo)
4366 return false;
4367
4368 // check to see if any of the stream maps still hold a weak reference to
4369 // this entry before removing
4370 for (SubMapType const& map : mStreamMaps)
4371 {
4372 if (map.find(pInfo->getSeq()) != map.end())
4373 return false;
4374 }
4375 mRpcSubMap.erase(strUrl);
4376 return true;
4377}
4378
4379#ifndef USE_NEW_BOOK_PAGE
4380
// Build one page of an order book for the book_offers RPC: walk the book's
// directory pages from best quality toward uBookEnd, compute each offer
// owner's available funds (cached per owner in umBalance), apply the
// issuer transfer rate, and append offer JSON (with taker_gets_funded /
// taker_pays_funded for partially funded offers) to jvResult[jss::offers].
// NOTE(review): the extraction dropped the declarator lines (4385-4386)
// and lines 4397 (owner-funds map declaration) and 4502 (an accountHolds
// argument) — restore from the upstream file before compiling.
4381// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4382// work, but it demonstrated poor performance.
4383//
4384void
4387 Book const& book,
4388 AccountID const& uTakerID,
4389 bool const bProof,
4390 unsigned int iLimit,
4391 Json::Value const& jvMarker,
4392 Json::Value& jvResult)
4393{ // CAUTION: This is the old get book page logic
4394 Json::Value& jvOffers =
4395 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4398 uint256 const uBookBase = getBookBase(book);
4399 uint256 const uBookEnd = getQualityNext(uBookBase);
4400 uint256 uTipIndex = uBookBase;
4402 if (auto stream = m_journal.trace())
4403 {
4404 stream << "getBookPage:" << book;
4405 stream << "getBookPage: uBookBase=" << uBookBase;
4406 stream << "getBookPage: uBookEnd=" << uBookEnd;
4407 stream << "getBookPage: uTipIndex=" << uTipIndex;
4408 }
4410 ReadView const& view = *lpLedger;
// A global freeze on either side of the book makes every third-party
// offer unfunded (handled below).
4412 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4413 isGlobalFrozen(view, book.in.account);
4415 bool bDone = false;
4416 bool bDirectAdvance = true;
4418 std::shared_ptr<SLE const> sleOfferDir;
4419 uint256 offerIndex;
4420 unsigned int uBookEntry;
4421 STAmount saDirRate;
4423 auto const rate = transferRate(view, book.out.account);
4424 auto viewJ = app_.journal("View");
// Main loop: one iteration per offer, bounded by the caller's iLimit.
4426 while (!bDone && iLimit-- > 0)
4427 {
// bDirectAdvance: move to the next directory page (next quality level).
4428 if (bDirectAdvance)
4429 {
4430 bDirectAdvance = false;
4432 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4434 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4435 if (ledgerIndex)
4436 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4437 else
4438 sleOfferDir.reset();
// No further directory page before uBookEnd: book exhausted.
4440 if (!sleOfferDir)
4441 {
4442 JLOG(m_journal.trace()) << "getBookPage: bDone";
4443 bDone = true;
4444 }
4445 else
4446 {
// The directory key encodes the quality (exchange rate) of
// every offer it contains.
4447 uTipIndex = sleOfferDir->key();
4448 saDirRate = amountFromQuality(getQuality(uTipIndex));
4450 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4452 JLOG(m_journal.trace())
4453 << "getBookPage: uTipIndex=" << uTipIndex;
4454 JLOG(m_journal.trace())
4455 << "getBookPage: offerIndex=" << offerIndex;
4456 }
4457 }
4459 if (!bDone)
4460 {
4461 auto sleOffer = view.read(keylet::offer(offerIndex));
4463 if (sleOffer)
4464 {
4465 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4466 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4467 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4468 STAmount saOwnerFunds;
4469 bool firstOwnerOffer(true);
4471 if (book.out.account == uOfferOwnerID)
4472 {
4473 // If an offer is selling issuer's own IOUs, it is fully
4474 // funded.
4475 saOwnerFunds = saTakerGets;
4476 }
4477 else if (bGlobalFreeze)
4478 {
4479 // If either asset is globally frozen, consider all offers
4480 // that aren't ours to be totally unfunded
4481 saOwnerFunds.clear(book.out);
4482 }
4483 else
4484 {
// Use the running balance if we already saw an offer from
// this owner on this page; otherwise query the ledger once.
4485 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4486 if (umBalanceEntry != umBalance.end())
4487 {
4488 // Found in running balance table.
4490 saOwnerFunds = umBalanceEntry->second;
4491 firstOwnerOffer = false;
4492 }
4493 else
4494 {
4495 // Did not find balance in table.
4497 saOwnerFunds = accountHolds(
4498 view,
4499 uOfferOwnerID,
4500 book.out.currency,
4501 book.out.account,
4503 viewJ);
4505 if (saOwnerFunds < beast::zero)
4506 {
4507 // Treat negative funds as zero.
4509 saOwnerFunds.clear();
4510 }
4511 }
4512 }
4514 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4516 STAmount saTakerGetsFunded;
4517 STAmount saOwnerFundsLimit = saOwnerFunds;
4518 Rate offerRate = parityRate;
4520 if (rate != parityRate
4521 // Have a tranfer fee.
4522 && uTakerID != book.out.account
4523 // Not taking offers of own IOUs.
4524 && book.out.account != uOfferOwnerID)
4525 // Offer owner not issuing ownfunds
4526 {
4527 // Need to charge a transfer fee to offer owner.
4528 offerRate = rate;
4529 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4530 }
4532 if (saOwnerFundsLimit >= saTakerGets)
4533 {
4534 // Sufficient funds no shenanigans.
4535 saTakerGetsFunded = saTakerGets;
4536 }
4537 else
4538 {
4539 // Only provide, if not fully funded.
4541 saTakerGetsFunded = saOwnerFundsLimit;
4543 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
// The min() result is used only for its setJson side effect:
// taker_pays_funded is the pro-rated (clamped) pay amount.
4544 std::min(
4545 saTakerPays,
4546 multiply(
4547 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4548 .setJson(jvOffer[jss::taker_pays_funded]);
4549 }
// What the owner actually parts with, including transfer fee.
4551 STAmount saOwnerPays = (parityRate == offerRate)
4552 ? saTakerGetsFunded
4553 : std::min(
4554 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4556 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4558 // Include all offers funded and unfunded
4559 Json::Value& jvOf = jvOffers.append(jvOffer);
4560 jvOf[jss::quality] = saDirRate.getText();
// owner_funds is reported only on the owner's best (first) offer.
4562 if (firstOwnerOffer)
4563 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4564 }
4565 else
4566 {
4567 JLOG(m_journal.warn()) << "Missing offer";
4568 }
// Advance within the current directory page; when exhausted, fall
// back to bDirectAdvance to find the next quality level.
4570 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4571 {
4572 bDirectAdvance = true;
4573 }
4574 else
4575 {
4576 JLOG(m_journal.trace())
4577 << "getBookPage: offerIndex=" << offerIndex;
4578 }
4579 }
4580 }
4582 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4583 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4584}
4585
4586#else
4587
// Alternative book-page builder using OrderBookIterator; compiled only when
// USE_NEW_BOOK_PAGE is defined (currently disabled — see comment below).
// Unlike the old path, it emits only funded offers plus the taker's own.
// NOTE(review): the extraction dropped the declarator lines (4592-4593),
// the owner-funds map declaration (4603) and an accountHolds argument
// (4653) — restore from the upstream file before compiling.
4588// This is the new code that uses the book iterators
4589// It has temporarily been disabled
4591void
4594 Book const& book,
4595 AccountID const& uTakerID,
4596 bool const bProof,
4597 unsigned int iLimit,
4598 Json::Value const& jvMarker,
4599 Json::Value& jvResult)
4600{
4601 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4605 MetaView lesActive(lpLedger, tapNONE, true);
4606 OrderBookIterator obIterator(lesActive, book);
4608 auto const rate = transferRate(lesActive, book.out.account);
// A global freeze on either side makes third-party offers unfunded.
4610 bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4611 lesActive.isGlobalFrozen(book.in.account);
// The iterator hides the directory-page walking done manually in the
// old implementation.
4613 while (iLimit-- > 0 && obIterator.nextOffer())
4614 {
4615 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4616 if (sleOffer)
4617 {
4618 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4619 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4620 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4621 STAmount saDirRate = obIterator.getCurrentRate();
4622 STAmount saOwnerFunds;
4624 if (book.out.account == uOfferOwnerID)
4625 {
4626 // If offer is selling issuer's own IOUs, it is fully funded.
4627 saOwnerFunds = saTakerGets;
4628 }
4629 else if (bGlobalFreeze)
4630 {
4631 // If either asset is globally frozen, consider all offers
4632 // that aren't ours to be totally unfunded
4633 saOwnerFunds.clear(book.out);
4634 }
4635 else
4636 {
// Per-owner funds are cached in umBalance across offers.
4637 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4639 if (umBalanceEntry != umBalance.end())
4640 {
4641 // Found in running balance table.
4643 saOwnerFunds = umBalanceEntry->second;
4644 }
4645 else
4646 {
4647 // Did not find balance in table.
4649 saOwnerFunds = lesActive.accountHolds(
4650 uOfferOwnerID,
4651 book.out.currency,
4652 book.out.account,
4655 if (saOwnerFunds.isNegative())
4656 {
4657 // Treat negative funds as zero.
4659 saOwnerFunds.zero();
4660 }
4661 }
4662 }
4664 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4666 STAmount saTakerGetsFunded;
4667 STAmount saOwnerFundsLimit = saOwnerFunds;
4668 Rate offerRate = parityRate;
4670 if (rate != parityRate
4671 // Have a tranfer fee.
4672 && uTakerID != book.out.account
4673 // Not taking offers of own IOUs.
4674 && book.out.account != uOfferOwnerID)
4675 // Offer owner not issuing ownfunds
4676 {
4677 // Need to charge a transfer fee to offer owner.
4678 offerRate = rate;
4679 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4680 }
4682 if (saOwnerFundsLimit >= saTakerGets)
4683 {
4684 // Sufficient funds no shenanigans.
4685 saTakerGetsFunded = saTakerGets;
4686 }
4687 else
4688 {
4689 // Only provide, if not fully funded.
4690 saTakerGetsFunded = saOwnerFundsLimit;
4692 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4694 // TOOD(tom): The result of this expression is not used - what's
4695 // going on here?
// (Its setJson side effect populates taker_pays_funded.)
4696 std::min(
4697 saTakerPays,
4698 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4699 .setJson(jvOffer[jss::taker_pays_funded]);
4700 }
// What the owner actually parts with, including transfer fee.
4702 STAmount saOwnerPays = (parityRate == offerRate)
4703 ? saTakerGetsFunded
4704 : std::min(
4705 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4707 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4709 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4710 {
4711 // Only provide funded offers and offers of the taker.
4712 Json::Value& jvOf = jvOffers.append(jvOffer);
4713 jvOf[jss::quality] = saDirRate.getText();
4714 }
4715 }
4716 }
4718 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4719 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4720}
4721
4722#endif
4723
// Publish a snapshot of the StateAccounting counters (time spent in, and
// transitions into, each operating mode) to the insight gauges. The
// current mode's duration is brought up to date before reporting.
// NOTE(review): the extraction dropped the declarator line (4725), the
// `steady_clock::now() - start` expression (4729) and every gauge
// `.set(` call line (4732-4733, 4736, 4739, 4741, 4744, 4747, 4750,
// 4753, 4755, 4758) — restore from the upstream file before compiling.
4724inline void
4726{
4727 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4728 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4730 counters[static_cast<std::size_t>(mode)].dur += current;
// Per-mode accumulated durations (microseconds).
4734 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4735 .dur.count());
4737 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4738 .dur.count());
4740 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4742 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4743 .dur.count());
4745 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
// Per-mode transition counts.
4748 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4749 .transitions);
4751 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4752 .transitions);
4754 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4756 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4757 .transitions);
4759 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4760}
4761
// Record a transition into operating mode `om`: bump its transition count,
// close out the duration accumulated in the previous mode, and on the very
// first entry into FULL capture the initial-sync duration.
// NOTE(review): extraction dropped the declarator line (4763).
4762void
4764{
4765 auto now = std::chrono::steady_clock::now();
4766
4767 std::lock_guard lock(mutex_);
4768 ++counters_[static_cast<std::size_t>(om)].transitions;
// transitions == 1 for FULL means this is the first full sync since
// process start: record how long it took.
4769 if (om == OperatingMode::FULL &&
4770 counters_[static_cast<std::size_t>(om)].transitions == 1)
4771 {
4772 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4773 now - processStart_)
4774 .count();
4775 }
// Attribute the elapsed time since start_ to the mode we are leaving.
4776 counters_[static_cast<std::size_t>(mode_)].dur +=
4777 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4778
4779 mode_ = om;
4780 start_ = now;
4781}
4782
// Emit the state-accounting counters as JSON: one object per operating
// mode (transitions + duration_us), plus the current state's running
// duration and, once synced, the initial sync duration.
// NOTE(review): extraction dropped the declarator line (4784), the
// `steady_clock::now() - start` expression (4788) and the for-loop head
// (4792) — restore from the upstream file before compiling.
4783void
4785{
4786 auto [counters, mode, start, initialSync] = getCounterData();
// Fold the still-running current-mode time into its counter so the
// report is up to date.
4787 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4789 counters[static_cast<std::size_t>(mode)].dur += current;
4790
4791 obj[jss::state_accounting] = Json::objectValue;
4793 i <= static_cast<std::size_t>(OperatingMode::FULL);
4794 ++i)
4795 {
4796 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4797 auto& state = obj[jss::state_accounting][states_[i]];
// Counts are serialized as strings (64-bit safe for JSON clients).
4798 state[jss::transitions] = std::to_string(counters[i].transitions);
4799 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4800 }
4801 obj[jss::server_state_duration_us] = std::to_string(current.count());
// initialSync is zero until the first transition to FULL completes.
4802 if (initialSync)
4803 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4804}
4805
4806//------------------------------------------------------------------------------
4807
// Factory: construct the concrete NetworkOPsImp behind the NetworkOPs
// interface, forwarding every dependency unchanged.
// NOTE(review): the extraction dropped the return-type/declarator lines
// (4807-4809), the clock parameter (4811), the LedgerMaster parameter
// (4816) and the corresponding forwarded argument (4829) — restore from
// the upstream file before compiling.
4810 Application& app,
4812 bool standalone,
4813 std::size_t minPeerCount,
4814 bool startvalid,
4815 JobQueue& job_queue,
4817 ValidatorKeys const& validatorKeys,
4818 boost::asio::io_service& io_svc,
4819 beast::Journal journal,
4820 beast::insight::Collector::ptr const& collector)
4821{
4822 return std::make_unique<NetworkOPsImp>(
4823 app,
4824 clock,
4825 standalone,
4826 minPeerCount,
4827 startvalid,
4828 job_queue,
4830 validatorKeys,
4831 io_svc,
4832 journal,
4833 collector);
4834}
4835
4836} // namespace ripple
T any_of(T... args)
T back_inserter(T... args)
T begin(T... args)
T bind(T... args)
Decorator for streaming out compact json.
Definition: json_writer.h:318
Lightweight wrapper to tag static string.
Definition: json_value.h:64
Represents a JSON value.
Definition: json_value.h:150
Json::UInt UInt
Definition: json_value.h:157
Value & append(Value const &value)
Append value to array at the end.
Definition: json_value.cpp:910
bool isMember(char const *key) const
Return true if the object has a member named key.
Definition: json_value.cpp:962
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Definition: json_value.cpp:854
A generic endpoint for log messages.
Definition: Journal.h:60
Stream error() const
Definition: Journal.h:346
Stream debug() const
Definition: Journal.h:328
Stream info() const
Definition: Journal.h:334
Stream trace() const
Severity stream access functions.
Definition: Journal.h:322
Stream warn() const
Definition: Journal.h:340
A metric for measuring an integral value.
Definition: Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition: Gauge.h:68
A reference to a handler for performing polled collection.
Definition: Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition: Book.h:35
Issue in
Definition: Book.h:37
Issue out
Definition: Book.h:38
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition: ClusterNode.h:46
std::uint32_t getLoadFee() const
Definition: ClusterNode.h:52
NetClock::time_point getReportTime() const
Definition: ClusterNode.h:58
PublicKey const & identity() const
Definition: ClusterNode.h:64
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:49
uint32_t NETWORK_ID
Definition: Config.h:156
std::string SERVER_DOMAIN
Definition: Config.h:279
std::size_t NODE_SIZE
Definition: Config.h:213
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition: Config.h:160
int RELAY_UNTRUSTED_VALIDATIONS
Definition: Config.h:169
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition: InfoSub.h:54
AccountID account
Definition: Issue.h:39
Currency currency
Definition: Issue.h:38
A pool of threads to perform work.
Definition: JobQueue.h:56
Json::Value getJson(int c=0)
Definition: JobQueue.cpp:214
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
Definition: LedgerMaster.h:265
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:79
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
Definition: LoadFeeTrack.h:45
std::uint32_t getClusterFee() const
Definition: LoadFeeTrack.h:82
std::uint32_t getLocalFee() const
Definition: LoadFeeTrack.h:75
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:89
std::uint32_t getRemoteFee() const
Definition: LoadFeeTrack.h:68
std::uint32_t getLoadFactor() const
Definition: LoadFeeTrack.h:95
Manages load sources.
Definition: LoadManager.h:46
void heartbeat()
Reset the stall detection timer.
Definition: LoadManager.cpp:64
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
Definition: Manifest.cpp:323
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
Definition: NetworkOPs.cpp:142
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
Definition: NetworkOPs.cpp:152
std::chrono::steady_clock::time_point start_
Definition: NetworkOPs.cpp:154
static std::array< Json::StaticString const, 5 > const states_
Definition: NetworkOPs.cpp:158
std::chrono::steady_clock::time_point const processStart_
Definition: NetworkOPs.cpp:156
Transaction with input flags and results to be applied in batches.
Definition: NetworkOPs.cpp:93
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
Definition: NetworkOPs.cpp:102
std::shared_ptr< Transaction > const transaction
Definition: NetworkOPs.cpp:95
boost::asio::steady_timer accountHistoryTxTimer_
Definition: NetworkOPs.cpp:753
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
Definition: NetworkOPs.cpp:888
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
std::vector< TransactionStatus > mTransactions
Definition: NetworkOPs.cpp:800
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Definition: NetworkOPs.cpp:743
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
Definition: NetworkOPs.cpp:755
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
Definition: NetworkOPs.cpp:906
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
Definition: NetworkOPs.cpp:751
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
Definition: NetworkOPs.cpp:119
std::optional< PublicKey > const validatorPK_
Definition: NetworkOPs.cpp:757
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
Definition: NetworkOPs.cpp:739
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
Definition: NetworkOPs.cpp:269
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Definition: NetworkOPs.cpp:769
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
Definition: NetworkOPs.cpp:752
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
Definition: NetworkOPs.cpp:125
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:225
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
Definition: NetworkOPs.cpp:746
beast::Journal m_journal
Definition: NetworkOPs.cpp:737
SubInfoMapType mSubAccount
Definition: NetworkOPs.cpp:764
std::optional< PublicKey > const validatorMasterPK_
Definition: NetworkOPs.cpp:758
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
Definition: NetworkOPs.cpp:804
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
Definition: NetworkOPs.cpp:750
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
Definition: NetworkOPs.cpp:959
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
Definition: NetworkOPs.cpp:784
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
Definition: NetworkOPs.cpp:794
std::atomic< bool > unlBlocked_
Definition: NetworkOPs.cpp:748
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
Definition: NetworkOPs.cpp:741
std::atomic< bool > needNetworkLedger_
Definition: NetworkOPs.cpp:745
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
Definition: NetworkOPs.cpp:802
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
Definition: NetworkOPs.cpp:918
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
Definition: NetworkOPs.cpp:762
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
Definition: NetworkOPs.cpp:949
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
Definition: NetworkOPs.cpp:786
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
Definition: NetworkOPs.cpp:900
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
Definition: NetworkOPs.cpp:797
void setMode(OperatingMode om) override
void stop() override
Definition: NetworkOPs.cpp:586
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
void clearNeedNetworkLedger() override
Definition: NetworkOPs.cpp:912
DispatchState mDispatchState
Definition: NetworkOPs.cpp:799
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
Definition: NetworkOPs.cpp:765
void reportFeeChange() override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
Definition: NetworkOPs.cpp:924
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
Definition: NetworkOPs.cpp:805
Application & app_
Definition: NetworkOPs.cpp:736
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
Definition: NetworkOPs.cpp:760
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
Definition: NetworkOPs.cpp:767
std::atomic< bool > amendmentWarned_
Definition: NetworkOPs.cpp:747
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
Definition: NetworkOPs.cpp:930
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition: NetworkOPs.h:89
void getCountsJson(Json::Value &obj)
Definition: Database.cpp:268
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:49
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:57
BookListeners::pointer getBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &alTx, MultiApiJson const &jvObj)
BookListeners::pointer makeBookListeners(Book const &)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
Definition: RCLConsensus.h:53
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
Definition: RCLConsensus.h:447
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Definition: RCLConsensus.h:460
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:44
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Definition: RCLCxPeerPos.h:78
Represents a set of transactions in RCLConsensus.
Definition: RCLCxTx.h:63
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition: RFC1751.cpp:507
Collects logging information.
Definition: RCLConsensus.h:551
std::unique_ptr< std::stringstream > const & ss()
Definition: RCLConsensus.h:565
A view into a ledger.
Definition: ReadView.h:52
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition: STAmount.cpp:510
std::string getText() const override
Definition: STAmount.cpp:550
Issue const & issue() const
Definition: STAmount.h:496
std::optional< T > get(std::string const &name) const
Definition: BasicConfig.h:140
std::size_t size() const noexcept
Definition: Serializer.h:73
void const * data() const noexcept
Definition: Serializer.h:79
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition: TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition: TxQ.cpp:1778
static time_point now()
Definition: UptimeClock.cpp:67
Validator keys and manifest as set in configuration file.
Definition: ValidatorKeys.h:38
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition: XRPAmount.h:262
Json::Value jsonClipped() const
Definition: XRPAmount.h:218
iterator begin()
Definition: base_uint.h:136
static constexpr std::size_t size()
Definition: base_uint.h:526
bool isZero() const
Definition: base_uint.h:540
bool isNonZero() const
Definition: base_uint.h:545
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition: scope.h:231
Set the fee on a JTx.
Definition: fee.h:37
Match set account flags.
Definition: flags.h:125
Inject raw JSON.
Definition: jtx_json.h:33
Set the regular signature on a JTx.
Definition: sig.h:35
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition: json_value.h:45
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:46
int Int
Definition: json_forwards.h:26
unsigned int UInt
Definition: json_forwards.h:27
void rngfill(void *buffer, std::size_t bytes, Generator &g)
Definition: rngfill.h:34
std::string const & getVersionString()
Server version.
Definition: BuildInfo.cpp:68
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Definition: CTID.h:43
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition: BookChanges.h:47
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition: Indexes.cpp:177
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition: Indexes.cpp:373
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition: Indexes.cpp:267
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:32
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:114
STAmount divide(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:93
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition: STTx.cpp:637
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition: View.cpp:554
@ fhZERO_IF_FROZEN
Definition: View.h:78
@ fhIGNORE_FREEZE
Definition: View.h:78
bool isTelLocal(TER x)
Definition: TER.h:648
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition: View.cpp:148
std::uint64_t getQuality(uint256 const &uBase)
Definition: Indexes.cpp:142
@ INCLUDED
Definition: Transaction.h:49
@ OBSOLETE
Definition: Transaction.h:54
@ INVALID
Definition: Transaction.h:48
error_code_i
Definition: ErrorCodes.h:40
@ rpcSUCCESS
Definition: ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition: ErrorCodes.h:84
@ rpcINTERNAL
Definition: ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
Definition: SecretKey.cpp:369
auto constexpr muldiv_max
Definition: mulDiv.h:29
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition: LocalTxs.cpp:192
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition: View.cpp:762
STAmount amountFromQuality(std::uint64_t rate)
Definition: STAmount.cpp:857
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition: ErrorCodes.h:167
@ warnRPC_UNSUPPORTED_MAJORITY
Definition: ErrorCodes.h:165
@ warnRPC_AMENDMENT_BLOCKED
Definition: ErrorCodes.h:166
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
Definition: BasicConfig.h:315
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition: NetworkOPs.h:68
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:53
bool isTesSuccess(TER x)
Definition: TER.h:672
bool isTerRetry(TER x)
Definition: TER.h:666
AccountID calcAccountID(PublicKey const &pk)
Definition: AccountID.cpp:168
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Definition: csprng.cpp:103
Json::Value rpcError(int iError)
Definition: RPCErr.cpp:31
@ tefPAST_SEQ
Definition: TER.h:175
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
Definition: NetworkOPs.cpp:869
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
bool isTemMalformed(TER x)
Definition: TER.h:654
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition: View.cpp:159
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition: ApiVersion.h:102
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:75
@ tesSUCCESS
Definition: TER.h:244
uint256 getQualityNext(uint256 const &uBase)
Definition: Indexes.cpp:134
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition: View.cpp:387
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition: ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition: chrono.h:92
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:630
FeeSetup setup_FeeVote(Section const &section)
Definition: Config.cpp:1092
bool isTefFailure(TER x)
Definition: TER.h:660
Number root(Number f, unsigned d)
Definition: Number.cpp:636
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Definition: mulDiv.cpp:32
ApplyFlags
Definition: ApplyView.h:31
@ tapFAIL_HARD
Definition: ApplyView.h:36
@ tapUNLIMITED
Definition: ApplyView.h:43
@ tapNONE
Definition: ApplyView.h:32
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:38
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition: Seed.cpp:76
constexpr std::size_t maxPoppedTransactions
@ terQUEUED
Definition: TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition: TER.cpp:246
@ jtNETOP_CLUSTER
Definition: Job.h:75
@ jtCLIENT_FEE_CHANGE
Definition: Job.h:47
@ jtTRANSACTION
Definition: Job.h:62
@ jtTXN_PROC
Definition: Job.h:82
@ jtCLIENT_CONSENSUS
Definition: Job.h:48
@ jtBATCH
Definition: Job.h:65
@ jtCLIENT_ACCT_HIST
Definition: Job.h:49
uint256 getBookBase(Book const &book)
Definition: Indexes.cpp:115
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition: View.cpp:185
static std::uint32_t trunc32(std::uint64_t v)
@ temBAD_SIGNATURE
Definition: TER.h:105
static auto const genesisAccountId
Definition: NetworkOPs.cpp:882
STL namespace.
T owns_lock(T... args)
T ref(T... args)
T reserve(T... args)
T reset(T... args)
T set_intersection(T... args)
T size(T... args)
T str(T... args)
std::string serialized
The manifest in serialized form.
Definition: Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition: Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition: Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
Definition: Manifest.cpp:244
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition: Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
Definition: Manifest.cpp:255
PublicKey masterKey
The master key associated with this manifest.
Definition: Manifest.h:86
Server fees published on server subscription.
Definition: NetworkOPs.cpp:202
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
Definition: NetworkOPs.cpp:221
bool operator==(ServerFeeSummary const &b) const
Definition: NetworkOPs.cpp:213
beast::insight::Gauge full_transitions
Definition: NetworkOPs.cpp:856
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:811
beast::insight::Hook hook
Definition: NetworkOPs.cpp:845
beast::insight::Gauge connected_duration
Definition: NetworkOPs.cpp:847
beast::insight::Gauge tracking_duration
Definition: NetworkOPs.cpp:849
beast::insight::Gauge connected_transitions
Definition: NetworkOPs.cpp:853
beast::insight::Gauge disconnected_transitions
Definition: NetworkOPs.cpp:852
beast::insight::Gauge syncing_duration
Definition: NetworkOPs.cpp:848
beast::insight::Gauge tracking_transitions
Definition: NetworkOPs.cpp:855
beast::insight::Gauge full_duration
Definition: NetworkOPs.cpp:850
beast::insight::Gauge disconnected_duration
Definition: NetworkOPs.cpp:846
beast::insight::Gauge syncing_transitions
Definition: NetworkOPs.cpp:854
SubAccountHistoryIndex(AccountID const &accountId)
Definition: NetworkOPs.cpp:700
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:719
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:714
Represents a transfer rate.
Definition: Rate.h:40
Data format for exchanging consumption information across peers.
Definition: Gossip.h:30
std::vector< Item > items
Definition: Gossip.h:42
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition: TxQ.h:165
IsMemberResult isMember(char const *key) const
Definition: MultiApiJson.h:94
void set(char const *key, auto const &v)
Definition: MultiApiJson.h:83
Select all peers (except optional excluded) that are in our cluster.
Definition: predicates.h:137
Sends a message to all peers.
Definition: predicates.h:32
Set the sequence number on a JTx.
Definition: seq.h:34
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)