rippled
Loading...
Searching...
No Matches
NetworkOPs.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/main/Tuning.h>
32#include <xrpld/app/misc/AmendmentTable.h>
33#include <xrpld/app/misc/DeliverMax.h>
34#include <xrpld/app/misc/HashRouter.h>
35#include <xrpld/app/misc/LoadFeeTrack.h>
36#include <xrpld/app/misc/NetworkOPs.h>
37#include <xrpld/app/misc/Transaction.h>
38#include <xrpld/app/misc/TxQ.h>
39#include <xrpld/app/misc/ValidatorKeys.h>
40#include <xrpld/app/misc/ValidatorList.h>
41#include <xrpld/app/misc/detail/AccountTxPaging.h>
42#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
43#include <xrpld/app/tx/apply.h>
44#include <xrpld/consensus/Consensus.h>
45#include <xrpld/consensus/ConsensusParms.h>
46#include <xrpld/overlay/Cluster.h>
47#include <xrpld/overlay/Overlay.h>
48#include <xrpld/overlay/predicates.h>
49#include <xrpld/perflog/PerfLog.h>
50#include <xrpld/rpc/BookChanges.h>
51#include <xrpld/rpc/CTID.h>
52#include <xrpld/rpc/DeliveredAmount.h>
53#include <xrpld/rpc/MPTokenIssuanceID.h>
54#include <xrpld/rpc/ServerHandler.h>
55
56#include <xrpl/basics/UptimeClock.h>
57#include <xrpl/basics/mulDiv.h>
58#include <xrpl/basics/safe_cast.h>
59#include <xrpl/basics/scope.h>
60#include <xrpl/beast/utility/rngfill.h>
61#include <xrpl/crypto/RFC1751.h>
62#include <xrpl/crypto/csprng.h>
63#include <xrpl/protocol/BuildInfo.h>
64#include <xrpl/protocol/Feature.h>
65#include <xrpl/protocol/MultiApiJson.h>
66#include <xrpl/protocol/RPCErr.h>
67#include <xrpl/protocol/TxFlags.h>
68#include <xrpl/protocol/jss.h>
69#include <xrpl/resource/Fees.h>
70#include <xrpl/resource/ResourceManager.h>
71
72#include <boost/asio/ip/host_name.hpp>
73#include <boost/asio/steady_timer.hpp>
74
75#include <algorithm>
76#include <exception>
77#include <mutex>
78#include <optional>
79#include <set>
80#include <sstream>
81#include <string>
82#include <tuple>
83#include <unordered_map>
84
85namespace ripple {
86
87class NetworkOPsImp final : public NetworkOPs
88{
94 {
95 public:
97 bool const admin;
98 bool const local;
100 bool applied = false;
102
105 bool a,
106 bool l,
107 FailHard f)
108 : transaction(t), admin(a), local(l), failType(f)
109 {
110 XRPL_ASSERT(
112 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
113 "valid inputs");
114 }
115 };
116
120 enum class DispatchState : unsigned char {
121 none,
122 scheduled,
123 running,
124 };
125
127
143 {
144 struct Counters
145 {
146 explicit Counters() = default;
147
150 };
151
155 std::chrono::steady_clock::time_point start_ =
157 std::chrono::steady_clock::time_point const processStart_ = start_;
160
161 public:
163 {
165 .transitions = 1;
166 }
167
174 void
176
182 void
183 json(Json::Value& obj) const;
184
186 {
188 decltype(mode_) mode;
189 decltype(start_) start;
191 };
192
195 {
198 }
199 };
200
203 {
204 ServerFeeSummary() = default;
205
207 XRPAmount fee,
208 TxQ::Metrics&& escalationMetrics,
209 LoadFeeTrack const& loadFeeTrack);
210 bool
211 operator!=(ServerFeeSummary const& b) const;
212
213 bool
215 {
216 return !(*this != b);
217 }
218
223 };
224
225public:
227 Application& app,
229 bool standalone,
230 std::size_t minPeerCount,
231 bool start_valid,
232 JobQueue& job_queue,
234 ValidatorKeys const& validatorKeys,
235 boost::asio::io_service& io_svc,
236 beast::Journal journal,
237 beast::insight::Collector::ptr const& collector)
238 : app_(app)
239 , m_journal(journal)
242 , heartbeatTimer_(io_svc)
243 , clusterTimer_(io_svc)
244 , accountHistoryTxTimer_(io_svc)
245 , mConsensus(
246 app,
248 setup_FeeVote(app_.config().section("voting")),
249 app_.logs().journal("FeeVote")),
251 *m_localTX,
252 app.getInboundTransactions(),
253 beast::get_abstract_clock<std::chrono::steady_clock>(),
254 validatorKeys,
255 app_.logs().journal("LedgerConsensus"))
256 , validatorPK_(
257 validatorKeys.keys ? validatorKeys.keys->publicKey
258 : decltype(validatorPK_){})
260 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
261 : decltype(validatorMasterPK_){})
263 , m_job_queue(job_queue)
264 , m_standalone(standalone)
265 , minPeerCount_(start_valid ? 0 : minPeerCount)
267 {
268 }
269
270 ~NetworkOPsImp() override
271 {
272 // This clear() is necessary to ensure the shared_ptrs in this map get
273 // destroyed NOW because the objects in this map invoke methods on this
274 // class when they are destroyed
276 }
277
278public:
280 getOperatingMode() const override;
281
283 strOperatingMode(OperatingMode const mode, bool const admin) const override;
284
286 strOperatingMode(bool const admin = false) const override;
287
288 //
289 // Transaction operations.
290 //
291
292 // Must complete immediately.
293 void
295
296 void
298 std::shared_ptr<Transaction>& transaction,
299 bool bUnlimited,
300 bool bLocal,
301 FailHard failType) override;
302
303 void
304 processTransactionSet(CanonicalTXSet const& set) override;
305
314 void
317 bool bUnlimited,
318 FailHard failType);
319
329 void
332 bool bUnlimited,
333 FailHard failtype);
334
335private:
336 bool
338
339 void
342 std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback);
343
344public:
348 void
350
356 void
358
359 //
360 // Owner functions.
361 //
362
366 AccountID const& account) override;
367
368 //
369 // Book functions.
370 //
371
372 void
375 Book const&,
376 AccountID const& uTakerID,
377 bool const bProof,
378 unsigned int iLimit,
379 Json::Value const& jvMarker,
380 Json::Value& jvResult) override;
381
382 // Ledger proposal/close functions.
383 bool
385
386 bool
389 std::string const& source) override;
390
391 void
392 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
393
394 // Network state machine.
395
396 // Used for the "jump" case.
397private:
398 void
400 bool
402
403public:
404 bool
406 uint256 const& networkClosed,
407 std::unique_ptr<std::stringstream> const& clog) override;
408 void
410 void
411 setStandAlone() override;
412
416 void
417 setStateTimer() override;
418
419 void
420 setNeedNetworkLedger() override;
421 void
422 clearNeedNetworkLedger() override;
423 bool
424 isNeedNetworkLedger() override;
425 bool
426 isFull() override;
427
428 void
429 setMode(OperatingMode om) override;
430
431 bool
432 isBlocked() override;
433 bool
434 isAmendmentBlocked() override;
435 void
436 setAmendmentBlocked() override;
437 bool
438 isAmendmentWarned() override;
439 void
440 setAmendmentWarned() override;
441 void
442 clearAmendmentWarned() override;
443 bool
444 isUNLBlocked() override;
445 void
446 setUNLBlocked() override;
447 void
448 clearUNLBlocked() override;
449 void
450 consensusViewChange() override;
451
453 getConsensusInfo() override;
455 getServerInfo(bool human, bool admin, bool counters) override;
456 void
457 clearLedgerFetch() override;
459 getLedgerFetchInfo() override;
462 std::optional<std::chrono::milliseconds> consensusDelay) override;
463 void
464 reportFeeChange() override;
465 void
467
468 void
469 updateLocalTx(ReadView const& view) override;
471 getLocalTxCount() override;
472
473 //
474 // Monitoring: publisher side.
475 //
476 void
477 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
478 void
481 std::shared_ptr<STTx const> const& transaction,
482 TER result) override;
483 void
484 pubValidation(std::shared_ptr<STValidation> const& val) override;
485
486 //--------------------------------------------------------------------------
487 //
488 // InfoSub::Source.
489 //
490 void
492 InfoSub::ref ispListener,
493 hash_set<AccountID> const& vnaAccountIDs,
494 bool rt) override;
495 void
497 InfoSub::ref ispListener,
498 hash_set<AccountID> const& vnaAccountIDs,
499 bool rt) override;
500
501 // Just remove the subscription from the tracking
502 // not from the InfoSub. Needed for InfoSub destruction
503 void
505 std::uint64_t seq,
506 hash_set<AccountID> const& vnaAccountIDs,
507 bool rt) override;
508
510 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
511 override;
512 void
514 InfoSub::ref ispListener,
515 AccountID const& account,
516 bool historyOnly) override;
517
518 void
520 std::uint64_t seq,
521 AccountID const& account,
522 bool historyOnly) override;
523
524 bool
525 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
526 bool
527 unsubLedger(std::uint64_t uListener) override;
528
529 bool
530 subBookChanges(InfoSub::ref ispListener) override;
531 bool
532 unsubBookChanges(std::uint64_t uListener) override;
533
534 bool
535 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
536 override;
537 bool
538 unsubServer(std::uint64_t uListener) override;
539
540 bool
541 subBook(InfoSub::ref ispListener, Book const&) override;
542 bool
543 unsubBook(std::uint64_t uListener, Book const&) override;
544
545 bool
546 subManifests(InfoSub::ref ispListener) override;
547 bool
548 unsubManifests(std::uint64_t uListener) override;
549 void
550 pubManifest(Manifest const&) override;
551
552 bool
553 subTransactions(InfoSub::ref ispListener) override;
554 bool
555 unsubTransactions(std::uint64_t uListener) override;
556
557 bool
558 subRTTransactions(InfoSub::ref ispListener) override;
559 bool
560 unsubRTTransactions(std::uint64_t uListener) override;
561
562 bool
563 subValidations(InfoSub::ref ispListener) override;
564 bool
565 unsubValidations(std::uint64_t uListener) override;
566
567 bool
568 subPeerStatus(InfoSub::ref ispListener) override;
569 bool
570 unsubPeerStatus(std::uint64_t uListener) override;
571 void
572 pubPeerStatus(std::function<Json::Value(void)> const&) override;
573
574 bool
575 subConsensus(InfoSub::ref ispListener) override;
576 bool
577 unsubConsensus(std::uint64_t uListener) override;
578
580 findRpcSub(std::string const& strUrl) override;
582 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
583 bool
584 tryRemoveRpcSub(std::string const& strUrl) override;
585
586 void
587 stop() override
588 {
589 {
590 boost::system::error_code ec;
591 heartbeatTimer_.cancel(ec);
592 if (ec)
593 {
594 JLOG(m_journal.error())
595 << "NetworkOPs: heartbeatTimer cancel error: "
596 << ec.message();
597 }
598
599 ec.clear();
600 clusterTimer_.cancel(ec);
601 if (ec)
602 {
603 JLOG(m_journal.error())
604 << "NetworkOPs: clusterTimer cancel error: "
605 << ec.message();
606 }
607
608 ec.clear();
609 accountHistoryTxTimer_.cancel(ec);
610 if (ec)
611 {
612 JLOG(m_journal.error())
613 << "NetworkOPs: accountHistoryTxTimer cancel error: "
614 << ec.message();
615 }
616 }
617 // Make sure that any waitHandlers pending in our timers are done.
618 using namespace std::chrono_literals;
619 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
620 }
621
622 void
623 stateAccounting(Json::Value& obj) override;
624
625private:
626 void
627 setTimer(
628 boost::asio::steady_timer& timer,
629 std::chrono::milliseconds const& expiry_time,
630 std::function<void()> onExpire,
631 std::function<void()> onError);
632 void
634 void
636 void
638 void
640
642 transJson(
643 std::shared_ptr<STTx const> const& transaction,
644 TER result,
645 bool validated,
648
649 void
652 AcceptedLedgerTx const& transaction,
653 bool last);
654
655 void
658 AcceptedLedgerTx const& transaction,
659 bool last);
660
661 void
664 std::shared_ptr<STTx const> const& transaction,
665 TER result);
666
667 void
668 pubServer();
669 void
671
673 getHostId(bool forAdmin);
674
675private:
679
680 /*
681 * With a validated ledger to separate history and future, the node
682 * streams historical txns with negative indexes starting from -1,
683 * and streams future txns starting from index 0.
684 * The SubAccountHistoryIndex struct maintains these indexes.
685 * It also has a flag stopHistorical_ for stopping streaming
686 * the historical txns.
687 */
689 {
691 // forward
693 // separate backward and forward
695 // history, backward
700
702 : accountId_(accountId)
703 , forwardTxIndex_(0)
706 , historyTxIndex_(-1)
707 , haveHistorical_(false)
708 , stopHistorical_(false)
709 {
710 }
711 };
713 {
716 };
718 {
721 };
724
728 void
732 void
734 void
736
739
741
743
745
750
752 boost::asio::steady_timer heartbeatTimer_;
753 boost::asio::steady_timer clusterTimer_;
754 boost::asio::steady_timer accountHistoryTxTimer_;
755
757
760
762
764
767
769
771
772 enum SubTypes {
773 sLedger, // Accepted ledgers.
774 sManifests, // Received validator manifests.
775 sServer, // When server changes connectivity state.
776 sTransactions, // All accepted transactions.
777 sRTTransactions, // All proposed and accepted transactions.
778 sValidations, // Received validations.
779 sPeerStatus, // Peer status changes.
780 sConsensusPhase, // Consensus phase
781 sBookChanges, // Per-ledger order book changes
782 sLastEntry // Any new entry must be ADDED ABOVE this one
783 };
784
786
788
790
791 // Whether we are in standalone mode.
792 bool const m_standalone;
793
794 // The number of nodes that we need to consider ourselves connected.
796
797 // Transaction batching.
802
804
807
808private:
809 struct Stats
810 {
811 template <class Handler>
813 Handler const& handler,
814 beast::insight::Collector::ptr const& collector)
815 : hook(collector->make_hook(handler))
816 , disconnected_duration(collector->make_gauge(
817 "State_Accounting",
818 "Disconnected_duration"))
819 , connected_duration(collector->make_gauge(
820 "State_Accounting",
821 "Connected_duration"))
823 collector->make_gauge("State_Accounting", "Syncing_duration"))
824 , tracking_duration(collector->make_gauge(
825 "State_Accounting",
826 "Tracking_duration"))
828 collector->make_gauge("State_Accounting", "Full_duration"))
829 , disconnected_transitions(collector->make_gauge(
830 "State_Accounting",
831 "Disconnected_transitions"))
832 , connected_transitions(collector->make_gauge(
833 "State_Accounting",
834 "Connected_transitions"))
835 , syncing_transitions(collector->make_gauge(
836 "State_Accounting",
837 "Syncing_transitions"))
838 , tracking_transitions(collector->make_gauge(
839 "State_Accounting",
840 "Tracking_transitions"))
842 collector->make_gauge("State_Accounting", "Full_transitions"))
843 {
844 }
845
852
858 };
859
860 std::mutex m_statsMutex; // Mutex to lock m_stats
862
863private:
864 void
866};
867
868//------------------------------------------------------------------------------
869
871 {"disconnected", "connected", "syncing", "tracking", "full"}};
872
874
882
883static auto const genesisAccountId = calcAccountID(
885 .first);
886
887//------------------------------------------------------------------------------
888inline OperatingMode
890{
891 return mMode;
892}
893
// Render the node's current operating mode ("disconnected", "connected",
// "syncing", "tracking", "full", ...) as a string. Delegates to the
// two-argument overload using a snapshot of the current mode; `admin`
// callers may receive the more detailed "proposing"/"validating" states.
inline std::string
NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
{
    return strOperatingMode(mMode, admin);
}
899
900inline void
902{
904}
905
906inline void
908{
909 needNetworkLedger_ = true;
910}
911
912inline void
914{
915 needNetworkLedger_ = false;
916}
917
918inline bool
920{
921 return needNetworkLedger_;
922}
923
924inline bool
926{
928}
929
932{
933 static std::string const hostname = boost::asio::ip::host_name();
934
935 if (forAdmin)
936 return hostname;
937
938 // For non-admin uses hash the node public key into a
939 // single RFC1751 word:
940 static std::string const shroudedHostId = [this]() {
941 auto const& id = app_.nodeIdentity();
942
943 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
944 }();
945
946 return shroudedHostId;
947}
948
949void
951{
953
954 // Only do this work if a cluster is configured
955 if (app_.cluster().size() != 0)
957}
958
959void
961 boost::asio::steady_timer& timer,
962 std::chrono::milliseconds const& expiry_time,
963 std::function<void()> onExpire,
964 std::function<void()> onError)
965{
966 // Only start the timer if waitHandlerCounter_ is not yet joined.
967 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
968 [this, onExpire, onError](boost::system::error_code const& e) {
969 if ((e.value() == boost::system::errc::success) &&
970 (!m_job_queue.isStopped()))
971 {
972 onExpire();
973 }
974 // Recover as best we can if an unexpected error occurs.
975 if (e.value() != boost::system::errc::success &&
976 e.value() != boost::asio::error::operation_aborted)
977 {
978 // Try again later and hope for the best.
979 JLOG(m_journal.error())
980 << "Timer got error '" << e.message()
981 << "'. Restarting timer.";
982 onError();
983 }
984 }))
985 {
986 timer.expires_from_now(expiry_time);
987 timer.async_wait(std::move(*optionalCountedHandler));
988 }
989}
990
991void
992NetworkOPsImp::setHeartbeatTimer()
993{
994 setTimer(
995 heartbeatTimer_,
996 mConsensus.parms().ledgerGRANULARITY,
997 [this]() {
998 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
999 processHeartbeatTimer();
1000 });
1001 },
1002 [this]() { setHeartbeatTimer(); });
1003}
1004
1005void
1006NetworkOPsImp::setClusterTimer()
1007{
1008 using namespace std::chrono_literals;
1009
1010 setTimer(
1011 clusterTimer_,
1012 10s,
1013 [this]() {
1014 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
1015 processClusterTimer();
1016 });
1017 },
1018 [this]() { setClusterTimer(); });
1019}
1020
1021void
1022NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
1023{
1024 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
1025 << toBase58(subInfo.index_->accountId_);
1026 using namespace std::chrono_literals;
1027 setTimer(
1028 accountHistoryTxTimer_,
1029 4s,
1030 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1031 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1032}
1033
/** Heartbeat: re-evaluate connectivity/operating mode and drive consensus.
 *
 * Runs periodically (see setHeartbeatTimer). Under the master lock it:
 *  - pings the LoadManager deadlock detector,
 *  - drops to DISCONNECTED if the peer count is below minPeerCount_
 *    (skipping the consensus timer entry entirely in that case),
 *  - promotes DISCONNECTED -> CONNECTED once peers are sufficient,
 *  - re-applies the current SYNCING/CONNECTED mode so setMode() can
 *    re-evaluate it against the latest validated ledger.
 * It then calls mConsensus.timerEntry(), publishes any consensus phase
 * change, and re-arms the timer.
 *
 * NOTE(review): the declaration of `ss` (presumably a std::stringstream
 * used to build the warning below) is not visible in this excerpt of the
 * file — confirm against the full source.
 */
void
NetworkOPsImp::processHeartbeatTimer()
{
    RclConsensusLogger clog(
        "Heartbeat Timer", mConsensus.validating(), m_journal);
    {
        std::unique_lock lock{app_.getMasterMutex()};

        // VFALCO NOTE This is for diagnosing a crash on exit
        LoadManager& mgr(app_.getLoadManager());
        mgr.heartbeat();

        std::size_t const numPeers = app_.overlay().size();

        // do we have sufficient peers? If not, we are disconnected.
        if (numPeers < minPeerCount_)
        {
            if (mMode != OperatingMode::DISCONNECTED)
            {
                setMode(OperatingMode::DISCONNECTED);
                ss << "Node count (" << numPeers << ") has fallen "
                   << "below required minimum (" << minPeerCount_ << ").";
                JLOG(m_journal.warn()) << ss.str();
                CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
            }
            else
            {
                CLOG(clog.ss())
                    << "already DISCONNECTED. too few peers (" << numPeers
                    << "), need at least " << minPeerCount_;
            }

            // MasterMutex lock need not be held to call setHeartbeatTimer()
            lock.unlock();
            // We do not call mConsensus.timerEntry until there are enough
            // peers providing meaningful inputs to consensus
            setHeartbeatTimer();

            return;
        }

        if (mMode == OperatingMode::DISCONNECTED)
        {
            setMode(OperatingMode::CONNECTED);
            JLOG(m_journal.info())
                << "Node count (" << numPeers << ") is sufficient.";
            CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
                            << " peers. ";
        }

        // Check if the last validated ledger forces a change between these
        // states.
        // NOTE: these look like no-ops, but setMode() re-evaluates the
        // requested mode (per the comment above, against the last validated
        // ledger) and may settle on a different one — presumably intentional;
        // confirm against setMode()'s definition.
        auto origMode = mMode.load();
        CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
        if (mMode == OperatingMode::SYNCING)
            setMode(OperatingMode::SYNCING);
        else if (mMode == OperatingMode::CONNECTED)
            setMode(OperatingMode::CONNECTED);
        auto newMode = mMode.load();
        if (origMode != newMode)
        {
            CLOG(clog.ss())
                << ", changing to " << strOperatingMode(newMode, true);
        }
        CLOG(clog.ss()) << ". ";
    }

    // Drive the consensus state machine with the current network close time.
    mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());

    CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
    ConsensusPhase const currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
        CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
    }
    CLOG(clog.ss()) << ". ";

    setHeartbeatTimer();
}
1116
1117void
1118NetworkOPsImp::processClusterTimer()
1119{
1120 if (app_.cluster().size() == 0)
1121 return;
1122
1123 using namespace std::chrono_literals;
1124
1125 bool const update = app_.cluster().update(
1126 app_.nodeIdentity().first,
1127 "",
1128 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1129 ? app_.getFeeTrack().getLocalFee()
1130 : 0,
1131 app_.timeKeeper().now());
1132
1133 if (!update)
1134 {
1135 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1136 setClusterTimer();
1137 return;
1138 }
1139
1140 protocol::TMCluster cluster;
1141 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1142 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1143 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1144 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1145 n.set_nodeload(node.getLoadFee());
1146 if (!node.name().empty())
1147 n.set_nodename(node.name());
1148 });
1149
1150 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1151 for (auto& item : gossip.items)
1152 {
1153 protocol::TMLoadSource& node = *cluster.add_loadsources();
1154 node.set_name(to_string(item.address));
1155 node.set_cost(item.balance);
1156 }
1157 app_.overlay().foreach(send_if(
1158 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1159 peer_in_cluster()));
1160 setClusterTimer();
1161}
1162
1163//------------------------------------------------------------------------------
1164
1166NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
1167 const
1168{
1169 if (mode == OperatingMode::FULL && admin)
1170 {
1171 auto const consensusMode = mConsensus.mode();
1172 if (consensusMode != ConsensusMode::wrongLedger)
1173 {
1174 if (consensusMode == ConsensusMode::proposing)
1175 return "proposing";
1176
1177 if (mConsensus.validating())
1178 return "validating";
1179 }
1180 }
1181
1182 return states_[static_cast<std::size_t>(mode)];
1183}
1184
1185void
1186NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1187{
1188 if (isNeedNetworkLedger())
1189 {
1190 // Nothing we can do if we've never been in sync
1191 return;
1192 }
1193
1194 // Enforce Network bar for batch txn
1195 if (iTrans->isFlag(tfInnerBatchTxn) &&
1196 m_ledgerMaster.getValidatedRules().enabled(featureBatch))
1197 {
1198 JLOG(m_journal.error())
1199 << "Submitted transaction invalid: tfInnerBatchTxn flag present.";
1200 return;
1201 }
1202
1203 // this is an asynchronous interface
1204 auto const trans = sterilize(*iTrans);
1205
1206 auto const txid = trans->getTransactionID();
1207 auto const flags = app_.getHashRouter().getFlags(txid);
1208
1209 if ((flags & SF_BAD) != 0)
1210 {
1211 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1212 return;
1213 }
1214
1215 try
1216 {
1217 auto const [validity, reason] = checkValidity(
1218 app_.getHashRouter(),
1219 *trans,
1220 m_ledgerMaster.getValidatedRules(),
1221 app_.config());
1222
1223 if (validity != Validity::Valid)
1224 {
1225 JLOG(m_journal.warn())
1226 << "Submitted transaction invalid: " << reason;
1227 return;
1228 }
1229 }
1230 catch (std::exception const& ex)
1231 {
1232 JLOG(m_journal.warn())
1233 << "Exception checking transaction " << txid << ": " << ex.what();
1234
1235 return;
1236 }
1237
1238 std::string reason;
1239
1240 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1241
1242 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1243 auto t = tx;
1244 processTransaction(t, false, false, FailHard::no);
1245 });
1246}
1247
1248bool
1249NetworkOPsImp::preProcessTransaction(std::shared_ptr<Transaction>& transaction)
1250{
1251 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1252
1253 if ((newFlags & SF_BAD) != 0)
1254 {
1255 // cached bad
1256 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1257 transaction->setStatus(INVALID);
1258 transaction->setResult(temBAD_SIGNATURE);
1259 return false;
1260 }
1261
1262 auto const view = m_ledgerMaster.getCurrentLedger();
1263
1264 // This function is called by several different parts of the codebase
1265 // under no circumstances will we ever accept an inner txn within a batch
1266 // txn from the network.
1267 auto const sttx = *transaction->getSTransaction();
1268 if (sttx.isFlag(tfInnerBatchTxn) && view->rules().enabled(featureBatch))
1269 {
1270 transaction->setStatus(INVALID);
1271 transaction->setResult(temINVALID_FLAG);
1272 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1273 return false;
1274 }
1275
1276 // NOTE eahennis - I think this check is redundant,
1277 // but I'm not 100% sure yet.
1278 // If so, only cost is looking up HashRouter flags.
1279 auto const [validity, reason] =
1280 checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config());
1281 XRPL_ASSERT(
1282 validity == Validity::Valid,
1283 "ripple::NetworkOPsImp::processTransaction : valid validity");
1284
1285 // Not concerned with local checks at this point.
1286 if (validity == Validity::SigBad)
1287 {
1288 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1289 transaction->setStatus(INVALID);
1290 transaction->setResult(temBAD_SIGNATURE);
1291 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1292 return false;
1293 }
1294
1295 // canonicalize can change our pointer
1296 app_.getMasterTransaction().canonicalize(&transaction);
1297
1298 return true;
1299}
1300
1301void
1302NetworkOPsImp::processTransaction(
1303 std::shared_ptr<Transaction>& transaction,
1304 bool bUnlimited,
1305 bool bLocal,
1306 FailHard failType)
1307{
1308 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1309
1310 // preProcessTransaction can change our pointer
1311 if (!preProcessTransaction(transaction))
1312 return;
1313
1314 if (bLocal)
1315 doTransactionSync(transaction, bUnlimited, failType);
1316 else
1317 doTransactionAsync(transaction, bUnlimited, failType);
1318}
1319
1320void
1321NetworkOPsImp::doTransactionAsync(
1322 std::shared_ptr<Transaction> transaction,
1323 bool bUnlimited,
1324 FailHard failType)
1325{
1326 std::lock_guard lock(mMutex);
1327
1328 if (transaction->getApplying())
1329 return;
1330
1331 mTransactions.push_back(
1332 TransactionStatus(transaction, bUnlimited, false, failType));
1333 transaction->setApplying();
1334
1335 if (mDispatchState == DispatchState::none)
1336 {
1337 if (m_job_queue.addJob(
1338 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1339 {
1340 mDispatchState = DispatchState::scheduled;
1341 }
1342 }
1343}
1344
1345void
1346NetworkOPsImp::doTransactionSync(
1347 std::shared_ptr<Transaction> transaction,
1348 bool bUnlimited,
1349 FailHard failType)
1350{
1351 std::unique_lock<std::mutex> lock(mMutex);
1352
1353 if (!transaction->getApplying())
1354 {
1355 mTransactions.push_back(
1356 TransactionStatus(transaction, bUnlimited, true, failType));
1357 transaction->setApplying();
1358 }
1359
1360 doTransactionSyncBatch(
1361 lock, [&transaction](std::unique_lock<std::mutex> const&) {
1362 return transaction->getApplying();
1363 });
1364}
1365
/** Drive batch transaction application until retryCallback says stop.
 *
 * Loop: if a batch job is already running, wait on mCond for it to
 * finish; otherwise apply the pending batch on this thread and, if more
 * transactions arrived meanwhile, schedule another batch job. Repeats
 * while retryCallback (invoked with the held lock) returns true.
 *
 * NOTE(review): the parameter list appears truncated in this excerpt —
 * `lock` (presumably a std::unique_lock<std::mutex>&, held on mMutex by
 * the caller) is used below but its declaration is not visible; confirm
 * against the full source.
 */
void
NetworkOPsImp::doTransactionSyncBatch(
    std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback)
{
    do
    {
        if (mDispatchState == DispatchState::running)
        {
            // A batch processing job is already running, so wait.
            mCond.wait(lock);
        }
        else
        {
            apply(lock);

            if (mTransactions.size())
            {
                // More transactions need to be applied, but by another job.
                if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
                        transactionBatch();
                    }))
                {
                    mDispatchState = DispatchState::scheduled;
                }
            }
        }
    } while (retryCallback(lock));
}
1395
/** Process an entire canonical transaction set (e.g. from consensus).
 *
 * Each transaction is wrapped, screened via preProcessTransaction()
 * (invalid ones are flagged SF_BAD and skipped), then the surviving
 * candidates are appended to the pending batch under mMutex and applied
 * via doTransactionSyncBatch() until none of them is still marked as
 * applying.
 *
 * NOTE(review): the declaration of `candidates` (presumably a
 * std::vector<std::shared_ptr<Transaction>>) is not visible in this
 * excerpt of the file — confirm against the full source.
 */
void
NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set)
{
    auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXNSet");
    candidates.reserve(set.size());
    for (auto const& [_, tx] : set)
    {
        std::string reason;
        auto transaction = std::make_shared<Transaction>(tx, reason, app_);

        if (transaction->getStatus() == INVALID)
        {
            if (!reason.empty())
            {
                JLOG(m_journal.trace())
                    << "Exception checking transaction: " << reason;
            }
            // Remember this transaction as bad so it is not retried.
            app_.getHashRouter().setFlags(tx->getTransactionID(), SF_BAD);
            continue;
        }

        // preProcessTransaction can change our pointer
        if (!preProcessTransaction(transaction))
            continue;

        candidates.emplace_back(transaction);
    }

    std::vector<TransactionStatus> transactions;
    transactions.reserve(candidates.size());

    std::unique_lock lock(mMutex);

    // Mark each not-yet-applying candidate and stage it for the batch.
    for (auto& transaction : candidates)
    {
        if (!transaction->getApplying())
        {
            transactions.emplace_back(transaction, false, false, FailHard::no);
            transaction->setApplying();
        }
    }

    // Merge the staged transactions into the shared pending batch.
    if (mTransactions.empty())
        mTransactions.swap(transactions);
    else
    {
        mTransactions.reserve(mTransactions.size() + transactions.size());
        for (auto& t : transactions)
            mTransactions.push_back(std::move(t));
    }

    // Keep applying until none of the pending transactions is still
    // marked as applying.
    doTransactionSyncBatch(lock, [&](std::unique_lock<std::mutex> const&) {
        XRPL_ASSERT(
            lock.owns_lock(),
            "ripple::NetworkOPsImp::processTransactionSet has lock");
        return std::any_of(
            mTransactions.begin(), mTransactions.end(), [](auto const& t) {
                return t.transaction->getApplying();
            });
    });
}
1458
1459void
1460NetworkOPsImp::transactionBatch()
1461{
1462 std::unique_lock<std::mutex> lock(mMutex);
1463
1464 if (mDispatchState == DispatchState::running)
1465 return;
1466
1467 while (mTransactions.size())
1468 {
1469 apply(lock);
1470 }
1471}
1472
// Apply the queued batch of transactions to the open ledger, then
// disposition each result: publish, hold, queue, relay, or discard.
//
// @param batchLock held on entry (guards mTransactions/mDispatchState);
//        released while ledger work runs and re-acquired at the end.
//
// NOTE(review): the declaration of submit_held (the vector of follow-on
// candidates filled below) is elided in this listing — presumably declared
// just above `transactions`; confirm against the full source.
void
NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
{
    // Take ownership of the pending batch so the lock can be dropped
    // during the (potentially slow) ledger work.
    std::vector<TransactionStatus> transactions;
    mTransactions.swap(transactions);
    XRPL_ASSERT(
        !transactions.empty(),
        "ripple::NetworkOPsImp::apply : non-empty transactions");
    XRPL_ASSERT(
        mDispatchState != DispatchState::running,
        "ripple::NetworkOPsImp::apply : is not running");

    mDispatchState = DispatchState::running;

    batchLock.unlock();

    {
        // Master and ledger mutexes are acquired together via std::lock
        // to avoid lock-order inversion.
        std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
        bool changed = false;
        {
            std::unique_lock ledgerLock{
                m_ledgerMaster.peekMutex(), std::defer_lock};
            std::lock(masterLock, ledgerLock);

            // Apply the whole batch against the open ledger through TxQ.
            app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
                for (TransactionStatus& e : transactions)
                {
                    // we check before adding to the batch
                    ApplyFlags flags = tapNONE;
                    if (e.admin)
                        flags |= tapUNLIMITED;

                    if (e.failType == FailHard::yes)
                        flags |= tapFAIL_HARD;

                    auto const result = app_.getTxQ().apply(
                        app_, view, e.transaction->getSTransaction(), flags, j);
                    e.result = result.ter;
                    e.applied = result.applied;
                    changed = changed || result.applied;
                }
                return changed;
            });
        }
        // Any applied transaction may have moved the open-ledger fee.
        if (changed)
            reportFeeChange();

        std::optional<LedgerIndex> validatedLedgerIndex;
        if (auto const l = m_ledgerMaster.getValidatedLedger())
            validatedLedgerIndex = l->info().seq;

        // Disposition each transaction according to its TER result.
        auto newOL = app_.openLedger().current();
        for (TransactionStatus& e : transactions)
        {
            e.transaction->clearSubmitResult();

            if (e.applied)
            {
                pubProposedTransaction(
                    newOL, e.transaction->getSTransaction(), e.result);
                e.transaction->setApplied();
            }

            e.transaction->setResult(e.result);

            // Malformed transactions are marked so they won't be retried.
            if (isTemMalformed(e.result))
                app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);

#ifdef DEBUG
            if (e.result != tesSUCCESS)
            {
                std::string token, human;

                if (transResultInfo(e.result, token, human))
                {
                    JLOG(m_journal.info())
                        << "TransactionResult: " << token << ": " << human;
                }
            }
#endif

            bool addLocal = e.local;

            if (e.result == tesSUCCESS)
            {
                JLOG(m_journal.debug())
                    << "Transaction is now included in open ledger";
                e.transaction->setStatus(INCLUDED);

                // Pop as many "reasonable" transactions for this account as
                // possible. "Reasonable" means they have sequential sequence
                // numbers, or use tickets.
                auto const& txCur = e.transaction->getSTransaction();

                std::size_t count = 0;
                for (auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
                     txNext && count < maxPoppedTransactions;
                     txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
                {
                    // submit_held mutation requires the batch lock.
                    if (!batchLock.owns_lock())
                        batchLock.lock();
                    std::string reason;
                    auto const trans = sterilize(*txNext);
                    auto t = std::make_shared<Transaction>(trans, reason, app_);
                    if (t->getApplying())
                        break;
                    submit_held.emplace_back(t, false, false, FailHard::no);
                    t->setApplying();
                }
                if (batchLock.owns_lock())
                    batchLock.unlock();
            }
            else if (e.result == tefPAST_SEQ)
            {
                // duplicate or conflict
                JLOG(m_journal.info()) << "Transaction is obsolete";
                e.transaction->setStatus(OBSOLETE);
            }
            else if (e.result == terQUEUED)
            {
                JLOG(m_journal.debug())
                    << "Transaction is likely to claim a"
                    << " fee, but is queued until fee drops";

                e.transaction->setStatus(HELD);
                // Add to held transactions, because it could get
                // kicked out of the queue, and this will try to
                // put it back.
                m_ledgerMaster.addHeldTransaction(e.transaction);
                e.transaction->setQueued();
                e.transaction->setKept();
            }
            else if (
                isTerRetry(e.result) || isTelLocal(e.result) ||
                isTefFailure(e.result))
            {
                if (e.failType != FailHard::yes)
                {
                    auto const lastLedgerSeq =
                        e.transaction->getSTransaction()->at(
                            ~sfLastLedgerSequence);
                    // NOTE(review): the alternative branch of this ternary
                    // (the no-LastLedgerSequence case) is elided in this
                    // listing — confirm against the full source.
                    auto const ledgersLeft = lastLedgerSeq
                        ? *lastLedgerSeq -
                            m_ledgerMaster.getCurrentLedgerIndex()
                    // If any of these conditions are met, the transaction can
                    // be held:
                    // 1. It was submitted locally. (Note that this flag is only
                    //    true on the initial submission.)
                    // 2. The transaction has a LastLedgerSequence, and the
                    //    LastLedgerSequence is fewer than LocalTxs::holdLedgers
                    //    (5) ledgers into the future. (Remember that an
                    //    unseated optional compares as less than all seated
                    //    values, so it has to be checked explicitly first.)
                    // 3. The SF_HELD flag is not set on the txID. (setFlags
                    //    checks before setting. If the flag is set, it returns
                    //    false, which means it's been held once without one of
                    //    the other conditions, so don't hold it again. Time's
                    //    up!)
                    //
                    if (e.local ||
                        (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
                        app_.getHashRouter().setFlags(
                            e.transaction->getID(), SF_HELD))
                    {
                        // transaction should be held
                        JLOG(m_journal.debug())
                            << "Transaction should be held: " << e.result;
                        e.transaction->setStatus(HELD);
                        m_ledgerMaster.addHeldTransaction(e.transaction);
                        e.transaction->setKept();
                    }
                    else
                        JLOG(m_journal.debug())
                            << "Not holding transaction "
                            << e.transaction->getID() << ": "
                            << (e.local ? "local" : "network") << ", "
                            << "result: " << e.result << " ledgers left: "
                            << (ledgersLeft ? to_string(*ledgersLeft)
                                            : "unspecified");
                }
            }
            else
            {
                JLOG(m_journal.debug())
                    << "Status other than success " << e.result;
                e.transaction->setStatus(INVALID);
            }

            auto const enforceFailHard =
                e.failType == FailHard::yes && !isTesSuccess(e.result);

            // Remember locally-submitted transactions so they can be
            // re-applied across ledger switches (unless fail_hard failed).
            if (addLocal && !enforceFailHard)
            {
                m_localTX->push_back(
                    m_ledgerMaster.getCurrentLedgerIndex(),
                    e.transaction->getSTransaction());
                e.transaction->setKept();
            }

            // Relay to peers when applied/queued (or local best-effort
            // while not FULL), except fail_hard failures.
            if ((e.applied ||
                 ((mMode != OperatingMode::FULL) &&
                  (e.failType != FailHard::yes) && e.local) ||
                 (e.result == terQUEUED)) &&
                !enforceFailHard)
            {
                auto const toSkip =
                    app_.getHashRouter().shouldRelay(e.transaction->getID());
                if (auto const sttx = *(e.transaction->getSTransaction());
                    toSkip &&
                    // Skip relaying if it's an inner batch txn and batch
                    // feature is enabled
                    !(sttx.isFlag(tfInnerBatchTxn) &&
                      newOL->rules().enabled(featureBatch)))
                {
                    protocol::TMTransaction tx;
                    Serializer s;

                    sttx.add(s);
                    tx.set_rawtransaction(s.data(), s.size());
                    tx.set_status(protocol::tsCURRENT);
                    tx.set_receivetimestamp(
                        app_.timeKeeper().now().time_since_epoch().count());
                    tx.set_deferred(e.result == terQUEUED);
                    // FIXME: This should be when we received it
                    app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
                    e.transaction->setBroadcast();
                }
            }

            // Record the fee/sequence context clients need to track this
            // transaction against the validated ledger.
            if (validatedLedgerIndex)
            {
                auto [fee, accountSeq, availableSeq] =
                    app_.getTxQ().getTxRequiredFeeAndSeq(
                        *newOL, e.transaction->getSTransaction());
                e.transaction->setCurrentLedgerState(
                    *validatedLedgerIndex, fee, accountSeq, availableSeq);
            }
        }
    }

    batchLock.lock();

    for (TransactionStatus& e : transactions)
        e.transaction->clearApplying();

    // Schedule the follow-on transactions popped above.
    if (!submit_held.empty())
    {
        if (mTransactions.empty())
            mTransactions.swap(submit_held);
        else
        {
            mTransactions.reserve(mTransactions.size() + submit_held.size());
            for (auto& e : submit_held)
                mTransactions.push_back(std::move(e));
        }
    }

    mCond.notify_all();

    mDispatchState = DispatchState::none;
}
1736
1737//
1738// Owner functions
1739//
1740
// Walk an account's owner directory and collect its owned ledger objects
// (offers and trust lines) as JSON.
//
// NOTE(review): the return-type line and the ledger parameter line
// (`lpLedger`, read from below) are elided in this listing — confirm
// against the full source.
NetworkOPsImp::getOwnerInfo(
    AccountID const& account)
{
    Json::Value jvObjects(Json::objectValue);
    auto root = keylet::ownerDir(account);
    auto sleNode = lpLedger->read(keylet::page(root));
    if (sleNode)
    {
        std::uint64_t uNodeDir;

        // Iterate every page of the owner directory.
        do
        {
            for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
            {
                auto sleCur = lpLedger->read(keylet::child(uDirEntry));
                XRPL_ASSERT(
                    sleCur,
                    "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");

                switch (sleCur->getType())
                {
                    case ltOFFER:
                        // NOTE(review): the array-initializer line is
                        // elided in this listing.
                        if (!jvObjects.isMember(jss::offers))
                            jvObjects[jss::offers] =

                        jvObjects[jss::offers].append(
                            sleCur->getJson(JsonOptions::none));
                        break;

                    case ltRIPPLE_STATE:
                        // NOTE(review): the array-initializer line is
                        // elided in this listing.
                        if (!jvObjects.isMember(jss::ripple_lines))
                        {
                            jvObjects[jss::ripple_lines] =
                        }

                        jvObjects[jss::ripple_lines].append(
                            sleCur->getJson(JsonOptions::none));
                        break;

                    case ltACCOUNT_ROOT:
                    case ltDIR_NODE:
                    default:
                        // Owner directories are not expected to contain
                        // these types.
                        UNREACHABLE(
                            "ripple::NetworkOPsImp::getOwnerInfo : invalid "
                            "type");
                        break;
                }
            }

            // Follow the link to the next directory page, if any.
            uNodeDir = sleNode->getFieldU64(sfIndexNext);

            if (uNodeDir)
            {
                sleNode = lpLedger->read(keylet::page(root, uNodeDir));
                XRPL_ASSERT(
                    sleNode,
                    "ripple::NetworkOPsImp::getOwnerInfo : read next page");
            }
        } while (uNodeDir);
    }

    return jvObjects;
}
1808
1809//
1810// Other
1811//
1812
1813inline bool
1814NetworkOPsImp::isBlocked()
1815{
1816 return isAmendmentBlocked() || isUNLBlocked();
1817}
1818
1819inline bool
1820NetworkOPsImp::isAmendmentBlocked()
1821{
1822 return amendmentBlocked_;
1823}
1824
// Mark this server amendment blocked and demote it to CONNECTED.
// The flag is set before calling setMode, which consults isBlocked().
void
NetworkOPsImp::setAmendmentBlocked()
{
    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1831
1832inline bool
1833NetworkOPsImp::isAmendmentWarned()
1834{
1835 return !amendmentBlocked_ && amendmentWarned_;
1836}
1837
// Raise the "unsupported amendment has majority" warning flag.
inline void
NetworkOPsImp::setAmendmentWarned()
{
    amendmentWarned_ = true;
}
1843
// Clear the "unsupported amendment has majority" warning flag.
inline void
NetworkOPsImp::clearAmendmentWarned()
{
    amendmentWarned_ = false;
}
1849
1850inline bool
1851NetworkOPsImp::isUNLBlocked()
1852{
1853 return unlBlocked_;
1854}
1855
// Mark this server UNL blocked and demote it to CONNECTED.
// The flag is set before calling setMode, which consults isBlocked().
void
NetworkOPsImp::setUNLBlocked()
{
    unlBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1862
// Clear the UNL-blocked flag (e.g. after a fresh validator list arrives).
inline void
NetworkOPsImp::clearUNLBlocked()
{
    unlBlocked_ = false;
}
1868
// Decide whether our last closed ledger matches the network's preferred
// LCL, switching to the preferred ledger when safe.
//
// @param peerList active peers whose reported LCLs are tallied.
// @param networkClosed [out] hash of the network's agreed-on LCL.
// @return true only when we actually jumped ledgers (an abnormal event).
bool
NetworkOPsImp::checkLastClosedLedger(
    Overlay::PeerSequence const& peerList,
    uint256& networkClosed)
{
    // Returns true if there's an *abnormal* ledger issue, normal changing in
    // TRACKING mode should return false. Do we have sufficient validations for
    // our last closed ledger? Or do sufficient nodes agree? And do we have no
    // better ledger available? If so, we are either tracking or full.

    JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";

    auto const ourClosed = m_ledgerMaster.getClosedLedger();

    if (!ourClosed)
        return false;

    uint256 closedLedger = ourClosed->info().hash;
    uint256 prevClosedLedger = ourClosed->info().parentHash;
    JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
    JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;

    //-------------------------------------------------------------------------
    // Determine preferred last closed ledger

    auto& validations = app_.getValidations();
    JLOG(m_journal.debug())
        << "ValidationTrie " << Json::Compact(validations.getJsonTrie());

    // Will rely on peer LCL if no trusted validations exist
    // NOTE(review): the declaration of peerCounts (a hash->count map) is
    // elided in this listing — confirm against the full source.
    peerCounts[closedLedger] = 0;
    // Our own vote counts only once we are at least TRACKING.
    if (mMode >= OperatingMode::TRACKING)
        peerCounts[closedLedger]++;

    // Tally each peer's reported closed ledger.
    for (auto& peer : peerList)
    {
        uint256 peerLedger = peer->getClosedLedgerHash();

        if (peerLedger.isNonZero())
            ++peerCounts[peerLedger];
    }

    for (auto const& it : peerCounts)
        JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;

    // Trusted validations take precedence over raw peer counts.
    uint256 preferredLCL = validations.getPreferredLCL(
        RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
        m_ledgerMaster.getValidLedgerIndex(),
        peerCounts);

    bool switchLedgers = preferredLCL != closedLedger;
    if (switchLedgers)
        closedLedger = preferredLCL;
    //-------------------------------------------------------------------------
    if (switchLedgers && (closedLedger == prevClosedLedger))
    {
        // don't switch to our own previous ledger
        JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
        networkClosed = ourClosed->info().hash;
        switchLedgers = false;
    }
    else
        networkClosed = closedLedger;

    if (!switchLedgers)
        return false;

    // Get (or start fetching) the preferred ledger.
    auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);

    if (!consensus)
        consensus = app_.getInboundLedgers().acquire(
            closedLedger, 0, InboundLedger::Reason::CONSENSUS);

    if (consensus &&
        (!m_ledgerMaster.canBeCurrent(consensus) ||
         !m_ledgerMaster.isCompatible(
             *consensus, m_journal.debug(), "Not switching")))
    {
        // Don't switch to a ledger not on the validated chain
        // or with an invalid close time or sequence
        networkClosed = ourClosed->info().hash;
        return false;
    }

    JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
    JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
                           << getJson({*ourClosed, {}});
    JLOG(m_journal.info()) << "Net LCL " << closedLedger;

    // Being off the consensus ledger means we cannot claim TRACKING/FULL.
    if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
    {
        setMode(OperatingMode::CONNECTED);
    }

    if (consensus)
    {
        // FIXME: If this rewinds the ledger sequence, or has the same
        // sequence, we should update the status on any stored transactions
        // in the invalidated ledgers.
        switchLastClosedLedger(consensus);
    }

    return true;
}
1974
// Abnormal-path ledger jump: adopt newLCL as our last closed ledger,
// rebuild the open ledger on top of it, and announce the switch to peers.
// Caller must hold the master lock (see comment below).
void
NetworkOPsImp::switchLastClosedLedger(
    std::shared_ptr<Ledger const> const& newLCL)
{
    // set the newLCL as our last closed ledger -- this is abnormal code
    JLOG(m_journal.error())
        << "JUMP last closed ledger to " << newLCL->info().hash;

    clearNeedNetworkLedger();

    // Update fee computations.
    app_.getTxQ().processClosedLedger(app_, *newLCL, true);

    // Caller must own master lock
    {
        // Apply tx in old open ledger to new
        // open ledger. Then apply local tx.

        auto retries = m_localTX->getTxSet();
        auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
        // NOTE(review): the declaration of `rules` is elided in this
        // listing — presumably an optional<Rules>; confirm against the
        // full source.
        if (lastVal)
            rules = makeRulesGivenLedger(*lastVal, app_.config().features);
        else
            rules.emplace(app_.config().features);
        app_.openLedger().accept(
            app_,
            *rules,
            newLCL,
            OrderedTxs({}),
            false,
            retries,
            tapNONE,
            "jump",
            [&](OpenView& view, beast::Journal j) {
                // Stuff the ledger with transactions from the queue.
                return app_.getTxQ().accept(app_, view);
            });
    }

    m_ledgerMaster.switchLCL(newLCL);

    // Broadcast the ledger switch to all connected peers.
    protocol::TMStatusChange s;
    s.set_newevent(protocol::neSWITCHED_LEDGER);
    s.set_ledgerseq(newLCL->info().seq);
    s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
    s.set_ledgerhashprevious(
        newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
    s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());

    app_.overlay().foreach(
        send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
}
2028
// Start a consensus round on top of our current last closed ledger.
//
// @param networkClosed hash of the ledger the network agrees is closed.
// @return true if the consensus engine was started; false if we lack the
//         previous ledger and cannot participate yet.
//
// NOTE(review): the trailing parameter line (presumably the `clog`
// diagnostic stream used below) is elided in this listing.
bool
NetworkOPsImp::beginConsensus(
    uint256 const& networkClosed,
{
    XRPL_ASSERT(
        networkClosed.isNonZero(),
        "ripple::NetworkOPsImp::beginConsensus : nonzero input");

    auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();

    JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
                           << " with LCL " << closingInfo.parentHash;

    auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);

    if (!prevLedger)
    {
        // this shouldn't happen unless we jump ledgers
        if (mMode == OperatingMode::FULL)
        {
            JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
            setMode(OperatingMode::TRACKING);
            CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
        }

        CLOG(clog) << "beginConsensus no previous ledger. ";
        return false;
    }

    XRPL_ASSERT(
        prevLedger->info().hash == closingInfo.parentHash,
        "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
        "parent");
    XRPL_ASSERT(
        closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
        "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
        "hash");

    // Refresh the trusted validator set, honoring the negative UNL when
    // the amendment is enabled on the previous ledger.
    if (prevLedger->rules().enabled(featureNegativeUNL))
        app_.validators().setNegativeUNL(prevLedger->negativeUNL());
    TrustChanges const changes = app_.validators().updateTrusted(
        app_.getValidations().getCurrentNodeIDs(),
        closingInfo.parentCloseTime,
        *this,
        app_.overlay(),
        app_.getHashRouter());

    if (!changes.added.empty() || !changes.removed.empty())
    {
        app_.getValidations().trustChanged(changes.added, changes.removed);
        // Update the AmendmentTable so it tracks the current validators.
        app_.getAmendmentTable().trustChanged(
            app_.validators().getQuorumKeys().second);
    }

    // Kick off the actual round.
    mConsensus.startRound(
        app_.timeKeeper().closeTime(),
        networkClosed,
        prevLedger,
        changes.removed,
        changes.added,
        clog);

    // Report a phase change to subscribers, if one occurred.
    ConsensusPhase const currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
    }

    JLOG(m_journal.debug()) << "Initiating consensus engine";
    return true;
}
2103
2104bool
2105NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
2106{
2107 auto const& peerKey = peerPos.publicKey();
2108 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
2109 {
2110 // Could indicate a operator misconfiguration where two nodes are
2111 // running with the same validator key configured, so this isn't fatal,
2112 // and it doesn't necessarily indicate peer misbehavior. But since this
2113 // is a trusted message, it could be a very big deal. Either way, we
2114 // don't want to relay the proposal. Note that the byzantine behavior
2115 // detection in handleNewValidation will notify other peers.
2116 //
2117 // Another, innocuous explanation is unusual message routing and delays,
2118 // causing this node to receive its own messages back.
2119 JLOG(m_journal.error())
2120 << "Received a proposal signed by MY KEY from a peer. This may "
2121 "indicate a misconfiguration where another node has the same "
2122 "validator key, or may be caused by unusual message routing and "
2123 "delays.";
2124 return false;
2125 }
2126
2127 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
2128}
2129
2130void
2131NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
2132{
2133 // We now have an additional transaction set
2134 // either created locally during the consensus process
2135 // or acquired from a peer
2136
2137 // Inform peers we have this set
2138 protocol::TMHaveTransactionSet msg;
2139 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
2140 msg.set_status(protocol::tsHAVE);
2141 app_.overlay().foreach(
2142 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
2143
2144 // We acquired it because consensus asked us to
2145 if (fromAcquire)
2146 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
2147}
2148
2149void
2150NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
2151{
2152 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
2153
2154 for (auto const& it : app_.overlay().getActivePeers())
2155 {
2156 if (it && (it->getClosedLedgerHash() == deadLedger))
2157 {
2158 JLOG(m_journal.trace()) << "Killing obsolete peer status";
2159 it->cycleStatus();
2160 }
2161 }
2162
2163 uint256 networkClosed;
2164 bool ledgerChange =
2165 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
2166
2167 if (networkClosed.isZero())
2168 {
2169 CLOG(clog) << "endConsensus last closed ledger is zero. ";
2170 return;
2171 }
2172
2173 // WRITEME: Unless we are in FULL and in the process of doing a consensus,
2174 // we must count how many nodes share our LCL, how many nodes disagree with
2175 // our LCL, and how many validations our LCL has. We also want to check
2176 // timing to make sure there shouldn't be a newer LCL. We need this
2177 // information to do the next three tests.
2178
2179 if (((mMode == OperatingMode::CONNECTED) ||
2180 (mMode == OperatingMode::SYNCING)) &&
2181 !ledgerChange)
2182 {
2183 // Count number of peers that agree with us and UNL nodes whose
2184 // validations we have for LCL. If the ledger is good enough, go to
2185 // TRACKING - TODO
2186 if (!needNetworkLedger_)
2187 setMode(OperatingMode::TRACKING);
2188 }
2189
2190 if (((mMode == OperatingMode::CONNECTED) ||
2191 (mMode == OperatingMode::TRACKING)) &&
2192 !ledgerChange)
2193 {
2194 // check if the ledger is good enough to go to FULL
2195 // Note: Do not go to FULL if we don't have the previous ledger
2196 // check if the ledger is bad enough to go to CONNECTE D -- TODO
2197 auto current = m_ledgerMaster.getCurrentLedger();
2198 if (app_.timeKeeper().now() < (current->info().parentCloseTime +
2199 2 * current->info().closeTimeResolution))
2200 {
2201 setMode(OperatingMode::FULL);
2202 }
2203 }
2204
2205 beginConsensus(networkClosed, clog);
2206}
2207
2208void
2209NetworkOPsImp::consensusViewChange()
2210{
2211 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2212 {
2213 setMode(OperatingMode::CONNECTED);
2214 }
2215}
2216
// Publish a received validator manifest to "manifests" stream subscribers,
// pruning any subscribers that have gone away.
void
NetworkOPsImp::pubManifest(Manifest const& mo)
{
    // VFALCO consider std::shared_mutex
    std::lock_guard sl(mSubLock);

    if (!mStreamMaps[sManifests].empty())
    {
        // NOTE(review): the declaration of jvObj is elided in this listing.

        jvObj[jss::type] = "manifestReceived";
        jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
        if (mo.signingKey)
            jvObj[jss::signing_key] =
                toBase58(TokenType::NodePublic, *mo.signingKey);
        jvObj[jss::seq] = Json::UInt(mo.sequence);
        if (auto sig = mo.getSignature())
            jvObj[jss::signature] = strHex(*sig);
        jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
        if (!mo.domain.empty())
            jvObj[jss::domain] = mo.domain;
        jvObj[jss::manifest] = strHex(mo.serialized);

        // Send to live subscribers; erase entries whose weak_ptr expired.
        for (auto i = mStreamMaps[sManifests].begin();
             i != mStreamMaps[sManifests].end();)
        {
            if (auto p = i->second.lock())
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sManifests].erase(i);
            }
        }
    }
}
2255
// Snapshot of the server's fee state: load factor/base from the local fee
// tracker, the open ledger's base fee, and (moved-in) TxQ escalation
// metrics.
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    XRPAmount fee,
    TxQ::Metrics&& escalationMetrics,
    LoadFeeTrack const& loadFeeTrack)
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , baseFee{fee}
    , em{std::move(escalationMetrics)}
{
}
2266
// Fee-summary inequality: any change in server load, base fee, or the
// presence/content of queue escalation metrics counts as "changed".
// NOTE(review): the `operator!=(` signature line is elided in this listing.
bool
    NetworkOPsImp::ServerFeeSummary const& b) const
{
    // Scalar fields, or one side having metrics while the other doesn't.
    if (loadFactorServer != b.loadFactorServer ||
        loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
        em.has_value() != b.em.has_value())
        return true;

    // Both have metrics: compare the individual fee levels.
    if (em && b.em)
    {
        return (
            em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
            em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
            em->referenceFeeLevel != b.em->referenceFeeLevel);
    }

    // Neither side has metrics and all scalar fields matched.
    return false;
}
2286
// Need to cap uint64 values to uint32 due to JSON limitations
// NOTE(review): the parameter line and the max32 constant definition are
// elided in this listing — presumably `trunc32(std::uint64_t v)` clamped
// against the largest 32-bit value; confirm against the full source.
static std::uint32_t
{

    return std::min(max32, v);
};
2295
// Publish the current server fee/load status to "server" stream
// subscribers and remember it in mLastFeeSummary.
// NOTE(review): the signature line, the mSubLock guard, the jvObj
// declaration, and parts of the ServerFeeSummary construction are elided
// in this listing.
void
{
    // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
    //             list into a local array while holding the lock then release
    //             the lock and call send on everyone.
    //

    if (!mStreamMaps[sServer].empty())
    {

            app_.openLedger().current()->fees().base,
            app_.getFeeTrack()};

        jvObj[jss::type] = "serverStatus";
        jvObj[jss::server_status] = strOperatingMode();
        jvObj[jss::load_base] = f.loadBaseServer;
        jvObj[jss::load_factor_server] = f.loadFactorServer;
        jvObj[jss::base_fee] = f.baseFee.jsonClipped();

        if (f.em)
        {
            // Effective load factor is the larger of the server's load
            // factor and the scaled open-ledger fee level.
            auto const loadFactor = std::max(
                safe_cast<std::uint64_t>(f.loadFactorServer),
                mulDiv(
                    f.em->openLedgerFeeLevel,
                    f.loadBaseServer,
                    f.em->referenceFeeLevel)

            jvObj[jss::load_factor] = trunc32(loadFactor);
            jvObj[jss::load_factor_fee_escalation] =
                f.em->openLedgerFeeLevel.jsonClipped();
            jvObj[jss::load_factor_fee_queue] =
                f.em->minProcessingFeeLevel.jsonClipped();
            jvObj[jss::load_factor_fee_reference] =
                f.em->referenceFeeLevel.jsonClipped();
        }
        else
            jvObj[jss::load_factor] = f.loadFactorServer;

        mLastFeeSummary = f;

        // Send to live subscribers; erase entries whose weak_ptr expired.
        for (auto i = mStreamMaps[sServer].begin();
             i != mStreamMaps[sServer].end();)
        {
            InfoSub::pointer p = i->second.lock();

            // VFALCO TODO research the possibility of using thread queues and
            //             linearizing the deletion of subscribers with the
            //             sending of JSON data.
            if (p)
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sServer].erase(i);
            }
        }
    }
}
2363
// Publish a consensus phase change to "consensus" stream subscribers.
// NOTE(review): the signature line (presumably taking the ConsensusPhase
// published below), the mSubLock guard, and the jvObj declaration are
// elided in this listing.
void
{

    auto& streamMap = mStreamMaps[sConsensusPhase];
    if (!streamMap.empty())
    {
        jvObj[jss::type] = "consensusPhase";
        jvObj[jss::consensus] = to_string(phase);

        // Send to live subscribers; erase entries whose weak_ptr expired.
        for (auto i = streamMap.begin(); i != streamMap.end();)
        {
            if (auto p = i->second.lock())
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = streamMap.erase(i);
            }
        }
    }
}
2390
// Publish a received validation to "validations" stream subscribers,
// rendering one JSON object and adapting it per subscriber API version.
// NOTE(review): the signature line (the validation `val` used below),
// the mSubLock guard, and the jvObj declaration are elided in this
// listing.
void
{
    // VFALCO consider std::shared_mutex

    if (!mStreamMaps[sValidations].empty())
    {

        auto const signerPublic = val->getSignerPublic();

        jvObj[jss::type] = "validationReceived";
        jvObj[jss::validation_public_key] =
            toBase58(TokenType::NodePublic, signerPublic);
        jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
        jvObj[jss::signature] = strHex(val->getSignature());
        jvObj[jss::full] = val->isFull();
        jvObj[jss::flags] = val->getFlags();
        jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
        jvObj[jss::data] = strHex(val->getSerializer().slice());

        if (auto version = (*val)[~sfServerVersion])
            jvObj[jss::server_version] = std::to_string(*version);

        if (auto cookie = (*val)[~sfCookie])
            jvObj[jss::cookie] = std::to_string(*cookie);

        if (auto hash = (*val)[~sfValidatedHash])
            jvObj[jss::validated_hash] = strHex(*hash);

        // Report the master key when the signer key is an ephemeral key.
        auto const masterKey =
            app_.validatorManifests().getMasterKey(signerPublic);

        if (masterKey != signerPublic)
            jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);

        // NOTE *seq is a number, but old API versions used string. We replace
        // number with a string using MultiApiJson near end of this function
        if (auto const seq = (*val)[~sfLedgerSequence])
            jvObj[jss::ledger_index] = *seq;

        if (val->isFieldPresent(sfAmendments))
        {
            jvObj[jss::amendments] = Json::Value(Json::arrayValue);
            for (auto const& amendment : val->getFieldV256(sfAmendments))
                jvObj[jss::amendments].append(to_string(amendment));
        }

        if (auto const closeTime = (*val)[~sfCloseTime])
            jvObj[jss::close_time] = *closeTime;

        if (auto const loadFee = (*val)[~sfLoadFee])
            jvObj[jss::load_fee] = *loadFee;

        if (auto const baseFee = val->at(~sfBaseFee))
            jvObj[jss::base_fee] = static_cast<double>(*baseFee);

        if (auto const reserveBase = val->at(~sfReserveBase))
            jvObj[jss::reserve_base] = *reserveBase;

        if (auto const reserveInc = val->at(~sfReserveIncrement))
            jvObj[jss::reserve_inc] = *reserveInc;

        // (The ~ operator converts the Proxy to a std::optional, which
        // simplifies later operations)
        if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
            baseFeeXRP && baseFeeXRP->native())
            jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();

        if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
            reserveBaseXRP && reserveBaseXRP->native())
            jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();

        if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
            reserveIncXRP && reserveIncXRP->native())
            jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();

        // NOTE Use MultiApiJson to publish two slightly different JSON objects
        // for consumers supporting different API versions
        MultiApiJson multiObj{jvObj};
        multiObj.visit(
            RPC::apiVersion<1>, //
            [](Json::Value& jvTx) {
                // Type conversion for older API versions to string
                if (jvTx.isMember(jss::ledger_index))
                {
                    jvTx[jss::ledger_index] =
                        std::to_string(jvTx[jss::ledger_index].asUInt());
                }
            });

        // Send the version-appropriate object to each live subscriber;
        // erase entries whose weak_ptr expired.
        for (auto i = mStreamMaps[sValidations].begin();
             i != mStreamMaps[sValidations].end();)
        {
            if (auto p = i->second.lock())
            {
                multiObj.visit(
                    p->getApiVersion(), //
                    [&](Json::Value const& jv) { p->send(jv, true); });
                ++i;
            }
            else
            {
                i = mStreamMaps[sValidations].erase(i);
            }
        }
    }
}
2500
// Publish a peer status change to "peer_status" stream subscribers. The
// JSON payload is produced lazily by `func` so no work is done when there
// are no subscribers.
// NOTE(review): the signature line (the `func` generator used below) and
// the mSubLock guard are elided in this listing.
void
{

    if (!mStreamMaps[sPeerStatus].empty())
    {
        Json::Value jvObj(func());

        jvObj[jss::type] = "peerStatusChange";

        // Send to live subscribers; erase entries whose weak_ptr expired.
        for (auto i = mStreamMaps[sPeerStatus].begin();
             i != mStreamMaps[sPeerStatus].end();)
        {
            InfoSub::pointer p = i->second.lock();

            if (p)
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sPeerStatus].erase(i);
            }
        }
    }
}
2529
// Transition the server's operating mode, updating accounting and
// notifying "server" stream subscribers on an actual change.
// NOTE(review): the signature line (the target mode `om`), the bodies of
// the CONNECTED/SYNCING special cases, and the blocked-demotion statement
// are elided in this listing — confirm against the full source.
void
{
    using namespace std::chrono_literals;
    if (om == OperatingMode::CONNECTED)
    {
    }
    else if (om == OperatingMode::SYNCING)
    {
    }

    // A blocked server may not advance beyond CONNECTED.
    if ((om > OperatingMode::CONNECTED) && isBlocked())

    // No-op if the mode is unchanged.
    if (mMode == om)
        return;

    mMode = om;

    // Track time spent in each mode for server_info reporting.
    accounting_.mode(om);

    JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
    pubServer();
}
2558
// Process a validation received from the network or generated locally:
// run it through handleNewValidation (deduplicating concurrent arrivals
// of the same ledger hash), publish it to subscribers, and decide whether
// it should be relayed.
//
// @return true if the validation should be relayed to peers.
//
// NOTE(review): the signature lines (the validation `val`) and the
// declarations of `lock` (guarding pendingValidations_) and the
// stringstream `ss` are elided in this listing.
bool
    std::string const& source)
{
    JLOG(m_journal.trace())
        << "recvValidation " << val->getLedgerHash() << " from " << source;
    BypassAccept bypassAccept = BypassAccept::no;
    try
    {
        // If another thread is already accepting this ledger hash, skip
        // the accept step for this copy.
        if (pendingValidations_.contains(val->getLedgerHash()))
            bypassAccept = BypassAccept::yes;
        else
            pendingValidations_.insert(val->getLedgerHash());
        // Release the lock while the (possibly slow) handling runs.
        scope_unlock unlock(lock);
        handleNewValidation(app_, val, source, bypassAccept, m_journal);
    }
    catch (std::exception const& e)
    {
        JLOG(m_journal.warn())
            << "Exception thrown for handling new validation "
            << val->getLedgerHash() << ": " << e.what();
    }
    catch (...)
    {
        JLOG(m_journal.warn())
            << "Unknown exception thrown for handling new validation "
            << val->getLedgerHash();
    }
    // Only the thread that inserted the hash removes it.
    if (bypassAccept == BypassAccept::no)
    {
        pendingValidations_.erase(val->getLedgerHash());
    }
    lock.unlock();

    pubValidation(val);

    JLOG(m_journal.debug()) << [this, &val]() -> auto {
        ss << "VALIDATION: " << val->render() << " master_key: ";
        auto master = app_.validators().getTrustedKey(val->getSignerPublic());
        if (master)
        {
            ss << toBase58(TokenType::NodePublic, *master);
        }
        else
        {
            ss << "none";
        }
        return ss.str();
    }();

    // We will always relay trusted validations; if configured, we will
    // also relay all untrusted validations.
    return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
}
2617
// Body of getConsensusInfo (signature elided in this listing): expose the
// consensus engine's state as JSON; `true` requests the full report.
{
    return mConsensus.getJson(true);
}
2623
// Build the JSON body for the server_info / server_state RPC.
//   human    - true for human-readable units (XRP decimals, ratios);
//              false for raw machine units (drops, load units).
//   admin    - include privileged fields (node_size, validator list details,
//              local fee factors, job-queue load, git build info).
//   counters - include perf-log counters and nodestore counts.
// NOTE(review): multiple original source lines are elided throughout this
// extraction (e.g. 2627 `info` declaration, 2658, 2683-2685, 2792, 2812,
// 2817, 2831, 2843, 2877, 2914, 2940, 2975-2979, 2986, 2994-2995) — this
// block is not compilable as shown; confirm against the repository source.
2625NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2626{
2628
2629    // System-level warnings
2630    {
2631        Json::Value warnings{Json::arrayValue};
2632        if (isAmendmentBlocked())
2633        {
2634            Json::Value& w = warnings.append(Json::objectValue);
2635            w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2636            w[jss::message] =
2637                "This server is amendment blocked, and must be updated to be "
2638                "able to stay in sync with the network.";
2639        }
2640        if (isUNLBlocked())
2641        {
2642            Json::Value& w = warnings.append(Json::objectValue);
2643            w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2644            w[jss::message] =
2645                "This server has an expired validator list. validators.txt "
2646                "may be incorrectly configured or some [validator_list_sites] "
2647                "may be unreachable.";
2648        }
2649        if (admin && isAmendmentWarned())
2650        {
2651            Json::Value& w = warnings.append(Json::objectValue);
2652            w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2653            w[jss::message] =
2654                "One or more unsupported amendments have reached majority. "
2655                "Upgrade to the latest version before they are activated "
2656                "to avoid being amendment blocked.";
            // NOTE(review): the call producing `expected` (original line
            // 2658, presumably the amendment table's firstUnsupportedExpected)
            // is elided here.
2657            if (auto const expected =
2659            {
2660                auto& d = w[jss::details] = Json::objectValue;
2661                d[jss::expected_date] = expected->time_since_epoch().count();
2662                d[jss::expected_date_UTC] = to_string(*expected);
2663            }
2664        }
2665
2666        if (warnings.size())
2667            info[jss::warnings] = std::move(warnings);
2668    }
2669
2670    // hostid: unique string describing the machine
2671    if (human)
2672        info[jss::hostid] = getHostId(admin);
2673
2674    // domain: if configured with a domain, report it:
2675    if (!app_.config().SERVER_DOMAIN.empty())
2676        info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2677
2678    info[jss::build_version] = BuildInfo::getVersionString();
2679
2680    info[jss::server_state] = strOperatingMode(admin);
2681
2682    info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2684
2686        info[jss::network_ledger] = "waiting";
2687
2688    info[jss::validation_quorum] =
2689        static_cast<Json::UInt>(app_.validators().quorum());
2690
2691    if (admin)
2692    {
        // Map the configured NODE_SIZE (0..4) to its human-readable label.
2693        switch (app_.config().NODE_SIZE)
2694        {
2695            case 0:
2696                info[jss::node_size] = "tiny";
2697                break;
2698            case 1:
2699                info[jss::node_size] = "small";
2700                break;
2701            case 2:
2702                info[jss::node_size] = "medium";
2703                break;
2704            case 3:
2705                info[jss::node_size] = "large";
2706                break;
2707            case 4:
2708                info[jss::node_size] = "huge";
2709                break;
2710        }
2711
2712        auto when = app_.validators().expires();
2713
2714        if (!human)
2715        {
2716            if (when)
2717                info[jss::validator_list_expires] =
2718                    safe_cast<Json::UInt>(when->time_since_epoch().count());
2719            else
2720                info[jss::validator_list_expires] = 0;
2721        }
2722        else
2723        {
2724            auto& x = (info[jss::validator_list] = Json::objectValue);
2725
2726            x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2727
2728            if (when)
2729            {
                // time_point::max() is the sentinel for "never expires".
2730                if (*when == TimeKeeper::time_point::max())
2731                {
2732                    x[jss::expiration] = "never";
2733                    x[jss::status] = "active";
2734                }
2735                else
2736                {
2737                    x[jss::expiration] = to_string(*when);
2738
2739                    if (*when > app_.timeKeeper().now())
2740                        x[jss::status] = "active";
2741                    else
2742                        x[jss::status] = "expired";
2743                }
2744            }
2745            else
2746            {
2747                x[jss::status] = "unknown";
2748                x[jss::expiration] = "unknown";
2749            }
2750        }
2751
2752#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2753        {
2754            auto& x = (info[jss::git] = Json::objectValue);
2755#ifdef GIT_COMMIT_HASH
2756            x[jss::hash] = GIT_COMMIT_HASH;
2757#endif
2758#ifdef GIT_BRANCH
2759            x[jss::branch] = GIT_BRANCH;
2760#endif
2761        }
2762#endif
2763    }
2764    info[jss::io_latency_ms] =
2765        static_cast<Json::UInt>(app_.getIOLatency().count());
2766
2767    if (admin)
2768    {
2769        if (auto const localPubKey = app_.validators().localPublicKey();
2770            localPubKey && app_.getValidationPublicKey())
2771        {
2772            info[jss::pubkey_validator] =
2773                toBase58(TokenType::NodePublic, localPubKey.value());
2774        }
2775        else
2776        {
2777            info[jss::pubkey_validator] = "none";
2778        }
2779    }
2780
2781    if (counters)
2782    {
2783        info[jss::counters] = app_.getPerfLog().countersJson();
2784
2785        Json::Value nodestore(Json::objectValue);
2786        app_.getNodeStore().getCountsJson(nodestore);
2787        info[jss::counters][jss::nodestore] = nodestore;
2788        info[jss::current_activities] = app_.getPerfLog().currentJson();
2789    }
2790
2791    info[jss::pubkey_node] =
2793
2794    info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2795
    // NOTE(review): the guard preceding this assignment (original line 2796,
    // presumably `if (amendmentBlocked_)` or similar) is elided here.
2797        info[jss::amendment_blocked] = true;
2798
2799    auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2800
2801    if (fp != 0)
2802        info[jss::fetch_pack] = Json::UInt(fp);
2803
2804    info[jss::peers] = Json::UInt(app_.overlay().size());
2805
2806    Json::Value lastClose = Json::objectValue;
2807    lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2808
2809    if (human)
2810    {
2811        lastClose[jss::converge_time_s] =
2813    }
2814    else
2815    {
2816        lastClose[jss::converge_time] =
2818    }
2819
2820    info[jss::last_close] = lastClose;
2821
2822    //  info[jss::consensus] = mConsensus.getJson();
2823
2824    if (admin)
2825        info[jss::load] = m_job_queue.getJson();
2826
2827    if (auto const netid = app_.overlay().networkID())
2828        info[jss::network_id] = static_cast<Json::UInt>(*netid);
2829
2830    auto const escalationMetrics =
2832
2833    auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2834    auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2835    /* Scale the escalated fee level to unitless "load factor".
2836       In practice, this just strips the units, but it will continue
2837       to work correctly if either base value ever changes. */
2838    auto const loadFactorFeeEscalation =
2839        mulDiv(
2840            escalationMetrics.openLedgerFeeLevel,
2841            loadBaseServer,
2842            escalationMetrics.referenceFeeLevel)
2844
2845    auto const loadFactor = std::max(
2846        safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2847
2848    if (!human)
2849    {
2850        info[jss::load_base] = loadBaseServer;
2851        info[jss::load_factor] = trunc32(loadFactor);
2852        info[jss::load_factor_server] = loadFactorServer;
2853
2854        /* Json::Value doesn't support uint64, so clamp to max
2855           uint32 value. This is mostly theoretical, since there
2856           probably isn't enough extant XRP to drive the factor
2857           that high.
2858        */
2859        info[jss::load_factor_fee_escalation] =
2860            escalationMetrics.openLedgerFeeLevel.jsonClipped();
2861        info[jss::load_factor_fee_queue] =
2862            escalationMetrics.minProcessingFeeLevel.jsonClipped();
2863        info[jss::load_factor_fee_reference] =
2864            escalationMetrics.referenceFeeLevel.jsonClipped();
2865    }
2866    else
2867    {
2868        info[jss::load_factor] =
2869            static_cast<double>(loadFactor) / loadBaseServer;
2870
2871        if (loadFactorServer != loadFactor)
2872            info[jss::load_factor_server] =
2873                static_cast<double>(loadFactorServer) / loadBaseServer;
2874
2875        if (admin)
2876        {
            // NOTE(review): the declaration of `fee` (original line 2877,
            // presumably initialized from getLocalFee) is elided here.
2878            if (fee != loadBaseServer)
2879                info[jss::load_factor_local] =
2880                    static_cast<double>(fee) / loadBaseServer;
2881            fee = app_.getFeeTrack().getRemoteFee();
2882            if (fee != loadBaseServer)
2883                info[jss::load_factor_net] =
2884                    static_cast<double>(fee) / loadBaseServer;
2885            fee = app_.getFeeTrack().getClusterFee();
2886            if (fee != loadBaseServer)
2887                info[jss::load_factor_cluster] =
2888                    static_cast<double>(fee) / loadBaseServer;
2889        }
2890        if (escalationMetrics.openLedgerFeeLevel !=
2891                escalationMetrics.referenceFeeLevel &&
2892            (admin || loadFactorFeeEscalation != loadFactor))
2893            info[jss::load_factor_fee_escalation] =
2894                escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2895                    escalationMetrics.referenceFeeLevel);
2896        if (escalationMetrics.minProcessingFeeLevel !=
2897            escalationMetrics.referenceFeeLevel)
2898            info[jss::load_factor_fee_queue] =
2899                escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2900                    escalationMetrics.referenceFeeLevel);
2901    }
2902
    // Prefer the last validated ledger; fall back to the last closed one and
    // report it under "closed_ledger" instead of "validated_ledger".
2903    bool valid = false;
2904    auto lpClosed = m_ledgerMaster.getValidatedLedger();
2905
2906    if (lpClosed)
2907        valid = true;
2908    else
2909        lpClosed = m_ledgerMaster.getClosedLedger();
2910
2911    if (lpClosed)
2912    {
2913        XRPAmount const baseFee = lpClosed->fees().base;
        // NOTE(review): the declaration of `l` (original line 2914,
        // presumably `Json::Value l(Json::objectValue);`) is elided here.
2915        l[jss::seq] = Json::UInt(lpClosed->info().seq);
2916        l[jss::hash] = to_string(lpClosed->info().hash);
2917
2918        if (!human)
2919        {
2920            l[jss::base_fee] = baseFee.jsonClipped();
2921            l[jss::reserve_base] =
2922                lpClosed->fees().accountReserve(0).jsonClipped();
2923            l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2924            l[jss::close_time] = Json::Value::UInt(
2925                lpClosed->info().closeTime.time_since_epoch().count());
2926        }
2927        else
2928        {
2929            l[jss::base_fee_xrp] = baseFee.decimalXRP();
2930            l[jss::reserve_base_xrp] =
2931                lpClosed->fees().accountReserve(0).decimalXRP();
2932            l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2933
2934            if (auto const closeOffset = app_.timeKeeper().closeOffset();
2935                std::abs(closeOffset.count()) >= 60)
2936                l[jss::close_time_offset] =
2937                    static_cast<std::uint32_t>(closeOffset.count());
2938
2939            constexpr std::chrono::seconds highAgeThreshold{1000000};
            // NOTE(review): the condition on line 2940 (which branch applies
            // when the ledger is validated vs merely closed) is elided here.
2941            {
2942                auto const age = m_ledgerMaster.getValidatedLedgerAge();
2943                l[jss::age] =
2944                    Json::UInt(age < highAgeThreshold ? age.count() : 0);
2945            }
2946            else
2947            {
2948                auto lCloseTime = lpClosed->info().closeTime;
2949                auto closeTime = app_.timeKeeper().closeTime();
2950                if (lCloseTime <= closeTime)
2951                {
2952                    using namespace std::chrono_literals;
2953                    auto age = closeTime - lCloseTime;
2954                    l[jss::age] =
2955                        Json::UInt(age < highAgeThreshold ? age.count() : 0);
2956                }
2957            }
2958        }
2959
2960        if (valid)
2961            info[jss::validated_ledger] = l;
2962        else
2963            info[jss::closed_ledger] = l;
2964
2965        auto lpPublished = m_ledgerMaster.getPublishedLedger();
2966        if (!lpPublished)
2967            info[jss::published_ledger] = "none";
2968        else if (lpPublished->info().seq != lpClosed->info().seq)
2969            info[jss::published_ledger] = lpPublished->info().seq;
2970    }
2971
2972    accounting_.json(info);
2973    info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2974    info[jss::jq_trans_overflow] =
2976    info[jss::peer_disconnects] =
2978    info[jss::peer_disconnects_resources] =
2980
2981    // This array must be sorted in increasing order.
2982    static constexpr std::array<std::string_view, 7> protocols{
2983        "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2984    static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2985    {
        // NOTE(review): the declaration of `ports` (original line 2986,
        // presumably a Json::arrayValue) is elided here.
2987        for (auto const& port : app_.getServerHandler().setup().ports)
2988        {
2989            // Don't publish admin ports for non-admin users
2990            if (!admin &&
2991                !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2992                  port.admin_user.empty() && port.admin_password.empty()))
2993                continue;
            // NOTE(review): the declaration of `proto` and the algorithm
            // call (original lines 2994-2995, presumably std::set_intersection
            // into a vector) are elided here.
2996                std::begin(port.protocol),
2997                std::end(port.protocol),
2998                std::begin(protocols),
2999                std::end(protocols),
3000                std::back_inserter(proto));
3001            if (!proto.empty())
3002            {
3003                auto& jv = ports.append(Json::Value(Json::objectValue));
3004                jv[jss::port] = std::to_string(port.port);
3005                jv[jss::protocol] = Json::Value{Json::arrayValue};
3006                for (auto const& p : proto)
3007                    jv[jss::protocol].append(p);
3008            }
3009        }
3010
3011        if (app_.config().exists(SECTION_PORT_GRPC))
3012        {
3013            auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
3014            auto const optPort = grpcSection.get("port");
3015            if (optPort && grpcSection.get("ip"))
3016            {
3017                auto& jv = ports.append(Json::Value(Json::objectValue));
3018                jv[jss::port] = *optPort;
3019                jv[jss::protocol] = Json::Value{Json::arrayValue};
3020                jv[jss::protocol].append("grpc");
3021            }
3022        }
3023        info[jss::ports] = std::move(ports);
3024    }
3025
3026    return info;
3027}
3028
// NOTE(review): the function name (original line 3030) and its body
// (line 3032) are elided in this extraction. Given its position between
// getServerInfo and getLedgerFetchInfo, this is presumably
// NetworkOPsImp::clearLedgerFetch() clearing inbound-ledger failures —
// confirm against the repository source.
3029void
3031{
3033}
3034
// Returns the inbound-ledger acquisition status as JSON.
// NOTE(review): the return type and function-name lines (original 3035-3036,
// presumably "Json::Value NetworkOPsImp::getLedgerFetchInfo()") are elided
// in this extraction.
3037{
3038    return app_.getInboundLedgers().getInfo();
3039}
3040
3041void
// NetworkOPsImp::pubProposedTransaction (name line 3042 elided in this
// extraction) — publish a not-yet-validated transaction to the real-time
// transaction stream subscribers, then fan out to per-account real-time
// subscribers.
3043    std::shared_ptr<ReadView const> const& ledger,
3044    std::shared_ptr<STTx const> const& transaction,
3045    TER result)
3046{
3047    // never publish an inner txn inside a batch txn
3048    if (transaction->isFlag(tfInnerBatchTxn) &&
3049        ledger->rules().enabled(featureBatch))
3050        return;
3051
3052    MultiApiJson jvObj =
3053        transJson(transaction, result, false, ledger, std::nullopt);
3054
3055    {
        // NOTE(review): the lock acquisition guarding mStreamMaps (original
        // line 3056) is elided in this extraction.
3057
3058        auto it = mStreamMaps[sRTTransactions].begin();
3059        while (it != mStreamMaps[sRTTransactions].end())
3060        {
3061            InfoSub::pointer p = it->second.lock();
3062
            // Send to live subscribers in their API version; prune entries
            // whose weak_ptr has expired.
3063            if (p)
3064            {
3065                jvObj.visit(
3066                    p->getApiVersion(),  //
3067                    [&](Json::Value const& jv) { p->send(jv, true); });
3068                ++it;
3069            }
3070            else
3071            {
3072                it = mStreamMaps[sRTTransactions].erase(it);
3073            }
3074        }
3075    }
3076
3077    pubProposedAccountTransaction(ledger, transaction, result);
3078}
3079
3080void
// NetworkOPsImp::pubLedger (name line 3081 elided in this extraction) —
// publish a newly accepted/validated ledger: the ledger-closed event to the
// sLedger stream, book changes to the sBookChanges stream, first-time
// activation of delayed account-history subscriptions, and finally each
// accepted transaction via pubValidatedTransaction.
3082{
3083    // Ledgers are published only when they acquire sufficient validations
3084    // Holes are filled across connection loss or other catastrophe
3085
    // NOTE(review): the start of the `alpAccepted` declaration (original
    // line 3086) is elided in this extraction.
3087        app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
3088    if (!alpAccepted)
3089    {
3090        alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
3091        app_.getAcceptedLedgerCache().canonicalize_replace_client(
3092            lpAccepted->info().hash, alpAccepted);
3093    }
3094
3095    XRPL_ASSERT(
3096        alpAccepted->getLedger().get() == lpAccepted.get(),
3097        "ripple::NetworkOPsImp::pubLedger : accepted input");
3098
3099    {
3100        JLOG(m_journal.debug())
3101            << "Publishing ledger " << lpAccepted->info().seq << " "
3102            << lpAccepted->info().hash;
3103
        // NOTE(review): the lock acquisition guarding mStreamMaps (original
        // line 3104) is elided in this extraction.
3105
3106        if (!mStreamMaps[sLedger].empty())
3107        {
            // NOTE(review): the declaration of `jvObj` (original line 3108)
            // is elided here.
3109
3110            jvObj[jss::type] = "ledgerClosed";
3111            jvObj[jss::ledger_index] = lpAccepted->info().seq;
3112            jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
3113            jvObj[jss::ledger_time] = Json::Value::UInt(
3114                lpAccepted->info().closeTime.time_since_epoch().count());
3115
3116            if (!lpAccepted->rules().enabled(featureXRPFees))
3117                jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3118            jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
3119            jvObj[jss::reserve_base] =
3120                lpAccepted->fees().accountReserve(0).jsonClipped();
3121            jvObj[jss::reserve_inc] =
3122                lpAccepted->fees().increment.jsonClipped();
3123
3124            jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
3125
            // NOTE(review): the guard and value expression for
            // validated_ledgers (original lines 3126 and 3129) are elided.
3127            {
3128                jvObj[jss::validated_ledgers] =
3130            }
3131
3132            auto it = mStreamMaps[sLedger].begin();
3133            while (it != mStreamMaps[sLedger].end())
3134            {
3135                InfoSub::pointer p = it->second.lock();
3136                if (p)
3137                {
3138                    p->send(jvObj, true);
3139                    ++it;
3140                }
3141                else
3142                    it = mStreamMaps[sLedger].erase(it);
3143            }
3144        }
3145
3146        if (!mStreamMaps[sBookChanges].empty())
3147        {
3148            Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
3149
3150            auto it = mStreamMaps[sBookChanges].begin();
3151            while (it != mStreamMaps[sBookChanges].end())
3152            {
3153                InfoSub::pointer p = it->second.lock();
3154                if (p)
3155                {
3156                    p->send(jvObj, true);
3157                    ++it;
3158                }
3159                else
3160                    it = mStreamMaps[sBookChanges].erase(it);
3161            }
3162        }
3163
3164        {
3165            static bool firstTime = true;
3166            if (firstTime)
3167            {
3168                // First validated ledger, start delayed SubAccountHistory
3169                firstTime = false;
3170                for (auto& outer : mSubAccountHistory)
3171                {
3172                    for (auto& inner : outer.second)
3173                    {
3174                        auto& subInfo = inner.second;
3175                        if (subInfo.index_->separationLedgerSeq_ == 0)
3176                        {
                            // NOTE(review): the function name invoked here
                            // (original line 3177) is elided.
3178                                alpAccepted->getLedger(), subInfo);
3179                        }
3180                    }
3181                }
3182            }
3183        }
3184    }
3185
3186    // Don't lock since pubAcceptedTransaction is locking.
3187    for (auto const& accTx : *alpAccepted)
3188    {
3189        JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
        // NOTE(review): the function name invoked here (original line 3190,
        // presumably pubValidatedTransaction) is elided.
3191            lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3192    }
3193}
3194
3195void
// NetworkOPsImp::reportFeeChange (name line 3196 elided in this extraction)
// — schedule a pubServer() job, but only when the current fee summary
// differs from the last one published.
// NOTE(review): part of the fee-summary construction (original lines 3198,
// 3200) is elided here.
3199        app_.openLedger().current()->fees().base,
3201        app_.getFeeTrack()};
3202
3203    // only schedule the job if something has changed
3204    if (f != mLastFeeSummary)
3205    {
        // NOTE(review): the job-queue addJob call opening (original line
        // 3206) is elided here.
3207            jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
3208                pubServer();
3209            });
3210    }
3211}
3212
3213void
// NetworkOPsImp::reportConsensusStateChange (name line 3214 elided in this
// extraction) — queue a job that publishes the new consensus phase to
// subscribers via pubConsensus(phase).
// NOTE(review): the addJob call opening and job-type argument (original
// lines 3216-3217) are elided here.
3215{
3218        "reportConsensusStateChange->pubConsensus",
3219        [this, phase]() { pubConsensus(phase); });
3220}
3221
3222inline void
// NOTE(review): the function name and parameter list (original line 3223)
// are elided in this extraction; the body sweeps the local-transaction set
// against `view` — presumably the local-tx sweep hook; confirm against the
// repository source.
3224{
3225    m_localTX->sweep(view);
3226}
3227inline std::size_t
// NOTE(review): the function name (original line 3228, presumably
// NetworkOPsImp::getLocalTxCount()) is elided in this extraction. Returns
// the number of transactions held in the local-transaction set.
3229{
3230    return m_localTX->size();
3231}
3232
3233// This routine should only be used to publish accepted or validated
3234// transactions.
// Builds the "transaction" stream message for all supported API versions:
// transaction JSON (plus delivered-amount/deliver_max handling), metadata,
// CTID when derivable, ledger linkage, engine result, and — for offer
// creates funded by a third party — the owner's funds.
// NOTE(review): the return type/function-name lines (original 3235-3236),
// the optional meta parameter (line 3241), and the `jvObj` declaration
// (line 3243) are elided in this extraction.
3237    std::shared_ptr<STTx const> const& transaction,
3238    TER result,
3239    bool validated,
3240    std::shared_ptr<ReadView const> const& ledger,
3242{
3244    std::string sToken;
3245    std::string sHuman;
3246
3247    transResultInfo(result, sToken, sHuman);
3248
3249    jvObj[jss::type] = "transaction";
3250    // NOTE jvObj is not a finished object for either API version. After
3251    // it's populated, we need to finish it for a specific API version. This is
3252    // done in a loop, near the end of this function.
3253    jvObj[jss::transaction] =
3254        transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3255
3256    if (meta)
3257    {
3258        jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
        // NOTE(review): the helper-call names on original lines 3259 and
        // 3261 (presumably insertDeliveredAmount / insertNFTSyntheticInJson
        // or similar) are elided here.
3260            jvObj[jss::meta], *ledger, transaction, meta->get());
3262            jvObj[jss::meta], transaction, meta->get());
3263    }
3264
3265    // add CTID where the needed data for it exists
3266    if (auto const& lookup = ledger->txRead(transaction->getTransactionID());
3267        lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3268    {
3269        uint32_t const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
3270        uint32_t netID = app_.config().NETWORK_ID;
3271        if (transaction->isFieldPresent(sfNetworkID))
3272            netID = transaction->getFieldU32(sfNetworkID);
3273
        // NOTE(review): the binding of `ctid` (original line 3274) is
        // elided here.
3275                RPC::encodeCTID(ledger->info().seq, txnSeq, netID);
3276            ctid)
3277            jvObj[jss::ctid] = *ctid;
3278    }
3279    if (!ledger->open())
3280        jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3281
3282    if (validated)
3283    {
3284        jvObj[jss::ledger_index] = ledger->info().seq;
3285        jvObj[jss::transaction][jss::date] =
3286            ledger->info().closeTime.time_since_epoch().count();
3287        jvObj[jss::validated] = true;
3288        jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3289
3290        // WRITEME: Put the account next seq here
3291    }
3292    else
3293    {
3294        jvObj[jss::validated] = false;
3295        jvObj[jss::ledger_current_index] = ledger->info().seq;
3296    }
3297
3298    jvObj[jss::status] = validated ? "closed" : "proposed";
3299    jvObj[jss::engine_result] = sToken;
3300    jvObj[jss::engine_result_code] = result;
3301    jvObj[jss::engine_result_message] = sHuman;
3302
3303    if (transaction->getTxnType() == ttOFFER_CREATE)
3304    {
3305        auto const account = transaction->getAccountID(sfAccount);
3306        auto const amount = transaction->getFieldAmount(sfTakerGets);
3307
3308        // If the offer create is not self funded then add the owner balance
3309        if (account != amount.issue().account)
3310        {
3311            auto const ownerFunds = accountFunds(
3312                *ledger,
3313                account,
3314                amount,
                // NOTE(review): the freeze-handling argument (original line
                // 3315) is elided here.
3316                app_.journal("View"));
3317            jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3318        }
3319    }
3320
3321    std::string const hash = to_string(transaction->getTransactionID());
3322    MultiApiJson multiObj{jvObj};
    // Finish the object per API version: v1 keeps "transaction" with an
    // embedded hash; v2+ renames it to "tx_json" and hoists the hash.
    // NOTE(review): the visitor call opening (original line 3323) and the
    // lambda parameter list (line 3326) are elided here.
3324        multiObj.visit(),  //
3325        [&]<unsigned Version>(
3327            RPC::insertDeliverMax(
3328                jvTx[jss::transaction], transaction->getTxnType(), Version);
3329
3330            if constexpr (Version > 1)
3331            {
3332                jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3333                jvTx[jss::hash] = hash;
3334            }
3335            else
3336            {
3337                jvTx[jss::transaction][jss::hash] = hash;
3338            }
3339        });
3340
3341    return multiObj;
3342}
3343
3344void
// NetworkOPsImp::pubValidatedTransaction (name line 3345 elided in this
// extraction) — publish one validated transaction from an accepted ledger to
// the sTransactions and sRTTransactions streams, notify the order-book DB on
// success, and fan out to per-account subscribers. `last` marks the final
// transaction of the ledger (used as the account-history boundary).
3346    std::shared_ptr<ReadView const> const& ledger,
3347    AcceptedLedgerTx const& transaction,
3348    bool last)
3349{
3350    auto const& stTxn = transaction.getTxn();
3351
3352    // Create two different Json objects, for different API versions
3353    auto const metaRef = std::ref(transaction.getMeta());
3354    auto const trResult = transaction.getResult();
3355    MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3356
3357    {
        // NOTE(review): the lock acquisition guarding mStreamMaps (original
        // line 3358) is elided in this extraction.
3359
3360        auto it = mStreamMaps[sTransactions].begin();
3361        while (it != mStreamMaps[sTransactions].end())
3362        {
3363            InfoSub::pointer p = it->second.lock();
3364
3365            if (p)
3366            {
3367                jvObj.visit(
3368                    p->getApiVersion(),  //
3369                    [&](Json::Value const& jv) { p->send(jv, true); });
3370                ++it;
3371            }
3372            else
3373                it = mStreamMaps[sTransactions].erase(it);
3374        }
3375
3376        it = mStreamMaps[sRTTransactions].begin();
3377
3378        while (it != mStreamMaps[sRTTransactions].end())
3379        {
3380            InfoSub::pointer p = it->second.lock();
3381
3382            if (p)
3383            {
3384                jvObj.visit(
3385                    p->getApiVersion(),  //
3386                    [&](Json::Value const& jv) { p->send(jv, true); });
3387                ++it;
3388            }
3389            else
3390                it = mStreamMaps[sRTTransactions].erase(it);
3391        }
3392    }
3393
3394    if (transaction.getResult() == tesSUCCESS)
3395        app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3396
3397    pubAccountTransaction(ledger, transaction, last);
3398}
3399
3400void
// NetworkOPsImp::pubAccountTransaction (name line 3401 elided in this
// extraction) — notify per-account subscribers (real-time, accepted, and
// account-history streams) about one validated transaction. Expired weak
// subscriber entries are pruned as a side effect.
3402    std::shared_ptr<ReadView const> const& ledger,
3403    AcceptedLedgerTx const& transaction,
3404    bool last)
3405{
    // NOTE(review): the declaration of `notify` (original line 3406, a set
    // of InfoSub pointers) and the lock acquisition (line 3413) are elided
    // in this extraction.
3407    int iProposed = 0;
3408    int iAccepted = 0;
3409
3410    std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3411    auto const currLedgerSeq = ledger->seq();
3412    {
3414
3415        if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3417        {
3418            for (auto const& affectedAccount : transaction.getAffected())
3419            {
                // Real-time subscribers for this account.
3420                if (auto simiIt = mSubRTAccount.find(affectedAccount);
3421                    simiIt != mSubRTAccount.end())
3422                {
3423                    auto it = simiIt->second.begin();
3424
3425                    while (it != simiIt->second.end())
3426                    {
3427                        InfoSub::pointer p = it->second.lock();
3428
3429                        if (p)
3430                        {
3431                            notify.insert(p);
3432                            ++it;
3433                            ++iProposed;
3434                        }
3435                        else
3436                            it = simiIt->second.erase(it);
3437                    }
3438                }
3439
                // Accepted-transaction subscribers for this account.
3440                if (auto simiIt = mSubAccount.find(affectedAccount);
3441                    simiIt != mSubAccount.end())
3442                {
3443                    auto it = simiIt->second.begin();
3444                    while (it != simiIt->second.end())
3445                    {
3446                        InfoSub::pointer p = it->second.lock();
3447
3448                        if (p)
3449                        {
3450                            notify.insert(p);
3451                            ++it;
3452                            ++iAccepted;
3453                        }
3454                        else
3455                            it = simiIt->second.erase(it);
3456                    }
3457                }
3458
                // Account-history subscribers: skip entries still at or
                // before their separation ledger (history is delivered by
                // the background job up to that point).
3459                if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3460                    histoIt != mSubAccountHistory.end())
3461                {
3462                    auto& subs = histoIt->second;
3463                    auto it = subs.begin();
3464                    while (it != subs.end())
3465                    {
3466                        SubAccountHistoryInfoWeak const& info = it->second;
3467                        if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3468                        {
3469                            ++it;
3470                            continue;
3471                        }
3472
3473                        if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3474                        {
3475                            accountHistoryNotify.emplace_back(
3476                                SubAccountHistoryInfo{isSptr, info.index_});
3477                            ++it;
3478                        }
3479                        else
3480                        {
3481                            it = subs.erase(it);
3482                        }
3483                    }
3484                    if (subs.empty())
3485                        mSubAccountHistory.erase(histoIt);
3486                }
3487            }
3488        }
3489    }
3490
3491    JLOG(m_journal.trace())
3492        << "pubAccountTransaction: "
3493        << "proposed=" << iProposed << ", accepted=" << iAccepted;
3494
3495    if (!notify.empty() || !accountHistoryNotify.empty())
3496    {
3497        auto const& stTxn = transaction.getTxn();
3498
3499        // Create two different Json objects, for different API versions
3500        auto const metaRef = std::ref(transaction.getMeta());
3501        auto const trResult = transaction.getResult();
3502        MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3503
3504        for (InfoSub::ref isrListener : notify)
3505        {
3506            jvObj.visit(
3507                isrListener->getApiVersion(),  //
3508                [&](Json::Value const& jv) { isrListener->send(jv, true); });
3509        }
3510
3511        if (last)
3512            jvObj.set(jss::account_history_boundary, true);
3513
3514        XRPL_ASSERT(
3515            jvObj.isMember(jss::account_history_tx_stream) ==
3517            "ripple::NetworkOPsImp::pubAccountTransaction : "
3518            "account_history_tx_stream not set");
3519        for (auto& info : accountHistoryNotify)
3520        {
3521            auto& index = info.index_;
3522            if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3523                jvObj.set(jss::account_history_tx_first, true);
3524
3525            jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3526
3527            jvObj.visit(
3528                info.sink_->getApiVersion(),  //
3529                [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3530        }
3531    }
3532}
3533
3534void
// NetworkOPsImp::pubProposedAccountTransaction (name line 3535 elided in
// this extraction) — notify real-time per-account subscribers about a
// proposed (not yet validated) transaction. Returns early when there are no
// real-time account subscriptions at all.
3536    std::shared_ptr<ReadView const> const& ledger,
// NOTE(review): the transaction parameter (original line 3537, presumably
// `std::shared_ptr<STTx const> const& tx`), the `notify` set declaration
// (line 3540), and the lock acquisition (line 3546) are elided in this
// extraction.
3538    TER result)
3539{
3541    int iProposed = 0;
3542
3543    std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3544
3545    {
3547
3548        if (mSubRTAccount.empty())
3549            return;
3550
3551        if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3553        {
3554            for (auto const& affectedAccount : tx->getMentionedAccounts())
3555            {
3556                if (auto simiIt = mSubRTAccount.find(affectedAccount);
3557                    simiIt != mSubRTAccount.end())
3558                {
3559                    auto it = simiIt->second.begin();
3560
3561                    while (it != simiIt->second.end())
3562                    {
3563                        InfoSub::pointer p = it->second.lock();
3564
                        // Queue live subscribers; prune expired entries.
3565                        if (p)
3566                        {
3567                            notify.insert(p);
3568                            ++it;
3569                            ++iProposed;
3570                        }
3571                        else
3572                            it = simiIt->second.erase(it);
3573                    }
3574                }
3575            }
3576        }
3577    }
3578
3579    JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3580
3581    if (!notify.empty() || !accountHistoryNotify.empty())
3582    {
3583        // Create two different Json objects, for different API versions
3584        MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3585
3586        for (InfoSub::ref isrListener : notify)
3587            jvObj.visit(
3588                isrListener->getApiVersion(),  //
3589                [&](Json::Value const& jv) { isrListener->send(jv, true); });
3590
3591        XRPL_ASSERT(
3592            jvObj.isMember(jss::account_history_tx_stream) ==
3594            "ripple::NetworkOPs::pubProposedAccountTransaction : "
3595            "account_history_tx_stream not set");
3596        for (auto& info : accountHistoryNotify)
3597        {
3598            auto& index = info.index_;
3599            if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3600                jvObj.set(jss::account_history_tx_first, true);
3601            jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3602            jvObj.visit(
3603                info.sink_->getApiVersion(),  //
3604                [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3605        }
3606    }
3607}
3608
3609//
3610// Monitoring
3611//
3612
3613void
// NetworkOPsImp::subAccount (name line 3614 elided in this extraction) —
// subscribe `isrListener` to transaction notifications for each account in
// `vnaAccountIDs`; `rt` selects the real-time map vs the accepted map. The
// subscription is recorded both on the InfoSub and in the server-side map.
3615    InfoSub::ref isrListener,
3616    hash_set<AccountID> const& vnaAccountIDs,
3617    bool rt)
3618{
3619    SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3620
3621    for (auto const& naAccountID : vnaAccountIDs)
3622    {
3623        JLOG(m_journal.trace())
3624            << "subAccount: account: " << toBase58(naAccountID);
3625
3626        isrListener->insertSubAccountInfo(naAccountID, rt);
3627    }
3628
    // NOTE(review): the lock acquisition guarding subMap (original line
    // 3629) is elided in this extraction.
3630
3631    for (auto const& naAccountID : vnaAccountIDs)
3632    {
3633        auto simIterator = subMap.find(naAccountID);
3634        if (simIterator == subMap.end())
3635        {
3636            // Not found, note that account has a new single listner.
3637            SubMapType usisElement;
3638            usisElement[isrListener->getSeq()] = isrListener;
3639            // VFALCO NOTE This is making a needless copy of naAccountID
3640            subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3641        }
3642        else
3643        {
3644            // Found, note that the account has another listener.
3645            simIterator->second[isrListener->getSeq()] = isrListener;
3646        }
3647    }
3648}
3649
3650void
// NetworkOPsImp::unsubAccount (name line 3651 elided in this extraction) —
// remove the listener's subscription for each account from both the InfoSub
// itself and the server-side map (via unsubAccountInternal).
3652    InfoSub::ref isrListener,
3653    hash_set<AccountID> const& vnaAccountIDs,
3654    bool rt)
3655{
3656    for (auto const& naAccountID : vnaAccountIDs)
3657    {
3658        // Remove from the InfoSub
3659        isrListener->deleteSubAccountInfo(naAccountID, rt);
3660    }
3661
3662    // Remove from the server
3663    unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3664}
3665
3666void
// NetworkOPsImp::unsubAccountInternal (name line 3667 elided in this
// extraction) — erase the subscription keyed by `uSeq` from the selected
// per-account map, dropping the whole account entry once it has no
// remaining listeners.
// NOTE(review): the lock acquisition (original line 3672) is elided in this
// extraction.
3668    std::uint64_t uSeq,
3669    hash_set<AccountID> const& vnaAccountIDs,
3670    bool rt)
3671{
3673
3674    SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3675
3676    for (auto const& naAccountID : vnaAccountIDs)
3677    {
3678        auto simIterator = subMap.find(naAccountID);
3679
3680        if (simIterator != subMap.end())
3681        {
3682            // Found
3683            simIterator->second.erase(uSeq);
3684
3685            if (simIterator->second.empty())
3686            {
3687                // Don't need hash entry.
3688                subMap.erase(simIterator);
3689            }
3690        }
3691    }
3692}
3693
3694void
3696{
3697 enum DatabaseType { Sqlite, None };
3698 static auto const databaseType = [&]() -> DatabaseType {
3699 // Use a dynamic_cast to return DatabaseType::None
3700 // on failure.
3701 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3702 {
3703 return DatabaseType::Sqlite;
3704 }
3705 return DatabaseType::None;
3706 }();
3707
3708 if (databaseType == DatabaseType::None)
3709 {
3710 JLOG(m_journal.error())
3711 << "AccountHistory job for account "
3712 << toBase58(subInfo.index_->accountId_) << " no database";
3713 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3714 {
3715 sptr->send(rpcError(rpcINTERNAL), true);
3716 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3717 }
3718 return;
3719 }
3720
3723 "AccountHistoryTxStream",
3724 [this, dbType = databaseType, subInfo]() {
3725 auto const& accountId = subInfo.index_->accountId_;
3726 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3727 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3728
3729 JLOG(m_journal.trace())
3730 << "AccountHistory job for account " << toBase58(accountId)
3731 << " started. lastLedgerSeq=" << lastLedgerSeq;
3732
3733 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3734 std::shared_ptr<TxMeta> const& meta) -> bool {
3735 /*
3736 * genesis account: first tx is the one with seq 1
3737 * other account: first tx is the one created the account
3738 */
3739 if (accountId == genesisAccountId)
3740 {
3741 auto stx = tx->getSTransaction();
3742 if (stx->getAccountID(sfAccount) == accountId &&
3743 stx->getSeqValue() == 1)
3744 return true;
3745 }
3746
3747 for (auto& node : meta->getNodes())
3748 {
3749 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3750 continue;
3751
3752 if (node.isFieldPresent(sfNewFields))
3753 {
3754 if (auto inner = dynamic_cast<STObject const*>(
3755 node.peekAtPField(sfNewFields));
3756 inner)
3757 {
3758 if (inner->isFieldPresent(sfAccount) &&
3759 inner->getAccountID(sfAccount) == accountId)
3760 {
3761 return true;
3762 }
3763 }
3764 }
3765 }
3766
3767 return false;
3768 };
3769
3770 auto send = [&](Json::Value const& jvObj,
3771 bool unsubscribe) -> bool {
3772 if (auto sptr = subInfo.sinkWptr_.lock())
3773 {
3774 sptr->send(jvObj, true);
3775 if (unsubscribe)
3776 unsubAccountHistory(sptr, accountId, false);
3777 return true;
3778 }
3779
3780 return false;
3781 };
3782
3783 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3784 bool unsubscribe) -> bool {
3785 if (auto sptr = subInfo.sinkWptr_.lock())
3786 {
3787 jvObj.visit(
3788 sptr->getApiVersion(), //
3789 [&](Json::Value const& jv) { sptr->send(jv, true); });
3790
3791 if (unsubscribe)
3792 unsubAccountHistory(sptr, accountId, false);
3793 return true;
3794 }
3795
3796 return false;
3797 };
3798
3799 auto getMoreTxns =
3800 [&](std::uint32_t minLedger,
3801 std::uint32_t maxLedger,
3806 switch (dbType)
3807 {
3808 case Sqlite: {
3809 auto db = static_cast<SQLiteDatabase*>(
3812 accountId, minLedger, maxLedger, marker, 0, true};
3813 return db->newestAccountTxPage(options);
3814 }
3815 default: {
3816 UNREACHABLE(
3817 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3818 "getMoreTxns : invalid database type");
3819 return {};
3820 }
3821 }
3822 };
3823
3824 /*
3825 * search backward until the genesis ledger or asked to stop
3826 */
3827 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3828 {
3829 int feeChargeCount = 0;
3830 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3831 {
3832 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3833 ++feeChargeCount;
3834 }
3835 else
3836 {
3837 JLOG(m_journal.trace())
3838 << "AccountHistory job for account "
3839 << toBase58(accountId) << " no InfoSub. Fee charged "
3840 << feeChargeCount << " times.";
3841 return;
3842 }
3843
3844 // try to search in 1024 ledgers till reaching genesis ledgers
3845 auto startLedgerSeq =
3846 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3847 JLOG(m_journal.trace())
3848 << "AccountHistory job for account " << toBase58(accountId)
3849 << ", working on ledger range [" << startLedgerSeq << ","
3850 << lastLedgerSeq << "]";
3851
3852 auto haveRange = [&]() -> bool {
3853 std::uint32_t validatedMin = UINT_MAX;
3854 std::uint32_t validatedMax = 0;
3855 auto haveSomeValidatedLedgers =
3857 validatedMin, validatedMax);
3858
3859 return haveSomeValidatedLedgers &&
3860 validatedMin <= startLedgerSeq &&
3861 lastLedgerSeq <= validatedMax;
3862 }();
3863
3864 if (!haveRange)
3865 {
3866 JLOG(m_journal.debug())
3867 << "AccountHistory reschedule job for account "
3868 << toBase58(accountId) << ", incomplete ledger range ["
3869 << startLedgerSeq << "," << lastLedgerSeq << "]";
3871 return;
3872 }
3873
3875 while (!subInfo.index_->stopHistorical_)
3876 {
3877 auto dbResult =
3878 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3879 if (!dbResult)
3880 {
3881 JLOG(m_journal.debug())
3882 << "AccountHistory job for account "
3883 << toBase58(accountId) << " getMoreTxns failed.";
3884 send(rpcError(rpcINTERNAL), true);
3885 return;
3886 }
3887
3888 auto const& txns = dbResult->first;
3889 marker = dbResult->second;
3890 size_t num_txns = txns.size();
3891 for (size_t i = 0; i < num_txns; ++i)
3892 {
3893 auto const& [tx, meta] = txns[i];
3894
3895 if (!tx || !meta)
3896 {
3897 JLOG(m_journal.debug())
3898 << "AccountHistory job for account "
3899 << toBase58(accountId) << " empty tx or meta.";
3900 send(rpcError(rpcINTERNAL), true);
3901 return;
3902 }
3903 auto curTxLedger =
3905 tx->getLedger());
3906 if (!curTxLedger)
3907 {
3908 JLOG(m_journal.debug())
3909 << "AccountHistory job for account "
3910 << toBase58(accountId) << " no ledger.";
3911 send(rpcError(rpcINTERNAL), true);
3912 return;
3913 }
3915 tx->getSTransaction();
3916 if (!stTxn)
3917 {
3918 JLOG(m_journal.debug())
3919 << "AccountHistory job for account "
3920 << toBase58(accountId)
3921 << " getSTransaction failed.";
3922 send(rpcError(rpcINTERNAL), true);
3923 return;
3924 }
3925
3926 auto const mRef = std::ref(*meta);
3927 auto const trR = meta->getResultTER();
3928 MultiApiJson jvTx =
3929 transJson(stTxn, trR, true, curTxLedger, mRef);
3930
3931 jvTx.set(
3932 jss::account_history_tx_index, txHistoryIndex--);
3933 if (i + 1 == num_txns ||
3934 txns[i + 1].first->getLedger() != tx->getLedger())
3935 jvTx.set(jss::account_history_boundary, true);
3936
3937 if (isFirstTx(tx, meta))
3938 {
3939 jvTx.set(jss::account_history_tx_first, true);
3940 sendMultiApiJson(jvTx, false);
3941
3942 JLOG(m_journal.trace())
3943 << "AccountHistory job for account "
3944 << toBase58(accountId)
3945 << " done, found last tx.";
3946 return;
3947 }
3948 else
3949 {
3950 sendMultiApiJson(jvTx, false);
3951 }
3952 }
3953
3954 if (marker)
3955 {
3956 JLOG(m_journal.trace())
3957 << "AccountHistory job for account "
3958 << toBase58(accountId)
3959 << " paging, marker=" << marker->ledgerSeq << ":"
3960 << marker->txnSeq;
3961 }
3962 else
3963 {
3964 break;
3965 }
3966 }
3967
3968 if (!subInfo.index_->stopHistorical_)
3969 {
3970 lastLedgerSeq = startLedgerSeq - 1;
3971 if (lastLedgerSeq <= 1)
3972 {
3973 JLOG(m_journal.trace())
3974 << "AccountHistory job for account "
3975 << toBase58(accountId)
3976 << " done, reached genesis ledger.";
3977 return;
3978 }
3979 }
3980 }
3981 });
3982}
3983
3984void
3986 std::shared_ptr<ReadView const> const& ledger,
3988{
3989 subInfo.index_->separationLedgerSeq_ = ledger->seq();
3990 auto const& accountId = subInfo.index_->accountId_;
3991 auto const accountKeylet = keylet::account(accountId);
3992 if (!ledger->exists(accountKeylet))
3993 {
3994 JLOG(m_journal.debug())
3995 << "subAccountHistoryStart, no account " << toBase58(accountId)
3996 << ", no need to add AccountHistory job.";
3997 return;
3998 }
3999 if (accountId == genesisAccountId)
4000 {
4001 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
4002 {
4003 if (sleAcct->getFieldU32(sfSequence) == 1)
4004 {
4005 JLOG(m_journal.debug())
4006 << "subAccountHistoryStart, genesis account "
4007 << toBase58(accountId)
4008 << " does not have tx, no need to add AccountHistory job.";
4009 return;
4010 }
4011 }
4012 else
4013 {
4014 UNREACHABLE(
4015 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
4016 "access genesis account");
4017 return;
4018 }
4019 }
4020 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
4021 subInfo.index_->haveHistorical_ = true;
4022
4023 JLOG(m_journal.debug())
4024 << "subAccountHistoryStart, add AccountHistory job: accountId="
4025 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
4026
4027 addAccountHistoryJob(subInfo);
4028}
4029
4032 InfoSub::ref isrListener,
4033 AccountID const& accountId)
4034{
4035 if (!isrListener->insertSubAccountHistory(accountId))
4036 {
4037 JLOG(m_journal.debug())
4038 << "subAccountHistory, already subscribed to account "
4039 << toBase58(accountId);
4040 return rpcINVALID_PARAMS;
4041 }
4042
4045 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
4046 auto simIterator = mSubAccountHistory.find(accountId);
4047 if (simIterator == mSubAccountHistory.end())
4048 {
4050 inner.emplace(isrListener->getSeq(), ahi);
4052 simIterator, std::make_pair(accountId, inner));
4053 }
4054 else
4055 {
4056 simIterator->second.emplace(isrListener->getSeq(), ahi);
4057 }
4058
4059 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
4060 if (ledger)
4061 {
4062 subAccountHistoryStart(ledger, ahi);
4063 }
4064 else
4065 {
4066 // The node does not have validated ledgers, so wait for
4067 // one before start streaming.
4068 // In this case, the subscription is also considered successful.
4069 JLOG(m_journal.debug())
4070 << "subAccountHistory, no validated ledger yet, delay start";
4071 }
4072
4073 return rpcSUCCESS;
4074}
4075
4076void
4078 InfoSub::ref isrListener,
4079 AccountID const& account,
4080 bool historyOnly)
4081{
4082 if (!historyOnly)
4083 isrListener->deleteSubAccountHistory(account);
4084 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
4085}
4086
4087void
4089 std::uint64_t seq,
4090 AccountID const& account,
4091 bool historyOnly)
4092{
4094 auto simIterator = mSubAccountHistory.find(account);
4095 if (simIterator != mSubAccountHistory.end())
4096 {
4097 auto& subInfoMap = simIterator->second;
4098 auto subInfoIter = subInfoMap.find(seq);
4099 if (subInfoIter != subInfoMap.end())
4100 {
4101 subInfoIter->second.index_->stopHistorical_ = true;
4102 }
4103
4104 if (!historyOnly)
4105 {
4106 simIterator->second.erase(seq);
4107 if (simIterator->second.empty())
4108 {
4109 mSubAccountHistory.erase(simIterator);
4110 }
4111 }
4112 JLOG(m_journal.debug())
4113 << "unsubAccountHistory, account " << toBase58(account)
4114 << ", historyOnly = " << (historyOnly ? "true" : "false");
4115 }
4116}
4117
4118bool
4120{
4121 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
4122 listeners->addSubscriber(isrListener);
4123 else
4124 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
4125 return true;
4126}
4127
4128bool
4130{
4131 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
4132 listeners->removeSubscriber(uSeq);
4133
4134 return true;
4135}
4136
4140{
4141 // This code-path is exclusively used when the server is in standalone
4142 // mode via `ledger_accept`
4143 XRPL_ASSERT(
4144 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
4145
4146 if (!m_standalone)
4147 Throw<std::runtime_error>(
4148 "Operation only possible in STANDALONE mode.");
4149
4150 // FIXME Could we improve on this and remove the need for a specialized
4151 // API in Consensus?
4152 beginConsensus(m_ledgerMaster.getClosedLedger()->info().hash, {});
4153 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
4154 return m_ledgerMaster.getCurrentLedger()->info().seq;
4155}
4156
4157// <-- bool: true=added, false=already there
4158bool
4160{
4161 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
4162 {
4163 jvResult[jss::ledger_index] = lpClosed->info().seq;
4164 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
4165 jvResult[jss::ledger_time] = Json::Value::UInt(
4166 lpClosed->info().closeTime.time_since_epoch().count());
4167 if (!lpClosed->rules().enabled(featureXRPFees))
4168 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
4169 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
4170 jvResult[jss::reserve_base] =
4171 lpClosed->fees().accountReserve(0).jsonClipped();
4172 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
4173 }
4174
4176 {
4177 jvResult[jss::validated_ledgers] =
4179 }
4180
4182 return mStreamMaps[sLedger]
4183 .emplace(isrListener->getSeq(), isrListener)
4184 .second;
4185}
4186
4187// <-- bool: true=added, false=already there
4188bool
4190{
4193 .emplace(isrListener->getSeq(), isrListener)
4194 .second;
4195}
4196
4197// <-- bool: true=erased, false=was not there
4198bool
4200{
4202 return mStreamMaps[sLedger].erase(uSeq);
4203}
4204
4205// <-- bool: true=erased, false=was not there
4206bool
4208{
4210 return mStreamMaps[sBookChanges].erase(uSeq);
4211}
4212
4213// <-- bool: true=added, false=already there
4214bool
4216{
4218 return mStreamMaps[sManifests]
4219 .emplace(isrListener->getSeq(), isrListener)
4220 .second;
4221}
4222
4223// <-- bool: true=erased, false=was not there
4224bool
4226{
4228 return mStreamMaps[sManifests].erase(uSeq);
4229}
4230
4231// <-- bool: true=added, false=already there
4232bool
4234 InfoSub::ref isrListener,
4235 Json::Value& jvResult,
4236 bool admin)
4237{
4238 uint256 uRandom;
4239
4240 if (m_standalone)
4241 jvResult[jss::stand_alone] = m_standalone;
4242
4243 // CHECKME: is it necessary to provide a random number here?
4244 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
4245
4246 auto const& feeTrack = app_.getFeeTrack();
4247 jvResult[jss::random] = to_string(uRandom);
4248 jvResult[jss::server_status] = strOperatingMode(admin);
4249 jvResult[jss::load_base] = feeTrack.getLoadBase();
4250 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4251 jvResult[jss::hostid] = getHostId(admin);
4252 jvResult[jss::pubkey_node] =
4254
4256 return mStreamMaps[sServer]
4257 .emplace(isrListener->getSeq(), isrListener)
4258 .second;
4259}
4260
4261// <-- bool: true=erased, false=was not there
4262bool
4264{
4266 return mStreamMaps[sServer].erase(uSeq);
4267}
4268
4269// <-- bool: true=added, false=already there
4270bool
4272{
4275 .emplace(isrListener->getSeq(), isrListener)
4276 .second;
4277}
4278
4279// <-- bool: true=erased, false=was not there
4280bool
4282{
4284 return mStreamMaps[sTransactions].erase(uSeq);
4285}
4286
4287// <-- bool: true=added, false=already there
4288bool
4290{
4293 .emplace(isrListener->getSeq(), isrListener)
4294 .second;
4295}
4296
4297// <-- bool: true=erased, false=was not there
4298bool
4300{
4302 return mStreamMaps[sRTTransactions].erase(uSeq);
4303}
4304
4305// <-- bool: true=added, false=already there
4306bool
4308{
4311 .emplace(isrListener->getSeq(), isrListener)
4312 .second;
4313}
4314
4315void
4317{
4318 accounting_.json(obj);
4319}
4320
4321// <-- bool: true=erased, false=was not there
4322bool
4324{
4326 return mStreamMaps[sValidations].erase(uSeq);
4327}
4328
4329// <-- bool: true=added, false=already there
4330bool
4332{
4334 return mStreamMaps[sPeerStatus]
4335 .emplace(isrListener->getSeq(), isrListener)
4336 .second;
4337}
4338
4339// <-- bool: true=erased, false=was not there
4340bool
4342{
4344 return mStreamMaps[sPeerStatus].erase(uSeq);
4345}
4346
4347// <-- bool: true=added, false=already there
4348bool
4350{
4353 .emplace(isrListener->getSeq(), isrListener)
4354 .second;
4355}
4356
4357// <-- bool: true=erased, false=was not there
4358bool
4360{
4362 return mStreamMaps[sConsensusPhase].erase(uSeq);
4363}
4364
4367{
4369
4370 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4371
4372 if (it != mRpcSubMap.end())
4373 return it->second;
4374
4375 return InfoSub::pointer();
4376}
4377
4380{
4382
4383 mRpcSubMap.emplace(strUrl, rspEntry);
4384
4385 return rspEntry;
4386}
4387
4388bool
4390{
4392 auto pInfo = findRpcSub(strUrl);
4393
4394 if (!pInfo)
4395 return false;
4396
4397 // check to see if any of the stream maps still hold a weak reference to
4398 // this entry before removing
4399 for (SubMapType const& map : mStreamMaps)
4400 {
4401 if (map.find(pInfo->getSeq()) != map.end())
4402 return false;
4403 }
4404 mRpcSubMap.erase(strUrl);
4405 return true;
4406}
4407
4408#ifndef USE_NEW_BOOK_PAGE
4409
4410// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4411// work, but it demonstrated poor performance.
4412//
4413void
4416 Book const& book,
4417 AccountID const& uTakerID,
4418 bool const bProof,
4419 unsigned int iLimit,
4420 Json::Value const& jvMarker,
4421 Json::Value& jvResult)
4422{ // CAUTION: This is the old get book page logic
4423 Json::Value& jvOffers =
4424 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4425
4427 uint256 const uBookBase = getBookBase(book);
4428 uint256 const uBookEnd = getQualityNext(uBookBase);
4429 uint256 uTipIndex = uBookBase;
4430
4431 if (auto stream = m_journal.trace())
4432 {
4433 stream << "getBookPage:" << book;
4434 stream << "getBookPage: uBookBase=" << uBookBase;
4435 stream << "getBookPage: uBookEnd=" << uBookEnd;
4436 stream << "getBookPage: uTipIndex=" << uTipIndex;
4437 }
4438
4439 ReadView const& view = *lpLedger;
4440
4441 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4442 isGlobalFrozen(view, book.in.account);
4443
4444 bool bDone = false;
4445 bool bDirectAdvance = true;
4446
4447 std::shared_ptr<SLE const> sleOfferDir;
4448 uint256 offerIndex;
4449 unsigned int uBookEntry;
4450 STAmount saDirRate;
4451
4452 auto const rate = transferRate(view, book.out.account);
4453 auto viewJ = app_.journal("View");
4454
4455 while (!bDone && iLimit-- > 0)
4456 {
4457 if (bDirectAdvance)
4458 {
4459 bDirectAdvance = false;
4460
4461 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4462
4463 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4464 if (ledgerIndex)
4465 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4466 else
4467 sleOfferDir.reset();
4468
4469 if (!sleOfferDir)
4470 {
4471 JLOG(m_journal.trace()) << "getBookPage: bDone";
4472 bDone = true;
4473 }
4474 else
4475 {
4476 uTipIndex = sleOfferDir->key();
4477 saDirRate = amountFromQuality(getQuality(uTipIndex));
4478
4479 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4480
4481 JLOG(m_journal.trace())
4482 << "getBookPage: uTipIndex=" << uTipIndex;
4483 JLOG(m_journal.trace())
4484 << "getBookPage: offerIndex=" << offerIndex;
4485 }
4486 }
4487
4488 if (!bDone)
4489 {
4490 auto sleOffer = view.read(keylet::offer(offerIndex));
4491
4492 if (sleOffer)
4493 {
4494 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4495 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4496 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4497 STAmount saOwnerFunds;
4498 bool firstOwnerOffer(true);
4499
4500 if (book.out.account == uOfferOwnerID)
4501 {
4502 // If an offer is selling issuer's own IOUs, it is fully
4503 // funded.
4504 saOwnerFunds = saTakerGets;
4505 }
4506 else if (bGlobalFreeze)
4507 {
4508 // If either asset is globally frozen, consider all offers
4509 // that aren't ours to be totally unfunded
4510 saOwnerFunds.clear(book.out);
4511 }
4512 else
4513 {
4514 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4515 if (umBalanceEntry != umBalance.end())
4516 {
4517 // Found in running balance table.
4518
4519 saOwnerFunds = umBalanceEntry->second;
4520 firstOwnerOffer = false;
4521 }
4522 else
4523 {
4524 // Did not find balance in table.
4525
4526 saOwnerFunds = accountHolds(
4527 view,
4528 uOfferOwnerID,
4529 book.out.currency,
4530 book.out.account,
4532 viewJ);
4533
4534 if (saOwnerFunds < beast::zero)
4535 {
4536 // Treat negative funds as zero.
4537
4538 saOwnerFunds.clear();
4539 }
4540 }
4541 }
4542
4543 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4544
4545 STAmount saTakerGetsFunded;
4546 STAmount saOwnerFundsLimit = saOwnerFunds;
4547 Rate offerRate = parityRate;
4548
4549 if (rate != parityRate
4550 // Have a transfer fee.
4551 && uTakerID != book.out.account
4552 // Not taking offers of own IOUs.
4553 && book.out.account != uOfferOwnerID)
4554 // Offer owner not issuing own funds
4555 {
4556 // Need to charge a transfer fee to offer owner.
4557 offerRate = rate;
4558 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4559 }
4560
4561 if (saOwnerFundsLimit >= saTakerGets)
4562 {
4563 // Sufficient funds no shenanigans.
4564 saTakerGetsFunded = saTakerGets;
4565 }
4566 else
4567 {
4568 // Only provide, if not fully funded.
4569
4570 saTakerGetsFunded = saOwnerFundsLimit;
4571
4572 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4573 std::min(
4574 saTakerPays,
4575 multiply(
4576 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4577 .setJson(jvOffer[jss::taker_pays_funded]);
4578 }
4579
4580 STAmount saOwnerPays = (parityRate == offerRate)
4581 ? saTakerGetsFunded
4582 : std::min(
4583 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4584
4585 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4586
4587 // Include all offers funded and unfunded
4588 Json::Value& jvOf = jvOffers.append(jvOffer);
4589 jvOf[jss::quality] = saDirRate.getText();
4590
4591 if (firstOwnerOffer)
4592 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4593 }
4594 else
4595 {
4596 JLOG(m_journal.warn()) << "Missing offer";
4597 }
4598
4599 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4600 {
4601 bDirectAdvance = true;
4602 }
4603 else
4604 {
4605 JLOG(m_journal.trace())
4606 << "getBookPage: offerIndex=" << offerIndex;
4607 }
4608 }
4609 }
4610
4611 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4612 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4613}
4614
4615#else
4616
4617// This is the new code that uses the book iterators
4618// It has temporarily been disabled
4619
4620void
4623 Book const& book,
4624 AccountID const& uTakerID,
4625 bool const bProof,
4626 unsigned int iLimit,
4627 Json::Value const& jvMarker,
4628 Json::Value& jvResult)
4629{
4630 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4631
4633
4634 MetaView lesActive(lpLedger, tapNONE, true);
4635 OrderBookIterator obIterator(lesActive, book);
4636
4637 auto const rate = transferRate(lesActive, book.out.account);
4638
4639 bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4640 lesActive.isGlobalFrozen(book.in.account);
4641
4642 while (iLimit-- > 0 && obIterator.nextOffer())
4643 {
4644 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4645 if (sleOffer)
4646 {
4647 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4648 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4649 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4650 STAmount saDirRate = obIterator.getCurrentRate();
4651 STAmount saOwnerFunds;
4652
4653 if (book.out.account == uOfferOwnerID)
4654 {
4655 // If offer is selling issuer's own IOUs, it is fully funded.
4656 saOwnerFunds = saTakerGets;
4657 }
4658 else if (bGlobalFreeze)
4659 {
4660 // If either asset is globally frozen, consider all offers
4661 // that aren't ours to be totally unfunded
4662 saOwnerFunds.clear(book.out);
4663 }
4664 else
4665 {
4666 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4667
4668 if (umBalanceEntry != umBalance.end())
4669 {
4670 // Found in running balance table.
4671
4672 saOwnerFunds = umBalanceEntry->second;
4673 }
4674 else
4675 {
4676 // Did not find balance in table.
4677
4678 saOwnerFunds = lesActive.accountHolds(
4679 uOfferOwnerID,
4680 book.out.currency,
4681 book.out.account,
4683
4684 if (saOwnerFunds.isNegative())
4685 {
4686 // Treat negative funds as zero.
4687
4688 saOwnerFunds.zero();
4689 }
4690 }
4691 }
4692
4693 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4694
4695 STAmount saTakerGetsFunded;
4696 STAmount saOwnerFundsLimit = saOwnerFunds;
4697 Rate offerRate = parityRate;
4698
4699 if (rate != parityRate
4700 // Have a transfer fee.
4701 && uTakerID != book.out.account
4702 // Not taking offers of own IOUs.
4703 && book.out.account != uOfferOwnerID)
4704 // Offer owner not issuing own funds
4705 {
4706 // Need to charge a transfer fee to offer owner.
4707 offerRate = rate;
4708 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4709 }
4710
4711 if (saOwnerFundsLimit >= saTakerGets)
4712 {
4713 // Sufficient funds no shenanigans.
4714 saTakerGetsFunded = saTakerGets;
4715 }
4716 else
4717 {
4718 // Only provide, if not fully funded.
4719 saTakerGetsFunded = saOwnerFundsLimit;
4720
4721 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4722
4723 // TODO(tom): The result of this expression is not used - what's
4724 // going on here?
4725 std::min(
4726 saTakerPays,
4727 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4728 .setJson(jvOffer[jss::taker_pays_funded]);
4729 }
4730
4731 STAmount saOwnerPays = (parityRate == offerRate)
4732 ? saTakerGetsFunded
4733 : std::min(
4734 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4735
4736 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4737
4738 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4739 {
4740 // Only provide funded offers and offers of the taker.
4741 Json::Value& jvOf = jvOffers.append(jvOffer);
4742 jvOf[jss::quality] = saDirRate.getText();
4743 }
4744 }
4745 }
4746
4747 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4748 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4749}
4750
4751#endif
4752
4753inline void
4755{
4756 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4757 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4759 counters[static_cast<std::size_t>(mode)].dur += current;
4760
4763 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4764 .dur.count());
4766 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4767 .dur.count());
4769 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4771 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4772 .dur.count());
4774 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4775
4777 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4778 .transitions);
4780 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4781 .transitions);
4783 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4785 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4786 .transitions);
4788 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4789}
4790
4791void
4793{
4794 auto now = std::chrono::steady_clock::now();
4795
4796 std::lock_guard lock(mutex_);
4797 ++counters_[static_cast<std::size_t>(om)].transitions;
4798 if (om == OperatingMode::FULL &&
4799 counters_[static_cast<std::size_t>(om)].transitions == 1)
4800 {
4801 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4802 now - processStart_)
4803 .count();
4804 }
4805 counters_[static_cast<std::size_t>(mode_)].dur +=
4806 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4807
4808 mode_ = om;
4809 start_ = now;
4810}
4811
4812void
4814{
4815 auto [counters, mode, start, initialSync] = getCounterData();
4816 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4818 counters[static_cast<std::size_t>(mode)].dur += current;
4819
4820 obj[jss::state_accounting] = Json::objectValue;
4822 i <= static_cast<std::size_t>(OperatingMode::FULL);
4823 ++i)
4824 {
4825 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4826 auto& state = obj[jss::state_accounting][states_[i]];
4827 state[jss::transitions] = std::to_string(counters[i].transitions);
4828 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4829 }
4830 obj[jss::server_state_duration_us] = std::to_string(current.count());
4831 if (initialSync)
4832 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4833}
4834
4835//------------------------------------------------------------------------------
4836
4839 Application& app,
4841 bool standalone,
4842 std::size_t minPeerCount,
4843 bool startvalid,
4844 JobQueue& job_queue,
4846 ValidatorKeys const& validatorKeys,
4847 boost::asio::io_service& io_svc,
4848 beast::Journal journal,
4849 beast::insight::Collector::ptr const& collector)
4850{
4851 return std::make_unique<NetworkOPsImp>(
4852 app,
4853 clock,
4854 standalone,
4855 minPeerCount,
4856 startvalid,
4857 job_queue,
4859 validatorKeys,
4860 io_svc,
4861 journal,
4862 collector);
4863}
4864
4865} // namespace ripple
T any_of(T... args)
T back_inserter(T... args)
T begin(T... args)
T bind(T... args)
Decorator for streaming out compact json.
Definition: json_writer.h:318
Lightweight wrapper to tag static string.
Definition: json_value.h:64
Represents a JSON value.
Definition: json_value.h:150
Json::UInt UInt
Definition: json_value.h:157
Value & append(Value const &value)
Append value to array at the end.
Definition: json_value.cpp:910
bool isMember(char const *key) const
Return true if the object has a member named key.
Definition: json_value.cpp:962
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Definition: json_value.cpp:854
A generic endpoint for log messages.
Definition: Journal.h:60
Stream error() const
Definition: Journal.h:346
Stream debug() const
Definition: Journal.h:328
Stream info() const
Definition: Journal.h:334
Stream trace() const
Severity stream access functions.
Definition: Journal.h:322
Stream warn() const
Definition: Journal.h:340
A metric for measuring an integral value.
Definition: Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition: Gauge.h:68
A reference to a handler for performing polled collection.
Definition: Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition: Book.h:36
Issue in
Definition: Book.h:38
Issue out
Definition: Book.h:39
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition: ClusterNode.h:46
std::uint32_t getLoadFee() const
Definition: ClusterNode.h:52
NetClock::time_point getReportTime() const
Definition: ClusterNode.h:58
PublicKey const & identity() const
Definition: ClusterNode.h:64
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:49
uint32_t NETWORK_ID
Definition: Config.h:156
std::string SERVER_DOMAIN
Definition: Config.h:278
std::size_t NODE_SIZE
Definition: Config.h:213
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition: Config.h:160
int RELAY_UNTRUSTED_VALIDATIONS
Definition: Config.h:169
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition: InfoSub.h:54
AccountID account
Definition: Issue.h:39
Currency currency
Definition: Issue.h:38
A pool of threads to perform work.
Definition: JobQueue.h:56
Json::Value getJson(int c=0)
Definition: JobQueue.cpp:214
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
Definition: LedgerMaster.h:265
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:79
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
Definition: LoadFeeTrack.h:45
std::uint32_t getClusterFee() const
Definition: LoadFeeTrack.h:82
std::uint32_t getLocalFee() const
Definition: LoadFeeTrack.h:75
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:89
std::uint32_t getRemoteFee() const
Definition: LoadFeeTrack.h:68
std::uint32_t getLoadFactor() const
Definition: LoadFeeTrack.h:95
Manages load sources.
Definition: LoadManager.h:46
void heartbeat()
Reset the stall detection timer.
Definition: LoadManager.cpp:64
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
Definition: Manifest.cpp:323
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
Definition: NetworkOPs.cpp:143
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
Definition: NetworkOPs.cpp:153
std::chrono::steady_clock::time_point start_
Definition: NetworkOPs.cpp:155
static std::array< Json::StaticString const, 5 > const states_
Definition: NetworkOPs.cpp:159
std::chrono::steady_clock::time_point const processStart_
Definition: NetworkOPs.cpp:157
Transaction with input flags and results to be applied in batches.
Definition: NetworkOPs.cpp:94
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
Definition: NetworkOPs.cpp:103
std::shared_ptr< Transaction > const transaction
Definition: NetworkOPs.cpp:96
boost::asio::steady_timer accountHistoryTxTimer_
Definition: NetworkOPs.cpp:754
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
Definition: NetworkOPs.cpp:889
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
std::vector< TransactionStatus > mTransactions
Definition: NetworkOPs.cpp:801
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Definition: NetworkOPs.cpp:744
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
Definition: NetworkOPs.cpp:756
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
Definition: NetworkOPs.cpp:907
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
Definition: NetworkOPs.cpp:752
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
Definition: NetworkOPs.cpp:120
std::optional< PublicKey > const validatorPK_
Definition: NetworkOPs.cpp:758
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
Definition: NetworkOPs.cpp:740
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
Definition: NetworkOPs.cpp:270
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Definition: NetworkOPs.cpp:770
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
Definition: NetworkOPs.cpp:753
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
Definition: NetworkOPs.cpp:126
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:226
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
Definition: NetworkOPs.cpp:747
beast::Journal m_journal
Definition: NetworkOPs.cpp:738
SubInfoMapType mSubAccount
Definition: NetworkOPs.cpp:765
std::optional< PublicKey > const validatorMasterPK_
Definition: NetworkOPs.cpp:759
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
Definition: NetworkOPs.cpp:805
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
Definition: NetworkOPs.cpp:751
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
Definition: NetworkOPs.cpp:960
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
Definition: NetworkOPs.cpp:785
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
Definition: NetworkOPs.cpp:795
std::atomic< bool > unlBlocked_
Definition: NetworkOPs.cpp:749
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
Definition: NetworkOPs.cpp:742
std::atomic< bool > needNetworkLedger_
Definition: NetworkOPs.cpp:746
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
Definition: NetworkOPs.cpp:803
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
Definition: NetworkOPs.cpp:919
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
Definition: NetworkOPs.cpp:763
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
Definition: NetworkOPs.cpp:950
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
Definition: NetworkOPs.cpp:787
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
Definition: NetworkOPs.cpp:901
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
Definition: NetworkOPs.cpp:798
void setMode(OperatingMode om) override
void stop() override
Definition: NetworkOPs.cpp:587
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
void clearNeedNetworkLedger() override
Definition: NetworkOPs.cpp:913
DispatchState mDispatchState
Definition: NetworkOPs.cpp:800
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
Definition: NetworkOPs.cpp:766
void reportFeeChange() override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
Definition: NetworkOPs.cpp:925
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
Definition: NetworkOPs.cpp:806
Application & app_
Definition: NetworkOPs.cpp:737
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
Definition: NetworkOPs.cpp:761
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
Definition: NetworkOPs.cpp:768
std::atomic< bool > amendmentWarned_
Definition: NetworkOPs.cpp:748
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
Definition: NetworkOPs.cpp:931
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition: NetworkOPs.h:89
void getCountsJson(Json::Value &obj)
Definition: Database.cpp:268
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:51
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:66
BookListeners::pointer getBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &alTx, MultiApiJson const &jvObj)
BookListeners::pointer makeBookListeners(Book const &)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
Definition: RCLConsensus.h:53
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
Definition: RCLConsensus.h:447
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Definition: RCLConsensus.h:460
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:44
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Definition: RCLCxPeerPos.h:78
Represents a set of transactions in RCLConsensus.
Definition: RCLCxTx.h:63
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition: RFC1751.cpp:507
Collects logging information.
Definition: RCLConsensus.h:551
std::unique_ptr< std::stringstream > const & ss()
Definition: RCLConsensus.h:565
A view into a ledger.
Definition: ReadView.h:52
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition: STAmount.cpp:666
std::string getText() const override
Definition: STAmount.cpp:706
Issue const & issue() const
Definition: STAmount.h:496
std::optional< T > get(std::string const &name) const
Definition: BasicConfig.h:140
std::size_t size() const noexcept
Definition: Serializer.h:73
void const * data() const noexcept
Definition: Serializer.h:79
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition: TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition: TxQ.cpp:1778
static time_point now()
Definition: UptimeClock.cpp:67
Validator keys and manifest as set in configuration file.
Definition: ValidatorKeys.h:38
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition: XRPAmount.h:262
Json::Value jsonClipped() const
Definition: XRPAmount.h:218
iterator begin()
Definition: base_uint.h:136
static constexpr std::size_t size()
Definition: base_uint.h:526
bool isZero() const
Definition: base_uint.h:540
bool isNonZero() const
Definition: base_uint.h:545
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition: scope.h:231
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition: json_value.h:45
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:46
int Int
Definition: json_forwards.h:26
unsigned int UInt
Definition: json_forwards.h:27
void rngfill(void *buffer, std::size_t bytes, Generator &g)
Definition: rngfill.h:34
std::string const & getVersionString()
Server version.
Definition: BuildInfo.cpp:68
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Definition: CTID.h:43
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition: BookChanges.h:47
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition: Indexes.cpp:184
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition: Indexes.cpp:380
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition: Indexes.cpp:274
Rate rate(Env &env, Account const &account, std::uint32_t const &seq)
Definition: escrow.cpp:69
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:114
STAmount divide(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:93
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition: STTx.cpp:811
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition: View.cpp:553
@ fhZERO_IF_FROZEN
Definition: View.h:78
@ fhIGNORE_FREEZE
Definition: View.h:78
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition: View.cpp:147
std::uint64_t getQuality(uint256 const &uBase)
Definition: Indexes.cpp:149
@ INCLUDED
Definition: Transaction.h:49
@ OBSOLETE
Definition: Transaction.h:54
@ INVALID
Definition: Transaction.h:48
error_code_i
Definition: ErrorCodes.h:40
@ rpcSUCCESS
Definition: ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition: ErrorCodes.h:84
@ rpcINTERNAL
Definition: ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
Definition: SecretKey.cpp:369
auto constexpr muldiv_max
Definition: mulDiv.h:29
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition: LocalTxs.cpp:192
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition: View.cpp:761
STAmount amountFromQuality(std::uint64_t rate)
Definition: STAmount.cpp:1013
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition: ErrorCodes.h:170
@ warnRPC_UNSUPPORTED_MAJORITY
Definition: ErrorCodes.h:168
@ warnRPC_AMENDMENT_BLOCKED
Definition: ErrorCodes.h:169
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
Definition: BasicConfig.h:315
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition: NetworkOPs.h:68
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:53
AccountID calcAccountID(PublicKey const &pk)
Definition: AccountID.cpp:168
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Definition: csprng.cpp:103
Json::Value rpcError(int iError)
Definition: RPCErr.cpp:31
@ tefPAST_SEQ
Definition: TER.h:175
bool isTefFailure(TER x) noexcept
Definition: TER.h:662
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
Definition: NetworkOPs.cpp:870
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition: View.cpp:158
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition: ApiVersion.h:102
bool isTerRetry(TER x) noexcept
Definition: TER.h:668
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:75
@ tesSUCCESS
Definition: TER.h:244
uint256 getQualityNext(uint256 const &uBase)
Definition: Indexes.cpp:141
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition: View.cpp:386
bool isTesSuccess(TER x) noexcept
Definition: TER.h:674
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition: ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition: chrono.h:92
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:630
FeeSetup setup_FeeVote(Section const &section)
Definition: Config.cpp:1128
bool isTemMalformed(TER x) noexcept
Definition: TER.h:656
Number root(Number f, unsigned d)
Definition: Number.cpp:636
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Definition: mulDiv.cpp:32
ApplyFlags
Definition: ApplyView.h:31
@ tapFAIL_HARD
Definition: ApplyView.h:36
@ tapUNLIMITED
Definition: ApplyView.h:43
@ tapNONE
Definition: ApplyView.h:32
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:39
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition: Seed.cpp:76
constexpr std::size_t maxPoppedTransactions
@ terQUEUED
Definition: TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition: TER.cpp:249
@ jtNETOP_CLUSTER
Definition: Job.h:75
@ jtCLIENT_FEE_CHANGE
Definition: Job.h:47
@ jtTRANSACTION
Definition: Job.h:62
@ jtTXN_PROC
Definition: Job.h:82
@ jtCLIENT_CONSENSUS
Definition: Job.h:48
@ jtBATCH
Definition: Job.h:65
@ jtCLIENT_ACCT_HIST
Definition: Job.h:49
bool isTelLocal(TER x) noexcept
Definition: TER.h:650
uint256 getBookBase(Book const &book)
Definition: Indexes.cpp:115
constexpr std::uint32_t tfInnerBatchTxn
Definition: TxFlags.h:61
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition: View.cpp:184
static std::uint32_t trunc32(std::uint64_t v)
@ temINVALID_FLAG
Definition: TER.h:111
@ temBAD_SIGNATURE
Definition: TER.h:105
static auto const genesisAccountId
Definition: NetworkOPs.cpp:883
STL namespace.
T owns_lock(T... args)
T ref(T... args)
T reserve(T... args)
T reset(T... args)
T set_intersection(T... args)
T size(T... args)
T str(T... args)
std::string serialized
The manifest in serialized form.
Definition: Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition: Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition: Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
Definition: Manifest.cpp:244
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition: Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
Definition: Manifest.cpp:255
PublicKey masterKey
The master key associated with this manifest.
Definition: Manifest.h:86
Server fees published on server subscription.
Definition: NetworkOPs.cpp:203
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
Definition: NetworkOPs.cpp:222
bool operator==(ServerFeeSummary const &b) const
Definition: NetworkOPs.cpp:214
beast::insight::Gauge full_transitions
Definition: NetworkOPs.cpp:857
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:812
beast::insight::Hook hook
Definition: NetworkOPs.cpp:846
beast::insight::Gauge connected_duration
Definition: NetworkOPs.cpp:848
beast::insight::Gauge tracking_duration
Definition: NetworkOPs.cpp:850
beast::insight::Gauge connected_transitions
Definition: NetworkOPs.cpp:854
beast::insight::Gauge disconnected_transitions
Definition: NetworkOPs.cpp:853
beast::insight::Gauge syncing_duration
Definition: NetworkOPs.cpp:849
beast::insight::Gauge tracking_transitions
Definition: NetworkOPs.cpp:856
beast::insight::Gauge full_duration
Definition: NetworkOPs.cpp:851
beast::insight::Gauge disconnected_duration
Definition: NetworkOPs.cpp:847
beast::insight::Gauge syncing_transitions
Definition: NetworkOPs.cpp:855
SubAccountHistoryIndex(AccountID const &accountId)
Definition: NetworkOPs.cpp:701
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:720
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:715
Represents a transfer rate.
Definition: Rate.h:40
Data format for exchanging consumption information across peers.
Definition: Gossip.h:30
std::vector< Item > items
Definition: Gossip.h:42
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition: TxQ.h:165
IsMemberResult isMember(char const *key) const
Definition: MultiApiJson.h:94
void set(char const *key, auto const &v)
Definition: MultiApiJson.h:83
Select all peers (except optional excluded) that are in our cluster.
Definition: predicates.h:137
Sends a message to all peers.
Definition: predicates.h:32
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)