NetworkOPs.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/main/Tuning.h>
32#include <xrpld/app/misc/AmendmentTable.h>
33#include <xrpld/app/misc/DeliverMax.h>
34#include <xrpld/app/misc/HashRouter.h>
35#include <xrpld/app/misc/LoadFeeTrack.h>
36#include <xrpld/app/misc/NetworkOPs.h>
37#include <xrpld/app/misc/Transaction.h>
38#include <xrpld/app/misc/TxQ.h>
39#include <xrpld/app/misc/ValidatorKeys.h>
40#include <xrpld/app/misc/ValidatorList.h>
41#include <xrpld/app/misc/detail/AccountTxPaging.h>
42#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
43#include <xrpld/app/tx/apply.h>
44#include <xrpld/consensus/Consensus.h>
45#include <xrpld/consensus/ConsensusParms.h>
46#include <xrpld/overlay/Cluster.h>
47#include <xrpld/overlay/Overlay.h>
48#include <xrpld/overlay/predicates.h>
49#include <xrpld/perflog/PerfLog.h>
50#include <xrpld/rpc/BookChanges.h>
51#include <xrpld/rpc/CTID.h>
52#include <xrpld/rpc/DeliveredAmount.h>
53#include <xrpld/rpc/MPTokenIssuanceID.h>
54#include <xrpld/rpc/ServerHandler.h>
55
56#include <xrpl/basics/UptimeClock.h>
57#include <xrpl/basics/mulDiv.h>
58#include <xrpl/basics/safe_cast.h>
59#include <xrpl/basics/scope.h>
60#include <xrpl/beast/utility/rngfill.h>
61#include <xrpl/crypto/RFC1751.h>
62#include <xrpl/crypto/csprng.h>
63#include <xrpl/protocol/BuildInfo.h>
64#include <xrpl/protocol/Feature.h>
65#include <xrpl/protocol/MultiApiJson.h>
66#include <xrpl/protocol/NFTSyntheticSerializer.h>
67#include <xrpl/protocol/RPCErr.h>
68#include <xrpl/protocol/TxFlags.h>
69#include <xrpl/protocol/jss.h>
70#include <xrpl/resource/Fees.h>
71#include <xrpl/resource/ResourceManager.h>
72
73#include <boost/asio/ip/host_name.hpp>
74#include <boost/asio/steady_timer.hpp>
75
76#include <algorithm>
77#include <exception>
78#include <mutex>
79#include <optional>
80#include <set>
81#include <sstream>
82#include <string>
83#include <tuple>
84#include <unordered_map>
85
86namespace ripple {
87
88class NetworkOPsImp final : public NetworkOPs
89{
94 class TransactionStatus
95 {
96 public:
97 std::shared_ptr<Transaction> const transaction;
98 bool const admin;
99 bool const local;
100 FailHard const failType;
101 bool applied = false;
102 TER result;
103
104 TransactionStatus(
105 std::shared_ptr<Transaction> t,
106 bool a,
107 bool l,
108 FailHard f)
109 : transaction(t), admin(a), local(l), failType(f)
110 {
111 XRPL_ASSERT(
112 transaction,
113 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
114 "valid inputs");
115 }
116 };
117
121 enum class DispatchState : unsigned char {
122 none,
123 scheduled,
124 running,
125 };
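// [Editorial sketch, inferred from doTransactionAsync(), transactionBatch(),
// and apply() below; not part of the original source.] The dispatch states
// form a small cycle: none -> scheduled (a jtBATCH job was queued) ->
// running (apply() is draining mTransactions) -> none (apply() finished and
// notified mCond). While a batch is running, new submissions only append to
// mTransactions; no second batch job is scheduled until the state returns
// to none.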
126
128
143 class StateAccounting
144 {
145 struct Counters
146 {
147 explicit Counters() = default;
148
149 std::uint64_t transitions = 0;
150 std::chrono::microseconds dur = std::chrono::microseconds(0);
151 };
152
156 std::chrono::steady_clock::time_point start_ =
157 std::chrono::steady_clock::now();
158 std::chrono::steady_clock::time_point const processStart_ = start_;
161
162 public:
163 explicit StateAccounting()
164 {
165 counters_[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
166 .transitions = 1;
167 }
168
175 void
176 mode(OperatingMode om);
177
183 void
184 json(Json::Value& obj) const;
185
187 {
189 decltype(mode_) mode;
190 decltype(start_) start;
192 };
193
196 {
199 }
200 };
201
203 struct ServerFeeSummary
204 {
205 ServerFeeSummary() = default;
206
208 XRPAmount fee,
209 TxQ::Metrics&& escalationMetrics,
210 LoadFeeTrack const& loadFeeTrack);
211 bool
212 operator!=(ServerFeeSummary const& b) const;
213
214 bool
215 operator==(ServerFeeSummary const& b) const
216 {
217 return !(*this != b);
218 }
219
224 };
225
226public:
227 NetworkOPsImp(
228 Application& app,
230 bool standalone,
231 std::size_t minPeerCount,
232 bool start_valid,
233 JobQueue& job_queue,
234 LedgerMaster& ledgerMaster,
235 ValidatorKeys const& validatorKeys,
236 boost::asio::io_service& io_svc,
237 beast::Journal journal,
238 beast::insight::Collector::ptr const& collector)
239 : app_(app)
240 , m_journal(journal)
243 , heartbeatTimer_(io_svc)
244 , clusterTimer_(io_svc)
245 , accountHistoryTxTimer_(io_svc)
246 , mConsensus(
247 app,
248 make_FeeVote(
249 setup_FeeVote(app_.config().section("voting")),
250 app_.logs().journal("FeeVote")),
251 ledgerMaster,
252 *m_localTX,
253 app.getInboundTransactions(),
254 beast::get_abstract_clock<std::chrono::steady_clock>(),
255 validatorKeys,
256 app_.logs().journal("LedgerConsensus"))
257 , validatorPK_(
258 validatorKeys.keys ? validatorKeys.keys->publicKey
259 : decltype(validatorPK_){})
260 , validatorMasterPK_(
261 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
262 : decltype(validatorMasterPK_){})
264 , m_job_queue(job_queue)
265 , m_standalone(standalone)
266 , minPeerCount_(start_valid ? 0 : minPeerCount)
267 , m_stats(std::bind(&NetworkOPsImp::collect_metrics, this), collector)
268 {
269 }
270
271 ~NetworkOPsImp() override
272 {
273 // This clear() is necessary to ensure the shared_ptrs in this map get
274 // destroyed NOW because the objects in this map invoke methods on this
275 // class when they are destroyed
276 mRpcSubMap.clear();
277 }
278
279public:
280 OperatingMode
281 getOperatingMode() const override;
282
283 std::string
284 strOperatingMode(OperatingMode const mode, bool const admin) const override;
285
286 std::string
287 strOperatingMode(bool const admin = false) const override;
288
289 //
290 // Transaction operations.
291 //
292
293 // Must complete immediately.
294 void
295 submitTransaction(std::shared_ptr<STTx const> const& iTrans) override;
296
297 void
298 processTransaction(
299 std::shared_ptr<Transaction>& transaction,
300 bool bUnlimited,
301 bool bLocal,
302 FailHard failType) override;
303
304 void
305 processTransactionSet(CanonicalTXSet const& set) override;
306
315 void
316 doTransactionAsync(
317 std::shared_ptr<Transaction> transaction,
318 bool bUnlimited,
319 FailHard failType);
320
330 void
331 doTransactionSync(
332 std::shared_ptr<Transaction> transaction,
333 bool bUnlimited,
334 FailHard failType);
335
336private:
337 bool
338 preProcessTransaction(std::shared_ptr<Transaction>& transaction);
339
340 void
341 doTransactionSyncBatch(
342 std::unique_lock<std::mutex>& lock,
343 std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback);
344
345public:
349 void
350 transactionBatch();
351
357 void
358 apply(std::unique_lock<std::mutex>& batchLock);
359
360 //
361 // Owner functions.
362 //
363
364 Json::Value
365 getOwnerInfo(
366 std::shared_ptr<ReadView const> lpLedger,
367 AccountID const& account) override;
368
369 //
370 // Book functions.
371 //
372
373 void
374 getBookPage(
375 std::shared_ptr<ReadView const>& lpLedger,
376 Book const&,
377 AccountID const& uTakerID,
378 bool const bProof,
379 unsigned int iLimit,
380 Json::Value const& jvMarker,
381 Json::Value& jvResult) override;
382
383 // Ledger proposal/close functions.
384 bool
385 processTrustedProposal(RCLCxPeerPos peerPos) override;
386
387 bool
388 recvValidation(
389 std::shared_ptr<STValidation> const& val,
390 std::string const& source) override;
391
392 void
393 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
394
395 // Network state machine.
396
397 // Used for the "jump" case.
398private:
399 void
400 switchLastClosedLedger(std::shared_ptr<Ledger const> const& newLCL);
401 bool
402 checkLastClosedLedger(Overlay::PeerSequence const&, uint256& networkClosed);
403
404public:
405 bool
406 beginConsensus(
407 uint256 const& networkClosed,
408 std::unique_ptr<std::stringstream> const& clog) override;
409 void
410 endConsensus(std::unique_ptr<std::stringstream> const& clog) override;
411 void
412 setStandAlone() override;
413
417 void
418 setStateTimer() override;
419
420 void
421 setNeedNetworkLedger() override;
422 void
423 clearNeedNetworkLedger() override;
424 bool
425 isNeedNetworkLedger() override;
426 bool
427 isFull() override;
428
429 void
430 setMode(OperatingMode om) override;
431
432 bool
433 isBlocked() override;
434 bool
435 isAmendmentBlocked() override;
436 void
437 setAmendmentBlocked() override;
438 bool
439 isAmendmentWarned() override;
440 void
441 setAmendmentWarned() override;
442 void
443 clearAmendmentWarned() override;
444 bool
445 isUNLBlocked() override;
446 void
447 setUNLBlocked() override;
448 void
449 clearUNLBlocked() override;
450 void
451 consensusViewChange() override;
452
453 Json::Value
454 getConsensusInfo() override;
455 Json::Value
456 getServerInfo(bool human, bool admin, bool counters) override;
457 void
458 clearLedgerFetch() override;
459 Json::Value
460 getLedgerFetchInfo() override;
463 std::optional<std::chrono::milliseconds> consensusDelay) override;
464 void
465 reportFeeChange() override;
466 void
467 reportConsensusStateChange(ConsensusPhase phase);
468
469 void
470 updateLocalTx(ReadView const& view) override;
471 std::size_t
472 getLocalTxCount() override;
473
474 //
475 // Monitoring: publisher side.
476 //
477 void
478 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
479 void
480 pubProposedTransaction(
481 std::shared_ptr<ReadView const> const& ledger,
482 std::shared_ptr<STTx const> const& transaction,
483 TER result) override;
484 void
485 pubValidation(std::shared_ptr<STValidation> const& val) override;
486
487 //--------------------------------------------------------------------------
488 //
489 // InfoSub::Source.
490 //
491 void
492 subAccount(
493 InfoSub::ref ispListener,
494 hash_set<AccountID> const& vnaAccountIDs,
495 bool rt) override;
496 void
497 unsubAccount(
498 InfoSub::ref ispListener,
499 hash_set<AccountID> const& vnaAccountIDs,
500 bool rt) override;
501
502 // Just remove the subscription from the tracking
503 // not from the InfoSub. Needed for InfoSub destruction
504 void
505 unsubAccountInternal(
506 std::uint64_t seq,
507 hash_set<AccountID> const& vnaAccountIDs,
508 bool rt) override;
509
511 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
512 override;
513 void
514 unsubAccountHistory(
515 InfoSub::ref ispListener,
516 AccountID const& account,
517 bool historyOnly) override;
518
519 void
520 unsubAccountHistoryInternal(
521 std::uint64_t seq,
522 AccountID const& account,
523 bool historyOnly) override;
524
525 bool
526 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
527 bool
528 unsubLedger(std::uint64_t uListener) override;
529
530 bool
531 subBookChanges(InfoSub::ref ispListener) override;
532 bool
533 unsubBookChanges(std::uint64_t uListener) override;
534
535 bool
536 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
537 override;
538 bool
539 unsubServer(std::uint64_t uListener) override;
540
541 bool
542 subBook(InfoSub::ref ispListener, Book const&) override;
543 bool
544 unsubBook(std::uint64_t uListener, Book const&) override;
545
546 bool
547 subManifests(InfoSub::ref ispListener) override;
548 bool
549 unsubManifests(std::uint64_t uListener) override;
550 void
551 pubManifest(Manifest const&) override;
552
553 bool
554 subTransactions(InfoSub::ref ispListener) override;
555 bool
556 unsubTransactions(std::uint64_t uListener) override;
557
558 bool
559 subRTTransactions(InfoSub::ref ispListener) override;
560 bool
561 unsubRTTransactions(std::uint64_t uListener) override;
562
563 bool
564 subValidations(InfoSub::ref ispListener) override;
565 bool
566 unsubValidations(std::uint64_t uListener) override;
567
568 bool
569 subPeerStatus(InfoSub::ref ispListener) override;
570 bool
571 unsubPeerStatus(std::uint64_t uListener) override;
572 void
573 pubPeerStatus(std::function<Json::Value(void)> const&) override;
574
575 bool
576 subConsensus(InfoSub::ref ispListener) override;
577 bool
578 unsubConsensus(std::uint64_t uListener) override;
579
580 InfoSub::pointer
581 findRpcSub(std::string const& strUrl) override;
582 InfoSub::pointer
583 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
584 bool
585 tryRemoveRpcSub(std::string const& strUrl) override;
586
587 void
588 stop() override
589 {
590 {
591 boost::system::error_code ec;
592 heartbeatTimer_.cancel(ec);
593 if (ec)
594 {
595 JLOG(m_journal.error())
596 << "NetworkOPs: heartbeatTimer cancel error: "
597 << ec.message();
598 }
599
600 ec.clear();
601 clusterTimer_.cancel(ec);
602 if (ec)
603 {
604 JLOG(m_journal.error())
605 << "NetworkOPs: clusterTimer cancel error: "
606 << ec.message();
607 }
608
609 ec.clear();
610 accountHistoryTxTimer_.cancel(ec);
611 if (ec)
612 {
613 JLOG(m_journal.error())
614 << "NetworkOPs: accountHistoryTxTimer cancel error: "
615 << ec.message();
616 }
617 }
618 // Make sure that any waitHandlers pending in our timers are done.
619 using namespace std::chrono_literals;
620 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
621 }
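// [Editorial note, inferred from setTimer() below.] stop() cancels each
// timer before joining: a cancelled wait completes with operation_aborted,
// which the counted handler treats as benign (it fires neither onExpire nor
// onError), so waitHandlerCounter_.join() can return promptly.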
622
623 void
624 stateAccounting(Json::Value& obj) override;
625
626private:
627 void
628 setTimer(
629 boost::asio::steady_timer& timer,
630 std::chrono::milliseconds const& expiry_time,
631 std::function<void()> onExpire,
632 std::function<void()> onError);
633 void
634 setHeartbeatTimer();
635 void
636 setClusterTimer();
637 void
638 processHeartbeatTimer();
639 void
640 processClusterTimer();
641
642 MultiApiJson
643 transJson(
644 std::shared_ptr<STTx const> const& transaction,
645 TER result,
646 bool validated,
649
650 void
651 pubValidatedTransaction(
652 std::shared_ptr<ReadView const> const& ledger,
653 AcceptedLedgerTx const& transaction,
654 bool last);
655
656 void
657 pubAccountTransaction(
658 std::shared_ptr<ReadView const> const& ledger,
659 AcceptedLedgerTx const& transaction,
660 bool last);
661
662 void
663 pubProposedAccountTransaction(
664 std::shared_ptr<ReadView const> const& ledger,
665 std::shared_ptr<STTx const> const& transaction,
666 TER result);
667
668 void
669 pubServer();
670 void
671 pubConsensus(ConsensusPhase phase);
672
673 std::string
674 getHostId(bool forAdmin);
675
676private:
680
681 /*
682 * With a validated ledger to separate history and future, the node
683 * streams historical txns with negative indexes starting from -1,
684 * and streams future txns starting from index 0.
685 * The SubAccountHistoryIndex struct maintains these indexes.
686 * It also has a flag stopHistorical_ for stopping streaming
687 * the historical txns.
688 */
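    /*
     * [Editorial example, illustrative only.] For a subscription separated
     * at validated ledger N: txns validated before N stream backward with
     * indexes -1, -2, -3, ..., while txns validated at or after N stream
     * forward with indexes 0, 1, 2, ....
     */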
689 struct SubAccountHistoryIndex
690 {
691 AccountID const accountId_;
692 // forward
694 // separate backward and forward
696 // history, backward
701
702 SubAccountHistoryIndex(AccountID const& accountId)
703 : accountId_(accountId)
704 , forwardTxIndex_(0)
707 , historyTxIndex_(-1)
708 , haveHistorical_(false)
709 , stopHistorical_(false)
710 {
711 }
712 };
713 struct SubAccountHistoryInfo
714 {
717 };
718 struct SubAccountHistoryInfoWeak
719 {
722 };
725
729 void
733 void
735 void
737
740
742
744
746
751
753 boost::asio::steady_timer heartbeatTimer_;
754 boost::asio::steady_timer clusterTimer_;
755 boost::asio::steady_timer accountHistoryTxTimer_;
756
758
761
763
765
768
770
772
773 enum SubTypes {
774 sLedger, // Accepted ledgers.
775 sManifests, // Received validator manifests.
776 sServer, // When server changes connectivity state.
777 sTransactions, // All accepted transactions.
778 sRTTransactions, // All proposed and accepted transactions.
779 sValidations, // Received validations.
780 sPeerStatus, // Peer status changes.
781 sConsensusPhase, // Consensus phase
782 sBookChanges, // Per-ledger order book changes
783 sLastEntry // Any new entry must be ADDED ABOVE this one
784 };
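    // [Editorial note.] Each SubTypes value indexes one subscriber map,
    // e.g. mStreamMaps[sManifests] in pubManifest() and
    // mStreamMaps[sServer] in pubServer() below; sLastEntry only sizes the
    // container.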
785
787
789
791
792 // Whether we are in standalone mode.
793 bool const m_standalone;
794
795 // The number of nodes that we need to consider ourselves connected.
796 std::size_t const minPeerCount_;
797
798 // Transaction batching.
799 std::condition_variable mCond;
800 std::mutex mMutex;
801 DispatchState mDispatchState = DispatchState::none;
802 std::vector<TransactionStatus> mTransactions;
803
805
808
809private:
810 struct Stats
811 {
812 template <class Handler>
814 Handler const& handler,
815 beast::insight::Collector::ptr const& collector)
816 : hook(collector->make_hook(handler))
817 , disconnected_duration(collector->make_gauge(
818 "State_Accounting",
819 "Disconnected_duration"))
820 , connected_duration(collector->make_gauge(
821 "State_Accounting",
822 "Connected_duration"))
824 collector->make_gauge("State_Accounting", "Syncing_duration"))
825 , tracking_duration(collector->make_gauge(
826 "State_Accounting",
827 "Tracking_duration"))
829 collector->make_gauge("State_Accounting", "Full_duration"))
830 , disconnected_transitions(collector->make_gauge(
831 "State_Accounting",
832 "Disconnected_transitions"))
833 , connected_transitions(collector->make_gauge(
834 "State_Accounting",
835 "Connected_transitions"))
836 , syncing_transitions(collector->make_gauge(
837 "State_Accounting",
838 "Syncing_transitions"))
839 , tracking_transitions(collector->make_gauge(
840 "State_Accounting",
841 "Tracking_transitions"))
843 collector->make_gauge("State_Accounting", "Full_transitions"))
844 {
845 }
846
853
859 };
860
861 std::mutex m_statsMutex; // Mutex to lock m_stats
862 Stats m_stats;
863
864private:
865 void
866 collect_metrics();
867};
868
869//------------------------------------------------------------------------------
870
872 {"disconnected", "connected", "syncing", "tracking", "full"}};
873
875
883
884static auto const genesisAccountId = calcAccountID(
885 generateKeyPair(KeyType::secp256k1, generateSeed("masterpassphrase"))
886 .first);
887
888//------------------------------------------------------------------------------
889inline OperatingMode
890NetworkOPsImp::getOperatingMode() const
891{
892 return mMode;
893}
894
895inline std::string
896NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
897{
898 return strOperatingMode(mMode, admin);
899}
900
901inline void
902NetworkOPsImp::setStandAlone()
903{
904 setMode(OperatingMode::FULL);
905}
906
907inline void
908NetworkOPsImp::setNeedNetworkLedger()
909{
910 needNetworkLedger_ = true;
911}
912
913inline void
914NetworkOPsImp::clearNeedNetworkLedger()
915{
916 needNetworkLedger_ = false;
917}
918
919inline bool
920NetworkOPsImp::isNeedNetworkLedger()
921{
922 return needNetworkLedger_;
923}
924
925inline bool
926NetworkOPsImp::isFull()
927{
928 return !needNetworkLedger_ && (mMode == OperatingMode::FULL);
929}
930
931std::string
932NetworkOPsImp::getHostId(bool forAdmin)
933{
934 static std::string const hostname = boost::asio::ip::host_name();
935
936 if (forAdmin)
937 return hostname;
938
939 // For non-admin uses hash the node public key into a
940 // single RFC1751 word:
941 static std::string const shroudedHostId = [this]() {
942 auto const& id = app_.nodeIdentity();
943
944 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
945 }();
946
947 return shroudedHostId;
948}
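// [Editorial example, illustrative values only.] getHostId(true) returns
// the real hostname, e.g. "validator-01", while getHostId(false) maps the
// node public key to a single RFC1751 dictionary word, e.g. "CLUB", so
// non-admin clients cannot learn the host's identity.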
949
950void
951NetworkOPsImp::setStateTimer()
952{
953 setHeartbeatTimer();
954
955 // Only do this work if a cluster is configured
956 if (app_.cluster().size() != 0)
957 setClusterTimer();
958}
959
960void
961NetworkOPsImp::setTimer(
962 boost::asio::steady_timer& timer,
963 std::chrono::milliseconds const& expiry_time,
964 std::function<void()> onExpire,
965 std::function<void()> onError)
966{
967 // Only start the timer if waitHandlerCounter_ is not yet joined.
968 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
969 [this, onExpire, onError](boost::system::error_code const& e) {
970 if ((e.value() == boost::system::errc::success) &&
971 (!m_job_queue.isStopped()))
972 {
973 onExpire();
974 }
975 // Recover as best we can if an unexpected error occurs.
976 if (e.value() != boost::system::errc::success &&
977 e.value() != boost::asio::error::operation_aborted)
978 {
979 // Try again later and hope for the best.
980 JLOG(m_journal.error())
981 << "Timer got error '" << e.message()
982 << "'. Restarting timer.";
983 onError();
984 }
985 }))
986 {
987 timer.expires_from_now(expiry_time);
988 timer.async_wait(std::move(*optionalCountedHandler));
989 }
990}
991
992void
993NetworkOPsImp::setHeartbeatTimer()
994{
995 setTimer(
996 heartbeatTimer_,
997 mConsensus.parms().ledgerGRANULARITY,
998 [this]() {
999 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
1000 processHeartbeatTimer();
1001 });
1002 },
1003 [this]() { setHeartbeatTimer(); });
1004}
1005
1006void
1007NetworkOPsImp::setClusterTimer()
1008{
1009 using namespace std::chrono_literals;
1010
1011 setTimer(
1012 clusterTimer_,
1013 10s,
1014 [this]() {
1015 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
1016 processClusterTimer();
1017 });
1018 },
1019 [this]() { setClusterTimer(); });
1020}
1021
1022void
1023NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
1024{
1025 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
1026 << toBase58(subInfo.index_->accountId_);
1027 using namespace std::chrono_literals;
1028 setTimer(
1029 accountHistoryTxTimer_,
1030 4s,
1031 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1032 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1033}
1034
1035void
1036NetworkOPsImp::processHeartbeatTimer()
1037{
1038 RclConsensusLogger clog(
1039 "Heartbeat Timer", mConsensus.validating(), m_journal);
1040 {
1041 std::unique_lock lock{app_.getMasterMutex()};
1042
1043 // VFALCO NOTE This is for diagnosing a crash on exit
1044 LoadManager& mgr(app_.getLoadManager());
1045 mgr.heartbeat();
1046
1047 std::size_t const numPeers = app_.overlay().size();
1048
1049 // do we have sufficient peers? If not, we are disconnected.
1050 if (numPeers < minPeerCount_)
1051 {
1052 if (mMode != OperatingMode::DISCONNECTED)
1053 {
1054 setMode(OperatingMode::DISCONNECTED);
1055 std::stringstream ss;
1056 ss << "Node count (" << numPeers << ") has fallen "
1057 << "below required minimum (" << minPeerCount_ << ").";
1058 JLOG(m_journal.warn()) << ss.str();
1059 CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
1060 }
1061 else
1062 {
1063 CLOG(clog.ss())
1064 << "already DISCONNECTED. too few peers (" << numPeers
1065 << "), need at least " << minPeerCount_;
1066 }
1067
1068 // MasterMutex lock need not be held to call setHeartbeatTimer()
1069 lock.unlock();
1070 // We do not call mConsensus.timerEntry until there are enough
1071 // peers providing meaningful inputs to consensus
1072 setHeartbeatTimer();
1073
1074 return;
1075 }
1076
1077 if (mMode == OperatingMode::DISCONNECTED)
1078 {
1079 setMode(OperatingMode::CONNECTED);
1080 JLOG(m_journal.info())
1081 << "Node count (" << numPeers << ") is sufficient.";
1082 CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
1083 << " peers. ";
1084 }
1085
1086 // Check if the last validated ledger forces a change between these
1087 // states.
1088 auto origMode = mMode.load();
1089 CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
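        // [Editorial note.] Re-submitting the current mode is not a no-op:
        // per the comment above, setMode() is assumed to re-evaluate
        // SYNCING vs. CONNECTED against the validated-ledger state, so
        // these calls let the mode track the ledger.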
1090 if (mMode == OperatingMode::SYNCING)
1091 setMode(OperatingMode::SYNCING);
1092 else if (mMode == OperatingMode::CONNECTED)
1093 setMode(OperatingMode::CONNECTED);
1094 auto newMode = mMode.load();
1095 if (origMode != newMode)
1096 {
1097 CLOG(clog.ss())
1098 << ", changing to " << strOperatingMode(newMode, true);
1099 }
1100 CLOG(clog.ss()) << ". ";
1101 }
1102
1103 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());
1104
1105 CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
1106 ConsensusPhase const currPhase = mConsensus.phase();
1107 if (mLastConsensusPhase != currPhase)
1108 {
1109 reportConsensusStateChange(currPhase);
1110 mLastConsensusPhase = currPhase;
1111 CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
1112 }
1113 CLOG(clog.ss()) << ". ";
1114
1115 setHeartbeatTimer();
1116}
1117
1118void
1119NetworkOPsImp::processClusterTimer()
1120{
1121 if (app_.cluster().size() == 0)
1122 return;
1123
1124 using namespace std::chrono_literals;
1125
1126 bool const update = app_.cluster().update(
1127 app_.nodeIdentity().first,
1128 "",
1129 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1130 ? app_.getFeeTrack().getLocalFee()
1131 : 0,
1132 app_.timeKeeper().now());
1133
1134 if (!update)
1135 {
1136 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1137 setClusterTimer();
1138 return;
1139 }
1140
1141 protocol::TMCluster cluster;
1142 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1143 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1144 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1145 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1146 n.set_nodeload(node.getLoadFee());
1147 if (!node.name().empty())
1148 n.set_nodename(node.name());
1149 });
1150
1151 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1152 for (auto& item : gossip.items)
1153 {
1154 protocol::TMLoadSource& node = *cluster.add_loadsources();
1155 node.set_name(to_string(item.address));
1156 node.set_cost(item.balance);
1157 }
1158 app_.overlay().foreach(send_if(
1159 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1160 peer_in_cluster()));
1161 setClusterTimer();
1162}
1163
1164//------------------------------------------------------------------------------
1165
1166std::string
1167NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
1168 const
1169{
1170 if (mode == OperatingMode::FULL && admin)
1171 {
1172 auto const consensusMode = mConsensus.mode();
1173 if (consensusMode != ConsensusMode::wrongLedger)
1174 {
1175 if (consensusMode == ConsensusMode::proposing)
1176 return "proposing";
1177
1178 if (mConsensus.validating())
1179 return "validating";
1180 }
1181 }
1182
1183 return states_[static_cast<std::size_t>(mode)];
1184}
1185
1186void
1187NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1188{
1189 if (isNeedNetworkLedger())
1190 {
1191 // Nothing we can do if we've never been in sync
1192 return;
1193 }
1194
1195 // Enforce Network bar for batch txn
1196 if (iTrans->isFlag(tfInnerBatchTxn) &&
1197 m_ledgerMaster.getValidatedRules().enabled(featureBatch))
1198 {
1199 JLOG(m_journal.error())
1200 << "Submitted transaction invalid: tfInnerBatchTxn flag present.";
1201 return;
1202 }
1203
1204 // this is an asynchronous interface
1205 auto const trans = sterilize(*iTrans);
1206
1207 auto const txid = trans->getTransactionID();
1208 auto const flags = app_.getHashRouter().getFlags(txid);
1209
1210 if ((flags & SF_BAD) != 0)
1211 {
1212 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1213 return;
1214 }
1215
1216 try
1217 {
1218 auto const [validity, reason] = checkValidity(
1219 app_.getHashRouter(),
1220 *trans,
1221 m_ledgerMaster.getValidatedRules(),
1222 app_.config());
1223
1224 if (validity != Validity::Valid)
1225 {
1226 JLOG(m_journal.warn())
1227 << "Submitted transaction invalid: " << reason;
1228 return;
1229 }
1230 }
1231 catch (std::exception const& ex)
1232 {
1233 JLOG(m_journal.warn())
1234 << "Exception checking transaction " << txid << ": " << ex.what();
1235
1236 return;
1237 }
1238
1239 std::string reason;
1240
1241 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1242
1243 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1244 auto t = tx;
1245 processTransaction(t, false, false, FailHard::no);
1246 });
1247}
1248
1249bool
1250NetworkOPsImp::preProcessTransaction(std::shared_ptr<Transaction>& transaction)
1251{
1252 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1253
1254 if ((newFlags & SF_BAD) != 0)
1255 {
1256 // cached bad
1257 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1258 transaction->setStatus(INVALID);
1259 transaction->setResult(temBAD_SIGNATURE);
1260 return false;
1261 }
1262
1263 auto const view = m_ledgerMaster.getCurrentLedger();
1264
1265 // This function is called by several different parts of the codebase.
1266 // Under no circumstances will we ever accept an inner txn within a batch
1267 // txn from the network.
1268 auto const sttx = *transaction->getSTransaction();
1269 if (sttx.isFlag(tfInnerBatchTxn) && view->rules().enabled(featureBatch))
1270 {
1271 transaction->setStatus(INVALID);
1272 transaction->setResult(temINVALID_FLAG);
1273 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1274 return false;
1275 }
1276
1277 // NOTE eahennis - I think this check is redundant,
1278 // but I'm not 100% sure yet.
1279 // If so, only cost is looking up HashRouter flags.
1280 auto const [validity, reason] =
1281 checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config());
1282 XRPL_ASSERT(
1283 validity == Validity::Valid,
1284 "ripple::NetworkOPsImp::processTransaction : valid validity");
1285
1286 // Not concerned with local checks at this point.
1287 if (validity == Validity::SigBad)
1288 {
1289 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1290 transaction->setStatus(INVALID);
1291 transaction->setResult(temBAD_SIGNATURE);
1292 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1293 return false;
1294 }
1295
1296 // canonicalize can change our pointer
1297 app_.getMasterTransaction().canonicalize(&transaction);
1298
1299 return true;
1300}
1301
1302void
1303NetworkOPsImp::processTransaction(
1304 std::shared_ptr<Transaction>& transaction,
1305 bool bUnlimited,
1306 bool bLocal,
1307 FailHard failType)
1308{
1309 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1310
1311 // preProcessTransaction can change our pointer
1312 if (!preProcessTransaction(transaction))
1313 return;
1314
1315 if (bLocal)
1316 doTransactionSync(transaction, bUnlimited, failType);
1317 else
1318 doTransactionAsync(transaction, bUnlimited, failType);
1319}
1320
1321void
1322NetworkOPsImp::doTransactionAsync(
1323 std::shared_ptr<Transaction> transaction,
1324 bool bUnlimited,
1325 FailHard failType)
1326{
1327 std::lock_guard lock(mMutex);
1328
1329 if (transaction->getApplying())
1330 return;
1331
1332 mTransactions.push_back(
1333 TransactionStatus(transaction, bUnlimited, false, failType));
1334 transaction->setApplying();
1335
1336 if (mDispatchState == DispatchState::none)
1337 {
1338 if (m_job_queue.addJob(
1339 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1340 {
1341 mDispatchState = DispatchState::scheduled;
1342 }
1343 }
1344}
1345
1346void
1347NetworkOPsImp::doTransactionSync(
1348 std::shared_ptr<Transaction> transaction,
1349 bool bUnlimited,
1350 FailHard failType)
1351{
1352 std::unique_lock<std::mutex> lock(mMutex);
1353
1354 if (!transaction->getApplying())
1355 {
1356 mTransactions.push_back(
1357 TransactionStatus(transaction, bUnlimited, true, failType));
1358 transaction->setApplying();
1359 }
1360
1361 doTransactionSyncBatch(
1362 lock, [&transaction](std::unique_lock<std::mutex> const&) {
1363 return transaction->getApplying();
1364 });
1365}
1366
1367void
1368NetworkOPsImp::doTransactionSyncBatch(
1369 std::unique_lock<std::mutex>& lock,
1370 std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback)
1371{
1372 do
1373 {
1374 if (mDispatchState == DispatchState::running)
1375 {
1376 // A batch processing job is already running, so wait.
1377 mCond.wait(lock);
1378 }
1379 else
1380 {
1381 apply(lock);
1382
1383 if (mTransactions.size())
1384 {
1385 // More transactions need to be applied, but by another job.
1386 if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
1387 transactionBatch();
1388 }))
1389 {
1390 mDispatchState = DispatchState::scheduled;
1391 }
1392 }
1393 }
1394 } while (retryCallback(lock));
1395}
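// [Editorial note.] The loop above is what makes doTransactionSync()
// synchronous: the calling thread either runs apply() itself or sleeps on
// mCond while another batch runs, and it returns only once retryCallback
// reports that the caller's transaction is no longer marked as applying.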
1396
1397void
1398NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set)
1399{
1400 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXNSet");
1401 std::vector<std::shared_ptr<Transaction>> candidates;
1402 candidates.reserve(set.size());
1403 for (auto const& [_, tx] : set)
1404 {
1405 std::string reason;
1406 auto transaction = std::make_shared<Transaction>(tx, reason, app_);
1407
1408 if (transaction->getStatus() == INVALID)
1409 {
1410 if (!reason.empty())
1411 {
1412 JLOG(m_journal.trace())
1413 << "Exception checking transaction: " << reason;
1414 }
1415 app_.getHashRouter().setFlags(tx->getTransactionID(), SF_BAD);
1416 continue;
1417 }
1418
1419 // preProcessTransaction can change our pointer
1420 if (!preProcessTransaction(transaction))
1421 continue;
1422
1423 candidates.emplace_back(transaction);
1424 }
1425
1426 std::vector<TransactionStatus> transactions;
1427 transactions.reserve(candidates.size());
1428
1429 std::unique_lock lock(mMutex);
1430
1431 for (auto& transaction : candidates)
1432 {
1433 if (!transaction->getApplying())
1434 {
1435 transactions.emplace_back(transaction, false, false, FailHard::no);
1436 transaction->setApplying();
1437 }
1438 }
1439
1440 if (mTransactions.empty())
1441 mTransactions.swap(transactions);
1442 else
1443 {
1444 mTransactions.reserve(mTransactions.size() + transactions.size());
1445 for (auto& t : transactions)
1446 mTransactions.push_back(std::move(t));
1447 }
1448
1449 doTransactionSyncBatch(lock, [&](std::unique_lock<std::mutex> const&) {
1450 XRPL_ASSERT(
1451 lock.owns_lock(),
1452 "ripple::NetworkOPsImp::processTransactionSet has lock");
1453 return std::any_of(
1454 mTransactions.begin(), mTransactions.end(), [](auto const& t) {
1455 return t.transaction->getApplying();
1456 });
1457 });
1458}
1459
1460void
1461NetworkOPsImp::transactionBatch()
1462{
1463 std::unique_lock<std::mutex> lock(mMutex);
1464
1465 if (mDispatchState == DispatchState::running)
1466 return;
1467
1468 while (mTransactions.size())
1469 {
1470 apply(lock);
1471 }
1472}
1473
1474void
1475NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
1476{
1477 std::vector<TransactionStatus> submit_held;
1478 std::vector<TransactionStatus> transactions;
1479 mTransactions.swap(transactions);
1480 XRPL_ASSERT(
1481 !transactions.empty(),
1482 "ripple::NetworkOPsImp::apply : non-empty transactions");
1483 XRPL_ASSERT(
1484 mDispatchState != DispatchState::running,
1485 "ripple::NetworkOPsImp::apply : is not running");
1486
1487 mDispatchState = DispatchState::running;
1488
1489 batchLock.unlock();
1490
1491 {
1492 std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
1493 bool changed = false;
1494 {
1495 std::unique_lock ledgerLock{
1496 m_ledgerMaster.peekMutex(), std::defer_lock};
1497 std::lock(masterLock, ledgerLock);
1498
1499 app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
1500 for (TransactionStatus& e : transactions)
1501 {
1502 // we check before adding to the batch
1503 ApplyFlags flags = tapNONE;
1504 if (e.admin)
1505 flags |= tapUNLIMITED;
1506
1507 if (e.failType == FailHard::yes)
1508 flags |= tapFAIL_HARD;
1509
1510 auto const result = app_.getTxQ().apply(
1511 app_, view, e.transaction->getSTransaction(), flags, j);
1512 e.result = result.ter;
1513 e.applied = result.applied;
1514 changed = changed || result.applied;
1515 }
1516 return changed;
1517 });
1518 }
1519 if (changed)
1520 reportFeeChange();
1521
1522 std::optional<LedgerIndex> validatedLedgerIndex;
1523 if (auto const l = m_ledgerMaster.getValidatedLedger())
1524 validatedLedgerIndex = l->info().seq;
1525
1526 auto newOL = app_.openLedger().current();
1527 for (TransactionStatus& e : transactions)
1528 {
1529 e.transaction->clearSubmitResult();
1530
1531 if (e.applied)
1532 {
1533 pubProposedTransaction(
1534 newOL, e.transaction->getSTransaction(), e.result);
1535 e.transaction->setApplied();
1536 }
1537
1538 e.transaction->setResult(e.result);
1539
1540 if (isTemMalformed(e.result))
1541 app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);
1542
1543#ifdef DEBUG
1544 if (e.result != tesSUCCESS)
1545 {
1546 std::string token, human;
1547
1548 if (transResultInfo(e.result, token, human))
1549 {
1550 JLOG(m_journal.info())
1551 << "TransactionResult: " << token << ": " << human;
1552 }
1553 }
1554#endif
1555
1556 bool addLocal = e.local;
1557
1558 if (e.result == tesSUCCESS)
1559 {
1560 JLOG(m_journal.debug())
1561 << "Transaction is now included in open ledger";
1562 e.transaction->setStatus(INCLUDED);
1563
1564 // Pop as many "reasonable" transactions for this account as
1565 // possible. "Reasonable" means they have sequential sequence
1566 // numbers, or use tickets.
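                // [Editorial example, illustrative only.] If the applied
                // txn had sequence 5, held txns with sequences 6, 7, ...
                // (or ticketed txns) are popped, up to
                // maxPoppedTransactions, and queued into submit_held for
                // resubmission.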
1567 auto const& txCur = e.transaction->getSTransaction();
1568
1569 std::size_t count = 0;
1570 for (auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
1571 txNext && count < maxPoppedTransactions;
1572 txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
1573 {
1574 if (!batchLock.owns_lock())
1575 batchLock.lock();
1576 std::string reason;
1577 auto const trans = sterilize(*txNext);
1578 auto t = std::make_shared<Transaction>(trans, reason, app_);
1579 if (t->getApplying())
1580 break;
1581 submit_held.emplace_back(t, false, false, FailHard::no);
1582 t->setApplying();
1583 }
1584 if (batchLock.owns_lock())
1585 batchLock.unlock();
1586 }
1587 else if (e.result == tefPAST_SEQ)
1588 {
1589 // duplicate or conflict
1590 JLOG(m_journal.info()) << "Transaction is obsolete";
1591 e.transaction->setStatus(OBSOLETE);
1592 }
1593 else if (e.result == terQUEUED)
1594 {
1595 JLOG(m_journal.debug())
1596 << "Transaction is likely to claim a"
1597 << " fee, but is queued until fee drops";
1598
1599 e.transaction->setStatus(HELD);
1600 // Add to held transactions, because it could get
1601 // kicked out of the queue, and this will try to
1602 // put it back.
1603 m_ledgerMaster.addHeldTransaction(e.transaction);
1604 e.transaction->setQueued();
1605 e.transaction->setKept();
1606 }
1607 else if (
1608 isTerRetry(e.result) || isTelLocal(e.result) ||
1609 isTefFailure(e.result))
1610 {
1611 if (e.failType != FailHard::yes)
1612 {
1613 auto const lastLedgerSeq =
1614 e.transaction->getSTransaction()->at(
1615 ~sfLastLedgerSequence);
1616 auto const ledgersLeft = lastLedgerSeq
1617 ? *lastLedgerSeq -
1618 m_ledgerMaster.getCurrentLedgerIndex()
1619 : std::optional<LedgerIndex>{};
1620 // If any of these conditions are met, the transaction can
1621 // be held:
1622 // 1. It was submitted locally. (Note that this flag is only
1623 // true on the initial submission.)
1624 // 2. The transaction has a LastLedgerSequence, and the
1625 // LastLedgerSequence is fewer than LocalTxs::holdLedgers
1626 // (5) ledgers into the future. (Remember that an
1627 // unseated optional compares as less than all seated
1628 // values, so it has to be checked explicitly first.)
1629 // 3. The SF_HELD flag is not set on the txID. (setFlags
1630 // checks before setting. If the flag is set, it returns
1631 // false, which means it's been held once without one of
1632 // the other conditions, so don't hold it again. Time's
1633 // up!)
1634 //
1635 if (e.local ||
1636 (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
1637 app_.getHashRouter().setFlags(
1638 e.transaction->getID(), SF_HELD))
1639 {
1640 // transaction should be held
1641 JLOG(m_journal.debug())
1642 << "Transaction should be held: " << e.result;
1643 e.transaction->setStatus(HELD);
1644 m_ledgerMaster.addHeldTransaction(e.transaction);
1645 e.transaction->setKept();
1646 }
1647 else
1648 JLOG(m_journal.debug())
1649 << "Not holding transaction "
1650 << e.transaction->getID() << ": "
1651 << (e.local ? "local" : "network") << ", "
1652 << "result: " << e.result << " ledgers left: "
1653 << (ledgersLeft ? to_string(*ledgersLeft)
1654 : "unspecified");
1655 }
1656 }
1657 else
1658 {
1659 JLOG(m_journal.debug())
1660 << "Status other than success " << e.result;
1661 e.transaction->setStatus(INVALID);
1662 }
1663
1664 auto const enforceFailHard =
1665 e.failType == FailHard::yes && !isTesSuccess(e.result);
1666
1667 if (addLocal && !enforceFailHard)
1668 {
1669 m_localTX->push_back(
1670 m_ledgerMaster.getCurrentLedgerIndex(),
1671 e.transaction->getSTransaction());
1672 e.transaction->setKept();
1673 }
1674
1675 if ((e.applied ||
1676 ((mMode != OperatingMode::FULL) &&
1677 (e.failType != FailHard::yes) && e.local) ||
1678 (e.result == terQUEUED)) &&
1679 !enforceFailHard)
1680 {
1681 auto const toSkip =
1682 app_.getHashRouter().shouldRelay(e.transaction->getID());
1683 if (auto const sttx = *(e.transaction->getSTransaction());
1684 toSkip &&
1685 // Skip relaying if it's an inner batch txn and batch
1686 // feature is enabled
1687 !(sttx.isFlag(tfInnerBatchTxn) &&
1688 newOL->rules().enabled(featureBatch)))
1689 {
1690 protocol::TMTransaction tx;
1691 Serializer s;
1692
1693 sttx.add(s);
1694 tx.set_rawtransaction(s.data(), s.size());
1695 tx.set_status(protocol::tsCURRENT);
1696 tx.set_receivetimestamp(
1697 app_.timeKeeper().now().time_since_epoch().count());
1698 tx.set_deferred(e.result == terQUEUED);
1699 // FIXME: This should be when we received it
1700 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1701 e.transaction->setBroadcast();
1702 }
1703 }
1704
1705 if (validatedLedgerIndex)
1706 {
1707 auto [fee, accountSeq, availableSeq] =
1708 app_.getTxQ().getTxRequiredFeeAndSeq(
1709 *newOL, e.transaction->getSTransaction());
1710 e.transaction->setCurrentLedgerState(
1711 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1712 }
1713 }
1714 }
1715
1716 batchLock.lock();
1717
1718 for (TransactionStatus& e : transactions)
1719 e.transaction->clearApplying();
1720
1721 if (!submit_held.empty())
1722 {
1723 if (mTransactions.empty())
1724 mTransactions.swap(submit_held);
1725 else
1726 {
1727 mTransactions.reserve(mTransactions.size() + submit_held.size());
1728 for (auto& e : submit_held)
1729 mTransactions.push_back(std::move(e));
1730 }
1731 }
1732
1733 mCond.notify_all();
1734
1735 mDispatchState = DispatchState::none;
1736}
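// [Editorial summary of apply()'s locking, inferred from the code above.]
// mMutex (batchLock) is released while the master and ledger mutexes are
// held for the actual applies, reacquired briefly while popping held
// account txns, and reacquired at the end to clear the applying flags,
// merge submit_held into mTransactions, and wake doTransactionSync()
// waiters via mCond.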
1737
1738//
1739// Owner functions
1740//
1741
1742Json::Value
1743NetworkOPsImp::getOwnerInfo(
1744 std::shared_ptr<ReadView const> lpLedger,
1745 AccountID const& account)
1746{
1747 Json::Value jvObjects(Json::objectValue);
1748 auto root = keylet::ownerDir(account);
1749 auto sleNode = lpLedger->read(keylet::page(root));
1750 if (sleNode)
1751 {
1752 std::uint64_t uNodeDir;
1753
1754 do
1755 {
1756 for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1757 {
1758 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1759 XRPL_ASSERT(
1760 sleCur,
1761 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1762
1763 switch (sleCur->getType())
1764 {
1765 case ltOFFER:
1766 if (!jvObjects.isMember(jss::offers))
1767 jvObjects[jss::offers] =
1768 Json::Value(Json::arrayValue);
1769
1770 jvObjects[jss::offers].append(
1771 sleCur->getJson(JsonOptions::none));
1772 break;
1773
1774 case ltRIPPLE_STATE:
1775 if (!jvObjects.isMember(jss::ripple_lines))
1776 {
1777 jvObjects[jss::ripple_lines] =
1778 Json::Value(Json::arrayValue);
1779 }
1780
1781 jvObjects[jss::ripple_lines].append(
1782 sleCur->getJson(JsonOptions::none));
1783 break;
1784
1785 case ltACCOUNT_ROOT:
1786 case ltDIR_NODE:
1787 default:
1788 UNREACHABLE(
1789 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1790 "type");
1791 break;
1792 }
1793 }
1794
1795 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1796
1797 if (uNodeDir)
1798 {
1799 sleNode = lpLedger->read(keylet::page(root, uNodeDir));
1800 XRPL_ASSERT(
1801 sleNode,
1802 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1803 }
1804 } while (uNodeDir);
1805 }
1806
1807 return jvObjects;
1808}
1809
1810//
1811// Other
1812//
1813
1814inline bool
1815NetworkOPsImp::isBlocked()
1816{
1817 return isAmendmentBlocked() || isUNLBlocked();
1818}
1819
1820inline bool
1821NetworkOPsImp::isAmendmentBlocked()
1822{
1823 return amendmentBlocked_;
1824}
1825
1826void
1827NetworkOPsImp::setAmendmentBlocked()
1828{
1829 amendmentBlocked_ = true;
1830 setMode(OperatingMode::CONNECTED);
1831}
1832
1833inline bool
1834NetworkOPsImp::isAmendmentWarned()
1835{
1836 return !amendmentBlocked_ && amendmentWarned_;
1837}
1838
1839inline void
1840NetworkOPsImp::setAmendmentWarned()
1841{
1842 amendmentWarned_ = true;
1843}
1844
1845inline void
1846NetworkOPsImp::clearAmendmentWarned()
1847{
1848 amendmentWarned_ = false;
1849}
1850
1851inline bool
1852NetworkOPsImp::isUNLBlocked()
1853{
1854 return unlBlocked_;
1855}
1856
1857void
1858NetworkOPsImp::setUNLBlocked()
1859{
1860 unlBlocked_ = true;
1861 setMode(OperatingMode::CONNECTED);
1862}
1863
1864inline void
1865NetworkOPsImp::clearUNLBlocked()
1866{
1867 unlBlocked_ = false;
1868}
1869
1870bool
1871NetworkOPsImp::checkLastClosedLedger(
1872 Overlay::PeerSequence const& peerList,
1873 uint256& networkClosed)
1874{
1875 // Returns true if there's an *abnormal* ledger issue; a normal ledger
1876 // change while in TRACKING mode should return false. Do we have sufficient validations for
1877 // our last closed ledger? Or do sufficient nodes agree? And do we have no
1878 // better ledger available? If so, we are either tracking or full.
1879
1880 JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";
1881
1882 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1883
1884 if (!ourClosed)
1885 return false;
1886
1887 uint256 closedLedger = ourClosed->info().hash;
1888 uint256 prevClosedLedger = ourClosed->info().parentHash;
1889 JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
1890 JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;
1891
1892 //-------------------------------------------------------------------------
1893 // Determine preferred last closed ledger
1894
1895 auto& validations = app_.getValidations();
1896 JLOG(m_journal.debug())
1897 << "ValidationTrie " << Json::Compact(validations.getJsonTrie());
1898
1899 // Will rely on peer LCL if no trusted validations exist
1900 hash_map<uint256, std::uint32_t> peerCounts;
1901 peerCounts[closedLedger] = 0;
1902 if (mMode >= OperatingMode::TRACKING)
1903 peerCounts[closedLedger]++;
1904
1905 for (auto& peer : peerList)
1906 {
1907 uint256 peerLedger = peer->getClosedLedgerHash();
1908
1909 if (peerLedger.isNonZero())
1910 ++peerCounts[peerLedger];
1911 }
1912
1913 for (auto const& it : peerCounts)
1914 JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;
1915
1916 uint256 preferredLCL = validations.getPreferredLCL(
1917 RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
1918 m_ledgerMaster.getValidLedgerIndex(),
1919 peerCounts);
1920
1921 bool switchLedgers = preferredLCL != closedLedger;
1922 if (switchLedgers)
1923 closedLedger = preferredLCL;
1924 //-------------------------------------------------------------------------
1925 if (switchLedgers && (closedLedger == prevClosedLedger))
1926 {
1927 // don't switch to our own previous ledger
1928 JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
1929 networkClosed = ourClosed->info().hash;
1930 switchLedgers = false;
1931 }
1932 else
1933 networkClosed = closedLedger;
1934
1935 if (!switchLedgers)
1936 return false;
1937
1938 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1939
1940 if (!consensus)
1941 consensus = app_.getInboundLedgers().acquire(
1942 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1943
1944 if (consensus &&
1945 (!m_ledgerMaster.canBeCurrent(consensus) ||
1946 !m_ledgerMaster.isCompatible(
1947 *consensus, m_journal.debug(), "Not switching")))
1948 {
1949 // Don't switch to a ledger not on the validated chain
1950 // or with an invalid close time or sequence
1951 networkClosed = ourClosed->info().hash;
1952 return false;
1953 }
1954
1955 JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
1956 JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
1957 << getJson({*ourClosed, {}});
1958 JLOG(m_journal.info()) << "Net LCL " << closedLedger;
1959
1960 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1961 {
1962 setMode(OperatingMode::CONNECTED);
1963 }
1964
1965 if (consensus)
1966 {
1967 // FIXME: If this rewinds the ledger sequence, or has the same
1968 // sequence, we should update the status on any stored transactions
1969 // in the invalidated ledgers.
1970 switchLastClosedLedger(consensus);
1971 }
1972
1973 return true;
1974}
1975
1976void
1977NetworkOPsImp::switchLastClosedLedger(
1978 std::shared_ptr<Ledger const> const& newLCL)
1979{
1980 // set the newLCL as our last closed ledger -- this is abnormal code
1981 JLOG(m_journal.error())
1982 << "JUMP last closed ledger to " << newLCL->info().hash;
1983
1984 clearNeedNetworkLedger();
1985
1986 // Update fee computations.
1987 app_.getTxQ().processClosedLedger(app_, *newLCL, true);
1988
1989 // Caller must own master lock
1990 {
1991 // Apply tx in old open ledger to new
1992 // open ledger. Then apply local tx.
1993
1994 auto retries = m_localTX->getTxSet();
1995 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1996 std::optional<Rules> rules;
1997 if (lastVal)
1998 rules = makeRulesGivenLedger(*lastVal, app_.config().features);
1999 else
2000 rules.emplace(app_.config().features);
2001 app_.openLedger().accept(
2002 app_,
2003 *rules,
2004 newLCL,
2005 OrderedTxs({}),
2006 false,
2007 retries,
2008 tapNONE,
2009 "jump",
2010 [&](OpenView& view, beast::Journal j) {
2011 // Stuff the ledger with transactions from the queue.
2012 return app_.getTxQ().accept(app_, view);
2013 });
2014 }
2015
2016 m_ledgerMaster.switchLCL(newLCL);
2017
2018 protocol::TMStatusChange s;
2019 s.set_newevent(protocol::neSWITCHED_LEDGER);
2020 s.set_ledgerseq(newLCL->info().seq);
2021 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
2022 s.set_ledgerhashprevious(
2023 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
2024 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
2025
2026 app_.overlay().foreach(
2027 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
2028}
2029
2030bool
2031NetworkOPsImp::beginConsensus(
2032 uint256 const& networkClosed,
2033 std::unique_ptr<std::stringstream> const& clog)
2034{
2035 XRPL_ASSERT(
2036 networkClosed.isNonZero(),
2037 "ripple::NetworkOPsImp::beginConsensus : nonzero input");
2038
2039 auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
2040
2041 JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
2042 << " with LCL " << closingInfo.parentHash;
2043
2044 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
2045
2046 if (!prevLedger)
2047 {
2048 // this shouldn't happen unless we jump ledgers
2049 if (mMode == OperatingMode::FULL)
2050 {
2051 JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
2052 setMode(OperatingMode::TRACKING);
2053 CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
2054 }
2055
2056 CLOG(clog) << "beginConsensus no previous ledger. ";
2057 return false;
2058 }
2059
2060 XRPL_ASSERT(
2061 prevLedger->info().hash == closingInfo.parentHash,
2062 "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
2063 "parent");
2064 XRPL_ASSERT(
2065 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
2066 "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
2067 "hash");
2068
2069 if (prevLedger->rules().enabled(featureNegativeUNL))
2070 app_.validators().setNegativeUNL(prevLedger->negativeUNL());
2071 TrustChanges const changes = app_.validators().updateTrusted(
2072 app_.getValidations().getCurrentNodeIDs(),
2073 closingInfo.parentCloseTime,
2074 *this,
2075 app_.overlay(),
2076 app_.getHashRouter());
2077
2078 if (!changes.added.empty() || !changes.removed.empty())
2079 {
2080 app_.getValidations().trustChanged(changes.added, changes.removed);
2081 // Update the AmendmentTable so it tracks the current validators.
2082 app_.getAmendmentTable().trustChanged(
2083 app_.validators().getQuorumKeys().second);
2084 }
2085
2086 mConsensus.startRound(
2087 app_.timeKeeper().closeTime(),
2088 networkClosed,
2089 prevLedger,
2090 changes.removed,
2091 changes.added,
2092 clog);
2093
2094 ConsensusPhase const currPhase = mConsensus.phase();
2095 if (mLastConsensusPhase != currPhase)
2096 {
2097 reportConsensusStateChange(currPhase);
2098 mLastConsensusPhase = currPhase;
2099 }
2100
2101 JLOG(m_journal.debug()) << "Initiating consensus engine";
2102 return true;
2103}
2104
2105bool
2106NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
2107{
2108 auto const& peerKey = peerPos.publicKey();
2109 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
2110 {
2111 // Could indicate an operator misconfiguration where two nodes are
2112 // running with the same validator key configured, so this isn't fatal,
2113 // and it doesn't necessarily indicate peer misbehavior. But since this
2114 // is a trusted message, it could be a very big deal. Either way, we
2115 // don't want to relay the proposal. Note that the byzantine behavior
2116 // detection in handleNewValidation will notify other peers.
2117 //
2118 // Another, innocuous explanation is unusual message routing and delays,
2119 // causing this node to receive its own messages back.
2120 JLOG(m_journal.error())
2121 << "Received a proposal signed by MY KEY from a peer. This may "
2122 "indicate a misconfiguration where another node has the same "
2123 "validator key, or may be caused by unusual message routing and "
2124 "delays.";
2125 return false;
2126 }
2127
2128 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
2129}
2130
2131void
2132NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
2133{
2134 // We now have an additional transaction set
2135 // either created locally during the consensus process
2136 // or acquired from a peer
2137
2138 // Inform peers we have this set
2139 protocol::TMHaveTransactionSet msg;
2140 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
2141 msg.set_status(protocol::tsHAVE);
2142 app_.overlay().foreach(
2143 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
2144
2145 // We acquired it because consensus asked us to
2146 if (fromAcquire)
2147 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
2148}
2149
2150void
2151NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
2152{
2153 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
2154
2155 for (auto const& it : app_.overlay().getActivePeers())
2156 {
2157 if (it && (it->getClosedLedgerHash() == deadLedger))
2158 {
2159 JLOG(m_journal.trace()) << "Killing obsolete peer status";
2160 it->cycleStatus();
2161 }
2162 }
2163
2164 uint256 networkClosed;
2165 bool ledgerChange =
2166 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
2167
2168 if (networkClosed.isZero())
2169 {
2170 CLOG(clog) << "endConsensus last closed ledger is zero. ";
2171 return;
2172 }
2173
2174 // WRITEME: Unless we are in FULL and in the process of doing a consensus,
2175 // we must count how many nodes share our LCL, how many nodes disagree with
2176 // our LCL, and how many validations our LCL has. We also want to check
2177 // timing to make sure there shouldn't be a newer LCL. We need this
2178 // information to do the next three tests.
2179
2180 if (((mMode == OperatingMode::CONNECTED) ||
2181 (mMode == OperatingMode::SYNCING)) &&
2182 !ledgerChange)
2183 {
2184 // Count number of peers that agree with us and UNL nodes whose
2185 // validations we have for LCL. If the ledger is good enough, go to
2186 // TRACKING - TODO
2187 if (!needNetworkLedger_)
2188 setMode(OperatingMode::TRACKING);
2189 }
2190
2191 if (((mMode == OperatingMode::CONNECTED) ||
2192 (mMode == OperatingMode::TRACKING)) &&
2193 !ledgerChange)
2194 {
2195 // check if the ledger is good enough to go to FULL
2196 // Note: Do not go to FULL if we don't have the previous ledger
2197 // check if the ledger is bad enough to go to CONNECTED -- TODO
2198 auto current = m_ledgerMaster.getCurrentLedger();
2199 if (app_.timeKeeper().now() < (current->info().parentCloseTime +
2200 2 * current->info().closeTimeResolution))
2201 {
2202 setMode(OperatingMode::FULL);
2203 }
2204 }
2205
2206 beginConsensus(networkClosed, clog);
2207}
2208
2209void
2210NetworkOPsImp::consensusViewChange()
2211{
2212 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2213 {
2214 setMode(OperatingMode::CONNECTED);
2215 }
2216}
2217
2218void
2219NetworkOPsImp::pubManifest(Manifest const& mo)
2220{
2221 // VFALCO consider std::shared_mutex
2222 std::lock_guard sl(mSubLock);
2223
2224 if (!mStreamMaps[sManifests].empty())
2225 {
2226 Json::Value jvObj(Json::objectValue);
2227
2228 jvObj[jss::type] = "manifestReceived";
2229 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
2230 if (mo.signingKey)
2231 jvObj[jss::signing_key] =
2232 toBase58(TokenType::NodePublic, *mo.signingKey);
2233 jvObj[jss::seq] = Json::UInt(mo.sequence);
2234 if (auto sig = mo.getSignature())
2235 jvObj[jss::signature] = strHex(*sig);
2236 jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
2237 if (!mo.domain.empty())
2238 jvObj[jss::domain] = mo.domain;
2239 jvObj[jss::manifest] = strHex(mo.serialized);
2240
2241 for (auto i = mStreamMaps[sManifests].begin();
2242 i != mStreamMaps[sManifests].end();)
2243 {
2244 if (auto p = i->second.lock())
2245 {
2246 p->send(jvObj, true);
2247 ++i;
2248 }
2249 else
2250 {
2251 i = mStreamMaps[sManifests].erase(i);
2252 }
2253 }
2254 }
2255}
2256
2257NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
2258 XRPAmount fee,
2259 TxQ::Metrics&& escalationMetrics,
2260 LoadFeeTrack const& loadFeeTrack)
2261 : loadFactorServer{loadFeeTrack.getLoadFactor()}
2262 , loadBaseServer{loadFeeTrack.getLoadBase()}
2263 , baseFee{fee}
2264 , em{std::move(escalationMetrics)}
2265{
2266}
2267
2268bool
2269NetworkOPsImp::ServerFeeSummary::operator!=(
2270 NetworkOPsImp::ServerFeeSummary const& b) const
2271{
2272 if (loadFactorServer != b.loadFactorServer ||
2273 loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
2274 em.has_value() != b.em.has_value())
2275 return true;
2276
2277 if (em && b.em)
2278 {
2279 return (
2280 em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
2281 em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
2282 em->referenceFeeLevel != b.em->referenceFeeLevel);
2283 }
2284
2285 return false;
2286}
2287
2288// Need to cap uint64 to uint32 due to JSON limitations
2289static std::uint32_t
2290trunc32(std::uint64_t v)
2291{
2292 constexpr std::uint64_t max32 = std::numeric_limits<std::uint32_t>::max();
2293
2294 return std::min(max32, v);
2295};
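// [Editorial example.] trunc32(0x1'0000'0000ull) == 0xFFFFFFFF, while any
// value that already fits in 32 bits passes through unchanged.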
2296
2297void
2298NetworkOPsImp::pubServer()
2299{
2300 // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
2301 // list into a local array while holding the lock then release
2302 // the lock and call send on everyone.
2303 //
2304 std::lock_guard sl(mSubLock);
2305
2306 if (!mStreamMaps[sServer].empty())
2307 {
2308 Json::Value jvObj(Json::objectValue);
2309
2310 ServerFeeSummary f{
2311 app_.openLedger().current()->fees().base,
2312 app_.getTxQ().getMetrics(*app_.openLedger().current()),
2313 app_.getFeeTrack()};
2314
2315 jvObj[jss::type] = "serverStatus";
2316 jvObj[jss::server_status] = strOperatingMode();
2317 jvObj[jss::load_base] = f.loadBaseServer;
2318 jvObj[jss::load_factor_server] = f.loadFactorServer;
2319 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2320
2321 if (f.em)
2322 {
2323 auto const loadFactor = std::max(
2324 safe_cast<std::uint64_t>(f.loadFactorServer),
2325 mulDiv(
2326 f.em->openLedgerFeeLevel,
2327 f.loadBaseServer,
2328 f.em->referenceFeeLevel)
2329 .value_or(ripple::muldiv_max));
2330
2331 jvObj[jss::load_factor] = trunc32(loadFactor);
2332 jvObj[jss::load_factor_fee_escalation] =
2333 f.em->openLedgerFeeLevel.jsonClipped();
2334 jvObj[jss::load_factor_fee_queue] =
2335 f.em->minProcessingFeeLevel.jsonClipped();
2336 jvObj[jss::load_factor_fee_reference] =
2337 f.em->referenceFeeLevel.jsonClipped();
2338 }
2339 else
2340 jvObj[jss::load_factor] = f.loadFactorServer;
2341
2342 mLastFeeSummary = f;
2343
2344 for (auto i = mStreamMaps[sServer].begin();
2345 i != mStreamMaps[sServer].end();)
2346 {
2347 InfoSub::pointer p = i->second.lock();
2348
2349 // VFALCO TODO research the possibility of using thread queues and
2350 // linearizing the deletion of subscribers with the
2351 // sending of JSON data.
2352 if (p)
2353 {
2354 p->send(jvObj, true);
2355 ++i;
2356 }
2357 else
2358 {
2359 i = mStreamMaps[sServer].erase(i);
2360 }
2361 }
2362 }
2363}
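// Worked example (hypothetical values): with loadBaseServer == 256,
// openLedgerFeeLevel == 1280 and referenceFeeLevel == 256, mulDiv yields
// 1280 * 256 / 256 == 1280, so subscribers see
// load_factor == max(loadFactorServer, 1280), i.e. the open ledger
// currently demands about 5x the reference fee level.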
2364
2365void
2366NetworkOPsImp::pubConsensus(ConsensusPhase phase)
2367{
2368 std::lock_guard sl(mSubLock);
2369
2370 auto& streamMap = mStreamMaps[sConsensusPhase];
2371 if (!streamMap.empty())
2372 {
2373 Json::Value jvObj(Json::objectValue);
2374 jvObj[jss::type] = "consensusPhase";
2375 jvObj[jss::consensus] = to_string(phase);
2376
2377 for (auto i = streamMap.begin(); i != streamMap.end();)
2378 {
2379 if (auto p = i->second.lock())
2380 {
2381 p->send(jvObj, true);
2382 ++i;
2383 }
2384 else
2385 {
2386 i = streamMap.erase(i);
2387 }
2388 }
2389 }
2390}
2391
2392void
2393NetworkOPsImp::pubValidation(std::shared_ptr<STValidation> const& val)
2394{
2395 // VFALCO consider std::shared_mutex
2396 std::lock_guard sl(mSubLock);
2397
2398 if (!mStreamMaps[sValidations].empty())
2399 {
2400 Json::Value jvObj(Json::objectValue);
2401
2402 auto const signerPublic = val->getSignerPublic();
2403
2404 jvObj[jss::type] = "validationReceived";
2405 jvObj[jss::validation_public_key] =
2406 toBase58(TokenType::NodePublic, signerPublic);
2407 jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
2408 jvObj[jss::signature] = strHex(val->getSignature());
2409 jvObj[jss::full] = val->isFull();
2410 jvObj[jss::flags] = val->getFlags();
2411 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2412 jvObj[jss::data] = strHex(val->getSerializer().slice());
2413
2414 if (auto version = (*val)[~sfServerVersion])
2415 jvObj[jss::server_version] = std::to_string(*version);
2416
2417 if (auto cookie = (*val)[~sfCookie])
2418 jvObj[jss::cookie] = std::to_string(*cookie);
2419
2420 if (auto hash = (*val)[~sfValidatedHash])
2421 jvObj[jss::validated_hash] = strHex(*hash);
2422
2423 auto const masterKey =
2424 app_.validatorManifests().getMasterKey(signerPublic);
2425
2426 if (masterKey != signerPublic)
2427 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
2428
2429 // NOTE *seq is a number, but old API versions used a string. We replace
2430 // the number with a string using MultiApiJson near the end of this function
2431 if (auto const seq = (*val)[~sfLedgerSequence])
2432 jvObj[jss::ledger_index] = *seq;
2433
2434 if (val->isFieldPresent(sfAmendments))
2435 {
2436 jvObj[jss::amendments] = Json::Value(Json::arrayValue);
2437 for (auto const& amendment : val->getFieldV256(sfAmendments))
2438 jvObj[jss::amendments].append(to_string(amendment));
2439 }
2440
2441 if (auto const closeTime = (*val)[~sfCloseTime])
2442 jvObj[jss::close_time] = *closeTime;
2443
2444 if (auto const loadFee = (*val)[~sfLoadFee])
2445 jvObj[jss::load_fee] = *loadFee;
2446
2447 if (auto const baseFee = val->at(~sfBaseFee))
2448 jvObj[jss::base_fee] = static_cast<double>(*baseFee);
2449
2450 if (auto const reserveBase = val->at(~sfReserveBase))
2451 jvObj[jss::reserve_base] = *reserveBase;
2452
2453 if (auto const reserveInc = val->at(~sfReserveIncrement))
2454 jvObj[jss::reserve_inc] = *reserveInc;
2455
2456 // (The ~ operator converts the Proxy to a std::optional, which
2457 // simplifies later operations)
2458 if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2459 baseFeeXRP && baseFeeXRP->native())
2460 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2461
2462 if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2463 reserveBaseXRP && reserveBaseXRP->native())
2464 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2465
2466 if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2467 reserveIncXRP && reserveIncXRP->native())
2468 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2469
2470 // NOTE Use MultiApiJson to publish two slightly different JSON objects
2471 // for consumers supporting different API versions
2472 MultiApiJson multiObj{jvObj};
2473 multiObj.visit(
2474 RPC::apiVersion<1>, //
2475 [](Json::Value& jvTx) {
2476 // Type conversion for older API versions to string
2477 if (jvTx.isMember(jss::ledger_index))
2478 {
2479 jvTx[jss::ledger_index] =
2480 std::to_string(jvTx[jss::ledger_index].asUInt());
2481 }
2482 });
2483
2484 for (auto i = mStreamMaps[sValidations].begin();
2485 i != mStreamMaps[sValidations].end();)
2486 {
2487 if (auto p = i->second.lock())
2488 {
2489 multiObj.visit(
2490 p->getApiVersion(), //
2491 [&](Json::Value const& jv) { p->send(jv, true); });
2492 ++i;
2493 }
2494 else
2495 {
2496 i = mStreamMaps[sValidations].erase(i);
2497 }
2498 }
2499 }
2500}
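// NOTE: the MultiApiJson split above means an API v1 subscriber receives
// ledger_index as a string (e.g. "12345") while v2+ subscribers receive
// it as a number (e.g. 12345); every other field is shared verbatim.
// (Sequence value hypothetical.)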
2501
2502void
2503NetworkOPsImp::pubPeerStatus(std::function<Json::Value(void)> const& func)
2504{
2505 std::lock_guard sl(mSubLock);
2506
2507 if (!mStreamMaps[sPeerStatus].empty())
2508 {
2509 Json::Value jvObj(func());
2510
2511 jvObj[jss::type] = "peerStatusChange";
2512
2513 for (auto i = mStreamMaps[sPeerStatus].begin();
2514 i != mStreamMaps[sPeerStatus].end();)
2515 {
2516 InfoSub::pointer p = i->second.lock();
2517
2518 if (p)
2519 {
2520 p->send(jvObj, true);
2521 ++i;
2522 }
2523 else
2524 {
2525 i = mStreamMaps[sPeerStatus].erase(i);
2526 }
2527 }
2528 }
2529}
2530
2531void
2532NetworkOPsImp::setMode(OperatingMode om)
2533{
2534 using namespace std::chrono_literals;
2535 if (om == OperatingMode::CONNECTED)
2536 {
2537 if (app_.getLedgerMaster().getValidatedLedgerAge() < 1min)
2538 om = OperatingMode::SYNCING;
2539 }
2540 else if (om == OperatingMode::SYNCING)
2541 {
2542 if (app_.getLedgerMaster().getValidatedLedgerAge() >= 1min)
2543 om = OperatingMode::CONNECTED;
2544 }
2545
2546 if ((om > OperatingMode::CONNECTED) && isBlocked())
2547 om = OperatingMode::CONNECTED;
2548
2549 if (mMode == om)
2550 return;
2551
2552 mMode = om;
2553
2554 accounting_.mode(om);
2555
2556 JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
2557 pubServer();
2558}
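// NOTE: the two age checks above act as hysteresis around the validated
// ledger age: a request for CONNECTED is promoted to SYNCING while the
// validated ledger is fresh (< 1min old), and a request for SYNCING is
// demoted back to CONNECTED once it has gone stale (>= 1min).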
2559
2560bool
2561NetworkOPsImp::recvValidation(
2562 std::shared_ptr<STValidation> const& val,
2563 std::string const& source)
2564{
2565 JLOG(m_journal.trace())
2566 << "recvValidation " << val->getLedgerHash() << " from " << source;
2567
2568 std::unique_lock lock(validationsMutex_);
2569 BypassAccept bypassAccept = BypassAccept::no;
2570 try
2571 {
2572 if (pendingValidations_.contains(val->getLedgerHash()))
2573 bypassAccept = BypassAccept::yes;
2574 else
2575 pendingValidations_.insert(val->getLedgerHash());
2576 scope_unlock unlock(lock);
2577 handleNewValidation(app_, val, source, bypassAccept, m_journal);
2578 }
2579 catch (std::exception const& e)
2580 {
2581 JLOG(m_journal.warn())
2582 << "Exception thrown for handling new validation "
2583 << val->getLedgerHash() << ": " << e.what();
2584 }
2585 catch (...)
2586 {
2587 JLOG(m_journal.warn())
2588 << "Unknown exception thrown for handling new validation "
2589 << val->getLedgerHash();
2590 }
2591 if (bypassAccept == BypassAccept::no)
2592 {
2593 pendingValidations_.erase(val->getLedgerHash());
2594 }
2595 lock.unlock();
2596
2597 pubValidation(val);
2598
2599 JLOG(m_journal.debug()) << [this, &val]() -> auto {
2600 std::stringstream ss;
2601 ss << "VALIDATION: " << val->render() << " master_key: ";
2602 auto master = app_.validators().getTrustedKey(val->getSignerPublic());
2603 if (master)
2604 {
2605 ss << toBase58(TokenType::NodePublic, *master);
2606 }
2607 else
2608 {
2609 ss << "none";
2610 }
2611 return ss.str();
2612 }();
2613
2614 // We will always relay trusted validations; if configured, we will
2615 // also relay all untrusted validations.
2616 return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2617}
2618
2619Json::Value
2620NetworkOPsImp::getConsensusInfo()
2621{
2622 return mConsensus.getJson(true);
2623}
2624
2625Json::Value
2626NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2627{
2628 Json::Value info = Json::objectValue;
2629
2630 // System-level warnings
2631 {
2632 Json::Value warnings{Json::arrayValue};
2633 if (isAmendmentBlocked())
2634 {
2635 Json::Value& w = warnings.append(Json::objectValue);
2636 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2637 w[jss::message] =
2638 "This server is amendment blocked, and must be updated to be "
2639 "able to stay in sync with the network.";
2640 }
2641 if (isUNLBlocked())
2642 {
2643 Json::Value& w = warnings.append(Json::objectValue);
2644 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2645 w[jss::message] =
2646 "This server has an expired validator list. validators.txt "
2647 "may be incorrectly configured or some [validator_list_sites] "
2648 "may be unreachable.";
2649 }
2650 if (admin && isAmendmentWarned())
2651 {
2652 Json::Value& w = warnings.append(Json::objectValue);
2653 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2654 w[jss::message] =
2655 "One or more unsupported amendments have reached majority. "
2656 "Upgrade to the latest version before they are activated "
2657 "to avoid being amendment blocked.";
2658 if (auto const expected =
2659 app_.getAmendmentTable().firstUnsupportedExpected())
2660 {
2661 auto& d = w[jss::details] = Json::objectValue;
2662 d[jss::expected_date] = expected->time_since_epoch().count();
2663 d[jss::expected_date_UTC] = to_string(*expected);
2664 }
2665 }
2666
2667 if (warnings.size())
2668 info[jss::warnings] = std::move(warnings);
2669 }
2670
2671 // hostid: unique string describing the machine
2672 if (human)
2673 info[jss::hostid] = getHostId(admin);
2674
2675 // domain: if configured with a domain, report it:
2676 if (!app_.config().SERVER_DOMAIN.empty())
2677 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2678
2679 info[jss::build_version] = BuildInfo::getVersionString();
2680
2681 info[jss::server_state] = strOperatingMode(admin);
2682
2683 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2684 std::chrono::system_clock::now()));
2685
2686 if (needNetworkLedger_)
2687 info[jss::network_ledger] = "waiting";
2688
2689 info[jss::validation_quorum] =
2690 static_cast<Json::UInt>(app_.validators().quorum());
2691
2692 if (admin)
2693 {
2694 switch (app_.config().NODE_SIZE)
2695 {
2696 case 0:
2697 info[jss::node_size] = "tiny";
2698 break;
2699 case 1:
2700 info[jss::node_size] = "small";
2701 break;
2702 case 2:
2703 info[jss::node_size] = "medium";
2704 break;
2705 case 3:
2706 info[jss::node_size] = "large";
2707 break;
2708 case 4:
2709 info[jss::node_size] = "huge";
2710 break;
2711 }
2712
2713 auto when = app_.validators().expires();
2714
2715 if (!human)
2716 {
2717 if (when)
2718 info[jss::validator_list_expires] =
2719 safe_cast<Json::UInt>(when->time_since_epoch().count());
2720 else
2721 info[jss::validator_list_expires] = 0;
2722 }
2723 else
2724 {
2725 auto& x = (info[jss::validator_list] = Json::objectValue);
2726
2727 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2728
2729 if (when)
2730 {
2731 if (*when == TimeKeeper::time_point::max())
2732 {
2733 x[jss::expiration] = "never";
2734 x[jss::status] = "active";
2735 }
2736 else
2737 {
2738 x[jss::expiration] = to_string(*when);
2739
2740 if (*when > app_.timeKeeper().now())
2741 x[jss::status] = "active";
2742 else
2743 x[jss::status] = "expired";
2744 }
2745 }
2746 else
2747 {
2748 x[jss::status] = "unknown";
2749 x[jss::expiration] = "unknown";
2750 }
2751 }
2752
2753#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2754 {
2755 auto& x = (info[jss::git] = Json::objectValue);
2756#ifdef GIT_COMMIT_HASH
2757 x[jss::hash] = GIT_COMMIT_HASH;
2758#endif
2759#ifdef GIT_BRANCH
2760 x[jss::branch] = GIT_BRANCH;
2761#endif
2762 }
2763#endif
2764 }
2765 info[jss::io_latency_ms] =
2766 static_cast<Json::UInt>(app_.getIOLatency().count());
2767
2768 if (admin)
2769 {
2770 if (auto const localPubKey = app_.validators().localPublicKey();
2771 localPubKey && app_.getValidationPublicKey())
2772 {
2773 info[jss::pubkey_validator] =
2774 toBase58(TokenType::NodePublic, localPubKey.value());
2775 }
2776 else
2777 {
2778 info[jss::pubkey_validator] = "none";
2779 }
2780 }
2781
2782 if (counters)
2783 {
2784 info[jss::counters] = app_.getPerfLog().countersJson();
2785
2786 Json::Value nodestore(Json::objectValue);
2787 app_.getNodeStore().getCountsJson(nodestore);
2788 info[jss::counters][jss::nodestore] = nodestore;
2789 info[jss::current_activities] = app_.getPerfLog().currentJson();
2790 }
2791
2792 info[jss::pubkey_node] =
2793 toBase58(TokenType::NodePublic, app_.nodeIdentity().first);
2794
2795 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2796
2797 if (amendmentBlocked_)
2798 info[jss::amendment_blocked] = true;
2799
2800 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2801
2802 if (fp != 0)
2803 info[jss::fetch_pack] = Json::UInt(fp);
2804
2805 info[jss::peers] = Json::UInt(app_.overlay().size());
2806
2807 Json::Value lastClose = Json::objectValue;
2808 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2809
2810 if (human)
2811 {
2812 lastClose[jss::converge_time_s] =
2813 std::chrono::duration<double>{mConsensus.prevRoundTime()}.count();
2814 }
2815 else
2816 {
2817 lastClose[jss::converge_time] =
2818 Json::Int(mConsensus.prevRoundTime().count());
2819 }
2820
2821 info[jss::last_close] = lastClose;
2822
2823 // info[jss::consensus] = mConsensus.getJson();
2824
2825 if (admin)
2826 info[jss::load] = m_job_queue.getJson();
2827
2828 if (auto const netid = app_.overlay().networkID())
2829 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2830
2831 auto const escalationMetrics =
2832 app_.getTxQ().getMetrics(*app_.openLedger().current());
2833
2834 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2835 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2836 /* Scale the escalated fee level to unitless "load factor".
2837 In practice, this just strips the units, but it will continue
2838 to work correctly if either base value ever changes. */
2839 auto const loadFactorFeeEscalation =
2840 mulDiv(
2841 escalationMetrics.openLedgerFeeLevel,
2842 loadBaseServer,
2843 escalationMetrics.referenceFeeLevel)
2844 .value_or(ripple::muldiv_max);
2845
2846 auto const loadFactor = std::max(
2847 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2848
2849 if (!human)
2850 {
2851 info[jss::load_base] = loadBaseServer;
2852 info[jss::load_factor] = trunc32(loadFactor);
2853 info[jss::load_factor_server] = loadFactorServer;
2854
2855 /* Json::Value doesn't support uint64, so clamp to max
2856 uint32 value. This is mostly theoretical, since there
2857 probably isn't enough extant XRP to drive the factor
2858 that high.
2859 */
2860 info[jss::load_factor_fee_escalation] =
2861 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2862 info[jss::load_factor_fee_queue] =
2863 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2864 info[jss::load_factor_fee_reference] =
2865 escalationMetrics.referenceFeeLevel.jsonClipped();
2866 }
2867 else
2868 {
2869 info[jss::load_factor] =
2870 static_cast<double>(loadFactor) / loadBaseServer;
2871
2872 if (loadFactorServer != loadFactor)
2873 info[jss::load_factor_server] =
2874 static_cast<double>(loadFactorServer) / loadBaseServer;
2875
2876 if (admin)
2877 {
2878 auto fee = app_.getFeeTrack().getLocalFee();
2879 if (fee != loadBaseServer)
2880 info[jss::load_factor_local] =
2881 static_cast<double>(fee) / loadBaseServer;
2882 fee = app_.getFeeTrack().getRemoteFee();
2883 if (fee != loadBaseServer)
2884 info[jss::load_factor_net] =
2885 static_cast<double>(fee) / loadBaseServer;
2886 fee = app_.getFeeTrack().getClusterFee();
2887 if (fee != loadBaseServer)
2888 info[jss::load_factor_cluster] =
2889 static_cast<double>(fee) / loadBaseServer;
2890 }
2891 if (escalationMetrics.openLedgerFeeLevel !=
2892 escalationMetrics.referenceFeeLevel &&
2893 (admin || loadFactorFeeEscalation != loadFactor))
2894 info[jss::load_factor_fee_escalation] =
2895 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2896 escalationMetrics.referenceFeeLevel);
2897 if (escalationMetrics.minProcessingFeeLevel !=
2898 escalationMetrics.referenceFeeLevel)
2899 info[jss::load_factor_fee_queue] =
2900 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2901 escalationMetrics.referenceFeeLevel);
2902 }
2903
2904 bool valid = false;
2905 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2906
2907 if (lpClosed)
2908 valid = true;
2909 else
2910 lpClosed = m_ledgerMaster.getClosedLedger();
2911
2912 if (lpClosed)
2913 {
2914 XRPAmount const baseFee = lpClosed->fees().base;
2915 Json::Value l(Json::objectValue);
2916 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2917 l[jss::hash] = to_string(lpClosed->info().hash);
2918
2919 if (!human)
2920 {
2921 l[jss::base_fee] = baseFee.jsonClipped();
2922 l[jss::reserve_base] =
2923 lpClosed->fees().accountReserve(0).jsonClipped();
2924 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2925 l[jss::close_time] = Json::Value::UInt(
2926 lpClosed->info().closeTime.time_since_epoch().count());
2927 }
2928 else
2929 {
2930 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2931 l[jss::reserve_base_xrp] =
2932 lpClosed->fees().accountReserve(0).decimalXRP();
2933 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2934
2935 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2936 std::abs(closeOffset.count()) >= 60)
2937 l[jss::close_time_offset] =
2938 static_cast<std::uint32_t>(closeOffset.count());
2939
2940 constexpr std::chrono::seconds highAgeThreshold{1000000};
2941 if (m_ledgerMaster.haveValidated())
2942 {
2943 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2944 l[jss::age] =
2945 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2946 }
2947 else
2948 {
2949 auto lCloseTime = lpClosed->info().closeTime;
2950 auto closeTime = app_.timeKeeper().closeTime();
2951 if (lCloseTime <= closeTime)
2952 {
2953 using namespace std::chrono_literals;
2954 auto age = closeTime - lCloseTime;
2955 l[jss::age] =
2956 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2957 }
2958 }
2959 }
2960
2961 if (valid)
2962 info[jss::validated_ledger] = l;
2963 else
2964 info[jss::closed_ledger] = l;
2965
2966 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2967 if (!lpPublished)
2968 info[jss::published_ledger] = "none";
2969 else if (lpPublished->info().seq != lpClosed->info().seq)
2970 info[jss::published_ledger] = lpPublished->info().seq;
2971 }
2972
2973 accounting_.json(info);
2974 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2975 info[jss::jq_trans_overflow] =
2976 std::to_string(app_.overlay().getJqTransOverflow());
2977 info[jss::peer_disconnects] =
2978 std::to_string(app_.overlay().getPeerDisconnect());
2979 info[jss::peer_disconnects_resources] =
2980 std::to_string(app_.overlay().getPeerDisconnectCharges());
2981
2982 // This array must be sorted in increasing order.
2983 static constexpr std::array<std::string_view, 7> protocols{
2984 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2985 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2986 {
2987 Json::Value ports{Json::arrayValue};
2988 for (auto const& port : app_.getServerHandler().setup().ports)
2989 {
2990 // Don't publish admin ports for non-admin users
2991 if (!admin &&
2992 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2993 port.admin_user.empty() && port.admin_password.empty()))
2994 continue;
2995 std::vector<std::string> proto;
2996 std::set_intersection(
2997 std::begin(port.protocol),
2998 std::end(port.protocol),
2999 std::begin(protocols),
3000 std::end(protocols),
3001 std::back_inserter(proto));
3002 if (!proto.empty())
3003 {
3004 auto& jv = ports.append(Json::Value(Json::objectValue));
3005 jv[jss::port] = std::to_string(port.port);
3006 jv[jss::protocol] = Json::Value{Json::arrayValue};
3007 for (auto const& p : proto)
3008 jv[jss::protocol].append(p);
3009 }
3010 }
3011
3012 if (app_.config().exists(SECTION_PORT_GRPC))
3013 {
3014 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
3015 auto const optPort = grpcSection.get("port");
3016 if (optPort && grpcSection.get("ip"))
3017 {
3018 auto& jv = ports.append(Json::Value(Json::objectValue));
3019 jv[jss::port] = *optPort;
3020 jv[jss::protocol] = Json::Value{Json::arrayValue};
3021 jv[jss::protocol].append("grpc");
3022 }
3023 }
3024 info[jss::ports] = std::move(ports);
3025 }
3026
3027 return info;
3028}
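// Example (abridged sketch, values hypothetical) of the object assembled
// above, as surfaced through the server_info RPC:
//
//     {
//         "build_version": "2.3.0",
//         "complete_ledgers": "1000000-1001000",
//         "io_latency_ms": 1,
//         "load_factor": 1,
//         "peers": 20,
//         "server_state": "full",
//         "validated_ledger": {
//             "age": 3,
//             "base_fee_xrp": 1e-05,
//             "seq": 1001000,
//             ...
//         },
//         "validation_quorum": 28
//     }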
3029
3030void
3031NetworkOPsImp::clearLedgerFetch()
3032{
3033 app_.getInboundLedgers().clearFailures();
3034}
3035
3036Json::Value
3037NetworkOPsImp::getLedgerFetchInfo()
3038{
3039 return app_.getInboundLedgers().getInfo();
3040}
3041
3042void
3043NetworkOPsImp::pubProposedTransaction(
3044 std::shared_ptr<ReadView const> const& ledger,
3045 std::shared_ptr<STTx const> const& transaction,
3046 TER result)
3047{
3048 // never publish an inner txn inside a batch txn
3049 if (transaction->isFlag(tfInnerBatchTxn) &&
3050 ledger->rules().enabled(featureBatch))
3051 return;
3052
3053 MultiApiJson jvObj =
3054 transJson(transaction, result, false, ledger, std::nullopt);
3055
3056 {
3057 std::lock_guard sl(mSubLock);
3058
3059 auto it = mStreamMaps[sRTTransactions].begin();
3060 while (it != mStreamMaps[sRTTransactions].end())
3061 {
3062 InfoSub::pointer p = it->second.lock();
3063
3064 if (p)
3065 {
3066 jvObj.visit(
3067 p->getApiVersion(), //
3068 [&](Json::Value const& jv) { p->send(jv, true); });
3069 ++it;
3070 }
3071 else
3072 {
3073 it = mStreamMaps[sRTTransactions].erase(it);
3074 }
3075 }
3076 }
3077
3078 pubProposedAccountTransaction(ledger, transaction, result);
3079}
3080
3081void
3082NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
3083{
3084 // Ledgers are published only when they acquire sufficient validations
3085 // Holes are filled across connection loss or other catastrophe
3086
3087 std::shared_ptr<AcceptedLedger> alpAccepted =
3088 app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
3089 if (!alpAccepted)
3090 {
3091 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
3092 app_.getAcceptedLedgerCache().canonicalize_replace_client(
3093 lpAccepted->info().hash, alpAccepted);
3094 }
3095
3096 XRPL_ASSERT(
3097 alpAccepted->getLedger().get() == lpAccepted.get(),
3098 "ripple::NetworkOPsImp::pubLedger : accepted input");
3099
3100 {
3101 JLOG(m_journal.debug())
3102 << "Publishing ledger " << lpAccepted->info().seq << " "
3103 << lpAccepted->info().hash;
3104
3105 std::lock_guard sl(mSubLock);
3106
3107 if (!mStreamMaps[sLedger].empty())
3108 {
3109 Json::Value jvObj(Json::objectValue);
3110
3111 jvObj[jss::type] = "ledgerClosed";
3112 jvObj[jss::ledger_index] = lpAccepted->info().seq;
3113 jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
3114 jvObj[jss::ledger_time] = Json::Value::UInt(
3115 lpAccepted->info().closeTime.time_since_epoch().count());
3116
3117 if (!lpAccepted->rules().enabled(featureXRPFees))
3118 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3119 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
3120 jvObj[jss::reserve_base] =
3121 lpAccepted->fees().accountReserve(0).jsonClipped();
3122 jvObj[jss::reserve_inc] =
3123 lpAccepted->fees().increment.jsonClipped();
3124
3125 jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
3126
3127 if (mMode >= OperatingMode::SYNCING)
3128 {
3129 jvObj[jss::validated_ledgers] =
3130 app_.getLedgerMaster().getCompleteLedgers();
3131 }
3132
3133 auto it = mStreamMaps[sLedger].begin();
3134 while (it != mStreamMaps[sLedger].end())
3135 {
3136 InfoSub::pointer p = it->second.lock();
3137 if (p)
3138 {
3139 p->send(jvObj, true);
3140 ++it;
3141 }
3142 else
3143 it = mStreamMaps[sLedger].erase(it);
3144 }
3145 }
3146
3147 if (!mStreamMaps[sBookChanges].empty())
3148 {
3149 Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
3150
3151 auto it = mStreamMaps[sBookChanges].begin();
3152 while (it != mStreamMaps[sBookChanges].end())
3153 {
3154 InfoSub::pointer p = it->second.lock();
3155 if (p)
3156 {
3157 p->send(jvObj, true);
3158 ++it;
3159 }
3160 else
3161 it = mStreamMaps[sBookChanges].erase(it);
3162 }
3163 }
3164
3165 {
3166 static bool firstTime = true;
3167 if (firstTime)
3168 {
3169 // First validated ledger, start delayed SubAccountHistory
3170 firstTime = false;
3171 for (auto& outer : mSubAccountHistory)
3172 {
3173 for (auto& inner : outer.second)
3174 {
3175 auto& subInfo = inner.second;
3176 if (subInfo.index_->separationLedgerSeq_ == 0)
3177 {
3178 subAccountHistoryStart(
3179 alpAccepted->getLedger(), subInfo);
3180 }
3181 }
3182 }
3183 }
3184 }
3185 }
3186
3187 // Don't lock since pubAcceptedTransaction is locking.
3188 for (auto const& accTx : *alpAccepted)
3189 {
3190 JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
3191 pubValidatedTransaction(
3192 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3193 }
3194}
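// Example (sketch, values hypothetical) of the "ledgerClosed" message
// built above for ledger-stream subscribers:
//
//     {
//         "type": "ledgerClosed",
//         "ledger_index": 1001000,
//         "ledger_hash": "...",
//         "ledger_time": 771346052,
//         "fee_base": 10,
//         "reserve_base": 1000000,
//         "reserve_inc": 200000,
//         "txn_count": 42,
//         "validated_ledgers": "1000000-1001000"
//     }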
3195
3196void
3197NetworkOPsImp::reportFeeChange()
3198{
3199 ServerFeeSummary f{
3200 app_.openLedger().current()->fees().base,
3201 app_.getTxQ().getMetrics(*app_.openLedger().current()),
3202 app_.getFeeTrack()};
3203
3204 // only schedule the job if something has changed
3205 if (f != mLastFeeSummary)
3206 {
3207 m_job_queue.addJob(
3208 jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
3209 pubServer();
3210 });
3211 }
3212}
3213
3214void
3215NetworkOPsImp::reportConsensusStateChange(ConsensusPhase phase)
3216{
3217 m_job_queue.addJob(
3218 jtCLIENT_CONSENSUS,
3219 "reportConsensusStateChange->pubConsensus",
3220 [this, phase]() { pubConsensus(phase); });
3221}
3222
3223inline void
3224NetworkOPsImp::updateLocalTx(ReadView const& view)
3225{
3226 m_localTX->sweep(view);
3227}
3228inline std::size_t
3229NetworkOPsImp::getLocalTxCount()
3230{
3231 return m_localTX->size();
3232}
3233
3234// This routine should only be used to publish accepted or validated
3235// transactions.
3236MultiApiJson
3237NetworkOPsImp::transJson(
3238 std::shared_ptr<STTx const> const& transaction,
3239 TER result,
3240 bool validated,
3241 std::shared_ptr<ReadView const> const& ledger,
3242 std::optional<std::reference_wrapper<TxMeta const>> meta)
3243{
3244 Json::Value jvObj(Json::objectValue);
3245 std::string sToken;
3246 std::string sHuman;
3247
3248 transResultInfo(result, sToken, sHuman);
3249
3250 jvObj[jss::type] = "transaction";
3251 // NOTE jvObj is not a finished object for either API version. After
3252 // it's populated, we need to finish it for a specific API version. This is
3253 // done in a loop, near the end of this function.
3254 jvObj[jss::transaction] =
3255 transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3256
3257 if (meta)
3258 {
3259 jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
3260 RPC::insertDeliveredAmount(
3261 jvObj[jss::meta], *ledger, transaction, meta->get());
3262 RPC::insertNFTSyntheticInJson(jvObj, transaction, meta->get());
3263 RPC::insertMPTokenIssuanceID(
3264 jvObj[jss::meta], transaction, meta->get());
3265 }
3266
3267 // add CTID where the needed data for it exists
3268 if (auto const& lookup = ledger->txRead(transaction->getTransactionID());
3269 lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3270 {
3271 uint32_t const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
3272 uint32_t netID = app_.config().NETWORK_ID;
3273 if (transaction->isFieldPresent(sfNetworkID))
3274 netID = transaction->getFieldU32(sfNetworkID);
3275
3276 if (std::optional<std::string> ctid =
3277 RPC::encodeCTID(ledger->info().seq, txnSeq, netID);
3278 ctid)
3279 jvObj[jss::ctid] = *ctid;
3280 }
3281 if (!ledger->open())
3282 jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3283
3284 if (validated)
3285 {
3286 jvObj[jss::ledger_index] = ledger->info().seq;
3287 jvObj[jss::transaction][jss::date] =
3288 ledger->info().closeTime.time_since_epoch().count();
3289 jvObj[jss::validated] = true;
3290 jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3291
3292 // WRITEME: Put the account next seq here
3293 }
3294 else
3295 {
3296 jvObj[jss::validated] = false;
3297 jvObj[jss::ledger_current_index] = ledger->info().seq;
3298 }
3299
3300 jvObj[jss::status] = validated ? "closed" : "proposed";
3301 jvObj[jss::engine_result] = sToken;
3302 jvObj[jss::engine_result_code] = result;
3303 jvObj[jss::engine_result_message] = sHuman;
3304
3305 if (transaction->getTxnType() == ttOFFER_CREATE)
3306 {
3307 auto const account = transaction->getAccountID(sfAccount);
3308 auto const amount = transaction->getFieldAmount(sfTakerGets);
3309
3310 // If the offer create is not self funded then add the owner balance
3311 if (account != amount.issue().account)
3312 {
3313 auto const ownerFunds = accountFunds(
3314 *ledger,
3315 account,
3316 amount,
3317 fhZERO_IF_FROZEN,
3318 app_.journal("View"));
3319 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3320 }
3321 }
3322
3323 std::string const hash = to_string(transaction->getTransactionID());
3324 MultiApiJson multiObj{jvObj};
3325 forAllApiVersions(
3326 multiObj.visit(), //
3327 [&]<unsigned Version>(
3328 Json::Value& jvTx, std::integral_constant<unsigned, Version>) {
3329 RPC::insertDeliverMax(
3330 jvTx[jss::transaction], transaction->getTxnType(), Version);
3331
3332 if constexpr (Version > 1)
3333 {
3334 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3335 jvTx[jss::hash] = hash;
3336 }
3337 else
3338 {
3339 jvTx[jss::transaction][jss::hash] = hash;
3340 }
3341 });
3342
3343 return multiObj;
3344}
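// NOTE: the forAllApiVersions step above is where the payloads diverge:
// API v1 keeps the transaction under "transaction" with its hash inside,
// while v2+ renames it to "tx_json" and hoists "hash" to the top level.
// Abridged sketch of the two shapes:
//
//     v1:  { "transaction": { ..., "hash": "..." }, ... }
//     v2+: { "tx_json": { ... }, "hash": "...", ... }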
3345
3346void
3347NetworkOPsImp::pubValidatedTransaction(
3348 std::shared_ptr<ReadView const> const& ledger,
3349 AcceptedLedgerTx const& transaction,
3350 bool last)
3351{
3352 auto const& stTxn = transaction.getTxn();
3353
3354 // Create two different Json objects, for different API versions
3355 auto const metaRef = std::ref(transaction.getMeta());
3356 auto const trResult = transaction.getResult();
3357 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3358
3359 {
3360 std::lock_guard sl(mSubLock);
3361
3362 auto it = mStreamMaps[sTransactions].begin();
3363 while (it != mStreamMaps[sTransactions].end())
3364 {
3365 InfoSub::pointer p = it->second.lock();
3366
3367 if (p)
3368 {
3369 jvObj.visit(
3370 p->getApiVersion(), //
3371 [&](Json::Value const& jv) { p->send(jv, true); });
3372 ++it;
3373 }
3374 else
3375 it = mStreamMaps[sTransactions].erase(it);
3376 }
3377
3378 it = mStreamMaps[sRTTransactions].begin();
3379
3380 while (it != mStreamMaps[sRTTransactions].end())
3381 {
3382 InfoSub::pointer p = it->second.lock();
3383
3384 if (p)
3385 {
3386 jvObj.visit(
3387 p->getApiVersion(), //
3388 [&](Json::Value const& jv) { p->send(jv, true); });
3389 ++it;
3390 }
3391 else
3392 it = mStreamMaps[sRTTransactions].erase(it);
3393 }
3394 }
3395
3396 if (transaction.getResult() == tesSUCCESS)
3397 app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3398
3399 pubAccountTransaction(ledger, transaction, last);
3400}
3401
3402void
3403NetworkOPsImp::pubAccountTransaction(
3404 std::shared_ptr<ReadView const> const& ledger,
3405 AcceptedLedgerTx const& transaction,
3406 bool last)
3407{
3408 hash_set<InfoSub::pointer> notify;
3409 int iProposed = 0;
3410 int iAccepted = 0;
3411
3412 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3413 auto const currLedgerSeq = ledger->seq();
3414 {
3415 std::lock_guard sl(mSubLock);
3416
3417 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3418 !mSubAccountHistory.empty())
3419 {
3420 for (auto const& affectedAccount : transaction.getAffected())
3421 {
3422 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3423 simiIt != mSubRTAccount.end())
3424 {
3425 auto it = simiIt->second.begin();
3426
3427 while (it != simiIt->second.end())
3428 {
3429 InfoSub::pointer p = it->second.lock();
3430
3431 if (p)
3432 {
3433 notify.insert(p);
3434 ++it;
3435 ++iProposed;
3436 }
3437 else
3438 it = simiIt->second.erase(it);
3439 }
3440 }
3441
3442 if (auto simiIt = mSubAccount.find(affectedAccount);
3443 simiIt != mSubAccount.end())
3444 {
3445 auto it = simiIt->second.begin();
3446 while (it != simiIt->second.end())
3447 {
3448 InfoSub::pointer p = it->second.lock();
3449
3450 if (p)
3451 {
3452 notify.insert(p);
3453 ++it;
3454 ++iAccepted;
3455 }
3456 else
3457 it = simiIt->second.erase(it);
3458 }
3459 }
3460
3461 if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3462 histoIt != mSubAccountHistory.end())
3463 {
3464 auto& subs = histoIt->second;
3465 auto it = subs.begin();
3466 while (it != subs.end())
3467 {
3468 SubAccountHistoryInfoWeak const& info = it->second;
3469 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3470 {
3471 ++it;
3472 continue;
3473 }
3474
3475 if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3476 {
3477 accountHistoryNotify.emplace_back(
3478 SubAccountHistoryInfo{isSptr, info.index_});
3479 ++it;
3480 }
3481 else
3482 {
3483 it = subs.erase(it);
3484 }
3485 }
3486 if (subs.empty())
3487 mSubAccountHistory.erase(histoIt);
3488 }
3489 }
3490 }
3491 }
3492
3493 JLOG(m_journal.trace())
3494 << "pubAccountTransaction: "
3495 << "proposed=" << iProposed << ", accepted=" << iAccepted;
3496
3497 if (!notify.empty() || !accountHistoryNotify.empty())
3498 {
3499 auto const& stTxn = transaction.getTxn();
3500
3501 // Create two different Json objects, for different API versions
3502 auto const metaRef = std::ref(transaction.getMeta());
3503 auto const trResult = transaction.getResult();
3504 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3505
3506 for (InfoSub::ref isrListener : notify)
3507 {
3508 jvObj.visit(
3509 isrListener->getApiVersion(), //
3510 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3511 }
3512
3513 if (last)
3514 jvObj.set(jss::account_history_boundary, true);
3515
3516 XRPL_ASSERT(
3517 jvObj.isMember(jss::account_history_tx_stream) ==
3518 MultiApiJson::none,
3519 "ripple::NetworkOPsImp::pubAccountTransaction : "
3520 "account_history_tx_stream not set");
3521 for (auto& info : accountHistoryNotify)
3522 {
3523 auto& index = info.index_;
3524 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3525 jvObj.set(jss::account_history_tx_first, true);
3526
3527 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3528
3529 jvObj.visit(
3530 info.sink_->getApiVersion(), //
3531 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3532 }
3533 }
3534}
3535
3536void
3537NetworkOPsImp::pubProposedAccountTransaction(
3538 std::shared_ptr<ReadView const> const& ledger,
3539 std::shared_ptr<STTx const> const& tx,
3540 TER result)
3541{
3542 hash_set<InfoSub::pointer> notify;
3543 int iProposed = 0;
3544
3545 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3546
3547 {
3548 std::lock_guard sl(mSubLock);
3549
3550 if (mSubRTAccount.empty())
3551 return;
3552
3553 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3554 !mSubAccountHistory.empty())
3555 {
3556 for (auto const& affectedAccount : tx->getMentionedAccounts())
3557 {
3558 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3559 simiIt != mSubRTAccount.end())
3560 {
3561 auto it = simiIt->second.begin();
3562
3563 while (it != simiIt->second.end())
3564 {
3565 InfoSub::pointer p = it->second.lock();
3566
3567 if (p)
3568 {
3569 notify.insert(p);
3570 ++it;
3571 ++iProposed;
3572 }
3573 else
3574 it = simiIt->second.erase(it);
3575 }
3576 }
3577 }
3578 }
3579 }
3580
3581 JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3582
3583 if (!notify.empty() || !accountHistoryNotify.empty())
3584 {
3585 // Create two different Json objects, for different API versions
3586 MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3587
3588 for (InfoSub::ref isrListener : notify)
3589 jvObj.visit(
3590 isrListener->getApiVersion(), //
3591 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3592
3593 XRPL_ASSERT(
3594 jvObj.isMember(jss::account_history_tx_stream) ==
3595 MultiApiJson::none,
3596 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3597 "account_history_tx_stream not set");
3598 for (auto& info : accountHistoryNotify)
3599 {
3600 auto& index = info.index_;
3601 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3602 jvObj.set(jss::account_history_tx_first, true);
3603 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3604 jvObj.visit(
3605 info.sink_->getApiVersion(), //
3606 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3607 }
3608 }
3609}
3610
3611//
3612// Monitoring
3613//
3614
3615void
3616NetworkOPsImp::subAccount(
3617 InfoSub::ref isrListener,
3618 hash_set<AccountID> const& vnaAccountIDs,
3619 bool rt)
3620{
3621 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3622
3623 for (auto const& naAccountID : vnaAccountIDs)
3624 {
3625 JLOG(m_journal.trace())
3626 << "subAccount: account: " << toBase58(naAccountID);
3627
3628 isrListener->insertSubAccountInfo(naAccountID, rt);
3629 }
3630
3631 std::lock_guard sl(mSubLock);
3632
3633 for (auto const& naAccountID : vnaAccountIDs)
3634 {
3635 auto simIterator = subMap.find(naAccountID);
3636 if (simIterator == subMap.end())
3637 {
3638 // Not found, note that the account has a new single listener.
3639 SubMapType usisElement;
3640 usisElement[isrListener->getSeq()] = isrListener;
3641 // VFALCO NOTE This is making a needless copy of naAccountID
3642 subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3643 }
3644 else
3645 {
3646 // Found, note that the account has another listener.
3647 simIterator->second[isrListener->getSeq()] = isrListener;
3648 }
3649 }
3650}
3651
3652void
3653NetworkOPsImp::unsubAccount(
3654 InfoSub::ref isrListener,
3655 hash_set<AccountID> const& vnaAccountIDs,
3656 bool rt)
3657{
3658 for (auto const& naAccountID : vnaAccountIDs)
3659 {
3660 // Remove from the InfoSub
3661 isrListener->deleteSubAccountInfo(naAccountID, rt);
3662 }
3663
3664 // Remove from the server
3665 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3666}
3667
3668void
3669NetworkOPsImp::unsubAccountInternal(
3670 std::uint64_t uSeq,
3671 hash_set<AccountID> const& vnaAccountIDs,
3672 bool rt)
3673{
3674 std::lock_guard sl(mSubLock);
3675
3676 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3677
3678 for (auto const& naAccountID : vnaAccountIDs)
3679 {
3680 auto simIterator = subMap.find(naAccountID);
3681
3682 if (simIterator != subMap.end())
3683 {
3684 // Found
3685 simIterator->second.erase(uSeq);
3686
3687 if (simIterator->second.empty())
3688 {
3689 // Don't need hash entry.
3690 subMap.erase(simIterator);
3691 }
3692 }
3693 }
3694}
3695
3696void
3697NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
3698{
3699 enum DatabaseType { Sqlite, None };
3700 static auto const databaseType = [&]() -> DatabaseType {
3701 // Use a dynamic_cast to return DatabaseType::None
3702 // on failure.
3703 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3704 {
3705 return DatabaseType::Sqlite;
3706 }
3707 return DatabaseType::None;
3708 }();
3709
3710 if (databaseType == DatabaseType::None)
3711 {
3712 JLOG(m_journal.error())
3713 << "AccountHistory job for account "
3714 << toBase58(subInfo.index_->accountId_) << " no database";
3715 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3716 {
3717 sptr->send(rpcError(rpcINTERNAL), true);
3718 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3719 }
3720 return;
3721 }
3722
3723 app_.getJobQueue().addJob(
3724 jtCLIENT_ACCT_HIST,
3725 "AccountHistoryTxStream",
3726 [this, dbType = databaseType, subInfo]() {
3727 auto const& accountId = subInfo.index_->accountId_;
3728 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3729 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3730
3731 JLOG(m_journal.trace())
3732 << "AccountHistory job for account " << toBase58(accountId)
3733 << " started. lastLedgerSeq=" << lastLedgerSeq;
3734
3735 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3736 std::shared_ptr<TxMeta> const& meta) -> bool {
3737 /*
3738 * genesis account: first tx is the one with seq 1
3739 * other account: first tx is the one that created the account
3740 */
3741 if (accountId == genesisAccountId)
3742 {
3743 auto stx = tx->getSTransaction();
3744 if (stx->getAccountID(sfAccount) == accountId &&
3745 stx->getSeqValue() == 1)
3746 return true;
3747 }
3748
3749 for (auto& node : meta->getNodes())
3750 {
3751 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3752 continue;
3753
3754 if (node.isFieldPresent(sfNewFields))
3755 {
3756 if (auto inner = dynamic_cast<STObject const*>(
3757 node.peekAtPField(sfNewFields));
3758 inner)
3759 {
3760 if (inner->isFieldPresent(sfAccount) &&
3761 inner->getAccountID(sfAccount) == accountId)
3762 {
3763 return true;
3764 }
3765 }
3766 }
3767 }
3768
3769 return false;
3770 };
3771
3772 auto send = [&](Json::Value const& jvObj,
3773 bool unsubscribe) -> bool {
3774 if (auto sptr = subInfo.sinkWptr_.lock())
3775 {
3776 sptr->send(jvObj, true);
3777 if (unsubscribe)
3778 unsubAccountHistory(sptr, accountId, false);
3779 return true;
3780 }
3781
3782 return false;
3783 };
3784
3785 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3786 bool unsubscribe) -> bool {
3787 if (auto sptr = subInfo.sinkWptr_.lock())
3788 {
3789 jvObj.visit(
3790 sptr->getApiVersion(), //
3791 [&](Json::Value const& jv) { sptr->send(jv, true); });
3792
3793 if (unsubscribe)
3794 unsubAccountHistory(sptr, accountId, false);
3795 return true;
3796 }
3797
3798 return false;
3799 };
3800
3801 auto getMoreTxns =
3802 [&](std::uint32_t minLedger,
3803 std::uint32_t maxLedger,
3804 std::optional<RelationalDatabase::AccountTxMarker> marker)
3805 -> std::optional<std::pair<
3806 RelationalDatabase::AccountTxs,
3807 std::optional<RelationalDatabase::AccountTxMarker>>> {
3808 switch (dbType)
3809 {
3810 case Sqlite: {
3811 auto db = static_cast<SQLiteDatabase*>(
3812 &app_.getRelationalDatabase());
3813 RelationalDatabase::AccountTxPageOptions options{
3814 accountId, minLedger, maxLedger, marker, 0, true};
3815 return db->newestAccountTxPage(options);
3816 }
3817 default: {
3818 UNREACHABLE(
3819 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3820 "getMoreTxns : invalid database type");
3821 return {};
3822 }
3823 }
3824 };
3825
3826 /*
3827 * search backward until the genesis ledger or asked to stop
3828 */
3829 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3830 {
3831 int feeChargeCount = 0;
3832 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3833 {
3834 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3835 ++feeChargeCount;
3836 }
3837 else
3838 {
3839 JLOG(m_journal.trace())
3840 << "AccountHistory job for account "
3841 << toBase58(accountId) << " no InfoSub. Fee charged "
3842 << feeChargeCount << " times.";
3843 return;
3844 }
3845
3846 // search up to 1024 ledgers at a time until reaching the genesis ledger
3847 auto startLedgerSeq =
3848 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3849 JLOG(m_journal.trace())
3850 << "AccountHistory job for account " << toBase58(accountId)
3851 << ", working on ledger range [" << startLedgerSeq << ","
3852 << lastLedgerSeq << "]";
3853
3854 auto haveRange = [&]() -> bool {
3855 std::uint32_t validatedMin = UINT_MAX;
3856 std::uint32_t validatedMax = 0;
3857 auto haveSomeValidatedLedgers =
3858 app_.getLedgerMaster().getValidatedRange(
3859 validatedMin, validatedMax);
3860
3861 return haveSomeValidatedLedgers &&
3862 validatedMin <= startLedgerSeq &&
3863 lastLedgerSeq <= validatedMax;
3864 }();
3865
3866 if (!haveRange)
3867 {
3868 JLOG(m_journal.debug())
3869 << "AccountHistory reschedule job for account "
3870 << toBase58(accountId) << ", incomplete ledger range ["
3871 << startLedgerSeq << "," << lastLedgerSeq << "]";
3872 setAccountHistoryJobTimer(subInfo);
3873 return;
3874 }
3875
3877 while (!subInfo.index_->stopHistorical_)
3878 {
3879 auto dbResult =
3880 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3881 if (!dbResult)
3882 {
3883 JLOG(m_journal.debug())
3884 << "AccountHistory job for account "
3885 << toBase58(accountId) << " getMoreTxns failed.";
3886 send(rpcError(rpcINTERNAL), true);
3887 return;
3888 }
3889
3890 auto const& txns = dbResult->first;
3891 marker = dbResult->second;
3892 size_t num_txns = txns.size();
3893 for (size_t i = 0; i < num_txns; ++i)
3894 {
3895 auto const& [tx, meta] = txns[i];
3896
3897 if (!tx || !meta)
3898 {
3899 JLOG(m_journal.debug())
3900 << "AccountHistory job for account "
3901 << toBase58(accountId) << " empty tx or meta.";
3902 send(rpcError(rpcINTERNAL), true);
3903 return;
3904 }
3905 auto curTxLedger =
3906 app_.getLedgerMaster().getLedgerBySeq(
3907 tx->getLedger());
3908 if (!curTxLedger)
3909 {
3910 JLOG(m_journal.debug())
3911 << "AccountHistory job for account "
3912 << toBase58(accountId) << " no ledger.";
3913 send(rpcError(rpcINTERNAL), true);
3914 return;
3915 }
3916 auto const stTxn =
3917 tx->getSTransaction();
3918 if (!stTxn)
3919 {
3920 JLOG(m_journal.debug())
3921 << "AccountHistory job for account "
3922 << toBase58(accountId)
3923 << " getSTransaction failed.";
3924 send(rpcError(rpcINTERNAL), true);
3925 return;
3926 }
3927
3928 auto const mRef = std::ref(*meta);
3929 auto const trR = meta->getResultTER();
3930 MultiApiJson jvTx =
3931 transJson(stTxn, trR, true, curTxLedger, mRef);
3932
3933 jvTx.set(
3934 jss::account_history_tx_index, txHistoryIndex--);
3935 if (i + 1 == num_txns ||
3936 txns[i + 1].first->getLedger() != tx->getLedger())
3937 jvTx.set(jss::account_history_boundary, true);
3938
3939 if (isFirstTx(tx, meta))
3940 {
3941 jvTx.set(jss::account_history_tx_first, true);
3942 sendMultiApiJson(jvTx, false);
3943
3944 JLOG(m_journal.trace())
3945 << "AccountHistory job for account "
3946 << toBase58(accountId)
3947 << " done, found last tx.";
3948 return;
3949 }
3950 else
3951 {
3952 sendMultiApiJson(jvTx, false);
3953 }
3954 }
3955
3956 if (marker)
3957 {
3958 JLOG(m_journal.trace())
3959 << "AccountHistory job for account "
3960 << toBase58(accountId)
3961 << " paging, marker=" << marker->ledgerSeq << ":"
3962 << marker->txnSeq;
3963 }
3964 else
3965 {
3966 break;
3967 }
3968 }
3969
3970 if (!subInfo.index_->stopHistorical_)
3971 {
3972 lastLedgerSeq = startLedgerSeq - 1;
3973 if (lastLedgerSeq <= 1)
3974 {
3975 JLOG(m_journal.trace())
3976 << "AccountHistory job for account "
3977 << toBase58(accountId)
3978 << " done, reached genesis ledger.";
3979 return;
3980 }
3981 }
3982 }
3983 });
3984}
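// NOTE: the job above walks history backwards in windows of at most 1024
// ledgers. For example (hypothetical), with lastLedgerSeq == 5000 it pages
// through [3976, 5000], then [2951, 3975], and so on until it reaches
// ledger 2, finds the account's first transaction, or is asked to stop.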
3985
3986void
3987NetworkOPsImp::subAccountHistoryStart(
3988 std::shared_ptr<ReadView const> const& ledger,
3989 SubAccountHistoryInfoWeak& subInfo)
3990{
3991 subInfo.index_->separationLedgerSeq_ = ledger->seq();
3992 auto const& accountId = subInfo.index_->accountId_;
3993 auto const accountKeylet = keylet::account(accountId);
3994 if (!ledger->exists(accountKeylet))
3995 {
3996 JLOG(m_journal.debug())
3997 << "subAccountHistoryStart, no account " << toBase58(accountId)
3998 << ", no need to add AccountHistory job.";
3999 return;
4000 }
4001 if (accountId == genesisAccountId)
4002 {
4003 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
4004 {
4005 if (sleAcct->getFieldU32(sfSequence) == 1)
4006 {
4007 JLOG(m_journal.debug())
4008 << "subAccountHistoryStart, genesis account "
4009 << toBase58(accountId)
4010 << " does not have tx, no need to add AccountHistory job.";
4011 return;
4012 }
4013 }
4014 else
4015 {
4016 UNREACHABLE(
4017 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
4018 "access genesis account");
4019 return;
4020 }
4021 }
4022 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
4023 subInfo.index_->haveHistorical_ = true;
4024
4025 JLOG(m_journal.debug())
4026 << "subAccountHistoryStart, add AccountHistory job: accountId="
4027 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
4028
4029 addAccountHistoryJob(subInfo);
4030}
4031
4032error_code_i
4033NetworkOPsImp::subAccountHistory(
4034 InfoSub::ref isrListener,
4035 AccountID const& accountId)
4036{
4037 if (!isrListener->insertSubAccountHistory(accountId))
4038 {
4039 JLOG(m_journal.debug())
4040 << "subAccountHistory, already subscribed to account "
4041 << toBase58(accountId);
4042 return rpcINVALID_PARAMS;
4043 }
4044
4045 std::lock_guard sl(mSubLock);
4046 SubAccountHistoryInfoWeak ahi{
4047 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
4048 auto simIterator = mSubAccountHistory.find(accountId);
4049 if (simIterator == mSubAccountHistory.end())
4050 {
4051 hash_map<std::uint64_t, SubAccountHistoryInfoWeak> inner;
4052 inner.emplace(isrListener->getSeq(), ahi);
4053 mSubAccountHistory.insert(
4054 simIterator, std::make_pair(accountId, inner));
4055 }
4056 else
4057 {
4058 simIterator->second.emplace(isrListener->getSeq(), ahi);
4059 }
4060
4061 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
4062 if (ledger)
4063 {
4064 subAccountHistoryStart(ledger, ahi);
4065 }
4066 else
4067 {
4068 // The node does not have validated ledgers, so wait for
4069 // one before it starts streaming.
4070 // In this case, the subscription is also considered successful.
4071 JLOG(m_journal.debug())
4072 << "subAccountHistory, no validated ledger yet, delay start";
4073 }
4074
4075 return rpcSUCCESS;
4076}
4077
4078void
4079NetworkOPsImp::unsubAccountHistory(
4080 InfoSub::ref isrListener,
4081 AccountID const& account,
4082 bool historyOnly)
4083{
4084 if (!historyOnly)
4085 isrListener->deleteSubAccountHistory(account);
4086 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
4087}
4088
4089void
4090NetworkOPsImp::unsubAccountHistoryInternal(
4091 std::uint64_t seq,
4092 AccountID const& account,
4093 bool historyOnly)
4094{
4095 std::lock_guard sl(mSubLock);
4096 auto simIterator = mSubAccountHistory.find(account);
4097 if (simIterator != mSubAccountHistory.end())
4098 {
4099 auto& subInfoMap = simIterator->second;
4100 auto subInfoIter = subInfoMap.find(seq);
4101 if (subInfoIter != subInfoMap.end())
4102 {
4103 subInfoIter->second.index_->stopHistorical_ = true;
4104 }
4105
4106 if (!historyOnly)
4107 {
4108 simIterator->second.erase(seq);
4109 if (simIterator->second.empty())
4110 {
4111 mSubAccountHistory.erase(simIterator);
4112 }
4113 }
4114 JLOG(m_journal.debug())
4115 << "unsubAccountHistory, account " << toBase58(account)
4116 << ", historyOnly = " << (historyOnly ? "true" : "false");
4117 }
4118}
4119
4120bool
4121NetworkOPsImp::subBook(InfoSub::ref isrListener, Book const& book)
4122{
4123 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
4124 listeners->addSubscriber(isrListener);
4125 else
4126 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
4127 return true;
4128}
4129
4130bool
4131NetworkOPsImp::unsubBook(std::uint64_t uSeq, Book const& book)
4132{
4133 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
4134 listeners->removeSubscriber(uSeq);
4135
4136 return true;
4137}
4138
4139std::uint32_t
4140NetworkOPsImp::acceptLedger(
4141 std::optional<std::chrono::milliseconds> consensusDelay)
4142{
4143 // This code-path is exclusively used when the server is in standalone
4144 // mode via `ledger_accept`
4145 XRPL_ASSERT(
4146 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
4147
4148 if (!m_standalone)
4149 Throw<std::runtime_error>(
4150 "Operation only possible in STANDALONE mode.");
4151
4152 // FIXME Could we improve on this and remove the need for a specialized
4153 // API in Consensus?
4154 beginConsensus(m_ledgerMaster.getClosedLedger()->info().hash, {});
4155 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
4156 return m_ledgerMaster.getCurrentLedger()->info().seq;
4157}
4158
4159// <-- bool: true=added, false=already there
4160bool
4161NetworkOPsImp::subLedger(InfoSub::ref isrListener, Json::Value& jvResult)
4162{
4163 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
4164 {
4165 jvResult[jss::ledger_index] = lpClosed->info().seq;
4166 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
4167 jvResult[jss::ledger_time] = Json::Value::UInt(
4168 lpClosed->info().closeTime.time_since_epoch().count());
4169 if (!lpClosed->rules().enabled(featureXRPFees))
4170 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
4171 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
4172 jvResult[jss::reserve_base] =
4173 lpClosed->fees().accountReserve(0).jsonClipped();
4174 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
4175 }
4176
4177 if (mMode >= OperatingMode::SYNCING)
4178 {
4179 jvResult[jss::validated_ledgers] =
4180 app_.getLedgerMaster().getCompleteLedgers();
4181 }
4182
4183 std::lock_guard sl(mSubLock);
4184 return mStreamMaps[sLedger]
4185 .emplace(isrListener->getSeq(), isrListener)
4186 .second;
4187}
4188
4189// <-- bool: true=added, false=already there
4190bool
4191NetworkOPsImp::subBookChanges(InfoSub::ref isrListener)
4192{
4193 std::lock_guard sl(mSubLock);
4194 return mStreamMaps[sBookChanges]
4195 .emplace(isrListener->getSeq(), isrListener)
4196 .second;
4197}
4198
4199// <-- bool: true=erased, false=was not there
4200bool
4201NetworkOPsImp::unsubLedger(std::uint64_t uSeq)
4202{
4203 std::lock_guard sl(mSubLock);
4204 return mStreamMaps[sLedger].erase(uSeq);
4205}
4206
4207// <-- bool: true=erased, false=was not there
4208bool
4209NetworkOPsImp::unsubBookChanges(std::uint64_t uSeq)
4210{
4211 std::lock_guard sl(mSubLock);
4212 return mStreamMaps[sBookChanges].erase(uSeq);
4213}
4214
4215// <-- bool: true=added, false=already there
4216bool
4217NetworkOPsImp::subManifests(InfoSub::ref isrListener)
4218{
4219 std::lock_guard sl(mSubLock);
4220 return mStreamMaps[sManifests]
4221 .emplace(isrListener->getSeq(), isrListener)
4222 .second;
4223}
4224
4225// <-- bool: true=erased, false=was not there
4226bool
4227NetworkOPsImp::unsubManifests(std::uint64_t uSeq)
4228{
4229 std::lock_guard sl(mSubLock);
4230 return mStreamMaps[sManifests].erase(uSeq);
4231}
4232
4233// <-- bool: true=added, false=already there
4234bool
4235NetworkOPsImp::subServer(
4236 InfoSub::ref isrListener,
4237 Json::Value& jvResult,
4238 bool admin)
4239{
4240 uint256 uRandom;
4241
4242 if (m_standalone)
4243 jvResult[jss::stand_alone] = m_standalone;
4244
4245 // CHECKME: is it necessary to provide a random number here?
4246 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
4247
4248 auto const& feeTrack = app_.getFeeTrack();
4249 jvResult[jss::random] = to_string(uRandom);
4250 jvResult[jss::server_status] = strOperatingMode(admin);
4251 jvResult[jss::load_base] = feeTrack.getLoadBase();
4252 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4253 jvResult[jss::hostid] = getHostId(admin);
4254 jvResult[jss::pubkey_node] =
4255 toBase58(TokenType::NodePublic, app_.nodeIdentity().first);
4256
4257 std::lock_guard sl(mSubLock);
4258 return mStreamMaps[sServer]
4259 .emplace(isrListener->getSeq(), isrListener)
4260 .second;
4261}
4262
4263// <-- bool: true=erased, false=was not there
4264bool
4265NetworkOPsImp::unsubServer(std::uint64_t uSeq)
4266{
4267 std::lock_guard sl(mSubLock);
4268 return mStreamMaps[sServer].erase(uSeq);
4269}
4270
4271// <-- bool: true=added, false=already there
4272bool
4273NetworkOPsImp::subTransactions(InfoSub::ref isrListener)
4274{
4275 std::lock_guard sl(mSubLock);
4276 return mStreamMaps[sTransactions]
4277 .emplace(isrListener->getSeq(), isrListener)
4278 .second;
4279}
4280
4281// <-- bool: true=erased, false=was not there
4282bool
4283NetworkOPsImp::unsubTransactions(std::uint64_t uSeq)
4284{
4285 std::lock_guard sl(mSubLock);
4286 return mStreamMaps[sTransactions].erase(uSeq);
4287}
4288
4289// <-- bool: true=added, false=already there
4290bool
4291NetworkOPsImp::subRTTransactions(InfoSub::ref isrListener)
4292{
4293 std::lock_guard sl(mSubLock);
4294 return mStreamMaps[sRTTransactions]
4295 .emplace(isrListener->getSeq(), isrListener)
4296 .second;
4297}
4298
4299// <-- bool: true=erased, false=was not there
4300bool
4301NetworkOPsImp::unsubRTTransactions(std::uint64_t uSeq)
4302{
4303 std::lock_guard sl(mSubLock);
4304 return mStreamMaps[sRTTransactions].erase(uSeq);
4305}
4306
4307// <-- bool: true=added, false=already there
4308bool
4309NetworkOPsImp::subValidations(InfoSub::ref isrListener)
4310{
4311 std::lock_guard sl(mSubLock);
4312 return mStreamMaps[sValidations]
4313 .emplace(isrListener->getSeq(), isrListener)
4314 .second;
4315}
4316
4317void
4318NetworkOPsImp::stateAccounting(Json::Value& obj)
4319{
4320 accounting_.json(obj);
4321}
4322
4323// <-- bool: true=erased, false=was not there
4324bool
4325NetworkOPsImp::unsubValidations(std::uint64_t uSeq)
4326{
4327 std::lock_guard sl(mSubLock);
4328 return mStreamMaps[sValidations].erase(uSeq);
4329}
4330
4331// <-- bool: true=added, false=already there
4332bool
4333NetworkOPsImp::subPeerStatus(InfoSub::ref isrListener)
4334{
4335 std::lock_guard sl(mSubLock);
4336 return mStreamMaps[sPeerStatus]
4337 .emplace(isrListener->getSeq(), isrListener)
4338 .second;
4339}
4340
4341// <-- bool: true=erased, false=was not there
4342bool
4343NetworkOPsImp::unsubPeerStatus(std::uint64_t uSeq)
4344{
4345 std::lock_guard sl(mSubLock);
4346 return mStreamMaps[sPeerStatus].erase(uSeq);
4347}
4348
4349// <-- bool: true=added, false=already there
4350bool
4351NetworkOPsImp::subConsensus(InfoSub::ref isrListener)
4352{
4353 std::lock_guard sl(mSubLock);
4354 return mStreamMaps[sConsensusPhase]
4355 .emplace(isrListener->getSeq(), isrListener)
4356 .second;
4357}
4358
4359// <-- bool: true=erased, false=was not there
4360bool
4361NetworkOPsImp::unsubConsensus(std::uint64_t uSeq)
4362{
4363 std::lock_guard sl(mSubLock);
4364 return mStreamMaps[sConsensusPhase].erase(uSeq);
4365}
4366
4367InfoSub::pointer
4368NetworkOPsImp::findRpcSub(std::string const& strUrl)
4369{
4370 std::lock_guard sl(mSubLock);
4371
4372 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4373
4374 if (it != mRpcSubMap.end())
4375 return it->second;
4376
4377 return InfoSub::pointer();
4378}
4379
4380InfoSub::pointer
4381NetworkOPsImp::addRpcSub(std::string const& strUrl, InfoSub::ref rspEntry)
4382{
4383 std::lock_guard sl(mSubLock);
4384
4385 mRpcSubMap.emplace(strUrl, rspEntry);
4386
4387 return rspEntry;
4388}
4389
4390bool
4391NetworkOPsImp::tryRemoveRpcSub(std::string const& strUrl)
4392{
4393 std::lock_guard sl(mSubLock);
4394 auto pInfo = findRpcSub(strUrl);
4395
4396 if (!pInfo)
4397 return false;
4398
4399 // check to see if any of the stream maps still hold a weak reference to
4400 // this entry before removing
4401 for (SubMapType const& map : mStreamMaps)
4402 {
4403 if (map.find(pInfo->getSeq()) != map.end())
4404 return false;
4405 }
4406 mRpcSubMap.erase(strUrl);
4407 return true;
4408}
4409
4410#ifndef USE_NEW_BOOK_PAGE
4411
4412// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4413// work, but it demonstrated poor performance.
4414//
4415void
4416NetworkOPsImp::getBookPage(
4417 std::shared_ptr<ReadView const>& lpLedger,
4418 Book const& book,
4419 AccountID const& uTakerID,
4420 bool const bProof,
4421 unsigned int iLimit,
4422 Json::Value const& jvMarker,
4423 Json::Value& jvResult)
4424{ // CAUTION: This is the old get book page logic
4425 Json::Value& jvOffers =
4426 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4427
4428 std::map<AccountID, STAmount> umBalance;
4429 uint256 const uBookBase = getBookBase(book);
4430 uint256 const uBookEnd = getQualityNext(uBookBase);
4431 uint256 uTipIndex = uBookBase;
4432
4433 if (auto stream = m_journal.trace())
4434 {
4435 stream << "getBookPage:" << book;
4436 stream << "getBookPage: uBookBase=" << uBookBase;
4437 stream << "getBookPage: uBookEnd=" << uBookEnd;
4438 stream << "getBookPage: uTipIndex=" << uTipIndex;
4439 }
4440
4441 ReadView const& view = *lpLedger;
4442
4443 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4444 isGlobalFrozen(view, book.in.account);
4445
4446 bool bDone = false;
4447 bool bDirectAdvance = true;
4448
4449 std::shared_ptr<SLE const> sleOfferDir;
4450 uint256 offerIndex;
4451 unsigned int uBookEntry;
4452 STAmount saDirRate;
4453
4454 auto const rate = transferRate(view, book.out.account);
4455 auto viewJ = app_.journal("View");
4456
4457 while (!bDone && iLimit-- > 0)
4458 {
4459 if (bDirectAdvance)
4460 {
4461 bDirectAdvance = false;
4462
4463 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4464
4465 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4466 if (ledgerIndex)
4467 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4468 else
4469 sleOfferDir.reset();
4470
4471 if (!sleOfferDir)
4472 {
4473 JLOG(m_journal.trace()) << "getBookPage: bDone";
4474 bDone = true;
4475 }
4476 else
4477 {
4478 uTipIndex = sleOfferDir->key();
4479 saDirRate = amountFromQuality(getQuality(uTipIndex));
4480
4481 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4482
4483 JLOG(m_journal.trace())
4484 << "getBookPage: uTipIndex=" << uTipIndex;
4485 JLOG(m_journal.trace())
4486 << "getBookPage: offerIndex=" << offerIndex;
4487 }
4488 }
4489
4490 if (!bDone)
4491 {
4492 auto sleOffer = view.read(keylet::offer(offerIndex));
4493
4494 if (sleOffer)
4495 {
4496 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4497 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4498 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4499 STAmount saOwnerFunds;
4500 bool firstOwnerOffer(true);
4501
4502 if (book.out.account == uOfferOwnerID)
4503 {
4504 // If an offer is selling issuer's own IOUs, it is fully
4505 // funded.
4506 saOwnerFunds = saTakerGets;
4507 }
4508 else if (bGlobalFreeze)
4509 {
4510 // If either asset is globally frozen, consider all offers
4511 // that aren't ours to be totally unfunded
4512 saOwnerFunds.clear(book.out);
4513 }
4514 else
4515 {
4516 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4517 if (umBalanceEntry != umBalance.end())
4518 {
4519 // Found in running balance table.
4520
4521 saOwnerFunds = umBalanceEntry->second;
4522 firstOwnerOffer = false;
4523 }
4524 else
4525 {
4526 // Did not find balance in table.
4527
4528 saOwnerFunds = accountHolds(
4529 view,
4530 uOfferOwnerID,
4531 book.out.currency,
4532 book.out.account,
4533 fhZERO_IF_FROZEN,
4534 viewJ);
4535
4536 if (saOwnerFunds < beast::zero)
4537 {
4538 // Treat negative funds as zero.
4539
4540 saOwnerFunds.clear();
4541 }
4542 }
4543 }
4544
4545 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4546
4547 STAmount saTakerGetsFunded;
4548 STAmount saOwnerFundsLimit = saOwnerFunds;
4549 Rate offerRate = parityRate;
4550
4551 if (rate != parityRate
4552 // Have a transfer fee.
4553 && uTakerID != book.out.account
4554 // Not taking offers of own IOUs.
4555 && book.out.account != uOfferOwnerID)
4556 // Offer owner not issuing own funds
4557 {
4558 // Need to charge a transfer fee to offer owner.
4559 offerRate = rate;
4560 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4561 }
4562
4563 if (saOwnerFundsLimit >= saTakerGets)
4564 {
4565 // Sufficient funds; no shenanigans.
4566 saTakerGetsFunded = saTakerGets;
4567 }
4568 else
4569 {
4570 // Only provide the funded amounts if not fully funded.
4571
4572 saTakerGetsFunded = saOwnerFundsLimit;
4573
4574 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4575 std::min(
4576 saTakerPays,
4577 multiply(
4578 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4579 .setJson(jvOffer[jss::taker_pays_funded]);
4580 }
4581
4582 STAmount saOwnerPays = (parityRate == offerRate)
4583 ? saTakerGetsFunded
4584 : std::min(
4585 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4586
4587 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4588
4589 // Include all offers funded and unfunded
4590 Json::Value& jvOf = jvOffers.append(jvOffer);
4591 jvOf[jss::quality] = saDirRate.getText();
4592
4593 if (firstOwnerOffer)
4594 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4595 }
4596 else
4597 {
4598 JLOG(m_journal.warn()) << "Missing offer";
4599 }
4600
4601 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4602 {
4603 bDirectAdvance = true;
4604 }
4605 else
4606 {
4607 JLOG(m_journal.trace())
4608 << "getBookPage: offerIndex=" << offerIndex;
4609 }
4610 }
4611 }
4612
4613 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4614 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4615}
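The loop above walks the book one quality directory at a time: cdirFirst() seeds the cursor on the tip page, cdirNext() drains the page, and bDirectAdvance moves on to the next-best quality. A minimal standalone sketch of that paging pattern, with plain standard-library containers standing in for the ledger's directory structure (names and data here are illustrative, not rippled API):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

int main()
{
    // quality -> offer keys, best (lowest) quality first; stands in for
    // the ledger's per-quality directory pages.
    std::map<std::uint64_t, std::vector<std::string>> book{
        {1000, {"offerA", "offerB"}},
        {1010, {"offerC"}},
    };

    unsigned int iLimit = 2;  // like iLimit above: cap the result size
    for (auto const& [quality, offers] : book)  // "direct advance"
    {
        for (auto const& key : offers)  // cdirFirst/cdirNext within a page
        {
            if (iLimit-- == 0)
                return 0;
            std::cout << "quality " << quality << ": " << key << '\n';
        }
    }
}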
4616
4617#else
4618
4619// This is the new code that uses the book iterators
4620// It has temporarily been disabled
4621
4622void
4623NetworkOPsImp::getBookPage(
4624 std::shared_ptr<ReadView const>& lpLedger,
4625 Book const& book,
4626 AccountID const& uTakerID,
4627 bool const bProof,
4628 unsigned int iLimit,
4629 Json::Value const& jvMarker,
4630 Json::Value& jvResult)
4631{
4632 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4633
4634 hash_map<AccountID, STAmount> umBalance;
4635
4636 MetaView lesActive(lpLedger, tapNONE, true);
4637 OrderBookIterator obIterator(lesActive, book);
4638
4639 auto const rate = transferRate(lesActive, book.out.account);
4640
4641 bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4642 lesActive.isGlobalFrozen(book.in.account);
4643
4644 while (iLimit-- > 0 && obIterator.nextOffer())
4645 {
4646 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4647 if (sleOffer)
4648 {
4649 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4650 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4651 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4652 STAmount saDirRate = obIterator.getCurrentRate();
4653 STAmount saOwnerFunds;
4654
4655 if (book.out.account == uOfferOwnerID)
4656 {
4657 // If offer is selling issuer's own IOUs, it is fully funded.
4658 saOwnerFunds = saTakerGets;
4659 }
4660 else if (bGlobalFreeze)
4661 {
4662 // If either asset is globally frozen, consider all offers
4663 // that aren't ours to be totally unfunded
4664 saOwnerFunds.clear(book.out);
4665 }
4666 else
4667 {
4668 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4669
4670 if (umBalanceEntry != umBalance.end())
4671 {
4672 // Found in running balance table.
4673
4674 saOwnerFunds = umBalanceEntry->second;
4675 }
4676 else
4677 {
4678 // Did not find balance in table.
4679
4680 saOwnerFunds = lesActive.accountHolds(
4681 uOfferOwnerID,
4682 book.out.currency,
4683 book.out.account,
4684 fhZERO_IF_FROZEN);
4685
4686 if (saOwnerFunds.isNegative())
4687 {
4688 // Treat negative funds as zero.
4689
4690 saOwnerFunds.zero();
4691 }
4692 }
4693 }
4694
4695 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4696
4697 STAmount saTakerGetsFunded;
4698 STAmount saOwnerFundsLimit = saOwnerFunds;
4699 Rate offerRate = parityRate;
4700
4701 if (rate != parityRate
4702 // Have a transfer fee.
4703 && uTakerID != book.out.account
4704 // Not taking offers of own IOUs.
4705 && book.out.account != uOfferOwnerID)
4706 // Offer owner not issuing own funds.
4707 {
4708 // Need to charge a transfer fee to offer owner.
4709 offerRate = rate;
4710 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4711 }
4712
4713 if (saOwnerFundsLimit >= saTakerGets)
4714 {
4715 // Sufficient funds; no shenanigans.
4716 saTakerGetsFunded = saTakerGets;
4717 }
4718 else
4719 {
4720 // Only provide the funded amount, if not fully funded.
4721 saTakerGetsFunded = saOwnerFundsLimit;
4722
4723 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4724
4725 // TODO(tom): The result of this expression is not used - what's
4726 // going on here?
4727 std::min(
4728 saTakerPays,
4729 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4730 .setJson(jvOffer[jss::taker_pays_funded]);
4731 }
4732
4733 STAmount saOwnerPays = (parityRate == offerRate)
4734 ? saTakerGetsFunded
4735 : std::min(
4736 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4737
4738 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4739
4740 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4741 {
4742 // Only provide funded offers and offers of the taker.
4743 Json::Value& jvOf = jvOffers.append(jvOffer);
4744 jvOf[jss::quality] = saDirRate.getText();
4745 }
4746 }
4747 }
4748
4749 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4750 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4751}
4752
4753#endif
4754
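Both getBookPage variants share the same funding arithmetic: when the issuer charges a transfer fee (rate above parityRate), delivering TakerGets costs the offer owner TakerGets multiplied by the rate, so the owner's usable limit is ownerFunds divided by the rate, and the taker_gets_funded and taker_pays_funded fields are clamped to that limit; the running balance kept in umBalance is the owner's funds minus what this offer would consume. A worked standalone sketch of that math, with plain doubles standing in for STAmount, Rate, divide() and multiply() (the values are illustrative):

#include <algorithm>
#include <iostream>

int main()
{
    double const ownerFunds = 100.0;  // what accountHolds() reported
    double const takerGets = 150.0;   // offer's TakerGets
    double const takerPays = 300.0;   // offer's TakerPays
    double const dirRate = takerPays / takerGets;  // directory quality
    double const rate = 1.005;        // issuer transfer fee (parity = 1.0)

    double const fundsLimit = ownerFunds / rate;  // divide(saOwnerFunds, rate)
    double const getsFunded = std::min(takerGets, fundsLimit);
    double const paysFunded = std::min(takerPays, getsFunded * dirRate);
    double const ownerPays = std::min(ownerFunds, getsFunded * rate);

    std::cout << "taker_gets_funded = " << getsFunded << '\n'
              << "taker_pays_funded = " << paysFunded << '\n'
              << "running balance   = " << ownerFunds - ownerPays << '\n';
}
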
4755inline void
4756NetworkOPsImp::collect_metrics()
4757{
4758 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4759 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4760 std::chrono::steady_clock::now() - start);
4761 counters[static_cast<std::size_t>(mode)].dur += current;
4762
4763 std::lock_guard lock(m_statsMutex);
4764 m_stats.disconnected_duration.set(
4765 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4766 .dur.count());
4767 m_stats.connected_duration.set(
4768 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4769 .dur.count());
4770 m_stats.syncing_duration.set(
4771 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4772 m_stats.tracking_duration.set(
4773 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4774 .dur.count());
4775 m_stats.full_duration.set(
4776 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4777
4778 m_stats.disconnected_transitions.set(
4779 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4780 .transitions);
4781 m_stats.connected_transitions.set(
4782 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4783 .transitions);
4784 m_stats.syncing_transitions.set(
4785 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4786 m_stats.tracking_transitions.set(
4787 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4788 .transitions);
4789 m_stats.full_transitions.set(
4790 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4791}
4792
4793void
4794NetworkOPsImp::StateAccounting::mode(OperatingMode om)
4795{
4796 auto now = std::chrono::steady_clock::now();
4797
4798 std::lock_guard lock(mutex_);
4799 ++counters_[static_cast<std::size_t>(om)].transitions;
4800 if (om == OperatingMode::FULL &&
4801 counters_[static_cast<std::size_t>(om)].transitions == 1)
4802 {
4803 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4804 now - processStart_)
4805 .count();
4806 }
4807 counters_[static_cast<std::size_t>(mode_)].dur +=
4808 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4809
4810 mode_ = om;
4811 start_ = now;
4812}
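mode() charges the elapsed interval to the outgoing state, bumps the incoming state's transition count, and stamps initialSyncUs_ the first time the server reaches FULL. The same bookkeeping, reduced to a self-contained program (the mode indices and names abbreviate the OperatingMode enum; this is a sketch, not rippled API):

#include <array>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <iostream>

int main()
{
    using clock = std::chrono::steady_clock;
    using usec = std::chrono::microseconds;
    struct Counter
    {
        usec dur{0};
        std::uint64_t transitions = 0;
    };

    std::array<Counter, 5> counters{};  // one slot per OperatingMode
    std::size_t mode = 0;               // 0 = DISCONNECTED ... 4 = FULL
    auto start = clock::now();
    auto const processStart = start;
    usec initialSync{0};

    auto transition = [&](std::size_t next) {
        auto const now = clock::now();
        ++counters[next].transitions;
        if (next == 4 && counters[next].transitions == 1)  // first FULL
            initialSync = std::chrono::duration_cast<usec>(now - processStart);
        counters[mode].dur +=  // charge time to the outgoing mode
            std::chrono::duration_cast<usec>(now - start);
        mode = next;
        start = now;
    };

    transition(1);  // CONNECTED
    transition(4);  // FULL: records the initial sync duration
    std::cout << "initial sync: " << initialSync.count() << "us\n"
              << "transitions into FULL: " << counters[4].transitions << '\n';
}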
4813
4814void
4815NetworkOPsImp::StateAccounting::json(Json::Value& obj) const
4816{
4817 auto [counters, mode, start, initialSync] = getCounterData();
4818 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4819 std::chrono::steady_clock::now() - start);
4820 counters[static_cast<std::size_t>(mode)].dur += current;
4821
4822 obj[jss::state_accounting] = Json::objectValue;
4823 for (std::size_t i = static_cast<std::size_t>(OperatingMode::DISCONNECTED);
4824 i <= static_cast<std::size_t>(OperatingMode::FULL);
4825 ++i)
4826 {
4827 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4828 auto& state = obj[jss::state_accounting][states_[i]];
4829 state[jss::transitions] = std::to_string(counters[i].transitions);
4830 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4831 }
4832 obj[jss::server_state_duration_us] = std::to_string(current.count());
4833 if (initialSync)
4834 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4835}
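Because the in-progress interval (current) is folded into the snapshot before serialization, the reported durations are up to date at the moment of the call. For reference, the object built above takes this shape inside a server_state or server_info response; the numbers below are invented for illustration:

"state_accounting": {
  "disconnected": {"transitions": "1", "duration_us": "1200000"},
  "connected":    {"transitions": "1", "duration_us": "4800000"},
  "syncing":      {"transitions": "1", "duration_us": "2500000"},
  "tracking":     {"transitions": "1", "duration_us": "300000"},
  "full":         {"transitions": "1", "duration_us": "86400000000"}
},
"server_state_duration_us": "86400000000",
"initial_sync_duration_us": "8800000"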
4836
4837//------------------------------------------------------------------------------
4838
4839std::unique_ptr<NetworkOPs>
4840make_NetworkOPs(
4841 Application& app,
4842 NetworkOPs::clock_type& clock,
4843 bool standalone,
4844 std::size_t minPeerCount,
4845 bool startvalid,
4846 JobQueue& job_queue,
4847 LedgerMaster& ledgerMaster,
4848 ValidatorKeys const& validatorKeys,
4849 boost::asio::io_service& io_svc,
4850 beast::Journal journal,
4851 beast::insight::Collector::ptr const& collector)
4852{
4853 return std::make_unique<NetworkOPsImp>(
4854 app,
4855 clock,
4856 standalone,
4857 minPeerCount,
4858 startvalid,
4859 job_queue,
4860 ledgerMaster,
4861 validatorKeys,
4862 io_svc,
4863 journal,
4864 collector);
4865}
4866
4867} // namespace ripple