rippled
Loading...
Searching...
No Matches
NetworkOPs.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/main/Tuning.h>
32#include <xrpld/app/misc/AmendmentTable.h>
33#include <xrpld/app/misc/DeliverMax.h>
34#include <xrpld/app/misc/HashRouter.h>
35#include <xrpld/app/misc/LoadFeeTrack.h>
36#include <xrpld/app/misc/NetworkOPs.h>
37#include <xrpld/app/misc/Transaction.h>
38#include <xrpld/app/misc/TxQ.h>
39#include <xrpld/app/misc/ValidatorKeys.h>
40#include <xrpld/app/misc/ValidatorList.h>
41#include <xrpld/app/misc/detail/AccountTxPaging.h>
42#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
43#include <xrpld/app/tx/apply.h>
44#include <xrpld/consensus/Consensus.h>
45#include <xrpld/consensus/ConsensusParms.h>
46#include <xrpld/overlay/Cluster.h>
47#include <xrpld/overlay/Overlay.h>
48#include <xrpld/overlay/predicates.h>
49#include <xrpld/perflog/PerfLog.h>
50#include <xrpld/rpc/BookChanges.h>
51#include <xrpld/rpc/CTID.h>
52#include <xrpld/rpc/DeliveredAmount.h>
53#include <xrpld/rpc/MPTokenIssuanceID.h>
54#include <xrpld/rpc/ServerHandler.h>
55
56#include <xrpl/basics/UptimeClock.h>
57#include <xrpl/basics/mulDiv.h>
58#include <xrpl/basics/safe_cast.h>
59#include <xrpl/basics/scope.h>
60#include <xrpl/beast/utility/rngfill.h>
61#include <xrpl/crypto/RFC1751.h>
62#include <xrpl/crypto/csprng.h>
63#include <xrpl/protocol/BuildInfo.h>
64#include <xrpl/protocol/Feature.h>
65#include <xrpl/protocol/MultiApiJson.h>
66#include <xrpl/protocol/NFTSyntheticSerializer.h>
67#include <xrpl/protocol/RPCErr.h>
68#include <xrpl/protocol/TxFlags.h>
69#include <xrpl/protocol/jss.h>
70#include <xrpl/resource/Fees.h>
71#include <xrpl/resource/ResourceManager.h>
72
73#include <boost/asio/ip/host_name.hpp>
74#include <boost/asio/steady_timer.hpp>
75
76#include <algorithm>
77#include <exception>
78#include <mutex>
79#include <optional>
80#include <set>
81#include <sstream>
82#include <string>
83#include <tuple>
84#include <unordered_map>
85
86namespace ripple {
87
88class NetworkOPsImp final : public NetworkOPs
89{
95 {
96 public:
98 bool const admin;
99 bool const local;
101 bool applied = false;
103
106 bool a,
107 bool l,
108 FailHard f)
109 : transaction(t), admin(a), local(l), failType(f)
110 {
111 XRPL_ASSERT(
113 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
114 "valid inputs");
115 }
116 };
117
121 enum class DispatchState : unsigned char {
122 none,
123 scheduled,
124 running,
125 };
126
128
144 {
145 struct Counters
146 {
147 explicit Counters() = default;
148
151 };
152
156 std::chrono::steady_clock::time_point start_ =
158 std::chrono::steady_clock::time_point const processStart_ = start_;
161
162 public:
164 {
166 .transitions = 1;
167 }
168
175 void
177
183 void
184 json(Json::Value& obj) const;
185
187 {
189 decltype(mode_) mode;
190 decltype(start_) start;
192 };
193
196 {
199 }
200 };
201
204 {
205 ServerFeeSummary() = default;
206
208 XRPAmount fee,
209 TxQ::Metrics&& escalationMetrics,
210 LoadFeeTrack const& loadFeeTrack);
211 bool
212 operator!=(ServerFeeSummary const& b) const;
213
214 bool
216 {
217 return !(*this != b);
218 }
219
224 };
225
226public:
228 Application& app,
230 bool standalone,
231 std::size_t minPeerCount,
232 bool start_valid,
233 JobQueue& job_queue,
235 ValidatorKeys const& validatorKeys,
236 boost::asio::io_service& io_svc,
237 beast::Journal journal,
238 beast::insight::Collector::ptr const& collector)
239 : app_(app)
240 , m_journal(journal)
243 , heartbeatTimer_(io_svc)
244 , clusterTimer_(io_svc)
245 , accountHistoryTxTimer_(io_svc)
246 , mConsensus(
247 app,
249 setup_FeeVote(app_.config().section("voting")),
250 app_.logs().journal("FeeVote")),
252 *m_localTX,
253 app.getInboundTransactions(),
254 beast::get_abstract_clock<std::chrono::steady_clock>(),
255 validatorKeys,
256 app_.logs().journal("LedgerConsensus"))
257 , validatorPK_(
258 validatorKeys.keys ? validatorKeys.keys->publicKey
259 : decltype(validatorPK_){})
261 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
262 : decltype(validatorMasterPK_){})
264 , m_job_queue(job_queue)
265 , m_standalone(standalone)
266 , minPeerCount_(start_valid ? 0 : minPeerCount)
268 {
269 }
270
271 ~NetworkOPsImp() override
272 {
273 // This clear() is necessary to ensure the shared_ptrs in this map get
274 // destroyed NOW because the objects in this map invoke methods on this
275 // class when they are destroyed
277 }
278
279public:
281 getOperatingMode() const override;
282
284 strOperatingMode(OperatingMode const mode, bool const admin) const override;
285
287 strOperatingMode(bool const admin = false) const override;
288
289 //
290 // Transaction operations.
291 //
292
293 // Must complete immediately.
294 void
296
297 void
299 std::shared_ptr<Transaction>& transaction,
300 bool bUnlimited,
301 bool bLocal,
302 FailHard failType) override;
303
304 void
305 processTransactionSet(CanonicalTXSet const& set) override;
306
315 void
318 bool bUnlimited,
319 FailHard failType);
320
330 void
333 bool bUnlimited,
334 FailHard failtype);
335
336private:
337 bool
339
340 void
343 std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback);
344
345public:
349 void
351
357 void
359
360 //
361 // Owner functions.
362 //
363
367 AccountID const& account) override;
368
369 //
370 // Book functions.
371 //
372
373 void
376 Book const&,
377 AccountID const& uTakerID,
378 bool const bProof,
379 unsigned int iLimit,
380 Json::Value const& jvMarker,
381 Json::Value& jvResult) override;
382
383 // Ledger proposal/close functions.
384 bool
386
387 bool
390 std::string const& source) override;
391
392 void
393 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
394
395 // Network state machine.
396
397 // Used for the "jump" case.
398private:
399 void
401 bool
403
404public:
405 bool
407 uint256 const& networkClosed,
408 std::unique_ptr<std::stringstream> const& clog) override;
409 void
411 void
412 setStandAlone() override;
413
417 void
418 setStateTimer() override;
419
420 void
421 setNeedNetworkLedger() override;
422 void
423 clearNeedNetworkLedger() override;
424 bool
425 isNeedNetworkLedger() override;
426 bool
427 isFull() override;
428
429 void
430 setMode(OperatingMode om) override;
431
432 bool
433 isBlocked() override;
434 bool
435 isAmendmentBlocked() override;
436 void
437 setAmendmentBlocked() override;
438 bool
439 isAmendmentWarned() override;
440 void
441 setAmendmentWarned() override;
442 void
443 clearAmendmentWarned() override;
444 bool
445 isUNLBlocked() override;
446 void
447 setUNLBlocked() override;
448 void
449 clearUNLBlocked() override;
450 void
451 consensusViewChange() override;
452
454 getConsensusInfo() override;
456 getServerInfo(bool human, bool admin, bool counters) override;
457 void
458 clearLedgerFetch() override;
460 getLedgerFetchInfo() override;
463 std::optional<std::chrono::milliseconds> consensusDelay) override;
464 void
465 reportFeeChange() override;
466 void
468
469 void
470 updateLocalTx(ReadView const& view) override;
472 getLocalTxCount() override;
473
474 //
475 // Monitoring: publisher side.
476 //
477 void
478 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
479 void
482 std::shared_ptr<STTx const> const& transaction,
483 TER result) override;
484 void
485 pubValidation(std::shared_ptr<STValidation> const& val) override;
486
487 //--------------------------------------------------------------------------
488 //
489 // InfoSub::Source.
490 //
491 void
493 InfoSub::ref ispListener,
494 hash_set<AccountID> const& vnaAccountIDs,
495 bool rt) override;
496 void
498 InfoSub::ref ispListener,
499 hash_set<AccountID> const& vnaAccountIDs,
500 bool rt) override;
501
502 // Just remove the subscription from the tracking
503 // not from the InfoSub. Needed for InfoSub destruction
504 void
506 std::uint64_t seq,
507 hash_set<AccountID> const& vnaAccountIDs,
508 bool rt) override;
509
511 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
512 override;
513 void
515 InfoSub::ref ispListener,
516 AccountID const& account,
517 bool historyOnly) override;
518
519 void
521 std::uint64_t seq,
522 AccountID const& account,
523 bool historyOnly) override;
524
525 bool
526 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
527 bool
528 unsubLedger(std::uint64_t uListener) override;
529
530 bool
531 subBookChanges(InfoSub::ref ispListener) override;
532 bool
533 unsubBookChanges(std::uint64_t uListener) override;
534
535 bool
536 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
537 override;
538 bool
539 unsubServer(std::uint64_t uListener) override;
540
541 bool
542 subBook(InfoSub::ref ispListener, Book const&) override;
543 bool
544 unsubBook(std::uint64_t uListener, Book const&) override;
545
546 bool
547 subManifests(InfoSub::ref ispListener) override;
548 bool
549 unsubManifests(std::uint64_t uListener) override;
550 void
551 pubManifest(Manifest const&) override;
552
553 bool
554 subTransactions(InfoSub::ref ispListener) override;
555 bool
556 unsubTransactions(std::uint64_t uListener) override;
557
558 bool
559 subRTTransactions(InfoSub::ref ispListener) override;
560 bool
561 unsubRTTransactions(std::uint64_t uListener) override;
562
563 bool
564 subValidations(InfoSub::ref ispListener) override;
565 bool
566 unsubValidations(std::uint64_t uListener) override;
567
568 bool
569 subPeerStatus(InfoSub::ref ispListener) override;
570 bool
571 unsubPeerStatus(std::uint64_t uListener) override;
572 void
573 pubPeerStatus(std::function<Json::Value(void)> const&) override;
574
575 bool
576 subConsensus(InfoSub::ref ispListener) override;
577 bool
578 unsubConsensus(std::uint64_t uListener) override;
579
581 findRpcSub(std::string const& strUrl) override;
583 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
584 bool
585 tryRemoveRpcSub(std::string const& strUrl) override;
586
587 void
588 stop() override
589 {
590 {
591 boost::system::error_code ec;
592 heartbeatTimer_.cancel(ec);
593 if (ec)
594 {
595 JLOG(m_journal.error())
596 << "NetworkOPs: heartbeatTimer cancel error: "
597 << ec.message();
598 }
599
600 ec.clear();
601 clusterTimer_.cancel(ec);
602 if (ec)
603 {
604 JLOG(m_journal.error())
605 << "NetworkOPs: clusterTimer cancel error: "
606 << ec.message();
607 }
608
609 ec.clear();
610 accountHistoryTxTimer_.cancel(ec);
611 if (ec)
612 {
613 JLOG(m_journal.error())
614 << "NetworkOPs: accountHistoryTxTimer cancel error: "
615 << ec.message();
616 }
617 }
618 // Make sure that any waitHandlers pending in our timers are done.
619 using namespace std::chrono_literals;
620 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
621 }
622
623 void
624 stateAccounting(Json::Value& obj) override;
625
626private:
627 void
628 setTimer(
629 boost::asio::steady_timer& timer,
630 std::chrono::milliseconds const& expiry_time,
631 std::function<void()> onExpire,
632 std::function<void()> onError);
633 void
635 void
637 void
639 void
641
643 transJson(
644 std::shared_ptr<STTx const> const& transaction,
645 TER result,
646 bool validated,
649
650 void
653 AcceptedLedgerTx const& transaction,
654 bool last);
655
656 void
659 AcceptedLedgerTx const& transaction,
660 bool last);
661
662 void
665 std::shared_ptr<STTx const> const& transaction,
666 TER result);
667
668 void
669 pubServer();
670 void
672
674 getHostId(bool forAdmin);
675
676private:
680
681 /*
682 * With a validated ledger to separate history and future, the node
683 * streams historical txns with negative indexes starting from -1,
684 * and streams future txns starting from index 0.
685 * The SubAccountHistoryIndex struct maintains these indexes.
686 * It also has a flag stopHistorical_ for stopping streaming
687 * the historical txns.
688 */
690 {
692 // forward
694 // separate backward and forward
696 // history, backward
701
703 : accountId_(accountId)
704 , forwardTxIndex_(0)
707 , historyTxIndex_(-1)
708 , haveHistorical_(false)
709 , stopHistorical_(false)
710 {
711 }
712 };
714 {
717 };
719 {
722 };
725
729 void
733 void
735 void
737
740
742
744
746
751
753 boost::asio::steady_timer heartbeatTimer_;
754 boost::asio::steady_timer clusterTimer_;
755 boost::asio::steady_timer accountHistoryTxTimer_;
756
758
761
763
765
768
770
772
773 enum SubTypes {
774 sLedger, // Accepted ledgers.
775 sManifests, // Received validator manifests.
776 sServer, // When server changes connectivity state.
777 sTransactions, // All accepted transactions.
778 sRTTransactions, // All proposed and accepted transactions.
779 sValidations, // Received validations.
780 sPeerStatus, // Peer status changes.
781 sConsensusPhase, // Consensus phase
782 sBookChanges, // Per-ledger order book changes
783 sLastEntry // Any new entry must be ADDED ABOVE this one
784 };
785
787
789
791
792 // Whether we are in standalone mode.
793 bool const m_standalone;
794
795 // The number of nodes that we need to consider ourselves connected.
797
798 // Transaction batching.
803
805
808
809private:
810 struct Stats
811 {
812 template <class Handler>
814 Handler const& handler,
815 beast::insight::Collector::ptr const& collector)
816 : hook(collector->make_hook(handler))
817 , disconnected_duration(collector->make_gauge(
818 "State_Accounting",
819 "Disconnected_duration"))
820 , connected_duration(collector->make_gauge(
821 "State_Accounting",
822 "Connected_duration"))
824 collector->make_gauge("State_Accounting", "Syncing_duration"))
825 , tracking_duration(collector->make_gauge(
826 "State_Accounting",
827 "Tracking_duration"))
829 collector->make_gauge("State_Accounting", "Full_duration"))
830 , disconnected_transitions(collector->make_gauge(
831 "State_Accounting",
832 "Disconnected_transitions"))
833 , connected_transitions(collector->make_gauge(
834 "State_Accounting",
835 "Connected_transitions"))
836 , syncing_transitions(collector->make_gauge(
837 "State_Accounting",
838 "Syncing_transitions"))
839 , tracking_transitions(collector->make_gauge(
840 "State_Accounting",
841 "Tracking_transitions"))
843 collector->make_gauge("State_Accounting", "Full_transitions"))
844 {
845 }
846
853
859 };
860
861 std::mutex m_statsMutex; // Mutex to lock m_stats
863
864private:
865 void
867};
868
869//------------------------------------------------------------------------------
870
872 {"disconnected", "connected", "syncing", "tracking", "full"}};
873
875
883
884static auto const genesisAccountId = calcAccountID(
886 .first);
887
888//------------------------------------------------------------------------------
889inline OperatingMode
891{
892 return mMode;
893}
894
895inline std::string
896NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
897{
898 return strOperatingMode(mMode, admin);
899}
900
901inline void
903{
905}
906
907inline void
909{
910 needNetworkLedger_ = true;
911}
912
913inline void
915{
916 needNetworkLedger_ = false;
917}
918
919inline bool
921{
922 return needNetworkLedger_;
923}
924
925inline bool
927{
929}
930
std::string
NetworkOPsImp::getHostId(bool forAdmin)
{
    // NOTE(review): the signature lines appear elided in this view;
    // restored from the declaration (getHostId(bool forAdmin)).
    //
    // Admin callers are shown the machine's real hostname.
    static std::string const hostname = boost::asio::ip::host_name();

    if (forAdmin)
        return hostname;

    // For non-admin uses hash the node public key into a
    // single RFC1751 word:
    static std::string const shroudedHostId = [this]() {
        auto const& id = app_.nodeIdentity();

        return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
    }();

    return shroudedHostId;
}
949
950void
952{
954
955 // Only do this work if a cluster is configured
956 if (app_.cluster().size() != 0)
958}
959
void
NetworkOPsImp::setTimer(
    boost::asio::steady_timer& timer,
    std::chrono::milliseconds const& expiry_time,
    std::function<void()> onExpire,
    std::function<void()> onError)
{
    // NOTE(review): the "NetworkOPsImp::setTimer(" line appears elided in
    // this view; restored from the in-class declaration.
    //
    // Arm 'timer' to fire after 'expiry_time'. The handler is wrapped in
    // waitHandlerCounter_ so stop() can wait for it to complete.
    //
    // Only start the timer if waitHandlerCounter_ is not yet joined.
    if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
            [this, onExpire, onError](boost::system::error_code const& e) {
                // Normal expiry: run the caller's action unless the job
                // queue has already stopped.
                if ((e.value() == boost::system::errc::success) &&
                    (!m_job_queue.isStopped()))
                {
                    onExpire();
                }
                // Recover as best we can if an unexpected error occurs.
                // (operation_aborted is expected on cancel and is ignored.)
                if (e.value() != boost::system::errc::success &&
                    e.value() != boost::asio::error::operation_aborted)
                {
                    // Try again later and hope for the best.
                    JLOG(m_journal.error())
                        << "Timer got error '" << e.message()
                        << "'. Restarting timer.";
                    onError();
                }
            }))
    {
        timer.expires_from_now(expiry_time);
        timer.async_wait(std::move(*optionalCountedHandler));
    }
}
991
992void
993NetworkOPsImp::setHeartbeatTimer()
994{
995 setTimer(
996 heartbeatTimer_,
997 mConsensus.parms().ledgerGRANULARITY,
998 [this]() {
999 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
1000 processHeartbeatTimer();
1001 });
1002 },
1003 [this]() { setHeartbeatTimer(); });
1004}
1005
1006void
1007NetworkOPsImp::setClusterTimer()
1008{
1009 using namespace std::chrono_literals;
1010
1011 setTimer(
1012 clusterTimer_,
1013 10s,
1014 [this]() {
1015 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
1016 processClusterTimer();
1017 });
1018 },
1019 [this]() { setClusterTimer(); });
1020}
1021
void
NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
{
    // Schedule (in four seconds) another attempt to stream historical
    // transactions for this account-history subscription. On timer error,
    // simply reschedule.
    JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
                            << toBase58(subInfo.index_->accountId_);
    using namespace std::chrono_literals;
    setTimer(
        accountHistoryTxTimer_,
        4s,
        [this, subInfo]() { addAccountHistoryJob(subInfo); },
        [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
}
1034
void
NetworkOPsImp::processHeartbeatTimer()
{
    // Periodic heartbeat: re-evaluate our operating mode based on peer
    // count, drive the consensus state machine, then re-arm the timer.
    RclConsensusLogger clog(
        "Heartbeat Timer", mConsensus.validating(), m_journal);
    {
        std::unique_lock lock{app_.getMasterMutex()};

        // VFALCO NOTE This is for diagnosing a crash on exit
        LoadManager& mgr(app_.getLoadManager());
        mgr.heartbeat();

        std::size_t const numPeers = app_.overlay().size();

        // do we have sufficient peers? If not, we are disconnected.
        if (numPeers < minPeerCount_)
        {
            if (mMode != OperatingMode::DISCONNECTED)
            {
                setMode(OperatingMode::DISCONNECTED);
                // NOTE(review): the declaration of 'ss' appears elided in
                // this view; restored as a std::stringstream.
                std::stringstream ss;
                ss << "Node count (" << numPeers << ") has fallen "
                   << "below required minimum (" << minPeerCount_ << ").";
                JLOG(m_journal.warn()) << ss.str();
                CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
            }
            else
            {
                CLOG(clog.ss())
                    << "already DISCONNECTED. too few peers (" << numPeers
                    << "), need at least " << minPeerCount_;
            }

            // MasterMutex lock need not be held to call setHeartbeatTimer()
            lock.unlock();
            // We do not call mConsensus.timerEntry until there are enough
            // peers providing meaningful inputs to consensus
            setHeartbeatTimer();

            return;
        }

        if (mMode == OperatingMode::DISCONNECTED)
        {
            setMode(OperatingMode::CONNECTED);
            JLOG(m_journal.info())
                << "Node count (" << numPeers << ") is sufficient.";
            CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
                            << " peers. ";
        }

        // Check if the last validated ledger forces a change between these
        // states.
        auto origMode = mMode.load();
        CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
        // NOTE(review): re-"setting" the current mode presumably lets
        // setMode() re-evaluate it against ledger state — confirm in
        // setMode().
        if (mMode == OperatingMode::SYNCING)
            setMode(OperatingMode::SYNCING);
        else if (mMode == OperatingMode::CONNECTED)
            setMode(OperatingMode::CONNECTED);
        auto newMode = mMode.load();
        if (origMode != newMode)
        {
            CLOG(clog.ss())
                << ", changing to " << strOperatingMode(newMode, true);
        }
        CLOG(clog.ss()) << ". ";
    }

    // Advance consensus with the current network close time.
    mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());

    // Report consensus phase transitions to subscribers.
    CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
    ConsensusPhase const currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
        CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
    }
    CLOG(clog.ss()) << ". ";

    // Re-arm for the next heartbeat.
    setHeartbeatTimer();
}
1117
void
NetworkOPsImp::processClusterTimer()
{
    // Periodically share this node's load status and resource-consumer
    // gossip with the other members of our cluster.
    if (app_.cluster().size() == 0)
        return;

    using namespace std::chrono_literals;

    // Report our local load fee only when our validated ledger is fresh
    // (four minutes old or less); otherwise report zero.
    bool const update = app_.cluster().update(
        app_.nodeIdentity().first,
        "",
        (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
            ? app_.getFeeTrack().getLocalFee()
            : 0,
        app_.timeKeeper().now());

    if (!update)
    {
        JLOG(m_journal.debug()) << "Too soon to send cluster update";
        setClusterTimer();
        return;
    }

    // Build the cluster status message from every known cluster node.
    protocol::TMCluster cluster;
    app_.cluster().for_each([&cluster](ClusterNode const& node) {
        protocol::TMClusterNode& n = *cluster.add_clusternodes();
        n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
        n.set_reporttime(node.getReportTime().time_since_epoch().count());
        n.set_nodeload(node.getLoadFee());
        if (!node.name().empty())
            n.set_nodename(node.name());
    });

    // Attach resource-consumer (load source) gossip.
    Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
    for (auto& item : gossip.items)
    {
        protocol::TMLoadSource& node = *cluster.add_loadsources();
        node.set_name(to_string(item.address));
        node.set_cost(item.balance);
    }
    // Send only to peers that are members of our cluster, then re-arm.
    app_.overlay().foreach(send_if(
        std::make_shared<Message>(cluster, protocol::mtCLUSTER),
        peer_in_cluster()));
    setClusterTimer();
}
1163
1164//------------------------------------------------------------------------------
1165
std::string
NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
    const
{
    // NOTE(review): the return-type line appears elided in this view;
    // restored as std::string per the single-argument overload.
    //
    // Admins get a more specific answer when FULL: "proposing" or
    // "validating" depending on our consensus participation.
    if (mode == OperatingMode::FULL && admin)
    {
        auto const consensusMode = mConsensus.mode();
        if (consensusMode != ConsensusMode::wrongLedger)
        {
            if (consensusMode == ConsensusMode::proposing)
                return "proposing";

            if (mConsensus.validating())
                return "validating";
        }
    }

    // Otherwise map the operating mode to its canonical state name.
    return states_[static_cast<std::size_t>(mode)];
}
1185
1186void
1187NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1188{
1189 if (isNeedNetworkLedger())
1190 {
1191 // Nothing we can do if we've never been in sync
1192 return;
1193 }
1194
1195 // Enforce Network bar for batch txn
1196 if (iTrans->isFlag(tfInnerBatchTxn) &&
1197 m_ledgerMaster.getValidatedRules().enabled(featureBatch))
1198 {
1199 JLOG(m_journal.error())
1200 << "Submitted transaction invalid: tfInnerBatchTxn flag present.";
1201 return;
1202 }
1203
1204 // this is an asynchronous interface
1205 auto const trans = sterilize(*iTrans);
1206
1207 auto const txid = trans->getTransactionID();
1208 auto const flags = app_.getHashRouter().getFlags(txid);
1209
1210 if ((flags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1211 {
1212 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1213 return;
1214 }
1215
1216 try
1217 {
1218 auto const [validity, reason] = checkValidity(
1219 app_.getHashRouter(),
1220 *trans,
1221 m_ledgerMaster.getValidatedRules(),
1222 app_.config());
1223
1224 if (validity != Validity::Valid)
1225 {
1226 JLOG(m_journal.warn())
1227 << "Submitted transaction invalid: " << reason;
1228 return;
1229 }
1230 }
1231 catch (std::exception const& ex)
1232 {
1233 JLOG(m_journal.warn())
1234 << "Exception checking transaction " << txid << ": " << ex.what();
1235
1236 return;
1237 }
1238
1239 std::string reason;
1240
1241 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1242
1243 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1244 auto t = tx;
1245 processTransaction(t, false, false, FailHard::no);
1246 });
1247}
1248
1249bool
1250NetworkOPsImp::preProcessTransaction(std::shared_ptr<Transaction>& transaction)
1251{
1252 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1253
1254 if ((newFlags & HashRouterFlags::BAD) != HashRouterFlags::UNDEFINED)
1255 {
1256 // cached bad
1257 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1258 transaction->setStatus(INVALID);
1259 transaction->setResult(temBAD_SIGNATURE);
1260 return false;
1261 }
1262
1263 auto const view = m_ledgerMaster.getCurrentLedger();
1264
1265 // This function is called by several different parts of the codebase
1266 // under no circumstances will we ever accept an inner txn within a batch
1267 // txn from the network.
1268 auto const sttx = *transaction->getSTransaction();
1269 if (sttx.isFlag(tfInnerBatchTxn) && view->rules().enabled(featureBatch))
1270 {
1271 transaction->setStatus(INVALID);
1272 transaction->setResult(temINVALID_FLAG);
1273 app_.getHashRouter().setFlags(
1274 transaction->getID(), HashRouterFlags::BAD);
1275 return false;
1276 }
1277
1278 // NOTE eahennis - I think this check is redundant,
1279 // but I'm not 100% sure yet.
1280 // If so, only cost is looking up HashRouter flags.
1281 auto const [validity, reason] =
1282 checkValidity(app_.getHashRouter(), sttx, view->rules(), app_.config());
1283 XRPL_ASSERT(
1284 validity == Validity::Valid,
1285 "ripple::NetworkOPsImp::processTransaction : valid validity");
1286
1287 // Not concerned with local checks at this point.
1288 if (validity == Validity::SigBad)
1289 {
1290 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1291 transaction->setStatus(INVALID);
1292 transaction->setResult(temBAD_SIGNATURE);
1293 app_.getHashRouter().setFlags(
1294 transaction->getID(), HashRouterFlags::BAD);
1295 return false;
1296 }
1297
1298 // canonicalize can change our pointer
1299 app_.getMasterTransaction().canonicalize(&transaction);
1300
1301 return true;
1302}
1303
1304void
1305NetworkOPsImp::processTransaction(
1306 std::shared_ptr<Transaction>& transaction,
1307 bool bUnlimited,
1308 bool bLocal,
1309 FailHard failType)
1310{
1311 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1312
1313 // preProcessTransaction can change our pointer
1314 if (!preProcessTransaction(transaction))
1315 return;
1316
1317 if (bLocal)
1318 doTransactionSync(transaction, bUnlimited, failType);
1319 else
1320 doTransactionAsync(transaction, bUnlimited, failType);
1321}
1322
1323void
1324NetworkOPsImp::doTransactionAsync(
1325 std::shared_ptr<Transaction> transaction,
1326 bool bUnlimited,
1327 FailHard failType)
1328{
1329 std::lock_guard lock(mMutex);
1330
1331 if (transaction->getApplying())
1332 return;
1333
1334 mTransactions.push_back(
1335 TransactionStatus(transaction, bUnlimited, false, failType));
1336 transaction->setApplying();
1337
1338 if (mDispatchState == DispatchState::none)
1339 {
1340 if (m_job_queue.addJob(
1341 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1342 {
1343 mDispatchState = DispatchState::scheduled;
1344 }
1345 }
1346}
1347
1348void
1349NetworkOPsImp::doTransactionSync(
1350 std::shared_ptr<Transaction> transaction,
1351 bool bUnlimited,
1352 FailHard failType)
1353{
1354 std::unique_lock<std::mutex> lock(mMutex);
1355
1356 if (!transaction->getApplying())
1357 {
1358 mTransactions.push_back(
1359 TransactionStatus(transaction, bUnlimited, true, failType));
1360 transaction->setApplying();
1361 }
1362
1363 doTransactionSyncBatch(
1364 lock, [&transaction](std::unique_lock<std::mutex> const&) {
1365 return transaction->getApplying();
1366 });
1367}
1368
void
NetworkOPsImp::doTransactionSyncBatch(
    // NOTE(review): the first parameter line appears elided in this view;
    // restored from the call sites, which pass a held std::unique_lock.
    std::unique_lock<std::mutex>& lock,
    std::function<bool(std::unique_lock<std::mutex> const&)> retryCallback)
{
    // Drive batch application until retryCallback (invoked with the lock
    // held) reports no more pending work. 'lock' must be held on entry.
    do
    {
        if (mDispatchState == DispatchState::running)
        {
            // A batch processing job is already running, so wait.
            mCond.wait(lock);
        }
        else
        {
            apply(lock);

            if (mTransactions.size())
            {
                // More transactions need to be applied, but by another job.
                if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
                        transactionBatch();
                    }))
                {
                    mDispatchState = DispatchState::scheduled;
                }
            }
        }
    } while (retryCallback(lock));
}
1398
void
NetworkOPsImp::processTransactionSet(CanonicalTXSet const& set)
{
    // Process a whole canonical set of transactions: wrap each in a
    // Transaction, pre-check it, queue the survivors, and apply them
    // synchronously.
    auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXNSet");
    // NOTE(review): the declaration of 'candidates' appears elided in this
    // view; restored as a vector of Transaction pointers.
    std::vector<std::shared_ptr<Transaction>> candidates;
    candidates.reserve(set.size());
    for (auto const& [_, tx] : set)
    {
        std::string reason;
        auto transaction = std::make_shared<Transaction>(tx, reason, app_);

        if (transaction->getStatus() == INVALID)
        {
            if (!reason.empty())
            {
                JLOG(m_journal.trace())
                    << "Exception checking transaction: " << reason;
            }
            // Remember this transaction as bad so it is not retried.
            app_.getHashRouter().setFlags(
                tx->getTransactionID(), HashRouterFlags::BAD);
            continue;
        }

        // preProcessTransaction can change our pointer
        if (!preProcessTransaction(transaction))
            continue;

        candidates.emplace_back(transaction);
    }

    std::vector<TransactionStatus> transactions;
    transactions.reserve(candidates.size());

    std::unique_lock lock(mMutex);

    // Under the lock, mark each candidate as applying and stage it,
    // skipping any that are already in flight.
    for (auto& transaction : candidates)
    {
        if (!transaction->getApplying())
        {
            transactions.emplace_back(transaction, false, false, FailHard::no);
            transaction->setApplying();
        }
    }

    // Merge the staged entries into the shared queue (swap is cheaper
    // when the queue is empty).
    if (mTransactions.empty())
        mTransactions.swap(transactions);
    else
    {
        mTransactions.reserve(mTransactions.size() + transactions.size());
        for (auto& t : transactions)
            mTransactions.push_back(std::move(t));
    }

    // Apply synchronously until none of the queued transactions remain in
    // the "applying" state.
    doTransactionSyncBatch(lock, [&](std::unique_lock<std::mutex> const&) {
        XRPL_ASSERT(
            lock.owns_lock(),
            "ripple::NetworkOPsImp::processTransactionSet has lock");
        return std::any_of(
            mTransactions.begin(), mTransactions.end(), [](auto const& t) {
                return t.transaction->getApplying();
            });
    });
}
1462
1463void
1464NetworkOPsImp::transactionBatch()
1465{
1466 std::unique_lock<std::mutex> lock(mMutex);
1467
1468 if (mDispatchState == DispatchState::running)
1469 return;
1470
1471 while (mTransactions.size())
1472 {
1473 apply(lock);
1474 }
1475}
1476
// Apply the queued batch of candidate transactions (mTransactions) to the
// open ledger, then publish, flag, hold, and/or relay each one according to
// its result.
//
// Concurrency: the caller passes in the held batch lock; it is released
// while the expensive ledger work runs, briefly reacquired while touching
// per-transaction "applying" state, and reacquired at the end.
void
NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
{
    // NOTE(review): this doxygen listing omits original line 1480, which
    // presumably declares `submit_held` (used below) -- confirm against the
    // repository source.
    std::vector<TransactionStatus> transactions;
    mTransactions.swap(transactions);
    XRPL_ASSERT(
        !transactions.empty(),
        "ripple::NetworkOPsImp::apply : non-empty transactions");
    XRPL_ASSERT(
        mDispatchState != DispatchState::running,
        "ripple::NetworkOPsImp::apply : is not running");

    mDispatchState = DispatchState::running;

    // Do not hold the batch lock across ledger work.
    batchLock.unlock();

    {
        // Acquire the app master mutex and the ledger master mutex together;
        // std::lock avoids deadlock regardless of acquisition order elsewhere.
        std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
        bool changed = false;
        {
            std::unique_lock ledgerLock{
                m_ledgerMaster.peekMutex(), std::defer_lock};
            std::lock(masterLock, ledgerLock);

            app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
                for (TransactionStatus& e : transactions)
                {
                    // we check before adding to the batch
                    ApplyFlags flags = tapNONE;
                    if (e.admin)
                        flags |= tapUNLIMITED;

                    if (e.failType == FailHard::yes)
                        flags |= tapFAIL_HARD;

                    auto const result = app_.getTxQ().apply(
                        app_, view, e.transaction->getSTransaction(), flags, j);
                    e.result = result.ter;
                    e.applied = result.applied;
                    changed = changed || result.applied;
                }
                // Returning true tells modify() the view was mutated.
                return changed;
            });
        }
        if (changed)
            reportFeeChange();

        std::optional<LedgerIndex> validatedLedgerIndex;
        if (auto const l = m_ledgerMaster.getValidatedLedger())
            validatedLedgerIndex = l->info().seq;

        auto newOL = app_.openLedger().current();
        for (TransactionStatus& e : transactions)
        {
            e.transaction->clearSubmitResult();

            if (e.applied)
            {
                pubProposedTransaction(
                    newOL, e.transaction->getSTransaction(), e.result);
                e.transaction->setApplied();
            }

            e.transaction->setResult(e.result);

            // Malformed transactions are flagged so the same txID is not
            // processed again.
            if (isTemMalformed(e.result))
                app_.getHashRouter().setFlags(
                    e.transaction->getID(), HashRouterFlags::BAD);

#ifdef DEBUG
            if (e.result != tesSUCCESS)
            {
                std::string token, human;

                if (transResultInfo(e.result, token, human))
                {
                    JLOG(m_journal.info())
                        << "TransactionResult: " << token << ": " << human;
                }
            }
#endif

            bool addLocal = e.local;

            if (e.result == tesSUCCESS)
            {
                JLOG(m_journal.debug())
                    << "Transaction is now included in open ledger";
                e.transaction->setStatus(INCLUDED);

                // Pop as many "reasonable" transactions for this account as
                // possible. "Reasonable" means they have sequential sequence
                // numbers, or use tickets.
                auto const& txCur = e.transaction->getSTransaction();

                std::size_t count = 0;
                for (auto txNext = m_ledgerMaster.popAcctTransaction(txCur);
                     txNext && count < maxPoppedTransactions;
                     txNext = m_ledgerMaster.popAcctTransaction(txCur), ++count)
                {
                    // The batch lock guards the "applying" flag; take it
                    // only while manipulating that state.
                    if (!batchLock.owns_lock())
                        batchLock.lock();
                    std::string reason;
                    auto const trans = sterilize(*txNext);
                    auto t = std::make_shared<Transaction>(trans, reason, app_);
                    if (t->getApplying())
                        break;
                    submit_held.emplace_back(t, false, false, FailHard::no);
                    t->setApplying();
                }
                if (batchLock.owns_lock())
                    batchLock.unlock();
            }
            else if (e.result == tefPAST_SEQ)
            {
                // duplicate or conflict
                JLOG(m_journal.info()) << "Transaction is obsolete";
                e.transaction->setStatus(OBSOLETE);
            }
            else if (e.result == terQUEUED)
            {
                JLOG(m_journal.debug())
                    << "Transaction is likely to claim a"
                    << " fee, but is queued until fee drops";

                e.transaction->setStatus(HELD);
                // Add to held transactions, because it could get
                // kicked out of the queue, and this will try to
                // put it back.
                m_ledgerMaster.addHeldTransaction(e.transaction);
                e.transaction->setQueued();
                e.transaction->setKept();
            }
            else if (
                isTerRetry(e.result) || isTelLocal(e.result) ||
                isTefFailure(e.result))
            {
                if (e.failType != FailHard::yes)
                {
                    auto const lastLedgerSeq =
                        e.transaction->getSTransaction()->at(
                            ~sfLastLedgerSequence);
                    auto const ledgersLeft = lastLedgerSeq
                        ? *lastLedgerSeq -
                            m_ledgerMaster.getCurrentLedgerIndex()
                    // NOTE(review): this doxygen listing omits original line
                    // 1623 (the ':' branch of this conditional expression) --
                    // confirm against the repository source.
                    // If any of these conditions are met, the transaction can
                    // be held:
                    // 1. It was submitted locally. (Note that this flag is only
                    //    true on the initial submission.)
                    // 2. The transaction has a LastLedgerSequence, and the
                    //    LastLedgerSequence is fewer than LocalTxs::holdLedgers
                    //    (5) ledgers into the future. (Remember that an
                    //    unseated optional compares as less than all seated
                    //    values, so it has to be checked explicitly first.)
                    // 3. The HashRouterFlags::BAD flag is not set on the txID.
                    //    (setFlags
                    //    checks before setting. If the flag is set, it returns
                    //    false, which means it's been held once without one of
                    //    the other conditions, so don't hold it again. Time's
                    //    up!)
                    //
                    if (e.local ||
                        (ledgersLeft && ledgersLeft <= LocalTxs::holdLedgers) ||
                        app_.getHashRouter().setFlags(
                            e.transaction->getID(), HashRouterFlags::HELD))
                    {
                        // transaction should be held
                        JLOG(m_journal.debug())
                            << "Transaction should be held: " << e.result;
                        e.transaction->setStatus(HELD);
                        m_ledgerMaster.addHeldTransaction(e.transaction);
                        e.transaction->setKept();
                    }
                    else
                        JLOG(m_journal.debug())
                            << "Not holding transaction "
                            << e.transaction->getID() << ": "
                            << (e.local ? "local" : "network") << ", "
                            << "result: " << e.result << " ledgers left: "
                            << (ledgersLeft ? to_string(*ledgersLeft)
                                            : "unspecified");
                }
            }
            else
            {
                JLOG(m_journal.debug())
                    << "Status other than success " << e.result;
                e.transaction->setStatus(INVALID);
            }

            auto const enforceFailHard =
                e.failType == FailHard::yes && !isTesSuccess(e.result);

            // Locally submitted transactions are remembered so they can be
            // retried in later ledgers (unless fail_hard rejected them).
            if (addLocal && !enforceFailHard)
            {
                m_localTX->push_back(
                    m_ledgerMaster.getCurrentLedgerIndex(),
                    e.transaction->getSTransaction());
                e.transaction->setKept();
            }

            // Relay the transaction to peers when it was applied, queued, or
            // is a local submission while we are not FULL -- never when
            // fail_hard rejected it.
            if ((e.applied ||
                 ((mMode != OperatingMode::FULL) &&
                  (e.failType != FailHard::yes) && e.local) ||
                 (e.result == terQUEUED)) &&
                !enforceFailHard)
            {
                auto const toSkip =
                    app_.getHashRouter().shouldRelay(e.transaction->getID());
                if (auto const sttx = *(e.transaction->getSTransaction());
                    toSkip &&
                    // Skip relaying if it's an inner batch txn and batch
                    // feature is enabled
                    !(sttx.isFlag(tfInnerBatchTxn) &&
                      newOL->rules().enabled(featureBatch)))
                {
                    protocol::TMTransaction tx;
                    Serializer s;

                    sttx.add(s);
                    tx.set_rawtransaction(s.data(), s.size());
                    tx.set_status(protocol::tsCURRENT);
                    tx.set_receivetimestamp(
                        app_.timeKeeper().now().time_since_epoch().count());
                    tx.set_deferred(e.result == terQUEUED);
                    // FIXME: This should be when we received it
                    app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
                    e.transaction->setBroadcast();
                }
            }

            if (validatedLedgerIndex)
            {
                auto [fee, accountSeq, availableSeq] =
                    app_.getTxQ().getTxRequiredFeeAndSeq(
                        *newOL, e.transaction->getSTransaction());
                e.transaction->setCurrentLedgerState(
                    *validatedLedgerIndex, fee, accountSeq, availableSeq);
            }
        }
    }

    batchLock.lock();

    for (TransactionStatus& e : transactions)
        e.transaction->clearApplying();

    // Queue any follow-up transactions popped above for the next dispatch.
    if (!submit_held.empty())
    {
        if (mTransactions.empty())
            mTransactions.swap(submit_held);
        else
        {
            mTransactions.reserve(mTransactions.size() + submit_held.size());
            for (auto& e : submit_held)
                mTransactions.push_back(std::move(e));
        }
    }

    // Wake any threads waiting on this batch's completion.
    mCond.notify_all();

    mDispatchState = DispatchState::none;
}
1742
1743//
1744// Owner functions
1745//
1746
// Walk an account's owner directory in the given ledger and return its
// entries as JSON, grouped by entry type ("offers", "ripple_lines").
//
// NOTE(review): this doxygen listing omits original lines 1747 and 1749
// (the return type and the ledger parameter `lpLedger` used below), plus
// two right-hand sides of array initializations marked inline -- confirm
// against the repository source.
NetworkOPsImp::getOwnerInfo(
    AccountID const& account)
{
    Json::Value jvObjects(Json::objectValue);
    // Keylet of the account's owner directory root page.
    auto root = keylet::ownerDir(account);
    auto sleNode = lpLedger->read(keylet::page(root));
    if (sleNode)
    {
        std::uint64_t uNodeDir;

        // Iterate every page of the directory via sfIndexNext links.
        do
        {
            for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
            {
                auto sleCur = lpLedger->read(keylet::child(uDirEntry));
                XRPL_ASSERT(
                    sleCur,
                    "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");

                switch (sleCur->getType())
                {
                    case ltOFFER:
                        if (!jvObjects.isMember(jss::offers))
                            jvObjects[jss::offers] =
                            // NOTE(review): listing omits the right-hand
                            // side here (original line 1773).

                        jvObjects[jss::offers].append(
                            sleCur->getJson(JsonOptions::none));
                        break;

                    case ltRIPPLE_STATE:
                        if (!jvObjects.isMember(jss::ripple_lines))
                        {
                            jvObjects[jss::ripple_lines] =
                            // NOTE(review): listing omits the right-hand
                            // side here (original line 1783).
                        }

                        jvObjects[jss::ripple_lines].append(
                            sleCur->getJson(JsonOptions::none));
                        break;

                    case ltACCOUNT_ROOT:
                    case ltDIR_NODE:
                    default:
                        // Owner directories should only reference offers
                        // and trust lines here.
                        UNREACHABLE(
                            "ripple::NetworkOPsImp::getOwnerInfo : invalid "
                            "type");
                        break;
                }
            }

            uNodeDir = sleNode->getFieldU64(sfIndexNext);

            if (uNodeDir)
            {
                sleNode = lpLedger->read(keylet::page(root, uNodeDir));
                XRPL_ASSERT(
                    sleNode,
                    "ripple::NetworkOPsImp::getOwnerInfo : read next page");
            }
        } while (uNodeDir);
    }

    return jvObjects;
}
1814
1815//
1816// Other
1817//
1818
1819inline bool
1820NetworkOPsImp::isBlocked()
1821{
1822 return isAmendmentBlocked() || isUNLBlocked();
1823}
1824
// Accessor: true when this server has been flagged amendment blocked
// (see setAmendmentBlocked).
inline bool
NetworkOPsImp::isAmendmentBlocked()
{
    return amendmentBlocked_;
}
1830
// Flag this server as amendment blocked and drop the operating mode back
// to CONNECTED.
void
NetworkOPsImp::setAmendmentBlocked()
{
    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1837
1838inline bool
1839NetworkOPsImp::isAmendmentWarned()
1840{
1841 return !amendmentBlocked_ && amendmentWarned_;
1842}
1843
// Raise the "unsupported amendment has majority" warning flag.
inline void
NetworkOPsImp::setAmendmentWarned()
{
    amendmentWarned_ = true;
}
1849
// Clear the "unsupported amendment has majority" warning flag.
inline void
NetworkOPsImp::clearAmendmentWarned()
{
    amendmentWarned_ = false;
}
1855
// Accessor: true when this server has been flagged UNL blocked
// (see setUNLBlocked).
inline bool
NetworkOPsImp::isUNLBlocked()
{
    return unlBlocked_;
}
1861
// Flag this server as UNL blocked and drop the operating mode back to
// CONNECTED.
void
NetworkOPsImp::setUNLBlocked()
{
    unlBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1868
// Clear the UNL blocked flag (the mode is not restored here).
inline void
NetworkOPsImp::clearUNLBlocked()
{
    unlBlocked_ = false;
}
1874
// Determine whether the network prefers a last-closed ledger other than
// ours and, if so, switch to it. On return, networkClosed holds the hash
// of the ledger the network agrees on.
bool
NetworkOPsImp::checkLastClosedLedger(
    Overlay::PeerSequence const& peerList,
    uint256& networkClosed)
{
    // Returns true if there's an *abnormal* ledger issue, normal changing in
    // TRACKING mode should return false. Do we have sufficient validations for
    // our last closed ledger? Or do sufficient nodes agree? And do we have no
    // better ledger available? If so, we are either tracking or full.

    JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";

    auto const ourClosed = m_ledgerMaster.getClosedLedger();

    if (!ourClosed)
        return false;

    uint256 closedLedger = ourClosed->info().hash;
    uint256 prevClosedLedger = ourClosed->info().parentHash;
    JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
    JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;

    //-------------------------------------------------------------------------
    // Determine preferred last closed ledger

    auto& validations = app_.getValidations();
    JLOG(m_journal.debug())
        << "ValidationTrie " << Json::Compact(validations.getJsonTrie());

    // Will rely on peer LCL if no trusted validations exist
    // NOTE(review): this doxygen listing omits original line 1905, which
    // presumably declares the `peerCounts` map used below -- confirm
    // against the repository source.
    peerCounts[closedLedger] = 0;
    // Count our own LCL only once we are at least TRACKING.
    if (mMode >= OperatingMode::TRACKING)
        peerCounts[closedLedger]++;

    // Tally each peer's reported last closed ledger.
    for (auto& peer : peerList)
    {
        uint256 peerLedger = peer->getClosedLedgerHash();

        if (peerLedger.isNonZero())
            ++peerCounts[peerLedger];
    }

    for (auto const& it : peerCounts)
        JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;

    uint256 preferredLCL = validations.getPreferredLCL(
        RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
        m_ledgerMaster.getValidLedgerIndex(),
        peerCounts);

    bool switchLedgers = preferredLCL != closedLedger;
    if (switchLedgers)
        closedLedger = preferredLCL;
    //-------------------------------------------------------------------------
    if (switchLedgers && (closedLedger == prevClosedLedger))
    {
        // don't switch to our own previous ledger
        JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
        networkClosed = ourClosed->info().hash;
        switchLedgers = false;
    }
    else
        networkClosed = closedLedger;

    if (!switchLedgers)
        return false;

    // Find the preferred ledger locally, or start acquiring it from peers.
    auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);

    if (!consensus)
        consensus = app_.getInboundLedgers().acquire(
            closedLedger, 0, InboundLedger::Reason::CONSENSUS);

    if (consensus &&
        (!m_ledgerMaster.canBeCurrent(consensus) ||
         !m_ledgerMaster.isCompatible(
             *consensus, m_journal.debug(), "Not switching")))
    {
        // Don't switch to a ledger not on the validated chain
        // or with an invalid close time or sequence
        networkClosed = ourClosed->info().hash;
        return false;
    }

    JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
    JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
                           << getJson({*ourClosed, {}});
    JLOG(m_journal.info()) << "Net LCL " << closedLedger;

    // We disagree with the network's preferred ledger, so demote from any
    // in-sync mode.
    if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
    {
        setMode(OperatingMode::CONNECTED);
    }

    if (consensus)
    {
        // FIXME: If this rewinds the ledger sequence, or has the same
        // sequence, we should update the status on any stored transactions
        // in the invalidated ledgers.
        switchLastClosedLedger(consensus);
    }

    return true;
}
1980
// Force our last closed ledger to newLCL (the network disagreed with our
// LCL), rebuild the open ledger on top of it, and announce the switch to
// peers.
void
NetworkOPsImp::switchLastClosedLedger(
    std::shared_ptr<Ledger const> const& newLCL)
{
    // set the newLCL as our last closed ledger -- this is abnormal code
    JLOG(m_journal.error())
        << "JUMP last closed ledger to " << newLCL->info().hash;

    clearNeedNetworkLedger();

    // Update fee computations.
    app_.getTxQ().processClosedLedger(app_, *newLCL, true);

    // Caller must own master lock
    {
        // Apply tx in old open ledger to new
        // open ledger. Then apply local tx.

        auto retries = m_localTX->getTxSet();
        auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
        // NOTE(review): this doxygen listing omits original line 2001,
        // which presumably declares the `rules` optional assigned below --
        // confirm against the repository source.
        if (lastVal)
            rules = makeRulesGivenLedger(*lastVal, app_.config().features);
        else
            rules.emplace(app_.config().features);
        app_.openLedger().accept(
            app_,
            *rules,
            newLCL,
            OrderedTxs({}),
            false,
            retries,
            tapNONE,
            "jump",
            [&](OpenView& view, beast::Journal j) {
                // Stuff the ledger with transactions from the queue.
                return app_.getTxQ().accept(app_, view);
            });
    }

    m_ledgerMaster.switchLCL(newLCL);

    // Broadcast a status change so peers learn we switched ledgers.
    protocol::TMStatusChange s;
    s.set_newevent(protocol::neSWITCHED_LEDGER);
    s.set_ledgerseq(newLCL->info().seq);
    s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
    s.set_ledgerhashprevious(
        newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
    s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());

    app_.overlay().foreach(
        send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
}
2034
// Start a consensus round for the ledger that will close on top of our
// current LCL. Returns false if we lack the previous ledger and cannot
// participate.
bool
NetworkOPsImp::beginConsensus(
    uint256 const& networkClosed,
    // NOTE(review): this doxygen listing omits original line 2038 -- the
    // second parameter (presumably the `clog` stream used with CLOG below)
    // -- confirm against the repository source.
{
    XRPL_ASSERT(
        networkClosed.isNonZero(),
        "ripple::NetworkOPsImp::beginConsensus : nonzero input");

    auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();

    JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
                           << " with LCL " << closingInfo.parentHash;

    auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);

    if (!prevLedger)
    {
        // this shouldn't happen unless we jump ledgers
        if (mMode == OperatingMode::FULL)
        {
            JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
            setMode(OperatingMode::TRACKING);
            CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
        }

        CLOG(clog) << "beginConsensus no previous ledger. ";
        return false;
    }

    XRPL_ASSERT(
        prevLedger->info().hash == closingInfo.parentHash,
        "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
        "parent");
    XRPL_ASSERT(
        closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
        "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
        "hash");

    // Refresh the negative UNL from the ledger before recomputing the
    // trusted validator set.
    if (prevLedger->rules().enabled(featureNegativeUNL))
        app_.validators().setNegativeUNL(prevLedger->negativeUNL());
    TrustChanges const changes = app_.validators().updateTrusted(
        app_.getValidations().getCurrentNodeIDs(),
        closingInfo.parentCloseTime,
        *this,
        app_.overlay(),
        app_.getHashRouter());

    if (!changes.added.empty() || !changes.removed.empty())
    {
        app_.getValidations().trustChanged(changes.added, changes.removed);
        // Update the AmendmentTable so it tracks the current validators.
        app_.getAmendmentTable().trustChanged(
            app_.validators().getQuorumKeys().second);
    }

    mConsensus.startRound(
        app_.timeKeeper().closeTime(),
        networkClosed,
        prevLedger,
        changes.removed,
        changes.added,
        clog);

    // Publish a consensus phase change if starting the round moved it.
    ConsensusPhase const currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
    }

    JLOG(m_journal.debug()) << "Initiating consensus engine";
    return true;
}
2109
2110bool
2111NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
2112{
2113 auto const& peerKey = peerPos.publicKey();
2114 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
2115 {
2116 // Could indicate a operator misconfiguration where two nodes are
2117 // running with the same validator key configured, so this isn't fatal,
2118 // and it doesn't necessarily indicate peer misbehavior. But since this
2119 // is a trusted message, it could be a very big deal. Either way, we
2120 // don't want to relay the proposal. Note that the byzantine behavior
2121 // detection in handleNewValidation will notify other peers.
2122 //
2123 // Another, innocuous explanation is unusual message routing and delays,
2124 // causing this node to receive its own messages back.
2125 JLOG(m_journal.error())
2126 << "Received a proposal signed by MY KEY from a peer. This may "
2127 "indicate a misconfiguration where another node has the same "
2128 "validator key, or may be caused by unusual message routing and "
2129 "delays.";
2130 return false;
2131 }
2132
2133 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
2134}
2135
2136void
2137NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
2138{
2139 // We now have an additional transaction set
2140 // either created locally during the consensus process
2141 // or acquired from a peer
2142
2143 // Inform peers we have this set
2144 protocol::TMHaveTransactionSet msg;
2145 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
2146 msg.set_status(protocol::tsHAVE);
2147 app_.overlay().foreach(
2148 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
2149
2150 // We acquired it because consensus asked us to
2151 if (fromAcquire)
2152 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
2153}
2154
// Wrap up a consensus round: retire obsolete peer status, reconcile our
// last closed ledger with the network's preference, adjust the operating
// mode, and kick off the next consensus round.
void
NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
{
    // Hash of the ledger before our LCL: peers still reporting it are
    // behind and their status is stale.
    uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;

    for (auto const& it : app_.overlay().getActivePeers())
    {
        if (it && (it->getClosedLedgerHash() == deadLedger))
        {
            JLOG(m_journal.trace()) << "Killing obsolete peer status";
            it->cycleStatus();
        }
    }

    uint256 networkClosed;
    bool ledgerChange =
        checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);

    if (networkClosed.isZero())
    {
        CLOG(clog) << "endConsensus last closed ledger is zero. ";
        return;
    }

    // WRITEME: Unless we are in FULL and in the process of doing a consensus,
    // we must count how many nodes share our LCL, how many nodes disagree with
    // our LCL, and how many validations our LCL has. We also want to check
    // timing to make sure there shouldn't be a newer LCL. We need this
    // information to do the next three tests.

    // The two promotion checks below deliberately re-read mMode: the first
    // may advance it to TRACKING, making the second eligible to go FULL.
    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::SYNCING)) &&
        !ledgerChange)
    {
        // Count number of peers that agree with us and UNL nodes whose
        // validations we have for LCL. If the ledger is good enough, go to
        // TRACKING - TODO
        if (!needNetworkLedger_)
            setMode(OperatingMode::TRACKING);
    }

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::TRACKING)) &&
        !ledgerChange)
    {
        // check if the ledger is good enough to go to FULL
        // Note: Do not go to FULL if we don't have the previous ledger
        // check if the ledger is bad enough to go to CONNECTE D -- TODO
        auto current = m_ledgerMaster.getCurrentLedger();
        if (app_.timeKeeper().now() < (current->info().parentCloseTime +
                                       2 * current->info().closeTimeResolution))
        {
            setMode(OperatingMode::FULL);
        }
    }

    beginConsensus(networkClosed, clog);
}
2213
2214void
2215NetworkOPsImp::consensusViewChange()
2216{
2217 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2218 {
2219 setMode(OperatingMode::CONNECTED);
2220 }
2221}
2222
// Publish a received validator manifest to "manifests" stream subscribers,
// pruning any subscribers whose weak references have expired.
void
NetworkOPsImp::pubManifest(Manifest const& mo)
{
    // VFALCO consider std::shared_mutex
    std::lock_guard sl(mSubLock);

    if (!mStreamMaps[sManifests].empty())
    {
        // NOTE(review): this doxygen listing omits original line 2231,
        // which presumably declares the `jvObj` JSON object populated
        // below -- confirm against the repository source.

        jvObj[jss::type] = "manifestReceived";
        jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
        if (mo.signingKey)
            jvObj[jss::signing_key] =
                toBase58(TokenType::NodePublic, *mo.signingKey);
        jvObj[jss::seq] = Json::UInt(mo.sequence);
        if (auto sig = mo.getSignature())
            jvObj[jss::signature] = strHex(*sig);
        jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
        if (!mo.domain.empty())
            jvObj[jss::domain] = mo.domain;
        jvObj[jss::manifest] = strHex(mo.serialized);

        // Send to every live subscriber; erase entries whose weak_ptr
        // has expired.
        for (auto i = mStreamMaps[sManifests].begin();
             i != mStreamMaps[sManifests].end();)
        {
            if (auto p = i->second.lock())
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sManifests].erase(i);
            }
        }
    }
}
2261
// Snapshot of the current fee/load state, captured so that publishers can
// detect when a new "serverStatus" update needs to be sent (compared with
// operator!=).
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    XRPAmount fee,
    TxQ::Metrics&& escalationMetrics,
    LoadFeeTrack const& loadFeeTrack)
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , baseFee{fee}
    , em{std::move(escalationMetrics)}
{
}
2272
bool
// NOTE(review): this doxygen listing omits original line 2274 -- the
// function header (presumably ServerFeeSummary::operator!=) -- confirm
// against the repository source.
    NetworkOPsImp::ServerFeeSummary const& b) const
{
    // Differ when any base metric differs, or when only one side carries
    // escalation metrics.
    if (loadFactorServer != b.loadFactorServer ||
        loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
        em.has_value() != b.em.has_value())
        return true;

    // Both sides have escalation metrics: compare field by field.
    if (em && b.em)
    {
        return (
            em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
            em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
            em->referenceFeeLevel != b.em->referenceFeeLevel);
    }

    return false;
}
2292
// Need to cap to uint64 to uint32 due to JSON limitations
static std::uint32_t
// NOTE(review): this doxygen listing omits original lines 2295 and 2297 --
// the parameter list (presumably taking `v`) and the declaration of the
// 32-bit cap constant `max32` -- confirm against the repository source.
{

    return std::min(max32, v);
};
2301
// Publish the current fee/load state to "server" stream subscribers,
// pruning subscribers whose weak references have expired.
void
// NOTE(review): this doxygen listing omits original lines 2303, 2309,
// 2313, 2315, 2317 and 2334: the function header (the server-stream
// publisher), what appears to be the subscription-lock acquisition, and
// parts of the declarations of `jvObj` and the ServerFeeSummary `f` --
// confirm against the repository source.
{
    // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
    //             list into a local array while holding the lock then release
    //             the lock and call send on everyone.
    //

    if (!mStreamMaps[sServer].empty())
    {

            app_.openLedger().current()->fees().base,
            app_.getFeeTrack()};

        jvObj[jss::type] = "serverStatus";
        jvObj[jss::server_status] = strOperatingMode();
        jvObj[jss::load_base] = f.loadBaseServer;
        jvObj[jss::load_factor_server] = f.loadFactorServer;
        jvObj[jss::base_fee] = f.baseFee.jsonClipped();

        if (f.em)
        {
            // Report the larger of the server load factor and the open
            // ledger fee escalation level scaled into load-base units.
            auto const loadFactor = std::max(
                safe_cast<std::uint64_t>(f.loadFactorServer),
                mulDiv(
                    f.em->openLedgerFeeLevel,
                    f.loadBaseServer,
                    f.em->referenceFeeLevel)

            jvObj[jss::load_factor] = trunc32(loadFactor);
            jvObj[jss::load_factor_fee_escalation] =
                f.em->openLedgerFeeLevel.jsonClipped();
            jvObj[jss::load_factor_fee_queue] =
                f.em->minProcessingFeeLevel.jsonClipped();
            jvObj[jss::load_factor_fee_reference] =
                f.em->referenceFeeLevel.jsonClipped();
        }
        else
            jvObj[jss::load_factor] = f.loadFactorServer;

        // Remember what was published so future changes can be detected.
        mLastFeeSummary = f;

        for (auto i = mStreamMaps[sServer].begin();
             i != mStreamMaps[sServer].end();)
        {
            InfoSub::pointer p = i->second.lock();

            // VFALCO TODO research the possibility of using thread queues and
            //             linearizing the deletion of subscribers with the
            //             sending of JSON data.
            if (p)
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sServer].erase(i);
            }
        }
    }
}
2369
// Publish a consensus phase change to "consensus" stream subscribers,
// pruning subscribers whose weak references have expired.
void
// NOTE(review): this doxygen listing omits original lines 2371, 2373 and
// 2378: the function header (the consensus-phase publisher taking `phase`),
// what appears to be the subscription-lock acquisition, and the declaration
// of `jvObj` -- confirm against the repository source.
{

    auto& streamMap = mStreamMaps[sConsensusPhase];
    if (!streamMap.empty())
    {
        jvObj[jss::type] = "consensusPhase";
        jvObj[jss::consensus] = to_string(phase);

        // Send to every live subscriber; erase expired ones.
        for (auto i = streamMap.begin(); i != streamMap.end();)
        {
            if (auto p = i->second.lock())
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = streamMap.erase(i);
            }
        }
    }
}
2396
// Publish a received validation to "validations" stream subscribers,
// building per-API-version variants of the JSON and pruning expired
// subscribers.
void
// NOTE(review): this doxygen listing omits original lines 2398, 2401 and
// 2405: the function header (the validations publisher taking `val`), what
// appears to be the subscription-lock acquisition, and the declaration of
// `jvObj` -- confirm against the repository source.
{
    // VFALCO consider std::shared_mutex

    if (!mStreamMaps[sValidations].empty())
    {

        auto const signerPublic = val->getSignerPublic();

        jvObj[jss::type] = "validationReceived";
        jvObj[jss::validation_public_key] =
            toBase58(TokenType::NodePublic, signerPublic);
        jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
        jvObj[jss::signature] = strHex(val->getSignature());
        jvObj[jss::full] = val->isFull();
        jvObj[jss::flags] = val->getFlags();
        jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
        jvObj[jss::data] = strHex(val->getSerializer().slice());

        if (auto version = (*val)[~sfServerVersion])
            jvObj[jss::server_version] = std::to_string(*version);

        if (auto cookie = (*val)[~sfCookie])
            jvObj[jss::cookie] = std::to_string(*cookie);

        if (auto hash = (*val)[~sfValidatedHash])
            jvObj[jss::validated_hash] = strHex(*hash);

        auto const masterKey =
            app_.validatorManifests().getMasterKey(signerPublic);

        // Only report master_key when the signing key is delegated
        // (differs from the master key).
        if (masterKey != signerPublic)
            jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);

        // NOTE *seq is a number, but old API versions used string. We replace
        // number with a string using MultiApiJson near end of this function
        if (auto const seq = (*val)[~sfLedgerSequence])
            jvObj[jss::ledger_index] = *seq;

        if (val->isFieldPresent(sfAmendments))
        {
            jvObj[jss::amendments] = Json::Value(Json::arrayValue);
            for (auto const& amendment : val->getFieldV256(sfAmendments))
                jvObj[jss::amendments].append(to_string(amendment));
        }

        if (auto const closeTime = (*val)[~sfCloseTime])
            jvObj[jss::close_time] = *closeTime;

        if (auto const loadFee = (*val)[~sfLoadFee])
            jvObj[jss::load_fee] = *loadFee;

        if (auto const baseFee = val->at(~sfBaseFee))
            jvObj[jss::base_fee] = static_cast<double>(*baseFee);

        if (auto const reserveBase = val->at(~sfReserveBase))
            jvObj[jss::reserve_base] = *reserveBase;

        if (auto const reserveInc = val->at(~sfReserveIncrement))
            jvObj[jss::reserve_inc] = *reserveInc;

        // (The ~ operator converts the Proxy to a std::optional, which
        // simplifies later operations)
        if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
            baseFeeXRP && baseFeeXRP->native())
            jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();

        if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
            reserveBaseXRP && reserveBaseXRP->native())
            jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();

        if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
            reserveIncXRP && reserveIncXRP->native())
            jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();

        // NOTE Use MultiApiJson to publish two slightly different JSON objects
        // for consumers supporting different API versions
        MultiApiJson multiObj{jvObj};
        multiObj.visit(
            RPC::apiVersion<1>, //
            [](Json::Value& jvTx) {
                // Type conversion for older API versions to string
                if (jvTx.isMember(jss::ledger_index))
                {
                    jvTx[jss::ledger_index] =
                        std::to_string(jvTx[jss::ledger_index].asUInt());
                }
            });

        // Send the version-appropriate variant to each live subscriber;
        // erase expired ones.
        for (auto i = mStreamMaps[sValidations].begin();
             i != mStreamMaps[sValidations].end();)
        {
            if (auto p = i->second.lock())
            {
                multiObj.visit(
                    p->getApiVersion(), //
                    [&](Json::Value const& jv) { p->send(jv, true); });
                ++i;
            }
            else
            {
                i = mStreamMaps[sValidations].erase(i);
            }
        }
    }
}
2506
// Publish a peer status change event to "peer_status" stream subscribers,
// pruning subscribers whose weak references have expired.
void
// NOTE(review): this doxygen listing omits original lines 2508 and 2510:
// the function header (which presumably takes the `func` callback invoked
// below) and what appears to be the subscription-lock acquisition --
// confirm against the repository source.
{

    if (!mStreamMaps[sPeerStatus].empty())
    {
        // Build the event lazily -- only when at least one subscriber
        // exists.
        Json::Value jvObj(func());

        jvObj[jss::type] = "peerStatusChange";

        for (auto i = mStreamMaps[sPeerStatus].begin();
             i != mStreamMaps[sPeerStatus].end();)
        {
            InfoSub::pointer p = i->second.lock();

            if (p)
            {
                p->send(jvObj, true);
                ++i;
            }
            else
            {
                i = mStreamMaps[sPeerStatus].erase(i);
            }
        }
    }
}
2535
// Change the operating mode, updating accounting and publishing the new
// state when it actually changes.
void
// NOTE(review): this doxygen listing omits original lines 2537, 2542-2543,
// 2547-2548 and 2552: the function header (the mode setter taking `om`),
// the bodies of the CONNECTED and SYNCING branches, and the statement run
// when a blocked server requests a mode above CONNECTED -- confirm against
// the repository source.
{
    using namespace std::chrono_literals;
    if (om == OperatingMode::CONNECTED)
    {
    }
    else if (om == OperatingMode::SYNCING)
    {
    }

    if ((om > OperatingMode::CONNECTED) && isBlocked())

    if (mMode == om)
        return;

    mMode = om;

    // Track time spent in each mode for server_info accounting.
    accounting_.mode(om);

    JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
    // Notify "server" stream subscribers of the state change.
    pubServer();
}
2564
// Handle a validation received from the network or locally: run the
// validation handler (deduplicating concurrent work on the same ledger
// hash), publish it to subscribers, and report whether it should be
// relayed to peers.
bool
// NOTE(review): this doxygen listing omits original lines 2566-2567 (the
// rest of the function header, including the validation `val` parameter),
// and lines 2573 and 2605 (what appear to be the lock acquisition guarding
// pendingValidations_ and a stringstream declaration inside the logging
// lambda) -- confirm against the repository source.
    std::string const& source)
{
    JLOG(m_journal.trace())
        << "recvValidation " << val->getLedgerHash() << " from " << source;

    BypassAccept bypassAccept = BypassAccept::no;
    try
    {
        // If this ledger hash is already being processed, skip the accept
        // work in the handler but still pass the validation on.
        if (pendingValidations_.contains(val->getLedgerHash()))
            bypassAccept = BypassAccept::yes;
        else
            pendingValidations_.insert(val->getLedgerHash());
        // Release the lock while the (potentially slow) handler runs.
        scope_unlock unlock(lock);
        handleNewValidation(app_, val, source, bypassAccept, m_journal);
    }
    catch (std::exception const& e)
    {
        JLOG(m_journal.warn())
            << "Exception thrown for handling new validation "
            << val->getLedgerHash() << ": " << e.what();
    }
    catch (...)
    {
        JLOG(m_journal.warn())
            << "Unknown exception thrown for handling new validation "
            << val->getLedgerHash();
    }
    // Only the inserter removes the pending entry.
    if (bypassAccept == BypassAccept::no)
    {
        pendingValidations_.erase(val->getLedgerHash());
    }
    lock.unlock();

    // Publish to "validations" stream subscribers.
    pubValidation(val);

    JLOG(m_journal.debug()) << [this, &val]() -> auto {
        ss << "VALIDATION: " << val->render() << " master_key: ";
        auto master = app_.validators().getTrustedKey(val->getSignerPublic());
        if (master)
        {
            ss << toBase58(TokenType::NodePublic, *master);
        }
        else
        {
            ss << "none";
        }
        return ss.str();
    }();

    // We will always relay trusted validations; if configured, we will
    // also relay all untrusted validations.
    return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
}
2623
// Return the current consensus state as JSON (full detail: getJson(true)).
// NOTE(review): doxygen lines 2624-2625 (return type and qualified name --
// presumably Json::Value NetworkOPsImp::getConsensusInfo()) are missing
// from this capture.
2626{
2627 return mConsensus.getJson(true);
2628}
2629
// Build the JSON body of the server_info / server_state RPC responses.
//   human    -- true for server_info (human-readable units, e.g. XRP and
//               decimal load factors); false for server_state (raw units)
//   admin    -- include admin-only fields (node_size, load, validator key,
//               amendment-majority warnings)
//   counters -- include perf-log and nodestore counter sections
// NOTE(review): this capture drops a number of original lines (every doxygen
// number absent below, starting with 2630/2633 -- the Json::Value return
// type and the declaration of `info`). Individual gaps are flagged inline;
// reconcile with the repository before treating this text as compilable.
2631NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2632{
2634
2635 // System-level warnings
2636 {
2637 Json::Value warnings{Json::arrayValue};
2638 if (isAmendmentBlocked())
2639 {
2640 Json::Value& w = warnings.append(Json::objectValue);
2641 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2642 w[jss::message] =
2643 "This server is amendment blocked, and must be updated to be "
2644 "able to stay in sync with the network.";
2645 }
2646 if (isUNLBlocked())
2647 {
2648 Json::Value& w = warnings.append(Json::objectValue);
2649 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2650 w[jss::message] =
2651 "This server has an expired validator list. validators.txt "
2652 "may be incorrectly configured or some [validator_list_sites] "
2653 "may be unreachable.";
2654 }
2655 if (admin && isAmendmentWarned())
2656 {
2657 Json::Value& w = warnings.append(Json::objectValue);
2658 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2659 w[jss::message] =
2660 "One or more unsupported amendments have reached majority. "
2661 "Upgrade to the latest version before they are activated "
2662 "to avoid being amendment blocked.";
// NOTE(review): the call on line 2664 is missing -- presumably the
// amendment table's "first unsupported expected" query. Confirm.
2663 if (auto const expected =
2665 {
2666 auto& d = w[jss::details] = Json::objectValue;
2667 d[jss::expected_date] = expected->time_since_epoch().count();
2668 d[jss::expected_date_UTC] = to_string(*expected);
2669 }
2670 }
2671
2672 if (warnings.size())
2673 info[jss::warnings] = std::move(warnings);
2674 }
2675
2676 // hostid: unique string describing the machine
2677 if (human)
2678 info[jss::hostid] = getHostId(admin);
2679
2680 // domain: if configured with a domain, report it:
2681 if (!app_.config().SERVER_DOMAIN.empty())
2682 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2683
2684 info[jss::build_version] = BuildInfo::getVersionString();
2685
2686 info[jss::server_state] = strOperatingMode(admin);
2687
// NOTE(review): the continuation of this expression (line 2689, the time
// source being floored to microseconds) is missing from this capture.
2688 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2690
// NOTE(review): the guard on line 2691 is missing -- the condition under
// which network_ledger reports "waiting". Confirm against the repository.
2692 info[jss::network_ledger] = "waiting";
2693
2694 info[jss::validation_quorum] =
2695 static_cast<Json::UInt>(app_.validators().quorum());
2696
2697 if (admin)
2698 {
// Map the configured NODE_SIZE (0..4) to its symbolic name.
2699 switch (app_.config().NODE_SIZE)
2700 {
2701 case 0:
2702 info[jss::node_size] = "tiny";
2703 break;
2704 case 1:
2705 info[jss::node_size] = "small";
2706 break;
2707 case 2:
2708 info[jss::node_size] = "medium";
2709 break;
2710 case 3:
2711 info[jss::node_size] = "large";
2712 break;
2713 case 4:
2714 info[jss::node_size] = "huge";
2715 break;
2716 }
2717
2718 auto when = app_.validators().expires();
2719
2720 if (!human)
2721 {
2722 if (when)
2723 info[jss::validator_list_expires] =
2724 safe_cast<Json::UInt>(when->time_since_epoch().count());
2725 else
2726 info[jss::validator_list_expires] = 0;
2727 }
2728 else
2729 {
2730 auto& x = (info[jss::validator_list] = Json::objectValue);
2731
2732 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2733
2734 if (when)
2735 {
2736 if (*when == TimeKeeper::time_point::max())
2737 {
2738 x[jss::expiration] = "never";
2739 x[jss::status] = "active";
2740 }
2741 else
2742 {
2743 x[jss::expiration] = to_string(*when);
2744
2745 if (*when > app_.timeKeeper().now())
2746 x[jss::status] = "active";
2747 else
2748 x[jss::status] = "expired";
2749 }
2750 }
2751 else
2752 {
2753 x[jss::status] = "unknown";
2754 x[jss::expiration] = "unknown";
2755 }
2756 }
2757
2758#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2759 {
2760 auto& x = (info[jss::git] = Json::objectValue);
2761#ifdef GIT_COMMIT_HASH
2762 x[jss::hash] = GIT_COMMIT_HASH;
2763#endif
2764#ifdef GIT_BRANCH
2765 x[jss::branch] = GIT_BRANCH;
2766#endif
2767 }
2768#endif
2769 }
2770 info[jss::io_latency_ms] =
2771 static_cast<Json::UInt>(app_.getIOLatency().count());
2772
2773 if (admin)
2774 {
2775 if (auto const localPubKey = app_.validators().localPublicKey();
2776 localPubKey && app_.getValidationPublicKey())
2777 {
2778 info[jss::pubkey_validator] =
2779 toBase58(TokenType::NodePublic, localPubKey.value());
2780 }
2781 else
2782 {
2783 info[jss::pubkey_validator] = "none";
2784 }
2785 }
2786
2787 if (counters)
2788 {
2789 info[jss::counters] = app_.getPerfLog().countersJson();
2790
2791 Json::Value nodestore(Json::objectValue);
2792 app_.getNodeStore().getCountsJson(nodestore);
2793 info[jss::counters][jss::nodestore] = nodestore;
2794 info[jss::current_activities] = app_.getPerfLog().currentJson();
2795 }
2796
// NOTE(review): the value expression (line 2798, the node identity public
// key rendered to base58) is missing from this capture.
2797 info[jss::pubkey_node] =
2799
2800 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2801
// NOTE(review): the guard on line 2802 is missing -- presumably the
// amendment-blocked flag check.
2803 info[jss::amendment_blocked] = true;
2804
2805 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2806
2807 if (fp != 0)
2808 info[jss::fetch_pack] = Json::UInt(fp);
2809
2810 info[jss::peers] = Json::UInt(app_.overlay().size());
2811
2812 Json::Value lastClose = Json::objectValue;
2813 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2814
// NOTE(review): the value expressions for both branches (lines 2818 and
// 2823, the previous round's converge time in seconds vs. raw units) are
// missing from this capture.
2815 if (human)
2816 {
2817 lastClose[jss::converge_time_s] =
2819 }
2820 else
2821 {
2822 lastClose[jss::converge_time] =
2824 }
2825
2826 info[jss::last_close] = lastClose;
2827
2828 // info[jss::consensus] = mConsensus.getJson();
2829
2830 if (admin)
2831 info[jss::load] = m_job_queue.getJson();
2832
2833 if (auto const netid = app_.overlay().networkID())
2834 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2835
// NOTE(review): the initializer (line 2837, the TxQ metrics query against
// the open ledger) is missing from this capture.
2836 auto const escalationMetrics =
2838
2839 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2840 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2841 /* Scale the escalated fee level to unitless "load factor".
2842 In practice, this just strips the units, but it will continue
2843 to work correctly if either base value ever changes. */
// NOTE(review): the tail of this expression (line 2849, presumably the
// fallback applied to mulDiv's optional result) is missing.
2844 auto const loadFactorFeeEscalation =
2845 mulDiv(
2846 escalationMetrics.openLedgerFeeLevel,
2847 loadBaseServer,
2848 escalationMetrics.referenceFeeLevel)
2850
2851 auto const loadFactor = std::max(
2852 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2853
2854 if (!human)
2855 {
2856 info[jss::load_base] = loadBaseServer;
2857 info[jss::load_factor] = trunc32(loadFactor);
2858 info[jss::load_factor_server] = loadFactorServer;
2859
2860 /* Json::Value doesn't support uint64, so clamp to max
2861 uint32 value. This is mostly theoretical, since there
2862 probably isn't enough extant XRP to drive the factor
2863 that high.
2864 */
2865 info[jss::load_factor_fee_escalation] =
2866 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2867 info[jss::load_factor_fee_queue] =
2868 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2869 info[jss::load_factor_fee_reference] =
2870 escalationMetrics.referenceFeeLevel.jsonClipped();
2871 }
2872 else
2873 {
2874 info[jss::load_factor] =
2875 static_cast<double>(loadFactor) / loadBaseServer;
2876
2877 if (loadFactorServer != loadFactor)
2878 info[jss::load_factor_server] =
2879 static_cast<double>(loadFactorServer) / loadBaseServer;
2880
2881 if (admin)
2882 {
// NOTE(review): the declaration of `fee` (line 2883) is missing --
// presumably the fee track's local fee, reassigned below for the
// remote and cluster variants.
2884 if (fee != loadBaseServer)
2885 info[jss::load_factor_local] =
2886 static_cast<double>(fee) / loadBaseServer;
2887 fee = app_.getFeeTrack().getRemoteFee();
2888 if (fee != loadBaseServer)
2889 info[jss::load_factor_net] =
2890 static_cast<double>(fee) / loadBaseServer;
2891 fee = app_.getFeeTrack().getClusterFee();
2892 if (fee != loadBaseServer)
2893 info[jss::load_factor_cluster] =
2894 static_cast<double>(fee) / loadBaseServer;
2895 }
2896 if (escalationMetrics.openLedgerFeeLevel !=
2897 escalationMetrics.referenceFeeLevel &&
2898 (admin || loadFactorFeeEscalation != loadFactor))
2899 info[jss::load_factor_fee_escalation] =
2900 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2901 escalationMetrics.referenceFeeLevel);
2902 if (escalationMetrics.minProcessingFeeLevel !=
2903 escalationMetrics.referenceFeeLevel)
2904 info[jss::load_factor_fee_queue] =
2905 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2906 escalationMetrics.referenceFeeLevel);
2907 }
2908
// Prefer the last validated ledger; fall back to the last closed ledger
// (reported under "closed_ledger" rather than "validated_ledger").
2909 bool valid = false;
2910 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2911
2912 if (lpClosed)
2913 valid = true;
2914 else
2915 lpClosed = m_ledgerMaster.getClosedLedger();
2916
2917 if (lpClosed)
2918 {
// NOTE(review): the declaration of `l` (line 2920, the ledger sub-object)
// is missing from this capture.
2919 XRPAmount const baseFee = lpClosed->fees().base;
2921 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2922 l[jss::hash] = to_string(lpClosed->info().hash);
2923
2924 if (!human)
2925 {
2926 l[jss::base_fee] = baseFee.jsonClipped();
2927 l[jss::reserve_base] =
2928 lpClosed->fees().accountReserve(0).jsonClipped();
2929 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2930 l[jss::close_time] = Json::Value::UInt(
2931 lpClosed->info().closeTime.time_since_epoch().count());
2932 }
2933 else
2934 {
2935 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2936 l[jss::reserve_base_xrp] =
2937 lpClosed->fees().accountReserve(0).decimalXRP();
2938 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2939
// Only report a close-time offset once it is large enough to matter.
2940 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2941 std::abs(closeOffset.count()) >= 60)
2942 l[jss::close_time_offset] =
2943 static_cast<std::uint32_t>(closeOffset.count());
2944
// NOTE(review): the condition on line 2946 (selecting between the
// validated-ledger age and the computed close-time age below) is
// missing from this capture.
2945 constexpr std::chrono::seconds highAgeThreshold{1000000};
2947 {
2948 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2949 l[jss::age] =
2950 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2951 }
2952 else
2953 {
2954 auto lCloseTime = lpClosed->info().closeTime;
2955 auto closeTime = app_.timeKeeper().closeTime();
2956 if (lCloseTime <= closeTime)
2957 {
2958 using namespace std::chrono_literals;
2959 auto age = closeTime - lCloseTime;
2960 l[jss::age] =
2961 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2962 }
2963 }
2964 }
2965
2966 if (valid)
2967 info[jss::validated_ledger] = l;
2968 else
2969 info[jss::closed_ledger] = l;
2970
2971 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2972 if (!lpPublished)
2973 info[jss::published_ledger] = "none";
2974 else if (lpPublished->info().seq != lpClosed->info().seq)
2975 info[jss::published_ledger] = lpPublished->info().seq;
2976 }
2977
2978 accounting_.json(info);
2979 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
// NOTE(review): the value expressions on lines 2981, 2983 and 2985 (the
// job-queue overflow and peer-disconnect counters) are missing.
2980 info[jss::jq_trans_overflow] =
2982 info[jss::peer_disconnects] =
2984 info[jss::peer_disconnects_resources] =
2986
2987 // This array must be sorted in increasing order.
2988 static constexpr std::array<std::string_view, 7> protocols{
2989 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2990 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2991 {
// NOTE(review): the declaration of `ports` (line 2992) is missing --
// presumably a Json::Value array populated below.
2993 for (auto const& port : app_.getServerHandler().setup().ports)
2994 {
2995 // Don't publish admin ports for non-admin users
2996 if (!admin &&
2997 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2998 port.admin_user.empty() && port.admin_password.empty()))
2999 continue;
// NOTE(review): lines 3000-3001 (the declaration of `proto` and the
// head of the set-intersection call against `protocols`) are missing.
3002 std::begin(port.protocol),
3003 std::end(port.protocol),
3004 std::begin(protocols),
3005 std::end(protocols),
3006 std::back_inserter(proto));
3007 if (!proto.empty())
3008 {
3009 auto& jv = ports.append(Json::Value(Json::objectValue));
3010 jv[jss::port] = std::to_string(port.port);
3011 jv[jss::protocol] = Json::Value{Json::arrayValue};
3012 for (auto const& p : proto)
3013 jv[jss::protocol].append(p);
3014 }
3015 }
3016
// Report the gRPC port as well, when configured with both port and ip.
3017 if (app_.config().exists(SECTION_PORT_GRPC))
3018 {
3019 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
3020 auto const optPort = grpcSection.get("port");
3021 if (optPort && grpcSection.get("ip"))
3022 {
3023 auto& jv = ports.append(Json::Value(Json::objectValue));
3024 jv[jss::port] = *optPort;
3025 jv[jss::protocol] = Json::Value{Json::arrayValue};
3026 jv[jss::protocol].append("grpc");
3027 }
3028 }
3029 info[jss::ports] = std::move(ports);
3030 }
3031
3032 return info;
3033}
3034
// NOTE(review): both the qualified signature (line 3036) and the single body
// statement (line 3038) are missing from this capture. Given its position
// next to getLedgerFetchInfo below, this is presumably
// NetworkOPsImp::clearLedgerFetch(), delegating to the inbound-ledgers
// component -- confirm against the repository.
3035void
3037{
3039}
3040
// Return the inbound-ledger (ledger fetch) status report as JSON.
// NOTE(review): lines 3041-3042 (return type and qualified name --
// presumably Json::Value NetworkOPsImp::getLedgerFetchInfo()) are missing
// from this capture.
3043{
3044 return app_.getInboundLedgers().getInfo();
3045}
3046
// Publish a proposed (not yet validated) transaction to all subscribers of
// the real-time transactions stream, then fan it out to per-account
// real-time subscribers.
// NOTE(review): missing from this capture: line 3048 (the qualified name --
// presumably NetworkOPsImp::pubProposedTransaction() and line 3062 (the
// lock guarding mStreamMaps).
3047void
3049 std::shared_ptr<ReadView const> const& ledger,
3050 std::shared_ptr<STTx const> const& transaction,
3051 TER result)
3052{
3053 // never publish an inner txn inside a batch txn
3054 if (transaction->isFlag(tfInnerBatchTxn) &&
3055 ledger->rules().enabled(featureBatch))
3056 return;
3057
// Build the API-version-specific JSON once; each subscriber gets the
// rendering that matches its negotiated API version.
3058 MultiApiJson jvObj =
3059 transJson(transaction, result, false, ledger, std::nullopt);
3060
3061 {
3063
// Walk the subscriber map, dropping entries whose weak_ptr has expired.
3064 auto it = mStreamMaps[sRTTransactions].begin();
3065 while (it != mStreamMaps[sRTTransactions].end())
3066 {
3067 InfoSub::pointer p = it->second.lock();
3068
3069 if (p)
3070 {
3071 jvObj.visit(
3072 p->getApiVersion(), //
3073 [&](Json::Value const& jv) { p->send(jv, true); });
3074 ++it;
3075 }
3076 else
3077 {
3078 it = mStreamMaps[sRTTransactions].erase(it);
3079 }
3080 }
3081 }
3082
3083 pubProposedAccountTransaction(ledger, transaction, result);
3084}
3085
// Publish a newly accepted (validated) ledger: send the ledgerClosed header
// to "ledger" stream subscribers, book changes to "book_changes" subscribers,
// kick off any deferred account-history subscriptions, and then publish each
// transaction in the ledger.
// NOTE(review): missing from this capture: line 3087 (the qualified
// signature taking `lpAccepted`), line 3092 (the declaration/initialization
// of `alpAccepted` from the accepted-ledger cache fetch), line 3110 (the
// lock guarding mStreamMaps), line 3114 (the declaration of `jvObj`), line
// 3132 (the condition guarding validated_ledgers), line 3135 (its value
// expression), line 3183 (the call that starts the delayed
// SubAccountHistory), and line 3196 (the pubValidatedTransaction call head).
3086void
3088{
3089 // Ledgers are published only when they acquire sufficient validations
3090 // Holes are filled across connection loss or other catastrophe
3091
3093 app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
3094 if (!alpAccepted)
3095 {
3096 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
3097 app_.getAcceptedLedgerCache().canonicalize_replace_client(
3098 lpAccepted->info().hash, alpAccepted);
3099 }
3100
3101 XRPL_ASSERT(
3102 alpAccepted->getLedger().get() == lpAccepted.get(),
3103 "ripple::NetworkOPsImp::pubLedger : accepted input");
3104
3105 {
3106 JLOG(m_journal.debug())
3107 << "Publishing ledger " << lpAccepted->info().seq << " "
3108 << lpAccepted->info().hash;
3109
3111
3112 if (!mStreamMaps[sLedger].empty())
3113 {
3115
3116 jvObj[jss::type] = "ledgerClosed";
3117 jvObj[jss::ledger_index] = lpAccepted->info().seq;
3118 jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
3119 jvObj[jss::ledger_time] = Json::Value::UInt(
3120 lpAccepted->info().closeTime.time_since_epoch().count());
3121
// fee_ref is only reported on networks still using the deprecated
// fee-units scheme (pre-XRPFees).
3122 if (!lpAccepted->rules().enabled(featureXRPFees))
3123 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3124 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
3125 jvObj[jss::reserve_base] =
3126 lpAccepted->fees().accountReserve(0).jsonClipped();
3127 jvObj[jss::reserve_inc] =
3128 lpAccepted->fees().increment.jsonClipped();
3129
3130 jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
3131
3133 {
3134 jvObj[jss::validated_ledgers] =
3136 }
3137
// Deliver to each live "ledger" subscriber; prune dead weak_ptrs.
3138 auto it = mStreamMaps[sLedger].begin();
3139 while (it != mStreamMaps[sLedger].end())
3140 {
3141 InfoSub::pointer p = it->second.lock();
3142 if (p)
3143 {
3144 p->send(jvObj, true);
3145 ++it;
3146 }
3147 else
3148 it = mStreamMaps[sLedger].erase(it);
3149 }
3150 }
3151
3152 if (!mStreamMaps[sBookChanges].empty())
3153 {
3154 Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
3155
3156 auto it = mStreamMaps[sBookChanges].begin();
3157 while (it != mStreamMaps[sBookChanges].end())
3158 {
3159 InfoSub::pointer p = it->second.lock();
3160 if (p)
3161 {
3162 p->send(jvObj, true);
3163 ++it;
3164 }
3165 else
3166 it = mStreamMaps[sBookChanges].erase(it);
3167 }
3168 }
3169
3170 {
// On the first validated ledger only, start account-history
// subscriptions that were deferred until a validated ledger existed
// (separationLedgerSeq_ == 0 marks those).
3171 static bool firstTime = true;
3172 if (firstTime)
3173 {
3174 // First validated ledger, start delayed SubAccountHistory
3175 firstTime = false;
3176 for (auto& outer : mSubAccountHistory)
3177 {
3178 for (auto& inner : outer.second)
3179 {
3180 auto& subInfo = inner.second;
3181 if (subInfo.index_->separationLedgerSeq_ == 0)
3182 {
3184 alpAccepted->getLedger(), subInfo);
3185 }
3186 }
3187 }
3188 }
3189 }
3190 }
3191
3192 // Don't lock since pubAcceptedTransaction is locking.
3193 for (auto const& accTx : *alpAccepted)
3194 {
3195 JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
3197 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3198 }
3199}
3200
// Detect a change in the server's fee summary and, if anything changed,
// schedule a job to publish the new server state to subscribers.
// NOTE(review): missing from this capture: line 3202 (the qualified name --
// presumably NetworkOPsImp::reportFeeChange()), lines 3204/3206 (the head
// and middle of the fee-summary construction that `f` refers to), and line
// 3212 (the job-queue call head, and possibly the mLastFeeSummary update).
3201void
3203{
3205 app_.openLedger().current()->fees().base,
3207 app_.getFeeTrack()};
3208
3209 // only schedule the job if something has changed
3210 if (f != mLastFeeSummary)
3211 {
3213 jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
3214 pubServer();
3215 });
3216 }
3217}
3218
// Schedule a job to publish a consensus-phase change to subscribers.
// NOTE(review): missing from this capture: line 3220 (the qualified
// signature taking `phase`) and lines 3222-3223 (the job-queue call head
// and job type).
3219void
3221{
3224 "reportConsensusStateChange->pubConsensus",
3225 [this, phase]() { pubConsensus(phase); });
3226}
3227
// Expire stale entries from the local-transaction set against `view`.
// NOTE(review): line 3229 (the qualified signature declaring the `view`
// parameter) is missing from this capture.
3228inline void
3230{
3231 m_localTX->sweep(view);
3232}
// Number of transactions currently held in the local-transaction set.
// NOTE(review): line 3234 (the qualified name -- presumably
// NetworkOPsImp::getLocalTxCount()) is missing from this capture.
3233inline std::size_t
3235{
3236 return m_localTX->size();
3237}
3238
3239// This routine should only be used to publish accepted or validated
3240// transactions.
// Build the per-API-version JSON representation of a transaction for the
// subscription streams: engine result, metadata (with synthetic fields),
// CTID, ledger linkage, and -- for offer creates -- the owner's funds.
// NOTE(review): missing from this capture: lines 3241-3242 (return type and
// qualified name -- presumably MultiApiJson NetworkOPsImp::transJson(),
// line 3247 (the optional `meta` parameter declaration), line 3249 (the
// declaration of `jvObj`), lines 3265/3268 (heads of the synthetic-metadata
// helper calls), line 3281 (the `ctid` binding head), line 3322 (an
// accountFunds argument), and lines 3330/3333 (the per-version visitation
// call head and the `jvTx` parameter line).
3243 std::shared_ptr<STTx const> const& transaction,
3244 TER result,
3245 bool validated,
3246 std::shared_ptr<ReadView const> const& ledger,
3248{
3250 std::string sToken;
3251 std::string sHuman;
3252
3253 transResultInfo(result, sToken, sHuman);
3254
3255 jvObj[jss::type] = "transaction";
3256 // NOTE jvObj is not a finished object for either API version. After
3257 // it's populated, we need to finish it for a specific API version. This is
3258 // done in a loop, near the end of this function.
3259 jvObj[jss::transaction] =
3260 transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3261
3262 if (meta)
3263 {
3264 jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
3266 jvObj[jss::meta], *ledger, transaction, meta->get());
3267 RPC::insertNFTSyntheticInJson(jvObj, transaction, meta->get());
3269 jvObj[jss::meta], transaction, meta->get());
3270 }
3271
3272 // add CTID where the needed data for it exists
3273 if (auto const& lookup = ledger->txRead(transaction->getTransactionID());
3274 lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3275 {
3276 uint32_t const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
// Prefer the transaction's own NetworkID field when present.
3277 uint32_t netID = app_.config().NETWORK_ID;
3278 if (transaction->isFieldPresent(sfNetworkID))
3279 netID = transaction->getFieldU32(sfNetworkID);
3280
3282 RPC::encodeCTID(ledger->info().seq, txnSeq, netID);
3283 ctid)
3284 jvObj[jss::ctid] = *ctid;
3285 }
// A closed ledger has a meaningful hash to report.
3286 if (!ledger->open())
3287 jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3288
3289 if (validated)
3290 {
3291 jvObj[jss::ledger_index] = ledger->info().seq;
3292 jvObj[jss::transaction][jss::date] =
3293 ledger->info().closeTime.time_since_epoch().count();
3294 jvObj[jss::validated] = true;
3295 jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3296
3297 // WRITEME: Put the account next seq here
3298 }
3299 else
3300 {
3301 jvObj[jss::validated] = false;
3302 jvObj[jss::ledger_current_index] = ledger->info().seq;
3303 }
3304
3305 jvObj[jss::status] = validated ? "closed" : "proposed";
3306 jvObj[jss::engine_result] = sToken;
3307 jvObj[jss::engine_result_code] = result;
3308 jvObj[jss::engine_result_message] = sHuman;
3309
3310 if (transaction->getTxnType() == ttOFFER_CREATE)
3311 {
3312 auto const account = transaction->getAccountID(sfAccount);
3313 auto const amount = transaction->getFieldAmount(sfTakerGets);
3314
3315 // If the offer create is not self funded then add the owner balance
3316 if (account != amount.issue().account)
3317 {
3318 auto const ownerFunds = accountFunds(
3319 *ledger,
3320 account,
3321 amount,
3323 app_.journal("View"));
3324 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3325 }
3326 }
3327
// Finish the object per API version: v2+ renames `transaction` to
// `tx_json` and hoists the hash to top level; v1 keeps it nested.
3328 std::string const hash = to_string(transaction->getTransactionID());
3329 MultiApiJson multiObj{jvObj};
3331 multiObj.visit(), //
3332 [&]<unsigned Version>(
3334 RPC::insertDeliverMax(
3335 jvTx[jss::transaction], transaction->getTxnType(), Version);
3336
3337 if constexpr (Version > 1)
3338 {
3339 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3340 jvTx[jss::hash] = hash;
3341 }
3342 else
3343 {
3344 jvTx[jss::transaction][jss::hash] = hash;
3345 }
3346 });
3347
3348 return multiObj;
3349}
3350
// Publish a validated transaction to the "transactions" and real-time
// transaction streams, feed successful ones to the order book tracker, and
// fan out to per-account subscribers. `last` marks the final transaction of
// the ledger (used for the account_history_boundary flag downstream).
// NOTE(review): missing from this capture: line 3352 (the qualified name --
// presumably NetworkOPsImp::pubValidatedTransaction() and line 3365 (the
// lock guarding mStreamMaps).
3351void
3353 std::shared_ptr<ReadView const> const& ledger,
3354 AcceptedLedgerTx const& transaction,
3355 bool last)
3356{
3357 auto const& stTxn = transaction.getTxn();
3358
3359 // Create two different Json objects, for different API versions
3360 auto const metaRef = std::ref(transaction.getMeta());
3361 auto const trResult = transaction.getResult();
3362 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3363
3364 {
3366
// Deliver to "transactions" subscribers; prune expired weak_ptrs.
3367 auto it = mStreamMaps[sTransactions].begin();
3368 while (it != mStreamMaps[sTransactions].end())
3369 {
3370 InfoSub::pointer p = it->second.lock();
3371
3372 if (p)
3373 {
3374 jvObj.visit(
3375 p->getApiVersion(), //
3376 [&](Json::Value const& jv) { p->send(jv, true); });
3377 ++it;
3378 }
3379 else
3380 it = mStreamMaps[sTransactions].erase(it);
3381 }
3382
// The real-time stream also receives validated transactions.
3383 it = mStreamMaps[sRTTransactions].begin();
3384
3385 while (it != mStreamMaps[sRTTransactions].end())
3386 {
3387 InfoSub::pointer p = it->second.lock();
3388
3389 if (p)
3390 {
3391 jvObj.visit(
3392 p->getApiVersion(), //
3393 [&](Json::Value const& jv) { p->send(jv, true); });
3394 ++it;
3395 }
3396 else
3397 it = mStreamMaps[sRTTransactions].erase(it);
3398 }
3399 }
3400
3401 if (transaction.getResult() == tesSUCCESS)
3402 app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3403
3404 pubAccountTransaction(ledger, transaction, last);
3405}
3406
// Fan a validated transaction out to per-account subscribers: accounts
// subscribed for accepted and/or real-time notifications, plus
// account-history stream subscribers. Expired subscriptions are pruned as a
// side effect.
// NOTE(review): missing from this capture: line 3408 (the qualified name --
// presumably NetworkOPsImp::pubAccountTransaction(), line 3413 (the
// declaration of `notify`, a set of InfoSub pointers), line 3420 (the lock
// guarding the subscription maps), line 3423 (the third clause of the
// emptiness check, presumably on mSubAccountHistory), and line 3523 (the
// middle of the XRPL_ASSERT condition).
3407void
3409 std::shared_ptr<ReadView const> const& ledger,
3410 AcceptedLedgerTx const& transaction,
3411 bool last)
3412{
3414 int iProposed = 0;
3415 int iAccepted = 0;
3416
3417 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3418 auto const currLedgerSeq = ledger->seq();
3419 {
3421
3422 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3424 {
3425 for (auto const& affectedAccount : transaction.getAffected())
3426 {
// Real-time subscribers for this account.
3427 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3428 simiIt != mSubRTAccount.end())
3429 {
3430 auto it = simiIt->second.begin();
3431
3432 while (it != simiIt->second.end())
3433 {
3434 InfoSub::pointer p = it->second.lock();
3435
3436 if (p)
3437 {
3438 notify.insert(p);
3439 ++it;
3440 ++iProposed;
3441 }
3442 else
3443 it = simiIt->second.erase(it);
3444 }
3445 }
3446
// Accepted-transaction subscribers for this account.
3447 if (auto simiIt = mSubAccount.find(affectedAccount);
3448 simiIt != mSubAccount.end())
3449 {
3450 auto it = simiIt->second.begin();
3451 while (it != simiIt->second.end())
3452 {
3453 InfoSub::pointer p = it->second.lock();
3454
3455 if (p)
3456 {
3457 notify.insert(p);
3458 ++it;
3459 ++iAccepted;
3460 }
3461 else
3462 it = simiIt->second.erase(it);
3463 }
3464 }
3465
// Account-history subscribers: skip ledgers at or before the
// subscription's separation point (those are delivered by the
// backfill job instead).
3466 if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3467 histoIt != mSubAccountHistory.end())
3468 {
3469 auto& subs = histoIt->second;
3470 auto it = subs.begin();
3471 while (it != subs.end())
3472 {
3473 SubAccountHistoryInfoWeak const& info = it->second;
3474 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3475 {
3476 ++it;
3477 continue;
3478 }
3479
3480 if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3481 {
3482 accountHistoryNotify.emplace_back(
3483 SubAccountHistoryInfo{isSptr, info.index_});
3484 ++it;
3485 }
3486 else
3487 {
3488 it = subs.erase(it);
3489 }
3490 }
3491 if (subs.empty())
3492 mSubAccountHistory.erase(histoIt);
3493 }
3494 }
3495 }
3496 }
3497
3498 JLOG(m_journal.trace())
3499 << "pubAccountTransaction: "
3500 << "proposed=" << iProposed << ", accepted=" << iAccepted;
3501
3502 if (!notify.empty() || !accountHistoryNotify.empty())
3503 {
3504 auto const& stTxn = transaction.getTxn();
3505
3506 // Create two different Json objects, for different API versions
3507 auto const metaRef = std::ref(transaction.getMeta());
3508 auto const trResult = transaction.getResult();
3509 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3510
3511 for (InfoSub::ref isrListener : notify)
3512 {
3513 jvObj.visit(
3514 isrListener->getApiVersion(), //
3515 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3516 }
3517
// The boundary flag is only added for the history stream, after the
// plain per-account subscribers above have been served.
3518 if (last)
3519 jvObj.set(jss::account_history_boundary, true);
3520
3521 XRPL_ASSERT(
3522 jvObj.isMember(jss::account_history_tx_stream) ==
3524 "ripple::NetworkOPsImp::pubAccountTransaction : "
3525 "account_history_tx_stream not set");
3526 for (auto& info : accountHistoryNotify)
3527 {
3528 auto& index = info.index_;
3529 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3530 jvObj.set(jss::account_history_tx_first, true);
3531
3532 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3533
3534 jvObj.visit(
3535 info.sink_->getApiVersion(), //
3536 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3537 }
3538 }
3539}
3540
// Fan a proposed (unvalidated) transaction out to per-account real-time
// subscribers of the accounts it mentions. Expired subscriptions are pruned
// as a side effect.
// NOTE(review): missing from this capture: line 3542 (the qualified name --
// presumably NetworkOPsImp::pubProposedAccountTransaction(), line 3544
// (the `tx` parameter declaration), line 3547 (the declaration of `notify`),
// line 3553 (the lock guarding the subscription maps), line 3559 (the third
// clause of the emptiness check), and line 3600 (the middle of the
// XRPL_ASSERT condition). Note also that `accountHistoryNotify` is never
// populated in the visible code, so its loop below appears to be dead here
// -- confirm against the repository.
3541void
3543 std::shared_ptr<ReadView const> const& ledger,
3545 TER result)
3546{
3548 int iProposed = 0;
3549
3550 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3551
3552 {
3554
// Nothing to do if nobody subscribed for real-time account updates.
3555 if (mSubRTAccount.empty())
3556 return;
3557
3558 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3560 {
3561 for (auto const& affectedAccount : tx->getMentionedAccounts())
3562 {
3563 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3564 simiIt != mSubRTAccount.end())
3565 {
3566 auto it = simiIt->second.begin();
3567
3568 while (it != simiIt->second.end())
3569 {
3570 InfoSub::pointer p = it->second.lock();
3571
3572 if (p)
3573 {
3574 notify.insert(p);
3575 ++it;
3576 ++iProposed;
3577 }
3578 else
3579 it = simiIt->second.erase(it);
3580 }
3581 }
3582 }
3583 }
3584 }
3585
3586 JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3587
3588 if (!notify.empty() || !accountHistoryNotify.empty())
3589 {
3590 // Create two different Json objects, for different API versions
3591 MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3592
3593 for (InfoSub::ref isrListener : notify)
3594 jvObj.visit(
3595 isrListener->getApiVersion(), //
3596 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3597
3598 XRPL_ASSERT(
3599 jvObj.isMember(jss::account_history_tx_stream) ==
3601 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3602 "account_history_tx_stream not set");
3603 for (auto& info : accountHistoryNotify)
3604 {
3605 auto& index = info.index_;
3606 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3607 jvObj.set(jss::account_history_tx_first, true);
3608 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3609 jvObj.visit(
3610 info.sink_->getApiVersion(), //
3611 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3612 }
3613 }
3614}
3615
3616//
3617// Monitoring
3618//
3619
// Subscribe a listener to updates for a set of accounts. `rt` selects the
// real-time (proposed) map versus the accepted-transaction map. The
// subscription is recorded both on the InfoSub itself and in the server-side
// map keyed by account then listener sequence number.
// NOTE(review): missing from this capture: line 3621 (the qualified name --
// presumably NetworkOPsImp::subAccount() and line 3636 (the lock guarding
// the subscription maps).
3620void
3622 InfoSub::ref isrListener,
3623 hash_set<AccountID> const& vnaAccountIDs,
3624 bool rt)
3625{
3626 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3627
// First record the accounts on the listener itself (no map lock needed).
3628 for (auto const& naAccountID : vnaAccountIDs)
3629 {
3630 JLOG(m_journal.trace())
3631 << "subAccount: account: " << toBase58(naAccountID);
3632
3633 isrListener->insertSubAccountInfo(naAccountID, rt);
3634 }
3635
3637
3638 for (auto const& naAccountID : vnaAccountIDs)
3639 {
3640 auto simIterator = subMap.find(naAccountID);
3641 if (simIterator == subMap.end())
3642 {
3643 // Not found, note that account has a new single listner.
3644 SubMapType usisElement;
3645 usisElement[isrListener->getSeq()] = isrListener;
3646 // VFALCO NOTE This is making a needless copy of naAccountID
3647 subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3648 }
3649 else
3650 {
3651 // Found, note that the account has another listener.
3652 simIterator->second[isrListener->getSeq()] = isrListener;
3653 }
3654 }
3655}
3656
// Unsubscribe a listener from a set of accounts: remove the records from the
// InfoSub itself, then from the server-side maps via unsubAccountInternal.
// NOTE(review): lines 3658-3659 (the qualified name -- presumably
// NetworkOPsImp::unsubAccount() are missing from this capture.
3657void
3660 InfoSub::ref isrListener,
3661 hash_set<AccountID> const& vnaAccountIDs,
3662 bool rt)
3663{
3664 for (auto const& naAccountID : vnaAccountIDs)
3665 {
3666 // Remove from the InfoSub
3667 isrListener->deleteSubAccountInfo(naAccountID, rt);
3668 }
3669
3670 // Remove from the server
3671 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3672}
3672
// Remove a listener (identified by its sequence number) from the server-side
// per-account subscription map, erasing an account's entry entirely once its
// last listener is gone. `rt` selects the real-time versus accepted map.
// NOTE(review): missing from this capture: line 3674 (the qualified name --
// presumably NetworkOPsImp::unsubAccountInternal() and line 3679 (the lock
// guarding the subscription maps).
3673void
3675 std::uint64_t uSeq,
3676 hash_set<AccountID> const& vnaAccountIDs,
3677 bool rt)
3678{
3680
3681 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3682
3683 for (auto const& naAccountID : vnaAccountIDs)
3684 {
3685 auto simIterator = subMap.find(naAccountID);
3686
3687 if (simIterator != subMap.end())
3688 {
3689 // Found
3690 simIterator->second.erase(uSeq);
3691
3692 if (simIterator->second.empty())
3693 {
3694 // Don't need hash entry.
3695 subMap.erase(simIterator);
3696 }
3697 }
3698 }
3699}
3700
3701void
3703{
3704 enum DatabaseType { Sqlite, None };
3705 static auto const databaseType = [&]() -> DatabaseType {
3706 // Use a dynamic_cast to return DatabaseType::None
3707 // on failure.
3708 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3709 {
3710 return DatabaseType::Sqlite;
3711 }
3712 return DatabaseType::None;
3713 }();
3714
3715 if (databaseType == DatabaseType::None)
3716 {
3717 JLOG(m_journal.error())
3718 << "AccountHistory job for account "
3719 << toBase58(subInfo.index_->accountId_) << " no database";
3720 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3721 {
3722 sptr->send(rpcError(rpcINTERNAL), true);
3723 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3724 }
3725 return;
3726 }
3727
3730 "AccountHistoryTxStream",
3731 [this, dbType = databaseType, subInfo]() {
3732 auto const& accountId = subInfo.index_->accountId_;
3733 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3734 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3735
3736 JLOG(m_journal.trace())
3737 << "AccountHistory job for account " << toBase58(accountId)
3738 << " started. lastLedgerSeq=" << lastLedgerSeq;
3739
3740 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3741 std::shared_ptr<TxMeta> const& meta) -> bool {
3742 /*
3743 * genesis account: first tx is the one with seq 1
3744 * other account: first tx is the one created the account
3745 */
3746 if (accountId == genesisAccountId)
3747 {
3748 auto stx = tx->getSTransaction();
3749 if (stx->getAccountID(sfAccount) == accountId &&
3750 stx->getSeqValue() == 1)
3751 return true;
3752 }
3753
3754 for (auto& node : meta->getNodes())
3755 {
3756 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3757 continue;
3758
3759 if (node.isFieldPresent(sfNewFields))
3760 {
3761 if (auto inner = dynamic_cast<STObject const*>(
3762 node.peekAtPField(sfNewFields));
3763 inner)
3764 {
3765 if (inner->isFieldPresent(sfAccount) &&
3766 inner->getAccountID(sfAccount) == accountId)
3767 {
3768 return true;
3769 }
3770 }
3771 }
3772 }
3773
3774 return false;
3775 };
3776
3777 auto send = [&](Json::Value const& jvObj,
3778 bool unsubscribe) -> bool {
3779 if (auto sptr = subInfo.sinkWptr_.lock())
3780 {
3781 sptr->send(jvObj, true);
3782 if (unsubscribe)
3783 unsubAccountHistory(sptr, accountId, false);
3784 return true;
3785 }
3786
3787 return false;
3788 };
3789
3790 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3791 bool unsubscribe) -> bool {
3792 if (auto sptr = subInfo.sinkWptr_.lock())
3793 {
3794 jvObj.visit(
3795 sptr->getApiVersion(), //
3796 [&](Json::Value const& jv) { sptr->send(jv, true); });
3797
3798 if (unsubscribe)
3799 unsubAccountHistory(sptr, accountId, false);
3800 return true;
3801 }
3802
3803 return false;
3804 };
3805
3806 auto getMoreTxns =
3807 [&](std::uint32_t minLedger,
3808 std::uint32_t maxLedger,
3813 switch (dbType)
3814 {
3815 case Sqlite: {
3816 auto db = static_cast<SQLiteDatabase*>(
3819 accountId, minLedger, maxLedger, marker, 0, true};
3820 return db->newestAccountTxPage(options);
3821 }
3822 default: {
3823 UNREACHABLE(
3824 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3825 "getMoreTxns : invalid database type");
3826 return {};
3827 }
3828 }
3829 };
3830
3831 /*
3832 * search backward until the genesis ledger or asked to stop
3833 */
3834 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3835 {
3836 int feeChargeCount = 0;
3837 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3838 {
3839 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3840 ++feeChargeCount;
3841 }
3842 else
3843 {
3844 JLOG(m_journal.trace())
3845 << "AccountHistory job for account "
3846 << toBase58(accountId) << " no InfoSub. Fee charged "
3847 << feeChargeCount << " times.";
3848 return;
3849 }
3850
3851 // try to search in windows of 1024 ledgers until reaching the genesis ledger
3852 auto startLedgerSeq =
3853 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3854 JLOG(m_journal.trace())
3855 << "AccountHistory job for account " << toBase58(accountId)
3856 << ", working on ledger range [" << startLedgerSeq << ","
3857 << lastLedgerSeq << "]";
3858
3859 auto haveRange = [&]() -> bool {
3860 std::uint32_t validatedMin = UINT_MAX;
3861 std::uint32_t validatedMax = 0;
3862 auto haveSomeValidatedLedgers =
3864 validatedMin, validatedMax);
3865
3866 return haveSomeValidatedLedgers &&
3867 validatedMin <= startLedgerSeq &&
3868 lastLedgerSeq <= validatedMax;
3869 }();
3870
3871 if (!haveRange)
3872 {
3873 JLOG(m_journal.debug())
3874 << "AccountHistory reschedule job for account "
3875 << toBase58(accountId) << ", incomplete ledger range ["
3876 << startLedgerSeq << "," << lastLedgerSeq << "]";
3878 return;
3879 }
3880
3882 while (!subInfo.index_->stopHistorical_)
3883 {
3884 auto dbResult =
3885 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3886 if (!dbResult)
3887 {
3888 JLOG(m_journal.debug())
3889 << "AccountHistory job for account "
3890 << toBase58(accountId) << " getMoreTxns failed.";
3891 send(rpcError(rpcINTERNAL), true);
3892 return;
3893 }
3894
3895 auto const& txns = dbResult->first;
3896 marker = dbResult->second;
3897 size_t num_txns = txns.size();
3898 for (size_t i = 0; i < num_txns; ++i)
3899 {
3900 auto const& [tx, meta] = txns[i];
3901
3902 if (!tx || !meta)
3903 {
3904 JLOG(m_journal.debug())
3905 << "AccountHistory job for account "
3906 << toBase58(accountId) << " empty tx or meta.";
3907 send(rpcError(rpcINTERNAL), true);
3908 return;
3909 }
3910 auto curTxLedger =
3912 tx->getLedger());
3913 if (!curTxLedger)
3914 {
3915 JLOG(m_journal.debug())
3916 << "AccountHistory job for account "
3917 << toBase58(accountId) << " no ledger.";
3918 send(rpcError(rpcINTERNAL), true);
3919 return;
3920 }
3922 tx->getSTransaction();
3923 if (!stTxn)
3924 {
3925 JLOG(m_journal.debug())
3926 << "AccountHistory job for account "
3927 << toBase58(accountId)
3928 << " getSTransaction failed.";
3929 send(rpcError(rpcINTERNAL), true);
3930 return;
3931 }
3932
3933 auto const mRef = std::ref(*meta);
3934 auto const trR = meta->getResultTER();
3935 MultiApiJson jvTx =
3936 transJson(stTxn, trR, true, curTxLedger, mRef);
3937
3938 jvTx.set(
3939 jss::account_history_tx_index, txHistoryIndex--);
3940 if (i + 1 == num_txns ||
3941 txns[i + 1].first->getLedger() != tx->getLedger())
3942 jvTx.set(jss::account_history_boundary, true);
3943
3944 if (isFirstTx(tx, meta))
3945 {
3946 jvTx.set(jss::account_history_tx_first, true);
3947 sendMultiApiJson(jvTx, false);
3948
3949 JLOG(m_journal.trace())
3950 << "AccountHistory job for account "
3951 << toBase58(accountId)
3952 << " done, found last tx.";
3953 return;
3954 }
3955 else
3956 {
3957 sendMultiApiJson(jvTx, false);
3958 }
3959 }
3960
3961 if (marker)
3962 {
3963 JLOG(m_journal.trace())
3964 << "AccountHistory job for account "
3965 << toBase58(accountId)
3966 << " paging, marker=" << marker->ledgerSeq << ":"
3967 << marker->txnSeq;
3968 }
3969 else
3970 {
3971 break;
3972 }
3973 }
3974
3975 if (!subInfo.index_->stopHistorical_)
3976 {
3977 lastLedgerSeq = startLedgerSeq - 1;
3978 if (lastLedgerSeq <= 1)
3979 {
3980 JLOG(m_journal.trace())
3981 << "AccountHistory job for account "
3982 << toBase58(accountId)
3983 << " done, reached genesis ledger.";
3984 return;
3985 }
3986 }
3987 }
3988 });
3989}
3990
3991void
3993 std::shared_ptr<ReadView const> const& ledger,
3995{
3996 subInfo.index_->separationLedgerSeq_ = ledger->seq();
3997 auto const& accountId = subInfo.index_->accountId_;
3998 auto const accountKeylet = keylet::account(accountId);
3999 if (!ledger->exists(accountKeylet))
4000 {
4001 JLOG(m_journal.debug())
4002 << "subAccountHistoryStart, no account " << toBase58(accountId)
4003 << ", no need to add AccountHistory job.";
4004 return;
4005 }
4006 if (accountId == genesisAccountId)
4007 {
4008 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
4009 {
4010 if (sleAcct->getFieldU32(sfSequence) == 1)
4011 {
4012 JLOG(m_journal.debug())
4013 << "subAccountHistoryStart, genesis account "
4014 << toBase58(accountId)
4015 << " does not have tx, no need to add AccountHistory job.";
4016 return;
4017 }
4018 }
4019 else
4020 {
4021 UNREACHABLE(
4022 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
4023 "access genesis account");
4024 return;
4025 }
4026 }
4027 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
4028 subInfo.index_->haveHistorical_ = true;
4029
4030 JLOG(m_journal.debug())
4031 << "subAccountHistoryStart, add AccountHistory job: accountId="
4032 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
4033
4034 addAccountHistoryJob(subInfo);
4035}
4036
4039 InfoSub::ref isrListener,
4040 AccountID const& accountId)
4041{
4042 if (!isrListener->insertSubAccountHistory(accountId))
4043 {
4044 JLOG(m_journal.debug())
4045 << "subAccountHistory, already subscribed to account "
4046 << toBase58(accountId);
4047 return rpcINVALID_PARAMS;
4048 }
4049
4052 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
4053 auto simIterator = mSubAccountHistory.find(accountId);
4054 if (simIterator == mSubAccountHistory.end())
4055 {
4057 inner.emplace(isrListener->getSeq(), ahi);
4059 simIterator, std::make_pair(accountId, inner));
4060 }
4061 else
4062 {
4063 simIterator->second.emplace(isrListener->getSeq(), ahi);
4064 }
4065
4066 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
4067 if (ledger)
4068 {
4069 subAccountHistoryStart(ledger, ahi);
4070 }
4071 else
4072 {
4073 // The node does not have validated ledgers, so wait for
4074 // one before start streaming.
4075 // In this case, the subscription is also considered successful.
4076 JLOG(m_journal.debug())
4077 << "subAccountHistory, no validated ledger yet, delay start";
4078 }
4079
4080 return rpcSUCCESS;
4081}
4082
4083void
4085 InfoSub::ref isrListener,
4086 AccountID const& account,
4087 bool historyOnly)
4088{
4089 if (!historyOnly)
4090 isrListener->deleteSubAccountHistory(account);
4091 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
4092}
4093
4094void
4096 std::uint64_t seq,
4097 AccountID const& account,
4098 bool historyOnly)
4099{
4101 auto simIterator = mSubAccountHistory.find(account);
4102 if (simIterator != mSubAccountHistory.end())
4103 {
4104 auto& subInfoMap = simIterator->second;
4105 auto subInfoIter = subInfoMap.find(seq);
4106 if (subInfoIter != subInfoMap.end())
4107 {
4108 subInfoIter->second.index_->stopHistorical_ = true;
4109 }
4110
4111 if (!historyOnly)
4112 {
4113 simIterator->second.erase(seq);
4114 if (simIterator->second.empty())
4115 {
4116 mSubAccountHistory.erase(simIterator);
4117 }
4118 }
4119 JLOG(m_journal.debug())
4120 << "unsubAccountHistory, account " << toBase58(account)
4121 << ", historyOnly = " << (historyOnly ? "true" : "false");
4122 }
4123}
4124
4125bool
4127{
4128 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
4129 listeners->addSubscriber(isrListener);
4130 else
4131 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
4132 return true;
4133}
4134
4135bool
4137{
4138 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
4139 listeners->removeSubscriber(uSeq);
4140
4141 return true;
4142}
4143
4147{
4148 // This code-path is exclusively used when the server is in standalone
4149 // mode via `ledger_accept`
4150 XRPL_ASSERT(
4151 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
4152
4153 if (!m_standalone)
4154 Throw<std::runtime_error>(
4155 "Operation only possible in STANDALONE mode.");
4156
4157 // FIXME Could we improve on this and remove the need for a specialized
4158 // API in Consensus?
4159 beginConsensus(m_ledgerMaster.getClosedLedger()->info().hash, {});
4160 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
4161 return m_ledgerMaster.getCurrentLedger()->info().seq;
4162}
4163
4164// <-- bool: true=added, false=already there
4165bool
4167{
4168 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
4169 {
4170 jvResult[jss::ledger_index] = lpClosed->info().seq;
4171 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
4172 jvResult[jss::ledger_time] = Json::Value::UInt(
4173 lpClosed->info().closeTime.time_since_epoch().count());
4174 if (!lpClosed->rules().enabled(featureXRPFees))
4175 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
4176 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
4177 jvResult[jss::reserve_base] =
4178 lpClosed->fees().accountReserve(0).jsonClipped();
4179 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
4180 }
4181
4183 {
4184 jvResult[jss::validated_ledgers] =
4186 }
4187
4189 return mStreamMaps[sLedger]
4190 .emplace(isrListener->getSeq(), isrListener)
4191 .second;
4192}
4193
4194// <-- bool: true=added, false=already there
4195bool
4197{
4200 .emplace(isrListener->getSeq(), isrListener)
4201 .second;
4202}
4203
4204// <-- bool: true=erased, false=was not there
4205bool
4207{
4209 return mStreamMaps[sLedger].erase(uSeq);
4210}
4211
4212// <-- bool: true=erased, false=was not there
4213bool
4215{
4217 return mStreamMaps[sBookChanges].erase(uSeq);
4218}
4219
4220// <-- bool: true=added, false=already there
4221bool
4223{
4225 return mStreamMaps[sManifests]
4226 .emplace(isrListener->getSeq(), isrListener)
4227 .second;
4228}
4229
4230// <-- bool: true=erased, false=was not there
4231bool
4233{
4235 return mStreamMaps[sManifests].erase(uSeq);
4236}
4237
4238// <-- bool: true=added, false=already there
4239bool
4241 InfoSub::ref isrListener,
4242 Json::Value& jvResult,
4243 bool admin)
4244{
4245 uint256 uRandom;
4246
4247 if (m_standalone)
4248 jvResult[jss::stand_alone] = m_standalone;
4249
4250 // CHECKME: is it necessary to provide a random number here?
4251 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
4252
4253 auto const& feeTrack = app_.getFeeTrack();
4254 jvResult[jss::random] = to_string(uRandom);
4255 jvResult[jss::server_status] = strOperatingMode(admin);
4256 jvResult[jss::load_base] = feeTrack.getLoadBase();
4257 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4258 jvResult[jss::hostid] = getHostId(admin);
4259 jvResult[jss::pubkey_node] =
4261
4263 return mStreamMaps[sServer]
4264 .emplace(isrListener->getSeq(), isrListener)
4265 .second;
4266}
4267
4268// <-- bool: true=erased, false=was not there
4269bool
4271{
4273 return mStreamMaps[sServer].erase(uSeq);
4274}
4275
4276// <-- bool: true=added, false=already there
4277bool
4279{
4282 .emplace(isrListener->getSeq(), isrListener)
4283 .second;
4284}
4285
4286// <-- bool: true=erased, false=was not there
4287bool
4289{
4291 return mStreamMaps[sTransactions].erase(uSeq);
4292}
4293
4294// <-- bool: true=added, false=already there
4295bool
4297{
4300 .emplace(isrListener->getSeq(), isrListener)
4301 .second;
4302}
4303
4304// <-- bool: true=erased, false=was not there
4305bool
4307{
4309 return mStreamMaps[sRTTransactions].erase(uSeq);
4310}
4311
4312// <-- bool: true=added, false=already there
4313bool
4315{
4318 .emplace(isrListener->getSeq(), isrListener)
4319 .second;
4320}
4321
4322void
4324{
4325 accounting_.json(obj);
4326}
4327
4328// <-- bool: true=erased, false=was not there
4329bool
4331{
4333 return mStreamMaps[sValidations].erase(uSeq);
4334}
4335
4336// <-- bool: true=added, false=already there
4337bool
4339{
4341 return mStreamMaps[sPeerStatus]
4342 .emplace(isrListener->getSeq(), isrListener)
4343 .second;
4344}
4345
4346// <-- bool: true=erased, false=was not there
4347bool
4349{
4351 return mStreamMaps[sPeerStatus].erase(uSeq);
4352}
4353
4354// <-- bool: true=added, false=already there
4355bool
4357{
4360 .emplace(isrListener->getSeq(), isrListener)
4361 .second;
4362}
4363
4364// <-- bool: true=erased, false=was not there
4365bool
4367{
4369 return mStreamMaps[sConsensusPhase].erase(uSeq);
4370}
4371
4374{
4376
4377 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4378
4379 if (it != mRpcSubMap.end())
4380 return it->second;
4381
4382 return InfoSub::pointer();
4383}
4384
4387{
4389
4390 mRpcSubMap.emplace(strUrl, rspEntry);
4391
4392 return rspEntry;
4393}
4394
4395bool
4397{
4399 auto pInfo = findRpcSub(strUrl);
4400
4401 if (!pInfo)
4402 return false;
4403
4404 // check to see if any of the stream maps still hold a weak reference to
4405 // this entry before removing
4406 for (SubMapType const& map : mStreamMaps)
4407 {
4408 if (map.find(pInfo->getSeq()) != map.end())
4409 return false;
4410 }
4411 mRpcSubMap.erase(strUrl);
4412 return true;
4413}
4414
4415#ifndef USE_NEW_BOOK_PAGE
4416
4417// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4418// work, but it demonstrated poor performance.
4419//
4420void
4423 Book const& book,
4424 AccountID const& uTakerID,
4425 bool const bProof,
4426 unsigned int iLimit,
4427 Json::Value const& jvMarker,
4428 Json::Value& jvResult)
4429{ // CAUTION: This is the old get book page logic
4430 Json::Value& jvOffers =
4431 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4432
4434 uint256 const uBookBase = getBookBase(book);
4435 uint256 const uBookEnd = getQualityNext(uBookBase);
4436 uint256 uTipIndex = uBookBase;
4437
4438 if (auto stream = m_journal.trace())
4439 {
4440 stream << "getBookPage:" << book;
4441 stream << "getBookPage: uBookBase=" << uBookBase;
4442 stream << "getBookPage: uBookEnd=" << uBookEnd;
4443 stream << "getBookPage: uTipIndex=" << uTipIndex;
4444 }
4445
4446 ReadView const& view = *lpLedger;
4447
4448 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4449 isGlobalFrozen(view, book.in.account);
4450
4451 bool bDone = false;
4452 bool bDirectAdvance = true;
4453
4454 std::shared_ptr<SLE const> sleOfferDir;
4455 uint256 offerIndex;
4456 unsigned int uBookEntry;
4457 STAmount saDirRate;
4458
4459 auto const rate = transferRate(view, book.out.account);
4460 auto viewJ = app_.journal("View");
4461
4462 while (!bDone && iLimit-- > 0)
4463 {
4464 if (bDirectAdvance)
4465 {
4466 bDirectAdvance = false;
4467
4468 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4469
4470 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4471 if (ledgerIndex)
4472 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4473 else
4474 sleOfferDir.reset();
4475
4476 if (!sleOfferDir)
4477 {
4478 JLOG(m_journal.trace()) << "getBookPage: bDone";
4479 bDone = true;
4480 }
4481 else
4482 {
4483 uTipIndex = sleOfferDir->key();
4484 saDirRate = amountFromQuality(getQuality(uTipIndex));
4485
4486 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4487
4488 JLOG(m_journal.trace())
4489 << "getBookPage: uTipIndex=" << uTipIndex;
4490 JLOG(m_journal.trace())
4491 << "getBookPage: offerIndex=" << offerIndex;
4492 }
4493 }
4494
4495 if (!bDone)
4496 {
4497 auto sleOffer = view.read(keylet::offer(offerIndex));
4498
4499 if (sleOffer)
4500 {
4501 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4502 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4503 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4504 STAmount saOwnerFunds;
4505 bool firstOwnerOffer(true);
4506
4507 if (book.out.account == uOfferOwnerID)
4508 {
4509 // If an offer is selling issuer's own IOUs, it is fully
4510 // funded.
4511 saOwnerFunds = saTakerGets;
4512 }
4513 else if (bGlobalFreeze)
4514 {
4515 // If either asset is globally frozen, consider all offers
4516 // that aren't ours to be totally unfunded
4517 saOwnerFunds.clear(book.out);
4518 }
4519 else
4520 {
4521 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4522 if (umBalanceEntry != umBalance.end())
4523 {
4524 // Found in running balance table.
4525
4526 saOwnerFunds = umBalanceEntry->second;
4527 firstOwnerOffer = false;
4528 }
4529 else
4530 {
4531 // Did not find balance in table.
4532
4533 saOwnerFunds = accountHolds(
4534 view,
4535 uOfferOwnerID,
4536 book.out.currency,
4537 book.out.account,
4539 viewJ);
4540
4541 if (saOwnerFunds < beast::zero)
4542 {
4543 // Treat negative funds as zero.
4544
4545 saOwnerFunds.clear();
4546 }
4547 }
4548 }
4549
4550 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4551
4552 STAmount saTakerGetsFunded;
4553 STAmount saOwnerFundsLimit = saOwnerFunds;
4554 Rate offerRate = parityRate;
4555
4556 if (rate != parityRate
4557 // Have a transfer fee.
4558 && uTakerID != book.out.account
4559 // Not taking offers of own IOUs.
4560 && book.out.account != uOfferOwnerID)
4561 // Offer owner not issuing own funds
4562 {
4563 // Need to charge a transfer fee to offer owner.
4564 offerRate = rate;
4565 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4566 }
4567
4568 if (saOwnerFundsLimit >= saTakerGets)
4569 {
4570 // Sufficient funds no shenanigans.
4571 saTakerGetsFunded = saTakerGets;
4572 }
4573 else
4574 {
4575 // Only provide, if not fully funded.
4576
4577 saTakerGetsFunded = saOwnerFundsLimit;
4578
4579 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4580 std::min(
4581 saTakerPays,
4582 multiply(
4583 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4584 .setJson(jvOffer[jss::taker_pays_funded]);
4585 }
4586
4587 STAmount saOwnerPays = (parityRate == offerRate)
4588 ? saTakerGetsFunded
4589 : std::min(
4590 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4591
4592 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4593
4594 // Include all offers funded and unfunded
4595 Json::Value& jvOf = jvOffers.append(jvOffer);
4596 jvOf[jss::quality] = saDirRate.getText();
4597
4598 if (firstOwnerOffer)
4599 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4600 }
4601 else
4602 {
4603 JLOG(m_journal.warn()) << "Missing offer";
4604 }
4605
4606 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4607 {
4608 bDirectAdvance = true;
4609 }
4610 else
4611 {
4612 JLOG(m_journal.trace())
4613 << "getBookPage: offerIndex=" << offerIndex;
4614 }
4615 }
4616 }
4617
4618 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4619 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4620}
4621
4622#else
4623
4624// This is the new code that uses the book iterators
4625// It has temporarily been disabled
4626
4627void
4630 Book const& book,
4631 AccountID const& uTakerID,
4632 bool const bProof,
4633 unsigned int iLimit,
4634 Json::Value const& jvMarker,
4635 Json::Value& jvResult)
4636{
4637 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4638
4640
4641 MetaView lesActive(lpLedger, tapNONE, true);
4642 OrderBookIterator obIterator(lesActive, book);
4643
4644 auto const rate = transferRate(lesActive, book.out.account);
4645
4646 bool const bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4647 lesActive.isGlobalFrozen(book.in.account);
4648
4649 while (iLimit-- > 0 && obIterator.nextOffer())
4650 {
4651 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4652 if (sleOffer)
4653 {
4654 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4655 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4656 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4657 STAmount saDirRate = obIterator.getCurrentRate();
4658 STAmount saOwnerFunds;
4659
4660 if (book.out.account == uOfferOwnerID)
4661 {
4662 // If offer is selling issuer's own IOUs, it is fully funded.
4663 saOwnerFunds = saTakerGets;
4664 }
4665 else if (bGlobalFreeze)
4666 {
4667 // If either asset is globally frozen, consider all offers
4668 // that aren't ours to be totally unfunded
4669 saOwnerFunds.clear(book.out);
4670 }
4671 else
4672 {
4673 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4674
4675 if (umBalanceEntry != umBalance.end())
4676 {
4677 // Found in running balance table.
4678
4679 saOwnerFunds = umBalanceEntry->second;
4680 }
4681 else
4682 {
4683 // Did not find balance in table.
4684
4685 saOwnerFunds = lesActive.accountHolds(
4686 uOfferOwnerID,
4687 book.out.currency,
4688 book.out.account,
4690
4691 if (saOwnerFunds.isNegative())
4692 {
4693 // Treat negative funds as zero.
4694
4695 saOwnerFunds.zero();
4696 }
4697 }
4698 }
4699
4700 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4701
4702 STAmount saTakerGetsFunded;
4703 STAmount saOwnerFundsLimit = saOwnerFunds;
4704 Rate offerRate = parityRate;
4705
4706 if (rate != parityRate
4707 // Have a transfer fee.
4708 && uTakerID != book.out.account
4709 // Not taking offers of own IOUs.
4710 && book.out.account != uOfferOwnerID)
4711 // Offer owner not issuing own funds
4712 {
4713 // Need to charge a transfer fee to offer owner.
4714 offerRate = rate;
4715 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4716 }
4717
4718 if (saOwnerFundsLimit >= saTakerGets)
4719 {
4720 // Sufficient funds no shenanigans.
4721 saTakerGetsFunded = saTakerGets;
4722 }
4723 else
4724 {
4725 // Only provide, if not fully funded.
4726 saTakerGetsFunded = saOwnerFundsLimit;
4727
4728 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4729
4730 // TODO(tom): The result of this expression is not used - what's
4731 // going on here?
4732 std::min(
4733 saTakerPays,
4734 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4735 .setJson(jvOffer[jss::taker_pays_funded]);
4736 }
4737
4738 STAmount saOwnerPays = (parityRate == offerRate)
4739 ? saTakerGetsFunded
4740 : std::min(
4741 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4742
4743 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4744
4745 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4746 {
4747 // Only provide funded offers and offers of the taker.
4748 Json::Value& jvOf = jvOffers.append(jvOffer);
4749 jvOf[jss::quality] = saDirRate.getText();
4750 }
4751 }
4752 }
4753
4754 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4755 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4756}
4757
4758#endif
4759
4760inline void
4762{
4763 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4764 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4766 counters[static_cast<std::size_t>(mode)].dur += current;
4767
4770 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4771 .dur.count());
4773 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4774 .dur.count());
4776 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4778 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4779 .dur.count());
4781 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4782
4784 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4785 .transitions);
4787 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4788 .transitions);
4790 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4792 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4793 .transitions);
4795 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4796}
4797
4798void
4800{
4801 auto now = std::chrono::steady_clock::now();
4802
4803 std::lock_guard lock(mutex_);
4804 ++counters_[static_cast<std::size_t>(om)].transitions;
4805 if (om == OperatingMode::FULL &&
4806 counters_[static_cast<std::size_t>(om)].transitions == 1)
4807 {
4808 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4809 now - processStart_)
4810 .count();
4811 }
4812 counters_[static_cast<std::size_t>(mode_)].dur +=
4813 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4814
4815 mode_ = om;
4816 start_ = now;
4817}
4818
4819void
4821{
4822 auto [counters, mode, start, initialSync] = getCounterData();
4823 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4825 counters[static_cast<std::size_t>(mode)].dur += current;
4826
4827 obj[jss::state_accounting] = Json::objectValue;
4829 i <= static_cast<std::size_t>(OperatingMode::FULL);
4830 ++i)
4831 {
4832 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4833 auto& state = obj[jss::state_accounting][states_[i]];
4834 state[jss::transitions] = std::to_string(counters[i].transitions);
4835 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4836 }
4837 obj[jss::server_state_duration_us] = std::to_string(current.count());
4838 if (initialSync)
4839 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4840}
4841
4842//------------------------------------------------------------------------------
4843
4846 Application& app,
4848 bool standalone,
4849 std::size_t minPeerCount,
4850 bool startvalid,
4851 JobQueue& job_queue,
4853 ValidatorKeys const& validatorKeys,
4854 boost::asio::io_service& io_svc,
4855 beast::Journal journal,
4856 beast::insight::Collector::ptr const& collector)
4857{
4858 return std::make_unique<NetworkOPsImp>(
4859 app,
4860 clock,
4861 standalone,
4862 minPeerCount,
4863 startvalid,
4864 job_queue,
4866 validatorKeys,
4867 io_svc,
4868 journal,
4869 collector);
4870}
4871
4872} // namespace ripple
T any_of(T... args)
T back_inserter(T... args)
T begin(T... args)
T bind(T... args)
Decorator for streaming out compact json.
Definition: json_writer.h:318
Lightweight wrapper to tag static string.
Definition: json_value.h:63
Represents a JSON value.
Definition: json_value.h:149
Json::UInt UInt
Definition: json_value.h:156
Value & append(Value const &value)
Append value to array at the end.
Definition: json_value.cpp:910
bool isMember(char const *key) const
Return true if the object has a member named key.
Definition: json_value.cpp:962
Value get(UInt index, Value const &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Definition: json_value.cpp:854
A generic endpoint for log messages.
Definition: Journal.h:60
Stream error() const
Definition: Journal.h:346
Stream debug() const
Definition: Journal.h:328
Stream info() const
Definition: Journal.h:334
Stream trace() const
Severity stream access functions.
Definition: Journal.h:322
Stream warn() const
Definition: Journal.h:340
A metric for measuring an integral value.
Definition: Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition: Gauge.h:68
A reference to a handler for performing polled collection.
Definition: Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition: Book.h:36
Issue in
Definition: Book.h:38
Issue out
Definition: Book.h:39
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition: ClusterNode.h:46
std::uint32_t getLoadFee() const
Definition: ClusterNode.h:52
NetClock::time_point getReportTime() const
Definition: ClusterNode.h:58
PublicKey const & identity() const
Definition: ClusterNode.h:64
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:49
uint32_t NETWORK_ID
Definition: Config.h:156
std::string SERVER_DOMAIN
Definition: Config.h:278
std::size_t NODE_SIZE
Definition: Config.h:213
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition: Config.h:160
int RELAY_UNTRUSTED_VALIDATIONS
Definition: Config.h:169
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition: InfoSub.h:54
AccountID account
Definition: Issue.h:36
Currency currency
Definition: Issue.h:35
A pool of threads to perform work.
Definition: JobQueue.h:58
Json::Value getJson(int c=0)
Definition: JobQueue.cpp:214
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:168
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
Definition: LedgerMaster.h:265
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:79
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
Definition: LoadFeeTrack.h:45
std::uint32_t getClusterFee() const
Definition: LoadFeeTrack.h:82
std::uint32_t getLocalFee() const
Definition: LoadFeeTrack.h:75
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:89
std::uint32_t getRemoteFee() const
Definition: LoadFeeTrack.h:68
std::uint32_t getLoadFactor() const
Definition: LoadFeeTrack.h:95
Manages load sources.
Definition: LoadManager.h:46
void heartbeat()
Reset the stall detection timer.
Definition: LoadManager.cpp:64
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
Definition: Manifest.cpp:323
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
Definition: NetworkOPs.cpp:144
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
Definition: NetworkOPs.cpp:154
std::chrono::steady_clock::time_point start_
Definition: NetworkOPs.cpp:156
static std::array< Json::StaticString const, 5 > const states_
Definition: NetworkOPs.cpp:160
std::chrono::steady_clock::time_point const processStart_
Definition: NetworkOPs.cpp:158
Transaction with input flags and results to be applied in batches.
Definition: NetworkOPs.cpp:95
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
Definition: NetworkOPs.cpp:104
std::shared_ptr< Transaction > const transaction
Definition: NetworkOPs.cpp:97
boost::asio::steady_timer accountHistoryTxTimer_
Definition: NetworkOPs.cpp:755
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
Definition: NetworkOPs.cpp:890
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
bool preProcessTransaction(std::shared_ptr< Transaction > &transaction)
std::vector< TransactionStatus > mTransactions
Definition: NetworkOPs.cpp:802
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Definition: NetworkOPs.cpp:745
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
Definition: NetworkOPs.cpp:757
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
Definition: NetworkOPs.cpp:908
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
bool checkLastClosedLedger(Overlay::PeerSequence const &, uint256 &networkClosed)
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void processTransactionSet(CanonicalTXSet const &set) override
Process a set of transactions synchronously, and ensuring that they are processed in one batch.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
Definition: NetworkOPs.cpp:753
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
Definition: NetworkOPs.cpp:121
std::optional< PublicKey > const validatorPK_
Definition: NetworkOPs.cpp:759
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
Definition: NetworkOPs.cpp:741
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
Definition: NetworkOPs.cpp:271
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Definition: NetworkOPs.cpp:771
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
Definition: NetworkOPs.cpp:754
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
Definition: NetworkOPs.cpp:127
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:227
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
Definition: NetworkOPs.cpp:748
beast::Journal m_journal
Definition: NetworkOPs.cpp:739
SubInfoMapType mSubAccount
Definition: NetworkOPs.cpp:766
std::optional< PublicKey > const validatorMasterPK_
Definition: NetworkOPs.cpp:760
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
Definition: NetworkOPs.cpp:806
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
Definition: NetworkOPs.cpp:752
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
Definition: NetworkOPs.cpp:961
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
Definition: NetworkOPs.cpp:786
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
Definition: NetworkOPs.cpp:796
std::atomic< bool > unlBlocked_
Definition: NetworkOPs.cpp:750
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
Definition: NetworkOPs.cpp:743
std::atomic< bool > needNetworkLedger_
Definition: NetworkOPs.cpp:747
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
Definition: NetworkOPs.cpp:804
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
Definition: NetworkOPs.cpp:920
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void doTransactionSyncBatch(std::unique_lock< std::mutex > &lock, std::function< bool(std::unique_lock< std::mutex > const &)> retryCallback)
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
Definition: NetworkOPs.cpp:764
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
Definition: NetworkOPs.cpp:951
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
Definition: NetworkOPs.cpp:788
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
Definition: NetworkOPs.cpp:902
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
Definition: NetworkOPs.cpp:799
void setMode(OperatingMode om) override
void stop() override
Definition: NetworkOPs.cpp:588
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, bool const bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
void clearNeedNetworkLedger() override
Definition: NetworkOPs.cpp:914
DispatchState mDispatchState
Definition: NetworkOPs.cpp:801
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
Definition: NetworkOPs.cpp:767
void reportFeeChange() override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
Definition: NetworkOPs.cpp:926
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
Definition: NetworkOPs.cpp:807
Application & app_
Definition: NetworkOPs.cpp:738
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
Definition: NetworkOPs.cpp:762
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
Definition: NetworkOPs.cpp:769
std::atomic< bool > amendmentWarned_
Definition: NetworkOPs.cpp:749
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
Definition: NetworkOPs.cpp:932
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition: NetworkOPs.h:89
void getCountsJson(Json::Value &obj)
Definition: Database.cpp:268
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:51
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:66
BookListeners::pointer getBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &alTx, MultiApiJson const &jvObj)
BookListeners::pointer makeBookListeners(Book const &)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
Definition: RCLConsensus.h:53
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
Definition: RCLConsensus.h:447
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Definition: RCLConsensus.h:460
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:44
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Definition: RCLCxPeerPos.h:78
Represents a set of transactions in RCLConsensus.
Definition: RCLCxTx.h:63
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition: RFC1751.cpp:507
Collects logging information.
Definition: RCLConsensus.h:551
std::unique_ptr< std::stringstream > const & ss()
Definition: RCLConsensus.h:565
A view into a ledger.
Definition: ReadView.h:52
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition: STAmount.cpp:666
std::string getText() const override
Definition: STAmount.cpp:706
Issue const & issue() const
Definition: STAmount.h:496
std::optional< T > get(std::string const &name) const
Definition: BasicConfig.h:140
std::size_t size() const noexcept
Definition: Serializer.h:72
void const * data() const noexcept
Definition: Serializer.h:78
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition: TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition: TxQ.cpp:1778
static time_point now()
Definition: UptimeClock.cpp:67
Validator keys and manifest as set in configuration file.
Definition: ValidatorKeys.h:38
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition: XRPAmount.h:262
Json::Value jsonClipped() const
Definition: XRPAmount.h:218
iterator begin()
Definition: base_uint.h:136
static constexpr std::size_t size()
Definition: base_uint.h:526
bool isZero() const
Definition: base_uint.h:540
bool isNonZero() const
Definition: base_uint.h:545
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition: scope.h:231
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition: json_value.h:44
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:45
int Int
Definition: json_forwards.h:26
unsigned int UInt
Definition: json_forwards.h:27
void rngfill(void *const buffer, std::size_t const bytes, Generator &g)
Definition: rngfill.h:34
std::string const & getVersionString()
Server version.
Definition: BuildInfo.cpp:68
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Definition: CTID.h:43
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition: BookChanges.h:47
void insertNFTSyntheticInJson(Json::Value &, std::shared_ptr< STTx const > const &, TxMeta const &)
Adds common synthetic fields to transaction-related JSON responses.
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(STTx const &tx, ReadView const &view, AccountID const &src, beast::Journal j)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition: Indexes.cpp:184
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition: Indexes.cpp:380
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition: Indexes.cpp:274
Rate rate(Env &env, Account const &account, std::uint32_t const &seq)
Definition: escrow.cpp:69
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:25
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:114
STAmount divide(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:93
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition: STTx.cpp:811
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition: View.cpp:553
@ fhZERO_IF_FROZEN
Definition: View.h:78
@ fhIGNORE_FREEZE
Definition: View.h:78
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition: View.cpp:147
std::uint64_t getQuality(uint256 const &uBase)
Definition: Indexes.cpp:149
@ INCLUDED
Definition: Transaction.h:49
@ OBSOLETE
Definition: Transaction.h:54
@ INVALID
Definition: Transaction.h:48
error_code_i
Definition: ErrorCodes.h:40
@ rpcSUCCESS
Definition: ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition: ErrorCodes.h:84
@ rpcINTERNAL
Definition: ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
Definition: SecretKey.cpp:369
auto constexpr muldiv_max
Definition: mulDiv.h:28
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition: LocalTxs.cpp:192
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition: View.cpp:761
STAmount amountFromQuality(std::uint64_t rate)
Definition: STAmount.cpp:1013
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition: ErrorCodes.h:170
@ warnRPC_UNSUPPORTED_MAJORITY
Definition: ErrorCodes.h:168
@ warnRPC_AMENDMENT_BLOCKED
Definition: ErrorCodes.h:169
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
Definition: BasicConfig.h:315
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition: NetworkOPs.h:68
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:53
AccountID calcAccountID(PublicKey const &pk)
Definition: AccountID.cpp:168
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Json::Value rpcError(int iError)
Definition: RPCErr.cpp:31
@ tefPAST_SEQ
Definition: TER.h:175
bool isTefFailure(TER x) noexcept
Definition: TER.h:662
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
Definition: NetworkOPs.cpp:871
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition: View.cpp:158
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition: ApiVersion.h:101
bool isTerRetry(TER x) noexcept
Definition: TER.h:668
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:75
@ tesSUCCESS
Definition: TER.h:244
uint256 getQualityNext(uint256 const &uBase)
Definition: Indexes.cpp:141
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition: View.cpp:386
bool isTesSuccess(TER x) noexcept
Definition: TER.h:674
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition: ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition: chrono.h:92
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:630
FeeSetup setup_FeeVote(Section const &section)
Definition: Config.cpp:1128
bool isTemMalformed(TER x) noexcept
Definition: TER.h:656
Number root(Number f, unsigned d)
Definition: Number.cpp:636
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
ApplyFlags
Definition: ApplyView.h:31
@ tapFAIL_HARD
Definition: ApplyView.h:36
@ tapUNLIMITED
Definition: ApplyView.h:43
@ tapNONE
Definition: ApplyView.h:32
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:44
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition: Seed.cpp:76
constexpr std::size_t maxPoppedTransactions
@ terQUEUED
Definition: TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition: TER.cpp:249
@ jtNETOP_CLUSTER
Definition: Job.h:75
@ jtCLIENT_FEE_CHANGE
Definition: Job.h:47
@ jtTRANSACTION
Definition: Job.h:62
@ jtTXN_PROC
Definition: Job.h:82
@ jtCLIENT_CONSENSUS
Definition: Job.h:48
@ jtBATCH
Definition: Job.h:65
@ jtCLIENT_ACCT_HIST
Definition: Job.h:49
bool isTelLocal(TER x) noexcept
Definition: TER.h:650
uint256 getBookBase(Book const &book)
Definition: Indexes.cpp:115
constexpr std::uint32_t tfInnerBatchTxn
Definition: TxFlags.h:61
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition: View.cpp:184
static std::uint32_t trunc32(std::uint64_t v)
@ temINVALID_FLAG
Definition: TER.h:111
@ temBAD_SIGNATURE
Definition: TER.h:105
static auto const genesisAccountId
Definition: NetworkOPs.cpp:884
STL namespace.
T owns_lock(T... args)
T ref(T... args)
T reserve(T... args)
T reset(T... args)
T set_intersection(T... args)
T size(T... args)
T str(T... args)
std::string serialized
The manifest in serialized form.
Definition: Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition: Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition: Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
Definition: Manifest.cpp:244
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition: Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
Definition: Manifest.cpp:255
PublicKey masterKey
The master key associated with this manifest.
Definition: Manifest.h:86
Server fees published on server subscription.
Definition: NetworkOPs.cpp:204
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
Definition: NetworkOPs.cpp:223
bool operator==(ServerFeeSummary const &b) const
Definition: NetworkOPs.cpp:215
beast::insight::Gauge full_transitions
Definition: NetworkOPs.cpp:858
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:813
beast::insight::Hook hook
Definition: NetworkOPs.cpp:847
beast::insight::Gauge connected_duration
Definition: NetworkOPs.cpp:849
beast::insight::Gauge tracking_duration
Definition: NetworkOPs.cpp:851
beast::insight::Gauge connected_transitions
Definition: NetworkOPs.cpp:855
beast::insight::Gauge disconnected_transitions
Definition: NetworkOPs.cpp:854
beast::insight::Gauge syncing_duration
Definition: NetworkOPs.cpp:850
beast::insight::Gauge tracking_transitions
Definition: NetworkOPs.cpp:857
beast::insight::Gauge full_duration
Definition: NetworkOPs.cpp:852
beast::insight::Gauge disconnected_duration
Definition: NetworkOPs.cpp:848
beast::insight::Gauge syncing_transitions
Definition: NetworkOPs.cpp:856
SubAccountHistoryIndex(AccountID const &accountId)
Definition: NetworkOPs.cpp:702
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:721
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:716
Represents a transfer rate.
Definition: Rate.h:40
Data format for exchanging consumption information across peers.
Definition: Gossip.h:32
std::vector< Item > items
Definition: Gossip.h:44
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition: TxQ.h:165
IsMemberResult isMember(char const *key) const
Definition: MultiApiJson.h:93
void set(char const *key, auto const &v)
Definition: MultiApiJson.h:82
Select all peers (except optional excluded) that are in our cluster.
Definition: predicates.h:137
Sends a message to all peers.
Definition: predicates.h:32
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)