rippled
Loading...
Searching...
No Matches
NetworkOPs.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/misc/AmendmentTable.h>
32#include <xrpld/app/misc/DeliverMax.h>
33#include <xrpld/app/misc/HashRouter.h>
34#include <xrpld/app/misc/LoadFeeTrack.h>
35#include <xrpld/app/misc/NetworkOPs.h>
36#include <xrpld/app/misc/Transaction.h>
37#include <xrpld/app/misc/TxQ.h>
38#include <xrpld/app/misc/ValidatorKeys.h>
39#include <xrpld/app/misc/ValidatorList.h>
40#include <xrpld/app/misc/detail/AccountTxPaging.h>
41#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
42#include <xrpld/app/tx/apply.h>
43#include <xrpld/consensus/Consensus.h>
44#include <xrpld/consensus/ConsensusParms.h>
45#include <xrpld/overlay/Cluster.h>
46#include <xrpld/overlay/Overlay.h>
47#include <xrpld/overlay/predicates.h>
48#include <xrpld/perflog/PerfLog.h>
49#include <xrpld/rpc/BookChanges.h>
50#include <xrpld/rpc/CTID.h>
51#include <xrpld/rpc/DeliveredAmount.h>
52#include <xrpld/rpc/MPTokenIssuanceID.h>
53#include <xrpld/rpc/ServerHandler.h>
54
55#include <xrpl/basics/UptimeClock.h>
56#include <xrpl/basics/mulDiv.h>
57#include <xrpl/basics/safe_cast.h>
58#include <xrpl/basics/scope.h>
59#include <xrpl/beast/utility/rngfill.h>
60#include <xrpl/crypto/RFC1751.h>
61#include <xrpl/crypto/csprng.h>
62#include <xrpl/protocol/BuildInfo.h>
63#include <xrpl/protocol/Feature.h>
64#include <xrpl/protocol/MultiApiJson.h>
65#include <xrpl/protocol/RPCErr.h>
66#include <xrpl/protocol/jss.h>
67#include <xrpl/resource/Fees.h>
68#include <xrpl/resource/ResourceManager.h>
69
70#include <boost/asio/ip/host_name.hpp>
71#include <boost/asio/steady_timer.hpp>
72
73#include <algorithm>
74#include <exception>
75#include <mutex>
76#include <optional>
77#include <set>
78#include <sstream>
79#include <string>
80#include <tuple>
81#include <unordered_map>
82
83namespace ripple {
84
85class NetworkOPsImp final : public NetworkOPs
86{
92 {
93 public:
95 bool const admin;
96 bool const local;
98 bool applied = false;
100
103 bool a,
104 bool l,
105 FailHard f)
106 : transaction(t), admin(a), local(l), failType(f)
107 {
108 XRPL_ASSERT(
110 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
111 "valid inputs");
112 }
113 };
114
118 enum class DispatchState : unsigned char {
119 none,
120 scheduled,
121 running,
122 };
123
125
141 {
142 struct Counters
143 {
144 explicit Counters() = default;
145
148 };
149
153 std::chrono::steady_clock::time_point start_ =
155 std::chrono::steady_clock::time_point const processStart_ = start_;
158
159 public:
161 {
163 .transitions = 1;
164 }
165
172 void
174
180 void
181 json(Json::Value& obj) const;
182
184 {
186 decltype(mode_) mode;
187 decltype(start_) start;
189 };
190
193 {
196 }
197 };
198
201 {
202 ServerFeeSummary() = default;
203
205 XRPAmount fee,
206 TxQ::Metrics&& escalationMetrics,
207 LoadFeeTrack const& loadFeeTrack);
208 bool
209 operator!=(ServerFeeSummary const& b) const;
210
211 bool
213 {
214 return !(*this != b);
215 }
216
221 };
222
223public:
225 Application& app,
227 bool standalone,
228 std::size_t minPeerCount,
229 bool start_valid,
230 JobQueue& job_queue,
232 ValidatorKeys const& validatorKeys,
233 boost::asio::io_service& io_svc,
234 beast::Journal journal,
235 beast::insight::Collector::ptr const& collector)
236 : app_(app)
237 , m_journal(journal)
240 , heartbeatTimer_(io_svc)
241 , clusterTimer_(io_svc)
242 , accountHistoryTxTimer_(io_svc)
243 , mConsensus(
244 app,
246 setup_FeeVote(app_.config().section("voting")),
247 app_.logs().journal("FeeVote")),
249 *m_localTX,
250 app.getInboundTransactions(),
251 beast::get_abstract_clock<std::chrono::steady_clock>(),
252 validatorKeys,
253 app_.logs().journal("LedgerConsensus"))
254 , validatorPK_(
255 validatorKeys.keys ? validatorKeys.keys->publicKey
256 : decltype(validatorPK_){})
258 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
259 : decltype(validatorMasterPK_){})
261 , m_job_queue(job_queue)
262 , m_standalone(standalone)
263 , minPeerCount_(start_valid ? 0 : minPeerCount)
265 {
266 }
267
268 ~NetworkOPsImp() override
269 {
270 // This clear() is necessary to ensure the shared_ptrs in this map get
271 // destroyed NOW because the objects in this map invoke methods on this
272 // class when they are destroyed
274 }
275
276public:
278 getOperatingMode() const override;
279
281 strOperatingMode(OperatingMode const mode, bool const admin) const override;
282
284 strOperatingMode(bool const admin = false) const override;
285
286 //
287 // Transaction operations.
288 //
289
290 // Must complete immediately.
291 void
293
294 void
296 std::shared_ptr<Transaction>& transaction,
297 bool bUnlimited,
298 bool bLocal,
299 FailHard failType) override;
300
309 void
312 bool bUnlimited,
313 FailHard failType);
314
324 void
327 bool bUnlimited,
328 FailHard failtype);
329
333 void
335
341 void
343
344 //
345 // Owner functions.
346 //
347
351 AccountID const& account) override;
352
353 //
354 // Book functions.
355 //
356
357 void
360 Book const&,
361 AccountID const& uTakerID,
362 const bool bProof,
363 unsigned int iLimit,
364 Json::Value const& jvMarker,
365 Json::Value& jvResult) override;
366
367 // Ledger proposal/close functions.
368 bool
370
371 bool
374 std::string const& source) override;
375
376 void
377 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
378
379 // Network state machine.
380
381 // Used for the "jump" case.
382private:
383 void
385 bool
387
388public:
389 bool
391 uint256 const& networkClosed,
392 std::unique_ptr<std::stringstream> const& clog) override;
393 void
395 void
396 setStandAlone() override;
397
401 void
402 setStateTimer() override;
403
404 void
405 setNeedNetworkLedger() override;
406 void
407 clearNeedNetworkLedger() override;
408 bool
409 isNeedNetworkLedger() override;
410 bool
411 isFull() override;
412
413 void
414 setMode(OperatingMode om) override;
415
416 bool
417 isBlocked() override;
418 bool
419 isAmendmentBlocked() override;
420 void
421 setAmendmentBlocked() override;
422 bool
423 isAmendmentWarned() override;
424 void
425 setAmendmentWarned() override;
426 void
427 clearAmendmentWarned() override;
428 bool
429 isUNLBlocked() override;
430 void
431 setUNLBlocked() override;
432 void
433 clearUNLBlocked() override;
434 void
435 consensusViewChange() override;
436
438 getConsensusInfo() override;
440 getServerInfo(bool human, bool admin, bool counters) override;
441 void
442 clearLedgerFetch() override;
444 getLedgerFetchInfo() override;
447 std::optional<std::chrono::milliseconds> consensusDelay) override;
448 void
449 reportFeeChange() override;
450 void
452
453 void
454 updateLocalTx(ReadView const& view) override;
456 getLocalTxCount() override;
457
458 //
459 // Monitoring: publisher side.
460 //
461 void
462 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
463 void
466 std::shared_ptr<STTx const> const& transaction,
467 TER result) override;
468 void
469 pubValidation(std::shared_ptr<STValidation> const& val) override;
470
471 //--------------------------------------------------------------------------
472 //
473 // InfoSub::Source.
474 //
475 void
477 InfoSub::ref ispListener,
478 hash_set<AccountID> const& vnaAccountIDs,
479 bool rt) override;
480 void
482 InfoSub::ref ispListener,
483 hash_set<AccountID> const& vnaAccountIDs,
484 bool rt) override;
485
486 // Just remove the subscription from the tracking
487 // not from the InfoSub. Needed for InfoSub destruction
488 void
490 std::uint64_t seq,
491 hash_set<AccountID> const& vnaAccountIDs,
492 bool rt) override;
493
495 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
496 override;
497 void
499 InfoSub::ref ispListener,
500 AccountID const& account,
501 bool historyOnly) override;
502
503 void
505 std::uint64_t seq,
506 AccountID const& account,
507 bool historyOnly) override;
508
509 bool
510 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
511 bool
512 unsubLedger(std::uint64_t uListener) override;
513
514 bool
515 subBookChanges(InfoSub::ref ispListener) override;
516 bool
517 unsubBookChanges(std::uint64_t uListener) override;
518
519 bool
520 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
521 override;
522 bool
523 unsubServer(std::uint64_t uListener) override;
524
525 bool
526 subBook(InfoSub::ref ispListener, Book const&) override;
527 bool
528 unsubBook(std::uint64_t uListener, Book const&) override;
529
530 bool
531 subManifests(InfoSub::ref ispListener) override;
532 bool
533 unsubManifests(std::uint64_t uListener) override;
534 void
535 pubManifest(Manifest const&) override;
536
537 bool
538 subTransactions(InfoSub::ref ispListener) override;
539 bool
540 unsubTransactions(std::uint64_t uListener) override;
541
542 bool
543 subRTTransactions(InfoSub::ref ispListener) override;
544 bool
545 unsubRTTransactions(std::uint64_t uListener) override;
546
547 bool
548 subValidations(InfoSub::ref ispListener) override;
549 bool
550 unsubValidations(std::uint64_t uListener) override;
551
552 bool
553 subPeerStatus(InfoSub::ref ispListener) override;
554 bool
555 unsubPeerStatus(std::uint64_t uListener) override;
556 void
557 pubPeerStatus(std::function<Json::Value(void)> const&) override;
558
559 bool
560 subConsensus(InfoSub::ref ispListener) override;
561 bool
562 unsubConsensus(std::uint64_t uListener) override;
563
565 findRpcSub(std::string const& strUrl) override;
567 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
568 bool
569 tryRemoveRpcSub(std::string const& strUrl) override;
570
571 void
572 stop() override
573 {
574 {
575 boost::system::error_code ec;
576 heartbeatTimer_.cancel(ec);
577 if (ec)
578 {
579 JLOG(m_journal.error())
580 << "NetworkOPs: heartbeatTimer cancel error: "
581 << ec.message();
582 }
583
584 ec.clear();
585 clusterTimer_.cancel(ec);
586 if (ec)
587 {
588 JLOG(m_journal.error())
589 << "NetworkOPs: clusterTimer cancel error: "
590 << ec.message();
591 }
592
593 ec.clear();
594 accountHistoryTxTimer_.cancel(ec);
595 if (ec)
596 {
597 JLOG(m_journal.error())
598 << "NetworkOPs: accountHistoryTxTimer cancel error: "
599 << ec.message();
600 }
601 }
602 // Make sure that any waitHandlers pending in our timers are done.
603 using namespace std::chrono_literals;
604 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
605 }
606
607 void
608 stateAccounting(Json::Value& obj) override;
609
610private:
611 void
612 setTimer(
613 boost::asio::steady_timer& timer,
614 std::chrono::milliseconds const& expiry_time,
615 std::function<void()> onExpire,
616 std::function<void()> onError);
617 void
619 void
621 void
623 void
625
627 transJson(
628 std::shared_ptr<STTx const> const& transaction,
629 TER result,
630 bool validated,
633
634 void
637 AcceptedLedgerTx const& transaction,
638 bool last);
639
640 void
643 AcceptedLedgerTx const& transaction,
644 bool last);
645
646 void
649 std::shared_ptr<STTx const> const& transaction,
650 TER result);
651
652 void
653 pubServer();
654 void
656
658 getHostId(bool forAdmin);
659
660private:
664
665 /*
666 * With a validated ledger to separate history and future, the node
667 * streams historical txns with negative indexes starting from -1,
668 * and streams future txns starting from index 0.
669 * The SubAccountHistoryIndex struct maintains these indexes.
670 * It also has a flag stopHistorical_ for stopping streaming
671 * the historical txns.
672 */
674 {
676 // forward
678 // separate backward and forward
680 // history, backward
685
687 : accountId_(accountId)
688 , forwardTxIndex_(0)
691 , historyTxIndex_(-1)
692 , haveHistorical_(false)
693 , stopHistorical_(false)
694 {
695 }
696 };
698 {
701 };
703 {
706 };
709
713 void
717 void
719 void
721
724
726
728
730
735
737 boost::asio::steady_timer heartbeatTimer_;
738 boost::asio::steady_timer clusterTimer_;
739 boost::asio::steady_timer accountHistoryTxTimer_;
740
742
745
747
749
752
754
756
757 enum SubTypes {
758 sLedger, // Accepted ledgers.
759 sManifests, // Received validator manifests.
760 sServer, // When server changes connectivity state.
761 sTransactions, // All accepted transactions.
762 sRTTransactions, // All proposed and accepted transactions.
763 sValidations, // Received validations.
764 sPeerStatus, // Peer status changes.
765 sConsensusPhase, // Consensus phase
766 sBookChanges, // Per-ledger order book changes
767 sLastEntry // Any new entry must be ADDED ABOVE this one
768 };
769
771
773
775
776 // Whether we are in standalone mode.
777 bool const m_standalone;
778
779 // The number of nodes that we need to consider ourselves connected.
781
782 // Transaction batching.
787
789
792
793private:
794 struct Stats
795 {
796 template <class Handler>
798 Handler const& handler,
799 beast::insight::Collector::ptr const& collector)
800 : hook(collector->make_hook(handler))
801 , disconnected_duration(collector->make_gauge(
802 "State_Accounting",
803 "Disconnected_duration"))
804 , connected_duration(collector->make_gauge(
805 "State_Accounting",
806 "Connected_duration"))
808 collector->make_gauge("State_Accounting", "Syncing_duration"))
809 , tracking_duration(collector->make_gauge(
810 "State_Accounting",
811 "Tracking_duration"))
813 collector->make_gauge("State_Accounting", "Full_duration"))
814 , disconnected_transitions(collector->make_gauge(
815 "State_Accounting",
816 "Disconnected_transitions"))
817 , connected_transitions(collector->make_gauge(
818 "State_Accounting",
819 "Connected_transitions"))
820 , syncing_transitions(collector->make_gauge(
821 "State_Accounting",
822 "Syncing_transitions"))
823 , tracking_transitions(collector->make_gauge(
824 "State_Accounting",
825 "Tracking_transitions"))
827 collector->make_gauge("State_Accounting", "Full_transitions"))
828 {
829 }
830
837
843 };
844
845 std::mutex m_statsMutex; // Mutex to lock m_stats
847
848private:
849 void
851};
852
853//------------------------------------------------------------------------------
854
856 {"disconnected", "connected", "syncing", "tracking", "full"}};
857
859
867
868static auto const genesisAccountId = calcAccountID(
870 .first);
871
872//------------------------------------------------------------------------------
873inline OperatingMode
875{
876 return mMode;
877}
878
879inline std::string
880NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
881{
882 return strOperatingMode(mMode, admin);
883}
884
885inline void
887{
889}
890
891inline void
893{
894 needNetworkLedger_ = true;
895}
896
897inline void
899{
900 needNetworkLedger_ = false;
901}
902
903inline bool
905{
906 return needNetworkLedger_;
907}
908
909inline bool
911{
913}
914
917{
918 static std::string const hostname = boost::asio::ip::host_name();
919
920 if (forAdmin)
921 return hostname;
922
923 // For non-admin uses hash the node public key into a
924 // single RFC1751 word:
925 static std::string const shroudedHostId = [this]() {
926 auto const& id = app_.nodeIdentity();
927
928 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
929 }();
930
931 return shroudedHostId;
932}
933
934void
936{
938
939 // Only do this work if a cluster is configured
940 if (app_.cluster().size() != 0)
942}
943
944void
946 boost::asio::steady_timer& timer,
947 const std::chrono::milliseconds& expiry_time,
948 std::function<void()> onExpire,
949 std::function<void()> onError)
950{
951 // Only start the timer if waitHandlerCounter_ is not yet joined.
952 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
953 [this, onExpire, onError](boost::system::error_code const& e) {
954 if ((e.value() == boost::system::errc::success) &&
955 (!m_job_queue.isStopped()))
956 {
957 onExpire();
958 }
959 // Recover as best we can if an unexpected error occurs.
960 if (e.value() != boost::system::errc::success &&
961 e.value() != boost::asio::error::operation_aborted)
962 {
963 // Try again later and hope for the best.
964 JLOG(m_journal.error())
965 << "Timer got error '" << e.message()
966 << "'. Restarting timer.";
967 onError();
968 }
969 }))
970 {
971 timer.expires_from_now(expiry_time);
972 timer.async_wait(std::move(*optionalCountedHandler));
973 }
974}
975
976void
977NetworkOPsImp::setHeartbeatTimer()
978{
979 setTimer(
980 heartbeatTimer_,
981 mConsensus.parms().ledgerGRANULARITY,
982 [this]() {
983 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
984 processHeartbeatTimer();
985 });
986 },
987 [this]() { setHeartbeatTimer(); });
988}
989
990void
991NetworkOPsImp::setClusterTimer()
992{
993 using namespace std::chrono_literals;
994
995 setTimer(
996 clusterTimer_,
997 10s,
998 [this]() {
999 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
1000 processClusterTimer();
1001 });
1002 },
1003 [this]() { setClusterTimer(); });
1004}
1005
1006void
1007NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
1008{
1009 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
1010 << toBase58(subInfo.index_->accountId_);
1011 using namespace std::chrono_literals;
1012 setTimer(
1013 accountHistoryTxTimer_,
1014 4s,
1015 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1016 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1017}
1018
1019void
1020NetworkOPsImp::processHeartbeatTimer()
1021{
1022 RclConsensusLogger clog(
1023 "Heartbeat Timer", mConsensus.validating(), m_journal);
1024 {
1025 std::unique_lock lock{app_.getMasterMutex()};
1026
1027 // VFALCO NOTE This is for diagnosing a crash on exit
1028 LoadManager& mgr(app_.getLoadManager());
1029 mgr.heartbeat();
1030
1031 std::size_t const numPeers = app_.overlay().size();
1032
1033 // do we have sufficient peers? If not, we are disconnected.
1034 if (numPeers < minPeerCount_)
1035 {
1036 if (mMode != OperatingMode::DISCONNECTED)
1037 {
1038 setMode(OperatingMode::DISCONNECTED);
1040 ss << "Node count (" << numPeers << ") has fallen "
1041 << "below required minimum (" << minPeerCount_ << ").";
1042 JLOG(m_journal.warn()) << ss.str();
1043 CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
1044 }
1045 else
1046 {
1047 CLOG(clog.ss())
1048 << "already DISCONNECTED. too few peers (" << numPeers
1049 << "), need at least " << minPeerCount_;
1050 }
1051
1052 // MasterMutex lock need not be held to call setHeartbeatTimer()
1053 lock.unlock();
1054 // We do not call mConsensus.timerEntry until there are enough
1055 // peers providing meaningful inputs to consensus
1056 setHeartbeatTimer();
1057
1058 return;
1059 }
1060
1061 if (mMode == OperatingMode::DISCONNECTED)
1062 {
1063 setMode(OperatingMode::CONNECTED);
1064 JLOG(m_journal.info())
1065 << "Node count (" << numPeers << ") is sufficient.";
1066 CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
1067 << " peers. ";
1068 }
1069
1070 // Check if the last validated ledger forces a change between these
1071 // states.
1072 auto origMode = mMode.load();
1073 CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
1074 if (mMode == OperatingMode::SYNCING)
1075 setMode(OperatingMode::SYNCING);
1076 else if (mMode == OperatingMode::CONNECTED)
1077 setMode(OperatingMode::CONNECTED);
1078 auto newMode = mMode.load();
1079 if (origMode != newMode)
1080 {
1081 CLOG(clog.ss())
1082 << ", changing to " << strOperatingMode(newMode, true);
1083 }
1084 CLOG(clog.ss()) << ". ";
1085 }
1086
1087 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());
1088
1089 CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
1090 const ConsensusPhase currPhase = mConsensus.phase();
1091 if (mLastConsensusPhase != currPhase)
1092 {
1093 reportConsensusStateChange(currPhase);
1094 mLastConsensusPhase = currPhase;
1095 CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
1096 }
1097 CLOG(clog.ss()) << ". ";
1098
1099 setHeartbeatTimer();
1100}
1101
1102void
1103NetworkOPsImp::processClusterTimer()
1104{
1105 if (app_.cluster().size() == 0)
1106 return;
1107
1108 using namespace std::chrono_literals;
1109
1110 bool const update = app_.cluster().update(
1111 app_.nodeIdentity().first,
1112 "",
1113 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1114 ? app_.getFeeTrack().getLocalFee()
1115 : 0,
1116 app_.timeKeeper().now());
1117
1118 if (!update)
1119 {
1120 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1121 setClusterTimer();
1122 return;
1123 }
1124
1125 protocol::TMCluster cluster;
1126 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1127 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1128 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1129 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1130 n.set_nodeload(node.getLoadFee());
1131 if (!node.name().empty())
1132 n.set_nodename(node.name());
1133 });
1134
1135 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1136 for (auto& item : gossip.items)
1137 {
1138 protocol::TMLoadSource& node = *cluster.add_loadsources();
1139 node.set_name(to_string(item.address));
1140 node.set_cost(item.balance);
1141 }
1142 app_.overlay().foreach(send_if(
1143 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1144 peer_in_cluster()));
1145 setClusterTimer();
1146}
1147
1148//------------------------------------------------------------------------------
1149
1151NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
1152 const
1153{
1154 if (mode == OperatingMode::FULL && admin)
1155 {
1156 auto const consensusMode = mConsensus.mode();
1157 if (consensusMode != ConsensusMode::wrongLedger)
1158 {
1159 if (consensusMode == ConsensusMode::proposing)
1160 return "proposing";
1161
1162 if (mConsensus.validating())
1163 return "validating";
1164 }
1165 }
1166
1167 return states_[static_cast<std::size_t>(mode)];
1168}
1169
1170void
1171NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1172{
1173 if (isNeedNetworkLedger())
1174 {
1175 // Nothing we can do if we've never been in sync
1176 return;
1177 }
1178
1179 // this is an asynchronous interface
1180 auto const trans = sterilize(*iTrans);
1181
1182 auto const txid = trans->getTransactionID();
1183 auto const flags = app_.getHashRouter().getFlags(txid);
1184
1185 if ((flags & SF_BAD) != 0)
1186 {
1187 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1188 return;
1189 }
1190
1191 try
1192 {
1193 auto const [validity, reason] = checkValidity(
1194 app_.getHashRouter(),
1195 *trans,
1196 m_ledgerMaster.getValidatedRules(),
1197 app_.config());
1198
1199 if (validity != Validity::Valid)
1200 {
1201 JLOG(m_journal.warn())
1202 << "Submitted transaction invalid: " << reason;
1203 return;
1204 }
1205 }
1206 catch (std::exception const& ex)
1207 {
1208 JLOG(m_journal.warn())
1209 << "Exception checking transaction " << txid << ": " << ex.what();
1210
1211 return;
1212 }
1213
1214 std::string reason;
1215
1216 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1217
1218 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1219 auto t = tx;
1220 processTransaction(t, false, false, FailHard::no);
1221 });
1222}
1223
1224void
1225NetworkOPsImp::processTransaction(
1226 std::shared_ptr<Transaction>& transaction,
1227 bool bUnlimited,
1228 bool bLocal,
1229 FailHard failType)
1230{
1231 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1232 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1233
1234 if ((newFlags & SF_BAD) != 0)
1235 {
1236 // cached bad
1237 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1238 transaction->setStatus(INVALID);
1239 transaction->setResult(temBAD_SIGNATURE);
1240 return;
1241 }
1242
1243 // NOTE eahennis - I think this check is redundant,
1244 // but I'm not 100% sure yet.
1245 // If so, only cost is looking up HashRouter flags.
1246 auto const view = m_ledgerMaster.getCurrentLedger();
1247 auto const [validity, reason] = checkValidity(
1248 app_.getHashRouter(),
1249 *transaction->getSTransaction(),
1250 view->rules(),
1251 app_.config());
1252 XRPL_ASSERT(
1253 validity == Validity::Valid,
1254 "ripple::NetworkOPsImp::processTransaction : valid validity");
1255
1256 // Not concerned with local checks at this point.
1257 if (validity == Validity::SigBad)
1258 {
1259 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1260 transaction->setStatus(INVALID);
1261 transaction->setResult(temBAD_SIGNATURE);
1262 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1263 return;
1264 }
1265
1266 // canonicalize can change our pointer
1267 app_.getMasterTransaction().canonicalize(&transaction);
1268
1269 if (bLocal)
1270 doTransactionSync(transaction, bUnlimited, failType);
1271 else
1272 doTransactionAsync(transaction, bUnlimited, failType);
1273}
1274
1275void
1276NetworkOPsImp::doTransactionAsync(
1277 std::shared_ptr<Transaction> transaction,
1278 bool bUnlimited,
1279 FailHard failType)
1280{
1281 std::lock_guard lock(mMutex);
1282
1283 if (transaction->getApplying())
1284 return;
1285
1286 mTransactions.push_back(
1287 TransactionStatus(transaction, bUnlimited, false, failType));
1288 transaction->setApplying();
1289
1290 if (mDispatchState == DispatchState::none)
1291 {
1292 if (m_job_queue.addJob(
1293 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1294 {
1295 mDispatchState = DispatchState::scheduled;
1296 }
1297 }
1298}
1299
1300void
1301NetworkOPsImp::doTransactionSync(
1302 std::shared_ptr<Transaction> transaction,
1303 bool bUnlimited,
1304 FailHard failType)
1305{
1306 std::unique_lock<std::mutex> lock(mMutex);
1307
1308 if (!transaction->getApplying())
1309 {
1310 mTransactions.push_back(
1311 TransactionStatus(transaction, bUnlimited, true, failType));
1312 transaction->setApplying();
1313 }
1314
1315 do
1316 {
1317 if (mDispatchState == DispatchState::running)
1318 {
1319 // A batch processing job is already running, so wait.
1320 mCond.wait(lock);
1321 }
1322 else
1323 {
1324 apply(lock);
1325
1326 if (mTransactions.size())
1327 {
1328 // More transactions need to be applied, but by another job.
1329 if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
1330 transactionBatch();
1331 }))
1332 {
1333 mDispatchState = DispatchState::scheduled;
1334 }
1335 }
1336 }
1337 } while (transaction->getApplying());
1338}
1339
1340void
1341NetworkOPsImp::transactionBatch()
1342{
1343 std::unique_lock<std::mutex> lock(mMutex);
1344
1345 if (mDispatchState == DispatchState::running)
1346 return;
1347
1348 while (mTransactions.size())
1349 {
1350 apply(lock);
1351 }
1352}
1353
1354void
1355NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
1356{
1358 std::vector<TransactionStatus> transactions;
1359 mTransactions.swap(transactions);
1360 XRPL_ASSERT(
1361 !transactions.empty(),
1362 "ripple::NetworkOPsImp::apply : non-empty transactions");
1363 XRPL_ASSERT(
1364 mDispatchState != DispatchState::running,
1365 "ripple::NetworkOPsImp::apply : is not running");
1366
1367 mDispatchState = DispatchState::running;
1368
1369 batchLock.unlock();
1370
1371 {
1372 std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
1373 bool changed = false;
1374 {
1375 std::unique_lock ledgerLock{
1376 m_ledgerMaster.peekMutex(), std::defer_lock};
1377 std::lock(masterLock, ledgerLock);
1378
1379 app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
1380 for (TransactionStatus& e : transactions)
1381 {
1382 // we check before adding to the batch
1383 ApplyFlags flags = tapNONE;
1384 if (e.admin)
1385 flags |= tapUNLIMITED;
1386
1387 if (e.failType == FailHard::yes)
1388 flags |= tapFAIL_HARD;
1389
1390 auto const result = app_.getTxQ().apply(
1391 app_, view, e.transaction->getSTransaction(), flags, j);
1392 e.result = result.ter;
1393 e.applied = result.applied;
1394 changed = changed || result.applied;
1395 }
1396 return changed;
1397 });
1398 }
1399 if (changed)
1400 reportFeeChange();
1401
1402 std::optional<LedgerIndex> validatedLedgerIndex;
1403 if (auto const l = m_ledgerMaster.getValidatedLedger())
1404 validatedLedgerIndex = l->info().seq;
1405
1406 auto newOL = app_.openLedger().current();
1407 for (TransactionStatus& e : transactions)
1408 {
1409 e.transaction->clearSubmitResult();
1410
1411 if (e.applied)
1412 {
1413 pubProposedTransaction(
1414 newOL, e.transaction->getSTransaction(), e.result);
1415 e.transaction->setApplied();
1416 }
1417
1418 e.transaction->setResult(e.result);
1419
1420 if (isTemMalformed(e.result))
1421 app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);
1422
1423#ifdef DEBUG
1424 if (e.result != tesSUCCESS)
1425 {
1426 std::string token, human;
1427
1428 if (transResultInfo(e.result, token, human))
1429 {
1430 JLOG(m_journal.info())
1431 << "TransactionResult: " << token << ": " << human;
1432 }
1433 }
1434#endif
1435
1436 bool addLocal = e.local;
1437
1438 if (e.result == tesSUCCESS)
1439 {
1440 JLOG(m_journal.debug())
1441 << "Transaction is now included in open ledger";
1442 e.transaction->setStatus(INCLUDED);
1443
1444 auto const& txCur = e.transaction->getSTransaction();
1445 auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);
1446 if (txNext)
1447 {
1448 std::string reason;
1449 auto const trans = sterilize(*txNext);
1450 auto t = std::make_shared<Transaction>(trans, reason, app_);
1451 submit_held.emplace_back(t, false, false, FailHard::no);
1452 t->setApplying();
1453 }
1454 }
1455 else if (e.result == tefPAST_SEQ)
1456 {
1457 // duplicate or conflict
1458 JLOG(m_journal.info()) << "Transaction is obsolete";
1459 e.transaction->setStatus(OBSOLETE);
1460 }
1461 else if (e.result == terQUEUED)
1462 {
1463 JLOG(m_journal.debug())
1464 << "Transaction is likely to claim a"
1465 << " fee, but is queued until fee drops";
1466
1467 e.transaction->setStatus(HELD);
1468 // Add to held transactions, because it could get
1469 // kicked out of the queue, and this will try to
1470 // put it back.
1471 m_ledgerMaster.addHeldTransaction(e.transaction);
1472 e.transaction->setQueued();
1473 e.transaction->setKept();
1474 }
1475 else if (isTerRetry(e.result))
1476 {
1477 if (e.failType != FailHard::yes)
1478 {
1479 // transaction should be held
1480 JLOG(m_journal.debug())
1481 << "Transaction should be held: " << e.result;
1482 e.transaction->setStatus(HELD);
1483 m_ledgerMaster.addHeldTransaction(e.transaction);
1484 e.transaction->setKept();
1485 }
1486 }
1487 else
1488 {
1489 JLOG(m_journal.debug())
1490 << "Status other than success " << e.result;
1491 e.transaction->setStatus(INVALID);
1492 }
1493
1494 auto const enforceFailHard =
1495 e.failType == FailHard::yes && !isTesSuccess(e.result);
1496
1497 if (addLocal && !enforceFailHard)
1498 {
1499 m_localTX->push_back(
1500 m_ledgerMaster.getCurrentLedgerIndex(),
1501 e.transaction->getSTransaction());
1502 e.transaction->setKept();
1503 }
1504
1505 if ((e.applied ||
1506 ((mMode != OperatingMode::FULL) &&
1507 (e.failType != FailHard::yes) && e.local) ||
1508 (e.result == terQUEUED)) &&
1509 !enforceFailHard)
1510 {
1511 auto const toSkip =
1512 app_.getHashRouter().shouldRelay(e.transaction->getID());
1513
1514 if (toSkip)
1515 {
1516 protocol::TMTransaction tx;
1517 Serializer s;
1518
1519 e.transaction->getSTransaction()->add(s);
1520 tx.set_rawtransaction(s.data(), s.size());
1521 tx.set_status(protocol::tsCURRENT);
1522 tx.set_receivetimestamp(
1523 app_.timeKeeper().now().time_since_epoch().count());
1524 tx.set_deferred(e.result == terQUEUED);
1525 // FIXME: This should be when we received it
1526 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1527 e.transaction->setBroadcast();
1528 }
1529 }
1530
1531 if (validatedLedgerIndex)
1532 {
1533 auto [fee, accountSeq, availableSeq] =
1534 app_.getTxQ().getTxRequiredFeeAndSeq(
1535 *newOL, e.transaction->getSTransaction());
1536 e.transaction->setCurrentLedgerState(
1537 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1538 }
1539 }
1540 }
1541
1542 batchLock.lock();
1543
1544 for (TransactionStatus& e : transactions)
1545 e.transaction->clearApplying();
1546
1547 if (!submit_held.empty())
1548 {
1549 if (mTransactions.empty())
1550 mTransactions.swap(submit_held);
1551 else
1552 for (auto& e : submit_held)
1553 mTransactions.push_back(std::move(e));
1554 }
1555
1556 mCond.notify_all();
1557
1558 mDispatchState = DispatchState::none;
1559}
1560
1561//
1562// Owner functions
1563//
1564
1566NetworkOPsImp::getOwnerInfo(
1568 AccountID const& account)
1569{
1570 Json::Value jvObjects(Json::objectValue);
1571 auto root = keylet::ownerDir(account);
1572 auto sleNode = lpLedger->read(keylet::page(root));
1573 if (sleNode)
1574 {
1575 std::uint64_t uNodeDir;
1576
1577 do
1578 {
1579 for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1580 {
1581 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1582 XRPL_ASSERT(
1583 sleCur,
1584 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1585
1586 switch (sleCur->getType())
1587 {
1588 case ltOFFER:
1589 if (!jvObjects.isMember(jss::offers))
1590 jvObjects[jss::offers] =
1592
1593 jvObjects[jss::offers].append(
1594 sleCur->getJson(JsonOptions::none));
1595 break;
1596
1597 case ltRIPPLE_STATE:
1598 if (!jvObjects.isMember(jss::ripple_lines))
1599 {
1600 jvObjects[jss::ripple_lines] =
1602 }
1603
1604 jvObjects[jss::ripple_lines].append(
1605 sleCur->getJson(JsonOptions::none));
1606 break;
1607
1608 case ltACCOUNT_ROOT:
1609 case ltDIR_NODE:
1610 default:
1611 UNREACHABLE(
1612 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1613 "type");
1614 break;
1615 }
1616 }
1617
1618 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1619
1620 if (uNodeDir)
1621 {
1622 sleNode = lpLedger->read(keylet::page(root, uNodeDir));
1623 XRPL_ASSERT(
1624 sleNode,
1625 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1626 }
1627 } while (uNodeDir);
1628 }
1629
1630 return jvObjects;
1631}
1632
1633//
1634// Other
1635//
1636
1637inline bool
1638NetworkOPsImp::isBlocked()
1639{
1640 return isAmendmentBlocked() || isUNLBlocked();
1641}
1642
// True if this server is amendment blocked (see setAmendmentBlocked):
// it can no longer follow the network's rules and must be upgraded.
inline bool
NetworkOPsImp::isAmendmentBlocked()
{
    return amendmentBlocked_;
}
1648
// Mark this server amendment blocked and drop to CONNECTED mode so it
// stops claiming to be synced.  The flag is set before the mode change
// because setMode() consults isBlocked().
void
NetworkOPsImp::setAmendmentBlocked()
{
    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1655
// True if an amendment warning is active.  Suppressed once the server is
// actually amendment blocked, since that stronger condition supersedes
// the warning.
inline bool
NetworkOPsImp::isAmendmentWarned()
{
    return !amendmentBlocked_ && amendmentWarned_;
}
1661
// Raise the amendment warning flag (reported via getServerInfo warnings).
inline void
NetworkOPsImp::setAmendmentWarned()
{
    amendmentWarned_ = true;
}
1667
// Clear the amendment warning flag.
inline void
NetworkOPsImp::clearAmendmentWarned()
{
    amendmentWarned_ = false;
}
1673
// True if this server is UNL blocked: its validator list has expired
// (see the matching warning text in getServerInfo).
inline bool
NetworkOPsImp::isUNLBlocked()
{
    return unlBlocked_;
}
1679
// Mark this server UNL blocked (expired validator list) and drop to
// CONNECTED mode.  The flag is set before the mode change because
// setMode() consults isBlocked().
void
NetworkOPsImp::setUNLBlocked()
{
    unlBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1686
// Clear the UNL blocked flag (a usable validator list is available again).
inline void
NetworkOPsImp::clearUNLBlocked()
{
    unlBlocked_ = false;
}
1692
// Compare our last closed ledger (LCL) against the network's preferred
// LCL (derived from trusted validations and, failing that, peer reports).
// If the network prefers a different, compatible ledger, switch to it.
// On return, networkClosed holds the hash of the ledger the network has
// agreed on.  Returns true only for an *abnormal* switch.
bool
NetworkOPsImp::checkLastClosedLedger(
    const Overlay::PeerSequence& peerList,
    uint256& networkClosed)
{
    // Returns true if there's an *abnormal* ledger issue, normal changing in
    // TRACKING mode should return false. Do we have sufficient validations for
    // our last closed ledger? Or do sufficient nodes agree? And do we have no
    // better ledger available? If so, we are either tracking or full.

    JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";

    auto const ourClosed = m_ledgerMaster.getClosedLedger();

    if (!ourClosed)
        return false;

    uint256 closedLedger = ourClosed->info().hash;
    uint256 prevClosedLedger = ourClosed->info().parentHash;
    JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
    JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;

    //-------------------------------------------------------------------------
    // Determine preferred last closed ledger

    auto& validations = app_.getValidations();
    JLOG(m_journal.debug())
        << "ValidationTrie " << Json::Compact(validations.getJsonTrie());

    // Will rely on peer LCL if no trusted validations exist
    // NOTE(review): the declaration of peerCounts (ledger hash -> count of
    // peers reporting that hash as their LCL) appears to have been lost in
    // extraction here -- confirm against upstream.
    peerCounts[closedLedger] = 0;
    // Our own vote counts once we are at least tracking the network.
    if (mMode >= OperatingMode::TRACKING)
        peerCounts[closedLedger]++;

    for (auto& peer : peerList)
    {
        uint256 peerLedger = peer->getClosedLedgerHash();

        if (peerLedger.isNonZero())
            ++peerCounts[peerLedger];
    }

    for (auto const& it : peerCounts)
        JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;

    uint256 preferredLCL = validations.getPreferredLCL(
        RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
        m_ledgerMaster.getValidLedgerIndex(),
        peerCounts);

    bool switchLedgers = preferredLCL != closedLedger;
    if (switchLedgers)
        closedLedger = preferredLCL;
    //-------------------------------------------------------------------------
    if (switchLedgers && (closedLedger == prevClosedLedger))
    {
        // don't switch to our own previous ledger
        JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
        networkClosed = ourClosed->info().hash;
        switchLedgers = false;
    }
    else
        networkClosed = closedLedger;

    if (!switchLedgers)
        return false;

    // Try locally first, then attempt to acquire from the network.
    auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);

    if (!consensus)
        consensus = app_.getInboundLedgers().acquire(
            closedLedger, 0, InboundLedger::Reason::CONSENSUS);

    if (consensus &&
        (!m_ledgerMaster.canBeCurrent(consensus) ||
         !m_ledgerMaster.isCompatible(
             *consensus, m_journal.debug(), "Not switching")))
    {
        // Don't switch to a ledger not on the validated chain
        // or with an invalid close time or sequence
        networkClosed = ourClosed->info().hash;
        return false;
    }

    JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
    JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
                           << getJson({*ourClosed, {}});
    JLOG(m_journal.info()) << "Net LCL " << closedLedger;

    // We no longer agree with the network; downgrade our operating mode.
    if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
    {
        setMode(OperatingMode::CONNECTED);
    }

    if (consensus)
    {
        // FIXME: If this rewinds the ledger sequence, or has the same
        // sequence, we should update the status on any stored transactions
        // in the invalidated ledgers.
        switchLastClosedLedger(consensus);
    }

    return true;
}
1798
1799void
1800NetworkOPsImp::switchLastClosedLedger(
1801 std::shared_ptr<Ledger const> const& newLCL)
1802{
1803 // set the newLCL as our last closed ledger -- this is abnormal code
1804 JLOG(m_journal.error())
1805 << "JUMP last closed ledger to " << newLCL->info().hash;
1806
1807 clearNeedNetworkLedger();
1808
1809 // Update fee computations.
1810 app_.getTxQ().processClosedLedger(app_, *newLCL, true);
1811
1812 // Caller must own master lock
1813 {
1814 // Apply tx in old open ledger to new
1815 // open ledger. Then apply local tx.
1816
1817 auto retries = m_localTX->getTxSet();
1818 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1820 if (lastVal)
1821 rules = makeRulesGivenLedger(*lastVal, app_.config().features);
1822 else
1823 rules.emplace(app_.config().features);
1824 app_.openLedger().accept(
1825 app_,
1826 *rules,
1827 newLCL,
1828 OrderedTxs({}),
1829 false,
1830 retries,
1831 tapNONE,
1832 "jump",
1833 [&](OpenView& view, beast::Journal j) {
1834 // Stuff the ledger with transactions from the queue.
1835 return app_.getTxQ().accept(app_, view);
1836 });
1837 }
1838
1839 m_ledgerMaster.switchLCL(newLCL);
1840
1841 protocol::TMStatusChange s;
1842 s.set_newevent(protocol::neSWITCHED_LEDGER);
1843 s.set_ledgerseq(newLCL->info().seq);
1844 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1845 s.set_ledgerhashprevious(
1846 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
1847 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
1848
1849 app_.overlay().foreach(
1850 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
1851}
1852
1853bool
1854NetworkOPsImp::beginConsensus(
1855 uint256 const& networkClosed,
1857{
1858 XRPL_ASSERT(
1859 networkClosed.isNonZero(),
1860 "ripple::NetworkOPsImp::beginConsensus : nonzero input");
1861
1862 auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
1863
1864 JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
1865 << " with LCL " << closingInfo.parentHash;
1866
1867 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
1868
1869 if (!prevLedger)
1870 {
1871 // this shouldn't happen unless we jump ledgers
1872 if (mMode == OperatingMode::FULL)
1873 {
1874 JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
1875 setMode(OperatingMode::TRACKING);
1876 CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
1877 }
1878
1879 CLOG(clog) << "beginConsensus no previous ledger. ";
1880 return false;
1881 }
1882
1883 XRPL_ASSERT(
1884 prevLedger->info().hash == closingInfo.parentHash,
1885 "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
1886 "parent");
1887 XRPL_ASSERT(
1888 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
1889 "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
1890 "hash");
1891
1892 if (prevLedger->rules().enabled(featureNegativeUNL))
1893 app_.validators().setNegativeUNL(prevLedger->negativeUNL());
1894 TrustChanges const changes = app_.validators().updateTrusted(
1895 app_.getValidations().getCurrentNodeIDs(),
1896 closingInfo.parentCloseTime,
1897 *this,
1898 app_.overlay(),
1899 app_.getHashRouter());
1900
1901 if (!changes.added.empty() || !changes.removed.empty())
1902 {
1903 app_.getValidations().trustChanged(changes.added, changes.removed);
1904 // Update the AmendmentTable so it tracks the current validators.
1905 app_.getAmendmentTable().trustChanged(
1906 app_.validators().getQuorumKeys().second);
1907 }
1908
1909 mConsensus.startRound(
1910 app_.timeKeeper().closeTime(),
1911 networkClosed,
1912 prevLedger,
1913 changes.removed,
1914 changes.added,
1915 clog);
1916
1917 const ConsensusPhase currPhase = mConsensus.phase();
1918 if (mLastConsensusPhase != currPhase)
1919 {
1920 reportConsensusStateChange(currPhase);
1921 mLastConsensusPhase = currPhase;
1922 }
1923
1924 JLOG(m_journal.debug()) << "Initiating consensus engine";
1925 return true;
1926}
1927
1928bool
1929NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
1930{
1931 auto const& peerKey = peerPos.publicKey();
1932 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
1933 {
1934 // Could indicate a operator misconfiguration where two nodes are
1935 // running with the same validator key configured, so this isn't fatal,
1936 // and it doesn't necessarily indicate peer misbehavior. But since this
1937 // is a trusted message, it could be a very big deal. Either way, we
1938 // don't want to relay the proposal. Note that the byzantine behavior
1939 // detection in handleNewValidation will notify other peers.
1940 //
1941 // Another, innocuous explanation is unusual message routing and delays,
1942 // causing this node to receive its own messages back.
1943 JLOG(m_journal.error())
1944 << "Received a proposal signed by MY KEY from a peer. This may "
1945 "indicate a misconfiguration where another node has the same "
1946 "validator key, or may be caused by unusual message routing and "
1947 "delays.";
1948 return false;
1949 }
1950
1951 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
1952}
1953
1954void
1955NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
1956{
1957 // We now have an additional transaction set
1958 // either created locally during the consensus process
1959 // or acquired from a peer
1960
1961 // Inform peers we have this set
1962 protocol::TMHaveTransactionSet msg;
1963 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
1964 msg.set_status(protocol::tsHAVE);
1965 app_.overlay().foreach(
1966 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
1967
1968 // We acquired it because consensus asked us to
1969 if (fromAcquire)
1970 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
1971}
1972
1973void
1974NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
1975{
1976 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
1977
1978 for (auto const& it : app_.overlay().getActivePeers())
1979 {
1980 if (it && (it->getClosedLedgerHash() == deadLedger))
1981 {
1982 JLOG(m_journal.trace()) << "Killing obsolete peer status";
1983 it->cycleStatus();
1984 }
1985 }
1986
1987 uint256 networkClosed;
1988 bool ledgerChange =
1989 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
1990
1991 if (networkClosed.isZero())
1992 {
1993 CLOG(clog) << "endConsensus last closed ledger is zero. ";
1994 return;
1995 }
1996
1997 // WRITEME: Unless we are in FULL and in the process of doing a consensus,
1998 // we must count how many nodes share our LCL, how many nodes disagree with
1999 // our LCL, and how many validations our LCL has. We also want to check
2000 // timing to make sure there shouldn't be a newer LCL. We need this
2001 // information to do the next three tests.
2002
2003 if (((mMode == OperatingMode::CONNECTED) ||
2004 (mMode == OperatingMode::SYNCING)) &&
2005 !ledgerChange)
2006 {
2007 // Count number of peers that agree with us and UNL nodes whose
2008 // validations we have for LCL. If the ledger is good enough, go to
2009 // TRACKING - TODO
2010 if (!needNetworkLedger_)
2011 setMode(OperatingMode::TRACKING);
2012 }
2013
2014 if (((mMode == OperatingMode::CONNECTED) ||
2015 (mMode == OperatingMode::TRACKING)) &&
2016 !ledgerChange)
2017 {
2018 // check if the ledger is good enough to go to FULL
2019 // Note: Do not go to FULL if we don't have the previous ledger
2020 // check if the ledger is bad enough to go to CONNECTE D -- TODO
2021 auto current = m_ledgerMaster.getCurrentLedger();
2022 if (app_.timeKeeper().now() < (current->info().parentCloseTime +
2023 2 * current->info().closeTimeResolution))
2024 {
2025 setMode(OperatingMode::FULL);
2026 }
2027 }
2028
2029 beginConsensus(networkClosed, clog);
2030}
2031
2032void
2033NetworkOPsImp::consensusViewChange()
2034{
2035 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2036 {
2037 setMode(OperatingMode::CONNECTED);
2038 }
2039}
2040
2041void
2042NetworkOPsImp::pubManifest(Manifest const& mo)
2043{
2044 // VFALCO consider std::shared_mutex
2045 std::lock_guard sl(mSubLock);
2046
2047 if (!mStreamMaps[sManifests].empty())
2048 {
2050
2051 jvObj[jss::type] = "manifestReceived";
2052 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
2053 if (mo.signingKey)
2054 jvObj[jss::signing_key] =
2055 toBase58(TokenType::NodePublic, *mo.signingKey);
2056 jvObj[jss::seq] = Json::UInt(mo.sequence);
2057 if (auto sig = mo.getSignature())
2058 jvObj[jss::signature] = strHex(*sig);
2059 jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
2060 if (!mo.domain.empty())
2061 jvObj[jss::domain] = mo.domain;
2062 jvObj[jss::manifest] = strHex(mo.serialized);
2063
2064 for (auto i = mStreamMaps[sManifests].begin();
2065 i != mStreamMaps[sManifests].end();)
2066 {
2067 if (auto p = i->second.lock())
2068 {
2069 p->send(jvObj, true);
2070 ++i;
2071 }
2072 else
2073 {
2074 i = mStreamMaps[sManifests].erase(i);
2075 }
2076 }
2077 }
2078}
2079
// Snapshot of the fee state published on the "server" stream; compared
// (via operator!=) against the previous snapshot to decide whether a new
// serverStatus message must be sent.
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    XRPAmount fee,
    TxQ::Metrics&& escalationMetrics,
    LoadFeeTrack const& loadFeeTrack)
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , baseFee{fee}
    , em{std::move(escalationMetrics)}
{
}
2090
2091bool
2093 NetworkOPsImp::ServerFeeSummary const& b) const
2094{
2095 if (loadFactorServer != b.loadFactorServer ||
2096 loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
2097 em.has_value() != b.em.has_value())
2098 return true;
2099
2100 if (em && b.em)
2101 {
2102 return (
2103 em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
2104 em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
2105 em->referenceFeeLevel != b.em->referenceFeeLevel);
2106 }
2107
2108 return false;
2109}
2110
// Clamp a 64-bit value to the 32-bit range, since JSON integral fields
// are limited to 32 bits.
// NOTE(review): the signature and max32 lines were lost in extraction
// and have been reconstructed -- confirm against upstream.
static std::uint32_t
trunc32(std::uint64_t v)
{
    constexpr std::uint64_t max32 = std::numeric_limits<std::uint32_t>::max();

    return std::min(max32, v);
};
2119
2120void
2122{
2123 // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
2124 // list into a local array while holding the lock then release
2125 // the lock and call send on everyone.
2126 //
2128
2129 if (!mStreamMaps[sServer].empty())
2130 {
2132
2134 app_.openLedger().current()->fees().base,
2136 app_.getFeeTrack()};
2137
2138 jvObj[jss::type] = "serverStatus";
2139 jvObj[jss::server_status] = strOperatingMode();
2140 jvObj[jss::load_base] = f.loadBaseServer;
2141 jvObj[jss::load_factor_server] = f.loadFactorServer;
2142 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2143
2144 if (f.em)
2145 {
2146 auto const loadFactor = std::max(
2147 safe_cast<std::uint64_t>(f.loadFactorServer),
2148 mulDiv(
2149 f.em->openLedgerFeeLevel,
2150 f.loadBaseServer,
2151 f.em->referenceFeeLevel)
2153
2154 jvObj[jss::load_factor] = trunc32(loadFactor);
2155 jvObj[jss::load_factor_fee_escalation] =
2156 f.em->openLedgerFeeLevel.jsonClipped();
2157 jvObj[jss::load_factor_fee_queue] =
2158 f.em->minProcessingFeeLevel.jsonClipped();
2159 jvObj[jss::load_factor_fee_reference] =
2160 f.em->referenceFeeLevel.jsonClipped();
2161 }
2162 else
2163 jvObj[jss::load_factor] = f.loadFactorServer;
2164
2165 mLastFeeSummary = f;
2166
2167 for (auto i = mStreamMaps[sServer].begin();
2168 i != mStreamMaps[sServer].end();)
2169 {
2170 InfoSub::pointer p = i->second.lock();
2171
2172 // VFALCO TODO research the possibility of using thread queues and
2173 // linearizing the deletion of subscribers with the
2174 // sending of JSON data.
2175 if (p)
2176 {
2177 p->send(jvObj, true);
2178 ++i;
2179 }
2180 else
2181 {
2182 i = mStreamMaps[sServer].erase(i);
2183 }
2184 }
2185 }
2186}
2187
2188void
2190{
2192
2193 auto& streamMap = mStreamMaps[sConsensusPhase];
2194 if (!streamMap.empty())
2195 {
2197 jvObj[jss::type] = "consensusPhase";
2198 jvObj[jss::consensus] = to_string(phase);
2199
2200 for (auto i = streamMap.begin(); i != streamMap.end();)
2201 {
2202 if (auto p = i->second.lock())
2203 {
2204 p->send(jvObj, true);
2205 ++i;
2206 }
2207 else
2208 {
2209 i = streamMap.erase(i);
2210 }
2211 }
2212 }
2213}
2214
2215void
2217{
2218 // VFALCO consider std::shared_mutex
2220
2221 if (!mStreamMaps[sValidations].empty())
2222 {
2224
2225 auto const signerPublic = val->getSignerPublic();
2226
2227 jvObj[jss::type] = "validationReceived";
2228 jvObj[jss::validation_public_key] =
2229 toBase58(TokenType::NodePublic, signerPublic);
2230 jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
2231 jvObj[jss::signature] = strHex(val->getSignature());
2232 jvObj[jss::full] = val->isFull();
2233 jvObj[jss::flags] = val->getFlags();
2234 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2235 jvObj[jss::data] = strHex(val->getSerializer().slice());
2236
2237 if (auto version = (*val)[~sfServerVersion])
2238 jvObj[jss::server_version] = std::to_string(*version);
2239
2240 if (auto cookie = (*val)[~sfCookie])
2241 jvObj[jss::cookie] = std::to_string(*cookie);
2242
2243 if (auto hash = (*val)[~sfValidatedHash])
2244 jvObj[jss::validated_hash] = strHex(*hash);
2245
2246 auto const masterKey =
2247 app_.validatorManifests().getMasterKey(signerPublic);
2248
2249 if (masterKey != signerPublic)
2250 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
2251
2252 // NOTE *seq is a number, but old API versions used string. We replace
2253 // number with a string using MultiApiJson near end of this function
2254 if (auto const seq = (*val)[~sfLedgerSequence])
2255 jvObj[jss::ledger_index] = *seq;
2256
2257 if (val->isFieldPresent(sfAmendments))
2258 {
2259 jvObj[jss::amendments] = Json::Value(Json::arrayValue);
2260 for (auto const& amendment : val->getFieldV256(sfAmendments))
2261 jvObj[jss::amendments].append(to_string(amendment));
2262 }
2263
2264 if (auto const closeTime = (*val)[~sfCloseTime])
2265 jvObj[jss::close_time] = *closeTime;
2266
2267 if (auto const loadFee = (*val)[~sfLoadFee])
2268 jvObj[jss::load_fee] = *loadFee;
2269
2270 if (auto const baseFee = val->at(~sfBaseFee))
2271 jvObj[jss::base_fee] = static_cast<double>(*baseFee);
2272
2273 if (auto const reserveBase = val->at(~sfReserveBase))
2274 jvObj[jss::reserve_base] = *reserveBase;
2275
2276 if (auto const reserveInc = val->at(~sfReserveIncrement))
2277 jvObj[jss::reserve_inc] = *reserveInc;
2278
2279 // (The ~ operator converts the Proxy to a std::optional, which
2280 // simplifies later operations)
2281 if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2282 baseFeeXRP && baseFeeXRP->native())
2283 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2284
2285 if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2286 reserveBaseXRP && reserveBaseXRP->native())
2287 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2288
2289 if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2290 reserveIncXRP && reserveIncXRP->native())
2291 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2292
2293 // NOTE Use MultiApiJson to publish two slightly different JSON objects
2294 // for consumers supporting different API versions
2295 MultiApiJson multiObj{jvObj};
2296 multiObj.visit(
2297 RPC::apiVersion<1>, //
2298 [](Json::Value& jvTx) {
2299 // Type conversion for older API versions to string
2300 if (jvTx.isMember(jss::ledger_index))
2301 {
2302 jvTx[jss::ledger_index] =
2303 std::to_string(jvTx[jss::ledger_index].asUInt());
2304 }
2305 });
2306
2307 for (auto i = mStreamMaps[sValidations].begin();
2308 i != mStreamMaps[sValidations].end();)
2309 {
2310 if (auto p = i->second.lock())
2311 {
2312 multiObj.visit(
2313 p->getApiVersion(), //
2314 [&](Json::Value const& jv) { p->send(jv, true); });
2315 ++i;
2316 }
2317 else
2318 {
2319 i = mStreamMaps[sValidations].erase(i);
2320 }
2321 }
2322 }
2323}
2324
2325void
2327{
2329
2330 if (!mStreamMaps[sPeerStatus].empty())
2331 {
2332 Json::Value jvObj(func());
2333
2334 jvObj[jss::type] = "peerStatusChange";
2335
2336 for (auto i = mStreamMaps[sPeerStatus].begin();
2337 i != mStreamMaps[sPeerStatus].end();)
2338 {
2339 InfoSub::pointer p = i->second.lock();
2340
2341 if (p)
2342 {
2343 p->send(jvObj, true);
2344 ++i;
2345 }
2346 else
2347 {
2348 i = mStreamMaps[sPeerStatus].erase(i);
2349 }
2350 }
2351 }
2352}
2353
2354void
2356{
2357 using namespace std::chrono_literals;
2358 if (om == OperatingMode::CONNECTED)
2359 {
2362 }
2363 else if (om == OperatingMode::SYNCING)
2364 {
2367 }
2368
2369 if ((om > OperatingMode::CONNECTED) && isBlocked())
2371
2372 if (mMode == om)
2373 return;
2374
2375 mMode = om;
2376
2377 accounting_.mode(om);
2378
2379 JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
2380 pubServer();
2381}
2382
2383bool
2386 std::string const& source)
2387{
2388 JLOG(m_journal.trace())
2389 << "recvValidation " << val->getLedgerHash() << " from " << source;
2390
2392 BypassAccept bypassAccept = BypassAccept::no;
2393 try
2394 {
2395 if (pendingValidations_.contains(val->getLedgerHash()))
2396 bypassAccept = BypassAccept::yes;
2397 else
2398 pendingValidations_.insert(val->getLedgerHash());
2399 scope_unlock unlock(lock);
2400 handleNewValidation(app_, val, source, bypassAccept, m_journal);
2401 }
2402 catch (std::exception const& e)
2403 {
2404 JLOG(m_journal.warn())
2405 << "Exception thrown for handling new validation "
2406 << val->getLedgerHash() << ": " << e.what();
2407 }
2408 catch (...)
2409 {
2410 JLOG(m_journal.warn())
2411 << "Unknown exception thrown for handling new validation "
2412 << val->getLedgerHash();
2413 }
2414 if (bypassAccept == BypassAccept::no)
2415 {
2416 pendingValidations_.erase(val->getLedgerHash());
2417 }
2418 lock.unlock();
2419
2420 pubValidation(val);
2421
2422 JLOG(m_journal.debug()) << [this, &val]() -> auto {
2424 ss << "VALIDATION: " << val->render() << " master_key: ";
2425 auto master = app_.validators().getTrustedKey(val->getSignerPublic());
2426 if (master)
2427 {
2428 ss << toBase58(TokenType::NodePublic, *master);
2429 }
2430 else
2431 {
2432 ss << "none";
2433 }
2434 return ss.str();
2435 }();
2436
2437 // We will always relay trusted validations; if configured, we will
2438 // also relay all untrusted validations.
2439 return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2440}
2441
2444{
2445 return mConsensus.getJson(true);
2446}
2447
2449NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2450{
2452
2453 // System-level warnings
2454 {
2455 Json::Value warnings{Json::arrayValue};
2456 if (isAmendmentBlocked())
2457 {
2458 Json::Value& w = warnings.append(Json::objectValue);
2459 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2460 w[jss::message] =
2461 "This server is amendment blocked, and must be updated to be "
2462 "able to stay in sync with the network.";
2463 }
2464 if (isUNLBlocked())
2465 {
2466 Json::Value& w = warnings.append(Json::objectValue);
2467 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2468 w[jss::message] =
2469 "This server has an expired validator list. validators.txt "
2470 "may be incorrectly configured or some [validator_list_sites] "
2471 "may be unreachable.";
2472 }
2473 if (admin && isAmendmentWarned())
2474 {
2475 Json::Value& w = warnings.append(Json::objectValue);
2476 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2477 w[jss::message] =
2478 "One or more unsupported amendments have reached majority. "
2479 "Upgrade to the latest version before they are activated "
2480 "to avoid being amendment blocked.";
2481 if (auto const expected =
2483 {
2484 auto& d = w[jss::details] = Json::objectValue;
2485 d[jss::expected_date] = expected->time_since_epoch().count();
2486 d[jss::expected_date_UTC] = to_string(*expected);
2487 }
2488 }
2489
2490 if (warnings.size())
2491 info[jss::warnings] = std::move(warnings);
2492 }
2493
2494 // hostid: unique string describing the machine
2495 if (human)
2496 info[jss::hostid] = getHostId(admin);
2497
2498 // domain: if configured with a domain, report it:
2499 if (!app_.config().SERVER_DOMAIN.empty())
2500 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2501
2502 info[jss::build_version] = BuildInfo::getVersionString();
2503
2504 info[jss::server_state] = strOperatingMode(admin);
2505
2506 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2508
2510 info[jss::network_ledger] = "waiting";
2511
2512 info[jss::validation_quorum] =
2513 static_cast<Json::UInt>(app_.validators().quorum());
2514
2515 if (admin)
2516 {
2517 switch (app_.config().NODE_SIZE)
2518 {
2519 case 0:
2520 info[jss::node_size] = "tiny";
2521 break;
2522 case 1:
2523 info[jss::node_size] = "small";
2524 break;
2525 case 2:
2526 info[jss::node_size] = "medium";
2527 break;
2528 case 3:
2529 info[jss::node_size] = "large";
2530 break;
2531 case 4:
2532 info[jss::node_size] = "huge";
2533 break;
2534 }
2535
2536 auto when = app_.validators().expires();
2537
2538 if (!human)
2539 {
2540 if (when)
2541 info[jss::validator_list_expires] =
2542 safe_cast<Json::UInt>(when->time_since_epoch().count());
2543 else
2544 info[jss::validator_list_expires] = 0;
2545 }
2546 else
2547 {
2548 auto& x = (info[jss::validator_list] = Json::objectValue);
2549
2550 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2551
2552 if (when)
2553 {
2554 if (*when == TimeKeeper::time_point::max())
2555 {
2556 x[jss::expiration] = "never";
2557 x[jss::status] = "active";
2558 }
2559 else
2560 {
2561 x[jss::expiration] = to_string(*when);
2562
2563 if (*when > app_.timeKeeper().now())
2564 x[jss::status] = "active";
2565 else
2566 x[jss::status] = "expired";
2567 }
2568 }
2569 else
2570 {
2571 x[jss::status] = "unknown";
2572 x[jss::expiration] = "unknown";
2573 }
2574 }
2575
2576#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2577 {
2578 auto& x = (info[jss::git] = Json::objectValue);
2579#ifdef GIT_COMMIT_HASH
2580 x[jss::hash] = GIT_COMMIT_HASH;
2581#endif
2582#ifdef GIT_BRANCH
2583 x[jss::branch] = GIT_BRANCH;
2584#endif
2585 }
2586#endif
2587 }
2588 info[jss::io_latency_ms] =
2589 static_cast<Json::UInt>(app_.getIOLatency().count());
2590
2591 if (admin)
2592 {
2593 if (auto const localPubKey = app_.validators().localPublicKey();
2594 localPubKey && app_.getValidationPublicKey())
2595 {
2596 info[jss::pubkey_validator] =
2597 toBase58(TokenType::NodePublic, localPubKey.value());
2598 }
2599 else
2600 {
2601 info[jss::pubkey_validator] = "none";
2602 }
2603 }
2604
2605 if (counters)
2606 {
2607 info[jss::counters] = app_.getPerfLog().countersJson();
2608
2609 Json::Value nodestore(Json::objectValue);
2610 app_.getNodeStore().getCountsJson(nodestore);
2611 info[jss::counters][jss::nodestore] = nodestore;
2612 info[jss::current_activities] = app_.getPerfLog().currentJson();
2613 }
2614
2615 info[jss::pubkey_node] =
2617
2618 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2619
2621 info[jss::amendment_blocked] = true;
2622
2623 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2624
2625 if (fp != 0)
2626 info[jss::fetch_pack] = Json::UInt(fp);
2627
2628 info[jss::peers] = Json::UInt(app_.overlay().size());
2629
2630 Json::Value lastClose = Json::objectValue;
2631 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2632
2633 if (human)
2634 {
2635 lastClose[jss::converge_time_s] =
2637 }
2638 else
2639 {
2640 lastClose[jss::converge_time] =
2642 }
2643
2644 info[jss::last_close] = lastClose;
2645
2646 // info[jss::consensus] = mConsensus.getJson();
2647
2648 if (admin)
2649 info[jss::load] = m_job_queue.getJson();
2650
2651 if (auto const netid = app_.overlay().networkID())
2652 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2653
2654 auto const escalationMetrics =
2656
2657 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2658 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2659 /* Scale the escalated fee level to unitless "load factor".
2660 In practice, this just strips the units, but it will continue
2661 to work correctly if either base value ever changes. */
2662 auto const loadFactorFeeEscalation =
2663 mulDiv(
2664 escalationMetrics.openLedgerFeeLevel,
2665 loadBaseServer,
2666 escalationMetrics.referenceFeeLevel)
2668
2669 auto const loadFactor = std::max(
2670 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2671
2672 if (!human)
2673 {
2674 info[jss::load_base] = loadBaseServer;
2675 info[jss::load_factor] = trunc32(loadFactor);
2676 info[jss::load_factor_server] = loadFactorServer;
2677
2678 /* Json::Value doesn't support uint64, so clamp to max
2679 uint32 value. This is mostly theoretical, since there
2680 probably isn't enough extant XRP to drive the factor
2681 that high.
2682 */
2683 info[jss::load_factor_fee_escalation] =
2684 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2685 info[jss::load_factor_fee_queue] =
2686 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2687 info[jss::load_factor_fee_reference] =
2688 escalationMetrics.referenceFeeLevel.jsonClipped();
2689 }
2690 else
2691 {
2692 info[jss::load_factor] =
2693 static_cast<double>(loadFactor) / loadBaseServer;
2694
2695 if (loadFactorServer != loadFactor)
2696 info[jss::load_factor_server] =
2697 static_cast<double>(loadFactorServer) / loadBaseServer;
2698
2699 if (admin)
2700 {
2702 if (fee != loadBaseServer)
2703 info[jss::load_factor_local] =
2704 static_cast<double>(fee) / loadBaseServer;
2705 fee = app_.getFeeTrack().getRemoteFee();
2706 if (fee != loadBaseServer)
2707 info[jss::load_factor_net] =
2708 static_cast<double>(fee) / loadBaseServer;
2709 fee = app_.getFeeTrack().getClusterFee();
2710 if (fee != loadBaseServer)
2711 info[jss::load_factor_cluster] =
2712 static_cast<double>(fee) / loadBaseServer;
2713 }
2714 if (escalationMetrics.openLedgerFeeLevel !=
2715 escalationMetrics.referenceFeeLevel &&
2716 (admin || loadFactorFeeEscalation != loadFactor))
2717 info[jss::load_factor_fee_escalation] =
2718 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2719 escalationMetrics.referenceFeeLevel);
2720 if (escalationMetrics.minProcessingFeeLevel !=
2721 escalationMetrics.referenceFeeLevel)
2722 info[jss::load_factor_fee_queue] =
2723 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2724 escalationMetrics.referenceFeeLevel);
2725 }
2726
2727 bool valid = false;
2728 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2729
2730 if (lpClosed)
2731 valid = true;
2732 else
2733 lpClosed = m_ledgerMaster.getClosedLedger();
2734
2735 if (lpClosed)
2736 {
2737 XRPAmount const baseFee = lpClosed->fees().base;
2739 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2740 l[jss::hash] = to_string(lpClosed->info().hash);
2741
2742 if (!human)
2743 {
2744 l[jss::base_fee] = baseFee.jsonClipped();
2745 l[jss::reserve_base] =
2746 lpClosed->fees().accountReserve(0).jsonClipped();
2747 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2748 l[jss::close_time] = Json::Value::UInt(
2749 lpClosed->info().closeTime.time_since_epoch().count());
2750 }
2751 else
2752 {
2753 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2754 l[jss::reserve_base_xrp] =
2755 lpClosed->fees().accountReserve(0).decimalXRP();
2756 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2757
2758 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2759 std::abs(closeOffset.count()) >= 60)
2760 l[jss::close_time_offset] =
2761 static_cast<std::uint32_t>(closeOffset.count());
2762
2763 constexpr std::chrono::seconds highAgeThreshold{1000000};
2765 {
2766 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2767 l[jss::age] =
2768 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2769 }
2770 else
2771 {
2772 auto lCloseTime = lpClosed->info().closeTime;
2773 auto closeTime = app_.timeKeeper().closeTime();
2774 if (lCloseTime <= closeTime)
2775 {
2776 using namespace std::chrono_literals;
2777 auto age = closeTime - lCloseTime;
2778 l[jss::age] =
2779 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2780 }
2781 }
2782 }
2783
2784 if (valid)
2785 info[jss::validated_ledger] = l;
2786 else
2787 info[jss::closed_ledger] = l;
2788
2789 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2790 if (!lpPublished)
2791 info[jss::published_ledger] = "none";
2792 else if (lpPublished->info().seq != lpClosed->info().seq)
2793 info[jss::published_ledger] = lpPublished->info().seq;
2794 }
2795
2796 accounting_.json(info);
2797 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2798 info[jss::jq_trans_overflow] =
2800 info[jss::peer_disconnects] =
2802 info[jss::peer_disconnects_resources] =
2804
2805 // This array must be sorted in increasing order.
2806 static constexpr std::array<std::string_view, 7> protocols{
2807 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2808 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2809 {
2811 for (auto const& port : app_.getServerHandler().setup().ports)
2812 {
2813 // Don't publish admin ports for non-admin users
2814 if (!admin &&
2815 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2816 port.admin_user.empty() && port.admin_password.empty()))
2817 continue;
2820 std::begin(port.protocol),
2821 std::end(port.protocol),
2822 std::begin(protocols),
2823 std::end(protocols),
2824 std::back_inserter(proto));
2825 if (!proto.empty())
2826 {
2827 auto& jv = ports.append(Json::Value(Json::objectValue));
2828 jv[jss::port] = std::to_string(port.port);
2829 jv[jss::protocol] = Json::Value{Json::arrayValue};
2830 for (auto const& p : proto)
2831 jv[jss::protocol].append(p);
2832 }
2833 }
2834
2835 if (app_.config().exists(SECTION_PORT_GRPC))
2836 {
2837 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
2838 auto const optPort = grpcSection.get("port");
2839 if (optPort && grpcSection.get("ip"))
2840 {
2841 auto& jv = ports.append(Json::Value(Json::objectValue));
2842 jv[jss::port] = *optPort;
2843 jv[jss::protocol] = Json::Value{Json::arrayValue};
2844 jv[jss::protocol].append("grpc");
2845 }
2846 }
2847 info[jss::ports] = std::move(ports);
2848 }
2849
2850 return info;
2851}
2852
// NOTE(review): the qualified function name (source line 2854) and the single
// body statement (line 2856) are missing from this extract. Given the adjacent
// getInboundLedgers() accessor below, this is presumably the ledger-fetch
// reset/clear entry point — confirm against the full source before editing.
2853void
2855{
2857}
2858
// Returns the InboundLedgers subsystem's Json status report (ledger
// acquisition activity). The return type and qualified function name
// (source lines 2859-2860) are not visible in this extract.
2861{
2862    return app_.getInboundLedgers().getInfo();
2863}
2864
// Publish a proposed (not yet validated) transaction to every subscriber of
// the real-time transactions stream, then fan out to per-account real-time
// subscribers via pubProposedAccountTransaction().
//
//   ledger      - view the transaction was applied against
//   transaction - the transaction being announced
//   result      - provisional engine result (TER)
//
// NOTE(review): the qualified function name (source line 2866) and the
// stream-map lock acquisition (line 2875) are missing from this extract.
2865void
2867    std::shared_ptr<ReadView const> const& ledger,
2868    std::shared_ptr<STTx const> const& transaction,
2869    TER result)
2870{
    // Build the notification once; MultiApiJson carries one rendering per
    // supported API version, selected per subscriber below.
2871    MultiApiJson jvObj =
2872        transJson(transaction, result, false, ledger, std::nullopt);
2873
2874    {
2876
2877        auto it = mStreamMaps[sRTTransactions].begin();
2878        while (it != mStreamMaps[sRTTransactions].end())
2879        {
            // Subscribers are held as weak_ptr; a failed lock() means the
            // listener is gone, so its entry is pruned in place.
2880            InfoSub::pointer p = it->second.lock();
2881
2882            if (p)
2883            {
2884                jvObj.visit(
2885                    p->getApiVersion(), //
2886                    [&](Json::Value const& jv) { p->send(jv, true); });
2887                ++it;
2888            }
2889            else
2890            {
2891                it = mStreamMaps[sRTTransactions].erase(it);
2892            }
2893        }
2894    }
2895
    // Also notify accounts subscribed to real-time account streams.
2896    pubProposedAccountTransaction(ledger, transaction, result);
2897}
2898
// Publish an accepted (validated) ledger: send a "ledgerClosed" summary to
// ledger-stream subscribers, book changes to book_changes subscribers, start
// any account_history subscriptions that were waiting for a first validated
// ledger, and finally publish every transaction contained in the ledger.
//
// NOTE(review): several source lines are missing from this extract (2900
// qualified name, 2905 alpAccepted declaration, 2923/2927 lock + jvObj
// declaration, 2945/2948 validated-ledgers condition and value, 2996-2997
// subAccountHistoryStart call, 3009 pubValidatedTransaction call).
2899void
2901{
2902    // Ledgers are published only when they acquire sufficient validations
2903    // Holes are filled across connection loss or other catastrophe
2904
    // Fetch the AcceptedLedger wrapper from the cache, or build and
    // canonicalize it if this is the first time we see this ledger.
2906        app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
2907    if (!alpAccepted)
2908    {
2909        alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
2910        app_.getAcceptedLedgerCache().canonicalize_replace_client(
2911            lpAccepted->info().hash, alpAccepted);
2912    }
2913
2914    XRPL_ASSERT(
2915        alpAccepted->getLedger().get() == lpAccepted.get(),
2916        "ripple::NetworkOPsImp::pubLedger : accepted input");
2917
2918    {
2919        JLOG(m_journal.debug())
2920            << "Publishing ledger " << lpAccepted->info().seq << " "
2921            << lpAccepted->info().hash;
2922
2924
2925        if (!mStreamMaps[sLedger].empty())
2926        {
2928
            // Summary message delivered to every "ledger" stream subscriber.
2929            jvObj[jss::type] = "ledgerClosed";
2930            jvObj[jss::ledger_index] = lpAccepted->info().seq;
2931            jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
2932            jvObj[jss::ledger_time] = Json::Value::UInt(
2933                lpAccepted->info().closeTime.time_since_epoch().count());
2934
            // fee_ref is deprecated; only reported before the XRPFees
            // amendment is enabled on this ledger.
2935            if (!lpAccepted->rules().enabled(featureXRPFees))
2936                jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
2937            jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
2938            jvObj[jss::reserve_base] =
2939                lpAccepted->fees().accountReserve(0).jsonClipped();
2940            jvObj[jss::reserve_inc] =
2941                lpAccepted->fees().increment.jsonClipped();
2942
2943            jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
2944
2946            {
2947                jvObj[jss::validated_ledgers] =
2949            }
2950
2951            auto it = mStreamMaps[sLedger].begin();
2952            while (it != mStreamMaps[sLedger].end())
2953            {
                // Prune dead (expired weak_ptr) subscribers as we iterate.
2954                InfoSub::pointer p = it->second.lock();
2955                if (p)
2956                {
2957                    p->send(jvObj, true);
2958                    ++it;
2959                }
2960                else
2961                    it = mStreamMaps[sLedger].erase(it);
2962            }
2963        }
2964
2965        if (!mStreamMaps[sBookChanges].empty())
2966        {
2967            Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
2968
2969            auto it = mStreamMaps[sBookChanges].begin();
2970            while (it != mStreamMaps[sBookChanges].end())
2971            {
2972                InfoSub::pointer p = it->second.lock();
2973                if (p)
2974                {
2975                    p->send(jvObj, true);
2976                    ++it;
2977                }
2978                else
2979                    it = mStreamMaps[sBookChanges].erase(it);
2980            }
2981        }
2982
2983        {
            // On the very first published ledger, kick off account_history
            // subscriptions registered before any validated ledger existed
            // (identified by separationLedgerSeq_ == 0).
2984            static bool firstTime = true;
2985            if (firstTime)
2986            {
2987                // First validated ledger, start delayed SubAccountHistory
2988                firstTime = false;
2989                for (auto& outer : mSubAccountHistory)
2990                {
2991                    for (auto& inner : outer.second)
2992                    {
2993                        auto& subInfo = inner.second;
2994                        if (subInfo.index_->separationLedgerSeq_ == 0)
2995                        {
2997                                alpAccepted->getLedger(), subInfo);
2998                        }
2999                    }
3000                }
3001            }
3002        }
3003    }
3004
3005    // Don't lock since pubAcceptedTransaction is locking.
3006    for (auto const& accTx : *alpAccepted)
3007    {
3008        JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
3010            lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3011    }
3012}
3013
// Compare a fresh fee snapshot (base fee from the current open ledger plus
// load-fee-track state) against the last published summary; if anything
// changed, queue a client job that pushes a server-status update to
// subscribers via pubServer().
//
// NOTE(review): source lines 3015 (name), 3017/3019 (remaining snapshot
// fields) and 3025 (job-queue addJob call) are missing from this extract.
3014void
3016{
3018        app_.openLedger().current()->fees().base,
3020        app_.getFeeTrack()};
3021
3022    // only schedule the job if something has changed
3023    if (f != mLastFeeSummary)
3024    {
3026            jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
3027                pubServer();
3028            });
3029    }
3030}
3031
// Queue a client job that publishes the new consensus phase to consensus
// stream subscribers. The phase is captured by value so the job remains
// valid regardless of later state changes.
// NOTE(review): lines 3033/3035-3036 (name and addJob call head) are
// missing from this extract.
3032void
3034{
3037        "reportConsensusStateChange->pubConsensus",
3038        [this, phase]() { pubConsensus(phase); });
3039}
3040
// Forward a sweep request to the local-transaction store, letting it drop
// held local transactions against the given ledger view. (The qualified
// name on source line 3042 is missing from this extract.)
3041inline void
3043{
3044    m_localTX->sweep(view);
3045}
// Number of transactions currently held in the local transaction store.
// (The qualified name on source line 3047 is missing from this extract.)
3046inline std::size_t
3048{
3049    return m_localTX->size();
3050}
3051
3052// This routine should only be used to publish accepted or validated
3053// transactions.
//
// Build the per-API-version JSON notification for a transaction:
//   transaction - transaction to render
//   result      - engine result (TER) to report
//   validated   - true when the tx is in a closed/validated ledger
//   ledger      - ledger the tx applies to (open ledger when !validated)
// plus an optional metadata reference (its declaration on source line 3060
// is missing from this extract, as are lines 3054-3055, 3062, 3078, 3080,
// 3093, 3134, 3142 and 3145).
3056    std::shared_ptr<STTx const> const& transaction,
3057    TER result,
3058    bool validated,
3059    std::shared_ptr<ReadView const> const& ledger,
3061{
3063    std::string sToken;
3064    std::string sHuman;
3065
3066    transResultInfo(result, sToken, sHuman);
3067
3068    jvObj[jss::type] = "transaction";
3069    // NOTE jvObj is not a finished object for either API version. After
3070    // it's populated, we need to finish it for a specific API version. This is
3071    // done in a loop, near the end of this function.
3072    jvObj[jss::transaction] =
3073        transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3074
3075    if (meta)
3076    {
3077        jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
3079            jvObj[jss::meta], *ledger, transaction, meta->get());
3081            jvObj[jss::meta], transaction, meta->get());
3082    }
3083
3084    // add CTID where the needed data for it exists
3085    if (auto const& lookup = ledger->txRead(transaction->getTransactionID());
3086        lookup.second && lookup.second->isFieldPresent(sfTransactionIndex))
3087    {
3088        uint32_t const txnSeq = lookup.second->getFieldU32(sfTransactionIndex);
        // Prefer the NetworkID carried by the tx itself over the locally
        // configured one, when present.
3089        uint32_t netID = app_.config().NETWORK_ID;
3090        if (transaction->isFieldPresent(sfNetworkID))
3091            netID = transaction->getFieldU32(sfNetworkID);
3092
3094            RPC::encodeCTID(ledger->info().seq, txnSeq, netID);
3095            ctid)
3096            jvObj[jss::ctid] = *ctid;
3097    }
3098    if (!ledger->open())
3099        jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3100
3101    if (validated)
3102    {
3103        jvObj[jss::ledger_index] = ledger->info().seq;
3104        jvObj[jss::transaction][jss::date] =
3105            ledger->info().closeTime.time_since_epoch().count();
3106        jvObj[jss::validated] = true;
3107        jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3108
3109        // WRITEME: Put the account next seq here
3110    }
3111    else
3112    {
3113        jvObj[jss::validated] = false;
3114        jvObj[jss::ledger_current_index] = ledger->info().seq;
3115    }
3116
3117    jvObj[jss::status] = validated ? "closed" : "proposed";
3118    jvObj[jss::engine_result] = sToken;
3119    jvObj[jss::engine_result_code] = result;
3120    jvObj[jss::engine_result_message] = sHuman;
3121
3122    if (transaction->getTxnType() == ttOFFER_CREATE)
3123    {
3124        auto const account = transaction->getAccountID(sfAccount);
3125        auto const amount = transaction->getFieldAmount(sfTakerGets);
3126
3127        // If the offer create is not self funded then add the owner balance
3128        if (account != amount.issue().account)
3129        {
3130            auto const ownerFunds = accountFunds(
3131                *ledger,
3132                account,
3133                amount,
3135                app_.journal("View"));
3136            jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3137        }
3138    }
3139
3140    std::string const hash = to_string(transaction->getTransactionID());
    // Finalize each API-version rendering: v1 keeps "transaction" with an
    // embedded hash; v2+ renames it to "tx_json" and hoists the hash.
3141    MultiApiJson multiObj{jvObj};
3143        multiObj.visit(), //
3144        [&]<unsigned Version>(
3146            RPC::insertDeliverMax(
3147                jvTx[jss::transaction], transaction->getTxnType(), Version);
3148
3149            if constexpr (Version > 1)
3150            {
3151                jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3152                jvTx[jss::hash] = hash;
3153            }
3154            else
3155            {
3156                jvTx[jss::transaction][jss::hash] = hash;
3157            }
3158        });
3159
3160    return multiObj;
3161}
3162
// Publish a transaction from an accepted (validated) ledger to both the
// "transactions" and real-time transaction streams, feed successful
// transactions to the order book processor, and fan out to per-account
// subscribers.
//   last - true when this is the final transaction of the ledger
// NOTE(review): the qualified name (source line 3164) and the stream-map
// lock (line 3177) are missing from this extract.
3163void
3165    std::shared_ptr<ReadView const> const& ledger,
3166    const AcceptedLedgerTx& transaction,
3167    bool last)
3168{
3169    auto const& stTxn = transaction.getTxn();
3170
3171    // Create two different Json objects, for different API versions
3172    auto const metaRef = std::ref(transaction.getMeta());
3173    auto const trResult = transaction.getResult();
3174    MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3175
3176    {
3178
3179        auto it = mStreamMaps[sTransactions].begin();
3180        while (it != mStreamMaps[sTransactions].end())
3181        {
            // Dead subscribers (expired weak_ptr) are pruned in place.
3182            InfoSub::pointer p = it->second.lock();
3183
3184            if (p)
3185            {
3186                jvObj.visit(
3187                    p->getApiVersion(), //
3188                    [&](Json::Value const& jv) { p->send(jv, true); });
3189                ++it;
3190            }
3191            else
3192                it = mStreamMaps[sTransactions].erase(it);
3193        }
3194
        // Validated transactions are also delivered to real-time stream
        // subscribers so they see the final disposition.
3195        it = mStreamMaps[sRTTransactions].begin();
3196
3197        while (it != mStreamMaps[sRTTransactions].end())
3198        {
3199            InfoSub::pointer p = it->second.lock();
3200
3201            if (p)
3202            {
3203                jvObj.visit(
3204                    p->getApiVersion(), //
3205                    [&](Json::Value const& jv) { p->send(jv, true); });
3206                ++it;
3207            }
3208            else
3209                it = mStreamMaps[sRTTransactions].erase(it);
3210        }
3211    }
3212
3213    if (transaction.getResult() == tesSUCCESS)
3214        app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3215
3216    pubAccountTransaction(ledger, transaction, last);
3217}
3218
// Notify per-account subscribers about a validated transaction. Gathers,
// under lock, the set of live real-time / accepted-stream listeners for every
// account the transaction affected, plus any account_history subscriptions
// past their separation point; then (outside the lock scope) renders the
// transaction once per API version and delivers it.
//
// NOTE(review): source lines 3220 (qualified name), 3225 (notify set
// declaration), 3232-3233 (lock), 3235 (third condition of the if) and 3335
// (second operand of the assert comparison) are missing from this extract.
3219void
3221    std::shared_ptr<ReadView const> const& ledger,
3222    AcceptedLedgerTx const& transaction,
3223    bool last)
3224{
3226    int iProposed = 0;
3227    int iAccepted = 0;
3228
3229    std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3230    auto const currLedgerSeq = ledger->seq();
3231    {
3233
3234        if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3236        {
3237            for (auto const& affectedAccount : transaction.getAffected())
3238            {
3239                if (auto simiIt = mSubRTAccount.find(affectedAccount);
3240                    simiIt != mSubRTAccount.end())
3241                {
3242                    auto it = simiIt->second.begin();
3243
3244                    while (it != simiIt->second.end())
3245                    {
3246                        InfoSub::pointer p = it->second.lock();
3247
3248                        if (p)
3249                        {
3250                            notify.insert(p);
3251                            ++it;
3252                            ++iProposed;
3253                        }
3254                        else
3255                            it = simiIt->second.erase(it);
3256                    }
3257                }
3258
3259                if (auto simiIt = mSubAccount.find(affectedAccount);
3260                    simiIt != mSubAccount.end())
3261                {
3262                    auto it = simiIt->second.begin();
3263                    while (it != simiIt->second.end())
3264                    {
3265                        InfoSub::pointer p = it->second.lock();
3266
3267                        if (p)
3268                        {
3269                            notify.insert(p);
3270                            ++it;
3271                            ++iAccepted;
3272                        }
3273                        else
3274                            it = simiIt->second.erase(it);
3275                    }
3276                }
3277
3278                if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3279                    histoIt != mSubAccountHistory.end())
3280                {
3281                    auto& subs = histoIt->second;
3282                    auto it = subs.begin();
3283                    while (it != subs.end())
3284                    {
3285                        SubAccountHistoryInfoWeak const& info = it->second;
                        // Ledgers at/before the separation point are covered
                        // by the historical backfill job, not forward publish.
3286                        if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3287                        {
3288                            ++it;
3289                            continue;
3290                        }
3291
3292                        if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3293                        {
3294                            accountHistoryNotify.emplace_back(
3295                                SubAccountHistoryInfo{isSptr, info.index_});
3296                            ++it;
3297                        }
3298                        else
3299                        {
3300                            it = subs.erase(it);
3301                        }
3302                    }
3303                    if (subs.empty())
3304                        mSubAccountHistory.erase(histoIt);
3305                }
3306            }
3307        }
3308    }
3309
3310    JLOG(m_journal.trace())
3311        << "pubAccountTransaction: " << "proposed=" << iProposed
3312        << ", accepted=" << iAccepted;
3313
3314    if (!notify.empty() || !accountHistoryNotify.empty())
3315    {
3316        auto const& stTxn = transaction.getTxn();
3317
3318        // Create two different Json objects, for different API versions
3319        auto const metaRef = std::ref(transaction.getMeta());
3320        auto const trResult = transaction.getResult();
3321        MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3322
3323        for (InfoSub::ref isrListener : notify)
3324        {
3325            jvObj.visit(
3326                isrListener->getApiVersion(), //
3327                [&](Json::Value const& jv) { isrListener->send(jv, true); });
3328        }
3329
        // Mark the ledger boundary for account_history consumers.
3330        if (last)
3331            jvObj.set(jss::account_history_boundary, true);
3332
3333        XRPL_ASSERT(
3334            jvObj.isMember(jss::account_history_tx_stream) ==
3336            "ripple::NetworkOPsImp::pubAccountTransaction : "
3337            "account_history_tx_stream not set");
3338        for (auto& info : accountHistoryNotify)
3339        {
            // Forward-stream indices count up from 0; the very first tx of a
            // subscription without backfill is flagged as tx_first.
3340            auto& index = info.index_;
3341            if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3342                jvObj.set(jss::account_history_tx_first, true);
3343
3344            jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3345
3346            jvObj.visit(
3347                info.sink_->getApiVersion(), //
3348                [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3349        }
3350    }
3351}
3352
// Notify real-time per-account subscribers about a proposed (unvalidated)
// transaction. Mirrors pubAccountTransaction() but only consults the
// real-time map and bails out early when no one is subscribed.
//
// NOTE(review): source lines 3354 (qualified name), 3356 (tx parameter),
// 3359 (notify set declaration), 3365 (lock), 3371 (third if-condition) and
// 3412 (second operand of the assert comparison) are missing from this
// extract. accountHistoryNotify stays empty on this path; the loop below is
// kept for symmetry with pubAccountTransaction.
3353void
3355    std::shared_ptr<ReadView const> const& ledger,
3357    TER result)
3358{
3360    int iProposed = 0;
3361
3362    std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3363
3364    {
3366
3367        if (mSubRTAccount.empty())
3368            return;
3369
3370        if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3372        {
3373            for (auto const& affectedAccount : tx->getMentionedAccounts())
3374            {
3375                if (auto simiIt = mSubRTAccount.find(affectedAccount);
3376                    simiIt != mSubRTAccount.end())
3377                {
3378                    auto it = simiIt->second.begin();
3379
3380                    while (it != simiIt->second.end())
3381                    {
3382                        InfoSub::pointer p = it->second.lock();
3383
3384                        if (p)
3385                        {
3386                            notify.insert(p);
3387                            ++it;
3388                            ++iProposed;
3389                        }
3390                        else
3391                            it = simiIt->second.erase(it);
3392                    }
3393                }
3394            }
3395        }
3396    }
3397
3398    JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3399
3400    if (!notify.empty() || !accountHistoryNotify.empty())
3401    {
3402        // Create two different Json objects, for different API versions
3403        MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3404
3405        for (InfoSub::ref isrListener : notify)
3406            jvObj.visit(
3407                isrListener->getApiVersion(), //
3408                [&](Json::Value const& jv) { isrListener->send(jv, true); });
3409
3410        XRPL_ASSERT(
3411            jvObj.isMember(jss::account_history_tx_stream) ==
3413            "ripple::NetworkOPs::pubProposedAccountTransaction : "
3414            "account_history_tx_stream not set");
3415        for (auto& info : accountHistoryNotify)
3416        {
3417            auto& index = info.index_;
3418            if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3419                jvObj.set(jss::account_history_tx_first, true);
3420            jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3421            jvObj.visit(
3422                info.sink_->getApiVersion(), //
3423                [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3424        }
3425    }
3426}
3427
3428//
3429// Monitoring
3430//
3431
// Subscribe a listener to a set of accounts, either on the real-time map
// (rt == true) or the accepted-transaction map. The listener records the
// subscription on its side first, then each account's sub-map gains an
// entry keyed by the listener's sequence number.
// NOTE(review): the qualified name (source line 3433) and the lock on line
// 3448 are missing from this extract.
3432void
3434    InfoSub::ref isrListener,
3435    hash_set<AccountID> const& vnaAccountIDs,
3436    bool rt)
3437{
3438    SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3439
3440    for (auto const& naAccountID : vnaAccountIDs)
3441    {
3442        JLOG(m_journal.trace())
3443            << "subAccount: account: " << toBase58(naAccountID);
3444
3445        isrListener->insertSubAccountInfo(naAccountID, rt);
3446    }
3447
3449
3450    for (auto const& naAccountID : vnaAccountIDs)
3451    {
3452        auto simIterator = subMap.find(naAccountID);
3453        if (simIterator == subMap.end())
3454        {
            // Not found, note that account has a new single listener.
3456            SubMapType usisElement;
3457            usisElement[isrListener->getSeq()] = isrListener;
3458            // VFALCO NOTE This is making a needless copy of naAccountID
3459            subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3460        }
3461        else
3462        {
3463            // Found, note that the account has another listener.
3464            simIterator->second[isrListener->getSeq()] = isrListener;
3465        }
3466    }
3467}
3468
// Unsubscribe a listener from a set of accounts: first drop the records the
// listener keeps, then remove the server-side map entries via
// unsubAccountInternal(). (The qualified name on source line 3470 is
// missing from this extract.)
3469void
3471    InfoSub::ref isrListener,
3472    hash_set<AccountID> const& vnaAccountIDs,
3473    bool rt)
3474{
3475    for (auto const& naAccountID : vnaAccountIDs)
3476    {
3477        // Remove from the InfoSub
3478        isrListener->deleteSubAccountInfo(naAccountID, rt);
3479    }
3480
3481    // Remove from the server
3482    unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3483}
3484
// Server-side half of unsubAccount(): erase the listener (identified by its
// sequence number) from each account's sub-map, dropping the map entry
// entirely when it becomes empty. (The qualified name on source line 3486
// and the lock on line 3491 are missing from this extract.)
3485void
3487    std::uint64_t uSeq,
3488    hash_set<AccountID> const& vnaAccountIDs,
3489    bool rt)
3490{
3492
3493    SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3494
3495    for (auto const& naAccountID : vnaAccountIDs)
3496    {
3497        auto simIterator = subMap.find(naAccountID);
3498
3499        if (simIterator != subMap.end())
3500        {
3501            // Found
3502            simIterator->second.erase(uSeq);
3503
3504            if (simIterator->second.empty())
3505            {
3506                // Don't need hash entry.
3507                subMap.erase(simIterator);
3508            }
3509        }
3510    }
3511}
3512
// Queue a background job that streams an account's historical transactions
// (newest first) to an account_history subscriber, walking validated ledgers
// back toward the genesis ledger in windows of 1024, until the account's
// first transaction is found, the genesis ledger is reached, or the
// subscription is stopped.
//
// NOTE(review): several source lines are missing from this extract: 3514
// (qualified name + parameter), 3540-3541 (job-queue addJob head),
// 3621-3624 (getMoreTxns marker parameter and return type), 3629-3630
// (db pointer / options construction), 3675 (validated-range query call),
// 3689 (reschedule call), 3693 (marker declaration), 3723 and 3733
// (ledger lookup and stTxn declaration heads).
3513void
3515{
3516    enum DatabaseType { Sqlite, None };
    // Resolved once: only the SQLite relational backend supports the
    // paged account-tx queries this job relies on.
3517    static const auto databaseType = [&]() -> DatabaseType {
3518        // Use a dynamic_cast to return DatabaseType::None
3519        // on failure.
3520        if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3521        {
3522            return DatabaseType::Sqlite;
3523        }
3524        return DatabaseType::None;
3525    }();
3526
3527    if (databaseType == DatabaseType::None)
3528    {
3529        JLOG(m_journal.error())
3530            << "AccountHistory job for account "
3531            << toBase58(subInfo.index_->accountId_) << " no database";
3532        if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3533        {
3534            sptr->send(rpcError(rpcINTERNAL), true);
3535            unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3536        }
3537        return;
3538    }
3539
3542        "AccountHistoryTxStream",
3543        [this, dbType = databaseType, subInfo]() {
3544            auto const& accountId = subInfo.index_->accountId_;
3545            auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3546            auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3547
3548            JLOG(m_journal.trace())
3549                << "AccountHistory job for account " << toBase58(accountId)
3550                << " started. lastLedgerSeq=" << lastLedgerSeq;
3551
            // Detect the account's very first transaction, which terminates
            // the backward walk.
3552            auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3553                                 std::shared_ptr<TxMeta> const& meta) -> bool {
3554                /*
3555                 * genesis account: first tx is the one with seq 1
3556                 * other account: first tx is the one created the account
3557                 */
3558                if (accountId == genesisAccountId)
3559                {
3560                    auto stx = tx->getSTransaction();
3561                    if (stx->getAccountID(sfAccount) == accountId &&
3562                        stx->getSeqProxy().value() == 1)
3563                        return true;
3564                }
3565
                // An ACCOUNT_ROOT created (NewFields) for this account in the
                // metadata marks the account-creating transaction.
3566                for (auto& node : meta->getNodes())
3567                {
3568                    if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3569                        continue;
3570
3571                    if (node.isFieldPresent(sfNewFields))
3572                    {
3573                        if (auto inner = dynamic_cast<const STObject*>(
3574                                node.peekAtPField(sfNewFields));
3575                            inner)
3576                        {
3577                            if (inner->isFieldPresent(sfAccount) &&
3578                                inner->getAccountID(sfAccount) == accountId)
3579                            {
3580                                return true;
3581                            }
3582                        }
3583                    }
3584                }
3585
3586                return false;
3587            };
3588
            // Deliver a plain Json object; returns false if the subscriber
            // has gone away.
3589            auto send = [&](Json::Value const& jvObj,
3590                            bool unsubscribe) -> bool {
3591                if (auto sptr = subInfo.sinkWptr_.lock())
3592                {
3593                    sptr->send(jvObj, true);
3594                    if (unsubscribe)
3595                        unsubAccountHistory(sptr, accountId, false);
3596                    return true;
3597                }
3598
3599                return false;
3600            };
3601
            // Same, but version-aware for MultiApiJson payloads.
3602            auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3603                                        bool unsubscribe) -> bool {
3604                if (auto sptr = subInfo.sinkWptr_.lock())
3605                {
3606                    jvObj.visit(
3607                        sptr->getApiVersion(), //
3608                        [&](Json::Value const& jv) { sptr->send(jv, true); });
3609
3610                    if (unsubscribe)
3611                        unsubAccountHistory(sptr, accountId, false);
3612                    return true;
3613                }
3614
3615                return false;
3616            };
3617
            // Fetch the next page of this account's transactions from the DB.
3618            auto getMoreTxns =
3619                [&](std::uint32_t minLedger,
3620                    std::uint32_t maxLedger,
3625                    switch (dbType)
3626                    {
3627                        case Sqlite: {
3628                            auto db = static_cast<SQLiteDatabase*>(
3631                                accountId, minLedger, maxLedger, marker, 0, true};
3632                            return db->newestAccountTxPage(options);
3633                        }
3634                        default: {
3635                            UNREACHABLE(
3636                                "ripple::NetworkOPsImp::addAccountHistoryJob::"
3637                                "getMoreTxns : invalid database type");
3638                            return {};
3639                        }
3640                    }
3641                };
3642
3643            /*
3644             * search backward until the genesis ledger or asked to stop
3645             */
3646            while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3647            {
                // Charge the subscriber per window; bail out once it's gone.
3648                int feeChargeCount = 0;
3649                if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3650                {
3651                    sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3652                    ++feeChargeCount;
3653                }
3654                else
3655                {
3656                    JLOG(m_journal.trace())
3657                        << "AccountHistory job for account "
3658                        << toBase58(accountId) << " no InfoSub. Fee charged "
3659                        << feeChargeCount << " times.";
3660                    return;
3661                }
3662
3663                // try to search in 1024 ledgers till reaching genesis ledgers
3664                auto startLedgerSeq =
3665                    (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3666                JLOG(m_journal.trace())
3667                    << "AccountHistory job for account " << toBase58(accountId)
3668                    << ", working on ledger range [" << startLedgerSeq << ","
3669                    << lastLedgerSeq << "]";
3670
                // The whole window must lie inside the locally validated
                // range, otherwise the job is rescheduled.
3671                auto haveRange = [&]() -> bool {
3672                    std::uint32_t validatedMin = UINT_MAX;
3673                    std::uint32_t validatedMax = 0;
3674                    auto haveSomeValidatedLedgers =
3676                            validatedMin, validatedMax);
3677
3678                    return haveSomeValidatedLedgers &&
3679                        validatedMin <= startLedgerSeq &&
3680                        lastLedgerSeq <= validatedMax;
3681                }();
3682
3683                if (!haveRange)
3684                {
3685                    JLOG(m_journal.debug())
3686                        << "AccountHistory reschedule job for account "
3687                        << toBase58(accountId) << ", incomplete ledger range ["
3688                        << startLedgerSeq << "," << lastLedgerSeq << "]";
3690                    return;
3691                }
3692
3694                while (!subInfo.index_->stopHistorical_)
3695                {
3696                    auto dbResult =
3697                        getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3698                    if (!dbResult)
3699                    {
3700                        JLOG(m_journal.debug())
3701                            << "AccountHistory job for account "
3702                            << toBase58(accountId) << " getMoreTxns failed.";
3703                        send(rpcError(rpcINTERNAL), true);
3704                        return;
3705                    }
3706
3707                    auto const& txns = dbResult->first;
3708                    marker = dbResult->second;
3709                    size_t num_txns = txns.size();
3710                    for (size_t i = 0; i < num_txns; ++i)
3711                    {
3712                        auto const& [tx, meta] = txns[i];
3713
3714                        if (!tx || !meta)
3715                        {
3716                            JLOG(m_journal.debug())
3717                                << "AccountHistory job for account "
3718                                << toBase58(accountId) << " empty tx or meta.";
3719                            send(rpcError(rpcINTERNAL), true);
3720                            return;
3721                        }
3722                        auto curTxLedger =
3724                                tx->getLedger());
3725                        if (!curTxLedger)
3726                        {
3727                            JLOG(m_journal.debug())
3728                                << "AccountHistory job for account "
3729                                << toBase58(accountId) << " no ledger.";
3730                            send(rpcError(rpcINTERNAL), true);
3731                            return;
3732                        }
3734                            tx->getSTransaction();
3735                        if (!stTxn)
3736                        {
3737                            JLOG(m_journal.debug())
3738                                << "AccountHistory job for account "
3739                                << toBase58(accountId)
3740                                << " getSTransaction failed.";
3741                            send(rpcError(rpcINTERNAL), true);
3742                            return;
3743                        }
3744
3745                        auto const mRef = std::ref(*meta);
3746                        auto const trR = meta->getResultTER();
3747                        MultiApiJson jvTx =
3748                            transJson(stTxn, trR, true, curTxLedger, mRef);
3749
                        // Historical indices count down (negative direction)
                        // from the separation point.
3750                        jvTx.set(
3751                            jss::account_history_tx_index, txHistoryIndex--);
3752                        if (i + 1 == num_txns ||
3753                            txns[i + 1].first->getLedger() != tx->getLedger())
3754                            jvTx.set(jss::account_history_boundary, true);
3755
3756                        if (isFirstTx(tx, meta))
3757                        {
3758                            jvTx.set(jss::account_history_tx_first, true);
3759                            sendMultiApiJson(jvTx, false);
3760
3761                            JLOG(m_journal.trace())
3762                                << "AccountHistory job for account "
3763                                << toBase58(accountId)
3764                                << " done, found last tx.";
3765                            return;
3766                        }
3767                        else
3768                        {
3769                            sendMultiApiJson(jvTx, false);
3770                        }
3771                    }
3772
3773                    if (marker)
3774                    {
3775                        JLOG(m_journal.trace())
3776                            << "AccountHistory job for account "
3777                            << toBase58(accountId)
3778                            << " paging, marker=" << marker->ledgerSeq << ":"
3779                            << marker->txnSeq;
3780                    }
3781                    else
3782                    {
3783                        break;
3784                    }
3785                }
3786
3787                if (!subInfo.index_->stopHistorical_)
3788                {
                    // Advance the window backward to the next 1024 ledgers.
3789                    lastLedgerSeq = startLedgerSeq - 1;
3790                    if (lastLedgerSeq <= 1)
3791                    {
3792                        JLOG(m_journal.trace())
3793                            << "AccountHistory job for account "
3794                            << toBase58(accountId)
3795                            << " done, reached genesis ledger.";
3796                        return;
3797                    }
3798                }
3799            }
3800        });
3801}
3802
// Begin streaming for an account_history subscription once a validated
// ledger is available: record the separation point (transactions after it
// are published forward, at/before it are backfilled), skip accounts that
// don't exist or a genesis account with no transactions, then queue the
// historical backfill job.
// NOTE(review): the qualified name (source line 3804) and the subInfo
// parameter line (3806) are missing from this extract.
3803void
3805    std::shared_ptr<ReadView const> const& ledger,
3807{
3808    subInfo.index_->separationLedgerSeq_ = ledger->seq();
3809    auto const& accountId = subInfo.index_->accountId_;
3810    auto const accountKeylet = keylet::account(accountId);
3811    if (!ledger->exists(accountKeylet))
3812    {
3813        JLOG(m_journal.debug())
3814            << "subAccountHistoryStart, no account " << toBase58(accountId)
3815            << ", no need to add AccountHistory job.";
3816        return;
3817    }
3818    if (accountId == genesisAccountId)
3819    {
        // A genesis account whose Sequence is still 1 has never sent a
        // transaction, so there is no history to backfill.
3820        if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3821        {
3822            if (sleAcct->getFieldU32(sfSequence) == 1)
3823            {
3824                JLOG(m_journal.debug())
3825                    << "subAccountHistoryStart, genesis account "
3826                    << toBase58(accountId)
3827                    << " does not have tx, no need to add AccountHistory job.";
3828                return;
3829            }
3830        }
3831        else
3832        {
3833            UNREACHABLE(
3834                "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
3835                "access genesis account");
3836            return;
3837        }
3838    }
3839    subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
3840    subInfo.index_->haveHistorical_ = true;
3841
3842    JLOG(m_journal.debug())
3843        << "subAccountHistoryStart, add AccountHistory job: accountId="
3844        << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
3845
3846    addAccountHistoryJob(subInfo);
3847}
3848
// Subscribe a listener to an account's full transaction history. Registers
// the subscription (rejecting duplicates with rpcINVALID_PARAMS), then
// starts streaming immediately if a validated ledger exists, or defers the
// start until pubLedger() publishes the first one.
// NOTE(review): the return type and qualified name (source lines
// 3849-3850), the SubAccountHistoryInfoWeak declaration head (3862-3863),
// the lock, and the inner-map lines (3868, 3870) are missing from this
// extract.
3851    InfoSub::ref isrListener,
3852    AccountID const& accountId)
3853{
3854    if (!isrListener->insertSubAccountHistory(accountId))
3855    {
3856        JLOG(m_journal.debug())
3857            << "subAccountHistory, already subscribed to account "
3858            << toBase58(accountId);
3859        return rpcINVALID_PARAMS;
3860    }
3861
3864        isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
3865    auto simIterator = mSubAccountHistory.find(accountId);
3866    if (simIterator == mSubAccountHistory.end())
3867    {
3869        inner.emplace(isrListener->getSeq(), ahi);
3871            simIterator, std::make_pair(accountId, inner));
3872    }
3873    else
3874    {
3875        simIterator->second.emplace(isrListener->getSeq(), ahi);
3876    }
3877
3878    auto const ledger = app_.getLedgerMaster().getValidatedLedger();
3879    if (ledger)
3880    {
3881        subAccountHistoryStart(ledger, ahi);
3882    }
3883    else
3884    {
3885        // The node does not have validated ledgers, so wait for
3886        // one before start streaming.
3887        // In this case, the subscription is also considered successful.
3888        JLOG(m_journal.debug())
3889            << "subAccountHistory, no validated ledger yet, delay start";
3890    }
3891
3892    return rpcSUCCESS;
3893}
3894
// Unsubscribe a listener from an account's transaction history.
// If historyOnly is true, only historical streaming stops; the listener's
// own subscription record is kept (only the internal map is updated).
// NOTE(review): the function-name line (original 3896) is missing from
// this extraction.
3895void
 3897     InfoSub::ref isrListener,
 3898     AccountID const& account,
 3899     bool historyOnly)
 3900{
 3901     if (!historyOnly)
 3902         isrListener->deleteSubAccountHistory(account);
    // Delegate the map bookkeeping to the seq-keyed internal variant.
 3903     unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
 3904}
3905
// Internal unsubscribe keyed by listener sequence number: signals the
// historical-fetch job to stop, and (unless historyOnly) erases the
// subscription entry, dropping the per-account map when it becomes empty.
// NOTE(review): the function-name line (original 3907) and what is
// presumably a lock acquisition (original 3912) are missing from this dump.
3906void
 3908     std::uint64_t seq,
 3909     const AccountID& account,
 3910     bool historyOnly)
 3911{
 3913     auto simIterator = mSubAccountHistory.find(account);
 3914     if (simIterator != mSubAccountHistory.end())
 3915     {
 3916         auto& subInfoMap = simIterator->second;
 3917         auto subInfoIter = subInfoMap.find(seq);
 3918         if (subInfoIter != subInfoMap.end())
 3919         {
            // Tell any in-flight AccountHistory job to stop fetching.
 3920             subInfoIter->second.index_->stopHistorical_ = true;
 3921         }
 3922
 3923         if (!historyOnly)
 3924         {
 3925             simIterator->second.erase(seq);
 3926             if (simIterator->second.empty())
 3927             {
 3928                 mSubAccountHistory.erase(simIterator);
 3929             }
 3930         }
 3931         JLOG(m_journal.debug())
 3932             << "unsubAccountHistory, account " << toBase58(account)
 3933             << ", historyOnly = " << (historyOnly ? "true" : "false");
 3934     }
 3935}
3936
// Subscribe a listener to an order book's update stream. Always returns
// true; a null listener list from makeBookListeners is treated as a
// logic error (UNREACHABLE).
// NOTE(review): the signature line (original 3938) is missing from this
// extraction; parameters are presumably (InfoSub::ref, Book const&) per
// the subBook declaration — confirm against the header.
3937bool
3939{
 3940     if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
 3941         listeners->addSubscriber(isrListener);
 3942     else
 3943         UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
 3944     return true;
 3945}
3946
// Unsubscribe listener uSeq from an order book's update stream.
// Always returns true, even if there were no listeners for the book.
// NOTE(review): the signature line (original 3948) is missing from this dump.
3947bool
3949{
 3950     if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
 3951         listeners->removeSubscriber(uSeq);
 3952
 3953     return true;
 3954}
3955
// NOTE(review): the return-type/signature lines (original 3956-3958,
// std::uint32_t NetworkOPsImp::acceptLedger(optional consensusDelay) per the
// declaration index) are missing from this extraction.
// Accept the current open transaction tree (standalone mode only, driven by
// the `ledger_accept` RPC): simulates one consensus round and returns the
// resulting current ledger's sequence. Throws outside standalone mode.
3959{
 3960     // This code-path is exclusively used when the server is in standalone
 3961     // mode via `ledger_accept`
 3962     XRPL_ASSERT(
 3963         m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
 3964
 3965     if (!m_standalone)
 3966         Throw<std::runtime_error>(
 3967             "Operation only possible in STANDALONE mode.");
 3968
 3969     // FIXME Could we improve on this and remove the need for a specialized
 3970     // API in Consensus?
 3971     beginConsensus(m_ledgerMaster.getClosedLedger()->info().hash, {});
 3972     mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
 3973     return m_ledgerMaster.getCurrentLedger()->info().seq;
 3974}
3975
3976// <-- bool: true=added, false=already there
// Subscribe a listener to the "ledger" stream. Seeds jvResult with the
// current validated ledger's summary (index, hash, close time, fees,
// reserves) so the client has an initial snapshot, then registers the
// listener in the sLedger stream map.
// NOTE(review): extraction gaps — the signature line (original 3978) and
// lines 3994/3997/4000 (presumably a haveValidated()/getValidatedRange guard,
// the getCompleteLedgers() value, and a lock acquisition) are missing.
3977bool
3979{
 3980     if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
 3981     {
 3982         jvResult[jss::ledger_index] = lpClosed->info().seq;
 3983         jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
 3984         jvResult[jss::ledger_time] = Json::Value::UInt(
 3985             lpClosed->info().closeTime.time_since_epoch().count());
        // fee_ref is only reported on ledgers predating the XRPFees amendment.
 3986         if (!lpClosed->rules().enabled(featureXRPFees))
 3987             jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
 3988         jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
 3989         jvResult[jss::reserve_base] =
 3990             lpClosed->fees().accountReserve(0).jsonClipped();
 3991         jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
 3992     }
 3993
 3995     {
 3996         jvResult[jss::validated_ledgers] =
 3998     }
 3999
 4001     return mStreamMaps[sLedger]
 4002         .emplace(isrListener->getSeq(), isrListener)
 4003         .second;
4004}
4005
4006// <-- bool: true=added, false=already there
// Subscribe a listener to the "book_changes" stream.
// NOTE(review): extraction gaps — signature line and the
// `return mStreamMaps[sBookChanges]` / lock lines (original 4008, 4010-4011)
// are missing from this dump.
4007bool
4009{
 4012         .emplace(isrListener->getSeq(), isrListener)
 4013         .second;
4014}
4015
4016// <-- bool: true=erased, false=was not there
// Remove listener uSeq from the "ledger" stream map.
// NOTE(review): signature line (original 4018) and presumed lock line
// (original 4020) are missing from this extraction.
4017bool
4019{
 4021     return mStreamMaps[sLedger].erase(uSeq);
4022}
4023
4024// <-- bool: true=erased, false=was not there
// Remove listener uSeq from the "book_changes" stream map.
// NOTE(review): signature and presumed lock lines (original 4026, 4028)
// are missing from this extraction.
4025bool
4027{
 4029     return mStreamMaps[sBookChanges].erase(uSeq);
4030}
4031
4032// <-- bool: true=added, false=already there
// Subscribe a listener to the "manifests" stream.
// NOTE(review): signature and presumed lock lines (original 4034, 4036)
// are missing from this extraction.
4033bool
4035{
 4037     return mStreamMaps[sManifests]
 4038         .emplace(isrListener->getSeq(), isrListener)
 4039         .second;
4040}
4041
4042// <-- bool: true=erased, false=was not there
// Remove listener uSeq from the "manifests" stream map.
// NOTE(review): signature and presumed lock lines (original 4044, 4046)
// are missing from this extraction.
4043bool
4045{
 4047     return mStreamMaps[sManifests].erase(uSeq);
4048}
4049
4050// <-- bool: true=added, false=already there
// Subscribe a listener to the "server" status stream. Seeds jvResult with a
// snapshot of server state: operating mode, load base/factor, host id, node
// public key, plus a random value.
// NOTE(review): extraction gaps — the function-name line (original 4052) and
// lines 4072/4074 (presumably the node public key expression and a lock) are
// missing from this dump.
4051bool
 4053     InfoSub::ref isrListener,
 4054     Json::Value& jvResult,
 4055     bool admin)
4056{
 4057     uint256 uRandom;
 4058
 4059     if (m_standalone)
 4060         jvResult[jss::stand_alone] = m_standalone;
 4061
 4062     // CHECKME: is it necessary to provide a random number here?
 4063     beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
 4064
 4065     auto const& feeTrack = app_.getFeeTrack();
 4066     jvResult[jss::random] = to_string(uRandom);
 4067     jvResult[jss::server_status] = strOperatingMode(admin);
 4068     jvResult[jss::load_base] = feeTrack.getLoadBase();
 4069     jvResult[jss::load_factor] = feeTrack.getLoadFactor();
 4070     jvResult[jss::hostid] = getHostId(admin);
 4071     jvResult[jss::pubkey_node] =
 4073
 4075     return mStreamMaps[sServer]
 4076         .emplace(isrListener->getSeq(), isrListener)
 4077         .second;
4078}
4079
4080// <-- bool: true=erased, false=was not there
// Remove listener uSeq from the "server" stream map.
// NOTE(review): signature and presumed lock lines (original 4082, 4084)
// are missing from this extraction.
4081bool
4083{
 4085     return mStreamMaps[sServer].erase(uSeq);
4086}
4087
4088// <-- bool: true=added, false=already there
// Subscribe a listener to the validated "transactions" stream.
// NOTE(review): signature, lock, and `return mStreamMaps[sTransactions]`
// lines (original 4090, 4092-4093) are missing from this extraction.
4089bool
4091{
 4094         .emplace(isrListener->getSeq(), isrListener)
 4095         .second;
4096}
4097
4098// <-- bool: true=erased, false=was not there
// Remove listener uSeq from the "transactions" stream map.
// NOTE(review): signature and presumed lock lines (original 4100, 4102)
// are missing from this extraction.
4099bool
4101{
 4103     return mStreamMaps[sTransactions].erase(uSeq);
4104}
4105
4106// <-- bool: true=added, false=already there
// Subscribe a listener to the real-time (proposed) transactions stream.
// NOTE(review): signature, lock, and `return mStreamMaps[sRTTransactions]`
// lines (original 4108, 4110-4111) are missing from this extraction.
4107bool
4109{
 4112         .emplace(isrListener->getSeq(), isrListener)
 4113         .second;
4114}
4115
4116// <-- bool: true=erased, false=was not there
// Remove listener uSeq from the real-time transactions stream map.
// NOTE(review): signature and presumed lock lines (original 4118, 4120)
// are missing from this extraction.
4117bool
4119{
 4121     return mStreamMaps[sRTTransactions].erase(uSeq);
4122}
4123
4124// <-- bool: true=added, false=already there
// Subscribe a listener to the "validations" stream.
// NOTE(review): signature, lock, and `return mStreamMaps[sValidations]`
// lines (original 4126, 4128-4129) are missing from this extraction.
4125bool
4127{
 4130         .emplace(isrListener->getSeq(), isrListener)
 4131         .second;
4132}
4133
// Append the server's state-accounting counters (time spent / transitions
// per operating mode) to the given JSON object.
// NOTE(review): the function-name line (original 4135) is missing from
// this extraction.
4134void
4136{
 4137     accounting_.json(obj);
4138}
4139
4140// <-- bool: true=erased, false=was not there
// Remove listener uSeq from the "validations" stream map.
// NOTE(review): signature and presumed lock lines (original 4142, 4144)
// are missing from this extraction.
4141bool
4143{
 4145     return mStreamMaps[sValidations].erase(uSeq);
4146}
4147
4148// <-- bool: true=added, false=already there
// Subscribe a listener to the "peer_status" stream.
// NOTE(review): signature and presumed lock lines (original 4150, 4152)
// are missing from this extraction.
4149bool
4151{
 4153     return mStreamMaps[sPeerStatus]
 4154         .emplace(isrListener->getSeq(), isrListener)
 4155         .second;
4156}
4157
4158// <-- bool: true=erased, false=was not there
// Remove listener uSeq from the "peer_status" stream map.
// NOTE(review): signature and presumed lock lines (original 4160, 4162)
// are missing from this extraction.
4159bool
4161{
 4163     return mStreamMaps[sPeerStatus].erase(uSeq);
4164}
4165
4166// <-- bool: true=added, false=already there
// Subscribe a listener to the "consensus" phase stream.
// NOTE(review): signature, lock, and `return mStreamMaps[sConsensusPhase]`
// lines (original 4168, 4170-4171) are missing from this extraction.
4167bool
4169{
 4172         .emplace(isrListener->getSeq(), isrListener)
 4173         .second;
4174}
4175
4176// <-- bool: true=erased, false=was not there
// Remove listener uSeq from the "consensus" phase stream map.
// NOTE(review): signature and presumed lock lines (original 4178, 4180)
// are missing from this extraction.
4177bool
4179{
 4181     return mStreamMaps[sConsensusPhase].erase(uSeq);
4182}
4183
// Look up an RPC subscription entry by URL; returns an empty
// InfoSub::pointer when not found.
// NOTE(review): return-type/signature lines (original 4184-4185) and the
// presumed lock line (original 4187) are missing from this extraction.
4186{
 4188
 4189     subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
 4190
 4191     if (it != mRpcSubMap.end())
 4192         return it->second;
 4193
 4194     return InfoSub::pointer();
4195}
4196
// Register an RPC subscription entry under the given URL and return it.
// NOTE(review): return-type/signature lines (original 4197-4198) and the
// presumed lock line (original 4200) are missing from this extraction.
4199{
 4201
 4202     mRpcSubMap.emplace(strUrl, rspEntry);
 4203
 4204     return rspEntry;
4205}
4206
// Try to remove an RPC subscription by URL. Fails (returns false) if the
// URL is unknown or if any stream map still references the entry's listener
// sequence, so active streams keep their subscriber alive.
// NOTE(review): signature and presumed lock lines (original 4208, 4210)
// are missing from this extraction.
4207bool
4209{
 4211     auto pInfo = findRpcSub(strUrl);
 4212
 4213     if (!pInfo)
 4214         return false;
 4215
 4216     // check to see if any of the stream maps still hold a weak reference to
 4217     // this entry before removing
 4218     for (SubMapType const& map : mStreamMaps)
 4219     {
 4220         if (map.find(pInfo->getSeq()) != map.end())
 4221             return false;
 4222     }
 4223     mRpcSubMap.erase(strUrl);
 4224     return true;
4225}
4226
4227#ifndef USE_NEW_BOOK_PAGE
4228
4229// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4230// work, but it demonstrated poor performance.
4231//
// Build the JSON "offers" array for one page of an order book: walks the
// book's directory pages in quality order, computes each offer's funded
// amounts (tracking a running owner-balance table so repeated owners are
// only looked up once), and appends every offer (funded or not) to
// jvResult[jss::offers].
// NOTE(review): extraction artifact — the function-name/lpLedger parameter
// lines (original 4233-4234), the umBalance declaration (4245), and the
// fhZERO_IF_FROZEN-style argument line (4350) are missing from this dump;
// comments describe the visible code only.
4232void
 4235     Book const& book,
 4236     AccountID const& uTakerID,
 4237     bool const bProof,
 4238     unsigned int iLimit,
 4239     Json::Value const& jvMarker,
 4240     Json::Value& jvResult)
4241{ // CAUTION: This is the old get book page logic
 4242     Json::Value& jvOffers =
 4243         (jvResult[jss::offers] = Json::Value(Json::arrayValue));
 4244
    // The book spans the key range [uBookBase, uBookEnd); uTipIndex tracks
    // the current directory page as we advance through qualities.
 4246     const uint256 uBookBase = getBookBase(book);
 4247     const uint256 uBookEnd = getQualityNext(uBookBase);
 4248     uint256 uTipIndex = uBookBase;
 4249
 4250     if (auto stream = m_journal.trace())
 4251     {
 4252         stream << "getBookPage:" << book;
 4253         stream << "getBookPage: uBookBase=" << uBookBase;
 4254         stream << "getBookPage: uBookEnd=" << uBookEnd;
 4255         stream << "getBookPage: uTipIndex=" << uTipIndex;
 4256     }
 4257
 4258     ReadView const& view = *lpLedger;
 4259
 4260     bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
 4261         isGlobalFrozen(view, book.in.account);
 4262
 4263     bool bDone = false;
 4264     bool bDirectAdvance = true;
 4265
 4266     std::shared_ptr<SLE const> sleOfferDir;
 4267     uint256 offerIndex;
 4268     unsigned int uBookEntry;
 4269     STAmount saDirRate;
 4270
 4271     auto const rate = transferRate(view, book.out.account);
 4272     auto viewJ = app_.journal("View");
 4273
 4274     while (!bDone && iLimit-- > 0)
 4275     {
        // Advance to the next directory page (next-best quality) when the
        // current page has been exhausted.
 4276         if (bDirectAdvance)
 4277         {
 4278             bDirectAdvance = false;
 4279
 4280             JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
 4281
 4282             auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
 4283             if (ledgerIndex)
 4284                 sleOfferDir = view.read(keylet::page(*ledgerIndex));
 4285             else
 4286                 sleOfferDir.reset();
 4287
 4288             if (!sleOfferDir)
 4289             {
 4290                 JLOG(m_journal.trace()) << "getBookPage: bDone";
 4291                 bDone = true;
 4292             }
 4293             else
 4294             {
 4295                 uTipIndex = sleOfferDir->key();
 4296                 saDirRate = amountFromQuality(getQuality(uTipIndex));
 4297
 4298                 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
 4299
 4300                 JLOG(m_journal.trace())
 4301                     << "getBookPage: uTipIndex=" << uTipIndex;
 4302                 JLOG(m_journal.trace())
 4303                     << "getBookPage: offerIndex=" << offerIndex;
 4304             }
 4305         }
 4306
 4307         if (!bDone)
 4308         {
 4309             auto sleOffer = view.read(keylet::offer(offerIndex));
 4310
 4311             if (sleOffer)
 4312             {
 4313                 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
 4314                 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
 4315                 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
 4316                 STAmount saOwnerFunds;
 4317                 bool firstOwnerOffer(true);
 4318
 4319                 if (book.out.account == uOfferOwnerID)
 4320                 {
 4321                     // If an offer is selling issuer's own IOUs, it is fully
 4322                     // funded.
 4323                     saOwnerFunds = saTakerGets;
 4324                 }
 4325                 else if (bGlobalFreeze)
 4326                 {
 4327                     // If either asset is globally frozen, consider all offers
 4328                     // that aren't ours to be totally unfunded
 4329                     saOwnerFunds.clear(book.out);
 4330                 }
 4331                 else
 4332                 {
 4333                     auto umBalanceEntry = umBalance.find(uOfferOwnerID);
 4334                     if (umBalanceEntry != umBalance.end())
 4335                     {
 4336                         // Found in running balance table.
 4337
 4338                         saOwnerFunds = umBalanceEntry->second;
 4339                         firstOwnerOffer = false;
 4340                     }
 4341                     else
 4342                     {
 4343                         // Did not find balance in table.
 4344
 4345                         saOwnerFunds = accountHolds(
 4346                             view,
 4347                             uOfferOwnerID,
 4348                             book.out.currency,
 4349                             book.out.account,
 4351                             viewJ);
 4352
 4353                         if (saOwnerFunds < beast::zero)
 4354                         {
 4355                             // Treat negative funds as zero.
 4356
 4357                             saOwnerFunds.clear();
 4358                         }
 4359                     }
 4360                 }
 4361
 4362                 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
 4363
 4364                 STAmount saTakerGetsFunded;
 4365                 STAmount saOwnerFundsLimit = saOwnerFunds;
 4366                 Rate offerRate = parityRate;
 4367
 4368                 if (rate != parityRate
 4369                     // Have a tranfer fee.
 4370                     && uTakerID != book.out.account
 4371                     // Not taking offers of own IOUs.
 4372                     && book.out.account != uOfferOwnerID)
 4373                 // Offer owner not issuing ownfunds
 4374                 {
 4375                     // Need to charge a transfer fee to offer owner.
 4376                     offerRate = rate;
 4377                     saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
 4378                 }
 4379
 4380                 if (saOwnerFundsLimit >= saTakerGets)
 4381                 {
 4382                     // Sufficient funds no shenanigans.
 4383                     saTakerGetsFunded = saTakerGets;
 4384                 }
 4385                 else
 4386                 {
 4387                     // Only provide, if not fully funded.
 4388
 4389                     saTakerGetsFunded = saOwnerFundsLimit;
 4390
 4391                     saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
                    // The std::min expression's value is consumed only via
                    // the trailing .setJson call on the temporary.
 4392                     std::min(
 4393                         saTakerPays,
 4394                         multiply(
 4395                             saTakerGetsFunded, saDirRate, saTakerPays.issue()))
 4396                         .setJson(jvOffer[jss::taker_pays_funded]);
 4397                 }
 4398
 4399                 STAmount saOwnerPays = (parityRate == offerRate)
 4400                     ? saTakerGetsFunded
 4401                     : std::min(
 4402                           saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
 4403
                // Deduct what this offer would consume from the owner's
                // remaining balance for subsequent offers by the same owner.
 4404                 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
 4405
 4406                 // Include all offers funded and unfunded
 4407                 Json::Value& jvOf = jvOffers.append(jvOffer);
 4408                 jvOf[jss::quality] = saDirRate.getText();
 4409
 4410                 if (firstOwnerOffer)
 4411                     jvOf[jss::owner_funds] = saOwnerFunds.getText();
 4412             }
 4413             else
 4414             {
 4415                 JLOG(m_journal.warn()) << "Missing offer";
 4416             }
 4417
 4418             if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
 4419             {
 4420                 bDirectAdvance = true;
 4421             }
 4422             else
 4423             {
 4424                 JLOG(m_journal.trace())
 4425                     << "getBookPage: offerIndex=" << offerIndex;
 4426             }
 4427         }
 4428     }
 4429
 4430     // jvResult[jss::marker] = Json::Value(Json::arrayValue);
 4431     // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4432}
4433
4434#else
4435
4436// This is the new code that uses the book iterators
4437// It has temporarily been disabled
// Disabled (USE_NEW_BOOK_PAGE) variant of getBookPage built on
// OrderBookIterator. Unlike the old path, it appends only funded offers
// and the taker's own offers to the result array.
// NOTE(review): extraction artifact — the function-name/lpLedger lines
// (original 4440-4441), the umBalance declaration (4451), and the
// freeze-handling argument line (4501) are missing from this dump.
4438
4439void
 4442     Book const& book,
 4443     AccountID const& uTakerID,
 4444     bool const bProof,
 4445     unsigned int iLimit,
 4446     Json::Value const& jvMarker,
 4447     Json::Value& jvResult)
4448{
 4449     auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
 4450
 4452
 4453     MetaView lesActive(lpLedger, tapNONE, true);
 4454     OrderBookIterator obIterator(lesActive, book);
 4455
 4456     auto const rate = transferRate(lesActive, book.out.account);
 4457
 4458     const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
 4459         lesActive.isGlobalFrozen(book.in.account);
 4460
 4461     while (iLimit-- > 0 && obIterator.nextOffer())
 4462     {
 4463         SLE::pointer sleOffer = obIterator.getCurrentOffer();
 4464         if (sleOffer)
 4465         {
 4466             auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
 4467             auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
 4468             auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
 4469             STAmount saDirRate = obIterator.getCurrentRate();
 4470             STAmount saOwnerFunds;
 4471
 4472             if (book.out.account == uOfferOwnerID)
 4473             {
 4474                 // If offer is selling issuer's own IOUs, it is fully funded.
 4475                 saOwnerFunds = saTakerGets;
 4476             }
 4477             else if (bGlobalFreeze)
 4478             {
 4479                 // If either asset is globally frozen, consider all offers
 4480                 // that aren't ours to be totally unfunded
 4481                 saOwnerFunds.clear(book.out);
 4482             }
 4483             else
 4484             {
 4485                 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
 4486
 4487                 if (umBalanceEntry != umBalance.end())
 4488                 {
 4489                     // Found in running balance table.
 4490
 4491                     saOwnerFunds = umBalanceEntry->second;
 4492                 }
 4493                 else
 4494                 {
 4495                     // Did not find balance in table.
 4496
 4497                     saOwnerFunds = lesActive.accountHolds(
 4498                         uOfferOwnerID,
 4499                         book.out.currency,
 4500                         book.out.account,
 4502
 4503                     if (saOwnerFunds.isNegative())
 4504                     {
 4505                         // Treat negative funds as zero.
 4506
 4507                         saOwnerFunds.zero();
 4508                     }
 4509                 }
 4510             }
 4511
 4512             Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
 4513
 4514             STAmount saTakerGetsFunded;
 4515             STAmount saOwnerFundsLimit = saOwnerFunds;
 4516             Rate offerRate = parityRate;
 4517
 4518             if (rate != parityRate
 4519                 // Have a tranfer fee.
 4520                 && uTakerID != book.out.account
 4521                 // Not taking offers of own IOUs.
 4522                 && book.out.account != uOfferOwnerID)
 4523             // Offer owner not issuing ownfunds
 4524             {
 4525                 // Need to charge a transfer fee to offer owner.
 4526                 offerRate = rate;
 4527                 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
 4528             }
 4529
 4530             if (saOwnerFundsLimit >= saTakerGets)
 4531             {
 4532                 // Sufficient funds no shenanigans.
 4533                 saTakerGetsFunded = saTakerGets;
 4534             }
 4535             else
 4536             {
 4537                 // Only provide, if not fully funded.
 4538                 saTakerGetsFunded = saOwnerFundsLimit;
 4539
 4540                 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
 4541
 4542                 // TOOD(tom): The result of this expression is not used - what's
 4543                 // going on here?
 4544                 std::min(
 4545                     saTakerPays,
 4546                     multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
 4547                     .setJson(jvOffer[jss::taker_pays_funded]);
 4548             }
 4549
 4550             STAmount saOwnerPays = (parityRate == offerRate)
 4551                 ? saTakerGetsFunded
 4552                 : std::min(
 4553                       saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
 4554
 4555             umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
 4556
 4557             if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
 4558             {
 4559                 // Only provide funded offers and offers of the taker.
 4560                 Json::Value& jvOf = jvOffers.append(jvOffer);
 4561                 jvOf[jss::quality] = saDirRate.getText();
 4562             }
 4563         }
 4564     }
 4565
 4566     // jvResult[jss::marker] = Json::Value(Json::arrayValue);
 4567     // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4568}
4569
4570#endif
4571
// Snapshot the state-accounting counters and publish per-mode duration and
// transition counts to the insight metrics gauges, folding the time spent in
// the current mode into its counter first.
// NOTE(review): extraction artifact — the function-name line (original 4573),
// the current-duration expression tail (4577), and all of the gauge `.set(`
// call heads (4580-4606, odd lines) are missing from this dump; only the
// counter-indexing argument lines survive.
4572inline void
4574{
 4575     auto [counters, mode, start, initialSync] = accounting_.getCounterData();
 4576     auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
 4578     counters[static_cast<std::size_t>(mode)].dur += current;
 4579
 4582         counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
 4583             .dur.count());
 4585         counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
 4586             .dur.count());
 4588         counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
 4590         counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
 4591             .dur.count());
 4593         counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
 4594
 4596         counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
 4597             .transitions);
 4599         counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
 4600             .transitions);
 4602         counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
 4604         counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
 4605             .transitions);
 4607         counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4608}
4609
// Record a transition into operating mode `om`: bumps the mode's transition
// count, records the initial-sync duration on the first-ever entry into FULL,
// charges the elapsed time to the outgoing mode, and restarts the clock.
// NOTE(review): the function-name line (original 4611,
// StateAccounting::mode per the member index) is missing from this dump.
4610void
4612{
 4613     auto now = std::chrono::steady_clock::now();
 4614
 4615     std::lock_guard lock(mutex_);
 4616     ++counters_[static_cast<std::size_t>(om)].transitions;
    // First transition to FULL == initial sync complete; capture the time
    // from process start in microseconds.
 4617     if (om == OperatingMode::FULL &&
 4618         counters_[static_cast<std::size_t>(om)].transitions == 1)
 4619     {
 4620         initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
 4621                              now - processStart_)
 4622                              .count();
 4623     }
 4624     counters_[static_cast<std::size_t>(mode_)].dur +=
 4625         std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
 4626
 4627     mode_ = om;
 4628     start_ = now;
4629}
4630
// Emit the per-mode counters as JSON under obj[state_accounting]:
// one {transitions, duration_us} object per operating mode, plus the current
// state's running duration and (if set) the initial sync duration.
// NOTE(review): extraction artifact — the function-name line (original 4632),
// the current-duration expression tail (4636), and the for-loop header
// (4640, presumably starting i at DISCONNECTED) are missing from this dump.
4631void
4633{
 4634     auto [counters, mode, start, initialSync] = getCounterData();
 4635     auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
 4637     counters[static_cast<std::size_t>(mode)].dur += current;
 4638
 4639     obj[jss::state_accounting] = Json::objectValue;
 4641          i <= static_cast<std::size_t>(OperatingMode::FULL);
 4642          ++i)
 4643     {
 4644         obj[jss::state_accounting][states_[i]] = Json::objectValue;
 4645         auto& state = obj[jss::state_accounting][states_[i]];
 4646         state[jss::transitions] = std::to_string(counters[i].transitions);
 4647         state[jss::duration_us] = std::to_string(counters[i].dur.count());
 4648     }
 4649     obj[jss::server_state_duration_us] = std::to_string(current.count());
 4650     if (initialSync)
 4651         obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4652}
4653
4654//------------------------------------------------------------------------------
4655
// Factory: construct the concrete NetworkOPsImp behind the NetworkOPs
// interface, forwarding all construction parameters.
// NOTE(review): extraction artifact — the return-type/name lines (original
// 4656-4657, std::unique_ptr<NetworkOPs> make_NetworkOPs per convention) and
// parameter/argument lines 4659, 4664, 4677 (the clock and LedgerMaster
// arguments) are missing from this dump.
 4658     Application& app,
 4660     bool standalone,
 4661     std::size_t minPeerCount,
 4662     bool startvalid,
 4663     JobQueue& job_queue,
 4665     ValidatorKeys const& validatorKeys,
 4666     boost::asio::io_service& io_svc,
 4667     beast::Journal journal,
 4668     beast::insight::Collector::ptr const& collector)
4669{
 4670     return std::make_unique<NetworkOPsImp>(
 4671         app,
 4672         clock,
 4673         standalone,
 4674         minPeerCount,
 4675         startvalid,
 4676         job_queue,
 4678         validatorKeys,
 4679         io_svc,
 4680         journal,
 4681         collector);
4682}
4683
4684} // namespace ripple
T back_inserter(T... args)
T begin(T... args)
T bind(T... args)
Decorator for streaming out compact json.
Definition: json_writer.h:318
Lightweight wrapper to tag static string.
Definition: json_value.h:62
Represents a JSON value.
Definition: json_value.h:148
Json::UInt UInt
Definition: json_value.h:155
Value get(UInt index, const Value &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Definition: json_value.cpp:847
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:897
bool isMember(const char *key) const
Return true if the object has a member named key.
Definition: json_value.cpp:949
A generic endpoint for log messages.
Definition: Journal.h:60
Stream error() const
Definition: Journal.h:346
Stream debug() const
Definition: Journal.h:328
Stream info() const
Definition: Journal.h:334
Stream trace() const
Severity stream access functions.
Definition: Journal.h:322
Stream warn() const
Definition: Journal.h:340
A metric for measuring an integral value.
Definition: Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition: Gauge.h:68
A reference to a handler for performing polled collection.
Definition: Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition: Book.h:35
Issue in
Definition: Book.h:37
Issue out
Definition: Book.h:38
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition: ClusterNode.h:46
std::uint32_t getLoadFee() const
Definition: ClusterNode.h:52
NetClock::time_point getReportTime() const
Definition: ClusterNode.h:58
PublicKey const & identity() const
Definition: ClusterNode.h:64
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:49
uint32_t NETWORK_ID
Definition: Config.h:156
std::string SERVER_DOMAIN
Definition: Config.h:279
std::size_t NODE_SIZE
Definition: Config.h:213
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition: Config.h:160
int RELAY_UNTRUSTED_VALIDATIONS
Definition: Config.h:169
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition: InfoSub.h:54
AccountID account
Definition: Issue.h:39
Currency currency
Definition: Issue.h:38
A pool of threads to perform work.
Definition: JobQueue.h:56
Json::Value getJson(int c=0)
Definition: JobQueue.cpp:214
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
Definition: LedgerMaster.h:265
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:79
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
Definition: LoadFeeTrack.h:45
std::uint32_t getClusterFee() const
Definition: LoadFeeTrack.h:82
std::uint32_t getLocalFee() const
Definition: LoadFeeTrack.h:75
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:89
std::uint32_t getRemoteFee() const
Definition: LoadFeeTrack.h:68
std::uint32_t getLoadFactor() const
Definition: LoadFeeTrack.h:95
Manages load sources.
Definition: LoadManager.h:46
void heartbeat()
Reset the stall detection timer.
Definition: LoadManager.cpp:64
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
Definition: Manifest.cpp:323
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
Definition: NetworkOPs.cpp:141
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
Definition: NetworkOPs.cpp:151
std::chrono::steady_clock::time_point start_
Definition: NetworkOPs.cpp:153
static std::array< Json::StaticString const, 5 > const states_
Definition: NetworkOPs.cpp:157
std::chrono::steady_clock::time_point const processStart_
Definition: NetworkOPs.cpp:155
Transaction with input flags and results to be applied in batches.
Definition: NetworkOPs.cpp:92
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
Definition: NetworkOPs.cpp:101
std::shared_ptr< Transaction > const transaction
Definition: NetworkOPs.cpp:94
boost::asio::steady_timer accountHistoryTxTimer_
Definition: NetworkOPs.cpp:739
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
Definition: NetworkOPs.cpp:874
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::vector< TransactionStatus > mTransactions
Definition: NetworkOPs.cpp:786
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Definition: NetworkOPs.cpp:729
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
Definition: NetworkOPs.cpp:741
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
Definition: NetworkOPs.cpp:892
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
Definition: NetworkOPs.cpp:737
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
Definition: NetworkOPs.cpp:118
std::optional< PublicKey > const validatorPK_
Definition: NetworkOPs.cpp:743
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
Definition: NetworkOPs.cpp:725
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
Definition: NetworkOPs.cpp:268
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Definition: NetworkOPs.cpp:755
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, const bool bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
Definition: NetworkOPs.cpp:738
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
Definition: NetworkOPs.cpp:124
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:224
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
Definition: NetworkOPs.cpp:732
beast::Journal m_journal
Definition: NetworkOPs.cpp:723
SubInfoMapType mSubAccount
Definition: NetworkOPs.cpp:750
std::optional< PublicKey > const validatorMasterPK_
Definition: NetworkOPs.cpp:744
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
Definition: NetworkOPs.cpp:790
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
Definition: NetworkOPs.cpp:736
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
Definition: NetworkOPs.cpp:945
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
Definition: NetworkOPs.cpp:770
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
Definition: NetworkOPs.cpp:780
std::atomic< bool > unlBlocked_
Definition: NetworkOPs.cpp:734
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
Definition: NetworkOPs.cpp:727
std::atomic< bool > needNetworkLedger_
Definition: NetworkOPs.cpp:731
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
Definition: NetworkOPs.cpp:788
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
Definition: NetworkOPs.cpp:904
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
Definition: NetworkOPs.cpp:748
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
Definition: NetworkOPs.cpp:935
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
Definition: NetworkOPs.cpp:772
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
Definition: NetworkOPs.cpp:886
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
Definition: NetworkOPs.cpp:783
void setMode(OperatingMode om) override
void stop() override
Definition: NetworkOPs.cpp:572
void clearNeedNetworkLedger() override
Definition: NetworkOPs.cpp:898
DispatchState mDispatchState
Definition: NetworkOPs.cpp:785
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
Definition: NetworkOPs.cpp:751
bool checkLastClosedLedger(const Overlay::PeerSequence &, uint256 &networkClosed)
void reportFeeChange() override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
Definition: NetworkOPs.cpp:910
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
Definition: NetworkOPs.cpp:791
Application & app_
Definition: NetworkOPs.cpp:722
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
Definition: NetworkOPs.cpp:746
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
Definition: NetworkOPs.cpp:753
std::atomic< bool > amendmentWarned_
Definition: NetworkOPs.cpp:733
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
Definition: NetworkOPs.cpp:916
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition: NetworkOPs.h:88
void getCountsJson(Json::Value &obj)
Definition: Database.cpp:268
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:49
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:57
BookListeners::pointer getBookListeners(Book const &)
BookListeners::pointer makeBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, const AcceptedLedgerTx &alTx, MultiApiJson const &jvObj)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
Definition: RCLConsensus.h:53
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
Definition: RCLConsensus.h:447
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Definition: RCLConsensus.h:460
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:44
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Definition: RCLCxPeerPos.h:78
Represents a set of transactions in RCLConsensus.
Definition: RCLCxTx.h:63
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition: RFC1751.cpp:507
Collects logging information.
Definition: RCLConsensus.h:551
std::unique_ptr< std::stringstream > const & ss()
Definition: RCLConsensus.h:565
A view into a ledger.
Definition: ReadView.h:52
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition: STAmount.cpp:507
std::string getText() const override
Definition: STAmount.cpp:547
Issue const & issue() const
Definition: STAmount.h:487
std::optional< T > get(std::string const &name) const
Definition: BasicConfig.h:140
std::size_t size() const noexcept
Definition: Serializer.h:73
void const * data() const noexcept
Definition: Serializer.h:79
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition: TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition: TxQ.cpp:1778
static time_point now()
Definition: UptimeClock.cpp:67
Validator keys and manifest as set in configuration file.
Definition: ValidatorKeys.h:38
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition: XRPAmount.h:262
Json::Value jsonClipped() const
Definition: XRPAmount.h:218
iterator begin()
Definition: base_uint.h:136
static constexpr std::size_t size()
Definition: base_uint.h:526
bool isZero() const
Definition: base_uint.h:540
bool isNonZero() const
Definition: base_uint.h:545
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition: scope.h:231
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition: json_value.h:43
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:44
int Int
Definition: json_forwards.h:26
unsigned int UInt
Definition: json_forwards.h:27
void rngfill(void *buffer, std::size_t bytes, Generator &g)
Definition: rngfill.h:34
std::string const & getVersionString()
Server version.
Definition: BuildInfo.cpp:68
std::optional< std::string > encodeCTID(uint32_t ledgerSeq, uint32_t txnIndex, uint32_t networkID) noexcept
Definition: CTID.h:43
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition: BookChanges.h:47
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition: Indexes.cpp:175
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition: Indexes.cpp:371
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition: Indexes.cpp:265
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:32
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:114
STAmount divide(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:93
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition: STTx.cpp:631
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition: View.cpp:443
@ fhZERO_IF_FROZEN
Definition: View.h:76
@ fhIGNORE_FREEZE
Definition: View.h:76
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition: View.cpp:137
std::uint64_t getQuality(uint256 const &uBase)
Definition: Indexes.cpp:140
@ INCLUDED
Definition: Transaction.h:49
@ OBSOLETE
Definition: Transaction.h:54
@ INVALID
Definition: Transaction.h:48
error_code_i
Definition: ErrorCodes.h:40
@ rpcSUCCESS
Definition: ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition: ErrorCodes.h:84
@ rpcINTERNAL
Definition: ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
Definition: SecretKey.cpp:369
auto constexpr muldiv_max
Definition: mulDiv.h:29
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition: LocalTxs.cpp:197
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition: View.cpp:650
STAmount amountFromQuality(std::uint64_t rate)
Definition: STAmount.cpp:854
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition: ErrorCodes.h:167
@ warnRPC_UNSUPPORTED_MAJORITY
Definition: ErrorCodes.h:165
@ warnRPC_AMENDMENT_BLOCKED
Definition: ErrorCodes.h:166
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition: NetworkOPs.h:67
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:53
bool isTesSuccess(TER x)
Definition: TER.h:656
bool isTerRetry(TER x)
Definition: TER.h:650
AccountID calcAccountID(PublicKey const &pk)
Definition: AccountID.cpp:168
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Definition: csprng.cpp:103
Json::Value rpcError(int iError)
Definition: RPCErr.cpp:31
@ tefPAST_SEQ
Definition: TER.h:175
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
Definition: NetworkOPs.cpp:855
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
bool isTemMalformed(TER x)
Definition: TER.h:638
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition: View.cpp:148
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition: ApiVersion.h:102
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:75
@ tesSUCCESS
Definition: TER.h:242
uint256 getQualityNext(uint256 const &uBase)
Definition: Indexes.cpp:132
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition: View.cpp:309
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition: ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition: chrono.h:92
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:630
FeeSetup setup_FeeVote(Section const &section)
Definition: Config.cpp:1092
Number root(Number f, unsigned d)
Definition: Number.cpp:636
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Definition: mulDiv.cpp:32
ApplyFlags
Definition: ApplyView.h:31
@ tapFAIL_HARD
Definition: ApplyView.h:36
@ tapUNLIMITED
Definition: ApplyView.h:43
@ tapNONE
Definition: ApplyView.h:32
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:38
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition: Seed.cpp:76
@ terQUEUED
Definition: TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition: TER.cpp:242
@ jtNETOP_CLUSTER
Definition: Job.h:75
@ jtCLIENT_FEE_CHANGE
Definition: Job.h:47
@ jtTRANSACTION
Definition: Job.h:62
@ jtTXN_PROC
Definition: Job.h:82
@ jtCLIENT_CONSENSUS
Definition: Job.h:48
@ jtBATCH
Definition: Job.h:65
@ jtCLIENT_ACCT_HIST
Definition: Job.h:49
uint256 getBookBase(Book const &book)
Definition: Indexes.cpp:113
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition: View.cpp:174
static std::uint32_t trunc32(std::uint64_t v)
@ temBAD_SIGNATURE
Definition: TER.h:105
static auto const genesisAccountId
Definition: NetworkOPs.cpp:868
STL namespace.
T ref(T... args)
T reset(T... args)
T set_intersection(T... args)
T str(T... args)
std::string serialized
The manifest in serialized form.
Definition: Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition: Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition: Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
Definition: Manifest.cpp:244
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition: Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
Definition: Manifest.cpp:255
PublicKey masterKey
The master key associated with this manifest.
Definition: Manifest.h:86
Server fees published on server subscription.
Definition: NetworkOPs.cpp:201
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
Definition: NetworkOPs.cpp:220
bool operator==(ServerFeeSummary const &b) const
Definition: NetworkOPs.cpp:212
beast::insight::Gauge full_transitions
Definition: NetworkOPs.cpp:842
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:797
beast::insight::Hook hook
Definition: NetworkOPs.cpp:831
beast::insight::Gauge connected_duration
Definition: NetworkOPs.cpp:833
beast::insight::Gauge tracking_duration
Definition: NetworkOPs.cpp:835
beast::insight::Gauge connected_transitions
Definition: NetworkOPs.cpp:839
beast::insight::Gauge disconnected_transitions
Definition: NetworkOPs.cpp:838
beast::insight::Gauge syncing_duration
Definition: NetworkOPs.cpp:834
beast::insight::Gauge tracking_transitions
Definition: NetworkOPs.cpp:841
beast::insight::Gauge full_duration
Definition: NetworkOPs.cpp:836
beast::insight::Gauge disconnected_duration
Definition: NetworkOPs.cpp:832
beast::insight::Gauge syncing_transitions
Definition: NetworkOPs.cpp:840
SubAccountHistoryIndex(AccountID const &accountId)
Definition: NetworkOPs.cpp:686
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:705
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:700
Represents a transfer rate.
Definition: Rate.h:40
Data format for exchanging consumption information across peers.
Definition: Gossip.h:30
std::vector< Item > items
Definition: Gossip.h:42
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition: TxQ.h:165
void set(const char *key, auto const &v)
Definition: MultiApiJson.h:83
IsMemberResult isMember(const char *key) const
Definition: MultiApiJson.h:94
Select all peers (except optional excluded) that are in our cluster.
Definition: predicates.h:137
Sends a message to all peers.
Definition: predicates.h:32
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)