rippled
Loading...
Searching...
No Matches
NetworkOPs.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/misc/AmendmentTable.h>
32#include <xrpld/app/misc/DeliverMax.h>
33#include <xrpld/app/misc/HashRouter.h>
34#include <xrpld/app/misc/LoadFeeTrack.h>
35#include <xrpld/app/misc/NetworkOPs.h>
36#include <xrpld/app/misc/Transaction.h>
37#include <xrpld/app/misc/TxQ.h>
38#include <xrpld/app/misc/ValidatorKeys.h>
39#include <xrpld/app/misc/ValidatorList.h>
40#include <xrpld/app/misc/detail/AccountTxPaging.h>
41#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
42#include <xrpld/app/tx/apply.h>
43#include <xrpld/consensus/Consensus.h>
44#include <xrpld/consensus/ConsensusParms.h>
45#include <xrpld/overlay/Cluster.h>
46#include <xrpld/overlay/Overlay.h>
47#include <xrpld/overlay/predicates.h>
48#include <xrpld/perflog/PerfLog.h>
49#include <xrpld/rpc/BookChanges.h>
50#include <xrpld/rpc/DeliveredAmount.h>
51#include <xrpld/rpc/MPTokenIssuanceID.h>
52#include <xrpld/rpc/ServerHandler.h>
53#include <xrpl/basics/UptimeClock.h>
54#include <xrpl/basics/mulDiv.h>
55#include <xrpl/basics/safe_cast.h>
56#include <xrpl/basics/scope.h>
57#include <xrpl/beast/rfc2616.h>
58#include <xrpl/beast/utility/rngfill.h>
59#include <xrpl/crypto/RFC1751.h>
60#include <xrpl/crypto/csprng.h>
61#include <xrpl/json/to_string.h>
62#include <xrpl/protocol/BuildInfo.h>
63#include <xrpl/protocol/Feature.h>
64#include <xrpl/protocol/MultiApiJson.h>
65#include <xrpl/protocol/RPCErr.h>
66#include <xrpl/protocol/STParsedJSON.h>
67#include <xrpl/protocol/jss.h>
68#include <xrpl/resource/Fees.h>
69#include <xrpl/resource/ResourceManager.h>
70#include <boost/asio/ip/host_name.hpp>
71#include <boost/asio/steady_timer.hpp>
72
73#include <algorithm>
74#include <exception>
75#include <mutex>
76#include <optional>
77#include <set>
78#include <sstream>
79#include <string>
80#include <tuple>
81#include <unordered_map>
82#include <utility>
83
84namespace ripple {
85
86class NetworkOPsImp final : public NetworkOPs
87{
93 {
94 public:
96 bool const admin;
97 bool const local;
99 bool applied = false;
101
104 bool a,
105 bool l,
106 FailHard f)
107 : transaction(t), admin(a), local(l), failType(f)
108 {
109 XRPL_ASSERT(
111 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
112 "valid inputs");
113 }
114 };
115
119 enum class DispatchState : unsigned char {
120 none,
121 scheduled,
122 running,
123 };
124
126
142 {
143 struct Counters
144 {
145 explicit Counters() = default;
146
149 };
150
154 std::chrono::steady_clock::time_point start_ =
156 std::chrono::steady_clock::time_point const processStart_ = start_;
159
160 public:
162 {
164 .transitions = 1;
165 }
166
173 void
175
181 void
182 json(Json::Value& obj) const;
183
185 {
187 decltype(mode_) mode;
188 decltype(start_) start;
190 };
191
194 {
197 }
198 };
199
202 {
203 ServerFeeSummary() = default;
204
206 XRPAmount fee,
207 TxQ::Metrics&& escalationMetrics,
208 LoadFeeTrack const& loadFeeTrack);
209 bool
210 operator!=(ServerFeeSummary const& b) const;
211
212 bool
214 {
215 return !(*this != b);
216 }
217
222 };
223
224public:
226 Application& app,
228 bool standalone,
229 std::size_t minPeerCount,
230 bool start_valid,
231 JobQueue& job_queue,
233 ValidatorKeys const& validatorKeys,
234 boost::asio::io_service& io_svc,
235 beast::Journal journal,
236 beast::insight::Collector::ptr const& collector)
237 : app_(app)
238 , m_journal(journal)
241 , heartbeatTimer_(io_svc)
242 , clusterTimer_(io_svc)
243 , accountHistoryTxTimer_(io_svc)
244 , mConsensus(
245 app,
247 setup_FeeVote(app_.config().section("voting")),
248 app_.logs().journal("FeeVote")),
250 *m_localTX,
251 app.getInboundTransactions(),
252 beast::get_abstract_clock<std::chrono::steady_clock>(),
253 validatorKeys,
254 app_.logs().journal("LedgerConsensus"))
256 , m_job_queue(job_queue)
257 , m_standalone(standalone)
258 , minPeerCount_(start_valid ? 0 : minPeerCount)
259 , m_stats(std::bind(&NetworkOPsImp::collect_metrics, this), collector)
260 {
261 }
262
263 ~NetworkOPsImp() override
264 {
265 // This clear() is necessary to ensure the shared_ptrs in this map get
266 // destroyed NOW because the objects in this map invoke methods on this
267 // class when they are destroyed
269 }
270
271public:
273 getOperatingMode() const override;
274
276 strOperatingMode(OperatingMode const mode, bool const admin) const override;
277
279 strOperatingMode(bool const admin = false) const override;
280
281 //
282 // Transaction operations.
283 //
284
285 // Must complete immediately.
286 void
288
289 void
291 std::shared_ptr<Transaction>& transaction,
292 bool bUnlimited,
293 bool bLocal,
294 FailHard failType) override;
295
304 void
307 bool bUnlimited,
308 FailHard failType);
309
319 void
322 bool bUnlimited,
323 FailHard failtype);
324
328 void
330
336 void
338
339 //
340 // Owner functions.
341 //
342
346 AccountID const& account) override;
347
348 //
349 // Book functions.
350 //
351
352 void
355 Book const&,
356 AccountID const& uTakerID,
357 const bool bProof,
358 unsigned int iLimit,
359 Json::Value const& jvMarker,
360 Json::Value& jvResult) override;
361
362 // Ledger proposal/close functions.
363 bool
365
366 bool
369 std::string const& source) override;
370
371 void
372 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
373
374 // Network state machine.
375
376 // Used for the "jump" case.
377private:
378 void
380 bool
382
383public:
384 bool
386 uint256 const& networkClosed,
387 std::unique_ptr<std::stringstream> const& clog) override;
388 void
390 void
391 setStandAlone() override;
392
396 void
397 setStateTimer() override;
398
399 void
400 setNeedNetworkLedger() override;
401 void
402 clearNeedNetworkLedger() override;
403 bool
404 isNeedNetworkLedger() override;
405 bool
406 isFull() override;
407
408 void
409 setMode(OperatingMode om) override;
410
411 bool
412 isBlocked() override;
413 bool
414 isAmendmentBlocked() override;
415 void
416 setAmendmentBlocked() override;
417 bool
418 isAmendmentWarned() override;
419 void
420 setAmendmentWarned() override;
421 void
422 clearAmendmentWarned() override;
423 bool
424 isUNLBlocked() override;
425 void
426 setUNLBlocked() override;
427 void
428 clearUNLBlocked() override;
429 void
430 consensusViewChange() override;
431
433 getConsensusInfo() override;
435 getServerInfo(bool human, bool admin, bool counters) override;
436 void
437 clearLedgerFetch() override;
439 getLedgerFetchInfo() override;
442 std::optional<std::chrono::milliseconds> consensusDelay) override;
443 void
444 reportFeeChange() override;
445 void
447
448 void
449 updateLocalTx(ReadView const& view) override;
451 getLocalTxCount() override;
452
453 //
454 // Monitoring: publisher side.
455 //
456 void
457 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
458 void
461 std::shared_ptr<STTx const> const& transaction,
462 TER result) override;
463 void
464 pubValidation(std::shared_ptr<STValidation> const& val) override;
465
466 //--------------------------------------------------------------------------
467 //
468 // InfoSub::Source.
469 //
470 void
472 InfoSub::ref ispListener,
473 hash_set<AccountID> const& vnaAccountIDs,
474 bool rt) override;
475 void
477 InfoSub::ref ispListener,
478 hash_set<AccountID> const& vnaAccountIDs,
479 bool rt) override;
480
481 // Just remove the subscription from the tracking
482 // not from the InfoSub. Needed for InfoSub destruction
483 void
485 std::uint64_t seq,
486 hash_set<AccountID> const& vnaAccountIDs,
487 bool rt) override;
488
490 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
491 override;
492 void
494 InfoSub::ref ispListener,
495 AccountID const& account,
496 bool historyOnly) override;
497
498 void
500 std::uint64_t seq,
501 AccountID const& account,
502 bool historyOnly) override;
503
504 bool
505 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
506 bool
507 unsubLedger(std::uint64_t uListener) override;
508
509 bool
510 subBookChanges(InfoSub::ref ispListener) override;
511 bool
512 unsubBookChanges(std::uint64_t uListener) override;
513
514 bool
515 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
516 override;
517 bool
518 unsubServer(std::uint64_t uListener) override;
519
520 bool
521 subBook(InfoSub::ref ispListener, Book const&) override;
522 bool
523 unsubBook(std::uint64_t uListener, Book const&) override;
524
525 bool
526 subManifests(InfoSub::ref ispListener) override;
527 bool
528 unsubManifests(std::uint64_t uListener) override;
529 void
530 pubManifest(Manifest const&) override;
531
532 bool
533 subTransactions(InfoSub::ref ispListener) override;
534 bool
535 unsubTransactions(std::uint64_t uListener) override;
536
537 bool
538 subRTTransactions(InfoSub::ref ispListener) override;
539 bool
540 unsubRTTransactions(std::uint64_t uListener) override;
541
542 bool
543 subValidations(InfoSub::ref ispListener) override;
544 bool
545 unsubValidations(std::uint64_t uListener) override;
546
547 bool
548 subPeerStatus(InfoSub::ref ispListener) override;
549 bool
550 unsubPeerStatus(std::uint64_t uListener) override;
551 void
552 pubPeerStatus(std::function<Json::Value(void)> const&) override;
553
554 bool
555 subConsensus(InfoSub::ref ispListener) override;
556 bool
557 unsubConsensus(std::uint64_t uListener) override;
558
560 findRpcSub(std::string const& strUrl) override;
562 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
563 bool
564 tryRemoveRpcSub(std::string const& strUrl) override;
565
566 void
567 stop() override
568 {
569 {
570 boost::system::error_code ec;
571 heartbeatTimer_.cancel(ec);
572 if (ec)
573 {
574 JLOG(m_journal.error())
575 << "NetworkOPs: heartbeatTimer cancel error: "
576 << ec.message();
577 }
578
579 ec.clear();
580 clusterTimer_.cancel(ec);
581 if (ec)
582 {
583 JLOG(m_journal.error())
584 << "NetworkOPs: clusterTimer cancel error: "
585 << ec.message();
586 }
587
588 ec.clear();
589 accountHistoryTxTimer_.cancel(ec);
590 if (ec)
591 {
592 JLOG(m_journal.error())
593 << "NetworkOPs: accountHistoryTxTimer cancel error: "
594 << ec.message();
595 }
596 }
597 // Make sure that any waitHandlers pending in our timers are done.
598 using namespace std::chrono_literals;
599 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
600 }
601
602 void
603 stateAccounting(Json::Value& obj) override;
604
605private:
606 void
607 setTimer(
608 boost::asio::steady_timer& timer,
609 std::chrono::milliseconds const& expiry_time,
610 std::function<void()> onExpire,
611 std::function<void()> onError);
612 void
614 void
616 void
618 void
620
622 transJson(
623 std::shared_ptr<STTx const> const& transaction,
624 TER result,
625 bool validated,
628
629 void
632 AcceptedLedgerTx const& transaction,
633 bool last);
634
635 void
638 AcceptedLedgerTx const& transaction,
639 bool last);
640
641 void
644 std::shared_ptr<STTx const> const& transaction,
645 TER result);
646
647 void
648 pubServer();
649 void
651
653 getHostId(bool forAdmin);
654
655private:
659
660 /*
661 * With a validated ledger to separate history and future, the node
662 * streams historical txns with negative indexes starting from -1,
663 * and streams future txns starting from index 0.
664 * The SubAccountHistoryIndex struct maintains these indexes.
665 * It also has a flag stopHistorical_ for stopping streaming
666 * the historical txns.
667 */
669 {
671 // forward
673 // separate backward and forward
675 // history, backward
680
682 : accountId_(accountId)
683 , forwardTxIndex_(0)
686 , historyTxIndex_(-1)
687 , haveHistorical_(false)
688 , stopHistorical_(false)
689 {
690 }
691 };
693 {
696 };
698 {
701 };
704
708 void
712 void
714 void
716
719
721
723
725
730
732 boost::asio::steady_timer heartbeatTimer_;
733 boost::asio::steady_timer clusterTimer_;
734 boost::asio::steady_timer accountHistoryTxTimer_;
735
737
739
741
744
746
748
749 enum SubTypes {
750 sLedger, // Accepted ledgers.
751 sManifests, // Received validator manifests.
752 sServer, // When server changes connectivity state.
753 sTransactions, // All accepted transactions.
754 sRTTransactions, // All proposed and accepted transactions.
755 sValidations, // Received validations.
756 sPeerStatus, // Peer status changes.
757 sConsensusPhase, // Consensus phase
758 sBookChanges, // Per-ledger order book changes
759 sLastEntry // Any new entry must be ADDED ABOVE this one
760 };
761
763
765
767
768 // Whether we are in standalone mode.
769 bool const m_standalone;
770
771 // The number of nodes that we need to consider ourselves connected.
773
774 // Transaction batching.
779
781
784
785private:
786 struct Stats
787 {
788 template <class Handler>
790 Handler const& handler,
791 beast::insight::Collector::ptr const& collector)
792 : hook(collector->make_hook(handler))
793 , disconnected_duration(collector->make_gauge(
794 "State_Accounting",
795 "Disconnected_duration"))
796 , connected_duration(collector->make_gauge(
797 "State_Accounting",
798 "Connected_duration"))
800 collector->make_gauge("State_Accounting", "Syncing_duration"))
801 , tracking_duration(collector->make_gauge(
802 "State_Accounting",
803 "Tracking_duration"))
805 collector->make_gauge("State_Accounting", "Full_duration"))
806 , disconnected_transitions(collector->make_gauge(
807 "State_Accounting",
808 "Disconnected_transitions"))
809 , connected_transitions(collector->make_gauge(
810 "State_Accounting",
811 "Connected_transitions"))
812 , syncing_transitions(collector->make_gauge(
813 "State_Accounting",
814 "Syncing_transitions"))
815 , tracking_transitions(collector->make_gauge(
816 "State_Accounting",
817 "Tracking_transitions"))
819 collector->make_gauge("State_Accounting", "Full_transitions"))
820 {
821 }
822
829
835 };
836
837 std::mutex m_statsMutex; // Mutex to lock m_stats
839
840private:
841 void
843};
844
845//------------------------------------------------------------------------------
846
848 {"disconnected", "connected", "syncing", "tracking", "full"}};
849
851
859
860static auto const genesisAccountId = calcAccountID(
862 .first);
863
864//------------------------------------------------------------------------------
865inline OperatingMode
867{
868 return mMode;
869}
870
871inline std::string
872NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
873{
874 return strOperatingMode(mMode, admin);
875}
876
877inline void
879{
881}
882
883inline void
885{
886 needNetworkLedger_ = true;
887}
888
889inline void
891{
892 needNetworkLedger_ = false;
893}
894
895inline bool
897{
898 return needNetworkLedger_;
899}
900
901inline bool
903{
905}
906
909{
910 static std::string const hostname = boost::asio::ip::host_name();
911
912 if (forAdmin)
913 return hostname;
914
915 // For non-admin uses hash the node public key into a
916 // single RFC1751 word:
917 static std::string const shroudedHostId = [this]() {
918 auto const& id = app_.nodeIdentity();
919
920 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
921 }();
922
923 return shroudedHostId;
924}
925
926void
928{
930
931 // Only do this work if a cluster is configured
932 if (app_.cluster().size() != 0)
934}
935
936void
938 boost::asio::steady_timer& timer,
939 const std::chrono::milliseconds& expiry_time,
940 std::function<void()> onExpire,
941 std::function<void()> onError)
942{
943 // Only start the timer if waitHandlerCounter_ is not yet joined.
944 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
945 [this, onExpire, onError](boost::system::error_code const& e) {
946 if ((e.value() == boost::system::errc::success) &&
947 (!m_job_queue.isStopped()))
948 {
949 onExpire();
950 }
951 // Recover as best we can if an unexpected error occurs.
952 if (e.value() != boost::system::errc::success &&
953 e.value() != boost::asio::error::operation_aborted)
954 {
955 // Try again later and hope for the best.
956 JLOG(m_journal.error())
957 << "Timer got error '" << e.message()
958 << "'. Restarting timer.";
959 onError();
960 }
961 }))
962 {
963 timer.expires_from_now(expiry_time);
964 timer.async_wait(std::move(*optionalCountedHandler));
965 }
966}
967
968void
969NetworkOPsImp::setHeartbeatTimer()
970{
971 setTimer(
972 heartbeatTimer_,
973 mConsensus.parms().ledgerGRANULARITY,
974 [this]() {
975 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
976 processHeartbeatTimer();
977 });
978 },
979 [this]() { setHeartbeatTimer(); });
980}
981
982void
983NetworkOPsImp::setClusterTimer()
984{
985 using namespace std::chrono_literals;
986
987 setTimer(
988 clusterTimer_,
989 10s,
990 [this]() {
991 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
992 processClusterTimer();
993 });
994 },
995 [this]() { setClusterTimer(); });
996}
997
998void
999NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
1000{
1001 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
1002 << toBase58(subInfo.index_->accountId_);
1003 using namespace std::chrono_literals;
1004 setTimer(
1005 accountHistoryTxTimer_,
1006 4s,
1007 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1008 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1009}
1010
1011void
1012NetworkOPsImp::processHeartbeatTimer()
1013{
1014 RclConsensusLogger clog(
1015 "Heartbeat Timer", mConsensus.validating(), m_journal);
1016 {
1017 std::unique_lock lock{app_.getMasterMutex()};
1018
1019 // VFALCO NOTE This is for diagnosing a crash on exit
1020 LoadManager& mgr(app_.getLoadManager());
1022
1023 std::size_t const numPeers = app_.overlay().size();
1024
1025 // do we have sufficient peers? If not, we are disconnected.
1026 if (numPeers < minPeerCount_)
1027 {
1028 if (mMode != OperatingMode::DISCONNECTED)
1029 {
1030 setMode(OperatingMode::DISCONNECTED);
1032 ss << "Node count (" << numPeers << ") has fallen "
1033 << "below required minimum (" << minPeerCount_ << ").";
1034 JLOG(m_journal.warn()) << ss.str();
1035 CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
1036 }
1037 else
1038 {
1039 CLOG(clog.ss())
1040 << "already DISCONNECTED. too few peers (" << numPeers
1041 << "), need at least " << minPeerCount_;
1042 }
1043
1044 // MasterMutex lock need not be held to call setHeartbeatTimer()
1045 lock.unlock();
1046 // We do not call mConsensus.timerEntry until there are enough
1047 // peers providing meaningful inputs to consensus
1048 setHeartbeatTimer();
1049
1050 return;
1051 }
1052
1053 if (mMode == OperatingMode::DISCONNECTED)
1054 {
1055 setMode(OperatingMode::CONNECTED);
1056 JLOG(m_journal.info())
1057 << "Node count (" << numPeers << ") is sufficient.";
1058 CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
1059 << " peers. ";
1060 }
1061
1062 // Check if the last validated ledger forces a change between these
1063 // states.
1064 auto origMode = mMode.load();
1065 CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
1066 if (mMode == OperatingMode::SYNCING)
1067 setMode(OperatingMode::SYNCING);
1068 else if (mMode == OperatingMode::CONNECTED)
1069 setMode(OperatingMode::CONNECTED);
1070 auto newMode = mMode.load();
1071 if (origMode != newMode)
1072 {
1073 CLOG(clog.ss())
1074 << ", changing to " << strOperatingMode(newMode, true);
1075 }
1076 CLOG(clog.ss()) << ". ";
1077 }
1078
1079 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());
1080
1081 CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
1082 const ConsensusPhase currPhase = mConsensus.phase();
1083 if (mLastConsensusPhase != currPhase)
1084 {
1085 reportConsensusStateChange(currPhase);
1086 mLastConsensusPhase = currPhase;
1087 CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
1088 }
1089 CLOG(clog.ss()) << ". ";
1090
1091 setHeartbeatTimer();
1092}
1093
1094void
1095NetworkOPsImp::processClusterTimer()
1096{
1097 if (app_.cluster().size() == 0)
1098 return;
1099
1100 using namespace std::chrono_literals;
1101
1102 bool const update = app_.cluster().update(
1103 app_.nodeIdentity().first,
1104 "",
1105 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1106 ? app_.getFeeTrack().getLocalFee()
1107 : 0,
1108 app_.timeKeeper().now());
1109
1110 if (!update)
1111 {
1112 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1113 setClusterTimer();
1114 return;
1115 }
1116
1117 protocol::TMCluster cluster;
1118 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1119 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1120 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1121 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1122 n.set_nodeload(node.getLoadFee());
1123 if (!node.name().empty())
1124 n.set_nodename(node.name());
1125 });
1126
1127 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1128 for (auto& item : gossip.items)
1129 {
1130 protocol::TMLoadSource& node = *cluster.add_loadsources();
1131 node.set_name(to_string(item.address));
1132 node.set_cost(item.balance);
1133 }
1134 app_.overlay().foreach(send_if(
1135 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1136 peer_in_cluster()));
1137 setClusterTimer();
1138}
1139
1140//------------------------------------------------------------------------------
1141
1143NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
1144 const
1145{
1146 if (mode == OperatingMode::FULL && admin)
1147 {
1148 auto const consensusMode = mConsensus.mode();
1149 if (consensusMode != ConsensusMode::wrongLedger)
1150 {
1151 if (consensusMode == ConsensusMode::proposing)
1152 return "proposing";
1153
1154 if (mConsensus.validating())
1155 return "validating";
1156 }
1157 }
1158
1159 return states_[static_cast<std::size_t>(mode)];
1160}
1161
1162void
1163NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1164{
1165 if (isNeedNetworkLedger())
1166 {
1167 // Nothing we can do if we've never been in sync
1168 return;
1169 }
1170
1171 // this is an asynchronous interface
1172 auto const trans = sterilize(*iTrans);
1173
1174 auto const txid = trans->getTransactionID();
1175 auto const flags = app_.getHashRouter().getFlags(txid);
1176
1177 if ((flags & SF_BAD) != 0)
1178 {
1179 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1180 return;
1181 }
1182
1183 try
1184 {
1185 auto const [validity, reason] = checkValidity(
1186 app_.getHashRouter(),
1187 *trans,
1188 m_ledgerMaster.getValidatedRules(),
1189 app_.config());
1190
1191 if (validity != Validity::Valid)
1192 {
1193 JLOG(m_journal.warn())
1194 << "Submitted transaction invalid: " << reason;
1195 return;
1196 }
1197 }
1198 catch (std::exception const& ex)
1199 {
1200 JLOG(m_journal.warn())
1201 << "Exception checking transaction " << txid << ": " << ex.what();
1202
1203 return;
1204 }
1205
1206 std::string reason;
1207
1208 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1209
1210 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1211 auto t = tx;
1212 processTransaction(t, false, false, FailHard::no);
1213 });
1214}
1215
1216void
1217NetworkOPsImp::processTransaction(
1218 std::shared_ptr<Transaction>& transaction,
1219 bool bUnlimited,
1220 bool bLocal,
1221 FailHard failType)
1222{
1223 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1224 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1225
1226 if ((newFlags & SF_BAD) != 0)
1227 {
1228 // cached bad
1229 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1230 transaction->setStatus(INVALID);
1231 transaction->setResult(temBAD_SIGNATURE);
1232 return;
1233 }
1234
1235 // NOTE eahennis - I think this check is redundant,
1236 // but I'm not 100% sure yet.
1237 // If so, only cost is looking up HashRouter flags.
1238 auto const view = m_ledgerMaster.getCurrentLedger();
1239 auto const [validity, reason] = checkValidity(
1240 app_.getHashRouter(),
1241 *transaction->getSTransaction(),
1242 view->rules(),
1243 app_.config());
1244 XRPL_ASSERT(
1245 validity == Validity::Valid,
1246 "ripple::NetworkOPsImp::processTransaction : valid validity");
1247
1248 // Not concerned with local checks at this point.
1249 if (validity == Validity::SigBad)
1250 {
1251 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1252 transaction->setStatus(INVALID);
1253 transaction->setResult(temBAD_SIGNATURE);
1254 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1255 return;
1256 }
1257
1258 // canonicalize can change our pointer
1259 app_.getMasterTransaction().canonicalize(&transaction);
1260
1261 if (bLocal)
1262 doTransactionSync(transaction, bUnlimited, failType);
1263 else
1264 doTransactionAsync(transaction, bUnlimited, failType);
1265}
1266
1267void
1268NetworkOPsImp::doTransactionAsync(
1269 std::shared_ptr<Transaction> transaction,
1270 bool bUnlimited,
1271 FailHard failType)
1272{
1273 std::lock_guard lock(mMutex);
1274
1275 if (transaction->getApplying())
1276 return;
1277
1278 mTransactions.push_back(
1279 TransactionStatus(transaction, bUnlimited, false, failType));
1280 transaction->setApplying();
1281
1282 if (mDispatchState == DispatchState::none)
1283 {
1284 if (m_job_queue.addJob(
1285 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1286 {
1287 mDispatchState = DispatchState::scheduled;
1288 }
1289 }
1290}
1291
1292void
1293NetworkOPsImp::doTransactionSync(
1294 std::shared_ptr<Transaction> transaction,
1295 bool bUnlimited,
1296 FailHard failType)
1297{
1298 std::unique_lock<std::mutex> lock(mMutex);
1299
1300 if (!transaction->getApplying())
1301 {
1302 mTransactions.push_back(
1303 TransactionStatus(transaction, bUnlimited, true, failType));
1304 transaction->setApplying();
1305 }
1306
1307 do
1308 {
1309 if (mDispatchState == DispatchState::running)
1310 {
1311 // A batch processing job is already running, so wait.
1312 mCond.wait(lock);
1313 }
1314 else
1315 {
1316 apply(lock);
1317
1318 if (mTransactions.size())
1319 {
1320 // More transactions need to be applied, but by another job.
1321 if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
1322 transactionBatch();
1323 }))
1324 {
1325 mDispatchState = DispatchState::scheduled;
1326 }
1327 }
1328 }
1329 } while (transaction->getApplying());
1330}
1331
1332void
1333NetworkOPsImp::transactionBatch()
1334{
1335 std::unique_lock<std::mutex> lock(mMutex);
1336
1337 if (mDispatchState == DispatchState::running)
1338 return;
1339
1340 while (mTransactions.size())
1341 {
1342 apply(lock);
1343 }
1344}
1345
1346void
1347NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
1348{
1350 std::vector<TransactionStatus> transactions;
1351 mTransactions.swap(transactions);
1352 XRPL_ASSERT(
1353 !transactions.empty(),
1354 "ripple::NetworkOPsImp::apply : non-empty transactions");
1355 XRPL_ASSERT(
1356 mDispatchState != DispatchState::running,
1357 "ripple::NetworkOPsImp::apply : is not running");
1358
1359 mDispatchState = DispatchState::running;
1360
1361 batchLock.unlock();
1362
1363 {
1364 std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
1365 bool changed = false;
1366 {
1367 std::unique_lock ledgerLock{
1368 m_ledgerMaster.peekMutex(), std::defer_lock};
1369 std::lock(masterLock, ledgerLock);
1370
1371 app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
1372 for (TransactionStatus& e : transactions)
1373 {
1374 // we check before adding to the batch
1375 ApplyFlags flags = tapNONE;
1376 if (e.admin)
1377 flags |= tapUNLIMITED;
1378
1379 if (e.failType == FailHard::yes)
1380 flags |= tapFAIL_HARD;
1381
1382 auto const result = app_.getTxQ().apply(
1383 app_, view, e.transaction->getSTransaction(), flags, j);
1384 e.result = result.ter;
1385 e.applied = result.applied;
1386 changed = changed || result.applied;
1387 }
1388 return changed;
1389 });
1390 }
1391 if (changed)
1392 reportFeeChange();
1393
1394 std::optional<LedgerIndex> validatedLedgerIndex;
1395 if (auto const l = m_ledgerMaster.getValidatedLedger())
1396 validatedLedgerIndex = l->info().seq;
1397
1398 auto newOL = app_.openLedger().current();
1399 for (TransactionStatus& e : transactions)
1400 {
1401 e.transaction->clearSubmitResult();
1402
1403 if (e.applied)
1404 {
1405 pubProposedTransaction(
1406 newOL, e.transaction->getSTransaction(), e.result);
1407 e.transaction->setApplied();
1408 }
1409
1410 e.transaction->setResult(e.result);
1411
1412 if (isTemMalformed(e.result))
1413 app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);
1414
1415#ifdef DEBUG
1416 if (e.result != tesSUCCESS)
1417 {
1418 std::string token, human;
1419
1420 if (transResultInfo(e.result, token, human))
1421 {
1422 JLOG(m_journal.info())
1423 << "TransactionResult: " << token << ": " << human;
1424 }
1425 }
1426#endif
1427
1428 bool addLocal = e.local;
1429
1430 if (e.result == tesSUCCESS)
1431 {
1432 JLOG(m_journal.debug())
1433 << "Transaction is now included in open ledger";
1434 e.transaction->setStatus(INCLUDED);
1435
1436 auto const& txCur = e.transaction->getSTransaction();
1437 auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);
1438 if (txNext)
1439 {
1440 std::string reason;
1441 auto const trans = sterilize(*txNext);
1442 auto t = std::make_shared<Transaction>(trans, reason, app_);
1443 submit_held.emplace_back(t, false, false, FailHard::no);
1444 t->setApplying();
1445 }
1446 }
1447 else if (e.result == tefPAST_SEQ)
1448 {
1449 // duplicate or conflict
1450 JLOG(m_journal.info()) << "Transaction is obsolete";
1451 e.transaction->setStatus(OBSOLETE);
1452 }
1453 else if (e.result == terQUEUED)
1454 {
1455 JLOG(m_journal.debug())
1456 << "Transaction is likely to claim a"
1457 << " fee, but is queued until fee drops";
1458
1459 e.transaction->setStatus(HELD);
1460 // Add to held transactions, because it could get
1461 // kicked out of the queue, and this will try to
1462 // put it back.
1463 m_ledgerMaster.addHeldTransaction(e.transaction);
1464 e.transaction->setQueued();
1465 e.transaction->setKept();
1466 }
1467 else if (isTerRetry(e.result))
1468 {
1469 if (e.failType != FailHard::yes)
1470 {
1471 // transaction should be held
1472 JLOG(m_journal.debug())
1473 << "Transaction should be held: " << e.result;
1474 e.transaction->setStatus(HELD);
1475 m_ledgerMaster.addHeldTransaction(e.transaction);
1476 e.transaction->setKept();
1477 }
1478 }
1479 else
1480 {
1481 JLOG(m_journal.debug())
1482 << "Status other than success " << e.result;
1483 e.transaction->setStatus(INVALID);
1484 }
1485
1486 auto const enforceFailHard =
1487 e.failType == FailHard::yes && !isTesSuccess(e.result);
1488
1489 if (addLocal && !enforceFailHard)
1490 {
1491 m_localTX->push_back(
1492 m_ledgerMaster.getCurrentLedgerIndex(),
1493 e.transaction->getSTransaction());
1494 e.transaction->setKept();
1495 }
1496
1497 if ((e.applied ||
1498 ((mMode != OperatingMode::FULL) &&
1499 (e.failType != FailHard::yes) && e.local) ||
1500 (e.result == terQUEUED)) &&
1501 !enforceFailHard)
1502 {
1503 auto const toSkip =
1504 app_.getHashRouter().shouldRelay(e.transaction->getID());
1505
1506 if (toSkip)
1507 {
1508 protocol::TMTransaction tx;
1509 Serializer s;
1510
1511 e.transaction->getSTransaction()->add(s);
1512 tx.set_rawtransaction(s.data(), s.size());
1513 tx.set_status(protocol::tsCURRENT);
1514 tx.set_receivetimestamp(
1515 app_.timeKeeper().now().time_since_epoch().count());
1516 tx.set_deferred(e.result == terQUEUED);
1517 // FIXME: This should be when we received it
1518 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1519 e.transaction->setBroadcast();
1520 }
1521 }
1522
1523 if (validatedLedgerIndex)
1524 {
1525 auto [fee, accountSeq, availableSeq] =
1526 app_.getTxQ().getTxRequiredFeeAndSeq(
1527 *newOL, e.transaction->getSTransaction());
1528 e.transaction->setCurrentLedgerState(
1529 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1530 }
1531 }
1532 }
1533
1534 batchLock.lock();
1535
1536 for (TransactionStatus& e : transactions)
1537 e.transaction->clearApplying();
1538
1539 if (!submit_held.empty())
1540 {
1541 if (mTransactions.empty())
1542 mTransactions.swap(submit_held);
1543 else
1544 for (auto& e : submit_held)
1545 mTransactions.push_back(std::move(e));
1546 }
1547
1548 mCond.notify_all();
1549
1550 mDispatchState = DispatchState::none;
1551}
1552
1553//
1554// Owner functions
1555//
1556
1558NetworkOPsImp::getOwnerInfo(
1560 AccountID const& account)
1561{
1562 Json::Value jvObjects(Json::objectValue);
1563 auto root = keylet::ownerDir(account);
1564 auto sleNode = lpLedger->read(keylet::page(root));
1565 if (sleNode)
1566 {
1567 std::uint64_t uNodeDir;
1568
1569 do
1570 {
1571 for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1572 {
1573 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1574 XRPL_ASSERT(
1575 sleCur,
1576 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1577
1578 switch (sleCur->getType())
1579 {
1580 case ltOFFER:
1581 if (!jvObjects.isMember(jss::offers))
1582 jvObjects[jss::offers] =
1584
1585 jvObjects[jss::offers].append(
1586 sleCur->getJson(JsonOptions::none));
1587 break;
1588
1589 case ltRIPPLE_STATE:
1590 if (!jvObjects.isMember(jss::ripple_lines))
1591 {
1592 jvObjects[jss::ripple_lines] =
1594 }
1595
1596 jvObjects[jss::ripple_lines].append(
1597 sleCur->getJson(JsonOptions::none));
1598 break;
1599
1600 case ltACCOUNT_ROOT:
1601 case ltDIR_NODE:
1602 default:
1603 UNREACHABLE(
1604 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1605 "type");
1606 break;
1607 }
1608 }
1609
1610 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1611
1612 if (uNodeDir)
1613 {
1614 sleNode = lpLedger->read(keylet::page(root, uNodeDir));
1615 XRPL_ASSERT(
1616 sleNode,
1617 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1618 }
1619 } while (uNodeDir);
1620 }
1621
1622 return jvObjects;
1623}
1624
1625//
1626// Other
1627//
1628
1629inline bool
1630NetworkOPsImp::isBlocked()
1631{
1632 return isAmendmentBlocked() || isUNLBlocked();
1633}
1634
1635inline bool
1636NetworkOPsImp::isAmendmentBlocked()
1637{
1638 return amendmentBlocked_;
1639}
1640
1641void
1642NetworkOPsImp::setAmendmentBlocked()
1643{
1644 amendmentBlocked_ = true;
1645 setMode(OperatingMode::CONNECTED);
1646}
1647
1648inline bool
1649NetworkOPsImp::isAmendmentWarned()
1650{
1651 return !amendmentBlocked_ && amendmentWarned_;
1652}
1653
1654inline void
1655NetworkOPsImp::setAmendmentWarned()
1656{
1657 amendmentWarned_ = true;
1658}
1659
1660inline void
1661NetworkOPsImp::clearAmendmentWarned()
1662{
1663 amendmentWarned_ = false;
1664}
1665
1666inline bool
1667NetworkOPsImp::isUNLBlocked()
1668{
1669 return unlBlocked_;
1670}
1671
1672void
1673NetworkOPsImp::setUNLBlocked()
1674{
1675 unlBlocked_ = true;
1676 setMode(OperatingMode::CONNECTED);
1677}
1678
1679inline void
1680NetworkOPsImp::clearUNLBlocked()
1681{
1682 unlBlocked_ = false;
1683}
1684
1685bool
1686NetworkOPsImp::checkLastClosedLedger(
1687 const Overlay::PeerSequence& peerList,
1688 uint256& networkClosed)
1689{
1690 // Returns true if there's an *abnormal* ledger issue, normal changing in
1691 // TRACKING mode should return false. Do we have sufficient validations for
1692 // our last closed ledger? Or do sufficient nodes agree? And do we have no
1693 // better ledger available? If so, we are either tracking or full.
1694
1695 JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";
1696
1697 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1698
1699 if (!ourClosed)
1700 return false;
1701
1702 uint256 closedLedger = ourClosed->info().hash;
1703 uint256 prevClosedLedger = ourClosed->info().parentHash;
1704 JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
1705 JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;
1706
1707 //-------------------------------------------------------------------------
1708 // Determine preferred last closed ledger
1709
1710 auto& validations = app_.getValidations();
1711 JLOG(m_journal.debug())
1712 << "ValidationTrie " << Json::Compact(validations.getJsonTrie());
1713
1714 // Will rely on peer LCL if no trusted validations exist
1716 peerCounts[closedLedger] = 0;
1717 if (mMode >= OperatingMode::TRACKING)
1718 peerCounts[closedLedger]++;
1719
1720 for (auto& peer : peerList)
1721 {
1722 uint256 peerLedger = peer->getClosedLedgerHash();
1723
1724 if (peerLedger.isNonZero())
1725 ++peerCounts[peerLedger];
1726 }
1727
1728 for (auto const& it : peerCounts)
1729 JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;
1730
1731 uint256 preferredLCL = validations.getPreferredLCL(
1732 RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
1733 m_ledgerMaster.getValidLedgerIndex(),
1734 peerCounts);
1735
1736 bool switchLedgers = preferredLCL != closedLedger;
1737 if (switchLedgers)
1738 closedLedger = preferredLCL;
1739 //-------------------------------------------------------------------------
1740 if (switchLedgers && (closedLedger == prevClosedLedger))
1741 {
1742 // don't switch to our own previous ledger
1743 JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
1744 networkClosed = ourClosed->info().hash;
1745 switchLedgers = false;
1746 }
1747 else
1748 networkClosed = closedLedger;
1749
1750 if (!switchLedgers)
1751 return false;
1752
1753 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1754
1755 if (!consensus)
1756 consensus = app_.getInboundLedgers().acquire(
1757 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1758
1759 if (consensus &&
1760 (!m_ledgerMaster.canBeCurrent(consensus) ||
1761 !m_ledgerMaster.isCompatible(
1762 *consensus, m_journal.debug(), "Not switching")))
1763 {
1764 // Don't switch to a ledger not on the validated chain
1765 // or with an invalid close time or sequence
1766 networkClosed = ourClosed->info().hash;
1767 return false;
1768 }
1769
1770 JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
1771 JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
1772 << getJson({*ourClosed, {}});
1773 JLOG(m_journal.info()) << "Net LCL " << closedLedger;
1774
1775 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1776 {
1777 setMode(OperatingMode::CONNECTED);
1778 }
1779
1780 if (consensus)
1781 {
1782 // FIXME: If this rewinds the ledger sequence, or has the same
1783 // sequence, we should update the status on any stored transactions
1784 // in the invalidated ledgers.
1785 switchLastClosedLedger(consensus);
1786 }
1787
1788 return true;
1789}
1790
1791void
1792NetworkOPsImp::switchLastClosedLedger(
1793 std::shared_ptr<Ledger const> const& newLCL)
1794{
1795 // set the newLCL as our last closed ledger -- this is abnormal code
1796 JLOG(m_journal.error())
1797 << "JUMP last closed ledger to " << newLCL->info().hash;
1798
1799 clearNeedNetworkLedger();
1800
1801 // Update fee computations.
1802 app_.getTxQ().processClosedLedger(app_, *newLCL, true);
1803
1804 // Caller must own master lock
1805 {
1806 // Apply tx in old open ledger to new
1807 // open ledger. Then apply local tx.
1808
1809 auto retries = m_localTX->getTxSet();
1810 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1812 if (lastVal)
1813 rules = makeRulesGivenLedger(*lastVal, app_.config().features);
1814 else
1815 rules.emplace(app_.config().features);
1816 app_.openLedger().accept(
1817 app_,
1818 *rules,
1819 newLCL,
1820 OrderedTxs({}),
1821 false,
1822 retries,
1823 tapNONE,
1824 "jump",
1825 [&](OpenView& view, beast::Journal j) {
1826 // Stuff the ledger with transactions from the queue.
1827 return app_.getTxQ().accept(app_, view);
1828 });
1829 }
1830
1831 m_ledgerMaster.switchLCL(newLCL);
1832
1833 protocol::TMStatusChange s;
1834 s.set_newevent(protocol::neSWITCHED_LEDGER);
1835 s.set_ledgerseq(newLCL->info().seq);
1836 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1837 s.set_ledgerhashprevious(
1838 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
1839 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
1840
1841 app_.overlay().foreach(
1842 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
1843}
1844
1845bool
1846NetworkOPsImp::beginConsensus(
1847 uint256 const& networkClosed,
1849{
1850 XRPL_ASSERT(
1851 networkClosed.isNonZero(),
1852 "ripple::NetworkOPsImp::beginConsensus : nonzero input");
1853
1854 auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
1855
1856 JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
1857 << " with LCL " << closingInfo.parentHash;
1858
1859 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
1860
1861 if (!prevLedger)
1862 {
1863 // this shouldn't happen unless we jump ledgers
1864 if (mMode == OperatingMode::FULL)
1865 {
1866 JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
1867 setMode(OperatingMode::TRACKING);
1868 CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
1869 }
1870
1871 CLOG(clog) << "beginConsensus no previous ledger. ";
1872 return false;
1873 }
1874
1875 XRPL_ASSERT(
1876 prevLedger->info().hash == closingInfo.parentHash,
1877 "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
1878 "parent");
1879 XRPL_ASSERT(
1880 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
1881 "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
1882 "hash");
1883
1884 if (prevLedger->rules().enabled(featureNegativeUNL))
1885 app_.validators().setNegativeUNL(prevLedger->negativeUNL());
1886 TrustChanges const changes = app_.validators().updateTrusted(
1887 app_.getValidations().getCurrentNodeIDs(),
1888 closingInfo.parentCloseTime,
1889 *this,
1890 app_.overlay(),
1891 app_.getHashRouter());
1892
1893 if (!changes.added.empty() || !changes.removed.empty())
1894 {
1895 app_.getValidations().trustChanged(changes.added, changes.removed);
1896 // Update the AmendmentTable so it tracks the current validators.
1897 app_.getAmendmentTable().trustChanged(
1898 app_.validators().getQuorumKeys().second);
1899 }
1900
1901 mConsensus.startRound(
1902 app_.timeKeeper().closeTime(),
1903 networkClosed,
1904 prevLedger,
1905 changes.removed,
1906 changes.added,
1907 clog);
1908
1909 const ConsensusPhase currPhase = mConsensus.phase();
1910 if (mLastConsensusPhase != currPhase)
1911 {
1912 reportConsensusStateChange(currPhase);
1913 mLastConsensusPhase = currPhase;
1914 }
1915
1916 JLOG(m_journal.debug()) << "Initiating consensus engine";
1917 return true;
1918}
1919
1920bool
1921NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
1922{
1923 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
1924}
1925
1926void
1927NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
1928{
1929 // We now have an additional transaction set
1930 // either created locally during the consensus process
1931 // or acquired from a peer
1932
1933 // Inform peers we have this set
1934 protocol::TMHaveTransactionSet msg;
1935 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
1936 msg.set_status(protocol::tsHAVE);
1937 app_.overlay().foreach(
1938 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
1939
1940 // We acquired it because consensus asked us to
1941 if (fromAcquire)
1942 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
1943}
1944
1945void
1946NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
1947{
1948 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
1949
1950 for (auto const& it : app_.overlay().getActivePeers())
1951 {
1952 if (it && (it->getClosedLedgerHash() == deadLedger))
1953 {
1954 JLOG(m_journal.trace()) << "Killing obsolete peer status";
1955 it->cycleStatus();
1956 }
1957 }
1958
1959 uint256 networkClosed;
1960 bool ledgerChange =
1961 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
1962
1963 if (networkClosed.isZero())
1964 {
1965 CLOG(clog) << "endConsensus last closed ledger is zero. ";
1966 return;
1967 }
1968
1969 // WRITEME: Unless we are in FULL and in the process of doing a consensus,
1970 // we must count how many nodes share our LCL, how many nodes disagree with
1971 // our LCL, and how many validations our LCL has. We also want to check
1972 // timing to make sure there shouldn't be a newer LCL. We need this
1973 // information to do the next three tests.
1974
1975 if (((mMode == OperatingMode::CONNECTED) ||
1976 (mMode == OperatingMode::SYNCING)) &&
1977 !ledgerChange)
1978 {
1979 // Count number of peers that agree with us and UNL nodes whose
1980 // validations we have for LCL. If the ledger is good enough, go to
1981 // TRACKING - TODO
1982 if (!needNetworkLedger_)
1983 setMode(OperatingMode::TRACKING);
1984 }
1985
1986 if (((mMode == OperatingMode::CONNECTED) ||
1987 (mMode == OperatingMode::TRACKING)) &&
1988 !ledgerChange)
1989 {
1990 // check if the ledger is good enough to go to FULL
1991 // Note: Do not go to FULL if we don't have the previous ledger
1992 // check if the ledger is bad enough to go to CONNECTE D -- TODO
1993 auto current = m_ledgerMaster.getCurrentLedger();
1994 if (app_.timeKeeper().now() < (current->info().parentCloseTime +
1995 2 * current->info().closeTimeResolution))
1996 {
1997 setMode(OperatingMode::FULL);
1998 }
1999 }
2000
2001 beginConsensus(networkClosed, clog);
2002}
2003
2004void
2005NetworkOPsImp::consensusViewChange()
2006{
2007 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2008 {
2009 setMode(OperatingMode::CONNECTED);
2010 }
2011}
2012
2013void
2014NetworkOPsImp::pubManifest(Manifest const& mo)
2015{
2016 // VFALCO consider std::shared_mutex
2017 std::lock_guard sl(mSubLock);
2018
2019 if (!mStreamMaps[sManifests].empty())
2020 {
2022
2023 jvObj[jss::type] = "manifestReceived";
2024 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
2025 if (mo.signingKey)
2026 jvObj[jss::signing_key] =
2027 toBase58(TokenType::NodePublic, *mo.signingKey);
2028 jvObj[jss::seq] = Json::UInt(mo.sequence);
2029 if (auto sig = mo.getSignature())
2030 jvObj[jss::signature] = strHex(*sig);
2031 jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
2032 if (!mo.domain.empty())
2033 jvObj[jss::domain] = mo.domain;
2034 jvObj[jss::manifest] = strHex(mo.serialized);
2035
2036 for (auto i = mStreamMaps[sManifests].begin();
2037 i != mStreamMaps[sManifests].end();)
2038 {
2039 if (auto p = i->second.lock())
2040 {
2041 p->send(jvObj, true);
2042 ++i;
2043 }
2044 else
2045 {
2046 i = mStreamMaps[sManifests].erase(i);
2047 }
2048 }
2049 }
2050}
2051
2052NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
2053 XRPAmount fee,
2054 TxQ::Metrics&& escalationMetrics,
2055 LoadFeeTrack const& loadFeeTrack)
2056 : loadFactorServer{loadFeeTrack.getLoadFactor()}
2057 , loadBaseServer{loadFeeTrack.getLoadBase()}
2058 , baseFee{fee}
2059 , em{std::move(escalationMetrics)}
2060{
2061}
2062
2063bool
2065 NetworkOPsImp::ServerFeeSummary const& b) const
2066{
2067 if (loadFactorServer != b.loadFactorServer ||
2068 loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
2069 em.has_value() != b.em.has_value())
2070 return true;
2071
2072 if (em && b.em)
2073 {
2074 return (
2075 em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
2076 em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
2077 em->referenceFeeLevel != b.em->referenceFeeLevel);
2078 }
2079
2080 return false;
2081}
2082
// Clamp a uint64 value to the uint32 range, because Json::Value cannot
// represent 64-bit integers.
static std::uint32_t
trunc32(std::uint64_t v)
{
    // NOTE(review): the signature and this constant were dropped by the
    // doc extraction; restored from the surrounding code.
    constexpr std::uint64_t max32 = std::numeric_limits<std::uint32_t>::max();

    return std::min(max32, v);
};
2091
2092void
2094{
2095 // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
2096 // list into a local array while holding the lock then release
2097 // the lock and call send on everyone.
2098 //
2100
2101 if (!mStreamMaps[sServer].empty())
2102 {
2104
2106 app_.openLedger().current()->fees().base,
2108 app_.getFeeTrack()};
2109
2110 jvObj[jss::type] = "serverStatus";
2111 jvObj[jss::server_status] = strOperatingMode();
2112 jvObj[jss::load_base] = f.loadBaseServer;
2113 jvObj[jss::load_factor_server] = f.loadFactorServer;
2114 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2115
2116 if (f.em)
2117 {
2118 auto const loadFactor = std::max(
2119 safe_cast<std::uint64_t>(f.loadFactorServer),
2120 mulDiv(
2121 f.em->openLedgerFeeLevel,
2122 f.loadBaseServer,
2123 f.em->referenceFeeLevel)
2125
2126 jvObj[jss::load_factor] = trunc32(loadFactor);
2127 jvObj[jss::load_factor_fee_escalation] =
2128 f.em->openLedgerFeeLevel.jsonClipped();
2129 jvObj[jss::load_factor_fee_queue] =
2130 f.em->minProcessingFeeLevel.jsonClipped();
2131 jvObj[jss::load_factor_fee_reference] =
2132 f.em->referenceFeeLevel.jsonClipped();
2133 }
2134 else
2135 jvObj[jss::load_factor] = f.loadFactorServer;
2136
2137 mLastFeeSummary = f;
2138
2139 for (auto i = mStreamMaps[sServer].begin();
2140 i != mStreamMaps[sServer].end();)
2141 {
2142 InfoSub::pointer p = i->second.lock();
2143
2144 // VFALCO TODO research the possibility of using thread queues and
2145 // linearizing the deletion of subscribers with the
2146 // sending of JSON data.
2147 if (p)
2148 {
2149 p->send(jvObj, true);
2150 ++i;
2151 }
2152 else
2153 {
2154 i = mStreamMaps[sServer].erase(i);
2155 }
2156 }
2157 }
2158}
2159
2160void
2162{
2164
2165 auto& streamMap = mStreamMaps[sConsensusPhase];
2166 if (!streamMap.empty())
2167 {
2169 jvObj[jss::type] = "consensusPhase";
2170 jvObj[jss::consensus] = to_string(phase);
2171
2172 for (auto i = streamMap.begin(); i != streamMap.end();)
2173 {
2174 if (auto p = i->second.lock())
2175 {
2176 p->send(jvObj, true);
2177 ++i;
2178 }
2179 else
2180 {
2181 i = streamMap.erase(i);
2182 }
2183 }
2184 }
2185}
2186
2187void
2189{
2190 // VFALCO consider std::shared_mutex
2192
2193 if (!mStreamMaps[sValidations].empty())
2194 {
2196
2197 auto const signerPublic = val->getSignerPublic();
2198
2199 jvObj[jss::type] = "validationReceived";
2200 jvObj[jss::validation_public_key] =
2201 toBase58(TokenType::NodePublic, signerPublic);
2202 jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
2203 jvObj[jss::signature] = strHex(val->getSignature());
2204 jvObj[jss::full] = val->isFull();
2205 jvObj[jss::flags] = val->getFlags();
2206 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2207 jvObj[jss::data] = strHex(val->getSerializer().slice());
2208
2209 if (auto version = (*val)[~sfServerVersion])
2210 jvObj[jss::server_version] = std::to_string(*version);
2211
2212 if (auto cookie = (*val)[~sfCookie])
2213 jvObj[jss::cookie] = std::to_string(*cookie);
2214
2215 if (auto hash = (*val)[~sfValidatedHash])
2216 jvObj[jss::validated_hash] = strHex(*hash);
2217
2218 auto const masterKey =
2219 app_.validatorManifests().getMasterKey(signerPublic);
2220
2221 if (masterKey != signerPublic)
2222 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
2223
2224 // NOTE *seq is a number, but old API versions used string. We replace
2225 // number with a string using MultiApiJson near end of this function
2226 if (auto const seq = (*val)[~sfLedgerSequence])
2227 jvObj[jss::ledger_index] = *seq;
2228
2229 if (val->isFieldPresent(sfAmendments))
2230 {
2231 jvObj[jss::amendments] = Json::Value(Json::arrayValue);
2232 for (auto const& amendment : val->getFieldV256(sfAmendments))
2233 jvObj[jss::amendments].append(to_string(amendment));
2234 }
2235
2236 if (auto const closeTime = (*val)[~sfCloseTime])
2237 jvObj[jss::close_time] = *closeTime;
2238
2239 if (auto const loadFee = (*val)[~sfLoadFee])
2240 jvObj[jss::load_fee] = *loadFee;
2241
2242 if (auto const baseFee = val->at(~sfBaseFee))
2243 jvObj[jss::base_fee] = static_cast<double>(*baseFee);
2244
2245 if (auto const reserveBase = val->at(~sfReserveBase))
2246 jvObj[jss::reserve_base] = *reserveBase;
2247
2248 if (auto const reserveInc = val->at(~sfReserveIncrement))
2249 jvObj[jss::reserve_inc] = *reserveInc;
2250
2251 // (The ~ operator converts the Proxy to a std::optional, which
2252 // simplifies later operations)
2253 if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2254 baseFeeXRP && baseFeeXRP->native())
2255 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2256
2257 if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2258 reserveBaseXRP && reserveBaseXRP->native())
2259 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2260
2261 if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2262 reserveIncXRP && reserveIncXRP->native())
2263 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2264
2265 // NOTE Use MultiApiJson to publish two slightly different JSON objects
2266 // for consumers supporting different API versions
2267 MultiApiJson multiObj{jvObj};
2268 multiObj.visit(
2269 RPC::apiVersion<1>, //
2270 [](Json::Value& jvTx) {
2271 // Type conversion for older API versions to string
2272 if (jvTx.isMember(jss::ledger_index))
2273 {
2274 jvTx[jss::ledger_index] =
2275 std::to_string(jvTx[jss::ledger_index].asUInt());
2276 }
2277 });
2278
2279 for (auto i = mStreamMaps[sValidations].begin();
2280 i != mStreamMaps[sValidations].end();)
2281 {
2282 if (auto p = i->second.lock())
2283 {
2284 multiObj.visit(
2285 p->getApiVersion(), //
2286 [&](Json::Value const& jv) { p->send(jv, true); });
2287 ++i;
2288 }
2289 else
2290 {
2291 i = mStreamMaps[sValidations].erase(i);
2292 }
2293 }
2294 }
2295}
2296
2297void
2299{
2301
2302 if (!mStreamMaps[sPeerStatus].empty())
2303 {
2304 Json::Value jvObj(func());
2305
2306 jvObj[jss::type] = "peerStatusChange";
2307
2308 for (auto i = mStreamMaps[sPeerStatus].begin();
2309 i != mStreamMaps[sPeerStatus].end();)
2310 {
2311 InfoSub::pointer p = i->second.lock();
2312
2313 if (p)
2314 {
2315 p->send(jvObj, true);
2316 ++i;
2317 }
2318 else
2319 {
2320 i = mStreamMaps[sPeerStatus].erase(i);
2321 }
2322 }
2323 }
2324}
2325
2326void
2328{
2329 using namespace std::chrono_literals;
2330 if (om == OperatingMode::CONNECTED)
2331 {
2334 }
2335 else if (om == OperatingMode::SYNCING)
2336 {
2339 }
2340
2341 if ((om > OperatingMode::CONNECTED) && isBlocked())
2343
2344 if (mMode == om)
2345 return;
2346
2347 mMode = om;
2348
2349 accounting_.mode(om);
2350
2351 JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
2352 pubServer();
2353}
2354
2355bool
2358 std::string const& source)
2359{
2360 JLOG(m_journal.trace())
2361 << "recvValidation " << val->getLedgerHash() << " from " << source;
2362
2364 BypassAccept bypassAccept = BypassAccept::no;
2365 try
2366 {
2367 if (pendingValidations_.contains(val->getLedgerHash()))
2368 bypassAccept = BypassAccept::yes;
2369 else
2370 pendingValidations_.insert(val->getLedgerHash());
2371 scope_unlock unlock(lock);
2372 handleNewValidation(app_, val, source, bypassAccept, m_journal);
2373 }
2374 catch (std::exception const& e)
2375 {
2376 JLOG(m_journal.warn())
2377 << "Exception thrown for handling new validation "
2378 << val->getLedgerHash() << ": " << e.what();
2379 }
2380 catch (...)
2381 {
2382 JLOG(m_journal.warn())
2383 << "Unknown exception thrown for handling new validation "
2384 << val->getLedgerHash();
2385 }
2386 if (bypassAccept == BypassAccept::no)
2387 {
2388 pendingValidations_.erase(val->getLedgerHash());
2389 }
2390 lock.unlock();
2391
2392 pubValidation(val);
2393
2394 JLOG(m_journal.debug()) << [this, &val]() -> auto {
2396 ss << "VALIDATION: " << val->render() << " master_key: ";
2397 auto master = app_.validators().getTrustedKey(val->getSignerPublic());
2398 if (master)
2399 {
2400 ss << toBase58(TokenType::NodePublic, *master);
2401 }
2402 else
2403 {
2404 ss << "none";
2405 }
2406 return ss.str();
2407 }();
2408
2409 // We will always relay trusted validations; if configured, we will
2410 // also relay all untrusted validations.
2411 return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2412}
2413
2416{
2417 return mConsensus.getJson(true);
2418}
2419
2421NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2422{
2424
2425 // System-level warnings
2426 {
2427 Json::Value warnings{Json::arrayValue};
2428 if (isAmendmentBlocked())
2429 {
2430 Json::Value& w = warnings.append(Json::objectValue);
2431 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2432 w[jss::message] =
2433 "This server is amendment blocked, and must be updated to be "
2434 "able to stay in sync with the network.";
2435 }
2436 if (isUNLBlocked())
2437 {
2438 Json::Value& w = warnings.append(Json::objectValue);
2439 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2440 w[jss::message] =
2441 "This server has an expired validator list. validators.txt "
2442 "may be incorrectly configured or some [validator_list_sites] "
2443 "may be unreachable.";
2444 }
2445 if (admin && isAmendmentWarned())
2446 {
2447 Json::Value& w = warnings.append(Json::objectValue);
2448 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2449 w[jss::message] =
2450 "One or more unsupported amendments have reached majority. "
2451 "Upgrade to the latest version before they are activated "
2452 "to avoid being amendment blocked.";
2453 if (auto const expected =
2455 {
2456 auto& d = w[jss::details] = Json::objectValue;
2457 d[jss::expected_date] = expected->time_since_epoch().count();
2458 d[jss::expected_date_UTC] = to_string(*expected);
2459 }
2460 }
2461
2462 if (warnings.size())
2463 info[jss::warnings] = std::move(warnings);
2464 }
2465
2466 // hostid: unique string describing the machine
2467 if (human)
2468 info[jss::hostid] = getHostId(admin);
2469
2470 // domain: if configured with a domain, report it:
2471 if (!app_.config().SERVER_DOMAIN.empty())
2472 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2473
2474 info[jss::build_version] = BuildInfo::getVersionString();
2475
2476 info[jss::server_state] = strOperatingMode(admin);
2477
2478 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2480
2482 info[jss::network_ledger] = "waiting";
2483
2484 info[jss::validation_quorum] =
2485 static_cast<Json::UInt>(app_.validators().quorum());
2486
2487 if (admin)
2488 {
2489 switch (app_.config().NODE_SIZE)
2490 {
2491 case 0:
2492 info[jss::node_size] = "tiny";
2493 break;
2494 case 1:
2495 info[jss::node_size] = "small";
2496 break;
2497 case 2:
2498 info[jss::node_size] = "medium";
2499 break;
2500 case 3:
2501 info[jss::node_size] = "large";
2502 break;
2503 case 4:
2504 info[jss::node_size] = "huge";
2505 break;
2506 }
2507
2508 auto when = app_.validators().expires();
2509
2510 if (!human)
2511 {
2512 if (when)
2513 info[jss::validator_list_expires] =
2514 safe_cast<Json::UInt>(when->time_since_epoch().count());
2515 else
2516 info[jss::validator_list_expires] = 0;
2517 }
2518 else
2519 {
2520 auto& x = (info[jss::validator_list] = Json::objectValue);
2521
2522 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2523
2524 if (when)
2525 {
2526 if (*when == TimeKeeper::time_point::max())
2527 {
2528 x[jss::expiration] = "never";
2529 x[jss::status] = "active";
2530 }
2531 else
2532 {
2533 x[jss::expiration] = to_string(*when);
2534
2535 if (*when > app_.timeKeeper().now())
2536 x[jss::status] = "active";
2537 else
2538 x[jss::status] = "expired";
2539 }
2540 }
2541 else
2542 {
2543 x[jss::status] = "unknown";
2544 x[jss::expiration] = "unknown";
2545 }
2546 }
2547
2548#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2549 {
2550 auto& x = (info[jss::git] = Json::objectValue);
2551#ifdef GIT_COMMIT_HASH
2552 x[jss::hash] = GIT_COMMIT_HASH;
2553#endif
2554#ifdef GIT_BRANCH
2555 x[jss::branch] = GIT_BRANCH;
2556#endif
2557 }
2558#endif
2559 }
2560 info[jss::io_latency_ms] =
2561 static_cast<Json::UInt>(app_.getIOLatency().count());
2562
2563 if (admin)
2564 {
2565 if (auto const localPubKey = app_.validators().localPublicKey();
2566 localPubKey && app_.getValidationPublicKey())
2567 {
2568 info[jss::pubkey_validator] =
2569 toBase58(TokenType::NodePublic, localPubKey.value());
2570 }
2571 else
2572 {
2573 info[jss::pubkey_validator] = "none";
2574 }
2575 }
2576
2577 if (counters)
2578 {
2579 info[jss::counters] = app_.getPerfLog().countersJson();
2580
2581 Json::Value nodestore(Json::objectValue);
2582 app_.getNodeStore().getCountsJson(nodestore);
2583 info[jss::counters][jss::nodestore] = nodestore;
2584 info[jss::current_activities] = app_.getPerfLog().currentJson();
2585 }
2586
2587 info[jss::pubkey_node] =
2589
2590 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2591
2593 info[jss::amendment_blocked] = true;
2594
2595 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2596
2597 if (fp != 0)
2598 info[jss::fetch_pack] = Json::UInt(fp);
2599
2600 info[jss::peers] = Json::UInt(app_.overlay().size());
2601
2602 Json::Value lastClose = Json::objectValue;
2603 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2604
2605 if (human)
2606 {
2607 lastClose[jss::converge_time_s] =
2609 }
2610 else
2611 {
2612 lastClose[jss::converge_time] =
2614 }
2615
2616 info[jss::last_close] = lastClose;
2617
2618 // info[jss::consensus] = mConsensus.getJson();
2619
2620 if (admin)
2621 info[jss::load] = m_job_queue.getJson();
2622
2623 if (auto const netid = app_.overlay().networkID())
2624 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2625
2626 auto const escalationMetrics =
2628
2629 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2630 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2631 /* Scale the escalated fee level to unitless "load factor".
2632 In practice, this just strips the units, but it will continue
2633 to work correctly if either base value ever changes. */
2634 auto const loadFactorFeeEscalation =
2635 mulDiv(
2636 escalationMetrics.openLedgerFeeLevel,
2637 loadBaseServer,
2638 escalationMetrics.referenceFeeLevel)
2640
2641 auto const loadFactor = std::max(
2642 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2643
2644 if (!human)
2645 {
2646 info[jss::load_base] = loadBaseServer;
2647 info[jss::load_factor] = trunc32(loadFactor);
2648 info[jss::load_factor_server] = loadFactorServer;
2649
2650 /* Json::Value doesn't support uint64, so clamp to max
2651 uint32 value. This is mostly theoretical, since there
2652 probably isn't enough extant XRP to drive the factor
2653 that high.
2654 */
2655 info[jss::load_factor_fee_escalation] =
2656 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2657 info[jss::load_factor_fee_queue] =
2658 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2659 info[jss::load_factor_fee_reference] =
2660 escalationMetrics.referenceFeeLevel.jsonClipped();
2661 }
2662 else
2663 {
2664 info[jss::load_factor] =
2665 static_cast<double>(loadFactor) / loadBaseServer;
2666
2667 if (loadFactorServer != loadFactor)
2668 info[jss::load_factor_server] =
2669 static_cast<double>(loadFactorServer) / loadBaseServer;
2670
2671 if (admin)
2672 {
2674 if (fee != loadBaseServer)
2675 info[jss::load_factor_local] =
2676 static_cast<double>(fee) / loadBaseServer;
2677 fee = app_.getFeeTrack().getRemoteFee();
2678 if (fee != loadBaseServer)
2679 info[jss::load_factor_net] =
2680 static_cast<double>(fee) / loadBaseServer;
2681 fee = app_.getFeeTrack().getClusterFee();
2682 if (fee != loadBaseServer)
2683 info[jss::load_factor_cluster] =
2684 static_cast<double>(fee) / loadBaseServer;
2685 }
2686 if (escalationMetrics.openLedgerFeeLevel !=
2687 escalationMetrics.referenceFeeLevel &&
2688 (admin || loadFactorFeeEscalation != loadFactor))
2689 info[jss::load_factor_fee_escalation] =
2690 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2691 escalationMetrics.referenceFeeLevel);
2692 if (escalationMetrics.minProcessingFeeLevel !=
2693 escalationMetrics.referenceFeeLevel)
2694 info[jss::load_factor_fee_queue] =
2695 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2696 escalationMetrics.referenceFeeLevel);
2697 }
2698
2699 bool valid = false;
2700 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2701
2702 if (lpClosed)
2703 valid = true;
2704 else
2705 lpClosed = m_ledgerMaster.getClosedLedger();
2706
2707 if (lpClosed)
2708 {
2709 XRPAmount const baseFee = lpClosed->fees().base;
2711 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2712 l[jss::hash] = to_string(lpClosed->info().hash);
2713
2714 if (!human)
2715 {
2716 l[jss::base_fee] = baseFee.jsonClipped();
2717 l[jss::reserve_base] =
2718 lpClosed->fees().accountReserve(0).jsonClipped();
2719 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2720 l[jss::close_time] = Json::Value::UInt(
2721 lpClosed->info().closeTime.time_since_epoch().count());
2722 }
2723 else
2724 {
2725 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2726 l[jss::reserve_base_xrp] =
2727 lpClosed->fees().accountReserve(0).decimalXRP();
2728 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2729
2730 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2731 std::abs(closeOffset.count()) >= 60)
2732 l[jss::close_time_offset] =
2733 static_cast<std::uint32_t>(closeOffset.count());
2734
2735 constexpr std::chrono::seconds highAgeThreshold{1000000};
2737 {
2738 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2739 l[jss::age] =
2740 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2741 }
2742 else
2743 {
2744 auto lCloseTime = lpClosed->info().closeTime;
2745 auto closeTime = app_.timeKeeper().closeTime();
2746 if (lCloseTime <= closeTime)
2747 {
2748 using namespace std::chrono_literals;
2749 auto age = closeTime - lCloseTime;
2750 l[jss::age] =
2751 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2752 }
2753 }
2754 }
2755
2756 if (valid)
2757 info[jss::validated_ledger] = l;
2758 else
2759 info[jss::closed_ledger] = l;
2760
2761 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2762 if (!lpPublished)
2763 info[jss::published_ledger] = "none";
2764 else if (lpPublished->info().seq != lpClosed->info().seq)
2765 info[jss::published_ledger] = lpPublished->info().seq;
2766 }
2767
2768 accounting_.json(info);
2769 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2770 info[jss::jq_trans_overflow] =
2772 info[jss::peer_disconnects] =
2774 info[jss::peer_disconnects_resources] =
2776
2777 // This array must be sorted in increasing order.
2778 static constexpr std::array<std::string_view, 7> protocols{
2779 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2780 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2781 {
2783 for (auto const& port : app_.getServerHandler().setup().ports)
2784 {
2785 // Don't publish admin ports for non-admin users
2786 if (!admin &&
2787 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2788 port.admin_user.empty() && port.admin_password.empty()))
2789 continue;
2792 std::begin(port.protocol),
2793 std::end(port.protocol),
2794 std::begin(protocols),
2795 std::end(protocols),
2796 std::back_inserter(proto));
2797 if (!proto.empty())
2798 {
2799 auto& jv = ports.append(Json::Value(Json::objectValue));
2800 jv[jss::port] = std::to_string(port.port);
2801 jv[jss::protocol] = Json::Value{Json::arrayValue};
2802 for (auto const& p : proto)
2803 jv[jss::protocol].append(p);
2804 }
2805 }
2806
2807 if (app_.config().exists(SECTION_PORT_GRPC))
2808 {
2809 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
2810 auto const optPort = grpcSection.get("port");
2811 if (optPort && grpcSection.get("ip"))
2812 {
2813 auto& jv = ports.append(Json::Value(Json::objectValue));
2814 jv[jss::port] = *optPort;
2815 jv[jss::protocol] = Json::Value{Json::arrayValue};
2816 jv[jss::protocol].append("grpc");
2817 }
2818 }
2819 info[jss::ports] = std::move(ports);
2820 }
2821
2822 return info;
2823}
2824
// NOTE(review): this extract omits original lines 2826 and 2828 (the
// function's qualified name and its one-line body), so only the return
// type and braces are visible — confirm against the full source before
// editing.
2825void
2827{
2829}
2830
// Returns the InboundLedgers subsystem's info object.
// NOTE(review): the signature lines (2831-2832) are missing from this
// extract — presumably this is NetworkOPsImp::getLedgerFetchInfo()
// returning a Json::Value; confirm against the full source.
2833{
2834 return app_.getInboundLedgers().getInfo();
2835}
2836
// Publish a proposed (not yet validated) transaction to every live
// subscriber of the real-time transactions stream, then fan out to
// per-account real-time subscribers via pubProposedAccountTransaction.
// Dead subscribers (expired weak_ptr) are erased from the stream map
// while iterating.
// NOTE(review): lines 2838 (function name) and 2847 (presumably the
// stream-map lock acquisition, given the bare scope block) are missing
// from this extract — confirm against the full source.
2837void
2839 std::shared_ptr<ReadView const> const& ledger,
2840 std::shared_ptr<STTx const> const& transaction,
2841 TER result)
2842{
 // Build one JSON payload covering all supported API versions.
2843 MultiApiJson jvObj =
2844 transJson(transaction, result, false, ledger, std::nullopt);
2845
2846 {
2848
 // Walk sRTTransactions subscribers; send to live ones, prune dead.
2849 auto it = mStreamMaps[sRTTransactions].begin();
2850 while (it != mStreamMaps[sRTTransactions].end())
2851 {
2852 InfoSub::pointer p = it->second.lock();
2853
2854 if (p)
2855 {
 // Serialize for this subscriber's negotiated API version.
2856 jvObj.visit(
2857 p->getApiVersion(), //
2858 [&](Json::Value const& jv) { p->send(jv, true); });
2859 ++it;
2860 }
2861 else
2862 {
2863 it = mStreamMaps[sRTTransactions].erase(it);
2864 }
2865 }
2866 }
2867
2868 pubProposedAccountTransaction(ledger, transaction, result);
2869}
2870
// Publish a closed/validated ledger: notify sLedger and sBookChanges
// stream subscribers, kick off any delayed account-history
// subscriptions on the first published ledger, and then publish each
// accepted transaction in the ledger.
// NOTE(review): several original lines are missing from this extract
// (e.g. 2872 function name/parameter, 2877 the AcceptedLedger fetch
// assignment target, 2895 a lock, 2899 the Json object declaration,
// 2917/2920 the completeLedgers conditional, 2968 the
// subAccountHistoryStart call, 2981 the pubValidatedTransaction call)
// — confirm against the full source before editing.
2871void
2873{
2874 // Ledgers are published only when they acquire sufficient validations
2875 // Holes are filled across connection loss or other catastrophe
2876
 // Reuse a cached AcceptedLedger if one exists for this hash.
2878 app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
2879 if (!alpAccepted)
2880 {
2881 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
2882 app_.getAcceptedLedgerCache().canonicalize_replace_client(
2883 lpAccepted->info().hash, alpAccepted);
2884 }
2885
2886 XRPL_ASSERT(
2887 alpAccepted->getLedger().get() == lpAccepted.get(),
2888 "ripple::NetworkOPsImp::pubLedger : accepted input");
2889
2890 {
2891 JLOG(m_journal.debug())
2892 << "Publishing ledger " << lpAccepted->info().seq << " "
2893 << lpAccepted->info().hash;
2894
2896
2897 if (!mStreamMaps[sLedger].empty())
2898 {
2900
 // Ledger-closed notification for the "ledger" stream.
2901 jvObj[jss::type] = "ledgerClosed";
2902 jvObj[jss::ledger_index] = lpAccepted->info().seq;
2903 jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
2904 jvObj[jss::ledger_time] = Json::Value::UInt(
2905 lpAccepted->info().closeTime.time_since_epoch().count());
2906
 // fee_ref is only reported pre-XRPFees amendment.
2907 if (!lpAccepted->rules().enabled(featureXRPFees))
2908 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
2909 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
2910 jvObj[jss::reserve_base] =
2911 lpAccepted->fees().accountReserve(0).jsonClipped();
2912 jvObj[jss::reserve_inc] =
2913 lpAccepted->fees().increment.jsonClipped();
2914
2915 jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
2916
2918 {
2919 jvObj[jss::validated_ledgers] =
2921 }
2922
 // Send to live sLedger subscribers; prune expired ones.
2923 auto it = mStreamMaps[sLedger].begin();
2924 while (it != mStreamMaps[sLedger].end())
2925 {
2926 InfoSub::pointer p = it->second.lock();
2927 if (p)
2928 {
2929 p->send(jvObj, true);
2930 ++it;
2931 }
2932 else
2933 it = mStreamMaps[sLedger].erase(it);
2934 }
2935 }
2936
 // Same pattern for the book_changes stream.
2937 if (!mStreamMaps[sBookChanges].empty())
2938 {
2939 Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
2940
2941 auto it = mStreamMaps[sBookChanges].begin();
2942 while (it != mStreamMaps[sBookChanges].end())
2943 {
2944 InfoSub::pointer p = it->second.lock();
2945 if (p)
2946 {
2947 p->send(jvObj, true);
2948 ++it;
2949 }
2950 else
2951 it = mStreamMaps[sBookChanges].erase(it);
2952 }
2953 }
2954
2955 {
 // One-time start of account-history subscriptions that were
 // deferred because no validated ledger existed at subscribe time.
2956 static bool firstTime = true;
2957 if (firstTime)
2958 {
2959 // First validated ledger, start delayed SubAccountHistory
2960 firstTime = false;
2961 for (auto& outer : mSubAccountHistory)
2962 {
2963 for (auto& inner : outer.second)
2964 {
2965 auto& subInfo = inner.second;
2966 if (subInfo.index_->separationLedgerSeq_ == 0)
2967 {
2969 alpAccepted->getLedger(), subInfo);
2970 }
2971 }
2972 }
2973 }
2974 }
2975 }
2976
2977 // Don't lock since pubAcceptedTransaction is locking.
2978 for (auto const& accTx : *alpAccepted)
2979 {
2980 JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
 // The final argument flags the last transaction of the ledger.
2982 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
2983 }
2984}
2985
// Detect a change in the fee summary and, only if something changed,
// schedule a job to push the new server state to subscribers.
// NOTE(review): lines 2987 (function name), 2989/2991 (the fee-summary
// object `f` being constructed) and 2997 (the job-queue addJob call)
// are missing from this extract — confirm against the full source.
2986void
2988{
2990 app_.openLedger().current()->fees().base,
2992 app_.getFeeTrack()};
2993
2994 // only schedule the job if something has changed
2995 if (f != mLastFeeSummary)
2996 {
2998 jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
2999 pubServer();
3000 });
3001 }
3002}
3003
// Schedule a job that publishes the new consensus phase to subscribers.
// NOTE(review): lines 3005 (function name/parameter) and 3007-3008
// (the job-queue call and job type) are missing from this extract —
// confirm against the full source.
3004void
3006{
3009 "reportConsensusStateChange->pubConsensus",
3010 [this, phase]() { pubConsensus(phase); });
3011}
3012
// Forwarder: sweep stale entries from the local-transaction set against
// the given view.
// NOTE(review): line 3014 (function name/parameter) is missing from
// this extract — confirm against the full source.
3013inline void
3015{
3016 m_localTX->sweep(view);
3017}
// Forwarder: number of transactions currently held in the local-tx set.
// NOTE(review): line 3019 (function name) is missing from this extract
// — confirm against the full source.
3018inline std::size_t
3020{
3021 return m_localTX->size();
3022}
3023
3024// This routine should only be used to publish accepted or validated
3025// transactions.
// Build the multi-API-version JSON representation of a transaction for
// stream publication: the transaction itself, optional metadata, ledger
// linkage (index/hash), engine result, optional owner_funds for offer
// creates, and per-version finishing (tx_json/hash placement).
// NOTE(review): lines 3026-3027 (return type / function name), 3032
// (the optional meta parameter), 3034 (the jvObj declaration),
// 3050/3052 (the meta post-processing calls), 3092 (an accountFunds
// argument) and 3100/3103 (the visit call shape) are missing from this
// extract — confirm against the full source.
3028 std::shared_ptr<STTx const> const& transaction,
3029 TER result,
3030 bool validated,
3031 std::shared_ptr<ReadView const> const& ledger,
3033{
3035 std::string sToken;
3036 std::string sHuman;
3037
 // Translate the TER into its token and human-readable message.
3038 transResultInfo(result, sToken, sHuman);
3039
3040 jvObj[jss::type] = "transaction";
3041 // NOTE jvObj is not a finished object for either API version. After
3042 // it's populated, we need to finish it for a specific API version. This is
3043 // done in a loop, near the end of this function.
3044 jvObj[jss::transaction] =
3045 transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3046
3047 if (meta)
3048 {
3049 jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
3051 jvObj[jss::meta], *ledger, transaction, meta->get());
3053 jvObj[jss::meta], transaction, meta->get());
3054 }
3055
 // A closed ledger has a meaningful hash; an open one does not.
3056 if (!ledger->open())
3057 jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3058
3059 if (validated)
3060 {
3061 jvObj[jss::ledger_index] = ledger->info().seq;
3062 jvObj[jss::transaction][jss::date] =
3063 ledger->info().closeTime.time_since_epoch().count();
3064 jvObj[jss::validated] = true;
3065 jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3066
3067 // WRITEME: Put the account next seq here
3068 }
3069 else
3070 {
3071 jvObj[jss::validated] = false;
3072 jvObj[jss::ledger_current_index] = ledger->info().seq;
3073 }
3074
3075 jvObj[jss::status] = validated ? "closed" : "proposed";
3076 jvObj[jss::engine_result] = sToken;
3077 jvObj[jss::engine_result_code] = result;
3078 jvObj[jss::engine_result_message] = sHuman;
3079
 // For offer creates funded by a third-party issuer, report how much
 // the owner can actually spend.
3080 if (transaction->getTxnType() == ttOFFER_CREATE)
3081 {
3082 auto const account = transaction->getAccountID(sfAccount);
3083 auto const amount = transaction->getFieldAmount(sfTakerGets);
3084
3085 // If the offer create is not self funded then add the owner balance
3086 if (account != amount.issue().account)
3087 {
3088 auto const ownerFunds = accountFunds(
3089 *ledger,
3090 account,
3091 amount,
3093 app_.journal("View"));
3094 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3095 }
3096 }
3097
3098 std::string const hash = to_string(transaction->getTransactionID());
3099 MultiApiJson multiObj{jvObj};
 // Finish each API version's copy: insert DeliverMax, and for v2+
 // rename "transaction" to "tx_json" with the hash at top level.
3101 multiObj.visit(), //
3102 [&]<unsigned Version>(
3104 RPC::insertDeliverMax(
3105 jvTx[jss::transaction], transaction->getTxnType(), Version);
3106
3107 if constexpr (Version > 1)
3108 {
3109 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3110 jvTx[jss::hash] = hash;
3111 }
3112 else
3113 {
3114 jvTx[jss::transaction][jss::hash] = hash;
3115 }
3116 });
3117
3118 return multiObj;
3119}
3120
// Publish a validated transaction to both the transactions and
// real-time transactions streams, feed successful ones to the order
// book database, then fan out to per-account subscribers.
// NOTE(review): lines 3122 (function name) and 3135 (presumably the
// stream-map lock acquisition) are missing from this extract — confirm
// against the full source.
3121void
3123 std::shared_ptr<ReadView const> const& ledger,
3124 const AcceptedLedgerTx& transaction,
3125 bool last)
3126{
3127 auto const& stTxn = transaction.getTxn();
3128
3129 // Create two different Json objects, for different API versions
3130 auto const metaRef = std::ref(transaction.getMeta());
3131 auto const trResult = transaction.getResult();
3132 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3133
3134 {
3136
 // Validated transactions go to sTransactions subscribers...
3137 auto it = mStreamMaps[sTransactions].begin();
3138 while (it != mStreamMaps[sTransactions].end())
3139 {
3140 InfoSub::pointer p = it->second.lock();
3141
3142 if (p)
3143 {
3144 jvObj.visit(
3145 p->getApiVersion(), //
3146 [&](Json::Value const& jv) { p->send(jv, true); });
3147 ++it;
3148 }
3149 else
3150 it = mStreamMaps[sTransactions].erase(it);
3151 }
3152
 // ...and also to sRTTransactions subscribers.
3153 it = mStreamMaps[sRTTransactions].begin();
3154
3155 while (it != mStreamMaps[sRTTransactions].end())
3156 {
3157 InfoSub::pointer p = it->second.lock();
3158
3159 if (p)
3160 {
3161 jvObj.visit(
3162 p->getApiVersion(), //
3163 [&](Json::Value const& jv) { p->send(jv, true); });
3164 ++it;
3165 }
3166 else
3167 it = mStreamMaps[sRTTransactions].erase(it);
3168 }
3169 }
3170
 // Only successful transactions affect order books.
3171 if (transaction.getResult() == tesSUCCESS)
3172 app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3173
3174 pubAccountTransaction(ledger, transaction, last);
3175}
3176
// Publish a validated transaction to subscribers of the accounts it
// affected: real-time account subscribers, plain account subscribers,
// and forward-direction account-history subscribers. Expired weak_ptr
// entries are pruned as encountered.
// NOTE(review): lines 3178 (function name), 3183 (the `notify` set
// declaration), 3190 (a lock), 3193 (the third operand of the
// emptiness check — presumably mSubAccountHistory) and 3293 (the
// second operand of the XRPL_ASSERT comparison) are missing from this
// extract — confirm against the full source.
3177void
3179 std::shared_ptr<ReadView const> const& ledger,
3180 AcceptedLedgerTx const& transaction,
3181 bool last)
3182{
3184 int iProposed = 0;
3185 int iAccepted = 0;
3186
3187 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3188 auto const currLedgerSeq = ledger->seq();
3189 {
3191
3192 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3194 {
 // Collect subscribers for every account the tx affected.
3195 for (auto const& affectedAccount : transaction.getAffected())
3196 {
3197 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3198 simiIt != mSubRTAccount.end())
3199 {
3200 auto it = simiIt->second.begin();
3201
3202 while (it != simiIt->second.end())
3203 {
3204 InfoSub::pointer p = it->second.lock();
3205
3206 if (p)
3207 {
3208 notify.insert(p);
3209 ++it;
3210 ++iProposed;
3211 }
3212 else
3213 it = simiIt->second.erase(it);
3214 }
3215 }
3216
3217 if (auto simiIt = mSubAccount.find(affectedAccount);
3218 simiIt != mSubAccount.end())
3219 {
3220 auto it = simiIt->second.begin();
3221 while (it != simiIt->second.end())
3222 {
3223 InfoSub::pointer p = it->second.lock();
3224
3225 if (p)
3226 {
3227 notify.insert(p);
3228 ++it;
3229 ++iAccepted;
3230 }
3231 else
3232 it = simiIt->second.erase(it);
3233 }
3234 }
3235
 // Account-history subscribers only receive ledgers past
 // their separation point (earlier ones come from the
 // backfill job).
3236 if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3237 histoIt != mSubAccountHistory.end())
3238 {
3239 auto& subs = histoIt->second;
3240 auto it = subs.begin();
3241 while (it != subs.end())
3242 {
3243 SubAccountHistoryInfoWeak const& info = it->second;
3244 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3245 {
3246 ++it;
3247 continue;
3248 }
3249
3250 if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3251 {
3252 accountHistoryNotify.emplace_back(
3253 SubAccountHistoryInfo{isSptr, info.index_});
3254 ++it;
3255 }
3256 else
3257 {
3258 it = subs.erase(it);
3259 }
3260 }
3261 if (subs.empty())
3262 mSubAccountHistory.erase(histoIt);
3263 }
3264 }
3265 }
3266 }
3267
3268 JLOG(m_journal.trace())
3269 << "pubAccountTransaction: " << "proposed=" << iProposed
3270 << ", accepted=" << iAccepted;
3271
3272 if (!notify.empty() || !accountHistoryNotify.empty())
3273 {
3274 auto const& stTxn = transaction.getTxn();
3275
3276 // Create two different Json objects, for different API versions
3277 auto const metaRef = std::ref(transaction.getMeta());
3278 auto const trResult = transaction.getResult();
3279 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3280
3281 for (InfoSub::ref isrListener : notify)
3282 {
3283 jvObj.visit(
3284 isrListener->getApiVersion(), //
3285 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3286 }
3287
 // Flag the final transaction of the ledger for history streams.
3288 if (last)
3289 jvObj.set(jss::account_history_boundary, true);
3290
3291 XRPL_ASSERT(
3292 jvObj.isMember(jss::account_history_tx_stream) ==
3294 "ripple::NetworkOPsImp::pubAccountTransaction : "
3295 "account_history_tx_stream not set");
3296 for (auto& info : accountHistoryNotify)
3297 {
 // Forward-history indices count up from 0 per subscription.
3298 auto& index = info.index_;
3299 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3300 jvObj.set(jss::account_history_tx_first, true);
3301
3302 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3303
3304 jvObj.visit(
3305 info.sink_->getApiVersion(), //
3306 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3307 }
3308 }
3309}
3310
// Publish a proposed transaction to real-time subscribers of the
// accounts it mentions. Mirrors pubAccountTransaction but for the
// unvalidated case; bails out early when nobody subscribes to
// real-time account updates.
// NOTE(review): lines 3312 (function name), 3314 (the tx parameter),
// 3317 (the `notify` set declaration), 3323 (a lock), 3329 (third
// operand of the emptiness check) and 3370 (second operand of the
// XRPL_ASSERT comparison) are missing from this extract — confirm
// against the full source.
3311void
3313 std::shared_ptr<ReadView const> const& ledger,
3315 TER result)
3316{
3318 int iProposed = 0;
3319
3320 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3321
3322 {
3324
 // Proposed txs only matter to real-time account subscribers.
3325 if (mSubRTAccount.empty())
3326 return;
3327
3328 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3330 {
3331 for (auto const& affectedAccount : tx->getMentionedAccounts())
3332 {
3333 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3334 simiIt != mSubRTAccount.end())
3335 {
3336 auto it = simiIt->second.begin();
3337
3338 while (it != simiIt->second.end())
3339 {
3340 InfoSub::pointer p = it->second.lock();
3341
3342 if (p)
3343 {
3344 notify.insert(p);
3345 ++it;
3346 ++iProposed;
3347 }
3348 else
3349 it = simiIt->second.erase(it);
3350 }
3351 }
3352 }
3353 }
3354 }
3355
3356 JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3357
3358 if (!notify.empty() || !accountHistoryNotify.empty())
3359 {
3360 // Create two different Json objects, for different API versions
3361 MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3362
3363 for (InfoSub::ref isrListener : notify)
3364 jvObj.visit(
3365 isrListener->getApiVersion(), //
3366 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3367
3368 XRPL_ASSERT(
3369 jvObj.isMember(jss::account_history_tx_stream) ==
3371 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3372 "account_history_tx_stream not set");
 // NOTE(review): accountHistoryNotify is never populated in the
 // visible code, so this loop appears to be dead here — confirm
 // against the full source before relying on that.
3373 for (auto& info : accountHistoryNotify)
3374 {
3375 auto& index = info.index_;
3376 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3377 jvObj.set(jss::account_history_tx_first, true);
3378 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3379 jvObj.visit(
3380 info.sink_->getApiVersion(), //
3381 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3382 }
3383 }
3384}
3385
3386//
3387// Monitoring
3388//
3389
// Subscribe a listener to a set of accounts, either on the real-time
// map (rt == true) or the validated-transaction map. Records the
// subscription both on the InfoSub itself and in the server-side map.
// NOTE(review): lines 3391 (function name) and 3406 (presumably the
// subscription-map lock) are missing from this extract — confirm
// against the full source.
3390void
3392 InfoSub::ref isrListener,
3393 hash_set<AccountID> const& vnaAccountIDs,
3394 bool rt)
3395{
3396 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3397
 // First record the accounts on the listener itself.
3398 for (auto const& naAccountID : vnaAccountIDs)
3399 {
3400 JLOG(m_journal.trace())
3401 << "subAccount: account: " << toBase58(naAccountID);
3402
3403 isrListener->insertSubAccountInfo(naAccountID, rt);
3404 }
3405
3407
 // Then record the listener in the server-side per-account map.
3408 for (auto const& naAccountID : vnaAccountIDs)
3409 {
3410 auto simIterator = subMap.find(naAccountID);
3411 if (simIterator == subMap.end())
3412 {
3413 // Not found, note that account has a new single listner.
3414 SubMapType usisElement;
3415 usisElement[isrListener->getSeq()] = isrListener;
3416 // VFALCO NOTE This is making a needless copy of naAccountID
3417 subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3418 }
3419 else
3420 {
3421 // Found, note that the account has another listener.
3422 simIterator->second[isrListener->getSeq()] = isrListener;
3423 }
3424 }
3425}
3426
// Unsubscribe a listener from a set of accounts: remove the records on
// the InfoSub, then delegate server-side removal to
// unsubAccountInternal.
// NOTE(review): line 3428 (function name) is missing from this extract.
3427void
3429 InfoSub::ref isrListener,
3430 hash_set<AccountID> const& vnaAccountIDs,
3431 bool rt)
3432{
3433 for (auto const& naAccountID : vnaAccountIDs)
3434 {
3435 // Remove from the InfoSub
3436 isrListener->deleteSubAccountInfo(naAccountID, rt)
3437 }
3438
3439 // Remove from the server
3440 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3441}
3442
// Remove the listener (identified by its sequence number) from the
// server-side per-account map, dropping map entries that become empty.
// NOTE(review): lines 3444 (function name) and 3449 (presumably the
// subscription-map lock) are missing from this extract — confirm
// against the full source.
3443void
3445 std::uint64_t uSeq,
3446 hash_set<AccountID> const& vnaAccountIDs,
3447 bool rt)
3448{
3450
3451 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3452
3453 for (auto const& naAccountID : vnaAccountIDs)
3454 {
3455 auto simIterator = subMap.find(naAccountID);
3456
3457 if (simIterator != subMap.end())
3458 {
3459 // Found
3460 simIterator->second.erase(uSeq);
3461
3462 if (simIterator->second.empty())
3463 {
3464 // Don't need hash entry.
3465 subMap.erase(simIterator);
3466 }
3467 }
3468 }
3469}
3470
// Schedule the background job that streams an account's historical
// transactions (newest first, back toward the genesis ledger) to an
// account_history_tx_stream subscriber. Aborts the subscription with
// rpcINTERNAL if no usable database backend exists.
// NOTE(review): this extract omits several original lines (e.g. 3472
// the subInfo parameter, 3498-3499 the job-queue addJob call, 3579-3582
// the getMoreTxns return type/marker parameter, 3587-3588 part of the
// database cast, 3633 the LedgerMaster range call, 3647 the reschedule
// call, 3651 the marker declaration, 3681 the ledger lookup, 3691 the
// stTxn declaration) — confirm against the full source before editing.
3471void
3473{
 // Determine once which relational backend is available.
3474 enum DatabaseType { Sqlite, None };
3475 static const auto databaseType = [&]() -> DatabaseType {
3476 // Use a dynamic_cast to return DatabaseType::None
3477 // on failure.
3478 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3479 {
3480 return DatabaseType::Sqlite;
3481 }
3482 return DatabaseType::None;
3483 }();
3484
3485 if (databaseType == DatabaseType::None)
3486 {
3487 JLOG(m_journal.error())
3488 << "AccountHistory job for account "
3489 << toBase58(subInfo.index_->accountId_) << " no database";
3490 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3491 {
3492 sptr->send(rpcError(rpcINTERNAL), true);
3493 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3494 }
3495 return;
3496 }
3497
3500 "AccountHistoryTxStream",
3501 [this, dbType = databaseType, subInfo]() {
3502 auto const& accountId = subInfo.index_->accountId_;
3503 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3504 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3505
3506 JLOG(m_journal.trace())
3507 << "AccountHistory job for account " << toBase58(accountId)
3508 << " started. lastLedgerSeq=" << lastLedgerSeq;
3509
 // Decide whether a given tx is the account's very first one.
3510 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3511 std::shared_ptr<TxMeta> const& meta) -> bool {
3512 /*
3513 * genesis account: first tx is the one with seq 1
3514 * other account: first tx is the one created the account
3515 */
3516 if (accountId == genesisAccountId)
3517 {
3518 auto stx = tx->getSTransaction();
3519 if (stx->getAccountID(sfAccount) == accountId &&
3520 stx->getSeqProxy().value() == 1)
3521 return true;
3522 }
3523
 // Otherwise look for the AccountRoot creation in metadata.
3524 for (auto& node : meta->getNodes())
3525 {
3526 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3527 continue;
3528
3529 if (node.isFieldPresent(sfNewFields))
3530 {
3531 if (auto inner = dynamic_cast<const STObject*>(
3532 node.peekAtPField(sfNewFields));
3533 inner)
3534 {
3535 if (inner->isFieldPresent(sfAccount) &&
3536 inner->getAccountID(sfAccount) == accountId)
3537 {
3538 return true;
3539 }
3540 }
3541 }
3542 }
3543
3544 return false;
3545 };
3546
 // Send helper for a plain Json::Value; optionally unsubscribes.
 // Returns false if the subscriber is gone.
3547 auto send = [&](Json::Value const& jvObj,
3548 bool unsubscribe) -> bool {
3549 if (auto sptr = subInfo.sinkWptr_.lock())
3550 {
3551 sptr->send(jvObj, true);
3552 if (unsubscribe)
3553 unsubAccountHistory(sptr, accountId, false);
3554 return true;
3555 }
3556
3557 return false;
3558 };
3559
 // Same, but for a version-aware MultiApiJson payload.
3560 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3561 bool unsubscribe) -> bool {
3562 if (auto sptr = subInfo.sinkWptr_.lock())
3563 {
3564 jvObj.visit(
3565 sptr->getApiVersion(), //
3566 [&](Json::Value const& jv) { sptr->send(jv, true); });
3567
3568 if (unsubscribe)
3569 unsubAccountHistory(sptr, accountId, false);
3570 return true;
3571 }
3572
3573 return false;
3574 };
3575
 // Page a batch of this account's transactions out of the DB.
3576 auto getMoreTxns =
3577 [&](std::uint32_t minLedger,
3578 std::uint32_t maxLedger,
3583 switch (dbType)
3584 {
3585 case Sqlite: {
3586 auto db = static_cast<SQLiteDatabase*>(
3589 accountId, minLedger, maxLedger, marker, 0, true};
3590 return db->newestAccountTxPage(options);
3591 }
3592 default: {
3593 UNREACHABLE(
3594 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3595 "getMoreTxns : invalid database type");
3596 return {};
3597 }
3598 }
3599 };
3600
3601 /*
3602 * search backward until the genesis ledger or asked to stop
3603 */
3604 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3605 {
 // Charge the subscriber for each DB-heavy iteration.
3606 int feeChargeCount = 0;
3607 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3608 {
3609 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3610 ++feeChargeCount;
3611 }
3612 else
3613 {
3614 JLOG(m_journal.trace())
3615 << "AccountHistory job for account "
3616 << toBase58(accountId) << " no InfoSub. Fee charged "
3617 << feeChargeCount << " times.";
3618 return;
3619 }
3620
3621 // try to search in 1024 ledgers till reaching genesis ledgers
3622 auto startLedgerSeq =
3623 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3624 JLOG(m_journal.trace())
3625 << "AccountHistory job for account " << toBase58(accountId)
3626 << ", working on ledger range [" << startLedgerSeq << ","
3627 << lastLedgerSeq << "]";
3628
 // Only proceed when the whole window is locally validated.
3629 auto haveRange = [&]() -> bool {
3630 std::uint32_t validatedMin = UINT_MAX;
3631 std::uint32_t validatedMax = 0;
3632 auto haveSomeValidatedLedgers =
3634 validatedMin, validatedMax);
3635
3636 return haveSomeValidatedLedgers &&
3637 validatedMin <= startLedgerSeq &&
3638 lastLedgerSeq <= validatedMax;
3639 }();
3640
3641 if (!haveRange)
3642 {
3643 JLOG(m_journal.debug())
3644 << "AccountHistory reschedule job for account "
3645 << toBase58(accountId) << ", incomplete ledger range ["
3646 << startLedgerSeq << "," << lastLedgerSeq << "]";
3648 return;
3649 }
3650
 // Drain all pages of txs within the current window.
3652 while (!subInfo.index_->stopHistorical_)
3653 {
3654 auto dbResult =
3655 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3656 if (!dbResult)
3657 {
3658 JLOG(m_journal.debug())
3659 << "AccountHistory job for account "
3660 << toBase58(accountId) << " getMoreTxns failed.";
3661 send(rpcError(rpcINTERNAL), true);
3662 return;
3663 }
3664
3665 auto const& txns = dbResult->first;
3666 marker = dbResult->second;
3667 size_t num_txns = txns.size();
3668 for (size_t i = 0; i < num_txns; ++i)
3669 {
3670 auto const& [tx, meta] = txns[i];
3671
3672 if (!tx || !meta)
3673 {
3674 JLOG(m_journal.debug())
3675 << "AccountHistory job for account "
3676 << toBase58(accountId) << " empty tx or meta.";
3677 send(rpcError(rpcINTERNAL), true);
3678 return;
3679 }
3680 auto curTxLedger =
3682 tx->getLedger());
3683 if (!curTxLedger)
3684 {
3685 JLOG(m_journal.debug())
3686 << "AccountHistory job for account "
3687 << toBase58(accountId) << " no ledger.";
3688 send(rpcError(rpcINTERNAL), true);
3689 return;
3690 }
3692 tx->getSTransaction();
3693 if (!stTxn)
3694 {
3695 JLOG(m_journal.debug())
3696 << "AccountHistory job for account "
3697 << toBase58(accountId)
3698 << " getSTransaction failed.";
3699 send(rpcError(rpcINTERNAL), true);
3700 return;
3701 }
3702
3703 auto const mRef = std::ref(*meta);
3704 auto const trR = meta->getResultTER();
3705 MultiApiJson jvTx =
3706 transJson(stTxn, trR, true, curTxLedger, mRef);
3707
 // Historical indices count down (negative direction).
3708 jvTx.set(
3709 jss::account_history_tx_index, txHistoryIndex--);
 // Mark the last tx of each ledger as a boundary.
3710 if (i + 1 == num_txns ||
3711 txns[i + 1].first->getLedger() != tx->getLedger())
3712 jvTx.set(jss::account_history_boundary, true);
3713
3714 if (isFirstTx(tx, meta))
3715 {
3716 jvTx.set(jss::account_history_tx_first, true);
3717 sendMultiApiJson(jvTx, false);
3718
3719 JLOG(m_journal.trace())
3720 << "AccountHistory job for account "
3721 << toBase58(accountId)
3722 << " done, found last tx.";
3723 return;
3724 }
3725 else
3726 {
3727 sendMultiApiJson(jvTx, false);
3728 }
3729 }
3730
3731 if (marker)
3732 {
3733 JLOG(m_journal.trace())
3734 << "AccountHistory job for account "
3735 << toBase58(accountId)
3736 << " paging, marker=" << marker->ledgerSeq << ":"
3737 << marker->txnSeq;
3738 }
3739 else
3740 {
3741 break;
3742 }
3743 }
3744
 // Slide the window back; stop once we hit the genesis ledger.
3745 if (!subInfo.index_->stopHistorical_)
3746 {
3747 lastLedgerSeq = startLedgerSeq - 1;
3748 if (lastLedgerSeq <= 1)
3749 {
3750 JLOG(m_journal.trace())
3751 << "AccountHistory job for account "
3752 << toBase58(accountId)
3753 << " done, reached genesis ledger.";
3754 return;
3755 }
3756 }
3757 }
3758 });
3759}
3760
// Begin an account-history subscription against a validated ledger:
// record the separation point, skip accounts that don't exist (or a
// genesis account with no transactions), and queue the backfill job.
// NOTE(review): lines 3762 (function name) and 3764 (the subInfo
// parameter) are missing from this extract — confirm against the full
// source.
3761void
3763 std::shared_ptr<ReadView const> const& ledger,
3765{
 // Transactions at or before this seq come from the history job;
 // later ones are delivered live by pubAccountTransaction.
3766 subInfo.index_->separationLedgerSeq_ = ledger->seq();
3767 auto const& accountId = subInfo.index_->accountId_;
3768 auto const accountKeylet = keylet::account(accountId);
3769 if (!ledger->exists(accountKeylet))
3770 {
3771 JLOG(m_journal.debug())
3772 << "subAccountHistoryStart, no account " << toBase58(accountId)
3773 << ", no need to add AccountHistory job.";
3774 return;
3775 }
3776 if (accountId == genesisAccountId)
3777 {
3778 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3779 {
 // Genesis account at sequence 1 has never transacted.
3780 if (sleAcct->getFieldU32(sfSequence) == 1)
3781 {
3782 JLOG(m_journal.debug())
3783 << "subAccountHistoryStart, genesis account "
3784 << toBase58(accountId)
3785 << " does not have tx, no need to add AccountHistory job.";
3786 return;
3787 }
3788 }
3789 else
3790 {
3791 UNREACHABLE(
3792 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
3793 "access genesis account");
3794 return;
3795 }
3796 }
3797 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
3798 subInfo.index_->haveHistorical_ = true;
3799
3800 JLOG(m_journal.debug())
3801 << "subAccountHistoryStart, add AccountHistory job: accountId="
3802 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
3803
3804 addAccountHistoryJob(subInfo);
3805}
3806
// Subscribe a listener to an account's full transaction history.
// Returns rpcINVALID_PARAMS on duplicate subscription, otherwise
// rpcSUCCESS; streaming starts immediately if a validated ledger
// exists, or is deferred until the first published ledger.
// NOTE(review): lines 3807-3808 (return type and function name), 3820-
// 3821 (the SubAccountHistoryInfoWeak `ahi` construction), 3826 (the
// inner map declaration) and 3828 (the map insert call) are missing
// from this extract — confirm against the full source.
3809 InfoSub::ref isrListener,
3810 AccountID const& accountId)
3811{
3812 if (!isrListener->insertSubAccountHistory(accountId))
3813 {
3814 JLOG(m_journal.debug())
3815 << "subAccountHistory, already subscribed to account "
3816 << toBase58(accountId);
3817 return rpcINVALID_PARAMS;
3818 }
3819
3822 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
3823 auto simIterator = mSubAccountHistory.find(accountId);
3824 if (simIterator == mSubAccountHistory.end())
3825 {
3827 inner.emplace(isrListener->getSeq(), ahi);
3829 simIterator, std::make_pair(accountId, inner));
3830 }
3831 else
3832 {
3833 simIterator->second.emplace(isrListener->getSeq(), ahi);
3834 }
3835
3836 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
3837 if (ledger)
3838 {
3839 subAccountHistoryStart(ledger, ahi);
3840 }
3841 else
3842 {
3843 // The node does not have validated ledgers, so wait for
3844 // one before start streaming.
3845 // In this case, the subscription is also considered successful.
3846 JLOG(m_journal.debug())
3847 << "subAccountHistory, no validated ledger yet, delay start";
3848 }
3849
3850 return rpcSUCCESS;
3851}
3852
// Unsubscribe from an account's history stream. With historyOnly set,
// only the backfill is stopped and the live subscription is kept.
// NOTE(review): line 3854 (function name) is missing from this extract.
3853void
3855 InfoSub::ref isrListener,
3856 AccountID const& account,
3857 bool historyOnly)
3858{
3859 if (!historyOnly)
3860 isrListener->deleteSubAccountHistory(account);
3861 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
3862}
3863
// Server-side part of unsubscribing from account history: signal the
// running backfill job to stop, and (unless historyOnly) remove the
// subscription record, dropping the map entry if it becomes empty.
// NOTE(review): lines 3865 (function name) and 3870 (presumably the
// subscription-map lock) are missing from this extract — confirm
// against the full source.
3864void
3866 std::uint64_t seq,
3867 const AccountID& account,
3868 bool historyOnly)
3869{
3871 auto simIterator = mSubAccountHistory.find(account);
3872 if (simIterator != mSubAccountHistory.end())
3873 {
3874 auto& subInfoMap = simIterator->second;
3875 auto subInfoIter = subInfoMap.find(seq);
3876 if (subInfoIter != subInfoMap.end())
3877 {
 // The background job polls this flag and exits cooperatively.
3878 subInfoIter->second.index_->stopHistorical_ = true;
3879 }
3880
3881 if (!historyOnly)
3882 {
3883 simIterator->second.erase(seq);
3884 if (simIterator->second.empty())
3885 {
3886 mSubAccountHistory.erase(simIterator);
3887 }
3888 }
3889 JLOG(m_journal.debug())
3890 << "unsubAccountHistory, account " << toBase58(account)
3891 << ", historyOnly = " << (historyOnly ? "true" : "false");
3892 }
3893}
3894
// Subscribe a listener to an order book; always reports success.
// NOTE(review): line 3896 (function name and parameters, presumably
// (InfoSub::ref, Book const&)) is missing from this extract — confirm
// against the full source.
3895bool
3897{
3898 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
3899 listeners->addSubscriber(isrListener);
3900 else
3901 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
3902 return true;
3903}
3904
// Unsubscribe a listener (by sequence number) from an order book;
// always reports success, even if no listeners exist for the book.
// NOTE(review): line 3906 (function name and parameters) is missing
// from this extract — confirm against the full source.
3905bool
3907{
3908 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
3909 listeners->removeSubscriber(uSeq);
3910
3911 return true;
3912}
3913
// Standalone-only ledger_accept path: close the current ledger via a
// simulated consensus round and return the new current ledger's seq.
// NOTE(review): lines 3914-3916 (return type, function name and the
// consensusDelay parameter) are missing from this extract — confirm
// against the full source.
3917{
3918 // This code-path is exclusively used when the server is in standalone
3919 // mode via `ledger_accept`
3920 XRPL_ASSERT(
3921 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
3922
 // Defense in depth: refuse outright when not standalone.
3923 if (!m_standalone)
3924 Throw<std::runtime_error>(
3925 "Operation only possible in STANDALONE mode.");
3926
3927 // FIXME Could we improve on this and remove the need for a specialized
3928 // API in Consensus?
3929 beginConsensus(m_ledgerMaster.getClosedLedger()->info().hash, {});
3930 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
3931 return m_ledgerMaster.getCurrentLedger()->info().seq;
3932}
3933
3934// <-- bool: true=added, false=already there
3935bool
3937{
3938 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
3939 {
3940 jvResult[jss::ledger_index] = lpClosed->info().seq;
3941 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
3942 jvResult[jss::ledger_time] = Json::Value::UInt(
3943 lpClosed->info().closeTime.time_since_epoch().count());
3944 if (!lpClosed->rules().enabled(featureXRPFees))
3945 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3946 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
3947 jvResult[jss::reserve_base] =
3948 lpClosed->fees().accountReserve(0).jsonClipped();
3949 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
3950 }
3951
3953 {
3954 jvResult[jss::validated_ledgers] =
3956 }
3957
3959 return mStreamMaps[sLedger]
3960 .emplace(isrListener->getSeq(), isrListener)
3961 .second;
3962}
3963
3964// <-- bool: true=added, false=already there
3965bool
3967{
3970 .emplace(isrListener->getSeq(), isrListener)
3971 .second;
3972}
3973
3974// <-- bool: true=erased, false=was not there
3975bool
3977{
3979 return mStreamMaps[sLedger].erase(uSeq);
3980}
3981
3982// <-- bool: true=erased, false=was not there
3983bool
3985{
3987 return mStreamMaps[sBookChanges].erase(uSeq);
3988}
3989
3990// <-- bool: true=added, false=already there
3991bool
3993{
3995 return mStreamMaps[sManifests]
3996 .emplace(isrListener->getSeq(), isrListener)
3997 .second;
3998}
3999
4000// <-- bool: true=erased, false=was not there
4001bool
4003{
4005 return mStreamMaps[sManifests].erase(uSeq);
4006}
4007
4008// <-- bool: true=added, false=already there
4009bool
4011 InfoSub::ref isrListener,
4012 Json::Value& jvResult,
4013 bool admin)
4014{
4015 uint256 uRandom;
4016
4017 if (m_standalone)
4018 jvResult[jss::stand_alone] = m_standalone;
4019
4020 // CHECKME: is it necessary to provide a random number here?
4021 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
4022
4023 auto const& feeTrack = app_.getFeeTrack();
4024 jvResult[jss::random] = to_string(uRandom);
4025 jvResult[jss::server_status] = strOperatingMode(admin);
4026 jvResult[jss::load_base] = feeTrack.getLoadBase();
4027 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4028 jvResult[jss::hostid] = getHostId(admin);
4029 jvResult[jss::pubkey_node] =
4031
4033 return mStreamMaps[sServer]
4034 .emplace(isrListener->getSeq(), isrListener)
4035 .second;
4036}
4037
4038// <-- bool: true=erased, false=was not there
4039bool
4041{
4043 return mStreamMaps[sServer].erase(uSeq);
4044}
4045
4046// <-- bool: true=added, false=already there
4047bool
4049{
4052 .emplace(isrListener->getSeq(), isrListener)
4053 .second;
4054}
4055
4056// <-- bool: true=erased, false=was not there
4057bool
4059{
4061 return mStreamMaps[sTransactions].erase(uSeq);
4062}
4063
4064// <-- bool: true=added, false=already there
4065bool
4067{
4070 .emplace(isrListener->getSeq(), isrListener)
4071 .second;
4072}
4073
4074// <-- bool: true=erased, false=was not there
4075bool
4077{
4079 return mStreamMaps[sRTTransactions].erase(uSeq);
4080}
4081
4082// <-- bool: true=added, false=already there
4083bool
4085{
4088 .emplace(isrListener->getSeq(), isrListener)
4089 .second;
4090}
4091
4092void
4094{
4095 accounting_.json(obj);
4096}
4097
4098// <-- bool: true=erased, false=was not there
4099bool
4101{
4103 return mStreamMaps[sValidations].erase(uSeq);
4104}
4105
4106// <-- bool: true=added, false=already there
4107bool
4109{
4111 return mStreamMaps[sPeerStatus]
4112 .emplace(isrListener->getSeq(), isrListener)
4113 .second;
4114}
4115
4116// <-- bool: true=erased, false=was not there
4117bool
4119{
4121 return mStreamMaps[sPeerStatus].erase(uSeq);
4122}
4123
4124// <-- bool: true=added, false=already there
4125bool
4127{
4130 .emplace(isrListener->getSeq(), isrListener)
4131 .second;
4132}
4133
4134// <-- bool: true=erased, false=was not there
4135bool
4137{
4139 return mStreamMaps[sConsensusPhase].erase(uSeq);
4140}
4141
4144{
4146
4147 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4148
4149 if (it != mRpcSubMap.end())
4150 return it->second;
4151
4152 return InfoSub::pointer();
4153}
4154
4157{
4159
4160 mRpcSubMap.emplace(strUrl, rspEntry);
4161
4162 return rspEntry;
4163}
4164
4165bool
4167{
4169 auto pInfo = findRpcSub(strUrl);
4170
4171 if (!pInfo)
4172 return false;
4173
4174 // check to see if any of the stream maps still hold a weak reference to
4175 // this entry before removing
4176 for (SubMapType const& map : mStreamMaps)
4177 {
4178 if (map.find(pInfo->getSeq()) != map.end())
4179 return false;
4180 }
4181 mRpcSubMap.erase(strUrl);
4182 return true;
4183}
4184
4185#ifndef USE_NEW_BOOK_PAGE
4186
4187// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4188// work, but it demonstrated poor performance.
4189//
4190void
4193 Book const& book,
4194 AccountID const& uTakerID,
4195 bool const bProof,
4196 unsigned int iLimit,
4197 Json::Value const& jvMarker,
4198 Json::Value& jvResult)
4199{ // CAUTION: This is the old get book page logic
4200 Json::Value& jvOffers =
4201 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4202
4204 const uint256 uBookBase = getBookBase(book);
4205 const uint256 uBookEnd = getQualityNext(uBookBase);
4206 uint256 uTipIndex = uBookBase;
4207
4208 if (auto stream = m_journal.trace())
4209 {
4210 stream << "getBookPage:" << book;
4211 stream << "getBookPage: uBookBase=" << uBookBase;
4212 stream << "getBookPage: uBookEnd=" << uBookEnd;
4213 stream << "getBookPage: uTipIndex=" << uTipIndex;
4214 }
4215
4216 ReadView const& view = *lpLedger;
4217
4218 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4219 isGlobalFrozen(view, book.in.account);
4220
4221 bool bDone = false;
4222 bool bDirectAdvance = true;
4223
4224 std::shared_ptr<SLE const> sleOfferDir;
4225 uint256 offerIndex;
4226 unsigned int uBookEntry;
4227 STAmount saDirRate;
4228
4229 auto const rate = transferRate(view, book.out.account);
4230 auto viewJ = app_.journal("View");
4231
4232 while (!bDone && iLimit-- > 0)
4233 {
4234 if (bDirectAdvance)
4235 {
4236 bDirectAdvance = false;
4237
4238 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4239
4240 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4241 if (ledgerIndex)
4242 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4243 else
4244 sleOfferDir.reset();
4245
4246 if (!sleOfferDir)
4247 {
4248 JLOG(m_journal.trace()) << "getBookPage: bDone";
4249 bDone = true;
4250 }
4251 else
4252 {
4253 uTipIndex = sleOfferDir->key();
4254 saDirRate = amountFromQuality(getQuality(uTipIndex));
4255
4256 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4257
4258 JLOG(m_journal.trace())
4259 << "getBookPage: uTipIndex=" << uTipIndex;
4260 JLOG(m_journal.trace())
4261 << "getBookPage: offerIndex=" << offerIndex;
4262 }
4263 }
4264
4265 if (!bDone)
4266 {
4267 auto sleOffer = view.read(keylet::offer(offerIndex));
4268
4269 if (sleOffer)
4270 {
4271 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4272 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4273 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4274 STAmount saOwnerFunds;
4275 bool firstOwnerOffer(true);
4276
4277 if (book.out.account == uOfferOwnerID)
4278 {
4279 // If an offer is selling issuer's own IOUs, it is fully
4280 // funded.
4281 saOwnerFunds = saTakerGets;
4282 }
4283 else if (bGlobalFreeze)
4284 {
4285 // If either asset is globally frozen, consider all offers
4286 // that aren't ours to be totally unfunded
4287 saOwnerFunds.clear(book.out);
4288 }
4289 else
4290 {
4291 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4292 if (umBalanceEntry != umBalance.end())
4293 {
4294 // Found in running balance table.
4295
4296 saOwnerFunds = umBalanceEntry->second;
4297 firstOwnerOffer = false;
4298 }
4299 else
4300 {
4301 // Did not find balance in table.
4302
4303 saOwnerFunds = accountHolds(
4304 view,
4305 uOfferOwnerID,
4306 book.out.currency,
4307 book.out.account,
4309 viewJ);
4310
4311 if (saOwnerFunds < beast::zero)
4312 {
4313 // Treat negative funds as zero.
4314
4315 saOwnerFunds.clear();
4316 }
4317 }
4318 }
4319
4320 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4321
4322 STAmount saTakerGetsFunded;
4323 STAmount saOwnerFundsLimit = saOwnerFunds;
4324 Rate offerRate = parityRate;
4325
4326 if (rate != parityRate
4327 // Have a transfer fee.
4328 && uTakerID != book.out.account
4329 // Not taking offers of own IOUs.
4330 && book.out.account != uOfferOwnerID)
4331 // Offer owner is not issuing its own funds.
4332 {
4333 // Need to charge a transfer fee to offer owner.
4334 offerRate = rate;
4335 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4336 }
4337
4338 if (saOwnerFundsLimit >= saTakerGets)
4339 {
4340 // Sufficient funds no shenanigans.
4341 saTakerGetsFunded = saTakerGets;
4342 }
4343 else
4344 {
4345 // Only provide the funded amounts if not fully funded.
4346
4347 saTakerGetsFunded = saOwnerFundsLimit;
4348
4349 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4350 std::min(
4351 saTakerPays,
4352 multiply(
4353 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4354 .setJson(jvOffer[jss::taker_pays_funded]);
4355 }
4356
4357 STAmount saOwnerPays = (parityRate == offerRate)
4358 ? saTakerGetsFunded
4359 : std::min(
4360 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4361
4362 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4363
4364 // Include all offers funded and unfunded
4365 Json::Value& jvOf = jvOffers.append(jvOffer);
4366 jvOf[jss::quality] = saDirRate.getText();
4367
4368 if (firstOwnerOffer)
4369 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4370 }
4371 else
4372 {
4373 JLOG(m_journal.warn()) << "Missing offer";
4374 }
4375
4376 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4377 {
4378 bDirectAdvance = true;
4379 }
4380 else
4381 {
4382 JLOG(m_journal.trace())
4383 << "getBookPage: offerIndex=" << offerIndex;
4384 }
4385 }
4386 }
4387
4388 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4389 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4390}
4391
4392#else
4393
4394// This is the new code that uses the book iterators
4395// It has temporarily been disabled
4396
4397void
4400 Book const& book,
4401 AccountID const& uTakerID,
4402 bool const bProof,
4403 unsigned int iLimit,
4404 Json::Value const& jvMarker,
4405 Json::Value& jvResult)
4406{
4407 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4408
4410
4411 MetaView lesActive(lpLedger, tapNONE, true);
4412 OrderBookIterator obIterator(lesActive, book);
4413
4414 auto const rate = transferRate(lesActive, book.out.account);
4415
4416 const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4417 lesActive.isGlobalFrozen(book.in.account);
4418
4419 while (iLimit-- > 0 && obIterator.nextOffer())
4420 {
4421 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4422 if (sleOffer)
4423 {
4424 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4425 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4426 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4427 STAmount saDirRate = obIterator.getCurrentRate();
4428 STAmount saOwnerFunds;
4429
4430 if (book.out.account == uOfferOwnerID)
4431 {
4432 // If offer is selling issuer's own IOUs, it is fully funded.
4433 saOwnerFunds = saTakerGets;
4434 }
4435 else if (bGlobalFreeze)
4436 {
4437 // If either asset is globally frozen, consider all offers
4438 // that aren't ours to be totally unfunded
4439 saOwnerFunds.clear(book.out);
4440 }
4441 else
4442 {
4443 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4444
4445 if (umBalanceEntry != umBalance.end())
4446 {
4447 // Found in running balance table.
4448
4449 saOwnerFunds = umBalanceEntry->second;
4450 }
4451 else
4452 {
4453 // Did not find balance in table.
4454
4455 saOwnerFunds = lesActive.accountHolds(
4456 uOfferOwnerID,
4457 book.out.currency,
4458 book.out.account,
4460
4461 if (saOwnerFunds.isNegative())
4462 {
4463 // Treat negative funds as zero.
4464
4465 saOwnerFunds.zero();
4466 }
4467 }
4468 }
4469
4470 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4471
4472 STAmount saTakerGetsFunded;
4473 STAmount saOwnerFundsLimit = saOwnerFunds;
4474 Rate offerRate = parityRate;
4475
4476 if (rate != parityRate
4477 // Have a transfer fee.
4478 && uTakerID != book.out.account
4479 // Not taking offers of own IOUs.
4480 && book.out.account != uOfferOwnerID)
4481 // Offer owner is not issuing its own funds.
4482 {
4483 // Need to charge a transfer fee to offer owner.
4484 offerRate = rate;
4485 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4486 }
4487
4488 if (saOwnerFundsLimit >= saTakerGets)
4489 {
4490 // Sufficient funds; no shenanigans.
4491 saTakerGetsFunded = saTakerGets;
4492 }
4493 else
4494 {
4495 // Only provide the funded amounts if not fully funded.
4496 saTakerGetsFunded = saOwnerFundsLimit;
4497
4498 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4499
4500 // TODO(tom): The result of this expression is not used - what's
4501 // going on here?
4502 std::min(
4503 saTakerPays,
4504 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4505 .setJson(jvOffer[jss::taker_pays_funded]);
4506 }
4507
4508 STAmount saOwnerPays = (parityRate == offerRate)
4509 ? saTakerGetsFunded
4510 : std::min(
4511 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4512
4513 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4514
4515 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4516 {
4517 // Only provide funded offers and offers of the taker.
4518 Json::Value& jvOf = jvOffers.append(jvOffer);
4519 jvOf[jss::quality] = saDirRate.getText();
4520 }
4521 }
4522 }
4523
4524 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4525 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4526}
4527
4528#endif
4529
4530inline void
4532{
4533 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4534 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4536 counters[static_cast<std::size_t>(mode)].dur += current;
4537
4540 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4541 .dur.count());
4543 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4544 .dur.count());
4546 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4548 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4549 .dur.count());
4551 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4552
4554 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4555 .transitions);
4557 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4558 .transitions);
4560 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4562 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4563 .transitions);
4565 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4566}
4567
4568void
4570{
4571 auto now = std::chrono::steady_clock::now();
4572
4573 std::lock_guard lock(mutex_);
4574 ++counters_[static_cast<std::size_t>(om)].transitions;
4575 if (om == OperatingMode::FULL &&
4576 counters_[static_cast<std::size_t>(om)].transitions == 1)
4577 {
4578 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4579 now - processStart_)
4580 .count();
4581 }
4582 counters_[static_cast<std::size_t>(mode_)].dur +=
4583 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4584
4585 mode_ = om;
4586 start_ = now;
4587}
4588
4589void
4591{
4592 auto [counters, mode, start, initialSync] = getCounterData();
4593 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4595 counters[static_cast<std::size_t>(mode)].dur += current;
4596
4597 obj[jss::state_accounting] = Json::objectValue;
4599 i <= static_cast<std::size_t>(OperatingMode::FULL);
4600 ++i)
4601 {
4602 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4603 auto& state = obj[jss::state_accounting][states_[i]];
4604 state[jss::transitions] = std::to_string(counters[i].transitions);
4605 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4606 }
4607 obj[jss::server_state_duration_us] = std::to_string(current.count());
4608 if (initialSync)
4609 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4610}
4611
4612//------------------------------------------------------------------------------
4613
4616 Application& app,
4618 bool standalone,
4619 std::size_t minPeerCount,
4620 bool startvalid,
4621 JobQueue& job_queue,
4623 ValidatorKeys const& validatorKeys,
4624 boost::asio::io_service& io_svc,
4625 beast::Journal journal,
4626 beast::insight::Collector::ptr const& collector)
4627{
4628 return std::make_unique<NetworkOPsImp>(
4629 app,
4630 clock,
4631 standalone,
4632 minPeerCount,
4633 startvalid,
4634 job_queue,
4636 validatorKeys,
4637 io_svc,
4638 journal,
4639 collector);
4640}
4641
4642} // namespace ripple
T back_inserter(T... args)
T begin(T... args)
Decorator for streaming out compact json.
Definition: json_writer.h:317
Lightweight wrapper to tag static string.
Definition: json_value.h:61
Represents a JSON value.
Definition: json_value.h:147
Json::UInt UInt
Definition: json_value.h:154
Value get(UInt index, const Value &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Definition: json_value.cpp:841
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:891
bool isMember(const char *key) const
Return true if the object has a member named key.
Definition: json_value.cpp:943
A generic endpoint for log messages.
Definition: Journal.h:59
Stream error() const
Definition: Journal.h:345
Stream debug() const
Definition: Journal.h:327
Stream info() const
Definition: Journal.h:333
Stream trace() const
Severity stream access functions.
Definition: Journal.h:321
Stream warn() const
Definition: Journal.h:339
A metric for measuring an integral value.
Definition: Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition: Gauge.h:68
A reference to a handler for performing polled collection.
Definition: Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition: Book.h:34
Issue in
Definition: Book.h:36
Issue out
Definition: Book.h:37
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition: ClusterNode.h:45
std::uint32_t getLoadFee() const
Definition: ClusterNode.h:51
NetClock::time_point getReportTime() const
Definition: ClusterNode.h:57
PublicKey const & identity() const
Definition: ClusterNode.h:63
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:48
std::string SERVER_DOMAIN
Definition: Config.h:286
std::size_t NODE_SIZE
Definition: Config.h:220
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition: Config.h:167
int RELAY_UNTRUSTED_VALIDATIONS
Definition: Config.h:176
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition: InfoSub.h:54
AccountID account
Definition: Issue.h:39
Currency currency
Definition: Issue.h:38
A pool of threads to perform work.
Definition: JobQueue.h:56
Json::Value getJson(int c=0)
Definition: JobQueue.cpp:212
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
Definition: LedgerMaster.h:266
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:80
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
Definition: LoadFeeTrack.h:46
std::uint32_t getClusterFee() const
Definition: LoadFeeTrack.h:83
std::uint32_t getLocalFee() const
Definition: LoadFeeTrack.h:76
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:90
std::uint32_t getRemoteFee() const
Definition: LoadFeeTrack.h:69
std::uint32_t getLoadFactor() const
Definition: LoadFeeTrack.h:96
Manages load sources.
Definition: LoadManager.h:46
void resetDeadlockDetector()
Reset the deadlock detection timer.
Definition: LoadManager.cpp:63
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
Definition: NetworkOPs.cpp:142
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
Definition: NetworkOPs.cpp:152
std::chrono::steady_clock::time_point start_
Definition: NetworkOPs.cpp:154
static std::array< Json::StaticString const, 5 > const states_
Definition: NetworkOPs.cpp:158
std::chrono::steady_clock::time_point const processStart_
Definition: NetworkOPs.cpp:156
Transaction with input flags and results to be applied in batches.
Definition: NetworkOPs.cpp:93
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
Definition: NetworkOPs.cpp:102
std::shared_ptr< Transaction > const transaction
Definition: NetworkOPs.cpp:95
boost::asio::steady_timer accountHistoryTxTimer_
Definition: NetworkOPs.cpp:734
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
Definition: NetworkOPs.cpp:866
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::vector< TransactionStatus > mTransactions
Definition: NetworkOPs.cpp:778
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Definition: NetworkOPs.cpp:724
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
Definition: NetworkOPs.cpp:736
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
Definition: NetworkOPs.cpp:884
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
Definition: NetworkOPs.cpp:732
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
Definition: NetworkOPs.cpp:119
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
Definition: NetworkOPs.cpp:720
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
Definition: NetworkOPs.cpp:263
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Definition: NetworkOPs.cpp:747
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, const bool bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
Definition: NetworkOPs.cpp:733
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
Definition: NetworkOPs.cpp:125
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:225
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
Definition: NetworkOPs.cpp:727
beast::Journal m_journal
Definition: NetworkOPs.cpp:718
SubInfoMapType mSubAccount
Definition: NetworkOPs.cpp:742
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
Definition: NetworkOPs.cpp:782
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
Definition: NetworkOPs.cpp:999
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
Definition: NetworkOPs.cpp:731
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
Definition: NetworkOPs.cpp:937
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
Definition: NetworkOPs.cpp:762
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
Definition: NetworkOPs.cpp:772
std::atomic< bool > unlBlocked_
Definition: NetworkOPs.cpp:729
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
Definition: NetworkOPs.cpp:722
std::atomic< bool > needNetworkLedger_
Definition: NetworkOPs.cpp:726
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
Definition: NetworkOPs.cpp:780
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
Definition: NetworkOPs.cpp:896
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
Definition: NetworkOPs.cpp:740
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
Definition: NetworkOPs.cpp:927
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
Definition: NetworkOPs.cpp:764
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
Definition: NetworkOPs.cpp:878
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
Definition: NetworkOPs.cpp:775
void setMode(OperatingMode om) override
void stop() override
Definition: NetworkOPs.cpp:567
void clearNeedNetworkLedger() override
Definition: NetworkOPs.cpp:890
DispatchState mDispatchState
Definition: NetworkOPs.cpp:777
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
Definition: NetworkOPs.cpp:743
bool checkLastClosedLedger(const Overlay::PeerSequence &, uint256 &networkClosed)
void reportFeeChange() override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
Definition: NetworkOPs.cpp:902
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
Definition: NetworkOPs.cpp:783
Application & app_
Definition: NetworkOPs.cpp:717
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
Definition: NetworkOPs.cpp:738
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
Definition: NetworkOPs.cpp:745
std::atomic< bool > amendmentWarned_
Definition: NetworkOPs.cpp:728
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
Definition: NetworkOPs.cpp:908
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition: NetworkOPs.h:87
void getCountsJson(Json::Value &obj)
Definition: Database.cpp:266
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:50
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:56
BookListeners::pointer getBookListeners(Book const &)
BookListeners::pointer makeBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, const AcceptedLedgerTx &alTx, MultiApiJson const &jvObj)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
Definition: RCLConsensus.h:55
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
Definition: RCLConsensus.h:449
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Definition: RCLConsensus.h:462
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:44
Represents a set of transactions in RCLConsensus.
Definition: RCLCxTx.h:66
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition: RFC1751.cpp:498
Collects logging information.
Definition: RCLConsensus.h:553
std::unique_ptr< std::stringstream > const & ss()
Definition: RCLConsensus.h:567
A view into a ledger.
Definition: ReadView.h:55
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition: STAmount.cpp:475
std::string getText() const override
Definition: STAmount.cpp:515
Issue const & issue() const
Definition: STAmount.h:487
std::optional< T > get(std::string const &name) const
Definition: BasicConfig.h:141
std::size_t size() const noexcept
Definition: Serializer.h:72
void const * data() const noexcept
Definition: Serializer.h:78
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition: TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition: TxQ.cpp:1777
static time_point now()
Definition: UptimeClock.cpp:63
Validator keys and manifest as set in configuration file.
Definition: ValidatorKeys.h:37
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition: XRPAmount.h:262
Json::Value jsonClipped() const
Definition: XRPAmount.h:218
iterator begin()
Definition: base_uint.h:135
static constexpr std::size_t size()
Definition: base_uint.h:525
bool isZero() const
Definition: base_uint.h:539
bool isNonZero() const
Definition: base_uint.h:544
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition: scope.h:231
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition: json_value.h:42
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
int Int
Definition: json_forwards.h:26
unsigned int UInt
Definition: json_forwards.h:27
void rngfill(void *buffer, std::size_t bytes, Generator &g)
Definition: rngfill.h:33
std::string const & getVersionString()
Server version.
Definition: BuildInfo.cpp:65
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition: BookChanges.h:47
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition: Indexes.cpp:160
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition: Indexes.cpp:356
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition: Indexes.cpp:250
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:30
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:106
STAmount divide(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:87
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition: STTx.cpp:604
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition: View.cpp:442
@ fhZERO_IF_FROZEN
Definition: View.h:80
@ fhIGNORE_FREEZE
Definition: View.h:80
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition: View.cpp:136
std::uint64_t getQuality(uint256 const &uBase)
Definition: Indexes.cpp:125
@ INCLUDED
Definition: Transaction.h:49
@ OBSOLETE
Definition: Transaction.h:54
@ INVALID
Definition: Transaction.h:48
error_code_i
Definition: ErrorCodes.h:40
@ rpcSUCCESS
Definition: ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition: ErrorCodes.h:84
@ rpcINTERNAL
Definition: ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
Definition: SecretKey.cpp:351
auto constexpr muldiv_max
Definition: mulDiv.h:29
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition: LocalTxs.cpp:197
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition: View.cpp:650
STAmount amountFromQuality(std::uint64_t rate)
Definition: STAmount.cpp:822
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition: ErrorCodes.h:167
@ warnRPC_UNSUPPORTED_MAJORITY
Definition: ErrorCodes.h:165
@ warnRPC_AMENDMENT_BLOCKED
Definition: ErrorCodes.h:166
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition: NetworkOPs.h:66
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:47
bool isTesSuccess(TER x)
Definition: TER.h:656
bool isTerRetry(TER x)
Definition: TER.h:650
AccountID calcAccountID(PublicKey const &pk)
Definition: AccountID.cpp:160
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Definition: csprng.cpp:99
Json::Value rpcError(int iError)
Definition: RPCErr.cpp:29
@ tefPAST_SEQ
Definition: TER.h:175
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
Definition: NetworkOPs.cpp:847
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
bool isTemMalformed(TER x)
Definition: TER.h:638
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition: View.cpp:147
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition: ApiVersion.h:102
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:75
@ tesSUCCESS
Definition: TER.h:242
uint256 getQualityNext(uint256 const &uBase)
Definition: Indexes.cpp:117
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition: View.cpp:308
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition: ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition: chrono.h:93
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:629
FeeSetup setup_FeeVote(Section const &section)
Definition: Config.cpp:1090
Number root(Number f, unsigned d)
Definition: Number.cpp:630
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Definition: mulDiv.cpp:27
ApplyFlags
Definition: ApplyView.h:30
@ tapFAIL_HARD
Definition: ApplyView.h:35
@ tapUNLIMITED
Definition: ApplyView.h:42
@ tapNONE
Definition: ApplyView.h:31
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:37
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition: Seed.cpp:69
@ terQUEUED
Definition: TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition: TER.cpp:236
@ jtNETOP_CLUSTER
Definition: Job.h:75
@ jtCLIENT_FEE_CHANGE
Definition: Job.h:47
@ jtTRANSACTION
Definition: Job.h:62
@ jtTXN_PROC
Definition: Job.h:82
@ jtCLIENT_CONSENSUS
Definition: Job.h:48
@ jtBATCH
Definition: Job.h:65
@ jtCLIENT_ACCT_HIST
Definition: Job.h:49
uint256 getBookBase(Book const &book)
Definition: Indexes.cpp:98
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition: View.cpp:173
static std::uint32_t trunc32(std::uint64_t v)
@ temBAD_SIGNATURE
Definition: TER.h:105
static auto const genesisAccountId
Definition: NetworkOPs.cpp:860
STL namespace.
T ref(T... args)
T reset(T... args)
T set_intersection(T... args)
T str(T... args)
std::string serialized
The manifest in serialized form.
Definition: Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition: Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition: Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition: Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Definition: Manifest.h:86
Server fees published on server subscription.
Definition: NetworkOPs.cpp:202
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
Definition: NetworkOPs.cpp:221
bool operator==(ServerFeeSummary const &b) const
Definition: NetworkOPs.cpp:213
beast::insight::Gauge full_transitions
Definition: NetworkOPs.cpp:834
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:789
beast::insight::Hook hook
Definition: NetworkOPs.cpp:823
beast::insight::Gauge connected_duration
Definition: NetworkOPs.cpp:825
beast::insight::Gauge tracking_duration
Definition: NetworkOPs.cpp:827
beast::insight::Gauge connected_transitions
Definition: NetworkOPs.cpp:831
beast::insight::Gauge disconnected_transitions
Definition: NetworkOPs.cpp:830
beast::insight::Gauge syncing_duration
Definition: NetworkOPs.cpp:826
beast::insight::Gauge tracking_transitions
Definition: NetworkOPs.cpp:833
beast::insight::Gauge full_duration
Definition: NetworkOPs.cpp:828
beast::insight::Gauge disconnected_duration
Definition: NetworkOPs.cpp:824
beast::insight::Gauge syncing_transitions
Definition: NetworkOPs.cpp:832
SubAccountHistoryIndex(AccountID const &accountId)
Definition: NetworkOPs.cpp:681
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:700
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:695
Represents a transfer rate.
Definition: Rate.h:38
Data format for exchanging consumption information across peers.
Definition: Gossip.h:30
std::vector< Item > items
Definition: Gossip.h:42
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition: TxQ.h:162
void set(const char *key, auto const &v)
Definition: MultiApiJson.h:83
IsMemberResult isMember(const char *key) const
Definition: MultiApiJson.h:94
Select all peers (except optional excluded) that are in our cluster.
Definition: predicates.h:137
Sends a message to all peers.
Definition: predicates.h:32
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)