rippled
Loading...
Searching...
No Matches
NetworkOPs.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/misc/AmendmentTable.h>
32#include <xrpld/app/misc/DeliverMax.h>
33#include <xrpld/app/misc/HashRouter.h>
34#include <xrpld/app/misc/LoadFeeTrack.h>
35#include <xrpld/app/misc/NetworkOPs.h>
36#include <xrpld/app/misc/Transaction.h>
37#include <xrpld/app/misc/TxQ.h>
38#include <xrpld/app/misc/ValidatorList.h>
39#include <xrpld/app/misc/detail/AccountTxPaging.h>
40#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
41#include <xrpld/app/tx/apply.h>
42#include <xrpld/consensus/Consensus.h>
43#include <xrpld/consensus/ConsensusParms.h>
44#include <xrpld/overlay/Cluster.h>
45#include <xrpld/overlay/Overlay.h>
46#include <xrpld/overlay/predicates.h>
47#include <xrpld/perflog/PerfLog.h>
48#include <xrpld/rpc/BookChanges.h>
49#include <xrpld/rpc/DeliveredAmount.h>
50#include <xrpld/rpc/MPTokenIssuanceID.h>
51#include <xrpld/rpc/ServerHandler.h>
52#include <xrpl/basics/UptimeClock.h>
53#include <xrpl/basics/mulDiv.h>
54#include <xrpl/basics/safe_cast.h>
55#include <xrpl/basics/scope.h>
56#include <xrpl/beast/utility/rngfill.h>
57#include <xrpl/crypto/RFC1751.h>
58#include <xrpl/crypto/csprng.h>
59#include <xrpl/protocol/BuildInfo.h>
60#include <xrpl/protocol/Feature.h>
61#include <xrpl/protocol/MultiApiJson.h>
62#include <xrpl/protocol/RPCErr.h>
63#include <xrpl/protocol/jss.h>
64#include <xrpl/resource/Fees.h>
65#include <xrpl/resource/ResourceManager.h>
66
67#include <boost/asio/ip/host_name.hpp>
68#include <boost/asio/steady_timer.hpp>
69
70#include <algorithm>
71#include <exception>
72#include <mutex>
73#include <optional>
74#include <set>
75#include <sstream>
76#include <string>
77#include <tuple>
78#include <unordered_map>
79
80namespace ripple {
81
82class NetworkOPsImp final : public NetworkOPs
83{
89 {
90 public:
92 bool const admin;
93 bool const local;
95 bool applied = false;
97
100 bool a,
101 bool l,
102 FailHard f)
103 : transaction(t), admin(a), local(l), failType(f)
104 {
105 XRPL_ASSERT(
107 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
108 "valid inputs");
109 }
110 };
111
115 enum class DispatchState : unsigned char {
116 none,
117 scheduled,
118 running,
119 };
120
122
138 {
139 struct Counters
140 {
141 explicit Counters() = default;
142
145 };
146
150 std::chrono::steady_clock::time_point start_ =
152 std::chrono::steady_clock::time_point const processStart_ = start_;
155
156 public:
158 {
160 .transitions = 1;
161 }
162
169 void
171
177 void
178 json(Json::Value& obj) const;
179
181 {
183 decltype(mode_) mode;
184 decltype(start_) start;
186 };
187
190 {
193 }
194 };
195
198 {
199 ServerFeeSummary() = default;
200
202 XRPAmount fee,
203 TxQ::Metrics&& escalationMetrics,
204 LoadFeeTrack const& loadFeeTrack);
205 bool
206 operator!=(ServerFeeSummary const& b) const;
207
208 bool
210 {
211 return !(*this != b);
212 }
213
218 };
219
220public:
222 Application& app,
224 bool standalone,
225 std::size_t minPeerCount,
226 bool start_valid,
227 JobQueue& job_queue,
229 ValidatorKeys const& validatorKeys,
230 boost::asio::io_service& io_svc,
231 beast::Journal journal,
232 beast::insight::Collector::ptr const& collector)
233 : app_(app)
234 , m_journal(journal)
237 , heartbeatTimer_(io_svc)
238 , clusterTimer_(io_svc)
239 , accountHistoryTxTimer_(io_svc)
240 , mConsensus(
241 app,
243 setup_FeeVote(app_.config().section("voting")),
244 app_.logs().journal("FeeVote")),
246 *m_localTX,
247 app.getInboundTransactions(),
248 beast::get_abstract_clock<std::chrono::steady_clock>(),
249 validatorKeys,
250 app_.logs().journal("LedgerConsensus"))
252 , m_job_queue(job_queue)
253 , m_standalone(standalone)
254 , minPeerCount_(start_valid ? 0 : minPeerCount)
255 , m_stats(std::bind(&NetworkOPsImp::collect_metrics, this), collector)
256 {
257 }
258
259 ~NetworkOPsImp() override
260 {
261 // This clear() is necessary to ensure the shared_ptrs in this map get
262 // destroyed NOW because the objects in this map invoke methods on this
263 // class when they are destroyed
265 }
266
267public:
269 getOperatingMode() const override;
270
272 strOperatingMode(OperatingMode const mode, bool const admin) const override;
273
275 strOperatingMode(bool const admin = false) const override;
276
277 //
278 // Transaction operations.
279 //
280
281 // Must complete immediately.
282 void
284
285 void
287 std::shared_ptr<Transaction>& transaction,
288 bool bUnlimited,
289 bool bLocal,
290 FailHard failType) override;
291
300 void
303 bool bUnlimited,
304 FailHard failType);
305
315 void
318 bool bUnlimited,
319 FailHard failtype);
320
324 void
326
332 void
334
335 //
336 // Owner functions.
337 //
338
342 AccountID const& account) override;
343
344 //
345 // Book functions.
346 //
347
348 void
351 Book const&,
352 AccountID const& uTakerID,
353 const bool bProof,
354 unsigned int iLimit,
355 Json::Value const& jvMarker,
356 Json::Value& jvResult) override;
357
358 // Ledger proposal/close functions.
359 bool
361
362 bool
365 std::string const& source) override;
366
367 void
368 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
369
370 // Network state machine.
371
372 // Used for the "jump" case.
373private:
374 void
376 bool
378
379public:
380 bool
382 uint256 const& networkClosed,
383 std::unique_ptr<std::stringstream> const& clog) override;
384 void
386 void
387 setStandAlone() override;
388
392 void
393 setStateTimer() override;
394
395 void
396 setNeedNetworkLedger() override;
397 void
398 clearNeedNetworkLedger() override;
399 bool
400 isNeedNetworkLedger() override;
401 bool
402 isFull() override;
403
404 void
405 setMode(OperatingMode om) override;
406
407 bool
408 isBlocked() override;
409 bool
410 isAmendmentBlocked() override;
411 void
412 setAmendmentBlocked() override;
413 bool
414 isAmendmentWarned() override;
415 void
416 setAmendmentWarned() override;
417 void
418 clearAmendmentWarned() override;
419 bool
420 isUNLBlocked() override;
421 void
422 setUNLBlocked() override;
423 void
424 clearUNLBlocked() override;
425 void
426 consensusViewChange() override;
427
429 getConsensusInfo() override;
431 getServerInfo(bool human, bool admin, bool counters) override;
432 void
433 clearLedgerFetch() override;
435 getLedgerFetchInfo() override;
438 std::optional<std::chrono::milliseconds> consensusDelay) override;
439 void
440 reportFeeChange() override;
441 void
443
444 void
445 updateLocalTx(ReadView const& view) override;
447 getLocalTxCount() override;
448
449 //
450 // Monitoring: publisher side.
451 //
452 void
453 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
454 void
457 std::shared_ptr<STTx const> const& transaction,
458 TER result) override;
459 void
460 pubValidation(std::shared_ptr<STValidation> const& val) override;
461
462 //--------------------------------------------------------------------------
463 //
464 // InfoSub::Source.
465 //
466 void
468 InfoSub::ref ispListener,
469 hash_set<AccountID> const& vnaAccountIDs,
470 bool rt) override;
471 void
473 InfoSub::ref ispListener,
474 hash_set<AccountID> const& vnaAccountIDs,
475 bool rt) override;
476
477 // Just remove the subscription from the tracking
478 // not from the InfoSub. Needed for InfoSub destruction
479 void
481 std::uint64_t seq,
482 hash_set<AccountID> const& vnaAccountIDs,
483 bool rt) override;
484
486 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
487 override;
488 void
490 InfoSub::ref ispListener,
491 AccountID const& account,
492 bool historyOnly) override;
493
494 void
496 std::uint64_t seq,
497 AccountID const& account,
498 bool historyOnly) override;
499
500 bool
501 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
502 bool
503 unsubLedger(std::uint64_t uListener) override;
504
505 bool
506 subBookChanges(InfoSub::ref ispListener) override;
507 bool
508 unsubBookChanges(std::uint64_t uListener) override;
509
510 bool
511 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
512 override;
513 bool
514 unsubServer(std::uint64_t uListener) override;
515
516 bool
517 subBook(InfoSub::ref ispListener, Book const&) override;
518 bool
519 unsubBook(std::uint64_t uListener, Book const&) override;
520
521 bool
522 subManifests(InfoSub::ref ispListener) override;
523 bool
524 unsubManifests(std::uint64_t uListener) override;
525 void
526 pubManifest(Manifest const&) override;
527
528 bool
529 subTransactions(InfoSub::ref ispListener) override;
530 bool
531 unsubTransactions(std::uint64_t uListener) override;
532
533 bool
534 subRTTransactions(InfoSub::ref ispListener) override;
535 bool
536 unsubRTTransactions(std::uint64_t uListener) override;
537
538 bool
539 subValidations(InfoSub::ref ispListener) override;
540 bool
541 unsubValidations(std::uint64_t uListener) override;
542
543 bool
544 subPeerStatus(InfoSub::ref ispListener) override;
545 bool
546 unsubPeerStatus(std::uint64_t uListener) override;
547 void
548 pubPeerStatus(std::function<Json::Value(void)> const&) override;
549
550 bool
551 subConsensus(InfoSub::ref ispListener) override;
552 bool
553 unsubConsensus(std::uint64_t uListener) override;
554
556 findRpcSub(std::string const& strUrl) override;
558 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
559 bool
560 tryRemoveRpcSub(std::string const& strUrl) override;
561
562 void
563 stop() override
564 {
565 {
566 boost::system::error_code ec;
567 heartbeatTimer_.cancel(ec);
568 if (ec)
569 {
570 JLOG(m_journal.error())
571 << "NetworkOPs: heartbeatTimer cancel error: "
572 << ec.message();
573 }
574
575 ec.clear();
576 clusterTimer_.cancel(ec);
577 if (ec)
578 {
579 JLOG(m_journal.error())
580 << "NetworkOPs: clusterTimer cancel error: "
581 << ec.message();
582 }
583
584 ec.clear();
585 accountHistoryTxTimer_.cancel(ec);
586 if (ec)
587 {
588 JLOG(m_journal.error())
589 << "NetworkOPs: accountHistoryTxTimer cancel error: "
590 << ec.message();
591 }
592 }
593 // Make sure that any waitHandlers pending in our timers are done.
594 using namespace std::chrono_literals;
595 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
596 }
597
598 void
599 stateAccounting(Json::Value& obj) override;
600
601private:
602 void
603 setTimer(
604 boost::asio::steady_timer& timer,
605 std::chrono::milliseconds const& expiry_time,
606 std::function<void()> onExpire,
607 std::function<void()> onError);
608 void
610 void
612 void
614 void
616
618 transJson(
619 std::shared_ptr<STTx const> const& transaction,
620 TER result,
621 bool validated,
624
625 void
628 AcceptedLedgerTx const& transaction,
629 bool last);
630
631 void
634 AcceptedLedgerTx const& transaction,
635 bool last);
636
637 void
640 std::shared_ptr<STTx const> const& transaction,
641 TER result);
642
643 void
644 pubServer();
645 void
647
649 getHostId(bool forAdmin);
650
651private:
655
656 /*
657 * With a validated ledger to separate history and future, the node
658 * streams historical txns with negative indexes starting from -1,
659 * and streams future txns starting from index 0.
660 * The SubAccountHistoryIndex struct maintains these indexes.
661 * It also has a flag stopHistorical_ for stopping streaming
662 * the historical txns.
663 */
665 {
667 // forward
669 // separate backward and forward
671 // history, backward
676
678 : accountId_(accountId)
679 , forwardTxIndex_(0)
682 , historyTxIndex_(-1)
683 , haveHistorical_(false)
684 , stopHistorical_(false)
685 {
686 }
687 };
689 {
692 };
694 {
697 };
700
704 void
708 void
710 void
712
715
717
719
721
726
728 boost::asio::steady_timer heartbeatTimer_;
729 boost::asio::steady_timer clusterTimer_;
730 boost::asio::steady_timer accountHistoryTxTimer_;
731
733
735
737
740
742
744
745 enum SubTypes {
746 sLedger, // Accepted ledgers.
747 sManifests, // Received validator manifests.
748 sServer, // When server changes connectivity state.
749 sTransactions, // All accepted transactions.
750 sRTTransactions, // All proposed and accepted transactions.
751 sValidations, // Received validations.
752 sPeerStatus, // Peer status changes.
753 sConsensusPhase, // Consensus phase
754 sBookChanges, // Per-ledger order book changes
755 sLastEntry // Any new entry must be ADDED ABOVE this one
756 };
757
759
761
763
764 // Whether we are in standalone mode.
765 bool const m_standalone;
766
767 // The number of nodes that we need to consider ourselves connected.
769
770 // Transaction batching.
775
777
780
781private:
782 struct Stats
783 {
784 template <class Handler>
786 Handler const& handler,
787 beast::insight::Collector::ptr const& collector)
788 : hook(collector->make_hook(handler))
789 , disconnected_duration(collector->make_gauge(
790 "State_Accounting",
791 "Disconnected_duration"))
792 , connected_duration(collector->make_gauge(
793 "State_Accounting",
794 "Connected_duration"))
796 collector->make_gauge("State_Accounting", "Syncing_duration"))
797 , tracking_duration(collector->make_gauge(
798 "State_Accounting",
799 "Tracking_duration"))
801 collector->make_gauge("State_Accounting", "Full_duration"))
802 , disconnected_transitions(collector->make_gauge(
803 "State_Accounting",
804 "Disconnected_transitions"))
805 , connected_transitions(collector->make_gauge(
806 "State_Accounting",
807 "Connected_transitions"))
808 , syncing_transitions(collector->make_gauge(
809 "State_Accounting",
810 "Syncing_transitions"))
811 , tracking_transitions(collector->make_gauge(
812 "State_Accounting",
813 "Tracking_transitions"))
815 collector->make_gauge("State_Accounting", "Full_transitions"))
816 {
817 }
818
825
831 };
832
833 std::mutex m_statsMutex; // Mutex to lock m_stats
835
836private:
837 void
839};
840
841//------------------------------------------------------------------------------
842
844 {"disconnected", "connected", "syncing", "tracking", "full"}};
845
847
855
856static auto const genesisAccountId = calcAccountID(
858 .first);
859
860//------------------------------------------------------------------------------
861inline OperatingMode
863{
864 return mMode;
865}
866
867inline std::string
868NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
869{
870 return strOperatingMode(mMode, admin);
871}
872
873inline void
875{
877}
878
879inline void
881{
882 needNetworkLedger_ = true;
883}
884
885inline void
887{
888 needNetworkLedger_ = false;
889}
890
891inline bool
893{
894 return needNetworkLedger_;
895}
896
897inline bool
899{
901}
902
905{
906 static std::string const hostname = boost::asio::ip::host_name();
907
908 if (forAdmin)
909 return hostname;
910
911 // For non-admin uses hash the node public key into a
912 // single RFC1751 word:
913 static std::string const shroudedHostId = [this]() {
914 auto const& id = app_.nodeIdentity();
915
916 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
917 }();
918
919 return shroudedHostId;
920}
921
922void
924{
926
927 // Only do this work if a cluster is configured
928 if (app_.cluster().size() != 0)
930}
931
932void
934 boost::asio::steady_timer& timer,
935 const std::chrono::milliseconds& expiry_time,
936 std::function<void()> onExpire,
937 std::function<void()> onError)
938{
939 // Only start the timer if waitHandlerCounter_ is not yet joined.
940 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
941 [this, onExpire, onError](boost::system::error_code const& e) {
942 if ((e.value() == boost::system::errc::success) &&
943 (!m_job_queue.isStopped()))
944 {
945 onExpire();
946 }
947 // Recover as best we can if an unexpected error occurs.
948 if (e.value() != boost::system::errc::success &&
949 e.value() != boost::asio::error::operation_aborted)
950 {
951 // Try again later and hope for the best.
952 JLOG(m_journal.error())
953 << "Timer got error '" << e.message()
954 << "'. Restarting timer.";
955 onError();
956 }
957 }))
958 {
959 timer.expires_from_now(expiry_time);
960 timer.async_wait(std::move(*optionalCountedHandler));
961 }
962}
963
964void
965NetworkOPsImp::setHeartbeatTimer()
966{
967 setTimer(
968 heartbeatTimer_,
969 mConsensus.parms().ledgerGRANULARITY,
970 [this]() {
971 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
972 processHeartbeatTimer();
973 });
974 },
975 [this]() { setHeartbeatTimer(); });
976}
977
978void
979NetworkOPsImp::setClusterTimer()
980{
981 using namespace std::chrono_literals;
982
983 setTimer(
984 clusterTimer_,
985 10s,
986 [this]() {
987 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
988 processClusterTimer();
989 });
990 },
991 [this]() { setClusterTimer(); });
992}
993
994void
995NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
996{
997 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
998 << toBase58(subInfo.index_->accountId_);
999 using namespace std::chrono_literals;
1000 setTimer(
1001 accountHistoryTxTimer_,
1002 4s,
1003 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1004 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1005}
1006
1007void
1008NetworkOPsImp::processHeartbeatTimer()
1009{
1010 RclConsensusLogger clog(
1011 "Heartbeat Timer", mConsensus.validating(), m_journal);
1012 {
1013 std::unique_lock lock{app_.getMasterMutex()};
1014
1015 // VFALCO NOTE This is for diagnosing a crash on exit
1016 LoadManager& mgr(app_.getLoadManager());
1018
1019 std::size_t const numPeers = app_.overlay().size();
1020
1021 // do we have sufficient peers? If not, we are disconnected.
1022 if (numPeers < minPeerCount_)
1023 {
1024 if (mMode != OperatingMode::DISCONNECTED)
1025 {
1026 setMode(OperatingMode::DISCONNECTED);
1028 ss << "Node count (" << numPeers << ") has fallen "
1029 << "below required minimum (" << minPeerCount_ << ").";
1030 JLOG(m_journal.warn()) << ss.str();
1031 CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
1032 }
1033 else
1034 {
1035 CLOG(clog.ss())
1036 << "already DISCONNECTED. too few peers (" << numPeers
1037 << "), need at least " << minPeerCount_;
1038 }
1039
1040 // MasterMutex lock need not be held to call setHeartbeatTimer()
1041 lock.unlock();
1042 // We do not call mConsensus.timerEntry until there are enough
1043 // peers providing meaningful inputs to consensus
1044 setHeartbeatTimer();
1045
1046 return;
1047 }
1048
1049 if (mMode == OperatingMode::DISCONNECTED)
1050 {
1051 setMode(OperatingMode::CONNECTED);
1052 JLOG(m_journal.info())
1053 << "Node count (" << numPeers << ") is sufficient.";
1054 CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
1055 << " peers. ";
1056 }
1057
1058 // Check if the last validated ledger forces a change between these
1059 // states.
1060 auto origMode = mMode.load();
1061 CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
1062 if (mMode == OperatingMode::SYNCING)
1063 setMode(OperatingMode::SYNCING);
1064 else if (mMode == OperatingMode::CONNECTED)
1065 setMode(OperatingMode::CONNECTED);
1066 auto newMode = mMode.load();
1067 if (origMode != newMode)
1068 {
1069 CLOG(clog.ss())
1070 << ", changing to " << strOperatingMode(newMode, true);
1071 }
1072 CLOG(clog.ss()) << ". ";
1073 }
1074
1075 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());
1076
1077 CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
1078 const ConsensusPhase currPhase = mConsensus.phase();
1079 if (mLastConsensusPhase != currPhase)
1080 {
1081 reportConsensusStateChange(currPhase);
1082 mLastConsensusPhase = currPhase;
1083 CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
1084 }
1085 CLOG(clog.ss()) << ". ";
1086
1087 setHeartbeatTimer();
1088}
1089
1090void
1091NetworkOPsImp::processClusterTimer()
1092{
1093 if (app_.cluster().size() == 0)
1094 return;
1095
1096 using namespace std::chrono_literals;
1097
1098 bool const update = app_.cluster().update(
1099 app_.nodeIdentity().first,
1100 "",
1101 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1102 ? app_.getFeeTrack().getLocalFee()
1103 : 0,
1104 app_.timeKeeper().now());
1105
1106 if (!update)
1107 {
1108 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1109 setClusterTimer();
1110 return;
1111 }
1112
1113 protocol::TMCluster cluster;
1114 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1115 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1116 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1117 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1118 n.set_nodeload(node.getLoadFee());
1119 if (!node.name().empty())
1120 n.set_nodename(node.name());
1121 });
1122
1123 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1124 for (auto& item : gossip.items)
1125 {
1126 protocol::TMLoadSource& node = *cluster.add_loadsources();
1127 node.set_name(to_string(item.address));
1128 node.set_cost(item.balance);
1129 }
1130 app_.overlay().foreach(send_if(
1131 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1132 peer_in_cluster()));
1133 setClusterTimer();
1134}
1135
1136//------------------------------------------------------------------------------
1137
1139NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
1140 const
1141{
1142 if (mode == OperatingMode::FULL && admin)
1143 {
1144 auto const consensusMode = mConsensus.mode();
1145 if (consensusMode != ConsensusMode::wrongLedger)
1146 {
1147 if (consensusMode == ConsensusMode::proposing)
1148 return "proposing";
1149
1150 if (mConsensus.validating())
1151 return "validating";
1152 }
1153 }
1154
1155 return states_[static_cast<std::size_t>(mode)];
1156}
1157
1158void
1159NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1160{
1161 if (isNeedNetworkLedger())
1162 {
1163 // Nothing we can do if we've never been in sync
1164 return;
1165 }
1166
1167 // this is an asynchronous interface
1168 auto const trans = sterilize(*iTrans);
1169
1170 auto const txid = trans->getTransactionID();
1171 auto const flags = app_.getHashRouter().getFlags(txid);
1172
1173 if ((flags & SF_BAD) != 0)
1174 {
1175 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1176 return;
1177 }
1178
1179 try
1180 {
1181 auto const [validity, reason] = checkValidity(
1182 app_.getHashRouter(),
1183 *trans,
1184 m_ledgerMaster.getValidatedRules(),
1185 app_.config());
1186
1187 if (validity != Validity::Valid)
1188 {
1189 JLOG(m_journal.warn())
1190 << "Submitted transaction invalid: " << reason;
1191 return;
1192 }
1193 }
1194 catch (std::exception const& ex)
1195 {
1196 JLOG(m_journal.warn())
1197 << "Exception checking transaction " << txid << ": " << ex.what();
1198
1199 return;
1200 }
1201
1202 std::string reason;
1203
1204 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1205
1206 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1207 auto t = tx;
1208 processTransaction(t, false, false, FailHard::no);
1209 });
1210}
1211
1212void
1213NetworkOPsImp::processTransaction(
1214 std::shared_ptr<Transaction>& transaction,
1215 bool bUnlimited,
1216 bool bLocal,
1217 FailHard failType)
1218{
1219 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1220 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1221
1222 if ((newFlags & SF_BAD) != 0)
1223 {
1224 // cached bad
1225 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1226 transaction->setStatus(INVALID);
1227 transaction->setResult(temBAD_SIGNATURE);
1228 return;
1229 }
1230
1231 // NOTE eahennis - I think this check is redundant,
1232 // but I'm not 100% sure yet.
1233 // If so, only cost is looking up HashRouter flags.
1234 auto const view = m_ledgerMaster.getCurrentLedger();
1235 auto const [validity, reason] = checkValidity(
1236 app_.getHashRouter(),
1237 *transaction->getSTransaction(),
1238 view->rules(),
1239 app_.config());
1240 XRPL_ASSERT(
1241 validity == Validity::Valid,
1242 "ripple::NetworkOPsImp::processTransaction : valid validity");
1243
1244 // Not concerned with local checks at this point.
1245 if (validity == Validity::SigBad)
1246 {
1247 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1248 transaction->setStatus(INVALID);
1249 transaction->setResult(temBAD_SIGNATURE);
1250 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1251 return;
1252 }
1253
1254 // canonicalize can change our pointer
1255 app_.getMasterTransaction().canonicalize(&transaction);
1256
1257 if (bLocal)
1258 doTransactionSync(transaction, bUnlimited, failType);
1259 else
1260 doTransactionAsync(transaction, bUnlimited, failType);
1261}
1262
1263void
1264NetworkOPsImp::doTransactionAsync(
1265 std::shared_ptr<Transaction> transaction,
1266 bool bUnlimited,
1267 FailHard failType)
1268{
1269 std::lock_guard lock(mMutex);
1270
1271 if (transaction->getApplying())
1272 return;
1273
1274 mTransactions.push_back(
1275 TransactionStatus(transaction, bUnlimited, false, failType));
1276 transaction->setApplying();
1277
1278 if (mDispatchState == DispatchState::none)
1279 {
1280 if (m_job_queue.addJob(
1281 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1282 {
1283 mDispatchState = DispatchState::scheduled;
1284 }
1285 }
1286}
1287
1288void
1289NetworkOPsImp::doTransactionSync(
1290 std::shared_ptr<Transaction> transaction,
1291 bool bUnlimited,
1292 FailHard failType)
1293{
1294 std::unique_lock<std::mutex> lock(mMutex);
1295
1296 if (!transaction->getApplying())
1297 {
1298 mTransactions.push_back(
1299 TransactionStatus(transaction, bUnlimited, true, failType));
1300 transaction->setApplying();
1301 }
1302
1303 do
1304 {
1305 if (mDispatchState == DispatchState::running)
1306 {
1307 // A batch processing job is already running, so wait.
1308 mCond.wait(lock);
1309 }
1310 else
1311 {
1312 apply(lock);
1313
1314 if (mTransactions.size())
1315 {
1316 // More transactions need to be applied, but by another job.
1317 if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
1318 transactionBatch();
1319 }))
1320 {
1321 mDispatchState = DispatchState::scheduled;
1322 }
1323 }
1324 }
1325 } while (transaction->getApplying());
1326}
1327
1328void
1329NetworkOPsImp::transactionBatch()
1330{
1331 std::unique_lock<std::mutex> lock(mMutex);
1332
1333 if (mDispatchState == DispatchState::running)
1334 return;
1335
1336 while (mTransactions.size())
1337 {
1338 apply(lock);
1339 }
1340}
1341
1342void
1343NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
1344{
1346 std::vector<TransactionStatus> transactions;
1347 mTransactions.swap(transactions);
1348 XRPL_ASSERT(
1349 !transactions.empty(),
1350 "ripple::NetworkOPsImp::apply : non-empty transactions");
1351 XRPL_ASSERT(
1352 mDispatchState != DispatchState::running,
1353 "ripple::NetworkOPsImp::apply : is not running");
1354
1355 mDispatchState = DispatchState::running;
1356
1357 batchLock.unlock();
1358
1359 {
1360 std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
1361 bool changed = false;
1362 {
1363 std::unique_lock ledgerLock{
1364 m_ledgerMaster.peekMutex(), std::defer_lock};
1365 std::lock(masterLock, ledgerLock);
1366
1367 app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
1368 for (TransactionStatus& e : transactions)
1369 {
1370 // we check before adding to the batch
1371 ApplyFlags flags = tapNONE;
1372 if (e.admin)
1373 flags |= tapUNLIMITED;
1374
1375 if (e.failType == FailHard::yes)
1376 flags |= tapFAIL_HARD;
1377
1378 auto const result = app_.getTxQ().apply(
1379 app_, view, e.transaction->getSTransaction(), flags, j);
1380 e.result = result.ter;
1381 e.applied = result.applied;
1382 changed = changed || result.applied;
1383 }
1384 return changed;
1385 });
1386 }
1387 if (changed)
1388 reportFeeChange();
1389
1390 std::optional<LedgerIndex> validatedLedgerIndex;
1391 if (auto const l = m_ledgerMaster.getValidatedLedger())
1392 validatedLedgerIndex = l->info().seq;
1393
1394 auto newOL = app_.openLedger().current();
1395 for (TransactionStatus& e : transactions)
1396 {
1397 e.transaction->clearSubmitResult();
1398
1399 if (e.applied)
1400 {
1401 pubProposedTransaction(
1402 newOL, e.transaction->getSTransaction(), e.result);
1403 e.transaction->setApplied();
1404 }
1405
1406 e.transaction->setResult(e.result);
1407
1408 if (isTemMalformed(e.result))
1409 app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);
1410
1411#ifdef DEBUG
1412 if (e.result != tesSUCCESS)
1413 {
1414 std::string token, human;
1415
1416 if (transResultInfo(e.result, token, human))
1417 {
1418 JLOG(m_journal.info())
1419 << "TransactionResult: " << token << ": " << human;
1420 }
1421 }
1422#endif
1423
1424 bool addLocal = e.local;
1425
1426 if (e.result == tesSUCCESS)
1427 {
1428 JLOG(m_journal.debug())
1429 << "Transaction is now included in open ledger";
1430 e.transaction->setStatus(INCLUDED);
1431
1432 auto const& txCur = e.transaction->getSTransaction();
1433 auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);
1434 if (txNext)
1435 {
1436 std::string reason;
1437 auto const trans = sterilize(*txNext);
1438 auto t = std::make_shared<Transaction>(trans, reason, app_);
1439 submit_held.emplace_back(t, false, false, FailHard::no);
1440 t->setApplying();
1441 }
1442 }
1443 else if (e.result == tefPAST_SEQ)
1444 {
1445 // duplicate or conflict
1446 JLOG(m_journal.info()) << "Transaction is obsolete";
1447 e.transaction->setStatus(OBSOLETE);
1448 }
1449 else if (e.result == terQUEUED)
1450 {
1451 JLOG(m_journal.debug())
1452 << "Transaction is likely to claim a"
1453 << " fee, but is queued until fee drops";
1454
1455 e.transaction->setStatus(HELD);
1456 // Add to held transactions, because it could get
1457 // kicked out of the queue, and this will try to
1458 // put it back.
1459 m_ledgerMaster.addHeldTransaction(e.transaction);
1460 e.transaction->setQueued();
1461 e.transaction->setKept();
1462 }
1463 else if (isTerRetry(e.result))
1464 {
1465 if (e.failType != FailHard::yes)
1466 {
1467 // transaction should be held
1468 JLOG(m_journal.debug())
1469 << "Transaction should be held: " << e.result;
1470 e.transaction->setStatus(HELD);
1471 m_ledgerMaster.addHeldTransaction(e.transaction);
1472 e.transaction->setKept();
1473 }
1474 }
1475 else
1476 {
1477 JLOG(m_journal.debug())
1478 << "Status other than success " << e.result;
1479 e.transaction->setStatus(INVALID);
1480 }
1481
1482 auto const enforceFailHard =
1483 e.failType == FailHard::yes && !isTesSuccess(e.result);
1484
1485 if (addLocal && !enforceFailHard)
1486 {
1487 m_localTX->push_back(
1488 m_ledgerMaster.getCurrentLedgerIndex(),
1489 e.transaction->getSTransaction());
1490 e.transaction->setKept();
1491 }
1492
1493 if ((e.applied ||
1494 ((mMode != OperatingMode::FULL) &&
1495 (e.failType != FailHard::yes) && e.local) ||
1496 (e.result == terQUEUED)) &&
1497 !enforceFailHard)
1498 {
1499 auto const toSkip =
1500 app_.getHashRouter().shouldRelay(e.transaction->getID());
1501
1502 if (toSkip)
1503 {
1504 protocol::TMTransaction tx;
1505 Serializer s;
1506
1507 e.transaction->getSTransaction()->add(s);
1508 tx.set_rawtransaction(s.data(), s.size());
1509 tx.set_status(protocol::tsCURRENT);
1510 tx.set_receivetimestamp(
1511 app_.timeKeeper().now().time_since_epoch().count());
1512 tx.set_deferred(e.result == terQUEUED);
1513 // FIXME: This should be when we received it
1514 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1515 e.transaction->setBroadcast();
1516 }
1517 }
1518
1519 if (validatedLedgerIndex)
1520 {
1521 auto [fee, accountSeq, availableSeq] =
1522 app_.getTxQ().getTxRequiredFeeAndSeq(
1523 *newOL, e.transaction->getSTransaction());
1524 e.transaction->setCurrentLedgerState(
1525 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1526 }
1527 }
1528 }
1529
1530 batchLock.lock();
1531
1532 for (TransactionStatus& e : transactions)
1533 e.transaction->clearApplying();
1534
1535 if (!submit_held.empty())
1536 {
1537 if (mTransactions.empty())
1538 mTransactions.swap(submit_held);
1539 else
1540 for (auto& e : submit_held)
1541 mTransactions.push_back(std::move(e));
1542 }
1543
1544 mCond.notify_all();
1545
1546 mDispatchState = DispatchState::none;
1547}
1548
1549//
1550// Owner functions
1551//
1552
1554NetworkOPsImp::getOwnerInfo(
1556 AccountID const& account)
1557{
1558 Json::Value jvObjects(Json::objectValue);
1559 auto root = keylet::ownerDir(account);
1560 auto sleNode = lpLedger->read(keylet::page(root));
1561 if (sleNode)
1562 {
1563 std::uint64_t uNodeDir;
1564
1565 do
1566 {
1567 for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1568 {
1569 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1570 XRPL_ASSERT(
1571 sleCur,
1572 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1573
1574 switch (sleCur->getType())
1575 {
1576 case ltOFFER:
1577 if (!jvObjects.isMember(jss::offers))
1578 jvObjects[jss::offers] =
1580
1581 jvObjects[jss::offers].append(
1582 sleCur->getJson(JsonOptions::none));
1583 break;
1584
1585 case ltRIPPLE_STATE:
1586 if (!jvObjects.isMember(jss::ripple_lines))
1587 {
1588 jvObjects[jss::ripple_lines] =
1590 }
1591
1592 jvObjects[jss::ripple_lines].append(
1593 sleCur->getJson(JsonOptions::none));
1594 break;
1595
1596 case ltACCOUNT_ROOT:
1597 case ltDIR_NODE:
1598 default:
1599 UNREACHABLE(
1600 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1601 "type");
1602 break;
1603 }
1604 }
1605
1606 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1607
1608 if (uNodeDir)
1609 {
1610 sleNode = lpLedger->read(keylet::page(root, uNodeDir));
1611 XRPL_ASSERT(
1612 sleNode,
1613 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1614 }
1615 } while (uNodeDir);
1616 }
1617
1618 return jvObjects;
1619}
1620
1621//
1622// Other
1623//
1624
1625inline bool
1626NetworkOPsImp::isBlocked()
1627{
1628 return isAmendmentBlocked() || isUNLBlocked();
1629}
1630
1631inline bool
1632NetworkOPsImp::isAmendmentBlocked()
1633{
1634 return amendmentBlocked_;
1635}
1636
// Mark this server amendment blocked and drop the operating mode back to
// CONNECTED, since a blocked server must not claim to be tracking or full.
void
NetworkOPsImp::setAmendmentBlocked()
{
    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1643
1644inline bool
1645NetworkOPsImp::isAmendmentWarned()
1646{
1647 return !amendmentBlocked_ && amendmentWarned_;
1648}
1649
// Raise the amendment warning flag (an unsupported amendment has
// reached majority); cleared via clearAmendmentWarned().
inline void
NetworkOPsImp::setAmendmentWarned()
{
    amendmentWarned_ = true;
}
1655
// Clear the amendment warning flag set by setAmendmentWarned().
inline void
NetworkOPsImp::clearAmendmentWarned()
{
    amendmentWarned_ = false;
}
1661
1662inline bool
1663NetworkOPsImp::isUNLBlocked()
1664{
1665 return unlBlocked_;
1666}
1667
// Mark this server UNL blocked (expired validator list) and drop the
// operating mode back to CONNECTED, since a blocked server must not
// claim to be tracking or full.
void
NetworkOPsImp::setUNLBlocked()
{
    unlBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1674
// Clear the UNL-blocked flag set by setUNLBlocked().
inline void
NetworkOPsImp::clearUNLBlocked()
{
    unlBlocked_ = false;
}
1680
1681bool
1682NetworkOPsImp::checkLastClosedLedger(
1683 const Overlay::PeerSequence& peerList,
1684 uint256& networkClosed)
1685{
1686 // Returns true if there's an *abnormal* ledger issue, normal changing in
1687 // TRACKING mode should return false. Do we have sufficient validations for
1688 // our last closed ledger? Or do sufficient nodes agree? And do we have no
1689 // better ledger available? If so, we are either tracking or full.
1690
1691 JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";
1692
1693 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1694
1695 if (!ourClosed)
1696 return false;
1697
1698 uint256 closedLedger = ourClosed->info().hash;
1699 uint256 prevClosedLedger = ourClosed->info().parentHash;
1700 JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
1701 JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;
1702
1703 //-------------------------------------------------------------------------
1704 // Determine preferred last closed ledger
1705
1706 auto& validations = app_.getValidations();
1707 JLOG(m_journal.debug())
1708 << "ValidationTrie " << Json::Compact(validations.getJsonTrie());
1709
1710 // Will rely on peer LCL if no trusted validations exist
1712 peerCounts[closedLedger] = 0;
1713 if (mMode >= OperatingMode::TRACKING)
1714 peerCounts[closedLedger]++;
1715
1716 for (auto& peer : peerList)
1717 {
1718 uint256 peerLedger = peer->getClosedLedgerHash();
1719
1720 if (peerLedger.isNonZero())
1721 ++peerCounts[peerLedger];
1722 }
1723
1724 for (auto const& it : peerCounts)
1725 JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;
1726
1727 uint256 preferredLCL = validations.getPreferredLCL(
1728 RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
1729 m_ledgerMaster.getValidLedgerIndex(),
1730 peerCounts);
1731
1732 bool switchLedgers = preferredLCL != closedLedger;
1733 if (switchLedgers)
1734 closedLedger = preferredLCL;
1735 //-------------------------------------------------------------------------
1736 if (switchLedgers && (closedLedger == prevClosedLedger))
1737 {
1738 // don't switch to our own previous ledger
1739 JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
1740 networkClosed = ourClosed->info().hash;
1741 switchLedgers = false;
1742 }
1743 else
1744 networkClosed = closedLedger;
1745
1746 if (!switchLedgers)
1747 return false;
1748
1749 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1750
1751 if (!consensus)
1752 consensus = app_.getInboundLedgers().acquire(
1753 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1754
1755 if (consensus &&
1756 (!m_ledgerMaster.canBeCurrent(consensus) ||
1757 !m_ledgerMaster.isCompatible(
1758 *consensus, m_journal.debug(), "Not switching")))
1759 {
1760 // Don't switch to a ledger not on the validated chain
1761 // or with an invalid close time or sequence
1762 networkClosed = ourClosed->info().hash;
1763 return false;
1764 }
1765
1766 JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
1767 JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
1768 << getJson({*ourClosed, {}});
1769 JLOG(m_journal.info()) << "Net LCL " << closedLedger;
1770
1771 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1772 {
1773 setMode(OperatingMode::CONNECTED);
1774 }
1775
1776 if (consensus)
1777 {
1778 // FIXME: If this rewinds the ledger sequence, or has the same
1779 // sequence, we should update the status on any stored transactions
1780 // in the invalidated ledgers.
1781 switchLastClosedLedger(consensus);
1782 }
1783
1784 return true;
1785}
1786
1787void
1788NetworkOPsImp::switchLastClosedLedger(
1789 std::shared_ptr<Ledger const> const& newLCL)
1790{
1791 // set the newLCL as our last closed ledger -- this is abnormal code
1792 JLOG(m_journal.error())
1793 << "JUMP last closed ledger to " << newLCL->info().hash;
1794
1795 clearNeedNetworkLedger();
1796
1797 // Update fee computations.
1798 app_.getTxQ().processClosedLedger(app_, *newLCL, true);
1799
1800 // Caller must own master lock
1801 {
1802 // Apply tx in old open ledger to new
1803 // open ledger. Then apply local tx.
1804
1805 auto retries = m_localTX->getTxSet();
1806 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1808 if (lastVal)
1809 rules = makeRulesGivenLedger(*lastVal, app_.config().features);
1810 else
1811 rules.emplace(app_.config().features);
1812 app_.openLedger().accept(
1813 app_,
1814 *rules,
1815 newLCL,
1816 OrderedTxs({}),
1817 false,
1818 retries,
1819 tapNONE,
1820 "jump",
1821 [&](OpenView& view, beast::Journal j) {
1822 // Stuff the ledger with transactions from the queue.
1823 return app_.getTxQ().accept(app_, view);
1824 });
1825 }
1826
1827 m_ledgerMaster.switchLCL(newLCL);
1828
1829 protocol::TMStatusChange s;
1830 s.set_newevent(protocol::neSWITCHED_LEDGER);
1831 s.set_ledgerseq(newLCL->info().seq);
1832 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1833 s.set_ledgerhashprevious(
1834 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
1835 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
1836
1837 app_.overlay().foreach(
1838 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
1839}
1840
1841bool
1842NetworkOPsImp::beginConsensus(
1843 uint256 const& networkClosed,
1845{
1846 XRPL_ASSERT(
1847 networkClosed.isNonZero(),
1848 "ripple::NetworkOPsImp::beginConsensus : nonzero input");
1849
1850 auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
1851
1852 JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
1853 << " with LCL " << closingInfo.parentHash;
1854
1855 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
1856
1857 if (!prevLedger)
1858 {
1859 // this shouldn't happen unless we jump ledgers
1860 if (mMode == OperatingMode::FULL)
1861 {
1862 JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
1863 setMode(OperatingMode::TRACKING);
1864 CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
1865 }
1866
1867 CLOG(clog) << "beginConsensus no previous ledger. ";
1868 return false;
1869 }
1870
1871 XRPL_ASSERT(
1872 prevLedger->info().hash == closingInfo.parentHash,
1873 "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
1874 "parent");
1875 XRPL_ASSERT(
1876 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
1877 "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
1878 "hash");
1879
1880 if (prevLedger->rules().enabled(featureNegativeUNL))
1881 app_.validators().setNegativeUNL(prevLedger->negativeUNL());
1882 TrustChanges const changes = app_.validators().updateTrusted(
1883 app_.getValidations().getCurrentNodeIDs(),
1884 closingInfo.parentCloseTime,
1885 *this,
1886 app_.overlay(),
1887 app_.getHashRouter());
1888
1889 if (!changes.added.empty() || !changes.removed.empty())
1890 {
1891 app_.getValidations().trustChanged(changes.added, changes.removed);
1892 // Update the AmendmentTable so it tracks the current validators.
1893 app_.getAmendmentTable().trustChanged(
1894 app_.validators().getQuorumKeys().second);
1895 }
1896
1897 mConsensus.startRound(
1898 app_.timeKeeper().closeTime(),
1899 networkClosed,
1900 prevLedger,
1901 changes.removed,
1902 changes.added,
1903 clog);
1904
1905 const ConsensusPhase currPhase = mConsensus.phase();
1906 if (mLastConsensusPhase != currPhase)
1907 {
1908 reportConsensusStateChange(currPhase);
1909 mLastConsensusPhase = currPhase;
1910 }
1911
1912 JLOG(m_journal.debug()) << "Initiating consensus engine";
1913 return true;
1914}
1915
1916bool
1917NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
1918{
1919 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
1920}
1921
1922void
1923NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
1924{
1925 // We now have an additional transaction set
1926 // either created locally during the consensus process
1927 // or acquired from a peer
1928
1929 // Inform peers we have this set
1930 protocol::TMHaveTransactionSet msg;
1931 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
1932 msg.set_status(protocol::tsHAVE);
1933 app_.overlay().foreach(
1934 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
1935
1936 // We acquired it because consensus asked us to
1937 if (fromAcquire)
1938 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
1939}
1940
1941void
1942NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
1943{
1944 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
1945
1946 for (auto const& it : app_.overlay().getActivePeers())
1947 {
1948 if (it && (it->getClosedLedgerHash() == deadLedger))
1949 {
1950 JLOG(m_journal.trace()) << "Killing obsolete peer status";
1951 it->cycleStatus();
1952 }
1953 }
1954
1955 uint256 networkClosed;
1956 bool ledgerChange =
1957 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
1958
1959 if (networkClosed.isZero())
1960 {
1961 CLOG(clog) << "endConsensus last closed ledger is zero. ";
1962 return;
1963 }
1964
1965 // WRITEME: Unless we are in FULL and in the process of doing a consensus,
1966 // we must count how many nodes share our LCL, how many nodes disagree with
1967 // our LCL, and how many validations our LCL has. We also want to check
1968 // timing to make sure there shouldn't be a newer LCL. We need this
1969 // information to do the next three tests.
1970
1971 if (((mMode == OperatingMode::CONNECTED) ||
1972 (mMode == OperatingMode::SYNCING)) &&
1973 !ledgerChange)
1974 {
1975 // Count number of peers that agree with us and UNL nodes whose
1976 // validations we have for LCL. If the ledger is good enough, go to
1977 // TRACKING - TODO
1978 if (!needNetworkLedger_)
1979 setMode(OperatingMode::TRACKING);
1980 }
1981
1982 if (((mMode == OperatingMode::CONNECTED) ||
1983 (mMode == OperatingMode::TRACKING)) &&
1984 !ledgerChange)
1985 {
1986 // check if the ledger is good enough to go to FULL
1987 // Note: Do not go to FULL if we don't have the previous ledger
1988 // check if the ledger is bad enough to go to CONNECTE D -- TODO
1989 auto current = m_ledgerMaster.getCurrentLedger();
1990 if (app_.timeKeeper().now() < (current->info().parentCloseTime +
1991 2 * current->info().closeTimeResolution))
1992 {
1993 setMode(OperatingMode::FULL);
1994 }
1995 }
1996
1997 beginConsensus(networkClosed, clog);
1998}
1999
2000void
2001NetworkOPsImp::consensusViewChange()
2002{
2003 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2004 {
2005 setMode(OperatingMode::CONNECTED);
2006 }
2007}
2008
2009void
2010NetworkOPsImp::pubManifest(Manifest const& mo)
2011{
2012 // VFALCO consider std::shared_mutex
2013 std::lock_guard sl(mSubLock);
2014
2015 if (!mStreamMaps[sManifests].empty())
2016 {
2018
2019 jvObj[jss::type] = "manifestReceived";
2020 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
2021 if (mo.signingKey)
2022 jvObj[jss::signing_key] =
2023 toBase58(TokenType::NodePublic, *mo.signingKey);
2024 jvObj[jss::seq] = Json::UInt(mo.sequence);
2025 if (auto sig = mo.getSignature())
2026 jvObj[jss::signature] = strHex(*sig);
2027 jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
2028 if (!mo.domain.empty())
2029 jvObj[jss::domain] = mo.domain;
2030 jvObj[jss::manifest] = strHex(mo.serialized);
2031
2032 for (auto i = mStreamMaps[sManifests].begin();
2033 i != mStreamMaps[sManifests].end();)
2034 {
2035 if (auto p = i->second.lock())
2036 {
2037 p->send(jvObj, true);
2038 ++i;
2039 }
2040 else
2041 {
2042 i = mStreamMaps[sManifests].erase(i);
2043 }
2044 }
2045 }
2046}
2047
// Snapshot the fee state published on the "server" stream: the server
// load fee (from LoadFeeTrack), the open ledger's base fee, and the
// transaction queue's escalation metrics.
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    XRPAmount fee,
    TxQ::Metrics&& escalationMetrics,
    LoadFeeTrack const& loadFeeTrack)
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , baseFee{fee}
    , em{std::move(escalationMetrics)}
{
}
2058
2059bool
2061 NetworkOPsImp::ServerFeeSummary const& b) const
2062{
2063 if (loadFactorServer != b.loadFactorServer ||
2064 loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
2065 em.has_value() != b.em.has_value())
2066 return true;
2067
2068 if (em && b.em)
2069 {
2070 return (
2071 em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
2072 em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
2073 em->referenceFeeLevel != b.em->referenceFeeLevel);
2074 }
2075
2076 return false;
2077}
2078
// Clamp a uint64 value to the uint32 range, because Json::Value cannot
// represent a full 64-bit integer.
static std::uint32_t
trunc32(std::uint64_t v)
{
    constexpr std::uint64_t max32 = std::numeric_limits<std::uint32_t>::max();

    // Explicit cast: the value is guaranteed to fit after the clamp.
    return static_cast<std::uint32_t>(std::min(max32, v));
}
2087
2088void
2090{
2091 // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
2092 // list into a local array while holding the lock then release
2093 // the lock and call send on everyone.
2094 //
2096
2097 if (!mStreamMaps[sServer].empty())
2098 {
2100
2102 app_.openLedger().current()->fees().base,
2104 app_.getFeeTrack()};
2105
2106 jvObj[jss::type] = "serverStatus";
2107 jvObj[jss::server_status] = strOperatingMode();
2108 jvObj[jss::load_base] = f.loadBaseServer;
2109 jvObj[jss::load_factor_server] = f.loadFactorServer;
2110 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2111
2112 if (f.em)
2113 {
2114 auto const loadFactor = std::max(
2115 safe_cast<std::uint64_t>(f.loadFactorServer),
2116 mulDiv(
2117 f.em->openLedgerFeeLevel,
2118 f.loadBaseServer,
2119 f.em->referenceFeeLevel)
2121
2122 jvObj[jss::load_factor] = trunc32(loadFactor);
2123 jvObj[jss::load_factor_fee_escalation] =
2124 f.em->openLedgerFeeLevel.jsonClipped();
2125 jvObj[jss::load_factor_fee_queue] =
2126 f.em->minProcessingFeeLevel.jsonClipped();
2127 jvObj[jss::load_factor_fee_reference] =
2128 f.em->referenceFeeLevel.jsonClipped();
2129 }
2130 else
2131 jvObj[jss::load_factor] = f.loadFactorServer;
2132
2133 mLastFeeSummary = f;
2134
2135 for (auto i = mStreamMaps[sServer].begin();
2136 i != mStreamMaps[sServer].end();)
2137 {
2138 InfoSub::pointer p = i->second.lock();
2139
2140 // VFALCO TODO research the possibility of using thread queues and
2141 // linearizing the deletion of subscribers with the
2142 // sending of JSON data.
2143 if (p)
2144 {
2145 p->send(jvObj, true);
2146 ++i;
2147 }
2148 else
2149 {
2150 i = mStreamMaps[sServer].erase(i);
2151 }
2152 }
2153 }
2154}
2155
2156void
2158{
2160
2161 auto& streamMap = mStreamMaps[sConsensusPhase];
2162 if (!streamMap.empty())
2163 {
2165 jvObj[jss::type] = "consensusPhase";
2166 jvObj[jss::consensus] = to_string(phase);
2167
2168 for (auto i = streamMap.begin(); i != streamMap.end();)
2169 {
2170 if (auto p = i->second.lock())
2171 {
2172 p->send(jvObj, true);
2173 ++i;
2174 }
2175 else
2176 {
2177 i = streamMap.erase(i);
2178 }
2179 }
2180 }
2181}
2182
2183void
2185{
2186 // VFALCO consider std::shared_mutex
2188
2189 if (!mStreamMaps[sValidations].empty())
2190 {
2192
2193 auto const signerPublic = val->getSignerPublic();
2194
2195 jvObj[jss::type] = "validationReceived";
2196 jvObj[jss::validation_public_key] =
2197 toBase58(TokenType::NodePublic, signerPublic);
2198 jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
2199 jvObj[jss::signature] = strHex(val->getSignature());
2200 jvObj[jss::full] = val->isFull();
2201 jvObj[jss::flags] = val->getFlags();
2202 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2203 jvObj[jss::data] = strHex(val->getSerializer().slice());
2204
2205 if (auto version = (*val)[~sfServerVersion])
2206 jvObj[jss::server_version] = std::to_string(*version);
2207
2208 if (auto cookie = (*val)[~sfCookie])
2209 jvObj[jss::cookie] = std::to_string(*cookie);
2210
2211 if (auto hash = (*val)[~sfValidatedHash])
2212 jvObj[jss::validated_hash] = strHex(*hash);
2213
2214 auto const masterKey =
2215 app_.validatorManifests().getMasterKey(signerPublic);
2216
2217 if (masterKey != signerPublic)
2218 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
2219
2220 // NOTE *seq is a number, but old API versions used string. We replace
2221 // number with a string using MultiApiJson near end of this function
2222 if (auto const seq = (*val)[~sfLedgerSequence])
2223 jvObj[jss::ledger_index] = *seq;
2224
2225 if (val->isFieldPresent(sfAmendments))
2226 {
2227 jvObj[jss::amendments] = Json::Value(Json::arrayValue);
2228 for (auto const& amendment : val->getFieldV256(sfAmendments))
2229 jvObj[jss::amendments].append(to_string(amendment));
2230 }
2231
2232 if (auto const closeTime = (*val)[~sfCloseTime])
2233 jvObj[jss::close_time] = *closeTime;
2234
2235 if (auto const loadFee = (*val)[~sfLoadFee])
2236 jvObj[jss::load_fee] = *loadFee;
2237
2238 if (auto const baseFee = val->at(~sfBaseFee))
2239 jvObj[jss::base_fee] = static_cast<double>(*baseFee);
2240
2241 if (auto const reserveBase = val->at(~sfReserveBase))
2242 jvObj[jss::reserve_base] = *reserveBase;
2243
2244 if (auto const reserveInc = val->at(~sfReserveIncrement))
2245 jvObj[jss::reserve_inc] = *reserveInc;
2246
2247 // (The ~ operator converts the Proxy to a std::optional, which
2248 // simplifies later operations)
2249 if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2250 baseFeeXRP && baseFeeXRP->native())
2251 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2252
2253 if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2254 reserveBaseXRP && reserveBaseXRP->native())
2255 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2256
2257 if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2258 reserveIncXRP && reserveIncXRP->native())
2259 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2260
2261 // NOTE Use MultiApiJson to publish two slightly different JSON objects
2262 // for consumers supporting different API versions
2263 MultiApiJson multiObj{jvObj};
2264 multiObj.visit(
2265 RPC::apiVersion<1>, //
2266 [](Json::Value& jvTx) {
2267 // Type conversion for older API versions to string
2268 if (jvTx.isMember(jss::ledger_index))
2269 {
2270 jvTx[jss::ledger_index] =
2271 std::to_string(jvTx[jss::ledger_index].asUInt());
2272 }
2273 });
2274
2275 for (auto i = mStreamMaps[sValidations].begin();
2276 i != mStreamMaps[sValidations].end();)
2277 {
2278 if (auto p = i->second.lock())
2279 {
2280 multiObj.visit(
2281 p->getApiVersion(), //
2282 [&](Json::Value const& jv) { p->send(jv, true); });
2283 ++i;
2284 }
2285 else
2286 {
2287 i = mStreamMaps[sValidations].erase(i);
2288 }
2289 }
2290 }
2291}
2292
2293void
2295{
2297
2298 if (!mStreamMaps[sPeerStatus].empty())
2299 {
2300 Json::Value jvObj(func());
2301
2302 jvObj[jss::type] = "peerStatusChange";
2303
2304 for (auto i = mStreamMaps[sPeerStatus].begin();
2305 i != mStreamMaps[sPeerStatus].end();)
2306 {
2307 InfoSub::pointer p = i->second.lock();
2308
2309 if (p)
2310 {
2311 p->send(jvObj, true);
2312 ++i;
2313 }
2314 else
2315 {
2316 i = mStreamMaps[sPeerStatus].erase(i);
2317 }
2318 }
2319 }
2320}
2321
2322void
2324{
2325 using namespace std::chrono_literals;
2326 if (om == OperatingMode::CONNECTED)
2327 {
2330 }
2331 else if (om == OperatingMode::SYNCING)
2332 {
2335 }
2336
2337 if ((om > OperatingMode::CONNECTED) && isBlocked())
2339
2340 if (mMode == om)
2341 return;
2342
2343 mMode = om;
2344
2345 accounting_.mode(om);
2346
2347 JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
2348 pubServer();
2349}
2350
2351bool
2354 std::string const& source)
2355{
2356 JLOG(m_journal.trace())
2357 << "recvValidation " << val->getLedgerHash() << " from " << source;
2358
2360 BypassAccept bypassAccept = BypassAccept::no;
2361 try
2362 {
2363 if (pendingValidations_.contains(val->getLedgerHash()))
2364 bypassAccept = BypassAccept::yes;
2365 else
2366 pendingValidations_.insert(val->getLedgerHash());
2367 scope_unlock unlock(lock);
2368 handleNewValidation(app_, val, source, bypassAccept, m_journal);
2369 }
2370 catch (std::exception const& e)
2371 {
2372 JLOG(m_journal.warn())
2373 << "Exception thrown for handling new validation "
2374 << val->getLedgerHash() << ": " << e.what();
2375 }
2376 catch (...)
2377 {
2378 JLOG(m_journal.warn())
2379 << "Unknown exception thrown for handling new validation "
2380 << val->getLedgerHash();
2381 }
2382 if (bypassAccept == BypassAccept::no)
2383 {
2384 pendingValidations_.erase(val->getLedgerHash());
2385 }
2386 lock.unlock();
2387
2388 pubValidation(val);
2389
2390 JLOG(m_journal.debug()) << [this, &val]() -> auto {
2392 ss << "VALIDATION: " << val->render() << " master_key: ";
2393 auto master = app_.validators().getTrustedKey(val->getSignerPublic());
2394 if (master)
2395 {
2396 ss << toBase58(TokenType::NodePublic, *master);
2397 }
2398 else
2399 {
2400 ss << "none";
2401 }
2402 return ss.str();
2403 }();
2404
2405 // We will always relay trusted validations; if configured, we will
2406 // also relay all untrusted validations.
2407 return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2408}
2409
2412{
2413 return mConsensus.getJson(true);
2414}
2415
2417NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2418{
2420
2421 // System-level warnings
2422 {
2423 Json::Value warnings{Json::arrayValue};
2424 if (isAmendmentBlocked())
2425 {
2426 Json::Value& w = warnings.append(Json::objectValue);
2427 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2428 w[jss::message] =
2429 "This server is amendment blocked, and must be updated to be "
2430 "able to stay in sync with the network.";
2431 }
2432 if (isUNLBlocked())
2433 {
2434 Json::Value& w = warnings.append(Json::objectValue);
2435 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2436 w[jss::message] =
2437 "This server has an expired validator list. validators.txt "
2438 "may be incorrectly configured or some [validator_list_sites] "
2439 "may be unreachable.";
2440 }
2441 if (admin && isAmendmentWarned())
2442 {
2443 Json::Value& w = warnings.append(Json::objectValue);
2444 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2445 w[jss::message] =
2446 "One or more unsupported amendments have reached majority. "
2447 "Upgrade to the latest version before they are activated "
2448 "to avoid being amendment blocked.";
2449 if (auto const expected =
2451 {
2452 auto& d = w[jss::details] = Json::objectValue;
2453 d[jss::expected_date] = expected->time_since_epoch().count();
2454 d[jss::expected_date_UTC] = to_string(*expected);
2455 }
2456 }
2457
2458 if (warnings.size())
2459 info[jss::warnings] = std::move(warnings);
2460 }
2461
2462 // hostid: unique string describing the machine
2463 if (human)
2464 info[jss::hostid] = getHostId(admin);
2465
2466 // domain: if configured with a domain, report it:
2467 if (!app_.config().SERVER_DOMAIN.empty())
2468 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2469
2470 info[jss::build_version] = BuildInfo::getVersionString();
2471
2472 info[jss::server_state] = strOperatingMode(admin);
2473
2474 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2476
2478 info[jss::network_ledger] = "waiting";
2479
2480 info[jss::validation_quorum] =
2481 static_cast<Json::UInt>(app_.validators().quorum());
2482
2483 if (admin)
2484 {
2485 switch (app_.config().NODE_SIZE)
2486 {
2487 case 0:
2488 info[jss::node_size] = "tiny";
2489 break;
2490 case 1:
2491 info[jss::node_size] = "small";
2492 break;
2493 case 2:
2494 info[jss::node_size] = "medium";
2495 break;
2496 case 3:
2497 info[jss::node_size] = "large";
2498 break;
2499 case 4:
2500 info[jss::node_size] = "huge";
2501 break;
2502 }
2503
2504 auto when = app_.validators().expires();
2505
2506 if (!human)
2507 {
2508 if (when)
2509 info[jss::validator_list_expires] =
2510 safe_cast<Json::UInt>(when->time_since_epoch().count());
2511 else
2512 info[jss::validator_list_expires] = 0;
2513 }
2514 else
2515 {
2516 auto& x = (info[jss::validator_list] = Json::objectValue);
2517
2518 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2519
2520 if (when)
2521 {
2522 if (*when == TimeKeeper::time_point::max())
2523 {
2524 x[jss::expiration] = "never";
2525 x[jss::status] = "active";
2526 }
2527 else
2528 {
2529 x[jss::expiration] = to_string(*when);
2530
2531 if (*when > app_.timeKeeper().now())
2532 x[jss::status] = "active";
2533 else
2534 x[jss::status] = "expired";
2535 }
2536 }
2537 else
2538 {
2539 x[jss::status] = "unknown";
2540 x[jss::expiration] = "unknown";
2541 }
2542 }
2543
2544#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2545 {
2546 auto& x = (info[jss::git] = Json::objectValue);
2547#ifdef GIT_COMMIT_HASH
2548 x[jss::hash] = GIT_COMMIT_HASH;
2549#endif
2550#ifdef GIT_BRANCH
2551 x[jss::branch] = GIT_BRANCH;
2552#endif
2553 }
2554#endif
2555 }
2556 info[jss::io_latency_ms] =
2557 static_cast<Json::UInt>(app_.getIOLatency().count());
2558
2559 if (admin)
2560 {
2561 if (auto const localPubKey = app_.validators().localPublicKey();
2562 localPubKey && app_.getValidationPublicKey())
2563 {
2564 info[jss::pubkey_validator] =
2565 toBase58(TokenType::NodePublic, localPubKey.value());
2566 }
2567 else
2568 {
2569 info[jss::pubkey_validator] = "none";
2570 }
2571 }
2572
2573 if (counters)
2574 {
2575 info[jss::counters] = app_.getPerfLog().countersJson();
2576
2577 Json::Value nodestore(Json::objectValue);
2578 app_.getNodeStore().getCountsJson(nodestore);
2579 info[jss::counters][jss::nodestore] = nodestore;
2580 info[jss::current_activities] = app_.getPerfLog().currentJson();
2581 }
2582
2583 info[jss::pubkey_node] =
2585
2586 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2587
2589 info[jss::amendment_blocked] = true;
2590
2591 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2592
2593 if (fp != 0)
2594 info[jss::fetch_pack] = Json::UInt(fp);
2595
2596 info[jss::peers] = Json::UInt(app_.overlay().size());
2597
2598 Json::Value lastClose = Json::objectValue;
2599 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2600
2601 if (human)
2602 {
2603 lastClose[jss::converge_time_s] =
2605 }
2606 else
2607 {
2608 lastClose[jss::converge_time] =
2610 }
2611
2612 info[jss::last_close] = lastClose;
2613
2614 // info[jss::consensus] = mConsensus.getJson();
2615
2616 if (admin)
2617 info[jss::load] = m_job_queue.getJson();
2618
2619 if (auto const netid = app_.overlay().networkID())
2620 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2621
2622 auto const escalationMetrics =
2624
2625 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2626 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2627 /* Scale the escalated fee level to unitless "load factor".
2628 In practice, this just strips the units, but it will continue
2629 to work correctly if either base value ever changes. */
2630 auto const loadFactorFeeEscalation =
2631 mulDiv(
2632 escalationMetrics.openLedgerFeeLevel,
2633 loadBaseServer,
2634 escalationMetrics.referenceFeeLevel)
2636
2637 auto const loadFactor = std::max(
2638 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2639
2640 if (!human)
2641 {
2642 info[jss::load_base] = loadBaseServer;
2643 info[jss::load_factor] = trunc32(loadFactor);
2644 info[jss::load_factor_server] = loadFactorServer;
2645
2646 /* Json::Value doesn't support uint64, so clamp to max
2647 uint32 value. This is mostly theoretical, since there
2648 probably isn't enough extant XRP to drive the factor
2649 that high.
2650 */
2651 info[jss::load_factor_fee_escalation] =
2652 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2653 info[jss::load_factor_fee_queue] =
2654 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2655 info[jss::load_factor_fee_reference] =
2656 escalationMetrics.referenceFeeLevel.jsonClipped();
2657 }
2658 else
2659 {
2660 info[jss::load_factor] =
2661 static_cast<double>(loadFactor) / loadBaseServer;
2662
2663 if (loadFactorServer != loadFactor)
2664 info[jss::load_factor_server] =
2665 static_cast<double>(loadFactorServer) / loadBaseServer;
2666
2667 if (admin)
2668 {
2670 if (fee != loadBaseServer)
2671 info[jss::load_factor_local] =
2672 static_cast<double>(fee) / loadBaseServer;
2673 fee = app_.getFeeTrack().getRemoteFee();
2674 if (fee != loadBaseServer)
2675 info[jss::load_factor_net] =
2676 static_cast<double>(fee) / loadBaseServer;
2677 fee = app_.getFeeTrack().getClusterFee();
2678 if (fee != loadBaseServer)
2679 info[jss::load_factor_cluster] =
2680 static_cast<double>(fee) / loadBaseServer;
2681 }
2682 if (escalationMetrics.openLedgerFeeLevel !=
2683 escalationMetrics.referenceFeeLevel &&
2684 (admin || loadFactorFeeEscalation != loadFactor))
2685 info[jss::load_factor_fee_escalation] =
2686 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2687 escalationMetrics.referenceFeeLevel);
2688 if (escalationMetrics.minProcessingFeeLevel !=
2689 escalationMetrics.referenceFeeLevel)
2690 info[jss::load_factor_fee_queue] =
2691 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2692 escalationMetrics.referenceFeeLevel);
2693 }
2694
2695 bool valid = false;
2696 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2697
2698 if (lpClosed)
2699 valid = true;
2700 else
2701 lpClosed = m_ledgerMaster.getClosedLedger();
2702
2703 if (lpClosed)
2704 {
2705 XRPAmount const baseFee = lpClosed->fees().base;
2707 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2708 l[jss::hash] = to_string(lpClosed->info().hash);
2709
2710 if (!human)
2711 {
2712 l[jss::base_fee] = baseFee.jsonClipped();
2713 l[jss::reserve_base] =
2714 lpClosed->fees().accountReserve(0).jsonClipped();
2715 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2716 l[jss::close_time] = Json::Value::UInt(
2717 lpClosed->info().closeTime.time_since_epoch().count());
2718 }
2719 else
2720 {
2721 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2722 l[jss::reserve_base_xrp] =
2723 lpClosed->fees().accountReserve(0).decimalXRP();
2724 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2725
2726 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2727 std::abs(closeOffset.count()) >= 60)
2728 l[jss::close_time_offset] =
2729 static_cast<std::uint32_t>(closeOffset.count());
2730
2731 constexpr std::chrono::seconds highAgeThreshold{1000000};
2733 {
2734 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2735 l[jss::age] =
2736 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2737 }
2738 else
2739 {
2740 auto lCloseTime = lpClosed->info().closeTime;
2741 auto closeTime = app_.timeKeeper().closeTime();
2742 if (lCloseTime <= closeTime)
2743 {
2744 using namespace std::chrono_literals;
2745 auto age = closeTime - lCloseTime;
2746 l[jss::age] =
2747 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2748 }
2749 }
2750 }
2751
2752 if (valid)
2753 info[jss::validated_ledger] = l;
2754 else
2755 info[jss::closed_ledger] = l;
2756
2757 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2758 if (!lpPublished)
2759 info[jss::published_ledger] = "none";
2760 else if (lpPublished->info().seq != lpClosed->info().seq)
2761 info[jss::published_ledger] = lpPublished->info().seq;
2762 }
2763
2764 accounting_.json(info);
2765 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2766 info[jss::jq_trans_overflow] =
2768 info[jss::peer_disconnects] =
2770 info[jss::peer_disconnects_resources] =
2772
2773 // This array must be sorted in increasing order.
2774 static constexpr std::array<std::string_view, 7> protocols{
2775 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2776 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2777 {
2779 for (auto const& port : app_.getServerHandler().setup().ports)
2780 {
2781 // Don't publish admin ports for non-admin users
2782 if (!admin &&
2783 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2784 port.admin_user.empty() && port.admin_password.empty()))
2785 continue;
2788 std::begin(port.protocol),
2789 std::end(port.protocol),
2790 std::begin(protocols),
2791 std::end(protocols),
2792 std::back_inserter(proto));
2793 if (!proto.empty())
2794 {
2795 auto& jv = ports.append(Json::Value(Json::objectValue));
2796 jv[jss::port] = std::to_string(port.port);
2797 jv[jss::protocol] = Json::Value{Json::arrayValue};
2798 for (auto const& p : proto)
2799 jv[jss::protocol].append(p);
2800 }
2801 }
2802
2803 if (app_.config().exists(SECTION_PORT_GRPC))
2804 {
2805 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
2806 auto const optPort = grpcSection.get("port");
2807 if (optPort && grpcSection.get("ip"))
2808 {
2809 auto& jv = ports.append(Json::Value(Json::objectValue));
2810 jv[jss::port] = *optPort;
2811 jv[jss::protocol] = Json::Value{Json::arrayValue};
2812 jv[jss::protocol].append("grpc");
2813 }
2814 }
2815 info[jss::ports] = std::move(ports);
2816 }
2817
2818 return info;
2819}
2820
// NOTE(review): HTML-scrape artifact — source lines 2822 (function name) and
// 2824 (body statement) were dropped during extraction. Upstream, this slot
// corresponds to NetworkOPsImp::clearLedgerFetch(), which forwards to the
// inbound-ledgers subsystem — TODO confirm against the original file.
2821 void
2823 {
2825 }
2826
2829{
2830 return app_.getInboundLedgers().getInfo();
2831}
2832
// Publish a proposed (not yet validated) transaction to all subscribers of the
// real-time transactions stream (sRTTransactions), then fan out to per-account
// real-time subscribers via pubProposedAccountTransaction().
// NOTE(review): extraction dropped line 2834 (function name — presumably
// NetworkOPsImp::pubProposedTransaction) and line 2843 (presumably the
// stream-map lock acquisition) — TODO confirm against the original file.
2833 void
2835 std::shared_ptr<ReadView const> const& ledger,
2836 std::shared_ptr<STTx const> const& transaction,
2837 TER result)
2838 {
// Build one JSON object per supported API version (validated == false).
2839 MultiApiJson jvObj =
2840 transJson(transaction, result, false, ledger, std::nullopt);
2841
2842 {
2844
// Walk the subscriber map; expired weak_ptr entries are erased in place.
2845 auto it = mStreamMaps[sRTTransactions].begin();
2846 while (it != mStreamMaps[sRTTransactions].end())
2847 {
2848 InfoSub::pointer p = it->second.lock();
2849
2850 if (p)
2851 {
// Send the JSON variant matching this subscriber's API version.
2852 jvObj.visit(
2853 p->getApiVersion(), //
2854 [&](Json::Value const& jv) { p->send(jv, true); });
2855 ++it;
2856 }
2857 else
2858 {
2859 it = mStreamMaps[sRTTransactions].erase(it);
2860 }
2861 }
2862 }
2863
2864 pubProposedAccountTransaction(ledger, transaction, result);
2865 }
2866
// Publish a closed/validated ledger: emits the "ledgerClosed" message on the
// sLedger stream, book-change summaries on sBookChanges, starts any delayed
// account-history subscriptions on the first published ledger, and finally
// publishes each accepted transaction in the ledger.
// NOTE(review): extraction dropped several source lines here (2868 function
// name + parameter — the XRPL_ASSERT below names this as
// NetworkOPsImp::pubLedger; 2873, 2891, 2895, 2913, 2916, 2964, 2977 —
// presumably the alpAccepted declaration, the stream-map lock, the jvObj
// declaration, a haveValidated() check, the completed-ledger range, the
// subAccountHistoryStart call, and the pubValidatedTransaction call).
// Restore from the original file before compiling.
2867 void
2869 {
2870 // Ledgers are published only when they acquire sufficient validations
2871 // Holes are filled across connection loss or other catastrophe
2872
2874 app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
2875 if (!alpAccepted)
2876 {
2877 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
2878 app_.getAcceptedLedgerCache().canonicalize_replace_client(
2879 lpAccepted->info().hash, alpAccepted);
2880 }
2881
2882 XRPL_ASSERT(
2883 alpAccepted->getLedger().get() == lpAccepted.get(),
2884 "ripple::NetworkOPsImp::pubLedger : accepted input");
2885
2886 {
2887 JLOG(m_journal.debug())
2888 << "Publishing ledger " << lpAccepted->info().seq << " "
2889 << lpAccepted->info().hash;
2890
2892
2893 if (!mStreamMaps[sLedger].empty())
2894 {
2896
// Assemble the "ledgerClosed" notification sent to ledger-stream subscribers.
2897 jvObj[jss::type] = "ledgerClosed";
2898 jvObj[jss::ledger_index] = lpAccepted->info().seq;
2899 jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
2900 jvObj[jss::ledger_time] = Json::Value::UInt(
2901 lpAccepted->info().closeTime.time_since_epoch().count());
2902
// fee_ref is only reported on networks without the XRPFees amendment.
2903 if (!lpAccepted->rules().enabled(featureXRPFees))
2904 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
2905 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
2906 jvObj[jss::reserve_base] =
2907 lpAccepted->fees().accountReserve(0).jsonClipped();
2908 jvObj[jss::reserve_inc] =
2909 lpAccepted->fees().increment.jsonClipped();
2910
2911 jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
2912
2914 {
2915 jvObj[jss::validated_ledgers] =
2917 }
2918
// Broadcast, dropping subscribers whose weak_ptr has expired.
2919 auto it = mStreamMaps[sLedger].begin();
2920 while (it != mStreamMaps[sLedger].end())
2921 {
2922 InfoSub::pointer p = it->second.lock();
2923 if (p)
2924 {
2925 p->send(jvObj, true);
2926 ++it;
2927 }
2928 else
2929 it = mStreamMaps[sLedger].erase(it);
2930 }
2931 }
2932
2933 if (!mStreamMaps[sBookChanges].empty())
2934 {
2935 Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
2936
2937 auto it = mStreamMaps[sBookChanges].begin();
2938 while (it != mStreamMaps[sBookChanges].end())
2939 {
2940 InfoSub::pointer p = it->second.lock();
2941 if (p)
2942 {
2943 p->send(jvObj, true);
2944 ++it;
2945 }
2946 else
2947 it = mStreamMaps[sBookChanges].erase(it);
2948 }
2949 }
2950
2951 {
// One-shot: on the first ledger published, kick off account-history
// subscriptions that were created before any validated ledger existed.
2952 static bool firstTime = true;
2953 if (firstTime)
2954 {
2955 // First validated ledger, start delayed SubAccountHistory
2956 firstTime = false;
2957 for (auto& outer : mSubAccountHistory)
2958 {
2959 for (auto& inner : outer.second)
2960 {
2961 auto& subInfo = inner.second;
2962 if (subInfo.index_->separationLedgerSeq_ == 0)
2963 {
2965 alpAccepted->getLedger(), subInfo);
2966 }
2967 }
2968 }
2969 }
2970 }
2971 }
2972
2973 // Don't lock since pubAcceptedTransaction is locking.
2974 for (auto const& accTx : *alpAccepted)
2975 {
2976 JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
2978 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
2979 }
2980 }
2981
// Detect a change in the server fee summary and, only when something actually
// changed, schedule a client job that republishes server state (pubServer).
// NOTE(review): extraction dropped lines 2983 (function name — job string
// below suggests NetworkOPsImp::reportFeeChange), 2985/2987 (presumably the
// ServerFeeSummary construction arguments), and 2993 (presumably the
// m_job_queue.addJob call) — TODO confirm against the original file.
2982 void
2984 {
2986 app_.openLedger().current()->fees().base,
2988 app_.getFeeTrack()};
2989
2990 // only schedule the job if something has changed
2991 if (f != mLastFeeSummary)
2992 {
2994 jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
2995 pubServer();
2996 });
2997 }
2998 }
2999
// Schedule a client job that publishes a consensus phase change to consensus
// stream subscribers via pubConsensus(phase).
// NOTE(review): extraction dropped lines 3001 (function name + parameter —
// the job string suggests NetworkOPsImp::reportConsensusStateChange) and
// 3003-3004 (presumably the m_job_queue.addJob call and job type) — TODO
// confirm against the original file.
3000 void
3002 {
3005 "reportConsensusStateChange->pubConsensus",
3006 [this, phase]() { pubConsensus(phase); });
3007 }
3008
// Forwards to LocalTxs::sweep for the given view.
// NOTE(review): extraction dropped line 3010 (function name); presumably
// NetworkOPsImp::updateLocalTx(ReadView const& view) — TODO confirm.
3009 inline void
3011 {
3012 m_localTX->sweep(view);
3013 }
// Returns the number of pending local transactions.
// NOTE(review): extraction dropped line 3015 (function name); presumably
// NetworkOPsImp::getLocalTxCount() — TODO confirm.
3014 inline std::size_t
3016 {
3017 return m_localTX->size();
3018 }
3019
3020 // This routine should only be used to publish accepted or validated
3021 // transactions.
// Builds the per-API-version JSON representation of a transaction for the
// streaming interfaces: base fields (type, transaction, meta, ledger info,
// engine result), owner_funds for non-self-funded OfferCreates, then a
// per-version fix-up (v2+ uses tx_json/hash at top level; v1 keeps hash
// inside "transaction").
// NOTE(review): extraction dropped lines 3022-3023 (return type and name —
// presumably MultiApiJson NetworkOPsImp::transJson), 3028 (the optional meta
// parameter), 3030 (presumably "Json::Value jvObj;"), 3046/3048 (presumably
// the RPC::insertMetaDelivered/insertNFTokenID-style helper calls operating on
// jvObj[jss::meta]), 3088 (an accountFunds argument), and 3096/3099 (the
// forAllApiVersions wrapper and lambda parameter). Restore before compiling.
3024 std::shared_ptr<STTx const> const& transaction,
3025 TER result,
3026 bool validated,
3027 std::shared_ptr<ReadView const> const& ledger,
3029 {
3031 std::string sToken;
3032 std::string sHuman;
3033
3034 transResultInfo(result, sToken, sHuman);
3035
3036 jvObj[jss::type] = "transaction";
3037 // NOTE jvObj is not a finished object for either API version. After
3038 // it's populated, we need to finish it for a specific API version. This is
3039 // done in a loop, near the end of this function.
3040 jvObj[jss::transaction] =
3041 transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3042
3043 if (meta)
3044 {
3045 jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
3047 jvObj[jss::meta], *ledger, transaction, meta->get());
3049 jvObj[jss::meta], transaction, meta->get());
3050 }
3051
3052 if (!ledger->open())
3053 jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3054
3055 if (validated)
3056 {
3057 jvObj[jss::ledger_index] = ledger->info().seq;
3058 jvObj[jss::transaction][jss::date] =
3059 ledger->info().closeTime.time_since_epoch().count();
3060 jvObj[jss::validated] = true;
3061 jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3062
3063 // WRITEME: Put the account next seq here
3064 }
3065 else
3066 {
3067 jvObj[jss::validated] = false;
3068 jvObj[jss::ledger_current_index] = ledger->info().seq;
3069 }
3070
3071 jvObj[jss::status] = validated ? "closed" : "proposed";
3072 jvObj[jss::engine_result] = sToken;
3073 jvObj[jss::engine_result_code] = result;
3074 jvObj[jss::engine_result_message] = sHuman;
3075
3076 if (transaction->getTxnType() == ttOFFER_CREATE)
3077 {
3078 auto const account = transaction->getAccountID(sfAccount);
3079 auto const amount = transaction->getFieldAmount(sfTakerGets);
3080
3081 // If the offer create is not self funded then add the owner balance
3082 if (account != amount.issue().account)
3083 {
3084 auto const ownerFunds = accountFunds(
3085 *ledger,
3086 account,
3087 amount,
3089 app_.journal("View"));
3090 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3091 }
3092 }
3093
// Finish the object for each API version (DeliverMax insertion plus the
// v1-vs-v2 placement of the hash / tx_json fields).
3094 std::string const hash = to_string(transaction->getTransactionID());
3095 MultiApiJson multiObj{jvObj};
3097 multiObj.visit(), //
3098 [&]<unsigned Version>(
3100 RPC::insertDeliverMax(
3101 jvTx[jss::transaction], transaction->getTxnType(), Version);
3102
3103 if constexpr (Version > 1)
3104 {
3105 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3106 jvTx[jss::hash] = hash;
3107 }
3108 else
3109 {
3110 jvTx[jss::transaction][jss::hash] = hash;
3111 }
3112 });
3113
3114 return multiObj;
3115 }
3116
// Publish a validated transaction to the sTransactions and sRTTransactions
// streams, feed successful transactions to the order book tracker, then fan
// out to per-account subscribers via pubAccountTransaction().
// NOTE(review): extraction dropped lines 3118 (function name — presumably
// NetworkOPsImp::pubValidatedTransaction) and 3131 (presumably the stream-map
// lock acquisition) — TODO confirm against the original file.
3117 void
3119 std::shared_ptr<ReadView const> const& ledger,
3120 const AcceptedLedgerTx& transaction,
3121 bool last)
3122 {
3123 auto const& stTxn = transaction.getTxn();
3124
3125 // Create two different Json objects, for different API versions
3126 auto const metaRef = std::ref(transaction.getMeta());
3127 auto const trResult = transaction.getResult();
3128 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3129
3130 {
3132
// Broadcast to the accepted-transactions stream, pruning dead subscribers.
3133 auto it = mStreamMaps[sTransactions].begin();
3134 while (it != mStreamMaps[sTransactions].end())
3135 {
3136 InfoSub::pointer p = it->second.lock();
3137
3138 if (p)
3139 {
3140 jvObj.visit(
3141 p->getApiVersion(), //
3142 [&](Json::Value const& jv) { p->send(jv, true); });
3143 ++it;
3144 }
3145 else
3146 it = mStreamMaps[sTransactions].erase(it);
3147 }
3148
// Real-time subscribers also receive the validated transaction.
3149 it = mStreamMaps[sRTTransactions].begin();
3150
3151 while (it != mStreamMaps[sRTTransactions].end())
3152 {
3153 InfoSub::pointer p = it->second.lock();
3154
3155 if (p)
3156 {
3157 jvObj.visit(
3158 p->getApiVersion(), //
3159 [&](Json::Value const& jv) { p->send(jv, true); });
3160 ++it;
3161 }
3162 else
3163 it = mStreamMaps[sRTTransactions].erase(it);
3164 }
3165 }
3166
3167 if (transaction.getResult() == tesSUCCESS)
3168 app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3169
3170 pubAccountTransaction(ledger, transaction, last);
3171 }
3172
// Fan a validated transaction out to per-account subscribers: real-time
// account subscribers (mSubRTAccount), accepted-account subscribers
// (mSubAccount), and forward-direction account-history subscribers
// (mSubAccountHistory). Expired weak_ptr entries are pruned as encountered.
// NOTE(review): extraction dropped lines 3174 (function name — the JLOG and
// XRPL_ASSERT below name this pubAccountTransaction), 3179 (presumably the
// notify hash_set declaration), 3186 (presumably the subscription lock),
// 3189 (the third condition of the outer if — presumably
// !mSubAccountHistory.empty()), and 3289 (the second operand of the assert's
// comparison). Restore from the original file before compiling.
3173 void
3175 std::shared_ptr<ReadView const> const& ledger,
3176 AcceptedLedgerTx const& transaction,
3177 bool last)
3178 {
3180 int iProposed = 0;
3181 int iAccepted = 0;
3182
3183 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3184 auto const currLedgerSeq = ledger->seq();
3185 {
3187
3188 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3190 {
3191 for (auto const& affectedAccount : transaction.getAffected())
3192 {
// Real-time subscribers for this affected account.
3193 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3194 simiIt != mSubRTAccount.end())
3195 {
3196 auto it = simiIt->second.begin();
3197
3198 while (it != simiIt->second.end())
3199 {
3200 InfoSub::pointer p = it->second.lock();
3201
3202 if (p)
3203 {
3204 notify.insert(p);
3205 ++it;
3206 ++iProposed;
3207 }
3208 else
3209 it = simiIt->second.erase(it);
3210 }
3211 }
3212
// Accepted-transaction subscribers for this affected account.
3213 if (auto simiIt = mSubAccount.find(affectedAccount);
3214 simiIt != mSubAccount.end())
3215 {
3216 auto it = simiIt->second.begin();
3217 while (it != simiIt->second.end())
3218 {
3219 InfoSub::pointer p = it->second.lock();
3220
3221 if (p)
3222 {
3223 notify.insert(p);
3224 ++it;
3225 ++iAccepted;
3226 }
3227 else
3228 it = simiIt->second.erase(it);
3229 }
3230 }
3231
// Account-history subscribers only receive forward transactions past their
// separation ledger; the backfill job covers earlier ledgers.
3232 if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3233 histoIt != mSubAccountHistory.end())
3234 {
3235 auto& subs = histoIt->second;
3236 auto it = subs.begin();
3237 while (it != subs.end())
3238 {
3239 SubAccountHistoryInfoWeak const& info = it->second;
3240 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3241 {
3242 ++it;
3243 continue;
3244 }
3245
3246 if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3247 {
3248 accountHistoryNotify.emplace_back(
3249 SubAccountHistoryInfo{isSptr, info.index_});
3250 ++it;
3251 }
3252 else
3253 {
3254 it = subs.erase(it);
3255 }
3256 }
3257 if (subs.empty())
3258 mSubAccountHistory.erase(histoIt);
3259 }
3260 }
3261 }
3262 }
3263
3264 JLOG(m_journal.trace())
3265 << "pubAccountTransaction: " << "proposed=" << iProposed
3266 << ", accepted=" << iAccepted;
3267
3268 if (!notify.empty() || !accountHistoryNotify.empty())
3269 {
3270 auto const& stTxn = transaction.getTxn();
3271
3272 // Create two different Json objects, for different API versions
3273 auto const metaRef = std::ref(transaction.getMeta());
3274 auto const trResult = transaction.getResult();
3275 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3276
3277 for (InfoSub::ref isrListener : notify)
3278 {
3279 jvObj.visit(
3280 isrListener->getApiVersion(), //
3281 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3282 }
3283
// The last transaction of a ledger is marked as the history boundary.
3284 if (last)
3285 jvObj.set(jss::account_history_boundary, true);
3286
3287 XRPL_ASSERT(
3288 jvObj.isMember(jss::account_history_tx_stream) ==
3290 "ripple::NetworkOPsImp::pubAccountTransaction : "
3291 "account_history_tx_stream not set");
3292 for (auto& info : accountHistoryNotify)
3293 {
// Forward-stream indices are non-negative and increase; index 0 with no
// historical backfill marks the very first tx for this subscription.
3294 auto& index = info.index_;
3295 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3296 jvObj.set(jss::account_history_tx_first, true);
3297
3298 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3299
3300 jvObj.visit(
3301 info.sink_->getApiVersion(), //
3302 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3303 }
3304 }
3305 }
3306
// Fan a proposed (unvalidated) transaction out to real-time per-account
// subscribers. Returns early if there are no real-time account subscriptions.
// NOTE(review): extraction dropped lines 3308 (function name — the JLOG below
// names this pubProposedAccountTransaction), 3310 (the transaction parameter,
// referenced below as `tx`), 3313 (presumably the notify hash_set), 3319
// (presumably the subscription lock), 3325 (third condition of the if), and
// 3366 (second operand of the assert comparison). Restore before compiling.
3307 void
3309 std::shared_ptr<ReadView const> const& ledger,
3311 TER result)
3312 {
3314 int iProposed = 0;
3315
3316 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3317
3318 {
3320
3321 if (mSubRTAccount.empty())
3322 return;
3323
3324 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3326 {
3327 for (auto const& affectedAccount : tx->getMentionedAccounts())
3328 {
3329 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3330 simiIt != mSubRTAccount.end())
3331 {
3332 auto it = simiIt->second.begin();
3333
// Prune dead weak_ptr subscribers while collecting live ones.
3334 while (it != simiIt->second.end())
3335 {
3336 InfoSub::pointer p = it->second.lock();
3337
3338 if (p)
3339 {
3340 notify.insert(p);
3341 ++it;
3342 ++iProposed;
3343 }
3344 else
3345 it = simiIt->second.erase(it);
3346 }
3347 }
3348 }
3349 }
3350 }
3351
3352 JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3353
3354 if (!notify.empty() || !accountHistoryNotify.empty())
3355 {
3356 // Create two different Json objects, for different API versions
3357 MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3358
3359 for (InfoSub::ref isrListener : notify)
3360 jvObj.visit(
3361 isrListener->getApiVersion(), //
3362 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3363
3364 XRPL_ASSERT(
3365 jvObj.isMember(jss::account_history_tx_stream) ==
3367 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3368 "account_history_tx_stream not set");
// NOTE(review): accountHistoryNotify is never populated in the visible code,
// so this loop appears to be dead here — possibly an artifact of dropped
// lines; confirm against the original file.
3369 for (auto& info : accountHistoryNotify)
3370 {
3371 auto& index = info.index_;
3372 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3373 jvObj.set(jss::account_history_tx_first, true);
3374 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3375 jvObj.visit(
3376 info.sink_->getApiVersion(), //
3377 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3378 }
3379 }
3380 }
3381
3382//
3383// Monitoring
3384//
3385
// Subscribe a listener to a set of accounts, on either the real-time map
// (rt == true, mSubRTAccount) or the accepted map (mSubAccount). The listener
// records the subscription first; the server-side map is updated second.
// NOTE(review): extraction dropped lines 3387 (function name — the JLOG below
// names this subAccount) and 3402 (presumably the subscription lock guarding
// the map updates) — TODO confirm against the original file.
3386 void
3388 InfoSub::ref isrListener,
3389 hash_set<AccountID> const& vnaAccountIDs,
3390 bool rt)
3391 {
3392 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3393
3394 for (auto const& naAccountID : vnaAccountIDs)
3395 {
3396 JLOG(m_journal.trace())
3397 << "subAccount: account: " << toBase58(naAccountID);
3398
3399 isrListener->insertSubAccountInfo(naAccountID, rt);
3400 }
3401
3403
3404 for (auto const& naAccountID : vnaAccountIDs)
3405 {
3406 auto simIterator = subMap.find(naAccountID);
3407 if (simIterator == subMap.end())
3408 {
3409 // Not found: note that the account has a new single listener.
3410 SubMapType usisElement;
3411 usisElement[isrListener->getSeq()] = isrListener;
3412 // VFALCO NOTE This is making a needless copy of naAccountID
3413 subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3414 }
3415 else
3416 {
3417 // Found, note that the account has another listener.
3418 simIterator->second[isrListener->getSeq()] = isrListener;
3419 }
3420 }
3421 }
3422
// Unsubscribe a listener from a set of accounts: first drops the listener's
// own records, then removes the server-side map entries via
// unsubAccountInternal().
// NOTE(review): extraction dropped line 3424 (function name — presumably
// NetworkOPsImp::unsubAccount) — TODO confirm against the original file.
3423 void
3425 InfoSub::ref isrListener,
3426 hash_set<AccountID> const& vnaAccountIDs,
3427 bool rt)
3428 {
3429 for (auto const& naAccountID : vnaAccountIDs)
3430 {
3431 // Remove from the InfoSub
3432 isrListener->deleteSubAccountInfo(naAccountID, rt);
3433 }
3434
3435 // Remove from the server
3436 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3437 }
3438
// Remove a subscriber sequence number from the server-side account map
// (real-time or accepted), erasing the whole per-account entry once the last
// subscriber is gone.
// NOTE(review): extraction dropped lines 3440 (function name — presumably
// NetworkOPsImp::unsubAccountInternal) and 3445 (presumably the subscription
// lock) — TODO confirm against the original file.
3439 void
3441 std::uint64_t uSeq,
3442 hash_set<AccountID> const& vnaAccountIDs,
3443 bool rt)
3444 {
3446
3447 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3448
3449 for (auto const& naAccountID : vnaAccountIDs)
3450 {
3451 auto simIterator = subMap.find(naAccountID);
3452
3453 if (simIterator != subMap.end())
3454 {
3455 // Found
3456 simIterator->second.erase(uSeq);
3457
3458 if (simIterator->second.empty())
3459 {
3460 // Don't need hash entry.
3461 subMap.erase(simIterator);
3462 }
3463 }
3464 }
3465 }
3466
// Schedule the background job that back-fills historical transactions for an
// account_history_tx_stream subscription. The job pages backward through
// validated ledgers (1024 at a time) using the SQLite relational database,
// converts each (tx, meta) pair to the per-API-version stream JSON via
// transJson, and stops at the account's first transaction or the genesis
// ledger, or when the subscription is cancelled (stopHistorical_).
// NOTE(review): extraction dropped lines 3468 (function name — the
// UNREACHABLE message below names this addAccountHistoryJob), 3494-3495
// (presumably the m_job_queue.addJob call and job type), 3575-3578 (the
// marker parameter and return type of getMoreTxns), 3583-3584 (the
// RelationalDatabase pointer and AccountTxPageOptions braces), 3629 (the
// m_ledgerMaster call inside haveRange), 3643 (presumably the reschedule
// call), 3647 (presumably the marker declaration), 3677 (presumably
// app_.getLedgerMaster().getLedgerBySeq(), and 3687 (the stTxn declaration).
// Restore from the original file before compiling.
3467 void
3469 {
3470 enum DatabaseType { Sqlite, None };
3471 static const auto databaseType = [&]() -> DatabaseType {
3472 // Use a dynamic_cast to return DatabaseType::None
3473 // on failure.
3474 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3475 {
3476 return DatabaseType::Sqlite;
3477 }
3478 return DatabaseType::None;
3479 }();
3480
// No usable backend: report an internal error to the sink and drop the
// subscription.
3481 if (databaseType == DatabaseType::None)
3482 {
3483 JLOG(m_journal.error())
3484 << "AccountHistory job for account "
3485 << toBase58(subInfo.index_->accountId_) << " no database";
3486 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3487 {
3488 sptr->send(rpcError(rpcINTERNAL), true);
3489 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3490 }
3491 return;
3492 }
3493
3496 "AccountHistoryTxStream",
3497 [this, dbType = databaseType, subInfo]() {
3498 auto const& accountId = subInfo.index_->accountId_;
3499 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3500 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3501
3502 JLOG(m_journal.trace())
3503 << "AccountHistory job for account " << toBase58(accountId)
3504 << " started. lastLedgerSeq=" << lastLedgerSeq;
3505
// Detects the account's first-ever transaction, which terminates the
// backward search.
3506 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3507 std::shared_ptr<TxMeta> const& meta) -> bool {
3508 /*
3509 * genesis account: first tx is the one with seq 1
3510 * other account: first tx is the one created the account
3511 */
3512 if (accountId == genesisAccountId)
3513 {
3514 auto stx = tx->getSTransaction();
3515 if (stx->getAccountID(sfAccount) == accountId &&
3516 stx->getSeqProxy().value() == 1)
3517 return true;
3518 }
3519
// For other accounts, look for the metadata node that created the
// AccountRoot entry for this account.
3520 for (auto& node : meta->getNodes())
3521 {
3522 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3523 continue;
3524
3525 if (node.isFieldPresent(sfNewFields))
3526 {
3527 if (auto inner = dynamic_cast<const STObject*>(
3528 node.peekAtPField(sfNewFields));
3529 inner)
3530 {
3531 if (inner->isFieldPresent(sfAccount) &&
3532 inner->getAccountID(sfAccount) == accountId)
3533 {
3534 return true;
3535 }
3536 }
3537 }
3538 }
3539
3540 return false;
3541 };
3542
// Sends a plain JSON object to the sink; optionally unsubscribes. Returns
// false if the sink is gone.
3543 auto send = [&](Json::Value const& jvObj,
3544 bool unsubscribe) -> bool {
3545 if (auto sptr = subInfo.sinkWptr_.lock())
3546 {
3547 sptr->send(jvObj, true);
3548 if (unsubscribe)
3549 unsubAccountHistory(sptr, accountId, false);
3550 return true;
3551 }
3552
3553 return false;
3554 };
3555
// Same as `send`, but chooses the JSON variant matching the sink's API
// version.
3556 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3557 bool unsubscribe) -> bool {
3558 if (auto sptr = subInfo.sinkWptr_.lock())
3559 {
3560 jvObj.visit(
3561 sptr->getApiVersion(), //
3562 [&](Json::Value const& jv) { sptr->send(jv, true); });
3563
3564 if (unsubscribe)
3565 unsubAccountHistory(sptr, accountId, false);
3566 return true;
3567 }
3568
3569 return false;
3570 };
3571
// Fetches the next page of account transactions (newest first) from the
// database backend.
3572 auto getMoreTxns =
3573 [&](std::uint32_t minLedger,
3574 std::uint32_t maxLedger,
3579 switch (dbType)
3580 {
3581 case Sqlite: {
3582 auto db = static_cast<SQLiteDatabase*>(
3585 accountId, minLedger, maxLedger, marker, 0, true};
3586 return db->newestAccountTxPage(options);
3587 }
3588 default: {
3589 UNREACHABLE(
3590 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3591 "getMoreTxns : invalid database type");
3592 return {};
3593 }
3594 }
3595 };
3596
3597 /*
3598 * search backward until the genesis ledger or asked to stop
3599 */
3600 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3601 {
// Charge the subscriber per iteration; bail out if the sink is gone.
3602 int feeChargeCount = 0;
3603 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3604 {
3605 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3606 ++feeChargeCount;
3607 }
3608 else
3609 {
3610 JLOG(m_journal.trace())
3611 << "AccountHistory job for account "
3612 << toBase58(accountId) << " no InfoSub. Fee charged "
3613 << feeChargeCount << " times.";
3614 return;
3615 }
3616
3617 // try to search in 1024 ledgers till reaching genesis ledgers
3618 auto startLedgerSeq =
3619 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3620 JLOG(m_journal.trace())
3621 << "AccountHistory job for account " << toBase58(accountId)
3622 << ", working on ledger range [" << startLedgerSeq << ","
3623 << lastLedgerSeq << "]";
3624
// Only proceed when the whole [startLedgerSeq, lastLedgerSeq] range is
// covered by locally validated ledgers; otherwise reschedule.
3625 auto haveRange = [&]() -> bool {
3626 std::uint32_t validatedMin = UINT_MAX;
3627 std::uint32_t validatedMax = 0;
3628 auto haveSomeValidatedLedgers =
3630 validatedMin, validatedMax);
3631
3632 return haveSomeValidatedLedgers &&
3633 validatedMin <= startLedgerSeq &&
3634 lastLedgerSeq <= validatedMax;
3635 }();
3636
3637 if (!haveRange)
3638 {
3639 JLOG(m_journal.debug())
3640 << "AccountHistory reschedule job for account "
3641 << toBase58(accountId) << ", incomplete ledger range ["
3642 << startLedgerSeq << "," << lastLedgerSeq << "]";
3644 return;
3645 }
3646
3648 while (!subInfo.index_->stopHistorical_)
3649 {
3650 auto dbResult =
3651 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3652 if (!dbResult)
3653 {
3654 JLOG(m_journal.debug())
3655 << "AccountHistory job for account "
3656 << toBase58(accountId) << " getMoreTxns failed.";
3657 send(rpcError(rpcINTERNAL), true);
3658 return;
3659 }
3660
3661 auto const& txns = dbResult->first;
3662 marker = dbResult->second;
3663 size_t num_txns = txns.size();
3664 for (size_t i = 0; i < num_txns; ++i)
3665 {
3666 auto const& [tx, meta] = txns[i];
3667
3668 if (!tx || !meta)
3669 {
3670 JLOG(m_journal.debug())
3671 << "AccountHistory job for account "
3672 << toBase58(accountId) << " empty tx or meta.";
3673 send(rpcError(rpcINTERNAL), true);
3674 return;
3675 }
3676 auto curTxLedger =
3678 tx->getLedger());
3679 if (!curTxLedger)
3680 {
3681 JLOG(m_journal.debug())
3682 << "AccountHistory job for account "
3683 << toBase58(accountId) << " no ledger.";
3684 send(rpcError(rpcINTERNAL), true);
3685 return;
3686 }
3688 tx->getSTransaction();
3689 if (!stTxn)
3690 {
3691 JLOG(m_journal.debug())
3692 << "AccountHistory job for account "
3693 << toBase58(accountId)
3694 << " getSTransaction failed.";
3695 send(rpcError(rpcINTERNAL), true);
3696 return;
3697 }
3698
3699 auto const mRef = std::ref(*meta);
3700 auto const trR = meta->getResultTER();
3701 MultiApiJson jvTx =
3702 transJson(stTxn, trR, true, curTxLedger, mRef);
3703
// Historical entries carry decreasing (negative-direction) indices; a ledger
// boundary is flagged on the last tx of each ledger in the page.
3704 jvTx.set(
3705 jss::account_history_tx_index, txHistoryIndex--);
3706 if (i + 1 == num_txns ||
3707 txns[i + 1].first->getLedger() != tx->getLedger())
3708 jvTx.set(jss::account_history_boundary, true);
3709
3710 if (isFirstTx(tx, meta))
3711 {
3712 jvTx.set(jss::account_history_tx_first, true);
3713 sendMultiApiJson(jvTx, false);
3714
3715 JLOG(m_journal.trace())
3716 << "AccountHistory job for account "
3717 << toBase58(accountId)
3718 << " done, found last tx.";
3719 return;
3720 }
3721 else
3722 {
3723 sendMultiApiJson(jvTx, false);
3724 }
3725 }
3726
3727 if (marker)
3728 {
3729 JLOG(m_journal.trace())
3730 << "AccountHistory job for account "
3731 << toBase58(accountId)
3732 << " paging, marker=" << marker->ledgerSeq << ":"
3733 << marker->txnSeq;
3734 }
3735 else
3736 {
3737 break;
3738 }
3739 }
3740
3741 if (!subInfo.index_->stopHistorical_)
3742 {
3743 lastLedgerSeq = startLedgerSeq - 1;
3744 if (lastLedgerSeq <= 1)
3745 {
3746 JLOG(m_journal.trace())
3747 << "AccountHistory job for account "
3748 << toBase58(accountId)
3749 << " done, reached genesis ledger.";
3750 return;
3751 }
3752 }
3753 }
3754 });
3755 }
3756
// Start streaming for an account-history subscription once a validated ledger
// is available: records the separation ledger, skips accounts that don't
// exist (or a genesis account with no transactions yet), then schedules the
// historical backfill job.
// NOTE(review): extraction dropped lines 3758 (function name — the JLOG and
// UNREACHABLE strings name this subAccountHistoryStart) and 3760 (the
// subscription-info parameter, referenced below as `subInfo`) — restore from
// the original file before compiling.
3757 void
3759 std::shared_ptr<ReadView const> const& ledger,
3761 {
3762 subInfo.index_->separationLedgerSeq_ = ledger->seq();
3763 auto const& accountId = subInfo.index_->accountId_;
3764 auto const accountKeylet = keylet::account(accountId);
3765 if (!ledger->exists(accountKeylet))
3766 {
3767 JLOG(m_journal.debug())
3768 << "subAccountHistoryStart, no account " << toBase58(accountId)
3769 << ", no need to add AccountHistory job.";
3770 return;
3771 }
3772 if (accountId == genesisAccountId)
3773 {
// Genesis account at sequence 1 has made no transactions yet — nothing to
// backfill.
3774 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3775 {
3776 if (sleAcct->getFieldU32(sfSequence) == 1)
3777 {
3778 JLOG(m_journal.debug())
3779 << "subAccountHistoryStart, genesis account "
3780 << toBase58(accountId)
3781 << " does not have tx, no need to add AccountHistory job.";
3782 return;
3783 }
3784 }
3785 else
3786 {
3787 UNREACHABLE(
3788 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
3789 "access genesis account");
3790 return;
3791 }
3792 }
3793 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
3794 subInfo.index_->haveHistorical_ = true;
3795
3796 JLOG(m_journal.debug())
3797 << "subAccountHistoryStart, add AccountHistory job: accountId="
3798 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
3799
3800 addAccountHistoryJob(subInfo);
3801 }
3802
// Subscribe a listener to an account's transaction history stream. Records
// the subscription on the listener and in mSubAccountHistory, then starts
// streaming immediately if a validated ledger exists (otherwise streaming is
// deferred until the first published ledger). Returns rpcINVALID_PARAMS on a
// duplicate subscription, rpcSUCCESS otherwise.
// NOTE(review): extraction dropped lines 3803-3804 (return type and function
// name — the JLOG strings name this subAccountHistory), 3816-3817 (presumably
// the lock and the SubAccountHistoryInfoWeak `ahi` construction), and
// 3822/3824 (presumably the inner-map declaration and the
// mSubAccountHistory.insert call). Restore before compiling.
3805 InfoSub::ref isrListener,
3806 AccountID const& accountId)
3807 {
3808 if (!isrListener->insertSubAccountHistory(accountId))
3809 {
3810 JLOG(m_journal.debug())
3811 << "subAccountHistory, already subscribed to account "
3812 << toBase58(accountId);
3813 return rpcINVALID_PARAMS;
3814 }
3815
3818 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
3819 auto simIterator = mSubAccountHistory.find(accountId);
3820 if (simIterator == mSubAccountHistory.end())
3821 {
3823 inner.emplace(isrListener->getSeq(), ahi);
3825 simIterator, std::make_pair(accountId, inner));
3826 }
3827 else
3828 {
3829 simIterator->second.emplace(isrListener->getSeq(), ahi);
3830 }
3831
3832 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
3833 if (ledger)
3834 {
3835 subAccountHistoryStart(ledger, ahi);
3836 }
3837 else
3838 {
3839 // The node does not have validated ledgers, so wait for
3840 // one before start streaming.
3841 // In this case, the subscription is also considered successful.
3842 JLOG(m_journal.debug())
3843 << "subAccountHistory, no validated ledger yet, delay start";
3844 }
3845
3846 return rpcSUCCESS;
3847 }
3848
// Unsubscribe a listener from an account's history stream. With
// historyOnly == true only the historical backfill is stopped, the forward
// stream remains; otherwise the listener-side record is removed too.
// NOTE(review): extraction dropped line 3850 (function name — presumably
// NetworkOPsImp::unsubAccountHistory) — TODO confirm.
3849 void
3851 InfoSub::ref isrListener,
3852 AccountID const& account,
3853 bool historyOnly)
3854 {
3855 if (!historyOnly)
3856 isrListener->deleteSubAccountHistory(account);
3857 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
3858 }
3859
// Server-side part of account-history unsubscription: flags the backfill job
// to stop (stopHistorical_) and, unless historyOnly, removes the map entry —
// erasing the whole per-account bucket when it becomes empty.
// NOTE(review): extraction dropped lines 3861 (function name — the JLOG below
// names this unsubAccountHistory; presumably
// NetworkOPsImp::unsubAccountHistoryInternal) and 3866 (presumably the
// subscription lock) — TODO confirm against the original file.
3860 void
3862 std::uint64_t seq,
3863 const AccountID& account,
3864 bool historyOnly)
3865 {
3867 auto simIterator = mSubAccountHistory.find(account);
3868 if (simIterator != mSubAccountHistory.end())
3869 {
3870 auto& subInfoMap = simIterator->second;
3871 auto subInfoIter = subInfoMap.find(seq);
3872 if (subInfoIter != subInfoMap.end())
3873 {
3874 subInfoIter->second.index_->stopHistorical_ = true;
3875 }
3876
3877 if (!historyOnly)
3878 {
3879 simIterator->second.erase(seq);
3880 if (simIterator->second.empty())
3881 {
3882 mSubAccountHistory.erase(simIterator);
3883 }
3884 }
3885 JLOG(m_journal.debug())
3886 << "unsubAccountHistory, account " << toBase58(account)
3887 << ", historyOnly = " << (historyOnly ? "true" : "false");
3888 }
3889 }
3890
// Subscribe a listener to an order book via the OrderBookDB listeners object.
// Always returns true.
// NOTE(review): extraction dropped line 3892 (signature — the UNREACHABLE
// message names this subBook; presumably parameters isrListener and book) —
// TODO confirm against the original file.
3891 bool
3893 {
3894 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
3895 listeners->addSubscriber(isrListener);
3896 else
3897 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
3898 return true;
3899 }
3900
// Remove a subscriber (by sequence number) from an order book's listeners, if
// any exist. Always returns true.
// NOTE(review): extraction dropped line 3902 (signature — presumably
// NetworkOPsImp::unsubBook(std::uint64_t uSeq, Book const& book)) — TODO
// confirm against the original file.
3901 bool
3903 {
3904 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
3905 listeners->removeSubscriber(uSeq);
3906
3907 return true;
3908 }
3909
3913{
3914 // This code-path is exclusively used when the server is in standalone
3915 // mode via `ledger_accept`
3916 XRPL_ASSERT(
3917 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
3918
3919 if (!m_standalone)
3920 Throw<std::runtime_error>(
3921 "Operation only possible in STANDALONE mode.");
3922
3923 // FIXME Could we improve on this and remove the need for a specialized
3924 // API in Consensus?
3925 beginConsensus(m_ledgerMaster.getClosedLedger()->info().hash, {});
3926 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
3927 return m_ledgerMaster.getCurrentLedger()->info().seq;
3928}
3929
3930// <-- bool: true=added, false=already there
3931bool
3933{
3934 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
3935 {
3936 jvResult[jss::ledger_index] = lpClosed->info().seq;
3937 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
3938 jvResult[jss::ledger_time] = Json::Value::UInt(
3939 lpClosed->info().closeTime.time_since_epoch().count());
3940 if (!lpClosed->rules().enabled(featureXRPFees))
3941 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3942 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
3943 jvResult[jss::reserve_base] =
3944 lpClosed->fees().accountReserve(0).jsonClipped();
3945 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
3946 }
3947
3949 {
3950 jvResult[jss::validated_ledgers] =
3952 }
3953
3955 return mStreamMaps[sLedger]
3956 .emplace(isrListener->getSeq(), isrListener)
3957 .second;
3958}
3959
3960// <-- bool: true=added, false=already there
3961bool
3963{
3966 .emplace(isrListener->getSeq(), isrListener)
3967 .second;
3968}
3969
3970// <-- bool: true=erased, false=was not there
3971bool
3973{
3975 return mStreamMaps[sLedger].erase(uSeq);
3976}
3977
3978// <-- bool: true=erased, false=was not there
3979bool
3981{
3983 return mStreamMaps[sBookChanges].erase(uSeq);
3984}
3985
3986// <-- bool: true=added, false=already there
3987bool
3989{
3991 return mStreamMaps[sManifests]
3992 .emplace(isrListener->getSeq(), isrListener)
3993 .second;
3994}
3995
3996// <-- bool: true=erased, false=was not there
3997bool
3999{
4001 return mStreamMaps[sManifests].erase(uSeq);
4002}
4003
4004// <-- bool: true=added, false=already there
4005bool
4007 InfoSub::ref isrListener,
4008 Json::Value& jvResult,
4009 bool admin)
4010{
4011 uint256 uRandom;
4012
4013 if (m_standalone)
4014 jvResult[jss::stand_alone] = m_standalone;
4015
4016 // CHECKME: is it necessary to provide a random number here?
4017 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
4018
4019 auto const& feeTrack = app_.getFeeTrack();
4020 jvResult[jss::random] = to_string(uRandom);
4021 jvResult[jss::server_status] = strOperatingMode(admin);
4022 jvResult[jss::load_base] = feeTrack.getLoadBase();
4023 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4024 jvResult[jss::hostid] = getHostId(admin);
4025 jvResult[jss::pubkey_node] =
4027
4029 return mStreamMaps[sServer]
4030 .emplace(isrListener->getSeq(), isrListener)
4031 .second;
4032}
4033
4034// <-- bool: true=erased, false=was not there
4035bool
4037{
4039 return mStreamMaps[sServer].erase(uSeq);
4040}
4041
4042// <-- bool: true=added, false=already there
4043bool
4045{
4048 .emplace(isrListener->getSeq(), isrListener)
4049 .second;
4050}
4051
4052// <-- bool: true=erased, false=was not there
4053bool
4055{
4057 return mStreamMaps[sTransactions].erase(uSeq);
4058}
4059
4060// <-- bool: true=added, false=already there
4061bool
4063{
4066 .emplace(isrListener->getSeq(), isrListener)
4067 .second;
4068}
4069
4070// <-- bool: true=erased, false=was not there
4071bool
4073{
4075 return mStreamMaps[sRTTransactions].erase(uSeq);
4076}
4077
4078// <-- bool: true=added, false=already there
4079bool
4081{
4084 .emplace(isrListener->getSeq(), isrListener)
4085 .second;
4086}
4087
4088void
4090{
4091 accounting_.json(obj);
4092}
4093
4094// <-- bool: true=erased, false=was not there
4095bool
4097{
4099 return mStreamMaps[sValidations].erase(uSeq);
4100}
4101
4102// <-- bool: true=added, false=already there
4103bool
4105{
4107 return mStreamMaps[sPeerStatus]
4108 .emplace(isrListener->getSeq(), isrListener)
4109 .second;
4110}
4111
4112// <-- bool: true=erased, false=was not there
4113bool
4115{
4117 return mStreamMaps[sPeerStatus].erase(uSeq);
4118}
4119
4120// <-- bool: true=added, false=already there
4121bool
4123{
4126 .emplace(isrListener->getSeq(), isrListener)
4127 .second;
4128}
4129
4130// <-- bool: true=erased, false=was not there
4131bool
4133{
4135 return mStreamMaps[sConsensusPhase].erase(uSeq);
4136}
4137
4140{
4142
4143 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4144
4145 if (it != mRpcSubMap.end())
4146 return it->second;
4147
4148 return InfoSub::pointer();
4149}
4150
4153{
4155
4156 mRpcSubMap.emplace(strUrl, rspEntry);
4157
4158 return rspEntry;
4159}
4160
4161bool
4163{
4165 auto pInfo = findRpcSub(strUrl);
4166
4167 if (!pInfo)
4168 return false;
4169
4170 // check to see if any of the stream maps still hold a weak reference to
4171 // this entry before removing
4172 for (SubMapType const& map : mStreamMaps)
4173 {
4174 if (map.find(pInfo->getSeq()) != map.end())
4175 return false;
4176 }
4177 mRpcSubMap.erase(strUrl);
4178 return true;
4179}
4180
4181#ifndef USE_NEW_BOOK_PAGE
4182
4183// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4184// work, but it demonstrated poor performance.
4185//
4186void
4189 Book const& book,
4190 AccountID const& uTakerID,
4191 bool const bProof,
4192 unsigned int iLimit,
4193 Json::Value const& jvMarker,
4194 Json::Value& jvResult)
4195{ // CAUTION: This is the old get book page logic
4196 Json::Value& jvOffers =
4197 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4198
4200 const uint256 uBookBase = getBookBase(book);
4201 const uint256 uBookEnd = getQualityNext(uBookBase);
4202 uint256 uTipIndex = uBookBase;
4203
4204 if (auto stream = m_journal.trace())
4205 {
4206 stream << "getBookPage:" << book;
4207 stream << "getBookPage: uBookBase=" << uBookBase;
4208 stream << "getBookPage: uBookEnd=" << uBookEnd;
4209 stream << "getBookPage: uTipIndex=" << uTipIndex;
4210 }
4211
4212 ReadView const& view = *lpLedger;
4213
4214 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4215 isGlobalFrozen(view, book.in.account);
4216
4217 bool bDone = false;
4218 bool bDirectAdvance = true;
4219
4220 std::shared_ptr<SLE const> sleOfferDir;
4221 uint256 offerIndex;
4222 unsigned int uBookEntry;
4223 STAmount saDirRate;
4224
4225 auto const rate = transferRate(view, book.out.account);
4226 auto viewJ = app_.journal("View");
4227
4228 while (!bDone && iLimit-- > 0)
4229 {
4230 if (bDirectAdvance)
4231 {
4232 bDirectAdvance = false;
4233
4234 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4235
4236 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4237 if (ledgerIndex)
4238 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4239 else
4240 sleOfferDir.reset();
4241
4242 if (!sleOfferDir)
4243 {
4244 JLOG(m_journal.trace()) << "getBookPage: bDone";
4245 bDone = true;
4246 }
4247 else
4248 {
4249 uTipIndex = sleOfferDir->key();
4250 saDirRate = amountFromQuality(getQuality(uTipIndex));
4251
4252 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4253
4254 JLOG(m_journal.trace())
4255 << "getBookPage: uTipIndex=" << uTipIndex;
4256 JLOG(m_journal.trace())
4257 << "getBookPage: offerIndex=" << offerIndex;
4258 }
4259 }
4260
4261 if (!bDone)
4262 {
4263 auto sleOffer = view.read(keylet::offer(offerIndex));
4264
4265 if (sleOffer)
4266 {
4267 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4268 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4269 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4270 STAmount saOwnerFunds;
4271 bool firstOwnerOffer(true);
4272
4273 if (book.out.account == uOfferOwnerID)
4274 {
4275 // If an offer is selling issuer's own IOUs, it is fully
4276 // funded.
4277 saOwnerFunds = saTakerGets;
4278 }
4279 else if (bGlobalFreeze)
4280 {
4281 // If either asset is globally frozen, consider all offers
4282 // that aren't ours to be totally unfunded
4283 saOwnerFunds.clear(book.out);
4284 }
4285 else
4286 {
4287 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4288 if (umBalanceEntry != umBalance.end())
4289 {
4290 // Found in running balance table.
4291
4292 saOwnerFunds = umBalanceEntry->second;
4293 firstOwnerOffer = false;
4294 }
4295 else
4296 {
4297 // Did not find balance in table.
4298
4299 saOwnerFunds = accountHolds(
4300 view,
4301 uOfferOwnerID,
4302 book.out.currency,
4303 book.out.account,
4305 viewJ);
4306
4307 if (saOwnerFunds < beast::zero)
4308 {
4309 // Treat negative funds as zero.
4310
4311 saOwnerFunds.clear();
4312 }
4313 }
4314 }
4315
4316 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4317
4318 STAmount saTakerGetsFunded;
4319 STAmount saOwnerFundsLimit = saOwnerFunds;
4320 Rate offerRate = parityRate;
4321
4322 if (rate != parityRate
4323 // Have a tranfer fee.
4324 && uTakerID != book.out.account
4325 // Not taking offers of own IOUs.
4326 && book.out.account != uOfferOwnerID)
4327 // Offer owner not issuing ownfunds
4328 {
4329 // Need to charge a transfer fee to offer owner.
4330 offerRate = rate;
4331 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4332 }
4333
4334 if (saOwnerFundsLimit >= saTakerGets)
4335 {
4336 // Sufficient funds no shenanigans.
4337 saTakerGetsFunded = saTakerGets;
4338 }
4339 else
4340 {
4341 // Only provide, if not fully funded.
4342
4343 saTakerGetsFunded = saOwnerFundsLimit;
4344
4345 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4346 std::min(
4347 saTakerPays,
4348 multiply(
4349 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4350 .setJson(jvOffer[jss::taker_pays_funded]);
4351 }
4352
4353 STAmount saOwnerPays = (parityRate == offerRate)
4354 ? saTakerGetsFunded
4355 : std::min(
4356 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4357
4358 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4359
4360 // Include all offers funded and unfunded
4361 Json::Value& jvOf = jvOffers.append(jvOffer);
4362 jvOf[jss::quality] = saDirRate.getText();
4363
4364 if (firstOwnerOffer)
4365 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4366 }
4367 else
4368 {
4369 JLOG(m_journal.warn()) << "Missing offer";
4370 }
4371
4372 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4373 {
4374 bDirectAdvance = true;
4375 }
4376 else
4377 {
4378 JLOG(m_journal.trace())
4379 << "getBookPage: offerIndex=" << offerIndex;
4380 }
4381 }
4382 }
4383
4384 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4385 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4386}
4387
4388#else
4389
4390// This is the new code that uses the book iterators
4391// It has temporarily been disabled
4392
4393void
4396 Book const& book,
4397 AccountID const& uTakerID,
4398 bool const bProof,
4399 unsigned int iLimit,
4400 Json::Value const& jvMarker,
4401 Json::Value& jvResult)
4402{
4403 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4404
4406
4407 MetaView lesActive(lpLedger, tapNONE, true);
4408 OrderBookIterator obIterator(lesActive, book);
4409
4410 auto const rate = transferRate(lesActive, book.out.account);
4411
4412 const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4413 lesActive.isGlobalFrozen(book.in.account);
4414
4415 while (iLimit-- > 0 && obIterator.nextOffer())
4416 {
4417 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4418 if (sleOffer)
4419 {
4420 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4421 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4422 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4423 STAmount saDirRate = obIterator.getCurrentRate();
4424 STAmount saOwnerFunds;
4425
4426 if (book.out.account == uOfferOwnerID)
4427 {
4428 // If offer is selling issuer's own IOUs, it is fully funded.
4429 saOwnerFunds = saTakerGets;
4430 }
4431 else if (bGlobalFreeze)
4432 {
4433 // If either asset is globally frozen, consider all offers
4434 // that aren't ours to be totally unfunded
4435 saOwnerFunds.clear(book.out);
4436 }
4437 else
4438 {
4439 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4440
4441 if (umBalanceEntry != umBalance.end())
4442 {
4443 // Found in running balance table.
4444
4445 saOwnerFunds = umBalanceEntry->second;
4446 }
4447 else
4448 {
4449 // Did not find balance in table.
4450
4451 saOwnerFunds = lesActive.accountHolds(
4452 uOfferOwnerID,
4453 book.out.currency,
4454 book.out.account,
4456
4457 if (saOwnerFunds.isNegative())
4458 {
4459 // Treat negative funds as zero.
4460
4461 saOwnerFunds.zero();
4462 }
4463 }
4464 }
4465
4466 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4467
4468 STAmount saTakerGetsFunded;
4469 STAmount saOwnerFundsLimit = saOwnerFunds;
4470 Rate offerRate = parityRate;
4471
4472 if (rate != parityRate
4473 // Have a tranfer fee.
4474 && uTakerID != book.out.account
4475 // Not taking offers of own IOUs.
4476 && book.out.account != uOfferOwnerID)
4477 // Offer owner not issuing ownfunds
4478 {
4479 // Need to charge a transfer fee to offer owner.
4480 offerRate = rate;
4481 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4482 }
4483
4484 if (saOwnerFundsLimit >= saTakerGets)
4485 {
4486 // Sufficient funds no shenanigans.
4487 saTakerGetsFunded = saTakerGets;
4488 }
4489 else
4490 {
4491 // Only provide, if not fully funded.
4492 saTakerGetsFunded = saOwnerFundsLimit;
4493
4494 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4495
4496 // TOOD(tom): The result of this expression is not used - what's
4497 // going on here?
4498 std::min(
4499 saTakerPays,
4500 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4501 .setJson(jvOffer[jss::taker_pays_funded]);
4502 }
4503
4504 STAmount saOwnerPays = (parityRate == offerRate)
4505 ? saTakerGetsFunded
4506 : std::min(
4507 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4508
4509 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4510
4511 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4512 {
4513 // Only provide funded offers and offers of the taker.
4514 Json::Value& jvOf = jvOffers.append(jvOffer);
4515 jvOf[jss::quality] = saDirRate.getText();
4516 }
4517 }
4518 }
4519
4520 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4521 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4522}
4523
4524#endif
4525
4526inline void
4528{
4529 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4530 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4532 counters[static_cast<std::size_t>(mode)].dur += current;
4533
4536 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4537 .dur.count());
4539 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4540 .dur.count());
4542 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4544 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4545 .dur.count());
4547 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4548
4550 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4551 .transitions);
4553 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4554 .transitions);
4556 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4558 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4559 .transitions);
4561 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4562}
4563
4564void
4566{
4567 auto now = std::chrono::steady_clock::now();
4568
4569 std::lock_guard lock(mutex_);
4570 ++counters_[static_cast<std::size_t>(om)].transitions;
4571 if (om == OperatingMode::FULL &&
4572 counters_[static_cast<std::size_t>(om)].transitions == 1)
4573 {
4574 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4575 now - processStart_)
4576 .count();
4577 }
4578 counters_[static_cast<std::size_t>(mode_)].dur +=
4579 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4580
4581 mode_ = om;
4582 start_ = now;
4583}
4584
4585void
4587{
4588 auto [counters, mode, start, initialSync] = getCounterData();
4589 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4591 counters[static_cast<std::size_t>(mode)].dur += current;
4592
4593 obj[jss::state_accounting] = Json::objectValue;
4595 i <= static_cast<std::size_t>(OperatingMode::FULL);
4596 ++i)
4597 {
4598 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4599 auto& state = obj[jss::state_accounting][states_[i]];
4600 state[jss::transitions] = std::to_string(counters[i].transitions);
4601 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4602 }
4603 obj[jss::server_state_duration_us] = std::to_string(current.count());
4604 if (initialSync)
4605 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4606}
4607
4608//------------------------------------------------------------------------------
4609
4612 Application& app,
4614 bool standalone,
4615 std::size_t minPeerCount,
4616 bool startvalid,
4617 JobQueue& job_queue,
4619 ValidatorKeys const& validatorKeys,
4620 boost::asio::io_service& io_svc,
4621 beast::Journal journal,
4622 beast::insight::Collector::ptr const& collector)
4623{
4624 return std::make_unique<NetworkOPsImp>(
4625 app,
4626 clock,
4627 standalone,
4628 minPeerCount,
4629 startvalid,
4630 job_queue,
4632 validatorKeys,
4633 io_svc,
4634 journal,
4635 collector);
4636}
4637
4638} // namespace ripple
T back_inserter(T... args)
T begin(T... args)
Decorator for streaming out compact json.
Definition: json_writer.h:318
Lightweight wrapper to tag static string.
Definition: json_value.h:62
Represents a JSON value.
Definition: json_value.h:148
Json::UInt UInt
Definition: json_value.h:155
Value get(UInt index, const Value &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Definition: json_value.cpp:847
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:897
bool isMember(const char *key) const
Return true if the object has a member named key.
Definition: json_value.cpp:949
A generic endpoint for log messages.
Definition: Journal.h:60
Stream error() const
Definition: Journal.h:346
Stream debug() const
Definition: Journal.h:328
Stream info() const
Definition: Journal.h:334
Stream trace() const
Severity stream access functions.
Definition: Journal.h:322
Stream warn() const
Definition: Journal.h:340
A metric for measuring an integral value.
Definition: Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition: Gauge.h:68
A reference to a handler for performing polled collection.
Definition: Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition: Book.h:35
Issue in
Definition: Book.h:37
Issue out
Definition: Book.h:38
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition: ClusterNode.h:46
std::uint32_t getLoadFee() const
Definition: ClusterNode.h:52
NetClock::time_point getReportTime() const
Definition: ClusterNode.h:58
PublicKey const & identity() const
Definition: ClusterNode.h:64
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:48
std::string SERVER_DOMAIN
Definition: Config.h:279
std::size_t NODE_SIZE
Definition: Config.h:213
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition: Config.h:160
int RELAY_UNTRUSTED_VALIDATIONS
Definition: Config.h:169
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition: InfoSub.h:53
AccountID account
Definition: Issue.h:39
Currency currency
Definition: Issue.h:38
A pool of threads to perform work.
Definition: JobQueue.h:55
Json::Value getJson(int c=0)
Definition: JobQueue.cpp:213
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:165
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
Definition: LedgerMaster.h:264
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:78
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
Definition: LoadFeeTrack.h:45
std::uint32_t getClusterFee() const
Definition: LoadFeeTrack.h:82
std::uint32_t getLocalFee() const
Definition: LoadFeeTrack.h:75
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:89
std::uint32_t getRemoteFee() const
Definition: LoadFeeTrack.h:68
std::uint32_t getLoadFactor() const
Definition: LoadFeeTrack.h:95
Manages load sources.
Definition: LoadManager.h:46
void resetDeadlockDetector()
Reset the deadlock detection timer.
Definition: LoadManager.cpp:63
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
Definition: NetworkOPs.cpp:138
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
Definition: NetworkOPs.cpp:148
std::chrono::steady_clock::time_point start_
Definition: NetworkOPs.cpp:150
static std::array< Json::StaticString const, 5 > const states_
Definition: NetworkOPs.cpp:154
std::chrono::steady_clock::time_point const processStart_
Definition: NetworkOPs.cpp:152
Transaction with input flags and results to be applied in batches.
Definition: NetworkOPs.cpp:89
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
Definition: NetworkOPs.cpp:98
std::shared_ptr< Transaction > const transaction
Definition: NetworkOPs.cpp:91
boost::asio::steady_timer accountHistoryTxTimer_
Definition: NetworkOPs.cpp:730
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
Definition: NetworkOPs.cpp:862
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::vector< TransactionStatus > mTransactions
Definition: NetworkOPs.cpp:774
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Definition: NetworkOPs.cpp:720
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
Definition: NetworkOPs.cpp:732
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
Definition: NetworkOPs.cpp:880
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
Definition: NetworkOPs.cpp:728
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
Definition: NetworkOPs.cpp:115
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
Definition: NetworkOPs.cpp:716
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
Definition: NetworkOPs.cpp:259
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Definition: NetworkOPs.cpp:743
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, const bool bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
Definition: NetworkOPs.cpp:729
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
Definition: NetworkOPs.cpp:121
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:221
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
Definition: NetworkOPs.cpp:723
beast::Journal m_journal
Definition: NetworkOPs.cpp:714
SubInfoMapType mSubAccount
Definition: NetworkOPs.cpp:738
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
Definition: NetworkOPs.cpp:778
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
Definition: NetworkOPs.cpp:995
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
Definition: NetworkOPs.cpp:727
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
Definition: NetworkOPs.cpp:933
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
Definition: NetworkOPs.cpp:758
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
Definition: NetworkOPs.cpp:768
std::atomic< bool > unlBlocked_
Definition: NetworkOPs.cpp:725
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
Definition: NetworkOPs.cpp:718
std::atomic< bool > needNetworkLedger_
Definition: NetworkOPs.cpp:722
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
Definition: NetworkOPs.cpp:776
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
Definition: NetworkOPs.cpp:892
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
Definition: NetworkOPs.cpp:736
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
Definition: NetworkOPs.cpp:923
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
Definition: NetworkOPs.cpp:760
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
Definition: NetworkOPs.cpp:874
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
Definition: NetworkOPs.cpp:771
void setMode(OperatingMode om) override
void stop() override
Definition: NetworkOPs.cpp:563
void clearNeedNetworkLedger() override
Definition: NetworkOPs.cpp:886
DispatchState mDispatchState
Definition: NetworkOPs.cpp:773
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
Definition: NetworkOPs.cpp:739
bool checkLastClosedLedger(const Overlay::PeerSequence &, uint256 &networkClosed)
void reportFeeChange() override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
Definition: NetworkOPs.cpp:898
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
Definition: NetworkOPs.cpp:779
Application & app_
Definition: NetworkOPs.cpp:713
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
Definition: NetworkOPs.cpp:734
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
Definition: NetworkOPs.cpp:741
std::atomic< bool > amendmentWarned_
Definition: NetworkOPs.cpp:724
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
Definition: NetworkOPs.cpp:904
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition: NetworkOPs.h:87
void getCountsJson(Json::Value &obj)
Definition: Database.cpp:267
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:49
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:56
BookListeners::pointer getBookListeners(Book const &)
BookListeners::pointer makeBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, const AcceptedLedgerTx &alTx, MultiApiJson const &jvObj)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
Definition: RCLConsensus.h:52
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
Definition: RCLConsensus.h:446
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Definition: RCLConsensus.h:459
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:43
Represents a set of transactions in RCLConsensus.
Definition: RCLCxTx.h:63
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition: RFC1751.cpp:507
Collects logging information.
Definition: RCLConsensus.h:550
std::unique_ptr< std::stringstream > const & ss()
Definition: RCLConsensus.h:564
A view into a ledger.
Definition: ReadView.h:51
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition: STAmount.cpp:507
std::string getText() const override
Definition: STAmount.cpp:547
Issue const & issue() const
Definition: STAmount.h:487
std::optional< T > get(std::string const &name) const
Definition: BasicConfig.h:140
std::size_t size() const noexcept
Definition: Serializer.h:73
void const * data() const noexcept
Definition: Serializer.h:79
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition: TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition: TxQ.cpp:1777
static time_point now()
Definition: UptimeClock.cpp:67
Validator keys and manifest as set in configuration file.
Definition: ValidatorKeys.h:38
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition: XRPAmount.h:262
Json::Value jsonClipped() const
Definition: XRPAmount.h:218
iterator begin()
Definition: base_uint.h:136
static constexpr std::size_t size()
Definition: base_uint.h:526
bool isZero() const
Definition: base_uint.h:540
bool isNonZero() const
Definition: base_uint.h:545
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition: scope.h:231
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition: json_value.h:43
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:44
int Int
Definition: json_forwards.h:26
unsigned int UInt
Definition: json_forwards.h:27
void rngfill(void *buffer, std::size_t bytes, Generator &g)
Definition: rngfill.h:34
std::string const & getVersionString()
Server version.
Definition: BuildInfo.cpp:68
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition: BookChanges.h:47
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition: Indexes.cpp:175
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition: Indexes.cpp:371
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition: Indexes.cpp:265
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:31
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:114
STAmount divide(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:93
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition: STTx.cpp:631
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition: View.cpp:442
@ fhZERO_IF_FROZEN
Definition: View.h:75
@ fhIGNORE_FREEZE
Definition: View.h:75
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition: View.cpp:136
std::uint64_t getQuality(uint256 const &uBase)
Definition: Indexes.cpp:140
@ INCLUDED
Definition: Transaction.h:49
@ OBSOLETE
Definition: Transaction.h:54
@ INVALID
Definition: Transaction.h:48
error_code_i
Definition: ErrorCodes.h:40
@ rpcSUCCESS
Definition: ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition: ErrorCodes.h:84
@ rpcINTERNAL
Definition: ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
Definition: SecretKey.cpp:368
auto constexpr muldiv_max
Definition: mulDiv.h:29
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition: LocalTxs.cpp:196
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition: View.cpp:649
STAmount amountFromQuality(std::uint64_t rate)
Definition: STAmount.cpp:854
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition: ErrorCodes.h:167
@ warnRPC_UNSUPPORTED_MAJORITY
Definition: ErrorCodes.h:165
@ warnRPC_AMENDMENT_BLOCKED
Definition: ErrorCodes.h:166
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition: NetworkOPs.h:66
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:53
bool isTesSuccess(TER x)
Definition: TER.h:656
bool isTerRetry(TER x)
Definition: TER.h:650
AccountID calcAccountID(PublicKey const &pk)
Definition: AccountID.cpp:168
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Definition: csprng.cpp:103
Json::Value rpcError(int iError)
Definition: RPCErr.cpp:31
@ tefPAST_SEQ
Definition: TER.h:175
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
Definition: NetworkOPs.cpp:843
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
bool isTemMalformed(TER x)
Definition: TER.h:638
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition: View.cpp:147
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition: ApiVersion.h:102
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:75
@ tesSUCCESS
Definition: TER.h:242
uint256 getQualityNext(uint256 const &uBase)
Definition: Indexes.cpp:132
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition: View.cpp:308
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition: ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition: chrono.h:92
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:630
FeeSetup setup_FeeVote(Section const &section)
Definition: Config.cpp:1091
Number root(Number f, unsigned d)
Definition: Number.cpp:635
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Definition: mulDiv.cpp:32
ApplyFlags
Definition: ApplyView.h:30
@ tapFAIL_HARD
Definition: ApplyView.h:35
@ tapUNLIMITED
Definition: ApplyView.h:42
@ tapNONE
Definition: ApplyView.h:31
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:37
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition: Seed.cpp:76
@ terQUEUED
Definition: TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition: TER.cpp:242
@ jtNETOP_CLUSTER
Definition: Job.h:74
@ jtCLIENT_FEE_CHANGE
Definition: Job.h:46
@ jtTRANSACTION
Definition: Job.h:61
@ jtTXN_PROC
Definition: Job.h:81
@ jtCLIENT_CONSENSUS
Definition: Job.h:47
@ jtBATCH
Definition: Job.h:64
@ jtCLIENT_ACCT_HIST
Definition: Job.h:48
uint256 getBookBase(Book const &book)
Definition: Indexes.cpp:113
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition: View.cpp:173
static std::uint32_t trunc32(std::uint64_t v)
@ temBAD_SIGNATURE
Definition: TER.h:105
static auto const genesisAccountId
Definition: NetworkOPs.cpp:856
STL namespace.
T ref(T... args)
T reset(T... args)
T set_intersection(T... args)
T str(T... args)
std::string serialized
The manifest in serialized form.
Definition: Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition: Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition: Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition: Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Definition: Manifest.h:86
Server fees published on server subscription.
Definition: NetworkOPs.cpp:198
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
Definition: NetworkOPs.cpp:217
bool operator==(ServerFeeSummary const &b) const
Definition: NetworkOPs.cpp:209
beast::insight::Gauge full_transitions
Definition: NetworkOPs.cpp:830
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:785
beast::insight::Hook hook
Definition: NetworkOPs.cpp:819
beast::insight::Gauge connected_duration
Definition: NetworkOPs.cpp:821
beast::insight::Gauge tracking_duration
Definition: NetworkOPs.cpp:823
beast::insight::Gauge connected_transitions
Definition: NetworkOPs.cpp:827
beast::insight::Gauge disconnected_transitions
Definition: NetworkOPs.cpp:826
beast::insight::Gauge syncing_duration
Definition: NetworkOPs.cpp:822
beast::insight::Gauge tracking_transitions
Definition: NetworkOPs.cpp:829
beast::insight::Gauge full_duration
Definition: NetworkOPs.cpp:824
beast::insight::Gauge disconnected_duration
Definition: NetworkOPs.cpp:820
beast::insight::Gauge syncing_transitions
Definition: NetworkOPs.cpp:828
SubAccountHistoryIndex(AccountID const &accountId)
Definition: NetworkOPs.cpp:677
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:696
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:691
Represents a transfer rate.
Definition: Rate.h:40
Data format for exchanging consumption information across peers.
Definition: Gossip.h:30
std::vector< Item > items
Definition: Gossip.h:42
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition: TxQ.h:164
void set(const char *key, auto const &v)
Definition: MultiApiJson.h:83
IsMemberResult isMember(const char *key) const
Definition: MultiApiJson.h:94
Select all peers (except optional excluded) that are in our cluster.
Definition: predicates.h:137
Sends a message to all peers.
Definition: predicates.h:32
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)