// rippled — NetworkOPs.cpp (documentation-page scrape artifacts removed)
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/misc/AmendmentTable.h>
32#include <xrpld/app/misc/DeliverMax.h>
33#include <xrpld/app/misc/HashRouter.h>
34#include <xrpld/app/misc/LoadFeeTrack.h>
35#include <xrpld/app/misc/NetworkOPs.h>
36#include <xrpld/app/misc/Transaction.h>
37#include <xrpld/app/misc/TxQ.h>
38#include <xrpld/app/misc/ValidatorKeys.h>
39#include <xrpld/app/misc/ValidatorList.h>
40#include <xrpld/app/misc/detail/AccountTxPaging.h>
41#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
42#include <xrpld/app/tx/apply.h>
43#include <xrpld/consensus/Consensus.h>
44#include <xrpld/consensus/ConsensusParms.h>
45#include <xrpld/overlay/Cluster.h>
46#include <xrpld/overlay/Overlay.h>
47#include <xrpld/overlay/predicates.h>
48#include <xrpld/perflog/PerfLog.h>
49#include <xrpld/rpc/BookChanges.h>
50#include <xrpld/rpc/DeliveredAmount.h>
51#include <xrpld/rpc/MPTokenIssuanceID.h>
52#include <xrpld/rpc/ServerHandler.h>
53
54#include <xrpl/basics/UptimeClock.h>
55#include <xrpl/basics/mulDiv.h>
56#include <xrpl/basics/safe_cast.h>
57#include <xrpl/basics/scope.h>
58#include <xrpl/beast/utility/rngfill.h>
59#include <xrpl/crypto/RFC1751.h>
60#include <xrpl/crypto/csprng.h>
61#include <xrpl/protocol/BuildInfo.h>
62#include <xrpl/protocol/Feature.h>
63#include <xrpl/protocol/MultiApiJson.h>
64#include <xrpl/protocol/RPCErr.h>
65#include <xrpl/protocol/jss.h>
66#include <xrpl/resource/Fees.h>
67#include <xrpl/resource/ResourceManager.h>
68
69#include <boost/asio/ip/host_name.hpp>
70#include <boost/asio/steady_timer.hpp>
71
72#include <algorithm>
73#include <exception>
74#include <mutex>
75#include <optional>
76#include <set>
77#include <sstream>
78#include <string>
79#include <tuple>
80#include <unordered_map>
81
82namespace ripple {
83
84class NetworkOPsImp final : public NetworkOPs
85{
91 {
92 public:
94 bool const admin;
95 bool const local;
97 bool applied = false;
99
102 bool a,
103 bool l,
104 FailHard f)
105 : transaction(t), admin(a), local(l), failType(f)
106 {
107 XRPL_ASSERT(
109 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
110 "valid inputs");
111 }
112 };
113
117 enum class DispatchState : unsigned char {
118 none,
119 scheduled,
120 running,
121 };
122
124
140 {
141 struct Counters
142 {
143 explicit Counters() = default;
144
147 };
148
152 std::chrono::steady_clock::time_point start_ =
154 std::chrono::steady_clock::time_point const processStart_ = start_;
157
158 public:
160 {
162 .transitions = 1;
163 }
164
171 void
173
179 void
180 json(Json::Value& obj) const;
181
183 {
185 decltype(mode_) mode;
186 decltype(start_) start;
188 };
189
192 {
195 }
196 };
197
200 {
201 ServerFeeSummary() = default;
202
204 XRPAmount fee,
205 TxQ::Metrics&& escalationMetrics,
206 LoadFeeTrack const& loadFeeTrack);
207 bool
208 operator!=(ServerFeeSummary const& b) const;
209
210 bool
212 {
213 return !(*this != b);
214 }
215
220 };
221
222public:
224 Application& app,
226 bool standalone,
227 std::size_t minPeerCount,
228 bool start_valid,
229 JobQueue& job_queue,
231 ValidatorKeys const& validatorKeys,
232 boost::asio::io_service& io_svc,
233 beast::Journal journal,
234 beast::insight::Collector::ptr const& collector)
235 : app_(app)
236 , m_journal(journal)
239 , heartbeatTimer_(io_svc)
240 , clusterTimer_(io_svc)
241 , accountHistoryTxTimer_(io_svc)
242 , mConsensus(
243 app,
245 setup_FeeVote(app_.config().section("voting")),
246 app_.logs().journal("FeeVote")),
248 *m_localTX,
249 app.getInboundTransactions(),
250 beast::get_abstract_clock<std::chrono::steady_clock>(),
251 validatorKeys,
252 app_.logs().journal("LedgerConsensus"))
253 , validatorPK_(
254 validatorKeys.keys ? validatorKeys.keys->publicKey
255 : decltype(validatorPK_){})
257 validatorKeys.keys ? validatorKeys.keys->masterPublicKey
258 : decltype(validatorMasterPK_){})
260 , m_job_queue(job_queue)
261 , m_standalone(standalone)
262 , minPeerCount_(start_valid ? 0 : minPeerCount)
264 {
265 }
266
267 ~NetworkOPsImp() override
268 {
269 // This clear() is necessary to ensure the shared_ptrs in this map get
270 // destroyed NOW because the objects in this map invoke methods on this
271 // class when they are destroyed
273 }
274
275public:
277 getOperatingMode() const override;
278
280 strOperatingMode(OperatingMode const mode, bool const admin) const override;
281
283 strOperatingMode(bool const admin = false) const override;
284
285 //
286 // Transaction operations.
287 //
288
289 // Must complete immediately.
290 void
292
293 void
295 std::shared_ptr<Transaction>& transaction,
296 bool bUnlimited,
297 bool bLocal,
298 FailHard failType) override;
299
308 void
311 bool bUnlimited,
312 FailHard failType);
313
323 void
326 bool bUnlimited,
327 FailHard failtype);
328
332 void
334
340 void
342
343 //
344 // Owner functions.
345 //
346
350 AccountID const& account) override;
351
352 //
353 // Book functions.
354 //
355
356 void
359 Book const&,
360 AccountID const& uTakerID,
361 const bool bProof,
362 unsigned int iLimit,
363 Json::Value const& jvMarker,
364 Json::Value& jvResult) override;
365
366 // Ledger proposal/close functions.
367 bool
369
370 bool
373 std::string const& source) override;
374
375 void
376 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
377
378 // Network state machine.
379
380 // Used for the "jump" case.
381private:
382 void
384 bool
386
387public:
388 bool
390 uint256 const& networkClosed,
391 std::unique_ptr<std::stringstream> const& clog) override;
392 void
394 void
395 setStandAlone() override;
396
400 void
401 setStateTimer() override;
402
403 void
404 setNeedNetworkLedger() override;
405 void
406 clearNeedNetworkLedger() override;
407 bool
408 isNeedNetworkLedger() override;
409 bool
410 isFull() override;
411
412 void
413 setMode(OperatingMode om) override;
414
415 bool
416 isBlocked() override;
417 bool
418 isAmendmentBlocked() override;
419 void
420 setAmendmentBlocked() override;
421 bool
422 isAmendmentWarned() override;
423 void
424 setAmendmentWarned() override;
425 void
426 clearAmendmentWarned() override;
427 bool
428 isUNLBlocked() override;
429 void
430 setUNLBlocked() override;
431 void
432 clearUNLBlocked() override;
433 void
434 consensusViewChange() override;
435
437 getConsensusInfo() override;
439 getServerInfo(bool human, bool admin, bool counters) override;
440 void
441 clearLedgerFetch() override;
443 getLedgerFetchInfo() override;
446 std::optional<std::chrono::milliseconds> consensusDelay) override;
447 void
448 reportFeeChange() override;
449 void
451
452 void
453 updateLocalTx(ReadView const& view) override;
455 getLocalTxCount() override;
456
457 //
458 // Monitoring: publisher side.
459 //
460 void
461 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
462 void
465 std::shared_ptr<STTx const> const& transaction,
466 TER result) override;
467 void
468 pubValidation(std::shared_ptr<STValidation> const& val) override;
469
470 //--------------------------------------------------------------------------
471 //
472 // InfoSub::Source.
473 //
474 void
476 InfoSub::ref ispListener,
477 hash_set<AccountID> const& vnaAccountIDs,
478 bool rt) override;
479 void
481 InfoSub::ref ispListener,
482 hash_set<AccountID> const& vnaAccountIDs,
483 bool rt) override;
484
485 // Just remove the subscription from the tracking
486 // not from the InfoSub. Needed for InfoSub destruction
487 void
489 std::uint64_t seq,
490 hash_set<AccountID> const& vnaAccountIDs,
491 bool rt) override;
492
494 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
495 override;
496 void
498 InfoSub::ref ispListener,
499 AccountID const& account,
500 bool historyOnly) override;
501
502 void
504 std::uint64_t seq,
505 AccountID const& account,
506 bool historyOnly) override;
507
508 bool
509 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
510 bool
511 unsubLedger(std::uint64_t uListener) override;
512
513 bool
514 subBookChanges(InfoSub::ref ispListener) override;
515 bool
516 unsubBookChanges(std::uint64_t uListener) override;
517
518 bool
519 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
520 override;
521 bool
522 unsubServer(std::uint64_t uListener) override;
523
524 bool
525 subBook(InfoSub::ref ispListener, Book const&) override;
526 bool
527 unsubBook(std::uint64_t uListener, Book const&) override;
528
529 bool
530 subManifests(InfoSub::ref ispListener) override;
531 bool
532 unsubManifests(std::uint64_t uListener) override;
533 void
534 pubManifest(Manifest const&) override;
535
536 bool
537 subTransactions(InfoSub::ref ispListener) override;
538 bool
539 unsubTransactions(std::uint64_t uListener) override;
540
541 bool
542 subRTTransactions(InfoSub::ref ispListener) override;
543 bool
544 unsubRTTransactions(std::uint64_t uListener) override;
545
546 bool
547 subValidations(InfoSub::ref ispListener) override;
548 bool
549 unsubValidations(std::uint64_t uListener) override;
550
551 bool
552 subPeerStatus(InfoSub::ref ispListener) override;
553 bool
554 unsubPeerStatus(std::uint64_t uListener) override;
555 void
556 pubPeerStatus(std::function<Json::Value(void)> const&) override;
557
558 bool
559 subConsensus(InfoSub::ref ispListener) override;
560 bool
561 unsubConsensus(std::uint64_t uListener) override;
562
564 findRpcSub(std::string const& strUrl) override;
566 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
567 bool
568 tryRemoveRpcSub(std::string const& strUrl) override;
569
570 void
571 stop() override
572 {
573 {
574 boost::system::error_code ec;
575 heartbeatTimer_.cancel(ec);
576 if (ec)
577 {
578 JLOG(m_journal.error())
579 << "NetworkOPs: heartbeatTimer cancel error: "
580 << ec.message();
581 }
582
583 ec.clear();
584 clusterTimer_.cancel(ec);
585 if (ec)
586 {
587 JLOG(m_journal.error())
588 << "NetworkOPs: clusterTimer cancel error: "
589 << ec.message();
590 }
591
592 ec.clear();
593 accountHistoryTxTimer_.cancel(ec);
594 if (ec)
595 {
596 JLOG(m_journal.error())
597 << "NetworkOPs: accountHistoryTxTimer cancel error: "
598 << ec.message();
599 }
600 }
601 // Make sure that any waitHandlers pending in our timers are done.
602 using namespace std::chrono_literals;
603 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
604 }
605
606 void
607 stateAccounting(Json::Value& obj) override;
608
609private:
610 void
611 setTimer(
612 boost::asio::steady_timer& timer,
613 std::chrono::milliseconds const& expiry_time,
614 std::function<void()> onExpire,
615 std::function<void()> onError);
616 void
618 void
620 void
622 void
624
626 transJson(
627 std::shared_ptr<STTx const> const& transaction,
628 TER result,
629 bool validated,
632
633 void
636 AcceptedLedgerTx const& transaction,
637 bool last);
638
639 void
642 AcceptedLedgerTx const& transaction,
643 bool last);
644
645 void
648 std::shared_ptr<STTx const> const& transaction,
649 TER result);
650
651 void
652 pubServer();
653 void
655
657 getHostId(bool forAdmin);
658
659private:
663
664 /*
665 * With a validated ledger to separate history and future, the node
666 * streams historical txns with negative indexes starting from -1,
667 * and streams future txns starting from index 0.
668 * The SubAccountHistoryIndex struct maintains these indexes.
669 * It also has a flag stopHistorical_ for stopping streaming
670 * the historical txns.
671 */
673 {
675 // forward
677 // separate backward and forward
679 // history, backward
684
686 : accountId_(accountId)
687 , forwardTxIndex_(0)
690 , historyTxIndex_(-1)
691 , haveHistorical_(false)
692 , stopHistorical_(false)
693 {
694 }
695 };
697 {
700 };
702 {
705 };
708
712 void
716 void
718 void
720
723
725
727
729
734
736 boost::asio::steady_timer heartbeatTimer_;
737 boost::asio::steady_timer clusterTimer_;
738 boost::asio::steady_timer accountHistoryTxTimer_;
739
741
744
746
748
751
753
755
756 enum SubTypes {
757 sLedger, // Accepted ledgers.
758 sManifests, // Received validator manifests.
759 sServer, // When server changes connectivity state.
760 sTransactions, // All accepted transactions.
761 sRTTransactions, // All proposed and accepted transactions.
762 sValidations, // Received validations.
763 sPeerStatus, // Peer status changes.
764 sConsensusPhase, // Consensus phase
765 sBookChanges, // Per-ledger order book changes
766 sLastEntry // Any new entry must be ADDED ABOVE this one
767 };
768
770
772
774
775 // Whether we are in standalone mode.
776 bool const m_standalone;
777
778 // The number of nodes that we need to consider ourselves connected.
780
781 // Transaction batching.
786
788
791
792private:
793 struct Stats
794 {
795 template <class Handler>
797 Handler const& handler,
798 beast::insight::Collector::ptr const& collector)
799 : hook(collector->make_hook(handler))
800 , disconnected_duration(collector->make_gauge(
801 "State_Accounting",
802 "Disconnected_duration"))
803 , connected_duration(collector->make_gauge(
804 "State_Accounting",
805 "Connected_duration"))
807 collector->make_gauge("State_Accounting", "Syncing_duration"))
808 , tracking_duration(collector->make_gauge(
809 "State_Accounting",
810 "Tracking_duration"))
812 collector->make_gauge("State_Accounting", "Full_duration"))
813 , disconnected_transitions(collector->make_gauge(
814 "State_Accounting",
815 "Disconnected_transitions"))
816 , connected_transitions(collector->make_gauge(
817 "State_Accounting",
818 "Connected_transitions"))
819 , syncing_transitions(collector->make_gauge(
820 "State_Accounting",
821 "Syncing_transitions"))
822 , tracking_transitions(collector->make_gauge(
823 "State_Accounting",
824 "Tracking_transitions"))
826 collector->make_gauge("State_Accounting", "Full_transitions"))
827 {
828 }
829
836
842 };
843
844 std::mutex m_statsMutex; // Mutex to lock m_stats
846
847private:
848 void
850};
851
852//------------------------------------------------------------------------------
853
855 {"disconnected", "connected", "syncing", "tracking", "full"}};
856
858
866
867static auto const genesisAccountId = calcAccountID(
869 .first);
870
871//------------------------------------------------------------------------------
872inline OperatingMode
874{
875 return mMode;
876}
877
878inline std::string
879NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
880{
881 return strOperatingMode(mMode, admin);
882}
883
884inline void
886{
888}
889
890inline void
892{
893 needNetworkLedger_ = true;
894}
895
896inline void
898{
899 needNetworkLedger_ = false;
900}
901
902inline bool
904{
905 return needNetworkLedger_;
906}
907
908inline bool
910{
912}
913
916{
917 static std::string const hostname = boost::asio::ip::host_name();
918
919 if (forAdmin)
920 return hostname;
921
922 // For non-admin uses hash the node public key into a
923 // single RFC1751 word:
924 static std::string const shroudedHostId = [this]() {
925 auto const& id = app_.nodeIdentity();
926
927 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
928 }();
929
930 return shroudedHostId;
931}
932
933void
935{
937
938 // Only do this work if a cluster is configured
939 if (app_.cluster().size() != 0)
941}
942
943void
945 boost::asio::steady_timer& timer,
946 const std::chrono::milliseconds& expiry_time,
947 std::function<void()> onExpire,
948 std::function<void()> onError)
949{
950 // Only start the timer if waitHandlerCounter_ is not yet joined.
951 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
952 [this, onExpire, onError](boost::system::error_code const& e) {
953 if ((e.value() == boost::system::errc::success) &&
954 (!m_job_queue.isStopped()))
955 {
956 onExpire();
957 }
958 // Recover as best we can if an unexpected error occurs.
959 if (e.value() != boost::system::errc::success &&
960 e.value() != boost::asio::error::operation_aborted)
961 {
962 // Try again later and hope for the best.
963 JLOG(m_journal.error())
964 << "Timer got error '" << e.message()
965 << "'. Restarting timer.";
966 onError();
967 }
968 }))
969 {
970 timer.expires_from_now(expiry_time);
971 timer.async_wait(std::move(*optionalCountedHandler));
972 }
973}
974
975void
976NetworkOPsImp::setHeartbeatTimer()
977{
978 setTimer(
979 heartbeatTimer_,
980 mConsensus.parms().ledgerGRANULARITY,
981 [this]() {
982 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
983 processHeartbeatTimer();
984 });
985 },
986 [this]() { setHeartbeatTimer(); });
987}
988
989void
990NetworkOPsImp::setClusterTimer()
991{
992 using namespace std::chrono_literals;
993
994 setTimer(
995 clusterTimer_,
996 10s,
997 [this]() {
998 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
999 processClusterTimer();
1000 });
1001 },
1002 [this]() { setClusterTimer(); });
1003}
1004
1005void
1006NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
1007{
1008 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
1009 << toBase58(subInfo.index_->accountId_);
1010 using namespace std::chrono_literals;
1011 setTimer(
1012 accountHistoryTxTimer_,
1013 4s,
1014 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1015 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1016}
1017
1018void
1019NetworkOPsImp::processHeartbeatTimer()
1020{
1021 RclConsensusLogger clog(
1022 "Heartbeat Timer", mConsensus.validating(), m_journal);
1023 {
1024 std::unique_lock lock{app_.getMasterMutex()};
1025
1026 // VFALCO NOTE This is for diagnosing a crash on exit
1027 LoadManager& mgr(app_.getLoadManager());
1028 mgr.heartbeat();
1029
1030 std::size_t const numPeers = app_.overlay().size();
1031
1032 // do we have sufficient peers? If not, we are disconnected.
1033 if (numPeers < minPeerCount_)
1034 {
1035 if (mMode != OperatingMode::DISCONNECTED)
1036 {
1037 setMode(OperatingMode::DISCONNECTED);
1039 ss << "Node count (" << numPeers << ") has fallen "
1040 << "below required minimum (" << minPeerCount_ << ").";
1041 JLOG(m_journal.warn()) << ss.str();
1042 CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
1043 }
1044 else
1045 {
1046 CLOG(clog.ss())
1047 << "already DISCONNECTED. too few peers (" << numPeers
1048 << "), need at least " << minPeerCount_;
1049 }
1050
1051 // MasterMutex lock need not be held to call setHeartbeatTimer()
1052 lock.unlock();
1053 // We do not call mConsensus.timerEntry until there are enough
1054 // peers providing meaningful inputs to consensus
1055 setHeartbeatTimer();
1056
1057 return;
1058 }
1059
1060 if (mMode == OperatingMode::DISCONNECTED)
1061 {
1062 setMode(OperatingMode::CONNECTED);
1063 JLOG(m_journal.info())
1064 << "Node count (" << numPeers << ") is sufficient.";
1065 CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
1066 << " peers. ";
1067 }
1068
1069 // Check if the last validated ledger forces a change between these
1070 // states.
1071 auto origMode = mMode.load();
1072 CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
1073 if (mMode == OperatingMode::SYNCING)
1074 setMode(OperatingMode::SYNCING);
1075 else if (mMode == OperatingMode::CONNECTED)
1076 setMode(OperatingMode::CONNECTED);
1077 auto newMode = mMode.load();
1078 if (origMode != newMode)
1079 {
1080 CLOG(clog.ss())
1081 << ", changing to " << strOperatingMode(newMode, true);
1082 }
1083 CLOG(clog.ss()) << ". ";
1084 }
1085
1086 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());
1087
1088 CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
1089 const ConsensusPhase currPhase = mConsensus.phase();
1090 if (mLastConsensusPhase != currPhase)
1091 {
1092 reportConsensusStateChange(currPhase);
1093 mLastConsensusPhase = currPhase;
1094 CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
1095 }
1096 CLOG(clog.ss()) << ". ";
1097
1098 setHeartbeatTimer();
1099}
1100
1101void
1102NetworkOPsImp::processClusterTimer()
1103{
1104 if (app_.cluster().size() == 0)
1105 return;
1106
1107 using namespace std::chrono_literals;
1108
1109 bool const update = app_.cluster().update(
1110 app_.nodeIdentity().first,
1111 "",
1112 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1113 ? app_.getFeeTrack().getLocalFee()
1114 : 0,
1115 app_.timeKeeper().now());
1116
1117 if (!update)
1118 {
1119 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1120 setClusterTimer();
1121 return;
1122 }
1123
1124 protocol::TMCluster cluster;
1125 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1126 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1127 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1128 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1129 n.set_nodeload(node.getLoadFee());
1130 if (!node.name().empty())
1131 n.set_nodename(node.name());
1132 });
1133
1134 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1135 for (auto& item : gossip.items)
1136 {
1137 protocol::TMLoadSource& node = *cluster.add_loadsources();
1138 node.set_name(to_string(item.address));
1139 node.set_cost(item.balance);
1140 }
1141 app_.overlay().foreach(send_if(
1142 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1143 peer_in_cluster()));
1144 setClusterTimer();
1145}
1146
1147//------------------------------------------------------------------------------
1148
1150NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
1151 const
1152{
1153 if (mode == OperatingMode::FULL && admin)
1154 {
1155 auto const consensusMode = mConsensus.mode();
1156 if (consensusMode != ConsensusMode::wrongLedger)
1157 {
1158 if (consensusMode == ConsensusMode::proposing)
1159 return "proposing";
1160
1161 if (mConsensus.validating())
1162 return "validating";
1163 }
1164 }
1165
1166 return states_[static_cast<std::size_t>(mode)];
1167}
1168
1169void
1170NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1171{
1172 if (isNeedNetworkLedger())
1173 {
1174 // Nothing we can do if we've never been in sync
1175 return;
1176 }
1177
1178 // this is an asynchronous interface
1179 auto const trans = sterilize(*iTrans);
1180
1181 auto const txid = trans->getTransactionID();
1182 auto const flags = app_.getHashRouter().getFlags(txid);
1183
1184 if ((flags & SF_BAD) != 0)
1185 {
1186 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1187 return;
1188 }
1189
1190 try
1191 {
1192 auto const [validity, reason] = checkValidity(
1193 app_.getHashRouter(),
1194 *trans,
1195 m_ledgerMaster.getValidatedRules(),
1196 app_.config());
1197
1198 if (validity != Validity::Valid)
1199 {
1200 JLOG(m_journal.warn())
1201 << "Submitted transaction invalid: " << reason;
1202 return;
1203 }
1204 }
1205 catch (std::exception const& ex)
1206 {
1207 JLOG(m_journal.warn())
1208 << "Exception checking transaction " << txid << ": " << ex.what();
1209
1210 return;
1211 }
1212
1213 std::string reason;
1214
1215 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1216
1217 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1218 auto t = tx;
1219 processTransaction(t, false, false, FailHard::no);
1220 });
1221}
1222
1223void
1224NetworkOPsImp::processTransaction(
1225 std::shared_ptr<Transaction>& transaction,
1226 bool bUnlimited,
1227 bool bLocal,
1228 FailHard failType)
1229{
1230 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1231 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1232
1233 if ((newFlags & SF_BAD) != 0)
1234 {
1235 // cached bad
1236 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1237 transaction->setStatus(INVALID);
1238 transaction->setResult(temBAD_SIGNATURE);
1239 return;
1240 }
1241
1242 // NOTE eahennis - I think this check is redundant,
1243 // but I'm not 100% sure yet.
1244 // If so, only cost is looking up HashRouter flags.
1245 auto const view = m_ledgerMaster.getCurrentLedger();
1246 auto const [validity, reason] = checkValidity(
1247 app_.getHashRouter(),
1248 *transaction->getSTransaction(),
1249 view->rules(),
1250 app_.config());
1251 XRPL_ASSERT(
1252 validity == Validity::Valid,
1253 "ripple::NetworkOPsImp::processTransaction : valid validity");
1254
1255 // Not concerned with local checks at this point.
1256 if (validity == Validity::SigBad)
1257 {
1258 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1259 transaction->setStatus(INVALID);
1260 transaction->setResult(temBAD_SIGNATURE);
1261 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1262 return;
1263 }
1264
1265 // canonicalize can change our pointer
1266 app_.getMasterTransaction().canonicalize(&transaction);
1267
1268 if (bLocal)
1269 doTransactionSync(transaction, bUnlimited, failType);
1270 else
1271 doTransactionAsync(transaction, bUnlimited, failType);
1272}
1273
1274void
1275NetworkOPsImp::doTransactionAsync(
1276 std::shared_ptr<Transaction> transaction,
1277 bool bUnlimited,
1278 FailHard failType)
1279{
1280 std::lock_guard lock(mMutex);
1281
1282 if (transaction->getApplying())
1283 return;
1284
1285 mTransactions.push_back(
1286 TransactionStatus(transaction, bUnlimited, false, failType));
1287 transaction->setApplying();
1288
1289 if (mDispatchState == DispatchState::none)
1290 {
1291 if (m_job_queue.addJob(
1292 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1293 {
1294 mDispatchState = DispatchState::scheduled;
1295 }
1296 }
1297}
1298
1299void
1300NetworkOPsImp::doTransactionSync(
1301 std::shared_ptr<Transaction> transaction,
1302 bool bUnlimited,
1303 FailHard failType)
1304{
1305 std::unique_lock<std::mutex> lock(mMutex);
1306
1307 if (!transaction->getApplying())
1308 {
1309 mTransactions.push_back(
1310 TransactionStatus(transaction, bUnlimited, true, failType));
1311 transaction->setApplying();
1312 }
1313
1314 do
1315 {
1316 if (mDispatchState == DispatchState::running)
1317 {
1318 // A batch processing job is already running, so wait.
1319 mCond.wait(lock);
1320 }
1321 else
1322 {
1323 apply(lock);
1324
1325 if (mTransactions.size())
1326 {
1327 // More transactions need to be applied, but by another job.
1328 if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
1329 transactionBatch();
1330 }))
1331 {
1332 mDispatchState = DispatchState::scheduled;
1333 }
1334 }
1335 }
1336 } while (transaction->getApplying());
1337}
1338
1339void
1340NetworkOPsImp::transactionBatch()
1341{
1342 std::unique_lock<std::mutex> lock(mMutex);
1343
1344 if (mDispatchState == DispatchState::running)
1345 return;
1346
1347 while (mTransactions.size())
1348 {
1349 apply(lock);
1350 }
1351}
1352
1353void
1354NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
1355{
1357 std::vector<TransactionStatus> transactions;
1358 mTransactions.swap(transactions);
1359 XRPL_ASSERT(
1360 !transactions.empty(),
1361 "ripple::NetworkOPsImp::apply : non-empty transactions");
1362 XRPL_ASSERT(
1363 mDispatchState != DispatchState::running,
1364 "ripple::NetworkOPsImp::apply : is not running");
1365
1366 mDispatchState = DispatchState::running;
1367
1368 batchLock.unlock();
1369
1370 {
1371 std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
1372 bool changed = false;
1373 {
1374 std::unique_lock ledgerLock{
1375 m_ledgerMaster.peekMutex(), std::defer_lock};
1376 std::lock(masterLock, ledgerLock);
1377
1378 app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
1379 for (TransactionStatus& e : transactions)
1380 {
1381 // we check before adding to the batch
1382 ApplyFlags flags = tapNONE;
1383 if (e.admin)
1384 flags |= tapUNLIMITED;
1385
1386 if (e.failType == FailHard::yes)
1387 flags |= tapFAIL_HARD;
1388
1389 auto const result = app_.getTxQ().apply(
1390 app_, view, e.transaction->getSTransaction(), flags, j);
1391 e.result = result.ter;
1392 e.applied = result.applied;
1393 changed = changed || result.applied;
1394 }
1395 return changed;
1396 });
1397 }
1398 if (changed)
1399 reportFeeChange();
1400
1401 std::optional<LedgerIndex> validatedLedgerIndex;
1402 if (auto const l = m_ledgerMaster.getValidatedLedger())
1403 validatedLedgerIndex = l->info().seq;
1404
1405 auto newOL = app_.openLedger().current();
1406 for (TransactionStatus& e : transactions)
1407 {
1408 e.transaction->clearSubmitResult();
1409
1410 if (e.applied)
1411 {
1412 pubProposedTransaction(
1413 newOL, e.transaction->getSTransaction(), e.result);
1414 e.transaction->setApplied();
1415 }
1416
1417 e.transaction->setResult(e.result);
1418
1419 if (isTemMalformed(e.result))
1420 app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);
1421
1422#ifdef DEBUG
1423 if (e.result != tesSUCCESS)
1424 {
1425 std::string token, human;
1426
1427 if (transResultInfo(e.result, token, human))
1428 {
1429 JLOG(m_journal.info())
1430 << "TransactionResult: " << token << ": " << human;
1431 }
1432 }
1433#endif
1434
1435 bool addLocal = e.local;
1436
1437 if (e.result == tesSUCCESS)
1438 {
1439 JLOG(m_journal.debug())
1440 << "Transaction is now included in open ledger";
1441 e.transaction->setStatus(INCLUDED);
1442
1443 auto const& txCur = e.transaction->getSTransaction();
1444 auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);
1445 if (txNext)
1446 {
1447 std::string reason;
1448 auto const trans = sterilize(*txNext);
1449 auto t = std::make_shared<Transaction>(trans, reason, app_);
1450 submit_held.emplace_back(t, false, false, FailHard::no);
1451 t->setApplying();
1452 }
1453 }
1454 else if (e.result == tefPAST_SEQ)
1455 {
1456 // duplicate or conflict
1457 JLOG(m_journal.info()) << "Transaction is obsolete";
1458 e.transaction->setStatus(OBSOLETE);
1459 }
1460 else if (e.result == terQUEUED)
1461 {
1462 JLOG(m_journal.debug())
1463 << "Transaction is likely to claim a"
1464 << " fee, but is queued until fee drops";
1465
1466 e.transaction->setStatus(HELD);
1467 // Add to held transactions, because it could get
1468 // kicked out of the queue, and this will try to
1469 // put it back.
1470 m_ledgerMaster.addHeldTransaction(e.transaction);
1471 e.transaction->setQueued();
1472 e.transaction->setKept();
1473 }
1474 else if (isTerRetry(e.result))
1475 {
1476 if (e.failType != FailHard::yes)
1477 {
1478 // transaction should be held
1479 JLOG(m_journal.debug())
1480 << "Transaction should be held: " << e.result;
1481 e.transaction->setStatus(HELD);
1482 m_ledgerMaster.addHeldTransaction(e.transaction);
1483 e.transaction->setKept();
1484 }
1485 }
1486 else
1487 {
1488 JLOG(m_journal.debug())
1489 << "Status other than success " << e.result;
1490 e.transaction->setStatus(INVALID);
1491 }
1492
1493 auto const enforceFailHard =
1494 e.failType == FailHard::yes && !isTesSuccess(e.result);
1495
1496 if (addLocal && !enforceFailHard)
1497 {
1498 m_localTX->push_back(
1499 m_ledgerMaster.getCurrentLedgerIndex(),
1500 e.transaction->getSTransaction());
1501 e.transaction->setKept();
1502 }
1503
1504 if ((e.applied ||
1505 ((mMode != OperatingMode::FULL) &&
1506 (e.failType != FailHard::yes) && e.local) ||
1507 (e.result == terQUEUED)) &&
1508 !enforceFailHard)
1509 {
1510 auto const toSkip =
1511 app_.getHashRouter().shouldRelay(e.transaction->getID());
1512
1513 if (toSkip)
1514 {
1515 protocol::TMTransaction tx;
1516 Serializer s;
1517
1518 e.transaction->getSTransaction()->add(s);
1519 tx.set_rawtransaction(s.data(), s.size());
1520 tx.set_status(protocol::tsCURRENT);
1521 tx.set_receivetimestamp(
1522 app_.timeKeeper().now().time_since_epoch().count());
1523 tx.set_deferred(e.result == terQUEUED);
1524 // FIXME: This should be when we received it
1525 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1526 e.transaction->setBroadcast();
1527 }
1528 }
1529
1530 if (validatedLedgerIndex)
1531 {
1532 auto [fee, accountSeq, availableSeq] =
1533 app_.getTxQ().getTxRequiredFeeAndSeq(
1534 *newOL, e.transaction->getSTransaction());
1535 e.transaction->setCurrentLedgerState(
1536 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1537 }
1538 }
1539 }
1540
1541 batchLock.lock();
1542
1543 for (TransactionStatus& e : transactions)
1544 e.transaction->clearApplying();
1545
1546 if (!submit_held.empty())
1547 {
1548 if (mTransactions.empty())
1549 mTransactions.swap(submit_held);
1550 else
1551 for (auto& e : submit_held)
1552 mTransactions.push_back(std::move(e));
1553 }
1554
1555 mCond.notify_all();
1556
1557 mDispatchState = DispatchState::none;
1558}
1559
1560//
1561// Owner functions
1562//
1563
1565NetworkOPsImp::getOwnerInfo(
1567 AccountID const& account)
1568{
1569 Json::Value jvObjects(Json::objectValue);
1570 auto root = keylet::ownerDir(account);
1571 auto sleNode = lpLedger->read(keylet::page(root));
1572 if (sleNode)
1573 {
1574 std::uint64_t uNodeDir;
1575
1576 do
1577 {
1578 for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1579 {
1580 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1581 XRPL_ASSERT(
1582 sleCur,
1583 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1584
1585 switch (sleCur->getType())
1586 {
1587 case ltOFFER:
1588 if (!jvObjects.isMember(jss::offers))
1589 jvObjects[jss::offers] =
1591
1592 jvObjects[jss::offers].append(
1593 sleCur->getJson(JsonOptions::none));
1594 break;
1595
1596 case ltRIPPLE_STATE:
1597 if (!jvObjects.isMember(jss::ripple_lines))
1598 {
1599 jvObjects[jss::ripple_lines] =
1601 }
1602
1603 jvObjects[jss::ripple_lines].append(
1604 sleCur->getJson(JsonOptions::none));
1605 break;
1606
1607 case ltACCOUNT_ROOT:
1608 case ltDIR_NODE:
1609 default:
1610 UNREACHABLE(
1611 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1612 "type");
1613 break;
1614 }
1615 }
1616
1617 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1618
1619 if (uNodeDir)
1620 {
1621 sleNode = lpLedger->read(keylet::page(root, uNodeDir));
1622 XRPL_ASSERT(
1623 sleNode,
1624 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1625 }
1626 } while (uNodeDir);
1627 }
1628
1629 return jvObjects;
1630}
1631
1632//
1633// Other
1634//
1635
1636inline bool
1637NetworkOPsImp::isBlocked()
1638{
1639 return isAmendmentBlocked() || isUNLBlocked();
1640}
1641
inline bool
NetworkOPsImp::isAmendmentBlocked()
{
    // Accessor for the flag set by setAmendmentBlocked().
    return amendmentBlocked_;
}
1647
void
NetworkOPsImp::setAmendmentBlocked()
{
    // Mark the server amendment blocked and drop the operating mode back to
    // CONNECTED (see also setMode, which clamps blocked servers there).
    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1654
inline bool
NetworkOPsImp::isAmendmentWarned()
{
    // The warning is suppressed once the server is actually amendment
    // blocked: the stronger condition supersedes it.
    return !amendmentBlocked_ && amendmentWarned_;
}
1660
inline void
NetworkOPsImp::setAmendmentWarned()
{
    // Raise the "unsupported amendment has majority" warning flag.
    amendmentWarned_ = true;
}
1666
inline void
NetworkOPsImp::clearAmendmentWarned()
{
    // Clear the "unsupported amendment has majority" warning flag.
    amendmentWarned_ = false;
}
1672
inline bool
NetworkOPsImp::isUNLBlocked()
{
    // Accessor for the flag set by setUNLBlocked().
    return unlBlocked_;
}
1678
void
NetworkOPsImp::setUNLBlocked()
{
    // Mark the server UNL blocked and drop the operating mode back to
    // CONNECTED (see also setMode, which clamps blocked servers there).
    unlBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1685
inline void
NetworkOPsImp::clearUNLBlocked()
{
    // Clear the UNL-blocked flag (does not restore the operating mode).
    unlBlocked_ = false;
}
1691
1692bool
1693NetworkOPsImp::checkLastClosedLedger(
1694 const Overlay::PeerSequence& peerList,
1695 uint256& networkClosed)
1696{
1697 // Returns true if there's an *abnormal* ledger issue, normal changing in
1698 // TRACKING mode should return false. Do we have sufficient validations for
1699 // our last closed ledger? Or do sufficient nodes agree? And do we have no
1700 // better ledger available? If so, we are either tracking or full.
1701
1702 JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";
1703
1704 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1705
1706 if (!ourClosed)
1707 return false;
1708
1709 uint256 closedLedger = ourClosed->info().hash;
1710 uint256 prevClosedLedger = ourClosed->info().parentHash;
1711 JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
1712 JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;
1713
1714 //-------------------------------------------------------------------------
1715 // Determine preferred last closed ledger
1716
1717 auto& validations = app_.getValidations();
1718 JLOG(m_journal.debug())
1719 << "ValidationTrie " << Json::Compact(validations.getJsonTrie());
1720
1721 // Will rely on peer LCL if no trusted validations exist
1723 peerCounts[closedLedger] = 0;
1724 if (mMode >= OperatingMode::TRACKING)
1725 peerCounts[closedLedger]++;
1726
1727 for (auto& peer : peerList)
1728 {
1729 uint256 peerLedger = peer->getClosedLedgerHash();
1730
1731 if (peerLedger.isNonZero())
1732 ++peerCounts[peerLedger];
1733 }
1734
1735 for (auto const& it : peerCounts)
1736 JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;
1737
1738 uint256 preferredLCL = validations.getPreferredLCL(
1739 RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
1740 m_ledgerMaster.getValidLedgerIndex(),
1741 peerCounts);
1742
1743 bool switchLedgers = preferredLCL != closedLedger;
1744 if (switchLedgers)
1745 closedLedger = preferredLCL;
1746 //-------------------------------------------------------------------------
1747 if (switchLedgers && (closedLedger == prevClosedLedger))
1748 {
1749 // don't switch to our own previous ledger
1750 JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
1751 networkClosed = ourClosed->info().hash;
1752 switchLedgers = false;
1753 }
1754 else
1755 networkClosed = closedLedger;
1756
1757 if (!switchLedgers)
1758 return false;
1759
1760 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1761
1762 if (!consensus)
1763 consensus = app_.getInboundLedgers().acquire(
1764 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1765
1766 if (consensus &&
1767 (!m_ledgerMaster.canBeCurrent(consensus) ||
1768 !m_ledgerMaster.isCompatible(
1769 *consensus, m_journal.debug(), "Not switching")))
1770 {
1771 // Don't switch to a ledger not on the validated chain
1772 // or with an invalid close time or sequence
1773 networkClosed = ourClosed->info().hash;
1774 return false;
1775 }
1776
1777 JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
1778 JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
1779 << getJson({*ourClosed, {}});
1780 JLOG(m_journal.info()) << "Net LCL " << closedLedger;
1781
1782 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1783 {
1784 setMode(OperatingMode::CONNECTED);
1785 }
1786
1787 if (consensus)
1788 {
1789 // FIXME: If this rewinds the ledger sequence, or has the same
1790 // sequence, we should update the status on any stored transactions
1791 // in the invalidated ledgers.
1792 switchLastClosedLedger(consensus);
1793 }
1794
1795 return true;
1796}
1797
1798void
1799NetworkOPsImp::switchLastClosedLedger(
1800 std::shared_ptr<Ledger const> const& newLCL)
1801{
1802 // set the newLCL as our last closed ledger -- this is abnormal code
1803 JLOG(m_journal.error())
1804 << "JUMP last closed ledger to " << newLCL->info().hash;
1805
1806 clearNeedNetworkLedger();
1807
1808 // Update fee computations.
1809 app_.getTxQ().processClosedLedger(app_, *newLCL, true);
1810
1811 // Caller must own master lock
1812 {
1813 // Apply tx in old open ledger to new
1814 // open ledger. Then apply local tx.
1815
1816 auto retries = m_localTX->getTxSet();
1817 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1819 if (lastVal)
1820 rules = makeRulesGivenLedger(*lastVal, app_.config().features);
1821 else
1822 rules.emplace(app_.config().features);
1823 app_.openLedger().accept(
1824 app_,
1825 *rules,
1826 newLCL,
1827 OrderedTxs({}),
1828 false,
1829 retries,
1830 tapNONE,
1831 "jump",
1832 [&](OpenView& view, beast::Journal j) {
1833 // Stuff the ledger with transactions from the queue.
1834 return app_.getTxQ().accept(app_, view);
1835 });
1836 }
1837
1838 m_ledgerMaster.switchLCL(newLCL);
1839
1840 protocol::TMStatusChange s;
1841 s.set_newevent(protocol::neSWITCHED_LEDGER);
1842 s.set_ledgerseq(newLCL->info().seq);
1843 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1844 s.set_ledgerhashprevious(
1845 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
1846 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
1847
1848 app_.overlay().foreach(
1849 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
1850}
1851
1852bool
1853NetworkOPsImp::beginConsensus(
1854 uint256 const& networkClosed,
1856{
1857 XRPL_ASSERT(
1858 networkClosed.isNonZero(),
1859 "ripple::NetworkOPsImp::beginConsensus : nonzero input");
1860
1861 auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
1862
1863 JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
1864 << " with LCL " << closingInfo.parentHash;
1865
1866 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
1867
1868 if (!prevLedger)
1869 {
1870 // this shouldn't happen unless we jump ledgers
1871 if (mMode == OperatingMode::FULL)
1872 {
1873 JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
1874 setMode(OperatingMode::TRACKING);
1875 CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
1876 }
1877
1878 CLOG(clog) << "beginConsensus no previous ledger. ";
1879 return false;
1880 }
1881
1882 XRPL_ASSERT(
1883 prevLedger->info().hash == closingInfo.parentHash,
1884 "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
1885 "parent");
1886 XRPL_ASSERT(
1887 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
1888 "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
1889 "hash");
1890
1891 if (prevLedger->rules().enabled(featureNegativeUNL))
1892 app_.validators().setNegativeUNL(prevLedger->negativeUNL());
1893 TrustChanges const changes = app_.validators().updateTrusted(
1894 app_.getValidations().getCurrentNodeIDs(),
1895 closingInfo.parentCloseTime,
1896 *this,
1897 app_.overlay(),
1898 app_.getHashRouter());
1899
1900 if (!changes.added.empty() || !changes.removed.empty())
1901 {
1902 app_.getValidations().trustChanged(changes.added, changes.removed);
1903 // Update the AmendmentTable so it tracks the current validators.
1904 app_.getAmendmentTable().trustChanged(
1905 app_.validators().getQuorumKeys().second);
1906 }
1907
1908 mConsensus.startRound(
1909 app_.timeKeeper().closeTime(),
1910 networkClosed,
1911 prevLedger,
1912 changes.removed,
1913 changes.added,
1914 clog);
1915
1916 const ConsensusPhase currPhase = mConsensus.phase();
1917 if (mLastConsensusPhase != currPhase)
1918 {
1919 reportConsensusStateChange(currPhase);
1920 mLastConsensusPhase = currPhase;
1921 }
1922
1923 JLOG(m_journal.debug()) << "Initiating consensus engine";
1924 return true;
1925}
1926
1927bool
1928NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
1929{
1930 auto const& peerKey = peerPos.publicKey();
1931 if (validatorPK_ == peerKey || validatorMasterPK_ == peerKey)
1932 {
1933 // Could indicate a operator misconfiguration where two nodes are
1934 // running with the same validator key configured, so this isn't fatal,
1935 // and it doesn't necessarily indicate peer misbehavior. But since this
1936 // is a trusted message, it could be a very big deal. Either way, we
1937 // don't want to relay the proposal. Note that the byzantine behavior
1938 // detection in handleNewValidation will notify other peers.
1939 UNREACHABLE(
1940 "ripple::NetworkOPsImp::processTrustedProposal : received own "
1941 "proposal");
1942 JLOG(m_journal.error())
1943 << "Received a TRUSTED proposal signed with my key from a peer";
1944 return false;
1945 }
1946
1947 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
1948}
1949
1950void
1951NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
1952{
1953 // We now have an additional transaction set
1954 // either created locally during the consensus process
1955 // or acquired from a peer
1956
1957 // Inform peers we have this set
1958 protocol::TMHaveTransactionSet msg;
1959 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
1960 msg.set_status(protocol::tsHAVE);
1961 app_.overlay().foreach(
1962 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
1963
1964 // We acquired it because consensus asked us to
1965 if (fromAcquire)
1966 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
1967}
1968
// Called when a consensus round ends: retire obsolete peer status, check
// whether our LCL matches the network's preference, promote the operating
// mode when appropriate, and start the next round.
void
NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
{
    uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;

    // Any peer still advertising our previous LCL's parent is stale.
    for (auto const& it : app_.overlay().getActivePeers())
    {
        if (it && (it->getClosedLedgerHash() == deadLedger))
        {
            JLOG(m_journal.trace()) << "Killing obsolete peer status";
            it->cycleStatus();
        }
    }

    uint256 networkClosed;
    bool ledgerChange =
        checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);

    if (networkClosed.isZero())
    {
        CLOG(clog) << "endConsensus last closed ledger is zero. ";
        return;
    }

    // WRITEME: Unless we are in FULL and in the process of doing a consensus,
    // we must count how many nodes share our LCL, how many nodes disagree with
    // our LCL, and how many validations our LCL has. We also want to check
    // timing to make sure there shouldn't be a newer LCL. We need this
    // information to do the next three tests.

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::SYNCING)) &&
        !ledgerChange)
    {
        // Count number of peers that agree with us and UNL nodes whose
        // validations we have for LCL. If the ledger is good enough, go to
        // TRACKING - TODO
        if (!needNetworkLedger_)
            setMode(OperatingMode::TRACKING);
    }

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::TRACKING)) &&
        !ledgerChange)
    {
        // check if the ledger is good enough to go to FULL
        // Note: Do not go to FULL if we don't have the previous ledger
        // check if the ledger is bad enough to go to CONNECTED -- TODO
        auto current = m_ledgerMaster.getCurrentLedger();
        if (app_.timeKeeper().now() < (current->info().parentCloseTime +
                                       2 * current->info().closeTimeResolution))
        {
            setMode(OperatingMode::FULL);
        }
    }

    beginConsensus(networkClosed, clog);
}
2027
2028void
2029NetworkOPsImp::consensusViewChange()
2030{
2031 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2032 {
2033 setMode(OperatingMode::CONNECTED);
2034 }
2035}
2036
2037void
2038NetworkOPsImp::pubManifest(Manifest const& mo)
2039{
2040 // VFALCO consider std::shared_mutex
2041 std::lock_guard sl(mSubLock);
2042
2043 if (!mStreamMaps[sManifests].empty())
2044 {
2046
2047 jvObj[jss::type] = "manifestReceived";
2048 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
2049 if (mo.signingKey)
2050 jvObj[jss::signing_key] =
2051 toBase58(TokenType::NodePublic, *mo.signingKey);
2052 jvObj[jss::seq] = Json::UInt(mo.sequence);
2053 if (auto sig = mo.getSignature())
2054 jvObj[jss::signature] = strHex(*sig);
2055 jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
2056 if (!mo.domain.empty())
2057 jvObj[jss::domain] = mo.domain;
2058 jvObj[jss::manifest] = strHex(mo.serialized);
2059
2060 for (auto i = mStreamMaps[sManifests].begin();
2061 i != mStreamMaps[sManifests].end();)
2062 {
2063 if (auto p = i->second.lock())
2064 {
2065 p->send(jvObj, true);
2066 ++i;
2067 }
2068 else
2069 {
2070 i = mStreamMaps[sManifests].erase(i);
2071 }
2072 }
2073 }
2074}
2075
// Snapshot of fee-related server state: the local load factor/base from the
// LoadFeeTrack, the base transaction fee, and the TxQ escalation metrics.
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    XRPAmount fee,
    TxQ::Metrics&& escalationMetrics,
    LoadFeeTrack const& loadFeeTrack)
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , baseFee{fee}
    , em{std::move(escalationMetrics)}
{
}
2086
2087bool
2089 NetworkOPsImp::ServerFeeSummary const& b) const
2090{
2091 if (loadFactorServer != b.loadFactorServer ||
2092 loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
2093 em.has_value() != b.em.has_value())
2094 return true;
2095
2096 if (em && b.em)
2097 {
2098 return (
2099 em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
2100 em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
2101 em->referenceFeeLevel != b.em->referenceFeeLevel);
2102 }
2103
2104 return false;
2105}
2106
// Clamp a uint64 value to the uint32 range, because Json::Value cannot
// represent 64-bit unsigned integers.
//
// NOTE(extraction repair): the signature line and the `max32` declaration
// were dropped by the extraction; restored here. The narrowing conversion
// on return is now made explicit.
static std::uint32_t
trunc32(std::uint64_t v)
{
    constexpr static std::uint64_t max32 =
        std::numeric_limits<std::uint32_t>::max();

    return static_cast<std::uint32_t>(std::min(max32, v));
}
2115
2116void
2118{
2119 // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
2120 // list into a local array while holding the lock then release
2121 // the lock and call send on everyone.
2122 //
2124
2125 if (!mStreamMaps[sServer].empty())
2126 {
2128
2130 app_.openLedger().current()->fees().base,
2132 app_.getFeeTrack()};
2133
2134 jvObj[jss::type] = "serverStatus";
2135 jvObj[jss::server_status] = strOperatingMode();
2136 jvObj[jss::load_base] = f.loadBaseServer;
2137 jvObj[jss::load_factor_server] = f.loadFactorServer;
2138 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2139
2140 if (f.em)
2141 {
2142 auto const loadFactor = std::max(
2143 safe_cast<std::uint64_t>(f.loadFactorServer),
2144 mulDiv(
2145 f.em->openLedgerFeeLevel,
2146 f.loadBaseServer,
2147 f.em->referenceFeeLevel)
2149
2150 jvObj[jss::load_factor] = trunc32(loadFactor);
2151 jvObj[jss::load_factor_fee_escalation] =
2152 f.em->openLedgerFeeLevel.jsonClipped();
2153 jvObj[jss::load_factor_fee_queue] =
2154 f.em->minProcessingFeeLevel.jsonClipped();
2155 jvObj[jss::load_factor_fee_reference] =
2156 f.em->referenceFeeLevel.jsonClipped();
2157 }
2158 else
2159 jvObj[jss::load_factor] = f.loadFactorServer;
2160
2161 mLastFeeSummary = f;
2162
2163 for (auto i = mStreamMaps[sServer].begin();
2164 i != mStreamMaps[sServer].end();)
2165 {
2166 InfoSub::pointer p = i->second.lock();
2167
2168 // VFALCO TODO research the possibility of using thread queues and
2169 // linearizing the deletion of subscribers with the
2170 // sending of JSON data.
2171 if (p)
2172 {
2173 p->send(jvObj, true);
2174 ++i;
2175 }
2176 else
2177 {
2178 i = mStreamMaps[sServer].erase(i);
2179 }
2180 }
2181 }
2182}
2183
2184void
2186{
2188
2189 auto& streamMap = mStreamMaps[sConsensusPhase];
2190 if (!streamMap.empty())
2191 {
2193 jvObj[jss::type] = "consensusPhase";
2194 jvObj[jss::consensus] = to_string(phase);
2195
2196 for (auto i = streamMap.begin(); i != streamMap.end();)
2197 {
2198 if (auto p = i->second.lock())
2199 {
2200 p->send(jvObj, true);
2201 ++i;
2202 }
2203 else
2204 {
2205 i = streamMap.erase(i);
2206 }
2207 }
2208 }
2209}
2210
2211void
2213{
2214 // VFALCO consider std::shared_mutex
2216
2217 if (!mStreamMaps[sValidations].empty())
2218 {
2220
2221 auto const signerPublic = val->getSignerPublic();
2222
2223 jvObj[jss::type] = "validationReceived";
2224 jvObj[jss::validation_public_key] =
2225 toBase58(TokenType::NodePublic, signerPublic);
2226 jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
2227 jvObj[jss::signature] = strHex(val->getSignature());
2228 jvObj[jss::full] = val->isFull();
2229 jvObj[jss::flags] = val->getFlags();
2230 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2231 jvObj[jss::data] = strHex(val->getSerializer().slice());
2232
2233 if (auto version = (*val)[~sfServerVersion])
2234 jvObj[jss::server_version] = std::to_string(*version);
2235
2236 if (auto cookie = (*val)[~sfCookie])
2237 jvObj[jss::cookie] = std::to_string(*cookie);
2238
2239 if (auto hash = (*val)[~sfValidatedHash])
2240 jvObj[jss::validated_hash] = strHex(*hash);
2241
2242 auto const masterKey =
2243 app_.validatorManifests().getMasterKey(signerPublic);
2244
2245 if (masterKey != signerPublic)
2246 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
2247
2248 // NOTE *seq is a number, but old API versions used string. We replace
2249 // number with a string using MultiApiJson near end of this function
2250 if (auto const seq = (*val)[~sfLedgerSequence])
2251 jvObj[jss::ledger_index] = *seq;
2252
2253 if (val->isFieldPresent(sfAmendments))
2254 {
2255 jvObj[jss::amendments] = Json::Value(Json::arrayValue);
2256 for (auto const& amendment : val->getFieldV256(sfAmendments))
2257 jvObj[jss::amendments].append(to_string(amendment));
2258 }
2259
2260 if (auto const closeTime = (*val)[~sfCloseTime])
2261 jvObj[jss::close_time] = *closeTime;
2262
2263 if (auto const loadFee = (*val)[~sfLoadFee])
2264 jvObj[jss::load_fee] = *loadFee;
2265
2266 if (auto const baseFee = val->at(~sfBaseFee))
2267 jvObj[jss::base_fee] = static_cast<double>(*baseFee);
2268
2269 if (auto const reserveBase = val->at(~sfReserveBase))
2270 jvObj[jss::reserve_base] = *reserveBase;
2271
2272 if (auto const reserveInc = val->at(~sfReserveIncrement))
2273 jvObj[jss::reserve_inc] = *reserveInc;
2274
2275 // (The ~ operator converts the Proxy to a std::optional, which
2276 // simplifies later operations)
2277 if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2278 baseFeeXRP && baseFeeXRP->native())
2279 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2280
2281 if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2282 reserveBaseXRP && reserveBaseXRP->native())
2283 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2284
2285 if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2286 reserveIncXRP && reserveIncXRP->native())
2287 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2288
2289 // NOTE Use MultiApiJson to publish two slightly different JSON objects
2290 // for consumers supporting different API versions
2291 MultiApiJson multiObj{jvObj};
2292 multiObj.visit(
2293 RPC::apiVersion<1>, //
2294 [](Json::Value& jvTx) {
2295 // Type conversion for older API versions to string
2296 if (jvTx.isMember(jss::ledger_index))
2297 {
2298 jvTx[jss::ledger_index] =
2299 std::to_string(jvTx[jss::ledger_index].asUInt());
2300 }
2301 });
2302
2303 for (auto i = mStreamMaps[sValidations].begin();
2304 i != mStreamMaps[sValidations].end();)
2305 {
2306 if (auto p = i->second.lock())
2307 {
2308 multiObj.visit(
2309 p->getApiVersion(), //
2310 [&](Json::Value const& jv) { p->send(jv, true); });
2311 ++i;
2312 }
2313 else
2314 {
2315 i = mStreamMaps[sValidations].erase(i);
2316 }
2317 }
2318 }
2319}
2320
2321void
2323{
2325
2326 if (!mStreamMaps[sPeerStatus].empty())
2327 {
2328 Json::Value jvObj(func());
2329
2330 jvObj[jss::type] = "peerStatusChange";
2331
2332 for (auto i = mStreamMaps[sPeerStatus].begin();
2333 i != mStreamMaps[sPeerStatus].end();)
2334 {
2335 InfoSub::pointer p = i->second.lock();
2336
2337 if (p)
2338 {
2339 p->send(jvObj, true);
2340 ++i;
2341 }
2342 else
2343 {
2344 i = mStreamMaps[sPeerStatus].erase(i);
2345 }
2346 }
2347 }
2348}
2349
2350void
2352{
2353 using namespace std::chrono_literals;
2354 if (om == OperatingMode::CONNECTED)
2355 {
2358 }
2359 else if (om == OperatingMode::SYNCING)
2360 {
2363 }
2364
2365 if ((om > OperatingMode::CONNECTED) && isBlocked())
2367
2368 if (mMode == om)
2369 return;
2370
2371 mMode = om;
2372
2373 accounting_.mode(om);
2374
2375 JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
2376 pubServer();
2377}
2378
2379bool
2382 std::string const& source)
2383{
2384 JLOG(m_journal.trace())
2385 << "recvValidation " << val->getLedgerHash() << " from " << source;
2386
2388 BypassAccept bypassAccept = BypassAccept::no;
2389 try
2390 {
2391 if (pendingValidations_.contains(val->getLedgerHash()))
2392 bypassAccept = BypassAccept::yes;
2393 else
2394 pendingValidations_.insert(val->getLedgerHash());
2395 scope_unlock unlock(lock);
2396 handleNewValidation(app_, val, source, bypassAccept, m_journal);
2397 }
2398 catch (std::exception const& e)
2399 {
2400 JLOG(m_journal.warn())
2401 << "Exception thrown for handling new validation "
2402 << val->getLedgerHash() << ": " << e.what();
2403 }
2404 catch (...)
2405 {
2406 JLOG(m_journal.warn())
2407 << "Unknown exception thrown for handling new validation "
2408 << val->getLedgerHash();
2409 }
2410 if (bypassAccept == BypassAccept::no)
2411 {
2412 pendingValidations_.erase(val->getLedgerHash());
2413 }
2414 lock.unlock();
2415
2416 pubValidation(val);
2417
2418 JLOG(m_journal.debug()) << [this, &val]() -> auto {
2420 ss << "VALIDATION: " << val->render() << " master_key: ";
2421 auto master = app_.validators().getTrustedKey(val->getSignerPublic());
2422 if (master)
2423 {
2424 ss << toBase58(TokenType::NodePublic, *master);
2425 }
2426 else
2427 {
2428 ss << "none";
2429 }
2430 return ss.str();
2431 }();
2432
2433 // We will always relay trusted validations; if configured, we will
2434 // also relay all untrusted validations.
2435 return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2436}
2437
2440{
2441 return mConsensus.getJson(true);
2442}
2443
2445NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2446{
2448
2449 // System-level warnings
2450 {
2451 Json::Value warnings{Json::arrayValue};
2452 if (isAmendmentBlocked())
2453 {
2454 Json::Value& w = warnings.append(Json::objectValue);
2455 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2456 w[jss::message] =
2457 "This server is amendment blocked, and must be updated to be "
2458 "able to stay in sync with the network.";
2459 }
2460 if (isUNLBlocked())
2461 {
2462 Json::Value& w = warnings.append(Json::objectValue);
2463 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2464 w[jss::message] =
2465 "This server has an expired validator list. validators.txt "
2466 "may be incorrectly configured or some [validator_list_sites] "
2467 "may be unreachable.";
2468 }
2469 if (admin && isAmendmentWarned())
2470 {
2471 Json::Value& w = warnings.append(Json::objectValue);
2472 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2473 w[jss::message] =
2474 "One or more unsupported amendments have reached majority. "
2475 "Upgrade to the latest version before they are activated "
2476 "to avoid being amendment blocked.";
2477 if (auto const expected =
2479 {
2480 auto& d = w[jss::details] = Json::objectValue;
2481 d[jss::expected_date] = expected->time_since_epoch().count();
2482 d[jss::expected_date_UTC] = to_string(*expected);
2483 }
2484 }
2485
2486 if (warnings.size())
2487 info[jss::warnings] = std::move(warnings);
2488 }
2489
2490 // hostid: unique string describing the machine
2491 if (human)
2492 info[jss::hostid] = getHostId(admin);
2493
2494 // domain: if configured with a domain, report it:
2495 if (!app_.config().SERVER_DOMAIN.empty())
2496 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2497
2498 info[jss::build_version] = BuildInfo::getVersionString();
2499
2500 info[jss::server_state] = strOperatingMode(admin);
2501
2502 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2504
2506 info[jss::network_ledger] = "waiting";
2507
2508 info[jss::validation_quorum] =
2509 static_cast<Json::UInt>(app_.validators().quorum());
2510
2511 if (admin)
2512 {
2513 switch (app_.config().NODE_SIZE)
2514 {
2515 case 0:
2516 info[jss::node_size] = "tiny";
2517 break;
2518 case 1:
2519 info[jss::node_size] = "small";
2520 break;
2521 case 2:
2522 info[jss::node_size] = "medium";
2523 break;
2524 case 3:
2525 info[jss::node_size] = "large";
2526 break;
2527 case 4:
2528 info[jss::node_size] = "huge";
2529 break;
2530 }
2531
2532 auto when = app_.validators().expires();
2533
2534 if (!human)
2535 {
2536 if (when)
2537 info[jss::validator_list_expires] =
2538 safe_cast<Json::UInt>(when->time_since_epoch().count());
2539 else
2540 info[jss::validator_list_expires] = 0;
2541 }
2542 else
2543 {
2544 auto& x = (info[jss::validator_list] = Json::objectValue);
2545
2546 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2547
2548 if (when)
2549 {
2550 if (*when == TimeKeeper::time_point::max())
2551 {
2552 x[jss::expiration] = "never";
2553 x[jss::status] = "active";
2554 }
2555 else
2556 {
2557 x[jss::expiration] = to_string(*when);
2558
2559 if (*when > app_.timeKeeper().now())
2560 x[jss::status] = "active";
2561 else
2562 x[jss::status] = "expired";
2563 }
2564 }
2565 else
2566 {
2567 x[jss::status] = "unknown";
2568 x[jss::expiration] = "unknown";
2569 }
2570 }
2571
2572#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2573 {
2574 auto& x = (info[jss::git] = Json::objectValue);
2575#ifdef GIT_COMMIT_HASH
2576 x[jss::hash] = GIT_COMMIT_HASH;
2577#endif
2578#ifdef GIT_BRANCH
2579 x[jss::branch] = GIT_BRANCH;
2580#endif
2581 }
2582#endif
2583 }
2584 info[jss::io_latency_ms] =
2585 static_cast<Json::UInt>(app_.getIOLatency().count());
2586
2587 if (admin)
2588 {
2589 if (auto const localPubKey = app_.validators().localPublicKey();
2590 localPubKey && app_.getValidationPublicKey())
2591 {
2592 info[jss::pubkey_validator] =
2593 toBase58(TokenType::NodePublic, localPubKey.value());
2594 }
2595 else
2596 {
2597 info[jss::pubkey_validator] = "none";
2598 }
2599 }
2600
2601 if (counters)
2602 {
2603 info[jss::counters] = app_.getPerfLog().countersJson();
2604
2605 Json::Value nodestore(Json::objectValue);
2606 app_.getNodeStore().getCountsJson(nodestore);
2607 info[jss::counters][jss::nodestore] = nodestore;
2608 info[jss::current_activities] = app_.getPerfLog().currentJson();
2609 }
2610
2611 info[jss::pubkey_node] =
2613
2614 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2615
2617 info[jss::amendment_blocked] = true;
2618
2619 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2620
2621 if (fp != 0)
2622 info[jss::fetch_pack] = Json::UInt(fp);
2623
2624 info[jss::peers] = Json::UInt(app_.overlay().size());
2625
2626 Json::Value lastClose = Json::objectValue;
2627 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2628
2629 if (human)
2630 {
2631 lastClose[jss::converge_time_s] =
2633 }
2634 else
2635 {
2636 lastClose[jss::converge_time] =
2638 }
2639
2640 info[jss::last_close] = lastClose;
2641
2642 // info[jss::consensus] = mConsensus.getJson();
2643
2644 if (admin)
2645 info[jss::load] = m_job_queue.getJson();
2646
2647 if (auto const netid = app_.overlay().networkID())
2648 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2649
2650 auto const escalationMetrics =
2652
2653 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2654 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2655 /* Scale the escalated fee level to unitless "load factor".
2656 In practice, this just strips the units, but it will continue
2657 to work correctly if either base value ever changes. */
2658 auto const loadFactorFeeEscalation =
2659 mulDiv(
2660 escalationMetrics.openLedgerFeeLevel,
2661 loadBaseServer,
2662 escalationMetrics.referenceFeeLevel)
2664
2665 auto const loadFactor = std::max(
2666 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2667
2668 if (!human)
2669 {
2670 info[jss::load_base] = loadBaseServer;
2671 info[jss::load_factor] = trunc32(loadFactor);
2672 info[jss::load_factor_server] = loadFactorServer;
2673
2674 /* Json::Value doesn't support uint64, so clamp to max
2675 uint32 value. This is mostly theoretical, since there
2676 probably isn't enough extant XRP to drive the factor
2677 that high.
2678 */
2679 info[jss::load_factor_fee_escalation] =
2680 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2681 info[jss::load_factor_fee_queue] =
2682 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2683 info[jss::load_factor_fee_reference] =
2684 escalationMetrics.referenceFeeLevel.jsonClipped();
2685 }
2686 else
2687 {
2688 info[jss::load_factor] =
2689 static_cast<double>(loadFactor) / loadBaseServer;
2690
2691 if (loadFactorServer != loadFactor)
2692 info[jss::load_factor_server] =
2693 static_cast<double>(loadFactorServer) / loadBaseServer;
2694
2695 if (admin)
2696 {
2698 if (fee != loadBaseServer)
2699 info[jss::load_factor_local] =
2700 static_cast<double>(fee) / loadBaseServer;
2701 fee = app_.getFeeTrack().getRemoteFee();
2702 if (fee != loadBaseServer)
2703 info[jss::load_factor_net] =
2704 static_cast<double>(fee) / loadBaseServer;
2705 fee = app_.getFeeTrack().getClusterFee();
2706 if (fee != loadBaseServer)
2707 info[jss::load_factor_cluster] =
2708 static_cast<double>(fee) / loadBaseServer;
2709 }
2710 if (escalationMetrics.openLedgerFeeLevel !=
2711 escalationMetrics.referenceFeeLevel &&
2712 (admin || loadFactorFeeEscalation != loadFactor))
2713 info[jss::load_factor_fee_escalation] =
2714 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2715 escalationMetrics.referenceFeeLevel);
2716 if (escalationMetrics.minProcessingFeeLevel !=
2717 escalationMetrics.referenceFeeLevel)
2718 info[jss::load_factor_fee_queue] =
2719 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2720 escalationMetrics.referenceFeeLevel);
2721 }
2722
2723 bool valid = false;
2724 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2725
2726 if (lpClosed)
2727 valid = true;
2728 else
2729 lpClosed = m_ledgerMaster.getClosedLedger();
2730
2731 if (lpClosed)
2732 {
2733 XRPAmount const baseFee = lpClosed->fees().base;
2735 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2736 l[jss::hash] = to_string(lpClosed->info().hash);
2737
2738 if (!human)
2739 {
2740 l[jss::base_fee] = baseFee.jsonClipped();
2741 l[jss::reserve_base] =
2742 lpClosed->fees().accountReserve(0).jsonClipped();
2743 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2744 l[jss::close_time] = Json::Value::UInt(
2745 lpClosed->info().closeTime.time_since_epoch().count());
2746 }
2747 else
2748 {
2749 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2750 l[jss::reserve_base_xrp] =
2751 lpClosed->fees().accountReserve(0).decimalXRP();
2752 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2753
2754 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2755 std::abs(closeOffset.count()) >= 60)
2756 l[jss::close_time_offset] =
2757 static_cast<std::uint32_t>(closeOffset.count());
2758
2759 constexpr std::chrono::seconds highAgeThreshold{1000000};
2761 {
2762 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2763 l[jss::age] =
2764 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2765 }
2766 else
2767 {
2768 auto lCloseTime = lpClosed->info().closeTime;
2769 auto closeTime = app_.timeKeeper().closeTime();
2770 if (lCloseTime <= closeTime)
2771 {
2772 using namespace std::chrono_literals;
2773 auto age = closeTime - lCloseTime;
2774 l[jss::age] =
2775 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2776 }
2777 }
2778 }
2779
2780 if (valid)
2781 info[jss::validated_ledger] = l;
2782 else
2783 info[jss::closed_ledger] = l;
2784
2785 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2786 if (!lpPublished)
2787 info[jss::published_ledger] = "none";
2788 else if (lpPublished->info().seq != lpClosed->info().seq)
2789 info[jss::published_ledger] = lpPublished->info().seq;
2790 }
2791
2792 accounting_.json(info);
2793 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2794 info[jss::jq_trans_overflow] =
2796 info[jss::peer_disconnects] =
2798 info[jss::peer_disconnects_resources] =
2800
2801 // This array must be sorted in increasing order.
2802 static constexpr std::array<std::string_view, 7> protocols{
2803 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2804 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2805 {
2807 for (auto const& port : app_.getServerHandler().setup().ports)
2808 {
2809 // Don't publish admin ports for non-admin users
2810 if (!admin &&
2811 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2812 port.admin_user.empty() && port.admin_password.empty()))
2813 continue;
2816 std::begin(port.protocol),
2817 std::end(port.protocol),
2818 std::begin(protocols),
2819 std::end(protocols),
2820 std::back_inserter(proto));
2821 if (!proto.empty())
2822 {
2823 auto& jv = ports.append(Json::Value(Json::objectValue));
2824 jv[jss::port] = std::to_string(port.port);
2825 jv[jss::protocol] = Json::Value{Json::arrayValue};
2826 for (auto const& p : proto)
2827 jv[jss::protocol].append(p);
2828 }
2829 }
2830
2831 if (app_.config().exists(SECTION_PORT_GRPC))
2832 {
2833 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
2834 auto const optPort = grpcSection.get("port");
2835 if (optPort && grpcSection.get("ip"))
2836 {
2837 auto& jv = ports.append(Json::Value(Json::objectValue));
2838 jv[jss::port] = *optPort;
2839 jv[jss::protocol] = Json::Value{Json::arrayValue};
2840 jv[jss::protocol].append("grpc");
2841 }
2842 }
2843 info[jss::ports] = std::move(ports);
2844 }
2845
2846 return info;
2847}
2848
2849 void
// NOTE(review): the scraped listing elides lines 2850 and 2852, which carry
// this function's qualified name and its single body statement. From position
// in NetworkOPs.cpp this is presumably NetworkOPsImp::clearLedgerFetch()
// (clearing inbound-ledger failure state) -- confirm against the repository.
2851 {
2853 }
2854
2857{
2858 return app_.getInboundLedgers().getInfo();
2859}
2860
2861 void
// Publish a proposed (not yet validated) transaction to every subscriber of
// the real-time transaction stream (sRTTransactions), then fan out to
// per-account real-time subscribers.
// NOTE(review): line 2862 (the qualified function name, presumably
// NetworkOPsImp::pubProposedTransaction) is elided by this listing.
2863     std::shared_ptr<ReadView const> const& ledger,
2864     std::shared_ptr<STTx const> const& transaction,
2865     TER result)
2866 {
     // Build the version-spanning JSON once; validated=false marks it proposed.
2867     MultiApiJson jvObj =
2868         transJson(transaction, result, false, ledger, std::nullopt);
2869
2870     {
         // NOTE(review): line 2871 is elided -- presumably the stream-map
         // lock (std::lock_guard sl(mSubLock)) is acquired here; confirm.
2872
2873         auto it = mStreamMaps[sRTTransactions].begin();
2874         while (it != mStreamMaps[sRTTransactions].end())
2875         {
             // Weak references: a dead subscriber is pruned in place.
2876             InfoSub::pointer p = it->second.lock();
2877
2878             if (p)
2879             {
                 // Send the JSON shape matching this subscriber's API version.
2880                 jvObj.visit(
2881                     p->getApiVersion(),  //
2882                     [&](Json::Value const& jv) { p->send(jv, true); });
2883                 ++it;
2884             }
2885             else
2886             {
2887                 it = mStreamMaps[sRTTransactions].erase(it);
2888             }
2889         }
2890     }
2891
     // Also notify per-account real-time subscriptions for affected accounts.
2892     pubProposedAccountTransaction(ledger, transaction, result);
2893 }
2894
2895 void
// Publish a newly accepted ledger: notify the sLedger and sBookChanges
// streams, start any deferred account-history subscriptions on the first
// published ledger, then publish each accepted transaction individually.
// NOTE(review): line 2896 (the qualified name, presumably
// NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted))
// is elided by this listing.
2897 {
2898     // Ledgers are published only when they acquire sufficient validations
2899     // Holes are filled across connection loss or other catastrophe
2900
     // NOTE(review): line 2901 is elided -- presumably the declaration of
     // alpAccepted initialized from the cache fetch below; confirm.
2902         app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
2903     if (!alpAccepted)
2904     {
         // Cache miss: build the AcceptedLedger and canonicalize it so
         // concurrent publishers share one instance.
2905         alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
2906         app_.getAcceptedLedgerCache().canonicalize_replace_client(
2907             lpAccepted->info().hash, alpAccepted);
2908     }
2909
2910     XRPL_ASSERT(
2911         alpAccepted->getLedger().get() == lpAccepted.get(),
2912         "ripple::NetworkOPsImp::pubLedger : accepted input");
2913
2914     {
2915         JLOG(m_journal.debug())
2916             << "Publishing ledger " << lpAccepted->info().seq << " "
2917             << lpAccepted->info().hash;
2918
         // NOTE(review): line 2919 is elided -- presumably the stream-map
         // lock (std::lock_guard sl(mSubLock)); confirm.
2920
2921         if (!mStreamMaps[sLedger].empty())
2922         {
             // NOTE(review): line 2923 (declaration of jvObj, presumably
             // Json::Value jvObj(Json::objectValue)) is elided.
2924
             // One "ledgerClosed" message is built and shared by all
             // ledger-stream subscribers.
2925             jvObj[jss::type] = "ledgerClosed";
2926             jvObj[jss::ledger_index] = lpAccepted->info().seq;
2927             jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
2928             jvObj[jss::ledger_time] = Json::Value::UInt(
2929                 lpAccepted->info().closeTime.time_since_epoch().count());
2930
             // fee_ref is only reported on ledgers predating the XRPFees
             // amendment (deprecated unit).
2931             if (!lpAccepted->rules().enabled(featureXRPFees))
2932                 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
2933             jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
2934             jvObj[jss::reserve_base] =
2935                 lpAccepted->fees().accountReserve(0).jsonClipped();
2936             jvObj[jss::reserve_inc] =
2937                 lpAccepted->fees().increment.jsonClipped();
2938
2939             jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
2940
             // NOTE(review): line 2941 (a condition guarding the
             // validated_ledgers field) and line 2944 (its value expression,
             // presumably the LedgerMaster completed-ledger range) are elided.
2942             {
2943                 jvObj[jss::validated_ledgers] =
2945             }
2946
2947             auto it = mStreamMaps[sLedger].begin();
2948             while (it != mStreamMaps[sLedger].end())
2949             {
                 // Prune subscribers whose weak_ptr has expired.
2950                 InfoSub::pointer p = it->second.lock();
2951                 if (p)
2952                 {
2953                     p->send(jvObj, true);
2954                     ++it;
2955                 }
2956                 else
2957                     it = mStreamMaps[sLedger].erase(it);
2958             }
2959         }
2960
2961         if (!mStreamMaps[sBookChanges].empty())
2962         {
             // Book-changes stream gets its own per-ledger summary object.
2963             Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
2964
2965             auto it = mStreamMaps[sBookChanges].begin();
2966             while (it != mStreamMaps[sBookChanges].end())
2967             {
2968                 InfoSub::pointer p = it->second.lock();
2969                 if (p)
2970                 {
2971                     p->send(jvObj, true);
2972                     ++it;
2973                 }
2974                 else
2975                     it = mStreamMaps[sBookChanges].erase(it);
2976             }
2977         }
2978
2979         {
             // Run once per process: account-history subscriptions created
             // before the first published ledger were deferred
             // (separationLedgerSeq_ == 0) and are started here.
2980             static bool firstTime = true;
2981             if (firstTime)
2982             {
2983                 // First validated ledger, start delayed SubAccountHistory
2984                 firstTime = false;
2985                 for (auto& outer : mSubAccountHistory)
2986                 {
2987                     for (auto& inner : outer.second)
2988                     {
2989                         auto& subInfo = inner.second;
2990                         if (subInfo.index_->separationLedgerSeq_ == 0)
2991                         {
                             // NOTE(review): line 2992 (the call, presumably
                             // subAccountHistoryStart(...)) is elided.
2993                                 alpAccepted->getLedger(), subInfo);
2994                         }
2995                     }
2996                 }
2997             }
2998         }
2999     }
3000
3001     // Don't lock since pubAcceptedTransaction is locking.
3002     for (auto const& accTx : *alpAccepted)
3003     {
3004         JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
         // NOTE(review): line 3005 (the call, presumably
         // pubValidatedTransaction(...)) is elided; the third argument flags
         // the last transaction of the ledger.
3006             lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
3007     }
3008 }
3009
3010 void
// Detect a change in the advertised fee schedule and, only when something
// actually changed, schedule a client job to re-publish server state.
// NOTE(review): lines 3011 (function name), 3013/3015 (remaining
// initializers of the fee-summary object `f`), and 3021 (the job-queue call,
// presumably m_job_queue.addJob(...)) are elided by this listing.
3012 {
3014         app_.openLedger().current()->fees().base,
3016         app_.getFeeTrack()};
3017
3018     // only schedule the job if something has changed
3019     if (f != mLastFeeSummary)
3020     {
3022             jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
3023                 pubServer();
3024             });
3025     }
3026 }
3027
3028 void
// Schedule an async job that publishes the new consensus phase to
// subscribers; the phase is captured by value into the job lambda.
// NOTE(review): lines 3029 (function name) and 3031-3032 (the job-queue call
// and job type) are elided by this listing.
3030 {
3033         "reportConsensusStateChange->pubConsensus",
3034         [this, phase]() { pubConsensus(phase); });
3035 }
3036
3037 inline void
// Forward a sweep request to the local-transaction set, dropping entries that
// are no longer viable relative to the given view.
// NOTE(review): line 3038 (the qualified name and parameter list; `view` is
// the parameter) is elided by this listing -- confirm upstream.
3039 {
3040     m_localTX->sweep(view);
3041 }
3042 inline std::size_t
// Number of transactions currently held in the local-transaction set.
// NOTE(review): line 3043 (the qualified name, presumably
// NetworkOPsImp::getLocalTxCount) is elided by this listing.
3044 {
3045     return m_localTX->size();
3046 }
3047
3048 // This routine should only be used to publish accepted or validated
3049 // transactions.
// Build the subscriber-facing JSON for one transaction as a MultiApiJson:
// one object per supported API version, each finished for that version's
// field layout (tx_json/hash placement differs between v1 and v2+).
// NOTE(review): lines 3050-3051 (return type MultiApiJson and the qualified
// name NetworkOPsImp::transJson) and line 3056 (the optional meta parameter)
// are elided by this listing.
3052     std::shared_ptr<STTx const> const& transaction,
3053     TER result,
3054     bool validated,
3055     std::shared_ptr<ReadView const> const& ledger,
3057 {
     // NOTE(review): line 3058 (declaration of jvObj) is elided.
3059     std::string sToken;
3060     std::string sHuman;
3061
     // Translate the TER into its token ("tesSUCCESS") and human text.
3062     transResultInfo(result, sToken, sHuman);
3063
3064     jvObj[jss::type] = "transaction";
3065     // NOTE jvObj is not a finished object for either API version. After
3066     // it's populated, we need to finish it for a specific API version. This is
3067     // done in a loop, near the end of this function.
3068     jvObj[jss::transaction] =
3069         transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3070
3071     if (meta)
3072     {
3073         jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
         // NOTE(review): lines 3074 and 3076 (the helper calls that insert
         // delivered_amount / NFT or MPT synthetic fields into the meta JSON)
         // are elided by this listing.
3075             jvObj[jss::meta], *ledger, transaction, meta->get());
3077             jvObj[jss::meta], transaction, meta->get());
3078     }
3079
     // A closed ledger has a meaningful hash; an open one does not.
3080     if (!ledger->open())
3081         jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3082
3083     if (validated)
3084     {
3085         jvObj[jss::ledger_index] = ledger->info().seq;
3086         jvObj[jss::transaction][jss::date] =
3087             ledger->info().closeTime.time_since_epoch().count();
3088         jvObj[jss::validated] = true;
3089         jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3090
3091         // WRITEME: Put the account next seq here
3092     }
3093     else
3094     {
3095         jvObj[jss::validated] = false;
3096         jvObj[jss::ledger_current_index] = ledger->info().seq;
3097     }
3098
3099     jvObj[jss::status] = validated ? "closed" : "proposed";
3100     jvObj[jss::engine_result] = sToken;
3101     jvObj[jss::engine_result_code] = result;
3102     jvObj[jss::engine_result_message] = sHuman;
3103
3104     if (transaction->getTxnType() == ttOFFER_CREATE)
3105     {
3106         auto const account = transaction->getAccountID(sfAccount);
3107         auto const amount = transaction->getFieldAmount(sfTakerGets);
3108
3109         // If the offer create is not self funded then add the owner balance
3110         if (account != amount.issue().account)
3111         {
3112             auto const ownerFunds = accountFunds(
3113                 *ledger,
3114                 account,
3115                 amount,
                 // NOTE(review): line 3116 (the freeze-handling argument,
                 // presumably fhIGNORE_FREEZE) is elided by this listing.
3117                 app_.journal("View"));
3118             jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3119         }
3120     }
3121
3122     std::string const hash = to_string(transaction->getTransactionID());
3123     MultiApiJson multiObj{jvObj};
     // Finish each per-version copy: insert DeliverMax, then place hash (and
     // rename transaction -> tx_json) according to the API version.
     // NOTE(review): line 3124 (the visit/forAllApiVersions call) and part of
     // line 3127 (the lambda's Json::Value& jvTx parameter) are elided.
3125         multiObj.visit(),  //
3126         [&]<unsigned Version>(
3128             RPC::insertDeliverMax(
3129                 jvTx[jss::transaction], transaction->getTxnType(), Version);
3130
3131             if constexpr (Version > 1)
3132             {
3133                 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3134                 jvTx[jss::hash] = hash;
3135             }
3136             else
3137             {
3138                 jvTx[jss::transaction][jss::hash] = hash;
3139             }
3140         });
3141
3142     return multiObj;
3143 }
3144
3145 void
// Publish one accepted (validated) transaction to both the sTransactions and
// sRTTransactions streams, feed successful transactions to the order-book
// tracker, then fan out to per-account subscribers.
// NOTE(review): line 3146 (the qualified name, presumably
// NetworkOPsImp::pubValidatedTransaction) is elided by this listing.
3147     std::shared_ptr<ReadView const> const& ledger,
3148     const AcceptedLedgerTx& transaction,
3149     bool last)
3150 {
3151     auto const& stTxn = transaction.getTxn();
3152
3153     // Create two different Json objects, for different API versions
3154     auto const metaRef = std::ref(transaction.getMeta());
3155     auto const trResult = transaction.getResult();
3156     MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3157
3158     {
         // NOTE(review): line 3159 is elided -- presumably the stream-map
         // lock (std::lock_guard sl(mSubLock)); confirm.
3160
3161         auto it = mStreamMaps[sTransactions].begin();
3162         while (it != mStreamMaps[sTransactions].end())
3163         {
3164             InfoSub::pointer p = it->second.lock();
3165
3166             if (p)
3167             {
3168                 jvObj.visit(
3169                     p->getApiVersion(),  //
3170                     [&](Json::Value const& jv) { p->send(jv, true); });
3171                 ++it;
3172             }
3173             else
3174                 it = mStreamMaps[sTransactions].erase(it);
3175         }
3176
         // Real-time subscribers get validated transactions too (they saw the
         // proposed version earlier).
3177         it = mStreamMaps[sRTTransactions].begin();
3178
3179         while (it != mStreamMaps[sRTTransactions].end())
3180         {
3181             InfoSub::pointer p = it->second.lock();
3182
3183             if (p)
3184             {
3185                 jvObj.visit(
3186                     p->getApiVersion(),  //
3187                     [&](Json::Value const& jv) { p->send(jv, true); });
3188                 ++it;
3189             }
3190             else
3191                 it = mStreamMaps[sRTTransactions].erase(it);
3192         }
3193     }
3194
     // Only successful transactions can move order books.
3195     if (transaction.getResult() == tesSUCCESS)
3196         app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3197
3198     pubAccountTransaction(ledger, transaction, last);
3199 }
3200
3201 void
// Deliver a validated transaction to per-account subscribers: the real-time
// set (mSubRTAccount), the accepted set (mSubAccount), and forward-streaming
// account-history subscriptions. Dead weak_ptr subscribers are pruned.
// NOTE(review): line 3202 (the qualified name, presumably
// NetworkOPsImp::pubAccountTransaction) is elided by this listing.
3203     std::shared_ptr<ReadView const> const& ledger,
3204     AcceptedLedgerTx const& transaction,
3205     bool last)
3206 {
     // NOTE(review): line 3207 (declaration of `notify`, a hash_set of
     // InfoSub::pointer) is elided by this listing.
3208     int iProposed = 0;
3209     int iAccepted = 0;
3210
3211     std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3212     auto const currLedgerSeq = ledger->seq();
3213     {
         // NOTE(review): line 3214 is elided -- presumably the subscription
         // lock (std::lock_guard sl(mSubLock)); confirm.
3215
         // NOTE(review): line 3217 (third operand of the condition, presumably
         // !mSubAccountHistory.empty()) is elided.
3216         if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3218         {
3219             for (auto const& affectedAccount : transaction.getAffected())
3220             {
                 // Real-time subscribers for this account.
3221                 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3222                     simiIt != mSubRTAccount.end())
3223                 {
3224                     auto it = simiIt->second.begin();
3225
3226                     while (it != simiIt->second.end())
3227                     {
3228                         InfoSub::pointer p = it->second.lock();
3229
3230                         if (p)
3231                         {
3232                             notify.insert(p);
3233                             ++it;
3234                             ++iProposed;
3235                         }
3236                         else
3237                             it = simiIt->second.erase(it);
3238                     }
3239                 }
3240
                 // Accepted-transaction subscribers for this account.
3241                 if (auto simiIt = mSubAccount.find(affectedAccount);
3242                     simiIt != mSubAccount.end())
3243                 {
3244                     auto it = simiIt->second.begin();
3245                     while (it != simiIt->second.end())
3246                     {
3247                         InfoSub::pointer p = it->second.lock();
3248
3249                         if (p)
3250                         {
3251                             notify.insert(p);
3252                             ++it;
3253                             ++iAccepted;
3254                         }
3255                         else
3256                             it = simiIt->second.erase(it);
3257                     }
3258                 }
3259
                 // Account-history subscribers: only ledgers past the
                 // separation point are streamed forward; earlier ledgers are
                 // covered by the backfill job.
3260                 if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3261                     histoIt != mSubAccountHistory.end())
3262                 {
3263                     auto& subs = histoIt->second;
3264                     auto it = subs.begin();
3265                     while (it != subs.end())
3266                     {
3267                         SubAccountHistoryInfoWeak const& info = it->second;
3268                         if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3269                         {
3270                             ++it;
3271                             continue;
3272                         }
3273
3274                         if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3275                         {
3276                             accountHistoryNotify.emplace_back(
3277                                 SubAccountHistoryInfo{isSptr, info.index_});
3278                             ++it;
3279                         }
3280                         else
3281                         {
3282                             it = subs.erase(it);
3283                         }
3284                     }
3285                     if (subs.empty())
3286                         mSubAccountHistory.erase(histoIt);
3287                 }
3288             }
3289         }
3290     }
3291
3292     JLOG(m_journal.trace())
3293         << "pubAccountTransaction: " << "proposed=" << iProposed
3294         << ", accepted=" << iAccepted;
3295
3296     if (!notify.empty() || !accountHistoryNotify.empty())
3297     {
3298         auto const& stTxn = transaction.getTxn();
3299
3300         // Create two different Json objects, for different API versions
3301         auto const metaRef = std::ref(transaction.getMeta());
3302         auto const trResult = transaction.getResult();
3303         MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3304
3305         for (InfoSub::ref isrListener : notify)
3306         {
3307             jvObj.visit(
3308                 isrListener->getApiVersion(),  //
3309                 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3310         }
3311
         // Mark the last transaction of the ledger for history streams.
3312         if (last)
3313             jvObj.set(jss::account_history_boundary, true);
3314
         // NOTE(review): line 3317 (second operand of the comparison in this
         // assertion) is elided by this listing.
3315         XRPL_ASSERT(
3316             jvObj.isMember(jss::account_history_tx_stream) ==
3318             "ripple::NetworkOPsImp::pubAccountTransaction : "
3319             "account_history_tx_stream not set");
3320         for (auto& info : accountHistoryNotify)
3321         {
             // Forward-stream indices are non-negative and increase; the
             // backfill job uses negative indices.
3322             auto& index = info.index_;
3323             if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3324                 jvObj.set(jss::account_history_tx_first, true);
3325
3326             jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3327
3328             jvObj.visit(
3329                 info.sink_->getApiVersion(),  //
3330                 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3331         }
3332     }
3333 }
3334
3335 void
// Deliver a proposed (unvalidated) transaction to real-time per-account
// subscribers of any account the transaction mentions.
// NOTE(review): lines 3336 (the qualified name) and 3338 (the transaction
// parameter, presumably std::shared_ptr<STTx const> const& tx) are elided.
3337     std::shared_ptr<ReadView const> const& ledger,
3339     TER result)
3340 {
     // NOTE(review): line 3341 (declaration of `notify`) is elided.
3342     int iProposed = 0;
3343
     // Kept for symmetry with pubAccountTransaction; only the real-time map
     // is consulted below, so this stays empty here.
3344     std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3345
3346     {
         // NOTE(review): line 3347 is elided -- presumably the subscription
         // lock (std::lock_guard sl(mSubLock)); confirm.
3348
3349         if (mSubRTAccount.empty())
3350             return;
3351
         // NOTE(review): line 3353 (third operand, presumably
         // !mSubAccountHistory.empty()) is elided.
3352         if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3354         {
3355             for (auto const& affectedAccount : tx->getMentionedAccounts())
3356             {
3357                 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3358                     simiIt != mSubRTAccount.end())
3359                 {
3360                     auto it = simiIt->second.begin();
3361
3362                     while (it != simiIt->second.end())
3363                     {
3364                         InfoSub::pointer p = it->second.lock();
3365
3366                         if (p)
3367                         {
3368                             notify.insert(p);
3369                             ++it;
3370                             ++iProposed;
3371                         }
3372                         else
3373                             it = simiIt->second.erase(it);
3374                     }
3375                 }
3376             }
3377         }
3378     }
3379
3380     JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3381
3382     if (!notify.empty() || !accountHistoryNotify.empty())
3383     {
3384         // Create two different Json objects, for different API versions
3385         MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3386
3387         for (InfoSub::ref isrListener : notify)
3388             jvObj.visit(
3389                 isrListener->getApiVersion(),  //
3390                 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3391
         // NOTE(review): line 3394 (second operand of the comparison in this
         // assertion) is elided by this listing.
3392         XRPL_ASSERT(
3393             jvObj.isMember(jss::account_history_tx_stream) ==
3395             "ripple::NetworkOPs::pubProposedAccountTransaction : "
3396             "account_history_tx_stream not set");
3397         for (auto& info : accountHistoryNotify)
3398         {
3399             auto& index = info.index_;
3400             if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3401                 jvObj.set(jss::account_history_tx_first, true);
3402             jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3403             jvObj.visit(
3404                 info.sink_->getApiVersion(),  //
3405                 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3406         }
3407     }
3408 }
3409
3410//
3411// Monitoring
3412//
3413
3414 void
// Register a listener for a set of accounts, either in the real-time map
// (rt == true) or the accepted-transaction map.
// NOTE(review): line 3415 (the qualified name, presumably
// NetworkOPsImp::subAccount) is elided by this listing.
3416     InfoSub::ref isrListener,
3417     hash_set<AccountID> const& vnaAccountIDs,
3418     bool rt)
3419 {
3420     SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3421
     // First record the accounts on the InfoSub itself so it can clean up on
     // disconnect.
3422     for (auto const& naAccountID : vnaAccountIDs)
3423     {
3424         JLOG(m_journal.trace())
3425             << "subAccount: account: " << toBase58(naAccountID);
3426
3427         isrListener->insertSubAccountInfo(naAccountID, rt);
3428     }
3429
     // NOTE(review): line 3430 is elided -- presumably the subscription lock
     // (std::lock_guard sl(mSubLock)); confirm.
3431
3432     for (auto const& naAccountID : vnaAccountIDs)
3433     {
3434         auto simIterator = subMap.find(naAccountID);
3435         if (simIterator == subMap.end())
3436         {
3437             // Not found, note that account has a new single listener.
3438             SubMapType usisElement;
3439             usisElement[isrListener->getSeq()] = isrListener;
3440             // VFALCO NOTE This is making a needless copy of naAccountID
3441             subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3442         }
3443         else
3444         {
3445             // Found, note that the account has another listener.
3446             simIterator->second[isrListener->getSeq()] = isrListener;
3447         }
3448     }
3449 }
3450
3451 void
// Remove an account subscription from both the InfoSub and the server-side
// maps.
// NOTE(review): line 3452 (the qualified name, presumably
// NetworkOPsImp::unsubAccount) is elided by this listing.
3453     InfoSub::ref isrListener,
3454     hash_set<AccountID> const& vnaAccountIDs,
3455     bool rt)
3456 {
3457     for (auto const& naAccountID : vnaAccountIDs)
3458     {
3459         // Remove from the InfoSub
3460         isrListener->deleteSubAccountInfo(naAccountID, rt);
3461     }
3462
3463     // Remove from the server
3464     unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3465 }
3466
3467 void
// Server-side half of unsubscription: drop the listener's sequence number
// from each account's entry and discard empty entries.
// NOTE(review): line 3468 (the qualified name, presumably
// NetworkOPsImp::unsubAccountInternal) is elided by this listing.
3469     std::uint64_t uSeq,
3470     hash_set<AccountID> const& vnaAccountIDs,
3471     bool rt)
3472 {
     // NOTE(review): line 3473 is elided -- presumably the subscription lock
     // (std::lock_guard sl(mSubLock)); confirm.
3474
3475     SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3476
3477     for (auto const& naAccountID : vnaAccountIDs)
3478     {
3479         auto simIterator = subMap.find(naAccountID);
3480
3481         if (simIterator != subMap.end())
3482         {
3483             // Found
3484             simIterator->second.erase(uSeq);
3485
3486             if (simIterator->second.empty())
3487             {
3488                 // Don't need hash entry.
3489                 subMap.erase(simIterator);
3490             }
3491         }
3492     }
3493 }
3494
3495 void
// Start (as a queued job) the historical backfill for one account-history
// subscription: walk validated ledgers backward in windows of 1024, page
// matching transactions out of the relational database, and stream them to
// the subscriber with decreasing account_history_tx_index values until the
// account's first transaction or the genesis ledger is reached.
// NOTE(review): line 3496 (the qualified name and SubAccountHistoryInfoWeak
// subInfo parameter) is elided by this listing.
3497 {
3498     enum DatabaseType { Sqlite, None };
     // Evaluated once per process; captures app_ only for this first call.
3499     static const auto databaseType = [&]() -> DatabaseType {
3500         // Use a dynamic_cast to return DatabaseType::None
3501         // on failure.
3502         if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3503         {
3504             return DatabaseType::Sqlite;
3505         }
3506         return DatabaseType::None;
3507     }();
3508
3509     if (databaseType == DatabaseType::None)
3510     {
3511         JLOG(m_journal.error())
3512             << "AccountHistory job for account "
3513             << toBase58(subInfo.index_->accountId_) << " no database";
         // Without a database the subscription cannot be served: report an
         // internal error and tear the subscription down.
3514         if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3515         {
3516             sptr->send(rpcError(rpcINTERNAL), true);
3517             unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3518         }
3519         return;
3520     }
3521
     // NOTE(review): lines 3522-3523 (the job-queue call and job type,
     // presumably m_job_queue.addJob(jtCLIENT_ACCT_HIST, ...)) are elided.
3524         "AccountHistoryTxStream",
3525         [this, dbType = databaseType, subInfo]() {
3526             auto const& accountId = subInfo.index_->accountId_;
3527             auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3528             auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3529
3530             JLOG(m_journal.trace())
3531                 << "AccountHistory job for account " << toBase58(accountId)
3532                 << " started. lastLedgerSeq=" << lastLedgerSeq;
3533
             // Detect the account's earliest transaction so the backward walk
             // can stop once it is found.
3534             auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3535                                  std::shared_ptr<TxMeta> const& meta) -> bool {
3536                 /*
3537                  * genesis account: first tx is the one with seq 1
3538                  * other account: first tx is the one created the account
3539                  */
3540                 if (accountId == genesisAccountId)
3541                 {
3542                     auto stx = tx->getSTransaction();
3543                     if (stx->getAccountID(sfAccount) == accountId &&
3544                         stx->getSeqProxy().value() == 1)
3545                         return true;
3546                 }
3547
                 // A created AccountRoot appears in metadata as sfNewFields on
                 // an ltACCOUNT_ROOT node.
3548                 for (auto& node : meta->getNodes())
3549                 {
3550                     if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3551                         continue;
3552
3553                     if (node.isFieldPresent(sfNewFields))
3554                     {
3555                         if (auto inner = dynamic_cast<const STObject*>(
3556                                 node.peekAtPField(sfNewFields));
3557                             inner)
3558                         {
3559                             if (inner->isFieldPresent(sfAccount) &&
3560                                 inner->getAccountID(sfAccount) == accountId)
3561                             {
3562                                 return true;
3563                             }
3564                         }
3565                     }
3566                 }
3567
3568                 return false;
3569             };
3570
             // Send plain JSON to the subscriber if it is still alive;
             // optionally unsubscribe afterwards (used for error reporting).
3571             auto send = [&](Json::Value const& jvObj,
3572                             bool unsubscribe) -> bool {
3573                 if (auto sptr = subInfo.sinkWptr_.lock())
3574                 {
3575                     sptr->send(jvObj, true);
3576                     if (unsubscribe)
3577                         unsubAccountHistory(sptr, accountId, false);
3578                     return true;
3579                 }
3580
3581                 return false;
3582             };
3583
             // Same as `send`, but picks the API-version-specific shape from
             // a MultiApiJson.
3584             auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3585                                         bool unsubscribe) -> bool {
3586                 if (auto sptr = subInfo.sinkWptr_.lock())
3587                 {
3588                     jvObj.visit(
3589                         sptr->getApiVersion(),  //
3590                         [&](Json::Value const& jv) { sptr->send(jv, true); });
3591
3592                     if (unsubscribe)
3593                         unsubAccountHistory(sptr, accountId, false);
3594                     return true;
3595                 }
3596
3597                 return false;
3598             };
3599
             // Page newest-first account transactions from the database.
             // NOTE(review): lines 3603-3606 (the marker parameter and the
             // lambda's optional-result return type) are elided.
3600             auto getMoreTxns =
3601                 [&](std::uint32_t minLedger,
3602                     std::uint32_t maxLedger,
3607                     switch (dbType)
3608                     {
3609                         case Sqlite: {
                             // NOTE(review): lines 3611-3612 (completion of
                             // the cast to SQLiteDatabase and the
                             // AccountTxPageOptions initializer) are elided.
3610                             auto db = static_cast<SQLiteDatabase*>(
3613                                 accountId, minLedger, maxLedger, marker, 0, true};
3614                             return db->newestAccountTxPage(options);
3615                         }
3616                         default: {
3617                             UNREACHABLE(
3618                                 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3619                                 "getMoreTxns : invalid database type");
3620                             return {};
3621                         }
3622                     }
3623                 };
3624
3625             /*
3626              * search backward until the genesis ledger or asked to stop
3627              */
3628             while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3629             {
                 // Charge the subscriber per window; a vanished subscriber
                 // ends the job.
3630                 int feeChargeCount = 0;
3631                 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3632                 {
3633                     sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3634                     ++feeChargeCount;
3635                 }
3636                 else
3637                 {
3638                     JLOG(m_journal.trace())
3639                         << "AccountHistory job for account "
3640                         << toBase58(accountId) << " no InfoSub. Fee charged "
3641                         << feeChargeCount << " times.";
3642                     return;
3643                 }
3644
3645                 // try to search in 1024 ledgers till reaching genesis ledgers
3646                 auto startLedgerSeq =
3647                     (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3648                 JLOG(m_journal.trace())
3649                     << "AccountHistory job for account " << toBase58(accountId)
3650                     << ", working on ledger range [" << startLedgerSeq << ","
3651                     << lastLedgerSeq << "]";
3652
                 // The whole window must be validated locally before querying.
                 // NOTE(review): line 3657 (the call producing
                 // haveSomeValidatedLedgers, presumably
                 // app_.getLedgerMaster().getValidatedRange(...)) is elided.
3653                 auto haveRange = [&]() -> bool {
3654                     std::uint32_t validatedMin = UINT_MAX;
3655                     std::uint32_t validatedMax = 0;
3656                     auto haveSomeValidatedLedgers =
3658                             validatedMin, validatedMax);
3659
3660                     return haveSomeValidatedLedgers &&
3661                         validatedMin <= startLedgerSeq &&
3662                         lastLedgerSeq <= validatedMax;
3663                 }();
3664
3665                 if (!haveRange)
3666                 {
3667                     JLOG(m_journal.debug())
3668                         << "AccountHistory reschedule job for account "
3669                         << toBase58(accountId) << ", incomplete ledger range ["
3670                         << startLedgerSeq << "," << lastLedgerSeq << "]";
                     // NOTE(review): line 3671 (the reschedule call,
                     // presumably a delayed re-invocation of this job) is
                     // elided.
3672                     return;
3673                 }
3674
                 // NOTE(review): line 3675 (declaration of the paging
                 // `marker`, presumably std::optional<...>) is elided.
3676                 while (!subInfo.index_->stopHistorical_)
3677                 {
3678                     auto dbResult =
3679                         getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3680                     if (!dbResult)
3681                     {
3682                         JLOG(m_journal.debug())
3683                             << "AccountHistory job for account "
3684                             << toBase58(accountId) << " getMoreTxns failed.";
3685                         send(rpcError(rpcINTERNAL), true);
3686                         return;
3687                     }
3688
3689                     auto const& txns = dbResult->first;
3690                     marker = dbResult->second;
3691                     size_t num_txns = txns.size();
3692                     for (size_t i = 0; i < num_txns; ++i)
3693                     {
3694                         auto const& [tx, meta] = txns[i];
3695
3696                         if (!tx || !meta)
3697                         {
3698                             JLOG(m_journal.debug())
3699                                 << "AccountHistory job for account "
3700                                 << toBase58(accountId) << " empty tx or meta.";
3701                             send(rpcError(rpcINTERNAL), true);
3702                             return;
3703                         }
                         // NOTE(review): line 3705 (the ledger lookup,
                         // presumably m_ledgerMaster.getLedgerBySeq(...)) is
                         // elided.
3704                         auto curTxLedger =
3706                                 tx->getLedger());
3707                         if (!curTxLedger)
3708                         {
3709                             JLOG(m_journal.debug())
3710                                 << "AccountHistory job for account "
3711                                 << toBase58(accountId) << " no ledger.";
3712                             send(rpcError(rpcINTERNAL), true);
3713                             return;
3714                         }
                         // NOTE(review): line 3715 (declaration of stTxn) is
                         // elided.
3716                             tx->getSTransaction();
3717                         if (!stTxn)
3718                         {
3719                             JLOG(m_journal.debug())
3720                                 << "AccountHistory job for account "
3721                                 << toBase58(accountId)
3722                                 << " getSTransaction failed.";
3723                             send(rpcError(rpcINTERNAL), true);
3724                             return;
3725                         }
3726
3727                         auto const mRef = std::ref(*meta);
3728                         auto const trR = meta->getResultTER();
3729                         MultiApiJson jvTx =
3730                             transJson(stTxn, trR, true, curTxLedger, mRef);
3731
                         // Historical stream counts downward (post-decrement).
3732                         jvTx.set(
3733                             jss::account_history_tx_index, txHistoryIndex--);
                         // Boundary marker: last tx of a ledger within the
                         // newest-first page.
3734                         if (i + 1 == num_txns ||
3735                             txns[i + 1].first->getLedger() != tx->getLedger())
3736                             jvTx.set(jss::account_history_boundary, true);
3737
3738                         if (isFirstTx(tx, meta))
3739                         {
3740                             jvTx.set(jss::account_history_tx_first, true);
3741                             sendMultiApiJson(jvTx, false);
3742
3743                             JLOG(m_journal.trace())
3744                                 << "AccountHistory job for account "
3745                                 << toBase58(accountId)
3746                                 << " done, found last tx.";
3747                             return;
3748                         }
3749                         else
3750                         {
3751                             sendMultiApiJson(jvTx, false);
3752                         }
3753                     }
3754
3755                     if (marker)
3756                     {
3757                         JLOG(m_journal.trace())
3758                             << "AccountHistory job for account "
3759                             << toBase58(accountId)
3760                             << " paging, marker=" << marker->ledgerSeq << ":"
3761                             << marker->txnSeq;
3762                     }
3763                     else
3764                     {
3765                         break;
3766                     }
3767                 }
3768
                 // Advance the window one step further into the past.
3769                 if (!subInfo.index_->stopHistorical_)
3770                 {
3771                     lastLedgerSeq = startLedgerSeq - 1;
3772                     if (lastLedgerSeq <= 1)
3773                     {
3774                         JLOG(m_journal.trace())
3775                             << "AccountHistory job for account "
3776                             << toBase58(accountId)
3777                             << " done, reached genesis ledger.";
3778                         return;
3779                     }
3780                 }
3781             }
3782         });
3783 }
3784
3785 void
// Anchor an account-history subscription at the given validated ledger and
// kick off the historical backfill job, unless the account does not exist or
// is an untouched genesis account.
// NOTE(review): lines 3786 (the qualified name) and 3788 (the
// SubAccountHistoryInfoWeak& subInfo parameter) are elided by this listing.
3787     std::shared_ptr<ReadView const> const& ledger,
3789 {
     // The separation ledger divides forward streaming (pubAccountTransaction)
     // from the historical backfill below it.
3790     subInfo.index_->separationLedgerSeq_ = ledger->seq();
3791     auto const& accountId = subInfo.index_->accountId_;
3792     auto const accountKeylet = keylet::account(accountId);
3793     if (!ledger->exists(accountKeylet))
3794     {
3795         JLOG(m_journal.debug())
3796             << "subAccountHistoryStart, no account " << toBase58(accountId)
3797             << ", no need to add AccountHistory job.";
3798         return;
3799     }
3800     if (accountId == genesisAccountId)
3801     {
         // Genesis account with Sequence == 1 has never sent a transaction,
         // so there is no history to backfill.
3802         if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3803         {
3804             if (sleAcct->getFieldU32(sfSequence) == 1)
3805             {
3806                 JLOG(m_journal.debug())
3807                     << "subAccountHistoryStart, genesis account "
3808                     << toBase58(accountId)
3809                     << " does not have tx, no need to add AccountHistory job.";
3810                 return;
3811             }
3812         }
3813         else
3814         {
3815             UNREACHABLE(
3816                 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
3817                 "access genesis account");
3818             return;
3819         }
3820     }
3821     subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
3822     subInfo.index_->haveHistorical_ = true;
3823
3824     JLOG(m_journal.debug())
3825         << "subAccountHistoryStart, add AccountHistory job: accountId="
3826         << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
3827
3828     addAccountHistoryJob(subInfo);
3829 }
3830
// Subscribe a listener to the account-history stream for one account.
// Returns rpcINVALID_PARAMS on duplicate subscription, rpcSUCCESS otherwise.
// NOTE(review): lines 3831-3832 (the return type, presumably error_code_i,
// and the qualified name NetworkOPsImp::subAccountHistory) are elided by
// this listing.
3833     InfoSub::ref isrListener,
3834     AccountID const& accountId)
3835 {
     // The InfoSub tracks its own subscriptions; a second subscribe to the
     // same account is rejected.
3836     if (!isrListener->insertSubAccountHistory(accountId))
3837     {
3838         JLOG(m_journal.debug())
3839             << "subAccountHistory, already subscribed to account "
3840             << toBase58(accountId);
3841         return rpcINVALID_PARAMS;
3842     }
3843
     // NOTE(review): lines 3844-3845 (the lock and the start of the
     // SubAccountHistoryInfoWeak `ahi` initializer) are elided by this
     // listing.
3846         isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
3847     auto simIterator = mSubAccountHistory.find(accountId);
3848     if (simIterator == mSubAccountHistory.end())
3849     {
         // NOTE(review): lines 3850 (declaration of `inner`) and 3852 (the
         // mSubAccountHistory.emplace_hint/insert call) are elided.
3851         inner.emplace(isrListener->getSeq(), ahi);
3853             simIterator, std::make_pair(accountId, inner));
3854     }
3855     else
3856     {
3857         simIterator->second.emplace(isrListener->getSeq(), ahi);
3858     }
3859
3860     auto const ledger = app_.getLedgerMaster().getValidatedLedger();
3861     if (ledger)
3862     {
3863         subAccountHistoryStart(ledger, ahi);
3864     }
3865     else
3866     {
3867         // The node does not have validated ledgers, so wait for
3868         // one before start streaming.
3869         // In this case, the subscription is also considered successful.
3870         JLOG(m_journal.debug())
3871             << "subAccountHistory, no validated ledger yet, delay start";
3872     }
3873
3874     return rpcSUCCESS;
3875 }
3876
3877 void
// Cancel an account-history subscription. With historyOnly == true, only the
// backfill is stopped and the forward stream continues.
// NOTE(review): line 3878 (the qualified name, presumably
// NetworkOPsImp::unsubAccountHistory) is elided by this listing.
3879     InfoSub::ref isrListener,
3880     AccountID const& account,
3881     bool historyOnly)
3882 {
3883     if (!historyOnly)
3884         isrListener->deleteSubAccountHistory(account);
3885     unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
3886 }
3887
3888 void
// Server-side half of account-history unsubscription: signal the backfill
// job to stop, and (unless historyOnly) drop the map entry entirely.
// NOTE(review): line 3889 (the qualified name, presumably
// NetworkOPsImp::unsubAccountHistoryInternal) is elided by this listing.
3890     std::uint64_t seq,
3891     const AccountID& account,
3892     bool historyOnly)
3893 {
     // NOTE(review): line 3894 is elided -- presumably the subscription lock
     // (std::lock_guard sl(mSubLock)); confirm.
3895     auto simIterator = mSubAccountHistory.find(account);
3896     if (simIterator != mSubAccountHistory.end())
3897     {
3898         auto& subInfoMap = simIterator->second;
3899         auto subInfoIter = subInfoMap.find(seq);
3900         if (subInfoIter != subInfoMap.end())
3901         {
             // The running backfill job polls this flag and exits.
3902             subInfoIter->second.index_->stopHistorical_ = true;
3903         }
3904
3905         if (!historyOnly)
3906         {
3907             simIterator->second.erase(seq);
3908             if (simIterator->second.empty())
3909             {
3910                 mSubAccountHistory.erase(simIterator);
3911             }
3912         }
3913         JLOG(m_journal.debug())
3914             << "unsubAccountHistory, account " << toBase58(account)
3915             << ", historyOnly = " << (historyOnly ? "true" : "false");
3916     }
3917 }
3918
3919bool
3921{
3922 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
3923 listeners->addSubscriber(isrListener);
3924 else
3925 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
3926 return true;
3927}
3928
3929bool
3931{
3932 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
3933 listeners->removeSubscriber(uSeq);
3934
3935 return true;
3936}
3937
3941{
3942 // This code-path is exclusively used when the server is in standalone
3943 // mode via `ledger_accept`
3944 XRPL_ASSERT(
3945 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
3946
3947 if (!m_standalone)
3948 Throw<std::runtime_error>(
3949 "Operation only possible in STANDALONE mode.");
3950
3951 // FIXME Could we improve on this and remove the need for a specialized
3952 // API in Consensus?
3953 beginConsensus(m_ledgerMaster.getClosedLedger()->info().hash, {});
3954 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
3955 return m_ledgerMaster.getCurrentLedger()->info().seq;
3956}
3957
3958// <-- bool: true=added, false=already there
3959bool
3961{
3962 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
3963 {
3964 jvResult[jss::ledger_index] = lpClosed->info().seq;
3965 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
3966 jvResult[jss::ledger_time] = Json::Value::UInt(
3967 lpClosed->info().closeTime.time_since_epoch().count());
3968 if (!lpClosed->rules().enabled(featureXRPFees))
3969 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3970 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
3971 jvResult[jss::reserve_base] =
3972 lpClosed->fees().accountReserve(0).jsonClipped();
3973 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
3974 }
3975
3977 {
3978 jvResult[jss::validated_ledgers] =
3980 }
3981
3983 return mStreamMaps[sLedger]
3984 .emplace(isrListener->getSeq(), isrListener)
3985 .second;
3986}
3987
3988// <-- bool: true=added, false=already there
3989bool
3991{
3994 .emplace(isrListener->getSeq(), isrListener)
3995 .second;
3996}
3997
3998// <-- bool: true=erased, false=was not there
3999bool
4001{
4003 return mStreamMaps[sLedger].erase(uSeq);
4004}
4005
4006// <-- bool: true=erased, false=was not there
4007bool
4009{
4011 return mStreamMaps[sBookChanges].erase(uSeq);
4012}
4013
4014// <-- bool: true=added, false=already there
4015bool
4017{
4019 return mStreamMaps[sManifests]
4020 .emplace(isrListener->getSeq(), isrListener)
4021 .second;
4022}
4023
4024// <-- bool: true=erased, false=was not there
4025bool
4027{
4029 return mStreamMaps[sManifests].erase(uSeq);
4030}
4031
4032// <-- bool: true=added, false=already there
4033bool
4035 InfoSub::ref isrListener,
4036 Json::Value& jvResult,
4037 bool admin)
4038{
4039 uint256 uRandom;
4040
4041 if (m_standalone)
4042 jvResult[jss::stand_alone] = m_standalone;
4043
4044 // CHECKME: is it necessary to provide a random number here?
4045 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
4046
4047 auto const& feeTrack = app_.getFeeTrack();
4048 jvResult[jss::random] = to_string(uRandom);
4049 jvResult[jss::server_status] = strOperatingMode(admin);
4050 jvResult[jss::load_base] = feeTrack.getLoadBase();
4051 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4052 jvResult[jss::hostid] = getHostId(admin);
4053 jvResult[jss::pubkey_node] =
4055
4057 return mStreamMaps[sServer]
4058 .emplace(isrListener->getSeq(), isrListener)
4059 .second;
4060}
4061
4062// <-- bool: true=erased, false=was not there
4063bool
4065{
4067 return mStreamMaps[sServer].erase(uSeq);
4068}
4069
4070// <-- bool: true=added, false=already there
4071bool
4073{
4076 .emplace(isrListener->getSeq(), isrListener)
4077 .second;
4078}
4079
4080// <-- bool: true=erased, false=was not there
4081bool
4083{
4085 return mStreamMaps[sTransactions].erase(uSeq);
4086}
4087
4088// <-- bool: true=added, false=already there
4089bool
4091{
4094 .emplace(isrListener->getSeq(), isrListener)
4095 .second;
4096}
4097
4098// <-- bool: true=erased, false=was not there
4099bool
4101{
4103 return mStreamMaps[sRTTransactions].erase(uSeq);
4104}
4105
4106// <-- bool: true=added, false=already there
4107bool
4109{
4112 .emplace(isrListener->getSeq(), isrListener)
4113 .second;
4114}
4115
4116void
4118{
4119 accounting_.json(obj);
4120}
4121
4122// <-- bool: true=erased, false=was not there
4123bool
4125{
4127 return mStreamMaps[sValidations].erase(uSeq);
4128}
4129
4130// <-- bool: true=added, false=already there
4131bool
4133{
4135 return mStreamMaps[sPeerStatus]
4136 .emplace(isrListener->getSeq(), isrListener)
4137 .second;
4138}
4139
4140// <-- bool: true=erased, false=was not there
4141bool
4143{
4145 return mStreamMaps[sPeerStatus].erase(uSeq);
4146}
4147
4148// <-- bool: true=added, false=already there
4149bool
4151{
4154 .emplace(isrListener->getSeq(), isrListener)
4155 .second;
4156}
4157
4158// <-- bool: true=erased, false=was not there
4159bool
4161{
4163 return mStreamMaps[sConsensusPhase].erase(uSeq);
4164}
4165
4168{
4170
4171 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4172
4173 if (it != mRpcSubMap.end())
4174 return it->second;
4175
4176 return InfoSub::pointer();
4177}
4178
4181{
4183
4184 mRpcSubMap.emplace(strUrl, rspEntry);
4185
4186 return rspEntry;
4187}
4188
4189bool
4191{
4193 auto pInfo = findRpcSub(strUrl);
4194
4195 if (!pInfo)
4196 return false;
4197
4198 // check to see if any of the stream maps still hold a weak reference to
4199 // this entry before removing
4200 for (SubMapType const& map : mStreamMaps)
4201 {
4202 if (map.find(pInfo->getSeq()) != map.end())
4203 return false;
4204 }
4205 mRpcSubMap.erase(strUrl);
4206 return true;
4207}
4208
4209#ifndef USE_NEW_BOOK_PAGE
4210
4211// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4212// work, but it demonstrated poor performance.
4213//
4214void
4217 Book const& book,
4218 AccountID const& uTakerID,
4219 bool const bProof,
4220 unsigned int iLimit,
4221 Json::Value const& jvMarker,
4222 Json::Value& jvResult)
4223{ // CAUTION: This is the old get book page logic
4224 Json::Value& jvOffers =
4225 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4226
4228 const uint256 uBookBase = getBookBase(book);
4229 const uint256 uBookEnd = getQualityNext(uBookBase);
4230 uint256 uTipIndex = uBookBase;
4231
4232 if (auto stream = m_journal.trace())
4233 {
4234 stream << "getBookPage:" << book;
4235 stream << "getBookPage: uBookBase=" << uBookBase;
4236 stream << "getBookPage: uBookEnd=" << uBookEnd;
4237 stream << "getBookPage: uTipIndex=" << uTipIndex;
4238 }
4239
4240 ReadView const& view = *lpLedger;
4241
4242 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4243 isGlobalFrozen(view, book.in.account);
4244
4245 bool bDone = false;
4246 bool bDirectAdvance = true;
4247
4248 std::shared_ptr<SLE const> sleOfferDir;
4249 uint256 offerIndex;
4250 unsigned int uBookEntry;
4251 STAmount saDirRate;
4252
4253 auto const rate = transferRate(view, book.out.account);
4254 auto viewJ = app_.journal("View");
4255
4256 while (!bDone && iLimit-- > 0)
4257 {
4258 if (bDirectAdvance)
4259 {
4260 bDirectAdvance = false;
4261
4262 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4263
4264 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4265 if (ledgerIndex)
4266 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4267 else
4268 sleOfferDir.reset();
4269
4270 if (!sleOfferDir)
4271 {
4272 JLOG(m_journal.trace()) << "getBookPage: bDone";
4273 bDone = true;
4274 }
4275 else
4276 {
4277 uTipIndex = sleOfferDir->key();
4278 saDirRate = amountFromQuality(getQuality(uTipIndex));
4279
4280 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4281
4282 JLOG(m_journal.trace())
4283 << "getBookPage: uTipIndex=" << uTipIndex;
4284 JLOG(m_journal.trace())
4285 << "getBookPage: offerIndex=" << offerIndex;
4286 }
4287 }
4288
4289 if (!bDone)
4290 {
4291 auto sleOffer = view.read(keylet::offer(offerIndex));
4292
4293 if (sleOffer)
4294 {
4295 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4296 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4297 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4298 STAmount saOwnerFunds;
4299 bool firstOwnerOffer(true);
4300
4301 if (book.out.account == uOfferOwnerID)
4302 {
4303 // If an offer is selling issuer's own IOUs, it is fully
4304 // funded.
4305 saOwnerFunds = saTakerGets;
4306 }
4307 else if (bGlobalFreeze)
4308 {
4309 // If either asset is globally frozen, consider all offers
4310 // that aren't ours to be totally unfunded
4311 saOwnerFunds.clear(book.out);
4312 }
4313 else
4314 {
4315 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4316 if (umBalanceEntry != umBalance.end())
4317 {
4318 // Found in running balance table.
4319
4320 saOwnerFunds = umBalanceEntry->second;
4321 firstOwnerOffer = false;
4322 }
4323 else
4324 {
4325 // Did not find balance in table.
4326
4327 saOwnerFunds = accountHolds(
4328 view,
4329 uOfferOwnerID,
4330 book.out.currency,
4331 book.out.account,
4333 viewJ);
4334
4335 if (saOwnerFunds < beast::zero)
4336 {
4337 // Treat negative funds as zero.
4338
4339 saOwnerFunds.clear();
4340 }
4341 }
4342 }
4343
4344 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4345
4346 STAmount saTakerGetsFunded;
4347 STAmount saOwnerFundsLimit = saOwnerFunds;
4348 Rate offerRate = parityRate;
4349
4350 if (rate != parityRate
4351 // Have a tranfer fee.
4352 && uTakerID != book.out.account
4353 // Not taking offers of own IOUs.
4354 && book.out.account != uOfferOwnerID)
4355 // Offer owner not issuing ownfunds
4356 {
4357 // Need to charge a transfer fee to offer owner.
4358 offerRate = rate;
4359 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4360 }
4361
4362 if (saOwnerFundsLimit >= saTakerGets)
4363 {
4364 // Sufficient funds no shenanigans.
4365 saTakerGetsFunded = saTakerGets;
4366 }
4367 else
4368 {
4369 // Only provide, if not fully funded.
4370
4371 saTakerGetsFunded = saOwnerFundsLimit;
4372
4373 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4374 std::min(
4375 saTakerPays,
4376 multiply(
4377 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4378 .setJson(jvOffer[jss::taker_pays_funded]);
4379 }
4380
4381 STAmount saOwnerPays = (parityRate == offerRate)
4382 ? saTakerGetsFunded
4383 : std::min(
4384 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4385
4386 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4387
4388 // Include all offers funded and unfunded
4389 Json::Value& jvOf = jvOffers.append(jvOffer);
4390 jvOf[jss::quality] = saDirRate.getText();
4391
4392 if (firstOwnerOffer)
4393 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4394 }
4395 else
4396 {
4397 JLOG(m_journal.warn()) << "Missing offer";
4398 }
4399
4400 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4401 {
4402 bDirectAdvance = true;
4403 }
4404 else
4405 {
4406 JLOG(m_journal.trace())
4407 << "getBookPage: offerIndex=" << offerIndex;
4408 }
4409 }
4410 }
4411
4412 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4413 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4414}
4415
4416#else
4417
4418// This is the new code that uses the book iterators
4419// It has temporarily been disabled
4420
4421void
4424 Book const& book,
4425 AccountID const& uTakerID,
4426 bool const bProof,
4427 unsigned int iLimit,
4428 Json::Value const& jvMarker,
4429 Json::Value& jvResult)
4430{
4431 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4432
4434
4435 MetaView lesActive(lpLedger, tapNONE, true);
4436 OrderBookIterator obIterator(lesActive, book);
4437
4438 auto const rate = transferRate(lesActive, book.out.account);
4439
4440 const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4441 lesActive.isGlobalFrozen(book.in.account);
4442
4443 while (iLimit-- > 0 && obIterator.nextOffer())
4444 {
4445 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4446 if (sleOffer)
4447 {
4448 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4449 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4450 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4451 STAmount saDirRate = obIterator.getCurrentRate();
4452 STAmount saOwnerFunds;
4453
4454 if (book.out.account == uOfferOwnerID)
4455 {
4456 // If offer is selling issuer's own IOUs, it is fully funded.
4457 saOwnerFunds = saTakerGets;
4458 }
4459 else if (bGlobalFreeze)
4460 {
4461 // If either asset is globally frozen, consider all offers
4462 // that aren't ours to be totally unfunded
4463 saOwnerFunds.clear(book.out);
4464 }
4465 else
4466 {
4467 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4468
4469 if (umBalanceEntry != umBalance.end())
4470 {
4471 // Found in running balance table.
4472
4473 saOwnerFunds = umBalanceEntry->second;
4474 }
4475 else
4476 {
4477 // Did not find balance in table.
4478
4479 saOwnerFunds = lesActive.accountHolds(
4480 uOfferOwnerID,
4481 book.out.currency,
4482 book.out.account,
4484
4485 if (saOwnerFunds.isNegative())
4486 {
4487 // Treat negative funds as zero.
4488
4489 saOwnerFunds.zero();
4490 }
4491 }
4492 }
4493
4494 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4495
4496 STAmount saTakerGetsFunded;
4497 STAmount saOwnerFundsLimit = saOwnerFunds;
4498 Rate offerRate = parityRate;
4499
4500 if (rate != parityRate
4501 // Have a tranfer fee.
4502 && uTakerID != book.out.account
4503 // Not taking offers of own IOUs.
4504 && book.out.account != uOfferOwnerID)
4505 // Offer owner not issuing ownfunds
4506 {
4507 // Need to charge a transfer fee to offer owner.
4508 offerRate = rate;
4509 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4510 }
4511
4512 if (saOwnerFundsLimit >= saTakerGets)
4513 {
4514 // Sufficient funds no shenanigans.
4515 saTakerGetsFunded = saTakerGets;
4516 }
4517 else
4518 {
4519 // Only provide, if not fully funded.
4520 saTakerGetsFunded = saOwnerFundsLimit;
4521
4522 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4523
4524 // TOOD(tom): The result of this expression is not used - what's
4525 // going on here?
4526 std::min(
4527 saTakerPays,
4528 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4529 .setJson(jvOffer[jss::taker_pays_funded]);
4530 }
4531
4532 STAmount saOwnerPays = (parityRate == offerRate)
4533 ? saTakerGetsFunded
4534 : std::min(
4535 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4536
4537 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4538
4539 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4540 {
4541 // Only provide funded offers and offers of the taker.
4542 Json::Value& jvOf = jvOffers.append(jvOffer);
4543 jvOf[jss::quality] = saDirRate.getText();
4544 }
4545 }
4546 }
4547
4548 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4549 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4550}
4551
4552#endif
4553
// NOTE(review): the extraction that produced this text dropped interleaved
// lines: the function name (collect_metrics, per the member index), a lock
// acquisition, and the receiver expressions of the dropped `.set(...)` calls
// (presumably insight gauges for per-mode durations/transitions — confirm
// against the class's stats members). The leading numbers are extraction
// artifacts, not code. Kept byte-identical pending recovery of the original.
4554 inline void
4556 {
// Snapshot state-accounting counters and fold the time spent in the
// current mode into its duration bucket before publishing.
4557 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4558 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4560 counters[static_cast<std::size_t>(mode)].dur += current;
4561
// Publish cumulative microseconds spent in each operating mode.
4564 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4565 .dur.count());
4567 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4568 .dur.count());
4570 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4572 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4573 .dur.count());
4575 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4576
// Publish the number of transitions into each operating mode.
4578 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4579 .transitions);
4581 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4582 .transitions);
4584 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4586 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4587 .transitions);
4589 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4590}
4591
4592void
4594{
4595 auto now = std::chrono::steady_clock::now();
4596
4597 std::lock_guard lock(mutex_);
4598 ++counters_[static_cast<std::size_t>(om)].transitions;
4599 if (om == OperatingMode::FULL &&
4600 counters_[static_cast<std::size_t>(om)].transitions == 1)
4601 {
4602 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4603 now - processStart_)
4604 .count();
4605 }
4606 counters_[static_cast<std::size_t>(mode_)].dur +=
4607 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4608
4609 mode_ = om;
4610 start_ = now;
4611}
4612
4613void
4615{
4616 auto [counters, mode, start, initialSync] = getCounterData();
4617 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4619 counters[static_cast<std::size_t>(mode)].dur += current;
4620
4621 obj[jss::state_accounting] = Json::objectValue;
4623 i <= static_cast<std::size_t>(OperatingMode::FULL);
4624 ++i)
4625 {
4626 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4627 auto& state = obj[jss::state_accounting][states_[i]];
4628 state[jss::transitions] = std::to_string(counters[i].transitions);
4629 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4630 }
4631 obj[jss::server_state_duration_us] = std::to_string(current.count());
4632 if (initialSync)
4633 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4634}
4635
4636//------------------------------------------------------------------------------
4637
4640 Application& app,
4642 bool standalone,
4643 std::size_t minPeerCount,
4644 bool startvalid,
4645 JobQueue& job_queue,
4647 ValidatorKeys const& validatorKeys,
4648 boost::asio::io_service& io_svc,
4649 beast::Journal journal,
4650 beast::insight::Collector::ptr const& collector)
4651{
4652 return std::make_unique<NetworkOPsImp>(
4653 app,
4654 clock,
4655 standalone,
4656 minPeerCount,
4657 startvalid,
4658 job_queue,
4660 validatorKeys,
4661 io_svc,
4662 journal,
4663 collector);
4664}
4665
4666} // namespace ripple
T back_inserter(T... args)
T begin(T... args)
T bind(T... args)
Decorator for streaming out compact json.
Definition: json_writer.h:318
Lightweight wrapper to tag static string.
Definition: json_value.h:62
Represents a JSON value.
Definition: json_value.h:148
Json::UInt UInt
Definition: json_value.h:155
Value get(UInt index, const Value &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Definition: json_value.cpp:847
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:897
bool isMember(const char *key) const
Return true if the object has a member named key.
Definition: json_value.cpp:949
A generic endpoint for log messages.
Definition: Journal.h:60
Stream error() const
Definition: Journal.h:346
Stream debug() const
Definition: Journal.h:328
Stream info() const
Definition: Journal.h:334
Stream trace() const
Severity stream access functions.
Definition: Journal.h:322
Stream warn() const
Definition: Journal.h:340
A metric for measuring an integral value.
Definition: Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition: Gauge.h:68
A reference to a handler for performing polled collection.
Definition: Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition: Book.h:35
Issue in
Definition: Book.h:37
Issue out
Definition: Book.h:38
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition: ClusterNode.h:46
std::uint32_t getLoadFee() const
Definition: ClusterNode.h:52
NetClock::time_point getReportTime() const
Definition: ClusterNode.h:58
PublicKey const & identity() const
Definition: ClusterNode.h:64
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:49
std::string SERVER_DOMAIN
Definition: Config.h:279
std::size_t NODE_SIZE
Definition: Config.h:213
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition: Config.h:160
int RELAY_UNTRUSTED_VALIDATIONS
Definition: Config.h:169
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition: InfoSub.h:54
AccountID account
Definition: Issue.h:39
Currency currency
Definition: Issue.h:38
A pool of threads to perform work.
Definition: JobQueue.h:56
Json::Value getJson(int c=0)
Definition: JobQueue.cpp:214
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
Definition: LedgerMaster.h:265
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:79
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
Definition: LoadFeeTrack.h:45
std::uint32_t getClusterFee() const
Definition: LoadFeeTrack.h:82
std::uint32_t getLocalFee() const
Definition: LoadFeeTrack.h:75
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:89
std::uint32_t getRemoteFee() const
Definition: LoadFeeTrack.h:68
std::uint32_t getLoadFactor() const
Definition: LoadFeeTrack.h:95
Manages load sources.
Definition: LoadManager.h:46
void heartbeat()
Reset the stall detection timer.
Definition: LoadManager.cpp:64
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
Definition: NetworkOPs.cpp:140
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
Definition: NetworkOPs.cpp:150
std::chrono::steady_clock::time_point start_
Definition: NetworkOPs.cpp:152
static std::array< Json::StaticString const, 5 > const states_
Definition: NetworkOPs.cpp:156
std::chrono::steady_clock::time_point const processStart_
Definition: NetworkOPs.cpp:154
Transaction with input flags and results to be applied in batches.
Definition: NetworkOPs.cpp:91
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
Definition: NetworkOPs.cpp:100
std::shared_ptr< Transaction > const transaction
Definition: NetworkOPs.cpp:93
boost::asio::steady_timer accountHistoryTxTimer_
Definition: NetworkOPs.cpp:738
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
Definition: NetworkOPs.cpp:873
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::vector< TransactionStatus > mTransactions
Definition: NetworkOPs.cpp:785
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Definition: NetworkOPs.cpp:728
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
Definition: NetworkOPs.cpp:740
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
Definition: NetworkOPs.cpp:891
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
Definition: NetworkOPs.cpp:736
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
Definition: NetworkOPs.cpp:117
std::optional< PublicKey > const validatorPK_
Definition: NetworkOPs.cpp:742
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
Definition: NetworkOPs.cpp:724
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
Definition: NetworkOPs.cpp:267
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Definition: NetworkOPs.cpp:754
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, const bool bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
Definition: NetworkOPs.cpp:737
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
Definition: NetworkOPs.cpp:123
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:223
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
Definition: NetworkOPs.cpp:731
beast::Journal m_journal
Definition: NetworkOPs.cpp:722
SubInfoMapType mSubAccount
Definition: NetworkOPs.cpp:749
std::optional< PublicKey > const validatorMasterPK_
Definition: NetworkOPs.cpp:743
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
Definition: NetworkOPs.cpp:789
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
Definition: NetworkOPs.cpp:735
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
Definition: NetworkOPs.cpp:944
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
Definition: NetworkOPs.cpp:769
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
Definition: NetworkOPs.cpp:779
std::atomic< bool > unlBlocked_
Definition: NetworkOPs.cpp:733
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
Definition: NetworkOPs.cpp:726
std::atomic< bool > needNetworkLedger_
Definition: NetworkOPs.cpp:730
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
Definition: NetworkOPs.cpp:787
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
Definition: NetworkOPs.cpp:903
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
Definition: NetworkOPs.cpp:747
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
Definition: NetworkOPs.cpp:934
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
Definition: NetworkOPs.cpp:771
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
Definition: NetworkOPs.cpp:885
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
Definition: NetworkOPs.cpp:782
void setMode(OperatingMode om) override
void stop() override
Definition: NetworkOPs.cpp:571
void clearNeedNetworkLedger() override
Definition: NetworkOPs.cpp:897
DispatchState mDispatchState
Definition: NetworkOPs.cpp:784
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
Definition: NetworkOPs.cpp:750
bool checkLastClosedLedger(const Overlay::PeerSequence &, uint256 &networkClosed)
void reportFeeChange() override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
Definition: NetworkOPs.cpp:909
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
Definition: NetworkOPs.cpp:790
Application & app_
Definition: NetworkOPs.cpp:721
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
Definition: NetworkOPs.cpp:745
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
Definition: NetworkOPs.cpp:752
std::atomic< bool > amendmentWarned_
Definition: NetworkOPs.cpp:732
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
Definition: NetworkOPs.cpp:915
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition: NetworkOPs.h:88
void getCountsJson(Json::Value &obj)
Definition: Database.cpp:268
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:49
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:57
BookListeners::pointer getBookListeners(Book const &)
BookListeners::pointer makeBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, const AcceptedLedgerTx &alTx, MultiApiJson const &jvObj)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
Definition: RCLConsensus.h:53
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
Definition: RCLConsensus.h:447
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Definition: RCLConsensus.h:460
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:44
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Definition: RCLCxPeerPos.h:78
Represents a set of transactions in RCLConsensus.
Definition: RCLCxTx.h:63
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition: RFC1751.cpp:507
Collects logging information.
Definition: RCLConsensus.h:551
std::unique_ptr< std::stringstream > const & ss()
Definition: RCLConsensus.h:565
A view into a ledger.
Definition: ReadView.h:52
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition: STAmount.cpp:507
std::string getText() const override
Definition: STAmount.cpp:547
Issue const & issue() const
Definition: STAmount.h:487
std::optional< T > get(std::string const &name) const
Definition: BasicConfig.h:140
std::size_t size() const noexcept
Definition: Serializer.h:73
void const * data() const noexcept
Definition: Serializer.h:79
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition: TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition: TxQ.cpp:1778
static time_point now()
Definition: UptimeClock.cpp:67
Validator keys and manifest as set in configuration file.
Definition: ValidatorKeys.h:38
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition: XRPAmount.h:262
Json::Value jsonClipped() const
Definition: XRPAmount.h:218
iterator begin()
Definition: base_uint.h:136
static constexpr std::size_t size()
Definition: base_uint.h:526
bool isZero() const
Definition: base_uint.h:540
bool isNonZero() const
Definition: base_uint.h:545
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition: scope.h:231
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition: json_value.h:43
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:44
int Int
Definition: json_forwards.h:26
unsigned int UInt
Definition: json_forwards.h:27
void rngfill(void *buffer, std::size_t bytes, Generator &g)
Definition: rngfill.h:34
std::string const & getVersionString()
Server version.
Definition: BuildInfo.cpp:68
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition: BookChanges.h:47
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition: Indexes.cpp:175
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition: Indexes.cpp:371
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition: Indexes.cpp:265
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:32
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:114
STAmount divide(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:93
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition: STTx.cpp:631
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition: View.cpp:443
@ fhZERO_IF_FROZEN
Definition: View.h:76
@ fhIGNORE_FREEZE
Definition: View.h:76
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition: View.cpp:137
std::uint64_t getQuality(uint256 const &uBase)
Definition: Indexes.cpp:140
@ INCLUDED
Definition: Transaction.h:49
@ OBSOLETE
Definition: Transaction.h:54
@ INVALID
Definition: Transaction.h:48
error_code_i
Definition: ErrorCodes.h:40
@ rpcSUCCESS
Definition: ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition: ErrorCodes.h:84
@ rpcINTERNAL
Definition: ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
Definition: SecretKey.cpp:369
auto constexpr muldiv_max
Definition: mulDiv.h:29
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition: LocalTxs.cpp:197
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition: View.cpp:650
STAmount amountFromQuality(std::uint64_t rate)
Definition: STAmount.cpp:854
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition: ErrorCodes.h:167
@ warnRPC_UNSUPPORTED_MAJORITY
Definition: ErrorCodes.h:165
@ warnRPC_AMENDMENT_BLOCKED
Definition: ErrorCodes.h:166
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition: NetworkOPs.h:67
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:53
bool isTesSuccess(TER x)
Definition: TER.h:656
bool isTerRetry(TER x)
Definition: TER.h:650
AccountID calcAccountID(PublicKey const &pk)
Definition: AccountID.cpp:168
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Definition: csprng.cpp:103
Json::Value rpcError(int iError)
Definition: RPCErr.cpp:31
@ tefPAST_SEQ
Definition: TER.h:175
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
Definition: NetworkOPs.cpp:854
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
bool isTemMalformed(TER x)
Definition: TER.h:638
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition: View.cpp:148
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition: ApiVersion.h:102
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:75
@ tesSUCCESS
Definition: TER.h:242
uint256 getQualityNext(uint256 const &uBase)
Definition: Indexes.cpp:132
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition: View.cpp:309
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition: ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition: chrono.h:92
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:630
FeeSetup setup_FeeVote(Section const &section)
Definition: Config.cpp:1092
Number root(Number f, unsigned d)
Definition: Number.cpp:635
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Definition: mulDiv.cpp:32
ApplyFlags
Definition: ApplyView.h:31
@ tapFAIL_HARD
Definition: ApplyView.h:36
@ tapUNLIMITED
Definition: ApplyView.h:43
@ tapNONE
Definition: ApplyView.h:32
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:38
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition: Seed.cpp:76
@ terQUEUED
Definition: TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition: TER.cpp:242
@ jtNETOP_CLUSTER
Definition: Job.h:75
@ jtCLIENT_FEE_CHANGE
Definition: Job.h:47
@ jtTRANSACTION
Definition: Job.h:62
@ jtTXN_PROC
Definition: Job.h:82
@ jtCLIENT_CONSENSUS
Definition: Job.h:48
@ jtBATCH
Definition: Job.h:65
@ jtCLIENT_ACCT_HIST
Definition: Job.h:49
uint256 getBookBase(Book const &book)
Definition: Indexes.cpp:113
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition: View.cpp:174
static std::uint32_t trunc32(std::uint64_t v)
@ temBAD_SIGNATURE
Definition: TER.h:105
static auto const genesisAccountId
Definition: NetworkOPs.cpp:867
STL namespace.
T ref(T... args)
T reset(T... args)
T set_intersection(T... args)
T str(T... args)
std::string serialized
The manifest in serialized form.
Definition: Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition: Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition: Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition: Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Definition: Manifest.h:86
Server fees published on server subscription.
Definition: NetworkOPs.cpp:200
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
Definition: NetworkOPs.cpp:219
bool operator==(ServerFeeSummary const &b) const
Definition: NetworkOPs.cpp:211
beast::insight::Gauge full_transitions
Definition: NetworkOPs.cpp:841
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:796
beast::insight::Hook hook
Definition: NetworkOPs.cpp:830
beast::insight::Gauge connected_duration
Definition: NetworkOPs.cpp:832
beast::insight::Gauge tracking_duration
Definition: NetworkOPs.cpp:834
beast::insight::Gauge connected_transitions
Definition: NetworkOPs.cpp:838
beast::insight::Gauge disconnected_transitions
Definition: NetworkOPs.cpp:837
beast::insight::Gauge syncing_duration
Definition: NetworkOPs.cpp:833
beast::insight::Gauge tracking_transitions
Definition: NetworkOPs.cpp:840
beast::insight::Gauge full_duration
Definition: NetworkOPs.cpp:835
beast::insight::Gauge disconnected_duration
Definition: NetworkOPs.cpp:831
beast::insight::Gauge syncing_transitions
Definition: NetworkOPs.cpp:839
SubAccountHistoryIndex(AccountID const &accountId)
Definition: NetworkOPs.cpp:685
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:704
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:699
Represents a transfer rate.
Definition: Rate.h:40
Data format for exchanging consumption information across peers.
Definition: Gossip.h:30
std::vector< Item > items
Definition: Gossip.h:42
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition: TxQ.h:165
void set(const char *key, auto const &v)
Definition: MultiApiJson.h:83
IsMemberResult isMember(const char *key) const
Definition: MultiApiJson.h:94
Select all peers (except optional excluded) that are in our cluster.
Definition: predicates.h:137
Sends a message to all peers.
Definition: predicates.h:32
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)