// NetworkOPs.cpp — rippled network operations implementation.
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2012, 2013 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
19
#include <xrpld/app/consensus/RCLConsensus.h>
#include <xrpld/app/consensus/RCLValidations.h>
#include <xrpld/app/ledger/AcceptedLedger.h>
#include <xrpld/app/ledger/InboundLedgers.h>
#include <xrpld/app/ledger/LedgerMaster.h>
#include <xrpld/app/ledger/LedgerToJson.h>
#include <xrpld/app/ledger/LocalTxs.h>
#include <xrpld/app/ledger/OpenLedger.h>
#include <xrpld/app/ledger/OrderBookDB.h>
#include <xrpld/app/ledger/TransactionMaster.h>
#include <xrpld/app/main/LoadManager.h>
#include <xrpld/app/misc/AmendmentTable.h>
#include <xrpld/app/misc/DeliverMax.h>
#include <xrpld/app/misc/HashRouter.h>
#include <xrpld/app/misc/LoadFeeTrack.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/app/misc/Transaction.h>
#include <xrpld/app/misc/TxQ.h>
#include <xrpld/app/misc/ValidatorList.h>
#include <xrpld/app/misc/detail/AccountTxPaging.h>
#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
#include <xrpld/app/tx/apply.h>
#include <xrpld/consensus/Consensus.h>
#include <xrpld/consensus/ConsensusParms.h>
#include <xrpld/overlay/Cluster.h>
#include <xrpld/overlay/Overlay.h>
#include <xrpld/overlay/predicates.h>
#include <xrpld/perflog/PerfLog.h>
#include <xrpld/rpc/BookChanges.h>
#include <xrpld/rpc/DeliveredAmount.h>
#include <xrpld/rpc/MPTokenIssuanceID.h>
#include <xrpld/rpc/ServerHandler.h>

#include <xrpl/basics/UptimeClock.h>
#include <xrpl/basics/mulDiv.h>
#include <xrpl/basics/safe_cast.h>
#include <xrpl/basics/scope.h>
#include <xrpl/beast/utility/rngfill.h>
#include <xrpl/crypto/RFC1751.h>
#include <xrpl/crypto/csprng.h>
#include <xrpl/protocol/BuildInfo.h>
#include <xrpl/protocol/Feature.h>
#include <xrpl/protocol/MultiApiJson.h>
#include <xrpl/protocol/RPCErr.h>
#include <xrpl/protocol/jss.h>
#include <xrpl/resource/Fees.h>
#include <xrpl/resource/ResourceManager.h>

#include <boost/asio/ip/host_name.hpp>
#include <boost/asio/steady_timer.hpp>

#include <algorithm>
#include <array>
#include <exception>
#include <mutex>
#include <optional>
#include <set>
#include <sstream>
#include <string>
#include <tuple>
#include <unordered_map>
80
81namespace ripple {
82
83class NetworkOPsImp final : public NetworkOPs
84{
90 {
91 public:
93 bool const admin;
94 bool const local;
96 bool applied = false;
98
101 bool a,
102 bool l,
103 FailHard f)
104 : transaction(t), admin(a), local(l), failType(f)
105 {
106 XRPL_ASSERT(
108 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
109 "valid inputs");
110 }
111 };
112
116 enum class DispatchState : unsigned char {
117 none,
118 scheduled,
119 running,
120 };
121
123
139 {
140 struct Counters
141 {
142 explicit Counters() = default;
143
146 };
147
151 std::chrono::steady_clock::time_point start_ =
153 std::chrono::steady_clock::time_point const processStart_ = start_;
156
157 public:
159 {
161 .transitions = 1;
162 }
163
170 void
172
178 void
179 json(Json::Value& obj) const;
180
182 {
184 decltype(mode_) mode;
185 decltype(start_) start;
187 };
188
191 {
194 }
195 };
196
199 {
200 ServerFeeSummary() = default;
201
203 XRPAmount fee,
204 TxQ::Metrics&& escalationMetrics,
205 LoadFeeTrack const& loadFeeTrack);
206 bool
207 operator!=(ServerFeeSummary const& b) const;
208
209 bool
211 {
212 return !(*this != b);
213 }
214
219 };
220
221public:
223 Application& app,
225 bool standalone,
226 std::size_t minPeerCount,
227 bool start_valid,
228 JobQueue& job_queue,
230 ValidatorKeys const& validatorKeys,
231 boost::asio::io_service& io_svc,
232 beast::Journal journal,
233 beast::insight::Collector::ptr const& collector)
234 : app_(app)
235 , m_journal(journal)
238 , heartbeatTimer_(io_svc)
239 , clusterTimer_(io_svc)
240 , accountHistoryTxTimer_(io_svc)
241 , mConsensus(
242 app,
244 setup_FeeVote(app_.config().section("voting")),
245 app_.logs().journal("FeeVote")),
247 *m_localTX,
248 app.getInboundTransactions(),
249 beast::get_abstract_clock<std::chrono::steady_clock>(),
250 validatorKeys,
251 app_.logs().journal("LedgerConsensus"))
253 , m_job_queue(job_queue)
254 , m_standalone(standalone)
255 , minPeerCount_(start_valid ? 0 : minPeerCount)
256 , m_stats(std::bind(&NetworkOPsImp::collect_metrics, this), collector)
257 {
258 }
259
260 ~NetworkOPsImp() override
261 {
262 // This clear() is necessary to ensure the shared_ptrs in this map get
263 // destroyed NOW because the objects in this map invoke methods on this
264 // class when they are destroyed
266 }
267
268public:
270 getOperatingMode() const override;
271
273 strOperatingMode(OperatingMode const mode, bool const admin) const override;
274
276 strOperatingMode(bool const admin = false) const override;
277
278 //
279 // Transaction operations.
280 //
281
282 // Must complete immediately.
283 void
285
286 void
288 std::shared_ptr<Transaction>& transaction,
289 bool bUnlimited,
290 bool bLocal,
291 FailHard failType) override;
292
301 void
304 bool bUnlimited,
305 FailHard failType);
306
316 void
319 bool bUnlimited,
320 FailHard failtype);
321
325 void
327
333 void
335
336 //
337 // Owner functions.
338 //
339
343 AccountID const& account) override;
344
345 //
346 // Book functions.
347 //
348
349 void
352 Book const&,
353 AccountID const& uTakerID,
354 const bool bProof,
355 unsigned int iLimit,
356 Json::Value const& jvMarker,
357 Json::Value& jvResult) override;
358
359 // Ledger proposal/close functions.
360 bool
362
363 bool
366 std::string const& source) override;
367
368 void
369 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
370
371 // Network state machine.
372
373 // Used for the "jump" case.
374private:
375 void
377 bool
379
380public:
381 bool
383 uint256 const& networkClosed,
384 std::unique_ptr<std::stringstream> const& clog) override;
385 void
387 void
388 setStandAlone() override;
389
393 void
394 setStateTimer() override;
395
396 void
397 setNeedNetworkLedger() override;
398 void
399 clearNeedNetworkLedger() override;
400 bool
401 isNeedNetworkLedger() override;
402 bool
403 isFull() override;
404
405 void
406 setMode(OperatingMode om) override;
407
408 bool
409 isBlocked() override;
410 bool
411 isAmendmentBlocked() override;
412 void
413 setAmendmentBlocked() override;
414 bool
415 isAmendmentWarned() override;
416 void
417 setAmendmentWarned() override;
418 void
419 clearAmendmentWarned() override;
420 bool
421 isUNLBlocked() override;
422 void
423 setUNLBlocked() override;
424 void
425 clearUNLBlocked() override;
426 void
427 consensusViewChange() override;
428
430 getConsensusInfo() override;
432 getServerInfo(bool human, bool admin, bool counters) override;
433 void
434 clearLedgerFetch() override;
436 getLedgerFetchInfo() override;
439 std::optional<std::chrono::milliseconds> consensusDelay) override;
440 void
441 reportFeeChange() override;
442 void
444
445 void
446 updateLocalTx(ReadView const& view) override;
448 getLocalTxCount() override;
449
450 //
451 // Monitoring: publisher side.
452 //
453 void
454 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
455 void
458 std::shared_ptr<STTx const> const& transaction,
459 TER result) override;
460 void
461 pubValidation(std::shared_ptr<STValidation> const& val) override;
462
463 //--------------------------------------------------------------------------
464 //
465 // InfoSub::Source.
466 //
467 void
469 InfoSub::ref ispListener,
470 hash_set<AccountID> const& vnaAccountIDs,
471 bool rt) override;
472 void
474 InfoSub::ref ispListener,
475 hash_set<AccountID> const& vnaAccountIDs,
476 bool rt) override;
477
478 // Just remove the subscription from the tracking
479 // not from the InfoSub. Needed for InfoSub destruction
480 void
482 std::uint64_t seq,
483 hash_set<AccountID> const& vnaAccountIDs,
484 bool rt) override;
485
487 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
488 override;
489 void
491 InfoSub::ref ispListener,
492 AccountID const& account,
493 bool historyOnly) override;
494
495 void
497 std::uint64_t seq,
498 AccountID const& account,
499 bool historyOnly) override;
500
501 bool
502 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
503 bool
504 unsubLedger(std::uint64_t uListener) override;
505
506 bool
507 subBookChanges(InfoSub::ref ispListener) override;
508 bool
509 unsubBookChanges(std::uint64_t uListener) override;
510
511 bool
512 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
513 override;
514 bool
515 unsubServer(std::uint64_t uListener) override;
516
517 bool
518 subBook(InfoSub::ref ispListener, Book const&) override;
519 bool
520 unsubBook(std::uint64_t uListener, Book const&) override;
521
522 bool
523 subManifests(InfoSub::ref ispListener) override;
524 bool
525 unsubManifests(std::uint64_t uListener) override;
526 void
527 pubManifest(Manifest const&) override;
528
529 bool
530 subTransactions(InfoSub::ref ispListener) override;
531 bool
532 unsubTransactions(std::uint64_t uListener) override;
533
534 bool
535 subRTTransactions(InfoSub::ref ispListener) override;
536 bool
537 unsubRTTransactions(std::uint64_t uListener) override;
538
539 bool
540 subValidations(InfoSub::ref ispListener) override;
541 bool
542 unsubValidations(std::uint64_t uListener) override;
543
544 bool
545 subPeerStatus(InfoSub::ref ispListener) override;
546 bool
547 unsubPeerStatus(std::uint64_t uListener) override;
548 void
549 pubPeerStatus(std::function<Json::Value(void)> const&) override;
550
551 bool
552 subConsensus(InfoSub::ref ispListener) override;
553 bool
554 unsubConsensus(std::uint64_t uListener) override;
555
557 findRpcSub(std::string const& strUrl) override;
559 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
560 bool
561 tryRemoveRpcSub(std::string const& strUrl) override;
562
563 void
564 stop() override
565 {
566 {
567 boost::system::error_code ec;
568 heartbeatTimer_.cancel(ec);
569 if (ec)
570 {
571 JLOG(m_journal.error())
572 << "NetworkOPs: heartbeatTimer cancel error: "
573 << ec.message();
574 }
575
576 ec.clear();
577 clusterTimer_.cancel(ec);
578 if (ec)
579 {
580 JLOG(m_journal.error())
581 << "NetworkOPs: clusterTimer cancel error: "
582 << ec.message();
583 }
584
585 ec.clear();
586 accountHistoryTxTimer_.cancel(ec);
587 if (ec)
588 {
589 JLOG(m_journal.error())
590 << "NetworkOPs: accountHistoryTxTimer cancel error: "
591 << ec.message();
592 }
593 }
594 // Make sure that any waitHandlers pending in our timers are done.
595 using namespace std::chrono_literals;
596 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
597 }
598
599 void
600 stateAccounting(Json::Value& obj) override;
601
602private:
603 void
604 setTimer(
605 boost::asio::steady_timer& timer,
606 std::chrono::milliseconds const& expiry_time,
607 std::function<void()> onExpire,
608 std::function<void()> onError);
609 void
611 void
613 void
615 void
617
619 transJson(
620 std::shared_ptr<STTx const> const& transaction,
621 TER result,
622 bool validated,
625
626 void
629 AcceptedLedgerTx const& transaction,
630 bool last);
631
632 void
635 AcceptedLedgerTx const& transaction,
636 bool last);
637
638 void
641 std::shared_ptr<STTx const> const& transaction,
642 TER result);
643
644 void
645 pubServer();
646 void
648
650 getHostId(bool forAdmin);
651
652private:
656
657 /*
658 * With a validated ledger to separate history and future, the node
659 * streams historical txns with negative indexes starting from -1,
660 * and streams future txns starting from index 0.
661 * The SubAccountHistoryIndex struct maintains these indexes.
662 * It also has a flag stopHistorical_ for stopping streaming
663 * the historical txns.
664 */
666 {
668 // forward
670 // separate backward and forward
672 // history, backward
677
679 : accountId_(accountId)
680 , forwardTxIndex_(0)
683 , historyTxIndex_(-1)
684 , haveHistorical_(false)
685 , stopHistorical_(false)
686 {
687 }
688 };
690 {
693 };
695 {
698 };
701
705 void
709 void
711 void
713
716
718
720
722
727
729 boost::asio::steady_timer heartbeatTimer_;
730 boost::asio::steady_timer clusterTimer_;
731 boost::asio::steady_timer accountHistoryTxTimer_;
732
734
736
738
741
743
745
746 enum SubTypes {
747 sLedger, // Accepted ledgers.
748 sManifests, // Received validator manifests.
749 sServer, // When server changes connectivity state.
750 sTransactions, // All accepted transactions.
751 sRTTransactions, // All proposed and accepted transactions.
752 sValidations, // Received validations.
753 sPeerStatus, // Peer status changes.
754 sConsensusPhase, // Consensus phase
755 sBookChanges, // Per-ledger order book changes
756 sLastEntry // Any new entry must be ADDED ABOVE this one
757 };
758
760
762
764
765 // Whether we are in standalone mode.
766 bool const m_standalone;
767
768 // The number of nodes that we need to consider ourselves connected.
770
771 // Transaction batching.
776
778
781
782private:
783 struct Stats
784 {
785 template <class Handler>
787 Handler const& handler,
788 beast::insight::Collector::ptr const& collector)
789 : hook(collector->make_hook(handler))
790 , disconnected_duration(collector->make_gauge(
791 "State_Accounting",
792 "Disconnected_duration"))
793 , connected_duration(collector->make_gauge(
794 "State_Accounting",
795 "Connected_duration"))
797 collector->make_gauge("State_Accounting", "Syncing_duration"))
798 , tracking_duration(collector->make_gauge(
799 "State_Accounting",
800 "Tracking_duration"))
802 collector->make_gauge("State_Accounting", "Full_duration"))
803 , disconnected_transitions(collector->make_gauge(
804 "State_Accounting",
805 "Disconnected_transitions"))
806 , connected_transitions(collector->make_gauge(
807 "State_Accounting",
808 "Connected_transitions"))
809 , syncing_transitions(collector->make_gauge(
810 "State_Accounting",
811 "Syncing_transitions"))
812 , tracking_transitions(collector->make_gauge(
813 "State_Accounting",
814 "Tracking_transitions"))
816 collector->make_gauge("State_Accounting", "Full_transitions"))
817 {
818 }
819
826
832 };
833
834 std::mutex m_statsMutex; // Mutex to lock m_stats
836
837private:
838 void
840};
841
842//------------------------------------------------------------------------------
843
845 {"disconnected", "connected", "syncing", "tracking", "full"}};
846
848
856
857static auto const genesisAccountId = calcAccountID(
859 .first);
860
861//------------------------------------------------------------------------------
862inline OperatingMode
864{
865 return mMode;
866}
867
868inline std::string
869NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
870{
871 return strOperatingMode(mMode, admin);
872}
873
874inline void
876{
878}
879
880inline void
882{
883 needNetworkLedger_ = true;
884}
885
886inline void
888{
889 needNetworkLedger_ = false;
890}
891
892inline bool
894{
895 return needNetworkLedger_;
896}
897
898inline bool
900{
902}
903
906{
907 static std::string const hostname = boost::asio::ip::host_name();
908
909 if (forAdmin)
910 return hostname;
911
912 // For non-admin uses hash the node public key into a
913 // single RFC1751 word:
914 static std::string const shroudedHostId = [this]() {
915 auto const& id = app_.nodeIdentity();
916
917 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
918 }();
919
920 return shroudedHostId;
921}
922
923void
925{
927
928 // Only do this work if a cluster is configured
929 if (app_.cluster().size() != 0)
931}
932
933void
935 boost::asio::steady_timer& timer,
936 const std::chrono::milliseconds& expiry_time,
937 std::function<void()> onExpire,
938 std::function<void()> onError)
939{
940 // Only start the timer if waitHandlerCounter_ is not yet joined.
941 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
942 [this, onExpire, onError](boost::system::error_code const& e) {
943 if ((e.value() == boost::system::errc::success) &&
944 (!m_job_queue.isStopped()))
945 {
946 onExpire();
947 }
948 // Recover as best we can if an unexpected error occurs.
949 if (e.value() != boost::system::errc::success &&
950 e.value() != boost::asio::error::operation_aborted)
951 {
952 // Try again later and hope for the best.
953 JLOG(m_journal.error())
954 << "Timer got error '" << e.message()
955 << "'. Restarting timer.";
956 onError();
957 }
958 }))
959 {
960 timer.expires_from_now(expiry_time);
961 timer.async_wait(std::move(*optionalCountedHandler));
962 }
963}
964
965void
966NetworkOPsImp::setHeartbeatTimer()
967{
968 setTimer(
969 heartbeatTimer_,
970 mConsensus.parms().ledgerGRANULARITY,
971 [this]() {
972 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
973 processHeartbeatTimer();
974 });
975 },
976 [this]() { setHeartbeatTimer(); });
977}
978
979void
980NetworkOPsImp::setClusterTimer()
981{
982 using namespace std::chrono_literals;
983
984 setTimer(
985 clusterTimer_,
986 10s,
987 [this]() {
988 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
989 processClusterTimer();
990 });
991 },
992 [this]() { setClusterTimer(); });
993}
994
995void
996NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
997{
998 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
999 << toBase58(subInfo.index_->accountId_);
1000 using namespace std::chrono_literals;
1001 setTimer(
1002 accountHistoryTxTimer_,
1003 4s,
1004 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1005 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1006}
1007
1008void
1009NetworkOPsImp::processHeartbeatTimer()
1010{
1011 RclConsensusLogger clog(
1012 "Heartbeat Timer", mConsensus.validating(), m_journal);
1013 {
1014 std::unique_lock lock{app_.getMasterMutex()};
1015
1016 // VFALCO NOTE This is for diagnosing a crash on exit
1017 LoadManager& mgr(app_.getLoadManager());
1018 mgr.heartbeat();
1019
1020 std::size_t const numPeers = app_.overlay().size();
1021
1022 // do we have sufficient peers? If not, we are disconnected.
1023 if (numPeers < minPeerCount_)
1024 {
1025 if (mMode != OperatingMode::DISCONNECTED)
1026 {
1027 setMode(OperatingMode::DISCONNECTED);
1029 ss << "Node count (" << numPeers << ") has fallen "
1030 << "below required minimum (" << minPeerCount_ << ").";
1031 JLOG(m_journal.warn()) << ss.str();
1032 CLOG(clog.ss()) << "set mode to DISCONNECTED: " << ss.str();
1033 }
1034 else
1035 {
1036 CLOG(clog.ss())
1037 << "already DISCONNECTED. too few peers (" << numPeers
1038 << "), need at least " << minPeerCount_;
1039 }
1040
1041 // MasterMutex lock need not be held to call setHeartbeatTimer()
1042 lock.unlock();
1043 // We do not call mConsensus.timerEntry until there are enough
1044 // peers providing meaningful inputs to consensus
1045 setHeartbeatTimer();
1046
1047 return;
1048 }
1049
1050 if (mMode == OperatingMode::DISCONNECTED)
1051 {
1052 setMode(OperatingMode::CONNECTED);
1053 JLOG(m_journal.info())
1054 << "Node count (" << numPeers << ") is sufficient.";
1055 CLOG(clog.ss()) << "setting mode to CONNECTED based on " << numPeers
1056 << " peers. ";
1057 }
1058
1059 // Check if the last validated ledger forces a change between these
1060 // states.
1061 auto origMode = mMode.load();
1062 CLOG(clog.ss()) << "mode: " << strOperatingMode(origMode, true);
1063 if (mMode == OperatingMode::SYNCING)
1064 setMode(OperatingMode::SYNCING);
1065 else if (mMode == OperatingMode::CONNECTED)
1066 setMode(OperatingMode::CONNECTED);
1067 auto newMode = mMode.load();
1068 if (origMode != newMode)
1069 {
1070 CLOG(clog.ss())
1071 << ", changing to " << strOperatingMode(newMode, true);
1072 }
1073 CLOG(clog.ss()) << ". ";
1074 }
1075
1076 mConsensus.timerEntry(app_.timeKeeper().closeTime(), clog.ss());
1077
1078 CLOG(clog.ss()) << "consensus phase " << to_string(mLastConsensusPhase);
1079 const ConsensusPhase currPhase = mConsensus.phase();
1080 if (mLastConsensusPhase != currPhase)
1081 {
1082 reportConsensusStateChange(currPhase);
1083 mLastConsensusPhase = currPhase;
1084 CLOG(clog.ss()) << " changed to " << to_string(mLastConsensusPhase);
1085 }
1086 CLOG(clog.ss()) << ". ";
1087
1088 setHeartbeatTimer();
1089}
1090
1091void
1092NetworkOPsImp::processClusterTimer()
1093{
1094 if (app_.cluster().size() == 0)
1095 return;
1096
1097 using namespace std::chrono_literals;
1098
1099 bool const update = app_.cluster().update(
1100 app_.nodeIdentity().first,
1101 "",
1102 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1103 ? app_.getFeeTrack().getLocalFee()
1104 : 0,
1105 app_.timeKeeper().now());
1106
1107 if (!update)
1108 {
1109 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1110 setClusterTimer();
1111 return;
1112 }
1113
1114 protocol::TMCluster cluster;
1115 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1116 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1117 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1118 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1119 n.set_nodeload(node.getLoadFee());
1120 if (!node.name().empty())
1121 n.set_nodename(node.name());
1122 });
1123
1124 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1125 for (auto& item : gossip.items)
1126 {
1127 protocol::TMLoadSource& node = *cluster.add_loadsources();
1128 node.set_name(to_string(item.address));
1129 node.set_cost(item.balance);
1130 }
1131 app_.overlay().foreach(send_if(
1132 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1133 peer_in_cluster()));
1134 setClusterTimer();
1135}
1136
1137//------------------------------------------------------------------------------
1138
1140NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
1141 const
1142{
1143 if (mode == OperatingMode::FULL && admin)
1144 {
1145 auto const consensusMode = mConsensus.mode();
1146 if (consensusMode != ConsensusMode::wrongLedger)
1147 {
1148 if (consensusMode == ConsensusMode::proposing)
1149 return "proposing";
1150
1151 if (mConsensus.validating())
1152 return "validating";
1153 }
1154 }
1155
1156 return states_[static_cast<std::size_t>(mode)];
1157}
1158
1159void
1160NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1161{
1162 if (isNeedNetworkLedger())
1163 {
1164 // Nothing we can do if we've never been in sync
1165 return;
1166 }
1167
1168 // this is an asynchronous interface
1169 auto const trans = sterilize(*iTrans);
1170
1171 auto const txid = trans->getTransactionID();
1172 auto const flags = app_.getHashRouter().getFlags(txid);
1173
1174 if ((flags & SF_BAD) != 0)
1175 {
1176 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1177 return;
1178 }
1179
1180 try
1181 {
1182 auto const [validity, reason] = checkValidity(
1183 app_.getHashRouter(),
1184 *trans,
1185 m_ledgerMaster.getValidatedRules(),
1186 app_.config());
1187
1188 if (validity != Validity::Valid)
1189 {
1190 JLOG(m_journal.warn())
1191 << "Submitted transaction invalid: " << reason;
1192 return;
1193 }
1194 }
1195 catch (std::exception const& ex)
1196 {
1197 JLOG(m_journal.warn())
1198 << "Exception checking transaction " << txid << ": " << ex.what();
1199
1200 return;
1201 }
1202
1203 std::string reason;
1204
1205 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1206
1207 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1208 auto t = tx;
1209 processTransaction(t, false, false, FailHard::no);
1210 });
1211}
1212
1213void
1214NetworkOPsImp::processTransaction(
1215 std::shared_ptr<Transaction>& transaction,
1216 bool bUnlimited,
1217 bool bLocal,
1218 FailHard failType)
1219{
1220 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1221 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1222
1223 if ((newFlags & SF_BAD) != 0)
1224 {
1225 // cached bad
1226 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1227 transaction->setStatus(INVALID);
1228 transaction->setResult(temBAD_SIGNATURE);
1229 return;
1230 }
1231
1232 // NOTE eahennis - I think this check is redundant,
1233 // but I'm not 100% sure yet.
1234 // If so, only cost is looking up HashRouter flags.
1235 auto const view = m_ledgerMaster.getCurrentLedger();
1236 auto const [validity, reason] = checkValidity(
1237 app_.getHashRouter(),
1238 *transaction->getSTransaction(),
1239 view->rules(),
1240 app_.config());
1241 XRPL_ASSERT(
1242 validity == Validity::Valid,
1243 "ripple::NetworkOPsImp::processTransaction : valid validity");
1244
1245 // Not concerned with local checks at this point.
1246 if (validity == Validity::SigBad)
1247 {
1248 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1249 transaction->setStatus(INVALID);
1250 transaction->setResult(temBAD_SIGNATURE);
1251 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1252 return;
1253 }
1254
1255 // canonicalize can change our pointer
1256 app_.getMasterTransaction().canonicalize(&transaction);
1257
1258 if (bLocal)
1259 doTransactionSync(transaction, bUnlimited, failType);
1260 else
1261 doTransactionAsync(transaction, bUnlimited, failType);
1262}
1263
1264void
1265NetworkOPsImp::doTransactionAsync(
1266 std::shared_ptr<Transaction> transaction,
1267 bool bUnlimited,
1268 FailHard failType)
1269{
1270 std::lock_guard lock(mMutex);
1271
1272 if (transaction->getApplying())
1273 return;
1274
1275 mTransactions.push_back(
1276 TransactionStatus(transaction, bUnlimited, false, failType));
1277 transaction->setApplying();
1278
1279 if (mDispatchState == DispatchState::none)
1280 {
1281 if (m_job_queue.addJob(
1282 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1283 {
1284 mDispatchState = DispatchState::scheduled;
1285 }
1286 }
1287}
1288
1289void
1290NetworkOPsImp::doTransactionSync(
1291 std::shared_ptr<Transaction> transaction,
1292 bool bUnlimited,
1293 FailHard failType)
1294{
1295 std::unique_lock<std::mutex> lock(mMutex);
1296
1297 if (!transaction->getApplying())
1298 {
1299 mTransactions.push_back(
1300 TransactionStatus(transaction, bUnlimited, true, failType));
1301 transaction->setApplying();
1302 }
1303
1304 do
1305 {
1306 if (mDispatchState == DispatchState::running)
1307 {
1308 // A batch processing job is already running, so wait.
1309 mCond.wait(lock);
1310 }
1311 else
1312 {
1313 apply(lock);
1314
1315 if (mTransactions.size())
1316 {
1317 // More transactions need to be applied, but by another job.
1318 if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
1319 transactionBatch();
1320 }))
1321 {
1322 mDispatchState = DispatchState::scheduled;
1323 }
1324 }
1325 }
1326 } while (transaction->getApplying());
1327}
1328
1329void
1330NetworkOPsImp::transactionBatch()
1331{
1332 std::unique_lock<std::mutex> lock(mMutex);
1333
1334 if (mDispatchState == DispatchState::running)
1335 return;
1336
1337 while (mTransactions.size())
1338 {
1339 apply(lock);
1340 }
1341}
1342
1343void
1344NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
1345{
1347 std::vector<TransactionStatus> transactions;
1348 mTransactions.swap(transactions);
1349 XRPL_ASSERT(
1350 !transactions.empty(),
1351 "ripple::NetworkOPsImp::apply : non-empty transactions");
1352 XRPL_ASSERT(
1353 mDispatchState != DispatchState::running,
1354 "ripple::NetworkOPsImp::apply : is not running");
1355
1356 mDispatchState = DispatchState::running;
1357
1358 batchLock.unlock();
1359
1360 {
1361 std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
1362 bool changed = false;
1363 {
1364 std::unique_lock ledgerLock{
1365 m_ledgerMaster.peekMutex(), std::defer_lock};
1366 std::lock(masterLock, ledgerLock);
1367
1368 app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
1369 for (TransactionStatus& e : transactions)
1370 {
1371 // we check before adding to the batch
1372 ApplyFlags flags = tapNONE;
1373 if (e.admin)
1374 flags |= tapUNLIMITED;
1375
1376 if (e.failType == FailHard::yes)
1377 flags |= tapFAIL_HARD;
1378
1379 auto const result = app_.getTxQ().apply(
1380 app_, view, e.transaction->getSTransaction(), flags, j);
1381 e.result = result.ter;
1382 e.applied = result.applied;
1383 changed = changed || result.applied;
1384 }
1385 return changed;
1386 });
1387 }
1388 if (changed)
1389 reportFeeChange();
1390
1391 std::optional<LedgerIndex> validatedLedgerIndex;
1392 if (auto const l = m_ledgerMaster.getValidatedLedger())
1393 validatedLedgerIndex = l->info().seq;
1394
1395 auto newOL = app_.openLedger().current();
1396 for (TransactionStatus& e : transactions)
1397 {
1398 e.transaction->clearSubmitResult();
1399
1400 if (e.applied)
1401 {
1402 pubProposedTransaction(
1403 newOL, e.transaction->getSTransaction(), e.result);
1404 e.transaction->setApplied();
1405 }
1406
1407 e.transaction->setResult(e.result);
1408
1409 if (isTemMalformed(e.result))
1410 app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);
1411
1412#ifdef DEBUG
1413 if (e.result != tesSUCCESS)
1414 {
1415 std::string token, human;
1416
1417 if (transResultInfo(e.result, token, human))
1418 {
1419 JLOG(m_journal.info())
1420 << "TransactionResult: " << token << ": " << human;
1421 }
1422 }
1423#endif
1424
1425 bool addLocal = e.local;
1426
1427 if (e.result == tesSUCCESS)
1428 {
1429 JLOG(m_journal.debug())
1430 << "Transaction is now included in open ledger";
1431 e.transaction->setStatus(INCLUDED);
1432
1433 auto const& txCur = e.transaction->getSTransaction();
1434 auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);
1435 if (txNext)
1436 {
1437 std::string reason;
1438 auto const trans = sterilize(*txNext);
1439 auto t = std::make_shared<Transaction>(trans, reason, app_);
1440 submit_held.emplace_back(t, false, false, FailHard::no);
1441 t->setApplying();
1442 }
1443 }
1444 else if (e.result == tefPAST_SEQ)
1445 {
1446 // duplicate or conflict
1447 JLOG(m_journal.info()) << "Transaction is obsolete";
1448 e.transaction->setStatus(OBSOLETE);
1449 }
1450 else if (e.result == terQUEUED)
1451 {
1452 JLOG(m_journal.debug())
1453 << "Transaction is likely to claim a"
1454 << " fee, but is queued until fee drops";
1455
1456 e.transaction->setStatus(HELD);
1457 // Add to held transactions, because it could get
1458 // kicked out of the queue, and this will try to
1459 // put it back.
1460 m_ledgerMaster.addHeldTransaction(e.transaction);
1461 e.transaction->setQueued();
1462 e.transaction->setKept();
1463 }
1464 else if (isTerRetry(e.result))
1465 {
1466 if (e.failType != FailHard::yes)
1467 {
1468 // transaction should be held
1469 JLOG(m_journal.debug())
1470 << "Transaction should be held: " << e.result;
1471 e.transaction->setStatus(HELD);
1472 m_ledgerMaster.addHeldTransaction(e.transaction);
1473 e.transaction->setKept();
1474 }
1475 }
1476 else
1477 {
1478 JLOG(m_journal.debug())
1479 << "Status other than success " << e.result;
1480 e.transaction->setStatus(INVALID);
1481 }
1482
1483 auto const enforceFailHard =
1484 e.failType == FailHard::yes && !isTesSuccess(e.result);
1485
1486 if (addLocal && !enforceFailHard)
1487 {
1488 m_localTX->push_back(
1489 m_ledgerMaster.getCurrentLedgerIndex(),
1490 e.transaction->getSTransaction());
1491 e.transaction->setKept();
1492 }
1493
1494 if ((e.applied ||
1495 ((mMode != OperatingMode::FULL) &&
1496 (e.failType != FailHard::yes) && e.local) ||
1497 (e.result == terQUEUED)) &&
1498 !enforceFailHard)
1499 {
1500 auto const toSkip =
1501 app_.getHashRouter().shouldRelay(e.transaction->getID());
1502
1503 if (toSkip)
1504 {
1505 protocol::TMTransaction tx;
1506 Serializer s;
1507
1508 e.transaction->getSTransaction()->add(s);
1509 tx.set_rawtransaction(s.data(), s.size());
1510 tx.set_status(protocol::tsCURRENT);
1511 tx.set_receivetimestamp(
1512 app_.timeKeeper().now().time_since_epoch().count());
1513 tx.set_deferred(e.result == terQUEUED);
1514 // FIXME: This should be when we received it
1515 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1516 e.transaction->setBroadcast();
1517 }
1518 }
1519
1520 if (validatedLedgerIndex)
1521 {
1522 auto [fee, accountSeq, availableSeq] =
1523 app_.getTxQ().getTxRequiredFeeAndSeq(
1524 *newOL, e.transaction->getSTransaction());
1525 e.transaction->setCurrentLedgerState(
1526 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1527 }
1528 }
1529 }
1530
1531 batchLock.lock();
1532
1533 for (TransactionStatus& e : transactions)
1534 e.transaction->clearApplying();
1535
1536 if (!submit_held.empty())
1537 {
1538 if (mTransactions.empty())
1539 mTransactions.swap(submit_held);
1540 else
1541 for (auto& e : submit_held)
1542 mTransactions.push_back(std::move(e));
1543 }
1544
1545 mCond.notify_all();
1546
1547 mDispatchState = DispatchState::none;
1548}
1549
1550//
1551// Owner functions
1552//
1553
// Collect the ledger objects owned by `account` -- the contents of its
// owner directory -- as a JSON object grouped by entry type
// (jss::offers, jss::ripple_lines). Walks every page of the directory
// via sfIndexNext.
//
// NOTE(review): this rendering dropped several source lines: the return
// type and the ledger parameter preceding `account` (the function reads
// `lpLedger`, so presumably a `std::shared_ptr<ReadView const> lpLedger`
// parameter), and the right-hand sides at original lines 1580/1590
// (presumably `Json::Value(Json::arrayValue);`). Confirm against the
// upstream file.
1555NetworkOPsImp::getOwnerInfo(
1557    AccountID const& account)
1558{
1559    Json::Value jvObjects(Json::objectValue);
1560    auto root = keylet::ownerDir(account);
1561    auto sleNode = lpLedger->read(keylet::page(root));
1562    if (sleNode)
1563    {
1564        std::uint64_t uNodeDir;
1565
1566        do
1567        {
            // Each directory page lists the keys of owned objects.
1568            for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1569            {
1570                auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1571                XRPL_ASSERT(
1572                    sleCur,
1573                    "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1574
1575                switch (sleCur->getType())
1576                {
1577                    case ltOFFER:
1578                        if (!jvObjects.isMember(jss::offers))
1579                            jvObjects[jss::offers] =
1581
1582                        jvObjects[jss::offers].append(
1583                            sleCur->getJson(JsonOptions::none));
1584                        break;
1585
1586                    case ltRIPPLE_STATE:
1587                        if (!jvObjects.isMember(jss::ripple_lines))
1588                        {
1589                            jvObjects[jss::ripple_lines] =
1591                        }
1592
1593                        jvObjects[jss::ripple_lines].append(
1594                            sleCur->getJson(JsonOptions::none));
1595                        break;
1596
1597                    case ltACCOUNT_ROOT:
1598                    case ltDIR_NODE:
1599                    default:
                        // Owner directories should only reference offers
                        // and trust lines.
1600                        UNREACHABLE(
1601                            "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1602                            "type");
1603                        break;
1604                }
1605            }
1606
            // sfIndexNext links to the next directory page; zero ends
            // the walk.
1607            uNodeDir = sleNode->getFieldU64(sfIndexNext);
1608
1609            if (uNodeDir)
1610            {
1611                sleNode = lpLedger->read(keylet::page(root, uNodeDir));
1612                XRPL_ASSERT(
1613                    sleNode,
1614                    "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1615            }
1616        } while (uNodeDir);
1617    }
1618
1619    return jvObjects;
1620}
1621
1622//
1623// Other
1624//
1625
1626inline bool
1627NetworkOPsImp::isBlocked()
1628{
1629 return isAmendmentBlocked() || isUNLBlocked();
1630}
1631
// True when this server has been marked amendment blocked; simply reads
// the cached flag set by setAmendmentBlocked().
1632inline bool
1633NetworkOPsImp::isAmendmentBlocked()
1634{
1635    return amendmentBlocked_;
1636}
1637
// Latch the amendment-blocked flag and drop the server back to
// CONNECTED: a blocked server must not track or fully follow the
// network.
1638void
1639NetworkOPsImp::setAmendmentBlocked()
1640{
1641    amendmentBlocked_ = true;
1642    setMode(OperatingMode::CONNECTED);
1643}
1644
1645inline bool
1646NetworkOPsImp::isAmendmentWarned()
1647{
1648 return !amendmentBlocked_ && amendmentWarned_;
1649}
1650
// Record that an unsupported amendment has reached majority; surfaced
// (for admins) through isAmendmentWarned() in getServerInfo.
1651inline void
1652NetworkOPsImp::setAmendmentWarned()
1653{
1654    amendmentWarned_ = true;
1655}
1656
// Reset the amendment-majority warning flag.
1657inline void
1658NetworkOPsImp::clearAmendmentWarned()
1659{
1660    amendmentWarned_ = false;
1661}
1662
// True when the server is blocked because its validator list (UNL) has
// expired; simply reads the cached flag set by setUNLBlocked().
1663inline bool
1664NetworkOPsImp::isUNLBlocked()
1665{
1666    return unlBlocked_;
1667}
1668
// Latch the UNL-blocked flag and drop the server back to CONNECTED,
// mirroring setAmendmentBlocked().
1669void
1670NetworkOPsImp::setUNLBlocked()
1671{
1672    unlBlocked_ = true;
1673    setMode(OperatingMode::CONNECTED);
1674}
1675
// Reset the UNL-blocked flag (presumably once a usable validator list
// is available again -- confirm with callers).
1676inline void
1677NetworkOPsImp::clearUNLBlocked()
1678{
1679    unlBlocked_ = false;
1680}
1681
// Decide whether our last closed ledger is the network's preferred one,
// and if not, try to switch to the preferred ledger. Returns true only
// on the abnormal path where a switch actually happens; `networkClosed`
// is always set to the hash this node considers the network's LCL.
//
// NOTE(review): this rendering dropped the declaration of `peerCounts`
// (original line 1712; presumably a hash map from ledger hash to peer
// count). Confirm against the upstream file.
1682bool
1683NetworkOPsImp::checkLastClosedLedger(
1684    const Overlay::PeerSequence& peerList,
1685    uint256& networkClosed)
1686{
1687    // Returns true if there's an *abnormal* ledger issue, normal changing in
1688    // TRACKING mode should return false. Do we have sufficient validations for
1689    // our last closed ledger? Or do sufficient nodes agree? And do we have no
1690    // better ledger available? If so, we are either tracking or full.
1691
1692    JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";
1693
1694    auto const ourClosed = m_ledgerMaster.getClosedLedger();
1695
1696    if (!ourClosed)
1697        return false;
1698
1699    uint256 closedLedger = ourClosed->info().hash;
1700    uint256 prevClosedLedger = ourClosed->info().parentHash;
1701    JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
1702    JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;
1703
1704    //-------------------------------------------------------------------------
1705    // Determine preferred last closed ledger
1706
1707    auto& validations = app_.getValidations();
1708    JLOG(m_journal.debug())
1709        << "ValidationTrie " << Json::Compact(validations.getJsonTrie());
1710
1711    // Will rely on peer LCL if no trusted validations exist
    // Our own vote counts only if we are at least TRACKING.
1713    peerCounts[closedLedger] = 0;
1714    if (mMode >= OperatingMode::TRACKING)
1715        peerCounts[closedLedger]++;
1716
1717    for (auto& peer : peerList)
1718    {
1719        uint256 peerLedger = peer->getClosedLedgerHash();
1720
1721        if (peerLedger.isNonZero())
1722            ++peerCounts[peerLedger];
1723    }
1724
1725    for (auto const& it : peerCounts)
1726        JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;
1727
1728    uint256 preferredLCL = validations.getPreferredLCL(
1729        RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
1730        m_ledgerMaster.getValidLedgerIndex(),
1731        peerCounts);
1732
1733    bool switchLedgers = preferredLCL != closedLedger;
1734    if (switchLedgers)
1735        closedLedger = preferredLCL;
1736    //-------------------------------------------------------------------------
1737    if (switchLedgers && (closedLedger == prevClosedLedger))
1738    {
1739        // don't switch to our own previous ledger
1740        JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
1741        networkClosed = ourClosed->info().hash;
1742        switchLedgers = false;
1743    }
1744    else
1745        networkClosed = closedLedger;
1746
1747    if (!switchLedgers)
1748        return false;
1749
    // Try to obtain the preferred ledger, acquiring it from peers if we
    // don't already have it.
1750    auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1751
1752    if (!consensus)
1753        consensus = app_.getInboundLedgers().acquire(
1754            closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1755
1756    if (consensus &&
1757        (!m_ledgerMaster.canBeCurrent(consensus) ||
1758         !m_ledgerMaster.isCompatible(
1759             *consensus, m_journal.debug(), "Not switching")))
1760    {
1761        // Don't switch to a ledger not on the validated chain
1762        // or with an invalid close time or sequence
1763        networkClosed = ourClosed->info().hash;
1764        return false;
1765    }
1766
1767    JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
1768    JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
1769                           << getJson({*ourClosed, {}});
1770    JLOG(m_journal.info()) << "Net LCL " << closedLedger;
1771
    // Being off the consensus ledger means we can no longer claim to be
    // tracking or full.
1772    if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1773    {
1774        setMode(OperatingMode::CONNECTED);
1775    }
1776
1777    if (consensus)
1778    {
1779        // FIXME: If this rewinds the ledger sequence, or has the same
1780        // sequence, we should update the status on any stored transactions
1781        // in the invalidated ledgers.
1782        switchLastClosedLedger(consensus);
1783    }
1784
1785    return true;
1786}
1787
// Abnormal-path ledger jump: adopt `newLCL` as our last closed ledger,
// rebuild the open ledger on top of it (replaying held/local and queued
// transactions), and broadcast a SWITCHED_LEDGER status change to peers.
//
// NOTE(review): this rendering dropped a declaration between original
// lines 1807 and 1809 -- `rules` is used below, presumably declared as
// `std::optional<Rules> rules;`. Confirm against the upstream file.
1788void
1789NetworkOPsImp::switchLastClosedLedger(
1790    std::shared_ptr<Ledger const> const& newLCL)
1791{
1792    // set the newLCL as our last closed ledger -- this is abnormal code
1793    JLOG(m_journal.error())
1794        << "JUMP last closed ledger to " << newLCL->info().hash;
1795
1796    clearNeedNetworkLedger();
1797
1798    // Update fee computations.
1799    app_.getTxQ().processClosedLedger(app_, *newLCL, true);
1800
1801    // Caller must own master lock
1802    {
1803        // Apply tx in old open ledger to new
1804        // open ledger. Then apply local tx.
1805
1806        auto retries = m_localTX->getTxSet();
        // Prefer the rules of the last validated ledger; fall back to
        // the configured feature set when no validated ledger exists.
1807        auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1809        if (lastVal)
1810            rules = makeRulesGivenLedger(*lastVal, app_.config().features);
1811        else
1812            rules.emplace(app_.config().features);
1813        app_.openLedger().accept(
1814            app_,
1815            *rules,
1816            newLCL,
1817            OrderedTxs({}),
1818            false,
1819            retries,
1820            tapNONE,
1821            "jump",
1822            [&](OpenView& view, beast::Journal j) {
1823                // Stuff the ledger with transactions from the queue.
1824                return app_.getTxQ().accept(app_, view);
1825            });
1826    }
1827
1828    m_ledgerMaster.switchLCL(newLCL);
1829
    // Tell peers we jumped.
1830    protocol::TMStatusChange s;
1831    s.set_newevent(protocol::neSWITCHED_LEDGER);
1832    s.set_ledgerseq(newLCL->info().seq);
1833    s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1834    s.set_ledgerhashprevious(
1835        newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
1836    s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
1837
1838    app_.overlay().foreach(
1839        send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
1840}
1841
// Start a consensus round for the current open ledger. Returns false if
// we do not hold the previous (parent) ledger; otherwise refreshes the
// trusted validator set, starts the round in the consensus engine, and
// reports any phase change. Returns true when the round was started.
//
// NOTE(review): this rendering dropped the second parameter line
// (original 1845) -- `clog` is used below, presumably declared as
// `std::unique_ptr<std::stringstream> const& clog`. Confirm against the
// upstream file.
1842bool
1843NetworkOPsImp::beginConsensus(
1844    uint256 const& networkClosed,
1846{
1847    XRPL_ASSERT(
1848        networkClosed.isNonZero(),
1849        "ripple::NetworkOPsImp::beginConsensus : nonzero input");
1850
1851    auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
1852
1853    JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
1854                           << " with LCL " << closingInfo.parentHash;
1855
1856    auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
1857
1858    if (!prevLedger)
1859    {
1860        // this shouldn't happen unless we jump ledgers
1861        if (mMode == OperatingMode::FULL)
1862        {
1863            JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
1864            setMode(OperatingMode::TRACKING);
1865            CLOG(clog) << "beginConsensus Don't have LCL, going to tracking. ";
1866        }
1867
1868        CLOG(clog) << "beginConsensus no previous ledger. ";
1869        return false;
1870    }
1871
1872    XRPL_ASSERT(
1873        prevLedger->info().hash == closingInfo.parentHash,
1874        "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
1875        "parent");
1876    XRPL_ASSERT(
1877        closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
1878        "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
1879        "hash");
1880
    // Refresh the trusted validator set before the round starts.
1881    if (prevLedger->rules().enabled(featureNegativeUNL))
1882        app_.validators().setNegativeUNL(prevLedger->negativeUNL());
1883    TrustChanges const changes = app_.validators().updateTrusted(
1884        app_.getValidations().getCurrentNodeIDs(),
1885        closingInfo.parentCloseTime,
1886        *this,
1887        app_.overlay(),
1888        app_.getHashRouter());
1889
1890    if (!changes.added.empty() || !changes.removed.empty())
1891    {
1892        app_.getValidations().trustChanged(changes.added, changes.removed);
1893        // Update the AmendmentTable so it tracks the current validators.
1894        app_.getAmendmentTable().trustChanged(
1895            app_.validators().getQuorumKeys().second);
1896    }
1897
1898    mConsensus.startRound(
1899        app_.timeKeeper().closeTime(),
1900        networkClosed,
1901        prevLedger,
1902        changes.removed,
1903        changes.added,
1904        clog);
1905
    // Publish a phase change to subscribers if the engine moved phases.
1906    const ConsensusPhase currPhase = mConsensus.phase();
1907    if (mLastConsensusPhase != currPhase)
1908    {
1909        reportConsensusStateChange(currPhase);
1910        mLastConsensusPhase = currPhase;
1911    }
1912
1913    JLOG(m_journal.debug()) << "Initiating consensus engine";
1914    return true;
1915}
1916
1917bool
1918NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
1919{
1920 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
1921}
1922
1923void
1924NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
1925{
1926 // We now have an additional transaction set
1927 // either created locally during the consensus process
1928 // or acquired from a peer
1929
1930 // Inform peers we have this set
1931 protocol::TMHaveTransactionSet msg;
1932 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
1933 msg.set_status(protocol::tsHAVE);
1934 app_.overlay().foreach(
1935 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
1936
1937 // We acquired it because consensus asked us to
1938 if (fromAcquire)
1939 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
1940}
1941
1942void
1943NetworkOPsImp::endConsensus(std::unique_ptr<std::stringstream> const& clog)
1944{
1945 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
1946
1947 for (auto const& it : app_.overlay().getActivePeers())
1948 {
1949 if (it && (it->getClosedLedgerHash() == deadLedger))
1950 {
1951 JLOG(m_journal.trace()) << "Killing obsolete peer status";
1952 it->cycleStatus();
1953 }
1954 }
1955
1956 uint256 networkClosed;
1957 bool ledgerChange =
1958 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
1959
1960 if (networkClosed.isZero())
1961 {
1962 CLOG(clog) << "endConsensus last closed ledger is zero. ";
1963 return;
1964 }
1965
1966 // WRITEME: Unless we are in FULL and in the process of doing a consensus,
1967 // we must count how many nodes share our LCL, how many nodes disagree with
1968 // our LCL, and how many validations our LCL has. We also want to check
1969 // timing to make sure there shouldn't be a newer LCL. We need this
1970 // information to do the next three tests.
1971
1972 if (((mMode == OperatingMode::CONNECTED) ||
1973 (mMode == OperatingMode::SYNCING)) &&
1974 !ledgerChange)
1975 {
1976 // Count number of peers that agree with us and UNL nodes whose
1977 // validations we have for LCL. If the ledger is good enough, go to
1978 // TRACKING - TODO
1979 if (!needNetworkLedger_)
1980 setMode(OperatingMode::TRACKING);
1981 }
1982
1983 if (((mMode == OperatingMode::CONNECTED) ||
1984 (mMode == OperatingMode::TRACKING)) &&
1985 !ledgerChange)
1986 {
1987 // check if the ledger is good enough to go to FULL
1988 // Note: Do not go to FULL if we don't have the previous ledger
1989 // check if the ledger is bad enough to go to CONNECTE D -- TODO
1990 auto current = m_ledgerMaster.getCurrentLedger();
1991 if (app_.timeKeeper().now() < (current->info().parentCloseTime +
1992 2 * current->info().closeTimeResolution))
1993 {
1994 setMode(OperatingMode::FULL);
1995 }
1996 }
1997
1998 beginConsensus(networkClosed, clog);
1999}
2000
2001void
2002NetworkOPsImp::consensusViewChange()
2003{
2004 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
2005 {
2006 setMode(OperatingMode::CONNECTED);
2007 }
2008}
2009
// Publish a received validator manifest on the "manifests" stream,
// pruning subscribers whose weak references have expired.
//
// NOTE(review): this rendering dropped the declaration of `jvObj`
// (original line 2018; presumably `Json::Value jvObj(Json::objectValue);`).
// Confirm against the upstream file.
2010void
2011NetworkOPsImp::pubManifest(Manifest const& mo)
2012{
2013    // VFALCO consider std::shared_mutex
2014    std::lock_guard sl(mSubLock);
2015
2016    if (!mStreamMaps[sManifests].empty())
2017    {
2019
2020        jvObj[jss::type] = "manifestReceived";
2021        jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
2022        if (mo.signingKey)
2023            jvObj[jss::signing_key] =
2024                toBase58(TokenType::NodePublic, *mo.signingKey);
2025        jvObj[jss::seq] = Json::UInt(mo.sequence);
2026        if (auto sig = mo.getSignature())
2027            jvObj[jss::signature] = strHex(*sig);
2028        jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
2029        if (!mo.domain.empty())
2030            jvObj[jss::domain] = mo.domain;
2031        jvObj[jss::manifest] = strHex(mo.serialized);
2032
        // Send to live subscribers; erase entries whose InfoSub is gone.
2033        for (auto i = mStreamMaps[sManifests].begin();
2034             i != mStreamMaps[sManifests].end();)
2035        {
2036            if (auto p = i->second.lock())
2037            {
2038                p->send(jvObj, true);
2039                ++i;
2040            }
2041            else
2042            {
2043                i = mStreamMaps[sManifests].erase(i);
2044            }
2045        }
2046    }
2047}
2048
// Snapshot of the values published on the "server" stream: current load
// fee state, base fee, and TxQ escalation metrics. Compared (via
// operator!=) against the previous snapshot to detect changes worth
// publishing.
2049NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
2050    XRPAmount fee,
2051    TxQ::Metrics&& escalationMetrics,
2052    LoadFeeTrack const& loadFeeTrack)
2053    : loadFactorServer{loadFeeTrack.getLoadFactor()}
2054    , loadBaseServer{loadFeeTrack.getLoadBase()}
2055    , baseFee{fee}
2056    , em{std::move(escalationMetrics)}
2057{
2058}
2059
// True when any published fee field differs between two snapshots --
// used to decide whether a new "serverStatus" message must be sent.
//
// NOTE(review): this rendering dropped the operator's name line
// (original 2061; presumably
// `NetworkOPsImp::ServerFeeSummary::operator!=(`). Confirm against the
// upstream file.
2060bool
2062    NetworkOPsImp::ServerFeeSummary const& b) const
2063{
2064    if (loadFactorServer != b.loadFactorServer ||
2065        loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
2066        em.has_value() != b.em.has_value())
2067        return true;
2068
    // Both have escalation metrics: compare them field by field.
2069    if (em && b.em)
2070    {
2071        return (
2072            em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
2073            em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
2074            em->referenceFeeLevel != b.em->referenceFeeLevel);
2075    }
2076
2077    return false;
2078}
2079
// Clamp a uint64 to the largest value representable in 32 bits:
// Json::Value cannot carry a full 64-bit unsigned integer.
//
// NOTE(review): this rendering dropped the parameter line and the
// `max32` constant; both are reconstructed here from the call sites,
// which pass a std::uint64_t load factor and store the result in JSON.
static std::uint32_t
trunc32(std::uint64_t v)
{
    constexpr std::uint64_t max32 = std::numeric_limits<std::uint32_t>::max();
    // Explicit cast: the min() result always fits in 32 bits.
    return static_cast<std::uint32_t>(std::min(max32, v));
}
2088
// Publish the current fee/load state on the "server" stream and record
// it as the last published summary, pruning dead subscribers.
//
// NOTE(review): this rendering dropped several lines: the signature
// (original 2090, presumably `NetworkOPsImp::pubServer()`), the
// subscription lock (2096), the `jvObj` declaration (2100), the start
// of the ServerFeeSummary construction (2102/2104), and the closing of
// the mulDiv expression (2121). Confirm against the upstream file.
2089void
2091{
2092    // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
2093    //             list into a local array while holding the lock then release
2094    //             the lock and call send on everyone.
2095    //
2097
2098    if (!mStreamMaps[sServer].empty())
2099    {
2101
2103            app_.openLedger().current()->fees().base,
2105            app_.getFeeTrack()};
2106
2107        jvObj[jss::type] = "serverStatus";
2108        jvObj[jss::server_status] = strOperatingMode();
2109        jvObj[jss::load_base] = f.loadBaseServer;
2110        jvObj[jss::load_factor_server] = f.loadFactorServer;
2111        jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2112
2113        if (f.em)
2114        {
            // Overall load factor is the larger of the server's own load
            // and the TxQ's escalated open-ledger fee level (rescaled).
2115            auto const loadFactor = std::max(
2116                safe_cast<std::uint64_t>(f.loadFactorServer),
2117                mulDiv(
2118                    f.em->openLedgerFeeLevel,
2119                    f.loadBaseServer,
2120                    f.em->referenceFeeLevel)
2122
2123            jvObj[jss::load_factor] = trunc32(loadFactor);
2124            jvObj[jss::load_factor_fee_escalation] =
2125                f.em->openLedgerFeeLevel.jsonClipped();
2126            jvObj[jss::load_factor_fee_queue] =
2127                f.em->minProcessingFeeLevel.jsonClipped();
2128            jvObj[jss::load_factor_fee_reference] =
2129                f.em->referenceFeeLevel.jsonClipped();
2130        }
2131        else
2132            jvObj[jss::load_factor] = f.loadFactorServer;
2133
2134        mLastFeeSummary = f;
2135
2136        for (auto i = mStreamMaps[sServer].begin();
2137             i != mStreamMaps[sServer].end();)
2138        {
2139            InfoSub::pointer p = i->second.lock();
2140
2141            // VFALCO TODO research the possibility of using thread queues and
2142            //             linearizing the deletion of subscribers with the
2143            //             sending of JSON data.
2144            if (p)
2145            {
2146                p->send(jvObj, true);
2147                ++i;
2148            }
2149            else
2150            {
2151                i = mStreamMaps[sServer].erase(i);
2152            }
2153        }
2154    }
2155}
2156
// Publish a consensus phase change on the "consensusPhase" stream,
// pruning dead subscribers.
//
// NOTE(review): this rendering dropped the signature line (original
// 2158, presumably taking a `ConsensusPhase phase` parameter), the
// subscription lock (2160), and the `jvObj` declaration (2165).
// Confirm against the upstream file.
2157void
2159{
2161
2162    auto& streamMap = mStreamMaps[sConsensusPhase];
2163    if (!streamMap.empty())
2164    {
2166        jvObj[jss::type] = "consensusPhase";
2167        jvObj[jss::consensus] = to_string(phase);
2168
2169        for (auto i = streamMap.begin(); i != streamMap.end();)
2170        {
2171            if (auto p = i->second.lock())
2172            {
2173                p->send(jvObj, true);
2174                ++i;
2175            }
2176            else
2177            {
2178                i = streamMap.erase(i);
2179            }
2180        }
2181    }
2182}
2183
// Publish a received validation on the "validations" stream. Builds a
// JSON description of the validation's fields, then uses MultiApiJson to
// serve API-version-specific variants (v1 gets ledger_index as a
// string), pruning dead subscribers as it goes.
//
// NOTE(review): this rendering dropped the signature line (original
// 2185; the body reads `val`, presumably
// `std::shared_ptr<STValidation> const& val`), the subscription lock
// (2188), and the `jvObj` declaration (2192). Confirm against the
// upstream file.
2184void
2186{
2187    // VFALCO consider std::shared_mutex
2189
2190    if (!mStreamMaps[sValidations].empty())
2191    {
2193
2194        auto const signerPublic = val->getSignerPublic();
2195
2196        jvObj[jss::type] = "validationReceived";
2197        jvObj[jss::validation_public_key] =
2198            toBase58(TokenType::NodePublic, signerPublic);
2199        jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
2200        jvObj[jss::signature] = strHex(val->getSignature());
2201        jvObj[jss::full] = val->isFull();
2202        jvObj[jss::flags] = val->getFlags();
2203        jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2204        jvObj[jss::data] = strHex(val->getSerializer().slice());
2205
2206        if (auto version = (*val)[~sfServerVersion])
2207            jvObj[jss::server_version] = std::to_string(*version);
2208
2209        if (auto cookie = (*val)[~sfCookie])
2210            jvObj[jss::cookie] = std::to_string(*cookie);
2211
2212        if (auto hash = (*val)[~sfValidatedHash])
2213            jvObj[jss::validated_hash] = strHex(*hash);
2214
        // Report the signer's master key when the manifest maps the
        // signing key to a different master key.
2215        auto const masterKey =
2216            app_.validatorManifests().getMasterKey(signerPublic);
2217
2218        if (masterKey != signerPublic)
2219            jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
2220
2221        // NOTE *seq is a number, but old API versions used string. We replace
2222        // number with a string using MultiApiJson near end of this function
2223        if (auto const seq = (*val)[~sfLedgerSequence])
2224            jvObj[jss::ledger_index] = *seq;
2225
2226        if (val->isFieldPresent(sfAmendments))
2227        {
2228            jvObj[jss::amendments] = Json::Value(Json::arrayValue);
2229            for (auto const& amendment : val->getFieldV256(sfAmendments))
2230                jvObj[jss::amendments].append(to_string(amendment));
2231        }
2232
2233        if (auto const closeTime = (*val)[~sfCloseTime])
2234            jvObj[jss::close_time] = *closeTime;
2235
2236        if (auto const loadFee = (*val)[~sfLoadFee])
2237            jvObj[jss::load_fee] = *loadFee;
2238
2239        if (auto const baseFee = val->at(~sfBaseFee))
2240            jvObj[jss::base_fee] = static_cast<double>(*baseFee);
2241
2242        if (auto const reserveBase = val->at(~sfReserveBase))
2243            jvObj[jss::reserve_base] = *reserveBase;
2244
2245        if (auto const reserveInc = val->at(~sfReserveIncrement))
2246            jvObj[jss::reserve_inc] = *reserveInc;
2247
2248        // (The ~ operator converts the Proxy to a std::optional, which
2249        // simplifies later operations)
2250        if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2251            baseFeeXRP && baseFeeXRP->native())
2252            jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2253
2254        if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2255            reserveBaseXRP && reserveBaseXRP->native())
2256            jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2257
2258        if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2259            reserveIncXRP && reserveIncXRP->native())
2260            jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2261
2262        // NOTE Use MultiApiJson to publish two slightly different JSON objects
2263        // for consumers supporting different API versions
2264        MultiApiJson multiObj{jvObj};
2265        multiObj.visit(
2266            RPC::apiVersion<1>, //
2267            [](Json::Value& jvTx) {
2268                // Type conversion for older API versions to string
2269                if (jvTx.isMember(jss::ledger_index))
2270                {
2271                    jvTx[jss::ledger_index] =
2272                        std::to_string(jvTx[jss::ledger_index].asUInt());
2273                }
2274            });
2275
2276        for (auto i = mStreamMaps[sValidations].begin();
2277             i != mStreamMaps[sValidations].end();)
2278        {
2279            if (auto p = i->second.lock())
2280            {
                // Send the variant matching this subscriber's API version.
2281                multiObj.visit(
2282                    p->getApiVersion(), //
2283                    [&](Json::Value const& jv) { p->send(jv, true); });
2284                ++i;
2285            }
2286            else
2287            {
2288                i = mStreamMaps[sValidations].erase(i);
2289            }
2290        }
2291    }
2292}
2293
// Publish a peer status change on the "peerStatusChange" stream. The
// JSON payload is produced lazily by `func` only when subscribers exist.
//
// NOTE(review): this rendering dropped the signature line (original
// 2295; the body calls `func()`, presumably a
// `std::function<Json::Value(void)> const&` parameter) and the
// subscription lock (2297). Confirm against the upstream file.
2294void
2296{
2298
2299    if (!mStreamMaps[sPeerStatus].empty())
2300    {
2301        Json::Value jvObj(func());
2302
2303        jvObj[jss::type] = "peerStatusChange";
2304
2305        for (auto i = mStreamMaps[sPeerStatus].begin();
2306             i != mStreamMaps[sPeerStatus].end();)
2307        {
2308            InfoSub::pointer p = i->second.lock();
2309
2310            if (p)
2311            {
2312                p->send(jvObj, true);
2313                ++i;
2314            }
2315            else
2316            {
2317                i = mStreamMaps[sPeerStatus].erase(i);
2318            }
2319        }
2320    }
2321}
2322
// Transition the server's operating mode, updating accounting and
// publishing the new state to "server" stream subscribers; no-op when
// the mode is unchanged.
//
// NOTE(review): this rendering dropped several lines: the signature
// (original 2324, presumably taking an `OperatingMode om` parameter),
// the bodies of the CONNECTED and SYNCING adjustment branches
// (2329-2330 and 2334-2335), and the statement guarded by the blocked
// check (2339, presumably forcing `om` down to CONNECTED). Confirm
// against the upstream file.
2323void
2325{
2326    using namespace std::chrono_literals;
2327    if (om == OperatingMode::CONNECTED)
2328    {
2331    }
2332    else if (om == OperatingMode::SYNCING)
2333    {
2336    }
2337
    // A blocked server may not advance past CONNECTED.
2338    if ((om > OperatingMode::CONNECTED) && isBlocked())
2340
2341    if (mMode == om)
2342        return;
2343
2344    mMode = om;
2345
2346    accounting_.mode(om);
2347
2348    JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
2349    pubServer();
2350}
2351
// Handle a validation received from `source`: deduplicate concurrent
// handling of the same ledger hash via pendingValidations_, process it
// (outside the lock), publish it to subscribers, and decide whether it
// should be relayed.
//
// NOTE(review): this rendering dropped the first signature lines
// (original 2353-2354; the body reads `val`, presumably a
// `std::shared_ptr<STValidation> const&`), the lock acquisition (2360;
// `lock` is unlocked below), and the `std::stringstream ss;` declaration
// (2392). Confirm against the upstream file.
2352bool
2355    std::string const& source)
2356{
2357    JLOG(m_journal.trace())
2358        << "recvValidation " << val->getLedgerHash() << " from " << source;
2359
2361    BypassAccept bypassAccept = BypassAccept::no;
2362    try
2363    {
        // If another thread is already handling this ledger hash, skip
        // the accept step; otherwise claim it.
2364        if (pendingValidations_.contains(val->getLedgerHash()))
2365            bypassAccept = BypassAccept::yes;
2366        else
2367            pendingValidations_.insert(val->getLedgerHash());
2368        scope_unlock unlock(lock);
2369        handleNewValidation(app_, val, source, bypassAccept, m_journal);
2370    }
2371    catch (std::exception const& e)
2372    {
2373        JLOG(m_journal.warn())
2374            << "Exception thrown for handling new validation "
2375            << val->getLedgerHash() << ": " << e.what();
2376    }
2377    catch (...)
2378    {
2379        JLOG(m_journal.warn())
2380            << "Unknown exception thrown for handling new validation "
2381            << val->getLedgerHash();
2382    }
    // Release our claim only if we were the thread that took it.
2383    if (bypassAccept == BypassAccept::no)
2384    {
2385        pendingValidations_.erase(val->getLedgerHash());
2386    }
2387    lock.unlock();
2388
2389    pubValidation(val);
2390
2391    JLOG(m_journal.debug()) << [this, &val]() -> auto {
2393        ss << "VALIDATION: " << val->render() << " master_key: ";
2394        auto master = app_.validators().getTrustedKey(val->getSignerPublic());
2395        if (master)
2396        {
2397            ss << toBase58(TokenType::NodePublic, *master);
2398        }
2399        else
2400        {
2401            ss << "none";
2402        }
2403        return ss.str();
2404    }();
2405
2406    // We will always relay trusted validations; if configured, we will
2407    // also relay all untrusted validations.
2408    return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2409}
2410
// Return the consensus engine's own JSON status report (full detail).
// NOTE(review): this rendering dropped the return type and signature
// lines (original 2411-2412; presumably
// `Json::Value NetworkOPsImp::getConsensusInfo()`). Confirm against the
// upstream file.
2413{
2414    return mConsensus.getJson(true);
2415}
2416
2418NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2419{
2421
2422 // System-level warnings
2423 {
2424 Json::Value warnings{Json::arrayValue};
2425 if (isAmendmentBlocked())
2426 {
2427 Json::Value& w = warnings.append(Json::objectValue);
2428 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2429 w[jss::message] =
2430 "This server is amendment blocked, and must be updated to be "
2431 "able to stay in sync with the network.";
2432 }
2433 if (isUNLBlocked())
2434 {
2435 Json::Value& w = warnings.append(Json::objectValue);
2436 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2437 w[jss::message] =
2438 "This server has an expired validator list. validators.txt "
2439 "may be incorrectly configured or some [validator_list_sites] "
2440 "may be unreachable.";
2441 }
2442 if (admin && isAmendmentWarned())
2443 {
2444 Json::Value& w = warnings.append(Json::objectValue);
2445 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2446 w[jss::message] =
2447 "One or more unsupported amendments have reached majority. "
2448 "Upgrade to the latest version before they are activated "
2449 "to avoid being amendment blocked.";
2450 if (auto const expected =
2452 {
2453 auto& d = w[jss::details] = Json::objectValue;
2454 d[jss::expected_date] = expected->time_since_epoch().count();
2455 d[jss::expected_date_UTC] = to_string(*expected);
2456 }
2457 }
2458
2459 if (warnings.size())
2460 info[jss::warnings] = std::move(warnings);
2461 }
2462
2463 // hostid: unique string describing the machine
2464 if (human)
2465 info[jss::hostid] = getHostId(admin);
2466
2467 // domain: if configured with a domain, report it:
2468 if (!app_.config().SERVER_DOMAIN.empty())
2469 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2470
2471 info[jss::build_version] = BuildInfo::getVersionString();
2472
2473 info[jss::server_state] = strOperatingMode(admin);
2474
2475 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2477
2479 info[jss::network_ledger] = "waiting";
2480
2481 info[jss::validation_quorum] =
2482 static_cast<Json::UInt>(app_.validators().quorum());
2483
2484 if (admin)
2485 {
2486 switch (app_.config().NODE_SIZE)
2487 {
2488 case 0:
2489 info[jss::node_size] = "tiny";
2490 break;
2491 case 1:
2492 info[jss::node_size] = "small";
2493 break;
2494 case 2:
2495 info[jss::node_size] = "medium";
2496 break;
2497 case 3:
2498 info[jss::node_size] = "large";
2499 break;
2500 case 4:
2501 info[jss::node_size] = "huge";
2502 break;
2503 }
2504
2505 auto when = app_.validators().expires();
2506
2507 if (!human)
2508 {
2509 if (when)
2510 info[jss::validator_list_expires] =
2511 safe_cast<Json::UInt>(when->time_since_epoch().count());
2512 else
2513 info[jss::validator_list_expires] = 0;
2514 }
2515 else
2516 {
2517 auto& x = (info[jss::validator_list] = Json::objectValue);
2518
2519 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2520
2521 if (when)
2522 {
2523 if (*when == TimeKeeper::time_point::max())
2524 {
2525 x[jss::expiration] = "never";
2526 x[jss::status] = "active";
2527 }
2528 else
2529 {
2530 x[jss::expiration] = to_string(*when);
2531
2532 if (*when > app_.timeKeeper().now())
2533 x[jss::status] = "active";
2534 else
2535 x[jss::status] = "expired";
2536 }
2537 }
2538 else
2539 {
2540 x[jss::status] = "unknown";
2541 x[jss::expiration] = "unknown";
2542 }
2543 }
2544
2545#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2546 {
2547 auto& x = (info[jss::git] = Json::objectValue);
2548#ifdef GIT_COMMIT_HASH
2549 x[jss::hash] = GIT_COMMIT_HASH;
2550#endif
2551#ifdef GIT_BRANCH
2552 x[jss::branch] = GIT_BRANCH;
2553#endif
2554 }
2555#endif
2556 }
2557 info[jss::io_latency_ms] =
2558 static_cast<Json::UInt>(app_.getIOLatency().count());
2559
2560 if (admin)
2561 {
2562 if (auto const localPubKey = app_.validators().localPublicKey();
2563 localPubKey && app_.getValidationPublicKey())
2564 {
2565 info[jss::pubkey_validator] =
2566 toBase58(TokenType::NodePublic, localPubKey.value());
2567 }
2568 else
2569 {
2570 info[jss::pubkey_validator] = "none";
2571 }
2572 }
2573
2574 if (counters)
2575 {
2576 info[jss::counters] = app_.getPerfLog().countersJson();
2577
2578 Json::Value nodestore(Json::objectValue);
2579 app_.getNodeStore().getCountsJson(nodestore);
2580 info[jss::counters][jss::nodestore] = nodestore;
2581 info[jss::current_activities] = app_.getPerfLog().currentJson();
2582 }
2583
2584 info[jss::pubkey_node] =
2586
2587 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2588
2590 info[jss::amendment_blocked] = true;
2591
2592 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2593
2594 if (fp != 0)
2595 info[jss::fetch_pack] = Json::UInt(fp);
2596
2597 info[jss::peers] = Json::UInt(app_.overlay().size());
2598
2599 Json::Value lastClose = Json::objectValue;
2600 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2601
2602 if (human)
2603 {
2604 lastClose[jss::converge_time_s] =
2606 }
2607 else
2608 {
2609 lastClose[jss::converge_time] =
2611 }
2612
2613 info[jss::last_close] = lastClose;
2614
2615 // info[jss::consensus] = mConsensus.getJson();
2616
2617 if (admin)
2618 info[jss::load] = m_job_queue.getJson();
2619
2620 if (auto const netid = app_.overlay().networkID())
2621 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2622
2623 auto const escalationMetrics =
2625
2626 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2627 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2628 /* Scale the escalated fee level to unitless "load factor".
2629 In practice, this just strips the units, but it will continue
2630 to work correctly if either base value ever changes. */
2631 auto const loadFactorFeeEscalation =
2632 mulDiv(
2633 escalationMetrics.openLedgerFeeLevel,
2634 loadBaseServer,
2635 escalationMetrics.referenceFeeLevel)
2637
2638 auto const loadFactor = std::max(
2639 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2640
2641 if (!human)
2642 {
2643 info[jss::load_base] = loadBaseServer;
2644 info[jss::load_factor] = trunc32(loadFactor);
2645 info[jss::load_factor_server] = loadFactorServer;
2646
2647 /* Json::Value doesn't support uint64, so clamp to max
2648 uint32 value. This is mostly theoretical, since there
2649 probably isn't enough extant XRP to drive the factor
2650 that high.
2651 */
2652 info[jss::load_factor_fee_escalation] =
2653 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2654 info[jss::load_factor_fee_queue] =
2655 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2656 info[jss::load_factor_fee_reference] =
2657 escalationMetrics.referenceFeeLevel.jsonClipped();
2658 }
2659 else
2660 {
2661 info[jss::load_factor] =
2662 static_cast<double>(loadFactor) / loadBaseServer;
2663
2664 if (loadFactorServer != loadFactor)
2665 info[jss::load_factor_server] =
2666 static_cast<double>(loadFactorServer) / loadBaseServer;
2667
2668 if (admin)
2669 {
2671 if (fee != loadBaseServer)
2672 info[jss::load_factor_local] =
2673 static_cast<double>(fee) / loadBaseServer;
2674 fee = app_.getFeeTrack().getRemoteFee();
2675 if (fee != loadBaseServer)
2676 info[jss::load_factor_net] =
2677 static_cast<double>(fee) / loadBaseServer;
2678 fee = app_.getFeeTrack().getClusterFee();
2679 if (fee != loadBaseServer)
2680 info[jss::load_factor_cluster] =
2681 static_cast<double>(fee) / loadBaseServer;
2682 }
2683 if (escalationMetrics.openLedgerFeeLevel !=
2684 escalationMetrics.referenceFeeLevel &&
2685 (admin || loadFactorFeeEscalation != loadFactor))
2686 info[jss::load_factor_fee_escalation] =
2687 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2688 escalationMetrics.referenceFeeLevel);
2689 if (escalationMetrics.minProcessingFeeLevel !=
2690 escalationMetrics.referenceFeeLevel)
2691 info[jss::load_factor_fee_queue] =
2692 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2693 escalationMetrics.referenceFeeLevel);
2694 }
2695
2696 bool valid = false;
2697 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2698
2699 if (lpClosed)
2700 valid = true;
2701 else
2702 lpClosed = m_ledgerMaster.getClosedLedger();
2703
2704 if (lpClosed)
2705 {
2706 XRPAmount const baseFee = lpClosed->fees().base;
2708 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2709 l[jss::hash] = to_string(lpClosed->info().hash);
2710
2711 if (!human)
2712 {
2713 l[jss::base_fee] = baseFee.jsonClipped();
2714 l[jss::reserve_base] =
2715 lpClosed->fees().accountReserve(0).jsonClipped();
2716 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2717 l[jss::close_time] = Json::Value::UInt(
2718 lpClosed->info().closeTime.time_since_epoch().count());
2719 }
2720 else
2721 {
2722 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2723 l[jss::reserve_base_xrp] =
2724 lpClosed->fees().accountReserve(0).decimalXRP();
2725 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2726
2727 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2728 std::abs(closeOffset.count()) >= 60)
2729 l[jss::close_time_offset] =
2730 static_cast<std::uint32_t>(closeOffset.count());
2731
2732 constexpr std::chrono::seconds highAgeThreshold{1000000};
2734 {
2735 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2736 l[jss::age] =
2737 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2738 }
2739 else
2740 {
2741 auto lCloseTime = lpClosed->info().closeTime;
2742 auto closeTime = app_.timeKeeper().closeTime();
2743 if (lCloseTime <= closeTime)
2744 {
2745 using namespace std::chrono_literals;
2746 auto age = closeTime - lCloseTime;
2747 l[jss::age] =
2748 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2749 }
2750 }
2751 }
2752
2753 if (valid)
2754 info[jss::validated_ledger] = l;
2755 else
2756 info[jss::closed_ledger] = l;
2757
2758 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2759 if (!lpPublished)
2760 info[jss::published_ledger] = "none";
2761 else if (lpPublished->info().seq != lpClosed->info().seq)
2762 info[jss::published_ledger] = lpPublished->info().seq;
2763 }
2764
2765 accounting_.json(info);
2766 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2767 info[jss::jq_trans_overflow] =
2769 info[jss::peer_disconnects] =
2771 info[jss::peer_disconnects_resources] =
2773
2774 // This array must be sorted in increasing order.
2775 static constexpr std::array<std::string_view, 7> protocols{
2776 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2777 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2778 {
2780 for (auto const& port : app_.getServerHandler().setup().ports)
2781 {
2782 // Don't publish admin ports for non-admin users
2783 if (!admin &&
2784 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2785 port.admin_user.empty() && port.admin_password.empty()))
2786 continue;
2789 std::begin(port.protocol),
2790 std::end(port.protocol),
2791 std::begin(protocols),
2792 std::end(protocols),
2793 std::back_inserter(proto));
2794 if (!proto.empty())
2795 {
2796 auto& jv = ports.append(Json::Value(Json::objectValue));
2797 jv[jss::port] = std::to_string(port.port);
2798 jv[jss::protocol] = Json::Value{Json::arrayValue};
2799 for (auto const& p : proto)
2800 jv[jss::protocol].append(p);
2801 }
2802 }
2803
2804 if (app_.config().exists(SECTION_PORT_GRPC))
2805 {
2806 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
2807 auto const optPort = grpcSection.get("port");
2808 if (optPort && grpcSection.get("ip"))
2809 {
2810 auto& jv = ports.append(Json::Value(Json::objectValue));
2811 jv[jss::port] = *optPort;
2812 jv[jss::protocol] = Json::Value{Json::arrayValue};
2813 jv[jss::protocol].append("grpc");
2814 }
2815 }
2816 info[jss::ports] = std::move(ports);
2817 }
2818
2819 return info;
2820}
2821
// NOTE(review): the extracted listing elides original lines 2823 and 2825 —
// the function name and its single body statement are missing here. Judging
// by position (just before the getInfo() wrapper below) this is presumably
// the ledger-fetch reset wrapper — TODO confirm against the full source.
2822void
2824{
2826}
2827
// Thin wrapper: returns the InboundLedgers subsystem's status/info object.
// NOTE(review): the listing elides lines 2828-2829 (return type and function
// name) — presumably a Json::Value-returning fetch-info accessor; confirm.
2830{
2831 return app_.getInboundLedgers().getInfo();
2832}
2833
// Publish a proposed (not yet validated) transaction to every subscriber of
// the real-time transactions stream (sRTTransactions), then fan out to
// per-account real-time subscribers via pubProposedAccountTransaction().
// Dead subscriptions (expired weak_ptrs) are erased while iterating.
// NOTE(review): the listing elides line 2835 (the qualified function name)
// and line 2845 (presumably a lock guard protecting mStreamMaps — confirm).
2834void
2836 std::shared_ptr<ReadView const> const& ledger,
2837 std::shared_ptr<STTx const> const& transaction,
2838 TER result)
2839{
// Build the version-multiplexed JSON once; validated=false, no metadata.
2840 MultiApiJson jvObj =
2841 transJson(transaction, result, false, ledger, std::nullopt);
2842
2843 {
2845
2846 auto it = mStreamMaps[sRTTransactions].begin();
2847 while (it != mStreamMaps[sRTTransactions].end())
2848 {
2849 InfoSub::pointer p = it->second.lock();
2850
2851 if (p)
2852 {
// Send the JSON rendering matching this subscriber's API version.
2853 jvObj.visit(
2854 p->getApiVersion(), //
2855 [&](Json::Value const& jv) { p->send(jv, true); });
2856 ++it;
2857 }
2858 else
2859 {
// Subscriber is gone; drop the stale entry.
2860 it = mStreamMaps[sRTTransactions].erase(it);
2861 }
2862 }
2863 }
2864
2865 pubProposedAccountTransaction(ledger, transaction, result);
2866}
2867
// Publish an accepted (closed/validated) ledger: emit a "ledgerClosed" event
// to sLedger stream subscribers, book-change summaries to sBookChanges
// subscribers, kick off any delayed account-history subscriptions, and then
// publish every transaction in the ledger via pubValidatedTransaction.
// NOTE(review): the listing elides the function name line (2869), the
// declaration of alpAccepted (2874), apparent lock-guard lines (2892, 2914),
// the jvObj declaration (2896-2897), a condition line (2914-2915), and the
// pubValidatedTransaction call line (2978) — confirm against full source.
2868void
2870{
2871 // Ledgers are published only when they acquire sufficient validations
2872 // Holes are filled across connection loss or other catastrophe
2873
// Fetch (or build and canonicalize) the AcceptedLedger wrapper for this
// ledger so transaction metadata is computed only once per ledger.
2875 app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
2876 if (!alpAccepted)
2877 {
2878 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
2879 app_.getAcceptedLedgerCache().canonicalize_replace_client(
2880 lpAccepted->info().hash, alpAccepted);
2881 }
2882
2883 XRPL_ASSERT(
2884 alpAccepted->getLedger().get() == lpAccepted.get(),
2885 "ripple::NetworkOPsImp::pubLedger : accepted input");
2886
2887 {
2888 JLOG(m_journal.debug())
2889 << "Publishing ledger " << lpAccepted->info().seq << " "
2890 << lpAccepted->info().hash;
2891
2893
2894 if (!mStreamMaps[sLedger].empty())
2895 {
2897
// Assemble the "ledgerClosed" notification: sequence, hash, close
// time, fee/reserve settings, and transaction count.
2898 jvObj[jss::type] = "ledgerClosed";
2899 jvObj[jss::ledger_index] = lpAccepted->info().seq;
2900 jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
2901 jvObj[jss::ledger_time] = Json::Value::UInt(
2902 lpAccepted->info().closeTime.time_since_epoch().count());
2903
// fee_ref is deprecated and only reported pre-XRPFees amendment.
2904 if (!lpAccepted->rules().enabled(featureXRPFees))
2905 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
2906 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
2907 jvObj[jss::reserve_base] =
2908 lpAccepted->fees().accountReserve(0).jsonClipped();
2909 jvObj[jss::reserve_inc] =
2910 lpAccepted->fees().increment.jsonClipped();
2911
2912 jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
2913
2915 {
2916 jvObj[jss::validated_ledgers] =
2918 }
2919
// Deliver to every live sLedger subscriber; prune dead ones.
2920 auto it = mStreamMaps[sLedger].begin();
2921 while (it != mStreamMaps[sLedger].end())
2922 {
2923 InfoSub::pointer p = it->second.lock();
2924 if (p)
2925 {
2926 p->send(jvObj, true);
2927 ++it;
2928 }
2929 else
2930 it = mStreamMaps[sLedger].erase(it);
2931 }
2932 }
2933
2934 if (!mStreamMaps[sBookChanges].empty())
2935 {
// Book-changes summary is computed once and shared by all subscribers.
2936 Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
2937
2938 auto it = mStreamMaps[sBookChanges].begin();
2939 while (it != mStreamMaps[sBookChanges].end())
2940 {
2941 InfoSub::pointer p = it->second.lock();
2942 if (p)
2943 {
2944 p->send(jvObj, true);
2945 ++it;
2946 }
2947 else
2948 it = mStreamMaps[sBookChanges].erase(it);
2949 }
2950 }
2951
2952 {
// One-time bootstrap: account_history subscriptions made before the
// first validated ledger are started now (separationLedgerSeq_ == 0
// marks a subscription whose start was deferred).
2953 static bool firstTime = true;
2954 if (firstTime)
2955 {
2956 // First validated ledger, start delayed SubAccountHistory
2957 firstTime = false;
2958 for (auto& outer : mSubAccountHistory)
2959 {
2960 for (auto& inner : outer.second)
2961 {
2962 auto& subInfo = inner.second;
2963 if (subInfo.index_->separationLedgerSeq_ == 0)
2964 {
2966 alpAccepted->getLedger(), subInfo);
2967 }
2968 }
2969 }
2970 }
2971 }
2972 }
2973
2974 // Don't lock since pubAcceptedTransaction is locking.
// The third argument flags the last transaction of the ledger, used for
// the account_history_boundary marker downstream.
2975 for (auto const& accTx : *alpAccepted)
2976 {
2977 JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
2979 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
2980 }
2981}
2982
// Detect a change in the server's fee summary (base fee from the open ledger
// plus load-fee-track state) and, only if something changed, schedule a job
// to push the new server status to subscribers via pubServer().
// NOTE(review): the listing elides the function name line (2984), part of
// the ServerFeeSummary construction (2986, 2988), and the job-queue addJob
// call line (2994) — confirm against the full source.
2983void
2985{
2987 app_.openLedger().current()->fees().base,
2989 app_.getFeeTrack()};
2990
2991 // only schedule the job if something has changed
2992 if (f != mLastFeeSummary)
2993 {
2995 jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
2996 pubServer();
2997 });
2998 }
2999}
3000
// Schedule an async job that publishes the new consensus phase to consensus
// stream subscribers. The phase is captured by value into the lambda.
// NOTE(review): lines 3002, 3004-3005 (function name and the addJob call
// with its job type) are elided from this listing.
3001void
3003{
3006 "reportConsensusStateChange->pubConsensus",
3007 [this, phase]() { pubConsensus(phase); });
3008}
3009
// Expire stale entries from the local transaction set against the given
// view. NOTE(review): line 3011 (function name/parameter) is elided;
// presumably takes the ReadView used by m_localTX->sweep — confirm.
3010inline void
3012{
3013 m_localTX->sweep(view);
3014}
// Number of transactions currently held in the local transaction set.
// NOTE(review): line 3016 (function name) is elided from this listing.
3015inline std::size_t
3017{
3018 return m_localTX->size();
3019}
3020
3021// This routine should only be used to publish accepted or validated
3022// transactions.
// Builds the JSON notification for a transaction across all supported API
// versions (MultiApiJson): engine result, ledger linkage, optional metadata
// (with delivered_amount / mpt fields inserted), and — for OfferCreates not
// self-funded — the owner_funds of the taker-gets issuer. The per-version
// loop at the end moves "transaction" to "tx_json" and relocates the hash
// for API v2+.
// NOTE(review): the listing elides the return type / qualified name
// (3023-3024), the meta parameter (3029), the jvObj declaration (3031),
// insert-helper call heads (3047, 3049), an accountFunds argument (3089),
// and the forAllApiVersions wrapper (3097, 3100) — confirm against source.
3025 std::shared_ptr<STTx const> const& transaction,
3026 TER result,
3027 bool validated,
3028 std::shared_ptr<ReadView const> const& ledger,
3030{
3032 std::string sToken;
3033 std::string sHuman;
3034
// Translate the TER code into its token ("tesSUCCESS") and message forms.
3035 transResultInfo(result, sToken, sHuman);
3036
3037 jvObj[jss::type] = "transaction";
3038 // NOTE jvObj is not a finished object for either API version. After
3039 // it's populated, we need to finish it for a specific API version. This is
3040 // done in a loop, near the end of this function.
3041 jvObj[jss::transaction] =
3042 transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3043
3044 if (meta)
3045 {
3046 jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
3048 jvObj[jss::meta], *ledger, transaction, meta->get());
3050 jvObj[jss::meta], transaction, meta->get());
3051 }
3052
// Closed ledgers have a fixed hash; open ledgers do not.
3053 if (!ledger->open())
3054 jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3055
3056 if (validated)
3057 {
3058 jvObj[jss::ledger_index] = ledger->info().seq;
3059 jvObj[jss::transaction][jss::date] =
3060 ledger->info().closeTime.time_since_epoch().count();
3061 jvObj[jss::validated] = true;
3062 jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3063
3064 // WRITEME: Put the account next seq here
3065 }
3066 else
3067 {
3068 jvObj[jss::validated] = false;
3069 jvObj[jss::ledger_current_index] = ledger->info().seq;
3070 }
3071
3072 jvObj[jss::status] = validated ? "closed" : "proposed";
3073 jvObj[jss::engine_result] = sToken;
3074 jvObj[jss::engine_result_code] = result;
3075 jvObj[jss::engine_result_message] = sHuman;
3076
3077 if (transaction->getTxnType() == ttOFFER_CREATE)
3078 {
3079 auto const account = transaction->getAccountID(sfAccount);
3080 auto const amount = transaction->getFieldAmount(sfTakerGets);
3081
3082 // If the offer create is not self funded then add the owner balance
3083 if (account != amount.issue().account)
3084 {
3085 auto const ownerFunds = accountFunds(
3086 *ledger,
3087 account,
3088 amount,
3090 app_.journal("View"));
3091 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3092 }
3093 }
3094
// Finish the object per API version: v2+ renames transaction -> tx_json
// and hoists the hash to top level; v1 keeps hash inside transaction.
3095 std::string const hash = to_string(transaction->getTransactionID());
3096 MultiApiJson multiObj{jvObj};
3098 multiObj.visit(), //
3099 [&]<unsigned Version>(
3101 RPC::insertDeliverMax(
3102 jvTx[jss::transaction], transaction->getTxnType(), Version);
3103
3104 if constexpr (Version > 1)
3105 {
3106 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3107 jvTx[jss::hash] = hash;
3108 }
3109 else
3110 {
3111 jvTx[jss::transaction][jss::hash] = hash;
3112 }
3113 });
3114
3115 return multiObj;
3116}
3117
// Publish a validated transaction (with metadata) to both the sTransactions
// and sRTTransactions streams, feed successful transactions to the order
// book DB, and then fan out to per-account subscribers. `last` marks the
// final transaction of its ledger (account_history boundary downstream).
// NOTE(review): the listing elides the qualified function name (3119) and
// line 3132 (presumably the lock guard protecting mStreamMaps — confirm).
3118void
3120 std::shared_ptr<ReadView const> const& ledger,
3121 const AcceptedLedgerTx& transaction,
3122 bool last)
3123{
3124 auto const& stTxn = transaction.getTxn();
3125
3126 // Create two different Json objects, for different API versions
3127 auto const metaRef = std::ref(transaction.getMeta());
3128 auto const trResult = transaction.getResult();
3129 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3130
3131 {
3133
// Validated-transactions stream; prune dead subscriptions in place.
3134 auto it = mStreamMaps[sTransactions].begin();
3135 while (it != mStreamMaps[sTransactions].end())
3136 {
3137 InfoSub::pointer p = it->second.lock();
3138
3139 if (p)
3140 {
3141 jvObj.visit(
3142 p->getApiVersion(), //
3143 [&](Json::Value const& jv) { p->send(jv, true); });
3144 ++it;
3145 }
3146 else
3147 it = mStreamMaps[sTransactions].erase(it);
3148 }
3149
// Real-time stream also receives validated transactions.
3150 it = mStreamMaps[sRTTransactions].begin();
3151
3152 while (it != mStreamMaps[sRTTransactions].end())
3153 {
3154 InfoSub::pointer p = it->second.lock();
3155
3156 if (p)
3157 {
3158 jvObj.visit(
3159 p->getApiVersion(), //
3160 [&](Json::Value const& jv) { p->send(jv, true); });
3161 ++it;
3162 }
3163 else
3164 it = mStreamMaps[sRTTransactions].erase(it);
3165 }
3166 }
3167
// Only successful transactions can change order books.
3168 if (transaction.getResult() == tesSUCCESS)
3169 app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3170
3171 pubAccountTransaction(ledger, transaction, last);
3172}
3173
// Notify per-account subscribers about a validated transaction: collects the
// live subscribers from the real-time (mSubRTAccount), regular
// (mSubAccount), and account-history (mSubAccountHistory) maps for every
// account the transaction affected, then builds the JSON once and sends it.
// Account-history subscribers additionally get forward-index / boundary /
// first-tx markers. Dead weak_ptr entries are pruned as encountered.
// NOTE(review): the listing elides the qualified name (3175), the `notify`
// set declaration (3180), the lock guard (3187), part of the outer condition
// (3190), and part of the XRPL_ASSERT condition (3290) — confirm.
3174void
3176 std::shared_ptr<ReadView const> const& ledger,
3177 AcceptedLedgerTx const& transaction,
3178 bool last)
3179{
3181 int iProposed = 0;
3182 int iAccepted = 0;
3183
3184 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3185 auto const currLedgerSeq = ledger->seq();
3186 {
3188
3189 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3191 {
3192 for (auto const& affectedAccount : transaction.getAffected())
3193 {
// Real-time subscribers for this account.
3194 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3195 simiIt != mSubRTAccount.end())
3196 {
3197 auto it = simiIt->second.begin();
3198
3199 while (it != simiIt->second.end())
3200 {
3201 InfoSub::pointer p = it->second.lock();
3202
3203 if (p)
3204 {
3205 notify.insert(p);
3206 ++it;
3207 ++iProposed;
3208 }
3209 else
3210 it = simiIt->second.erase(it);
3211 }
3212 }
3213
// Regular (accepted-only) subscribers for this account.
3214 if (auto simiIt = mSubAccount.find(affectedAccount);
3215 simiIt != mSubAccount.end())
3216 {
3217 auto it = simiIt->second.begin();
3218 while (it != simiIt->second.end())
3219 {
3220 InfoSub::pointer p = it->second.lock();
3221
3222 if (p)
3223 {
3224 notify.insert(p);
3225 ++it;
3226 ++iAccepted;
3227 }
3228 else
3229 it = simiIt->second.erase(it);
3230 }
3231 }
3232
// Account-history subscribers: skip those whose separation point
// is at or after this ledger (their backfill covers it instead).
3233 if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3234 histoIt != mSubAccountHistory.end())
3235 {
3236 auto& subs = histoIt->second;
3237 auto it = subs.begin();
3238 while (it != subs.end())
3239 {
3240 SubAccountHistoryInfoWeak const& info = it->second;
3241 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3242 {
3243 ++it;
3244 continue;
3245 }
3246
3247 if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3248 {
3249 accountHistoryNotify.emplace_back(
3250 SubAccountHistoryInfo{isSptr, info.index_});
3251 ++it;
3252 }
3253 else
3254 {
3255 it = subs.erase(it);
3256 }
3257 }
3258 if (subs.empty())
3259 mSubAccountHistory.erase(histoIt);
3260 }
3261 }
3262 }
3263 }
3264
3265 JLOG(m_journal.trace())
3266 << "pubAccountTransaction: " << "proposed=" << iProposed
3267 << ", accepted=" << iAccepted;
3268
3269 if (!notify.empty() || !accountHistoryNotify.empty())
3270 {
3271 auto const& stTxn = transaction.getTxn();
3272
3273 // Create two different Json objects, for different API versions
3274 auto const metaRef = std::ref(transaction.getMeta());
3275 auto const trResult = transaction.getResult();
3276 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3277
3278 for (InfoSub::ref isrListener : notify)
3279 {
3280 jvObj.visit(
3281 isrListener->getApiVersion(), //
3282 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3283 }
3284
// History subscribers also learn when this is the ledger's last tx.
3285 if (last)
3286 jvObj.set(jss::account_history_boundary, true);
3287
3288 XRPL_ASSERT(
3289 jvObj.isMember(jss::account_history_tx_stream) ==
3291 "ripple::NetworkOPsImp::pubAccountTransaction : "
3292 "account_history_tx_stream not set");
3293 for (auto& info : accountHistoryNotify)
3294 {
3295 auto& index = info.index_;
// First forward tx with no backfill pending => mark it "first".
3296 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3297 jvObj.set(jss::account_history_tx_first, true);
3298
// Forward (post-subscription) txs use a monotonically increasing
// non-negative index; historical ones (elsewhere) count down.
3299 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3300
3301 jvObj.visit(
3302 info.sink_->getApiVersion(), //
3303 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3304 }
3305 }
3306}
3307
// Notify real-time per-account subscribers about a proposed (unvalidated)
// transaction. Mirrors pubAccountTransaction but only consults
// mSubRTAccount and uses the transaction's mentioned accounts. Returns
// early (while holding the lock) if there are no real-time subscriptions.
// NOTE(review): the listing elides the qualified name (3309), the tx
// parameter line (3311), the `notify` declaration (3314), the lock guard
// (3320), part of the outer condition (3326), and part of the XRPL_ASSERT
// condition (3367) — confirm against the full source.
3308void
3310 std::shared_ptr<ReadView const> const& ledger,
3312 TER result)
3313{
3315 int iProposed = 0;
3316
// Kept for symmetry with pubAccountTransaction; no history subscribers
// are collected on the proposed path in the visible code.
3317 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3318
3319 {
3321
3322 if (mSubRTAccount.empty())
3323 return;
3324
3325 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3327 {
3328 for (auto const& affectedAccount : tx->getMentionedAccounts())
3329 {
3330 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3331 simiIt != mSubRTAccount.end())
3332 {
3333 auto it = simiIt->second.begin();
3334
3335 while (it != simiIt->second.end())
3336 {
3337 InfoSub::pointer p = it->second.lock();
3338
3339 if (p)
3340 {
3341 notify.insert(p);
3342 ++it;
3343 ++iProposed;
3344 }
3345 else
3346 it = simiIt->second.erase(it);
3347 }
3348 }
3349 }
3350 }
3351 }
3352
3353 JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3354
3355 if (!notify.empty() || !accountHistoryNotify.empty())
3356 {
3357 // Create two different Json objects, for different API versions
3358 MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3359
3360 for (InfoSub::ref isrListener : notify)
3361 jvObj.visit(
3362 isrListener->getApiVersion(), //
3363 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3364
3365 XRPL_ASSERT(
3366 jvObj.isMember(jss::account_history_tx_stream) ==
3368 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3369 "account_history_tx_stream not set");
3370 for (auto& info : accountHistoryNotify)
3371 {
3372 auto& index = info.index_;
3373 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3374 jvObj.set(jss::account_history_tx_first, true);
3375 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3376 jvObj.visit(
3377 info.sink_->getApiVersion(), //
3378 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3379 }
3380 }
3381}
3382
3383//
3384// Monitoring
3385//
3386
// Subscribe a listener to a set of accounts, either on the real-time
// (rt=true -> mSubRTAccount) or accepted (mSubAccount) map. The listener's
// own bookkeeping is updated first, then the server-side map is updated
// keyed by account, sub-keyed by the listener's sequence number.
// NOTE(review): the listing elides the qualified name (3388) and line 3403
// (presumably the lock guard protecting the subscription maps — confirm).
3387void
3389 InfoSub::ref isrListener,
3390 hash_set<AccountID> const& vnaAccountIDs,
3391 bool rt)
3392{
3393 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3394
3395 for (auto const& naAccountID : vnaAccountIDs)
3396 {
3397 JLOG(m_journal.trace())
3398 << "subAccount: account: " << toBase58(naAccountID);
3399
3400 isrListener->insertSubAccountInfo(naAccountID, rt);
3401 }
3402
3404
3405 for (auto const& naAccountID : vnaAccountIDs)
3406 {
3407 auto simIterator = subMap.find(naAccountID);
3408 if (simIterator == subMap.end())
3409 {
3410 // Not found, note that account has a new single listner.
3411 SubMapType usisElement;
3412 usisElement[isrListener->getSeq()] = isrListener;
3413 // VFALCO NOTE This is making a needless copy of naAccountID
3414 subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3415 }
3416 else
3417 {
3418 // Found, note that the account has another listener.
3419 simIterator->second[isrListener->getSeq()] = isrListener;
3420 }
3421 }
3422}
3423
// Unsubscribe a listener from a set of accounts: remove the accounts from
// the listener's own bookkeeping, then delegate server-side map cleanup to
// unsubAccountInternal (keyed by the listener's sequence number).
// NOTE(review): line 3425 (the qualified function name) is elided.
3424void
3426 InfoSub::ref isrListener,
3427 hash_set<AccountID> const& vnaAccountIDs,
3428 bool rt)
3429{
3430 for (auto const& naAccountID : vnaAccountIDs)
3431 {
3432 // Remove from the InfoSub
3433 isrListener->deleteSubAccountInfo(naAccountID, rt);
3434 }
3435
3436 // Remove from the server
3437 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3438}
3439
// Server-side half of account unsubscription: for each account, erase the
// entry for subscriber sequence uSeq from the appropriate map and drop the
// account's bucket entirely once it has no listeners left.
// NOTE(review): the listing elides the qualified name (3441) and line 3446
// (presumably the lock guard protecting the subscription maps — confirm).
3440void
3442 std::uint64_t uSeq,
3443 hash_set<AccountID> const& vnaAccountIDs,
3444 bool rt)
3445{
3447
3448 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3449
3450 for (auto const& naAccountID : vnaAccountIDs)
3451 {
3452 auto simIterator = subMap.find(naAccountID);
3453
3454 if (simIterator != subMap.end())
3455 {
3456 // Found
3457 simIterator->second.erase(uSeq);
3458
3459 if (simIterator->second.empty())
3460 {
3461 // Don't need hash entry.
3462 subMap.erase(simIterator);
3463 }
3464 }
3465 }
3466}
3467
// Start (or reschedule) the background job that streams an account's
// historical transactions, newest-first, to an account_history subscriber.
// The job pages backward through validated ledgers in windows of ~1024,
// charging the subscriber per window, until it reaches the genesis ledger,
// finds the account-creating transaction, loses its subscriber, or is told
// to stop (stopHistorical_). Only a SQLite relational database backend is
// supported; anything else errors out and unsubscribes the client.
// NOTE(review): the listing elides the qualified name (3469), the
// addJob/coro scheduling line (3495-3496), the getMoreTxns marker parameter
// and return type (3576-3579), the db accessor and options lines
// (3584-3585), the ledger-range source (3630), the reschedule call (3644),
// the marker declaration (3648), the getLedgerBySeq call head (3678), and
// the stTxn declaration (3688) — confirm against the full source.
3468void
3470{
// Determine once per process which relational DB backend is available.
3471 enum DatabaseType { Sqlite, None };
3472 static const auto databaseType = [&]() -> DatabaseType {
3473 // Use a dynamic_cast to return DatabaseType::None
3474 // on failure.
3475 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3476 {
3477 return DatabaseType::Sqlite;
3478 }
3479 return DatabaseType::None;
3480 }();
3481
3482 if (databaseType == DatabaseType::None)
3483 {
3484 JLOG(m_journal.error())
3485 << "AccountHistory job for account "
3486 << toBase58(subInfo.index_->accountId_) << " no database";
3487 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3488 {
3489 sptr->send(rpcError(rpcINTERNAL), true);
3490 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3491 }
3492 return;
3493 }
3494
3497 "AccountHistoryTxStream",
3498 [this, dbType = databaseType, subInfo]() {
3499 auto const& accountId = subInfo.index_->accountId_;
3500 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3501 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3502
3503 JLOG(m_journal.trace())
3504 << "AccountHistory job for account " << toBase58(accountId)
3505 << " started. lastLedgerSeq=" << lastLedgerSeq;
3506
// Detect the account's very first transaction, which terminates the
// backward scan.
3507 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3508 std::shared_ptr<TxMeta> const& meta) -> bool {
3509 /*
3510 * genesis account: first tx is the one with seq 1
3511 * other account: first tx is the one created the account
3512 */
3513 if (accountId == genesisAccountId)
3514 {
3515 auto stx = tx->getSTransaction();
3516 if (stx->getAccountID(sfAccount) == accountId &&
3517 stx->getSeqProxy().value() == 1)
3518 return true;
3519 }
3520
// Non-genesis: look for the AccountRoot creation (NewFields
// carrying this account) in the metadata.
3521 for (auto& node : meta->getNodes())
3522 {
3523 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3524 continue;
3525
3526 if (node.isFieldPresent(sfNewFields))
3527 {
3528 if (auto inner = dynamic_cast<const STObject*>(
3529 node.peekAtPField(sfNewFields));
3530 inner)
3531 {
3532 if (inner->isFieldPresent(sfAccount) &&
3533 inner->getAccountID(sfAccount) == accountId)
3534 {
3535 return true;
3536 }
3537 }
3538 }
3539 }
3540
3541 return false;
3542 };
3543
// Send helper; optionally unsubscribes after sending (error paths).
3544 auto send = [&](Json::Value const& jvObj,
3545 bool unsubscribe) -> bool {
3546 if (auto sptr = subInfo.sinkWptr_.lock())
3547 {
3548 sptr->send(jvObj, true);
3549 if (unsubscribe)
3550 unsubAccountHistory(sptr, accountId, false);
3551 return true;
3552 }
3553
3554 return false;
3555 };
3556
// Same as `send` but version-aware for MultiApiJson payloads.
3557 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3558 bool unsubscribe) -> bool {
3559 if (auto sptr = subInfo.sinkWptr_.lock())
3560 {
3561 jvObj.visit(
3562 sptr->getApiVersion(), //
3563 [&](Json::Value const& jv) { sptr->send(jv, true); });
3564
3565 if (unsubscribe)
3566 unsubAccountHistory(sptr, accountId, false);
3567 return true;
3568 }
3569
3570 return false;
3571 };
3572
// Page of newest-first account transactions within a ledger range.
3573 auto getMoreTxns =
3574 [&](std::uint32_t minLedger,
3575 std::uint32_t maxLedger,
3580 switch (dbType)
3581 {
3582 case Sqlite: {
3583 auto db = static_cast<SQLiteDatabase*>(
3586 accountId, minLedger, maxLedger, marker, 0, true};
3587 return db->newestAccountTxPage(options);
3588 }
3589 default: {
3590 UNREACHABLE(
3591 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3592 "getMoreTxns : invalid database type");
3593 return {};
3594 }
3595 }
3596 };
3597
3598 /*
3599 * search backward until the genesis ledger or asked to stop
3600 */
3601 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3602 {
// Charge the subscriber per window; a dead subscriber ends the job.
3603 int feeChargeCount = 0;
3604 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3605 {
3606 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3607 ++feeChargeCount;
3608 }
3609 else
3610 {
3611 JLOG(m_journal.trace())
3612 << "AccountHistory job for account "
3613 << toBase58(accountId) << " no InfoSub. Fee charged "
3614 << feeChargeCount << " times.";
3615 return;
3616 }
3617
3618 // try to search in 1024 ledgers till reaching genesis ledgers
3619 auto startLedgerSeq =
3620 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3621 JLOG(m_journal.trace())
3622 << "AccountHistory job for account " << toBase58(accountId)
3623 << ", working on ledger range [" << startLedgerSeq << ","
3624 << lastLedgerSeq << "]";
3625
// Only scan windows fully covered by locally validated ledgers.
3626 auto haveRange = [&]() -> bool {
3627 std::uint32_t validatedMin = UINT_MAX;
3628 std::uint32_t validatedMax = 0;
3629 auto haveSomeValidatedLedgers =
3631 validatedMin, validatedMax);
3632
3633 return haveSomeValidatedLedgers &&
3634 validatedMin <= startLedgerSeq &&
3635 lastLedgerSeq <= validatedMax;
3636 }();
3637
3638 if (!haveRange)
3639 {
// Incomplete range: reschedule rather than stream gaps.
3640 JLOG(m_journal.debug())
3641 << "AccountHistory reschedule job for account "
3642 << toBase58(accountId) << ", incomplete ledger range ["
3643 << startLedgerSeq << "," << lastLedgerSeq << "]";
3645 return;
3646 }
3647
// Drain all pages (marker-driven) within the current window.
3649 while (!subInfo.index_->stopHistorical_)
3650 {
3651 auto dbResult =
3652 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3653 if (!dbResult)
3654 {
3655 JLOG(m_journal.debug())
3656 << "AccountHistory job for account "
3657 << toBase58(accountId) << " getMoreTxns failed.";
3658 send(rpcError(rpcINTERNAL), true);
3659 return;
3660 }
3661
3662 auto const& txns = dbResult->first;
3663 marker = dbResult->second;
3664 size_t num_txns = txns.size();
3665 for (size_t i = 0; i < num_txns; ++i)
3666 {
3667 auto const& [tx, meta] = txns[i];
3668
3669 if (!tx || !meta)
3670 {
3671 JLOG(m_journal.debug())
3672 << "AccountHistory job for account "
3673 << toBase58(accountId) << " empty tx or meta.";
3674 send(rpcError(rpcINTERNAL), true);
3675 return;
3676 }
3677 auto curTxLedger =
3679 tx->getLedger());
3680 if (!curTxLedger)
3681 {
3682 JLOG(m_journal.debug())
3683 << "AccountHistory job for account "
3684 << toBase58(accountId) << " no ledger.";
3685 send(rpcError(rpcINTERNAL), true);
3686 return;
3687 }
3689 tx->getSTransaction();
3690 if (!stTxn)
3691 {
3692 JLOG(m_journal.debug())
3693 << "AccountHistory job for account "
3694 << toBase58(accountId)
3695 << " getSTransaction failed.";
3696 send(rpcError(rpcINTERNAL), true);
3697 return;
3698 }
3699
3700 auto const mRef = std::ref(*meta);
3701 auto const trR = meta->getResultTER();
3702 MultiApiJson jvTx =
3703 transJson(stTxn, trR, true, curTxLedger, mRef);
3704
// Historical txs count DOWN from the subscription point;
// a ledger boundary is flagged on the last tx of each ledger.
3705 jvTx.set(
3706 jss::account_history_tx_index, txHistoryIndex--);
3707 if (i + 1 == num_txns ||
3708 txns[i + 1].first->getLedger() != tx->getLedger())
3709 jvTx.set(jss::account_history_boundary, true);
3710
3711 if (isFirstTx(tx, meta))
3712 {
3713 jvTx.set(jss::account_history_tx_first, true);
3714 sendMultiApiJson(jvTx, false);
3715
3716 JLOG(m_journal.trace())
3717 << "AccountHistory job for account "
3718 << toBase58(accountId)
3719 << " done, found last tx.";
3720 return;
3721 }
3722 else
3723 {
3724 sendMultiApiJson(jvTx, false);
3725 }
3726 }
3727
3728 if (marker)
3729 {
3730 JLOG(m_journal.trace())
3731 << "AccountHistory job for account "
3732 << toBase58(accountId)
3733 << " paging, marker=" << marker->ledgerSeq << ":"
3734 << marker->txnSeq;
3735 }
3736 else
3737 {
3738 break;
3739 }
3740 }
3741
// Move the window backward; genesis reached ends the job.
3742 if (!subInfo.index_->stopHistorical_)
3743 {
3744 lastLedgerSeq = startLedgerSeq - 1;
3745 if (lastLedgerSeq <= 1)
3746 {
3747 JLOG(m_journal.trace())
3748 << "AccountHistory job for account "
3749 << toBase58(accountId)
3750 << " done, reached genesis ledger.";
3751 return;
3752 }
3753 }
3754 }
3755 });
3756}
3757
// Begin streaming for an account_history subscription once a validated
// ledger is available: records the separation point (forward vs historical
// transactions), skips accounts that don't exist (or a genesis account with
// no transactions yet), and then schedules the historical backfill job.
// NOTE(review): the listing elides the qualified name (3759) and the
// subInfo parameter line (3761) — confirm against the full source.
3758void
3760 std::shared_ptr<ReadView const> const& ledger,
3762{
// Transactions at or before this ledger are "historical"; later ones are
// streamed forward by pubAccountTransaction.
3763 subInfo.index_->separationLedgerSeq_ = ledger->seq();
3764 auto const& accountId = subInfo.index_->accountId_;
3765 auto const accountKeylet = keylet::account(accountId);
3766 if (!ledger->exists(accountKeylet))
3767 {
3768 JLOG(m_journal.debug())
3769 << "subAccountHistoryStart, no account " << toBase58(accountId)
3770 << ", no need to add AccountHistory job.";
3771 return;
3772 }
3773 if (accountId == genesisAccountId)
3774 {
3775 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3776 {
// Sequence 1 means the genesis account has never transacted.
3777 if (sleAcct->getFieldU32(sfSequence) == 1)
3778 {
3779 JLOG(m_journal.debug())
3780 << "subAccountHistoryStart, genesis account "
3781 << toBase58(accountId)
3782 << " does not have tx, no need to add AccountHistory job.";
3783 return;
3784 }
3785 }
3786 else
3787 {
3788 UNREACHABLE(
3789 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
3790 "access genesis account");
3791 return;
3792 }
3793 }
3794 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
3795 subInfo.index_->haveHistorical_ = true;
3796
3797 JLOG(m_journal.debug())
3798 << "subAccountHistoryStart, add AccountHistory job: accountId="
3799 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
3800
3801 addAccountHistoryJob(subInfo);
3802}
3803
// Create an account_history subscription for one account. Rejects duplicate
// subscriptions from the same listener (rpcINVALID_PARAMS), registers the
// subscription under the listener's sequence number, and starts streaming
// immediately if a validated ledger exists — otherwise the start is deferred
// until the first validated ledger (handled in pubLedger). Returns
// rpcSUCCESS on registration even when the start is deferred.
// NOTE(review): the listing elides the return type / qualified name
// (3804-3805), the SubAccountHistoryInfoWeak construction head (3817-3818),
// the inner-map declaration (3823), and the emplace_hint head (3825).
3806 InfoSub::ref isrListener,
3807 AccountID const& accountId)
3808{
3809 if (!isrListener->insertSubAccountHistory(accountId))
3810 {
3811 JLOG(m_journal.debug())
3812 << "subAccountHistory, already subscribed to account "
3813 << toBase58(accountId);
3814 return rpcINVALID_PARAMS;
3815 }
3816
3819 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
3820 auto simIterator = mSubAccountHistory.find(accountId);
3821 if (simIterator == mSubAccountHistory.end())
3822 {
3824 inner.emplace(isrListener->getSeq(), ahi);
3826 simIterator, std::make_pair(accountId, inner));
3827 }
3828 else
3829 {
3830 simIterator->second.emplace(isrListener->getSeq(), ahi);
3831 }
3832
3833 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
3834 if (ledger)
3835 {
3836 subAccountHistoryStart(ledger, ahi);
3837 }
3838 else
3839 {
3840 // The node does not have validated ledgers, so wait for
3841 // one before start streaming.
3842 // In this case, the subscription is also considered successful.
3843 JLOG(m_journal.debug())
3844 << "subAccountHistory, no validated ledger yet, delay start";
3845 }
3846
3847 return rpcSUCCESS;
3848}
3849
// Unsubscribe a listener from an account's history stream. With
// historyOnly=true only the historical backfill is stopped (the forward
// stream continues); otherwise the listener's bookkeeping is cleared too.
// NOTE(review): line 3851 (the qualified function name) is elided.
3850void
3852 InfoSub::ref isrListener,
3853 AccountID const& account,
3854 bool historyOnly)
3855{
3856 if (!historyOnly)
3857 isrListener->deleteSubAccountHistory(account);
3858 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
3859}
3860
// Server-side half of account-history unsubscription: signal the backfill
// job to stop via stopHistorical_, and — unless historyOnly — erase the
// subscription entry (and the account's bucket once empty).
// NOTE(review): the listing elides the qualified name (3862) and line 3867
// (presumably the lock guard protecting mSubAccountHistory — confirm).
3861void
3863 std::uint64_t seq,
3864 const AccountID& account,
3865 bool historyOnly)
3866{
3868 auto simIterator = mSubAccountHistory.find(account);
3869 if (simIterator != mSubAccountHistory.end())
3870 {
3871 auto& subInfoMap = simIterator->second;
3872 auto subInfoIter = subInfoMap.find(seq);
3873 if (subInfoIter != subInfoMap.end())
3874 {
// The running job polls this flag and exits cooperatively.
3875 subInfoIter->second.index_->stopHistorical_ = true;
3876 }
3877
3878 if (!historyOnly)
3879 {
3880 simIterator->second.erase(seq);
3881 if (simIterator->second.empty())
3882 {
3883 mSubAccountHistory.erase(simIterator);
3884 }
3885 }
3886 JLOG(m_journal.debug())
3887 << "unsubAccountHistory, account " << toBase58(account)
3888 << ", historyOnly = " << (historyOnly ? "true" : "false");
3889 }
3890}
3891
3892bool
3894{
3895 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
3896 listeners->addSubscriber(isrListener);
3897 else
3898 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
3899 return true;
3900}
3901
// Remove a subscriber (by sequence number) from an order book's listener
// list, if that book currently has listeners. Always returns true.
// NOTE(review): the qualified function name line is elided from the listing.
3903bool
3904{
3905 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
3906 listeners->removeSubscriber(uSeq);
3907
3908 return true;
3909}
3910
// Standalone-mode only (`ledger_accept` RPC): force a consensus round with a
// simulated close time and return the new current ledger's sequence.
// NOTE(review): lines 3911-3913 (return type, qualified name, and the
// consensusDelay parameter) are elided from this listing.
3914{
3915 // This code-path is exclusively used when the server is in standalone
3916 // mode via `ledger_accept`
3917 XRPL_ASSERT(
3918 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
3919
3920 if (!m_standalone)
3921 Throw<std::runtime_error>(
3922 "Operation only possible in STANDALONE mode.");
3923
3924 // FIXME Could we improve on this and remove the need for a specialized
3925 // API in Consensus?
3926 beginConsensus(m_ledgerMaster.getClosedLedger()->info().hash, {});
3927 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
3928 return m_ledgerMaster.getCurrentLedger()->info().seq;
3929}
3930
3931// <-- bool: true=added, false=already there
3932bool
3934{
3935 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
3936 {
3937 jvResult[jss::ledger_index] = lpClosed->info().seq;
3938 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
3939 jvResult[jss::ledger_time] = Json::Value::UInt(
3940 lpClosed->info().closeTime.time_since_epoch().count());
3941 if (!lpClosed->rules().enabled(featureXRPFees))
3942 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3943 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
3944 jvResult[jss::reserve_base] =
3945 lpClosed->fees().accountReserve(0).jsonClipped();
3946 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
3947 }
3948
3950 {
3951 jvResult[jss::validated_ledgers] =
3953 }
3954
3956 return mStreamMaps[sLedger]
3957 .emplace(isrListener->getSeq(), isrListener)
3958 .second;
3959}
3960
3961// <-- bool: true=added, false=already there
3962bool
3964{
3967 .emplace(isrListener->getSeq(), isrListener)
3968 .second;
3969}
3970
3971// <-- bool: true=erased, false=was not there
3972bool
3974{
3976 return mStreamMaps[sLedger].erase(uSeq);
3977}
3978
3979// <-- bool: true=erased, false=was not there
3980bool
3982{
3984 return mStreamMaps[sBookChanges].erase(uSeq);
3985}
3986
3987// <-- bool: true=added, false=already there
3988bool
3990{
3992 return mStreamMaps[sManifests]
3993 .emplace(isrListener->getSeq(), isrListener)
3994 .second;
3995}
3996
3997// <-- bool: true=erased, false=was not there
3998bool
4000{
4002 return mStreamMaps[sManifests].erase(uSeq);
4003}
4004
4005// <-- bool: true=added, false=already there
4006bool
4008 InfoSub::ref isrListener,
4009 Json::Value& jvResult,
4010 bool admin)
4011{
4012 uint256 uRandom;
4013
4014 if (m_standalone)
4015 jvResult[jss::stand_alone] = m_standalone;
4016
4017 // CHECKME: is it necessary to provide a random number here?
4018 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
4019
4020 auto const& feeTrack = app_.getFeeTrack();
4021 jvResult[jss::random] = to_string(uRandom);
4022 jvResult[jss::server_status] = strOperatingMode(admin);
4023 jvResult[jss::load_base] = feeTrack.getLoadBase();
4024 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
4025 jvResult[jss::hostid] = getHostId(admin);
4026 jvResult[jss::pubkey_node] =
4028
4030 return mStreamMaps[sServer]
4031 .emplace(isrListener->getSeq(), isrListener)
4032 .second;
4033}
4034
4035// <-- bool: true=erased, false=was not there
4036bool
4038{
4040 return mStreamMaps[sServer].erase(uSeq);
4041}
4042
4043// <-- bool: true=added, false=already there
4044bool
4046{
4049 .emplace(isrListener->getSeq(), isrListener)
4050 .second;
4051}
4052
4053// <-- bool: true=erased, false=was not there
4054bool
4056{
4058 return mStreamMaps[sTransactions].erase(uSeq);
4059}
4060
4061// <-- bool: true=added, false=already there
4062bool
4064{
4067 .emplace(isrListener->getSeq(), isrListener)
4068 .second;
4069}
4070
4071// <-- bool: true=erased, false=was not there
4072bool
4074{
4076 return mStreamMaps[sRTTransactions].erase(uSeq);
4077}
4078
4079// <-- bool: true=added, false=already there
4080bool
4082{
4085 .emplace(isrListener->getSeq(), isrListener)
4086 .second;
4087}
4088
4089void
4091{
4092 accounting_.json(obj);
4093}
4094
4095// <-- bool: true=erased, false=was not there
4096bool
4098{
4100 return mStreamMaps[sValidations].erase(uSeq);
4101}
4102
4103// <-- bool: true=added, false=already there
4104bool
4106{
4108 return mStreamMaps[sPeerStatus]
4109 .emplace(isrListener->getSeq(), isrListener)
4110 .second;
4111}
4112
4113// <-- bool: true=erased, false=was not there
4114bool
4116{
4118 return mStreamMaps[sPeerStatus].erase(uSeq);
4119}
4120
4121// <-- bool: true=added, false=already there
4122bool
4124{
4127 .emplace(isrListener->getSeq(), isrListener)
4128 .second;
4129}
4130
4131// <-- bool: true=erased, false=was not there
4132bool
4134{
4136 return mStreamMaps[sConsensusPhase].erase(uSeq);
4137}
4138
4141{
4143
4144 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4145
4146 if (it != mRpcSubMap.end())
4147 return it->second;
4148
4149 return InfoSub::pointer();
4150}
4151
4154{
4156
4157 mRpcSubMap.emplace(strUrl, rspEntry);
4158
4159 return rspEntry;
4160}
4161
4162bool
4164{
4166 auto pInfo = findRpcSub(strUrl);
4167
4168 if (!pInfo)
4169 return false;
4170
4171 // check to see if any of the stream maps still hold a weak reference to
4172 // this entry before removing
4173 for (SubMapType const& map : mStreamMaps)
4174 {
4175 if (map.find(pInfo->getSeq()) != map.end())
4176 return false;
4177 }
4178 mRpcSubMap.erase(strUrl);
4179 return true;
4180}
4181
4182#ifndef USE_NEW_BOOK_PAGE
4183
4184// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4185// work, but it demonstrated poor performance.
4186//
4187void
4190 Book const& book,
4191 AccountID const& uTakerID,
4192 bool const bProof,
4193 unsigned int iLimit,
4194 Json::Value const& jvMarker,
4195 Json::Value& jvResult)
4196{ // CAUTION: This is the old get book page logic
4197 Json::Value& jvOffers =
4198 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4199
4201 const uint256 uBookBase = getBookBase(book);
4202 const uint256 uBookEnd = getQualityNext(uBookBase);
4203 uint256 uTipIndex = uBookBase;
4204
4205 if (auto stream = m_journal.trace())
4206 {
4207 stream << "getBookPage:" << book;
4208 stream << "getBookPage: uBookBase=" << uBookBase;
4209 stream << "getBookPage: uBookEnd=" << uBookEnd;
4210 stream << "getBookPage: uTipIndex=" << uTipIndex;
4211 }
4212
4213 ReadView const& view = *lpLedger;
4214
4215 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4216 isGlobalFrozen(view, book.in.account);
4217
4218 bool bDone = false;
4219 bool bDirectAdvance = true;
4220
4221 std::shared_ptr<SLE const> sleOfferDir;
4222 uint256 offerIndex;
4223 unsigned int uBookEntry;
4224 STAmount saDirRate;
4225
4226 auto const rate = transferRate(view, book.out.account);
4227 auto viewJ = app_.journal("View");
4228
4229 while (!bDone && iLimit-- > 0)
4230 {
4231 if (bDirectAdvance)
4232 {
4233 bDirectAdvance = false;
4234
4235 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4236
4237 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4238 if (ledgerIndex)
4239 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4240 else
4241 sleOfferDir.reset();
4242
4243 if (!sleOfferDir)
4244 {
4245 JLOG(m_journal.trace()) << "getBookPage: bDone";
4246 bDone = true;
4247 }
4248 else
4249 {
4250 uTipIndex = sleOfferDir->key();
4251 saDirRate = amountFromQuality(getQuality(uTipIndex));
4252
4253 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4254
4255 JLOG(m_journal.trace())
4256 << "getBookPage: uTipIndex=" << uTipIndex;
4257 JLOG(m_journal.trace())
4258 << "getBookPage: offerIndex=" << offerIndex;
4259 }
4260 }
4261
4262 if (!bDone)
4263 {
4264 auto sleOffer = view.read(keylet::offer(offerIndex));
4265
4266 if (sleOffer)
4267 {
4268 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4269 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4270 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4271 STAmount saOwnerFunds;
4272 bool firstOwnerOffer(true);
4273
4274 if (book.out.account == uOfferOwnerID)
4275 {
4276 // If an offer is selling issuer's own IOUs, it is fully
4277 // funded.
4278 saOwnerFunds = saTakerGets;
4279 }
4280 else if (bGlobalFreeze)
4281 {
4282 // If either asset is globally frozen, consider all offers
4283 // that aren't ours to be totally unfunded
4284 saOwnerFunds.clear(book.out);
4285 }
4286 else
4287 {
4288 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4289 if (umBalanceEntry != umBalance.end())
4290 {
4291 // Found in running balance table.
4292
4293 saOwnerFunds = umBalanceEntry->second;
4294 firstOwnerOffer = false;
4295 }
4296 else
4297 {
4298 // Did not find balance in table.
4299
4300 saOwnerFunds = accountHolds(
4301 view,
4302 uOfferOwnerID,
4303 book.out.currency,
4304 book.out.account,
4306 viewJ);
4307
4308 if (saOwnerFunds < beast::zero)
4309 {
4310 // Treat negative funds as zero.
4311
4312 saOwnerFunds.clear();
4313 }
4314 }
4315 }
4316
4317 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4318
4319 STAmount saTakerGetsFunded;
4320 STAmount saOwnerFundsLimit = saOwnerFunds;
4321 Rate offerRate = parityRate;
4322
4323 if (rate != parityRate
4324 // Have a tranfer fee.
4325 && uTakerID != book.out.account
4326 // Not taking offers of own IOUs.
4327 && book.out.account != uOfferOwnerID)
4328 // Offer owner not issuing ownfunds
4329 {
4330 // Need to charge a transfer fee to offer owner.
4331 offerRate = rate;
4332 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4333 }
4334
4335 if (saOwnerFundsLimit >= saTakerGets)
4336 {
4337 // Sufficient funds no shenanigans.
4338 saTakerGetsFunded = saTakerGets;
4339 }
4340 else
4341 {
4342 // Only provide, if not fully funded.
4343
4344 saTakerGetsFunded = saOwnerFundsLimit;
4345
4346 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4347 std::min(
4348 saTakerPays,
4349 multiply(
4350 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4351 .setJson(jvOffer[jss::taker_pays_funded]);
4352 }
4353
4354 STAmount saOwnerPays = (parityRate == offerRate)
4355 ? saTakerGetsFunded
4356 : std::min(
4357 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4358
4359 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4360
4361 // Include all offers funded and unfunded
4362 Json::Value& jvOf = jvOffers.append(jvOffer);
4363 jvOf[jss::quality] = saDirRate.getText();
4364
4365 if (firstOwnerOffer)
4366 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4367 }
4368 else
4369 {
4370 JLOG(m_journal.warn()) << "Missing offer";
4371 }
4372
4373 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4374 {
4375 bDirectAdvance = true;
4376 }
4377 else
4378 {
4379 JLOG(m_journal.trace())
4380 << "getBookPage: offerIndex=" << offerIndex;
4381 }
4382 }
4383 }
4384
4385 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4386 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4387}
4388
4389#else
4390
4391// This is the new code that uses the book iterators
4392// It has temporarily been disabled
4393
4394void
4397 Book const& book,
4398 AccountID const& uTakerID,
4399 bool const bProof,
4400 unsigned int iLimit,
4401 Json::Value const& jvMarker,
4402 Json::Value& jvResult)
4403{
4404 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4405
4407
4408 MetaView lesActive(lpLedger, tapNONE, true);
4409 OrderBookIterator obIterator(lesActive, book);
4410
4411 auto const rate = transferRate(lesActive, book.out.account);
4412
4413 const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4414 lesActive.isGlobalFrozen(book.in.account);
4415
4416 while (iLimit-- > 0 && obIterator.nextOffer())
4417 {
4418 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4419 if (sleOffer)
4420 {
4421 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4422 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4423 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4424 STAmount saDirRate = obIterator.getCurrentRate();
4425 STAmount saOwnerFunds;
4426
4427 if (book.out.account == uOfferOwnerID)
4428 {
4429 // If offer is selling issuer's own IOUs, it is fully funded.
4430 saOwnerFunds = saTakerGets;
4431 }
4432 else if (bGlobalFreeze)
4433 {
4434 // If either asset is globally frozen, consider all offers
4435 // that aren't ours to be totally unfunded
4436 saOwnerFunds.clear(book.out);
4437 }
4438 else
4439 {
4440 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4441
4442 if (umBalanceEntry != umBalance.end())
4443 {
4444 // Found in running balance table.
4445
4446 saOwnerFunds = umBalanceEntry->second;
4447 }
4448 else
4449 {
4450 // Did not find balance in table.
4451
4452 saOwnerFunds = lesActive.accountHolds(
4453 uOfferOwnerID,
4454 book.out.currency,
4455 book.out.account,
4457
4458 if (saOwnerFunds.isNegative())
4459 {
4460 // Treat negative funds as zero.
4461
4462 saOwnerFunds.zero();
4463 }
4464 }
4465 }
4466
4467 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4468
4469 STAmount saTakerGetsFunded;
4470 STAmount saOwnerFundsLimit = saOwnerFunds;
4471 Rate offerRate = parityRate;
4472
4473 if (rate != parityRate
4474 // Have a tranfer fee.
4475 && uTakerID != book.out.account
4476 // Not taking offers of own IOUs.
4477 && book.out.account != uOfferOwnerID)
4478 // Offer owner not issuing ownfunds
4479 {
4480 // Need to charge a transfer fee to offer owner.
4481 offerRate = rate;
4482 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4483 }
4484
4485 if (saOwnerFundsLimit >= saTakerGets)
4486 {
4487 // Sufficient funds no shenanigans.
4488 saTakerGetsFunded = saTakerGets;
4489 }
4490 else
4491 {
4492 // Only provide, if not fully funded.
4493 saTakerGetsFunded = saOwnerFundsLimit;
4494
4495 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4496
4497 // TOOD(tom): The result of this expression is not used - what's
4498 // going on here?
4499 std::min(
4500 saTakerPays,
4501 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4502 .setJson(jvOffer[jss::taker_pays_funded]);
4503 }
4504
4505 STAmount saOwnerPays = (parityRate == offerRate)
4506 ? saTakerGetsFunded
4507 : std::min(
4508 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4509
4510 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4511
4512 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4513 {
4514 // Only provide funded offers and offers of the taker.
4515 Json::Value& jvOf = jvOffers.append(jvOffer);
4516 jvOf[jss::quality] = saDirRate.getText();
4517 }
4518 }
4519 }
4520
4521 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4522 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4523}
4524
4525#endif
4526
4527inline void
4529{
4530 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4531 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4533 counters[static_cast<std::size_t>(mode)].dur += current;
4534
4537 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4538 .dur.count());
4540 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4541 .dur.count());
4543 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4545 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4546 .dur.count());
4548 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4549
4551 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4552 .transitions);
4554 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4555 .transitions);
4557 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4559 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4560 .transitions);
4562 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4563}
4564
4565void
4567{
4568 auto now = std::chrono::steady_clock::now();
4569
4570 std::lock_guard lock(mutex_);
4571 ++counters_[static_cast<std::size_t>(om)].transitions;
4572 if (om == OperatingMode::FULL &&
4573 counters_[static_cast<std::size_t>(om)].transitions == 1)
4574 {
4575 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4576 now - processStart_)
4577 .count();
4578 }
4579 counters_[static_cast<std::size_t>(mode_)].dur +=
4580 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4581
4582 mode_ = om;
4583 start_ = now;
4584}
4585
4586void
4588{
4589 auto [counters, mode, start, initialSync] = getCounterData();
4590 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4592 counters[static_cast<std::size_t>(mode)].dur += current;
4593
4594 obj[jss::state_accounting] = Json::objectValue;
4596 i <= static_cast<std::size_t>(OperatingMode::FULL);
4597 ++i)
4598 {
4599 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4600 auto& state = obj[jss::state_accounting][states_[i]];
4601 state[jss::transitions] = std::to_string(counters[i].transitions);
4602 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4603 }
4604 obj[jss::server_state_duration_us] = std::to_string(current.count());
4605 if (initialSync)
4606 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4607}
4608
4609//------------------------------------------------------------------------------
4610
4613 Application& app,
4615 bool standalone,
4616 std::size_t minPeerCount,
4617 bool startvalid,
4618 JobQueue& job_queue,
4620 ValidatorKeys const& validatorKeys,
4621 boost::asio::io_service& io_svc,
4622 beast::Journal journal,
4623 beast::insight::Collector::ptr const& collector)
4624{
4625 return std::make_unique<NetworkOPsImp>(
4626 app,
4627 clock,
4628 standalone,
4629 minPeerCount,
4630 startvalid,
4631 job_queue,
4633 validatorKeys,
4634 io_svc,
4635 journal,
4636 collector);
4637}
4638
4639} // namespace ripple
T back_inserter(T... args)
T begin(T... args)
Decorator for streaming out compact json.
Definition: json_writer.h:318
Lightweight wrapper to tag static string.
Definition: json_value.h:62
Represents a JSON value.
Definition: json_value.h:148
Json::UInt UInt
Definition: json_value.h:155
Value get(UInt index, const Value &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Definition: json_value.cpp:847
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:897
bool isMember(const char *key) const
Return true if the object has a member named key.
Definition: json_value.cpp:949
A generic endpoint for log messages.
Definition: Journal.h:60
Stream error() const
Definition: Journal.h:346
Stream debug() const
Definition: Journal.h:328
Stream info() const
Definition: Journal.h:334
Stream trace() const
Severity stream access functions.
Definition: Journal.h:322
Stream warn() const
Definition: Journal.h:340
A metric for measuring an integral value.
Definition: Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition: Gauge.h:68
A reference to a handler for performing polled collection.
Definition: Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition: Book.h:35
Issue in
Definition: Book.h:37
Issue out
Definition: Book.h:38
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition: ClusterNode.h:46
std::uint32_t getLoadFee() const
Definition: ClusterNode.h:52
NetClock::time_point getReportTime() const
Definition: ClusterNode.h:58
PublicKey const & identity() const
Definition: ClusterNode.h:64
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:49
std::string SERVER_DOMAIN
Definition: Config.h:279
std::size_t NODE_SIZE
Definition: Config.h:213
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition: Config.h:160
int RELAY_UNTRUSTED_VALIDATIONS
Definition: Config.h:169
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition: InfoSub.h:54
AccountID account
Definition: Issue.h:39
Currency currency
Definition: Issue.h:38
A pool of threads to perform work.
Definition: JobQueue.h:56
Json::Value getJson(int c=0)
Definition: JobQueue.cpp:214
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
Definition: LedgerMaster.h:265
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:79
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
Definition: LoadFeeTrack.h:45
std::uint32_t getClusterFee() const
Definition: LoadFeeTrack.h:82
std::uint32_t getLocalFee() const
Definition: LoadFeeTrack.h:75
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:89
std::uint32_t getRemoteFee() const
Definition: LoadFeeTrack.h:68
std::uint32_t getLoadFactor() const
Definition: LoadFeeTrack.h:95
Manages load sources.
Definition: LoadManager.h:46
void heartbeat()
Reset the stall detection timer.
Definition: LoadManager.cpp:64
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
Definition: NetworkOPs.cpp:139
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
Definition: NetworkOPs.cpp:149
std::chrono::steady_clock::time_point start_
Definition: NetworkOPs.cpp:151
static std::array< Json::StaticString const, 5 > const states_
Definition: NetworkOPs.cpp:155
std::chrono::steady_clock::time_point const processStart_
Definition: NetworkOPs.cpp:153
Transaction with input flags and results to be applied in batches.
Definition: NetworkOPs.cpp:90
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
Definition: NetworkOPs.cpp:99
std::shared_ptr< Transaction > const transaction
Definition: NetworkOPs.cpp:92
boost::asio::steady_timer accountHistoryTxTimer_
Definition: NetworkOPs.cpp:731
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
Definition: NetworkOPs.cpp:863
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::vector< TransactionStatus > mTransactions
Definition: NetworkOPs.cpp:775
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Definition: NetworkOPs.cpp:721
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
Definition: NetworkOPs.cpp:733
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
Definition: NetworkOPs.cpp:881
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
Definition: NetworkOPs.cpp:729
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
Definition: NetworkOPs.cpp:116
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
Definition: NetworkOPs.cpp:717
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
Definition: NetworkOPs.cpp:260
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Definition: NetworkOPs.cpp:744
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, const bool bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
Definition: NetworkOPs.cpp:730
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
Definition: NetworkOPs.cpp:122
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:222
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
Definition: NetworkOPs.cpp:724
beast::Journal m_journal
Definition: NetworkOPs.cpp:715
SubInfoMapType mSubAccount
Definition: NetworkOPs.cpp:739
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
Definition: NetworkOPs.cpp:779
bool beginConsensus(uint256 const &networkClosed, std::unique_ptr< std::stringstream > const &clog) override
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
Definition: NetworkOPs.cpp:996
bool unsubValidations(std::uint64_t uListener) override
void endConsensus(std::unique_ptr< std::stringstream > const &clog) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
Definition: NetworkOPs.cpp:728
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
Definition: NetworkOPs.cpp:934
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
Definition: NetworkOPs.cpp:759
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
Definition: NetworkOPs.cpp:769
std::atomic< bool > unlBlocked_
Definition: NetworkOPs.cpp:726
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
Definition: NetworkOPs.cpp:719
std::atomic< bool > needNetworkLedger_
Definition: NetworkOPs.cpp:723
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
Definition: NetworkOPs.cpp:777
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
Definition: NetworkOPs.cpp:893
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
Definition: NetworkOPs.cpp:737
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
Definition: NetworkOPs.cpp:924
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
Definition: NetworkOPs.cpp:761
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
Definition: NetworkOPs.cpp:875
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
Definition: NetworkOPs.cpp:772
void setMode(OperatingMode om) override
void stop() override
Definition: NetworkOPs.cpp:564
void clearNeedNetworkLedger() override
Definition: NetworkOPs.cpp:887
DispatchState mDispatchState
Definition: NetworkOPs.cpp:774
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
Definition: NetworkOPs.cpp:740
bool checkLastClosedLedger(const Overlay::PeerSequence &, uint256 &networkClosed)
void reportFeeChange() override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
Definition: NetworkOPs.cpp:899
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
Definition: NetworkOPs.cpp:780
Application & app_
Definition: NetworkOPs.cpp:714
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
Definition: NetworkOPs.cpp:735
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
Definition: NetworkOPs.cpp:742
std::atomic< bool > amendmentWarned_
Definition: NetworkOPs.cpp:725
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
Definition: NetworkOPs.cpp:905
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition: NetworkOPs.h:88
void getCountsJson(Json::Value &obj)
Definition: Database.cpp:268
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:49
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:57
BookListeners::pointer getBookListeners(Book const &)
BookListeners::pointer makeBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, const AcceptedLedgerTx &alTx, MultiApiJson const &jvObj)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
Definition: RCLConsensus.h:53
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
Definition: RCLConsensus.h:447
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Definition: RCLConsensus.h:460
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:44
Represents a set of transactions in RCLConsensus.
Definition: RCLCxTx.h:63
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition: RFC1751.cpp:507
Collects logging information.
Definition: RCLConsensus.h:551
std::unique_ptr< std::stringstream > const & ss()
Definition: RCLConsensus.h:565
A view into a ledger.
Definition: ReadView.h:52
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition: STAmount.cpp:507
std::string getText() const override
Definition: STAmount.cpp:547
Issue const & issue() const
Definition: STAmount.h:487
std::optional< T > get(std::string const &name) const
Definition: BasicConfig.h:140
std::size_t size() const noexcept
Definition: Serializer.h:73
void const * data() const noexcept
Definition: Serializer.h:79
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition: TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition: TxQ.cpp:1778
static time_point now()
Definition: UptimeClock.cpp:67
Validator keys and manifest as set in configuration file.
Definition: ValidatorKeys.h:38
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition: XRPAmount.h:262
Json::Value jsonClipped() const
Definition: XRPAmount.h:218
iterator begin()
Definition: base_uint.h:136
static constexpr std::size_t size()
Definition: base_uint.h:526
bool isZero() const
Definition: base_uint.h:540
bool isNonZero() const
Definition: base_uint.h:545
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition: scope.h:231
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition: json_value.h:43
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:44
int Int
Definition: json_forwards.h:26
unsigned int UInt
Definition: json_forwards.h:27
void rngfill(void *buffer, std::size_t bytes, Generator &g)
Definition: rngfill.h:34
std::string const & getVersionString()
Server version.
Definition: BuildInfo.cpp:68
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition: BookChanges.h:47
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition: Indexes.cpp:175
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition: Indexes.cpp:371
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition: Indexes.cpp:265
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:32
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:114
STAmount divide(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:93
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition: STTx.cpp:631
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition: View.cpp:443
@ fhZERO_IF_FROZEN
Definition: View.h:76
@ fhIGNORE_FREEZE
Definition: View.h:76
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition: View.cpp:137
std::uint64_t getQuality(uint256 const &uBase)
Definition: Indexes.cpp:140
@ INCLUDED
Definition: Transaction.h:49
@ OBSOLETE
Definition: Transaction.h:54
@ INVALID
Definition: Transaction.h:48
error_code_i
Definition: ErrorCodes.h:40
@ rpcSUCCESS
Definition: ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition: ErrorCodes.h:84
@ rpcINTERNAL
Definition: ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
Definition: SecretKey.cpp:369
auto constexpr muldiv_max
Definition: mulDiv.h:29
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition: LocalTxs.cpp:197
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition: View.cpp:650
STAmount amountFromQuality(std::uint64_t rate)
Definition: STAmount.cpp:854
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition: ErrorCodes.h:167
@ warnRPC_UNSUPPORTED_MAJORITY
Definition: ErrorCodes.h:165
@ warnRPC_AMENDMENT_BLOCKED
Definition: ErrorCodes.h:166
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition: NetworkOPs.h:67
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:53
bool isTesSuccess(TER x)
Definition: TER.h:656
bool isTerRetry(TER x)
Definition: TER.h:650
AccountID calcAccountID(PublicKey const &pk)
Definition: AccountID.cpp:168
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Definition: csprng.cpp:103
Json::Value rpcError(int iError)
Definition: RPCErr.cpp:31
@ tefPAST_SEQ
Definition: TER.h:175
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
Definition: NetworkOPs.cpp:844
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
bool isTemMalformed(TER x)
Definition: TER.h:638
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition: View.cpp:148
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition: ApiVersion.h:102
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:75
@ tesSUCCESS
Definition: TER.h:242
uint256 getQualityNext(uint256 const &uBase)
Definition: Indexes.cpp:132
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition: View.cpp:309
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition: ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition: chrono.h:92
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:630
FeeSetup setup_FeeVote(Section const &section)
Definition: Config.cpp:1092
Number root(Number f, unsigned d)
Definition: Number.cpp:635
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Definition: mulDiv.cpp:32
ApplyFlags
Definition: ApplyView.h:31
@ tapFAIL_HARD
Definition: ApplyView.h:36
@ tapUNLIMITED
Definition: ApplyView.h:43
@ tapNONE
Definition: ApplyView.h:32
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:38
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition: Seed.cpp:76
@ terQUEUED
Definition: TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition: TER.cpp:242
@ jtNETOP_CLUSTER
Definition: Job.h:75
@ jtCLIENT_FEE_CHANGE
Definition: Job.h:47
@ jtTRANSACTION
Definition: Job.h:62
@ jtTXN_PROC
Definition: Job.h:82
@ jtCLIENT_CONSENSUS
Definition: Job.h:48
@ jtBATCH
Definition: Job.h:65
@ jtCLIENT_ACCT_HIST
Definition: Job.h:49
uint256 getBookBase(Book const &book)
Definition: Indexes.cpp:113
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition: View.cpp:174
static std::uint32_t trunc32(std::uint64_t v)
@ temBAD_SIGNATURE
Definition: TER.h:105
static auto const genesisAccountId
Definition: NetworkOPs.cpp:857
STL namespace.
T ref(T... args)
T reset(T... args)
T set_intersection(T... args)
T str(T... args)
std::string serialized
The manifest in serialized form.
Definition: Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition: Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition: Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition: Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Definition: Manifest.h:86
Server fees published on server subscription.
Definition: NetworkOPs.cpp:199
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
Definition: NetworkOPs.cpp:218
bool operator==(ServerFeeSummary const &b) const
Definition: NetworkOPs.cpp:210
beast::insight::Gauge full_transitions
Definition: NetworkOPs.cpp:831
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:786
beast::insight::Hook hook
Definition: NetworkOPs.cpp:820
beast::insight::Gauge connected_duration
Definition: NetworkOPs.cpp:822
beast::insight::Gauge tracking_duration
Definition: NetworkOPs.cpp:824
beast::insight::Gauge connected_transitions
Definition: NetworkOPs.cpp:828
beast::insight::Gauge disconnected_transitions
Definition: NetworkOPs.cpp:827
beast::insight::Gauge syncing_duration
Definition: NetworkOPs.cpp:823
beast::insight::Gauge tracking_transitions
Definition: NetworkOPs.cpp:830
beast::insight::Gauge full_duration
Definition: NetworkOPs.cpp:825
beast::insight::Gauge disconnected_duration
Definition: NetworkOPs.cpp:821
beast::insight::Gauge syncing_transitions
Definition: NetworkOPs.cpp:829
SubAccountHistoryIndex(AccountID const &accountId)
Definition: NetworkOPs.cpp:678
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:697
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:692
Represents a transfer rate.
Definition: Rate.h:40
Data format for exchanging consumption information across peers.
Definition: Gossip.h:30
std::vector< Item > items
Definition: Gossip.h:42
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition: TxQ.h:165
void set(const char *key, auto const &v)
Definition: MultiApiJson.h:83
IsMemberResult isMember(const char *key) const
Definition: MultiApiJson.h:94
Select all peers (except optional excluded) that are in our cluster.
Definition: predicates.h:137
Sends a message to all peers.
Definition: predicates.h:32
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)