rippled
Loading...
Searching...
No Matches
NetworkOPs.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/misc/AmendmentTable.h>
32#include <xrpld/app/misc/DeliverMax.h>
33#include <xrpld/app/misc/HashRouter.h>
34#include <xrpld/app/misc/LoadFeeTrack.h>
35#include <xrpld/app/misc/NetworkOPs.h>
36#include <xrpld/app/misc/Transaction.h>
37#include <xrpld/app/misc/TxQ.h>
38#include <xrpld/app/misc/ValidatorKeys.h>
39#include <xrpld/app/misc/ValidatorList.h>
40#include <xrpld/app/misc/detail/AccountTxPaging.h>
41#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
42#include <xrpld/app/tx/apply.h>
43#include <xrpld/consensus/Consensus.h>
44#include <xrpld/consensus/ConsensusParms.h>
45#include <xrpld/overlay/Cluster.h>
46#include <xrpld/overlay/Overlay.h>
47#include <xrpld/overlay/predicates.h>
48#include <xrpld/perflog/PerfLog.h>
49#include <xrpld/rpc/BookChanges.h>
50#include <xrpld/rpc/DeliveredAmount.h>
51#include <xrpld/rpc/MPTokenIssuanceID.h>
52#include <xrpld/rpc/ServerHandler.h>
53#include <xrpl/basics/UptimeClock.h>
54#include <xrpl/basics/mulDiv.h>
55#include <xrpl/basics/safe_cast.h>
56#include <xrpl/basics/scope.h>
57#include <xrpl/beast/rfc2616.h>
58#include <xrpl/beast/utility/rngfill.h>
59#include <xrpl/crypto/RFC1751.h>
60#include <xrpl/crypto/csprng.h>
61#include <xrpl/json/to_string.h>
62#include <xrpl/protocol/BuildInfo.h>
63#include <xrpl/protocol/Feature.h>
64#include <xrpl/protocol/MultiApiJson.h>
65#include <xrpl/protocol/RPCErr.h>
66#include <xrpl/protocol/STParsedJSON.h>
67#include <xrpl/protocol/jss.h>
68#include <xrpl/resource/Fees.h>
69#include <xrpl/resource/ResourceManager.h>
70#include <boost/asio/ip/host_name.hpp>
71#include <boost/asio/steady_timer.hpp>
72
73#include <algorithm>
74#include <exception>
75#include <mutex>
76#include <optional>
77#include <set>
78#include <string>
79#include <tuple>
80#include <unordered_map>
81#include <utility>
82
83namespace ripple {
84
85class NetworkOPsImp final : public NetworkOPs
86{
92 {
93 public:
95 bool const admin;
96 bool const local;
98 bool applied = false;
100
103 bool a,
104 bool l,
105 FailHard f)
106 : transaction(t), admin(a), local(l), failType(f)
107 {
108 XRPL_ASSERT(
110 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
111 "valid inputs");
112 }
113 };
114
118 enum class DispatchState : unsigned char {
119 none,
120 scheduled,
121 running,
122 };
123
125
141 {
142 struct Counters
143 {
144 explicit Counters() = default;
145
148 };
149
153 std::chrono::steady_clock::time_point start_ =
155 std::chrono::steady_clock::time_point const processStart_ = start_;
158
159 public:
161 {
163 .transitions = 1;
164 }
165
172 void
174
180 void
181 json(Json::Value& obj) const;
182
184 {
186 decltype(mode_) mode;
187 decltype(start_) start;
189 };
190
193 {
196 }
197 };
198
201 {
202 ServerFeeSummary() = default;
203
205 XRPAmount fee,
206 TxQ::Metrics&& escalationMetrics,
207 LoadFeeTrack const& loadFeeTrack);
208 bool
209 operator!=(ServerFeeSummary const& b) const;
210
211 bool
213 {
214 return !(*this != b);
215 }
216
221 };
222
223public:
225 Application& app,
227 bool standalone,
228 std::size_t minPeerCount,
229 bool start_valid,
230 JobQueue& job_queue,
232 ValidatorKeys const& validatorKeys,
233 boost::asio::io_service& io_svc,
234 beast::Journal journal,
235 beast::insight::Collector::ptr const& collector)
236 : app_(app)
237 , m_journal(journal)
240 , heartbeatTimer_(io_svc)
241 , clusterTimer_(io_svc)
242 , accountHistoryTxTimer_(io_svc)
243 , mConsensus(
244 app,
246 setup_FeeVote(app_.config().section("voting")),
247 app_.logs().journal("FeeVote")),
249 *m_localTX,
250 app.getInboundTransactions(),
251 beast::get_abstract_clock<std::chrono::steady_clock>(),
252 validatorKeys,
253 app_.logs().journal("LedgerConsensus"))
255 , m_job_queue(job_queue)
256 , m_standalone(standalone)
257 , minPeerCount_(start_valid ? 0 : minPeerCount)
258 , m_stats(std::bind(&NetworkOPsImp::collect_metrics, this), collector)
259 {
260 }
261
262 ~NetworkOPsImp() override
263 {
264 // This clear() is necessary to ensure the shared_ptrs in this map get
265 // destroyed NOW because the objects in this map invoke methods on this
266 // class when they are destroyed
268 }
269
270public:
272 getOperatingMode() const override;
273
275 strOperatingMode(OperatingMode const mode, bool const admin) const override;
276
278 strOperatingMode(bool const admin = false) const override;
279
280 //
281 // Transaction operations.
282 //
283
284 // Must complete immediately.
285 void
287
288 void
290 std::shared_ptr<Transaction>& transaction,
291 bool bUnlimited,
292 bool bLocal,
293 FailHard failType) override;
294
303 void
306 bool bUnlimited,
307 FailHard failType);
308
318 void
321 bool bUnlimited,
322 FailHard failtype);
323
327 void
329
335 void
337
338 //
339 // Owner functions.
340 //
341
345 AccountID const& account) override;
346
347 //
348 // Book functions.
349 //
350
351 void
354 Book const&,
355 AccountID const& uTakerID,
356 const bool bProof,
357 unsigned int iLimit,
358 Json::Value const& jvMarker,
359 Json::Value& jvResult) override;
360
361 // Ledger proposal/close functions.
362 bool
364
365 bool
368 std::string const& source) override;
369
370 void
371 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
372
373 // Network state machine.
374
375 // Used for the "jump" case.
376private:
377 void
379 bool
381
382public:
383 bool
384 beginConsensus(uint256 const& networkClosed) override;
385 void
386 endConsensus() override;
387 void
388 setStandAlone() override;
389
393 void
394 setStateTimer() override;
395
396 void
397 setNeedNetworkLedger() override;
398 void
399 clearNeedNetworkLedger() override;
400 bool
401 isNeedNetworkLedger() override;
402 bool
403 isFull() override;
404
405 void
406 setMode(OperatingMode om) override;
407
408 bool
409 isBlocked() override;
410 bool
411 isAmendmentBlocked() override;
412 void
413 setAmendmentBlocked() override;
414 bool
415 isAmendmentWarned() override;
416 void
417 setAmendmentWarned() override;
418 void
419 clearAmendmentWarned() override;
420 bool
421 isUNLBlocked() override;
422 void
423 setUNLBlocked() override;
424 void
425 clearUNLBlocked() override;
426 void
427 consensusViewChange() override;
428
430 getConsensusInfo() override;
432 getServerInfo(bool human, bool admin, bool counters) override;
433 void
434 clearLedgerFetch() override;
436 getLedgerFetchInfo() override;
439 std::optional<std::chrono::milliseconds> consensusDelay) override;
440 void
441 reportFeeChange() override;
442 void
444
445 void
446 updateLocalTx(ReadView const& view) override;
448 getLocalTxCount() override;
449
450 //
451 // Monitoring: publisher side.
452 //
453 void
454 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
455 void
458 std::shared_ptr<STTx const> const& transaction,
459 TER result) override;
460 void
461 pubValidation(std::shared_ptr<STValidation> const& val) override;
462
463 //--------------------------------------------------------------------------
464 //
465 // InfoSub::Source.
466 //
467 void
469 InfoSub::ref ispListener,
470 hash_set<AccountID> const& vnaAccountIDs,
471 bool rt) override;
472 void
474 InfoSub::ref ispListener,
475 hash_set<AccountID> const& vnaAccountIDs,
476 bool rt) override;
477
478 // Just remove the subscription from the tracking
479 // not from the InfoSub. Needed for InfoSub destruction
480 void
482 std::uint64_t seq,
483 hash_set<AccountID> const& vnaAccountIDs,
484 bool rt) override;
485
487 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
488 override;
489 void
491 InfoSub::ref ispListener,
492 AccountID const& account,
493 bool historyOnly) override;
494
495 void
497 std::uint64_t seq,
498 AccountID const& account,
499 bool historyOnly) override;
500
501 bool
502 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
503 bool
504 unsubLedger(std::uint64_t uListener) override;
505
506 bool
507 subBookChanges(InfoSub::ref ispListener) override;
508 bool
509 unsubBookChanges(std::uint64_t uListener) override;
510
511 bool
512 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
513 override;
514 bool
515 unsubServer(std::uint64_t uListener) override;
516
517 bool
518 subBook(InfoSub::ref ispListener, Book const&) override;
519 bool
520 unsubBook(std::uint64_t uListener, Book const&) override;
521
522 bool
523 subManifests(InfoSub::ref ispListener) override;
524 bool
525 unsubManifests(std::uint64_t uListener) override;
526 void
527 pubManifest(Manifest const&) override;
528
529 bool
530 subTransactions(InfoSub::ref ispListener) override;
531 bool
532 unsubTransactions(std::uint64_t uListener) override;
533
534 bool
535 subRTTransactions(InfoSub::ref ispListener) override;
536 bool
537 unsubRTTransactions(std::uint64_t uListener) override;
538
539 bool
540 subValidations(InfoSub::ref ispListener) override;
541 bool
542 unsubValidations(std::uint64_t uListener) override;
543
544 bool
545 subPeerStatus(InfoSub::ref ispListener) override;
546 bool
547 unsubPeerStatus(std::uint64_t uListener) override;
548 void
549 pubPeerStatus(std::function<Json::Value(void)> const&) override;
550
551 bool
552 subConsensus(InfoSub::ref ispListener) override;
553 bool
554 unsubConsensus(std::uint64_t uListener) override;
555
557 findRpcSub(std::string const& strUrl) override;
559 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
560 bool
561 tryRemoveRpcSub(std::string const& strUrl) override;
562
563 void
564 stop() override
565 {
566 {
567 boost::system::error_code ec;
568 heartbeatTimer_.cancel(ec);
569 if (ec)
570 {
571 JLOG(m_journal.error())
572 << "NetworkOPs: heartbeatTimer cancel error: "
573 << ec.message();
574 }
575
576 ec.clear();
577 clusterTimer_.cancel(ec);
578 if (ec)
579 {
580 JLOG(m_journal.error())
581 << "NetworkOPs: clusterTimer cancel error: "
582 << ec.message();
583 }
584
585 ec.clear();
586 accountHistoryTxTimer_.cancel(ec);
587 if (ec)
588 {
589 JLOG(m_journal.error())
590 << "NetworkOPs: accountHistoryTxTimer cancel error: "
591 << ec.message();
592 }
593 }
594 // Make sure that any waitHandlers pending in our timers are done.
595 using namespace std::chrono_literals;
596 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
597 }
598
599 void
600 stateAccounting(Json::Value& obj) override;
601
602private:
603 void
604 setTimer(
605 boost::asio::steady_timer& timer,
606 std::chrono::milliseconds const& expiry_time,
607 std::function<void()> onExpire,
608 std::function<void()> onError);
609 void
611 void
613 void
615 void
617
619 transJson(
620 std::shared_ptr<STTx const> const& transaction,
621 TER result,
622 bool validated,
625
626 void
629 AcceptedLedgerTx const& transaction,
630 bool last);
631
632 void
635 AcceptedLedgerTx const& transaction,
636 bool last);
637
638 void
641 std::shared_ptr<STTx const> const& transaction,
642 TER result);
643
644 void
645 pubServer();
646 void
648
650 getHostId(bool forAdmin);
651
652private:
656
657 /*
658 * With a validated ledger to separate history and future, the node
659 * streams historical txns with negative indexes starting from -1,
660 * and streams future txns starting from index 0.
661 * The SubAccountHistoryIndex struct maintains these indexes.
662 * It also has a flag stopHistorical_ for stopping streaming
663 * the historical txns.
664 */
666 {
668 // forward
670 // separate backward and forward
672 // history, backward
677
679 : accountId_(accountId)
680 , forwardTxIndex_(0)
683 , historyTxIndex_(-1)
684 , haveHistorical_(false)
685 , stopHistorical_(false)
686 {
687 }
688 };
690 {
693 };
695 {
698 };
701
705 void
709 void
711 void
713
716
718
720
722
727
729 boost::asio::steady_timer heartbeatTimer_;
730 boost::asio::steady_timer clusterTimer_;
731 boost::asio::steady_timer accountHistoryTxTimer_;
732
734
736
738
741
743
745
746 enum SubTypes {
747 sLedger, // Accepted ledgers.
748 sManifests, // Received validator manifests.
749 sServer, // When server changes connectivity state.
750 sTransactions, // All accepted transactions.
751 sRTTransactions, // All proposed and accepted transactions.
752 sValidations, // Received validations.
753 sPeerStatus, // Peer status changes.
754 sConsensusPhase, // Consensus phase
755 sBookChanges, // Per-ledger order book changes
756 sLastEntry // Any new entry must be ADDED ABOVE this one
757 };
758
760
762
764
765 // Whether we are in standalone mode.
766 bool const m_standalone;
767
768 // The number of nodes that we need to consider ourselves connected.
770
771 // Transaction batching.
776
778
781
782private:
783 struct Stats
784 {
785 template <class Handler>
787 Handler const& handler,
788 beast::insight::Collector::ptr const& collector)
789 : hook(collector->make_hook(handler))
790 , disconnected_duration(collector->make_gauge(
791 "State_Accounting",
792 "Disconnected_duration"))
793 , connected_duration(collector->make_gauge(
794 "State_Accounting",
795 "Connected_duration"))
797 collector->make_gauge("State_Accounting", "Syncing_duration"))
798 , tracking_duration(collector->make_gauge(
799 "State_Accounting",
800 "Tracking_duration"))
802 collector->make_gauge("State_Accounting", "Full_duration"))
803 , disconnected_transitions(collector->make_gauge(
804 "State_Accounting",
805 "Disconnected_transitions"))
806 , connected_transitions(collector->make_gauge(
807 "State_Accounting",
808 "Connected_transitions"))
809 , syncing_transitions(collector->make_gauge(
810 "State_Accounting",
811 "Syncing_transitions"))
812 , tracking_transitions(collector->make_gauge(
813 "State_Accounting",
814 "Tracking_transitions"))
816 collector->make_gauge("State_Accounting", "Full_transitions"))
817 {
818 }
819
826
832 };
833
834 std::mutex m_statsMutex; // Mutex to lock m_stats
836
837private:
838 void
840};
841
842//------------------------------------------------------------------------------
843
845 {"disconnected", "connected", "syncing", "tracking", "full"}};
846
848
856
857static auto const genesisAccountId = calcAccountID(
859 .first);
860
861//------------------------------------------------------------------------------
862inline OperatingMode
864{
865 return mMode;
866}
867
// Convenience overload: render the server's current operating mode
// (mMode) as a string. Admin callers may see the finer-grained
// "proposing"/"validating" states (see the two-argument overload).
inline std::string
NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
{
    return strOperatingMode(mMode, admin);
}
873
874inline void
876{
878}
879
880inline void
882{
883 needNetworkLedger_ = true;
884}
885
886inline void
888{
889 needNetworkLedger_ = false;
890}
891
892inline bool
894{
895 return needNetworkLedger_;
896}
897
898inline bool
900{
902}
903
906{
907 static std::string const hostname = boost::asio::ip::host_name();
908
909 if (forAdmin)
910 return hostname;
911
912 // For non-admin uses hash the node public key into a
913 // single RFC1751 word:
914 static std::string const shroudedHostId = [this]() {
915 auto const& id = app_.nodeIdentity();
916
917 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
918 }();
919
920 return shroudedHostId;
921}
922
923void
925{
927
928 // Only do this work if a cluster is configured
929 if (app_.cluster().size() != 0)
931}
932
933void
935 boost::asio::steady_timer& timer,
936 const std::chrono::milliseconds& expiry_time,
937 std::function<void()> onExpire,
938 std::function<void()> onError)
939{
940 // Only start the timer if waitHandlerCounter_ is not yet joined.
941 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
942 [this, onExpire, onError](boost::system::error_code const& e) {
943 if ((e.value() == boost::system::errc::success) &&
944 (!m_job_queue.isStopped()))
945 {
946 onExpire();
947 }
948 // Recover as best we can if an unexpected error occurs.
949 if (e.value() != boost::system::errc::success &&
950 e.value() != boost::asio::error::operation_aborted)
951 {
952 // Try again later and hope for the best.
953 JLOG(m_journal.error())
954 << "Timer got error '" << e.message()
955 << "'. Restarting timer.";
956 onError();
957 }
958 }))
959 {
960 timer.expires_from_now(expiry_time);
961 timer.async_wait(std::move(*optionalCountedHandler));
962 }
963}
964
965void
966NetworkOPsImp::setHeartbeatTimer()
967{
968 setTimer(
969 heartbeatTimer_,
970 mConsensus.parms().ledgerGRANULARITY,
971 [this]() {
972 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
973 processHeartbeatTimer();
974 });
975 },
976 [this]() { setHeartbeatTimer(); });
977}
978
979void
980NetworkOPsImp::setClusterTimer()
981{
982 using namespace std::chrono_literals;
983
984 setTimer(
985 clusterTimer_,
986 10s,
987 [this]() {
988 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
989 processClusterTimer();
990 });
991 },
992 [this]() { setClusterTimer(); });
993}
994
995void
996NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
997{
998 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
999 << toBase58(subInfo.index_->accountId_);
1000 using namespace std::chrono_literals;
1001 setTimer(
1002 accountHistoryTxTimer_,
1003 4s,
1004 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1005 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1006}
1007
// Heartbeat: reassess our connectivity state from the peer count, drive
// the consensus state machine, publish phase changes, and re-arm the
// heartbeat timer.
void
NetworkOPsImp::processHeartbeatTimer()
{
    {
        std::unique_lock lock{app_.getMasterMutex()};

        // VFALCO NOTE This is for diagnosing a crash on exit
        // NOTE(review): mgr is not referenced again in the visible code --
        // confirm its intended use (presumably deadlock detection) upstream.
        LoadManager& mgr(app_.getLoadManager());

        std::size_t const numPeers = app_.overlay().size();

        // do we have sufficient peers? If not, we are disconnected.
        if (numPeers < minPeerCount_)
        {
            if (mMode != OperatingMode::DISCONNECTED)
            {
                setMode(OperatingMode::DISCONNECTED);
                JLOG(m_journal.warn())
                    << "Node count (" << numPeers << ") has fallen "
                    << "below required minimum (" << minPeerCount_ << ").";
            }

            // MasterMutex lock need not be held to call setHeartbeatTimer()
            lock.unlock();
            // We do not call mConsensus.timerEntry until there are enough
            // peers providing meaningful inputs to consensus
            setHeartbeatTimer();
            return;
        }

        if (mMode == OperatingMode::DISCONNECTED)
        {
            setMode(OperatingMode::CONNECTED);
            JLOG(m_journal.info())
                << "Node count (" << numPeers << ") is sufficient.";
        }

        // Check if the last validated ledger forces a change between these
        // states.
        // NOTE(review): setMode() is re-invoked with the current mode;
        // presumably it can promote/demote based on ledger state -- confirm.
        if (mMode == OperatingMode::SYNCING)
            setMode(OperatingMode::SYNCING);
        else if (mMode == OperatingMode::CONNECTED)
            setMode(OperatingMode::CONNECTED);
    }

    // Drive consensus with the current network close time.
    mConsensus.timerEntry(app_.timeKeeper().closeTime());

    // Publish a consensus-phase change to subscribers, if one occurred.
    const ConsensusPhase currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
    }

    // Re-arm for the next heartbeat.
    setHeartbeatTimer();
}
1065
1066void
1067NetworkOPsImp::processClusterTimer()
1068{
1069 if (app_.cluster().size() == 0)
1070 return;
1071
1072 using namespace std::chrono_literals;
1073
1074 bool const update = app_.cluster().update(
1075 app_.nodeIdentity().first,
1076 "",
1077 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1078 ? app_.getFeeTrack().getLocalFee()
1079 : 0,
1080 app_.timeKeeper().now());
1081
1082 if (!update)
1083 {
1084 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1085 setClusterTimer();
1086 return;
1087 }
1088
1089 protocol::TMCluster cluster;
1090 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1091 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1092 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1093 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1094 n.set_nodeload(node.getLoadFee());
1095 if (!node.name().empty())
1096 n.set_nodename(node.name());
1097 });
1098
1099 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1100 for (auto& item : gossip.items)
1101 {
1102 protocol::TMLoadSource& node = *cluster.add_loadsources();
1103 node.set_name(to_string(item.address));
1104 node.set_cost(item.balance);
1105 }
1106 app_.overlay().foreach(send_if(
1107 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1108 peer_in_cluster()));
1109 setClusterTimer();
1110}
1111
1112//------------------------------------------------------------------------------
1113
NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
    const
{
    // Admin callers on a FULL server get extra detail: whether we are
    // actively proposing or validating in the current consensus round.
    if (mode == OperatingMode::FULL && admin)
    {
        auto const consensusMode = mConsensus.mode();
        if (consensusMode != ConsensusMode::wrongLedger)
        {
            if (consensusMode == ConsensusMode::proposing)
                return "proposing";

            if (mConsensus.validating())
                return "validating";
        }
    }

    // Otherwise map the operating mode to its fixed display name.
    return states_[static_cast<std::size_t>(mode)];
}
1133
1134void
1135NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1136{
1137 if (isNeedNetworkLedger())
1138 {
1139 // Nothing we can do if we've never been in sync
1140 return;
1141 }
1142
1143 // this is an asynchronous interface
1144 auto const trans = sterilize(*iTrans);
1145
1146 auto const txid = trans->getTransactionID();
1147 auto const flags = app_.getHashRouter().getFlags(txid);
1148
1149 if ((flags & SF_BAD) != 0)
1150 {
1151 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1152 return;
1153 }
1154
1155 try
1156 {
1157 auto const [validity, reason] = checkValidity(
1158 app_.getHashRouter(),
1159 *trans,
1160 m_ledgerMaster.getValidatedRules(),
1161 app_.config());
1162
1163 if (validity != Validity::Valid)
1164 {
1165 JLOG(m_journal.warn())
1166 << "Submitted transaction invalid: " << reason;
1167 return;
1168 }
1169 }
1170 catch (std::exception const& ex)
1171 {
1172 JLOG(m_journal.warn())
1173 << "Exception checking transaction " << txid << ": " << ex.what();
1174
1175 return;
1176 }
1177
1178 std::string reason;
1179
1180 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1181
1182 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1183 auto t = tx;
1184 processTransaction(t, false, false, FailHard::no);
1185 });
1186}
1187
// Validate a transaction's signature and cached status, canonicalize it,
// and dispatch it for synchronous (local) or asynchronous (relayed)
// application to the open ledger.
void
NetworkOPsImp::processTransaction(
    std::shared_ptr<Transaction>& transaction,
    bool bUnlimited,
    bool bLocal,
    FailHard failType)
{
    auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
    auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());

    if ((newFlags & SF_BAD) != 0)
    {
        // cached bad
        JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
        transaction->setStatus(INVALID);
        transaction->setResult(temBAD_SIGNATURE);
        return;
    }

    // NOTE eahennis - I think this check is redundant,
    // but I'm not 100% sure yet.
    // If so, only cost is looking up HashRouter flags.
    auto const view = m_ledgerMaster.getCurrentLedger();
    auto const [validity, reason] = checkValidity(
        app_.getHashRouter(),
        *transaction->getSTransaction(),
        view->rules(),
        app_.config());
    XRPL_ASSERT(
        validity == Validity::Valid,
        "ripple::NetworkOPsImp::processTransaction : valid validity");

    // Not concerned with local checks at this point.
    if (validity == Validity::SigBad)
    {
        JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
        transaction->setStatus(INVALID);
        transaction->setResult(temBAD_SIGNATURE);
        // Remember the bad signature so a resubmission is rejected cheaply.
        app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
        return;
    }

    // canonicalize can change our pointer
    app_.getMasterTransaction().canonicalize(&transaction);

    // Local submissions block until applied (doTransactionSync waits);
    // relayed transactions are batched and applied asynchronously.
    if (bLocal)
        doTransactionSync(transaction, bUnlimited, failType);
    else
        doTransactionAsync(transaction, bUnlimited, failType);
}
1238
1239void
1240NetworkOPsImp::doTransactionAsync(
1241 std::shared_ptr<Transaction> transaction,
1242 bool bUnlimited,
1243 FailHard failType)
1244{
1245 std::lock_guard lock(mMutex);
1246
1247 if (transaction->getApplying())
1248 return;
1249
1250 mTransactions.push_back(
1251 TransactionStatus(transaction, bUnlimited, false, failType));
1252 transaction->setApplying();
1253
1254 if (mDispatchState == DispatchState::none)
1255 {
1256 if (m_job_queue.addJob(
1257 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1258 {
1259 mDispatchState = DispatchState::scheduled;
1260 }
1261 }
1262}
1263
// Queue a locally-submitted transaction and block the calling thread
// until a batch job has applied it (the applying flag is cleared by
// apply() once the transaction has been processed).
void
NetworkOPsImp::doTransactionSync(
    std::shared_ptr<Transaction> transaction,
    bool bUnlimited,
    FailHard failType)
{
    std::unique_lock<std::mutex> lock(mMutex);

    if (!transaction->getApplying())
    {
        mTransactions.push_back(
            TransactionStatus(transaction, bUnlimited, true, failType));
        transaction->setApplying();
    }

    do
    {
        if (mDispatchState == DispatchState::running)
        {
            // A batch processing job is already running, so wait.
            mCond.wait(lock);
        }
        else
        {
            // No batch running: apply the pending batch on this thread.
            apply(lock);

            if (mTransactions.size())
            {
                // More transactions need to be applied, but by another job.
                if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
                        transactionBatch();
                    }))
                {
                    mDispatchState = DispatchState::scheduled;
                }
            }
        }
    } while (transaction->getApplying());
}
1303
1304void
1305NetworkOPsImp::transactionBatch()
1306{
1307 std::unique_lock<std::mutex> lock(mMutex);
1308
1309 if (mDispatchState == DispatchState::running)
1310 return;
1311
1312 while (mTransactions.size())
1313 {
1314 apply(lock);
1315 }
1316}
1317
// Apply all queued transactions (mTransactions) to the open ledger as a
// single batch, then record, publish, and relay the per-transaction
// results. Called with batchLock held; it is released while the master
// and ledger mutexes are taken, then re-acquired at the end.
void
NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
{
    std::vector<TransactionStatus> transactions;
    mTransactions.swap(transactions);
    XRPL_ASSERT(
        !transactions.empty(),
        "ripple::NetworkOPsImp::apply : non-empty transactions");
    XRPL_ASSERT(
        mDispatchState != DispatchState::running,
        "ripple::NetworkOPsImp::apply : is not running");

    mDispatchState = DispatchState::running;

    batchLock.unlock();

    {
        // Lock both mutexes together (std::lock) to avoid deadlock.
        std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
        bool changed = false;
        {
            std::unique_lock ledgerLock{
                m_ledgerMaster.peekMutex(), std::defer_lock};
            std::lock(masterLock, ledgerLock);

            app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
                for (TransactionStatus& e : transactions)
                {
                    // we check before adding to the batch
                    ApplyFlags flags = tapNONE;
                    if (e.admin)
                        flags |= tapUNLIMITED;

                    if (e.failType == FailHard::yes)
                        flags |= tapFAIL_HARD;

                    auto const result = app_.getTxQ().apply(
                        app_, view, e.transaction->getSTransaction(), flags, j);
                    e.result = result.ter;
                    e.applied = result.applied;
                    changed = changed || result.applied;
                }
                return changed;
            });
        }
        // Any applied transaction may have moved the open-ledger fee level.
        if (changed)
            reportFeeChange();

        std::optional<LedgerIndex> validatedLedgerIndex;
        if (auto const l = m_ledgerMaster.getValidatedLedger())
            validatedLedgerIndex = l->info().seq;

        auto newOL = app_.openLedger().current();
        for (TransactionStatus& e : transactions)
        {
            e.transaction->clearSubmitResult();

            if (e.applied)
            {
                pubProposedTransaction(
                    newOL, e.transaction->getSTransaction(), e.result);
                e.transaction->setApplied();
            }

            e.transaction->setResult(e.result);

            // Malformed transactions are remembered so resubmissions are
            // rejected cheaply.
            if (isTemMalformed(e.result))
                app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);

#ifdef DEBUG
            if (e.result != tesSUCCESS)
            {
                std::string token, human;

                if (transResultInfo(e.result, token, human))
                {
                    JLOG(m_journal.info())
                        << "TransactionResult: " << token << ": " << human;
                }
            }
#endif

            bool addLocal = e.local;

            if (e.result == tesSUCCESS)
            {
                JLOG(m_journal.debug())
                    << "Transaction is now included in open ledger";
                e.transaction->setStatus(INCLUDED);

                // A success may unblock a held transaction from the same
                // account; queue it for the next batch.
                auto const& txCur = e.transaction->getSTransaction();
                auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);
                if (txNext)
                {
                    std::string reason;
                    auto const trans = sterilize(*txNext);
                    auto t = std::make_shared<Transaction>(trans, reason, app_);
                    submit_held.emplace_back(t, false, false, FailHard::no);
                    t->setApplying();
                }
            }
            else if (e.result == tefPAST_SEQ)
            {
                // duplicate or conflict
                JLOG(m_journal.info()) << "Transaction is obsolete";
                e.transaction->setStatus(OBSOLETE);
            }
            else if (e.result == terQUEUED)
            {
                JLOG(m_journal.debug())
                    << "Transaction is likely to claim a"
                    << " fee, but is queued until fee drops";

                e.transaction->setStatus(HELD);
                // Add to held transactions, because it could get
                // kicked out of the queue, and this will try to
                // put it back.
                m_ledgerMaster.addHeldTransaction(e.transaction);
                e.transaction->setQueued();
                e.transaction->setKept();
            }
            else if (isTerRetry(e.result))
            {
                if (e.failType != FailHard::yes)
                {
                    // transaction should be held
                    JLOG(m_journal.debug())
                        << "Transaction should be held: " << e.result;
                    e.transaction->setStatus(HELD);
                    m_ledgerMaster.addHeldTransaction(e.transaction);
                    e.transaction->setKept();
                }
            }
            else
            {
                JLOG(m_journal.debug())
                    << "Status other than success " << e.result;
                e.transaction->setStatus(INVALID);
            }

            auto const enforceFailHard =
                e.failType == FailHard::yes && !isTesSuccess(e.result);

            // Keep local submissions alive across ledgers unless the
            // submitter demanded fail_hard and the transaction failed.
            if (addLocal && !enforceFailHard)
            {
                m_localTX->push_back(
                    m_ledgerMaster.getCurrentLedgerIndex(),
                    e.transaction->getSTransaction());
                e.transaction->setKept();
            }

            // Relay applied/queued transactions (and local ones while not
            // FULL), skipping peers that already know about it.
            if ((e.applied ||
                 ((mMode != OperatingMode::FULL) &&
                  (e.failType != FailHard::yes) && e.local) ||
                 (e.result == terQUEUED)) &&
                !enforceFailHard)
            {
                auto const toSkip =
                    app_.getHashRouter().shouldRelay(e.transaction->getID());

                if (toSkip)
                {
                    protocol::TMTransaction tx;
                    Serializer s;

                    e.transaction->getSTransaction()->add(s);
                    tx.set_rawtransaction(s.data(), s.size());
                    tx.set_status(protocol::tsCURRENT);
                    tx.set_receivetimestamp(
                        app_.timeKeeper().now().time_since_epoch().count());
                    tx.set_deferred(e.result == terQUEUED);
                    // FIXME: This should be when we received it
                    app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
                    e.transaction->setBroadcast();
                }
            }

            // Record fee/sequence context so clients can be told what is
            // needed for the transaction to succeed.
            if (validatedLedgerIndex)
            {
                auto [fee, accountSeq, availableSeq] =
                    app_.getTxQ().getTxRequiredFeeAndSeq(
                        *newOL, e.transaction->getSTransaction());
                e.transaction->setCurrentLedgerState(
                    *validatedLedgerIndex, fee, accountSeq, availableSeq);
            }
        }
    }

    batchLock.lock();

    for (TransactionStatus& e : transactions)
        e.transaction->clearApplying();

    // Requeue transactions released by successes in this batch.
    if (!submit_held.empty())
    {
        if (mTransactions.empty())
            mTransactions.swap(submit_held);
        else
            for (auto& e : submit_held)
                mTransactions.push_back(std::move(e));
    }

    // Wake any doTransactionSync callers waiting on this batch.
    mCond.notify_all();

    mDispatchState = DispatchState::none;
}
1524
1525//
1526// Owner functions
1527//
1528
1530NetworkOPsImp::getOwnerInfo(
1532 AccountID const& account)
1533{
1534 Json::Value jvObjects(Json::objectValue);
1535 auto root = keylet::ownerDir(account);
1536 auto sleNode = lpLedger->read(keylet::page(root));
1537 if (sleNode)
1538 {
1539 std::uint64_t uNodeDir;
1540
1541 do
1542 {
1543 for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1544 {
1545 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1546 XRPL_ASSERT(
1547 sleCur,
1548 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1549
1550 switch (sleCur->getType())
1551 {
1552 case ltOFFER:
1553 if (!jvObjects.isMember(jss::offers))
1554 jvObjects[jss::offers] =
1556
1557 jvObjects[jss::offers].append(
1558 sleCur->getJson(JsonOptions::none));
1559 break;
1560
1561 case ltRIPPLE_STATE:
1562 if (!jvObjects.isMember(jss::ripple_lines))
1563 {
1564 jvObjects[jss::ripple_lines] =
1566 }
1567
1568 jvObjects[jss::ripple_lines].append(
1569 sleCur->getJson(JsonOptions::none));
1570 break;
1571
1572 case ltACCOUNT_ROOT:
1573 case ltDIR_NODE:
1574 default:
1575 UNREACHABLE(
1576 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1577 "type");
1578 break;
1579 }
1580 }
1581
1582 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1583
1584 if (uNodeDir)
1585 {
1586 sleNode = lpLedger->read(keylet::page(root, uNodeDir));
1587 XRPL_ASSERT(
1588 sleNode,
1589 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1590 }
1591 } while (uNodeDir);
1592 }
1593
1594 return jvObjects;
1595}
1596
1597//
1598// Other
1599//
1600
1601inline bool
1602NetworkOPsImp::isBlocked()
1603{
1604 return isAmendmentBlocked() || isUNLBlocked();
1605}
1606
1607inline bool
1608NetworkOPsImp::isAmendmentBlocked()
1609{
1610 return amendmentBlocked_;
1611}
1612
1613void
1614NetworkOPsImp::setAmendmentBlocked()
1615{
1616 amendmentBlocked_ = true;
1617 setMode(OperatingMode::CONNECTED);
1618}
1619
1620inline bool
1621NetworkOPsImp::isAmendmentWarned()
1622{
1623 return !amendmentBlocked_ && amendmentWarned_;
1624}
1625
1626inline void
1627NetworkOPsImp::setAmendmentWarned()
1628{
1629 amendmentWarned_ = true;
1630}
1631
1632inline void
1633NetworkOPsImp::clearAmendmentWarned()
1634{
1635 amendmentWarned_ = false;
1636}
1637
1638inline bool
1639NetworkOPsImp::isUNLBlocked()
1640{
1641 return unlBlocked_;
1642}
1643
1644void
1645NetworkOPsImp::setUNLBlocked()
1646{
1647 unlBlocked_ = true;
1648 setMode(OperatingMode::CONNECTED);
1649}
1650
1651inline void
1652NetworkOPsImp::clearUNLBlocked()
1653{
1654 unlBlocked_ = false;
1655}
1656
1657bool
1658NetworkOPsImp::checkLastClosedLedger(
1659 const Overlay::PeerSequence& peerList,
1660 uint256& networkClosed)
1661{
1662 // Returns true if there's an *abnormal* ledger issue, normal changing in
1663 // TRACKING mode should return false. Do we have sufficient validations for
1664 // our last closed ledger? Or do sufficient nodes agree? And do we have no
1665 // better ledger available? If so, we are either tracking or full.
1666
1667 JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";
1668
1669 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1670
1671 if (!ourClosed)
1672 return false;
1673
1674 uint256 closedLedger = ourClosed->info().hash;
1675 uint256 prevClosedLedger = ourClosed->info().parentHash;
1676 JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
1677 JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;
1678
1679 //-------------------------------------------------------------------------
1680 // Determine preferred last closed ledger
1681
1682 auto& validations = app_.getValidations();
1683 JLOG(m_journal.debug())
1684 << "ValidationTrie " << Json::Compact(validations.getJsonTrie());
1685
1686 // Will rely on peer LCL if no trusted validations exist
1688 peerCounts[closedLedger] = 0;
1689 if (mMode >= OperatingMode::TRACKING)
1690 peerCounts[closedLedger]++;
1691
1692 for (auto& peer : peerList)
1693 {
1694 uint256 peerLedger = peer->getClosedLedgerHash();
1695
1696 if (peerLedger.isNonZero())
1697 ++peerCounts[peerLedger];
1698 }
1699
1700 for (auto const& it : peerCounts)
1701 JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;
1702
1703 uint256 preferredLCL = validations.getPreferredLCL(
1704 RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
1705 m_ledgerMaster.getValidLedgerIndex(),
1706 peerCounts);
1707
1708 bool switchLedgers = preferredLCL != closedLedger;
1709 if (switchLedgers)
1710 closedLedger = preferredLCL;
1711 //-------------------------------------------------------------------------
1712 if (switchLedgers && (closedLedger == prevClosedLedger))
1713 {
1714 // don't switch to our own previous ledger
1715 JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
1716 networkClosed = ourClosed->info().hash;
1717 switchLedgers = false;
1718 }
1719 else
1720 networkClosed = closedLedger;
1721
1722 if (!switchLedgers)
1723 return false;
1724
1725 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1726
1727 if (!consensus)
1728 consensus = app_.getInboundLedgers().acquire(
1729 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1730
1731 if (consensus &&
1732 (!m_ledgerMaster.canBeCurrent(consensus) ||
1733 !m_ledgerMaster.isCompatible(
1734 *consensus, m_journal.debug(), "Not switching")))
1735 {
1736 // Don't switch to a ledger not on the validated chain
1737 // or with an invalid close time or sequence
1738 networkClosed = ourClosed->info().hash;
1739 return false;
1740 }
1741
1742 JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
1743 JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
1744 << getJson({*ourClosed, {}});
1745 JLOG(m_journal.info()) << "Net LCL " << closedLedger;
1746
1747 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1748 {
1749 setMode(OperatingMode::CONNECTED);
1750 }
1751
1752 if (consensus)
1753 {
1754 // FIXME: If this rewinds the ledger sequence, or has the same
1755 // sequence, we should update the status on any stored transactions
1756 // in the invalidated ledgers.
1757 switchLastClosedLedger(consensus);
1758 }
1759
1760 return true;
1761}
1762
1763void
1764NetworkOPsImp::switchLastClosedLedger(
1765 std::shared_ptr<Ledger const> const& newLCL)
1766{
1767 // set the newLCL as our last closed ledger -- this is abnormal code
1768 JLOG(m_journal.error())
1769 << "JUMP last closed ledger to " << newLCL->info().hash;
1770
1771 clearNeedNetworkLedger();
1772
1773 // Update fee computations.
1774 app_.getTxQ().processClosedLedger(app_, *newLCL, true);
1775
1776 // Caller must own master lock
1777 {
1778 // Apply tx in old open ledger to new
1779 // open ledger. Then apply local tx.
1780
1781 auto retries = m_localTX->getTxSet();
1782 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1784 if (lastVal)
1785 rules = makeRulesGivenLedger(*lastVal, app_.config().features);
1786 else
1787 rules.emplace(app_.config().features);
1788 app_.openLedger().accept(
1789 app_,
1790 *rules,
1791 newLCL,
1792 OrderedTxs({}),
1793 false,
1794 retries,
1795 tapNONE,
1796 "jump",
1797 [&](OpenView& view, beast::Journal j) {
1798 // Stuff the ledger with transactions from the queue.
1799 return app_.getTxQ().accept(app_, view);
1800 });
1801 }
1802
1803 m_ledgerMaster.switchLCL(newLCL);
1804
1805 protocol::TMStatusChange s;
1806 s.set_newevent(protocol::neSWITCHED_LEDGER);
1807 s.set_ledgerseq(newLCL->info().seq);
1808 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1809 s.set_ledgerhashprevious(
1810 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
1811 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
1812
1813 app_.overlay().foreach(
1814 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
1815}
1816
1817bool
1818NetworkOPsImp::beginConsensus(uint256 const& networkClosed)
1819{
1820 XRPL_ASSERT(
1821 networkClosed.isNonZero(),
1822 "ripple::NetworkOPsImp::beginConsensus : nonzero input");
1823
1824 auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
1825
1826 JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
1827 << " with LCL " << closingInfo.parentHash;
1828
1829 auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);
1830
1831 if (!prevLedger)
1832 {
1833 // this shouldn't happen unless we jump ledgers
1834 if (mMode == OperatingMode::FULL)
1835 {
1836 JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
1837 setMode(OperatingMode::TRACKING);
1838 }
1839
1840 return false;
1841 }
1842
1843 XRPL_ASSERT(
1844 prevLedger->info().hash == closingInfo.parentHash,
1845 "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
1846 "parent");
1847 XRPL_ASSERT(
1848 closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
1849 "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
1850 "hash");
1851
1852 if (prevLedger->rules().enabled(featureNegativeUNL))
1853 app_.validators().setNegativeUNL(prevLedger->negativeUNL());
1854 TrustChanges const changes = app_.validators().updateTrusted(
1855 app_.getValidations().getCurrentNodeIDs(),
1856 closingInfo.parentCloseTime,
1857 *this,
1858 app_.overlay(),
1859 app_.getHashRouter());
1860
1861 if (!changes.added.empty() || !changes.removed.empty())
1862 {
1863 app_.getValidations().trustChanged(changes.added, changes.removed);
1864 // Update the AmendmentTable so it tracks the current validators.
1865 app_.getAmendmentTable().trustChanged(
1866 app_.validators().getQuorumKeys().second);
1867 }
1868
1869 mConsensus.startRound(
1870 app_.timeKeeper().closeTime(),
1871 networkClosed,
1872 prevLedger,
1873 changes.removed,
1874 changes.added);
1875
1876 const ConsensusPhase currPhase = mConsensus.phase();
1877 if (mLastConsensusPhase != currPhase)
1878 {
1879 reportConsensusStateChange(currPhase);
1880 mLastConsensusPhase = currPhase;
1881 }
1882
1883 JLOG(m_journal.debug()) << "Initiating consensus engine";
1884 return true;
1885}
1886
1887bool
1888NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
1889{
1890 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
1891}
1892
1893void
1894NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
1895{
1896 // We now have an additional transaction set
1897 // either created locally during the consensus process
1898 // or acquired from a peer
1899
1900 // Inform peers we have this set
1901 protocol::TMHaveTransactionSet msg;
1902 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
1903 msg.set_status(protocol::tsHAVE);
1904 app_.overlay().foreach(
1905 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
1906
1907 // We acquired it because consensus asked us to
1908 if (fromAcquire)
1909 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
1910}
1911
1912void
1913NetworkOPsImp::endConsensus()
1914{
1915 uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;
1916
1917 for (auto const& it : app_.overlay().getActivePeers())
1918 {
1919 if (it && (it->getClosedLedgerHash() == deadLedger))
1920 {
1921 JLOG(m_journal.trace()) << "Killing obsolete peer status";
1922 it->cycleStatus();
1923 }
1924 }
1925
1926 uint256 networkClosed;
1927 bool ledgerChange =
1928 checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);
1929
1930 if (networkClosed.isZero())
1931 return;
1932
1933 // WRITEME: Unless we are in FULL and in the process of doing a consensus,
1934 // we must count how many nodes share our LCL, how many nodes disagree with
1935 // our LCL, and how many validations our LCL has. We also want to check
1936 // timing to make sure there shouldn't be a newer LCL. We need this
1937 // information to do the next three tests.
1938
1939 if (((mMode == OperatingMode::CONNECTED) ||
1940 (mMode == OperatingMode::SYNCING)) &&
1941 !ledgerChange)
1942 {
1943 // Count number of peers that agree with us and UNL nodes whose
1944 // validations we have for LCL. If the ledger is good enough, go to
1945 // TRACKING - TODO
1946 if (!needNetworkLedger_)
1947 setMode(OperatingMode::TRACKING);
1948 }
1949
1950 if (((mMode == OperatingMode::CONNECTED) ||
1951 (mMode == OperatingMode::TRACKING)) &&
1952 !ledgerChange)
1953 {
1954 // check if the ledger is good enough to go to FULL
1955 // Note: Do not go to FULL if we don't have the previous ledger
1956 // check if the ledger is bad enough to go to CONNECTE D -- TODO
1957 auto current = m_ledgerMaster.getCurrentLedger();
1958 if (app_.timeKeeper().now() < (current->info().parentCloseTime +
1959 2 * current->info().closeTimeResolution))
1960 {
1961 setMode(OperatingMode::FULL);
1962 }
1963 }
1964
1965 beginConsensus(networkClosed);
1966}
1967
1968void
1969NetworkOPsImp::consensusViewChange()
1970{
1971 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
1972 {
1973 setMode(OperatingMode::CONNECTED);
1974 }
1975}
1976
1977void
1978NetworkOPsImp::pubManifest(Manifest const& mo)
1979{
1980 // VFALCO consider std::shared_mutex
1981 std::lock_guard sl(mSubLock);
1982
1983 if (!mStreamMaps[sManifests].empty())
1984 {
1986
1987 jvObj[jss::type] = "manifestReceived";
1988 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
1989 if (mo.signingKey)
1990 jvObj[jss::signing_key] =
1991 toBase58(TokenType::NodePublic, *mo.signingKey);
1992 jvObj[jss::seq] = Json::UInt(mo.sequence);
1993 if (auto sig = mo.getSignature())
1994 jvObj[jss::signature] = strHex(*sig);
1995 jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
1996 if (!mo.domain.empty())
1997 jvObj[jss::domain] = mo.domain;
1998 jvObj[jss::manifest] = strHex(mo.serialized);
1999
2000 for (auto i = mStreamMaps[sManifests].begin();
2001 i != mStreamMaps[sManifests].end();)
2002 {
2003 if (auto p = i->second.lock())
2004 {
2005 p->send(jvObj, true);
2006 ++i;
2007 }
2008 else
2009 {
2010 i = mStreamMaps[sManifests].erase(i);
2011 }
2012 }
2013 }
2014}
2015
2016NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
2017 XRPAmount fee,
2018 TxQ::Metrics&& escalationMetrics,
2019 LoadFeeTrack const& loadFeeTrack)
2020 : loadFactorServer{loadFeeTrack.getLoadFactor()}
2021 , loadBaseServer{loadFeeTrack.getLoadBase()}
2022 , baseFee{fee}
2023 , em{std::move(escalationMetrics)}
2024{
2025}
2026
2027bool
2029 NetworkOPsImp::ServerFeeSummary const& b) const
2030{
2031 if (loadFactorServer != b.loadFactorServer ||
2032 loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
2033 em.has_value() != b.em.has_value())
2034 return true;
2035
2036 if (em && b.em)
2037 {
2038 return (
2039 em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
2040 em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
2041 em->referenceFeeLevel != b.em->referenceFeeLevel);
2042 }
2043
2044 return false;
2045}
2046
// Need to cap uint64 to uint32 due to JSON limitations: Json::Value has no
// 64-bit integer type, so values above UINT32_MAX are clamped.
static std::uint32_t
trunc32(std::uint64_t v)
{
    constexpr std::uint64_t max32 = std::numeric_limits<std::uint32_t>::max();

    return std::min(max32, v);
}
2055
2056void
2058{
2059 // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
2060 // list into a local array while holding the lock then release
2061 // the lock and call send on everyone.
2062 //
2064
2065 if (!mStreamMaps[sServer].empty())
2066 {
2068
2070 app_.openLedger().current()->fees().base,
2072 app_.getFeeTrack()};
2073
2074 jvObj[jss::type] = "serverStatus";
2075 jvObj[jss::server_status] = strOperatingMode();
2076 jvObj[jss::load_base] = f.loadBaseServer;
2077 jvObj[jss::load_factor_server] = f.loadFactorServer;
2078 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2079
2080 if (f.em)
2081 {
2082 auto const loadFactor = std::max(
2083 safe_cast<std::uint64_t>(f.loadFactorServer),
2084 mulDiv(
2085 f.em->openLedgerFeeLevel,
2086 f.loadBaseServer,
2087 f.em->referenceFeeLevel)
2089
2090 jvObj[jss::load_factor] = trunc32(loadFactor);
2091 jvObj[jss::load_factor_fee_escalation] =
2092 f.em->openLedgerFeeLevel.jsonClipped();
2093 jvObj[jss::load_factor_fee_queue] =
2094 f.em->minProcessingFeeLevel.jsonClipped();
2095 jvObj[jss::load_factor_fee_reference] =
2096 f.em->referenceFeeLevel.jsonClipped();
2097 }
2098 else
2099 jvObj[jss::load_factor] = f.loadFactorServer;
2100
2101 mLastFeeSummary = f;
2102
2103 for (auto i = mStreamMaps[sServer].begin();
2104 i != mStreamMaps[sServer].end();)
2105 {
2106 InfoSub::pointer p = i->second.lock();
2107
2108 // VFALCO TODO research the possibility of using thread queues and
2109 // linearizing the deletion of subscribers with the
2110 // sending of JSON data.
2111 if (p)
2112 {
2113 p->send(jvObj, true);
2114 ++i;
2115 }
2116 else
2117 {
2118 i = mStreamMaps[sServer].erase(i);
2119 }
2120 }
2121 }
2122}
2123
2124void
2126{
2128
2129 auto& streamMap = mStreamMaps[sConsensusPhase];
2130 if (!streamMap.empty())
2131 {
2133 jvObj[jss::type] = "consensusPhase";
2134 jvObj[jss::consensus] = to_string(phase);
2135
2136 for (auto i = streamMap.begin(); i != streamMap.end();)
2137 {
2138 if (auto p = i->second.lock())
2139 {
2140 p->send(jvObj, true);
2141 ++i;
2142 }
2143 else
2144 {
2145 i = streamMap.erase(i);
2146 }
2147 }
2148 }
2149}
2150
2151void
2153{
2154 // VFALCO consider std::shared_mutex
2156
2157 if (!mStreamMaps[sValidations].empty())
2158 {
2160
2161 auto const signerPublic = val->getSignerPublic();
2162
2163 jvObj[jss::type] = "validationReceived";
2164 jvObj[jss::validation_public_key] =
2165 toBase58(TokenType::NodePublic, signerPublic);
2166 jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
2167 jvObj[jss::signature] = strHex(val->getSignature());
2168 jvObj[jss::full] = val->isFull();
2169 jvObj[jss::flags] = val->getFlags();
2170 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2171 jvObj[jss::data] = strHex(val->getSerializer().slice());
2172
2173 if (auto version = (*val)[~sfServerVersion])
2174 jvObj[jss::server_version] = std::to_string(*version);
2175
2176 if (auto cookie = (*val)[~sfCookie])
2177 jvObj[jss::cookie] = std::to_string(*cookie);
2178
2179 if (auto hash = (*val)[~sfValidatedHash])
2180 jvObj[jss::validated_hash] = strHex(*hash);
2181
2182 auto const masterKey =
2183 app_.validatorManifests().getMasterKey(signerPublic);
2184
2185 if (masterKey != signerPublic)
2186 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
2187
2188 // NOTE *seq is a number, but old API versions used string. We replace
2189 // number with a string using MultiApiJson near end of this function
2190 if (auto const seq = (*val)[~sfLedgerSequence])
2191 jvObj[jss::ledger_index] = *seq;
2192
2193 if (val->isFieldPresent(sfAmendments))
2194 {
2195 jvObj[jss::amendments] = Json::Value(Json::arrayValue);
2196 for (auto const& amendment : val->getFieldV256(sfAmendments))
2197 jvObj[jss::amendments].append(to_string(amendment));
2198 }
2199
2200 if (auto const closeTime = (*val)[~sfCloseTime])
2201 jvObj[jss::close_time] = *closeTime;
2202
2203 if (auto const loadFee = (*val)[~sfLoadFee])
2204 jvObj[jss::load_fee] = *loadFee;
2205
2206 if (auto const baseFee = val->at(~sfBaseFee))
2207 jvObj[jss::base_fee] = static_cast<double>(*baseFee);
2208
2209 if (auto const reserveBase = val->at(~sfReserveBase))
2210 jvObj[jss::reserve_base] = *reserveBase;
2211
2212 if (auto const reserveInc = val->at(~sfReserveIncrement))
2213 jvObj[jss::reserve_inc] = *reserveInc;
2214
2215 // (The ~ operator converts the Proxy to a std::optional, which
2216 // simplifies later operations)
2217 if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2218 baseFeeXRP && baseFeeXRP->native())
2219 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2220
2221 if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2222 reserveBaseXRP && reserveBaseXRP->native())
2223 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2224
2225 if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2226 reserveIncXRP && reserveIncXRP->native())
2227 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2228
2229 // NOTE Use MultiApiJson to publish two slightly different JSON objects
2230 // for consumers supporting different API versions
2231 MultiApiJson multiObj{jvObj};
2232 multiObj.visit(
2233 RPC::apiVersion<1>, //
2234 [](Json::Value& jvTx) {
2235 // Type conversion for older API versions to string
2236 if (jvTx.isMember(jss::ledger_index))
2237 {
2238 jvTx[jss::ledger_index] =
2239 std::to_string(jvTx[jss::ledger_index].asUInt());
2240 }
2241 });
2242
2243 for (auto i = mStreamMaps[sValidations].begin();
2244 i != mStreamMaps[sValidations].end();)
2245 {
2246 if (auto p = i->second.lock())
2247 {
2248 multiObj.visit(
2249 p->getApiVersion(), //
2250 [&](Json::Value const& jv) { p->send(jv, true); });
2251 ++i;
2252 }
2253 else
2254 {
2255 i = mStreamMaps[sValidations].erase(i);
2256 }
2257 }
2258 }
2259}
2260
2261void
2263{
2265
2266 if (!mStreamMaps[sPeerStatus].empty())
2267 {
2268 Json::Value jvObj(func());
2269
2270 jvObj[jss::type] = "peerStatusChange";
2271
2272 for (auto i = mStreamMaps[sPeerStatus].begin();
2273 i != mStreamMaps[sPeerStatus].end();)
2274 {
2275 InfoSub::pointer p = i->second.lock();
2276
2277 if (p)
2278 {
2279 p->send(jvObj, true);
2280 ++i;
2281 }
2282 else
2283 {
2284 i = mStreamMaps[sPeerStatus].erase(i);
2285 }
2286 }
2287 }
2288}
2289
2290void
2292{
2293 using namespace std::chrono_literals;
2294 if (om == OperatingMode::CONNECTED)
2295 {
2298 }
2299 else if (om == OperatingMode::SYNCING)
2300 {
2303 }
2304
2305 if ((om > OperatingMode::CONNECTED) && isBlocked())
2307
2308 if (mMode == om)
2309 return;
2310
2311 mMode = om;
2312
2313 accounting_.mode(om);
2314
2315 JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
2316 pubServer();
2317}
2318
2319bool
2322 std::string const& source)
2323{
2324 JLOG(m_journal.trace())
2325 << "recvValidation " << val->getLedgerHash() << " from " << source;
2326
2328 BypassAccept bypassAccept = BypassAccept::no;
2329 try
2330 {
2331 if (pendingValidations_.contains(val->getLedgerHash()))
2332 bypassAccept = BypassAccept::yes;
2333 else
2334 pendingValidations_.insert(val->getLedgerHash());
2335 scope_unlock unlock(lock);
2336 handleNewValidation(app_, val, source, bypassAccept, m_journal);
2337 }
2338 catch (std::exception const& e)
2339 {
2340 JLOG(m_journal.warn())
2341 << "Exception thrown for handling new validation "
2342 << val->getLedgerHash() << ": " << e.what();
2343 }
2344 catch (...)
2345 {
2346 JLOG(m_journal.warn())
2347 << "Unknown exception thrown for handling new validation "
2348 << val->getLedgerHash();
2349 }
2350 if (bypassAccept == BypassAccept::no)
2351 {
2352 pendingValidations_.erase(val->getLedgerHash());
2353 }
2354 lock.unlock();
2355
2356 pubValidation(val);
2357
2358 JLOG(m_journal.debug()) << [this, &val]() -> auto {
2360 ss << "VALIDATION: " << val->render() << " master_key: ";
2361 auto master = app_.validators().getTrustedKey(val->getSignerPublic());
2362 if (master)
2363 {
2364 ss << toBase58(TokenType::NodePublic, *master);
2365 }
2366 else
2367 {
2368 ss << "none";
2369 }
2370 return ss.str();
2371 }();
2372
2373 // We will always relay trusted validations; if configured, we will
2374 // also relay all untrusted validations.
2375 return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2376}
2377
2380{
2381 return mConsensus.getJson(true);
2382}
2383
2385NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2386{
2388
2389 // System-level warnings
2390 {
2391 Json::Value warnings{Json::arrayValue};
2392 if (isAmendmentBlocked())
2393 {
2394 Json::Value& w = warnings.append(Json::objectValue);
2395 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2396 w[jss::message] =
2397 "This server is amendment blocked, and must be updated to be "
2398 "able to stay in sync with the network.";
2399 }
2400 if (isUNLBlocked())
2401 {
2402 Json::Value& w = warnings.append(Json::objectValue);
2403 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2404 w[jss::message] =
2405 "This server has an expired validator list. validators.txt "
2406 "may be incorrectly configured or some [validator_list_sites] "
2407 "may be unreachable.";
2408 }
2409 if (admin && isAmendmentWarned())
2410 {
2411 Json::Value& w = warnings.append(Json::objectValue);
2412 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2413 w[jss::message] =
2414 "One or more unsupported amendments have reached majority. "
2415 "Upgrade to the latest version before they are activated "
2416 "to avoid being amendment blocked.";
2417 if (auto const expected =
2419 {
2420 auto& d = w[jss::details] = Json::objectValue;
2421 d[jss::expected_date] = expected->time_since_epoch().count();
2422 d[jss::expected_date_UTC] = to_string(*expected);
2423 }
2424 }
2425
2426 if (warnings.size())
2427 info[jss::warnings] = std::move(warnings);
2428 }
2429
2430 // hostid: unique string describing the machine
2431 if (human)
2432 info[jss::hostid] = getHostId(admin);
2433
2434 // domain: if configured with a domain, report it:
2435 if (!app_.config().SERVER_DOMAIN.empty())
2436 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2437
2438 info[jss::build_version] = BuildInfo::getVersionString();
2439
2440 info[jss::server_state] = strOperatingMode(admin);
2441
2442 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2444
2446 info[jss::network_ledger] = "waiting";
2447
2448 info[jss::validation_quorum] =
2449 static_cast<Json::UInt>(app_.validators().quorum());
2450
2451 if (admin)
2452 {
2453 switch (app_.config().NODE_SIZE)
2454 {
2455 case 0:
2456 info[jss::node_size] = "tiny";
2457 break;
2458 case 1:
2459 info[jss::node_size] = "small";
2460 break;
2461 case 2:
2462 info[jss::node_size] = "medium";
2463 break;
2464 case 3:
2465 info[jss::node_size] = "large";
2466 break;
2467 case 4:
2468 info[jss::node_size] = "huge";
2469 break;
2470 }
2471
2472 auto when = app_.validators().expires();
2473
2474 if (!human)
2475 {
2476 if (when)
2477 info[jss::validator_list_expires] =
2478 safe_cast<Json::UInt>(when->time_since_epoch().count());
2479 else
2480 info[jss::validator_list_expires] = 0;
2481 }
2482 else
2483 {
2484 auto& x = (info[jss::validator_list] = Json::objectValue);
2485
2486 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2487
2488 if (when)
2489 {
2490 if (*when == TimeKeeper::time_point::max())
2491 {
2492 x[jss::expiration] = "never";
2493 x[jss::status] = "active";
2494 }
2495 else
2496 {
2497 x[jss::expiration] = to_string(*when);
2498
2499 if (*when > app_.timeKeeper().now())
2500 x[jss::status] = "active";
2501 else
2502 x[jss::status] = "expired";
2503 }
2504 }
2505 else
2506 {
2507 x[jss::status] = "unknown";
2508 x[jss::expiration] = "unknown";
2509 }
2510 }
2511
2512#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2513 {
2514 auto& x = (info[jss::git] = Json::objectValue);
2515#ifdef GIT_COMMIT_HASH
2516 x[jss::hash] = GIT_COMMIT_HASH;
2517#endif
2518#ifdef GIT_BRANCH
2519 x[jss::branch] = GIT_BRANCH;
2520#endif
2521 }
2522#endif
2523 }
2524 info[jss::io_latency_ms] =
2525 static_cast<Json::UInt>(app_.getIOLatency().count());
2526
2527 if (admin)
2528 {
2529 if (auto const localPubKey = app_.validators().localPublicKey();
2530 localPubKey && app_.getValidationPublicKey())
2531 {
2532 info[jss::pubkey_validator] =
2533 toBase58(TokenType::NodePublic, localPubKey.value());
2534 }
2535 else
2536 {
2537 info[jss::pubkey_validator] = "none";
2538 }
2539 }
2540
2541 if (counters)
2542 {
2543 info[jss::counters] = app_.getPerfLog().countersJson();
2544
2545 Json::Value nodestore(Json::objectValue);
2546 app_.getNodeStore().getCountsJson(nodestore);
2547 info[jss::counters][jss::nodestore] = nodestore;
2548 info[jss::current_activities] = app_.getPerfLog().currentJson();
2549 }
2550
2551 info[jss::pubkey_node] =
2553
2554 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2555
2557 info[jss::amendment_blocked] = true;
2558
2559 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2560
2561 if (fp != 0)
2562 info[jss::fetch_pack] = Json::UInt(fp);
2563
2564 info[jss::peers] = Json::UInt(app_.overlay().size());
2565
2566 Json::Value lastClose = Json::objectValue;
2567 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2568
2569 if (human)
2570 {
2571 lastClose[jss::converge_time_s] =
2573 }
2574 else
2575 {
2576 lastClose[jss::converge_time] =
2578 }
2579
2580 info[jss::last_close] = lastClose;
2581
2582 // info[jss::consensus] = mConsensus.getJson();
2583
2584 if (admin)
2585 info[jss::load] = m_job_queue.getJson();
2586
2587 if (auto const netid = app_.overlay().networkID())
2588 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2589
2590 auto const escalationMetrics =
2592
2593 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2594 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2595 /* Scale the escalated fee level to unitless "load factor".
2596 In practice, this just strips the units, but it will continue
2597 to work correctly if either base value ever changes. */
2598 auto const loadFactorFeeEscalation =
2599 mulDiv(
2600 escalationMetrics.openLedgerFeeLevel,
2601 loadBaseServer,
2602 escalationMetrics.referenceFeeLevel)
2604
2605 auto const loadFactor = std::max(
2606 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2607
2608 if (!human)
2609 {
2610 info[jss::load_base] = loadBaseServer;
2611 info[jss::load_factor] = trunc32(loadFactor);
2612 info[jss::load_factor_server] = loadFactorServer;
2613
2614 /* Json::Value doesn't support uint64, so clamp to max
2615 uint32 value. This is mostly theoretical, since there
2616 probably isn't enough extant XRP to drive the factor
2617 that high.
2618 */
2619 info[jss::load_factor_fee_escalation] =
2620 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2621 info[jss::load_factor_fee_queue] =
2622 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2623 info[jss::load_factor_fee_reference] =
2624 escalationMetrics.referenceFeeLevel.jsonClipped();
2625 }
2626 else
2627 {
2628 info[jss::load_factor] =
2629 static_cast<double>(loadFactor) / loadBaseServer;
2630
2631 if (loadFactorServer != loadFactor)
2632 info[jss::load_factor_server] =
2633 static_cast<double>(loadFactorServer) / loadBaseServer;
2634
2635 if (admin)
2636 {
2638 if (fee != loadBaseServer)
2639 info[jss::load_factor_local] =
2640 static_cast<double>(fee) / loadBaseServer;
2641 fee = app_.getFeeTrack().getRemoteFee();
2642 if (fee != loadBaseServer)
2643 info[jss::load_factor_net] =
2644 static_cast<double>(fee) / loadBaseServer;
2645 fee = app_.getFeeTrack().getClusterFee();
2646 if (fee != loadBaseServer)
2647 info[jss::load_factor_cluster] =
2648 static_cast<double>(fee) / loadBaseServer;
2649 }
2650 if (escalationMetrics.openLedgerFeeLevel !=
2651 escalationMetrics.referenceFeeLevel &&
2652 (admin || loadFactorFeeEscalation != loadFactor))
2653 info[jss::load_factor_fee_escalation] =
2654 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2655 escalationMetrics.referenceFeeLevel);
2656 if (escalationMetrics.minProcessingFeeLevel !=
2657 escalationMetrics.referenceFeeLevel)
2658 info[jss::load_factor_fee_queue] =
2659 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2660 escalationMetrics.referenceFeeLevel);
2661 }
2662
2663 bool valid = false;
2664 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2665
2666 if (lpClosed)
2667 valid = true;
2668 else
2669 lpClosed = m_ledgerMaster.getClosedLedger();
2670
2671 if (lpClosed)
2672 {
2673 XRPAmount const baseFee = lpClosed->fees().base;
2675 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2676 l[jss::hash] = to_string(lpClosed->info().hash);
2677
2678 if (!human)
2679 {
2680 l[jss::base_fee] = baseFee.jsonClipped();
2681 l[jss::reserve_base] =
2682 lpClosed->fees().accountReserve(0).jsonClipped();
2683 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2684 l[jss::close_time] = Json::Value::UInt(
2685 lpClosed->info().closeTime.time_since_epoch().count());
2686 }
2687 else
2688 {
2689 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2690 l[jss::reserve_base_xrp] =
2691 lpClosed->fees().accountReserve(0).decimalXRP();
2692 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2693
2694 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2695 std::abs(closeOffset.count()) >= 60)
2696 l[jss::close_time_offset] =
2697 static_cast<std::uint32_t>(closeOffset.count());
2698
2699 constexpr std::chrono::seconds highAgeThreshold{1000000};
2701 {
2702 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2703 l[jss::age] =
2704 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2705 }
2706 else
2707 {
2708 auto lCloseTime = lpClosed->info().closeTime;
2709 auto closeTime = app_.timeKeeper().closeTime();
2710 if (lCloseTime <= closeTime)
2711 {
2712 using namespace std::chrono_literals;
2713 auto age = closeTime - lCloseTime;
2714 l[jss::age] =
2715 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2716 }
2717 }
2718 }
2719
2720 if (valid)
2721 info[jss::validated_ledger] = l;
2722 else
2723 info[jss::closed_ledger] = l;
2724
2725 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2726 if (!lpPublished)
2727 info[jss::published_ledger] = "none";
2728 else if (lpPublished->info().seq != lpClosed->info().seq)
2729 info[jss::published_ledger] = lpPublished->info().seq;
2730 }
2731
2732 accounting_.json(info);
2733 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2734 info[jss::jq_trans_overflow] =
2736 info[jss::peer_disconnects] =
2738 info[jss::peer_disconnects_resources] =
2740
2741 // This array must be sorted in increasing order.
2742 static constexpr std::array<std::string_view, 7> protocols{
2743 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2744 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2745 {
2747 for (auto const& port : app_.getServerHandler().setup().ports)
2748 {
2749 // Don't publish admin ports for non-admin users
2750 if (!admin &&
2751 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2752 port.admin_user.empty() && port.admin_password.empty()))
2753 continue;
2756 std::begin(port.protocol),
2757 std::end(port.protocol),
2758 std::begin(protocols),
2759 std::end(protocols),
2760 std::back_inserter(proto));
2761 if (!proto.empty())
2762 {
2763 auto& jv = ports.append(Json::Value(Json::objectValue));
2764 jv[jss::port] = std::to_string(port.port);
2765 jv[jss::protocol] = Json::Value{Json::arrayValue};
2766 for (auto const& p : proto)
2767 jv[jss::protocol].append(p);
2768 }
2769 }
2770
2771 if (app_.config().exists(SECTION_PORT_GRPC))
2772 {
2773 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
2774 auto const optPort = grpcSection.get("port");
2775 if (optPort && grpcSection.get("ip"))
2776 {
2777 auto& jv = ports.append(Json::Value(Json::objectValue));
2778 jv[jss::port] = *optPort;
2779 jv[jss::protocol] = Json::Value{Json::arrayValue};
2780 jv[jss::protocol].append("grpc");
2781 }
2782 }
2783 info[jss::ports] = std::move(ports);
2784 }
2785
2786 return info;
2787}
2788
// NOTE(review): the function-name line (listing 2790) and the single body
// statement (listing 2792) are missing from this capture; only the return
// type and braces survive. File position suggests this is
// NetworkOPsImp::clearLedgerFetch — confirm against the upstream source.
2789 void
2791 {
2793 }
2794
2797{
2798 return app_.getInboundLedgers().getInfo();
2799}
2800
// Publish a proposed (not yet validated) transaction to every subscriber of
// the real-time transactions stream, pruning subscribers whose weak pointers
// have expired, then fan the event out to per-account proposed-tx streams.
// NOTE(review): the function-name line (listing 2802) and the line at
// listing 2811 (presumably a subscription-lock acquisition) are missing
// from this capture — confirm against the upstream source.
2801 void
2803 std::shared_ptr<ReadView const> const& ledger,
2804 std::shared_ptr<STTx const> const& transaction,
2805 TER result)
2806 {
// Build a multi-API-version JSON representation once, up front.
2807 MultiApiJson jvObj =
2808 transJson(transaction, result, false, ledger, std::nullopt);
2809
2810 {
2812
2813 auto it = mStreamMaps[sRTTransactions].begin();
2814 while (it != mStreamMaps[sRTTransactions].end())
2815 {
2816 InfoSub::pointer p = it->second.lock();
2817
2818 if (p)
2819 {
// Serialize for the subscriber's negotiated API version.
2820 jvObj.visit(
2821 p->getApiVersion(), //
2822 [&](Json::Value const& jv) { p->send(jv, true); });
2823 ++it;
2824 }
2825 else
2826 {
// Subscriber is gone; prune the dead entry in place.
2827 it = mStreamMaps[sRTTransactions].erase(it);
2828 }
2829 }
2830 }
2831
2832 pubProposedAccountTransaction(ledger, transaction, result);
2833 }
2834
// NetworkOPsImp::pubLedger (name grounded by the XRPL_ASSERT message below):
// publish an accepted ledger to the "ledger" and "book_changes" streams,
// kick off any delayed account-history subscriptions, and then publish each
// transaction in the ledger via pubValidatedTransaction.
// NOTE(review): several lines are missing from this capture (listings 2836,
// 2841, 2859, 2863, 2881, 2884, 2932, 2945) — among them the function-name
// line, the declaration of alpAccepted, a lock acquisition, the local jvObj
// declaration, and call-expression heads. Confirm against upstream.
2835 void
2837 {
2838 // Ledgers are published only when they acquire sufficient validations
2839 // Holes are filled across connection loss or other catastrophe
2840
2842 app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
2843 if (!alpAccepted)
2844 {
// Cache miss: build the AcceptedLedger wrapper and canonicalize it so
// concurrent publishers share one instance.
2845 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
2846 app_.getAcceptedLedgerCache().canonicalize_replace_client(
2847 lpAccepted->info().hash, alpAccepted);
2848 }
2849
2850 XRPL_ASSERT(
2851 alpAccepted->getLedger().get() == lpAccepted.get(),
2852 "ripple::NetworkOPsImp::pubLedger : accepted input");
2853
2854 {
2855 JLOG(m_journal.debug())
2856 << "Publishing ledger " << lpAccepted->info().seq << " "
2857 << lpAccepted->info().hash;
2858
2860
2861 if (!mStreamMaps[sLedger].empty())
2862 {
2864
// Assemble the ledgerClosed notification: identity, close time, fees
// and reserves, and the transaction count.
2865 jvObj[jss::type] = "ledgerClosed";
2866 jvObj[jss::ledger_index] = lpAccepted->info().seq;
2867 jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
2868 jvObj[jss::ledger_time] = Json::Value::UInt(
2869 lpAccepted->info().closeTime.time_since_epoch().count());
2870
// fee_ref is only reported pre-XRPFees for backward compatibility.
2871 if (!lpAccepted->rules().enabled(featureXRPFees))
2872 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
2873 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
2874 jvObj[jss::reserve_base] =
2875 lpAccepted->fees().accountReserve(0).jsonClipped();
2876 jvObj[jss::reserve_inc] =
2877 lpAccepted->fees().increment.jsonClipped();
2878
2879 jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
2880
2882 {
2883 jvObj[jss::validated_ledgers] =
2885 }
2886
// Send to every live ledger-stream subscriber; drop dead entries.
2887 auto it = mStreamMaps[sLedger].begin();
2888 while (it != mStreamMaps[sLedger].end())
2889 {
2890 InfoSub::pointer p = it->second.lock();
2891 if (p)
2892 {
2893 p->send(jvObj, true);
2894 ++it;
2895 }
2896 else
2897 it = mStreamMaps[sLedger].erase(it);
2898 }
2899 }
2900
2901 if (!mStreamMaps[sBookChanges].empty())
2902 {
// book_changes is computed once per ledger and shared by all
// subscribers of that stream.
2903 Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
2904
2905 auto it = mStreamMaps[sBookChanges].begin();
2906 while (it != mStreamMaps[sBookChanges].end())
2907 {
2908 InfoSub::pointer p = it->second.lock();
2909 if (p)
2910 {
2911 p->send(jvObj, true);
2912 ++it;
2913 }
2914 else
2915 it = mStreamMaps[sBookChanges].erase(it);
2916 }
2917 }
2918
2919 {
// Function-local static: this branch runs exactly once, on the first
// ledger ever published by this process.
2920 static bool firstTime = true;
2921 if (firstTime)
2922 {
2923 // First validated ledger, start delayed SubAccountHistory
2924 firstTime = false;
2925 for (auto& outer : mSubAccountHistory)
2926 {
2927 for (auto& inner : outer.second)
2928 {
2929 auto& subInfo = inner.second;
// separationLedgerSeq_ == 0 marks a subscription created
// before any validated ledger existed.
2930 if (subInfo.index_->separationLedgerSeq_ == 0)
2931 {
2933 alpAccepted->getLedger(), subInfo);
2934 }
2935 }
2936 }
2937 }
2938 }
2939 }
2940
2941 // Don't lock since pubAcceptedTransaction is locking.
2942 for (auto const& accTx : *alpAccepted)
2943 {
2944 JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
2946 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
2947 }
2948 }
2949
// Detect a change in the fee summary and, if anything changed, schedule a
// client job to republish server state (pubServer) to subscribers.
// NOTE(review): listings 2951, 2953, 2955 and 2961 are missing from this
// capture — presumably the function-name line, the head of the fee-summary
// construction, and the job-queue call — confirm against upstream.
2950 void
2952 {
2954 app_.openLedger().current()->fees().base,
2956 app_.getFeeTrack()};
2957
2958 // only schedule the job if something has changed
2959 if (f != mLastFeeSummary)
2960 {
2962 jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
2963 pubServer();
2964 });
2965 }
2966 }
2967
// Schedule a client job that publishes a consensus-phase change to
// subscribers via pubConsensus. The phase is captured by value into the
// job's lambda so it remains valid when the job runs asynchronously.
// NOTE(review): listings 2969, 2971-2972 (name line and job-queue call
// head) are missing from this capture — confirm against upstream.
2968 void
2970 {
2973 "reportConsensusStateChange->pubConsensus",
2974 [this, phase]() { pubConsensus(phase); });
2975 }
2976
// Sweep stale local transactions against the given view; delegates to the
// LocalTxs collection. NOTE(review): the name line (listing 2978) is
// missing from this capture — confirm the exact signature upstream.
2977 inline void
2979 {
2980 m_localTX->sweep(view);
2981 }
// Number of local transactions currently held; delegates to LocalTxs.
// NOTE(review): the name line (listing 2983) is missing from this capture.
2982 inline std::size_t
2984 {
2985 return m_localTX->size();
2986 }
2987
2988 // This routine should only be used to publish accepted or validated
2989 // transactions.
// Build the JSON notification for a transaction, in all supported API
// versions at once (MultiApiJson). Includes the engine result, ledger
// linkage, optional metadata, and — for OfferCreates funded by a third
// party — the owner's available funds.
// NOTE(review): several lines are missing from this capture (listings
// 2990-2991 return type/name, 2996, 2998, 3014, 3016, 3056, 3064, 3067) —
// among them the jvObj declaration and two meta post-processing call heads.
// Confirm against the upstream source before relying on this listing.
2992 std::shared_ptr<STTx const> const& transaction,
2993 TER result,
2994 bool validated,
2995 std::shared_ptr<ReadView const> const& ledger,
2997 {
2999 std::string sToken;
3000 std::string sHuman;
3001
// Map the TER code to its token (e.g. "tesSUCCESS") and human text.
3002 transResultInfo(result, sToken, sHuman);
3003
3004 jvObj[jss::type] = "transaction";
3005 // NOTE jvObj is not a finished object for either API version. After
3006 // it's populated, we need to finish it for a specific API version. This is
3007 // done in a loop, near the end of this function.
3008 jvObj[jss::transaction] =
3009 transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3010
3011 if (meta)
3012 {
3013 jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
3015 jvObj[jss::meta], *ledger, transaction, meta->get());
3017 jvObj[jss::meta], transaction, meta->get());
3018 }
3019
// A closed ledger has a fixed hash; an open one does not.
3020 if (!ledger->open())
3021 jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3022
3023 if (validated)
3024 {
3025 jvObj[jss::ledger_index] = ledger->info().seq;
3026 jvObj[jss::transaction][jss::date] =
3027 ledger->info().closeTime.time_since_epoch().count();
3028 jvObj[jss::validated] = true;
3029 jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3030
3031 // WRITEME: Put the account next seq here
3032 }
3033 else
3034 {
3035 jvObj[jss::validated] = false;
3036 jvObj[jss::ledger_current_index] = ledger->info().seq;
3037 }
3038
3039 jvObj[jss::status] = validated ? "closed" : "proposed";
3040 jvObj[jss::engine_result] = sToken;
3041 jvObj[jss::engine_result_code] = result;
3042 jvObj[jss::engine_result_message] = sHuman;
3043
3044 if (transaction->getTxnType() == ttOFFER_CREATE)
3045 {
3046 auto const account = transaction->getAccountID(sfAccount);
3047 auto const amount = transaction->getFieldAmount(sfTakerGets);
3048
3049 // If the offer create is not self funded then add the owner balance
3050 if (account != amount.issue().account)
3051 {
3052 auto const ownerFunds = accountFunds(
3053 *ledger,
3054 account,
3055 amount,
3057 app_.journal("View"));
3058 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3059 }
3060 }
3061
// Finish each per-API-version copy: insert DeliverMax, then place the
// hash (and rename "transaction" to "tx_json") per API-version rules.
3062 std::string const hash = to_string(transaction->getTransactionID());
3063 MultiApiJson multiObj{jvObj};
3065 multiObj.visit(), //
3066 [&]<unsigned Version>(
3068 RPC::insertDeliverMax(
3069 jvTx[jss::transaction], transaction->getTxnType(), Version);
3070
3071 if constexpr (Version > 1)
3072 {
// API v2+: transaction JSON lives under "tx_json", hash at top level.
3073 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3074 jvTx[jss::hash] = hash;
3075 }
3076 else
3077 {
3078 jvTx[jss::transaction][jss::hash] = hash;
3079 }
3080 });
3081
3082 return multiObj;
3083 }
3084
// Publish a validated (accepted-ledger) transaction to both the
// "transactions" and "rt_transactions" streams, pruning dead subscribers,
// then notify the order book machinery (on success) and per-account
// subscribers.
// NOTE(review): listings 3086 (function-name line) and 3099 (presumably a
// subscription-lock acquisition) are missing from this capture.
3085 void
3087 std::shared_ptr<ReadView const> const& ledger,
3088 const AcceptedLedgerTx& transaction,
3089 bool last)
3090 {
3091 auto const& stTxn = transaction.getTxn();
3092
3093 // Create two different Json objects, for different API versions
3094 auto const metaRef = std::ref(transaction.getMeta());
3095 auto const trResult = transaction.getResult();
3096 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3097
3098 {
3100
// Validated-transaction stream subscribers.
3101 auto it = mStreamMaps[sTransactions].begin();
3102 while (it != mStreamMaps[sTransactions].end())
3103 {
3104 InfoSub::pointer p = it->second.lock();
3105
3106 if (p)
3107 {
3108 jvObj.visit(
3109 p->getApiVersion(), //
3110 [&](Json::Value const& jv) { p->send(jv, true); });
3111 ++it;
3112 }
3113 else
3114 it = mStreamMaps[sTransactions].erase(it);
3115 }
3116
// Real-time stream subscribers also receive validated transactions.
3117 it = mStreamMaps[sRTTransactions].begin();
3118
3119 while (it != mStreamMaps[sRTTransactions].end())
3120 {
3121 InfoSub::pointer p = it->second.lock();
3122
3123 if (p)
3124 {
3125 jvObj.visit(
3126 p->getApiVersion(), //
3127 [&](Json::Value const& jv) { p->send(jv, true); });
3128 ++it;
3129 }
3130 else
3131 it = mStreamMaps[sRTTransactions].erase(it);
3132 }
3133 }
3134
// Only successful transactions can change order books.
3135 if (transaction.getResult() == tesSUCCESS)
3136 app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3137
3138 pubAccountTransaction(ledger, transaction, last);
3139 }
3140
// NetworkOPsImp::pubAccountTransaction (name grounded by the XRPL_ASSERT
// message below): deliver a validated transaction to the per-account
// real-time, accepted, and account-history subscription maps for every
// account the transaction affected, pruning dead subscribers as it goes.
// NOTE(review): listings 3142 (name line), 3147 (notify-set declaration),
// 3154 (lock acquisition), 3157 (third condition of the emptiness check)
// and 3257 (second operand of the assert) are missing from this capture.
3141 void
3143 std::shared_ptr<ReadView const> const& ledger,
3144 AcceptedLedgerTx const& transaction,
3145 bool last)
3146 {
3148 int iProposed = 0;
3149 int iAccepted = 0;
3150
3151 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3152 auto const currLedgerSeq = ledger->seq();
3153 {
3155
3156 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3158 {
3159 for (auto const& affectedAccount : transaction.getAffected())
3160 {
// Real-time per-account subscribers.
3161 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3162 simiIt != mSubRTAccount.end())
3163 {
3164 auto it = simiIt->second.begin();
3165
3166 while (it != simiIt->second.end())
3167 {
3168 InfoSub::pointer p = it->second.lock();
3169
3170 if (p)
3171 {
3172 notify.insert(p);
3173 ++it;
3174 ++iProposed;
3175 }
3176 else
3177 it = simiIt->second.erase(it);
3178 }
3179 }
3180
// Accepted (validated) per-account subscribers.
3181 if (auto simiIt = mSubAccount.find(affectedAccount);
3182 simiIt != mSubAccount.end())
3183 {
3184 auto it = simiIt->second.begin();
3185 while (it != simiIt->second.end())
3186 {
3187 InfoSub::pointer p = it->second.lock();
3188
3189 if (p)
3190 {
3191 notify.insert(p);
3192 ++it;
3193 ++iAccepted;
3194 }
3195 else
3196 it = simiIt->second.erase(it);
3197 }
3198 }
3199
// Account-history subscribers: only forward transactions past the
// subscription's separation ledger; older ones are handled by the
// backward-fill job.
3200 if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3201 histoIt != mSubAccountHistory.end())
3202 {
3203 auto& subs = histoIt->second;
3204 auto it = subs.begin();
3205 while (it != subs.end())
3206 {
3207 SubAccountHistoryInfoWeak const& info = it->second;
3208 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3209 {
3210 ++it;
3211 continue;
3212 }
3213
3214 if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3215 {
3216 accountHistoryNotify.emplace_back(
3217 SubAccountHistoryInfo{isSptr, info.index_});
3218 ++it;
3219 }
3220 else
3221 {
3222 it = subs.erase(it);
3223 }
3224 }
3225 if (subs.empty())
3226 mSubAccountHistory.erase(histoIt);
3227 }
3228 }
3229 }
3230 }
3231
3232 JLOG(m_journal.trace())
3233 << "pubAccountTransaction: " << "proposed=" << iProposed
3234 << ", accepted=" << iAccepted;
3235
3236 if (!notify.empty() || !accountHistoryNotify.empty())
3237 {
3238 auto const& stTxn = transaction.getTxn();
3239
3240 // Create two different Json objects, for different API versions
3241 auto const metaRef = std::ref(transaction.getMeta());
3242 auto const trResult = transaction.getResult();
3243 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3244
3245 for (InfoSub::ref isrListener : notify)
3246 {
3247 jvObj.visit(
3248 isrListener->getApiVersion(), //
3249 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3250 }
3251
// Mark the last transaction of the ledger for history-stream clients.
3252 if (last)
3253 jvObj.set(jss::account_history_boundary, true);
3254
3255 XRPL_ASSERT(
3256 jvObj.isMember(jss::account_history_tx_stream) ==
3258 "ripple::NetworkOPsImp::pubAccountTransaction : "
3259 "account_history_tx_stream not set");
3260 for (auto& info : accountHistoryNotify)
3261 {
// Forward (post-subscription) history entries get increasing indexes;
// index 0 with no historical backlog also marks the first tx.
3262 auto& index = info.index_;
3263 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3264 jvObj.set(jss::account_history_tx_first, true);
3265
3266 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3267
3268 jvObj.visit(
3269 info.sink_->getApiVersion(), //
3270 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3271 }
3272 }
3273 }
3274
// NetworkOPs::pubProposedAccountTransaction (name grounded by the
// XRPL_ASSERT message below): deliver a proposed transaction to real-time
// per-account subscribers for every account the transaction mentions.
// NOTE(review): listings 3276 (name), 3278 (the tx parameter line), 3281
// (notify-set declaration), 3287 (lock), 3293 (third condition of the
// emptiness check) and 3334 (second operand of the assert) are missing
// from this capture.
3275 void
3277 std::shared_ptr<ReadView const> const& ledger,
3279 TER result)
3280 {
3282 int iProposed = 0;
3283
3284 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3285
3286 {
3288
// Fast exit: nobody subscribes to real-time account streams.
3289 if (mSubRTAccount.empty())
3290 return;
3291
3292 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3294 {
3295 for (auto const& affectedAccount : tx->getMentionedAccounts())
3296 {
3297 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3298 simiIt != mSubRTAccount.end())
3299 {
3300 auto it = simiIt->second.begin();
3301
3302 while (it != simiIt->second.end())
3303 {
3304 InfoSub::pointer p = it->second.lock();
3305
3306 if (p)
3307 {
3308 notify.insert(p);
3309 ++it;
3310 ++iProposed;
3311 }
3312 else
3313 it = simiIt->second.erase(it);
3314 }
3315 }
3316 }
3317 }
3318 }
3319
3320 JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3321
3322 if (!notify.empty() || !accountHistoryNotify.empty())
3323 {
3324 // Create two different Json objects, for different API versions
3325 MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3326
3327 for (InfoSub::ref isrListener : notify)
3328 jvObj.visit(
3329 isrListener->getApiVersion(), //
3330 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3331
3332 XRPL_ASSERT(
3333 jvObj.isMember(jss::account_history_tx_stream) ==
3335 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3336 "account_history_tx_stream not set");
3337 for (auto& info : accountHistoryNotify)
3338 {
3339 auto& index = info.index_;
3340 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3341 jvObj.set(jss::account_history_tx_first, true);
3342 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3343 jvObj.visit(
3344 info.sink_->getApiVersion(), //
3345 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3346 }
3347 }
3348 }
3349
3350//
3351// Monitoring
3352//
3353
// Subscribe a listener to a set of accounts, either on the real-time map
// (rt == true) or the accepted-transaction map. Registers each account on
// the InfoSub first, then records the listener in the server-side map.
// NOTE(review): listings 3355 (name line) and 3370 (presumably the
// subscription-lock acquisition) are missing from this capture.
3354 void
3356 InfoSub::ref isrListener,
3357 hash_set<AccountID> const& vnaAccountIDs,
3358 bool rt)
3359 {
3360 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3361
3362 for (auto const& naAccountID : vnaAccountIDs)
3363 {
3364 JLOG(m_journal.trace())
3365 << "subAccount: account: " << toBase58(naAccountID);
3366
3367 isrListener->insertSubAccountInfo(naAccountID, rt);
3368 }
3369
3371
3372 for (auto const& naAccountID : vnaAccountIDs)
3373 {
3374 auto simIterator = subMap.find(naAccountID);
3375 if (simIterator == subMap.end())
3376 {
3377 // Not found, note that account has a new single listener.
3378 SubMapType usisElement;
3379 usisElement[isrListener->getSeq()] = isrListener;
3380 // VFALCO NOTE This is making a needless copy of naAccountID
3381 subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3382 }
3383 else
3384 {
3385 // Found, note that the account has another listener.
3386 simIterator->second[isrListener->getSeq()] = isrListener;
3387 }
3388 }
3389 }
3390
// Unsubscribe a listener from a set of accounts: first clear the per-InfoSub
// bookkeeping, then remove the listener from the server-side map via
// unsubAccountInternal. NOTE(review): the name line (listing 3392) is
// missing from this capture.
3391 void
3393 InfoSub::ref isrListener,
3394 hash_set<AccountID> const& vnaAccountIDs,
3395 bool rt)
3396 {
3397 for (auto const& naAccountID : vnaAccountIDs)
3398 {
3399 // Remove from the InfoSub
3400 isrListener->deleteSubAccountInfo(naAccountID, rt);
3401 }
3402
3403 // Remove from the server
3404 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3405 }
3406
// Remove a listener (by sequence number) from the server-side per-account
// subscription map, erasing the whole map entry when the last listener for
// an account is gone. NOTE(review): listings 3408 (name line) and 3413
// (presumably the subscription-lock acquisition) are missing from this
// capture.
3407 void
3409 std::uint64_t uSeq,
3410 hash_set<AccountID> const& vnaAccountIDs,
3411 bool rt)
3412 {
3414
3415 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3416
3417 for (auto const& naAccountID : vnaAccountIDs)
3418 {
3419 auto simIterator = subMap.find(naAccountID);
3420
3421 if (simIterator != subMap.end())
3422 {
3423 // Found
3424 simIterator->second.erase(uSeq);
3425
3426 if (simIterator->second.empty())
3427 {
3428 // Don't need hash entry.
3429 subMap.erase(simIterator);
3430 }
3431 }
3432 }
3433 }
3434
// NetworkOPsImp::addAccountHistoryJob (name grounded by the UNREACHABLE
// message inside): schedule a background job that walks a subscribed
// account's transaction history backward from the subscription point toward
// the genesis ledger, streaming each transaction to the subscriber with
// decreasing account_history_tx_index values. The job stops on: reaching
// genesis, finding the account's first transaction, losing the subscriber,
// a database error, or an explicit stopHistorical_ request.
// NOTE(review): numerous lines are missing from this capture (listings
// 3436, 3462-3463, 3543-3546, 3551-3552, 3597, 3611, 3615, 3645, 3655) —
// among them the function signature, job-queue call heads, the getMoreTxns
// marker parameter/return type, and some local declarations. Confirm
// against the upstream source before relying on this listing.
3435 void
3437 {
3438 enum DatabaseType { Sqlite, None };
3439 static const auto databaseType = [&]() -> DatabaseType {
3440 // Use a dynamic_cast to return DatabaseType::None
3441 // on failure.
3442 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3443 {
3444 return DatabaseType::Sqlite;
3445 }
3446 return DatabaseType::None;
3447 }();
3448
3449 if (databaseType == DatabaseType::None)
3450 {
// Without a relational database there is no history to stream: report
// an internal error to the client and tear down the subscription.
3451 JLOG(m_journal.error())
3452 << "AccountHistory job for account "
3453 << toBase58(subInfo.index_->accountId_) << " no database";
3454 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3455 {
3456 sptr->send(rpcError(rpcINTERNAL), true);
3457 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3458 }
3459 return;
3460 }
3461
3464 "AccountHistoryTxStream",
3465 [this, dbType = databaseType, subInfo]() {
3466 auto const& accountId = subInfo.index_->accountId_;
3467 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3468 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3469
3470 JLOG(m_journal.trace())
3471 << "AccountHistory job for account " << toBase58(accountId)
3472 << " started. lastLedgerSeq=" << lastLedgerSeq;
3473
3474 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3475 std::shared_ptr<TxMeta> const& meta) -> bool {
3476 /*
3477 * genesis account: first tx is the one with seq 1
3478 * other account: first tx is the one created the account
3479 */
3480 if (accountId == genesisAccountId)
3481 {
3482 auto stx = tx->getSTransaction();
3483 if (stx->getAccountID(sfAccount) == accountId &&
3484 stx->getSeqProxy().value() == 1)
3485 return true;
3486 }
3487
// An account-creating transaction shows the account inside the
// NewFields of an AccountRoot metadata node.
3488 for (auto& node : meta->getNodes())
3489 {
3490 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3491 continue;
3492
3493 if (node.isFieldPresent(sfNewFields))
3494 {
3495 if (auto inner = dynamic_cast<const STObject*>(
3496 node.peekAtPField(sfNewFields));
3497 inner)
3498 {
3499 if (inner->isFieldPresent(sfAccount) &&
3500 inner->getAccountID(sfAccount) == accountId)
3501 {
3502 return true;
3503 }
3504 }
3505 }
3506 }
3507
3508 return false;
3509 };
3510
// Send a plain JSON object; optionally unsubscribe afterwards.
// Returns false when the subscriber has gone away.
3511 auto send = [&](Json::Value const& jvObj,
3512 bool unsubscribe) -> bool {
3513 if (auto sptr = subInfo.sinkWptr_.lock())
3514 {
3515 sptr->send(jvObj, true);
3516 if (unsubscribe)
3517 unsubAccountHistory(sptr, accountId, false);
3518 return true;
3519 }
3520
3521 return false;
3522 };
3523
// Same as send, but serializes for the subscriber's API version.
3524 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3525 bool unsubscribe) -> bool {
3526 if (auto sptr = subInfo.sinkWptr_.lock())
3527 {
3528 jvObj.visit(
3529 sptr->getApiVersion(), //
3530 [&](Json::Value const& jv) { sptr->send(jv, true); });
3531
3532 if (unsubscribe)
3533 unsubAccountHistory(sptr, accountId, false);
3534 return true;
3535 }
3536
3537 return false;
3538 };
3539
// Fetch the next page of transactions (newest first) from the
// relational database for the given ledger range.
3540 auto getMoreTxns =
3541 [&](std::uint32_t minLedger,
3542 std::uint32_t maxLedger,
3547 switch (dbType)
3548 {
3549 case Sqlite: {
3550 auto db = static_cast<SQLiteDatabase*>(
3553 accountId, minLedger, maxLedger, marker, 0, true};
3554 return db->newestAccountTxPage(options);
3555 }
3556 default: {
3557 UNREACHABLE(
3558 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3559 "getMoreTxns : invalid database type");
3560 return {};
3561 }
3562 }
3563 };
3564
3565 /*
3566 * search backward until the genesis ledger or asked to stop
3567 */
3568 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3569 {
// Charge the subscriber for each database batch; bail out if the
// subscriber disappeared.
3570 int feeChargeCount = 0;
3571 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3572 {
3573 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3574 ++feeChargeCount;
3575 }
3576 else
3577 {
3578 JLOG(m_journal.trace())
3579 << "AccountHistory job for account "
3580 << toBase58(accountId) << " no InfoSub. Fee charged "
3581 << feeChargeCount << " times.";
3582 return;
3583 }
3584
3585 // try to search in 1024 ledgers till reaching genesis ledgers
3586 auto startLedgerSeq =
3587 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3588 JLOG(m_journal.trace())
3589 << "AccountHistory job for account " << toBase58(accountId)
3590 << ", working on ledger range [" << startLedgerSeq << ","
3591 << lastLedgerSeq << "]";
3592
// Only query ranges that are fully covered by locally validated
// ledgers; otherwise reschedule and try again later.
3593 auto haveRange = [&]() -> bool {
3594 std::uint32_t validatedMin = UINT_MAX;
3595 std::uint32_t validatedMax = 0;
3596 auto haveSomeValidatedLedgers =
3598 validatedMin, validatedMax);
3599
3600 return haveSomeValidatedLedgers &&
3601 validatedMin <= startLedgerSeq &&
3602 lastLedgerSeq <= validatedMax;
3603 }();
3604
3605 if (!haveRange)
3606 {
3607 JLOG(m_journal.debug())
3608 << "AccountHistory reschedule job for account "
3609 << toBase58(accountId) << ", incomplete ledger range ["
3610 << startLedgerSeq << "," << lastLedgerSeq << "]";
3612 return;
3613 }
3614
3616 while (!subInfo.index_->stopHistorical_)
3617 {
3618 auto dbResult =
3619 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3620 if (!dbResult)
3621 {
3622 JLOG(m_journal.debug())
3623 << "AccountHistory job for account "
3624 << toBase58(accountId) << " getMoreTxns failed.";
3625 send(rpcError(rpcINTERNAL), true);
3626 return;
3627 }
3628
3629 auto const& txns = dbResult->first;
3630 marker = dbResult->second;
3631 size_t num_txns = txns.size();
3632 for (size_t i = 0; i < num_txns; ++i)
3633 {
3634 auto const& [tx, meta] = txns[i];
3635
3636 if (!tx || !meta)
3637 {
3638 JLOG(m_journal.debug())
3639 << "AccountHistory job for account "
3640 << toBase58(accountId) << " empty tx or meta.";
3641 send(rpcError(rpcINTERNAL), true);
3642 return;
3643 }
3644 auto curTxLedger =
3646 tx->getLedger());
3647 if (!curTxLedger)
3648 {
3649 JLOG(m_journal.debug())
3650 << "AccountHistory job for account "
3651 << toBase58(accountId) << " no ledger.";
3652 send(rpcError(rpcINTERNAL), true);
3653 return;
3654 }
3656 tx->getSTransaction();
3657 if (!stTxn)
3658 {
3659 JLOG(m_journal.debug())
3660 << "AccountHistory job for account "
3661 << toBase58(accountId)
3662 << " getSTransaction failed.";
3663 send(rpcError(rpcINTERNAL), true);
3664 return;
3665 }
3666
3667 auto const mRef = std::ref(*meta);
3668 auto const trR = meta->getResultTER();
3669 MultiApiJson jvTx =
3670 transJson(stTxn, trR, true, curTxLedger, mRef);
3671
// Historical entries count downward; a ledger boundary is marked
// when the next transaction comes from a different ledger.
3672 jvTx.set(
3673 jss::account_history_tx_index, txHistoryIndex--);
3674 if (i + 1 == num_txns ||
3675 txns[i + 1].first->getLedger() != tx->getLedger())
3676 jvTx.set(jss::account_history_boundary, true);
3677
3678 if (isFirstTx(tx, meta))
3679 {
3680 jvTx.set(jss::account_history_tx_first, true);
3681 sendMultiApiJson(jvTx, false);
3682
3683 JLOG(m_journal.trace())
3684 << "AccountHistory job for account "
3685 << toBase58(accountId)
3686 << " done, found last tx.";
3687 return;
3688 }
3689 else
3690 {
3691 sendMultiApiJson(jvTx, false);
3692 }
3693 }
3694
3695 if (marker)
3696 {
3697 JLOG(m_journal.trace())
3698 << "AccountHistory job for account "
3699 << toBase58(accountId)
3700 << " paging, marker=" << marker->ledgerSeq << ":"
3701 << marker->txnSeq;
3702 }
3703 else
3704 {
3705 break;
3706 }
3707 }
3708
3709 if (!subInfo.index_->stopHistorical_)
3710 {
// Move the window down and continue with the next 1024 ledgers.
3711 lastLedgerSeq = startLedgerSeq - 1;
3712 if (lastLedgerSeq <= 1)
3713 {
3714 JLOG(m_journal.trace())
3715 << "AccountHistory job for account "
3716 << toBase58(accountId)
3717 << " done, reached genesis ledger.";
3718 return;
3719 }
3720 }
3721 }
3722 });
3723 }
3724
// NetworkOPsImp::subAccountHistoryStart (name grounded by the UNREACHABLE
// message below): start streaming history for an account-history
// subscription once a validated ledger is available. Skips accounts that do
// not exist in the ledger, and skips the genesis account when it has never
// transacted (Sequence still 1).
// NOTE(review): listings 3726 (name line) and 3728 (the subInfo parameter
// line) are missing from this capture.
3725 void
3727 std::shared_ptr<ReadView const> const& ledger,
3729 {
// Record the boundary between "historical" (backfilled) and "forward"
// (live) transactions for this subscription.
3730 subInfo.index_->separationLedgerSeq_ = ledger->seq();
3731 auto const& accountId = subInfo.index_->accountId_;
3732 auto const accountKeylet = keylet::account(accountId);
3733 if (!ledger->exists(accountKeylet))
3734 {
3735 JLOG(m_journal.debug())
3736 << "subAccountHistoryStart, no account " << toBase58(accountId)
3737 << ", no need to add AccountHistory job.";
3738 return;
3739 }
3740 if (accountId == genesisAccountId)
3741 {
3742 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3743 {
3744 if (sleAcct->getFieldU32(sfSequence) == 1)
3745 {
3746 JLOG(m_journal.debug())
3747 << "subAccountHistoryStart, genesis account "
3748 << toBase58(accountId)
3749 << " does not have tx, no need to add AccountHistory job.";
3750 return;
3751 }
3752 }
3753 else
3754 {
3755 UNREACHABLE(
3756 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
3757 "access genesis account");
3758 return;
3759 }
3760 }
3761 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
3762 subInfo.index_->haveHistorical_ = true;
3763
3764 JLOG(m_journal.debug())
3765 << "subAccountHistoryStart, add AccountHistory job: accountId="
3766 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
3767
3768 addAccountHistoryJob(subInfo);
3769 }
3770
// Subscribe a listener to an account's full transaction history stream.
// Registers the subscription on the listener and in mSubAccountHistory,
// then starts backfill immediately if a validated ledger exists, or defers
// it until the first validated ledger arrives (see pubLedger). Returns
// rpcINVALID_PARAMS on a duplicate subscription, rpcSUCCESS otherwise.
// NOTE(review): listings 3771-3772 (return type and name), 3784-3785
// (presumably lock and the SubAccountHistoryInfoWeak construction head),
// 3790 and 3792 (map-insert statement heads) are missing from this capture.
3773 InfoSub::ref isrListener,
3774 AccountID const& accountId)
3775 {
3776 if (!isrListener->insertSubAccountHistory(accountId))
3777 {
3778 JLOG(m_journal.debug())
3779 << "subAccountHistory, already subscribed to account "
3780 << toBase58(accountId);
3781 return rpcINVALID_PARAMS;
3782 }
3783
3786 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
3787 auto simIterator = mSubAccountHistory.find(accountId);
3788 if (simIterator == mSubAccountHistory.end())
3789 {
3791 inner.emplace(isrListener->getSeq(), ahi);
3793 simIterator, std::make_pair(accountId, inner));
3794 }
3795 else
3796 {
3797 simIterator->second.emplace(isrListener->getSeq(), ahi);
3798 }
3799
3800 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
3801 if (ledger)
3802 {
3803 subAccountHistoryStart(ledger, ahi);
3804 }
3805 else
3806 {
3807 // The node does not have validated ledgers, so wait for
3808 // one before start streaming.
3809 // In this case, the subscription is also considered successful.
3810 JLOG(m_journal.debug())
3811 << "subAccountHistory, no validated ledger yet, delay start";
3812 }
3813
3814 return rpcSUCCESS;
3815 }
3816
// Unsubscribe a listener from an account's history stream. When historyOnly
// is true, only the backfill job is stopped and the live subscription
// survives; otherwise the listener's bookkeeping is cleared as well.
// NOTE(review): the name line (listing 3818) is missing from this capture.
3817 void
3819 InfoSub::ref isrListener,
3820 AccountID const& account,
3821 bool historyOnly)
3822 {
3823 if (!historyOnly)
3824 isrListener->deleteSubAccountHistory(account);
3825 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
3826 }
3827
// Server-side half of unsubAccountHistory: flag the backfill job to stop
// and, unless historyOnly, remove the listener (and the whole account entry
// when it becomes empty) from mSubAccountHistory.
// NOTE(review): listings 3829 (name line) and 3834 (presumably the
// subscription-lock acquisition) are missing from this capture.
3828 void
3830 std::uint64_t seq,
3831 const AccountID& account,
3832 bool historyOnly)
3833 {
3835 auto simIterator = mSubAccountHistory.find(account);
3836 if (simIterator != mSubAccountHistory.end())
3837 {
3838 auto& subInfoMap = simIterator->second;
3839 auto subInfoIter = subInfoMap.find(seq);
3840 if (subInfoIter != subInfoMap.end())
3841 {
// The background job polls this flag and terminates cooperatively.
3842 subInfoIter->second.index_->stopHistorical_ = true;
3843 }
3844
3845 if (!historyOnly)
3846 {
3847 simIterator->second.erase(seq);
3848 if (simIterator->second.empty())
3849 {
3850 mSubAccountHistory.erase(simIterator);
3851 }
3852 }
3853 JLOG(m_journal.debug())
3854 << "unsubAccountHistory, account " << toBase58(account)
3855 << ", historyOnly = " << (historyOnly ? "true" : "false");
3856 }
3857 }
3858
3859bool
3861{
3862 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
3863 listeners->addSubscriber(isrListener);
3864 else
3865 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
3866 return true;
3867}
3868
3869bool
3871{
3872 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
3873 listeners->removeSubscriber(uSeq);
3874
3875 return true;
3876}
3877
// NetworkOPsImp::acceptLedger (name grounded by the XRPL_ASSERT message
// below): force-close the current ledger in standalone mode (the
// `ledger_accept` RPC), then return the new current ledger's sequence.
// NOTE(review): listings 3878-3880 (return type and signature, including
// the consensusDelay parameter referenced below) and 3893 (presumably part
// of the simulate call) are missing from this capture.
3881 {
3882 // This code-path is exclusively used when the server is in standalone
3883 // mode via `ledger_accept`
3884 XRPL_ASSERT(
3885 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
3886
3887 if (!m_standalone)
3888 Throw<std::runtime_error>(
3889 "Operation only possible in STANDALONE mode.");
3890
3891 // FIXME Could we improve on this and remove the need for a specialized
3892 // API in Consensus?
3894 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
3895 return m_ledgerMaster.getCurrentLedger()->info().seq;
3896 }
3897
3898 // <-- bool: true=added, false=already there
// Subscribe a listener to the "ledger" stream and seed jvResult with the
// current validated-ledger snapshot (sequence, hash, close time, fees and
// reserves) plus the complete-ledger range when available.
// NOTE(review): listings 3900 (name line), 3916 (the condition guarding
// the validated_ledgers block), 3919 (its right-hand side) and 3922
// (presumably the subscription-lock acquisition) are missing from this
// capture — confirm against upstream.
3899 bool
3901 {
3902 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
3903 {
3904 jvResult[jss::ledger_index] = lpClosed->info().seq;
3905 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
3906 jvResult[jss::ledger_time] = Json::Value::UInt(
3907 lpClosed->info().closeTime.time_since_epoch().count());
// fee_ref is only reported pre-XRPFees for backward compatibility.
3908 if (!lpClosed->rules().enabled(featureXRPFees))
3909 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3910 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
3911 jvResult[jss::reserve_base] =
3912 lpClosed->fees().accountReserve(0).jsonClipped();
3913 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
3914 }
3915
3917 {
3918 jvResult[jss::validated_ledgers] =
3920 }
3921
// emplace returns false in .second when the listener was already present.
3923 return mStreamMaps[sLedger]
3924 .emplace(isrListener->getSeq(), isrListener)
3925 .second;
3926 }
3927
3928// <-- bool: true=added, false=already there
3929bool
3931{
3934 .emplace(isrListener->getSeq(), isrListener)
3935 .second;
3936}
3937
3938// <-- bool: true=erased, false=was not there
3939bool
3941{
3943 return mStreamMaps[sLedger].erase(uSeq);
3944}
3945
3946// <-- bool: true=erased, false=was not there
3947bool
3949{
3951 return mStreamMaps[sBookChanges].erase(uSeq);
3952}
3953
3954// <-- bool: true=added, false=already there
3955bool
3957{
3959 return mStreamMaps[sManifests]
3960 .emplace(isrListener->getSeq(), isrListener)
3961 .second;
3962}
3963
3964// <-- bool: true=erased, false=was not there
3965bool
3967{
3969 return mStreamMaps[sManifests].erase(uSeq);
3970}
3971
// --- Stream subscription management: server / transactions / rt_transactions
// / validations / peer_status / consensus, plus stateAccounting() ---
// NOTE(review): signature and lock-guard lines between functions are missing
// from this extraction (gaps in the embedded numbering); code left untouched.
3972// <-- bool: true=added, false=already there
3973bool
3975 InfoSub::ref isrListener,
3976 Json::Value& jvResult,
3977 bool admin)
3978{
3979 uint256 uRandom;
3980
3981 if (m_standalone)
3982 jvResult[jss::stand_alone] = m_standalone;
3983
3984 // CHECKME: is it necessary to provide a random number here?
3985 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
3986
// Initial server-state snapshot returned with the subscription response.
3987 auto const& feeTrack = app_.getFeeTrack();
3988 jvResult[jss::random] = to_string(uRandom);
3989 jvResult[jss::server_status] = strOperatingMode(admin);
3990 jvResult[jss::load_base] = feeTrack.getLoadBase();
3991 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
3992 jvResult[jss::hostid] = getHostId(admin);
3993 jvResult[jss::pubkey_node] =
3995
3997 return mStreamMaps[sServer]
3998 .emplace(isrListener->getSeq(), isrListener)
3999 .second;
4000}
4001
4002// <-- bool: true=erased, false=was not there
4003bool
4005{
4007 return mStreamMaps[sServer].erase(uSeq);
4008}
4009
4010// <-- bool: true=added, false=already there
4011bool
4013{
4016 .emplace(isrListener->getSeq(), isrListener)
4017 .second;
4018}
4019
4020// <-- bool: true=erased, false=was not there
4021bool
4023{
4025 return mStreamMaps[sTransactions].erase(uSeq);
4026}
4027
4028// <-- bool: true=added, false=already there
4029bool
4031{
4034 .emplace(isrListener->getSeq(), isrListener)
4035 .second;
4036}
4037
4038// <-- bool: true=erased, false=was not there
4039bool
4041{
4043 return mStreamMaps[sRTTransactions].erase(uSeq);
4044}
4045
4046// <-- bool: true=added, false=already there
4047bool
4049{
4052 .emplace(isrListener->getSeq(), isrListener)
4053 .second;
4054}
4055
// Forward the state-accounting counters into the caller's JSON object.
4056void
4058{
4059 accounting_.json(obj);
4060}
4061
4062// <-- bool: true=erased, false=was not there
4063bool
4065{
4067 return mStreamMaps[sValidations].erase(uSeq);
4068}
4069
4070// <-- bool: true=added, false=already there
4071bool
4073{
4075 return mStreamMaps[sPeerStatus]
4076 .emplace(isrListener->getSeq(), isrListener)
4077 .second;
4078}
4079
4080// <-- bool: true=erased, false=was not there
4081bool
4083{
4085 return mStreamMaps[sPeerStatus].erase(uSeq);
4086}
4087
4088// <-- bool: true=added, false=already there
4089bool
4091{
4094 .emplace(isrListener->getSeq(), isrListener)
4095 .second;
4096}
4097
4098// <-- bool: true=erased, false=was not there
4099bool
4101{
4103 return mStreamMaps[sConsensusPhase].erase(uSeq);
4104}
4105
// --- URL-keyed RPC subscription registry (mRpcSubMap) ---
// NOTE(review): function signatures and the mSubLock guard lines are missing
// from this extraction (source 4106-4107, 4109, 4119-4120, 4122, 4130, 4132);
// code left untouched.
// findRpcSub: look up an existing subscription by URL; returns a null
// InfoSub::pointer when the URL is unknown.
4108{
4110
4111 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4112
4113 if (it != mRpcSubMap.end())
4114 return it->second;
4115
4116 return InfoSub::pointer();
4117}
4118
// addRpcSub: register (or keep) an entry for this URL and echo it back.
4121{
4123
4124 mRpcSubMap.emplace(strUrl, rspEntry);
4125
4126 return rspEntry;
4127}
4128
// tryRemoveRpcSub: remove the URL's entry only when no stream map still
// references the same subscriber; returns true on removal.
4129bool
4131{
4133 auto pInfo = findRpcSub(strUrl);
4134
4135 if (!pInfo)
4136 return false;
4137
4138 // check to see if any of the stream maps still hold a weak reference to
4139 // this entry before removing
4140 for (SubMapType const& map : mStreamMaps)
4141 {
4142 if (map.find(pInfo->getSeq()) != map.end())
4143 return false;
4144 }
4145 mRpcSubMap.erase(strUrl);
4146 return true;
4147}
4148
4149#ifndef USE_NEW_BOOK_PAGE
4150
4151// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4152// work, but it demonstrated poor performance.
4153//
// Build a JSON page of offers for one order book (jvResult["offers"]),
// walking directory pages from the book base. Tracks a running per-owner
// balance (umBalance) so repeated offers by one owner are funded against the
// same pool, and annotates each offer with quality, taker_gets_funded /
// taker_pays_funded (when partially funded) and owner_funds (first offer per
// owner only). This is the legacy code path kept because the replacement
// below showed poor performance.
// NOTE(review): the extraction is missing source lines 4155-4156 (signature
// start), 4167 (presumably the umBalance declaration) and 4272 (presumably
// the freeze-handling argument to accountHolds) - confirm against the
// repository; code left untouched.
4154void
4157 Book const& book,
4158 AccountID const& uTakerID,
4159 bool const bProof,
4160 unsigned int iLimit,
4161 Json::Value const& jvMarker,
4162 Json::Value& jvResult)
4163{ // CAUTION: This is the old get book page logic
4164 Json::Value& jvOffers =
4165 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4166
4168 const uint256 uBookBase = getBookBase(book);
4169 const uint256 uBookEnd = getQualityNext(uBookBase);
4170 uint256 uTipIndex = uBookBase;
4171
4172 if (auto stream = m_journal.trace())
4173 {
4174 stream << "getBookPage:" << book;
4175 stream << "getBookPage: uBookBase=" << uBookBase;
4176 stream << "getBookPage: uBookEnd=" << uBookEnd;
4177 stream << "getBookPage: uTipIndex=" << uTipIndex;
4178 }
4179
4180 ReadView const& view = *lpLedger;
4181
4182 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4183 isGlobalFrozen(view, book.in.account);
4184
4185 bool bDone = false;
4186 bool bDirectAdvance = true;
4187
4188 std::shared_ptr<SLE const> sleOfferDir;
4189 uint256 offerIndex;
4190 unsigned int uBookEntry;
4191 STAmount saDirRate;
4192
4193 auto const rate = transferRate(view, book.out.account);
4194 auto viewJ = app_.journal("View");
4195
4196 while (!bDone && iLimit-- > 0)
4197 {
// Advance to the next non-empty directory page when the current one
// is exhausted (or on the first iteration).
4198 if (bDirectAdvance)
4199 {
4200 bDirectAdvance = false;
4201
4202 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4203
4204 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4205 if (ledgerIndex)
4206 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4207 else
4208 sleOfferDir.reset();
4209
4210 if (!sleOfferDir)
4211 {
4212 JLOG(m_journal.trace()) << "getBookPage: bDone";
4213 bDone = true;
4214 }
4215 else
4216 {
4217 uTipIndex = sleOfferDir->key();
4218 saDirRate = amountFromQuality(getQuality(uTipIndex));
4219
4220 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4221
4222 JLOG(m_journal.trace())
4223 << "getBookPage: uTipIndex=" << uTipIndex;
4224 JLOG(m_journal.trace())
4225 << "getBookPage: offerIndex=" << offerIndex;
4226 }
4227 }
4228
4229 if (!bDone)
4230 {
4231 auto sleOffer = view.read(keylet::offer(offerIndex));
4232
4233 if (sleOffer)
4234 {
4235 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4236 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4237 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4238 STAmount saOwnerFunds;
4239 bool firstOwnerOffer(true);
4240
4241 if (book.out.account == uOfferOwnerID)
4242 {
4243 // If an offer is selling issuer's own IOUs, it is fully
4244 // funded.
4245 saOwnerFunds = saTakerGets;
4246 }
4247 else if (bGlobalFreeze)
4248 {
4249 // If either asset is globally frozen, consider all offers
4250 // that aren't ours to be totally unfunded
4251 saOwnerFunds.clear(book.out);
4252 }
4253 else
4254 {
4255 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4256 if (umBalanceEntry != umBalance.end())
4257 {
4258 // Found in running balance table.
4259
4260 saOwnerFunds = umBalanceEntry->second;
4261 firstOwnerOffer = false;
4262 }
4263 else
4264 {
4265 // Did not find balance in table.
4266
4267 saOwnerFunds = accountHolds(
4268 view,
4269 uOfferOwnerID,
4270 book.out.currency,
4271 book.out.account,
4273 viewJ);
4274
4275 if (saOwnerFunds < beast::zero)
4276 {
4277 // Treat negative funds as zero.
4278
4279 saOwnerFunds.clear();
4280 }
4281 }
4282 }
4283
4284 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4285
4286 STAmount saTakerGetsFunded;
4287 STAmount saOwnerFundsLimit = saOwnerFunds;
4288 Rate offerRate = parityRate;
4289
4290 if (rate != parityRate
4291 // Have a transfer fee.
4292 && uTakerID != book.out.account
4293 // Not taking offers of own IOUs.
4294 && book.out.account != uOfferOwnerID)
4295 // Offer owner not issuing ownfunds
4296 {
4297 // Need to charge a transfer fee to offer owner.
4298 offerRate = rate;
4299 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4300 }
4301
4302 if (saOwnerFundsLimit >= saTakerGets)
4303 {
4304 // Sufficient funds no shenanigans.
4305 saTakerGetsFunded = saTakerGets;
4306 }
4307 else
4308 {
4309 // Only provide, if not fully funded.
4310
4311 saTakerGetsFunded = saOwnerFundsLimit;
4312
4313 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
// Cap taker_pays_funded at the offer's asking amount.
4314 std::min(
4315 saTakerPays,
4316 multiply(
4317 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4318 .setJson(jvOffer[jss::taker_pays_funded]);
4319 }
4320
4321 STAmount saOwnerPays = (parityRate == offerRate)
4322 ? saTakerGetsFunded
4323 : std::min(
4324 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4325
// Deduct what this offer consumes from the owner's running balance.
4326 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4327
4328 // Include all offers funded and unfunded
4329 Json::Value& jvOf = jvOffers.append(jvOffer);
4330 jvOf[jss::quality] = saDirRate.getText();
4331
4332 if (firstOwnerOffer)
4333 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4334 }
4335 else
4336 {
4337 JLOG(m_journal.warn()) << "Missing offer";
4338 }
4339
4340 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4341 {
4342 bDirectAdvance = true;
4343 }
4344 else
4345 {
4346 JLOG(m_journal.trace())
4347 << "getBookPage: offerIndex=" << offerIndex;
4348 }
4349 }
4350 }
4351
4352 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4353 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4354}
4355
4356#else
4357
4358// This is the new code that uses the book iterators
4359// It has temporarily been disabled
4360
// Alternative getBookPage built on OrderBookIterator. Compiled only when
// USE_NEW_BOOK_PAGE is defined; per the comments above it is currently
// disabled. Unlike the legacy path it emits only funded offers (or the
// taker's own), and does not emit owner_funds.
// NOTE(review): the extraction is missing source lines 4362-4363 (signature
// start), 4373 (presumably the umBalance declaration) and 4423 (presumably
// the freeze argument to accountHolds) - confirm against the repository;
// code left untouched.
4361void
4364 Book const& book,
4365 AccountID const& uTakerID,
4366 bool const bProof,
4367 unsigned int iLimit,
4368 Json::Value const& jvMarker,
4369 Json::Value& jvResult)
4370{
4371 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4372
4374
4375 MetaView lesActive(lpLedger, tapNONE, true);
4376 OrderBookIterator obIterator(lesActive, book);
4377
4378 auto const rate = transferRate(lesActive, book.out.account);
4379
4380 const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4381 lesActive.isGlobalFrozen(book.in.account);
4382
4383 while (iLimit-- > 0 && obIterator.nextOffer())
4384 {
4385 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4386 if (sleOffer)
4387 {
4388 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4389 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4390 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4391 STAmount saDirRate = obIterator.getCurrentRate();
4392 STAmount saOwnerFunds;
4393
4394 if (book.out.account == uOfferOwnerID)
4395 {
4396 // If offer is selling issuer's own IOUs, it is fully funded.
4397 saOwnerFunds = saTakerGets;
4398 }
4399 else if (bGlobalFreeze)
4400 {
4401 // If either asset is globally frozen, consider all offers
4402 // that aren't ours to be totally unfunded
4403 saOwnerFunds.clear(book.out);
4404 }
4405 else
4406 {
4407 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4408
4409 if (umBalanceEntry != umBalance.end())
4410 {
4411 // Found in running balance table.
4412
4413 saOwnerFunds = umBalanceEntry->second;
4414 }
4415 else
4416 {
4417 // Did not find balance in table.
4418
4419 saOwnerFunds = lesActive.accountHolds(
4420 uOfferOwnerID,
4421 book.out.currency,
4422 book.out.account,
4424
4425 if (saOwnerFunds.isNegative())
4426 {
4427 // Treat negative funds as zero.
4428
4429 saOwnerFunds.zero();
4430 }
4431 }
4432 }
4433
4434 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4435
4436 STAmount saTakerGetsFunded;
4437 STAmount saOwnerFundsLimit = saOwnerFunds;
4438 Rate offerRate = parityRate;
4439
4440 if (rate != parityRate
4441 // Have a transfer fee.
4442 && uTakerID != book.out.account
4443 // Not taking offers of own IOUs.
4444 && book.out.account != uOfferOwnerID)
4445 // Offer owner not issuing ownfunds
4446 {
4447 // Need to charge a transfer fee to offer owner.
4448 offerRate = rate;
4449 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4450 }
4451
4452 if (saOwnerFundsLimit >= saTakerGets)
4453 {
4454 // Sufficient funds no shenanigans.
4455 saTakerGetsFunded = saTakerGets;
4456 }
4457 else
4458 {
4459 // Only provide, if not fully funded.
4460 saTakerGetsFunded = saOwnerFundsLimit;
4461
4462 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4463
4464 // TODO(tom): The result of this expression is not used - what's
4465 // going on here?
4466 std::min(
4467 saTakerPays,
4468 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4469 .setJson(jvOffer[jss::taker_pays_funded]);
4470 }
4471
4472 STAmount saOwnerPays = (parityRate == offerRate)
4473 ? saTakerGetsFunded
4474 : std::min(
4475 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4476
4477 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4478
4479 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4480 {
4481 // Only provide funded offers and offers of the taker.
4482 Json::Value& jvOf = jvOffers.append(jvOffer);
4483 jvOf[jss::quality] = saDirRate.getText();
4484 }
4485 }
4486 }
4487
4488 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4489 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4490}
4491
4492#endif
4493
// Publish per-OperatingMode duration and transition counters from the
// state-accounting snapshot, after charging the in-progress interval to the
// current mode.
// NOTE(review): this extraction dropped the function signature (4495) and the
// gauge `.set(` call lines (4499, 4502-4503, 4506, 4509, 4511, 4514, 4517,
// 4520, 4523, 4525, 4528) - the orphaned argument lines below belong to those
// calls; code left untouched.
4494inline void
4496{
4497 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4498 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4500 counters[static_cast<std::size_t>(mode)].dur += current;
4501
4504 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4505 .dur.count());
4507 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4508 .dur.count());
4510 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4512 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4513 .dur.count());
4515 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4516
4518 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4519 .transitions);
4521 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4522 .transitions);
4524 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4526 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4527 .transitions);
4529 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4530}
4531
// StateAccounting::mode - record a transition into operating mode `om`:
// bump the target mode's transition count, capture the initial-sync duration
// on the first-ever entry into FULL, charge the elapsed interval to the
// outgoing mode, then switch. All updates are done under mutex_.
// NOTE(review): signature lines (4533, 4554) and a lock/`for(` line (4558,
// 4562) are missing from this extraction; code left untouched.
4532void
4534{
4535 auto now = std::chrono::steady_clock::now();
4536
4537 std::lock_guard lock(mutex_);
4538 ++counters_[static_cast<std::size_t>(om)].transitions;
// transitions == 1 means this is the first time the server reached FULL:
// record time-from-process-start as the initial sync duration.
4539 if (om == OperatingMode::FULL &&
4540 counters_[static_cast<std::size_t>(om)].transitions == 1)
4541 {
4542 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4543 now - processStart_)
4544 .count();
4545 }
4546 counters_[static_cast<std::size_t>(mode_)].dur +=
4547 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4548
4549 mode_ = om;
4550 start_ = now;
4551}
4552
// StateAccounting::json - emit per-mode transition counts and durations
// (state_accounting), the current state's running duration, and the
// initial-sync duration when one has been recorded.
4553void
4555{
4556 auto [counters, mode, start, initialSync] = getCounterData();
4557 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4559 counters[static_cast<std::size_t>(mode)].dur += current;
4560
4561 obj[jss::state_accounting] = Json::objectValue;
4563 i <= static_cast<std::size_t>(OperatingMode::FULL);
4564 ++i)
4565 {
4566 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4567 auto& state = obj[jss::state_accounting][states_[i]];
// Counts/durations are serialized as strings for JSON consumers.
4568 state[jss::transitions] = std::to_string(counters[i].transitions);
4569 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4570 }
4571 obj[jss::server_state_duration_us] = std::to_string(current.count());
4572 if (initialSync)
4573 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4574}
4575
4576//------------------------------------------------------------------------------
4577
// Factory: construct the concrete NetworkOPsImp behind the NetworkOPs
// interface, forwarding every dependency unchanged.
// NOTE(review): the return-type/name lines (4578-4579) and two parameter/
// argument lines (4581 clock, 4586/4599 ledger master) are missing from this
// extraction; code left untouched.
4580 Application& app,
4582 bool standalone,
4583 std::size_t minPeerCount,
4584 bool startvalid,
4585 JobQueue& job_queue,
4587 ValidatorKeys const& validatorKeys,
4588 boost::asio::io_service& io_svc,
4589 beast::Journal journal,
4590 beast::insight::Collector::ptr const& collector)
4591{
4592 return std::make_unique<NetworkOPsImp>(
4593 app,
4594 clock,
4595 standalone,
4596 minPeerCount,
4597 startvalid,
4598 job_queue,
4600 validatorKeys,
4601 io_svc,
4602 journal,
4603 collector);
4604}
4605
4606} // namespace ripple
T back_inserter(T... args)
T begin(T... args)
Decorator for streaming out compact json.
Definition: json_writer.h:317
Lightweight wrapper to tag static string.
Definition: json_value.h:61
Represents a JSON value.
Definition: json_value.h:147
Json::UInt UInt
Definition: json_value.h:154
Value get(UInt index, const Value &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Definition: json_value.cpp:841
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:891
bool isMember(const char *key) const
Return true if the object has a member named key.
Definition: json_value.cpp:943
A generic endpoint for log messages.
Definition: Journal.h:59
Stream error() const
Definition: Journal.h:335
Stream debug() const
Definition: Journal.h:317
Stream info() const
Definition: Journal.h:323
Stream trace() const
Severity stream access functions.
Definition: Journal.h:311
Stream warn() const
Definition: Journal.h:329
A metric for measuring an integral value.
Definition: Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition: Gauge.h:68
A reference to a handler for performing polled collection.
Definition: Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition: Book.h:34
Issue in
Definition: Book.h:36
Issue out
Definition: Book.h:37
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition: ClusterNode.h:45
std::uint32_t getLoadFee() const
Definition: ClusterNode.h:51
NetClock::time_point getReportTime() const
Definition: ClusterNode.h:57
PublicKey const & identity() const
Definition: ClusterNode.h:63
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:48
std::string SERVER_DOMAIN
Definition: Config.h:286
std::size_t NODE_SIZE
Definition: Config.h:220
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition: Config.h:167
int RELAY_UNTRUSTED_VALIDATIONS
Definition: Config.h:176
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition: InfoSub.h:54
AccountID account
Definition: Issue.h:39
Currency currency
Definition: Issue.h:38
A pool of threads to perform work.
Definition: JobQueue.h:56
Json::Value getJson(int c=0)
Definition: JobQueue.cpp:212
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
Definition: LedgerMaster.h:266
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:80
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
Definition: LoadFeeTrack.h:46
std::uint32_t getClusterFee() const
Definition: LoadFeeTrack.h:83
std::uint32_t getLocalFee() const
Definition: LoadFeeTrack.h:76
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:90
std::uint32_t getRemoteFee() const
Definition: LoadFeeTrack.h:69
std::uint32_t getLoadFactor() const
Definition: LoadFeeTrack.h:96
Manages load sources.
Definition: LoadManager.h:46
void resetDeadlockDetector()
Reset the deadlock detection timer.
Definition: LoadManager.cpp:63
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
Definition: NetworkOPs.cpp:141
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
Definition: NetworkOPs.cpp:151
std::chrono::steady_clock::time_point start_
Definition: NetworkOPs.cpp:153
static std::array< Json::StaticString const, 5 > const states_
Definition: NetworkOPs.cpp:157
std::chrono::steady_clock::time_point const processStart_
Definition: NetworkOPs.cpp:155
Transaction with input flags and results to be applied in batches.
Definition: NetworkOPs.cpp:92
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
Definition: NetworkOPs.cpp:101
std::shared_ptr< Transaction > const transaction
Definition: NetworkOPs.cpp:94
boost::asio::steady_timer accountHistoryTxTimer_
Definition: NetworkOPs.cpp:731
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
Definition: NetworkOPs.cpp:863
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::vector< TransactionStatus > mTransactions
Definition: NetworkOPs.cpp:775
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Definition: NetworkOPs.cpp:721
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
Definition: NetworkOPs.cpp:733
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
Definition: NetworkOPs.cpp:881
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
Definition: NetworkOPs.cpp:729
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
Definition: NetworkOPs.cpp:118
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
Definition: NetworkOPs.cpp:717
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
Definition: NetworkOPs.cpp:262
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Definition: NetworkOPs.cpp:744
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, const bool bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
Definition: NetworkOPs.cpp:730
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
Definition: NetworkOPs.cpp:124
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:224
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
Definition: NetworkOPs.cpp:724
beast::Journal m_journal
Definition: NetworkOPs.cpp:715
SubInfoMapType mSubAccount
Definition: NetworkOPs.cpp:739
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
Definition: NetworkOPs.cpp:779
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
Definition: NetworkOPs.cpp:996
bool unsubValidations(std::uint64_t uListener) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
Definition: NetworkOPs.cpp:728
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
Definition: NetworkOPs.cpp:934
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
Definition: NetworkOPs.cpp:759
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
Definition: NetworkOPs.cpp:769
std::atomic< bool > unlBlocked_
Definition: NetworkOPs.cpp:726
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
Definition: NetworkOPs.cpp:719
std::atomic< bool > needNetworkLedger_
Definition: NetworkOPs.cpp:723
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
Definition: NetworkOPs.cpp:777
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
Definition: NetworkOPs.cpp:893
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void endConsensus() override
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
Definition: NetworkOPs.cpp:737
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
Definition: NetworkOPs.cpp:924
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
Definition: NetworkOPs.cpp:761
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
Definition: NetworkOPs.cpp:875
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
Definition: NetworkOPs.cpp:772
void setMode(OperatingMode om) override
void stop() override
Definition: NetworkOPs.cpp:564
void clearNeedNetworkLedger() override
Definition: NetworkOPs.cpp:887
DispatchState mDispatchState
Definition: NetworkOPs.cpp:774
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
Definition: NetworkOPs.cpp:740
bool checkLastClosedLedger(const Overlay::PeerSequence &, uint256 &networkClosed)
void reportFeeChange() override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
Definition: NetworkOPs.cpp:899
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
Definition: NetworkOPs.cpp:780
Application & app_
Definition: NetworkOPs.cpp:714
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
Definition: NetworkOPs.cpp:735
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
Definition: NetworkOPs.cpp:742
std::atomic< bool > amendmentWarned_
Definition: NetworkOPs.cpp:725
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
Definition: NetworkOPs.cpp:905
bool beginConsensus(uint256 const &networkClosed) override
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition: NetworkOPs.h:87
void getCountsJson(Json::Value &obj)
Definition: Database.cpp:266
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:50
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:56
BookListeners::pointer getBookListeners(Book const &)
BookListeners::pointer makeBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, const AcceptedLedgerTx &alTx, MultiApiJson const &jvObj)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
Definition: RCLConsensus.h:51
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
Definition: RCLConsensus.h:443
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Definition: RCLConsensus.h:456
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:44
Represents a set of transactions in RCLConsensus.
Definition: RCLCxTx.h:66
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition: RFC1751.cpp:498
A view into a ledger.
Definition: ReadView.h:55
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition: STAmount.cpp:475
std::string getText() const override
Definition: STAmount.cpp:515
Issue const & issue() const
Definition: STAmount.h:487
std::optional< T > get(std::string const &name) const
Definition: BasicConfig.h:141
std::size_t size() const noexcept
Definition: Serializer.h:72
void const * data() const noexcept
Definition: Serializer.h:78
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition: TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition: TxQ.cpp:1777
static time_point now()
Definition: UptimeClock.cpp:63
Validator keys and manifest as set in configuration file.
Definition: ValidatorKeys.h:37
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition: XRPAmount.h:262
Json::Value jsonClipped() const
Definition: XRPAmount.h:218
iterator begin()
Definition: base_uint.h:135
static constexpr std::size_t size()
Definition: base_uint.h:525
bool isZero() const
Definition: base_uint.h:539
bool isNonZero() const
Definition: base_uint.h:544
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition: scope.h:231
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition: json_value.h:42
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
int Int
Definition: json_forwards.h:26
unsigned int UInt
Definition: json_forwards.h:27
void rngfill(void *buffer, std::size_t bytes, Generator &g)
Definition: rngfill.h:33
std::string const & getVersionString()
Server version.
Definition: BuildInfo.cpp:65
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition: BookChanges.h:47
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition: Indexes.cpp:160
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition: Indexes.cpp:356
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition: Indexes.cpp:250
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:30
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:106
STAmount divide(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:87
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition: STTx.cpp:604
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition: View.cpp:442
@ fhZERO_IF_FROZEN
Definition: View.h:80
@ fhIGNORE_FREEZE
Definition: View.h:80
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition: View.cpp:136
std::uint64_t getQuality(uint256 const &uBase)
Definition: Indexes.cpp:125
@ INCLUDED
Definition: Transaction.h:49
@ OBSOLETE
Definition: Transaction.h:54
@ INVALID
Definition: Transaction.h:48
error_code_i
Definition: ErrorCodes.h:40
@ rpcSUCCESS
Definition: ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition: ErrorCodes.h:84
@ rpcINTERNAL
Definition: ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
Definition: SecretKey.cpp:351
auto constexpr muldiv_max
Definition: mulDiv.h:29
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition: LocalTxs.cpp:197
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition: View.cpp:650
STAmount amountFromQuality(std::uint64_t rate)
Definition: STAmount.cpp:822
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition: ErrorCodes.h:167
@ warnRPC_UNSUPPORTED_MAJORITY
Definition: ErrorCodes.h:165
@ warnRPC_AMENDMENT_BLOCKED
Definition: ErrorCodes.h:166
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition: NetworkOPs.h:66
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:47
bool isTesSuccess(TER x)
Definition: TER.h:656
bool isTerRetry(TER x)
Definition: TER.h:650
AccountID calcAccountID(PublicKey const &pk)
Definition: AccountID.cpp:160
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Definition: csprng.cpp:99
Json::Value rpcError(int iError)
Definition: RPCErr.cpp:29
@ tefPAST_SEQ
Definition: TER.h:175
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
Definition: NetworkOPs.cpp:844
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
bool isTemMalformed(TER x)
Definition: TER.h:638
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition: View.cpp:147
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition: ApiVersion.h:102
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:75
@ tesSUCCESS
Definition: TER.h:242
uint256 getQualityNext(uint256 const &uBase)
Definition: Indexes.cpp:117
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition: View.cpp:308
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition: ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition: chrono.h:93
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:629
FeeSetup setup_FeeVote(Section const &section)
Definition: Config.cpp:1090
Number root(Number f, unsigned d)
Definition: Number.cpp:630
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Definition: mulDiv.cpp:27
ApplyFlags
Definition: ApplyView.h:30
@ tapFAIL_HARD
Definition: ApplyView.h:35
@ tapUNLIMITED
Definition: ApplyView.h:42
@ tapNONE
Definition: ApplyView.h:31
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:37
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition: Seed.cpp:69
@ terQUEUED
Definition: TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition: TER.cpp:236
@ jtNETOP_CLUSTER
Definition: Job.h:75
@ jtCLIENT_FEE_CHANGE
Definition: Job.h:47
@ jtTRANSACTION
Definition: Job.h:62
@ jtTXN_PROC
Definition: Job.h:82
@ jtCLIENT_CONSENSUS
Definition: Job.h:48
@ jtBATCH
Definition: Job.h:65
@ jtCLIENT_ACCT_HIST
Definition: Job.h:49
uint256 getBookBase(Book const &book)
Definition: Indexes.cpp:98
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition: View.cpp:173
static std::uint32_t trunc32(std::uint64_t v)
@ temBAD_SIGNATURE
Definition: TER.h:105
static auto const genesisAccountId
Definition: NetworkOPs.cpp:857
STL namespace.
T ref(T... args)
T reset(T... args)
T set_intersection(T... args)
T str(T... args)
std::string serialized
The manifest in serialized form.
Definition: Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition: Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition: Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition: Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Definition: Manifest.h:86
Server fees published on server subscription.
Definition: NetworkOPs.cpp:201
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
Definition: NetworkOPs.cpp:220
bool operator==(ServerFeeSummary const &b) const
Definition: NetworkOPs.cpp:212
beast::insight::Gauge full_transitions
Definition: NetworkOPs.cpp:831
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:786
beast::insight::Hook hook
Definition: NetworkOPs.cpp:820
beast::insight::Gauge connected_duration
Definition: NetworkOPs.cpp:822
beast::insight::Gauge tracking_duration
Definition: NetworkOPs.cpp:824
beast::insight::Gauge connected_transitions
Definition: NetworkOPs.cpp:828
beast::insight::Gauge disconnected_transitions
Definition: NetworkOPs.cpp:827
beast::insight::Gauge syncing_duration
Definition: NetworkOPs.cpp:823
beast::insight::Gauge tracking_transitions
Definition: NetworkOPs.cpp:830
beast::insight::Gauge full_duration
Definition: NetworkOPs.cpp:825
beast::insight::Gauge disconnected_duration
Definition: NetworkOPs.cpp:821
beast::insight::Gauge syncing_transitions
Definition: NetworkOPs.cpp:829
SubAccountHistoryIndex(AccountID const &accountId)
Definition: NetworkOPs.cpp:678
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:697
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:692
Represents a transfer rate.
Definition: Rate.h:38
Data format for exchanging consumption information across peers.
Definition: Gossip.h:30
std::vector< Item > items
Definition: Gossip.h:42
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition: TxQ.h:162
void set(const char *key, auto const &v)
Definition: MultiApiJson.h:83
IsMemberResult isMember(const char *key) const
Definition: MultiApiJson.h:94
Select all peers (except optional excluded) that are in our cluster.
Definition: predicates.h:137
Sends a message to all peers.
Definition: predicates.h:32
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)