rippled
Loading...
Searching...
No Matches
NetworkOPs.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/misc/AmendmentTable.h>
32#include <xrpld/app/misc/DeliverMax.h>
33#include <xrpld/app/misc/HashRouter.h>
34#include <xrpld/app/misc/LoadFeeTrack.h>
35#include <xrpld/app/misc/NetworkOPs.h>
36#include <xrpld/app/misc/Transaction.h>
37#include <xrpld/app/misc/TxQ.h>
38#include <xrpld/app/misc/ValidatorKeys.h>
39#include <xrpld/app/misc/ValidatorList.h>
40#include <xrpld/app/misc/detail/AccountTxPaging.h>
41#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
42#include <xrpld/app/tx/apply.h>
43#include <xrpld/consensus/Consensus.h>
44#include <xrpld/consensus/ConsensusParms.h>
45#include <xrpld/overlay/Cluster.h>
46#include <xrpld/overlay/Overlay.h>
47#include <xrpld/overlay/predicates.h>
48#include <xrpld/perflog/PerfLog.h>
49#include <xrpld/rpc/BookChanges.h>
50#include <xrpld/rpc/DeliveredAmount.h>
51#include <xrpld/rpc/MPTokenIssuanceID.h>
52#include <xrpld/rpc/ServerHandler.h>
53#include <xrpl/basics/CanProcess.h>
54#include <xrpl/basics/UptimeClock.h>
55#include <xrpl/basics/mulDiv.h>
56#include <xrpl/basics/safe_cast.h>
57#include <xrpl/beast/rfc2616.h>
58#include <xrpl/beast/utility/rngfill.h>
59#include <xrpl/crypto/RFC1751.h>
60#include <xrpl/crypto/csprng.h>
61#include <xrpl/json/to_string.h>
62#include <xrpl/protocol/BuildInfo.h>
63#include <xrpl/protocol/Feature.h>
64#include <xrpl/protocol/MultiApiJson.h>
65#include <xrpl/protocol/RPCErr.h>
66#include <xrpl/protocol/STParsedJSON.h>
67#include <xrpl/protocol/jss.h>
68#include <xrpl/resource/Fees.h>
69#include <xrpl/resource/ResourceManager.h>
70#include <boost/asio/ip/host_name.hpp>
71#include <boost/asio/steady_timer.hpp>
72
73#include <algorithm>
74#include <exception>
75#include <mutex>
76#include <optional>
77#include <set>
78#include <string>
79#include <tuple>
80#include <unordered_map>
81#include <utility>
82
83namespace ripple {
84
85class NetworkOPsImp final : public NetworkOPs
86{
92 {
93 public:
95 bool const admin;
96 bool const local;
98 bool applied = false;
100
103 bool a,
104 bool l,
105 FailHard f)
106 : transaction(t), admin(a), local(l), failType(f)
107 {
108 XRPL_ASSERT(
110 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
111 "valid inputs");
112 }
113 };
114
118 enum class DispatchState : unsigned char {
119 none,
120 scheduled,
121 running,
122 };
123
125
141 {
142 struct Counters
143 {
144 explicit Counters() = default;
145
148 };
149
153 std::chrono::steady_clock::time_point start_ =
155 std::chrono::steady_clock::time_point const processStart_ = start_;
158
159 public:
161 {
163 .transitions = 1;
164 }
165
172 void
174
180 void
181 json(Json::Value& obj) const;
182
184 {
186 decltype(mode_) mode;
187 decltype(start_) start;
189 };
190
193 {
196 }
197 };
198
201 {
202 ServerFeeSummary() = default;
203
205 XRPAmount fee,
206 TxQ::Metrics&& escalationMetrics,
207 LoadFeeTrack const& loadFeeTrack);
208 bool
209 operator!=(ServerFeeSummary const& b) const;
210
211 bool
213 {
214 return !(*this != b);
215 }
216
221 };
222
223public:
225 Application& app,
227 bool standalone,
228 std::size_t minPeerCount,
229 bool start_valid,
230 JobQueue& job_queue,
232 ValidatorKeys const& validatorKeys,
233 boost::asio::io_service& io_svc,
234 beast::Journal journal,
235 beast::insight::Collector::ptr const& collector)
236 : app_(app)
237 , m_journal(journal)
240 , heartbeatTimer_(io_svc)
241 , clusterTimer_(io_svc)
242 , accountHistoryTxTimer_(io_svc)
243 , mConsensus(
244 app,
246 setup_FeeVote(app_.config().section("voting")),
247 app_.logs().journal("FeeVote")),
249 *m_localTX,
250 app.getInboundTransactions(),
251 beast::get_abstract_clock<std::chrono::steady_clock>(),
252 validatorKeys,
253 app_.logs().journal("LedgerConsensus"))
255 , m_job_queue(job_queue)
256 , m_standalone(standalone)
257 , minPeerCount_(start_valid ? 0 : minPeerCount)
258 , m_stats(std::bind(&NetworkOPsImp::collect_metrics, this), collector)
259 {
260 }
261
262 ~NetworkOPsImp() override
263 {
264 // This clear() is necessary to ensure the shared_ptrs in this map get
265 // destroyed NOW because the objects in this map invoke methods on this
266 // class when they are destroyed
268 }
269
270public:
272 getOperatingMode() const override;
273
275 strOperatingMode(OperatingMode const mode, bool const admin) const override;
276
278 strOperatingMode(bool const admin = false) const override;
279
280 //
281 // Transaction operations.
282 //
283
284 // Must complete immediately.
285 void
287
288 void
290 std::shared_ptr<Transaction>& transaction,
291 bool bUnlimited,
292 bool bLocal,
293 FailHard failType) override;
294
303 void
306 bool bUnlimited,
307 FailHard failType);
308
318 void
321 bool bUnlimited,
322 FailHard failtype);
323
327 void
329
335 void
337
338 //
339 // Owner functions.
340 //
341
345 AccountID const& account) override;
346
347 //
348 // Book functions.
349 //
350
351 void
354 Book const&,
355 AccountID const& uTakerID,
356 const bool bProof,
357 unsigned int iLimit,
358 Json::Value const& jvMarker,
359 Json::Value& jvResult) override;
360
361 // Ledger proposal/close functions.
362 bool
364
365 bool
368 std::string const& source) override;
369
370 void
371 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
372
373 // Network state machine.
374
375 // Used for the "jump" case.
376private:
377 void
379 bool
381
382public:
383 bool
384 beginConsensus(uint256 const& networkClosed) override;
385 void
386 endConsensus() override;
387 void
388 setStandAlone() override;
389
393 void
394 setStateTimer() override;
395
396 void
397 setNeedNetworkLedger() override;
398 void
399 clearNeedNetworkLedger() override;
400 bool
401 isNeedNetworkLedger() override;
402 bool
403 isFull() override;
404
405 void
406 setMode(OperatingMode om, const char* reason) override;
407
408 bool
409 isBlocked() override;
410 bool
411 isAmendmentBlocked() override;
412 void
413 setAmendmentBlocked() override;
414 bool
415 isAmendmentWarned() override;
416 void
417 setAmendmentWarned() override;
418 void
419 clearAmendmentWarned() override;
420 bool
421 isUNLBlocked() override;
422 void
423 setUNLBlocked() override;
424 void
425 clearUNLBlocked() override;
426 void
427 consensusViewChange() override;
428
430 getConsensusInfo() override;
432 getServerInfo(bool human, bool admin, bool counters) override;
433 void
434 clearLedgerFetch() override;
436 getLedgerFetchInfo() override;
439 std::optional<std::chrono::milliseconds> consensusDelay) override;
440 void
441 reportFeeChange() override;
442 void
444
445 void
446 updateLocalTx(ReadView const& view) override;
448 getLocalTxCount() override;
449
450 //
451 // Monitoring: publisher side.
452 //
453 void
454 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
455 void
458 std::shared_ptr<STTx const> const& transaction,
459 TER result) override;
460 void
461 pubValidation(std::shared_ptr<STValidation> const& val) override;
462
463 //--------------------------------------------------------------------------
464 //
465 // InfoSub::Source.
466 //
467 void
469 InfoSub::ref ispListener,
470 hash_set<AccountID> const& vnaAccountIDs,
471 bool rt) override;
472 void
474 InfoSub::ref ispListener,
475 hash_set<AccountID> const& vnaAccountIDs,
476 bool rt) override;
477
478 // Just remove the subscription from the tracking
479 // not from the InfoSub. Needed for InfoSub destruction
480 void
482 std::uint64_t seq,
483 hash_set<AccountID> const& vnaAccountIDs,
484 bool rt) override;
485
487 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
488 override;
489 void
491 InfoSub::ref ispListener,
492 AccountID const& account,
493 bool historyOnly) override;
494
495 void
497 std::uint64_t seq,
498 AccountID const& account,
499 bool historyOnly) override;
500
501 bool
502 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
503 bool
504 unsubLedger(std::uint64_t uListener) override;
505
506 bool
507 subBookChanges(InfoSub::ref ispListener) override;
508 bool
509 unsubBookChanges(std::uint64_t uListener) override;
510
511 bool
512 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
513 override;
514 bool
515 unsubServer(std::uint64_t uListener) override;
516
517 bool
518 subBook(InfoSub::ref ispListener, Book const&) override;
519 bool
520 unsubBook(std::uint64_t uListener, Book const&) override;
521
522 bool
523 subManifests(InfoSub::ref ispListener) override;
524 bool
525 unsubManifests(std::uint64_t uListener) override;
526 void
527 pubManifest(Manifest const&) override;
528
529 bool
530 subTransactions(InfoSub::ref ispListener) override;
531 bool
532 unsubTransactions(std::uint64_t uListener) override;
533
534 bool
535 subRTTransactions(InfoSub::ref ispListener) override;
536 bool
537 unsubRTTransactions(std::uint64_t uListener) override;
538
539 bool
540 subValidations(InfoSub::ref ispListener) override;
541 bool
542 unsubValidations(std::uint64_t uListener) override;
543
544 bool
545 subPeerStatus(InfoSub::ref ispListener) override;
546 bool
547 unsubPeerStatus(std::uint64_t uListener) override;
548 void
549 pubPeerStatus(std::function<Json::Value(void)> const&) override;
550
551 bool
552 subConsensus(InfoSub::ref ispListener) override;
553 bool
554 unsubConsensus(std::uint64_t uListener) override;
555
557 findRpcSub(std::string const& strUrl) override;
559 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
560 bool
561 tryRemoveRpcSub(std::string const& strUrl) override;
562
563 void
564 stop() override
565 {
566 {
567 boost::system::error_code ec;
568 heartbeatTimer_.cancel(ec);
569 if (ec)
570 {
571 JLOG(m_journal.error())
572 << "NetworkOPs: heartbeatTimer cancel error: "
573 << ec.message();
574 }
575
576 ec.clear();
577 clusterTimer_.cancel(ec);
578 if (ec)
579 {
580 JLOG(m_journal.error())
581 << "NetworkOPs: clusterTimer cancel error: "
582 << ec.message();
583 }
584
585 ec.clear();
586 accountHistoryTxTimer_.cancel(ec);
587 if (ec)
588 {
589 JLOG(m_journal.error())
590 << "NetworkOPs: accountHistoryTxTimer cancel error: "
591 << ec.message();
592 }
593 }
594 // Make sure that any waitHandlers pending in our timers are done.
595 using namespace std::chrono_literals;
596 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
597 }
598
599 void
600 stateAccounting(Json::Value& obj) override;
601
602private:
603 void
604 setTimer(
605 boost::asio::steady_timer& timer,
606 std::chrono::milliseconds const& expiry_time,
607 std::function<void()> onExpire,
608 std::function<void()> onError);
609 void
611 void
613 void
615 void
617
619 transJson(
620 std::shared_ptr<STTx const> const& transaction,
621 TER result,
622 bool validated,
625
626 void
629 AcceptedLedgerTx const& transaction,
630 bool last);
631
632 void
635 AcceptedLedgerTx const& transaction,
636 bool last);
637
638 void
641 std::shared_ptr<STTx const> const& transaction,
642 TER result);
643
644 void
645 pubServer();
646 void
648
650 getHostId(bool forAdmin);
651
652private:
656
657 /*
658 * With a validated ledger to separate history and future, the node
659 * streams historical txns with negative indexes starting from -1,
660 * and streams future txns starting from index 0.
661 * The SubAccountHistoryIndex struct maintains these indexes.
662 * It also has a flag stopHistorical_ for stopping streaming
663 * the historical txns.
664 */
666 {
668 // forward
670 // separate backward and forward
672 // history, backward
677
679 : accountId_(accountId)
680 , forwardTxIndex_(0)
683 , historyTxIndex_(-1)
684 , haveHistorical_(false)
685 , stopHistorical_(false)
686 {
687 }
688 };
690 {
693 };
695 {
698 };
701
705 void
709 void
711 void
713
716
718
720
722
727
729 boost::asio::steady_timer heartbeatTimer_;
730 boost::asio::steady_timer clusterTimer_;
731 boost::asio::steady_timer accountHistoryTxTimer_;
732
734
736
738
741
743
745
746 enum SubTypes {
747 sLedger, // Accepted ledgers.
748 sManifests, // Received validator manifests.
749 sServer, // When server changes connectivity state.
750 sTransactions, // All accepted transactions.
751 sRTTransactions, // All proposed and accepted transactions.
752 sValidations, // Received validations.
753 sPeerStatus, // Peer status changes.
754 sConsensusPhase, // Consensus phase
755 sBookChanges, // Per-ledger order book changes
756 sLastEntry // Any new entry must be ADDED ABOVE this one
757 };
758
760
762
764
765 // Whether we are in standalone mode.
766 bool const m_standalone;
767
768 // The number of nodes that we need to consider ourselves connected.
770
771 // Transaction batching.
776
778
781
782private:
783 struct Stats
784 {
785 template <class Handler>
787 Handler const& handler,
788 beast::insight::Collector::ptr const& collector)
789 : hook(collector->make_hook(handler))
790 , disconnected_duration(collector->make_gauge(
791 "State_Accounting",
792 "Disconnected_duration"))
793 , connected_duration(collector->make_gauge(
794 "State_Accounting",
795 "Connected_duration"))
797 collector->make_gauge("State_Accounting", "Syncing_duration"))
798 , tracking_duration(collector->make_gauge(
799 "State_Accounting",
800 "Tracking_duration"))
802 collector->make_gauge("State_Accounting", "Full_duration"))
803 , disconnected_transitions(collector->make_gauge(
804 "State_Accounting",
805 "Disconnected_transitions"))
806 , connected_transitions(collector->make_gauge(
807 "State_Accounting",
808 "Connected_transitions"))
809 , syncing_transitions(collector->make_gauge(
810 "State_Accounting",
811 "Syncing_transitions"))
812 , tracking_transitions(collector->make_gauge(
813 "State_Accounting",
814 "Tracking_transitions"))
816 collector->make_gauge("State_Accounting", "Full_transitions"))
817 {
818 }
819
826
832 };
833
834 std::mutex m_statsMutex; // Mutex to lock m_stats
836
837private:
838 void
840};
841
842//------------------------------------------------------------------------------
843
845 {"disconnected", "connected", "syncing", "tracking", "full"}};
846
848
856
857static auto const genesisAccountId = calcAccountID(
859 .first);
860
861//------------------------------------------------------------------------------
862inline OperatingMode
864{
865 return mMode;
866}
867
868inline std::string
869NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
870{
871 return strOperatingMode(mMode, admin);
872}
873
874inline void
876{
877 setMode(OperatingMode::FULL, "setStandAlone");
878}
879
880inline void
882{
883 needNetworkLedger_ = true;
884}
885
886inline void
888{
889 needNetworkLedger_ = false;
890}
891
892inline bool
894{
895 return needNetworkLedger_;
896}
897
898inline bool
900{
902}
903
906{
907 static std::string const hostname = boost::asio::ip::host_name();
908
909 if (forAdmin)
910 return hostname;
911
912 // For non-admin uses hash the node public key into a
913 // single RFC1751 word:
914 static std::string const shroudedHostId = [this]() {
915 auto const& id = app_.nodeIdentity();
916
917 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
918 }();
919
920 return shroudedHostId;
921}
922
923void
925{
927
928 // Only do this work if a cluster is configured
929 if (app_.cluster().size() != 0)
931}
932
933void
935 boost::asio::steady_timer& timer,
936 const std::chrono::milliseconds& expiry_time,
937 std::function<void()> onExpire,
938 std::function<void()> onError)
939{
940 // Only start the timer if waitHandlerCounter_ is not yet joined.
941 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
942 [this, onExpire, onError](boost::system::error_code const& e) {
943 if ((e.value() == boost::system::errc::success) &&
944 (!m_job_queue.isStopped()))
945 {
946 onExpire();
947 }
948 // Recover as best we can if an unexpected error occurs.
949 if (e.value() != boost::system::errc::success &&
950 e.value() != boost::asio::error::operation_aborted)
951 {
952 // Try again later and hope for the best.
953 JLOG(m_journal.error())
954 << "Timer got error '" << e.message()
955 << "'. Restarting timer.";
956 onError();
957 }
958 }))
959 {
960 timer.expires_from_now(expiry_time);
961 timer.async_wait(std::move(*optionalCountedHandler));
962 }
963}
964
965void
966NetworkOPsImp::setHeartbeatTimer()
967{
968 setTimer(
969 heartbeatTimer_,
970 mConsensus.parms().ledgerGRANULARITY,
971 [this]() {
972 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
973 processHeartbeatTimer();
974 });
975 },
976 [this]() { setHeartbeatTimer(); });
977}
978
979void
980NetworkOPsImp::setClusterTimer()
981{
982 using namespace std::chrono_literals;
983
984 setTimer(
985 clusterTimer_,
986 10s,
987 [this]() {
988 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
989 processClusterTimer();
990 });
991 },
992 [this]() { setClusterTimer(); });
993}
994
995void
996NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
997{
998 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
999 << toBase58(subInfo.index_->accountId_);
1000 using namespace std::chrono_literals;
1001 setTimer(
1002 accountHistoryTxTimer_,
1003 4s,
1004 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1005 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1006}
1007
1008void
1009NetworkOPsImp::processHeartbeatTimer()
1010{
1011 {
1012 std::unique_lock lock{app_.getMasterMutex()};
1013
1014 // VFALCO NOTE This is for diagnosing a crash on exit
1015 LoadManager& mgr(app_.getLoadManager());
1017
1018 std::size_t const numPeers = app_.overlay().size();
1019
1020 // do we have sufficient peers? If not, we are disconnected.
1021 if (numPeers < minPeerCount_)
1022 {
1023 if (mMode != OperatingMode::DISCONNECTED)
1024 {
1025 setMode(
1026 OperatingMode::DISCONNECTED,
1027 "Heartbeat: insufficient peers");
1028 JLOG(m_journal.warn())
1029 << "Node count (" << numPeers << ") has fallen "
1030 << "below required minimum (" << minPeerCount_ << ").";
1031 }
1032
1033 // MasterMutex lock need not be held to call setHeartbeatTimer()
1034 lock.unlock();
1035 // We do not call mConsensus.timerEntry until there are enough
1036 // peers providing meaningful inputs to consensus
1037 setHeartbeatTimer();
1038 return;
1039 }
1040
1041 if (mMode == OperatingMode::DISCONNECTED)
1042 {
1043 setMode(OperatingMode::CONNECTED, "Heartbeat: sufficient peers");
1044 JLOG(m_journal.info())
1045 << "Node count (" << numPeers << ") is sufficient.";
1046 }
1047
1048 // Check if the last validated ledger forces a change between these
1049 // states.
1050 if (mMode == OperatingMode::SYNCING)
1051 setMode(OperatingMode::SYNCING, "Heartbeat: check syncing");
1052 else if (mMode == OperatingMode::CONNECTED)
1053 setMode(OperatingMode::CONNECTED, "Heartbeat: check connected");
1054 }
1055
1056 mConsensus.timerEntry(app_.timeKeeper().closeTime());
1057
1058 const ConsensusPhase currPhase = mConsensus.phase();
1059 if (mLastConsensusPhase != currPhase)
1060 {
1061 reportConsensusStateChange(currPhase);
1062 mLastConsensusPhase = currPhase;
1063 }
1064
1065 setHeartbeatTimer();
1066}
1067
1068void
1069NetworkOPsImp::processClusterTimer()
1070{
1071 if (app_.cluster().size() == 0)
1072 return;
1073
1074 using namespace std::chrono_literals;
1075
1076 bool const update = app_.cluster().update(
1077 app_.nodeIdentity().first,
1078 "",
1079 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1080 ? app_.getFeeTrack().getLocalFee()
1081 : 0,
1082 app_.timeKeeper().now());
1083
1084 if (!update)
1085 {
1086 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1087 setClusterTimer();
1088 return;
1089 }
1090
1091 protocol::TMCluster cluster;
1092 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1093 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1094 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1095 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1096 n.set_nodeload(node.getLoadFee());
1097 if (!node.name().empty())
1098 n.set_nodename(node.name());
1099 });
1100
1101 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1102 for (auto& item : gossip.items)
1103 {
1104 protocol::TMLoadSource& node = *cluster.add_loadsources();
1105 node.set_name(to_string(item.address));
1106 node.set_cost(item.balance);
1107 }
1108 app_.overlay().foreach(send_if(
1109 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1110 peer_in_cluster()));
1111 setClusterTimer();
1112}
1113
1114//------------------------------------------------------------------------------
1115
1117NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
1118 const
1119{
1120 if (mode == OperatingMode::FULL && admin)
1121 {
1122 auto const consensusMode = mConsensus.mode();
1123 if (consensusMode != ConsensusMode::wrongLedger)
1124 {
1125 if (consensusMode == ConsensusMode::proposing)
1126 return "proposing";
1127
1128 if (mConsensus.validating())
1129 return "validating";
1130 }
1131 }
1132
1133 return states_[static_cast<std::size_t>(mode)];
1134}
1135
1136void
1137NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1138{
1139 if (isNeedNetworkLedger())
1140 {
1141 // Nothing we can do if we've never been in sync
1142 return;
1143 }
1144
1145 // this is an asynchronous interface
1146 auto const trans = sterilize(*iTrans);
1147
1148 auto const txid = trans->getTransactionID();
1149 auto const flags = app_.getHashRouter().getFlags(txid);
1150
1151 if ((flags & SF_BAD) != 0)
1152 {
1153 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1154 return;
1155 }
1156
1157 try
1158 {
1159 auto const [validity, reason] = checkValidity(
1160 app_.getHashRouter(),
1161 *trans,
1162 m_ledgerMaster.getValidatedRules(),
1163 app_.config());
1164
1165 if (validity != Validity::Valid)
1166 {
1167 JLOG(m_journal.warn())
1168 << "Submitted transaction invalid: " << reason;
1169 return;
1170 }
1171 }
1172 catch (std::exception const& ex)
1173 {
1174 JLOG(m_journal.warn())
1175 << "Exception checking transaction " << txid << ": " << ex.what();
1176
1177 return;
1178 }
1179
1180 std::string reason;
1181
1182 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1183
1184 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1185 auto t = tx;
1186 processTransaction(t, false, false, FailHard::no);
1187 });
1188}
1189
1190void
1191NetworkOPsImp::processTransaction(
1192 std::shared_ptr<Transaction>& transaction,
1193 bool bUnlimited,
1194 bool bLocal,
1195 FailHard failType)
1196{
1197 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1198 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1199
1200 if ((newFlags & SF_BAD) != 0)
1201 {
1202 // cached bad
1203 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1204 transaction->setStatus(INVALID);
1205 transaction->setResult(temBAD_SIGNATURE);
1206 return;
1207 }
1208
1209 // NOTE eahennis - I think this check is redundant,
1210 // but I'm not 100% sure yet.
1211 // If so, only cost is looking up HashRouter flags.
1212 auto const view = m_ledgerMaster.getCurrentLedger();
1213 auto const [validity, reason] = checkValidity(
1214 app_.getHashRouter(),
1215 *transaction->getSTransaction(),
1216 view->rules(),
1217 app_.config());
1218 XRPL_ASSERT(
1219 validity == Validity::Valid,
1220 "ripple::NetworkOPsImp::processTransaction : valid validity");
1221
1222 // Not concerned with local checks at this point.
1223 if (validity == Validity::SigBad)
1224 {
1225 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1226 transaction->setStatus(INVALID);
1227 transaction->setResult(temBAD_SIGNATURE);
1228 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1229 return;
1230 }
1231
1232 // canonicalize can change our pointer
1233 app_.getMasterTransaction().canonicalize(&transaction);
1234
1235 if (bLocal)
1236 doTransactionSync(transaction, bUnlimited, failType);
1237 else
1238 doTransactionAsync(transaction, bUnlimited, failType);
1239}
1240
1241void
1242NetworkOPsImp::doTransactionAsync(
1243 std::shared_ptr<Transaction> transaction,
1244 bool bUnlimited,
1245 FailHard failType)
1246{
1247 std::lock_guard lock(mMutex);
1248
1249 if (transaction->getApplying())
1250 return;
1251
1252 mTransactions.push_back(
1253 TransactionStatus(transaction, bUnlimited, false, failType));
1254 transaction->setApplying();
1255
1256 if (mDispatchState == DispatchState::none)
1257 {
1258 if (m_job_queue.addJob(
1259 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1260 {
1261 mDispatchState = DispatchState::scheduled;
1262 }
1263 }
1264}
1265
1266void
1267NetworkOPsImp::doTransactionSync(
1268 std::shared_ptr<Transaction> transaction,
1269 bool bUnlimited,
1270 FailHard failType)
1271{
1272 std::unique_lock<std::mutex> lock(mMutex);
1273
1274 if (!transaction->getApplying())
1275 {
1276 mTransactions.push_back(
1277 TransactionStatus(transaction, bUnlimited, true, failType));
1278 transaction->setApplying();
1279 }
1280
1281 do
1282 {
1283 if (mDispatchState == DispatchState::running)
1284 {
1285 // A batch processing job is already running, so wait.
1286 mCond.wait(lock);
1287 }
1288 else
1289 {
1290 apply(lock);
1291
1292 if (mTransactions.size())
1293 {
1294 // More transactions need to be applied, but by another job.
1295 if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
1296 transactionBatch();
1297 }))
1298 {
1299 mDispatchState = DispatchState::scheduled;
1300 }
1301 }
1302 }
1303 } while (transaction->getApplying());
1304}
1305
1306void
1307NetworkOPsImp::transactionBatch()
1308{
1309 std::unique_lock<std::mutex> lock(mMutex);
1310
1311 if (mDispatchState == DispatchState::running)
1312 return;
1313
1314 while (mTransactions.size())
1315 {
1316 apply(lock);
1317 }
1318}
1319
1320void
1321NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
1322{
1324 std::vector<TransactionStatus> transactions;
1325 mTransactions.swap(transactions);
1326 XRPL_ASSERT(
1327 !transactions.empty(),
1328 "ripple::NetworkOPsImp::apply : non-empty transactions");
1329 XRPL_ASSERT(
1330 mDispatchState != DispatchState::running,
1331 "ripple::NetworkOPsImp::apply : is not running");
1332
1333 mDispatchState = DispatchState::running;
1334
1335 batchLock.unlock();
1336
1337 {
1338 std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
1339 bool changed = false;
1340 {
1341 std::unique_lock ledgerLock{
1342 m_ledgerMaster.peekMutex(), std::defer_lock};
1343 std::lock(masterLock, ledgerLock);
1344
1345 app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
1346 for (TransactionStatus& e : transactions)
1347 {
1348 // we check before adding to the batch
1349 ApplyFlags flags = tapNONE;
1350 if (e.admin)
1351 flags |= tapUNLIMITED;
1352
1353 if (e.failType == FailHard::yes)
1354 flags |= tapFAIL_HARD;
1355
1356 auto const result = app_.getTxQ().apply(
1357 app_, view, e.transaction->getSTransaction(), flags, j);
1358 e.result = result.ter;
1359 e.applied = result.applied;
1360 changed = changed || result.applied;
1361 }
1362 return changed;
1363 });
1364 }
1365 if (changed)
1366 reportFeeChange();
1367
1368 std::optional<LedgerIndex> validatedLedgerIndex;
1369 if (auto const l = m_ledgerMaster.getValidatedLedger())
1370 validatedLedgerIndex = l->info().seq;
1371
1372 auto newOL = app_.openLedger().current();
1373 for (TransactionStatus& e : transactions)
1374 {
1375 e.transaction->clearSubmitResult();
1376
1377 if (e.applied)
1378 {
1379 pubProposedTransaction(
1380 newOL, e.transaction->getSTransaction(), e.result);
1381 e.transaction->setApplied();
1382 }
1383
1384 e.transaction->setResult(e.result);
1385
1386 if (isTemMalformed(e.result))
1387 app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);
1388
1389#ifdef DEBUG
1390 if (e.result != tesSUCCESS)
1391 {
1392 std::string token, human;
1393
1394 if (transResultInfo(e.result, token, human))
1395 {
1396 JLOG(m_journal.info())
1397 << "TransactionResult: " << token << ": " << human;
1398 }
1399 }
1400#endif
1401
1402 bool addLocal = e.local;
1403
1404 if (e.result == tesSUCCESS)
1405 {
1406 JLOG(m_journal.debug())
1407 << "Transaction is now included in open ledger";
1408 e.transaction->setStatus(INCLUDED);
1409
1410 auto const& txCur = e.transaction->getSTransaction();
1411 auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);
1412 if (txNext)
1413 {
1414 std::string reason;
1415 auto const trans = sterilize(*txNext);
1416 auto t = std::make_shared<Transaction>(trans, reason, app_);
1417 submit_held.emplace_back(t, false, false, FailHard::no);
1418 t->setApplying();
1419 }
1420 }
1421 else if (e.result == tefPAST_SEQ)
1422 {
1423 // duplicate or conflict
1424 JLOG(m_journal.info()) << "Transaction is obsolete";
1425 e.transaction->setStatus(OBSOLETE);
1426 }
1427 else if (e.result == terQUEUED)
1428 {
1429 JLOG(m_journal.debug())
1430 << "Transaction is likely to claim a"
1431 << " fee, but is queued until fee drops";
1432
1433 e.transaction->setStatus(HELD);
1434 // Add to held transactions, because it could get
1435 // kicked out of the queue, and this will try to
1436 // put it back.
1437 m_ledgerMaster.addHeldTransaction(e.transaction);
1438 e.transaction->setQueued();
1439 e.transaction->setKept();
1440 }
1441 else if (isTerRetry(e.result))
1442 {
1443 if (e.failType != FailHard::yes)
1444 {
1445 // transaction should be held
1446 JLOG(m_journal.debug())
1447 << "Transaction should be held: " << e.result;
1448 e.transaction->setStatus(HELD);
1449 m_ledgerMaster.addHeldTransaction(e.transaction);
1450 e.transaction->setKept();
1451 }
1452 }
1453 else
1454 {
1455 JLOG(m_journal.debug())
1456 << "Status other than success " << e.result;
1457 e.transaction->setStatus(INVALID);
1458 }
1459
1460 auto const enforceFailHard =
1461 e.failType == FailHard::yes && !isTesSuccess(e.result);
1462
1463 if (addLocal && !enforceFailHard)
1464 {
1465 m_localTX->push_back(
1466 m_ledgerMaster.getCurrentLedgerIndex(),
1467 e.transaction->getSTransaction());
1468 e.transaction->setKept();
1469 }
1470
1471 if ((e.applied ||
1472 ((mMode != OperatingMode::FULL) &&
1473 (e.failType != FailHard::yes) && e.local) ||
1474 (e.result == terQUEUED)) &&
1475 !enforceFailHard)
1476 {
1477 auto const toSkip =
1478 app_.getHashRouter().shouldRelay(e.transaction->getID());
1479
1480 if (toSkip)
1481 {
1482 protocol::TMTransaction tx;
1483 Serializer s;
1484
1485 e.transaction->getSTransaction()->add(s);
1486 tx.set_rawtransaction(s.data(), s.size());
1487 tx.set_status(protocol::tsCURRENT);
1488 tx.set_receivetimestamp(
1489 app_.timeKeeper().now().time_since_epoch().count());
1490 tx.set_deferred(e.result == terQUEUED);
1491 // FIXME: This should be when we received it
1492 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1493 e.transaction->setBroadcast();
1494 }
1495 }
1496
1497 if (validatedLedgerIndex)
1498 {
1499 auto [fee, accountSeq, availableSeq] =
1500 app_.getTxQ().getTxRequiredFeeAndSeq(
1501 *newOL, e.transaction->getSTransaction());
1502 e.transaction->setCurrentLedgerState(
1503 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1504 }
1505 }
1506 }
1507
1508 batchLock.lock();
1509
1510 for (TransactionStatus& e : transactions)
1511 e.transaction->clearApplying();
1512
1513 if (!submit_held.empty())
1514 {
1515 if (mTransactions.empty())
1516 mTransactions.swap(submit_held);
1517 else
1518 for (auto& e : submit_held)
1519 mTransactions.push_back(std::move(e));
1520 }
1521
1522 mCond.notify_all();
1523
1524 mDispatchState = DispatchState::none;
1525}
1526
1527//
1528// Owner functions
1529//
1530
1532NetworkOPsImp::getOwnerInfo(
1534 AccountID const& account)
1535{
1536 Json::Value jvObjects(Json::objectValue);
1537 auto root = keylet::ownerDir(account);
1538 auto sleNode = lpLedger->read(keylet::page(root));
1539 if (sleNode)
1540 {
1541 std::uint64_t uNodeDir;
1542
1543 do
1544 {
1545 for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1546 {
1547 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1548 XRPL_ASSERT(
1549 sleCur,
1550 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1551
1552 switch (sleCur->getType())
1553 {
1554 case ltOFFER:
1555 if (!jvObjects.isMember(jss::offers))
1556 jvObjects[jss::offers] =
1558
1559 jvObjects[jss::offers].append(
1560 sleCur->getJson(JsonOptions::none));
1561 break;
1562
1563 case ltRIPPLE_STATE:
1564 if (!jvObjects.isMember(jss::ripple_lines))
1565 {
1566 jvObjects[jss::ripple_lines] =
1568 }
1569
1570 jvObjects[jss::ripple_lines].append(
1571 sleCur->getJson(JsonOptions::none));
1572 break;
1573
1574 case ltACCOUNT_ROOT:
1575 case ltDIR_NODE:
1576 default:
1577 UNREACHABLE(
1578 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1579 "type");
1580 break;
1581 }
1582 }
1583
1584 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1585
1586 if (uNodeDir)
1587 {
1588 sleNode = lpLedger->read(keylet::page(root, uNodeDir));
1589 XRPL_ASSERT(
1590 sleNode,
1591 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1592 }
1593 } while (uNodeDir);
1594 }
1595
1596 return jvObjects;
1597}
1598
1599//
1600// Other
1601//
1602
inline bool
NetworkOPsImp::isBlocked()
{
    // True when the server cannot usefully follow the network: it is
    // either amendment blocked or its validator list (UNL) has expired.
    return isAmendmentBlocked() || isUNLBlocked();
}
1608
inline bool
NetworkOPsImp::isAmendmentBlocked()
{
    // Set by setAmendmentBlocked() when an unsupported amendment activates.
    return amendmentBlocked_;
}
1614
void
NetworkOPsImp::setAmendmentBlocked()
{
    // Record the blocked state and drop out of any synced operating mode;
    // setMode() keeps a blocked server at CONNECTED or below.
    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED, "setAmendmentBlocked");
}
1621
inline bool
NetworkOPsImp::isAmendmentWarned()
{
    // The warning is only reported while the server is not yet actually
    // amendment blocked; the stronger condition supersedes it.
    return !amendmentBlocked_ && amendmentWarned_;
}
1627
inline void
NetworkOPsImp::setAmendmentWarned()
{
    // An unsupported amendment has reached majority; surface a warning.
    amendmentWarned_ = true;
}
1633
inline void
NetworkOPsImp::clearAmendmentWarned()
{
    // No unsupported amendment currently holds majority.
    amendmentWarned_ = false;
}
1639
inline bool
NetworkOPsImp::isUNLBlocked()
{
    // Set by setUNLBlocked() when the validator list expires.
    return unlBlocked_;
}
1645
void
NetworkOPsImp::setUNLBlocked()
{
    // Record the blocked state and drop out of any synced operating mode;
    // setMode() keeps a blocked server at CONNECTED or below.
    unlBlocked_ = true;
    setMode(OperatingMode::CONNECTED, "setUNLBlocked");
}
1652
inline void
NetworkOPsImp::clearUNLBlocked()
{
    // A usable validator list is available again.
    unlBlocked_ = false;
}
1658
1659bool
1660NetworkOPsImp::checkLastClosedLedger(
1661 const Overlay::PeerSequence& peerList,
1662 uint256& networkClosed)
1663{
1664 // Returns true if there's an *abnormal* ledger issue, normal changing in
1665 // TRACKING mode should return false. Do we have sufficient validations for
1666 // our last closed ledger? Or do sufficient nodes agree? And do we have no
1667 // better ledger available? If so, we are either tracking or full.
1668
1669 JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";
1670
1671 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1672
1673 if (!ourClosed)
1674 return false;
1675
1676 uint256 closedLedger = ourClosed->info().hash;
1677 uint256 prevClosedLedger = ourClosed->info().parentHash;
1678 JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
1679 JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;
1680
1681 //-------------------------------------------------------------------------
1682 // Determine preferred last closed ledger
1683
1684 auto& validations = app_.getValidations();
1685 JLOG(m_journal.debug())
1686 << "ValidationTrie " << Json::Compact(validations.getJsonTrie());
1687
1688 // Will rely on peer LCL if no trusted validations exist
1690 peerCounts[closedLedger] = 0;
1691 if (mMode >= OperatingMode::TRACKING)
1692 peerCounts[closedLedger]++;
1693
1694 for (auto& peer : peerList)
1695 {
1696 uint256 peerLedger = peer->getClosedLedgerHash();
1697
1698 if (peerLedger.isNonZero())
1699 ++peerCounts[peerLedger];
1700 }
1701
1702 for (auto const& it : peerCounts)
1703 JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;
1704
1705 uint256 preferredLCL = validations.getPreferredLCL(
1706 RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
1707 m_ledgerMaster.getValidLedgerIndex(),
1708 peerCounts);
1709
1710 bool switchLedgers = preferredLCL != closedLedger;
1711 if (switchLedgers)
1712 closedLedger = preferredLCL;
1713 //-------------------------------------------------------------------------
1714 if (switchLedgers && (closedLedger == prevClosedLedger))
1715 {
1716 // don't switch to our own previous ledger
1717 JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
1718 networkClosed = ourClosed->info().hash;
1719 switchLedgers = false;
1720 }
1721 else
1722 networkClosed = closedLedger;
1723
1724 if (!switchLedgers)
1725 return false;
1726
1727 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1728
1729 if (!consensus)
1730 consensus = app_.getInboundLedgers().acquire(
1731 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1732
1733 if (consensus &&
1734 (!m_ledgerMaster.canBeCurrent(consensus) ||
1735 !m_ledgerMaster.isCompatible(
1736 *consensus, m_journal.debug(), "Not switching")))
1737 {
1738 // Don't switch to a ledger not on the validated chain
1739 // or with an invalid close time or sequence
1740 networkClosed = ourClosed->info().hash;
1741 return false;
1742 }
1743
1744 JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
1745 JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
1746 << getJson({*ourClosed, {}});
1747 JLOG(m_journal.info()) << "Net LCL " << closedLedger;
1748
1749 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1750 {
1751 setMode(OperatingMode::CONNECTED, "check LCL: not on consensus ledger");
1752 }
1753
1754 if (consensus)
1755 {
1756 // FIXME: If this rewinds the ledger sequence, or has the same
1757 // sequence, we should update the status on any stored transactions
1758 // in the invalidated ledgers.
1759 switchLastClosedLedger(consensus);
1760 }
1761
1762 return true;
1763}
1764
1765void
1766NetworkOPsImp::switchLastClosedLedger(
1767 std::shared_ptr<Ledger const> const& newLCL)
1768{
1769 // set the newLCL as our last closed ledger -- this is abnormal code
1770 JLOG(m_journal.error())
1771 << "JUMP last closed ledger to " << newLCL->info().hash;
1772
1773 clearNeedNetworkLedger();
1774
1775 // Update fee computations.
1776 app_.getTxQ().processClosedLedger(app_, *newLCL, true);
1777
1778 // Caller must own master lock
1779 {
1780 // Apply tx in old open ledger to new
1781 // open ledger. Then apply local tx.
1782
1783 auto retries = m_localTX->getTxSet();
1784 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1786 if (lastVal)
1787 rules = makeRulesGivenLedger(*lastVal, app_.config().features);
1788 else
1789 rules.emplace(app_.config().features);
1790 app_.openLedger().accept(
1791 app_,
1792 *rules,
1793 newLCL,
1794 OrderedTxs({}),
1795 false,
1796 retries,
1797 tapNONE,
1798 "jump",
1799 [&](OpenView& view, beast::Journal j) {
1800 // Stuff the ledger with transactions from the queue.
1801 return app_.getTxQ().accept(app_, view);
1802 });
1803 }
1804
1805 m_ledgerMaster.switchLCL(newLCL);
1806
1807 protocol::TMStatusChange s;
1808 s.set_newevent(protocol::neSWITCHED_LEDGER);
1809 s.set_ledgerseq(newLCL->info().seq);
1810 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1811 s.set_ledgerhashprevious(
1812 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
1813 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
1814
1815 app_.overlay().foreach(
1816 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
1817}
1818
// Start a new consensus round on top of our current open ledger's parent.
// Returns false (and may downgrade to TRACKING) if we do not have the
// previous ledger; returns true once the round has been started.
bool
NetworkOPsImp::beginConsensus(uint256 const& networkClosed)
{
    XRPL_ASSERT(
        networkClosed.isNonZero(),
        "ripple::NetworkOPsImp::beginConsensus : nonzero input");

    auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();

    JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
                           << " with LCL " << closingInfo.parentHash;

    auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);

    if (!prevLedger)
    {
        // this shouldn't happen unless we jump ledgers
        if (mMode == OperatingMode::FULL)
        {
            JLOG(m_journal.warn())
                << "beginConsensus Don't have LCL, going to tracking";
            setMode(OperatingMode::TRACKING, "beginConsensus: No LCL");
        }

        return false;
    }

    XRPL_ASSERT(
        prevLedger->info().hash == closingInfo.parentHash,
        "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
        "parent");
    XRPL_ASSERT(
        closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
        "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
        "hash");

    // Refresh the negative UNL and the trusted-validator set before the
    // round begins, so consensus sees current trust information.
    if (prevLedger->rules().enabled(featureNegativeUNL))
        app_.validators().setNegativeUNL(prevLedger->negativeUNL());
    TrustChanges const changes = app_.validators().updateTrusted(
        app_.getValidations().getCurrentNodeIDs(),
        closingInfo.parentCloseTime,
        *this,
        app_.overlay(),
        app_.getHashRouter());

    if (!changes.added.empty() || !changes.removed.empty())
    {
        app_.getValidations().trustChanged(changes.added, changes.removed);
        // Update the AmendmentTable so it tracks the current validators.
        app_.getAmendmentTable().trustChanged(
            app_.validators().getQuorumKeys().second);
    }

    mConsensus.startRound(
        app_.timeKeeper().closeTime(),
        networkClosed,
        prevLedger,
        changes.removed,
        changes.added);

    // Publish a phase-change notification if the phase moved.
    const ConsensusPhase currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
    }

    JLOG(m_journal.debug()) << "Initiating consensus engine";
    return true;
}
1889
bool
NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
{
    // Hand a trusted peer's position to the consensus engine; the return
    // value is presumably used by the caller to decide whether to relay
    // the proposal -- confirm against the caller.
    return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
}
1895
1896void
1897NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
1898{
1899 // We now have an additional transaction set
1900 // either created locally during the consensus process
1901 // or acquired from a peer
1902
1903 // Inform peers we have this set
1904 protocol::TMHaveTransactionSet msg;
1905 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
1906 msg.set_status(protocol::tsHAVE);
1907 app_.overlay().foreach(
1908 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
1909
1910 // We acquired it because consensus asked us to
1911 if (fromAcquire)
1912 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
1913}
1914
// Called when a consensus round completes: refresh stale peer status,
// re-evaluate our last closed ledger against the network's, adjust the
// operating mode, and kick off the next round.
void
NetworkOPsImp::endConsensus()
{
    uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;

    // Peers still reporting our previous LCL are out of date.
    for (auto const& it : app_.overlay().getActivePeers())
    {
        if (it && (it->getClosedLedgerHash() == deadLedger))
        {
            JLOG(m_journal.trace()) << "Killing obsolete peer status";
            it->cycleStatus();
        }
    }

    uint256 networkClosed;
    bool ledgerChange =
        checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);

    if (networkClosed.isZero())
        return;

    // WRITEME: Unless we are in FULL and in the process of doing a consensus,
    // we must count how many nodes share our LCL, how many nodes disagree with
    // our LCL, and how many validations our LCL has. We also want to check
    // timing to make sure there shouldn't be a newer LCL. We need this
    // information to do the next three tests.

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::SYNCING)) &&
        !ledgerChange)
    {
        // Count number of peers that agree with us and UNL nodes whose
        // validations we have for LCL. If the ledger is good enough, go to
        // TRACKING - TODO
        if (!needNetworkLedger_)
            setMode(OperatingMode::TRACKING, "endConsensus: check tracking");
    }

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::TRACKING)) &&
        !ledgerChange)
    {
        // check if the ledger is good enough to go to FULL
        // Note: Do not go to FULL if we don't have the previous ledger
        // check if the ledger is bad enough to go to CONNECTED -- TODO
        auto current = m_ledgerMaster.getCurrentLedger();
        if (app_.timeKeeper().now() < (current->info().parentCloseTime +
                                       2 * current->info().closeTimeResolution))
        {
            setMode(OperatingMode::FULL, "endConsensus: check full");
        }
    }

    beginConsensus(networkClosed);
}
1970
1971void
1972NetworkOPsImp::consensusViewChange()
1973{
1974 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
1975 {
1976 setMode(OperatingMode::CONNECTED, "consensusViewChange");
1977 }
1978}
1979
1980void
1981NetworkOPsImp::pubManifest(Manifest const& mo)
1982{
1983 // VFALCO consider std::shared_mutex
1984 std::lock_guard sl(mSubLock);
1985
1986 if (!mStreamMaps[sManifests].empty())
1987 {
1989
1990 jvObj[jss::type] = "manifestReceived";
1991 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
1992 if (mo.signingKey)
1993 jvObj[jss::signing_key] =
1994 toBase58(TokenType::NodePublic, *mo.signingKey);
1995 jvObj[jss::seq] = Json::UInt(mo.sequence);
1996 if (auto sig = mo.getSignature())
1997 jvObj[jss::signature] = strHex(*sig);
1998 jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
1999 if (!mo.domain.empty())
2000 jvObj[jss::domain] = mo.domain;
2001 jvObj[jss::manifest] = strHex(mo.serialized);
2002
2003 for (auto i = mStreamMaps[sManifests].begin();
2004 i != mStreamMaps[sManifests].end();)
2005 {
2006 if (auto p = i->second.lock())
2007 {
2008 p->send(jvObj, true);
2009 ++i;
2010 }
2011 else
2012 {
2013 i = mStreamMaps[sManifests].erase(i);
2014 }
2015 }
2016 }
2017}
2018
// Snapshot of the current fee state: base fee, load-fee-track factors,
// and the transaction queue's escalation metrics.
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    XRPAmount fee,
    TxQ::Metrics&& escalationMetrics,
    LoadFeeTrack const& loadFeeTrack)
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , baseFee{fee}
    , em{std::move(escalationMetrics)}
{
}
2029
2030bool
2032 NetworkOPsImp::ServerFeeSummary const& b) const
2033{
2034 if (loadFactorServer != b.loadFactorServer ||
2035 loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
2036 em.has_value() != b.em.has_value())
2037 return true;
2038
2039 if (em && b.em)
2040 {
2041 return (
2042 em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
2043 em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
2044 em->referenceFeeLevel != b.em->referenceFeeLevel);
2045 }
2046
2047 return false;
2048}
2049
// Clamp a uint64 value into uint32 range, because Json::Value cannot
// represent full 64-bit integers. Restores the function name and the max32
// declaration lost in extraction.
static std::uint32_t
trunc32(std::uint64_t v)
{
    constexpr std::uint64_t max32 = std::numeric_limits<std::uint32_t>::max();

    return std::min(max32, v);
}
2058
2059void
2061{
2062 // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
2063 // list into a local array while holding the lock then release
2064 // the lock and call send on everyone.
2065 //
2067
2068 if (!mStreamMaps[sServer].empty())
2069 {
2071
2073 app_.openLedger().current()->fees().base,
2075 app_.getFeeTrack()};
2076
2077 jvObj[jss::type] = "serverStatus";
2078 jvObj[jss::server_status] = strOperatingMode();
2079 jvObj[jss::load_base] = f.loadBaseServer;
2080 jvObj[jss::load_factor_server] = f.loadFactorServer;
2081 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2082
2083 if (f.em)
2084 {
2085 auto const loadFactor = std::max(
2086 safe_cast<std::uint64_t>(f.loadFactorServer),
2087 mulDiv(
2088 f.em->openLedgerFeeLevel,
2089 f.loadBaseServer,
2090 f.em->referenceFeeLevel)
2092
2093 jvObj[jss::load_factor] = trunc32(loadFactor);
2094 jvObj[jss::load_factor_fee_escalation] =
2095 f.em->openLedgerFeeLevel.jsonClipped();
2096 jvObj[jss::load_factor_fee_queue] =
2097 f.em->minProcessingFeeLevel.jsonClipped();
2098 jvObj[jss::load_factor_fee_reference] =
2099 f.em->referenceFeeLevel.jsonClipped();
2100 }
2101 else
2102 jvObj[jss::load_factor] = f.loadFactorServer;
2103
2104 mLastFeeSummary = f;
2105
2106 for (auto i = mStreamMaps[sServer].begin();
2107 i != mStreamMaps[sServer].end();)
2108 {
2109 InfoSub::pointer p = i->second.lock();
2110
2111 // VFALCO TODO research the possibility of using thread queues and
2112 // linearizing the deletion of subscribers with the
2113 // sending of JSON data.
2114 if (p)
2115 {
2116 p->send(jvObj, true);
2117 ++i;
2118 }
2119 else
2120 {
2121 i = mStreamMaps[sServer].erase(i);
2122 }
2123 }
2124 }
2125}
2126
2127void
2129{
2131
2132 auto& streamMap = mStreamMaps[sConsensusPhase];
2133 if (!streamMap.empty())
2134 {
2136 jvObj[jss::type] = "consensusPhase";
2137 jvObj[jss::consensus] = to_string(phase);
2138
2139 for (auto i = streamMap.begin(); i != streamMap.end();)
2140 {
2141 if (auto p = i->second.lock())
2142 {
2143 p->send(jvObj, true);
2144 ++i;
2145 }
2146 else
2147 {
2148 i = streamMap.erase(i);
2149 }
2150 }
2151 }
2152}
2153
2154void
2156{
2157 // VFALCO consider std::shared_mutex
2159
2160 if (!mStreamMaps[sValidations].empty())
2161 {
2163
2164 auto const signerPublic = val->getSignerPublic();
2165
2166 jvObj[jss::type] = "validationReceived";
2167 jvObj[jss::validation_public_key] =
2168 toBase58(TokenType::NodePublic, signerPublic);
2169 jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
2170 jvObj[jss::signature] = strHex(val->getSignature());
2171 jvObj[jss::full] = val->isFull();
2172 jvObj[jss::flags] = val->getFlags();
2173 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2174 jvObj[jss::data] = strHex(val->getSerializer().slice());
2175
2176 if (auto version = (*val)[~sfServerVersion])
2177 jvObj[jss::server_version] = std::to_string(*version);
2178
2179 if (auto cookie = (*val)[~sfCookie])
2180 jvObj[jss::cookie] = std::to_string(*cookie);
2181
2182 if (auto hash = (*val)[~sfValidatedHash])
2183 jvObj[jss::validated_hash] = strHex(*hash);
2184
2185 auto const masterKey =
2186 app_.validatorManifests().getMasterKey(signerPublic);
2187
2188 if (masterKey != signerPublic)
2189 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
2190
2191 // NOTE *seq is a number, but old API versions used string. We replace
2192 // number with a string using MultiApiJson near end of this function
2193 if (auto const seq = (*val)[~sfLedgerSequence])
2194 jvObj[jss::ledger_index] = *seq;
2195
2196 if (val->isFieldPresent(sfAmendments))
2197 {
2198 jvObj[jss::amendments] = Json::Value(Json::arrayValue);
2199 for (auto const& amendment : val->getFieldV256(sfAmendments))
2200 jvObj[jss::amendments].append(to_string(amendment));
2201 }
2202
2203 if (auto const closeTime = (*val)[~sfCloseTime])
2204 jvObj[jss::close_time] = *closeTime;
2205
2206 if (auto const loadFee = (*val)[~sfLoadFee])
2207 jvObj[jss::load_fee] = *loadFee;
2208
2209 if (auto const baseFee = val->at(~sfBaseFee))
2210 jvObj[jss::base_fee] = static_cast<double>(*baseFee);
2211
2212 if (auto const reserveBase = val->at(~sfReserveBase))
2213 jvObj[jss::reserve_base] = *reserveBase;
2214
2215 if (auto const reserveInc = val->at(~sfReserveIncrement))
2216 jvObj[jss::reserve_inc] = *reserveInc;
2217
2218 // (The ~ operator converts the Proxy to a std::optional, which
2219 // simplifies later operations)
2220 if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2221 baseFeeXRP && baseFeeXRP->native())
2222 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2223
2224 if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2225 reserveBaseXRP && reserveBaseXRP->native())
2226 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2227
2228 if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2229 reserveIncXRP && reserveIncXRP->native())
2230 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2231
2232 // NOTE Use MultiApiJson to publish two slightly different JSON objects
2233 // for consumers supporting different API versions
2234 MultiApiJson multiObj{jvObj};
2235 multiObj.visit(
2236 RPC::apiVersion<1>, //
2237 [](Json::Value& jvTx) {
2238 // Type conversion for older API versions to string
2239 if (jvTx.isMember(jss::ledger_index))
2240 {
2241 jvTx[jss::ledger_index] =
2242 std::to_string(jvTx[jss::ledger_index].asUInt());
2243 }
2244 });
2245
2246 for (auto i = mStreamMaps[sValidations].begin();
2247 i != mStreamMaps[sValidations].end();)
2248 {
2249 if (auto p = i->second.lock())
2250 {
2251 multiObj.visit(
2252 p->getApiVersion(), //
2253 [&](Json::Value const& jv) { p->send(jv, true); });
2254 ++i;
2255 }
2256 else
2257 {
2258 i = mStreamMaps[sValidations].erase(i);
2259 }
2260 }
2261 }
2262}
2263
2264void
2266{
2268
2269 if (!mStreamMaps[sPeerStatus].empty())
2270 {
2271 Json::Value jvObj(func());
2272
2273 jvObj[jss::type] = "peerStatusChange";
2274
2275 for (auto i = mStreamMaps[sPeerStatus].begin();
2276 i != mStreamMaps[sPeerStatus].end();)
2277 {
2278 InfoSub::pointer p = i->second.lock();
2279
2280 if (p)
2281 {
2282 p->send(jvObj, true);
2283 ++i;
2284 }
2285 else
2286 {
2287 i = mStreamMaps[sPeerStatus].erase(i);
2288 }
2289 }
2290 }
2291}
2292
2293void
2295{
2296 using namespace std::chrono_literals;
2297 if (om == OperatingMode::CONNECTED)
2298 {
2301 }
2302 else if (om == OperatingMode::SYNCING)
2303 {
2306 }
2307
2308 if ((om > OperatingMode::CONNECTED) && isBlocked())
2310
2311 if (mMode == om)
2312 return;
2313
2314 auto const sink = om < mMode ? m_journal.warn() : m_journal.info();
2315 mMode = om;
2316
2317 accounting_.mode(om);
2318
2319 JLOG(sink) << "STATE->" << strOperatingMode() << " - " << reason;
2320 pubServer();
2321}
2322
2323bool
2326 std::string const& source)
2327{
2328 JLOG(m_journal.trace())
2329 << "recvValidation " << val->getLedgerHash() << " from " << source;
2330
2331 {
2332 CanProcess const check(
2333 validationsMutex_, pendingValidations_, val->getLedgerHash());
2334 try
2335 {
2336 BypassAccept bypassAccept =
2338 handleNewValidation(app_, val, source, bypassAccept, m_journal);
2339 }
2340 catch (std::exception const& e)
2341 {
2342 JLOG(m_journal.warn())
2343 << "Exception thrown for handling new validation "
2344 << val->getLedgerHash() << ": " << e.what();
2345 }
2346 catch (...)
2347 {
2348 JLOG(m_journal.warn())
2349 << "Unknown exception thrown for handling new validation "
2350 << val->getLedgerHash();
2351 }
2352 }
2353
2354 pubValidation(val);
2355
2356 // We will always relay trusted validations; if configured, we will
2357 // also relay all untrusted validations.
2358 return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2359}
2360
2363{
2364 return mConsensus.getJson(true);
2365}
2366
2368NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2369{
2371
2372 // System-level warnings
2373 {
2374 Json::Value warnings{Json::arrayValue};
2375 if (isAmendmentBlocked())
2376 {
2377 Json::Value& w = warnings.append(Json::objectValue);
2378 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2379 w[jss::message] =
2380 "This server is amendment blocked, and must be updated to be "
2381 "able to stay in sync with the network.";
2382 }
2383 if (isUNLBlocked())
2384 {
2385 Json::Value& w = warnings.append(Json::objectValue);
2386 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2387 w[jss::message] =
2388 "This server has an expired validator list. validators.txt "
2389 "may be incorrectly configured or some [validator_list_sites] "
2390 "may be unreachable.";
2391 }
2392 if (admin && isAmendmentWarned())
2393 {
2394 Json::Value& w = warnings.append(Json::objectValue);
2395 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2396 w[jss::message] =
2397 "One or more unsupported amendments have reached majority. "
2398 "Upgrade to the latest version before they are activated "
2399 "to avoid being amendment blocked.";
2400 if (auto const expected =
2402 {
2403 auto& d = w[jss::details] = Json::objectValue;
2404 d[jss::expected_date] = expected->time_since_epoch().count();
2405 d[jss::expected_date_UTC] = to_string(*expected);
2406 }
2407 }
2408
2409 if (warnings.size())
2410 info[jss::warnings] = std::move(warnings);
2411 }
2412
2413 // hostid: unique string describing the machine
2414 if (human)
2415 info[jss::hostid] = getHostId(admin);
2416
2417 // domain: if configured with a domain, report it:
2418 if (!app_.config().SERVER_DOMAIN.empty())
2419 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2420
2421 info[jss::build_version] = BuildInfo::getVersionString();
2422
2423 info[jss::server_state] = strOperatingMode(admin);
2424
2425 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2427
2429 info[jss::network_ledger] = "waiting";
2430
2431 info[jss::validation_quorum] =
2432 static_cast<Json::UInt>(app_.validators().quorum());
2433
2434 if (admin)
2435 {
2436 switch (app_.config().NODE_SIZE)
2437 {
2438 case 0:
2439 info[jss::node_size] = "tiny";
2440 break;
2441 case 1:
2442 info[jss::node_size] = "small";
2443 break;
2444 case 2:
2445 info[jss::node_size] = "medium";
2446 break;
2447 case 3:
2448 info[jss::node_size] = "large";
2449 break;
2450 case 4:
2451 info[jss::node_size] = "huge";
2452 break;
2453 }
2454
2455 auto when = app_.validators().expires();
2456
2457 if (!human)
2458 {
2459 if (when)
2460 info[jss::validator_list_expires] =
2461 safe_cast<Json::UInt>(when->time_since_epoch().count());
2462 else
2463 info[jss::validator_list_expires] = 0;
2464 }
2465 else
2466 {
2467 auto& x = (info[jss::validator_list] = Json::objectValue);
2468
2469 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2470
2471 if (when)
2472 {
2473 if (*when == TimeKeeper::time_point::max())
2474 {
2475 x[jss::expiration] = "never";
2476 x[jss::status] = "active";
2477 }
2478 else
2479 {
2480 x[jss::expiration] = to_string(*when);
2481
2482 if (*when > app_.timeKeeper().now())
2483 x[jss::status] = "active";
2484 else
2485 x[jss::status] = "expired";
2486 }
2487 }
2488 else
2489 {
2490 x[jss::status] = "unknown";
2491 x[jss::expiration] = "unknown";
2492 }
2493 }
2494
2495#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2496 {
2497 auto& x = (info[jss::git] = Json::objectValue);
2498#ifdef GIT_COMMIT_HASH
2499 x[jss::hash] = GIT_COMMIT_HASH;
2500#endif
2501#ifdef GIT_BRANCH
2502 x[jss::branch] = GIT_BRANCH;
2503#endif
2504 }
2505#endif
2506 }
2507 info[jss::io_latency_ms] =
2508 static_cast<Json::UInt>(app_.getIOLatency().count());
2509
2510 if (admin)
2511 {
2512 if (auto const localPubKey = app_.validators().localPublicKey();
2513 localPubKey && app_.getValidationPublicKey())
2514 {
2515 info[jss::pubkey_validator] =
2516 toBase58(TokenType::NodePublic, localPubKey.value());
2517 }
2518 else
2519 {
2520 info[jss::pubkey_validator] = "none";
2521 }
2522 }
2523
2524 if (counters)
2525 {
2526 info[jss::counters] = app_.getPerfLog().countersJson();
2527
2528 Json::Value nodestore(Json::objectValue);
2529 app_.getNodeStore().getCountsJson(nodestore);
2530 info[jss::counters][jss::nodestore] = nodestore;
2531 info[jss::current_activities] = app_.getPerfLog().currentJson();
2532 }
2533
2534 info[jss::pubkey_node] =
2536
2537 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2538
2540 info[jss::amendment_blocked] = true;
2541
2542 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2543
2544 if (fp != 0)
2545 info[jss::fetch_pack] = Json::UInt(fp);
2546
2547 info[jss::peers] = Json::UInt(app_.overlay().size());
2548
2549 Json::Value lastClose = Json::objectValue;
2550 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2551
2552 if (human)
2553 {
2554 lastClose[jss::converge_time_s] =
2556 }
2557 else
2558 {
2559 lastClose[jss::converge_time] =
2561 }
2562
2563 info[jss::last_close] = lastClose;
2564
2565 // info[jss::consensus] = mConsensus.getJson();
2566
2567 if (admin)
2568 info[jss::load] = m_job_queue.getJson();
2569
2570 if (auto const netid = app_.overlay().networkID())
2571 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2572
2573 auto const escalationMetrics =
2575
2576 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2577 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2578 /* Scale the escalated fee level to unitless "load factor".
2579 In practice, this just strips the units, but it will continue
2580 to work correctly if either base value ever changes. */
2581 auto const loadFactorFeeEscalation =
2582 mulDiv(
2583 escalationMetrics.openLedgerFeeLevel,
2584 loadBaseServer,
2585 escalationMetrics.referenceFeeLevel)
2587
2588 auto const loadFactor = std::max(
2589 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2590
2591 if (!human)
2592 {
2593 info[jss::load_base] = loadBaseServer;
2594 info[jss::load_factor] = trunc32(loadFactor);
2595 info[jss::load_factor_server] = loadFactorServer;
2596
2597 /* Json::Value doesn't support uint64, so clamp to max
2598 uint32 value. This is mostly theoretical, since there
2599 probably isn't enough extant XRP to drive the factor
2600 that high.
2601 */
2602 info[jss::load_factor_fee_escalation] =
2603 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2604 info[jss::load_factor_fee_queue] =
2605 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2606 info[jss::load_factor_fee_reference] =
2607 escalationMetrics.referenceFeeLevel.jsonClipped();
2608 }
2609 else
2610 {
2611 info[jss::load_factor] =
2612 static_cast<double>(loadFactor) / loadBaseServer;
2613
2614 if (loadFactorServer != loadFactor)
2615 info[jss::load_factor_server] =
2616 static_cast<double>(loadFactorServer) / loadBaseServer;
2617
2618 if (admin)
2619 {
2621 if (fee != loadBaseServer)
2622 info[jss::load_factor_local] =
2623 static_cast<double>(fee) / loadBaseServer;
2624 fee = app_.getFeeTrack().getRemoteFee();
2625 if (fee != loadBaseServer)
2626 info[jss::load_factor_net] =
2627 static_cast<double>(fee) / loadBaseServer;
2628 fee = app_.getFeeTrack().getClusterFee();
2629 if (fee != loadBaseServer)
2630 info[jss::load_factor_cluster] =
2631 static_cast<double>(fee) / loadBaseServer;
2632 }
2633 if (escalationMetrics.openLedgerFeeLevel !=
2634 escalationMetrics.referenceFeeLevel &&
2635 (admin || loadFactorFeeEscalation != loadFactor))
2636 info[jss::load_factor_fee_escalation] =
2637 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2638 escalationMetrics.referenceFeeLevel);
2639 if (escalationMetrics.minProcessingFeeLevel !=
2640 escalationMetrics.referenceFeeLevel)
2641 info[jss::load_factor_fee_queue] =
2642 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2643 escalationMetrics.referenceFeeLevel);
2644 }
2645
2646 bool valid = false;
2647 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2648
2649 if (lpClosed)
2650 valid = true;
2651 else
2652 lpClosed = m_ledgerMaster.getClosedLedger();
2653
2654 if (lpClosed)
2655 {
2656 XRPAmount const baseFee = lpClosed->fees().base;
2658 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2659 l[jss::hash] = to_string(lpClosed->info().hash);
2660
2661 if (!human)
2662 {
2663 l[jss::base_fee] = baseFee.jsonClipped();
2664 l[jss::reserve_base] =
2665 lpClosed->fees().accountReserve(0).jsonClipped();
2666 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2667 l[jss::close_time] = Json::Value::UInt(
2668 lpClosed->info().closeTime.time_since_epoch().count());
2669 }
2670 else
2671 {
2672 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2673 l[jss::reserve_base_xrp] =
2674 lpClosed->fees().accountReserve(0).decimalXRP();
2675 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2676
2677 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2678 std::abs(closeOffset.count()) >= 60)
2679 l[jss::close_time_offset] =
2680 static_cast<std::uint32_t>(closeOffset.count());
2681
2682 constexpr std::chrono::seconds highAgeThreshold{1000000};
2684 {
2685 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2686 l[jss::age] =
2687 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2688 }
2689 else
2690 {
2691 auto lCloseTime = lpClosed->info().closeTime;
2692 auto closeTime = app_.timeKeeper().closeTime();
2693 if (lCloseTime <= closeTime)
2694 {
2695 using namespace std::chrono_literals;
2696 auto age = closeTime - lCloseTime;
2697 l[jss::age] =
2698 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2699 }
2700 }
2701 }
2702
2703 if (valid)
2704 info[jss::validated_ledger] = l;
2705 else
2706 info[jss::closed_ledger] = l;
2707
2708 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2709 if (!lpPublished)
2710 info[jss::published_ledger] = "none";
2711 else if (lpPublished->info().seq != lpClosed->info().seq)
2712 info[jss::published_ledger] = lpPublished->info().seq;
2713 }
2714
2715 accounting_.json(info);
2716 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2717 info[jss::jq_trans_overflow] =
2719 info[jss::peer_disconnects] =
2721 info[jss::peer_disconnects_resources] =
2723
2724 // This array must be sorted in increasing order.
2725 static constexpr std::array<std::string_view, 7> protocols{
2726 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2727 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2728 {
2730 for (auto const& port : app_.getServerHandler().setup().ports)
2731 {
2732 // Don't publish admin ports for non-admin users
2733 if (!admin &&
2734 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2735 port.admin_user.empty() && port.admin_password.empty()))
2736 continue;
2739 std::begin(port.protocol),
2740 std::end(port.protocol),
2741 std::begin(protocols),
2742 std::end(protocols),
2743 std::back_inserter(proto));
2744 if (!proto.empty())
2745 {
2746 auto& jv = ports.append(Json::Value(Json::objectValue));
2747 jv[jss::port] = std::to_string(port.port);
2748 jv[jss::protocol] = Json::Value{Json::arrayValue};
2749 for (auto const& p : proto)
2750 jv[jss::protocol].append(p);
2751 }
2752 }
2753
2754 if (app_.config().exists(SECTION_PORT_GRPC))
2755 {
2756 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
2757 auto const optPort = grpcSection.get("port");
2758 if (optPort && grpcSection.get("ip"))
2759 {
2760 auto& jv = ports.append(Json::Value(Json::objectValue));
2761 jv[jss::port] = *optPort;
2762 jv[jss::protocol] = Json::Value{Json::arrayValue};
2763 jv[jss::protocol].append("grpc");
2764 }
2765 }
2766 info[jss::ports] = std::move(ports);
2767 }
2768
2769 return info;
2770}
2771
// NOTE(review): doxygen HTML listing -- the qualified function-name line
// (source line 2773) and the single-statement body (2775) were dropped by
// the renderer. From its position after getServerInfo this is presumably
// NetworkOPsImp::clearLedgerFetch -- confirm against the real NetworkOPs.cpp.
2772void
2774{
2776}
2777
// Return diagnostic info about in-progress inbound ledger acquisitions,
// as produced by the InboundLedgers subsystem.
// NOTE(review): the return-type and qualified-name lines (source lines
// 2778-2779, presumably `Json::Value NetworkOPsImp::getLedgerFetchInfo()`)
// were dropped by the HTML render -- confirm against the original file.
2780{
2781 return app_.getInboundLedgers().getInfo();
2782}
2783
// Publish a proposed (not yet validated) transaction to every live
// subscriber of the real-time transactions stream, pruning subscribers
// whose weak pointers have expired, then fan out to per-account real-time
// subscribers via pubProposedAccountTransaction.
// NOTE(review): doxygen HTML listing -- dropped lines: 2785 (qualified
// name, presumably `NetworkOPsImp::pubProposedTransaction(`) and 2794
// (presumably a lock guarding mStreamMaps). Confirm against the original.
2784void
2786 std::shared_ptr<ReadView const> const& ledger,
2787 std::shared_ptr<STTx const> const& transaction,
2788 TER result)
2789{
2790 MultiApiJson jvObj =
2791 transJson(transaction, result, false, ledger, std::nullopt);
2792
2793 {
2795
2796 auto it = mStreamMaps[sRTTransactions].begin();
2797 while (it != mStreamMaps[sRTTransactions].end())
2798 {
2799 InfoSub::pointer p = it->second.lock();
2800
2801 if (p)
2802 {
2803 // Send the JSON rendering matching this subscriber's
2804 // negotiated API version.
2803 jvObj.visit(
2804 p->getApiVersion(), //
2805 [&](Json::Value const& jv) { p->send(jv, true); });
2806 ++it;
2807 }
2808 else
2809 {
2810 it = mStreamMaps[sRTTransactions].erase(it);
2811 }
2812 }
2813 }
2814
2815 pubProposedAccountTransaction(ledger, transaction, result);
2816}
2817
// Publish an accepted ledger: emit a "ledgerClosed" event on the ledger
// stream, book-change summaries on the bookChanges stream, kick off any
// delayed account_history subscriptions on the first published ledger,
// and finally publish each transaction in the ledger.
// NOTE(review): doxygen HTML listing -- dropped lines: 2819 (qualified
// name), 2824 (declaration of `alpAccepted`, the accepted-ledger cache
// lookup target), 2842 (presumably a lock), 2846 (presumably the
// `Json::Value jvObj` declaration), 2864/2867 (condition + value of the
// validated_ledgers assignment), 2915 (call whose arguments are at 2916,
// presumably subAccountHistoryStart), 2929's caller line 2928 (presumably
// pubValidatedTransaction). Confirm against the original NetworkOPs.cpp.
2818void
2820{
2821 // Ledgers are published only when they acquire sufficient validations
2822 // Holes are filled across connection loss or other catastrophe
2823
2825 app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
2826 if (!alpAccepted)
2827 {
2828 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
2829 app_.getAcceptedLedgerCache().canonicalize_replace_client(
2830 lpAccepted->info().hash, alpAccepted);
2831 }
2832
2833 XRPL_ASSERT(
2834 alpAccepted->getLedger().get() == lpAccepted.get(),
2835 "ripple::NetworkOPsImp::pubLedger : accepted input");
2836
2837 {
2838 JLOG(m_journal.debug())
2839 << "Publishing ledger " << lpAccepted->info().seq << " "
2840 << lpAccepted->info().hash;
2841
2843
2844 if (!mStreamMaps[sLedger].empty())
2845 {
2847
2848 jvObj[jss::type] = "ledgerClosed";
2849 jvObj[jss::ledger_index] = lpAccepted->info().seq;
2850 jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
2851 jvObj[jss::ledger_time] = Json::Value::UInt(
2852 lpAccepted->info().closeTime.time_since_epoch().count());
2853
2854 if (!lpAccepted->rules().enabled(featureXRPFees))
2855 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
2856 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
2857 jvObj[jss::reserve_base] =
2858 lpAccepted->fees().accountReserve(0).jsonClipped();
2859 jvObj[jss::reserve_inc] =
2860 lpAccepted->fees().increment.jsonClipped();
2861
2862 jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
2863
2865 {
2866 jvObj[jss::validated_ledgers] =
2868 }
2869
2870 auto it = mStreamMaps[sLedger].begin();
2871 while (it != mStreamMaps[sLedger].end())
2872 {
2873 InfoSub::pointer p = it->second.lock();
2874 if (p)
2875 {
2876 p->send(jvObj, true);
2877 ++it;
2878 }
2879 else
2880 it = mStreamMaps[sLedger].erase(it);
2881 }
2882 }
2883
2884 if (!mStreamMaps[sBookChanges].empty())
2885 {
2886 Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
2887
2888 auto it = mStreamMaps[sBookChanges].begin();
2889 while (it != mStreamMaps[sBookChanges].end())
2890 {
2891 InfoSub::pointer p = it->second.lock();
2892 if (p)
2893 {
2894 p->send(jvObj, true);
2895 ++it;
2896 }
2897 else
2898 it = mStreamMaps[sBookChanges].erase(it);
2899 }
2900 }
2901
2902 {
2903 static bool firstTime = true;
2904 if (firstTime)
2905 {
2906 // First validated ledger, start delayed SubAccountHistory
2907 firstTime = false;
2908 for (auto& outer : mSubAccountHistory)
2909 {
2910 for (auto& inner : outer.second)
2911 {
2912 auto& subInfo = inner.second;
2913 if (subInfo.index_->separationLedgerSeq_ == 0)
2914 {
2916 alpAccepted->getLedger(), subInfo);
2917 }
2918 }
2919 }
2920 }
2921 }
2922 }
2923
2924 // Don't lock since pubAcceptedTransaction is locking.
2925 for (auto const& accTx : *alpAccepted)
2926 {
2927 JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
2929 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
2930 }
2931}
2932
// Detect a change in the server's fee summary and, only when it differs
// from the last published summary, schedule a job to push an update to
// server-stream subscribers via pubServer().
// NOTE(review): doxygen HTML listing -- dropped lines: 2934 (qualified
// name), 2936/2938 (the construction of `f`, the fee summary being
// compared; only two of its initializers are visible), and 2944
// (presumably the job-queue addJob call that takes the arguments at
// 2945-2947). Confirm against the original NetworkOPs.cpp.
2933void
2935{
2937 app_.openLedger().current()->fees().base,
2939 app_.getFeeTrack()};
2940
2941 // only schedule the job if something has changed
2942 if (f != mLastFeeSummary)
2943 {
2945 jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
2946 pubServer();
2947 });
2948 }
2949}
2950
// Schedule a job that publishes a consensus-phase change to subscribers
// via pubConsensus(phase).
// NOTE(review): doxygen HTML listing -- dropped lines: 2952 (qualified
// name; the `phase` parameter declaration is not visible) and 2954-2955
// (presumably the addJob call and job-type argument preceding the visible
// job name and lambda). Confirm against the original NetworkOPs.cpp.
2951void
2953{
2956 "reportConsensusStateChange->pubConsensus",
2957 [this, phase]() { pubConsensus(phase); });
2958}
2959
// Sweep the local-transaction tracker against the given view, dropping
// entries that no longer apply.
// NOTE(review): the qualified-name line (source line 2961, presumably
// `NetworkOPsImp::updateLocalTx(ReadView const& view)`) was dropped by
// the HTML render -- confirm against the original NetworkOPs.cpp.
2960inline void
2962{
2963 m_localTX->sweep(view);
2964}
// Number of transactions currently held by the local-transaction tracker.
// NOTE(review): the qualified-name line (source line 2966, presumably
// `NetworkOPsImp::getLocalTxCount()`) was dropped by the HTML render --
// confirm against the original NetworkOPs.cpp.
2965inline std::size_t
2967{
2968 return m_localTX->size();
2969}
2970
// Build the JSON publication object for a transaction (and optional
// metadata), producing a MultiApiJson holding one rendering per supported
// API version: v1 keeps the tx under "transaction" with an embedded hash;
// v2+ moves it to "tx_json" with a top-level "hash". Adds engine result,
// ledger linkage, and owner_funds for non-self-funded OfferCreates.
// NOTE(review): doxygen HTML listing -- dropped lines: 2973-2974 (return
// type + qualified name), 2979 (the `meta` parameter declaration), 2981
// (presumably `Json::Value jvObj;`), 2997/2999 (the two RPC helper calls
// whose argument lists are at 2998/3000 -- presumably delivered-amount and
// synthetic-field insertion into jss::meta), 3039 (an accountFunds
// argument, presumably a freeze flag), 3047 (presumably a forApiVersions/
// forAllApiVersions call wrapping the visit), 3050 (the lambda's
// `Json::Value& jvTx` parameter line). Confirm against the original.
2971// This routine should only be used to publish accepted or validated
2972// transactions.
2975 std::shared_ptr<STTx const> const& transaction,
2976 TER result,
2977 bool validated,
2978 std::shared_ptr<ReadView const> const& ledger,
2980{
2982 std::string sToken;
2983 std::string sHuman;
2984
2985 transResultInfo(result, sToken, sHuman);
2986
2987 jvObj[jss::type] = "transaction";
2988 // NOTE jvObj is not a finished object for either API version. After
2989 // it's populated, we need to finish it for a specific API version. This is
2990 // done in a loop, near the end of this function.
2991 jvObj[jss::transaction] =
2992 transaction->getJson(JsonOptions::disable_API_prior_V2, false);
2993
2994 if (meta)
2995 {
2996 jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
2998 jvObj[jss::meta], *ledger, transaction, meta->get());
3000 jvObj[jss::meta], transaction, meta->get());
3001 }
3002
3003 if (!ledger->open())
3004 jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3005
3006 if (validated)
3007 {
3008 jvObj[jss::ledger_index] = ledger->info().seq;
3009 jvObj[jss::transaction][jss::date] =
3010 ledger->info().closeTime.time_since_epoch().count();
3011 jvObj[jss::validated] = true;
3012 jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3013
3014 // WRITEME: Put the account next seq here
3015 }
3016 else
3017 {
3018 jvObj[jss::validated] = false;
3019 jvObj[jss::ledger_current_index] = ledger->info().seq;
3020 }
3021
3022 jvObj[jss::status] = validated ? "closed" : "proposed";
3023 jvObj[jss::engine_result] = sToken;
3024 jvObj[jss::engine_result_code] = result;
3025 jvObj[jss::engine_result_message] = sHuman;
3026
3027 if (transaction->getTxnType() == ttOFFER_CREATE)
3028 {
3029 auto const account = transaction->getAccountID(sfAccount);
3030 auto const amount = transaction->getFieldAmount(sfTakerGets);
3031
3032 // If the offer create is not self funded then add the owner balance
3033 if (account != amount.issue().account)
3034 {
3035 auto const ownerFunds = accountFunds(
3036 *ledger,
3037 account,
3038 amount,
3040 app_.journal("View"));
3041 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3042 }
3043 }
3044
3045 std::string const hash = to_string(transaction->getTransactionID());
3046 MultiApiJson multiObj{jvObj};
3048 multiObj.visit(), //
3049 [&]<unsigned Version>(
3051 RPC::insertDeliverMax(
3052 jvTx[jss::transaction], transaction->getTxnType(), Version);
3053
3054 if constexpr (Version > 1)
3055 {
3056 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3057 jvTx[jss::hash] = hash;
3058 }
3059 else
3060 {
3061 jvTx[jss::transaction][jss::hash] = hash;
3062 }
3063 });
3064
3065 return multiObj;
3066}
3067
// Publish a validated transaction to subscribers of both the accepted
// transactions stream and the real-time transactions stream (pruning
// expired weak subscribers), notify the order-book DB on success, then
// fan out to per-account subscribers via pubAccountTransaction.
// NOTE(review): doxygen HTML listing -- dropped lines: 3069 (qualified
// name) and 3082 (presumably a lock guarding mStreamMaps). Confirm
// against the original NetworkOPs.cpp.
3068void
3070 std::shared_ptr<ReadView const> const& ledger,
3071 const AcceptedLedgerTx& transaction,
3072 bool last)
3073{
3074 auto const& stTxn = transaction.getTxn();
3075
3076 // Create two different Json objects, for different API versions
3077 auto const metaRef = std::ref(transaction.getMeta());
3078 auto const trResult = transaction.getResult();
3079 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3080
3081 {
3083
3084 auto it = mStreamMaps[sTransactions].begin();
3085 while (it != mStreamMaps[sTransactions].end())
3086 {
3087 InfoSub::pointer p = it->second.lock();
3088
3089 if (p)
3090 {
3091 jvObj.visit(
3092 p->getApiVersion(), //
3093 [&](Json::Value const& jv) { p->send(jv, true); });
3094 ++it;
3095 }
3096 else
3097 it = mStreamMaps[sTransactions].erase(it);
3098 }
3099
3100 it = mStreamMaps[sRTTransactions].begin();
3101
3102 while (it != mStreamMaps[sRTTransactions].end())
3103 {
3104 InfoSub::pointer p = it->second.lock();
3105
3106 if (p)
3107 {
3108 jvObj.visit(
3109 p->getApiVersion(), //
3110 [&](Json::Value const& jv) { p->send(jv, true); });
3111 ++it;
3112 }
3113 else
3114 it = mStreamMaps[sRTTransactions].erase(it);
3115 }
3116 }
3117
3118 if (transaction.getResult() == tesSUCCESS)
3119 app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3120
3121 pubAccountTransaction(ledger, transaction, last);
3122}
3123
// Fan a validated transaction out to per-account subscribers: collect
// live sinks from the real-time map, the accepted map, and the
// account_history map (skipping history subs whose separation ledger has
// not yet been passed), pruning expired entries as it goes, then build
// the publication JSON once and deliver it per API version. History
// subscribers additionally get forward-index / boundary / first markers.
// NOTE(review): doxygen HTML listing -- dropped lines: 3125 (qualified
// name), 3130 (declaration of `notify`, the collected sink set), 3137
// (presumably a lock guarding the subscription maps), 3140 (the third
// operand of the `if` at 3139, presumably `!mSubAccountHistory.empty())`),
// and 3240 (middle of the XRPL_ASSERT condition). Confirm against the
// original NetworkOPs.cpp.
3124void
3126 std::shared_ptr<ReadView const> const& ledger,
3127 AcceptedLedgerTx const& transaction,
3128 bool last)
3129{
3131 int iProposed = 0;
3132 int iAccepted = 0;
3133
3134 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3135 auto const currLedgerSeq = ledger->seq();
3136 {
3138
3139 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3141 {
3142 for (auto const& affectedAccount : transaction.getAffected())
3143 {
3144 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3145 simiIt != mSubRTAccount.end())
3146 {
3147 auto it = simiIt->second.begin();
3148
3149 while (it != simiIt->second.end())
3150 {
3151 InfoSub::pointer p = it->second.lock();
3152
3153 if (p)
3154 {
3155 notify.insert(p);
3156 ++it;
3157 ++iProposed;
3158 }
3159 else
3160 it = simiIt->second.erase(it);
3161 }
3162 }
3163
3164 if (auto simiIt = mSubAccount.find(affectedAccount);
3165 simiIt != mSubAccount.end())
3166 {
3167 auto it = simiIt->second.begin();
3168 while (it != simiIt->second.end())
3169 {
3170 InfoSub::pointer p = it->second.lock();
3171
3172 if (p)
3173 {
3174 notify.insert(p);
3175 ++it;
3176 ++iAccepted;
3177 }
3178 else
3179 it = simiIt->second.erase(it);
3180 }
3181 }
3182
3183 if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3184 histoIt != mSubAccountHistory.end())
3185 {
3186 auto& subs = histoIt->second;
3187 auto it = subs.begin();
3188 while (it != subs.end())
3189 {
3190 SubAccountHistoryInfoWeak const& info = it->second;
3191 // Transactions at or before the separation ledger are
3192 // delivered by the backward history job, not here.
3191 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3192 {
3193 ++it;
3194 continue;
3195 }
3196
3197 if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3198 {
3199 accountHistoryNotify.emplace_back(
3200 SubAccountHistoryInfo{isSptr, info.index_});
3201 ++it;
3202 }
3203 else
3204 {
3205 it = subs.erase(it);
3206 }
3207 }
3208 if (subs.empty())
3209 mSubAccountHistory.erase(histoIt);
3210 }
3211 }
3212 }
3213 }
3214
3215 JLOG(m_journal.trace())
3216 << "pubAccountTransaction: " << "proposed=" << iProposed
3217 << ", accepted=" << iAccepted;
3218
3219 if (!notify.empty() || !accountHistoryNotify.empty())
3220 {
3221 auto const& stTxn = transaction.getTxn();
3222
3223 // Create two different Json objects, for different API versions
3224 auto const metaRef = std::ref(transaction.getMeta());
3225 auto const trResult = transaction.getResult();
3226 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3227
3228 for (InfoSub::ref isrListener : notify)
3229 {
3230 jvObj.visit(
3231 isrListener->getApiVersion(), //
3232 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3233 }
3234
3235 if (last)
3236 jvObj.set(jss::account_history_boundary, true);
3237
3238 XRPL_ASSERT(
3239 jvObj.isMember(jss::account_history_tx_stream) ==
3241 "ripple::NetworkOPsImp::pubAccountTransaction : "
3242 "account_history_tx_stream not set");
3243 for (auto& info : accountHistoryNotify)
3244 {
3245 auto& index = info.index_;
3246 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3247 jvObj.set(jss::account_history_tx_first, true);
3248
3249 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3250
3251 jvObj.visit(
3252 info.sink_->getApiVersion(), //
3253 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3254 }
3255 }
3256}
3257
// Fan a proposed (unvalidated) transaction out to real-time per-account
// subscribers of any account the transaction mentions, pruning expired
// weak subscribers. Bails out early when there are no real-time account
// subscriptions at all.
// NOTE(review): doxygen HTML listing -- dropped lines: 3259 (qualified
// name), 3261 (the `tx` parameter declaration), 3264 (declaration of
// `notify`), 3270 (presumably a lock guarding the subscription maps),
// 3276 (third operand of the `if` at 3275), and 3317 (middle of the
// XRPL_ASSERT condition). Note the accountHistoryNotify vector is never
// populated in the visible code, so its loop below appears to be dead
// here -- confirm against the original NetworkOPs.cpp.
3258void
3260 std::shared_ptr<ReadView const> const& ledger,
3262 TER result)
3263{
3265 int iProposed = 0;
3266
3267 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3268
3269 {
3271
3272 if (mSubRTAccount.empty())
3273 return;
3274
3275 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3277 {
3278 for (auto const& affectedAccount : tx->getMentionedAccounts())
3279 {
3280 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3281 simiIt != mSubRTAccount.end())
3282 {
3283 auto it = simiIt->second.begin();
3284
3285 while (it != simiIt->second.end())
3286 {
3287 InfoSub::pointer p = it->second.lock();
3288
3289 if (p)
3290 {
3291 notify.insert(p);
3292 ++it;
3293 ++iProposed;
3294 }
3295 else
3296 it = simiIt->second.erase(it);
3297 }
3298 }
3299 }
3300 }
3301 }
3302
3303 JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3304
3305 if (!notify.empty() || !accountHistoryNotify.empty())
3306 {
3307 // Create two different Json objects, for different API versions
3308 MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3309
3310 for (InfoSub::ref isrListener : notify)
3311 jvObj.visit(
3312 isrListener->getApiVersion(), //
3313 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3314
3315 XRPL_ASSERT(
3316 jvObj.isMember(jss::account_history_tx_stream) ==
3318 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3319 "account_history_tx_stream not set");
3320 for (auto& info : accountHistoryNotify)
3321 {
3322 auto& index = info.index_;
3323 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3324 jvObj.set(jss::account_history_tx_first, true);
3325 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3326 jvObj.visit(
3327 info.sink_->getApiVersion(), //
3328 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3329 }
3330 }
3331}
3332
3333//
3334// Monitoring
3335//
3336
// Subscribe a listener to a set of accounts, either on the real-time map
// (rt == true) or the accepted-transaction map. Records the subscription
// both on the InfoSub itself and in the server-side per-account map.
// NOTE(review): doxygen HTML listing -- dropped lines: 3338 (qualified
// name) and 3353 (presumably a lock guarding subMap). Confirm against
// the original NetworkOPs.cpp.
3337void
3339 InfoSub::ref isrListener,
3340 hash_set<AccountID> const& vnaAccountIDs,
3341 bool rt)
3342{
3343 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3344
3345 for (auto const& naAccountID : vnaAccountIDs)
3346 {
3347 JLOG(m_journal.trace())
3348 << "subAccount: account: " << toBase58(naAccountID);
3349
3350 isrListener->insertSubAccountInfo(naAccountID, rt);
3351 }
3352
3354
3355 for (auto const& naAccountID : vnaAccountIDs)
3356 {
3357 auto simIterator = subMap.find(naAccountID);
3358 if (simIterator == subMap.end())
3359 {
3360 // Not found, note that account has a new single listner.
3361 SubMapType usisElement;
3362 usisElement[isrListener->getSeq()] = isrListener;
3363 // VFALCO NOTE This is making a needless copy of naAccountID
3364 subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3365 }
3366 else
3367 {
3368 // Found, note that the account has another listener.
3369 simIterator->second[isrListener->getSeq()] = isrListener;
3370 }
3371 }
3372}
3373
// Unsubscribe a listener from a set of accounts: remove the records from
// the InfoSub, then delegate server-side map cleanup to
// unsubAccountInternal keyed by the listener's sequence number.
// NOTE(review): the qualified-name line (source line 3375) was dropped by
// the HTML render -- confirm against the original NetworkOPs.cpp.
3374void
3376 InfoSub::ref isrListener,
3377 hash_set<AccountID> const& vnaAccountIDs,
3378 bool rt)
3379{
3380 for (auto const& naAccountID : vnaAccountIDs)
3381 {
3382 // Remove from the InfoSub
3383 isrListener->deleteSubAccountInfo(naAccountID, rt);
3384 }
3385
3386 // Remove from the server
3387 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3388}
3389
// Server-side half of account unsubscription: erase the subscriber (by
// sequence number) from each account's entry in the chosen map, dropping
// a map entry entirely once its last subscriber is gone.
// NOTE(review): doxygen HTML listing -- dropped lines: 3391 (qualified
// name) and 3396 (presumably a lock guarding subMap). Confirm against
// the original NetworkOPs.cpp.
3390void
3392 std::uint64_t uSeq,
3393 hash_set<AccountID> const& vnaAccountIDs,
3394 bool rt)
3395{
3397
3398 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3399
3400 for (auto const& naAccountID : vnaAccountIDs)
3401 {
3402 auto simIterator = subMap.find(naAccountID);
3403
3404 if (simIterator != subMap.end())
3405 {
3406 // Found
3407 simIterator->second.erase(uSeq);
3408
3409 if (simIterator->second.empty())
3410 {
3411 // Don't need hash entry.
3412 subMap.erase(simIterator);
3413 }
3414 }
3415 }
3416}
3417
// Queue the background job that streams an account's historical
// transactions backward (newest to oldest, in windows of up to 1024
// ledgers) to an account_history subscriber. The job charges the
// subscriber per iteration, stops at the genesis ledger, at the account's
// first transaction, when the subscriber disappears, or when
// stopHistorical_ is set; it errors out (and unsubscribes) when no
// SQLite relational database is available or a fetch fails.
// NOTE(review): doxygen HTML listing -- dropped lines: 3419 (qualified
// name), 3445-3446 (presumably the job-queue addJob call and job-type
// argument ahead of the visible job name), 3526-3529 (getMoreTxns lambda
// return type and marker parameter), 3534-3535 (database access and the
// options-struct opening for the initializers at 3536), 3580 (the call
// producing haveSomeValidatedLedgers, presumably a LedgerMaster validated-
// range query), 3594 (the reschedule action taken before returning), 3598
// (the declaration of `marker`), 3628 (the ledger lookup whose argument is
// at 3629), and 3638 (the declaration of `stTxn`). Confirm against the
// original NetworkOPs.cpp.
3418void
3420{
3421 enum DatabaseType { Sqlite, None };
3422 static const auto databaseType = [&]() -> DatabaseType {
3423 // Use a dynamic_cast to return DatabaseType::None
3424 // on failure.
3425 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3426 {
3427 return DatabaseType::Sqlite;
3428 }
3429 return DatabaseType::None;
3430 }();
3431
3432 if (databaseType == DatabaseType::None)
3433 {
3434 JLOG(m_journal.error())
3435 << "AccountHistory job for account "
3436 << toBase58(subInfo.index_->accountId_) << " no database";
3437 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3438 {
3439 sptr->send(rpcError(rpcINTERNAL), true);
3440 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3441 }
3442 return;
3443 }
3444
3447 "AccountHistoryTxStream",
3448 [this, dbType = databaseType, subInfo]() {
3449 auto const& accountId = subInfo.index_->accountId_;
3450 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3451 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3452
3453 JLOG(m_journal.trace())
3454 << "AccountHistory job for account " << toBase58(accountId)
3455 << " started. lastLedgerSeq=" << lastLedgerSeq;
3456
3457 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3458 std::shared_ptr<TxMeta> const& meta) -> bool {
3459 /*
3460 * genesis account: first tx is the one with seq 1
3461 * other account: first tx is the one created the account
3462 */
3463 if (accountId == genesisAccountId)
3464 {
3465 auto stx = tx->getSTransaction();
3466 if (stx->getAccountID(sfAccount) == accountId &&
3467 stx->getSeqProxy().value() == 1)
3468 return true;
3469 }
3470
3471 for (auto& node : meta->getNodes())
3472 {
3473 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3474 continue;
3475
3476 if (node.isFieldPresent(sfNewFields))
3477 {
3478 if (auto inner = dynamic_cast<const STObject*>(
3479 node.peekAtPField(sfNewFields));
3480 inner)
3481 {
3482 if (inner->isFieldPresent(sfAccount) &&
3483 inner->getAccountID(sfAccount) == accountId)
3484 {
3485 return true;
3486 }
3487 }
3488 }
3489 }
3490
3491 return false;
3492 };
3493
3494 auto send = [&](Json::Value const& jvObj,
3495 bool unsubscribe) -> bool {
3496 if (auto sptr = subInfo.sinkWptr_.lock())
3497 {
3498 sptr->send(jvObj, true);
3499 if (unsubscribe)
3500 unsubAccountHistory(sptr, accountId, false);
3501 return true;
3502 }
3503
3504 return false;
3505 };
3506
3507 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3508 bool unsubscribe) -> bool {
3509 if (auto sptr = subInfo.sinkWptr_.lock())
3510 {
3511 jvObj.visit(
3512 sptr->getApiVersion(), //
3513 [&](Json::Value const& jv) { sptr->send(jv, true); });
3514
3515 if (unsubscribe)
3516 unsubAccountHistory(sptr, accountId, false);
3517 return true;
3518 }
3519
3520 return false;
3521 };
3522
3523 auto getMoreTxns =
3524 [&](std::uint32_t minLedger,
3525 std::uint32_t maxLedger,
3530 switch (dbType)
3531 {
3532 case Sqlite: {
3533 auto db = static_cast<SQLiteDatabase*>(
3536 accountId, minLedger, maxLedger, marker, 0, true};
3537 return db->newestAccountTxPage(options);
3538 }
3539 default: {
3540 UNREACHABLE(
3541 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3542 "getMoreTxns : invalid database type");
3543 return {};
3544 }
3545 }
3546 };
3547
3548 /*
3549 * search backward until the genesis ledger or asked to stop
3550 */
3551 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3552 {
3553 int feeChargeCount = 0;
3554 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3555 {
3556 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3557 ++feeChargeCount;
3558 }
3559 else
3560 {
3561 JLOG(m_journal.trace())
3562 << "AccountHistory job for account "
3563 << toBase58(accountId) << " no InfoSub. Fee charged "
3564 << feeChargeCount << " times.";
3565 return;
3566 }
3567
3568 // try to search in 1024 ledgers till reaching genesis ledgers
3569 auto startLedgerSeq =
3570 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3571 JLOG(m_journal.trace())
3572 << "AccountHistory job for account " << toBase58(accountId)
3573 << ", working on ledger range [" << startLedgerSeq << ","
3574 << lastLedgerSeq << "]";
3575
3576 auto haveRange = [&]() -> bool {
3577 std::uint32_t validatedMin = UINT_MAX;
3578 std::uint32_t validatedMax = 0;
3579 auto haveSomeValidatedLedgers =
3581 validatedMin, validatedMax);
3582
3583 return haveSomeValidatedLedgers &&
3584 validatedMin <= startLedgerSeq &&
3585 lastLedgerSeq <= validatedMax;
3586 }();
3587
3588 if (!haveRange)
3589 {
3590 JLOG(m_journal.debug())
3591 << "AccountHistory reschedule job for account "
3592 << toBase58(accountId) << ", incomplete ledger range ["
3593 << startLedgerSeq << "," << lastLedgerSeq << "]";
3595 return;
3596 }
3597
3599 while (!subInfo.index_->stopHistorical_)
3600 {
3601 auto dbResult =
3602 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3603 if (!dbResult)
3604 {
3605 JLOG(m_journal.debug())
3606 << "AccountHistory job for account "
3607 << toBase58(accountId) << " getMoreTxns failed.";
3608 send(rpcError(rpcINTERNAL), true);
3609 return;
3610 }
3611
3612 auto const& txns = dbResult->first;
3613 marker = dbResult->second;
3614 size_t num_txns = txns.size();
3615 for (size_t i = 0; i < num_txns; ++i)
3616 {
3617 auto const& [tx, meta] = txns[i];
3618
3619 if (!tx || !meta)
3620 {
3621 JLOG(m_journal.debug())
3622 << "AccountHistory job for account "
3623 << toBase58(accountId) << " empty tx or meta.";
3624 send(rpcError(rpcINTERNAL), true);
3625 return;
3626 }
3627 auto curTxLedger =
3629 tx->getLedger());
3630 if (!curTxLedger)
3631 {
3632 JLOG(m_journal.debug())
3633 << "AccountHistory job for account "
3634 << toBase58(accountId) << " no ledger.";
3635 send(rpcError(rpcINTERNAL), true);
3636 return;
3637 }
3639 tx->getSTransaction();
3640 if (!stTxn)
3641 {
3642 JLOG(m_journal.debug())
3643 << "AccountHistory job for account "
3644 << toBase58(accountId)
3645 << " getSTransaction failed.";
3646 send(rpcError(rpcINTERNAL), true);
3647 return;
3648 }
3649
3650 auto const mRef = std::ref(*meta);
3651 auto const trR = meta->getResultTER();
3652 MultiApiJson jvTx =
3653 transJson(stTxn, trR, true, curTxLedger, mRef);
3654
3655 jvTx.set(
3656 jss::account_history_tx_index, txHistoryIndex--);
3657 if (i + 1 == num_txns ||
3658 txns[i + 1].first->getLedger() != tx->getLedger())
3659 jvTx.set(jss::account_history_boundary, true);
3660
3661 if (isFirstTx(tx, meta))
3662 {
3663 jvTx.set(jss::account_history_tx_first, true);
3664 sendMultiApiJson(jvTx, false);
3665
3666 JLOG(m_journal.trace())
3667 << "AccountHistory job for account "
3668 << toBase58(accountId)
3669 << " done, found last tx.";
3670 return;
3671 }
3672 else
3673 {
3674 sendMultiApiJson(jvTx, false);
3675 }
3676 }
3677
3678 if (marker)
3679 {
3680 JLOG(m_journal.trace())
3681 << "AccountHistory job for account "
3682 << toBase58(accountId)
3683 << " paging, marker=" << marker->ledgerSeq << ":"
3684 << marker->txnSeq;
3685 }
3686 else
3687 {
3688 break;
3689 }
3690 }
3691
3692 if (!subInfo.index_->stopHistorical_)
3693 {
3694 lastLedgerSeq = startLedgerSeq - 1;
3695 if (lastLedgerSeq <= 1)
3696 {
3697 JLOG(m_journal.trace())
3698 << "AccountHistory job for account "
3699 << toBase58(accountId)
3700 << " done, reached genesis ledger.";
3701 return;
3702 }
3703 }
3704 }
3705 });
3706}
3707
// Begin streaming history for an account_history subscription against a
// validated ledger: record the separation ledger, skip accounts that
// don't exist (or a genesis account with no transactions yet), then
// schedule the backward-history job.
// NOTE(review): doxygen HTML listing -- dropped lines: 3709 (qualified
// name) and 3711 (the second parameter declaration, the subInfo used
// throughout the body). Confirm against the original NetworkOPs.cpp.
3708void
3710 std::shared_ptr<ReadView const> const& ledger,
3712{
3713 subInfo.index_->separationLedgerSeq_ = ledger->seq();
3714 auto const& accountId = subInfo.index_->accountId_;
3715 auto const accountKeylet = keylet::account(accountId);
3716 if (!ledger->exists(accountKeylet))
3717 {
3718 JLOG(m_journal.debug())
3719 << "subAccountHistoryStart, no account " << toBase58(accountId)
3720 << ", no need to add AccountHistory job.";
3721 return;
3722 }
3723 if (accountId == genesisAccountId)
3724 {
3725 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3726 {
3727 if (sleAcct->getFieldU32(sfSequence) == 1)
3728 {
3729 JLOG(m_journal.debug())
3730 << "subAccountHistoryStart, genesis account "
3731 << toBase58(accountId)
3732 << " does not have tx, no need to add AccountHistory job.";
3733 return;
3734 }
3735 }
3736 else
3737 {
3738 UNREACHABLE(
3739 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
3740 "access genesis account");
3741 return;
3742 }
3743 }
3744 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
3745 subInfo.index_->haveHistorical_ = true;
3746
3747 JLOG(m_journal.debug())
3748 << "subAccountHistoryStart, add AccountHistory job: accountId="
3749 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
3750
3751 addAccountHistoryJob(subInfo);
3752}
3753
// Register an account_history subscription for a listener. Rejects a
// duplicate subscription with rpcINVALID_PARAMS; otherwise records the
// subscription and, if a validated ledger exists, starts streaming
// immediately -- else streaming is deferred until the first validated
// ledger (handled elsewhere), and the subscription still succeeds.
// NOTE(review): doxygen HTML listing -- dropped lines: 3754-3755 (return
// type + qualified name), 3767-3768 (presumably a lock plus the opening
// of the `ahi` initializer whose arguments are at 3769), 3773 (the
// declaration of `inner`), and 3775 (presumably the mSubAccountHistory
// insert call taking the arguments at 3776). Confirm against the
// original NetworkOPs.cpp.
3756 InfoSub::ref isrListener,
3757 AccountID const& accountId)
3758{
3759 if (!isrListener->insertSubAccountHistory(accountId))
3760 {
3761 JLOG(m_journal.debug())
3762 << "subAccountHistory, already subscribed to account "
3763 << toBase58(accountId);
3764 return rpcINVALID_PARAMS;
3765 }
3766
3769 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
3770 auto simIterator = mSubAccountHistory.find(accountId);
3771 if (simIterator == mSubAccountHistory.end())
3772 {
3774 inner.emplace(isrListener->getSeq(), ahi);
3776 simIterator, std::make_pair(accountId, inner));
3777 }
3778 else
3779 {
3780 simIterator->second.emplace(isrListener->getSeq(), ahi);
3781 }
3782
3783 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
3784 if (ledger)
3785 {
3786 subAccountHistoryStart(ledger, ahi);
3787 }
3788 else
3789 {
3790 // The node does not have validated ledgers, so wait for
3791 // one before start streaming.
3792 // In this case, the subscription is also considered successful.
3793 JLOG(m_journal.debug())
3794 << "subAccountHistory, no validated ledger yet, delay start";
3795 }
3796
3797 return rpcSUCCESS;
3798}
3799
// Remove an account_history subscription. With historyOnly set, only the
// backward history streaming is stopped; otherwise the listener's own
// record is removed too, then server-side cleanup is delegated.
// NOTE(review): the qualified-name line (source line 3801) was dropped by
// the HTML render -- confirm against the original NetworkOPs.cpp.
3800void
3802 InfoSub::ref isrListener,
3803 AccountID const& account,
3804 bool historyOnly)
3805{
3806 if (!historyOnly)
3807 isrListener->deleteSubAccountHistory(account);
3808 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
3809}
3810
// Server-side half of account_history unsubscription: signal the running
// history job to stop via stopHistorical_, and unless historyOnly, erase
// the subscriber entry (removing the account's map entry when empty).
// NOTE(review): doxygen HTML listing -- dropped lines: 3812 (qualified
// name) and 3817 (presumably a lock guarding mSubAccountHistory).
// Confirm against the original NetworkOPs.cpp.
3811void
3813 std::uint64_t seq,
3814 const AccountID& account,
3815 bool historyOnly)
3816{
3818 auto simIterator = mSubAccountHistory.find(account);
3819 if (simIterator != mSubAccountHistory.end())
3820 {
3821 auto& subInfoMap = simIterator->second;
3822 auto subInfoIter = subInfoMap.find(seq);
3823 if (subInfoIter != subInfoMap.end())
3824 {
3825 subInfoIter->second.index_->stopHistorical_ = true;
3826 }
3827
3828 if (!historyOnly)
3829 {
3830 simIterator->second.erase(seq);
3831 if (simIterator->second.empty())
3832 {
3833 mSubAccountHistory.erase(simIterator);
3834 }
3835 }
3836 JLOG(m_journal.debug())
3837 << "unsubAccountHistory, account " << toBase58(account)
3838 << ", historyOnly = " << (historyOnly ? "true" : "false");
3839 }
3840}
3841
// Subscribe a listener to an order book via the OrderBookDB's listener
// registry. Always reports success to the caller.
// NOTE(review): the qualified-name/parameter line (source line 3843) was
// dropped by the HTML render; the body uses `isrListener` and `book` --
// confirm the exact signature against the original NetworkOPs.cpp.
3842bool
3844{
3845 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
3846 listeners->addSubscriber(isrListener);
3847 else
3848 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
3849 return true;
3850}
3851
// Unsubscribe a listener (by sequence number) from an order book, if that
// book currently has listeners. Always reports success.
// NOTE(review): the qualified-name/parameter line (source line 3853) was
// dropped by the HTML render; the body uses `uSeq` and `book` -- confirm
// the exact signature against the original NetworkOPs.cpp.
3852bool
3854{
3855 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
3856 listeners->removeSubscriber(uSeq);
3857
3858 return true;
3859}
3860
// Standalone-mode only (`ledger_accept`): force the consensus simulation
// to close the current ledger and return the new current ledger sequence.
// Throws if invoked when not in standalone mode.
// NOTE(review): doxygen HTML listing -- dropped lines: 3861-3863 (return
// type + qualified name + the consensusDelay parameter used at 3877) and
// 3876 (a statement preceding the simulate call; content unknown --
// confirm against the original NetworkOPs.cpp).
3864{
3865 // This code-path is exclusively used when the server is in standalone
3866 // mode via `ledger_accept`
3867 XRPL_ASSERT(
3868 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
3869
3870 if (!m_standalone)
3871 Throw<std::runtime_error>(
3872 "Operation only possible in STANDALONE mode.");
3873
3874 // FIXME Could we improve on this and remove the need for a specialized
3875 // API in Consensus?
3877 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
3878 return m_ledgerMaster.getCurrentLedger()->info().seq;
3879}
3880
// Subscribe a listener to the ledger stream, seeding jvResult with a
// snapshot of the latest validated ledger (index, hash, close time, fee
// and reserve values). Returns true when newly added, false when the
// subscriber was already present.
// NOTE(review): doxygen HTML listing -- dropped lines: 3883 (qualified
// name + parameters), 3899 (the condition guarding the validated_ledgers
// block), 3902 (the value assigned to jss::validated_ledgers, presumably
// the complete-ledger range string), and 3905 (presumably a lock guarding
// mStreamMaps). Confirm against the original NetworkOPs.cpp.
3881// <-- bool: true=added, false=already there
3882bool
3884{
3885 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
3886 {
3887 jvResult[jss::ledger_index] = lpClosed->info().seq;
3888 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
3889 jvResult[jss::ledger_time] = Json::Value::UInt(
3890 lpClosed->info().closeTime.time_since_epoch().count());
3891 if (!lpClosed->rules().enabled(featureXRPFees))
3892 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3893 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
3894 jvResult[jss::reserve_base] =
3895 lpClosed->fees().accountReserve(0).jsonClipped();
3896 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
3897 }
3898
3900 {
3901 jvResult[jss::validated_ledgers] =
3903 }
3904
3906 return mStreamMaps[sLedger]
3907 .emplace(isrListener->getSeq(), isrListener)
3908 .second;
3909}
3910
3911// <-- bool: true=added, false=already there
3912bool
3914{
3917 .emplace(isrListener->getSeq(), isrListener)
3918 .second;
3919}
3920
3921// <-- bool: true=erased, false=was not there
3922bool
3924{
3926 return mStreamMaps[sLedger].erase(uSeq);
3927}
3928
3929// <-- bool: true=erased, false=was not there
3930bool
3932{
3934 return mStreamMaps[sBookChanges].erase(uSeq);
3935}
3936
3937// <-- bool: true=added, false=already there
3938bool
3940{
3942 return mStreamMaps[sManifests]
3943 .emplace(isrListener->getSeq(), isrListener)
3944 .second;
3945}
3946
3947// <-- bool: true=erased, false=was not there
3948bool
3950{
3952 return mStreamMaps[sManifests].erase(uSeq);
3953}
3954
3955// <-- bool: true=added, false=already there
3956bool
3958 InfoSub::ref isrListener,
3959 Json::Value& jvResult,
3960 bool admin)
3961{
3962 uint256 uRandom;
3963
3964 if (m_standalone)
3965 jvResult[jss::stand_alone] = m_standalone;
3966
3967 // CHECKME: is it necessary to provide a random number here?
3968 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
3969
3970 auto const& feeTrack = app_.getFeeTrack();
3971 jvResult[jss::random] = to_string(uRandom);
3972 jvResult[jss::server_status] = strOperatingMode(admin);
3973 jvResult[jss::load_base] = feeTrack.getLoadBase();
3974 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
3975 jvResult[jss::hostid] = getHostId(admin);
3976 jvResult[jss::pubkey_node] =
3978
3980 return mStreamMaps[sServer]
3981 .emplace(isrListener->getSeq(), isrListener)
3982 .second;
3983}
3984
3985// <-- bool: true=erased, false=was not there
3986bool
3988{
3990 return mStreamMaps[sServer].erase(uSeq);
3991}
3992
3993// <-- bool: true=added, false=already there
3994bool
3996{
3999 .emplace(isrListener->getSeq(), isrListener)
4000 .second;
4001}
4002
4003// <-- bool: true=erased, false=was not there
4004bool
4006{
4008 return mStreamMaps[sTransactions].erase(uSeq);
4009}
4010
4011// <-- bool: true=added, false=already there
4012bool
4014{
4017 .emplace(isrListener->getSeq(), isrListener)
4018 .second;
4019}
4020
4021// <-- bool: true=erased, false=was not there
4022bool
4024{
4026 return mStreamMaps[sRTTransactions].erase(uSeq);
4027}
4028
4029// <-- bool: true=added, false=already there
4030bool
4032{
4035 .emplace(isrListener->getSeq(), isrListener)
4036 .second;
4037}
4038
4039void
4041{
4042 accounting_.json(obj);
4043}
4044
4045// <-- bool: true=erased, false=was not there
4046bool
4048{
4050 return mStreamMaps[sValidations].erase(uSeq);
4051}
4052
4053// <-- bool: true=added, false=already there
4054bool
4056{
4058 return mStreamMaps[sPeerStatus]
4059 .emplace(isrListener->getSeq(), isrListener)
4060 .second;
4061}
4062
4063// <-- bool: true=erased, false=was not there
4064bool
4066{
4068 return mStreamMaps[sPeerStatus].erase(uSeq);
4069}
4070
4071// <-- bool: true=added, false=already there
4072bool
4074{
4077 .emplace(isrListener->getSeq(), isrListener)
4078 .second;
4079}
4080
4081// <-- bool: true=erased, false=was not there
4082bool
4084{
4086 return mStreamMaps[sConsensusPhase].erase(uSeq);
4087}
4088
4091{
4093
4094 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4095
4096 if (it != mRpcSubMap.end())
4097 return it->second;
4098
4099 return InfoSub::pointer();
4100}
4101
4104{
4106
4107 mRpcSubMap.emplace(strUrl, rspEntry);
4108
4109 return rspEntry;
4110}
4111
4112bool
4114{
4116 auto pInfo = findRpcSub(strUrl);
4117
4118 if (!pInfo)
4119 return false;
4120
4121 // check to see if any of the stream maps still hold a weak reference to
4122 // this entry before removing
4123 for (SubMapType const& map : mStreamMaps)
4124 {
4125 if (map.find(pInfo->getSeq()) != map.end())
4126 return false;
4127 }
4128 mRpcSubMap.erase(strUrl);
4129 return true;
4130}
4131
4132#ifndef USE_NEW_BOOK_PAGE
4133
4134// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4135// work, but it demonstrated poor performance.
4136//
4137void
4140 Book const& book,
4141 AccountID const& uTakerID,
4142 bool const bProof,
4143 unsigned int iLimit,
4144 Json::Value const& jvMarker,
4145 Json::Value& jvResult)
4146{ // CAUTION: This is the old get book page logic
4147 Json::Value& jvOffers =
4148 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4149
4151 const uint256 uBookBase = getBookBase(book);
4152 const uint256 uBookEnd = getQualityNext(uBookBase);
4153 uint256 uTipIndex = uBookBase;
4154
4155 if (auto stream = m_journal.trace())
4156 {
4157 stream << "getBookPage:" << book;
4158 stream << "getBookPage: uBookBase=" << uBookBase;
4159 stream << "getBookPage: uBookEnd=" << uBookEnd;
4160 stream << "getBookPage: uTipIndex=" << uTipIndex;
4161 }
4162
4163 ReadView const& view = *lpLedger;
4164
4165 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4166 isGlobalFrozen(view, book.in.account);
4167
4168 bool bDone = false;
4169 bool bDirectAdvance = true;
4170
4171 std::shared_ptr<SLE const> sleOfferDir;
4172 uint256 offerIndex;
4173 unsigned int uBookEntry;
4174 STAmount saDirRate;
4175
4176 auto const rate = transferRate(view, book.out.account);
4177 auto viewJ = app_.journal("View");
4178
4179 while (!bDone && iLimit-- > 0)
4180 {
4181 if (bDirectAdvance)
4182 {
4183 bDirectAdvance = false;
4184
4185 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4186
4187 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4188 if (ledgerIndex)
4189 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4190 else
4191 sleOfferDir.reset();
4192
4193 if (!sleOfferDir)
4194 {
4195 JLOG(m_journal.trace()) << "getBookPage: bDone";
4196 bDone = true;
4197 }
4198 else
4199 {
4200 uTipIndex = sleOfferDir->key();
4201 saDirRate = amountFromQuality(getQuality(uTipIndex));
4202
4203 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4204
4205 JLOG(m_journal.trace())
4206 << "getBookPage: uTipIndex=" << uTipIndex;
4207 JLOG(m_journal.trace())
4208 << "getBookPage: offerIndex=" << offerIndex;
4209 }
4210 }
4211
4212 if (!bDone)
4213 {
4214 auto sleOffer = view.read(keylet::offer(offerIndex));
4215
4216 if (sleOffer)
4217 {
4218 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4219 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4220 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4221 STAmount saOwnerFunds;
4222 bool firstOwnerOffer(true);
4223
4224 if (book.out.account == uOfferOwnerID)
4225 {
4226 // If an offer is selling issuer's own IOUs, it is fully
4227 // funded.
4228 saOwnerFunds = saTakerGets;
4229 }
4230 else if (bGlobalFreeze)
4231 {
4232 // If either asset is globally frozen, consider all offers
4233 // that aren't ours to be totally unfunded
4234 saOwnerFunds.clear(book.out);
4235 }
4236 else
4237 {
4238 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4239 if (umBalanceEntry != umBalance.end())
4240 {
4241 // Found in running balance table.
4242
4243 saOwnerFunds = umBalanceEntry->second;
4244 firstOwnerOffer = false;
4245 }
4246 else
4247 {
4248 // Did not find balance in table.
4249
4250 saOwnerFunds = accountHolds(
4251 view,
4252 uOfferOwnerID,
4253 book.out.currency,
4254 book.out.account,
4256 viewJ);
4257
4258 if (saOwnerFunds < beast::zero)
4259 {
4260 // Treat negative funds as zero.
4261
4262 saOwnerFunds.clear();
4263 }
4264 }
4265 }
4266
4267 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4268
4269 STAmount saTakerGetsFunded;
4270 STAmount saOwnerFundsLimit = saOwnerFunds;
4271 Rate offerRate = parityRate;
4272
4273 if (rate != parityRate
4274                // Have a transfer fee.
4275 && uTakerID != book.out.account
4276 // Not taking offers of own IOUs.
4277 && book.out.account != uOfferOwnerID)
4278 // Offer owner not issuing ownfunds
4279 {
4280 // Need to charge a transfer fee to offer owner.
4281 offerRate = rate;
4282 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4283 }
4284
4285 if (saOwnerFundsLimit >= saTakerGets)
4286 {
4287 // Sufficient funds no shenanigans.
4288 saTakerGetsFunded = saTakerGets;
4289 }
4290 else
4291 {
4292 // Only provide, if not fully funded.
4293
4294 saTakerGetsFunded = saOwnerFundsLimit;
4295
4296 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4297 std::min(
4298 saTakerPays,
4299 multiply(
4300 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4301 .setJson(jvOffer[jss::taker_pays_funded]);
4302 }
4303
4304 STAmount saOwnerPays = (parityRate == offerRate)
4305 ? saTakerGetsFunded
4306 : std::min(
4307 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4308
4309 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4310
4311 // Include all offers funded and unfunded
4312 Json::Value& jvOf = jvOffers.append(jvOffer);
4313 jvOf[jss::quality] = saDirRate.getText();
4314
4315 if (firstOwnerOffer)
4316 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4317 }
4318 else
4319 {
4320 JLOG(m_journal.warn()) << "Missing offer";
4321 }
4322
4323 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4324 {
4325 bDirectAdvance = true;
4326 }
4327 else
4328 {
4329 JLOG(m_journal.trace())
4330 << "getBookPage: offerIndex=" << offerIndex;
4331 }
4332 }
4333 }
4334
4335 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4336 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4337}
4338
4339#else
4340
4341// This is the new code that uses the book iterators
4342// It has temporarily been disabled
4343
4344void
4347 Book const& book,
4348 AccountID const& uTakerID,
4349 bool const bProof,
4350 unsigned int iLimit,
4351 Json::Value const& jvMarker,
4352 Json::Value& jvResult)
4353{
4354 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4355
4357
4358 MetaView lesActive(lpLedger, tapNONE, true);
4359 OrderBookIterator obIterator(lesActive, book);
4360
4361 auto const rate = transferRate(lesActive, book.out.account);
4362
4363 const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4364 lesActive.isGlobalFrozen(book.in.account);
4365
4366 while (iLimit-- > 0 && obIterator.nextOffer())
4367 {
4368 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4369 if (sleOffer)
4370 {
4371 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4372 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4373 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4374 STAmount saDirRate = obIterator.getCurrentRate();
4375 STAmount saOwnerFunds;
4376
4377 if (book.out.account == uOfferOwnerID)
4378 {
4379 // If offer is selling issuer's own IOUs, it is fully funded.
4380 saOwnerFunds = saTakerGets;
4381 }
4382 else if (bGlobalFreeze)
4383 {
4384 // If either asset is globally frozen, consider all offers
4385 // that aren't ours to be totally unfunded
4386 saOwnerFunds.clear(book.out);
4387 }
4388 else
4389 {
4390 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4391
4392 if (umBalanceEntry != umBalance.end())
4393 {
4394 // Found in running balance table.
4395
4396 saOwnerFunds = umBalanceEntry->second;
4397 }
4398 else
4399 {
4400 // Did not find balance in table.
4401
4402 saOwnerFunds = lesActive.accountHolds(
4403 uOfferOwnerID,
4404 book.out.currency,
4405 book.out.account,
4407
4408 if (saOwnerFunds.isNegative())
4409 {
4410 // Treat negative funds as zero.
4411
4412 saOwnerFunds.zero();
4413 }
4414 }
4415 }
4416
4417 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4418
4419 STAmount saTakerGetsFunded;
4420 STAmount saOwnerFundsLimit = saOwnerFunds;
4421 Rate offerRate = parityRate;
4422
4423 if (rate != parityRate
4424                // Have a transfer fee.
4425 && uTakerID != book.out.account
4426 // Not taking offers of own IOUs.
4427 && book.out.account != uOfferOwnerID)
4428 // Offer owner not issuing ownfunds
4429 {
4430 // Need to charge a transfer fee to offer owner.
4431 offerRate = rate;
4432 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4433 }
4434
4435 if (saOwnerFundsLimit >= saTakerGets)
4436 {
4437 // Sufficient funds no shenanigans.
4438 saTakerGetsFunded = saTakerGets;
4439 }
4440 else
4441 {
4442 // Only provide, if not fully funded.
4443 saTakerGetsFunded = saOwnerFundsLimit;
4444
4445 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4446
4447                // TODO(tom): The result of this expression is not used - what's
4448 // going on here?
4449 std::min(
4450 saTakerPays,
4451 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4452 .setJson(jvOffer[jss::taker_pays_funded]);
4453 }
4454
4455 STAmount saOwnerPays = (parityRate == offerRate)
4456 ? saTakerGetsFunded
4457 : std::min(
4458 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4459
4460 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4461
4462 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4463 {
4464 // Only provide funded offers and offers of the taker.
4465 Json::Value& jvOf = jvOffers.append(jvOffer);
4466 jvOf[jss::quality] = saDirRate.getText();
4467 }
4468 }
4469 }
4470
4471 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4472 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4473}
4474
4475#endif
4476
4477inline void
4479{
4480 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4481 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4483 counters[static_cast<std::size_t>(mode)].dur += current;
4484
4487 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4488 .dur.count());
4490 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4491 .dur.count());
4493 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4495 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4496 .dur.count());
4498 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4499
4501 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4502 .transitions);
4504 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4505 .transitions);
4507 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4509 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4510 .transitions);
4512 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4513}
4514
4515void
4517{
4518 auto now = std::chrono::steady_clock::now();
4519
4520 std::lock_guard lock(mutex_);
4521 ++counters_[static_cast<std::size_t>(om)].transitions;
4522 if (om == OperatingMode::FULL &&
4523 counters_[static_cast<std::size_t>(om)].transitions == 1)
4524 {
4525 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4526 now - processStart_)
4527 .count();
4528 }
4529 counters_[static_cast<std::size_t>(mode_)].dur +=
4530 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4531
4532 mode_ = om;
4533 start_ = now;
4534}
4535
4536void
4538{
4539 auto [counters, mode, start, initialSync] = getCounterData();
4540 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4542 counters[static_cast<std::size_t>(mode)].dur += current;
4543
4544 obj[jss::state_accounting] = Json::objectValue;
4546 i <= static_cast<std::size_t>(OperatingMode::FULL);
4547 ++i)
4548 {
4549 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4550 auto& state = obj[jss::state_accounting][states_[i]];
4551 state[jss::transitions] = std::to_string(counters[i].transitions);
4552 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4553 }
4554 obj[jss::server_state_duration_us] = std::to_string(current.count());
4555 if (initialSync)
4556 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4557}
4558
4559//------------------------------------------------------------------------------
4560
4563 Application& app,
4565 bool standalone,
4566 std::size_t minPeerCount,
4567 bool startvalid,
4568 JobQueue& job_queue,
4570 ValidatorKeys const& validatorKeys,
4571 boost::asio::io_service& io_svc,
4572 beast::Journal journal,
4573 beast::insight::Collector::ptr const& collector)
4574{
4575 return std::make_unique<NetworkOPsImp>(
4576 app,
4577 clock,
4578 standalone,
4579 minPeerCount,
4580 startvalid,
4581 job_queue,
4583 validatorKeys,
4584 io_svc,
4585 journal,
4586 collector);
4587}
4588
4589} // namespace ripple
T back_inserter(T... args)
T begin(T... args)
RAII class to check if an Item is already being processed on another thread, as indicated by it's pre...
Definition: CanProcess.h:67
Decorator for streaming out compact json.
Definition: json_writer.h:317
Lightweight wrapper to tag static string.
Definition: json_value.h:61
Represents a JSON value.
Definition: json_value.h:147
Json::UInt UInt
Definition: json_value.h:154
Value get(UInt index, const Value &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Definition: json_value.cpp:841
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:891
bool isMember(const char *key) const
Return true if the object has a member named key.
Definition: json_value.cpp:943
A generic endpoint for log messages.
Definition: Journal.h:59
Stream error() const
Definition: Journal.h:335
Stream debug() const
Definition: Journal.h:317
Stream info() const
Definition: Journal.h:323
Stream trace() const
Severity stream access functions.
Definition: Journal.h:311
Stream warn() const
Definition: Journal.h:329
A metric for measuring an integral value.
Definition: Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition: Gauge.h:68
A reference to a handler for performing polled collection.
Definition: Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition: Book.h:34
Issue in
Definition: Book.h:36
Issue out
Definition: Book.h:37
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition: ClusterNode.h:45
std::uint32_t getLoadFee() const
Definition: ClusterNode.h:51
NetClock::time_point getReportTime() const
Definition: ClusterNode.h:57
PublicKey const & identity() const
Definition: ClusterNode.h:63
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:48
std::string SERVER_DOMAIN
Definition: Config.h:286
std::size_t NODE_SIZE
Definition: Config.h:220
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition: Config.h:167
int RELAY_UNTRUSTED_VALIDATIONS
Definition: Config.h:176
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition: InfoSub.h:54
AccountID account
Definition: Issue.h:39
Currency currency
Definition: Issue.h:38
A pool of threads to perform work.
Definition: JobQueue.h:56
Json::Value getJson(int c=0)
Definition: JobQueue.cpp:212
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
Definition: LedgerMaster.h:266
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:80
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
Definition: LoadFeeTrack.h:46
std::uint32_t getClusterFee() const
Definition: LoadFeeTrack.h:83
std::uint32_t getLocalFee() const
Definition: LoadFeeTrack.h:76
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:90
std::uint32_t getRemoteFee() const
Definition: LoadFeeTrack.h:69
std::uint32_t getLoadFactor() const
Definition: LoadFeeTrack.h:96
Manages load sources.
Definition: LoadManager.h:46
void resetDeadlockDetector()
Reset the deadlock detection timer.
Definition: LoadManager.cpp:63
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
Definition: NetworkOPs.cpp:141
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
Definition: NetworkOPs.cpp:151
std::chrono::steady_clock::time_point start_
Definition: NetworkOPs.cpp:153
static std::array< Json::StaticString const, 5 > const states_
Definition: NetworkOPs.cpp:157
std::chrono::steady_clock::time_point const processStart_
Definition: NetworkOPs.cpp:155
Transaction with input flags and results to be applied in batches.
Definition: NetworkOPs.cpp:92
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
Definition: NetworkOPs.cpp:101
std::shared_ptr< Transaction > const transaction
Definition: NetworkOPs.cpp:94
boost::asio::steady_timer accountHistoryTxTimer_
Definition: NetworkOPs.cpp:731
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
Definition: NetworkOPs.cpp:863
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::vector< TransactionStatus > mTransactions
Definition: NetworkOPs.cpp:775
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Definition: NetworkOPs.cpp:721
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
Definition: NetworkOPs.cpp:733
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
Definition: NetworkOPs.cpp:881
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
Definition: NetworkOPs.cpp:729
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
Definition: NetworkOPs.cpp:118
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
Definition: NetworkOPs.cpp:717
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
Definition: NetworkOPs.cpp:262
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Definition: NetworkOPs.cpp:744
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, const bool bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
Definition: NetworkOPs.cpp:730
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
Definition: NetworkOPs.cpp:124
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:224
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
Definition: NetworkOPs.cpp:724
beast::Journal m_journal
Definition: NetworkOPs.cpp:715
SubInfoMapType mSubAccount
Definition: NetworkOPs.cpp:739
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
Definition: NetworkOPs.cpp:779
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
Definition: NetworkOPs.cpp:996
bool unsubValidations(std::uint64_t uListener) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
Definition: NetworkOPs.cpp:728
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
Definition: NetworkOPs.cpp:934
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
Definition: NetworkOPs.cpp:759
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
Definition: NetworkOPs.cpp:769
std::atomic< bool > unlBlocked_
Definition: NetworkOPs.cpp:726
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
Definition: NetworkOPs.cpp:719
std::atomic< bool > needNetworkLedger_
Definition: NetworkOPs.cpp:723
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
Definition: NetworkOPs.cpp:777
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
Definition: NetworkOPs.cpp:893
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void endConsensus() override
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
Definition: NetworkOPs.cpp:737
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
Definition: NetworkOPs.cpp:924
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
Definition: NetworkOPs.cpp:761
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
Definition: NetworkOPs.cpp:875
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
Definition: NetworkOPs.cpp:772
void stop() override
Definition: NetworkOPs.cpp:564
void clearNeedNetworkLedger() override
Definition: NetworkOPs.cpp:887
DispatchState mDispatchState
Definition: NetworkOPs.cpp:774
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
Definition: NetworkOPs.cpp:740
bool checkLastClosedLedger(const Overlay::PeerSequence &, uint256 &networkClosed)
void reportFeeChange() override
void setMode(OperatingMode om, const char *reason) override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
Definition: NetworkOPs.cpp:899
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
Definition: NetworkOPs.cpp:780
Application & app_
Definition: NetworkOPs.cpp:714
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
Definition: NetworkOPs.cpp:735
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
Definition: NetworkOPs.cpp:742
std::atomic< bool > amendmentWarned_
Definition: NetworkOPs.cpp:725
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
Definition: NetworkOPs.cpp:905
bool beginConsensus(uint256 const &networkClosed) override
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition: NetworkOPs.h:87
void getCountsJson(Json::Value &obj)
Definition: Database.cpp:266
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:50
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:56
BookListeners::pointer getBookListeners(Book const &)
BookListeners::pointer makeBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, const AcceptedLedgerTx &alTx, MultiApiJson const &jvObj)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
Definition: RCLConsensus.h:51
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
Definition: RCLConsensus.h:443
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Definition: RCLConsensus.h:456
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:44
Represents a set of transactions in RCLConsensus.
Definition: RCLCxTx.h:66
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition: RFC1751.cpp:498
A view into a ledger.
Definition: ReadView.h:55
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition: STAmount.cpp:475
std::string getText() const override
Definition: STAmount.cpp:515
Issue const & issue() const
Definition: STAmount.h:487
std::optional< T > get(std::string const &name) const
Definition: BasicConfig.h:141
std::size_t size() const noexcept
Definition: Serializer.h:72
void const * data() const noexcept
Definition: Serializer.h:78
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition: TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition: TxQ.cpp:1777
static time_point now()
Definition: UptimeClock.cpp:63
Validator keys and manifest as set in configuration file.
Definition: ValidatorKeys.h:37
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition: XRPAmount.h:262
Json::Value jsonClipped() const
Definition: XRPAmount.h:218
iterator begin()
Definition: base_uint.h:135
static constexpr std::size_t size()
Definition: base_uint.h:525
bool isZero() const
Definition: base_uint.h:539
bool isNonZero() const
Definition: base_uint.h:544
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition: json_value.h:42
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
int Int
Definition: json_forwards.h:26
unsigned int UInt
Definition: json_forwards.h:27
void rngfill(void *buffer, std::size_t bytes, Generator &g)
Definition: rngfill.h:33
std::string const & getVersionString()
Server version.
Definition: BuildInfo.cpp:65
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition: BookChanges.h:47
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition: Indexes.cpp:160
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition: Indexes.cpp:356
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition: Indexes.cpp:250
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:30
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:106
STAmount divide(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:87
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition: STTx.cpp:604
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition: View.cpp:442
@ fhZERO_IF_FROZEN
Definition: View.h:80
@ fhIGNORE_FREEZE
Definition: View.h:80
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition: View.cpp:136
std::uint64_t getQuality(uint256 const &uBase)
Definition: Indexes.cpp:125
@ INCLUDED
Definition: Transaction.h:49
@ OBSOLETE
Definition: Transaction.h:54
@ INVALID
Definition: Transaction.h:48
error_code_i
Definition: ErrorCodes.h:40
@ rpcSUCCESS
Definition: ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition: ErrorCodes.h:84
@ rpcINTERNAL
Definition: ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
Definition: SecretKey.cpp:351
auto constexpr muldiv_max
Definition: mulDiv.h:29
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition: LocalTxs.cpp:197
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition: View.cpp:650
STAmount amountFromQuality(std::uint64_t rate)
Definition: STAmount.cpp:822
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition: ErrorCodes.h:167
@ warnRPC_UNSUPPORTED_MAJORITY
Definition: ErrorCodes.h:165
@ warnRPC_AMENDMENT_BLOCKED
Definition: ErrorCodes.h:166
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition: NetworkOPs.h:66
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:47
bool isTesSuccess(TER x)
Definition: TER.h:656
bool isTerRetry(TER x)
Definition: TER.h:650
AccountID calcAccountID(PublicKey const &pk)
Definition: AccountID.cpp:160
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Definition: csprng.cpp:99
Json::Value rpcError(int iError)
Definition: RPCErr.cpp:29
@ tefPAST_SEQ
Definition: TER.h:175
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
Definition: NetworkOPs.cpp:844
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
bool isTemMalformed(TER x)
Definition: TER.h:638
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition: View.cpp:147
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition: ApiVersion.h:102
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:75
@ tesSUCCESS
Definition: TER.h:242
uint256 getQualityNext(uint256 const &uBase)
Definition: Indexes.cpp:117
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition: View.cpp:308
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition: ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition: chrono.h:93
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:629
FeeSetup setup_FeeVote(Section const &section)
Definition: Config.cpp:1090
Number root(Number f, unsigned d)
Definition: Number.cpp:630
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Definition: mulDiv.cpp:27
ApplyFlags
Definition: ApplyView.h:30
@ tapFAIL_HARD
Definition: ApplyView.h:35
@ tapUNLIMITED
Definition: ApplyView.h:42
@ tapNONE
Definition: ApplyView.h:31
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:37
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition: Seed.cpp:69
@ terQUEUED
Definition: TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition: TER.cpp:236
@ jtNETOP_CLUSTER
Definition: Job.h:75
@ jtCLIENT_FEE_CHANGE
Definition: Job.h:47
@ jtTRANSACTION
Definition: Job.h:62
@ jtTXN_PROC
Definition: Job.h:82
@ jtCLIENT_CONSENSUS
Definition: Job.h:48
@ jtBATCH
Definition: Job.h:65
@ jtCLIENT_ACCT_HIST
Definition: Job.h:49
uint256 getBookBase(Book const &book)
Definition: Indexes.cpp:98
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition: View.cpp:173
static std::uint32_t trunc32(std::uint64_t v)
@ temBAD_SIGNATURE
Definition: TER.h:105
static auto const genesisAccountId
Definition: NetworkOPs.cpp:857
STL namespace.
T ref(T... args)
T reset(T... args)
T set_intersection(T... args)
std::string serialized
The manifest in serialized form.
Definition: Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition: Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition: Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition: Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Definition: Manifest.h:86
Server fees published on server subscription.
Definition: NetworkOPs.cpp:201
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
Definition: NetworkOPs.cpp:220
bool operator==(ServerFeeSummary const &b) const
Definition: NetworkOPs.cpp:212
beast::insight::Gauge full_transitions
Definition: NetworkOPs.cpp:831
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:786
beast::insight::Hook hook
Definition: NetworkOPs.cpp:820
beast::insight::Gauge connected_duration
Definition: NetworkOPs.cpp:822
beast::insight::Gauge tracking_duration
Definition: NetworkOPs.cpp:824
beast::insight::Gauge connected_transitions
Definition: NetworkOPs.cpp:828
beast::insight::Gauge disconnected_transitions
Definition: NetworkOPs.cpp:827
beast::insight::Gauge syncing_duration
Definition: NetworkOPs.cpp:823
beast::insight::Gauge tracking_transitions
Definition: NetworkOPs.cpp:830
beast::insight::Gauge full_duration
Definition: NetworkOPs.cpp:825
beast::insight::Gauge disconnected_duration
Definition: NetworkOPs.cpp:821
beast::insight::Gauge syncing_transitions
Definition: NetworkOPs.cpp:829
SubAccountHistoryIndex(AccountID const &accountId)
Definition: NetworkOPs.cpp:678
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:697
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:692
Represents a transfer rate.
Definition: Rate.h:38
Data format for exchanging consumption information across peers.
Definition: Gossip.h:30
std::vector< Item > items
Definition: Gossip.h:42
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition: TxQ.h:162
void set(const char *key, auto const &v)
Definition: MultiApiJson.h:83
IsMemberResult isMember(const char *key) const
Definition: MultiApiJson.h:94
Select all peers (except optional excluded) that are in our cluster.
Definition: predicates.h:137
Sends a message to all peers.
Definition: predicates.h:32
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)