// NetworkOPs.cpp — capture of the rippled source as rendered by a code
// browser; the original page chrome ("rippled", "Loading...", "Searching...",
// "No Matches", "NetworkOPs.cpp") has been folded into this comment.
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/misc/AmendmentTable.h>
32#include <xrpld/app/misc/DeliverMax.h>
33#include <xrpld/app/misc/HashRouter.h>
34#include <xrpld/app/misc/LoadFeeTrack.h>
35#include <xrpld/app/misc/NetworkOPs.h>
36#include <xrpld/app/misc/Transaction.h>
37#include <xrpld/app/misc/TxQ.h>
38#include <xrpld/app/misc/ValidatorKeys.h>
39#include <xrpld/app/misc/ValidatorList.h>
40#include <xrpld/app/misc/detail/AccountTxPaging.h>
41#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
42#include <xrpld/app/tx/apply.h>
43#include <xrpld/consensus/Consensus.h>
44#include <xrpld/consensus/ConsensusParms.h>
45#include <xrpld/overlay/Cluster.h>
46#include <xrpld/overlay/Overlay.h>
47#include <xrpld/overlay/predicates.h>
48#include <xrpld/perflog/PerfLog.h>
49#include <xrpld/rpc/BookChanges.h>
50#include <xrpld/rpc/DeliveredAmount.h>
51#include <xrpld/rpc/MPTokenIssuanceID.h>
52#include <xrpld/rpc/ServerHandler.h>
53#include <xrpl/basics/CanProcess.h>
54#include <xrpl/basics/UptimeClock.h>
55#include <xrpl/basics/mulDiv.h>
56#include <xrpl/basics/safe_cast.h>
57#include <xrpl/beast/rfc2616.h>
58#include <xrpl/beast/utility/rngfill.h>
59#include <xrpl/crypto/RFC1751.h>
60#include <xrpl/crypto/csprng.h>
61#include <xrpl/json/to_string.h>
62#include <xrpl/protocol/BuildInfo.h>
63#include <xrpl/protocol/Feature.h>
64#include <xrpl/protocol/MultiApiJson.h>
65#include <xrpl/protocol/RPCErr.h>
66#include <xrpl/protocol/STParsedJSON.h>
67#include <xrpl/protocol/jss.h>
68#include <xrpl/resource/Fees.h>
69#include <xrpl/resource/ResourceManager.h>
70#include <boost/asio/ip/host_name.hpp>
71#include <boost/asio/steady_timer.hpp>
72
73#include <algorithm>
74#include <exception>
75#include <mutex>
76#include <optional>
77#include <set>
78#include <string>
79#include <tuple>
80#include <unordered_map>
81#include <utility>
82
83namespace ripple {
84
85class NetworkOPsImp final : public NetworkOPs
86{
92 {
93 public:
95 bool const admin;
96 bool const local;
98 bool applied = false;
100
103 bool a,
104 bool l,
105 FailHard f)
106 : transaction(t), admin(a), local(l), failType(f)
107 {
108 XRPL_ASSERT(
110 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
111 "valid inputs");
112 }
113 };
114
118 enum class DispatchState : unsigned char {
119 none,
120 scheduled,
121 running,
122 };
123
125
141 {
142 struct Counters
143 {
144 explicit Counters() = default;
145
148 };
149
153 std::chrono::steady_clock::time_point start_ =
155 std::chrono::steady_clock::time_point const processStart_ = start_;
158
159 public:
161 {
163 .transitions = 1;
164 }
165
172 void
174
180 void
181 json(Json::Value& obj) const;
182
184 {
186 decltype(mode_) mode;
187 decltype(start_) start;
189 };
190
193 {
196 }
197 };
198
201 {
202 ServerFeeSummary() = default;
203
205 XRPAmount fee,
206 TxQ::Metrics&& escalationMetrics,
207 LoadFeeTrack const& loadFeeTrack);
208 bool
209 operator!=(ServerFeeSummary const& b) const;
210
211 bool
213 {
214 return !(*this != b);
215 }
216
221 };
222
223public:
225 Application& app,
227 bool standalone,
228 std::size_t minPeerCount,
229 bool start_valid,
230 JobQueue& job_queue,
232 ValidatorKeys const& validatorKeys,
233 boost::asio::io_service& io_svc,
234 beast::Journal journal,
235 beast::insight::Collector::ptr const& collector)
236 : app_(app)
237 , m_journal(journal)
240 , heartbeatTimer_(io_svc)
241 , clusterTimer_(io_svc)
242 , accountHistoryTxTimer_(io_svc)
243 , mConsensus(
244 app,
246 setup_FeeVote(app_.config().section("voting")),
247 app_.logs().journal("FeeVote")),
249 *m_localTX,
250 app.getInboundTransactions(),
251 beast::get_abstract_clock<std::chrono::steady_clock>(),
252 validatorKeys,
253 app_.logs().journal("LedgerConsensus"))
255 , m_job_queue(job_queue)
256 , m_standalone(standalone)
257 , minPeerCount_(start_valid ? 0 : minPeerCount)
258 , m_stats(std::bind(&NetworkOPsImp::collect_metrics, this), collector)
259 {
260 }
261
262 ~NetworkOPsImp() override
263 {
264 // This clear() is necessary to ensure the shared_ptrs in this map get
265 // destroyed NOW because the objects in this map invoke methods on this
266 // class when they are destroyed
268 }
269
270public:
272 getOperatingMode() const override;
273
275 strOperatingMode(OperatingMode const mode, bool const admin) const override;
276
278 strOperatingMode(bool const admin = false) const override;
279
280 //
281 // Transaction operations.
282 //
283
284 // Must complete immediately.
285 void
287
288 void
290 std::shared_ptr<Transaction>& transaction,
291 bool bUnlimited,
292 bool bLocal,
293 FailHard failType) override;
294
303 void
306 bool bUnlimited,
307 FailHard failType);
308
318 void
321 bool bUnlimited,
322 FailHard failtype);
323
327 void
329
335 void
337
338 //
339 // Owner functions.
340 //
341
345 AccountID const& account) override;
346
347 //
348 // Book functions.
349 //
350
351 void
354 Book const&,
355 AccountID const& uTakerID,
356 const bool bProof,
357 unsigned int iLimit,
358 Json::Value const& jvMarker,
359 Json::Value& jvResult) override;
360
361 // Ledger proposal/close functions.
362 bool
364
365 bool
368 std::string const& source) override;
369
370 void
371 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
372
373 // Network state machine.
374
375 // Used for the "jump" case.
376private:
377 void
379 bool
381
382public:
383 bool
384 beginConsensus(uint256 const& networkClosed) override;
385 void
386 endConsensus() override;
387 void
388 setStandAlone() override;
389
393 void
394 setStateTimer() override;
395
396 void
397 setNeedNetworkLedger() override;
398 void
399 clearNeedNetworkLedger() override;
400 bool
401 isNeedNetworkLedger() override;
402 bool
403 isFull() override;
404
405 void
406 setMode(OperatingMode om, const char* reason) override;
407
408 bool
409 isBlocked() override;
410 bool
411 isAmendmentBlocked() override;
412 void
413 setAmendmentBlocked() override;
414 bool
415 isAmendmentWarned() override;
416 void
417 setAmendmentWarned() override;
418 void
419 clearAmendmentWarned() override;
420 bool
421 isUNLBlocked() override;
422 void
423 setUNLBlocked() override;
424 void
425 clearUNLBlocked() override;
426 void
427 consensusViewChange() override;
428
430 getConsensusInfo() override;
432 getServerInfo(bool human, bool admin, bool counters) override;
433 void
434 clearLedgerFetch() override;
436 getLedgerFetchInfo() override;
439 std::optional<std::chrono::milliseconds> consensusDelay) override;
440 void
441 reportFeeChange() override;
442 void
444
445 void
446 updateLocalTx(ReadView const& view) override;
448 getLocalTxCount() override;
449
450 //
451 // Monitoring: publisher side.
452 //
453 void
454 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
455 void
458 std::shared_ptr<STTx const> const& transaction,
459 TER result) override;
460 void
461 pubValidation(std::shared_ptr<STValidation> const& val) override;
462
463 //--------------------------------------------------------------------------
464 //
465 // InfoSub::Source.
466 //
467 void
469 InfoSub::ref ispListener,
470 hash_set<AccountID> const& vnaAccountIDs,
471 bool rt) override;
472 void
474 InfoSub::ref ispListener,
475 hash_set<AccountID> const& vnaAccountIDs,
476 bool rt) override;
477
478 // Just remove the subscription from the tracking
479 // not from the InfoSub. Needed for InfoSub destruction
480 void
482 std::uint64_t seq,
483 hash_set<AccountID> const& vnaAccountIDs,
484 bool rt) override;
485
487 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
488 override;
489 void
491 InfoSub::ref ispListener,
492 AccountID const& account,
493 bool historyOnly) override;
494
495 void
497 std::uint64_t seq,
498 AccountID const& account,
499 bool historyOnly) override;
500
501 bool
502 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
503 bool
504 unsubLedger(std::uint64_t uListener) override;
505
506 bool
507 subBookChanges(InfoSub::ref ispListener) override;
508 bool
509 unsubBookChanges(std::uint64_t uListener) override;
510
511 bool
512 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
513 override;
514 bool
515 unsubServer(std::uint64_t uListener) override;
516
517 bool
518 subBook(InfoSub::ref ispListener, Book const&) override;
519 bool
520 unsubBook(std::uint64_t uListener, Book const&) override;
521
522 bool
523 subManifests(InfoSub::ref ispListener) override;
524 bool
525 unsubManifests(std::uint64_t uListener) override;
526 void
527 pubManifest(Manifest const&) override;
528
529 bool
530 subTransactions(InfoSub::ref ispListener) override;
531 bool
532 unsubTransactions(std::uint64_t uListener) override;
533
534 bool
535 subRTTransactions(InfoSub::ref ispListener) override;
536 bool
537 unsubRTTransactions(std::uint64_t uListener) override;
538
539 bool
540 subValidations(InfoSub::ref ispListener) override;
541 bool
542 unsubValidations(std::uint64_t uListener) override;
543
544 bool
545 subPeerStatus(InfoSub::ref ispListener) override;
546 bool
547 unsubPeerStatus(std::uint64_t uListener) override;
548 void
549 pubPeerStatus(std::function<Json::Value(void)> const&) override;
550
551 bool
552 subConsensus(InfoSub::ref ispListener) override;
553 bool
554 unsubConsensus(std::uint64_t uListener) override;
555
557 findRpcSub(std::string const& strUrl) override;
559 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
560 bool
561 tryRemoveRpcSub(std::string const& strUrl) override;
562
563 void
564 stop() override
565 {
566 {
567 boost::system::error_code ec;
568 heartbeatTimer_.cancel(ec);
569 if (ec)
570 {
571 JLOG(m_journal.error())
572 << "NetworkOPs: heartbeatTimer cancel error: "
573 << ec.message();
574 }
575
576 ec.clear();
577 clusterTimer_.cancel(ec);
578 if (ec)
579 {
580 JLOG(m_journal.error())
581 << "NetworkOPs: clusterTimer cancel error: "
582 << ec.message();
583 }
584
585 ec.clear();
586 accountHistoryTxTimer_.cancel(ec);
587 if (ec)
588 {
589 JLOG(m_journal.error())
590 << "NetworkOPs: accountHistoryTxTimer cancel error: "
591 << ec.message();
592 }
593 }
594 // Make sure that any waitHandlers pending in our timers are done.
595 using namespace std::chrono_literals;
596 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
597 }
598
599 void
600 stateAccounting(Json::Value& obj) override;
601
602private:
603 void
604 setTimer(
605 boost::asio::steady_timer& timer,
606 std::chrono::milliseconds const& expiry_time,
607 std::function<void()> onExpire,
608 std::function<void()> onError);
609 void
611 void
613 void
615 void
617
619 transJson(
620 std::shared_ptr<STTx const> const& transaction,
621 TER result,
622 bool validated,
625
626 void
629 AcceptedLedgerTx const& transaction,
630 bool last);
631
632 void
635 AcceptedLedgerTx const& transaction,
636 bool last);
637
638 void
641 std::shared_ptr<STTx const> const& transaction,
642 TER result);
643
644 void
645 pubServer();
646 void
648
650 getHostId(bool forAdmin);
651
652private:
656
657 /*
658 * With a validated ledger to separate history and future, the node
659 * streams historical txns with negative indexes starting from -1,
660 * and streams future txns starting from index 0.
661 * The SubAccountHistoryIndex struct maintains these indexes.
662 * It also has a flag stopHistorical_ for stopping streaming
663 * the historical txns.
664 */
666 {
668 // forward
670 // separate backward and forward
672 // history, backward
677
679 : accountId_(accountId)
680 , forwardTxIndex_(0)
683 , historyTxIndex_(-1)
684 , haveHistorical_(false)
685 , stopHistorical_(false)
686 {
687 }
688 };
690 {
693 };
695 {
698 };
701
705 void
709 void
711 void
713
716
718
720
722
727
729 boost::asio::steady_timer heartbeatTimer_;
730 boost::asio::steady_timer clusterTimer_;
731 boost::asio::steady_timer accountHistoryTxTimer_;
732
734
736
738
741
743
745
746 enum SubTypes {
747 sLedger, // Accepted ledgers.
748 sManifests, // Received validator manifests.
749 sServer, // When server changes connectivity state.
750 sTransactions, // All accepted transactions.
751 sRTTransactions, // All proposed and accepted transactions.
752 sValidations, // Received validations.
753 sPeerStatus, // Peer status changes.
754 sConsensusPhase, // Consensus phase
755 sBookChanges, // Per-ledger order book changes
756 sLastEntry // Any new entry must be ADDED ABOVE this one
757 };
758
760
762
764
765 // Whether we are in standalone mode.
766 bool const m_standalone;
767
768 // The number of nodes that we need to consider ourselves connected.
770
771 // Transaction batching.
776
778
781
782private:
783 struct Stats
784 {
785 template <class Handler>
787 Handler const& handler,
788 beast::insight::Collector::ptr const& collector)
789 : hook(collector->make_hook(handler))
790 , disconnected_duration(collector->make_gauge(
791 "State_Accounting",
792 "Disconnected_duration"))
793 , connected_duration(collector->make_gauge(
794 "State_Accounting",
795 "Connected_duration"))
797 collector->make_gauge("State_Accounting", "Syncing_duration"))
798 , tracking_duration(collector->make_gauge(
799 "State_Accounting",
800 "Tracking_duration"))
802 collector->make_gauge("State_Accounting", "Full_duration"))
803 , disconnected_transitions(collector->make_gauge(
804 "State_Accounting",
805 "Disconnected_transitions"))
806 , connected_transitions(collector->make_gauge(
807 "State_Accounting",
808 "Connected_transitions"))
809 , syncing_transitions(collector->make_gauge(
810 "State_Accounting",
811 "Syncing_transitions"))
812 , tracking_transitions(collector->make_gauge(
813 "State_Accounting",
814 "Tracking_transitions"))
816 collector->make_gauge("State_Accounting", "Full_transitions"))
817 {
818 }
819
826
832 };
833
834 std::mutex m_statsMutex; // Mutex to lock m_stats
836
837private:
838 void
840};
841
842//------------------------------------------------------------------------------
843
845 {"disconnected", "connected", "syncing", "tracking", "full"}};
846
848
856
857static auto const genesisAccountId = calcAccountID(
859 .first);
860
861//------------------------------------------------------------------------------
862inline OperatingMode
864{
865 return mMode;
866}
867
868inline std::string
869NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
870{
871 return strOperatingMode(mMode, admin);
872}
873
874inline void
876{
877 setMode(OperatingMode::FULL, "setStandAlone");
878}
879
880inline void
882{
883 needNetworkLedger_ = true;
884}
885
886inline void
888{
889 needNetworkLedger_ = false;
890}
891
892inline bool
894{
895 return needNetworkLedger_;
896}
897
898inline bool
900{
902}
903
906{
907 static std::string const hostname = boost::asio::ip::host_name();
908
909 if (forAdmin)
910 return hostname;
911
912 // For non-admin uses hash the node public key into a
913 // single RFC1751 word:
914 static std::string const shroudedHostId = [this]() {
915 auto const& id = app_.nodeIdentity();
916
917 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
918 }();
919
920 return shroudedHostId;
921}
922
923void
925{
927
928 // Only do this work if a cluster is configured
929 if (app_.cluster().size() != 0)
931}
932
/** Arm `timer` to invoke `onExpire` after `expiry_time`.

    The completion handler is wrapped by waitHandlerCounter_ so shutdown can
    wait for it; if the counter has already been joined, the timer is not
    started at all.  `onError` is invoked to recover (typically by re-arming)
    when the wait completes with an unexpected error.

    NOTE(review): this capture of the source is missing the line naming the
    definition (expected `NetworkOPsImp::setTimer(`) between the return type
    and the parameter list — confirm against the original file.
*/
void
    boost::asio::steady_timer& timer,
    const std::chrono::milliseconds& expiry_time,
    std::function<void()> onExpire,
    std::function<void()> onError)
{
    // Only start the timer if waitHandlerCounter_ is not yet joined.
    if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
            [this, onExpire, onError](boost::system::error_code const& e) {
                // Run the payload only on clean expiry while still running.
                if ((e.value() == boost::system::errc::success) &&
                    (!m_job_queue.isStopped()))
                {
                    onExpire();
                }
                // Recover as best we can if an unexpected error occurs.
                // (operation_aborted is the normal result of cancel().)
                if (e.value() != boost::system::errc::success &&
                    e.value() != boost::asio::error::operation_aborted)
                {
                    // Try again later and hope for the best.
                    JLOG(m_journal.error())
                        << "Timer got error '" << e.message()
                        << "'. Restarting timer.";
                    onError();
                }
            }))
    {
        timer.expires_from_now(expiry_time);
        timer.async_wait(std::move(*optionalCountedHandler));
    }
}
964
965void
966NetworkOPsImp::setHeartbeatTimer()
967{
968 setTimer(
969 heartbeatTimer_,
970 mConsensus.parms().ledgerGRANULARITY,
971 [this]() {
972 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
973 processHeartbeatTimer();
974 });
975 },
976 [this]() { setHeartbeatTimer(); });
977}
978
979void
980NetworkOPsImp::setClusterTimer()
981{
982 using namespace std::chrono_literals;
983
984 setTimer(
985 clusterTimer_,
986 10s,
987 [this]() {
988 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
989 processClusterTimer();
990 });
991 },
992 [this]() { setClusterTimer(); });
993}
994
995void
996NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
997{
998 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
999 << toBase58(subInfo.index_->accountId_);
1000 using namespace std::chrono_literals;
1001 setTimer(
1002 accountHistoryTxTimer_,
1003 4s,
1004 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1005 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1006}
1007
/** Heartbeat: reconciles the operating mode with the current peer count and
    drives the consensus timer.  Always re-arms itself before returning. */
void
NetworkOPsImp::processHeartbeatTimer()
{
    {
        std::unique_lock lock{app_.getMasterMutex()};

        // VFALCO NOTE This is for diagnosing a crash on exit
        LoadManager& mgr(app_.getLoadManager());
        // NOTE(review): `mgr` is otherwise unused in this capture of the
        // source — a statement using it appears to be missing here; confirm
        // against the original file.

        std::size_t const numPeers = app_.overlay().size();

        // do we have sufficient peers? If not, we are disconnected.
        if (numPeers < minPeerCount_)
        {
            if (mMode != OperatingMode::DISCONNECTED)
            {
                setMode(
                    OperatingMode::DISCONNECTED,
                    "Heartbeat: insufficient peers");
                JLOG(m_journal.warn())
                    << "Node count (" << numPeers << ") has fallen "
                    << "below required minimum (" << minPeerCount_ << ").";
            }

            // MasterMutex lock need not be held to call setHeartbeatTimer()
            lock.unlock();
            // We do not call mConsensus.timerEntry until there are enough
            // peers providing meaningful inputs to consensus
            setHeartbeatTimer();
            return;
        }

        if (mMode == OperatingMode::DISCONNECTED)
        {
            setMode(OperatingMode::CONNECTED, "Heartbeat: sufficient peers");
            JLOG(m_journal.info())
                << "Node count (" << numPeers << ") is sufficient.";
        }

        // Check if the last validated ledger forces a change between these
        // states.
        if (mMode == OperatingMode::SYNCING)
            setMode(OperatingMode::SYNCING, "Heartbeat: check syncing");
        else if (mMode == OperatingMode::CONNECTED)
            setMode(OperatingMode::CONNECTED, "Heartbeat: check connected");
    }

    // Advance the consensus state machine using the current network time.
    mConsensus.timerEntry(app_.timeKeeper().closeTime());

    // Publish a consensus phase change to subscribers, if one occurred.
    const ConsensusPhase currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
    }

    // Re-arm for the next heartbeat.
    setHeartbeatTimer();
}
1067
1068void
1069NetworkOPsImp::processClusterTimer()
1070{
1071 if (app_.cluster().size() == 0)
1072 return;
1073
1074 using namespace std::chrono_literals;
1075
1076 bool const update = app_.cluster().update(
1077 app_.nodeIdentity().first,
1078 "",
1079 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1080 ? app_.getFeeTrack().getLocalFee()
1081 : 0,
1082 app_.timeKeeper().now());
1083
1084 if (!update)
1085 {
1086 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1087 setClusterTimer();
1088 return;
1089 }
1090
1091 protocol::TMCluster cluster;
1092 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1093 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1094 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1095 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1096 n.set_nodeload(node.getLoadFee());
1097 if (!node.name().empty())
1098 n.set_nodename(node.name());
1099 });
1100
1101 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1102 for (auto& item : gossip.items)
1103 {
1104 protocol::TMLoadSource& node = *cluster.add_loadsources();
1105 node.set_name(to_string(item.address));
1106 node.set_cost(item.balance);
1107 }
1108 app_.overlay().foreach(send_if(
1109 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1110 peer_in_cluster()));
1111 setClusterTimer();
1112}
1113
1114//------------------------------------------------------------------------------
1115
// Translate an OperatingMode into the string reported to clients.  Admin
// callers in FULL mode additionally see the consensus-level distinction
// between "proposing" and "validating".
// NOTE(review): the return-type line (std::string, per the single-argument
// overload above) is missing from this capture of the source — confirm.
NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
    const
{
    if (mode == OperatingMode::FULL && admin)
    {
        auto const consensusMode = mConsensus.mode();
        if (consensusMode != ConsensusMode::wrongLedger)
        {
            if (consensusMode == ConsensusMode::proposing)
                return "proposing";

            if (mConsensus.validating())
                return "validating";
        }
    }

    // Otherwise report the coarse state name from the states_ table.
    return states_[static_cast<std::size_t>(mode)];
}
1135
1136void
1137NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1138{
1139 if (isNeedNetworkLedger())
1140 {
1141 // Nothing we can do if we've never been in sync
1142 return;
1143 }
1144
1145 // this is an asynchronous interface
1146 auto const trans = sterilize(*iTrans);
1147
1148 auto const txid = trans->getTransactionID();
1149 auto const flags = app_.getHashRouter().getFlags(txid);
1150
1151 if ((flags & SF_BAD) != 0)
1152 {
1153 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1154 return;
1155 }
1156
1157 try
1158 {
1159 auto const [validity, reason] = checkValidity(
1160 app_.getHashRouter(),
1161 *trans,
1162 m_ledgerMaster.getValidatedRules(),
1163 app_.config());
1164
1165 if (validity != Validity::Valid)
1166 {
1167 JLOG(m_journal.warn())
1168 << "Submitted transaction invalid: " << reason;
1169 return;
1170 }
1171 }
1172 catch (std::exception const& ex)
1173 {
1174 JLOG(m_journal.warn())
1175 << "Exception checking transaction " << txid << ": " << ex.what();
1176
1177 return;
1178 }
1179
1180 std::string reason;
1181
1182 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1183
1184 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1185 auto t = tx;
1186 processTransaction(t, false, false, FailHard::no);
1187 });
1188}
1189
1190void
1191NetworkOPsImp::processTransaction(
1192 std::shared_ptr<Transaction>& transaction,
1193 bool bUnlimited,
1194 bool bLocal,
1195 FailHard failType)
1196{
1197 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1198 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1199
1200 if ((newFlags & SF_BAD) != 0)
1201 {
1202 // cached bad
1203 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1204 transaction->setStatus(INVALID);
1205 transaction->setResult(temBAD_SIGNATURE);
1206 return;
1207 }
1208
1209 // NOTE eahennis - I think this check is redundant,
1210 // but I'm not 100% sure yet.
1211 // If so, only cost is looking up HashRouter flags.
1212 auto const view = m_ledgerMaster.getCurrentLedger();
1213 auto const [validity, reason] = checkValidity(
1214 app_.getHashRouter(),
1215 *transaction->getSTransaction(),
1216 view->rules(),
1217 app_.config());
1218 XRPL_ASSERT(
1219 validity == Validity::Valid,
1220 "ripple::NetworkOPsImp::processTransaction : valid validity");
1221
1222 // Not concerned with local checks at this point.
1223 if (validity == Validity::SigBad)
1224 {
1225 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1226 transaction->setStatus(INVALID);
1227 transaction->setResult(temBAD_SIGNATURE);
1228 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1229 return;
1230 }
1231
1232 // canonicalize can change our pointer
1233 app_.getMasterTransaction().canonicalize(&transaction);
1234
1235 if (bLocal)
1236 doTransactionSync(transaction, bUnlimited, failType);
1237 else
1238 doTransactionAsync(transaction, bUnlimited, failType);
1239}
1240
1241void
1242NetworkOPsImp::doTransactionAsync(
1243 std::shared_ptr<Transaction> transaction,
1244 bool bUnlimited,
1245 FailHard failType)
1246{
1247 std::lock_guard lock(mMutex);
1248
1249 if (transaction->getApplying())
1250 return;
1251
1252 mTransactions.push_back(
1253 TransactionStatus(transaction, bUnlimited, false, failType));
1254 transaction->setApplying();
1255
1256 if (mDispatchState == DispatchState::none)
1257 {
1258 if (m_job_queue.addJob(
1259 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1260 {
1261 mDispatchState = DispatchState::scheduled;
1262 }
1263 }
1264}
1265
/** Apply a locally-submitted transaction, blocking until it has been
    processed.  Either runs the batch itself or waits (on mCond) for a
    concurrent batch job to finish applying this transaction. */
void
NetworkOPsImp::doTransactionSync(
    std::shared_ptr<Transaction> transaction,
    bool bUnlimited,
    FailHard failType)
{
    std::unique_lock<std::mutex> lock(mMutex);

    // Queue the transaction unless it is already awaiting application.
    if (!transaction->getApplying())
    {
        mTransactions.push_back(
            TransactionStatus(transaction, bUnlimited, true, failType));
        transaction->setApplying();
    }

    // Loop until this transaction has actually been applied; apply()
    // notifies mCond after each batch completes.
    do
    {
        if (mDispatchState == DispatchState::running)
        {
            // A batch processing job is already running, so wait.
            mCond.wait(lock);
        }
        else
        {
            apply(lock);

            if (mTransactions.size())
            {
                // More transactions need to be applied, but by another job.
                if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
                        transactionBatch();
                    }))
                {
                    mDispatchState = DispatchState::scheduled;
                }
            }
        }
    } while (transaction->getApplying());
}
1305
1306void
1307NetworkOPsImp::transactionBatch()
1308{
1309 std::unique_lock<std::mutex> lock(mMutex);
1310
1311 if (mDispatchState == DispatchState::running)
1312 return;
1313
1314 while (mTransactions.size())
1315 {
1316 apply(lock);
1317 }
1318}
1319
/** Apply a batch of queued transactions to the open ledger.

    Called with `batchLock` (locking mMutex) held; the lock is released while
    the transactions are applied under the master/ledger mutexes and
    re-acquired before returning.  Applied/queued transactions are published
    and relayed, and synchronous submitters waiting on mCond are woken.
*/
void
NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
{
    // NOTE(review): the declaration of `submit_held` (used below, and
    // swapped with mTransactions, so presumably a
    // std::vector<TransactionStatus>) is missing from this capture of the
    // source — confirm against the original file.
    std::vector<TransactionStatus> transactions;
    mTransactions.swap(transactions);
    XRPL_ASSERT(
        !transactions.empty(),
        "ripple::NetworkOPsImp::apply : non-empty transactions");
    XRPL_ASSERT(
        mDispatchState != DispatchState::running,
        "ripple::NetworkOPsImp::apply : is not running");

    mDispatchState = DispatchState::running;

    batchLock.unlock();

    {
        // Take the master and ledger mutexes together, deadlock-free.
        std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
        bool changed = false;
        {
            std::unique_lock ledgerLock{
                m_ledgerMaster.peekMutex(), std::defer_lock};
            std::lock(masterLock, ledgerLock);

            app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
                for (TransactionStatus& e : transactions)
                {
                    // we check before adding to the batch
                    ApplyFlags flags = tapNONE;
                    if (e.admin)
                        flags |= tapUNLIMITED;

                    if (e.failType == FailHard::yes)
                        flags |= tapFAIL_HARD;

                    auto const result = app_.getTxQ().apply(
                        app_, view, e.transaction->getSTransaction(), flags, j);
                    e.result = result.ter;
                    e.applied = result.applied;
                    changed = changed || result.applied;
                }
                return changed;
            });
        }
        if (changed)
            reportFeeChange();

        std::optional<LedgerIndex> validatedLedgerIndex;
        if (auto const l = m_ledgerMaster.getValidatedLedger())
            validatedLedgerIndex = l->info().seq;

        auto newOL = app_.openLedger().current();
        for (TransactionStatus& e : transactions)
        {
            e.transaction->clearSubmitResult();

            if (e.applied)
            {
                // Stream the provisional result to subscribers.
                pubProposedTransaction(
                    newOL, e.transaction->getSTransaction(), e.result);
                e.transaction->setApplied();
            }

            e.transaction->setResult(e.result);

            if (isTemMalformed(e.result))
                app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);

#ifdef DEBUG
            if (e.result != tesSUCCESS)
            {
                std::string token, human;

                if (transResultInfo(e.result, token, human))
                {
                    JLOG(m_journal.info())
                        << "TransactionResult: " << token << ": " << human;
                }
            }
#endif

            bool addLocal = e.local;

            if (e.result == tesSUCCESS)
            {
                JLOG(m_journal.debug())
                    << "Transaction is now included in open ledger";
                e.transaction->setStatus(INCLUDED);

                // Release any held transaction from the same account that
                // was waiting on this one, and queue it for a later batch.
                auto const& txCur = e.transaction->getSTransaction();
                auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);
                if (txNext)
                {
                    std::string reason;
                    auto const trans = sterilize(*txNext);
                    auto t = std::make_shared<Transaction>(trans, reason, app_);
                    submit_held.emplace_back(t, false, false, FailHard::no);
                    t->setApplying();
                }
            }
            else if (e.result == tefPAST_SEQ)
            {
                // duplicate or conflict
                JLOG(m_journal.info()) << "Transaction is obsolete";
                e.transaction->setStatus(OBSOLETE);
            }
            else if (e.result == terQUEUED)
            {
                JLOG(m_journal.debug())
                    << "Transaction is likely to claim a"
                    << " fee, but is queued until fee drops";

                e.transaction->setStatus(HELD);
                // Add to held transactions, because it could get
                // kicked out of the queue, and this will try to
                // put it back.
                m_ledgerMaster.addHeldTransaction(e.transaction);
                e.transaction->setQueued();
                e.transaction->setKept();
            }
            else if (isTerRetry(e.result))
            {
                if (e.failType != FailHard::yes)
                {
                    // transaction should be held
                    JLOG(m_journal.debug())
                        << "Transaction should be held: " << e.result;
                    e.transaction->setStatus(HELD);
                    m_ledgerMaster.addHeldTransaction(e.transaction);
                    e.transaction->setKept();
                }
            }
            else
            {
                JLOG(m_journal.debug())
                    << "Status other than success " << e.result;
                e.transaction->setStatus(INVALID);
            }

            auto const enforceFailHard =
                e.failType == FailHard::yes && !isTesSuccess(e.result);

            // Remember local submissions so they can be re-applied to
            // future open ledgers.
            if (addLocal && !enforceFailHard)
            {
                m_localTX->push_back(
                    m_ledgerMaster.getCurrentLedgerIndex(),
                    e.transaction->getSTransaction());
                e.transaction->setKept();
            }

            // Relay anything applied or queued — plus local submissions
            // while not FULL — unless fail_hard was requested and violated.
            if ((e.applied ||
                 ((mMode != OperatingMode::FULL) &&
                  (e.failType != FailHard::yes) && e.local) ||
                 (e.result == terQUEUED)) &&
                !enforceFailHard)
            {
                auto const toSkip =
                    app_.getHashRouter().shouldRelay(e.transaction->getID());

                if (toSkip)
                {
                    protocol::TMTransaction tx;
                    Serializer s;

                    e.transaction->getSTransaction()->add(s);
                    tx.set_rawtransaction(s.data(), s.size());
                    tx.set_status(protocol::tsCURRENT);
                    tx.set_receivetimestamp(
                        app_.timeKeeper().now().time_since_epoch().count());
                    tx.set_deferred(e.result == terQUEUED);
                    // FIXME: This should be when we received it
                    app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
                    e.transaction->setBroadcast();
                }
            }

            if (validatedLedgerIndex)
            {
                // Record fee/sequence context so clients can track this
                // transaction against the validated ledger.
                auto [fee, accountSeq, availableSeq] =
                    app_.getTxQ().getTxRequiredFeeAndSeq(
                        *newOL, e.transaction->getSTransaction());
                e.transaction->setCurrentLedgerState(
                    *validatedLedgerIndex, fee, accountSeq, availableSeq);
            }
        }
    }

    batchLock.lock();

    for (TransactionStatus& e : transactions)
        e.transaction->clearApplying();

    // Queue any newly-released held transactions for the next batch.
    if (!submit_held.empty())
    {
        if (mTransactions.empty())
            mTransactions.swap(submit_held);
        else
            for (auto& e : submit_held)
                mTransactions.push_back(std::move(e));
    }

    // Wake synchronous submitters waiting in doTransactionSync().
    mCond.notify_all();

    mDispatchState = DispatchState::none;
}
1526
1527//
1528// Owner functions
1529//
1530
1532NetworkOPsImp::getOwnerInfo(
1534 AccountID const& account)
1535{
1536 Json::Value jvObjects(Json::objectValue);
1537 auto root = keylet::ownerDir(account);
1538 auto sleNode = lpLedger->read(keylet::page(root));
1539 if (sleNode)
1540 {
1541 std::uint64_t uNodeDir;
1542
1543 do
1544 {
1545 for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1546 {
1547 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1548 XRPL_ASSERT(
1549 sleCur,
1550 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1551
1552 switch (sleCur->getType())
1553 {
1554 case ltOFFER:
1555 if (!jvObjects.isMember(jss::offers))
1556 jvObjects[jss::offers] =
1558
1559 jvObjects[jss::offers].append(
1560 sleCur->getJson(JsonOptions::none));
1561 break;
1562
1563 case ltRIPPLE_STATE:
1564 if (!jvObjects.isMember(jss::ripple_lines))
1565 {
1566 jvObjects[jss::ripple_lines] =
1568 }
1569
1570 jvObjects[jss::ripple_lines].append(
1571 sleCur->getJson(JsonOptions::none));
1572 break;
1573
1574 case ltACCOUNT_ROOT:
1575 case ltDIR_NODE:
1576 default:
1577 UNREACHABLE(
1578 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1579 "type");
1580 break;
1581 }
1582 }
1583
1584 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1585
1586 if (uNodeDir)
1587 {
1588 sleNode = lpLedger->read(keylet::page(root, uNodeDir));
1589 XRPL_ASSERT(
1590 sleNode,
1591 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1592 }
1593 } while (uNodeDir);
1594 }
1595
1596 return jvObjects;
1597}
1598
1599//
1600// Other
1601//
1602
1603inline bool
1604NetworkOPsImp::isBlocked()
1605{
1606 return isAmendmentBlocked() || isUNLBlocked();
1607}
1608
// Whether this server has latched the "amendment blocked" flag (set by
// setAmendmentBlocked(); no clearing routine is visible in this file).
inline bool
NetworkOPsImp::isAmendmentBlocked()
{
    return amendmentBlocked_;
}
1614
void
NetworkOPsImp::setAmendmentBlocked()
{
    // Latch the flag first, then drop to CONNECTED. The order matters:
    // setMode() consults isBlocked() and will keep a blocked server
    // from being promoted above CONNECTED.
    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED, "setAmendmentBlocked");
}
1621
1622inline bool
1623NetworkOPsImp::isAmendmentWarned()
1624{
1625 return !amendmentBlocked_ && amendmentWarned_;
1626}
1627
// Mark that an unsupported amendment has reached majority (reported via
// getServerInfo warnings while not yet blocked).
inline void
NetworkOPsImp::setAmendmentWarned()
{
    amendmentWarned_ = true;
}
1633
// Clear the unsupported-amendment-majority warning flag.
inline void
NetworkOPsImp::clearAmendmentWarned()
{
    amendmentWarned_ = false;
}
1639
// Whether the server is blocked because its validator list (UNL) has
// expired; set by setUNLBlocked(), cleared by clearUNLBlocked().
inline bool
NetworkOPsImp::isUNLBlocked()
{
    return unlBlocked_;
}
1645
void
NetworkOPsImp::setUNLBlocked()
{
    // Latch the flag before changing mode: setMode() consults
    // isBlocked() and refuses to rise above CONNECTED while set.
    unlBlocked_ = true;
    setMode(OperatingMode::CONNECTED, "setUNLBlocked");
}
1652
// Clear the expired-validator-list block (e.g. after a fresh list is
// obtained); the server may then be promoted again by setMode().
inline void
NetworkOPsImp::clearUNLBlocked()
{
    unlBlocked_ = false;
}
1658
1659bool
1660NetworkOPsImp::checkLastClosedLedger(
1661 const Overlay::PeerSequence& peerList,
1662 uint256& networkClosed)
1663{
1664 // Returns true if there's an *abnormal* ledger issue, normal changing in
1665 // TRACKING mode should return false. Do we have sufficient validations for
1666 // our last closed ledger? Or do sufficient nodes agree? And do we have no
1667 // better ledger available? If so, we are either tracking or full.
1668
1669 JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";
1670
1671 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1672
1673 if (!ourClosed)
1674 return false;
1675
1676 uint256 closedLedger = ourClosed->info().hash;
1677 uint256 prevClosedLedger = ourClosed->info().parentHash;
1678 JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
1679 JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;
1680
1681 //-------------------------------------------------------------------------
1682 // Determine preferred last closed ledger
1683
1684 auto& validations = app_.getValidations();
1685 JLOG(m_journal.debug())
1686 << "ValidationTrie " << Json::Compact(validations.getJsonTrie());
1687
1688 // Will rely on peer LCL if no trusted validations exist
1690 peerCounts[closedLedger] = 0;
1691 if (mMode >= OperatingMode::TRACKING)
1692 peerCounts[closedLedger]++;
1693
1694 for (auto& peer : peerList)
1695 {
1696 uint256 peerLedger = peer->getClosedLedgerHash();
1697
1698 if (peerLedger.isNonZero())
1699 ++peerCounts[peerLedger];
1700 }
1701
1702 for (auto const& it : peerCounts)
1703 JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;
1704
1705 uint256 preferredLCL = validations.getPreferredLCL(
1706 RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
1707 m_ledgerMaster.getValidLedgerIndex(),
1708 peerCounts);
1709
1710 bool switchLedgers = preferredLCL != closedLedger;
1711 if (switchLedgers)
1712 closedLedger = preferredLCL;
1713 //-------------------------------------------------------------------------
1714 if (switchLedgers && (closedLedger == prevClosedLedger))
1715 {
1716 // don't switch to our own previous ledger
1717 JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
1718 networkClosed = ourClosed->info().hash;
1719 switchLedgers = false;
1720 }
1721 else
1722 networkClosed = closedLedger;
1723
1724 if (!switchLedgers)
1725 return false;
1726
1727 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1728
1729 if (!consensus)
1730 consensus = app_.getInboundLedgers().acquire(
1731 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1732
1733 if (consensus &&
1734 (!m_ledgerMaster.canBeCurrent(consensus) ||
1735 !m_ledgerMaster.isCompatible(
1736 *consensus, m_journal.debug(), "Not switching")))
1737 {
1738 // Don't switch to a ledger not on the validated chain
1739 // or with an invalid close time or sequence
1740 networkClosed = ourClosed->info().hash;
1741 return false;
1742 }
1743
1744 JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
1745 JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
1746 << getJson({*ourClosed, {}});
1747 JLOG(m_journal.info()) << "Net LCL " << closedLedger;
1748
1749 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1750 {
1751 setMode(OperatingMode::CONNECTED, "check LCL: not on consensus ledger");
1752 }
1753
1754 if (consensus)
1755 {
1756 // FIXME: If this rewinds the ledger sequence, or has the same
1757 // sequence, we should update the status on any stored transactions
1758 // in the invalidated ledgers.
1759 switchLastClosedLedger(consensus);
1760 }
1761
1762 return true;
1763}
1764
1765void
1766NetworkOPsImp::switchLastClosedLedger(
1767 std::shared_ptr<Ledger const> const& newLCL)
1768{
1769 // set the newLCL as our last closed ledger -- this is abnormal code
1770 JLOG(m_journal.error())
1771 << "JUMP last closed ledger to " << newLCL->info().hash;
1772
1773 clearNeedNetworkLedger();
1774
1775 // Update fee computations.
1776 app_.getTxQ().processClosedLedger(app_, *newLCL, true);
1777
1778 // Caller must own master lock
1779 {
1780 // Apply tx in old open ledger to new
1781 // open ledger. Then apply local tx.
1782
1783 auto retries = m_localTX->getTxSet();
1784 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1786 if (lastVal)
1787 rules = makeRulesGivenLedger(*lastVal, app_.config().features);
1788 else
1789 rules.emplace(app_.config().features);
1790 app_.openLedger().accept(
1791 app_,
1792 *rules,
1793 newLCL,
1794 OrderedTxs({}),
1795 false,
1796 retries,
1797 tapNONE,
1798 "jump",
1799 [&](OpenView& view, beast::Journal j) {
1800 // Stuff the ledger with transactions from the queue.
1801 return app_.getTxQ().accept(app_, view);
1802 });
1803 }
1804
1805 m_ledgerMaster.switchLCL(newLCL);
1806
1807 protocol::TMStatusChange s;
1808 s.set_newevent(protocol::neSWITCHED_LEDGER);
1809 s.set_ledgerseq(newLCL->info().seq);
1810 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1811 s.set_ledgerhashprevious(
1812 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
1813 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
1814
1815 app_.overlay().foreach(
1816 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
1817}
1818
/** Start a new consensus round on top of our current open ledger.

    @param networkClosed hash of the network's last closed ledger
                         (must be nonzero).
    @return true if the round was started; false if we lack the prior
            ledger (in which case we may demote FULL -> TRACKING).
*/
bool
NetworkOPsImp::beginConsensus(uint256 const& networkClosed)
{
    XRPL_ASSERT(
        networkClosed.isNonZero(),
        "ripple::NetworkOPsImp::beginConsensus : nonzero input");

    auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();

    JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
                           << " with LCL " << closingInfo.parentHash;

    auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);

    if (!prevLedger)
    {
        // this shouldn't happen unless we jump ledgers
        if (mMode == OperatingMode::FULL)
        {
            JLOG(m_journal.warn())
                << "beginConsensus Don't have LCL, going to tracking";
            setMode(OperatingMode::TRACKING, "beginConsensus: No LCL");
        }

        return false;
    }

    XRPL_ASSERT(
        prevLedger->info().hash == closingInfo.parentHash,
        "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
        "parent");
    XRPL_ASSERT(
        closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
        "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
        "hash");

    // Refresh trusted-validator bookkeeping before the round starts.
    if (prevLedger->rules().enabled(featureNegativeUNL))
        app_.validators().setNegativeUNL(prevLedger->negativeUNL());
    TrustChanges const changes = app_.validators().updateTrusted(
        app_.getValidations().getCurrentNodeIDs(),
        closingInfo.parentCloseTime,
        *this,
        app_.overlay(),
        app_.getHashRouter());

    if (!changes.added.empty() || !changes.removed.empty())
    {
        app_.getValidations().trustChanged(changes.added, changes.removed);
        // Update the AmendmentTable so it tracks the current validators.
        app_.getAmendmentTable().trustChanged(
            app_.validators().getQuorumKeys().second);
    }

    mConsensus.startRound(
        app_.timeKeeper().closeTime(),
        networkClosed,
        prevLedger,
        changes.removed,
        changes.added);

    // Publish a consensus-phase change if startRound moved the phase.
    const ConsensusPhase currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
    }

    JLOG(m_journal.debug()) << "Initiating consensus engine";
    return true;
}
1889
// Hand a trusted peer's proposal to the consensus engine. The boolean
// result comes straight from peerProposal (presumably whether the
// proposal was fresh/usable — confirm against the caller's relay logic).
bool
NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
{
    return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
}
1895
1896void
1897NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
1898{
1899 // We now have an additional transaction set
1900 // either created locally during the consensus process
1901 // or acquired from a peer
1902
1903 // Inform peers we have this set
1904 protocol::TMHaveTransactionSet msg;
1905 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
1906 msg.set_status(protocol::tsHAVE);
1907 app_.overlay().foreach(
1908 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
1909
1910 // We acquired it because consensus asked us to
1911 if (fromAcquire)
1912 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
1913}
1914
/** Post-consensus housekeeping: re-evaluate our sync state and kick off
    the next round.

    Cycles stale peer statuses still pointing at the pre-consensus
    ledger, checks whether we should jump ledgers, possibly promotes the
    operating mode, then begins a new consensus round.
*/
void
NetworkOPsImp::endConsensus()
{
    uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;

    // Peers still reporting our *previous* LCL are out of date.
    for (auto const& it : app_.overlay().getActivePeers())
    {
        if (it && (it->getClosedLedgerHash() == deadLedger))
        {
            JLOG(m_journal.trace()) << "Killing obsolete peer status";
            it->cycleStatus();
        }
    }

    uint256 networkClosed;
    bool ledgerChange =
        checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);

    if (networkClosed.isZero())
        return;

    // WRITEME: Unless we are in FULL and in the process of doing a consensus,
    // we must count how many nodes share our LCL, how many nodes disagree with
    // our LCL, and how many validations our LCL has. We also want to check
    // timing to make sure there shouldn't be a newer LCL. We need this
    // information to do the next three tests.

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::SYNCING)) &&
        !ledgerChange)
    {
        // Count number of peers that agree with us and UNL nodes whose
        // validations we have for LCL. If the ledger is good enough, go to
        // TRACKING - TODO
        if (!needNetworkLedger_)
            setMode(OperatingMode::TRACKING, "endConsensus: check tracking");
    }

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::TRACKING)) &&
        !ledgerChange)
    {
        // check if the ledger is good enough to go to FULL
        // Note: Do not go to FULL if we don't have the previous ledger
        // check if the ledger is bad enough to go to CONNECTED -- TODO
        auto current = m_ledgerMaster.getCurrentLedger();
        if (app_.timeKeeper().now() < (current->info().parentCloseTime +
                                       2 * current->info().closeTimeResolution))
        {
            setMode(OperatingMode::FULL, "endConsensus: check full");
        }
    }

    beginConsensus(networkClosed);
}
1970
1971void
1972NetworkOPsImp::consensusViewChange()
1973{
1974 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
1975 {
1976 setMode(OperatingMode::CONNECTED, "consensusViewChange");
1977 }
1978}
1979
1980void
1981NetworkOPsImp::pubManifest(Manifest const& mo)
1982{
1983 // VFALCO consider std::shared_mutex
1984 std::lock_guard sl(mSubLock);
1985
1986 if (!mStreamMaps[sManifests].empty())
1987 {
1989
1990 jvObj[jss::type] = "manifestReceived";
1991 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
1992 if (mo.signingKey)
1993 jvObj[jss::signing_key] =
1994 toBase58(TokenType::NodePublic, *mo.signingKey);
1995 jvObj[jss::seq] = Json::UInt(mo.sequence);
1996 if (auto sig = mo.getSignature())
1997 jvObj[jss::signature] = strHex(*sig);
1998 jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
1999 if (!mo.domain.empty())
2000 jvObj[jss::domain] = mo.domain;
2001 jvObj[jss::manifest] = strHex(mo.serialized);
2002
2003 for (auto i = mStreamMaps[sManifests].begin();
2004 i != mStreamMaps[sManifests].end();)
2005 {
2006 if (auto p = i->second.lock())
2007 {
2008 p->send(jvObj, true);
2009 ++i;
2010 }
2011 else
2012 {
2013 i = mStreamMaps[sManifests].erase(i);
2014 }
2015 }
2016 }
2017}
2018
// Snapshot of the fee/load state used by the "server" stream: captures
// the load tracker's current factor and base, the ledger base fee, and
// the transaction queue's escalation metrics.
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    XRPAmount fee,
    TxQ::Metrics&& escalationMetrics,
    LoadFeeTrack const& loadFeeTrack)
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , baseFee{fee}
    , em{std::move(escalationMetrics)}
{
}
2029
2030bool
2032 NetworkOPsImp::ServerFeeSummary const& b) const
2033{
2034 if (loadFactorServer != b.loadFactorServer ||
2035 loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
2036 em.has_value() != b.em.has_value())
2037 return true;
2038
2039 if (em && b.em)
2040 {
2041 return (
2042 em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
2043 em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
2044 em->referenceFeeLevel != b.em->referenceFeeLevel);
2045 }
2046
2047 return false;
2048}
2049
// Need to cap a uint64 to uint32 due to JSON limitations.
// NOTE(review): the scraped source dropped the signature and the max32
// declaration; reconstructed here.
static std::uint32_t
trunc32(std::uint64_t v)
{
    constexpr std::uint64_t max32 =
        std::numeric_limits<std::uint32_t>::max();

    return static_cast<std::uint32_t>(std::min(max32, v));
}
2058
2059void
2061{
2062 // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
2063 // list into a local array while holding the lock then release
2064 // the lock and call send on everyone.
2065 //
2067
2068 if (!mStreamMaps[sServer].empty())
2069 {
2071
2073 app_.openLedger().current()->fees().base,
2075 app_.getFeeTrack()};
2076
2077 jvObj[jss::type] = "serverStatus";
2078 jvObj[jss::server_status] = strOperatingMode();
2079 jvObj[jss::load_base] = f.loadBaseServer;
2080 jvObj[jss::load_factor_server] = f.loadFactorServer;
2081 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2082
2083 if (f.em)
2084 {
2085 auto const loadFactor = std::max(
2086 safe_cast<std::uint64_t>(f.loadFactorServer),
2087 mulDiv(
2088 f.em->openLedgerFeeLevel,
2089 f.loadBaseServer,
2090 f.em->referenceFeeLevel)
2092
2093 jvObj[jss::load_factor] = trunc32(loadFactor);
2094 jvObj[jss::load_factor_fee_escalation] =
2095 f.em->openLedgerFeeLevel.jsonClipped();
2096 jvObj[jss::load_factor_fee_queue] =
2097 f.em->minProcessingFeeLevel.jsonClipped();
2098 jvObj[jss::load_factor_fee_reference] =
2099 f.em->referenceFeeLevel.jsonClipped();
2100 }
2101 else
2102 jvObj[jss::load_factor] = f.loadFactorServer;
2103
2104 mLastFeeSummary = f;
2105
2106 for (auto i = mStreamMaps[sServer].begin();
2107 i != mStreamMaps[sServer].end();)
2108 {
2109 InfoSub::pointer p = i->second.lock();
2110
2111 // VFALCO TODO research the possibility of using thread queues and
2112 // linearizing the deletion of subscribers with the
2113 // sending of JSON data.
2114 if (p)
2115 {
2116 p->send(jvObj, true);
2117 ++i;
2118 }
2119 else
2120 {
2121 i = mStreamMaps[sServer].erase(i);
2122 }
2123 }
2124 }
2125}
2126
2127void
2129{
2131
2132 auto& streamMap = mStreamMaps[sConsensusPhase];
2133 if (!streamMap.empty())
2134 {
2136 jvObj[jss::type] = "consensusPhase";
2137 jvObj[jss::consensus] = to_string(phase);
2138
2139 for (auto i = streamMap.begin(); i != streamMap.end();)
2140 {
2141 if (auto p = i->second.lock())
2142 {
2143 p->send(jvObj, true);
2144 ++i;
2145 }
2146 else
2147 {
2148 i = streamMap.erase(i);
2149 }
2150 }
2151 }
2152}
2153
2154void
2156{
2157 // VFALCO consider std::shared_mutex
2159
2160 if (!mStreamMaps[sValidations].empty())
2161 {
2163
2164 auto const signerPublic = val->getSignerPublic();
2165
2166 jvObj[jss::type] = "validationReceived";
2167 jvObj[jss::validation_public_key] =
2168 toBase58(TokenType::NodePublic, signerPublic);
2169 jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
2170 jvObj[jss::signature] = strHex(val->getSignature());
2171 jvObj[jss::full] = val->isFull();
2172 jvObj[jss::flags] = val->getFlags();
2173 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2174 jvObj[jss::data] = strHex(val->getSerializer().slice());
2175
2176 if (auto version = (*val)[~sfServerVersion])
2177 jvObj[jss::server_version] = std::to_string(*version);
2178
2179 if (auto cookie = (*val)[~sfCookie])
2180 jvObj[jss::cookie] = std::to_string(*cookie);
2181
2182 if (auto hash = (*val)[~sfValidatedHash])
2183 jvObj[jss::validated_hash] = strHex(*hash);
2184
2185 auto const masterKey =
2186 app_.validatorManifests().getMasterKey(signerPublic);
2187
2188 if (masterKey != signerPublic)
2189 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
2190
2191 // NOTE *seq is a number, but old API versions used string. We replace
2192 // number with a string using MultiApiJson near end of this function
2193 if (auto const seq = (*val)[~sfLedgerSequence])
2194 jvObj[jss::ledger_index] = *seq;
2195
2196 if (val->isFieldPresent(sfAmendments))
2197 {
2198 jvObj[jss::amendments] = Json::Value(Json::arrayValue);
2199 for (auto const& amendment : val->getFieldV256(sfAmendments))
2200 jvObj[jss::amendments].append(to_string(amendment));
2201 }
2202
2203 if (auto const closeTime = (*val)[~sfCloseTime])
2204 jvObj[jss::close_time] = *closeTime;
2205
2206 if (auto const loadFee = (*val)[~sfLoadFee])
2207 jvObj[jss::load_fee] = *loadFee;
2208
2209 if (auto const baseFee = val->at(~sfBaseFee))
2210 jvObj[jss::base_fee] = static_cast<double>(*baseFee);
2211
2212 if (auto const reserveBase = val->at(~sfReserveBase))
2213 jvObj[jss::reserve_base] = *reserveBase;
2214
2215 if (auto const reserveInc = val->at(~sfReserveIncrement))
2216 jvObj[jss::reserve_inc] = *reserveInc;
2217
2218 // (The ~ operator converts the Proxy to a std::optional, which
2219 // simplifies later operations)
2220 if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2221 baseFeeXRP && baseFeeXRP->native())
2222 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2223
2224 if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2225 reserveBaseXRP && reserveBaseXRP->native())
2226 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2227
2228 if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2229 reserveIncXRP && reserveIncXRP->native())
2230 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2231
2232 // NOTE Use MultiApiJson to publish two slightly different JSON objects
2233 // for consumers supporting different API versions
2234 MultiApiJson multiObj{jvObj};
2235 multiObj.visit(
2236 RPC::apiVersion<1>, //
2237 [](Json::Value& jvTx) {
2238 // Type conversion for older API versions to string
2239 if (jvTx.isMember(jss::ledger_index))
2240 {
2241 jvTx[jss::ledger_index] =
2242 std::to_string(jvTx[jss::ledger_index].asUInt());
2243 }
2244 });
2245
2246 for (auto i = mStreamMaps[sValidations].begin();
2247 i != mStreamMaps[sValidations].end();)
2248 {
2249 if (auto p = i->second.lock())
2250 {
2251 multiObj.visit(
2252 p->getApiVersion(), //
2253 [&](Json::Value const& jv) { p->send(jv, true); });
2254 ++i;
2255 }
2256 else
2257 {
2258 i = mStreamMaps[sValidations].erase(i);
2259 }
2260 }
2261 }
2262}
2263
2264void
2266{
2268
2269 if (!mStreamMaps[sPeerStatus].empty())
2270 {
2271 Json::Value jvObj(func());
2272
2273 jvObj[jss::type] = "peerStatusChange";
2274
2275 for (auto i = mStreamMaps[sPeerStatus].begin();
2276 i != mStreamMaps[sPeerStatus].end();)
2277 {
2278 InfoSub::pointer p = i->second.lock();
2279
2280 if (p)
2281 {
2282 p->send(jvObj, true);
2283 ++i;
2284 }
2285 else
2286 {
2287 i = mStreamMaps[sPeerStatus].erase(i);
2288 }
2289 }
2290 }
2291}
2292
2293void
2295{
2296 using namespace std::chrono_literals;
2297 if (om == OperatingMode::CONNECTED)
2298 {
2301 }
2302 else if (om == OperatingMode::SYNCING)
2303 {
2306 }
2307
2308 if ((om > OperatingMode::CONNECTED) && isBlocked())
2310
2311 if (mMode == om)
2312 return;
2313
2314 auto const sink = om < mMode ? m_journal.warn() : m_journal.info();
2315 mMode = om;
2316
2317 accounting_.mode(om);
2318
2319 JLOG(sink) << "STATE->" << strOperatingMode() << " - " << reason;
2320 pubServer();
2321}
2322
2323bool
2326 std::string const& source)
2327{
2328 JLOG(m_journal.trace())
2329 << "recvValidation " << val->getLedgerHash() << " from " << source;
2330
2331 {
2332 CanProcess const check(
2333 validationsMutex_, pendingValidations_, val->getLedgerHash());
2334 try
2335 {
2336 BypassAccept bypassAccept =
2338 handleNewValidation(app_, val, source, bypassAccept, m_journal);
2339 }
2340 catch (std::exception const& e)
2341 {
2342 JLOG(m_journal.warn())
2343 << "Exception thrown for handling new validation "
2344 << val->getLedgerHash() << ": " << e.what();
2345 }
2346 catch (...)
2347 {
2348 JLOG(m_journal.warn())
2349 << "Unknown exception thrown for handling new validation "
2350 << val->getLedgerHash();
2351 }
2352 }
2353
2354 pubValidation(val);
2355
2356 JLOG(m_journal.debug()) << [this, &val]() -> auto {
2358 ss << "VALIDATION: " << val->render() << " master_key: ";
2359 auto master = app_.validators().getTrustedKey(val->getSignerPublic());
2360 if (master)
2361 {
2362 ss << toBase58(TokenType::NodePublic, *master);
2363 }
2364 else
2365 {
2366 ss << "none";
2367 }
2368 return ss.str();
2369 }();
2370
2371 // We will always relay trusted validations; if configured, we will
2372 // also relay all untrusted validations.
2373 return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2374}
2375
2378{
2379 return mConsensus.getJson(true);
2380}
2381
2383NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2384{
2386
2387 // System-level warnings
2388 {
2389 Json::Value warnings{Json::arrayValue};
2390 if (isAmendmentBlocked())
2391 {
2392 Json::Value& w = warnings.append(Json::objectValue);
2393 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2394 w[jss::message] =
2395 "This server is amendment blocked, and must be updated to be "
2396 "able to stay in sync with the network.";
2397 }
2398 if (isUNLBlocked())
2399 {
2400 Json::Value& w = warnings.append(Json::objectValue);
2401 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2402 w[jss::message] =
2403 "This server has an expired validator list. validators.txt "
2404 "may be incorrectly configured or some [validator_list_sites] "
2405 "may be unreachable.";
2406 }
2407 if (admin && isAmendmentWarned())
2408 {
2409 Json::Value& w = warnings.append(Json::objectValue);
2410 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2411 w[jss::message] =
2412 "One or more unsupported amendments have reached majority. "
2413 "Upgrade to the latest version before they are activated "
2414 "to avoid being amendment blocked.";
2415 if (auto const expected =
2417 {
2418 auto& d = w[jss::details] = Json::objectValue;
2419 d[jss::expected_date] = expected->time_since_epoch().count();
2420 d[jss::expected_date_UTC] = to_string(*expected);
2421 }
2422 }
2423
2424 if (warnings.size())
2425 info[jss::warnings] = std::move(warnings);
2426 }
2427
2428 // hostid: unique string describing the machine
2429 if (human)
2430 info[jss::hostid] = getHostId(admin);
2431
2432 // domain: if configured with a domain, report it:
2433 if (!app_.config().SERVER_DOMAIN.empty())
2434 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2435
2436 info[jss::build_version] = BuildInfo::getVersionString();
2437
2438 info[jss::server_state] = strOperatingMode(admin);
2439
2440 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2442
2444 info[jss::network_ledger] = "waiting";
2445
2446 info[jss::validation_quorum] =
2447 static_cast<Json::UInt>(app_.validators().quorum());
2448
2449 if (admin)
2450 {
2451 switch (app_.config().NODE_SIZE)
2452 {
2453 case 0:
2454 info[jss::node_size] = "tiny";
2455 break;
2456 case 1:
2457 info[jss::node_size] = "small";
2458 break;
2459 case 2:
2460 info[jss::node_size] = "medium";
2461 break;
2462 case 3:
2463 info[jss::node_size] = "large";
2464 break;
2465 case 4:
2466 info[jss::node_size] = "huge";
2467 break;
2468 }
2469
2470 auto when = app_.validators().expires();
2471
2472 if (!human)
2473 {
2474 if (when)
2475 info[jss::validator_list_expires] =
2476 safe_cast<Json::UInt>(when->time_since_epoch().count());
2477 else
2478 info[jss::validator_list_expires] = 0;
2479 }
2480 else
2481 {
2482 auto& x = (info[jss::validator_list] = Json::objectValue);
2483
2484 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2485
2486 if (when)
2487 {
2488 if (*when == TimeKeeper::time_point::max())
2489 {
2490 x[jss::expiration] = "never";
2491 x[jss::status] = "active";
2492 }
2493 else
2494 {
2495 x[jss::expiration] = to_string(*when);
2496
2497 if (*when > app_.timeKeeper().now())
2498 x[jss::status] = "active";
2499 else
2500 x[jss::status] = "expired";
2501 }
2502 }
2503 else
2504 {
2505 x[jss::status] = "unknown";
2506 x[jss::expiration] = "unknown";
2507 }
2508 }
2509
2510#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2511 {
2512 auto& x = (info[jss::git] = Json::objectValue);
2513#ifdef GIT_COMMIT_HASH
2514 x[jss::hash] = GIT_COMMIT_HASH;
2515#endif
2516#ifdef GIT_BRANCH
2517 x[jss::branch] = GIT_BRANCH;
2518#endif
2519 }
2520#endif
2521 }
2522 info[jss::io_latency_ms] =
2523 static_cast<Json::UInt>(app_.getIOLatency().count());
2524
2525 if (admin)
2526 {
2527 if (auto const localPubKey = app_.validators().localPublicKey();
2528 localPubKey && app_.getValidationPublicKey())
2529 {
2530 info[jss::pubkey_validator] =
2531 toBase58(TokenType::NodePublic, localPubKey.value());
2532 }
2533 else
2534 {
2535 info[jss::pubkey_validator] = "none";
2536 }
2537 }
2538
2539 if (counters)
2540 {
2541 info[jss::counters] = app_.getPerfLog().countersJson();
2542
2543 Json::Value nodestore(Json::objectValue);
2544 app_.getNodeStore().getCountsJson(nodestore);
2545 info[jss::counters][jss::nodestore] = nodestore;
2546 info[jss::current_activities] = app_.getPerfLog().currentJson();
2547 }
2548
2549 info[jss::pubkey_node] =
2551
2552 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2553
2555 info[jss::amendment_blocked] = true;
2556
2557 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2558
2559 if (fp != 0)
2560 info[jss::fetch_pack] = Json::UInt(fp);
2561
2562 info[jss::peers] = Json::UInt(app_.overlay().size());
2563
2564 Json::Value lastClose = Json::objectValue;
2565 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2566
2567 if (human)
2568 {
2569 lastClose[jss::converge_time_s] =
2571 }
2572 else
2573 {
2574 lastClose[jss::converge_time] =
2576 }
2577
2578 info[jss::last_close] = lastClose;
2579
2580 // info[jss::consensus] = mConsensus.getJson();
2581
2582 if (admin)
2583 info[jss::load] = m_job_queue.getJson();
2584
2585 if (auto const netid = app_.overlay().networkID())
2586 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2587
2588 auto const escalationMetrics =
2590
2591 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2592 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2593 /* Scale the escalated fee level to unitless "load factor".
2594 In practice, this just strips the units, but it will continue
2595 to work correctly if either base value ever changes. */
2596 auto const loadFactorFeeEscalation =
2597 mulDiv(
2598 escalationMetrics.openLedgerFeeLevel,
2599 loadBaseServer,
2600 escalationMetrics.referenceFeeLevel)
2602
2603 auto const loadFactor = std::max(
2604 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2605
2606 if (!human)
2607 {
2608 info[jss::load_base] = loadBaseServer;
2609 info[jss::load_factor] = trunc32(loadFactor);
2610 info[jss::load_factor_server] = loadFactorServer;
2611
2612 /* Json::Value doesn't support uint64, so clamp to max
2613 uint32 value. This is mostly theoretical, since there
2614 probably isn't enough extant XRP to drive the factor
2615 that high.
2616 */
2617 info[jss::load_factor_fee_escalation] =
2618 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2619 info[jss::load_factor_fee_queue] =
2620 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2621 info[jss::load_factor_fee_reference] =
2622 escalationMetrics.referenceFeeLevel.jsonClipped();
2623 }
2624 else
2625 {
2626 info[jss::load_factor] =
2627 static_cast<double>(loadFactor) / loadBaseServer;
2628
2629 if (loadFactorServer != loadFactor)
2630 info[jss::load_factor_server] =
2631 static_cast<double>(loadFactorServer) / loadBaseServer;
2632
2633 if (admin)
2634 {
2636 if (fee != loadBaseServer)
2637 info[jss::load_factor_local] =
2638 static_cast<double>(fee) / loadBaseServer;
2639 fee = app_.getFeeTrack().getRemoteFee();
2640 if (fee != loadBaseServer)
2641 info[jss::load_factor_net] =
2642 static_cast<double>(fee) / loadBaseServer;
2643 fee = app_.getFeeTrack().getClusterFee();
2644 if (fee != loadBaseServer)
2645 info[jss::load_factor_cluster] =
2646 static_cast<double>(fee) / loadBaseServer;
2647 }
2648 if (escalationMetrics.openLedgerFeeLevel !=
2649 escalationMetrics.referenceFeeLevel &&
2650 (admin || loadFactorFeeEscalation != loadFactor))
2651 info[jss::load_factor_fee_escalation] =
2652 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2653 escalationMetrics.referenceFeeLevel);
2654 if (escalationMetrics.minProcessingFeeLevel !=
2655 escalationMetrics.referenceFeeLevel)
2656 info[jss::load_factor_fee_queue] =
2657 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2658 escalationMetrics.referenceFeeLevel);
2659 }
2660
2661 bool valid = false;
2662 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2663
2664 if (lpClosed)
2665 valid = true;
2666 else
2667 lpClosed = m_ledgerMaster.getClosedLedger();
2668
2669 if (lpClosed)
2670 {
2671 XRPAmount const baseFee = lpClosed->fees().base;
2673 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2674 l[jss::hash] = to_string(lpClosed->info().hash);
2675
2676 if (!human)
2677 {
2678 l[jss::base_fee] = baseFee.jsonClipped();
2679 l[jss::reserve_base] =
2680 lpClosed->fees().accountReserve(0).jsonClipped();
2681 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2682 l[jss::close_time] = Json::Value::UInt(
2683 lpClosed->info().closeTime.time_since_epoch().count());
2684 }
2685 else
2686 {
2687 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2688 l[jss::reserve_base_xrp] =
2689 lpClosed->fees().accountReserve(0).decimalXRP();
2690 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2691
2692 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2693 std::abs(closeOffset.count()) >= 60)
2694 l[jss::close_time_offset] =
2695 static_cast<std::uint32_t>(closeOffset.count());
2696
2697 constexpr std::chrono::seconds highAgeThreshold{1000000};
2699 {
2700 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2701 l[jss::age] =
2702 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2703 }
2704 else
2705 {
2706 auto lCloseTime = lpClosed->info().closeTime;
2707 auto closeTime = app_.timeKeeper().closeTime();
2708 if (lCloseTime <= closeTime)
2709 {
2710 using namespace std::chrono_literals;
2711 auto age = closeTime - lCloseTime;
2712 l[jss::age] =
2713 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2714 }
2715 }
2716 }
2717
2718 if (valid)
2719 info[jss::validated_ledger] = l;
2720 else
2721 info[jss::closed_ledger] = l;
2722
2723 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2724 if (!lpPublished)
2725 info[jss::published_ledger] = "none";
2726 else if (lpPublished->info().seq != lpClosed->info().seq)
2727 info[jss::published_ledger] = lpPublished->info().seq;
2728 }
2729
2730 accounting_.json(info);
2731 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2732 info[jss::jq_trans_overflow] =
2734 info[jss::peer_disconnects] =
2736 info[jss::peer_disconnects_resources] =
2738
2739 // This array must be sorted in increasing order.
2740 static constexpr std::array<std::string_view, 7> protocols{
2741 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2742 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2743 {
2745 for (auto const& port : app_.getServerHandler().setup().ports)
2746 {
2747 // Don't publish admin ports for non-admin users
2748 if (!admin &&
2749 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2750 port.admin_user.empty() && port.admin_password.empty()))
2751 continue;
2754 std::begin(port.protocol),
2755 std::end(port.protocol),
2756 std::begin(protocols),
2757 std::end(protocols),
2758 std::back_inserter(proto));
2759 if (!proto.empty())
2760 {
2761 auto& jv = ports.append(Json::Value(Json::objectValue));
2762 jv[jss::port] = std::to_string(port.port);
2763 jv[jss::protocol] = Json::Value{Json::arrayValue};
2764 for (auto const& p : proto)
2765 jv[jss::protocol].append(p);
2766 }
2767 }
2768
2769 if (app_.config().exists(SECTION_PORT_GRPC))
2770 {
2771 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
2772 auto const optPort = grpcSection.get("port");
2773 if (optPort && grpcSection.get("ip"))
2774 {
2775 auto& jv = ports.append(Json::Value(Json::objectValue));
2776 jv[jss::port] = *optPort;
2777 jv[jss::protocol] = Json::Value{Json::arrayValue};
2778 jv[jss::protocol].append("grpc");
2779 }
2780 }
2781 info[jss::ports] = std::move(ports);
2782 }
2783
2784 return info;
2785}
2786
2787void
2789{
2791}
2792
2795{
2796 return app_.getInboundLedgers().getInfo();
2797}
2798
2799void
2801 std::shared_ptr<ReadView const> const& ledger,
2802 std::shared_ptr<STTx const> const& transaction,
2803 TER result)
2804{
2805 MultiApiJson jvObj =
2806 transJson(transaction, result, false, ledger, std::nullopt);
2807
2808 {
2810
2811 auto it = mStreamMaps[sRTTransactions].begin();
2812 while (it != mStreamMaps[sRTTransactions].end())
2813 {
2814 InfoSub::pointer p = it->second.lock();
2815
2816 if (p)
2817 {
2818 jvObj.visit(
2819 p->getApiVersion(), //
2820 [&](Json::Value const& jv) { p->send(jv, true); });
2821 ++it;
2822 }
2823 else
2824 {
2825 it = mStreamMaps[sRTTransactions].erase(it);
2826 }
2827 }
2828 }
2829
2830 pubProposedAccountTransaction(ledger, transaction, result);
2831}
2832
2833void
2835{
2836 // Ledgers are published only when they acquire sufficient validations
2837 // Holes are filled across connection loss or other catastrophe
2838
2840 app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
2841 if (!alpAccepted)
2842 {
2843 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
2844 app_.getAcceptedLedgerCache().canonicalize_replace_client(
2845 lpAccepted->info().hash, alpAccepted);
2846 }
2847
2848 XRPL_ASSERT(
2849 alpAccepted->getLedger().get() == lpAccepted.get(),
2850 "ripple::NetworkOPsImp::pubLedger : accepted input");
2851
2852 {
2853 JLOG(m_journal.debug())
2854 << "Publishing ledger " << lpAccepted->info().seq << " "
2855 << lpAccepted->info().hash;
2856
2858
2859 if (!mStreamMaps[sLedger].empty())
2860 {
2862
2863 jvObj[jss::type] = "ledgerClosed";
2864 jvObj[jss::ledger_index] = lpAccepted->info().seq;
2865 jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
2866 jvObj[jss::ledger_time] = Json::Value::UInt(
2867 lpAccepted->info().closeTime.time_since_epoch().count());
2868
2869 if (!lpAccepted->rules().enabled(featureXRPFees))
2870 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
2871 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
2872 jvObj[jss::reserve_base] =
2873 lpAccepted->fees().accountReserve(0).jsonClipped();
2874 jvObj[jss::reserve_inc] =
2875 lpAccepted->fees().increment.jsonClipped();
2876
2877 jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
2878
2880 {
2881 jvObj[jss::validated_ledgers] =
2883 }
2884
2885 auto it = mStreamMaps[sLedger].begin();
2886 while (it != mStreamMaps[sLedger].end())
2887 {
2888 InfoSub::pointer p = it->second.lock();
2889 if (p)
2890 {
2891 p->send(jvObj, true);
2892 ++it;
2893 }
2894 else
2895 it = mStreamMaps[sLedger].erase(it);
2896 }
2897 }
2898
2899 if (!mStreamMaps[sBookChanges].empty())
2900 {
2901 Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
2902
2903 auto it = mStreamMaps[sBookChanges].begin();
2904 while (it != mStreamMaps[sBookChanges].end())
2905 {
2906 InfoSub::pointer p = it->second.lock();
2907 if (p)
2908 {
2909 p->send(jvObj, true);
2910 ++it;
2911 }
2912 else
2913 it = mStreamMaps[sBookChanges].erase(it);
2914 }
2915 }
2916
2917 {
2918 static bool firstTime = true;
2919 if (firstTime)
2920 {
2921 // First validated ledger, start delayed SubAccountHistory
2922 firstTime = false;
2923 for (auto& outer : mSubAccountHistory)
2924 {
2925 for (auto& inner : outer.second)
2926 {
2927 auto& subInfo = inner.second;
2928 if (subInfo.index_->separationLedgerSeq_ == 0)
2929 {
2931 alpAccepted->getLedger(), subInfo);
2932 }
2933 }
2934 }
2935 }
2936 }
2937 }
2938
2939 // Don't lock since pubAcceptedTransaction is locking.
2940 for (auto const& accTx : *alpAccepted)
2941 {
2942 JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
2944 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
2945 }
2946}
2947
2948void
2950{
2952 app_.openLedger().current()->fees().base,
2954 app_.getFeeTrack()};
2955
2956 // only schedule the job if something has changed
2957 if (f != mLastFeeSummary)
2958 {
2960 jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
2961 pubServer();
2962 });
2963 }
2964}
2965
2966void
2968{
2971 "reportConsensusStateChange->pubConsensus",
2972 [this, phase]() { pubConsensus(phase); });
2973}
2974
2975inline void
2977{
2978 m_localTX->sweep(view);
2979}
2980inline std::size_t
2982{
2983 return m_localTX->size();
2984}
2985
2986// This routine should only be used to publish accepted or validated
2987// transactions.
2990 std::shared_ptr<STTx const> const& transaction,
2991 TER result,
2992 bool validated,
2993 std::shared_ptr<ReadView const> const& ledger,
2995{
2997 std::string sToken;
2998 std::string sHuman;
2999
3000 transResultInfo(result, sToken, sHuman);
3001
3002 jvObj[jss::type] = "transaction";
3003 // NOTE jvObj is not a finished object for either API version. After
3004 // it's populated, we need to finish it for a specific API version. This is
3005 // done in a loop, near the end of this function.
3006 jvObj[jss::transaction] =
3007 transaction->getJson(JsonOptions::disable_API_prior_V2, false);
3008
3009 if (meta)
3010 {
3011 jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
3013 jvObj[jss::meta], *ledger, transaction, meta->get());
3015 jvObj[jss::meta], transaction, meta->get());
3016 }
3017
3018 if (!ledger->open())
3019 jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3020
3021 if (validated)
3022 {
3023 jvObj[jss::ledger_index] = ledger->info().seq;
3024 jvObj[jss::transaction][jss::date] =
3025 ledger->info().closeTime.time_since_epoch().count();
3026 jvObj[jss::validated] = true;
3027 jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3028
3029 // WRITEME: Put the account next seq here
3030 }
3031 else
3032 {
3033 jvObj[jss::validated] = false;
3034 jvObj[jss::ledger_current_index] = ledger->info().seq;
3035 }
3036
3037 jvObj[jss::status] = validated ? "closed" : "proposed";
3038 jvObj[jss::engine_result] = sToken;
3039 jvObj[jss::engine_result_code] = result;
3040 jvObj[jss::engine_result_message] = sHuman;
3041
3042 if (transaction->getTxnType() == ttOFFER_CREATE)
3043 {
3044 auto const account = transaction->getAccountID(sfAccount);
3045 auto const amount = transaction->getFieldAmount(sfTakerGets);
3046
3047 // If the offer create is not self funded then add the owner balance
3048 if (account != amount.issue().account)
3049 {
3050 auto const ownerFunds = accountFunds(
3051 *ledger,
3052 account,
3053 amount,
3055 app_.journal("View"));
3056 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3057 }
3058 }
3059
3060 std::string const hash = to_string(transaction->getTransactionID());
3061 MultiApiJson multiObj{jvObj};
3063 multiObj.visit(), //
3064 [&]<unsigned Version>(
3066 RPC::insertDeliverMax(
3067 jvTx[jss::transaction], transaction->getTxnType(), Version);
3068
3069 if constexpr (Version > 1)
3070 {
3071 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3072 jvTx[jss::hash] = hash;
3073 }
3074 else
3075 {
3076 jvTx[jss::transaction][jss::hash] = hash;
3077 }
3078 });
3079
3080 return multiObj;
3081}
3082
3083void
3085 std::shared_ptr<ReadView const> const& ledger,
3086 const AcceptedLedgerTx& transaction,
3087 bool last)
3088{
3089 auto const& stTxn = transaction.getTxn();
3090
3091 // Create two different Json objects, for different API versions
3092 auto const metaRef = std::ref(transaction.getMeta());
3093 auto const trResult = transaction.getResult();
3094 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3095
3096 {
3098
3099 auto it = mStreamMaps[sTransactions].begin();
3100 while (it != mStreamMaps[sTransactions].end())
3101 {
3102 InfoSub::pointer p = it->second.lock();
3103
3104 if (p)
3105 {
3106 jvObj.visit(
3107 p->getApiVersion(), //
3108 [&](Json::Value const& jv) { p->send(jv, true); });
3109 ++it;
3110 }
3111 else
3112 it = mStreamMaps[sTransactions].erase(it);
3113 }
3114
3115 it = mStreamMaps[sRTTransactions].begin();
3116
3117 while (it != mStreamMaps[sRTTransactions].end())
3118 {
3119 InfoSub::pointer p = it->second.lock();
3120
3121 if (p)
3122 {
3123 jvObj.visit(
3124 p->getApiVersion(), //
3125 [&](Json::Value const& jv) { p->send(jv, true); });
3126 ++it;
3127 }
3128 else
3129 it = mStreamMaps[sRTTransactions].erase(it);
3130 }
3131 }
3132
3133 if (transaction.getResult() == tesSUCCESS)
3134 app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3135
3136 pubAccountTransaction(ledger, transaction, last);
3137}
3138
3139void
3141 std::shared_ptr<ReadView const> const& ledger,
3142 AcceptedLedgerTx const& transaction,
3143 bool last)
3144{
3146 int iProposed = 0;
3147 int iAccepted = 0;
3148
3149 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3150 auto const currLedgerSeq = ledger->seq();
3151 {
3153
3154 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3156 {
3157 for (auto const& affectedAccount : transaction.getAffected())
3158 {
3159 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3160 simiIt != mSubRTAccount.end())
3161 {
3162 auto it = simiIt->second.begin();
3163
3164 while (it != simiIt->second.end())
3165 {
3166 InfoSub::pointer p = it->second.lock();
3167
3168 if (p)
3169 {
3170 notify.insert(p);
3171 ++it;
3172 ++iProposed;
3173 }
3174 else
3175 it = simiIt->second.erase(it);
3176 }
3177 }
3178
3179 if (auto simiIt = mSubAccount.find(affectedAccount);
3180 simiIt != mSubAccount.end())
3181 {
3182 auto it = simiIt->second.begin();
3183 while (it != simiIt->second.end())
3184 {
3185 InfoSub::pointer p = it->second.lock();
3186
3187 if (p)
3188 {
3189 notify.insert(p);
3190 ++it;
3191 ++iAccepted;
3192 }
3193 else
3194 it = simiIt->second.erase(it);
3195 }
3196 }
3197
3198 if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3199 histoIt != mSubAccountHistory.end())
3200 {
3201 auto& subs = histoIt->second;
3202 auto it = subs.begin();
3203 while (it != subs.end())
3204 {
3205 SubAccountHistoryInfoWeak const& info = it->second;
3206 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3207 {
3208 ++it;
3209 continue;
3210 }
3211
3212 if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3213 {
3214 accountHistoryNotify.emplace_back(
3215 SubAccountHistoryInfo{isSptr, info.index_});
3216 ++it;
3217 }
3218 else
3219 {
3220 it = subs.erase(it);
3221 }
3222 }
3223 if (subs.empty())
3224 mSubAccountHistory.erase(histoIt);
3225 }
3226 }
3227 }
3228 }
3229
3230 JLOG(m_journal.trace())
3231 << "pubAccountTransaction: " << "proposed=" << iProposed
3232 << ", accepted=" << iAccepted;
3233
3234 if (!notify.empty() || !accountHistoryNotify.empty())
3235 {
3236 auto const& stTxn = transaction.getTxn();
3237
3238 // Create two different Json objects, for different API versions
3239 auto const metaRef = std::ref(transaction.getMeta());
3240 auto const trResult = transaction.getResult();
3241 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3242
3243 for (InfoSub::ref isrListener : notify)
3244 {
3245 jvObj.visit(
3246 isrListener->getApiVersion(), //
3247 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3248 }
3249
3250 if (last)
3251 jvObj.set(jss::account_history_boundary, true);
3252
3253 XRPL_ASSERT(
3254 jvObj.isMember(jss::account_history_tx_stream) ==
3256 "ripple::NetworkOPsImp::pubAccountTransaction : "
3257 "account_history_tx_stream not set");
3258 for (auto& info : accountHistoryNotify)
3259 {
3260 auto& index = info.index_;
3261 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3262 jvObj.set(jss::account_history_tx_first, true);
3263
3264 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3265
3266 jvObj.visit(
3267 info.sink_->getApiVersion(), //
3268 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3269 }
3270 }
3271}
3272
3273void
3275 std::shared_ptr<ReadView const> const& ledger,
3277 TER result)
3278{
3280 int iProposed = 0;
3281
3282 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3283
3284 {
3286
3287 if (mSubRTAccount.empty())
3288 return;
3289
3290 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3292 {
3293 for (auto const& affectedAccount : tx->getMentionedAccounts())
3294 {
3295 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3296 simiIt != mSubRTAccount.end())
3297 {
3298 auto it = simiIt->second.begin();
3299
3300 while (it != simiIt->second.end())
3301 {
3302 InfoSub::pointer p = it->second.lock();
3303
3304 if (p)
3305 {
3306 notify.insert(p);
3307 ++it;
3308 ++iProposed;
3309 }
3310 else
3311 it = simiIt->second.erase(it);
3312 }
3313 }
3314 }
3315 }
3316 }
3317
3318 JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3319
3320 if (!notify.empty() || !accountHistoryNotify.empty())
3321 {
3322 // Create two different Json objects, for different API versions
3323 MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3324
3325 for (InfoSub::ref isrListener : notify)
3326 jvObj.visit(
3327 isrListener->getApiVersion(), //
3328 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3329
3330 XRPL_ASSERT(
3331 jvObj.isMember(jss::account_history_tx_stream) ==
3333 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3334 "account_history_tx_stream not set");
3335 for (auto& info : accountHistoryNotify)
3336 {
3337 auto& index = info.index_;
3338 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3339 jvObj.set(jss::account_history_tx_first, true);
3340 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3341 jvObj.visit(
3342 info.sink_->getApiVersion(), //
3343 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3344 }
3345 }
3346}
3347
3348//
3349// Monitoring
3350//
3351
3352void
3354 InfoSub::ref isrListener,
3355 hash_set<AccountID> const& vnaAccountIDs,
3356 bool rt)
3357{
3358 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3359
3360 for (auto const& naAccountID : vnaAccountIDs)
3361 {
3362 JLOG(m_journal.trace())
3363 << "subAccount: account: " << toBase58(naAccountID);
3364
3365 isrListener->insertSubAccountInfo(naAccountID, rt);
3366 }
3367
3369
3370 for (auto const& naAccountID : vnaAccountIDs)
3371 {
3372 auto simIterator = subMap.find(naAccountID);
3373 if (simIterator == subMap.end())
3374 {
3375 // Not found, note that account has a new single listner.
3376 SubMapType usisElement;
3377 usisElement[isrListener->getSeq()] = isrListener;
3378 // VFALCO NOTE This is making a needless copy of naAccountID
3379 subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3380 }
3381 else
3382 {
3383 // Found, note that the account has another listener.
3384 simIterator->second[isrListener->getSeq()] = isrListener;
3385 }
3386 }
3387}
3388
3389void
3391 InfoSub::ref isrListener,
3392 hash_set<AccountID> const& vnaAccountIDs,
3393 bool rt)
3394{
3395 for (auto const& naAccountID : vnaAccountIDs)
3396 {
3397 // Remove from the InfoSub
3398 isrListener->deleteSubAccountInfo(naAccountID, rt);
3399 }
3400
3401 // Remove from the server
3402 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3403}
3404
3405void
3407 std::uint64_t uSeq,
3408 hash_set<AccountID> const& vnaAccountIDs,
3409 bool rt)
3410{
3412
3413 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3414
3415 for (auto const& naAccountID : vnaAccountIDs)
3416 {
3417 auto simIterator = subMap.find(naAccountID);
3418
3419 if (simIterator != subMap.end())
3420 {
3421 // Found
3422 simIterator->second.erase(uSeq);
3423
3424 if (simIterator->second.empty())
3425 {
3426 // Don't need hash entry.
3427 subMap.erase(simIterator);
3428 }
3429 }
3430 }
3431}
3432
3433void
3435{
3436 enum DatabaseType { Sqlite, None };
3437 static const auto databaseType = [&]() -> DatabaseType {
3438 // Use a dynamic_cast to return DatabaseType::None
3439 // on failure.
3440 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3441 {
3442 return DatabaseType::Sqlite;
3443 }
3444 return DatabaseType::None;
3445 }();
3446
3447 if (databaseType == DatabaseType::None)
3448 {
3449 JLOG(m_journal.error())
3450 << "AccountHistory job for account "
3451 << toBase58(subInfo.index_->accountId_) << " no database";
3452 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3453 {
3454 sptr->send(rpcError(rpcINTERNAL), true);
3455 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3456 }
3457 return;
3458 }
3459
3462 "AccountHistoryTxStream",
3463 [this, dbType = databaseType, subInfo]() {
3464 auto const& accountId = subInfo.index_->accountId_;
3465 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3466 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3467
3468 JLOG(m_journal.trace())
3469 << "AccountHistory job for account " << toBase58(accountId)
3470 << " started. lastLedgerSeq=" << lastLedgerSeq;
3471
3472 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3473 std::shared_ptr<TxMeta> const& meta) -> bool {
3474 /*
3475 * genesis account: first tx is the one with seq 1
3476 * other account: first tx is the one created the account
3477 */
3478 if (accountId == genesisAccountId)
3479 {
3480 auto stx = tx->getSTransaction();
3481 if (stx->getAccountID(sfAccount) == accountId &&
3482 stx->getSeqProxy().value() == 1)
3483 return true;
3484 }
3485
3486 for (auto& node : meta->getNodes())
3487 {
3488 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3489 continue;
3490
3491 if (node.isFieldPresent(sfNewFields))
3492 {
3493 if (auto inner = dynamic_cast<const STObject*>(
3494 node.peekAtPField(sfNewFields));
3495 inner)
3496 {
3497 if (inner->isFieldPresent(sfAccount) &&
3498 inner->getAccountID(sfAccount) == accountId)
3499 {
3500 return true;
3501 }
3502 }
3503 }
3504 }
3505
3506 return false;
3507 };
3508
3509 auto send = [&](Json::Value const& jvObj,
3510 bool unsubscribe) -> bool {
3511 if (auto sptr = subInfo.sinkWptr_.lock())
3512 {
3513 sptr->send(jvObj, true);
3514 if (unsubscribe)
3515 unsubAccountHistory(sptr, accountId, false);
3516 return true;
3517 }
3518
3519 return false;
3520 };
3521
3522 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3523 bool unsubscribe) -> bool {
3524 if (auto sptr = subInfo.sinkWptr_.lock())
3525 {
3526 jvObj.visit(
3527 sptr->getApiVersion(), //
3528 [&](Json::Value const& jv) { sptr->send(jv, true); });
3529
3530 if (unsubscribe)
3531 unsubAccountHistory(sptr, accountId, false);
3532 return true;
3533 }
3534
3535 return false;
3536 };
3537
3538 auto getMoreTxns =
3539 [&](std::uint32_t minLedger,
3540 std::uint32_t maxLedger,
3545 switch (dbType)
3546 {
3547 case Sqlite: {
3548 auto db = static_cast<SQLiteDatabase*>(
3551 accountId, minLedger, maxLedger, marker, 0, true};
3552 return db->newestAccountTxPage(options);
3553 }
3554 default: {
3555 UNREACHABLE(
3556 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3557 "getMoreTxns : invalid database type");
3558 return {};
3559 }
3560 }
3561 };
3562
3563 /*
3564 * search backward until the genesis ledger or asked to stop
3565 */
3566 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3567 {
3568 int feeChargeCount = 0;
3569 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3570 {
3571 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3572 ++feeChargeCount;
3573 }
3574 else
3575 {
3576 JLOG(m_journal.trace())
3577 << "AccountHistory job for account "
3578 << toBase58(accountId) << " no InfoSub. Fee charged "
3579 << feeChargeCount << " times.";
3580 return;
3581 }
3582
3583 // try to search in 1024 ledgers till reaching genesis ledgers
3584 auto startLedgerSeq =
3585 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3586 JLOG(m_journal.trace())
3587 << "AccountHistory job for account " << toBase58(accountId)
3588 << ", working on ledger range [" << startLedgerSeq << ","
3589 << lastLedgerSeq << "]";
3590
3591 auto haveRange = [&]() -> bool {
3592 std::uint32_t validatedMin = UINT_MAX;
3593 std::uint32_t validatedMax = 0;
3594 auto haveSomeValidatedLedgers =
3596 validatedMin, validatedMax);
3597
3598 return haveSomeValidatedLedgers &&
3599 validatedMin <= startLedgerSeq &&
3600 lastLedgerSeq <= validatedMax;
3601 }();
3602
3603 if (!haveRange)
3604 {
3605 JLOG(m_journal.debug())
3606 << "AccountHistory reschedule job for account "
3607 << toBase58(accountId) << ", incomplete ledger range ["
3608 << startLedgerSeq << "," << lastLedgerSeq << "]";
3610 return;
3611 }
3612
3614 while (!subInfo.index_->stopHistorical_)
3615 {
3616 auto dbResult =
3617 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3618 if (!dbResult)
3619 {
3620 JLOG(m_journal.debug())
3621 << "AccountHistory job for account "
3622 << toBase58(accountId) << " getMoreTxns failed.";
3623 send(rpcError(rpcINTERNAL), true);
3624 return;
3625 }
3626
3627 auto const& txns = dbResult->first;
3628 marker = dbResult->second;
3629 size_t num_txns = txns.size();
3630 for (size_t i = 0; i < num_txns; ++i)
3631 {
3632 auto const& [tx, meta] = txns[i];
3633
3634 if (!tx || !meta)
3635 {
3636 JLOG(m_journal.debug())
3637 << "AccountHistory job for account "
3638 << toBase58(accountId) << " empty tx or meta.";
3639 send(rpcError(rpcINTERNAL), true);
3640 return;
3641 }
3642 auto curTxLedger =
3644 tx->getLedger());
3645 if (!curTxLedger)
3646 {
3647 JLOG(m_journal.debug())
3648 << "AccountHistory job for account "
3649 << toBase58(accountId) << " no ledger.";
3650 send(rpcError(rpcINTERNAL), true);
3651 return;
3652 }
3654 tx->getSTransaction();
3655 if (!stTxn)
3656 {
3657 JLOG(m_journal.debug())
3658 << "AccountHistory job for account "
3659 << toBase58(accountId)
3660 << " getSTransaction failed.";
3661 send(rpcError(rpcINTERNAL), true);
3662 return;
3663 }
3664
3665 auto const mRef = std::ref(*meta);
3666 auto const trR = meta->getResultTER();
3667 MultiApiJson jvTx =
3668 transJson(stTxn, trR, true, curTxLedger, mRef);
3669
3670 jvTx.set(
3671 jss::account_history_tx_index, txHistoryIndex--);
3672 if (i + 1 == num_txns ||
3673 txns[i + 1].first->getLedger() != tx->getLedger())
3674 jvTx.set(jss::account_history_boundary, true);
3675
3676 if (isFirstTx(tx, meta))
3677 {
3678 jvTx.set(jss::account_history_tx_first, true);
3679 sendMultiApiJson(jvTx, false);
3680
3681 JLOG(m_journal.trace())
3682 << "AccountHistory job for account "
3683 << toBase58(accountId)
3684 << " done, found last tx.";
3685 return;
3686 }
3687 else
3688 {
3689 sendMultiApiJson(jvTx, false);
3690 }
3691 }
3692
3693 if (marker)
3694 {
3695 JLOG(m_journal.trace())
3696 << "AccountHistory job for account "
3697 << toBase58(accountId)
3698 << " paging, marker=" << marker->ledgerSeq << ":"
3699 << marker->txnSeq;
3700 }
3701 else
3702 {
3703 break;
3704 }
3705 }
3706
3707 if (!subInfo.index_->stopHistorical_)
3708 {
3709 lastLedgerSeq = startLedgerSeq - 1;
3710 if (lastLedgerSeq <= 1)
3711 {
3712 JLOG(m_journal.trace())
3713 << "AccountHistory job for account "
3714 << toBase58(accountId)
3715 << " done, reached genesis ledger.";
3716 return;
3717 }
3718 }
3719 }
3720 });
3721}
3722
3723void
3725 std::shared_ptr<ReadView const> const& ledger,
3727{
3728 subInfo.index_->separationLedgerSeq_ = ledger->seq();
3729 auto const& accountId = subInfo.index_->accountId_;
3730 auto const accountKeylet = keylet::account(accountId);
3731 if (!ledger->exists(accountKeylet))
3732 {
3733 JLOG(m_journal.debug())
3734 << "subAccountHistoryStart, no account " << toBase58(accountId)
3735 << ", no need to add AccountHistory job.";
3736 return;
3737 }
3738 if (accountId == genesisAccountId)
3739 {
3740 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3741 {
3742 if (sleAcct->getFieldU32(sfSequence) == 1)
3743 {
3744 JLOG(m_journal.debug())
3745 << "subAccountHistoryStart, genesis account "
3746 << toBase58(accountId)
3747 << " does not have tx, no need to add AccountHistory job.";
3748 return;
3749 }
3750 }
3751 else
3752 {
3753 UNREACHABLE(
3754 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
3755 "access genesis account");
3756 return;
3757 }
3758 }
3759 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
3760 subInfo.index_->haveHistorical_ = true;
3761
3762 JLOG(m_journal.debug())
3763 << "subAccountHistoryStart, add AccountHistory job: accountId="
3764 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
3765
3766 addAccountHistoryJob(subInfo);
3767}
3768
3771 InfoSub::ref isrListener,
3772 AccountID const& accountId)
3773{
3774 if (!isrListener->insertSubAccountHistory(accountId))
3775 {
3776 JLOG(m_journal.debug())
3777 << "subAccountHistory, already subscribed to account "
3778 << toBase58(accountId);
3779 return rpcINVALID_PARAMS;
3780 }
3781
3784 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
3785 auto simIterator = mSubAccountHistory.find(accountId);
3786 if (simIterator == mSubAccountHistory.end())
3787 {
3789 inner.emplace(isrListener->getSeq(), ahi);
3791 simIterator, std::make_pair(accountId, inner));
3792 }
3793 else
3794 {
3795 simIterator->second.emplace(isrListener->getSeq(), ahi);
3796 }
3797
3798 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
3799 if (ledger)
3800 {
3801 subAccountHistoryStart(ledger, ahi);
3802 }
3803 else
3804 {
3805 // The node does not have validated ledgers, so wait for
3806 // one before start streaming.
3807 // In this case, the subscription is also considered successful.
3808 JLOG(m_journal.debug())
3809 << "subAccountHistory, no validated ledger yet, delay start";
3810 }
3811
3812 return rpcSUCCESS;
3813}
3814
3815void
3817 InfoSub::ref isrListener,
3818 AccountID const& account,
3819 bool historyOnly)
3820{
3821 if (!historyOnly)
3822 isrListener->deleteSubAccountHistory(account);
3823 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
3824}
3825
3826void
3828 std::uint64_t seq,
3829 const AccountID& account,
3830 bool historyOnly)
3831{
3833 auto simIterator = mSubAccountHistory.find(account);
3834 if (simIterator != mSubAccountHistory.end())
3835 {
3836 auto& subInfoMap = simIterator->second;
3837 auto subInfoIter = subInfoMap.find(seq);
3838 if (subInfoIter != subInfoMap.end())
3839 {
3840 subInfoIter->second.index_->stopHistorical_ = true;
3841 }
3842
3843 if (!historyOnly)
3844 {
3845 simIterator->second.erase(seq);
3846 if (simIterator->second.empty())
3847 {
3848 mSubAccountHistory.erase(simIterator);
3849 }
3850 }
3851 JLOG(m_journal.debug())
3852 << "unsubAccountHistory, account " << toBase58(account)
3853 << ", historyOnly = " << (historyOnly ? "true" : "false");
3854 }
3855}
3856
3857bool
3859{
3860 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
3861 listeners->addSubscriber(isrListener);
3862 else
3863 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
3864 return true;
3865}
3866
3867bool
3869{
3870 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
3871 listeners->removeSubscriber(uSeq);
3872
3873 return true;
3874}
3875
3879{
3880 // This code-path is exclusively used when the server is in standalone
3881 // mode via `ledger_accept`
3882 XRPL_ASSERT(
3883 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
3884
3885 if (!m_standalone)
3886 Throw<std::runtime_error>(
3887 "Operation only possible in STANDALONE mode.");
3888
3889 // FIXME Could we improve on this and remove the need for a specialized
3890 // API in Consensus?
3892 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
3893 return m_ledgerMaster.getCurrentLedger()->info().seq;
3894}
3895
3896// <-- bool: true=added, false=already there
3897bool
3899{
3900 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
3901 {
3902 jvResult[jss::ledger_index] = lpClosed->info().seq;
3903 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
3904 jvResult[jss::ledger_time] = Json::Value::UInt(
3905 lpClosed->info().closeTime.time_since_epoch().count());
3906 if (!lpClosed->rules().enabled(featureXRPFees))
3907 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3908 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
3909 jvResult[jss::reserve_base] =
3910 lpClosed->fees().accountReserve(0).jsonClipped();
3911 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
3912 }
3913
3915 {
3916 jvResult[jss::validated_ledgers] =
3918 }
3919
3921 return mStreamMaps[sLedger]
3922 .emplace(isrListener->getSeq(), isrListener)
3923 .second;
3924}
3925
3926// <-- bool: true=added, false=already there
3927bool
3929{
3932 .emplace(isrListener->getSeq(), isrListener)
3933 .second;
3934}
3935
3936// <-- bool: true=erased, false=was not there
3937bool
3939{
3941 return mStreamMaps[sLedger].erase(uSeq);
3942}
3943
3944// <-- bool: true=erased, false=was not there
3945bool
3947{
3949 return mStreamMaps[sBookChanges].erase(uSeq);
3950}
3951
3952// <-- bool: true=added, false=already there
3953bool
3955{
3957 return mStreamMaps[sManifests]
3958 .emplace(isrListener->getSeq(), isrListener)
3959 .second;
3960}
3961
3962// <-- bool: true=erased, false=was not there
3963bool
3965{
3967 return mStreamMaps[sManifests].erase(uSeq);
3968}
3969
3970// <-- bool: true=added, false=already there
3971bool
3973 InfoSub::ref isrListener,
3974 Json::Value& jvResult,
3975 bool admin)
3976{
3977 uint256 uRandom;
3978
3979 if (m_standalone)
3980 jvResult[jss::stand_alone] = m_standalone;
3981
3982 // CHECKME: is it necessary to provide a random number here?
3983 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
3984
3985 auto const& feeTrack = app_.getFeeTrack();
3986 jvResult[jss::random] = to_string(uRandom);
3987 jvResult[jss::server_status] = strOperatingMode(admin);
3988 jvResult[jss::load_base] = feeTrack.getLoadBase();
3989 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
3990 jvResult[jss::hostid] = getHostId(admin);
3991 jvResult[jss::pubkey_node] =
3993
3995 return mStreamMaps[sServer]
3996 .emplace(isrListener->getSeq(), isrListener)
3997 .second;
3998}
3999
4000// <-- bool: true=erased, false=was not there
4001bool
4003{
4005 return mStreamMaps[sServer].erase(uSeq);
4006}
4007
4008// <-- bool: true=added, false=already there
4009bool
4011{
4014 .emplace(isrListener->getSeq(), isrListener)
4015 .second;
4016}
4017
4018// <-- bool: true=erased, false=was not there
4019bool
4021{
4023 return mStreamMaps[sTransactions].erase(uSeq);
4024}
4025
4026// <-- bool: true=added, false=already there
4027bool
4029{
4032 .emplace(isrListener->getSeq(), isrListener)
4033 .second;
4034}
4035
4036// <-- bool: true=erased, false=was not there
4037bool
4039{
4041 return mStreamMaps[sRTTransactions].erase(uSeq);
4042}
4043
4044// <-- bool: true=added, false=already there
4045bool
4047{
4050 .emplace(isrListener->getSeq(), isrListener)
4051 .second;
4052}
4053
4054void
4056{
4057 accounting_.json(obj);
4058}
4059
4060// <-- bool: true=erased, false=was not there
4061bool
4063{
4065 return mStreamMaps[sValidations].erase(uSeq);
4066}
4067
4068// <-- bool: true=added, false=already there
4069bool
4071{
4073 return mStreamMaps[sPeerStatus]
4074 .emplace(isrListener->getSeq(), isrListener)
4075 .second;
4076}
4077
4078// <-- bool: true=erased, false=was not there
4079bool
4081{
4083 return mStreamMaps[sPeerStatus].erase(uSeq);
4084}
4085
4086// <-- bool: true=added, false=already there
4087bool
4089{
4092 .emplace(isrListener->getSeq(), isrListener)
4093 .second;
4094}
4095
4096// <-- bool: true=erased, false=was not there
4097bool
4099{
4101 return mStreamMaps[sConsensusPhase].erase(uSeq);
4102}
4103
4106{
4108
4109 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4110
4111 if (it != mRpcSubMap.end())
4112 return it->second;
4113
4114 return InfoSub::pointer();
4115}
4116
4119{
4121
4122 mRpcSubMap.emplace(strUrl, rspEntry);
4123
4124 return rspEntry;
4125}
4126
4127bool
4129{
4131 auto pInfo = findRpcSub(strUrl);
4132
4133 if (!pInfo)
4134 return false;
4135
4136 // check to see if any of the stream maps still hold a weak reference to
4137 // this entry before removing
4138 for (SubMapType const& map : mStreamMaps)
4139 {
4140 if (map.find(pInfo->getSeq()) != map.end())
4141 return false;
4142 }
4143 mRpcSubMap.erase(strUrl);
4144 return true;
4145}
4146
4147#ifndef USE_NEW_BOOK_PAGE
4148
4149// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4150// work, but it demonstrated poor performance.
4151//
4152void
4155 Book const& book,
4156 AccountID const& uTakerID,
4157 bool const bProof,
4158 unsigned int iLimit,
4159 Json::Value const& jvMarker,
4160 Json::Value& jvResult)
4161{ // CAUTION: This is the old get book page logic
4162 Json::Value& jvOffers =
4163 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4164
4166 const uint256 uBookBase = getBookBase(book);
4167 const uint256 uBookEnd = getQualityNext(uBookBase);
4168 uint256 uTipIndex = uBookBase;
4169
4170 if (auto stream = m_journal.trace())
4171 {
4172 stream << "getBookPage:" << book;
4173 stream << "getBookPage: uBookBase=" << uBookBase;
4174 stream << "getBookPage: uBookEnd=" << uBookEnd;
4175 stream << "getBookPage: uTipIndex=" << uTipIndex;
4176 }
4177
4178 ReadView const& view = *lpLedger;
4179
4180 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4181 isGlobalFrozen(view, book.in.account);
4182
4183 bool bDone = false;
4184 bool bDirectAdvance = true;
4185
4186 std::shared_ptr<SLE const> sleOfferDir;
4187 uint256 offerIndex;
4188 unsigned int uBookEntry;
4189 STAmount saDirRate;
4190
4191 auto const rate = transferRate(view, book.out.account);
4192 auto viewJ = app_.journal("View");
4193
4194 while (!bDone && iLimit-- > 0)
4195 {
4196 if (bDirectAdvance)
4197 {
4198 bDirectAdvance = false;
4199
4200 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4201
4202 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4203 if (ledgerIndex)
4204 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4205 else
4206 sleOfferDir.reset();
4207
4208 if (!sleOfferDir)
4209 {
4210 JLOG(m_journal.trace()) << "getBookPage: bDone";
4211 bDone = true;
4212 }
4213 else
4214 {
4215 uTipIndex = sleOfferDir->key();
4216 saDirRate = amountFromQuality(getQuality(uTipIndex));
4217
4218 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4219
4220 JLOG(m_journal.trace())
4221 << "getBookPage: uTipIndex=" << uTipIndex;
4222 JLOG(m_journal.trace())
4223 << "getBookPage: offerIndex=" << offerIndex;
4224 }
4225 }
4226
4227 if (!bDone)
4228 {
4229 auto sleOffer = view.read(keylet::offer(offerIndex));
4230
4231 if (sleOffer)
4232 {
4233 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4234 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4235 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4236 STAmount saOwnerFunds;
4237 bool firstOwnerOffer(true);
4238
4239 if (book.out.account == uOfferOwnerID)
4240 {
4241 // If an offer is selling issuer's own IOUs, it is fully
4242 // funded.
4243 saOwnerFunds = saTakerGets;
4244 }
4245 else if (bGlobalFreeze)
4246 {
4247 // If either asset is globally frozen, consider all offers
4248 // that aren't ours to be totally unfunded
4249 saOwnerFunds.clear(book.out);
4250 }
4251 else
4252 {
4253 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4254 if (umBalanceEntry != umBalance.end())
4255 {
4256 // Found in running balance table.
4257
4258 saOwnerFunds = umBalanceEntry->second;
4259 firstOwnerOffer = false;
4260 }
4261 else
4262 {
4263 // Did not find balance in table.
4264
4265 saOwnerFunds = accountHolds(
4266 view,
4267 uOfferOwnerID,
4268 book.out.currency,
4269 book.out.account,
4271 viewJ);
4272
4273 if (saOwnerFunds < beast::zero)
4274 {
4275 // Treat negative funds as zero.
4276
4277 saOwnerFunds.clear();
4278 }
4279 }
4280 }
4281
4282 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4283
4284 STAmount saTakerGetsFunded;
4285 STAmount saOwnerFundsLimit = saOwnerFunds;
4286 Rate offerRate = parityRate;
4287
4288 if (rate != parityRate
4289 // Have a tranfer fee.
4290 && uTakerID != book.out.account
4291 // Not taking offers of own IOUs.
4292 && book.out.account != uOfferOwnerID)
4293 // Offer owner not issuing ownfunds
4294 {
4295 // Need to charge a transfer fee to offer owner.
4296 offerRate = rate;
4297 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4298 }
4299
4300 if (saOwnerFundsLimit >= saTakerGets)
4301 {
4302 // Sufficient funds no shenanigans.
4303 saTakerGetsFunded = saTakerGets;
4304 }
4305 else
4306 {
4307 // Only provide, if not fully funded.
4308
4309 saTakerGetsFunded = saOwnerFundsLimit;
4310
4311 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4312 std::min(
4313 saTakerPays,
4314 multiply(
4315 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4316 .setJson(jvOffer[jss::taker_pays_funded]);
4317 }
4318
4319 STAmount saOwnerPays = (parityRate == offerRate)
4320 ? saTakerGetsFunded
4321 : std::min(
4322 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4323
4324 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4325
4326 // Include all offers funded and unfunded
4327 Json::Value& jvOf = jvOffers.append(jvOffer);
4328 jvOf[jss::quality] = saDirRate.getText();
4329
4330 if (firstOwnerOffer)
4331 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4332 }
4333 else
4334 {
4335 JLOG(m_journal.warn()) << "Missing offer";
4336 }
4337
4338 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4339 {
4340 bDirectAdvance = true;
4341 }
4342 else
4343 {
4344 JLOG(m_journal.trace())
4345 << "getBookPage: offerIndex=" << offerIndex;
4346 }
4347 }
4348 }
4349
4350 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4351 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4352}
4353
4354#else
4355
4356// This is the new code that uses the book iterators
4357// It has temporarily been disabled
4358
4359void
4362 Book const& book,
4363 AccountID const& uTakerID,
4364 bool const bProof,
4365 unsigned int iLimit,
4366 Json::Value const& jvMarker,
4367 Json::Value& jvResult)
4368{
4369 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4370
4372
4373 MetaView lesActive(lpLedger, tapNONE, true);
4374 OrderBookIterator obIterator(lesActive, book);
4375
4376 auto const rate = transferRate(lesActive, book.out.account);
4377
4378 const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4379 lesActive.isGlobalFrozen(book.in.account);
4380
4381 while (iLimit-- > 0 && obIterator.nextOffer())
4382 {
4383 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4384 if (sleOffer)
4385 {
4386 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4387 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4388 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4389 STAmount saDirRate = obIterator.getCurrentRate();
4390 STAmount saOwnerFunds;
4391
4392 if (book.out.account == uOfferOwnerID)
4393 {
4394 // If offer is selling issuer's own IOUs, it is fully funded.
4395 saOwnerFunds = saTakerGets;
4396 }
4397 else if (bGlobalFreeze)
4398 {
4399 // If either asset is globally frozen, consider all offers
4400 // that aren't ours to be totally unfunded
4401 saOwnerFunds.clear(book.out);
4402 }
4403 else
4404 {
4405 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4406
4407 if (umBalanceEntry != umBalance.end())
4408 {
4409 // Found in running balance table.
4410
4411 saOwnerFunds = umBalanceEntry->second;
4412 }
4413 else
4414 {
4415 // Did not find balance in table.
4416
4417 saOwnerFunds = lesActive.accountHolds(
4418 uOfferOwnerID,
4419 book.out.currency,
4420 book.out.account,
4422
4423 if (saOwnerFunds.isNegative())
4424 {
4425 // Treat negative funds as zero.
4426
4427 saOwnerFunds.zero();
4428 }
4429 }
4430 }
4431
4432 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4433
4434 STAmount saTakerGetsFunded;
4435 STAmount saOwnerFundsLimit = saOwnerFunds;
4436 Rate offerRate = parityRate;
4437
4438 if (rate != parityRate
4439 // Have a tranfer fee.
4440 && uTakerID != book.out.account
4441 // Not taking offers of own IOUs.
4442 && book.out.account != uOfferOwnerID)
4443 // Offer owner not issuing ownfunds
4444 {
4445 // Need to charge a transfer fee to offer owner.
4446 offerRate = rate;
4447 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4448 }
4449
4450 if (saOwnerFundsLimit >= saTakerGets)
4451 {
4452 // Sufficient funds no shenanigans.
4453 saTakerGetsFunded = saTakerGets;
4454 }
4455 else
4456 {
4457 // Only provide, if not fully funded.
4458 saTakerGetsFunded = saOwnerFundsLimit;
4459
4460 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4461
4462 // TOOD(tom): The result of this expression is not used - what's
4463 // going on here?
4464 std::min(
4465 saTakerPays,
4466 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4467 .setJson(jvOffer[jss::taker_pays_funded]);
4468 }
4469
4470 STAmount saOwnerPays = (parityRate == offerRate)
4471 ? saTakerGetsFunded
4472 : std::min(
4473 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4474
4475 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4476
4477 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4478 {
4479 // Only provide funded offers and offers of the taker.
4480 Json::Value& jvOf = jvOffers.append(jvOffer);
4481 jvOf[jss::quality] = saDirRate.getText();
4482 }
4483 }
4484 }
4485
4486 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4487 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4488}
4489
4490#endif
4491
4492inline void
4494{
4495 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4496 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4498 counters[static_cast<std::size_t>(mode)].dur += current;
4499
4502 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4503 .dur.count());
4505 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4506 .dur.count());
4508 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4510 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4511 .dur.count());
4513 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4514
4516 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4517 .transitions);
4519 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4520 .transitions);
4522 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4524 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4525 .transitions);
4527 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4528}
4529
4530void
4532{
4533 auto now = std::chrono::steady_clock::now();
4534
4535 std::lock_guard lock(mutex_);
4536 ++counters_[static_cast<std::size_t>(om)].transitions;
4537 if (om == OperatingMode::FULL &&
4538 counters_[static_cast<std::size_t>(om)].transitions == 1)
4539 {
4540 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4541 now - processStart_)
4542 .count();
4543 }
4544 counters_[static_cast<std::size_t>(mode_)].dur +=
4545 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4546
4547 mode_ = om;
4548 start_ = now;
4549}
4550
4551void
4553{
4554 auto [counters, mode, start, initialSync] = getCounterData();
4555 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4557 counters[static_cast<std::size_t>(mode)].dur += current;
4558
4559 obj[jss::state_accounting] = Json::objectValue;
4561 i <= static_cast<std::size_t>(OperatingMode::FULL);
4562 ++i)
4563 {
4564 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4565 auto& state = obj[jss::state_accounting][states_[i]];
4566 state[jss::transitions] = std::to_string(counters[i].transitions);
4567 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4568 }
4569 obj[jss::server_state_duration_us] = std::to_string(current.count());
4570 if (initialSync)
4571 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4572}
4573
4574//------------------------------------------------------------------------------
4575
4578 Application& app,
4580 bool standalone,
4581 std::size_t minPeerCount,
4582 bool startvalid,
4583 JobQueue& job_queue,
4585 ValidatorKeys const& validatorKeys,
4586 boost::asio::io_service& io_svc,
4587 beast::Journal journal,
4588 beast::insight::Collector::ptr const& collector)
4589{
4590 return std::make_unique<NetworkOPsImp>(
4591 app,
4592 clock,
4593 standalone,
4594 minPeerCount,
4595 startvalid,
4596 job_queue,
4598 validatorKeys,
4599 io_svc,
4600 journal,
4601 collector);
4602}
4603
4604} // namespace ripple
T back_inserter(T... args)
T begin(T... args)
RAII class to check if an Item is already being processed on another thread, as indicated by it's pre...
Definition: CanProcess.h:67
Decorator for streaming out compact json.
Definition: json_writer.h:317
Lightweight wrapper to tag static string.
Definition: json_value.h:61
Represents a JSON value.
Definition: json_value.h:147
Json::UInt UInt
Definition: json_value.h:154
Value get(UInt index, const Value &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Definition: json_value.cpp:841
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:891
bool isMember(const char *key) const
Return true if the object has a member named key.
Definition: json_value.cpp:943
A generic endpoint for log messages.
Definition: Journal.h:59
Stream error() const
Definition: Journal.h:335
Stream debug() const
Definition: Journal.h:317
Stream info() const
Definition: Journal.h:323
Stream trace() const
Severity stream access functions.
Definition: Journal.h:311
Stream warn() const
Definition: Journal.h:329
A metric for measuring an integral value.
Definition: Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition: Gauge.h:68
A reference to a handler for performing polled collection.
Definition: Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition: Book.h:34
Issue in
Definition: Book.h:36
Issue out
Definition: Book.h:37
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition: ClusterNode.h:45
std::uint32_t getLoadFee() const
Definition: ClusterNode.h:51
NetClock::time_point getReportTime() const
Definition: ClusterNode.h:57
PublicKey const & identity() const
Definition: ClusterNode.h:63
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:48
std::string SERVER_DOMAIN
Definition: Config.h:286
std::size_t NODE_SIZE
Definition: Config.h:220
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition: Config.h:167
int RELAY_UNTRUSTED_VALIDATIONS
Definition: Config.h:176
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition: InfoSub.h:54
AccountID account
Definition: Issue.h:39
Currency currency
Definition: Issue.h:38
A pool of threads to perform work.
Definition: JobQueue.h:56
Json::Value getJson(int c=0)
Definition: JobQueue.cpp:212
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
Definition: LedgerMaster.h:266
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:80
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
Definition: LoadFeeTrack.h:46
std::uint32_t getClusterFee() const
Definition: LoadFeeTrack.h:83
std::uint32_t getLocalFee() const
Definition: LoadFeeTrack.h:76
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:90
std::uint32_t getRemoteFee() const
Definition: LoadFeeTrack.h:69
std::uint32_t getLoadFactor() const
Definition: LoadFeeTrack.h:96
Manages load sources.
Definition: LoadManager.h:46
void resetDeadlockDetector()
Reset the deadlock detection timer.
Definition: LoadManager.cpp:63
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
Definition: NetworkOPs.cpp:141
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
Definition: NetworkOPs.cpp:151
std::chrono::steady_clock::time_point start_
Definition: NetworkOPs.cpp:153
static std::array< Json::StaticString const, 5 > const states_
Definition: NetworkOPs.cpp:157
std::chrono::steady_clock::time_point const processStart_
Definition: NetworkOPs.cpp:155
Transaction with input flags and results to be applied in batches.
Definition: NetworkOPs.cpp:92
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
Definition: NetworkOPs.cpp:101
std::shared_ptr< Transaction > const transaction
Definition: NetworkOPs.cpp:94
boost::asio::steady_timer accountHistoryTxTimer_
Definition: NetworkOPs.cpp:731
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
Definition: NetworkOPs.cpp:863
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::vector< TransactionStatus > mTransactions
Definition: NetworkOPs.cpp:775
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Definition: NetworkOPs.cpp:721
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
Definition: NetworkOPs.cpp:733
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
Definition: NetworkOPs.cpp:881
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
Definition: NetworkOPs.cpp:729
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
Definition: NetworkOPs.cpp:118
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
Definition: NetworkOPs.cpp:717
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
Definition: NetworkOPs.cpp:262
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Definition: NetworkOPs.cpp:744
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, const bool bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
Definition: NetworkOPs.cpp:730
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
Definition: NetworkOPs.cpp:124
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:224
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
Definition: NetworkOPs.cpp:724
beast::Journal m_journal
Definition: NetworkOPs.cpp:715
SubInfoMapType mSubAccount
Definition: NetworkOPs.cpp:739
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
Definition: NetworkOPs.cpp:779
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
Definition: NetworkOPs.cpp:996
bool unsubValidations(std::uint64_t uListener) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
Definition: NetworkOPs.cpp:728
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
Definition: NetworkOPs.cpp:934
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
Definition: NetworkOPs.cpp:759
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
Definition: NetworkOPs.cpp:769
std::atomic< bool > unlBlocked_
Definition: NetworkOPs.cpp:726
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
Definition: NetworkOPs.cpp:719
std::atomic< bool > needNetworkLedger_
Definition: NetworkOPs.cpp:723
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
Definition: NetworkOPs.cpp:777
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
Definition: NetworkOPs.cpp:893
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void endConsensus() override
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
Definition: NetworkOPs.cpp:737
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
Definition: NetworkOPs.cpp:924
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
Definition: NetworkOPs.cpp:761
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
Definition: NetworkOPs.cpp:875
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
Definition: NetworkOPs.cpp:772
void stop() override
Definition: NetworkOPs.cpp:564
void clearNeedNetworkLedger() override
Definition: NetworkOPs.cpp:887
DispatchState mDispatchState
Definition: NetworkOPs.cpp:774
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
Definition: NetworkOPs.cpp:740
bool checkLastClosedLedger(const Overlay::PeerSequence &, uint256 &networkClosed)
void reportFeeChange() override
void setMode(OperatingMode om, const char *reason) override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
Definition: NetworkOPs.cpp:899
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
Definition: NetworkOPs.cpp:780
Application & app_
Definition: NetworkOPs.cpp:714
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
Definition: NetworkOPs.cpp:735
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
Definition: NetworkOPs.cpp:742
std::atomic< bool > amendmentWarned_
Definition: NetworkOPs.cpp:725
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
Definition: NetworkOPs.cpp:905
bool beginConsensus(uint256 const &networkClosed) override
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition: NetworkOPs.h:87
void getCountsJson(Json::Value &obj)
Definition: Database.cpp:266
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:50
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:56
BookListeners::pointer getBookListeners(Book const &)
BookListeners::pointer makeBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, const AcceptedLedgerTx &alTx, MultiApiJson const &jvObj)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
Definition: RCLConsensus.h:51
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
Definition: RCLConsensus.h:443
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Definition: RCLConsensus.h:456
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:44
Represents a set of transactions in RCLConsensus.
Definition: RCLCxTx.h:66
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition: RFC1751.cpp:498
A view into a ledger.
Definition: ReadView.h:55
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition: STAmount.cpp:475
std::string getText() const override
Definition: STAmount.cpp:515
Issue const & issue() const
Definition: STAmount.h:487
std::optional< T > get(std::string const &name) const
Definition: BasicConfig.h:141
std::size_t size() const noexcept
Definition: Serializer.h:72
void const * data() const noexcept
Definition: Serializer.h:78
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition: TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition: TxQ.cpp:1777
static time_point now()
Definition: UptimeClock.cpp:63
Validator keys and manifest as set in configuration file.
Definition: ValidatorKeys.h:37
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > getTrustedKey(PublicKey const &identity) const
Returns master public key if public key is trusted.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition: XRPAmount.h:262
Json::Value jsonClipped() const
Definition: XRPAmount.h:218
iterator begin()
Definition: base_uint.h:135
static constexpr std::size_t size()
Definition: base_uint.h:525
bool isZero() const
Definition: base_uint.h:539
bool isNonZero() const
Definition: base_uint.h:544
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition: json_value.h:42
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
int Int
Definition: json_forwards.h:26
unsigned int UInt
Definition: json_forwards.h:27
void rngfill(void *buffer, std::size_t bytes, Generator &g)
Definition: rngfill.h:33
std::string const & getVersionString()
Server version.
Definition: BuildInfo.cpp:65
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition: BookChanges.h:47
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition: Indexes.cpp:160
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition: Indexes.cpp:356
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition: Indexes.cpp:250
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:30
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:106
STAmount divide(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:87
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition: STTx.cpp:604
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition: View.cpp:442
@ fhZERO_IF_FROZEN
Definition: View.h:80
@ fhIGNORE_FREEZE
Definition: View.h:80
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition: View.cpp:136
std::uint64_t getQuality(uint256 const &uBase)
Definition: Indexes.cpp:125
@ INCLUDED
Definition: Transaction.h:49
@ OBSOLETE
Definition: Transaction.h:54
@ INVALID
Definition: Transaction.h:48
error_code_i
Definition: ErrorCodes.h:40
@ rpcSUCCESS
Definition: ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition: ErrorCodes.h:84
@ rpcINTERNAL
Definition: ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
Definition: SecretKey.cpp:351
auto constexpr muldiv_max
Definition: mulDiv.h:29
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition: LocalTxs.cpp:197
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition: View.cpp:650
STAmount amountFromQuality(std::uint64_t rate)
Definition: STAmount.cpp:822
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition: ErrorCodes.h:167
@ warnRPC_UNSUPPORTED_MAJORITY
Definition: ErrorCodes.h:165
@ warnRPC_AMENDMENT_BLOCKED
Definition: ErrorCodes.h:166
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition: NetworkOPs.h:66
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:47
bool isTesSuccess(TER x)
Definition: TER.h:656
bool isTerRetry(TER x)
Definition: TER.h:650
AccountID calcAccountID(PublicKey const &pk)
Definition: AccountID.cpp:160
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Definition: csprng.cpp:99
Json::Value rpcError(int iError)
Definition: RPCErr.cpp:29
@ tefPAST_SEQ
Definition: TER.h:175
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
Definition: NetworkOPs.cpp:844
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
bool isTemMalformed(TER x)
Definition: TER.h:638
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition: View.cpp:147
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition: ApiVersion.h:102
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:75
@ tesSUCCESS
Definition: TER.h:242
uint256 getQualityNext(uint256 const &uBase)
Definition: Indexes.cpp:117
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition: View.cpp:308
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition: ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition: chrono.h:93
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:629
FeeSetup setup_FeeVote(Section const &section)
Definition: Config.cpp:1090
Number root(Number f, unsigned d)
Definition: Number.cpp:630
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Definition: mulDiv.cpp:27
ApplyFlags
Definition: ApplyView.h:30
@ tapFAIL_HARD
Definition: ApplyView.h:35
@ tapUNLIMITED
Definition: ApplyView.h:42
@ tapNONE
Definition: ApplyView.h:31
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:37
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition: Seed.cpp:69
@ terQUEUED
Definition: TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition: TER.cpp:236
@ jtNETOP_CLUSTER
Definition: Job.h:75
@ jtCLIENT_FEE_CHANGE
Definition: Job.h:47
@ jtTRANSACTION
Definition: Job.h:62
@ jtTXN_PROC
Definition: Job.h:82
@ jtCLIENT_CONSENSUS
Definition: Job.h:48
@ jtBATCH
Definition: Job.h:65
@ jtCLIENT_ACCT_HIST
Definition: Job.h:49
uint256 getBookBase(Book const &book)
Definition: Indexes.cpp:98
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition: View.cpp:173
static std::uint32_t trunc32(std::uint64_t v)
@ temBAD_SIGNATURE
Definition: TER.h:105
static auto const genesisAccountId
Definition: NetworkOPs.cpp:857
STL namespace.
T ref(T... args)
T reset(T... args)
T set_intersection(T... args)
T str(T... args)
std::string serialized
The manifest in serialized form.
Definition: Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition: Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition: Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition: Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Definition: Manifest.h:86
Server fees published on server subscription.
Definition: NetworkOPs.cpp:201
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
Definition: NetworkOPs.cpp:220
bool operator==(ServerFeeSummary const &b) const
Definition: NetworkOPs.cpp:212
beast::insight::Gauge full_transitions
Definition: NetworkOPs.cpp:831
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:786
beast::insight::Hook hook
Definition: NetworkOPs.cpp:820
beast::insight::Gauge connected_duration
Definition: NetworkOPs.cpp:822
beast::insight::Gauge tracking_duration
Definition: NetworkOPs.cpp:824
beast::insight::Gauge connected_transitions
Definition: NetworkOPs.cpp:828
beast::insight::Gauge disconnected_transitions
Definition: NetworkOPs.cpp:827
beast::insight::Gauge syncing_duration
Definition: NetworkOPs.cpp:823
beast::insight::Gauge tracking_transitions
Definition: NetworkOPs.cpp:830
beast::insight::Gauge full_duration
Definition: NetworkOPs.cpp:825
beast::insight::Gauge disconnected_duration
Definition: NetworkOPs.cpp:821
beast::insight::Gauge syncing_transitions
Definition: NetworkOPs.cpp:829
SubAccountHistoryIndex(AccountID const &accountId)
Definition: NetworkOPs.cpp:678
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:697
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:692
Represents a transfer rate.
Definition: Rate.h:38
Data format for exchanging consumption information across peers.
Definition: Gossip.h:30
std::vector< Item > items
Definition: Gossip.h:42
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition: TxQ.h:162
void set(const char *key, auto const &v)
Definition: MultiApiJson.h:83
IsMemberResult isMember(const char *key) const
Definition: MultiApiJson.h:94
Select all peers (except optional excluded) that are in our cluster.
Definition: predicates.h:137
Sends a message to all peers.
Definition: predicates.h:32
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)