rippled
Loading...
Searching...
No Matches
NetworkOPs.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/misc/AmendmentTable.h>
32#include <xrpld/app/misc/DeliverMax.h>
33#include <xrpld/app/misc/HashRouter.h>
34#include <xrpld/app/misc/LoadFeeTrack.h>
35#include <xrpld/app/misc/NetworkOPs.h>
36#include <xrpld/app/misc/Transaction.h>
37#include <xrpld/app/misc/TxQ.h>
38#include <xrpld/app/misc/ValidatorKeys.h>
39#include <xrpld/app/misc/ValidatorList.h>
40#include <xrpld/app/misc/detail/AccountTxPaging.h>
41#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
42#include <xrpld/app/tx/apply.h>
43#include <xrpld/consensus/Consensus.h>
44#include <xrpld/consensus/ConsensusParms.h>
45#include <xrpld/overlay/Cluster.h>
46#include <xrpld/overlay/Overlay.h>
47#include <xrpld/overlay/predicates.h>
48#include <xrpld/perflog/PerfLog.h>
49#include <xrpld/rpc/BookChanges.h>
50#include <xrpld/rpc/DeliveredAmount.h>
51#include <xrpld/rpc/MPTokenIssuanceID.h>
52#include <xrpld/rpc/ServerHandler.h>
53#include <xrpl/basics/UptimeClock.h>
54#include <xrpl/basics/mulDiv.h>
55#include <xrpl/basics/safe_cast.h>
56#include <xrpl/basics/scope.h>
57#include <xrpl/beast/rfc2616.h>
58#include <xrpl/beast/utility/rngfill.h>
59#include <xrpl/crypto/RFC1751.h>
60#include <xrpl/crypto/csprng.h>
61#include <xrpl/json/to_string.h>
62#include <xrpl/protocol/BuildInfo.h>
63#include <xrpl/protocol/Feature.h>
64#include <xrpl/protocol/MultiApiJson.h>
65#include <xrpl/protocol/RPCErr.h>
66#include <xrpl/protocol/STParsedJSON.h>
67#include <xrpl/protocol/jss.h>
68#include <xrpl/resource/Fees.h>
69#include <xrpl/resource/ResourceManager.h>
70#include <boost/asio/ip/host_name.hpp>
71#include <boost/asio/steady_timer.hpp>
72
73#include <algorithm>
74#include <exception>
75#include <mutex>
76#include <optional>
77#include <set>
78#include <string>
79#include <tuple>
80#include <unordered_map>
81#include <utility>
82
83namespace ripple {
84
85class NetworkOPsImp final : public NetworkOPs
86{
92 {
93 public:
95 bool const admin;
96 bool const local;
98 bool applied = false;
100
103 bool a,
104 bool l,
105 FailHard f)
106 : transaction(t), admin(a), local(l), failType(f)
107 {
108 XRPL_ASSERT(
110 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
111 "valid inputs");
112 }
113 };
114
118 enum class DispatchState : unsigned char {
119 none,
120 scheduled,
121 running,
122 };
123
125
141 {
142 struct Counters
143 {
144 explicit Counters() = default;
145
148 };
149
153 std::chrono::steady_clock::time_point start_ =
155 std::chrono::steady_clock::time_point const processStart_ = start_;
158
159 public:
161 {
163 .transitions = 1;
164 }
165
172 void
174
180 void
181 json(Json::Value& obj) const;
182
184 {
186 decltype(mode_) mode;
187 decltype(start_) start;
189 };
190
193 {
196 }
197 };
198
201 {
202 ServerFeeSummary() = default;
203
205 XRPAmount fee,
206 TxQ::Metrics&& escalationMetrics,
207 LoadFeeTrack const& loadFeeTrack);
208 bool
209 operator!=(ServerFeeSummary const& b) const;
210
211 bool
213 {
214 return !(*this != b);
215 }
216
221 };
222
223public:
225 Application& app,
227 bool standalone,
228 std::size_t minPeerCount,
229 bool start_valid,
230 JobQueue& job_queue,
232 ValidatorKeys const& validatorKeys,
233 boost::asio::io_service& io_svc,
234 beast::Journal journal,
235 beast::insight::Collector::ptr const& collector)
236 : app_(app)
237 , m_journal(journal)
240 , heartbeatTimer_(io_svc)
241 , clusterTimer_(io_svc)
242 , accountHistoryTxTimer_(io_svc)
243 , mConsensus(
244 app,
246 setup_FeeVote(app_.config().section("voting")),
247 app_.logs().journal("FeeVote")),
249 *m_localTX,
250 app.getInboundTransactions(),
251 beast::get_abstract_clock<std::chrono::steady_clock>(),
252 validatorKeys,
253 app_.logs().journal("LedgerConsensus"))
255 , m_job_queue(job_queue)
256 , m_standalone(standalone)
257 , minPeerCount_(start_valid ? 0 : minPeerCount)
258 , m_stats(std::bind(&NetworkOPsImp::collect_metrics, this), collector)
259 {
260 }
261
262 ~NetworkOPsImp() override
263 {
264 // This clear() is necessary to ensure the shared_ptrs in this map get
265 // destroyed NOW because the objects in this map invoke methods on this
266 // class when they are destroyed
268 }
269
270public:
272 getOperatingMode() const override;
273
275 strOperatingMode(OperatingMode const mode, bool const admin) const override;
276
278 strOperatingMode(bool const admin = false) const override;
279
280 //
281 // Transaction operations.
282 //
283
284 // Must complete immediately.
285 void
287
288 void
290 std::shared_ptr<Transaction>& transaction,
291 bool bUnlimited,
292 bool bLocal,
293 FailHard failType) override;
294
303 void
306 bool bUnlimited,
307 FailHard failType);
308
318 void
321 bool bUnlimited,
322 FailHard failtype);
323
327 void
329
335 void
337
338 //
339 // Owner functions.
340 //
341
345 AccountID const& account) override;
346
347 //
348 // Book functions.
349 //
350
351 void
354 Book const&,
355 AccountID const& uTakerID,
356 const bool bProof,
357 unsigned int iLimit,
358 Json::Value const& jvMarker,
359 Json::Value& jvResult) override;
360
361 // Ledger proposal/close functions.
362 bool
364
365 bool
368 std::string const& source) override;
369
370 void
371 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
372
373 // Network state machine.
374
375 // Used for the "jump" case.
376private:
377 void
379 bool
381
382public:
383 bool
384 beginConsensus(uint256 const& networkClosed) override;
385 void
386 endConsensus() override;
387 void
388 setStandAlone() override;
389
393 void
394 setStateTimer() override;
395
396 void
397 setNeedNetworkLedger() override;
398 void
399 clearNeedNetworkLedger() override;
400 bool
401 isNeedNetworkLedger() override;
402 bool
403 isFull() override;
404
405 void
406 setMode(OperatingMode om) override;
407
408 bool
409 isBlocked() override;
410 bool
411 isAmendmentBlocked() override;
412 void
413 setAmendmentBlocked() override;
414 bool
415 isAmendmentWarned() override;
416 void
417 setAmendmentWarned() override;
418 void
419 clearAmendmentWarned() override;
420 bool
421 isUNLBlocked() override;
422 void
423 setUNLBlocked() override;
424 void
425 clearUNLBlocked() override;
426 void
427 consensusViewChange() override;
428
430 getConsensusInfo() override;
432 getServerInfo(bool human, bool admin, bool counters) override;
433 void
434 clearLedgerFetch() override;
436 getLedgerFetchInfo() override;
439 std::optional<std::chrono::milliseconds> consensusDelay) override;
440 void
441 reportFeeChange() override;
442 void
444
445 void
446 updateLocalTx(ReadView const& view) override;
448 getLocalTxCount() override;
449
450 //
451 // Monitoring: publisher side.
452 //
453 void
454 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
455 void
458 std::shared_ptr<STTx const> const& transaction,
459 TER result) override;
460 void
461 pubValidation(std::shared_ptr<STValidation> const& val) override;
462
463 //--------------------------------------------------------------------------
464 //
465 // InfoSub::Source.
466 //
467 void
469 InfoSub::ref ispListener,
470 hash_set<AccountID> const& vnaAccountIDs,
471 bool rt) override;
472 void
474 InfoSub::ref ispListener,
475 hash_set<AccountID> const& vnaAccountIDs,
476 bool rt) override;
477
478 // Just remove the subscription from the tracking
479 // not from the InfoSub. Needed for InfoSub destruction
480 void
482 std::uint64_t seq,
483 hash_set<AccountID> const& vnaAccountIDs,
484 bool rt) override;
485
487 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
488 override;
489 void
491 InfoSub::ref ispListener,
492 AccountID const& account,
493 bool historyOnly) override;
494
495 void
497 std::uint64_t seq,
498 AccountID const& account,
499 bool historyOnly) override;
500
501 bool
502 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
503 bool
504 unsubLedger(std::uint64_t uListener) override;
505
506 bool
507 subBookChanges(InfoSub::ref ispListener) override;
508 bool
509 unsubBookChanges(std::uint64_t uListener) override;
510
511 bool
512 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
513 override;
514 bool
515 unsubServer(std::uint64_t uListener) override;
516
517 bool
518 subBook(InfoSub::ref ispListener, Book const&) override;
519 bool
520 unsubBook(std::uint64_t uListener, Book const&) override;
521
522 bool
523 subManifests(InfoSub::ref ispListener) override;
524 bool
525 unsubManifests(std::uint64_t uListener) override;
526 void
527 pubManifest(Manifest const&) override;
528
529 bool
530 subTransactions(InfoSub::ref ispListener) override;
531 bool
532 unsubTransactions(std::uint64_t uListener) override;
533
534 bool
535 subRTTransactions(InfoSub::ref ispListener) override;
536 bool
537 unsubRTTransactions(std::uint64_t uListener) override;
538
539 bool
540 subValidations(InfoSub::ref ispListener) override;
541 bool
542 unsubValidations(std::uint64_t uListener) override;
543
544 bool
545 subPeerStatus(InfoSub::ref ispListener) override;
546 bool
547 unsubPeerStatus(std::uint64_t uListener) override;
548 void
549 pubPeerStatus(std::function<Json::Value(void)> const&) override;
550
551 bool
552 subConsensus(InfoSub::ref ispListener) override;
553 bool
554 unsubConsensus(std::uint64_t uListener) override;
555
557 findRpcSub(std::string const& strUrl) override;
559 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
560 bool
561 tryRemoveRpcSub(std::string const& strUrl) override;
562
563 void
564 stop() override
565 {
566 {
567 boost::system::error_code ec;
568 heartbeatTimer_.cancel(ec);
569 if (ec)
570 {
571 JLOG(m_journal.error())
572 << "NetworkOPs: heartbeatTimer cancel error: "
573 << ec.message();
574 }
575
576 ec.clear();
577 clusterTimer_.cancel(ec);
578 if (ec)
579 {
580 JLOG(m_journal.error())
581 << "NetworkOPs: clusterTimer cancel error: "
582 << ec.message();
583 }
584
585 ec.clear();
586 accountHistoryTxTimer_.cancel(ec);
587 if (ec)
588 {
589 JLOG(m_journal.error())
590 << "NetworkOPs: accountHistoryTxTimer cancel error: "
591 << ec.message();
592 }
593 }
594 // Make sure that any waitHandlers pending in our timers are done.
595 using namespace std::chrono_literals;
596 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
597 }
598
599 void
600 stateAccounting(Json::Value& obj) override;
601
602private:
603 void
604 setTimer(
605 boost::asio::steady_timer& timer,
606 std::chrono::milliseconds const& expiry_time,
607 std::function<void()> onExpire,
608 std::function<void()> onError);
609 void
611 void
613 void
615 void
617
619 transJson(
620 std::shared_ptr<STTx const> const& transaction,
621 TER result,
622 bool validated,
625
626 void
629 AcceptedLedgerTx const& transaction,
630 bool last);
631
632 void
635 AcceptedLedgerTx const& transaction,
636 bool last);
637
638 void
641 std::shared_ptr<STTx const> const& transaction,
642 TER result);
643
644 void
645 pubServer();
646 void
648
650 getHostId(bool forAdmin);
651
652private:
656
657 /*
658 * With a validated ledger to separate history and future, the node
659 * streams historical txns with negative indexes starting from -1,
660 * and streams future txns starting from index 0.
661 * The SubAccountHistoryIndex struct maintains these indexes.
662 * It also has a flag stopHistorical_ for stopping streaming
663 * the historical txns.
664 */
666 {
668 // forward
670 // separate backward and forward
672 // history, backward
677
679 : accountId_(accountId)
680 , forwardTxIndex_(0)
683 , historyTxIndex_(-1)
684 , haveHistorical_(false)
685 , stopHistorical_(false)
686 {
687 }
688 };
690 {
693 };
695 {
698 };
701
705 void
709 void
711 void
713
716
718
720
722
727
729 boost::asio::steady_timer heartbeatTimer_;
730 boost::asio::steady_timer clusterTimer_;
731 boost::asio::steady_timer accountHistoryTxTimer_;
732
734
736
738
741
743
745
746 enum SubTypes {
747 sLedger, // Accepted ledgers.
748 sManifests, // Received validator manifests.
749 sServer, // When server changes connectivity state.
750 sTransactions, // All accepted transactions.
751 sRTTransactions, // All proposed and accepted transactions.
752 sValidations, // Received validations.
753 sPeerStatus, // Peer status changes.
754 sConsensusPhase, // Consensus phase
755 sBookChanges, // Per-ledger order book changes
756 sLastEntry // Any new entry must be ADDED ABOVE this one
757 };
758
760
762
764
765 // Whether we are in standalone mode.
766 bool const m_standalone;
767
768 // The number of nodes that we need to consider ourselves connected.
770
771 // Transaction batching.
776
778
781
782private:
783 struct Stats
784 {
785 template <class Handler>
787 Handler const& handler,
788 beast::insight::Collector::ptr const& collector)
789 : hook(collector->make_hook(handler))
790 , disconnected_duration(collector->make_gauge(
791 "State_Accounting",
792 "Disconnected_duration"))
793 , connected_duration(collector->make_gauge(
794 "State_Accounting",
795 "Connected_duration"))
797 collector->make_gauge("State_Accounting", "Syncing_duration"))
798 , tracking_duration(collector->make_gauge(
799 "State_Accounting",
800 "Tracking_duration"))
802 collector->make_gauge("State_Accounting", "Full_duration"))
803 , disconnected_transitions(collector->make_gauge(
804 "State_Accounting",
805 "Disconnected_transitions"))
806 , connected_transitions(collector->make_gauge(
807 "State_Accounting",
808 "Connected_transitions"))
809 , syncing_transitions(collector->make_gauge(
810 "State_Accounting",
811 "Syncing_transitions"))
812 , tracking_transitions(collector->make_gauge(
813 "State_Accounting",
814 "Tracking_transitions"))
816 collector->make_gauge("State_Accounting", "Full_transitions"))
817 {
818 }
819
826
832 };
833
834 std::mutex m_statsMutex; // Mutex to lock m_stats
836
837private:
838 void
840};
841
842//------------------------------------------------------------------------------
843
845 {"disconnected", "connected", "syncing", "tracking", "full"}};
846
848
856
857static auto const genesisAccountId = calcAccountID(
859 .first);
860
861//------------------------------------------------------------------------------
862inline OperatingMode
864{
865 return mMode;
866}
867
// Convenience overload: render the *current* operating mode as a string.
// Delegates to the two-argument overload with mMode.
inline std::string
NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
{
    return strOperatingMode(mMode, admin);
}
873
874inline void
876{
878}
879
880inline void
882{
883 needNetworkLedger_ = true;
884}
885
886inline void
888{
889 needNetworkLedger_ = false;
890}
891
892inline bool
894{
895 return needNetworkLedger_;
896}
897
898inline bool
900{
902}
903
906{
907 static std::string const hostname = boost::asio::ip::host_name();
908
909 if (forAdmin)
910 return hostname;
911
912 // For non-admin uses hash the node public key into a
913 // single RFC1751 word:
914 static std::string const shroudedHostId = [this]() {
915 auto const& id = app_.nodeIdentity();
916
917 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
918 }();
919
920 return shroudedHostId;
921}
922
923void
925{
927
928 // Only do this work if a cluster is configured
929 if (app_.cluster().size() != 0)
931}
932
933void
935 boost::asio::steady_timer& timer,
936 const std::chrono::milliseconds& expiry_time,
937 std::function<void()> onExpire,
938 std::function<void()> onError)
939{
940 // Only start the timer if waitHandlerCounter_ is not yet joined.
941 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
942 [this, onExpire, onError](boost::system::error_code const& e) {
943 if ((e.value() == boost::system::errc::success) &&
944 (!m_job_queue.isStopped()))
945 {
946 onExpire();
947 }
948 // Recover as best we can if an unexpected error occurs.
949 if (e.value() != boost::system::errc::success &&
950 e.value() != boost::asio::error::operation_aborted)
951 {
952 // Try again later and hope for the best.
953 JLOG(m_journal.error())
954 << "Timer got error '" << e.message()
955 << "'. Restarting timer.";
956 onError();
957 }
958 }))
959 {
960 timer.expires_from_now(expiry_time);
961 timer.async_wait(std::move(*optionalCountedHandler));
962 }
963}
964
// (Re)arm the heartbeat timer at consensus granularity. The actual work
// runs as a job (jtNETOP_TIMER) so the io_service thread is never blocked;
// on a timer error we simply try to arm it again.
void
NetworkOPsImp::setHeartbeatTimer()
{
    setTimer(
        heartbeatTimer_,
        mConsensus.parms().ledgerGRANULARITY,
        [this]() {
            m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
                processHeartbeatTimer();
            });
        },
        [this]() { setHeartbeatTimer(); });
}
978
// (Re)arm the 10-second cluster-status timer. The work is dispatched to
// the job queue (jtNETOP_CLUSTER); on a timer error we re-arm.
void
NetworkOPsImp::setClusterTimer()
{
    using namespace std::chrono_literals;

    setTimer(
        clusterTimer_,
        10s,
        [this]() {
            m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
                processClusterTimer();
            });
        },
        [this]() { setClusterTimer(); });
}
994
// Schedule (in 4 seconds) a job that streams historical transactions for
// one account_history subscriber; on a timer error, reschedule.
void
NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
{
    JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
                            << toBase58(subInfo.index_->accountId_);
    using namespace std::chrono_literals;
    setTimer(
        accountHistoryTxTimer_,
        4s,
        [this, subInfo]() { addAccountHistoryJob(subInfo); },
        [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
}
1007
// Heartbeat: check peer connectivity, adjust the operating mode, drive the
// consensus timer, and publish consensus-phase changes. Always re-arms the
// heartbeat timer at the end.
void
NetworkOPsImp::processHeartbeatTimer()
{
    {
        std::unique_lock lock{app_.getMasterMutex()};

        // VFALCO NOTE This is for diagnosing a crash on exit
        LoadManager& mgr(app_.getLoadManager());
        // NOTE(review): `mgr` is referenced but never used below — a call on
        // it (present in the original source) appears to have been lost in
        // extraction; confirm against upstream before relying on this code.

        std::size_t const numPeers = app_.overlay().size();

        // do we have sufficient peers? If not, we are disconnected.
        if (numPeers < minPeerCount_)
        {
            if (mMode != OperatingMode::DISCONNECTED)
            {
                setMode(OperatingMode::DISCONNECTED);
                JLOG(m_journal.warn())
                    << "Node count (" << numPeers << ") has fallen "
                    << "below required minimum (" << minPeerCount_ << ").";
            }

            // MasterMutex lock need not be held to call setHeartbeatTimer()
            lock.unlock();
            // We do not call mConsensus.timerEntry until there are enough
            // peers providing meaningful inputs to consensus
            setHeartbeatTimer();
            return;
        }

        if (mMode == OperatingMode::DISCONNECTED)
        {
            setMode(OperatingMode::CONNECTED);
            JLOG(m_journal.info())
                << "Node count (" << numPeers << ") is sufficient.";
        }

        // Check if the last validated ledger forces a change between these
        // states.
        if (mMode == OperatingMode::SYNCING)
            setMode(OperatingMode::SYNCING);
        else if (mMode == OperatingMode::CONNECTED)
            setMode(OperatingMode::CONNECTED);
    }

    // Drive consensus forward using the current network close time.
    mConsensus.timerEntry(app_.timeKeeper().closeTime());

    // Publish a consensus-phase change to subscribers, if any occurred.
    const ConsensusPhase currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
    }

    setHeartbeatTimer();
}
1065
1066void
1067NetworkOPsImp::processClusterTimer()
1068{
1069 if (app_.cluster().size() == 0)
1070 return;
1071
1072 using namespace std::chrono_literals;
1073
1074 bool const update = app_.cluster().update(
1075 app_.nodeIdentity().first,
1076 "",
1077 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1078 ? app_.getFeeTrack().getLocalFee()
1079 : 0,
1080 app_.timeKeeper().now());
1081
1082 if (!update)
1083 {
1084 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1085 setClusterTimer();
1086 return;
1087 }
1088
1089 protocol::TMCluster cluster;
1090 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1091 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1092 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1093 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1094 n.set_nodeload(node.getLoadFee());
1095 if (!node.name().empty())
1096 n.set_nodename(node.name());
1097 });
1098
1099 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1100 for (auto& item : gossip.items)
1101 {
1102 protocol::TMLoadSource& node = *cluster.add_loadsources();
1103 node.set_name(to_string(item.address));
1104 node.set_cost(item.balance);
1105 }
1106 app_.overlay().foreach(send_if(
1107 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1108 peer_in_cluster()));
1109 setClusterTimer();
1110}
1111
1112//------------------------------------------------------------------------------
1113
1115NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
1116 const
1117{
1118 if (mode == OperatingMode::FULL && admin)
1119 {
1120 auto const consensusMode = mConsensus.mode();
1121 if (consensusMode != ConsensusMode::wrongLedger)
1122 {
1123 if (consensusMode == ConsensusMode::proposing)
1124 return "proposing";
1125
1126 if (mConsensus.validating())
1127 return "validating";
1128 }
1129 }
1130
1131 return states_[static_cast<std::size_t>(mode)];
1132}
1133
1134void
1135NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1136{
1137 if (isNeedNetworkLedger())
1138 {
1139 // Nothing we can do if we've never been in sync
1140 return;
1141 }
1142
1143 // this is an asynchronous interface
1144 auto const trans = sterilize(*iTrans);
1145
1146 auto const txid = trans->getTransactionID();
1147 auto const flags = app_.getHashRouter().getFlags(txid);
1148
1149 if ((flags & SF_BAD) != 0)
1150 {
1151 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1152 return;
1153 }
1154
1155 try
1156 {
1157 auto const [validity, reason] = checkValidity(
1158 app_.getHashRouter(),
1159 *trans,
1160 m_ledgerMaster.getValidatedRules(),
1161 app_.config());
1162
1163 if (validity != Validity::Valid)
1164 {
1165 JLOG(m_journal.warn())
1166 << "Submitted transaction invalid: " << reason;
1167 return;
1168 }
1169 }
1170 catch (std::exception const& ex)
1171 {
1172 JLOG(m_journal.warn())
1173 << "Exception checking transaction " << txid << ": " << ex.what();
1174
1175 return;
1176 }
1177
1178 std::string reason;
1179
1180 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1181
1182 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1183 auto t = tx;
1184 processTransaction(t, false, false, FailHard::no);
1185 });
1186}
1187
// Validate, canonicalize, and dispatch a transaction for application to the
// open ledger. bLocal selects synchronous (caller waits) vs asynchronous
// batching; bUnlimited marks admin/trusted submissions.
void
NetworkOPsImp::processTransaction(
    std::shared_ptr<Transaction>& transaction,
    bool bUnlimited,
    bool bLocal,
    FailHard failType)
{
    auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
    auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());

    if ((newFlags & SF_BAD) != 0)
    {
        // cached bad
        JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
        transaction->setStatus(INVALID);
        transaction->setResult(temBAD_SIGNATURE);
        return;
    }

    // NOTE eahennis - I think this check is redundant,
    // but I'm not 100% sure yet.
    // If so, only cost is looking up HashRouter flags.
    auto const view = m_ledgerMaster.getCurrentLedger();
    auto const [validity, reason] = checkValidity(
        app_.getHashRouter(),
        *transaction->getSTransaction(),
        view->rules(),
        app_.config());
    XRPL_ASSERT(
        validity == Validity::Valid,
        "ripple::NetworkOPsImp::processTransaction : valid validity");

    // Not concerned with local checks at this point.
    if (validity == Validity::SigBad)
    {
        // Remember the bad signature in the HashRouter so future
        // submissions of the same transaction are rejected cheaply.
        JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
        transaction->setStatus(INVALID);
        transaction->setResult(temBAD_SIGNATURE);
        app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
        return;
    }

    // canonicalize can change our pointer
    app_.getMasterTransaction().canonicalize(&transaction);

    if (bLocal)
        doTransactionSync(transaction, bUnlimited, failType);
    else
        doTransactionAsync(transaction, bUnlimited, failType);
}
1238
1239void
1240NetworkOPsImp::doTransactionAsync(
1241 std::shared_ptr<Transaction> transaction,
1242 bool bUnlimited,
1243 FailHard failType)
1244{
1245 std::lock_guard lock(mMutex);
1246
1247 if (transaction->getApplying())
1248 return;
1249
1250 mTransactions.push_back(
1251 TransactionStatus(transaction, bUnlimited, false, failType));
1252 transaction->setApplying();
1253
1254 if (mDispatchState == DispatchState::none)
1255 {
1256 if (m_job_queue.addJob(
1257 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1258 {
1259 mDispatchState = DispatchState::scheduled;
1260 }
1261 }
1262}
1263
// Enqueue a transaction (if not already pending) and block the caller until
// it has actually been applied. Either waits on the condition variable for
// a running batch, or drives apply() directly on this thread.
void
NetworkOPsImp::doTransactionSync(
    std::shared_ptr<Transaction> transaction,
    bool bUnlimited,
    FailHard failType)
{
    std::unique_lock<std::mutex> lock(mMutex);

    if (!transaction->getApplying())
    {
        mTransactions.push_back(
            TransactionStatus(transaction, bUnlimited, true, failType));
        transaction->setApplying();
    }

    do
    {
        if (mDispatchState == DispatchState::running)
        {
            // A batch processing job is already running, so wait.
            mCond.wait(lock);
        }
        else
        {
            // No batch in flight: apply the pending batch ourselves.
            apply(lock);

            if (mTransactions.size())
            {
                // More transactions need to be applied, but by another job.
                if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
                        transactionBatch();
                    }))
                {
                    mDispatchState = DispatchState::scheduled;
                }
            }
        }
        // Loop until *our* transaction has been processed (its applying
        // flag is cleared at the end of apply()).
    } while (transaction->getApplying());
}
1303
1304void
1305NetworkOPsImp::transactionBatch()
1306{
1307 std::unique_lock<std::mutex> lock(mMutex);
1308
1309 if (mDispatchState == DispatchState::running)
1310 return;
1311
1312 while (mTransactions.size())
1313 {
1314 apply(lock);
1315 }
1316}
1317
1318void
1319NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
1320{
1322 std::vector<TransactionStatus> transactions;
1323 mTransactions.swap(transactions);
1324 XRPL_ASSERT(
1325 !transactions.empty(),
1326 "ripple::NetworkOPsImp::apply : non-empty transactions");
1327 XRPL_ASSERT(
1328 mDispatchState != DispatchState::running,
1329 "ripple::NetworkOPsImp::apply : is not running");
1330
1331 mDispatchState = DispatchState::running;
1332
1333 batchLock.unlock();
1334
1335 {
1336 std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
1337 bool changed = false;
1338 {
1339 std::unique_lock ledgerLock{
1340 m_ledgerMaster.peekMutex(), std::defer_lock};
1341 std::lock(masterLock, ledgerLock);
1342
1343 app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
1344 for (TransactionStatus& e : transactions)
1345 {
1346 // we check before adding to the batch
1347 ApplyFlags flags = tapNONE;
1348 if (e.admin)
1349 flags |= tapUNLIMITED;
1350
1351 if (e.failType == FailHard::yes)
1352 flags |= tapFAIL_HARD;
1353
1354 auto const result = app_.getTxQ().apply(
1355 app_, view, e.transaction->getSTransaction(), flags, j);
1356 e.result = result.ter;
1357 e.applied = result.applied;
1358 changed = changed || result.applied;
1359 }
1360 return changed;
1361 });
1362 }
1363 if (changed)
1364 reportFeeChange();
1365
1366 std::optional<LedgerIndex> validatedLedgerIndex;
1367 if (auto const l = m_ledgerMaster.getValidatedLedger())
1368 validatedLedgerIndex = l->info().seq;
1369
1370 auto newOL = app_.openLedger().current();
1371 for (TransactionStatus& e : transactions)
1372 {
1373 e.transaction->clearSubmitResult();
1374
1375 if (e.applied)
1376 {
1377 pubProposedTransaction(
1378 newOL, e.transaction->getSTransaction(), e.result);
1379 e.transaction->setApplied();
1380 }
1381
1382 e.transaction->setResult(e.result);
1383
1384 if (isTemMalformed(e.result))
1385 app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);
1386
1387#ifdef DEBUG
1388 if (e.result != tesSUCCESS)
1389 {
1390 std::string token, human;
1391
1392 if (transResultInfo(e.result, token, human))
1393 {
1394 JLOG(m_journal.info())
1395 << "TransactionResult: " << token << ": " << human;
1396 }
1397 }
1398#endif
1399
1400 bool addLocal = e.local;
1401
1402 if (e.result == tesSUCCESS)
1403 {
1404 JLOG(m_journal.debug())
1405 << "Transaction is now included in open ledger";
1406 e.transaction->setStatus(INCLUDED);
1407
1408 auto const& txCur = e.transaction->getSTransaction();
1409 auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);
1410 if (txNext)
1411 {
1412 std::string reason;
1413 auto const trans = sterilize(*txNext);
1414 auto t = std::make_shared<Transaction>(trans, reason, app_);
1415 submit_held.emplace_back(t, false, false, FailHard::no);
1416 t->setApplying();
1417 }
1418 }
1419 else if (e.result == tefPAST_SEQ)
1420 {
1421 // duplicate or conflict
1422 JLOG(m_journal.info()) << "Transaction is obsolete";
1423 e.transaction->setStatus(OBSOLETE);
1424 }
1425 else if (e.result == terQUEUED)
1426 {
1427 JLOG(m_journal.debug())
1428 << "Transaction is likely to claim a"
1429 << " fee, but is queued until fee drops";
1430
1431 e.transaction->setStatus(HELD);
1432 // Add to held transactions, because it could get
1433 // kicked out of the queue, and this will try to
1434 // put it back.
1435 m_ledgerMaster.addHeldTransaction(e.transaction);
1436 e.transaction->setQueued();
1437 e.transaction->setKept();
1438 }
1439 else if (isTerRetry(e.result))
1440 {
1441 if (e.failType != FailHard::yes)
1442 {
1443 // transaction should be held
1444 JLOG(m_journal.debug())
1445 << "Transaction should be held: " << e.result;
1446 e.transaction->setStatus(HELD);
1447 m_ledgerMaster.addHeldTransaction(e.transaction);
1448 e.transaction->setKept();
1449 }
1450 }
1451 else
1452 {
1453 JLOG(m_journal.debug())
1454 << "Status other than success " << e.result;
1455 e.transaction->setStatus(INVALID);
1456 }
1457
1458 auto const enforceFailHard =
1459 e.failType == FailHard::yes && !isTesSuccess(e.result);
1460
1461 if (addLocal && !enforceFailHard)
1462 {
1463 m_localTX->push_back(
1464 m_ledgerMaster.getCurrentLedgerIndex(),
1465 e.transaction->getSTransaction());
1466 e.transaction->setKept();
1467 }
1468
1469 if ((e.applied ||
1470 ((mMode != OperatingMode::FULL) &&
1471 (e.failType != FailHard::yes) && e.local) ||
1472 (e.result == terQUEUED)) &&
1473 !enforceFailHard)
1474 {
1475 auto const toSkip =
1476 app_.getHashRouter().shouldRelay(e.transaction->getID());
1477
1478 if (toSkip)
1479 {
1480 protocol::TMTransaction tx;
1481 Serializer s;
1482
1483 e.transaction->getSTransaction()->add(s);
1484 tx.set_rawtransaction(s.data(), s.size());
1485 tx.set_status(protocol::tsCURRENT);
1486 tx.set_receivetimestamp(
1487 app_.timeKeeper().now().time_since_epoch().count());
1488 tx.set_deferred(e.result == terQUEUED);
1489 // FIXME: This should be when we received it
1490 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1491 e.transaction->setBroadcast();
1492 }
1493 }
1494
1495 if (validatedLedgerIndex)
1496 {
1497 auto [fee, accountSeq, availableSeq] =
1498 app_.getTxQ().getTxRequiredFeeAndSeq(
1499 *newOL, e.transaction->getSTransaction());
1500 e.transaction->setCurrentLedgerState(
1501 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1502 }
1503 }
1504 }
1505
1506 batchLock.lock();
1507
1508 for (TransactionStatus& e : transactions)
1509 e.transaction->clearApplying();
1510
1511 if (!submit_held.empty())
1512 {
1513 if (mTransactions.empty())
1514 mTransactions.swap(submit_held);
1515 else
1516 for (auto& e : submit_held)
1517 mTransactions.push_back(std::move(e));
1518 }
1519
1520 mCond.notify_all();
1521
1522 mDispatchState = DispatchState::none;
1523}
1524
1525//
1526// Owner functions
1527//
1528
1530NetworkOPsImp::getOwnerInfo(
1532 AccountID const& account)
1533{
1534 Json::Value jvObjects(Json::objectValue);
1535 auto root = keylet::ownerDir(account);
1536 auto sleNode = lpLedger->read(keylet::page(root));
1537 if (sleNode)
1538 {
1539 std::uint64_t uNodeDir;
1540
1541 do
1542 {
1543 for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1544 {
1545 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1546 XRPL_ASSERT(
1547 sleCur,
1548 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1549
1550 switch (sleCur->getType())
1551 {
1552 case ltOFFER:
1553 if (!jvObjects.isMember(jss::offers))
1554 jvObjects[jss::offers] =
1556
1557 jvObjects[jss::offers].append(
1558 sleCur->getJson(JsonOptions::none));
1559 break;
1560
1561 case ltRIPPLE_STATE:
1562 if (!jvObjects.isMember(jss::ripple_lines))
1563 {
1564 jvObjects[jss::ripple_lines] =
1566 }
1567
1568 jvObjects[jss::ripple_lines].append(
1569 sleCur->getJson(JsonOptions::none));
1570 break;
1571
1572 case ltACCOUNT_ROOT:
1573 case ltDIR_NODE:
1574 default:
1575 UNREACHABLE(
1576 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1577 "type");
1578 break;
1579 }
1580 }
1581
1582 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1583
1584 if (uNodeDir)
1585 {
1586 sleNode = lpLedger->read(keylet::page(root, uNodeDir));
1587 XRPL_ASSERT(
1588 sleNode,
1589 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1590 }
1591 } while (uNodeDir);
1592 }
1593
1594 return jvObjects;
1595}
1596
1597//
1598// Other
1599//
1600
1601inline bool
1602NetworkOPsImp::isBlocked()
1603{
1604 return isAmendmentBlocked() || isUNLBlocked();
1605}
1606
// Reports whether this server is amendment blocked (see
// setAmendmentBlocked for how the flag is raised).
inline bool
NetworkOPsImp::isAmendmentBlocked()
{
    return amendmentBlocked_;
}
1612
// Mark this server amendment blocked. The flag is never cleared here;
// the operating mode is demoted to CONNECTED since a blocked server
// must not track or validate the network.
void
NetworkOPsImp::setAmendmentBlocked()
{
    amendmentBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1619
1620inline bool
1621NetworkOPsImp::isAmendmentWarned()
1622{
1623 return !amendmentBlocked_ && amendmentWarned_;
1624}
1625
// Raise the amendment-warning flag (an unsupported amendment has
// reached majority; surfaced via server_info warnings).
inline void
NetworkOPsImp::setAmendmentWarned()
{
    amendmentWarned_ = true;
}
1631
// Clear the amendment-warning flag.
inline void
NetworkOPsImp::clearAmendmentWarned()
{
    amendmentWarned_ = false;
}
1637
// Reports whether this server is blocked due to an expired/unusable
// validator list (UNL).
inline bool
NetworkOPsImp::isUNLBlocked()
{
    return unlBlocked_;
}
1643
// Mark this server UNL blocked and demote the operating mode to
// CONNECTED: without a usable validator list we cannot track or
// validate the network.
void
NetworkOPsImp::setUNLBlocked()
{
    unlBlocked_ = true;
    setMode(OperatingMode::CONNECTED);
}
1650
// Clear the UNL-blocked flag (a usable validator list is available
// again). Note: the operating mode is not restored here.
inline void
NetworkOPsImp::clearUNLBlocked()
{
    unlBlocked_ = false;
}
1656
1657bool
1658NetworkOPsImp::checkLastClosedLedger(
1659 const Overlay::PeerSequence& peerList,
1660 uint256& networkClosed)
1661{
1662 // Returns true if there's an *abnormal* ledger issue, normal changing in
1663 // TRACKING mode should return false. Do we have sufficient validations for
1664 // our last closed ledger? Or do sufficient nodes agree? And do we have no
1665 // better ledger available? If so, we are either tracking or full.
1666
1667 JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";
1668
1669 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1670
1671 if (!ourClosed)
1672 return false;
1673
1674 uint256 closedLedger = ourClosed->info().hash;
1675 uint256 prevClosedLedger = ourClosed->info().parentHash;
1676 JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
1677 JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;
1678
1679 //-------------------------------------------------------------------------
1680 // Determine preferred last closed ledger
1681
1682 auto& validations = app_.getValidations();
1683 JLOG(m_journal.debug())
1684 << "ValidationTrie " << Json::Compact(validations.getJsonTrie());
1685
1686 // Will rely on peer LCL if no trusted validations exist
1688 peerCounts[closedLedger] = 0;
1689 if (mMode >= OperatingMode::TRACKING)
1690 peerCounts[closedLedger]++;
1691
1692 for (auto& peer : peerList)
1693 {
1694 uint256 peerLedger = peer->getClosedLedgerHash();
1695
1696 if (peerLedger.isNonZero())
1697 ++peerCounts[peerLedger];
1698 }
1699
1700 for (auto const& it : peerCounts)
1701 JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;
1702
1703 uint256 preferredLCL = validations.getPreferredLCL(
1704 RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
1705 m_ledgerMaster.getValidLedgerIndex(),
1706 peerCounts);
1707
1708 bool switchLedgers = preferredLCL != closedLedger;
1709 if (switchLedgers)
1710 closedLedger = preferredLCL;
1711 //-------------------------------------------------------------------------
1712 if (switchLedgers && (closedLedger == prevClosedLedger))
1713 {
1714 // don't switch to our own previous ledger
1715 JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
1716 networkClosed = ourClosed->info().hash;
1717 switchLedgers = false;
1718 }
1719 else
1720 networkClosed = closedLedger;
1721
1722 if (!switchLedgers)
1723 return false;
1724
1725 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1726
1727 if (!consensus)
1728 consensus = app_.getInboundLedgers().acquire(
1729 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1730
1731 if (consensus &&
1732 (!m_ledgerMaster.canBeCurrent(consensus) ||
1733 !m_ledgerMaster.isCompatible(
1734 *consensus, m_journal.debug(), "Not switching")))
1735 {
1736 // Don't switch to a ledger not on the validated chain
1737 // or with an invalid close time or sequence
1738 networkClosed = ourClosed->info().hash;
1739 return false;
1740 }
1741
1742 JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
1743 JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
1744 << getJson({*ourClosed, {}});
1745 JLOG(m_journal.info()) << "Net LCL " << closedLedger;
1746
1747 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1748 {
1749 setMode(OperatingMode::CONNECTED);
1750 }
1751
1752 if (consensus)
1753 {
1754 // FIXME: If this rewinds the ledger sequence, or has the same
1755 // sequence, we should update the status on any stored transactions
1756 // in the invalidated ledgers.
1757 switchLastClosedLedger(consensus);
1758 }
1759
1760 return true;
1761}
1762
1763void
1764NetworkOPsImp::switchLastClosedLedger(
1765 std::shared_ptr<Ledger const> const& newLCL)
1766{
1767 // set the newLCL as our last closed ledger -- this is abnormal code
1768 JLOG(m_journal.error())
1769 << "JUMP last closed ledger to " << newLCL->info().hash;
1770
1771 clearNeedNetworkLedger();
1772
1773 // Update fee computations.
1774 app_.getTxQ().processClosedLedger(app_, *newLCL, true);
1775
1776 // Caller must own master lock
1777 {
1778 // Apply tx in old open ledger to new
1779 // open ledger. Then apply local tx.
1780
1781 auto retries = m_localTX->getTxSet();
1782 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1784 if (lastVal)
1785 rules = makeRulesGivenLedger(*lastVal, app_.config().features);
1786 else
1787 rules.emplace(app_.config().features);
1788 app_.openLedger().accept(
1789 app_,
1790 *rules,
1791 newLCL,
1792 OrderedTxs({}),
1793 false,
1794 retries,
1795 tapNONE,
1796 "jump",
1797 [&](OpenView& view, beast::Journal j) {
1798 // Stuff the ledger with transactions from the queue.
1799 return app_.getTxQ().accept(app_, view);
1800 });
1801 }
1802
1803 m_ledgerMaster.switchLCL(newLCL);
1804
1805 protocol::TMStatusChange s;
1806 s.set_newevent(protocol::neSWITCHED_LEDGER);
1807 s.set_ledgerseq(newLCL->info().seq);
1808 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1809 s.set_ledgerhashprevious(
1810 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
1811 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
1812
1813 app_.overlay().foreach(
1814 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
1815}
1816
// Start a new consensus round building on our current ledger's parent.
//
// @param networkClosed  hash of the ledger the network closed on
//                       (must be nonzero).
// @return true if the consensus engine was started; false if we lack
//         the previous ledger.
bool
NetworkOPsImp::beginConsensus(uint256 const& networkClosed)
{
    XRPL_ASSERT(
        networkClosed.isNonZero(),
        "ripple::NetworkOPsImp::beginConsensus : nonzero input");

    auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();

    JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
                           << " with LCL " << closingInfo.parentHash;

    auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);

    if (!prevLedger)
    {
        // this shouldn't happen unless we jump ledgers
        if (mMode == OperatingMode::FULL)
        {
            JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
            setMode(OperatingMode::TRACKING);
        }

        return false;
    }

    XRPL_ASSERT(
        prevLedger->info().hash == closingInfo.parentHash,
        "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
        "parent");
    XRPL_ASSERT(
        closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
        "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
        "hash");

    // Propagate the negative UNL from the previous ledger before
    // recomputing the trusted validator set.
    if (prevLedger->rules().enabled(featureNegativeUNL))
        app_.validators().setNegativeUNL(prevLedger->negativeUNL());
    TrustChanges const changes = app_.validators().updateTrusted(
        app_.getValidations().getCurrentNodeIDs(),
        closingInfo.parentCloseTime,
        *this,
        app_.overlay(),
        app_.getHashRouter());

    if (!changes.added.empty() || !changes.removed.empty())
    {
        app_.getValidations().trustChanged(changes.added, changes.removed);
        // Update the AmendmentTable so it tracks the current validators.
        app_.getAmendmentTable().trustChanged(
            app_.validators().getQuorumKeys().second);
    }

    mConsensus.startRound(
        app_.timeKeeper().closeTime(),
        networkClosed,
        prevLedger,
        changes.removed,
        changes.added);

    // Publish the phase change to subscribers if the round start
    // moved the consensus engine into a new phase.
    const ConsensusPhase currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
    }

    JLOG(m_journal.debug()) << "Initiating consensus engine";
    return true;
}
1886
// Hand a trusted peer's consensus proposal to the consensus engine.
// The boolean result comes straight from Consensus::peerProposal —
// presumably whether the proposal was accepted/useful; confirm against
// the consensus engine's documentation before relying on it.
bool
NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
{
    return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
}
1892
// Called when a complete transaction set becomes available, either
// built locally during consensus or acquired from a peer. Announces
// possession to peers and, if consensus asked for it, delivers it to
// the consensus engine.
//
// @param map          the completed transaction set.
// @param fromAcquire  true if the set was acquired at consensus' request.
void
NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
{
    // We now have an additional transaction set
    // either created locally during the consensus process
    // or acquired from a peer

    // Inform peers we have this set
    protocol::TMHaveTransactionSet msg;
    // 256 / 8: the hash is 256 bits, serialized as 32 bytes.
    msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
    msg.set_status(protocol::tsHAVE);
    app_.overlay().foreach(
        send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));

    // We acquired it because consensus asked us to
    if (fromAcquire)
        mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
}
1911
// Called when a consensus round ends. Cycles obsolete peer statuses,
// re-evaluates which ledger the network is on, possibly promotes our
// operating mode (CONNECTED/SYNCING -> TRACKING -> FULL), and starts
// the next consensus round.
void
NetworkOPsImp::endConsensus()
{
    uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;

    // Peers still reporting our previous-previous ledger are stale.
    for (auto const& it : app_.overlay().getActivePeers())
    {
        if (it && (it->getClosedLedgerHash() == deadLedger))
        {
            JLOG(m_journal.trace()) << "Killing obsolete peer status";
            it->cycleStatus();
        }
    }

    uint256 networkClosed;
    bool ledgerChange =
        checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);

    // Without a network closed ledger we cannot start a new round.
    if (networkClosed.isZero())
        return;

    // WRITEME: Unless we are in FULL and in the process of doing a consensus,
    // we must count how many nodes share our LCL, how many nodes disagree with
    // our LCL, and how many validations our LCL has. We also want to check
    // timing to make sure there shouldn't be a newer LCL. We need this
    // information to do the next three tests.

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::SYNCING)) &&
        !ledgerChange)
    {
        // Count number of peers that agree with us and UNL nodes whose
        // validations we have for LCL. If the ledger is good enough, go to
        // TRACKING - TODO
        if (!needNetworkLedger_)
            setMode(OperatingMode::TRACKING);
    }

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::TRACKING)) &&
        !ledgerChange)
    {
        // check if the ledger is good enough to go to FULL
        // Note: Do not go to FULL if we don't have the previous ledger
        // check if the ledger is bad enough to go to CONNECTED -- TODO
        auto current = m_ledgerMaster.getCurrentLedger();
        // Only go FULL if the current ledger's close time is recent
        // (within twice the close-time resolution).
        if (app_.timeKeeper().now() < (current->info().parentCloseTime +
                                       2 * current->info().closeTimeResolution))
        {
            setMode(OperatingMode::FULL);
        }
    }

    beginConsensus(networkClosed);
}
1967
1968void
1969NetworkOPsImp::consensusViewChange()
1970{
1971 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
1972 {
1973 setMode(OperatingMode::CONNECTED);
1974 }
1975}
1976
1977void
1978NetworkOPsImp::pubManifest(Manifest const& mo)
1979{
1980 // VFALCO consider std::shared_mutex
1981 std::lock_guard sl(mSubLock);
1982
1983 if (!mStreamMaps[sManifests].empty())
1984 {
1986
1987 jvObj[jss::type] = "manifestReceived";
1988 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
1989 if (mo.signingKey)
1990 jvObj[jss::signing_key] =
1991 toBase58(TokenType::NodePublic, *mo.signingKey);
1992 jvObj[jss::seq] = Json::UInt(mo.sequence);
1993 if (auto sig = mo.getSignature())
1994 jvObj[jss::signature] = strHex(*sig);
1995 jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
1996 if (!mo.domain.empty())
1997 jvObj[jss::domain] = mo.domain;
1998 jvObj[jss::manifest] = strHex(mo.serialized);
1999
2000 for (auto i = mStreamMaps[sManifests].begin();
2001 i != mStreamMaps[sManifests].end();)
2002 {
2003 if (auto p = i->second.lock())
2004 {
2005 p->send(jvObj, true);
2006 ++i;
2007 }
2008 else
2009 {
2010 i = mStreamMaps[sManifests].erase(i);
2011 }
2012 }
2013 }
2014}
2015
// Snapshot the fee state used for the "server" stream: the server load
// factor and base from the LoadFeeTrack, the ledger base fee, and the
// transaction queue's escalation metrics.
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    XRPAmount fee,
    TxQ::Metrics&& escalationMetrics,
    LoadFeeTrack const& loadFeeTrack)
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , baseFee{fee}
    , em{std::move(escalationMetrics)}
{
}
2026
2027bool
2029 NetworkOPsImp::ServerFeeSummary const& b) const
2030{
2031 if (loadFactorServer != b.loadFactorServer ||
2032 loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
2033 em.has_value() != b.em.has_value())
2034 return true;
2035
2036 if (em && b.em)
2037 {
2038 return (
2039 em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
2040 em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
2041 em->referenceFeeLevel != b.em->referenceFeeLevel);
2042 }
2043
2044 return false;
2045}
2046
2047// Need to cap to uint64 to uint32 due to JSON limitations
2048static std::uint32_t
2050{
2052
2053 return std::min(max32, v);
2054};
2055
2056void
2058{
2059 // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
2060 // list into a local array while holding the lock then release
2061 // the lock and call send on everyone.
2062 //
2064
2065 if (!mStreamMaps[sServer].empty())
2066 {
2068
2070 app_.openLedger().current()->fees().base,
2072 app_.getFeeTrack()};
2073
2074 jvObj[jss::type] = "serverStatus";
2075 jvObj[jss::server_status] = strOperatingMode();
2076 jvObj[jss::load_base] = f.loadBaseServer;
2077 jvObj[jss::load_factor_server] = f.loadFactorServer;
2078 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2079
2080 if (f.em)
2081 {
2082 auto const loadFactor = std::max(
2083 safe_cast<std::uint64_t>(f.loadFactorServer),
2084 mulDiv(
2085 f.em->openLedgerFeeLevel,
2086 f.loadBaseServer,
2087 f.em->referenceFeeLevel)
2089
2090 jvObj[jss::load_factor] = trunc32(loadFactor);
2091 jvObj[jss::load_factor_fee_escalation] =
2092 f.em->openLedgerFeeLevel.jsonClipped();
2093 jvObj[jss::load_factor_fee_queue] =
2094 f.em->minProcessingFeeLevel.jsonClipped();
2095 jvObj[jss::load_factor_fee_reference] =
2096 f.em->referenceFeeLevel.jsonClipped();
2097 }
2098 else
2099 jvObj[jss::load_factor] = f.loadFactorServer;
2100
2101 mLastFeeSummary = f;
2102
2103 for (auto i = mStreamMaps[sServer].begin();
2104 i != mStreamMaps[sServer].end();)
2105 {
2106 InfoSub::pointer p = i->second.lock();
2107
2108 // VFALCO TODO research the possibility of using thread queues and
2109 // linearizing the deletion of subscribers with the
2110 // sending of JSON data.
2111 if (p)
2112 {
2113 p->send(jvObj, true);
2114 ++i;
2115 }
2116 else
2117 {
2118 i = mStreamMaps[sServer].erase(i);
2119 }
2120 }
2121 }
2122}
2123
2124void
2126{
2128
2129 auto& streamMap = mStreamMaps[sConsensusPhase];
2130 if (!streamMap.empty())
2131 {
2133 jvObj[jss::type] = "consensusPhase";
2134 jvObj[jss::consensus] = to_string(phase);
2135
2136 for (auto i = streamMap.begin(); i != streamMap.end();)
2137 {
2138 if (auto p = i->second.lock())
2139 {
2140 p->send(jvObj, true);
2141 ++i;
2142 }
2143 else
2144 {
2145 i = streamMap.erase(i);
2146 }
2147 }
2148 }
2149}
2150
2151void
2153{
2154 // VFALCO consider std::shared_mutex
2156
2157 if (!mStreamMaps[sValidations].empty())
2158 {
2160
2161 auto const signerPublic = val->getSignerPublic();
2162
2163 jvObj[jss::type] = "validationReceived";
2164 jvObj[jss::validation_public_key] =
2165 toBase58(TokenType::NodePublic, signerPublic);
2166 jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
2167 jvObj[jss::signature] = strHex(val->getSignature());
2168 jvObj[jss::full] = val->isFull();
2169 jvObj[jss::flags] = val->getFlags();
2170 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2171 jvObj[jss::data] = strHex(val->getSerializer().slice());
2172
2173 if (auto version = (*val)[~sfServerVersion])
2174 jvObj[jss::server_version] = std::to_string(*version);
2175
2176 if (auto cookie = (*val)[~sfCookie])
2177 jvObj[jss::cookie] = std::to_string(*cookie);
2178
2179 if (auto hash = (*val)[~sfValidatedHash])
2180 jvObj[jss::validated_hash] = strHex(*hash);
2181
2182 auto const masterKey =
2183 app_.validatorManifests().getMasterKey(signerPublic);
2184
2185 if (masterKey != signerPublic)
2186 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
2187
2188 // NOTE *seq is a number, but old API versions used string. We replace
2189 // number with a string using MultiApiJson near end of this function
2190 if (auto const seq = (*val)[~sfLedgerSequence])
2191 jvObj[jss::ledger_index] = *seq;
2192
2193 if (val->isFieldPresent(sfAmendments))
2194 {
2195 jvObj[jss::amendments] = Json::Value(Json::arrayValue);
2196 for (auto const& amendment : val->getFieldV256(sfAmendments))
2197 jvObj[jss::amendments].append(to_string(amendment));
2198 }
2199
2200 if (auto const closeTime = (*val)[~sfCloseTime])
2201 jvObj[jss::close_time] = *closeTime;
2202
2203 if (auto const loadFee = (*val)[~sfLoadFee])
2204 jvObj[jss::load_fee] = *loadFee;
2205
2206 if (auto const baseFee = val->at(~sfBaseFee))
2207 jvObj[jss::base_fee] = static_cast<double>(*baseFee);
2208
2209 if (auto const reserveBase = val->at(~sfReserveBase))
2210 jvObj[jss::reserve_base] = *reserveBase;
2211
2212 if (auto const reserveInc = val->at(~sfReserveIncrement))
2213 jvObj[jss::reserve_inc] = *reserveInc;
2214
2215 // (The ~ operator converts the Proxy to a std::optional, which
2216 // simplifies later operations)
2217 if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2218 baseFeeXRP && baseFeeXRP->native())
2219 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2220
2221 if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2222 reserveBaseXRP && reserveBaseXRP->native())
2223 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2224
2225 if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2226 reserveIncXRP && reserveIncXRP->native())
2227 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2228
2229 // NOTE Use MultiApiJson to publish two slightly different JSON objects
2230 // for consumers supporting different API versions
2231 MultiApiJson multiObj{jvObj};
2232 multiObj.visit(
2233 RPC::apiVersion<1>, //
2234 [](Json::Value& jvTx) {
2235 // Type conversion for older API versions to string
2236 if (jvTx.isMember(jss::ledger_index))
2237 {
2238 jvTx[jss::ledger_index] =
2239 std::to_string(jvTx[jss::ledger_index].asUInt());
2240 }
2241 });
2242
2243 for (auto i = mStreamMaps[sValidations].begin();
2244 i != mStreamMaps[sValidations].end();)
2245 {
2246 if (auto p = i->second.lock())
2247 {
2248 multiObj.visit(
2249 p->getApiVersion(), //
2250 [&](Json::Value const& jv) { p->send(jv, true); });
2251 ++i;
2252 }
2253 else
2254 {
2255 i = mStreamMaps[sValidations].erase(i);
2256 }
2257 }
2258 }
2259}
2260
2261void
2263{
2265
2266 if (!mStreamMaps[sPeerStatus].empty())
2267 {
2268 Json::Value jvObj(func());
2269
2270 jvObj[jss::type] = "peerStatusChange";
2271
2272 for (auto i = mStreamMaps[sPeerStatus].begin();
2273 i != mStreamMaps[sPeerStatus].end();)
2274 {
2275 InfoSub::pointer p = i->second.lock();
2276
2277 if (p)
2278 {
2279 p->send(jvObj, true);
2280 ++i;
2281 }
2282 else
2283 {
2284 i = mStreamMaps[sPeerStatus].erase(i);
2285 }
2286 }
2287 }
2288}
2289
2290void
2292{
2293 using namespace std::chrono_literals;
2294 if (om == OperatingMode::CONNECTED)
2295 {
2298 }
2299 else if (om == OperatingMode::SYNCING)
2300 {
2303 }
2304
2305 if ((om > OperatingMode::CONNECTED) && isBlocked())
2307
2308 if (mMode == om)
2309 return;
2310
2311 mMode = om;
2312
2313 accounting_.mode(om);
2314
2315 JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
2316 pubServer();
2317}
2318
2319bool
2322 std::string const& source)
2323{
2324 JLOG(m_journal.trace())
2325 << "recvValidation " << val->getLedgerHash() << " from " << source;
2326
2328 BypassAccept bypassAccept = BypassAccept::no;
2329 try
2330 {
2331 if (pendingValidations_.contains(val->getLedgerHash()))
2332 bypassAccept = BypassAccept::yes;
2333 else
2334 pendingValidations_.insert(val->getLedgerHash());
2335 scope_unlock unlock(lock);
2336 handleNewValidation(app_, val, source, bypassAccept, m_journal);
2337 }
2338 catch (std::exception const& e)
2339 {
2340 JLOG(m_journal.warn())
2341 << "Exception thrown for handling new validation "
2342 << val->getLedgerHash() << ": " << e.what();
2343 }
2344 catch (...)
2345 {
2346 JLOG(m_journal.warn())
2347 << "Unknown exception thrown for handling new validation "
2348 << val->getLedgerHash();
2349 }
2350 if (bypassAccept == BypassAccept::no)
2351 {
2352 pendingValidations_.erase(val->getLedgerHash());
2353 }
2354 lock.unlock();
2355
2356 pubValidation(val);
2357
2358 // We will always relay trusted validations; if configured, we will
2359 // also relay all untrusted validations.
2360 return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2361}
2362
2365{
2366 return mConsensus.getJson(true);
2367}
2368
2370NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2371{
2373
2374 // System-level warnings
2375 {
2376 Json::Value warnings{Json::arrayValue};
2377 if (isAmendmentBlocked())
2378 {
2379 Json::Value& w = warnings.append(Json::objectValue);
2380 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2381 w[jss::message] =
2382 "This server is amendment blocked, and must be updated to be "
2383 "able to stay in sync with the network.";
2384 }
2385 if (isUNLBlocked())
2386 {
2387 Json::Value& w = warnings.append(Json::objectValue);
2388 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2389 w[jss::message] =
2390 "This server has an expired validator list. validators.txt "
2391 "may be incorrectly configured or some [validator_list_sites] "
2392 "may be unreachable.";
2393 }
2394 if (admin && isAmendmentWarned())
2395 {
2396 Json::Value& w = warnings.append(Json::objectValue);
2397 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2398 w[jss::message] =
2399 "One or more unsupported amendments have reached majority. "
2400 "Upgrade to the latest version before they are activated "
2401 "to avoid being amendment blocked.";
2402 if (auto const expected =
2404 {
2405 auto& d = w[jss::details] = Json::objectValue;
2406 d[jss::expected_date] = expected->time_since_epoch().count();
2407 d[jss::expected_date_UTC] = to_string(*expected);
2408 }
2409 }
2410
2411 if (warnings.size())
2412 info[jss::warnings] = std::move(warnings);
2413 }
2414
2415 // hostid: unique string describing the machine
2416 if (human)
2417 info[jss::hostid] = getHostId(admin);
2418
2419 // domain: if configured with a domain, report it:
2420 if (!app_.config().SERVER_DOMAIN.empty())
2421 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2422
2423 info[jss::build_version] = BuildInfo::getVersionString();
2424
2425 info[jss::server_state] = strOperatingMode(admin);
2426
2427 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2429
2431 info[jss::network_ledger] = "waiting";
2432
2433 info[jss::validation_quorum] =
2434 static_cast<Json::UInt>(app_.validators().quorum());
2435
2436 if (admin)
2437 {
2438 switch (app_.config().NODE_SIZE)
2439 {
2440 case 0:
2441 info[jss::node_size] = "tiny";
2442 break;
2443 case 1:
2444 info[jss::node_size] = "small";
2445 break;
2446 case 2:
2447 info[jss::node_size] = "medium";
2448 break;
2449 case 3:
2450 info[jss::node_size] = "large";
2451 break;
2452 case 4:
2453 info[jss::node_size] = "huge";
2454 break;
2455 }
2456
2457 auto when = app_.validators().expires();
2458
2459 if (!human)
2460 {
2461 if (when)
2462 info[jss::validator_list_expires] =
2463 safe_cast<Json::UInt>(when->time_since_epoch().count());
2464 else
2465 info[jss::validator_list_expires] = 0;
2466 }
2467 else
2468 {
2469 auto& x = (info[jss::validator_list] = Json::objectValue);
2470
2471 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2472
2473 if (when)
2474 {
2475 if (*when == TimeKeeper::time_point::max())
2476 {
2477 x[jss::expiration] = "never";
2478 x[jss::status] = "active";
2479 }
2480 else
2481 {
2482 x[jss::expiration] = to_string(*when);
2483
2484 if (*when > app_.timeKeeper().now())
2485 x[jss::status] = "active";
2486 else
2487 x[jss::status] = "expired";
2488 }
2489 }
2490 else
2491 {
2492 x[jss::status] = "unknown";
2493 x[jss::expiration] = "unknown";
2494 }
2495 }
2496
2497#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH)
2498 {
2499 auto& x = (info[jss::git] = Json::objectValue);
2500#ifdef GIT_COMMIT_HASH
2501 x[jss::hash] = GIT_COMMIT_HASH;
2502#endif
2503#ifdef GIT_BRANCH
2504 x[jss::branch] = GIT_BRANCH;
2505#endif
2506 }
2507#endif
2508 }
2509 info[jss::io_latency_ms] =
2510 static_cast<Json::UInt>(app_.getIOLatency().count());
2511
2512 if (admin)
2513 {
2514 if (auto const localPubKey = app_.validators().localPublicKey();
2515 localPubKey && app_.getValidationPublicKey())
2516 {
2517 info[jss::pubkey_validator] =
2518 toBase58(TokenType::NodePublic, localPubKey.value());
2519 }
2520 else
2521 {
2522 info[jss::pubkey_validator] = "none";
2523 }
2524 }
2525
2526 if (counters)
2527 {
2528 info[jss::counters] = app_.getPerfLog().countersJson();
2529
2530 Json::Value nodestore(Json::objectValue);
2531 app_.getNodeStore().getCountsJson(nodestore);
2532 info[jss::counters][jss::nodestore] = nodestore;
2533 info[jss::current_activities] = app_.getPerfLog().currentJson();
2534 }
2535
2536 info[jss::pubkey_node] =
2538
2539 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2540
2542 info[jss::amendment_blocked] = true;
2543
2544 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2545
2546 if (fp != 0)
2547 info[jss::fetch_pack] = Json::UInt(fp);
2548
2549 info[jss::peers] = Json::UInt(app_.overlay().size());
2550
2551 Json::Value lastClose = Json::objectValue;
2552 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2553
2554 if (human)
2555 {
2556 lastClose[jss::converge_time_s] =
2558 }
2559 else
2560 {
2561 lastClose[jss::converge_time] =
2563 }
2564
2565 info[jss::last_close] = lastClose;
2566
2567 // info[jss::consensus] = mConsensus.getJson();
2568
2569 if (admin)
2570 info[jss::load] = m_job_queue.getJson();
2571
2572 if (auto const netid = app_.overlay().networkID())
2573 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2574
2575 auto const escalationMetrics =
2577
2578 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2579 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2580 /* Scale the escalated fee level to unitless "load factor".
2581 In practice, this just strips the units, but it will continue
2582 to work correctly if either base value ever changes. */
2583 auto const loadFactorFeeEscalation =
2584 mulDiv(
2585 escalationMetrics.openLedgerFeeLevel,
2586 loadBaseServer,
2587 escalationMetrics.referenceFeeLevel)
2589
2590 auto const loadFactor = std::max(
2591 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2592
2593 if (!human)
2594 {
2595 info[jss::load_base] = loadBaseServer;
2596 info[jss::load_factor] = trunc32(loadFactor);
2597 info[jss::load_factor_server] = loadFactorServer;
2598
2599 /* Json::Value doesn't support uint64, so clamp to max
2600 uint32 value. This is mostly theoretical, since there
2601 probably isn't enough extant XRP to drive the factor
2602 that high.
2603 */
2604 info[jss::load_factor_fee_escalation] =
2605 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2606 info[jss::load_factor_fee_queue] =
2607 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2608 info[jss::load_factor_fee_reference] =
2609 escalationMetrics.referenceFeeLevel.jsonClipped();
2610 }
2611 else
2612 {
2613 info[jss::load_factor] =
2614 static_cast<double>(loadFactor) / loadBaseServer;
2615
2616 if (loadFactorServer != loadFactor)
2617 info[jss::load_factor_server] =
2618 static_cast<double>(loadFactorServer) / loadBaseServer;
2619
2620 if (admin)
2621 {
2623 if (fee != loadBaseServer)
2624 info[jss::load_factor_local] =
2625 static_cast<double>(fee) / loadBaseServer;
2626 fee = app_.getFeeTrack().getRemoteFee();
2627 if (fee != loadBaseServer)
2628 info[jss::load_factor_net] =
2629 static_cast<double>(fee) / loadBaseServer;
2630 fee = app_.getFeeTrack().getClusterFee();
2631 if (fee != loadBaseServer)
2632 info[jss::load_factor_cluster] =
2633 static_cast<double>(fee) / loadBaseServer;
2634 }
2635 if (escalationMetrics.openLedgerFeeLevel !=
2636 escalationMetrics.referenceFeeLevel &&
2637 (admin || loadFactorFeeEscalation != loadFactor))
2638 info[jss::load_factor_fee_escalation] =
2639 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2640 escalationMetrics.referenceFeeLevel);
2641 if (escalationMetrics.minProcessingFeeLevel !=
2642 escalationMetrics.referenceFeeLevel)
2643 info[jss::load_factor_fee_queue] =
2644 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2645 escalationMetrics.referenceFeeLevel);
2646 }
2647
2648 bool valid = false;
2649 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2650
2651 if (lpClosed)
2652 valid = true;
2653 else
2654 lpClosed = m_ledgerMaster.getClosedLedger();
2655
2656 if (lpClosed)
2657 {
2658 XRPAmount const baseFee = lpClosed->fees().base;
2660 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2661 l[jss::hash] = to_string(lpClosed->info().hash);
2662
2663 if (!human)
2664 {
2665 l[jss::base_fee] = baseFee.jsonClipped();
2666 l[jss::reserve_base] =
2667 lpClosed->fees().accountReserve(0).jsonClipped();
2668 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2669 l[jss::close_time] = Json::Value::UInt(
2670 lpClosed->info().closeTime.time_since_epoch().count());
2671 }
2672 else
2673 {
2674 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2675 l[jss::reserve_base_xrp] =
2676 lpClosed->fees().accountReserve(0).decimalXRP();
2677 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2678
2679 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2680 std::abs(closeOffset.count()) >= 60)
2681 l[jss::close_time_offset] =
2682 static_cast<std::uint32_t>(closeOffset.count());
2683
2684 constexpr std::chrono::seconds highAgeThreshold{1000000};
2686 {
2687 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2688 l[jss::age] =
2689 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2690 }
2691 else
2692 {
2693 auto lCloseTime = lpClosed->info().closeTime;
2694 auto closeTime = app_.timeKeeper().closeTime();
2695 if (lCloseTime <= closeTime)
2696 {
2697 using namespace std::chrono_literals;
2698 auto age = closeTime - lCloseTime;
2699 l[jss::age] =
2700 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2701 }
2702 }
2703 }
2704
2705 if (valid)
2706 info[jss::validated_ledger] = l;
2707 else
2708 info[jss::closed_ledger] = l;
2709
2710 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2711 if (!lpPublished)
2712 info[jss::published_ledger] = "none";
2713 else if (lpPublished->info().seq != lpClosed->info().seq)
2714 info[jss::published_ledger] = lpPublished->info().seq;
2715 }
2716
2717 accounting_.json(info);
2718 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2719 info[jss::jq_trans_overflow] =
2721 info[jss::peer_disconnects] =
2723 info[jss::peer_disconnects_resources] =
2725
2726 // This array must be sorted in increasing order.
2727 static constexpr std::array<std::string_view, 7> protocols{
2728 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2729 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2730 {
2732 for (auto const& port : app_.getServerHandler().setup().ports)
2733 {
2734 // Don't publish admin ports for non-admin users
2735 if (!admin &&
2736 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2737 port.admin_user.empty() && port.admin_password.empty()))
2738 continue;
2741 std::begin(port.protocol),
2742 std::end(port.protocol),
2743 std::begin(protocols),
2744 std::end(protocols),
2745 std::back_inserter(proto));
2746 if (!proto.empty())
2747 {
2748 auto& jv = ports.append(Json::Value(Json::objectValue));
2749 jv[jss::port] = std::to_string(port.port);
2750 jv[jss::protocol] = Json::Value{Json::arrayValue};
2751 for (auto const& p : proto)
2752 jv[jss::protocol].append(p);
2753 }
2754 }
2755
2756 if (app_.config().exists(SECTION_PORT_GRPC))
2757 {
2758 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
2759 auto const optPort = grpcSection.get("port");
2760 if (optPort && grpcSection.get("ip"))
2761 {
2762 auto& jv = ports.append(Json::Value(Json::objectValue));
2763 jv[jss::port] = *optPort;
2764 jv[jss::protocol] = Json::Value{Json::arrayValue};
2765 jv[jss::protocol].append("grpc");
2766 }
2767 }
2768 info[jss::ports] = std::move(ports);
2769 }
2770
2771 return info;
2772}
2773
// NOTE(review): Doxygen capture — embedded numbers are rendered line numbers
// and lines 2775/2777 (signature and body) were dropped. By position between
// getServerInfo and the ledger-fetch info getter this is presumably
// NetworkOPsImp::clearLedgerFetch — TODO confirm against repository source.
2774void
2776{
2778}
2779
// Returns the InboundLedgers subsystem's status as JSON.
// NOTE(review): the signature lines (2780-2781) were dropped by the capture;
// presumably Json::Value NetworkOPsImp::getLedgerFetchInfo() — verify.
2782{
2783 return app_.getInboundLedgers().getInfo();
2784}
2785
// Publishes a proposed (not-yet-validated) transaction to every subscriber
// of the real-time transactions stream (sRTTransactions), pruning entries
// whose InfoSub weak_ptr has expired, then forwards to the per-account
// proposed-transaction publisher.
// NOTE(review): the capture dropped the signature line (2787 — presumably
// NetworkOPsImp::pubProposedTransaction) and line 2796 (presumably the
// stream-map lock) — verify against repository source.
2786void
2788 std::shared_ptr<ReadView const> const& ledger,
2789 std::shared_ptr<STTx const> const& transaction,
2790 TER result)
2791{
2792 MultiApiJson jvObj =
2793 transJson(transaction, result, false, ledger, std::nullopt);
2794
2795 {
2797
2798 auto it = mStreamMaps[sRTTransactions].begin();
2799 while (it != mStreamMaps[sRTTransactions].end())
2800 {
2801 InfoSub::pointer p = it->second.lock();
2802
2803 if (p)
2804 {
// Send the JSON shaped for this subscriber's API version.
2805 jvObj.visit(
2806 p->getApiVersion(), //
2807 [&](Json::Value const& jv) { p->send(jv, true); });
2808 ++it;
2809 }
2810 else
2811 {
// Subscriber is gone; drop the stale entry while iterating.
2812 it = mStreamMaps[sRTTransactions].erase(it);
2813 }
2814 }
2815 }
2816
2817 pubProposedAccountTransaction(ledger, transaction, result);
2818}
2819
// NetworkOPsImp::pubLedger (name grounded by the XRPL_ASSERT string below):
// publishes an accepted ledger to the sLedger and sBookChanges streams,
// starts any delayed account-history subscriptions on the first published
// ledger, and finally publishes each accepted transaction.
// NOTE(review): Doxygen capture — several lines were dropped (e.g. 2821
// signature, 2826 AcceptedLedger declaration, 2844/2848 lock and jvObj
// declaration, 2866/2869 validated-ledger condition and range value, 2917
// the subAccountHistoryStart call, 2930 the pubValidatedTransaction call);
// verify against repository source before relying on details.
2820void
2822{
2823 // Ledgers are published only when they acquire sufficient validations
2824 // Holes are filled across connection loss or other catastrophe
2825
2827 app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
2828 if (!alpAccepted)
2829 {
// Cache miss: build the AcceptedLedger and canonicalize it into the cache.
2830 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
2831 app_.getAcceptedLedgerCache().canonicalize_replace_client(
2832 lpAccepted->info().hash, alpAccepted);
2833 }
2834
2835 XRPL_ASSERT(
2836 alpAccepted->getLedger().get() == lpAccepted.get(),
2837 "ripple::NetworkOPsImp::pubLedger : accepted input");
2838
2839 {
2840 JLOG(m_journal.debug())
2841 << "Publishing ledger " << lpAccepted->info().seq << " "
2842 << lpAccepted->info().hash;
2843
2845
2846 if (!mStreamMaps[sLedger].empty())
2847 {
2849
// Build the "ledgerClosed" notification common to all ledger-stream
// subscribers.
2850 jvObj[jss::type] = "ledgerClosed";
2851 jvObj[jss::ledger_index] = lpAccepted->info().seq;
2852 jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
2853 jvObj[jss::ledger_time] = Json::Value::UInt(
2854 lpAccepted->info().closeTime.time_since_epoch().count());
2855
// fee_ref is only reported when the XRPFees amendment is not enabled.
2856 if (!lpAccepted->rules().enabled(featureXRPFees))
2857 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
2858 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
2859 jvObj[jss::reserve_base] =
2860 lpAccepted->fees().accountReserve(0).jsonClipped();
2861 jvObj[jss::reserve_inc] =
2862 lpAccepted->fees().increment.jsonClipped();
2863
2864 jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
2865
2867 {
2868 jvObj[jss::validated_ledgers] =
2870 }
2871
// Fan out to ledger-stream subscribers, erasing expired weak_ptrs.
2872 auto it = mStreamMaps[sLedger].begin();
2873 while (it != mStreamMaps[sLedger].end())
2874 {
2875 InfoSub::pointer p = it->second.lock();
2876 if (p)
2877 {
2878 p->send(jvObj, true);
2879 ++it;
2880 }
2881 else
2882 it = mStreamMaps[sLedger].erase(it);
2883 }
2884 }
2885
2886 if (!mStreamMaps[sBookChanges].empty())
2887 {
// Book-changes stream gets a separately computed summary object.
2888 Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
2889
2890 auto it = mStreamMaps[sBookChanges].begin();
2891 while (it != mStreamMaps[sBookChanges].end())
2892 {
2893 InfoSub::pointer p = it->second.lock();
2894 if (p)
2895 {
2896 p->send(jvObj, true);
2897 ++it;
2898 }
2899 else
2900 it = mStreamMaps[sBookChanges].erase(it);
2901 }
2902 }
2903
2904 {
// One-shot: on the first published ledger, kick off account-history
// subscriptions that were registered before any ledger was validated
// (separationLedgerSeq_ == 0 marks "not yet started").
2905 static bool firstTime = true;
2906 if (firstTime)
2907 {
2908 // First validated ledger, start delayed SubAccountHistory
2909 firstTime = false;
2910 for (auto& outer : mSubAccountHistory)
2911 {
2912 for (auto& inner : outer.second)
2913 {
2914 auto& subInfo = inner.second;
2915 if (subInfo.index_->separationLedgerSeq_ == 0)
2916 {
2918 alpAccepted->getLedger(), subInfo);
2919 }
2920 }
2921 }
2922 }
2923 }
2924 }
2925
2926 // Don't lock since pubAcceptedTransaction is locking.
2927 for (auto const& accTx : *alpAccepted)
2928 {
2929 JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
2931 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
2932 }
2933}
2934
// Schedules a pubServer() job when the current fee summary differs from the
// last published one (avoids redundant "server" stream updates).
// NOTE(review): capture dropped lines 2936 (signature — presumably
// NetworkOPsImp::reportFeeChange), 2938 (start of the ServerFeeSummary
// construction), and 2946 (the job-queue call) — verify against repo source.
2935void
2937{
2939 app_.openLedger().current()->fees().base,
2941 app_.getFeeTrack()};
2942
2943 // only schedule the job if something has changed
2944 if (f != mLastFeeSummary)
2945 {
2947 jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
2948 pubServer();
2949 });
2950 }
2951}
2952
// Queues a job that publishes the new consensus phase to subscribers.
// NOTE(review): capture dropped lines 2954/2956-2957 (signature and the
// job-queue call with its job type) — verify against repository source.
2953void
2955{
2958 "reportConsensusStateChange->pubConsensus",
2959 [this, phase]() { pubConsensus(phase); });
2960}
2961
// Delegates a sweep of the local transaction set against the given view.
// NOTE(review): signature line 2963 dropped by the capture — function name
// unconfirmed; verify against repository source.
2962inline void
2964{
2965 m_localTX->sweep(view);
2966}
// Returns the number of transactions currently held in the local tx set.
// NOTE(review): signature line 2968 dropped by the capture — presumably
// NetworkOPsImp::getLocalTxCount; verify against repository source.
2967inline std::size_t
2969{
2970 return m_localTX->size();
2971}
2972
// transJson (name grounded by its call sites elsewhere in this file):
// builds the JSON "transaction" notification for a transaction plus its
// engine result, optionally attaching metadata, and returns a MultiApiJson
// holding one finished variant per supported API version.
// NOTE(review): Doxygen capture — lines 2975-2976 (return type/name), 2981
// (the meta parameter), 2983 (jvObj declaration), 2999/3001 (helper calls
// around the meta insertion), 3041 (an accountFunds argument), and
// 3049/3052 (the visit call head and lambda parameter list) were dropped;
// verify details against repository source.
2973// This routine should only be used to publish accepted or validated
2974// transactions.
2977 std::shared_ptr<STTx const> const& transaction,
2978 TER result,
2979 bool validated,
2980 std::shared_ptr<ReadView const> const& ledger,
2982{
2984 std::string sToken;
2985 std::string sHuman;
2986
// Translate the TER into its token ("tesSUCCESS") and human message.
2987 transResultInfo(result, sToken, sHuman);
2988
2989 jvObj[jss::type] = "transaction";
2990 // NOTE jvObj is not a finished object for either API version. After
2991 // it's populated, we need to finish it for a specific API version. This is
2992 // done in a loop, near the end of this function.
2993 jvObj[jss::transaction] =
2994 transaction->getJson(JsonOptions::disable_API_prior_V2, false);
2995
2996 if (meta)
2997 {
2998 jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
3000 jvObj[jss::meta], *ledger, transaction, meta->get());
3002 jvObj[jss::meta], transaction, meta->get());
3003 }
3004
3005 if (!ledger->open())
3006 jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
3007
3008 if (validated)
3009 {
// Validated transactions report the closed ledger's sequence, close
// time (raw and ISO-8601), and validated=true.
3010 jvObj[jss::ledger_index] = ledger->info().seq;
3011 jvObj[jss::transaction][jss::date] =
3012 ledger->info().closeTime.time_since_epoch().count();
3013 jvObj[jss::validated] = true;
3014 jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3015
3016 // WRITEME: Put the account next seq here
3017 }
3018 else
3019 {
3020 jvObj[jss::validated] = false;
3021 jvObj[jss::ledger_current_index] = ledger->info().seq;
3022 }
3023
3024 jvObj[jss::status] = validated ? "closed" : "proposed";
3025 jvObj[jss::engine_result] = sToken;
3026 jvObj[jss::engine_result_code] = result;
3027 jvObj[jss::engine_result_message] = sHuman;
3028
3029 if (transaction->getTxnType() == ttOFFER_CREATE)
3030 {
3031 auto const account = transaction->getAccountID(sfAccount);
3032 auto const amount = transaction->getFieldAmount(sfTakerGets);
3033
3034 // If the offer create is not self funded then add the owner balance
3035 if (account != amount.issue().account)
3036 {
3037 auto const ownerFunds = accountFunds(
3038 *ledger,
3039 account,
3040 amount,
3042 app_.journal("View"));
3043 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3044 }
3045 }
3046
3047 std::string const hash = to_string(transaction->getTransactionID());
3048 MultiApiJson multiObj{jvObj};
// Finish each per-API-version variant: insert DeliverMax, and for API v2+
// rename "transaction" to "tx_json" and hoist "hash" to the top level.
3050 multiObj.visit(), //
3051 [&]<unsigned Version>(
3053 RPC::insertDeliverMax(
3054 jvTx[jss::transaction], transaction->getTxnType(), Version);
3055
3056 if constexpr (Version > 1)
3057 {
3058 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3059 jvTx[jss::hash] = hash;
3060 }
3061 else
3062 {
3063 jvTx[jss::transaction][jss::hash] = hash;
3064 }
3065 });
3066
3067 return multiObj;
3068}
3069
// Publishes a validated/accepted transaction to both the sTransactions and
// sRTTransactions streams (pruning dead subscribers), feeds successful
// transactions to the order book DB, then notifies per-account subscribers.
// NOTE(review): capture dropped line 3071 (signature — presumably
// NetworkOPsImp::pubValidatedTransaction) and 3084 (presumably the
// stream-map lock) — verify against repository source.
3070void
3072 std::shared_ptr<ReadView const> const& ledger,
3073 const AcceptedLedgerTx& transaction,
3074 bool last)
3075{
3076 auto const& stTxn = transaction.getTxn();
3077
3078 // Create two different Json objects, for different API versions
3079 auto const metaRef = std::ref(transaction.getMeta());
3080 auto const trResult = transaction.getResult();
3081 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3082
3083 {
3085
3086 auto it = mStreamMaps[sTransactions].begin();
3087 while (it != mStreamMaps[sTransactions].end())
3088 {
3089 InfoSub::pointer p = it->second.lock();
3090
3091 if (p)
3092 {
3093 jvObj.visit(
3094 p->getApiVersion(), //
3095 [&](Json::Value const& jv) { p->send(jv, true); });
3096 ++it;
3097 }
3098 else
3099 it = mStreamMaps[sTransactions].erase(it);
3100 }
3101
// Same fan-out for the real-time transactions stream.
3102 it = mStreamMaps[sRTTransactions].begin();
3103
3104 while (it != mStreamMaps[sRTTransactions].end())
3105 {
3106 InfoSub::pointer p = it->second.lock();
3107
3108 if (p)
3109 {
3110 jvObj.visit(
3111 p->getApiVersion(), //
3112 [&](Json::Value const& jv) { p->send(jv, true); });
3113 ++it;
3114 }
3115 else
3116 it = mStreamMaps[sRTTransactions].erase(it);
3117 }
3118 }
3119
3120 if (transaction.getResult() == tesSUCCESS)
3121 app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3122
3123 pubAccountTransaction(ledger, transaction, last);
3124}
3125
// NetworkOPsImp::pubAccountTransaction (name grounded by the XRPL_ASSERT
// string below): notifies per-account subscribers (real-time, accepted, and
// account-history stream) about a validated transaction, pruning expired
// subscriptions as it goes.
// NOTE(review): capture dropped lines 3127 (signature), 3132 (presumably
// the `notify` set declaration), 3139 (presumably the subscription lock),
// 3142 (the third operand of the if at 3141), and 3242 (one operand of the
// assert expression) — verify against repository source.
3126void
3128 std::shared_ptr<ReadView const> const& ledger,
3129 AcceptedLedgerTx const& transaction,
3130 bool last)
3131{
3133 int iProposed = 0;
3134 int iAccepted = 0;
3135
3136 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3137 auto const currLedgerSeq = ledger->seq();
3138 {
3140
3141 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3143 {
3144 for (auto const& affectedAccount : transaction.getAffected())
3145 {
// Real-time subscribers for this account.
3146 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3147 simiIt != mSubRTAccount.end())
3148 {
3149 auto it = simiIt->second.begin();
3150
3151 while (it != simiIt->second.end())
3152 {
3153 InfoSub::pointer p = it->second.lock();
3154
3155 if (p)
3156 {
3157 notify.insert(p);
3158 ++it;
3159 ++iProposed;
3160 }
3161 else
3162 it = simiIt->second.erase(it);
3163 }
3164 }
3165
// Accepted-transaction subscribers for this account.
3166 if (auto simiIt = mSubAccount.find(affectedAccount);
3167 simiIt != mSubAccount.end())
3168 {
3169 auto it = simiIt->second.begin();
3170 while (it != simiIt->second.end())
3171 {
3172 InfoSub::pointer p = it->second.lock();
3173
3174 if (p)
3175 {
3176 notify.insert(p);
3177 ++it;
3178 ++iAccepted;
3179 }
3180 else
3181 it = simiIt->second.erase(it);
3182 }
3183 }
3184
// Account-history subscribers: skip ledgers at or before the
// subscription's separation point (those are delivered by the
// backward-scanning history job instead).
3185 if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3186 histoIt != mSubAccountHistory.end())
3187 {
3188 auto& subs = histoIt->second;
3189 auto it = subs.begin();
3190 while (it != subs.end())
3191 {
3192 SubAccountHistoryInfoWeak const& info = it->second;
3193 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3194 {
3195 ++it;
3196 continue;
3197 }
3198
3199 if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3200 {
3201 accountHistoryNotify.emplace_back(
3202 SubAccountHistoryInfo{isSptr, info.index_});
3203 ++it;
3204 }
3205 else
3206 {
3207 it = subs.erase(it);
3208 }
3209 }
3210 if (subs.empty())
3211 mSubAccountHistory.erase(histoIt);
3212 }
3213 }
3214 }
3215 }
3216
3217 JLOG(m_journal.trace())
3218 << "pubAccountTransaction: " << "proposed=" << iProposed
3219 << ", accepted=" << iAccepted;
3220
3221 if (!notify.empty() || !accountHistoryNotify.empty())
3222 {
3223 auto const& stTxn = transaction.getTxn();
3224
3225 // Create two different Json objects, for different API versions
3226 auto const metaRef = std::ref(transaction.getMeta());
3227 auto const trResult = transaction.getResult();
3228 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3229
3230 for (InfoSub::ref isrListener : notify)
3231 {
3232 jvObj.visit(
3233 isrListener->getApiVersion(), //
3234 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3235 }
3236
// Mark the last transaction of the ledger for history-stream consumers.
3237 if (last)
3238 jvObj.set(jss::account_history_boundary, true);
3239
3240 XRPL_ASSERT(
3241 jvObj.isMember(jss::account_history_tx_stream) ==
3243 "ripple::NetworkOPsImp::pubAccountTransaction : "
3244 "account_history_tx_stream not set");
3245 for (auto& info : accountHistoryNotify)
3246 {
3247 auto& index = info.index_;
// Forward (post-subscription) transactions get non-negative,
// increasing indices; the first-ever forward tx is flagged when
// no historical backfill is pending.
3248 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3249 jvObj.set(jss::account_history_tx_first, true);
3250
3251 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3252
3253 jvObj.visit(
3254 info.sink_->getApiVersion(), //
3255 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3256 }
3257 }
3258}
3259
// NetworkOPs::pubProposedAccountTransaction (name grounded by the
// XRPL_ASSERT string below): notifies real-time per-account subscribers of
// a proposed (unvalidated) transaction.
// NOTE(review): capture dropped lines 3261/3263 (signature and the `tx`
// parameter), 3266 (presumably the `notify` set declaration), 3272
// (presumably the lock), 3278 (third operand of the if at 3277), and 3319
// (one operand of the assert expression) — verify against repository
// source. accountHistoryNotify is declared but never filled in the visible
// text, so the trailing loop appears to be dead here — confirm upstream.
3260void
3262 std::shared_ptr<ReadView const> const& ledger,
3264 TER result)
3265{
3267 int iProposed = 0;
3268
3269 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3270
3271 {
3273
// Nothing to do if no one subscribes to real-time account updates.
3274 if (mSubRTAccount.empty())
3275 return;
3276
3277 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3279 {
3280 for (auto const& affectedAccount : tx->getMentionedAccounts())
3281 {
3282 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3283 simiIt != mSubRTAccount.end())
3284 {
3285 auto it = simiIt->second.begin();
3286
3287 while (it != simiIt->second.end())
3288 {
3289 InfoSub::pointer p = it->second.lock();
3290
3291 if (p)
3292 {
3293 notify.insert(p);
3294 ++it;
3295 ++iProposed;
3296 }
3297 else
3298 it = simiIt->second.erase(it);
3299 }
3300 }
3301 }
3302 }
3303 }
3304
3305 JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3306
3307 if (!notify.empty() || !accountHistoryNotify.empty())
3308 {
3309 // Create two different Json objects, for different API versions
3310 MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3311
3312 for (InfoSub::ref isrListener : notify)
3313 jvObj.visit(
3314 isrListener->getApiVersion(), //
3315 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3316
3317 XRPL_ASSERT(
3318 jvObj.isMember(jss::account_history_tx_stream) ==
3320 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3321 "account_history_tx_stream not set");
3322 for (auto& info : accountHistoryNotify)
3323 {
3324 auto& index = info.index_;
3325 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3326 jvObj.set(jss::account_history_tx_first, true);
3327 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3328 jvObj.visit(
3329 info.sink_->getApiVersion(), //
3330 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3331 }
3332 }
3333}
3334
3335//
3336// Monitoring
3337//
3338
// subAccount (name grounded by the trace message): registers a listener for
// a set of accounts on either the real-time (rt=true) or accepted map,
// recording the subscription both on the InfoSub and in the server-side map.
// NOTE(review): capture dropped lines 3340 (signature) and 3355 (presumably
// the lock protecting subMap) — verify against repository source.
3339void
3341 InfoSub::ref isrListener,
3342 hash_set<AccountID> const& vnaAccountIDs,
3343 bool rt)
3344{
3345 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3346
// First record each account on the listener itself.
3347 for (auto const& naAccountID : vnaAccountIDs)
3348 {
3349 JLOG(m_journal.trace())
3350 << "subAccount: account: " << toBase58(naAccountID);
3351
3352 isrListener->insertSubAccountInfo(naAccountID, rt);
3353 }
3354
3356
// Then record the listener in the server-side per-account map.
3357 for (auto const& naAccountID : vnaAccountIDs)
3358 {
3359 auto simIterator = subMap.find(naAccountID);
3360 if (simIterator == subMap.end())
3361 {
3362 // Not found, note that account has a new single listner.
3363 SubMapType usisElement;
3364 usisElement[isrListener->getSeq()] = isrListener;
3365 // VFALCO NOTE This is making a needless copy of naAccountID
3366 subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3367 }
3368 else
3369 {
3370 // Found, note that the account has another listener.
3371 simIterator->second[isrListener->getSeq()] = isrListener;
3372 }
3373 }
3374}
3375
// Removes the listener's account subscriptions from the InfoSub, then from
// the server-side maps via unsubAccountInternal.
// NOTE(review): capture dropped line 3377 (signature — presumably
// NetworkOPsImp::unsubAccount) — verify against repository source.
3376void
3378 InfoSub::ref isrListener,
3379 hash_set<AccountID> const& vnaAccountIDs,
3380 bool rt)
3381{
3382 for (auto const& naAccountID : vnaAccountIDs)
3383 {
3384 // Remove from the InfoSub
3385 isrListener->deleteSubAccountInfo(naAccountID, rt);
3386 }
3387
3388 // Remove from the server
3389 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3390}
3391
// Server-side half of account unsubscription: removes the listener (by its
// sequence id) from the chosen per-account map, dropping empty map entries.
// NOTE(review): capture dropped lines 3393 (signature) and 3398 (presumably
// the lock) — verify against repository source.
3392void
3394 std::uint64_t uSeq,
3395 hash_set<AccountID> const& vnaAccountIDs,
3396 bool rt)
3397{
3399
3400 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3401
3402 for (auto const& naAccountID : vnaAccountIDs)
3403 {
3404 auto simIterator = subMap.find(naAccountID);
3405
3406 if (simIterator != subMap.end())
3407 {
3408 // Found
3409 simIterator->second.erase(uSeq);
3410
3411 if (simIterator->second.empty())
3412 {
3413 // Don't need hash entry.
3414 subMap.erase(simIterator);
3415 }
3416 }
3417 }
3418}
3419
// NetworkOPsImp::addAccountHistoryJob (name grounded by the UNREACHABLE
// string inside): schedules a background job that streams an account's
// historical transactions backwards — newest first, in 1024-ledger windows —
// to an account_history_tx_stream subscriber, until it finds the account's
// first transaction, reaches the genesis ledger, or is asked to stop.
// NOTE(review): Doxygen capture — lines 3421 (signature/parameter), 3447-
// 3448 (the job-queue call head), 3528-3531 (getMoreTxns' marker parameter,
// return type and body head), 3536-3537 (database pointer fetch and
// AccountTxPageOptions head), 3582 (the validated-range query call), 3596
// (the reschedule call), 3600 (presumably the marker declaration), 3630 and
// 3640 (the ledger fetch and stTxn declaration heads) were dropped —
// verify against repository source.
3420void
3422{
3423 enum DatabaseType { Sqlite, None };
// Resolved once per process: history paging is only supported on SQLite.
3424 static const auto databaseType = [&]() -> DatabaseType {
3425 // Use a dynamic_cast to return DatabaseType::None
3426 // on failure.
3427 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3428 {
3429 return DatabaseType::Sqlite;
3430 }
3431 return DatabaseType::None;
3432 }();
3433
3434 if (databaseType == DatabaseType::None)
3435 {
// No usable database: report an internal error to the subscriber and
// cancel the subscription.
3436 JLOG(m_journal.error())
3437 << "AccountHistory job for account "
3438 << toBase58(subInfo.index_->accountId_) << " no database";
3439 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3440 {
3441 sptr->send(rpcError(rpcINTERNAL), true);
3442 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3443 }
3444 return;
3445 }
3446
3449 "AccountHistoryTxStream",
3450 [this, dbType = databaseType, subInfo]() {
3451 auto const& accountId = subInfo.index_->accountId_;
3452 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3453 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3454
3455 JLOG(m_journal.trace())
3456 << "AccountHistory job for account " << toBase58(accountId)
3457 << " started. lastLedgerSeq=" << lastLedgerSeq;
3458
// Detects the account's first-ever transaction, which terminates the
// backward scan.
3459 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3460 std::shared_ptr<TxMeta> const& meta) -> bool {
3461 /*
3462 * genesis account: first tx is the one with seq 1
3463 * other account: first tx is the one created the account
3464 */
3465 if (accountId == genesisAccountId)
3466 {
3467 auto stx = tx->getSTransaction();
3468 if (stx->getAccountID(sfAccount) == accountId &&
3469 stx->getSeqProxy().value() == 1)
3470 return true;
3471 }
3472
// Otherwise, look for the AccountRoot creation (NewFields carrying
// this account) in the metadata.
3473 for (auto& node : meta->getNodes())
3474 {
3475 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3476 continue;
3477
3478 if (node.isFieldPresent(sfNewFields))
3479 {
3480 if (auto inner = dynamic_cast<const STObject*>(
3481 node.peekAtPField(sfNewFields));
3482 inner)
3483 {
3484 if (inner->isFieldPresent(sfAccount) &&
3485 inner->getAccountID(sfAccount) == accountId)
3486 {
3487 return true;
3488 }
3489 }
3490 }
3491 }
3492
3493 return false;
3494 };
3495
// Send helpers: deliver to the subscriber if still alive, optionally
// unsubscribing afterwards; return false if the subscriber is gone.
3496 auto send = [&](Json::Value const& jvObj,
3497 bool unsubscribe) -> bool {
3498 if (auto sptr = subInfo.sinkWptr_.lock())
3499 {
3500 sptr->send(jvObj, true);
3501 if (unsubscribe)
3502 unsubAccountHistory(sptr, accountId, false);
3503 return true;
3504 }
3505
3506 return false;
3507 };
3508
3509 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3510 bool unsubscribe) -> bool {
3511 if (auto sptr = subInfo.sinkWptr_.lock())
3512 {
3513 jvObj.visit(
3514 sptr->getApiVersion(), //
3515 [&](Json::Value const& jv) { sptr->send(jv, true); });
3516
3517 if (unsubscribe)
3518 unsubAccountHistory(sptr, accountId, false);
3519 return true;
3520 }
3521
3522 return false;
3523 };
3524
// Fetches the next page of transactions (newest first) for the
// current ledger window.
3525 auto getMoreTxns =
3526 [&](std::uint32_t minLedger,
3527 std::uint32_t maxLedger,
3532 switch (dbType)
3533 {
3534 case Sqlite: {
3535 auto db = static_cast<SQLiteDatabase*>(
3538 accountId, minLedger, maxLedger, marker, 0, true};
3539 return db->newestAccountTxPage(options);
3540 }
3541 default: {
3542 UNREACHABLE(
3543 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3544 "getMoreTxns : invalid database type");
3545 return {};
3546 }
3547 }
3548 };
3549
3550 /*
3551 * search backward until the genesis ledger or asked to stop
3552 */
3553 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3554 {
// Charge the subscriber's resource consumer per window; bail out if
// the subscriber has disconnected.
3555 int feeChargeCount = 0;
3556 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3557 {
3558 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3559 ++feeChargeCount;
3560 }
3561 else
3562 {
3563 JLOG(m_journal.trace())
3564 << "AccountHistory job for account "
3565 << toBase58(accountId) << " no InfoSub. Fee charged "
3566 << feeChargeCount << " times.";
3567 return;
3568 }
3569
3570 // try to search in 1024 ledgers till reaching genesis ledgers
3571 auto startLedgerSeq =
3572 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3573 JLOG(m_journal.trace())
3574 << "AccountHistory job for account " << toBase58(accountId)
3575 << ", working on ledger range [" << startLedgerSeq << ","
3576 << lastLedgerSeq << "]";
3577
// Only proceed if the whole window is within the locally validated
// ledger range; otherwise reschedule and retry later.
3578 auto haveRange = [&]() -> bool {
3579 std::uint32_t validatedMin = UINT_MAX;
3580 std::uint32_t validatedMax = 0;
3581 auto haveSomeValidatedLedgers =
3583 validatedMin, validatedMax);
3584
3585 return haveSomeValidatedLedgers &&
3586 validatedMin <= startLedgerSeq &&
3587 lastLedgerSeq <= validatedMax;
3588 }();
3589
3590 if (!haveRange)
3591 {
3592 JLOG(m_journal.debug())
3593 << "AccountHistory reschedule job for account "
3594 << toBase58(accountId) << ", incomplete ledger range ["
3595 << startLedgerSeq << "," << lastLedgerSeq << "]";
3597 return;
3598 }
3599
// Page through the window until the marker is exhausted.
3601 while (!subInfo.index_->stopHistorical_)
3602 {
3603 auto dbResult =
3604 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3605 if (!dbResult)
3606 {
3607 JLOG(m_journal.debug())
3608 << "AccountHistory job for account "
3609 << toBase58(accountId) << " getMoreTxns failed.";
3610 send(rpcError(rpcINTERNAL), true);
3611 return;
3612 }
3613
3614 auto const& txns = dbResult->first;
3615 marker = dbResult->second;
3616 size_t num_txns = txns.size();
3617 for (size_t i = 0; i < num_txns; ++i)
3618 {
3619 auto const& [tx, meta] = txns[i];
3620
3621 if (!tx || !meta)
3622 {
3623 JLOG(m_journal.debug())
3624 << "AccountHistory job for account "
3625 << toBase58(accountId) << " empty tx or meta.";
3626 send(rpcError(rpcINTERNAL), true);
3627 return;
3628 }
3629 auto curTxLedger =
3631 tx->getLedger());
3632 if (!curTxLedger)
3633 {
3634 JLOG(m_journal.debug())
3635 << "AccountHistory job for account "
3636 << toBase58(accountId) << " no ledger.";
3637 send(rpcError(rpcINTERNAL), true);
3638 return;
3639 }
3641 tx->getSTransaction();
3642 if (!stTxn)
3643 {
3644 JLOG(m_journal.debug())
3645 << "AccountHistory job for account "
3646 << toBase58(accountId)
3647 << " getSTransaction failed.";
3648 send(rpcError(rpcINTERNAL), true);
3649 return;
3650 }
3651
3652 auto const mRef = std::ref(*meta);
3653 auto const trR = meta->getResultTER();
3654 MultiApiJson jvTx =
3655 transJson(stTxn, trR, true, curTxLedger, mRef);
3656
// Historical transactions count downward; mark the last tx of
// each ledger as a boundary.
3657 jvTx.set(
3658 jss::account_history_tx_index, txHistoryIndex--);
3659 if (i + 1 == num_txns ||
3660 txns[i + 1].first->getLedger() != tx->getLedger())
3661 jvTx.set(jss::account_history_boundary, true);
3662
3663 if (isFirstTx(tx, meta))
3664 {
3665 jvTx.set(jss::account_history_tx_first, true);
3666 sendMultiApiJson(jvTx, false);
3667
3668 JLOG(m_journal.trace())
3669 << "AccountHistory job for account "
3670 << toBase58(accountId)
3671 << " done, found last tx.";
3672 return;
3673 }
3674 else
3675 {
3676 sendMultiApiJson(jvTx, false);
3677 }
3678 }
3679
3680 if (marker)
3681 {
3682 JLOG(m_journal.trace())
3683 << "AccountHistory job for account "
3684 << toBase58(accountId)
3685 << " paging, marker=" << marker->ledgerSeq << ":"
3686 << marker->txnSeq;
3687 }
3688 else
3689 {
3690 break;
3691 }
3692 }
3693
// Window finished: slide to the next (older) window, stopping at the
// genesis ledger.
3694 if (!subInfo.index_->stopHistorical_)
3695 {
3696 lastLedgerSeq = startLedgerSeq - 1;
3697 if (lastLedgerSeq <= 1)
3698 {
3699 JLOG(m_journal.trace())
3700 << "AccountHistory job for account "
3701 << toBase58(accountId)
3702 << " done, reached genesis ledger.";
3703 return;
3704 }
3705 }
3706 }
3707 });
3708}
3709
// NetworkOPsImp::subAccountHistoryStart (name grounded by the UNREACHABLE
// string below): anchors a new account-history subscription at the given
// validated ledger and, when the account exists and has transactions,
// launches the backward-scanning history job.
// NOTE(review): capture dropped lines 3711/3713 (name and the subInfo
// parameter) — verify against repository source.
3710void
3712 std::shared_ptr<ReadView const> const& ledger,
3714{
// Record the separation point: txs at/before this seq come from the
// history job, later ones from live publication.
3715 subInfo.index_->separationLedgerSeq_ = ledger->seq();
3716 auto const& accountId = subInfo.index_->accountId_;
3717 auto const accountKeylet = keylet::account(accountId);
3718 if (!ledger->exists(accountKeylet))
3719 {
3720 JLOG(m_journal.debug())
3721 << "subAccountHistoryStart, no account " << toBase58(accountId)
3722 << ", no need to add AccountHistory job.";
3723 return;
3724 }
3725 if (accountId == genesisAccountId)
3726 {
// Genesis account with Sequence==1 has never sent a transaction.
3727 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3728 {
3729 if (sleAcct->getFieldU32(sfSequence) == 1)
3730 {
3731 JLOG(m_journal.debug())
3732 << "subAccountHistoryStart, genesis account "
3733 << toBase58(accountId)
3734 << " does not have tx, no need to add AccountHistory job.";
3735 return;
3736 }
3737 }
3738 else
3739 {
3740 UNREACHABLE(
3741 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
3742 "access genesis account");
3743 return;
3744 }
3745 }
3746 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
3747 subInfo.index_->haveHistorical_ = true;
3748
3749 JLOG(m_journal.debug())
3750 << "subAccountHistoryStart, add AccountHistory job: accountId="
3751 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
3752
3753 addAccountHistoryJob(subInfo);
3754}
3755
// subAccountHistory (name grounded by the log messages): registers an
// account_history_tx_stream subscription for one account and starts
// streaming immediately if a validated ledger is available; otherwise the
// start is deferred (handled when the first ledger is published).
// Returns rpcSUCCESS, or rpcINVALID_PARAMS if already subscribed.
// NOTE(review): capture dropped lines 3756-3757 (return type and name),
// 3769-3770 (the SubAccountHistoryInfoWeak `ahi` construction head), and
// 3775/3777 (the inner-map declaration and the emplace_hint/insert call
// head) — verify against repository source.
3758 InfoSub::ref isrListener,
3759 AccountID const& accountId)
3760{
3761 if (!isrListener->insertSubAccountHistory(accountId))
3762 {
3763 JLOG(m_journal.debug())
3764 << "subAccountHistory, already subscribed to account "
3765 << toBase58(accountId);
3766 return rpcINVALID_PARAMS;
3767 }
3768
3771 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
3772 auto simIterator = mSubAccountHistory.find(accountId);
3773 if (simIterator == mSubAccountHistory.end())
3774 {
3776 inner.emplace(isrListener->getSeq(), ahi);
3778 simIterator, std::make_pair(accountId, inner));
3779 }
3780 else
3781 {
3782 simIterator->second.emplace(isrListener->getSeq(), ahi);
3783 }
3784
3785 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
3786 if (ledger)
3787 {
3788 subAccountHistoryStart(ledger, ahi);
3789 }
3790 else
3791 {
3792 // The node does not have validated ledgers, so wait for
3793 // one before start streaming.
3794 // In this case, the subscription is also considered successful.
3795 JLOG(m_journal.debug())
3796 << "subAccountHistory, no validated ledger yet, delay start";
3797 }
3798
3799 return rpcSUCCESS;
3800}
3801
// Cancels an account-history subscription. historyOnly=true stops only the
// historical backfill (live forward streaming continues); otherwise the
// subscription is removed from the InfoSub and the server.
// NOTE(review): capture dropped line 3803 (signature) — verify.
3802void
3804 InfoSub::ref isrListener,
3805 AccountID const& account,
3806 bool historyOnly)
3807{
3808 if (!historyOnly)
3809 isrListener->deleteSubAccountHistory(account);
3810 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
3811}
3812
// Server-side half of account-history unsubscription: signals the history
// job to stop and, unless historyOnly, removes the subscription entry,
// pruning the per-account map when it empties.
// NOTE(review): capture dropped lines 3814 (signature) and 3819 (presumably
// the lock) — verify against repository source.
3813void
3815 std::uint64_t seq,
3816 const AccountID& account,
3817 bool historyOnly)
3818{
3820 auto simIterator = mSubAccountHistory.find(account);
3821 if (simIterator != mSubAccountHistory.end())
3822 {
3823 auto& subInfoMap = simIterator->second;
3824 auto subInfoIter = subInfoMap.find(seq);
3825 if (subInfoIter != subInfoMap.end())
3826 {
// Cooperative cancellation flag read by the background history job.
3827 subInfoIter->second.index_->stopHistorical_ = true;
3828 }
3829
3830 if (!historyOnly)
3831 {
3832 simIterator->second.erase(seq);
3833 if (simIterator->second.empty())
3834 {
3835 mSubAccountHistory.erase(simIterator);
3836 }
3837 }
3838 JLOG(m_journal.debug())
3839 << "unsubAccountHistory, account " << toBase58(account)
3840 << ", historyOnly = " << (historyOnly ? "true" : "false");
3841 }
3842}
3843
// NetworkOPsImp::subBook (name grounded by the UNREACHABLE string):
// subscribes a listener to an order book's listener list. Always returns
// true.
// NOTE(review): capture dropped line 3845 (signature/parameters) — verify.
3844bool
3846{
3847 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
3848 listeners->addSubscriber(isrListener);
3849 else
3850 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
3851 return true;
3852}
3853
// Removes a subscriber (by sequence id) from an order book's listener list,
// if that book has listeners. Always returns true.
// NOTE(review): capture dropped line 3855 (signature/parameters) — verify.
3854bool
3856{
3857 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
3858 listeners->removeSubscriber(uSeq);
3859
3860 return true;
3861}
3862
// NetworkOPsImp::acceptLedger (name grounded by the XRPL_ASSERT string):
// standalone-mode only (`ledger_accept` RPC) — simulates consensus and
// returns the new current ledger's sequence.
// NOTE(review): capture dropped lines 3863-3865 (return type, name, and the
// consensusDelay parameter) and 3878 — verify against repository source.
3866{
3867 // This code-path is exclusively used when the server is in standalone
3868 // mode via `ledger_accept`
3869 XRPL_ASSERT(
3870 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
3871
3872 if (!m_standalone)
3873 Throw<std::runtime_error>(
3874 "Operation only possible in STANDALONE mode.");
3875
3876 // FIXME Could we improve on this and remove the need for a specialized
3877 // API in Consensus?
3879 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
3880 return m_ledgerMaster.getCurrentLedger()->info().seq;
3881}
3882
// Subscribes a listener to the ledger stream. Seeds jvResult with the last
// validated ledger's summary (seq, hash, times, fees, reserves) when one
// exists, then registers the listener in mStreamMaps[sLedger].
// NOTE(review): capture dropped lines 3885 (signature), 3901/3904 (the
// validated-range condition and value), and 3907 (presumably the lock) —
// verify against repository source.
3883// <-- bool: true=added, false=already there
3884bool
3886{
3887 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
3888 {
3889 jvResult[jss::ledger_index] = lpClosed->info().seq;
3890 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
3891 jvResult[jss::ledger_time] = Json::Value::UInt(
3892 lpClosed->info().closeTime.time_since_epoch().count());
// fee_ref only pre-XRPFees amendment, matching pubLedger's behavior.
3893 if (!lpClosed->rules().enabled(featureXRPFees))
3894 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3895 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
3896 jvResult[jss::reserve_base] =
3897 lpClosed->fees().accountReserve(0).jsonClipped();
3898 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
3899 }
3900
3902 {
3903 jvResult[jss::validated_ledgers] =
3905 }
3906
3908 return mStreamMaps[sLedger]
3909 .emplace(isrListener->getSeq(), isrListener)
3910 .second;
3911}
3912
// Subscribes a listener to the book-changes stream.
// NOTE(review): capture dropped lines 3915/3917-3918 (signature, presumably
// the lock, and the `return mStreamMaps[sBookChanges]` head) — verify.
3913// <-- bool: true=added, false=already there
3914bool
3916{
3919 .emplace(isrListener->getSeq(), isrListener)
3920 .second;
3921}
3922
// Unsubscribes a listener (by sequence id) from the ledger stream.
// NOTE(review): capture dropped lines 3925/3927 (signature and presumably
// the lock) — verify against repository source.
3923// <-- bool: true=erased, false=was not there
3924bool
3926{
3928 return mStreamMaps[sLedger].erase(uSeq);
3929}
3930
// Unsubscribes a listener (by sequence id) from the book-changes stream.
// NOTE(review): capture dropped lines 3933/3935 (signature and presumably
// the lock) — verify against repository source.
3931// <-- bool: true=erased, false=was not there
3932bool
3934{
3936 return mStreamMaps[sBookChanges].erase(uSeq);
3937}
3938
3939// <-- bool: true=added, false=already there
3940bool
3942{
3944 return mStreamMaps[sManifests]
3945 .emplace(isrListener->getSeq(), isrListener)
3946 .second;
3947}
3948
3949// <-- bool: true=erased, false=was not there
3950bool
3952{
3954 return mStreamMaps[sManifests].erase(uSeq);
3955}
3956
3957// <-- bool: true=added, false=already there
3958bool
3960 InfoSub::ref isrListener,
3961 Json::Value& jvResult,
3962 bool admin)
3963{
3964 uint256 uRandom;
3965
3966 if (m_standalone)
3967 jvResult[jss::stand_alone] = m_standalone;
3968
3969 // CHECKME: is it necessary to provide a random number here?
3970 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
3971
3972 auto const& feeTrack = app_.getFeeTrack();
3973 jvResult[jss::random] = to_string(uRandom);
3974 jvResult[jss::server_status] = strOperatingMode(admin);
3975 jvResult[jss::load_base] = feeTrack.getLoadBase();
3976 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
3977 jvResult[jss::hostid] = getHostId(admin);
3978 jvResult[jss::pubkey_node] =
3980
3982 return mStreamMaps[sServer]
3983 .emplace(isrListener->getSeq(), isrListener)
3984 .second;
3985}
3986
3987// <-- bool: true=erased, false=was not there
3988bool
3990{
3992 return mStreamMaps[sServer].erase(uSeq);
3993}
3994
3995// <-- bool: true=added, false=already there
3996bool
3998{
4001 .emplace(isrListener->getSeq(), isrListener)
4002 .second;
4003}
4004
4005// <-- bool: true=erased, false=was not there
4006bool
4008{
4010 return mStreamMaps[sTransactions].erase(uSeq);
4011}
4012
4013// <-- bool: true=added, false=already there
4014bool
4016{
4019 .emplace(isrListener->getSeq(), isrListener)
4020 .second;
4021}
4022
4023// <-- bool: true=erased, false=was not there
4024bool
4026{
4028 return mStreamMaps[sRTTransactions].erase(uSeq);
4029}
4030
4031// <-- bool: true=added, false=already there
4032bool
4034{
4037 .emplace(isrListener->getSeq(), isrListener)
4038 .second;
4039}
4040
4041void
4043{
4044 accounting_.json(obj);
4045}
4046
4047// <-- bool: true=erased, false=was not there
4048bool
4050{
4052 return mStreamMaps[sValidations].erase(uSeq);
4053}
4054
4055// <-- bool: true=added, false=already there
4056bool
4058{
4060 return mStreamMaps[sPeerStatus]
4061 .emplace(isrListener->getSeq(), isrListener)
4062 .second;
4063}
4064
4065// <-- bool: true=erased, false=was not there
4066bool
4068{
4070 return mStreamMaps[sPeerStatus].erase(uSeq);
4071}
4072
4073// <-- bool: true=added, false=already there
4074bool
4076{
4079 .emplace(isrListener->getSeq(), isrListener)
4080 .second;
4081}
4082
4083// <-- bool: true=erased, false=was not there
4084bool
4086{
4088 return mStreamMaps[sConsensusPhase].erase(uSeq);
4089}
4090
4093{
4095
4096 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4097
4098 if (it != mRpcSubMap.end())
4099 return it->second;
4100
4101 return InfoSub::pointer();
4102}
4103
4106{
4108
4109 mRpcSubMap.emplace(strUrl, rspEntry);
4110
4111 return rspEntry;
4112}
4113
4114bool
4116{
4118 auto pInfo = findRpcSub(strUrl);
4119
4120 if (!pInfo)
4121 return false;
4122
4123 // check to see if any of the stream maps still hold a weak reference to
4124 // this entry before removing
4125 for (SubMapType const& map : mStreamMaps)
4126 {
4127 if (map.find(pInfo->getSeq()) != map.end())
4128 return false;
4129 }
4130 mRpcSubMap.erase(strUrl);
4131 return true;
4132}
4133
4134#ifndef USE_NEW_BOOK_PAGE
4135
4136// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4137// work, but it demonstrated poor performance.
4138//
4139void
4142 Book const& book,
4143 AccountID const& uTakerID,
4144 bool const bProof,
4145 unsigned int iLimit,
4146 Json::Value const& jvMarker,
4147 Json::Value& jvResult)
4148{ // CAUTION: This is the old get book page logic
4149 Json::Value& jvOffers =
4150 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4151
4153 const uint256 uBookBase = getBookBase(book);
4154 const uint256 uBookEnd = getQualityNext(uBookBase);
4155 uint256 uTipIndex = uBookBase;
4156
4157 if (auto stream = m_journal.trace())
4158 {
4159 stream << "getBookPage:" << book;
4160 stream << "getBookPage: uBookBase=" << uBookBase;
4161 stream << "getBookPage: uBookEnd=" << uBookEnd;
4162 stream << "getBookPage: uTipIndex=" << uTipIndex;
4163 }
4164
4165 ReadView const& view = *lpLedger;
4166
4167 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4168 isGlobalFrozen(view, book.in.account);
4169
4170 bool bDone = false;
4171 bool bDirectAdvance = true;
4172
4173 std::shared_ptr<SLE const> sleOfferDir;
4174 uint256 offerIndex;
4175 unsigned int uBookEntry;
4176 STAmount saDirRate;
4177
4178 auto const rate = transferRate(view, book.out.account);
4179 auto viewJ = app_.journal("View");
4180
4181 while (!bDone && iLimit-- > 0)
4182 {
4183 if (bDirectAdvance)
4184 {
4185 bDirectAdvance = false;
4186
4187 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4188
4189 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4190 if (ledgerIndex)
4191 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4192 else
4193 sleOfferDir.reset();
4194
4195 if (!sleOfferDir)
4196 {
4197 JLOG(m_journal.trace()) << "getBookPage: bDone";
4198 bDone = true;
4199 }
4200 else
4201 {
4202 uTipIndex = sleOfferDir->key();
4203 saDirRate = amountFromQuality(getQuality(uTipIndex));
4204
4205 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4206
4207 JLOG(m_journal.trace())
4208 << "getBookPage: uTipIndex=" << uTipIndex;
4209 JLOG(m_journal.trace())
4210 << "getBookPage: offerIndex=" << offerIndex;
4211 }
4212 }
4213
4214 if (!bDone)
4215 {
4216 auto sleOffer = view.read(keylet::offer(offerIndex));
4217
4218 if (sleOffer)
4219 {
4220 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4221 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4222 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4223 STAmount saOwnerFunds;
4224 bool firstOwnerOffer(true);
4225
4226 if (book.out.account == uOfferOwnerID)
4227 {
4228 // If an offer is selling issuer's own IOUs, it is fully
4229 // funded.
4230 saOwnerFunds = saTakerGets;
4231 }
4232 else if (bGlobalFreeze)
4233 {
4234 // If either asset is globally frozen, consider all offers
4235 // that aren't ours to be totally unfunded
4236 saOwnerFunds.clear(book.out);
4237 }
4238 else
4239 {
4240 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4241 if (umBalanceEntry != umBalance.end())
4242 {
4243 // Found in running balance table.
4244
4245 saOwnerFunds = umBalanceEntry->second;
4246 firstOwnerOffer = false;
4247 }
4248 else
4249 {
4250 // Did not find balance in table.
4251
4252 saOwnerFunds = accountHolds(
4253 view,
4254 uOfferOwnerID,
4255 book.out.currency,
4256 book.out.account,
4258 viewJ);
4259
4260 if (saOwnerFunds < beast::zero)
4261 {
4262 // Treat negative funds as zero.
4263
4264 saOwnerFunds.clear();
4265 }
4266 }
4267 }
4268
4269 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4270
4271 STAmount saTakerGetsFunded;
4272 STAmount saOwnerFundsLimit = saOwnerFunds;
4273 Rate offerRate = parityRate;
4274
4275 if (rate != parityRate
4276 // Have a tranfer fee.
4277 && uTakerID != book.out.account
4278 // Not taking offers of own IOUs.
4279 && book.out.account != uOfferOwnerID)
4280 // Offer owner not issuing ownfunds
4281 {
4282 // Need to charge a transfer fee to offer owner.
4283 offerRate = rate;
4284 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4285 }
4286
4287 if (saOwnerFundsLimit >= saTakerGets)
4288 {
4289 // Sufficient funds no shenanigans.
4290 saTakerGetsFunded = saTakerGets;
4291 }
4292 else
4293 {
4294 // Only provide, if not fully funded.
4295
4296 saTakerGetsFunded = saOwnerFundsLimit;
4297
4298 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4299 std::min(
4300 saTakerPays,
4301 multiply(
4302 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4303 .setJson(jvOffer[jss::taker_pays_funded]);
4304 }
4305
4306 STAmount saOwnerPays = (parityRate == offerRate)
4307 ? saTakerGetsFunded
4308 : std::min(
4309 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4310
4311 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4312
4313 // Include all offers funded and unfunded
4314 Json::Value& jvOf = jvOffers.append(jvOffer);
4315 jvOf[jss::quality] = saDirRate.getText();
4316
4317 if (firstOwnerOffer)
4318 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4319 }
4320 else
4321 {
4322 JLOG(m_journal.warn()) << "Missing offer";
4323 }
4324
4325 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4326 {
4327 bDirectAdvance = true;
4328 }
4329 else
4330 {
4331 JLOG(m_journal.trace())
4332 << "getBookPage: offerIndex=" << offerIndex;
4333 }
4334 }
4335 }
4336
4337 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4338 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4339}
4340
4341#else
4342
4343// This is the new code that uses the book iterators
4344// It has temporarily been disabled
4345
4346void
4349 Book const& book,
4350 AccountID const& uTakerID,
4351 bool const bProof,
4352 unsigned int iLimit,
4353 Json::Value const& jvMarker,
4354 Json::Value& jvResult)
4355{
4356 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4357
4359
4360 MetaView lesActive(lpLedger, tapNONE, true);
4361 OrderBookIterator obIterator(lesActive, book);
4362
4363 auto const rate = transferRate(lesActive, book.out.account);
4364
4365 const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4366 lesActive.isGlobalFrozen(book.in.account);
4367
4368 while (iLimit-- > 0 && obIterator.nextOffer())
4369 {
4370 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4371 if (sleOffer)
4372 {
4373 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4374 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4375 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4376 STAmount saDirRate = obIterator.getCurrentRate();
4377 STAmount saOwnerFunds;
4378
4379 if (book.out.account == uOfferOwnerID)
4380 {
4381 // If offer is selling issuer's own IOUs, it is fully funded.
4382 saOwnerFunds = saTakerGets;
4383 }
4384 else if (bGlobalFreeze)
4385 {
4386 // If either asset is globally frozen, consider all offers
4387 // that aren't ours to be totally unfunded
4388 saOwnerFunds.clear(book.out);
4389 }
4390 else
4391 {
4392 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4393
4394 if (umBalanceEntry != umBalance.end())
4395 {
4396 // Found in running balance table.
4397
4398 saOwnerFunds = umBalanceEntry->second;
4399 }
4400 else
4401 {
4402 // Did not find balance in table.
4403
4404 saOwnerFunds = lesActive.accountHolds(
4405 uOfferOwnerID,
4406 book.out.currency,
4407 book.out.account,
4409
4410 if (saOwnerFunds.isNegative())
4411 {
4412 // Treat negative funds as zero.
4413
4414 saOwnerFunds.zero();
4415 }
4416 }
4417 }
4418
4419 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4420
4421 STAmount saTakerGetsFunded;
4422 STAmount saOwnerFundsLimit = saOwnerFunds;
4423 Rate offerRate = parityRate;
4424
4425 if (rate != parityRate
4426 // Have a tranfer fee.
4427 && uTakerID != book.out.account
4428 // Not taking offers of own IOUs.
4429 && book.out.account != uOfferOwnerID)
4430 // Offer owner not issuing ownfunds
4431 {
4432 // Need to charge a transfer fee to offer owner.
4433 offerRate = rate;
4434 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4435 }
4436
4437 if (saOwnerFundsLimit >= saTakerGets)
4438 {
4439 // Sufficient funds no shenanigans.
4440 saTakerGetsFunded = saTakerGets;
4441 }
4442 else
4443 {
4444 // Only provide, if not fully funded.
4445 saTakerGetsFunded = saOwnerFundsLimit;
4446
4447 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4448
4449 // TOOD(tom): The result of this expression is not used - what's
4450 // going on here?
4451 std::min(
4452 saTakerPays,
4453 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4454 .setJson(jvOffer[jss::taker_pays_funded]);
4455 }
4456
4457 STAmount saOwnerPays = (parityRate == offerRate)
4458 ? saTakerGetsFunded
4459 : std::min(
4460 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4461
4462 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4463
4464 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4465 {
4466 // Only provide funded offers and offers of the taker.
4467 Json::Value& jvOf = jvOffers.append(jvOffer);
4468 jvOf[jss::quality] = saDirRate.getText();
4469 }
4470 }
4471 }
4472
4473 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4474 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4475}
4476
4477#endif
4478
4479inline void
4481{
4482 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4483 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4485 counters[static_cast<std::size_t>(mode)].dur += current;
4486
4489 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4490 .dur.count());
4492 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4493 .dur.count());
4495 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4497 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4498 .dur.count());
4500 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4501
4503 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4504 .transitions);
4506 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4507 .transitions);
4509 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4511 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4512 .transitions);
4514 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4515}
4516
4517void
4519{
4520 auto now = std::chrono::steady_clock::now();
4521
4522 std::lock_guard lock(mutex_);
4523 ++counters_[static_cast<std::size_t>(om)].transitions;
4524 if (om == OperatingMode::FULL &&
4525 counters_[static_cast<std::size_t>(om)].transitions == 1)
4526 {
4527 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4528 now - processStart_)
4529 .count();
4530 }
4531 counters_[static_cast<std::size_t>(mode_)].dur +=
4532 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4533
4534 mode_ = om;
4535 start_ = now;
4536}
4537
4538void
4540{
4541 auto [counters, mode, start, initialSync] = getCounterData();
4542 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4544 counters[static_cast<std::size_t>(mode)].dur += current;
4545
4546 obj[jss::state_accounting] = Json::objectValue;
4548 i <= static_cast<std::size_t>(OperatingMode::FULL);
4549 ++i)
4550 {
4551 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4552 auto& state = obj[jss::state_accounting][states_[i]];
4553 state[jss::transitions] = std::to_string(counters[i].transitions);
4554 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4555 }
4556 obj[jss::server_state_duration_us] = std::to_string(current.count());
4557 if (initialSync)
4558 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4559}
4560
4561//------------------------------------------------------------------------------
4562
4565 Application& app,
4567 bool standalone,
4568 std::size_t minPeerCount,
4569 bool startvalid,
4570 JobQueue& job_queue,
4572 ValidatorKeys const& validatorKeys,
4573 boost::asio::io_service& io_svc,
4574 beast::Journal journal,
4575 beast::insight::Collector::ptr const& collector)
4576{
4577 return std::make_unique<NetworkOPsImp>(
4578 app,
4579 clock,
4580 standalone,
4581 minPeerCount,
4582 startvalid,
4583 job_queue,
4585 validatorKeys,
4586 io_svc,
4587 journal,
4588 collector);
4589}
4590
4591} // namespace ripple
T back_inserter(T... args)
T begin(T... args)
Decorator for streaming out compact json.
Definition: json_writer.h:317
Lightweight wrapper to tag static string.
Definition: json_value.h:61
Represents a JSON value.
Definition: json_value.h:147
Json::UInt UInt
Definition: json_value.h:154
Value get(UInt index, const Value &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Definition: json_value.cpp:841
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:891
bool isMember(const char *key) const
Return true if the object has a member named key.
Definition: json_value.cpp:943
A generic endpoint for log messages.
Definition: Journal.h:59
Stream error() const
Definition: Journal.h:335
Stream debug() const
Definition: Journal.h:317
Stream info() const
Definition: Journal.h:323
Stream trace() const
Severity stream access functions.
Definition: Journal.h:311
Stream warn() const
Definition: Journal.h:329
A metric for measuring an integral value.
Definition: Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition: Gauge.h:68
A reference to a handler for performing polled collection.
Definition: Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition: Book.h:34
Issue in
Definition: Book.h:36
Issue out
Definition: Book.h:37
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition: ClusterNode.h:45
std::uint32_t getLoadFee() const
Definition: ClusterNode.h:51
NetClock::time_point getReportTime() const
Definition: ClusterNode.h:57
PublicKey const & identity() const
Definition: ClusterNode.h:63
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:48
std::string SERVER_DOMAIN
Definition: Config.h:286
std::size_t NODE_SIZE
Definition: Config.h:220
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition: Config.h:167
int RELAY_UNTRUSTED_VALIDATIONS
Definition: Config.h:176
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition: InfoSub.h:54
AccountID account
Definition: Issue.h:39
Currency currency
Definition: Issue.h:38
A pool of threads to perform work.
Definition: JobQueue.h:56
Json::Value getJson(int c=0)
Definition: JobQueue.cpp:212
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
Definition: LedgerMaster.h:266
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:80
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
Definition: LoadFeeTrack.h:46
std::uint32_t getClusterFee() const
Definition: LoadFeeTrack.h:83
std::uint32_t getLocalFee() const
Definition: LoadFeeTrack.h:76
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:90
std::uint32_t getRemoteFee() const
Definition: LoadFeeTrack.h:69
std::uint32_t getLoadFactor() const
Definition: LoadFeeTrack.h:96
Manages load sources.
Definition: LoadManager.h:46
void resetDeadlockDetector()
Reset the deadlock detection timer.
Definition: LoadManager.cpp:63
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
Definition: NetworkOPs.cpp:141
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
Definition: NetworkOPs.cpp:151
std::chrono::steady_clock::time_point start_
Definition: NetworkOPs.cpp:153
static std::array< Json::StaticString const, 5 > const states_
Definition: NetworkOPs.cpp:157
std::chrono::steady_clock::time_point const processStart_
Definition: NetworkOPs.cpp:155
Transaction with input flags and results to be applied in batches.
Definition: NetworkOPs.cpp:92
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
Definition: NetworkOPs.cpp:101
std::shared_ptr< Transaction > const transaction
Definition: NetworkOPs.cpp:94
boost::asio::steady_timer accountHistoryTxTimer_
Definition: NetworkOPs.cpp:731
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
Definition: NetworkOPs.cpp:863
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::vector< TransactionStatus > mTransactions
Definition: NetworkOPs.cpp:775
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Definition: NetworkOPs.cpp:721
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
Definition: NetworkOPs.cpp:733
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
Definition: NetworkOPs.cpp:881
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
Definition: NetworkOPs.cpp:729
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
Definition: NetworkOPs.cpp:118
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
Definition: NetworkOPs.cpp:717
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
Definition: NetworkOPs.cpp:262
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Definition: NetworkOPs.cpp:744
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, const bool bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
Definition: NetworkOPs.cpp:730
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
Definition: NetworkOPs.cpp:124
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:224
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
Definition: NetworkOPs.cpp:724
beast::Journal m_journal
Definition: NetworkOPs.cpp:715
SubInfoMapType mSubAccount
Definition: NetworkOPs.cpp:739
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
Definition: NetworkOPs.cpp:779
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
Definition: NetworkOPs.cpp:996
bool unsubValidations(std::uint64_t uListener) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
Definition: NetworkOPs.cpp:728
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
Definition: NetworkOPs.cpp:934
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
Definition: NetworkOPs.cpp:759
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
Definition: NetworkOPs.cpp:769
std::atomic< bool > unlBlocked_
Definition: NetworkOPs.cpp:726
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
Definition: NetworkOPs.cpp:719
std::atomic< bool > needNetworkLedger_
Definition: NetworkOPs.cpp:723
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
Definition: NetworkOPs.cpp:777
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
Definition: NetworkOPs.cpp:893
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void endConsensus() override
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
Definition: NetworkOPs.cpp:737
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
Definition: NetworkOPs.cpp:924
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
Definition: NetworkOPs.cpp:761
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
Definition: NetworkOPs.cpp:875
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
Definition: NetworkOPs.cpp:772
void setMode(OperatingMode om) override
void stop() override
Definition: NetworkOPs.cpp:564
void clearNeedNetworkLedger() override
Definition: NetworkOPs.cpp:887
DispatchState mDispatchState
Definition: NetworkOPs.cpp:774
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
Definition: NetworkOPs.cpp:740
bool checkLastClosedLedger(const Overlay::PeerSequence &, uint256 &networkClosed)
void reportFeeChange() override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
Definition: NetworkOPs.cpp:899
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
Definition: NetworkOPs.cpp:780
Application & app_
Definition: NetworkOPs.cpp:714
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
Definition: NetworkOPs.cpp:735
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
Definition: NetworkOPs.cpp:742
std::atomic< bool > amendmentWarned_
Definition: NetworkOPs.cpp:725
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
Definition: NetworkOPs.cpp:905
bool beginConsensus(uint256 const &networkClosed) override
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition: NetworkOPs.h:87
void getCountsJson(Json::Value &obj)
Definition: Database.cpp:266
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:50
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:56
BookListeners::pointer getBookListeners(Book const &)
BookListeners::pointer makeBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, const AcceptedLedgerTx &alTx, MultiApiJson const &jvObj)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
Definition: RCLConsensus.h:51
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
Definition: RCLConsensus.h:443
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Definition: RCLConsensus.h:456
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:44
Represents a set of transactions in RCLConsensus.
Definition: RCLCxTx.h:66
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition: RFC1751.cpp:498
A view into a ledger.
Definition: ReadView.h:55
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition: STAmount.cpp:475
std::string getText() const override
Definition: STAmount.cpp:515
Issue const & issue() const
Definition: STAmount.h:487
std::optional< T > get(std::string const &name) const
Definition: BasicConfig.h:141
std::size_t size() const noexcept
Definition: Serializer.h:72
void const * data() const noexcept
Definition: Serializer.h:78
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition: TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition: TxQ.cpp:1777
static time_point now()
Definition: UptimeClock.cpp:63
Validator keys and manifest as set in configuration file.
Definition: ValidatorKeys.h:37
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition: XRPAmount.h:262
Json::Value jsonClipped() const
Definition: XRPAmount.h:218
iterator begin()
Definition: base_uint.h:135
static constexpr std::size_t size()
Definition: base_uint.h:525
bool isZero() const
Definition: base_uint.h:539
bool isNonZero() const
Definition: base_uint.h:544
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition: scope.h:231
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition: json_value.h:42
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
int Int
Definition: json_forwards.h:26
unsigned int UInt
Definition: json_forwards.h:27
void rngfill(void *buffer, std::size_t bytes, Generator &g)
Definition: rngfill.h:33
std::string const & getVersionString()
Server version.
Definition: BuildInfo.cpp:65
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition: BookChanges.h:47
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition: Indexes.cpp:160
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition: Indexes.cpp:356
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition: Indexes.cpp:250
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:30
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:106
STAmount divide(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:87
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition: STTx.cpp:604
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition: View.cpp:442
@ fhZERO_IF_FROZEN
Definition: View.h:80
@ fhIGNORE_FREEZE
Definition: View.h:80
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition: View.cpp:136
std::uint64_t getQuality(uint256 const &uBase)
Definition: Indexes.cpp:125
@ INCLUDED
Definition: Transaction.h:49
@ OBSOLETE
Definition: Transaction.h:54
@ INVALID
Definition: Transaction.h:48
error_code_i
Definition: ErrorCodes.h:40
@ rpcSUCCESS
Definition: ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition: ErrorCodes.h:84
@ rpcINTERNAL
Definition: ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
Definition: SecretKey.cpp:351
auto constexpr muldiv_max
Definition: mulDiv.h:29
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition: LocalTxs.cpp:197
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition: View.cpp:650
STAmount amountFromQuality(std::uint64_t rate)
Definition: STAmount.cpp:822
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition: ErrorCodes.h:167
@ warnRPC_UNSUPPORTED_MAJORITY
Definition: ErrorCodes.h:165
@ warnRPC_AMENDMENT_BLOCKED
Definition: ErrorCodes.h:166
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition: NetworkOPs.h:66
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:47
bool isTesSuccess(TER x)
Definition: TER.h:656
bool isTerRetry(TER x)
Definition: TER.h:650
AccountID calcAccountID(PublicKey const &pk)
Definition: AccountID.cpp:160
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Definition: csprng.cpp:99
Json::Value rpcError(int iError)
Definition: RPCErr.cpp:29
@ tefPAST_SEQ
Definition: TER.h:175
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
Definition: NetworkOPs.cpp:844
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
bool isTemMalformed(TER x)
Definition: TER.h:638
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition: View.cpp:147
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition: ApiVersion.h:102
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:75
@ tesSUCCESS
Definition: TER.h:242
uint256 getQualityNext(uint256 const &uBase)
Definition: Indexes.cpp:117
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition: View.cpp:308
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition: ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition: chrono.h:93
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:629
FeeSetup setup_FeeVote(Section const &section)
Definition: Config.cpp:1090
Number root(Number f, unsigned d)
Definition: Number.cpp:630
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Definition: mulDiv.cpp:27
ApplyFlags
Definition: ApplyView.h:30
@ tapFAIL_HARD
Definition: ApplyView.h:35
@ tapUNLIMITED
Definition: ApplyView.h:42
@ tapNONE
Definition: ApplyView.h:31
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:37
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition: Seed.cpp:69
@ terQUEUED
Definition: TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition: TER.cpp:236
@ jtNETOP_CLUSTER
Definition: Job.h:75
@ jtCLIENT_FEE_CHANGE
Definition: Job.h:47
@ jtTRANSACTION
Definition: Job.h:62
@ jtTXN_PROC
Definition: Job.h:82
@ jtCLIENT_CONSENSUS
Definition: Job.h:48
@ jtBATCH
Definition: Job.h:65
@ jtCLIENT_ACCT_HIST
Definition: Job.h:49
uint256 getBookBase(Book const &book)
Definition: Indexes.cpp:98
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition: View.cpp:173
static std::uint32_t trunc32(std::uint64_t v)
@ temBAD_SIGNATURE
Definition: TER.h:105
static auto const genesisAccountId
Definition: NetworkOPs.cpp:857
STL namespace.
T ref(T... args)
T reset(T... args)
T set_intersection(T... args)
std::string serialized
The manifest in serialized form.
Definition: Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition: Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition: Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition: Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Definition: Manifest.h:86
Server fees published on server subscription.
Definition: NetworkOPs.cpp:201
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
Definition: NetworkOPs.cpp:220
bool operator==(ServerFeeSummary const &b) const
Definition: NetworkOPs.cpp:212
beast::insight::Gauge full_transitions
Definition: NetworkOPs.cpp:831
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:786
beast::insight::Hook hook
Definition: NetworkOPs.cpp:820
beast::insight::Gauge connected_duration
Definition: NetworkOPs.cpp:822
beast::insight::Gauge tracking_duration
Definition: NetworkOPs.cpp:824
beast::insight::Gauge connected_transitions
Definition: NetworkOPs.cpp:828
beast::insight::Gauge disconnected_transitions
Definition: NetworkOPs.cpp:827
beast::insight::Gauge syncing_duration
Definition: NetworkOPs.cpp:823
beast::insight::Gauge tracking_transitions
Definition: NetworkOPs.cpp:830
beast::insight::Gauge full_duration
Definition: NetworkOPs.cpp:825
beast::insight::Gauge disconnected_duration
Definition: NetworkOPs.cpp:821
beast::insight::Gauge syncing_transitions
Definition: NetworkOPs.cpp:829
SubAccountHistoryIndex(AccountID const &accountId)
Definition: NetworkOPs.cpp:678
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:697
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:692
Represents a transfer rate.
Definition: Rate.h:38
Data format for exchanging consumption information across peers.
Definition: Gossip.h:30
std::vector< Item > items
Definition: Gossip.h:42
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition: TxQ.h:162
void set(const char *key, auto const &v)
Definition: MultiApiJson.h:83
IsMemberResult isMember(const char *key) const
Definition: MultiApiJson.h:94
Select all peers (except optional excluded) that are in our cluster.
Definition: predicates.h:137
Sends a message to all peers.
Definition: predicates.h:32
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)