rippled
Loading...
Searching...
No Matches
NetworkOPs.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLConsensus.h>
21#include <xrpld/app/consensus/RCLValidations.h>
22#include <xrpld/app/ledger/AcceptedLedger.h>
23#include <xrpld/app/ledger/InboundLedgers.h>
24#include <xrpld/app/ledger/LedgerMaster.h>
25#include <xrpld/app/ledger/LedgerToJson.h>
26#include <xrpld/app/ledger/LocalTxs.h>
27#include <xrpld/app/ledger/OpenLedger.h>
28#include <xrpld/app/ledger/OrderBookDB.h>
29#include <xrpld/app/ledger/TransactionMaster.h>
30#include <xrpld/app/main/LoadManager.h>
31#include <xrpld/app/misc/AmendmentTable.h>
32#include <xrpld/app/misc/DeliverMax.h>
33#include <xrpld/app/misc/HashRouter.h>
34#include <xrpld/app/misc/LoadFeeTrack.h>
35#include <xrpld/app/misc/NetworkOPs.h>
36#include <xrpld/app/misc/Transaction.h>
37#include <xrpld/app/misc/TxQ.h>
38#include <xrpld/app/misc/ValidatorKeys.h>
39#include <xrpld/app/misc/ValidatorList.h>
40#include <xrpld/app/misc/detail/AccountTxPaging.h>
41#include <xrpld/app/rdb/backend/SQLiteDatabase.h>
42#include <xrpld/app/tx/apply.h>
43#include <xrpld/consensus/Consensus.h>
44#include <xrpld/consensus/ConsensusParms.h>
45#include <xrpld/overlay/Cluster.h>
46#include <xrpld/overlay/Overlay.h>
47#include <xrpld/overlay/predicates.h>
48#include <xrpld/perflog/PerfLog.h>
49#include <xrpld/rpc/BookChanges.h>
50#include <xrpld/rpc/DeliveredAmount.h>
51#include <xrpld/rpc/MPTokenIssuanceID.h>
52#include <xrpld/rpc/ServerHandler.h>
53#include <xrpl/basics/UptimeClock.h>
54#include <xrpl/basics/mulDiv.h>
55#include <xrpl/basics/safe_cast.h>
56#include <xrpl/basics/scope.h>
57#include <xrpl/beast/rfc2616.h>
58#include <xrpl/beast/utility/rngfill.h>
59#include <xrpl/crypto/RFC1751.h>
60#include <xrpl/crypto/csprng.h>
61#include <xrpl/json/to_string.h>
62#include <xrpl/protocol/BuildInfo.h>
63#include <xrpl/protocol/Feature.h>
64#include <xrpl/protocol/MultiApiJson.h>
65#include <xrpl/protocol/RPCErr.h>
66#include <xrpl/protocol/STParsedJSON.h>
67#include <xrpl/protocol/jss.h>
68#include <xrpl/resource/Fees.h>
69#include <xrpl/resource/ResourceManager.h>
70#include <boost/asio/ip/host_name.hpp>
71#include <boost/asio/steady_timer.hpp>
72
73#include <algorithm>
74#include <exception>
75#include <mutex>
76#include <optional>
77#include <set>
78#include <string>
79#include <tuple>
80#include <unordered_map>
81#include <utility>
82
83namespace ripple {
84
85class NetworkOPsImp final : public NetworkOPs
86{
92 {
93 public:
95 bool const admin;
96 bool const local;
98 bool applied = false;
100
103 bool a,
104 bool l,
105 FailHard f)
106 : transaction(t), admin(a), local(l), failType(f)
107 {
108 XRPL_ASSERT(
110 "ripple::NetworkOPsImp::TransactionStatus::TransactionStatus : "
111 "valid inputs");
112 }
113 };
114
118 enum class DispatchState : unsigned char {
119 none,
120 scheduled,
121 running,
122 };
123
125
141 {
142 struct Counters
143 {
144 explicit Counters() = default;
145
148 };
149
153 std::chrono::steady_clock::time_point start_ =
155 std::chrono::steady_clock::time_point const processStart_ = start_;
158
159 public:
161 {
163 .transitions = 1;
164 }
165
172 void
174
180 void
181 json(Json::Value& obj) const;
182
184 {
186 decltype(mode_) mode;
187 decltype(start_) start;
189 };
190
193 {
196 }
197 };
198
201 {
202 ServerFeeSummary() = default;
203
205 XRPAmount fee,
206 TxQ::Metrics&& escalationMetrics,
207 LoadFeeTrack const& loadFeeTrack);
208 bool
209 operator!=(ServerFeeSummary const& b) const;
210
211 bool
213 {
214 return !(*this != b);
215 }
216
221 };
222
223public:
225 Application& app,
227 bool standalone,
228 std::size_t minPeerCount,
229 bool start_valid,
230 JobQueue& job_queue,
232 ValidatorKeys const& validatorKeys,
233 boost::asio::io_service& io_svc,
234 beast::Journal journal,
235 beast::insight::Collector::ptr const& collector)
236 : app_(app)
237 , m_journal(journal)
240 , heartbeatTimer_(io_svc)
241 , clusterTimer_(io_svc)
242 , accountHistoryTxTimer_(io_svc)
243 , mConsensus(
244 app,
246 setup_FeeVote(app_.config().section("voting")),
247 app_.logs().journal("FeeVote")),
249 *m_localTX,
250 app.getInboundTransactions(),
251 beast::get_abstract_clock<std::chrono::steady_clock>(),
252 validatorKeys,
253 app_.logs().journal("LedgerConsensus"))
255 , m_job_queue(job_queue)
256 , m_standalone(standalone)
257 , minPeerCount_(start_valid ? 0 : minPeerCount)
258 , m_stats(std::bind(&NetworkOPsImp::collect_metrics, this), collector)
259 {
260 }
261
262 ~NetworkOPsImp() override
263 {
264 // This clear() is necessary to ensure the shared_ptrs in this map get
265 // destroyed NOW because the objects in this map invoke methods on this
266 // class when they are destroyed
268 }
269
270public:
272 getOperatingMode() const override;
273
275 strOperatingMode(OperatingMode const mode, bool const admin) const override;
276
278 strOperatingMode(bool const admin = false) const override;
279
280 //
281 // Transaction operations.
282 //
283
284 // Must complete immediately.
285 void
287
288 void
290 std::shared_ptr<Transaction>& transaction,
291 bool bUnlimited,
292 bool bLocal,
293 FailHard failType) override;
294
303 void
306 bool bUnlimited,
307 FailHard failType);
308
318 void
321 bool bUnlimited,
322 FailHard failtype);
323
327 void
329
335 void
337
338 //
339 // Owner functions.
340 //
341
345 AccountID const& account) override;
346
347 //
348 // Book functions.
349 //
350
351 void
354 Book const&,
355 AccountID const& uTakerID,
356 const bool bProof,
357 unsigned int iLimit,
358 Json::Value const& jvMarker,
359 Json::Value& jvResult) override;
360
361 // Ledger proposal/close functions.
362 bool
364
365 bool
368 std::string const& source) override;
369
370 void
371 mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire) override;
372
373 // Network state machine.
374
375 // Used for the "jump" case.
376private:
377 void
379 bool
381
382public:
383 bool
384 beginConsensus(uint256 const& networkClosed) override;
385 void
386 endConsensus() override;
387 void
388 setStandAlone() override;
389
393 void
394 setStateTimer() override;
395
396 void
397 setNeedNetworkLedger() override;
398 void
399 clearNeedNetworkLedger() override;
400 bool
401 isNeedNetworkLedger() override;
402 bool
403 isFull() override;
404
405 void
406 setMode(OperatingMode om) override;
407
408 bool
409 isBlocked() override;
410 bool
411 isAmendmentBlocked() override;
412 void
413 setAmendmentBlocked() override;
414 bool
415 isAmendmentWarned() override;
416 void
417 setAmendmentWarned() override;
418 void
419 clearAmendmentWarned() override;
420 bool
421 isUNLBlocked() override;
422 void
423 setUNLBlocked() override;
424 void
425 clearUNLBlocked() override;
426 void
427 consensusViewChange() override;
428
430 getConsensusInfo() override;
432 getServerInfo(bool human, bool admin, bool counters) override;
433 void
434 clearLedgerFetch() override;
436 getLedgerFetchInfo() override;
439 std::optional<std::chrono::milliseconds> consensusDelay) override;
440 void
441 reportFeeChange() override;
442 void
444
445 void
446 updateLocalTx(ReadView const& view) override;
448 getLocalTxCount() override;
449
450 //
451 // Monitoring: publisher side.
452 //
453 void
454 pubLedger(std::shared_ptr<ReadView const> const& lpAccepted) override;
455 void
458 std::shared_ptr<STTx const> const& transaction,
459 TER result) override;
460 void
461 pubValidation(std::shared_ptr<STValidation> const& val) override;
462
463 //--------------------------------------------------------------------------
464 //
465 // InfoSub::Source.
466 //
467 void
469 InfoSub::ref ispListener,
470 hash_set<AccountID> const& vnaAccountIDs,
471 bool rt) override;
472 void
474 InfoSub::ref ispListener,
475 hash_set<AccountID> const& vnaAccountIDs,
476 bool rt) override;
477
478 // Just remove the subscription from the tracking
479 // not from the InfoSub. Needed for InfoSub destruction
480 void
482 std::uint64_t seq,
483 hash_set<AccountID> const& vnaAccountIDs,
484 bool rt) override;
485
487 subAccountHistory(InfoSub::ref ispListener, AccountID const& account)
488 override;
489 void
491 InfoSub::ref ispListener,
492 AccountID const& account,
493 bool historyOnly) override;
494
495 void
497 std::uint64_t seq,
498 AccountID const& account,
499 bool historyOnly) override;
500
501 bool
502 subLedger(InfoSub::ref ispListener, Json::Value& jvResult) override;
503 bool
504 unsubLedger(std::uint64_t uListener) override;
505
506 bool
507 subBookChanges(InfoSub::ref ispListener) override;
508 bool
509 unsubBookChanges(std::uint64_t uListener) override;
510
511 bool
512 subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin)
513 override;
514 bool
515 unsubServer(std::uint64_t uListener) override;
516
517 bool
518 subBook(InfoSub::ref ispListener, Book const&) override;
519 bool
520 unsubBook(std::uint64_t uListener, Book const&) override;
521
522 bool
523 subManifests(InfoSub::ref ispListener) override;
524 bool
525 unsubManifests(std::uint64_t uListener) override;
526 void
527 pubManifest(Manifest const&) override;
528
529 bool
530 subTransactions(InfoSub::ref ispListener) override;
531 bool
532 unsubTransactions(std::uint64_t uListener) override;
533
534 bool
535 subRTTransactions(InfoSub::ref ispListener) override;
536 bool
537 unsubRTTransactions(std::uint64_t uListener) override;
538
539 bool
540 subValidations(InfoSub::ref ispListener) override;
541 bool
542 unsubValidations(std::uint64_t uListener) override;
543
544 bool
545 subPeerStatus(InfoSub::ref ispListener) override;
546 bool
547 unsubPeerStatus(std::uint64_t uListener) override;
548 void
549 pubPeerStatus(std::function<Json::Value(void)> const&) override;
550
551 bool
552 subConsensus(InfoSub::ref ispListener) override;
553 bool
554 unsubConsensus(std::uint64_t uListener) override;
555
557 findRpcSub(std::string const& strUrl) override;
559 addRpcSub(std::string const& strUrl, InfoSub::ref) override;
560 bool
561 tryRemoveRpcSub(std::string const& strUrl) override;
562
563 void
564 stop() override
565 {
566 {
567 boost::system::error_code ec;
568 heartbeatTimer_.cancel(ec);
569 if (ec)
570 {
571 JLOG(m_journal.error())
572 << "NetworkOPs: heartbeatTimer cancel error: "
573 << ec.message();
574 }
575
576 ec.clear();
577 clusterTimer_.cancel(ec);
578 if (ec)
579 {
580 JLOG(m_journal.error())
581 << "NetworkOPs: clusterTimer cancel error: "
582 << ec.message();
583 }
584
585 ec.clear();
586 accountHistoryTxTimer_.cancel(ec);
587 if (ec)
588 {
589 JLOG(m_journal.error())
590 << "NetworkOPs: accountHistoryTxTimer cancel error: "
591 << ec.message();
592 }
593 }
594 // Make sure that any waitHandlers pending in our timers are done.
595 using namespace std::chrono_literals;
596 waitHandlerCounter_.join("NetworkOPs", 1s, m_journal);
597 }
598
599 void
600 stateAccounting(Json::Value& obj) override;
601
602private:
603 void
604 setTimer(
605 boost::asio::steady_timer& timer,
606 std::chrono::milliseconds const& expiry_time,
607 std::function<void()> onExpire,
608 std::function<void()> onError);
609 void
611 void
613 void
615 void
617
619 transJson(
620 std::shared_ptr<STTx const> const& transaction,
621 TER result,
622 bool validated,
625
626 void
629 AcceptedLedgerTx const& transaction,
630 bool last);
631
632 void
635 AcceptedLedgerTx const& transaction,
636 bool last);
637
638 void
641 std::shared_ptr<STTx const> const& transaction,
642 TER result);
643
644 void
645 pubServer();
646 void
648
650 getHostId(bool forAdmin);
651
652private:
656
657 /*
658 * With a validated ledger to separate history and future, the node
659 * streams historical txns with negative indexes starting from -1,
660 * and streams future txns starting from index 0.
661 * The SubAccountHistoryIndex struct maintains these indexes.
662 * It also has a flag stopHistorical_ for stopping streaming
663 * the historical txns.
664 */
666 {
668 // forward
670 // separate backward and forward
672 // history, backward
677
679 : accountId_(accountId)
680 , forwardTxIndex_(0)
683 , historyTxIndex_(-1)
684 , haveHistorical_(false)
685 , stopHistorical_(false)
686 {
687 }
688 };
690 {
693 };
695 {
698 };
701
705 void
709 void
711 void
713
716
718
720
722
727
729 boost::asio::steady_timer heartbeatTimer_;
730 boost::asio::steady_timer clusterTimer_;
731 boost::asio::steady_timer accountHistoryTxTimer_;
732
734
736
738
741
743
745
746 enum SubTypes {
747 sLedger, // Accepted ledgers.
748 sManifests, // Received validator manifests.
749 sServer, // When server changes connectivity state.
750 sTransactions, // All accepted transactions.
751 sRTTransactions, // All proposed and accepted transactions.
752 sValidations, // Received validations.
753 sPeerStatus, // Peer status changes.
754 sConsensusPhase, // Consensus phase
755 sBookChanges, // Per-ledger order book changes
756 sLastEntry // Any new entry must be ADDED ABOVE this one
757 };
758
760
762
764
765 // Whether we are in standalone mode.
766 bool const m_standalone;
767
768 // The number of nodes that we need to consider ourselves connected.
770
771 // Transaction batching.
776
778
781
782private:
783 struct Stats
784 {
785 template <class Handler>
787 Handler const& handler,
788 beast::insight::Collector::ptr const& collector)
789 : hook(collector->make_hook(handler))
790 , disconnected_duration(collector->make_gauge(
791 "State_Accounting",
792 "Disconnected_duration"))
793 , connected_duration(collector->make_gauge(
794 "State_Accounting",
795 "Connected_duration"))
797 collector->make_gauge("State_Accounting", "Syncing_duration"))
798 , tracking_duration(collector->make_gauge(
799 "State_Accounting",
800 "Tracking_duration"))
802 collector->make_gauge("State_Accounting", "Full_duration"))
803 , disconnected_transitions(collector->make_gauge(
804 "State_Accounting",
805 "Disconnected_transitions"))
806 , connected_transitions(collector->make_gauge(
807 "State_Accounting",
808 "Connected_transitions"))
809 , syncing_transitions(collector->make_gauge(
810 "State_Accounting",
811 "Syncing_transitions"))
812 , tracking_transitions(collector->make_gauge(
813 "State_Accounting",
814 "Tracking_transitions"))
816 collector->make_gauge("State_Accounting", "Full_transitions"))
817 {
818 }
819
826
832 };
833
834 std::mutex m_statsMutex; // Mutex to lock m_stats
836
837private:
838 void
840};
841
842//------------------------------------------------------------------------------
843
845 {"disconnected", "connected", "syncing", "tracking", "full"}};
846
848
856
857static auto const genesisAccountId = calcAccountID(
859 .first);
860
861//------------------------------------------------------------------------------
862inline OperatingMode
864{
865 return mMode;
866}
867
868inline std::string
869NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const
870{
871 return strOperatingMode(mMode, admin);
872}
873
874inline void
876{
878}
879
880inline void
882{
883 needNetworkLedger_ = true;
884}
885
886inline void
888{
889 needNetworkLedger_ = false;
890}
891
892inline bool
894{
895 return needNetworkLedger_;
896}
897
898inline bool
900{
902}
903
906{
907 static std::string const hostname = boost::asio::ip::host_name();
908
909 if (forAdmin)
910 return hostname;
911
912 // For non-admin uses hash the node public key into a
913 // single RFC1751 word:
914 static std::string const shroudedHostId = [this]() {
915 auto const& id = app_.nodeIdentity();
916
917 return RFC1751::getWordFromBlob(id.first.data(), id.first.size());
918 }();
919
920 return shroudedHostId;
921}
922
923void
925{
927
928 // Only do this work if a cluster is configured
929 if (app_.cluster().size() != 0)
931}
932
933void
935 boost::asio::steady_timer& timer,
936 const std::chrono::milliseconds& expiry_time,
937 std::function<void()> onExpire,
938 std::function<void()> onError)
939{
940 // Only start the timer if waitHandlerCounter_ is not yet joined.
941 if (auto optionalCountedHandler = waitHandlerCounter_.wrap(
942 [this, onExpire, onError](boost::system::error_code const& e) {
943 if ((e.value() == boost::system::errc::success) &&
944 (!m_job_queue.isStopped()))
945 {
946 onExpire();
947 }
948 // Recover as best we can if an unexpected error occurs.
949 if (e.value() != boost::system::errc::success &&
950 e.value() != boost::asio::error::operation_aborted)
951 {
952 // Try again later and hope for the best.
953 JLOG(m_journal.error())
954 << "Timer got error '" << e.message()
955 << "'. Restarting timer.";
956 onError();
957 }
958 }))
959 {
960 timer.expires_from_now(expiry_time);
961 timer.async_wait(std::move(*optionalCountedHandler));
962 }
963}
964
965void
966NetworkOPsImp::setHeartbeatTimer()
967{
968 setTimer(
969 heartbeatTimer_,
970 mConsensus.parms().ledgerGRANULARITY,
971 [this]() {
972 m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
973 processHeartbeatTimer();
974 });
975 },
976 [this]() { setHeartbeatTimer(); });
977}
978
979void
980NetworkOPsImp::setClusterTimer()
981{
982 using namespace std::chrono_literals;
983
984 setTimer(
985 clusterTimer_,
986 10s,
987 [this]() {
988 m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
989 processClusterTimer();
990 });
991 },
992 [this]() { setClusterTimer(); });
993}
994
995void
996NetworkOPsImp::setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
997{
998 JLOG(m_journal.debug()) << "Scheduling AccountHistory job for account "
999 << toBase58(subInfo.index_->accountId_);
1000 using namespace std::chrono_literals;
1001 setTimer(
1002 accountHistoryTxTimer_,
1003 4s,
1004 [this, subInfo]() { addAccountHistoryJob(subInfo); },
1005 [this, subInfo]() { setAccountHistoryJobTimer(subInfo); });
1006}
1007
1008void
1009NetworkOPsImp::processHeartbeatTimer()
1010{
1011 {
1012 std::unique_lock lock{app_.getMasterMutex()};
1013
1014 // VFALCO NOTE This is for diagnosing a crash on exit
1015 LoadManager& mgr(app_.getLoadManager());
1017
1018 std::size_t const numPeers = app_.overlay().size();
1019
1020 // do we have sufficient peers? If not, we are disconnected.
1021 if (numPeers < minPeerCount_)
1022 {
1023 if (mMode != OperatingMode::DISCONNECTED)
1024 {
1025 setMode(OperatingMode::DISCONNECTED);
1026 JLOG(m_journal.warn())
1027 << "Node count (" << numPeers << ") has fallen "
1028 << "below required minimum (" << minPeerCount_ << ").";
1029 }
1030
1031 // MasterMutex lock need not be held to call setHeartbeatTimer()
1032 lock.unlock();
1033 // We do not call mConsensus.timerEntry until there are enough
1034 // peers providing meaningful inputs to consensus
1035 setHeartbeatTimer();
1036 return;
1037 }
1038
1039 if (mMode == OperatingMode::DISCONNECTED)
1040 {
1041 setMode(OperatingMode::CONNECTED);
1042 JLOG(m_journal.info())
1043 << "Node count (" << numPeers << ") is sufficient.";
1044 }
1045
1046 // Check if the last validated ledger forces a change between these
1047 // states.
1048 if (mMode == OperatingMode::SYNCING)
1049 setMode(OperatingMode::SYNCING);
1050 else if (mMode == OperatingMode::CONNECTED)
1051 setMode(OperatingMode::CONNECTED);
1052 }
1053
1054 mConsensus.timerEntry(app_.timeKeeper().closeTime());
1055
1056 const ConsensusPhase currPhase = mConsensus.phase();
1057 if (mLastConsensusPhase != currPhase)
1058 {
1059 reportConsensusStateChange(currPhase);
1060 mLastConsensusPhase = currPhase;
1061 }
1062
1063 setHeartbeatTimer();
1064}
1065
1066void
1067NetworkOPsImp::processClusterTimer()
1068{
1069 if (app_.cluster().size() == 0)
1070 return;
1071
1072 using namespace std::chrono_literals;
1073
1074 bool const update = app_.cluster().update(
1075 app_.nodeIdentity().first,
1076 "",
1077 (m_ledgerMaster.getValidatedLedgerAge() <= 4min)
1078 ? app_.getFeeTrack().getLocalFee()
1079 : 0,
1080 app_.timeKeeper().now());
1081
1082 if (!update)
1083 {
1084 JLOG(m_journal.debug()) << "Too soon to send cluster update";
1085 setClusterTimer();
1086 return;
1087 }
1088
1089 protocol::TMCluster cluster;
1090 app_.cluster().for_each([&cluster](ClusterNode const& node) {
1091 protocol::TMClusterNode& n = *cluster.add_clusternodes();
1092 n.set_publickey(toBase58(TokenType::NodePublic, node.identity()));
1093 n.set_reporttime(node.getReportTime().time_since_epoch().count());
1094 n.set_nodeload(node.getLoadFee());
1095 if (!node.name().empty())
1096 n.set_nodename(node.name());
1097 });
1098
1099 Resource::Gossip gossip = app_.getResourceManager().exportConsumers();
1100 for (auto& item : gossip.items)
1101 {
1102 protocol::TMLoadSource& node = *cluster.add_loadsources();
1103 node.set_name(to_string(item.address));
1104 node.set_cost(item.balance);
1105 }
1106 app_.overlay().foreach(send_if(
1107 std::make_shared<Message>(cluster, protocol::mtCLUSTER),
1108 peer_in_cluster()));
1109 setClusterTimer();
1110}
1111
1112//------------------------------------------------------------------------------
1113
1115NetworkOPsImp::strOperatingMode(OperatingMode const mode, bool const admin)
1116 const
1117{
1118 if (mode == OperatingMode::FULL && admin)
1119 {
1120 auto const consensusMode = mConsensus.mode();
1121 if (consensusMode != ConsensusMode::wrongLedger)
1122 {
1123 if (consensusMode == ConsensusMode::proposing)
1124 return "proposing";
1125
1126 if (mConsensus.validating())
1127 return "validating";
1128 }
1129 }
1130
1131 return states_[static_cast<std::size_t>(mode)];
1132}
1133
1134void
1135NetworkOPsImp::submitTransaction(std::shared_ptr<STTx const> const& iTrans)
1136{
1137 if (isNeedNetworkLedger())
1138 {
1139 // Nothing we can do if we've never been in sync
1140 return;
1141 }
1142
1143 // this is an asynchronous interface
1144 auto const trans = sterilize(*iTrans);
1145
1146 auto const txid = trans->getTransactionID();
1147 auto const flags = app_.getHashRouter().getFlags(txid);
1148
1149 if ((flags & SF_BAD) != 0)
1150 {
1151 JLOG(m_journal.warn()) << "Submitted transaction cached bad";
1152 return;
1153 }
1154
1155 try
1156 {
1157 auto const [validity, reason] = checkValidity(
1158 app_.getHashRouter(),
1159 *trans,
1160 m_ledgerMaster.getValidatedRules(),
1161 app_.config());
1162
1163 if (validity != Validity::Valid)
1164 {
1165 JLOG(m_journal.warn())
1166 << "Submitted transaction invalid: " << reason;
1167 return;
1168 }
1169 }
1170 catch (std::exception const& ex)
1171 {
1172 JLOG(m_journal.warn())
1173 << "Exception checking transaction " << txid << ": " << ex.what();
1174
1175 return;
1176 }
1177
1178 std::string reason;
1179
1180 auto tx = std::make_shared<Transaction>(trans, reason, app_);
1181
1182 m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
1183 auto t = tx;
1184 processTransaction(t, false, false, FailHard::no);
1185 });
1186}
1187
1188void
1189NetworkOPsImp::processTransaction(
1190 std::shared_ptr<Transaction>& transaction,
1191 bool bUnlimited,
1192 bool bLocal,
1193 FailHard failType)
1194{
1195 auto ev = m_job_queue.makeLoadEvent(jtTXN_PROC, "ProcessTXN");
1196 auto const newFlags = app_.getHashRouter().getFlags(transaction->getID());
1197
1198 if ((newFlags & SF_BAD) != 0)
1199 {
1200 // cached bad
1201 JLOG(m_journal.warn()) << transaction->getID() << ": cached bad!\n";
1202 transaction->setStatus(INVALID);
1203 transaction->setResult(temBAD_SIGNATURE);
1204 return;
1205 }
1206
1207 // NOTE eahennis - I think this check is redundant,
1208 // but I'm not 100% sure yet.
1209 // If so, only cost is looking up HashRouter flags.
1210 auto const view = m_ledgerMaster.getCurrentLedger();
1211 auto const [validity, reason] = checkValidity(
1212 app_.getHashRouter(),
1213 *transaction->getSTransaction(),
1214 view->rules(),
1215 app_.config());
1216 XRPL_ASSERT(
1217 validity == Validity::Valid,
1218 "ripple::NetworkOPsImp::processTransaction : valid validity");
1219
1220 // Not concerned with local checks at this point.
1221 if (validity == Validity::SigBad)
1222 {
1223 JLOG(m_journal.info()) << "Transaction has bad signature: " << reason;
1224 transaction->setStatus(INVALID);
1225 transaction->setResult(temBAD_SIGNATURE);
1226 app_.getHashRouter().setFlags(transaction->getID(), SF_BAD);
1227 return;
1228 }
1229
1230 // canonicalize can change our pointer
1231 app_.getMasterTransaction().canonicalize(&transaction);
1232
1233 if (bLocal)
1234 doTransactionSync(transaction, bUnlimited, failType);
1235 else
1236 doTransactionAsync(transaction, bUnlimited, failType);
1237}
1238
1239void
1240NetworkOPsImp::doTransactionAsync(
1241 std::shared_ptr<Transaction> transaction,
1242 bool bUnlimited,
1243 FailHard failType)
1244{
1245 std::lock_guard lock(mMutex);
1246
1247 if (transaction->getApplying())
1248 return;
1249
1250 mTransactions.push_back(
1251 TransactionStatus(transaction, bUnlimited, false, failType));
1252 transaction->setApplying();
1253
1254 if (mDispatchState == DispatchState::none)
1255 {
1256 if (m_job_queue.addJob(
1257 jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
1258 {
1259 mDispatchState = DispatchState::scheduled;
1260 }
1261 }
1262}
1263
1264void
1265NetworkOPsImp::doTransactionSync(
1266 std::shared_ptr<Transaction> transaction,
1267 bool bUnlimited,
1268 FailHard failType)
1269{
1270 std::unique_lock<std::mutex> lock(mMutex);
1271
1272 if (!transaction->getApplying())
1273 {
1274 mTransactions.push_back(
1275 TransactionStatus(transaction, bUnlimited, true, failType));
1276 transaction->setApplying();
1277 }
1278
1279 do
1280 {
1281 if (mDispatchState == DispatchState::running)
1282 {
1283 // A batch processing job is already running, so wait.
1284 mCond.wait(lock);
1285 }
1286 else
1287 {
1288 apply(lock);
1289
1290 if (mTransactions.size())
1291 {
1292 // More transactions need to be applied, but by another job.
1293 if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() {
1294 transactionBatch();
1295 }))
1296 {
1297 mDispatchState = DispatchState::scheduled;
1298 }
1299 }
1300 }
1301 } while (transaction->getApplying());
1302}
1303
1304void
1305NetworkOPsImp::transactionBatch()
1306{
1307 std::unique_lock<std::mutex> lock(mMutex);
1308
1309 if (mDispatchState == DispatchState::running)
1310 return;
1311
1312 while (mTransactions.size())
1313 {
1314 apply(lock);
1315 }
1316}
1317
1318void
1319NetworkOPsImp::apply(std::unique_lock<std::mutex>& batchLock)
1320{
1322 std::vector<TransactionStatus> transactions;
1323 mTransactions.swap(transactions);
1324 XRPL_ASSERT(
1325 !transactions.empty(),
1326 "ripple::NetworkOPsImp::apply : non-empty transactions");
1327 XRPL_ASSERT(
1328 mDispatchState != DispatchState::running,
1329 "ripple::NetworkOPsImp::apply : is not running");
1330
1331 mDispatchState = DispatchState::running;
1332
1333 batchLock.unlock();
1334
1335 {
1336 std::unique_lock masterLock{app_.getMasterMutex(), std::defer_lock};
1337 bool changed = false;
1338 {
1339 std::unique_lock ledgerLock{
1340 m_ledgerMaster.peekMutex(), std::defer_lock};
1341 std::lock(masterLock, ledgerLock);
1342
1343 app_.openLedger().modify([&](OpenView& view, beast::Journal j) {
1344 for (TransactionStatus& e : transactions)
1345 {
1346 // we check before adding to the batch
1347 ApplyFlags flags = tapNONE;
1348 if (e.admin)
1349 flags |= tapUNLIMITED;
1350
1351 if (e.failType == FailHard::yes)
1352 flags |= tapFAIL_HARD;
1353
1354 auto const result = app_.getTxQ().apply(
1355 app_, view, e.transaction->getSTransaction(), flags, j);
1356 e.result = result.first;
1357 e.applied = result.second;
1358 changed = changed || result.second;
1359 }
1360 return changed;
1361 });
1362 }
1363 if (changed)
1364 reportFeeChange();
1365
1366 std::optional<LedgerIndex> validatedLedgerIndex;
1367 if (auto const l = m_ledgerMaster.getValidatedLedger())
1368 validatedLedgerIndex = l->info().seq;
1369
1370 auto newOL = app_.openLedger().current();
1371 for (TransactionStatus& e : transactions)
1372 {
1373 e.transaction->clearSubmitResult();
1374
1375 if (e.applied)
1376 {
1377 pubProposedTransaction(
1378 newOL, e.transaction->getSTransaction(), e.result);
1379 e.transaction->setApplied();
1380 }
1381
1382 e.transaction->setResult(e.result);
1383
1384 if (isTemMalformed(e.result))
1385 app_.getHashRouter().setFlags(e.transaction->getID(), SF_BAD);
1386
1387#ifdef DEBUG
1388 if (e.result != tesSUCCESS)
1389 {
1390 std::string token, human;
1391
1392 if (transResultInfo(e.result, token, human))
1393 {
1394 JLOG(m_journal.info())
1395 << "TransactionResult: " << token << ": " << human;
1396 }
1397 }
1398#endif
1399
1400 bool addLocal = e.local;
1401
1402 if (e.result == tesSUCCESS)
1403 {
1404 JLOG(m_journal.debug())
1405 << "Transaction is now included in open ledger";
1406 e.transaction->setStatus(INCLUDED);
1407
1408 auto const& txCur = e.transaction->getSTransaction();
1409 auto const txNext = m_ledgerMaster.popAcctTransaction(txCur);
1410 if (txNext)
1411 {
1412 std::string reason;
1413 auto const trans = sterilize(*txNext);
1414 auto t = std::make_shared<Transaction>(trans, reason, app_);
1415 submit_held.emplace_back(t, false, false, FailHard::no);
1416 t->setApplying();
1417 }
1418 }
1419 else if (e.result == tefPAST_SEQ)
1420 {
1421 // duplicate or conflict
1422 JLOG(m_journal.info()) << "Transaction is obsolete";
1423 e.transaction->setStatus(OBSOLETE);
1424 }
1425 else if (e.result == terQUEUED)
1426 {
1427 JLOG(m_journal.debug())
1428 << "Transaction is likely to claim a"
1429 << " fee, but is queued until fee drops";
1430
1431 e.transaction->setStatus(HELD);
1432 // Add to held transactions, because it could get
1433 // kicked out of the queue, and this will try to
1434 // put it back.
1435 m_ledgerMaster.addHeldTransaction(e.transaction);
1436 e.transaction->setQueued();
1437 e.transaction->setKept();
1438 }
1439 else if (isTerRetry(e.result))
1440 {
1441 if (e.failType != FailHard::yes)
1442 {
1443 // transaction should be held
1444 JLOG(m_journal.debug())
1445 << "Transaction should be held: " << e.result;
1446 e.transaction->setStatus(HELD);
1447 m_ledgerMaster.addHeldTransaction(e.transaction);
1448 e.transaction->setKept();
1449 }
1450 }
1451 else
1452 {
1453 JLOG(m_journal.debug())
1454 << "Status other than success " << e.result;
1455 e.transaction->setStatus(INVALID);
1456 }
1457
1458 auto const enforceFailHard =
1459 e.failType == FailHard::yes && !isTesSuccess(e.result);
1460
1461 if (addLocal && !enforceFailHard)
1462 {
1463 m_localTX->push_back(
1464 m_ledgerMaster.getCurrentLedgerIndex(),
1465 e.transaction->getSTransaction());
1466 e.transaction->setKept();
1467 }
1468
1469 if ((e.applied ||
1470 ((mMode != OperatingMode::FULL) &&
1471 (e.failType != FailHard::yes) && e.local) ||
1472 (e.result == terQUEUED)) &&
1473 !enforceFailHard)
1474 {
1475 auto const toSkip =
1476 app_.getHashRouter().shouldRelay(e.transaction->getID());
1477
1478 if (toSkip)
1479 {
1480 protocol::TMTransaction tx;
1481 Serializer s;
1482
1483 e.transaction->getSTransaction()->add(s);
1484 tx.set_rawtransaction(s.data(), s.size());
1485 tx.set_status(protocol::tsCURRENT);
1486 tx.set_receivetimestamp(
1487 app_.timeKeeper().now().time_since_epoch().count());
1488 tx.set_deferred(e.result == terQUEUED);
1489 // FIXME: This should be when we received it
1490 app_.overlay().relay(e.transaction->getID(), tx, *toSkip);
1491 e.transaction->setBroadcast();
1492 }
1493 }
1494
1495 if (validatedLedgerIndex)
1496 {
1497 auto [fee, accountSeq, availableSeq] =
1498 app_.getTxQ().getTxRequiredFeeAndSeq(
1499 *newOL, e.transaction->getSTransaction());
1500 e.transaction->setCurrentLedgerState(
1501 *validatedLedgerIndex, fee, accountSeq, availableSeq);
1502 }
1503 }
1504 }
1505
1506 batchLock.lock();
1507
1508 for (TransactionStatus& e : transactions)
1509 e.transaction->clearApplying();
1510
1511 if (!submit_held.empty())
1512 {
1513 if (mTransactions.empty())
1514 mTransactions.swap(submit_held);
1515 else
1516 for (auto& e : submit_held)
1517 mTransactions.push_back(std::move(e));
1518 }
1519
1520 mCond.notify_all();
1521
1522 mDispatchState = DispatchState::none;
1523}
1524
1525//
1526// Owner functions
1527//
1528
1530NetworkOPsImp::getOwnerInfo(
1532 AccountID const& account)
1533{
1534 Json::Value jvObjects(Json::objectValue);
1535 auto root = keylet::ownerDir(account);
1536 auto sleNode = lpLedger->read(keylet::page(root));
1537 if (sleNode)
1538 {
1539 std::uint64_t uNodeDir;
1540
1541 do
1542 {
1543 for (auto const& uDirEntry : sleNode->getFieldV256(sfIndexes))
1544 {
1545 auto sleCur = lpLedger->read(keylet::child(uDirEntry));
1546 XRPL_ASSERT(
1547 sleCur,
1548 "ripple::NetworkOPsImp::getOwnerInfo : non-null child SLE");
1549
1550 switch (sleCur->getType())
1551 {
1552 case ltOFFER:
1553 if (!jvObjects.isMember(jss::offers))
1554 jvObjects[jss::offers] =
1556
1557 jvObjects[jss::offers].append(
1558 sleCur->getJson(JsonOptions::none));
1559 break;
1560
1561 case ltRIPPLE_STATE:
1562 if (!jvObjects.isMember(jss::ripple_lines))
1563 {
1564 jvObjects[jss::ripple_lines] =
1566 }
1567
1568 jvObjects[jss::ripple_lines].append(
1569 sleCur->getJson(JsonOptions::none));
1570 break;
1571
1572 case ltACCOUNT_ROOT:
1573 case ltDIR_NODE:
1574 default:
1575 UNREACHABLE(
1576 "ripple::NetworkOPsImp::getOwnerInfo : invalid "
1577 "type");
1578 break;
1579 }
1580 }
1581
1582 uNodeDir = sleNode->getFieldU64(sfIndexNext);
1583
1584 if (uNodeDir)
1585 {
1586 sleNode = lpLedger->read(keylet::page(root, uNodeDir));
1587 XRPL_ASSERT(
1588 sleNode,
1589 "ripple::NetworkOPsImp::getOwnerInfo : read next page");
1590 }
1591 } while (uNodeDir);
1592 }
1593
1594 return jvObjects;
1595}
1596
1597//
1598// Other
1599//
1600
1601inline bool
1602NetworkOPsImp::isBlocked()
1603{
1604 return isAmendmentBlocked() || isUNLBlocked();
1605}
1606
1607inline bool
1608NetworkOPsImp::isAmendmentBlocked()
1609{
1610 return amendmentBlocked_;
1611}
1612
1613void
1614NetworkOPsImp::setAmendmentBlocked()
1615{
1616 amendmentBlocked_ = true;
1617 setMode(OperatingMode::CONNECTED);
1618}
1619
1620inline bool
1621NetworkOPsImp::isAmendmentWarned()
1622{
1623 return !amendmentBlocked_ && amendmentWarned_;
1624}
1625
1626inline void
1627NetworkOPsImp::setAmendmentWarned()
1628{
1629 amendmentWarned_ = true;
1630}
1631
1632inline void
1633NetworkOPsImp::clearAmendmentWarned()
1634{
1635 amendmentWarned_ = false;
1636}
1637
1638inline bool
1639NetworkOPsImp::isUNLBlocked()
1640{
1641 return unlBlocked_;
1642}
1643
1644void
1645NetworkOPsImp::setUNLBlocked()
1646{
1647 unlBlocked_ = true;
1648 setMode(OperatingMode::CONNECTED);
1649}
1650
1651inline void
1652NetworkOPsImp::clearUNLBlocked()
1653{
1654 unlBlocked_ = false;
1655}
1656
1657bool
1658NetworkOPsImp::checkLastClosedLedger(
1659 const Overlay::PeerSequence& peerList,
1660 uint256& networkClosed)
1661{
1662 // Returns true if there's an *abnormal* ledger issue, normal changing in
1663 // TRACKING mode should return false. Do we have sufficient validations for
1664 // our last closed ledger? Or do sufficient nodes agree? And do we have no
1665 // better ledger available? If so, we are either tracking or full.
1666
1667 JLOG(m_journal.trace()) << "NetworkOPsImp::checkLastClosedLedger";
1668
1669 auto const ourClosed = m_ledgerMaster.getClosedLedger();
1670
1671 if (!ourClosed)
1672 return false;
1673
1674 uint256 closedLedger = ourClosed->info().hash;
1675 uint256 prevClosedLedger = ourClosed->info().parentHash;
1676 JLOG(m_journal.trace()) << "OurClosed: " << closedLedger;
1677 JLOG(m_journal.trace()) << "PrevClosed: " << prevClosedLedger;
1678
1679 //-------------------------------------------------------------------------
1680 // Determine preferred last closed ledger
1681
1682 auto& validations = app_.getValidations();
1683 JLOG(m_journal.debug())
1684 << "ValidationTrie " << Json::Compact(validations.getJsonTrie());
1685
1686 // Will rely on peer LCL if no trusted validations exist
1688 peerCounts[closedLedger] = 0;
1689 if (mMode >= OperatingMode::TRACKING)
1690 peerCounts[closedLedger]++;
1691
1692 for (auto& peer : peerList)
1693 {
1694 uint256 peerLedger = peer->getClosedLedgerHash();
1695
1696 if (peerLedger.isNonZero())
1697 ++peerCounts[peerLedger];
1698 }
1699
1700 for (auto const& it : peerCounts)
1701 JLOG(m_journal.debug()) << "L: " << it.first << " n=" << it.second;
1702
1703 uint256 preferredLCL = validations.getPreferredLCL(
1704 RCLValidatedLedger{ourClosed, validations.adaptor().journal()},
1705 m_ledgerMaster.getValidLedgerIndex(),
1706 peerCounts);
1707
1708 bool switchLedgers = preferredLCL != closedLedger;
1709 if (switchLedgers)
1710 closedLedger = preferredLCL;
1711 //-------------------------------------------------------------------------
1712 if (switchLedgers && (closedLedger == prevClosedLedger))
1713 {
1714 // don't switch to our own previous ledger
1715 JLOG(m_journal.info()) << "We won't switch to our own previous ledger";
1716 networkClosed = ourClosed->info().hash;
1717 switchLedgers = false;
1718 }
1719 else
1720 networkClosed = closedLedger;
1721
1722 if (!switchLedgers)
1723 return false;
1724
1725 auto consensus = m_ledgerMaster.getLedgerByHash(closedLedger);
1726
1727 if (!consensus)
1728 consensus = app_.getInboundLedgers().acquire(
1729 closedLedger, 0, InboundLedger::Reason::CONSENSUS);
1730
1731 if (consensus &&
1732 (!m_ledgerMaster.canBeCurrent(consensus) ||
1733 !m_ledgerMaster.isCompatible(
1734 *consensus, m_journal.debug(), "Not switching")))
1735 {
1736 // Don't switch to a ledger not on the validated chain
1737 // or with an invalid close time or sequence
1738 networkClosed = ourClosed->info().hash;
1739 return false;
1740 }
1741
1742 JLOG(m_journal.warn()) << "We are not running on the consensus ledger";
1743 JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash
1744 << getJson({*ourClosed, {}});
1745 JLOG(m_journal.info()) << "Net LCL " << closedLedger;
1746
1747 if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL))
1748 {
1749 setMode(OperatingMode::CONNECTED);
1750 }
1751
1752 if (consensus)
1753 {
1754 // FIXME: If this rewinds the ledger sequence, or has the same
1755 // sequence, we should update the status on any stored transactions
1756 // in the invalidated ledgers.
1757 switchLastClosedLedger(consensus);
1758 }
1759
1760 return true;
1761}
1762
1763void
1764NetworkOPsImp::switchLastClosedLedger(
1765 std::shared_ptr<Ledger const> const& newLCL)
1766{
1767 // set the newLCL as our last closed ledger -- this is abnormal code
1768 JLOG(m_journal.error())
1769 << "JUMP last closed ledger to " << newLCL->info().hash;
1770
1771 clearNeedNetworkLedger();
1772
1773 // Update fee computations.
1774 app_.getTxQ().processClosedLedger(app_, *newLCL, true);
1775
1776 // Caller must own master lock
1777 {
1778 // Apply tx in old open ledger to new
1779 // open ledger. Then apply local tx.
1780
1781 auto retries = m_localTX->getTxSet();
1782 auto const lastVal = app_.getLedgerMaster().getValidatedLedger();
1784 if (lastVal)
1785 rules = makeRulesGivenLedger(*lastVal, app_.config().features);
1786 else
1787 rules.emplace(app_.config().features);
1788 app_.openLedger().accept(
1789 app_,
1790 *rules,
1791 newLCL,
1792 OrderedTxs({}),
1793 false,
1794 retries,
1795 tapNONE,
1796 "jump",
1797 [&](OpenView& view, beast::Journal j) {
1798 // Stuff the ledger with transactions from the queue.
1799 return app_.getTxQ().accept(app_, view);
1800 });
1801 }
1802
1803 m_ledgerMaster.switchLCL(newLCL);
1804
1805 protocol::TMStatusChange s;
1806 s.set_newevent(protocol::neSWITCHED_LEDGER);
1807 s.set_ledgerseq(newLCL->info().seq);
1808 s.set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1809 s.set_ledgerhashprevious(
1810 newLCL->info().parentHash.begin(), newLCL->info().parentHash.size());
1811 s.set_ledgerhash(newLCL->info().hash.begin(), newLCL->info().hash.size());
1812
1813 app_.overlay().foreach(
1814 send_always(std::make_shared<Message>(s, protocol::mtSTATUS_CHANGE)));
1815}
1816
// Start a consensus round for the ledger whose parent hash is
// `networkClosed`.  Refreshes the trusted validator set first, then hands
// the round to the consensus engine.
//
// @param networkClosed hash of the network-agreed last closed ledger
//                      (must be nonzero).
// @return false when the previous ledger is unavailable locally (the
//         round cannot start); true once the engine has been started.
bool
NetworkOPsImp::beginConsensus(uint256 const& networkClosed)
{
    XRPL_ASSERT(
        networkClosed.isNonZero(),
        "ripple::NetworkOPsImp::beginConsensus : nonzero input");

    auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();

    JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
                           << " with LCL " << closingInfo.parentHash;

    auto prevLedger = m_ledgerMaster.getLedgerByHash(closingInfo.parentHash);

    if (!prevLedger)
    {
        // this shouldn't happen unless we jump ledgers
        if (mMode == OperatingMode::FULL)
        {
            JLOG(m_journal.warn()) << "Don't have LCL, going to tracking";
            setMode(OperatingMode::TRACKING);
        }

        return false;
    }

    XRPL_ASSERT(
        prevLedger->info().hash == closingInfo.parentHash,
        "ripple::NetworkOPsImp::beginConsensus : prevLedger hash matches "
        "parent");
    XRPL_ASSERT(
        closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash,
        "ripple::NetworkOPsImp::beginConsensus : closedLedger parent matches "
        "hash");

    // Apply the negative UNL from the prior ledger (when the amendment is
    // active) before recomputing the trusted validator set.
    if (prevLedger->rules().enabled(featureNegativeUNL))
        app_.validators().setNegativeUNL(prevLedger->negativeUNL());
    TrustChanges const changes = app_.validators().updateTrusted(
        app_.getValidations().getCurrentNodeIDs(),
        closingInfo.parentCloseTime,
        *this,
        app_.overlay(),
        app_.getHashRouter());

    if (!changes.added.empty() || !changes.removed.empty())
    {
        app_.getValidations().trustChanged(changes.added, changes.removed);
        // Update the AmendmentTable so it tracks the current validators.
        app_.getAmendmentTable().trustChanged(
            app_.validators().getQuorumKeys().second);
    }

    mConsensus.startRound(
        app_.timeKeeper().closeTime(),
        networkClosed,
        prevLedger,
        changes.removed,
        changes.added);

    // Publish a phase change to subscribers if starting the round moved
    // the engine into a new phase.
    const ConsensusPhase currPhase = mConsensus.phase();
    if (mLastConsensusPhase != currPhase)
    {
        reportConsensusStateChange(currPhase);
        mLastConsensusPhase = currPhase;
    }

    JLOG(m_journal.debug()) << "Initiating consensus engine";
    return true;
}
1886
1887bool
1888NetworkOPsImp::processTrustedProposal(RCLCxPeerPos peerPos)
1889{
1890 return mConsensus.peerProposal(app_.timeKeeper().closeTime(), peerPos);
1891}
1892
1893void
1894NetworkOPsImp::mapComplete(std::shared_ptr<SHAMap> const& map, bool fromAcquire)
1895{
1896 // We now have an additional transaction set
1897 // either created locally during the consensus process
1898 // or acquired from a peer
1899
1900 // Inform peers we have this set
1901 protocol::TMHaveTransactionSet msg;
1902 msg.set_hash(map->getHash().as_uint256().begin(), 256 / 8);
1903 msg.set_status(protocol::tsHAVE);
1904 app_.overlay().foreach(
1905 send_always(std::make_shared<Message>(msg, protocol::mtHAVE_SET)));
1906
1907 // We acquired it because consensus asked us to
1908 if (fromAcquire)
1909 mConsensus.gotTxSet(app_.timeKeeper().closeTime(), RCLTxSet{map});
1910}
1911
// Called when a consensus round completes.  Clears stale peer LCL status,
// re-evaluates which last closed ledger the network prefers, possibly
// promotes this server's operating mode, and starts the next round.
void
NetworkOPsImp::endConsensus()
{
    uint256 deadLedger = m_ledgerMaster.getClosedLedger()->info().parentHash;

    // Any peer still advertising our now-dead parent ledger is stale.
    for (auto const& it : app_.overlay().getActivePeers())
    {
        if (it && (it->getClosedLedgerHash() == deadLedger))
        {
            JLOG(m_journal.trace()) << "Killing obsolete peer status";
            it->cycleStatus();
        }
    }

    uint256 networkClosed;
    bool ledgerChange =
        checkLastClosedLedger(app_.overlay().getActivePeers(), networkClosed);

    if (networkClosed.isZero())
        return;

    // WRITEME: Unless we are in FULL and in the process of doing a consensus,
    // we must count how many nodes share our LCL, how many nodes disagree with
    // our LCL, and how many validations our LCL has. We also want to check
    // timing to make sure there shouldn't be a newer LCL. We need this
    // information to do the next three tests.

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::SYNCING)) &&
        !ledgerChange)
    {
        // Count number of peers that agree with us and UNL nodes whose
        // validations we have for LCL. If the ledger is good enough, go to
        // TRACKING - TODO
        if (!needNetworkLedger_)
            setMode(OperatingMode::TRACKING);
    }

    if (((mMode == OperatingMode::CONNECTED) ||
         (mMode == OperatingMode::TRACKING)) &&
        !ledgerChange)
    {
        // check if the ledger is good enough to go to FULL
        // Note: Do not go to FULL if we don't have the previous ledger
        // check if the ledger is bad enough to go to CONNECTED -- TODO
        auto current = m_ledgerMaster.getCurrentLedger();
        // Promote to FULL only while the current ledger is still fresh
        // relative to its parent's close time.
        if (app_.timeKeeper().now() < (current->info().parentCloseTime +
                                       2 * current->info().closeTimeResolution))
        {
            setMode(OperatingMode::FULL);
        }
    }

    beginConsensus(networkClosed);
}
1967
1968void
1969NetworkOPsImp::consensusViewChange()
1970{
1971 if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING))
1972 {
1973 setMode(OperatingMode::CONNECTED);
1974 }
1975}
1976
1977void
1978NetworkOPsImp::pubManifest(Manifest const& mo)
1979{
1980 // VFALCO consider std::shared_mutex
1981 std::lock_guard sl(mSubLock);
1982
1983 if (!mStreamMaps[sManifests].empty())
1984 {
1986
1987 jvObj[jss::type] = "manifestReceived";
1988 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, mo.masterKey);
1989 if (mo.signingKey)
1990 jvObj[jss::signing_key] =
1991 toBase58(TokenType::NodePublic, *mo.signingKey);
1992 jvObj[jss::seq] = Json::UInt(mo.sequence);
1993 if (auto sig = mo.getSignature())
1994 jvObj[jss::signature] = strHex(*sig);
1995 jvObj[jss::master_signature] = strHex(mo.getMasterSignature());
1996 if (!mo.domain.empty())
1997 jvObj[jss::domain] = mo.domain;
1998 jvObj[jss::manifest] = strHex(mo.serialized);
1999
2000 for (auto i = mStreamMaps[sManifests].begin();
2001 i != mStreamMaps[sManifests].end();)
2002 {
2003 if (auto p = i->second.lock())
2004 {
2005 p->send(jvObj, true);
2006 ++i;
2007 }
2008 else
2009 {
2010 i = mStreamMaps[sManifests].erase(i);
2011 }
2012 }
2013 }
2014}
2015
// Snapshot of the values that drive the "serverStatus" stream: the load
// fee track state, the base fee, and the TxQ escalation metrics.  Stored
// so consecutive snapshots can be compared cheaply (see operator!=).
NetworkOPsImp::ServerFeeSummary::ServerFeeSummary(
    XRPAmount fee,
    TxQ::Metrics&& escalationMetrics,
    LoadFeeTrack const& loadFeeTrack)
    : loadFactorServer{loadFeeTrack.getLoadFactor()}
    , loadBaseServer{loadFeeTrack.getLoadBase()}
    , baseFee{fee}
    , em{std::move(escalationMetrics)}
{
}
2026
2027bool
2029 NetworkOPsImp::ServerFeeSummary const& b) const
2030{
2031 if (loadFactorServer != b.loadFactorServer ||
2032 loadBaseServer != b.loadBaseServer || baseFee != b.baseFee ||
2033 em.has_value() != b.em.has_value())
2034 return true;
2035
2036 if (em && b.em)
2037 {
2038 return (
2039 em->minProcessingFeeLevel != b.em->minProcessingFeeLevel ||
2040 em->openLedgerFeeLevel != b.em->openLedgerFeeLevel ||
2041 em->referenceFeeLevel != b.em->referenceFeeLevel);
2042 }
2043
2044 return false;
2045}
2046
// Clamp a uint64 value to the uint32 range, because Json::Value cannot
// represent 64-bit integers.
//
// NOTE(review): the function name and the `max32` constant line were lost
// in extraction and restored here — confirm against the full file.
static std::uint32_t
trunc32(std::uint64_t v)
{
    constexpr std::uint64_t max32 = std::numeric_limits<std::uint32_t>::max();

    return std::min(max32, v);
};
2055
2056void
2058{
2059 // VFALCO TODO Don't hold the lock across calls to send...make a copy of the
2060 // list into a local array while holding the lock then release
2061 // the lock and call send on everyone.
2062 //
2064
2065 if (!mStreamMaps[sServer].empty())
2066 {
2068
2070 app_.openLedger().current()->fees().base,
2072 app_.getFeeTrack()};
2073
2074 jvObj[jss::type] = "serverStatus";
2075 jvObj[jss::server_status] = strOperatingMode();
2076 jvObj[jss::load_base] = f.loadBaseServer;
2077 jvObj[jss::load_factor_server] = f.loadFactorServer;
2078 jvObj[jss::base_fee] = f.baseFee.jsonClipped();
2079
2080 if (f.em)
2081 {
2082 auto const loadFactor = std::max(
2083 safe_cast<std::uint64_t>(f.loadFactorServer),
2084 mulDiv(
2085 f.em->openLedgerFeeLevel,
2086 f.loadBaseServer,
2087 f.em->referenceFeeLevel)
2089
2090 jvObj[jss::load_factor] = trunc32(loadFactor);
2091 jvObj[jss::load_factor_fee_escalation] =
2092 f.em->openLedgerFeeLevel.jsonClipped();
2093 jvObj[jss::load_factor_fee_queue] =
2094 f.em->minProcessingFeeLevel.jsonClipped();
2095 jvObj[jss::load_factor_fee_reference] =
2096 f.em->referenceFeeLevel.jsonClipped();
2097 }
2098 else
2099 jvObj[jss::load_factor] = f.loadFactorServer;
2100
2101 mLastFeeSummary = f;
2102
2103 for (auto i = mStreamMaps[sServer].begin();
2104 i != mStreamMaps[sServer].end();)
2105 {
2106 InfoSub::pointer p = i->second.lock();
2107
2108 // VFALCO TODO research the possibility of using thread queues and
2109 // linearizing the deletion of subscribers with the
2110 // sending of JSON data.
2111 if (p)
2112 {
2113 p->send(jvObj, true);
2114 ++i;
2115 }
2116 else
2117 {
2118 i = mStreamMaps[sServer].erase(i);
2119 }
2120 }
2121 }
2122}
2123
2124void
2126{
2128
2129 auto& streamMap = mStreamMaps[sConsensusPhase];
2130 if (!streamMap.empty())
2131 {
2133 jvObj[jss::type] = "consensusPhase";
2134 jvObj[jss::consensus] = to_string(phase);
2135
2136 for (auto i = streamMap.begin(); i != streamMap.end();)
2137 {
2138 if (auto p = i->second.lock())
2139 {
2140 p->send(jvObj, true);
2141 ++i;
2142 }
2143 else
2144 {
2145 i = streamMap.erase(i);
2146 }
2147 }
2148 }
2149}
2150
2151void
2153{
2154 // VFALCO consider std::shared_mutex
2156
2157 if (!mStreamMaps[sValidations].empty())
2158 {
2160
2161 auto const signerPublic = val->getSignerPublic();
2162
2163 jvObj[jss::type] = "validationReceived";
2164 jvObj[jss::validation_public_key] =
2165 toBase58(TokenType::NodePublic, signerPublic);
2166 jvObj[jss::ledger_hash] = to_string(val->getLedgerHash());
2167 jvObj[jss::signature] = strHex(val->getSignature());
2168 jvObj[jss::full] = val->isFull();
2169 jvObj[jss::flags] = val->getFlags();
2170 jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
2171 jvObj[jss::data] = strHex(val->getSerializer().slice());
2172
2173 if (auto version = (*val)[~sfServerVersion])
2174 jvObj[jss::server_version] = std::to_string(*version);
2175
2176 if (auto cookie = (*val)[~sfCookie])
2177 jvObj[jss::cookie] = std::to_string(*cookie);
2178
2179 if (auto hash = (*val)[~sfValidatedHash])
2180 jvObj[jss::validated_hash] = strHex(*hash);
2181
2182 auto const masterKey =
2183 app_.validatorManifests().getMasterKey(signerPublic);
2184
2185 if (masterKey != signerPublic)
2186 jvObj[jss::master_key] = toBase58(TokenType::NodePublic, masterKey);
2187
2188 // NOTE *seq is a number, but old API versions used string. We replace
2189 // number with a string using MultiApiJson near end of this function
2190 if (auto const seq = (*val)[~sfLedgerSequence])
2191 jvObj[jss::ledger_index] = *seq;
2192
2193 if (val->isFieldPresent(sfAmendments))
2194 {
2195 jvObj[jss::amendments] = Json::Value(Json::arrayValue);
2196 for (auto const& amendment : val->getFieldV256(sfAmendments))
2197 jvObj[jss::amendments].append(to_string(amendment));
2198 }
2199
2200 if (auto const closeTime = (*val)[~sfCloseTime])
2201 jvObj[jss::close_time] = *closeTime;
2202
2203 if (auto const loadFee = (*val)[~sfLoadFee])
2204 jvObj[jss::load_fee] = *loadFee;
2205
2206 if (auto const baseFee = val->at(~sfBaseFee))
2207 jvObj[jss::base_fee] = static_cast<double>(*baseFee);
2208
2209 if (auto const reserveBase = val->at(~sfReserveBase))
2210 jvObj[jss::reserve_base] = *reserveBase;
2211
2212 if (auto const reserveInc = val->at(~sfReserveIncrement))
2213 jvObj[jss::reserve_inc] = *reserveInc;
2214
2215 // (The ~ operator converts the Proxy to a std::optional, which
2216 // simplifies later operations)
2217 if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops);
2218 baseFeeXRP && baseFeeXRP->native())
2219 jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped();
2220
2221 if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops);
2222 reserveBaseXRP && reserveBaseXRP->native())
2223 jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped();
2224
2225 if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops);
2226 reserveIncXRP && reserveIncXRP->native())
2227 jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped();
2228
2229 // NOTE Use MultiApiJson to publish two slightly different JSON objects
2230 // for consumers supporting different API versions
2231 MultiApiJson multiObj{jvObj};
2232 multiObj.visit(
2233 RPC::apiVersion<1>, //
2234 [](Json::Value& jvTx) {
2235 // Type conversion for older API versions to string
2236 if (jvTx.isMember(jss::ledger_index))
2237 {
2238 jvTx[jss::ledger_index] =
2239 std::to_string(jvTx[jss::ledger_index].asUInt());
2240 }
2241 });
2242
2243 for (auto i = mStreamMaps[sValidations].begin();
2244 i != mStreamMaps[sValidations].end();)
2245 {
2246 if (auto p = i->second.lock())
2247 {
2248 multiObj.visit(
2249 p->getApiVersion(), //
2250 [&](Json::Value const& jv) { p->send(jv, true); });
2251 ++i;
2252 }
2253 else
2254 {
2255 i = mStreamMaps[sValidations].erase(i);
2256 }
2257 }
2258 }
2259}
2260
2261void
2263{
2265
2266 if (!mStreamMaps[sPeerStatus].empty())
2267 {
2268 Json::Value jvObj(func());
2269
2270 jvObj[jss::type] = "peerStatusChange";
2271
2272 for (auto i = mStreamMaps[sPeerStatus].begin();
2273 i != mStreamMaps[sPeerStatus].end();)
2274 {
2275 InfoSub::pointer p = i->second.lock();
2276
2277 if (p)
2278 {
2279 p->send(jvObj, true);
2280 ++i;
2281 }
2282 else
2283 {
2284 i = mStreamMaps[sPeerStatus].erase(i);
2285 }
2286 }
2287 }
2288}
2289
2290void
2292{
2293 using namespace std::chrono_literals;
2294 if (om == OperatingMode::CONNECTED)
2295 {
2298 }
2299 else if (om == OperatingMode::SYNCING)
2300 {
2303 }
2304
2305 if ((om > OperatingMode::CONNECTED) && isBlocked())
2307
2308 if (mMode == om)
2309 return;
2310
2311 mMode = om;
2312
2313 accounting_.mode(om);
2314
2315 JLOG(m_journal.info()) << "STATE->" << strOperatingMode();
2316 pubServer();
2317}
2318
2319bool
2322 std::string const& source)
2323{
2324 JLOG(m_journal.trace())
2325 << "recvValidation " << val->getLedgerHash() << " from " << source;
2326
2328 BypassAccept bypassAccept = BypassAccept::no;
2329 try
2330 {
2331 if (pendingValidations_.contains(val->getLedgerHash()))
2332 bypassAccept = BypassAccept::yes;
2333 else
2334 pendingValidations_.insert(val->getLedgerHash());
2335 scope_unlock unlock(lock);
2336 handleNewValidation(app_, val, source, bypassAccept, m_journal);
2337 }
2338 catch (std::exception const& e)
2339 {
2340 JLOG(m_journal.warn())
2341 << "Exception thrown for handling new validation "
2342 << val->getLedgerHash() << ": " << e.what();
2343 }
2344 catch (...)
2345 {
2346 JLOG(m_journal.warn())
2347 << "Unknown exception thrown for handling new validation "
2348 << val->getLedgerHash();
2349 }
2350 if (bypassAccept == BypassAccept::no)
2351 {
2352 pendingValidations_.erase(val->getLedgerHash());
2353 }
2354 lock.unlock();
2355
2356 pubValidation(val);
2357
2358 // We will always relay trusted validations; if configured, we will
2359 // also relay all untrusted validations.
2360 return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted();
2361}
2362
2365{
2366 return mConsensus.getJson(true);
2367}
2368
2370NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters)
2371{
2373
2374 // System-level warnings
2375 {
2376 Json::Value warnings{Json::arrayValue};
2377 if (isAmendmentBlocked())
2378 {
2379 Json::Value& w = warnings.append(Json::objectValue);
2380 w[jss::id] = warnRPC_AMENDMENT_BLOCKED;
2381 w[jss::message] =
2382 "This server is amendment blocked, and must be updated to be "
2383 "able to stay in sync with the network.";
2384 }
2385 if (isUNLBlocked())
2386 {
2387 Json::Value& w = warnings.append(Json::objectValue);
2388 w[jss::id] = warnRPC_EXPIRED_VALIDATOR_LIST;
2389 w[jss::message] =
2390 "This server has an expired validator list. validators.txt "
2391 "may be incorrectly configured or some [validator_list_sites] "
2392 "may be unreachable.";
2393 }
2394 if (admin && isAmendmentWarned())
2395 {
2396 Json::Value& w = warnings.append(Json::objectValue);
2397 w[jss::id] = warnRPC_UNSUPPORTED_MAJORITY;
2398 w[jss::message] =
2399 "One or more unsupported amendments have reached majority. "
2400 "Upgrade to the latest version before they are activated "
2401 "to avoid being amendment blocked.";
2402 if (auto const expected =
2404 {
2405 auto& d = w[jss::details] = Json::objectValue;
2406 d[jss::expected_date] = expected->time_since_epoch().count();
2407 d[jss::expected_date_UTC] = to_string(*expected);
2408 }
2409 }
2410
2411 if (warnings.size())
2412 info[jss::warnings] = std::move(warnings);
2413 }
2414
2415 // hostid: unique string describing the machine
2416 if (human)
2417 info[jss::hostid] = getHostId(admin);
2418
2419 // domain: if configured with a domain, report it:
2420 if (!app_.config().SERVER_DOMAIN.empty())
2421 info[jss::server_domain] = app_.config().SERVER_DOMAIN;
2422
2423 info[jss::build_version] = BuildInfo::getVersionString();
2424
2425 info[jss::server_state] = strOperatingMode(admin);
2426
2427 info[jss::time] = to_string(std::chrono::floor<std::chrono::microseconds>(
2429
2431 info[jss::network_ledger] = "waiting";
2432
2433 info[jss::validation_quorum] =
2434 static_cast<Json::UInt>(app_.validators().quorum());
2435
2436 if (admin)
2437 {
2438 switch (app_.config().NODE_SIZE)
2439 {
2440 case 0:
2441 info[jss::node_size] = "tiny";
2442 break;
2443 case 1:
2444 info[jss::node_size] = "small";
2445 break;
2446 case 2:
2447 info[jss::node_size] = "medium";
2448 break;
2449 case 3:
2450 info[jss::node_size] = "large";
2451 break;
2452 case 4:
2453 info[jss::node_size] = "huge";
2454 break;
2455 }
2456
2457 auto when = app_.validators().expires();
2458
2459 if (!human)
2460 {
2461 if (when)
2462 info[jss::validator_list_expires] =
2463 safe_cast<Json::UInt>(when->time_since_epoch().count());
2464 else
2465 info[jss::validator_list_expires] = 0;
2466 }
2467 else
2468 {
2469 auto& x = (info[jss::validator_list] = Json::objectValue);
2470
2471 x[jss::count] = static_cast<Json::UInt>(app_.validators().count());
2472
2473 if (when)
2474 {
2475 if (*when == TimeKeeper::time_point::max())
2476 {
2477 x[jss::expiration] = "never";
2478 x[jss::status] = "active";
2479 }
2480 else
2481 {
2482 x[jss::expiration] = to_string(*when);
2483
2484 if (*when > app_.timeKeeper().now())
2485 x[jss::status] = "active";
2486 else
2487 x[jss::status] = "expired";
2488 }
2489 }
2490 else
2491 {
2492 x[jss::status] = "unknown";
2493 x[jss::expiration] = "unknown";
2494 }
2495 }
2496 }
2497 info[jss::io_latency_ms] =
2498 static_cast<Json::UInt>(app_.getIOLatency().count());
2499
2500 if (admin)
2501 {
2502 if (auto const localPubKey = app_.validators().localPublicKey();
2503 localPubKey && app_.getValidationPublicKey())
2504 {
2505 info[jss::pubkey_validator] =
2506 toBase58(TokenType::NodePublic, localPubKey.value());
2507 }
2508 else
2509 {
2510 info[jss::pubkey_validator] = "none";
2511 }
2512 }
2513
2514 if (counters)
2515 {
2516 info[jss::counters] = app_.getPerfLog().countersJson();
2517
2518 Json::Value nodestore(Json::objectValue);
2519 app_.getNodeStore().getCountsJson(nodestore);
2520 info[jss::counters][jss::nodestore] = nodestore;
2521 info[jss::current_activities] = app_.getPerfLog().currentJson();
2522 }
2523
2524 info[jss::pubkey_node] =
2526
2527 info[jss::complete_ledgers] = app_.getLedgerMaster().getCompleteLedgers();
2528
2530 info[jss::amendment_blocked] = true;
2531
2532 auto const fp = m_ledgerMaster.getFetchPackCacheSize();
2533
2534 if (fp != 0)
2535 info[jss::fetch_pack] = Json::UInt(fp);
2536
2537 info[jss::peers] = Json::UInt(app_.overlay().size());
2538
2539 Json::Value lastClose = Json::objectValue;
2540 lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers());
2541
2542 if (human)
2543 {
2544 lastClose[jss::converge_time_s] =
2546 }
2547 else
2548 {
2549 lastClose[jss::converge_time] =
2551 }
2552
2553 info[jss::last_close] = lastClose;
2554
2555 // info[jss::consensus] = mConsensus.getJson();
2556
2557 if (admin)
2558 info[jss::load] = m_job_queue.getJson();
2559
2560 if (auto const netid = app_.overlay().networkID())
2561 info[jss::network_id] = static_cast<Json::UInt>(*netid);
2562
2563 auto const escalationMetrics =
2565
2566 auto const loadFactorServer = app_.getFeeTrack().getLoadFactor();
2567 auto const loadBaseServer = app_.getFeeTrack().getLoadBase();
2568 /* Scale the escalated fee level to unitless "load factor".
2569 In practice, this just strips the units, but it will continue
2570 to work correctly if either base value ever changes. */
2571 auto const loadFactorFeeEscalation =
2572 mulDiv(
2573 escalationMetrics.openLedgerFeeLevel,
2574 loadBaseServer,
2575 escalationMetrics.referenceFeeLevel)
2577
2578 auto const loadFactor = std::max(
2579 safe_cast<std::uint64_t>(loadFactorServer), loadFactorFeeEscalation);
2580
2581 if (!human)
2582 {
2583 info[jss::load_base] = loadBaseServer;
2584 info[jss::load_factor] = trunc32(loadFactor);
2585 info[jss::load_factor_server] = loadFactorServer;
2586
2587 /* Json::Value doesn't support uint64, so clamp to max
2588 uint32 value. This is mostly theoretical, since there
2589 probably isn't enough extant XRP to drive the factor
2590 that high.
2591 */
2592 info[jss::load_factor_fee_escalation] =
2593 escalationMetrics.openLedgerFeeLevel.jsonClipped();
2594 info[jss::load_factor_fee_queue] =
2595 escalationMetrics.minProcessingFeeLevel.jsonClipped();
2596 info[jss::load_factor_fee_reference] =
2597 escalationMetrics.referenceFeeLevel.jsonClipped();
2598 }
2599 else
2600 {
2601 info[jss::load_factor] =
2602 static_cast<double>(loadFactor) / loadBaseServer;
2603
2604 if (loadFactorServer != loadFactor)
2605 info[jss::load_factor_server] =
2606 static_cast<double>(loadFactorServer) / loadBaseServer;
2607
2608 if (admin)
2609 {
2611 if (fee != loadBaseServer)
2612 info[jss::load_factor_local] =
2613 static_cast<double>(fee) / loadBaseServer;
2614 fee = app_.getFeeTrack().getRemoteFee();
2615 if (fee != loadBaseServer)
2616 info[jss::load_factor_net] =
2617 static_cast<double>(fee) / loadBaseServer;
2618 fee = app_.getFeeTrack().getClusterFee();
2619 if (fee != loadBaseServer)
2620 info[jss::load_factor_cluster] =
2621 static_cast<double>(fee) / loadBaseServer;
2622 }
2623 if (escalationMetrics.openLedgerFeeLevel !=
2624 escalationMetrics.referenceFeeLevel &&
2625 (admin || loadFactorFeeEscalation != loadFactor))
2626 info[jss::load_factor_fee_escalation] =
2627 escalationMetrics.openLedgerFeeLevel.decimalFromReference(
2628 escalationMetrics.referenceFeeLevel);
2629 if (escalationMetrics.minProcessingFeeLevel !=
2630 escalationMetrics.referenceFeeLevel)
2631 info[jss::load_factor_fee_queue] =
2632 escalationMetrics.minProcessingFeeLevel.decimalFromReference(
2633 escalationMetrics.referenceFeeLevel);
2634 }
2635
2636 bool valid = false;
2637 auto lpClosed = m_ledgerMaster.getValidatedLedger();
2638
2639 if (lpClosed)
2640 valid = true;
2641 else
2642 lpClosed = m_ledgerMaster.getClosedLedger();
2643
2644 if (lpClosed)
2645 {
2646 XRPAmount const baseFee = lpClosed->fees().base;
2648 l[jss::seq] = Json::UInt(lpClosed->info().seq);
2649 l[jss::hash] = to_string(lpClosed->info().hash);
2650
2651 if (!human)
2652 {
2653 l[jss::base_fee] = baseFee.jsonClipped();
2654 l[jss::reserve_base] =
2655 lpClosed->fees().accountReserve(0).jsonClipped();
2656 l[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
2657 l[jss::close_time] = Json::Value::UInt(
2658 lpClosed->info().closeTime.time_since_epoch().count());
2659 }
2660 else
2661 {
2662 l[jss::base_fee_xrp] = baseFee.decimalXRP();
2663 l[jss::reserve_base_xrp] =
2664 lpClosed->fees().accountReserve(0).decimalXRP();
2665 l[jss::reserve_inc_xrp] = lpClosed->fees().increment.decimalXRP();
2666
2667 if (auto const closeOffset = app_.timeKeeper().closeOffset();
2668 std::abs(closeOffset.count()) >= 60)
2669 l[jss::close_time_offset] =
2670 static_cast<std::uint32_t>(closeOffset.count());
2671
2672 constexpr std::chrono::seconds highAgeThreshold{1000000};
2674 {
2675 auto const age = m_ledgerMaster.getValidatedLedgerAge();
2676 l[jss::age] =
2677 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2678 }
2679 else
2680 {
2681 auto lCloseTime = lpClosed->info().closeTime;
2682 auto closeTime = app_.timeKeeper().closeTime();
2683 if (lCloseTime <= closeTime)
2684 {
2685 using namespace std::chrono_literals;
2686 auto age = closeTime - lCloseTime;
2687 l[jss::age] =
2688 Json::UInt(age < highAgeThreshold ? age.count() : 0);
2689 }
2690 }
2691 }
2692
2693 if (valid)
2694 info[jss::validated_ledger] = l;
2695 else
2696 info[jss::closed_ledger] = l;
2697
2698 auto lpPublished = m_ledgerMaster.getPublishedLedger();
2699 if (!lpPublished)
2700 info[jss::published_ledger] = "none";
2701 else if (lpPublished->info().seq != lpClosed->info().seq)
2702 info[jss::published_ledger] = lpPublished->info().seq;
2703 }
2704
2705 accounting_.json(info);
2706 info[jss::uptime] = UptimeClock::now().time_since_epoch().count();
2707 info[jss::jq_trans_overflow] =
2709 info[jss::peer_disconnects] =
2711 info[jss::peer_disconnects_resources] =
2713
2714 // This array must be sorted in increasing order.
2715 static constexpr std::array<std::string_view, 7> protocols{
2716 "http", "https", "peer", "ws", "ws2", "wss", "wss2"};
2717 static_assert(std::is_sorted(std::begin(protocols), std::end(protocols)));
2718 {
2720 for (auto const& port : app_.getServerHandler().setup().ports)
2721 {
2722 // Don't publish admin ports for non-admin users
2723 if (!admin &&
2724 !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() &&
2725 port.admin_user.empty() && port.admin_password.empty()))
2726 continue;
2729 std::begin(port.protocol),
2730 std::end(port.protocol),
2731 std::begin(protocols),
2732 std::end(protocols),
2733 std::back_inserter(proto));
2734 if (!proto.empty())
2735 {
2736 auto& jv = ports.append(Json::Value(Json::objectValue));
2737 jv[jss::port] = std::to_string(port.port);
2738 jv[jss::protocol] = Json::Value{Json::arrayValue};
2739 for (auto const& p : proto)
2740 jv[jss::protocol].append(p);
2741 }
2742 }
2743
2744 if (app_.config().exists(SECTION_PORT_GRPC))
2745 {
2746 auto const& grpcSection = app_.config().section(SECTION_PORT_GRPC);
2747 auto const optPort = grpcSection.get("port");
2748 if (optPort && grpcSection.get("ip"))
2749 {
2750 auto& jv = ports.append(Json::Value(Json::objectValue));
2751 jv[jss::port] = *optPort;
2752 jv[jss::protocol] = Json::Value{Json::arrayValue};
2753 jv[jss::protocol].append("grpc");
2754 }
2755 }
2756 info[jss::ports] = std::move(ports);
2757 }
2758
2759 return info;
2760}
2761
2762 void
// NOTE(review): the function name and body lines are missing from this
// rendering (doc numbering jumps 2762 -> 2764 -> 2766), leaving an empty
// shell here. Confirm the real definition against the repository source
// before making any change to this function.
2764 {
2766 }
2767
2770{
2771 return app_.getInboundLedgers().getInfo();
2772}
2773
// Publish a proposed (not yet validated) transaction to all subscribers of
// the real-time transactions stream, then fan out to per-account real-time
// subscribers via pubProposedAccountTransaction().
// NOTE(review): the function-name line and a lock line inside the inner
// scope are missing from this rendering -- presumably a lock guarding
// mStreamMaps; confirm against the repository source.
2774 void
2776 std::shared_ptr<ReadView const> const& ledger,
2777 std::shared_ptr<STTx const> const& transaction,
2778 TER result)
2779 {
// Build the version-spanning JSON once; validated=false, no metadata.
2780 MultiApiJson jvObj =
2781 transJson(transaction, result, false, ledger, std::nullopt);
2782
2783 {
2785
// Walk the real-time stream subscribers, sending to live ones and
// pruning entries whose weak_ptr has expired.
2786 auto it = mStreamMaps[sRTTransactions].begin();
2787 while (it != mStreamMaps[sRTTransactions].end())
2788 {
2789 InfoSub::pointer p = it->second.lock();
2790
2791 if (p)
2792 {
// Send the JSON shaped for this subscriber's API version.
2793 jvObj.visit(
2794 p->getApiVersion(), //
2795 [&](Json::Value const& jv) { p->send(jv, true); });
2796 ++it;
2797 }
2798 else
2799 {
2800 it = mStreamMaps[sRTTransactions].erase(it);
2801 }
2802 }
2803 }
2804
2805 pubProposedAccountTransaction(ledger, transaction, result);
2806 }
2807
// Publish a newly accepted/validated ledger: notify the ledger and
// book_changes streams, kick off delayed account-history subscriptions on
// the first published ledger, then publish each accepted transaction.
// NOTE(review): several lines are missing from this rendering (the function
// name, the AcceptedLedger variable declaration, a lock line, and parts of
// the jvObj construction) -- confirm details against the repository source.
2808 void
2810 {
2811 // Ledgers are published only when they acquire sufficient validations
2812 // Holes are filled across connection loss or other catastrophe
2813
// Fetch (or build and cache) the AcceptedLedger wrapper for this ledger.
2815 app_.getAcceptedLedgerCache().fetch(lpAccepted->info().hash);
2816 if (!alpAccepted)
2817 {
2818 alpAccepted = std::make_shared<AcceptedLedger>(lpAccepted, app_);
2819 app_.getAcceptedLedgerCache().canonicalize_replace_client(
2820 lpAccepted->info().hash, alpAccepted);
2821 }
2822
2823 XRPL_ASSERT(
2824 alpAccepted->getLedger().get() == lpAccepted.get(),
2825 "ripple::NetworkOPsImp::pubLedger : accepted input");
2826
2827 {
2828 JLOG(m_journal.debug())
2829 << "Publishing ledger " << lpAccepted->info().seq << " "
2830 << lpAccepted->info().hash;
2831
2833
2834 if (!mStreamMaps[sLedger].empty())
2835 {
// Build the ledgerClosed notification sent to sLedger subscribers.
2837
2838 jvObj[jss::type] = "ledgerClosed";
2839 jvObj[jss::ledger_index] = lpAccepted->info().seq;
2840 jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash);
2841 jvObj[jss::ledger_time] = Json::Value::UInt(
2842 lpAccepted->info().closeTime.time_since_epoch().count());
2843
// fee_ref is only reported for pre-XRPFees ledgers (deprecated).
2844 if (!lpAccepted->rules().enabled(featureXRPFees))
2845 jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
2846 jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
2847 jvObj[jss::reserve_base] =
2848 lpAccepted->fees().accountReserve(0).jsonClipped();
2849 jvObj[jss::reserve_inc] =
2850 lpAccepted->fees().increment.jsonClipped();
2851
2852 jvObj[jss::txn_count] = Json::UInt(alpAccepted->size());
2853
// NOTE(review): the guard condition on the line above this scope is
// missing from this rendering -- presumably an operating-mode check.
2855 {
2856 jvObj[jss::validated_ledgers] =
2858 }
2859
// Send to live sLedger subscribers; drop expired ones.
2860 auto it = mStreamMaps[sLedger].begin();
2861 while (it != mStreamMaps[sLedger].end())
2862 {
2863 InfoSub::pointer p = it->second.lock();
2864 if (p)
2865 {
2866 p->send(jvObj, true);
2867 ++it;
2868 }
2869 else
2870 it = mStreamMaps[sLedger].erase(it);
2871 }
2872 }
2873
2874 if (!mStreamMaps[sBookChanges].empty())
2875 {
2876 Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted);
2877
2878 auto it = mStreamMaps[sBookChanges].begin();
2879 while (it != mStreamMaps[sBookChanges].end())
2880 {
2881 InfoSub::pointer p = it->second.lock();
2882 if (p)
2883 {
2884 p->send(jvObj, true);
2885 ++it;
2886 }
2887 else
2888 it = mStreamMaps[sBookChanges].erase(it);
2889 }
2890 }
2891
2892 {
// NOTE(review): function-local mutable static; appears to rely on the
// enclosing scope's (missing) lock for thread safety -- confirm.
2893 static bool firstTime = true;
2894 if (firstTime)
2895 {
2896 // First validated ledger, start delayed SubAccountHistory
2897 firstTime = false;
2898 for (auto& outer : mSubAccountHistory)
2899 {
2900 for (auto& inner : outer.second)
2901 {
2902 auto& subInfo = inner.second;
// separationLedgerSeq_ == 0 marks a subscription waiting for
// its first validated ledger before streaming starts.
2903 if (subInfo.index_->separationLedgerSeq_ == 0)
2904 {
2906 alpAccepted->getLedger(), subInfo);
2907 }
2908 }
2909 }
2910 }
2911 }
2912 }
2913
2914 // Don't lock since pubAcceptedTransaction is locking.
2915 for (auto const& accTx : *alpAccepted)
2916 {
2917 JLOG(m_journal.trace()) << "pubAccepted: " << accTx->getJson();
// The final transaction of the ledger is flagged as the boundary.
2919 lpAccepted, *accTx, accTx == *(--alpAccepted->end()));
2920 }
2921 }
2922
// Detect a change in the fee summary (base fee / load factors) and, only if
// something changed since the last report, schedule a job to publish the
// new server state to subscribers.
// NOTE(review): the function name, part of the fee-summary construction,
// and the addJob call line are missing from this rendering -- confirm.
2923 void
2925 {
2927 app_.openLedger().current()->fees().base,
2929 app_.getFeeTrack()};
2930
2931 // only schedule the job if something has changed
2932 if (f != mLastFeeSummary)
2933 {
2935 jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() {
2936 pubServer();
2937 });
2938 }
2939 }
2940
// Schedule an async job that publishes a consensus-phase change to the
// consensus stream subscribers.
// NOTE(review): the function-name line and the start of the addJob call are
// missing from this rendering; the phase is captured by value so the job is
// safe to run after this call returns.
2941 void
2943 {
2946 "reportConsensusStateChange->pubConsensus",
2947 [this, phase]() { pubConsensus(phase); });
2948 }
2949
// Sweep the local-transaction tracker against the given view, dropping
// local transactions that are no longer relevant.
// NOTE(review): the signature line is missing from this rendering.
2950 inline void
2952 {
2953 m_localTX->sweep(view);
2954 }
// Number of transactions currently held by the local-transaction tracker.
// NOTE(review): the signature line is missing from this rendering.
2955 inline std::size_t
2957 {
2958 return m_localTX->size();
2959 }
2960
2961 // This routine should only be used to publish accepted or validated
2962 // transactions.
// Build the subscriber-facing JSON for one transaction, shaped for every
// supported API version (returned as a MultiApiJson).
// NOTE(review): the return-type/name lines and the meta parameter line are
// missing from this rendering; parameters visible here are the transaction,
// its engine result, whether it is validated, and the source ledger.
2965 std::shared_ptr<STTx const> const& transaction,
2966 TER result,
2967 bool validated,
2968 std::shared_ptr<ReadView const> const& ledger,
2970 {
2972 std::string sToken;
2973 std::string sHuman;
2974
// Translate the TER into its token ("tesSUCCESS") and human message.
2975 transResultInfo(result, sToken, sHuman);
2976
2977 jvObj[jss::type] = "transaction";
2978 // NOTE jvObj is not a finished object for either API version. After
2979 // it's populated, we need to finish it for a specific API version. This is
2980 // done in a loop, near the end of this function.
2981 jvObj[jss::transaction] =
2982 transaction->getJson(JsonOptions::disable_API_prior_V2, false);
2983
2984 if (meta)
2985 {
2986 jvObj[jss::meta] = meta->get().getJson(JsonOptions::none);
2988 jvObj[jss::meta], *ledger, transaction, meta->get());
2990 jvObj[jss::meta], transaction, meta->get());
2991 }
2992
// Closed ledgers carry their hash; open ledgers do not have one yet.
2993 if (!ledger->open())
2994 jvObj[jss::ledger_hash] = to_string(ledger->info().hash);
2995
2996 if (validated)
2997 {
2998 jvObj[jss::ledger_index] = ledger->info().seq;
2999 jvObj[jss::transaction][jss::date] =
3000 ledger->info().closeTime.time_since_epoch().count();
3001 jvObj[jss::validated] = true;
3002 jvObj[jss::close_time_iso] = to_string_iso(ledger->info().closeTime);
3003
3004 // WRITEME: Put the account next seq here
3005 }
3006 else
3007 {
3008 jvObj[jss::validated] = false;
3009 jvObj[jss::ledger_current_index] = ledger->info().seq;
3010 }
3011
3012 jvObj[jss::status] = validated ? "closed" : "proposed";
3013 jvObj[jss::engine_result] = sToken;
3014 jvObj[jss::engine_result_code] = result;
3015 jvObj[jss::engine_result_message] = sHuman;
3016
// For offers funded by a third party, report the owner's spendable funds
// so stream consumers can judge how much of the offer is actually funded.
3017 if (transaction->getTxnType() == ttOFFER_CREATE)
3018 {
3019 auto const account = transaction->getAccountID(sfAccount);
3020 auto const amount = transaction->getFieldAmount(sfTakerGets);
3021
3022 // If the offer create is not self funded then add the owner balance
3023 if (account != amount.issue().account)
3024 {
3025 auto const ownerFunds = accountFunds(
3026 *ledger,
3027 account,
3028 amount,
3030 app_.journal("View"));
3031 jvObj[jss::transaction][jss::owner_funds] = ownerFunds.getText();
3032 }
3033 }
3034
// Finish the object per API version: v2+ uses tx_json and a top-level
// hash, v1 keeps the hash inside the transaction object.
3035 std::string const hash = to_string(transaction->getTransactionID());
3036 MultiApiJson multiObj{jvObj};
3038 multiObj.visit(), //
3039 [&]<unsigned Version>(
3041 RPC::insertDeliverMax(
3042 jvTx[jss::transaction], transaction->getTxnType(), Version);
3043
3044 if constexpr (Version > 1)
3045 {
3046 jvTx[jss::tx_json] = jvTx.removeMember(jss::transaction);
3047 jvTx[jss::hash] = hash;
3048 }
3049 else
3050 {
3051 jvTx[jss::transaction][jss::hash] = hash;
3052 }
3053 });
3054
3055 return multiObj;
3056 }
3057
// Publish a validated transaction to the transactions and real-time
// transactions streams, feed it to the order book tracker on success, and
// fan out to per-account subscribers.
// NOTE(review): the function-name line and a lock line at the top of the
// inner scope are missing from this rendering -- confirm against source.
3058 void
3060 std::shared_ptr<ReadView const> const& ledger,
3061 const AcceptedLedgerTx& transaction,
3062 bool last)
3063 {
3064 auto const& stTxn = transaction.getTxn();
3065
3066 // Create two different Json objects, for different API versions
3067 auto const metaRef = std::ref(transaction.getMeta());
3068 auto const trResult = transaction.getResult();
3069 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3070
3071 {
3073
// sTransactions subscribers: send to live ones, prune expired ones.
3074 auto it = mStreamMaps[sTransactions].begin();
3075 while (it != mStreamMaps[sTransactions].end())
3076 {
3077 InfoSub::pointer p = it->second.lock();
3078
3079 if (p)
3080 {
3081 jvObj.visit(
3082 p->getApiVersion(), //
3083 [&](Json::Value const& jv) { p->send(jv, true); });
3084 ++it;
3085 }
3086 else
3087 it = mStreamMaps[sTransactions].erase(it);
3088 }
3089
// Real-time stream subscribers also receive validated transactions.
3090 it = mStreamMaps[sRTTransactions].begin();
3091
3092 while (it != mStreamMaps[sRTTransactions].end())
3093 {
3094 InfoSub::pointer p = it->second.lock();
3095
3096 if (p)
3097 {
3098 jvObj.visit(
3099 p->getApiVersion(), //
3100 [&](Json::Value const& jv) { p->send(jv, true); });
3101 ++it;
3102 }
3103 else
3104 it = mStreamMaps[sRTTransactions].erase(it);
3105 }
3106 }
3107
// Only successful transactions can change order books.
3108 if (transaction.getResult() == tesSUCCESS)
3109 app_.getOrderBookDB().processTxn(ledger, transaction, jvObj);
3110
3111 pubAccountTransaction(ledger, transaction, last);
3112 }
3113
// Deliver a validated transaction to every subscriber interested in one of
// its affected accounts: real-time account subscribers, regular account
// subscribers, and account_history subscribers.
// NOTE(review): the function-name line, the notify-set declaration, a lock
// line, and part of the subscription-emptiness check are missing from this
// rendering -- confirm against the repository source.
3114 void
3116 std::shared_ptr<ReadView const> const& ledger,
3117 AcceptedLedgerTx const& transaction,
3118 bool last)
3119 {
3121 int iProposed = 0;
3122 int iAccepted = 0;
3123
3124 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3125 auto const currLedgerSeq = ledger->seq();
3126 {
3128
3129 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3131 {
3132 for (auto const& affectedAccount : transaction.getAffected())
3133 {
// Real-time subscribers for this account.
3134 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3135 simiIt != mSubRTAccount.end())
3136 {
3137 auto it = simiIt->second.begin();
3138
3139 while (it != simiIt->second.end())
3140 {
3141 InfoSub::pointer p = it->second.lock();
3142
3143 if (p)
3144 {
3145 notify.insert(p);
3146 ++it;
3147 ++iProposed;
3148 }
3149 else
3150 it = simiIt->second.erase(it);
3151 }
3152 }
3153
// Regular (accepted-only) subscribers for this account.
3154 if (auto simiIt = mSubAccount.find(affectedAccount);
3155 simiIt != mSubAccount.end())
3156 {
3157 auto it = simiIt->second.begin();
3158 while (it != simiIt->second.end())
3159 {
3160 InfoSub::pointer p = it->second.lock();
3161
3162 if (p)
3163 {
3164 notify.insert(p);
3165 ++it;
3166 ++iAccepted;
3167 }
3168 else
3169 it = simiIt->second.erase(it);
3170 }
3171 }
3172
// account_history subscribers: skip transactions at or before the
// separation ledger (those are delivered by the historical job).
3173 if (auto histoIt = mSubAccountHistory.find(affectedAccount);
3174 histoIt != mSubAccountHistory.end())
3175 {
3176 auto& subs = histoIt->second;
3177 auto it = subs.begin();
3178 while (it != subs.end())
3179 {
3180 SubAccountHistoryInfoWeak const& info = it->second;
3181 if (currLedgerSeq <= info.index_->separationLedgerSeq_)
3182 {
3183 ++it;
3184 continue;
3185 }
3186
3187 if (auto isSptr = info.sinkWptr_.lock(); isSptr)
3188 {
3189 accountHistoryNotify.emplace_back(
3190 SubAccountHistoryInfo{isSptr, info.index_});
3191 ++it;
3192 }
3193 else
3194 {
3195 it = subs.erase(it);
3196 }
3197 }
3198 if (subs.empty())
3199 mSubAccountHistory.erase(histoIt);
3200 }
3201 }
3202 }
3203 }
3204
3205 JLOG(m_journal.trace())
3206 << "pubAccountTransaction: " << "proposed=" << iProposed
3207 << ", accepted=" << iAccepted;
3208
3209 if (!notify.empty() || !accountHistoryNotify.empty())
3210 {
3211 auto const& stTxn = transaction.getTxn();
3212
3213 // Create two different Json objects, for different API versions
3214 auto const metaRef = std::ref(transaction.getMeta());
3215 auto const trResult = transaction.getResult();
3216 MultiApiJson jvObj = transJson(stTxn, trResult, true, ledger, metaRef);
3217
3218 for (InfoSub::ref isrListener : notify)
3219 {
3220 jvObj.visit(
3221 isrListener->getApiVersion(), //
3222 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3223 }
3224
// Mark the last transaction of a ledger so history consumers can detect
// ledger boundaries in the stream.
3225 if (last)
3226 jvObj.set(jss::account_history_boundary, true);
3227
3228 XRPL_ASSERT(
3229 jvObj.isMember(jss::account_history_tx_stream) ==
3231 "ripple::NetworkOPsImp::pubAccountTransaction : "
3232 "account_history_tx_stream not set");
3233 for (auto& info : accountHistoryNotify)
3234 {
3235 auto& index = info.index_;
// First forward-stream tx for a subscriber with no history backlog.
3236 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3237 jvObj.set(jss::account_history_tx_first, true);
3238
3239 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3240
3241 jvObj.visit(
3242 info.sink_->getApiVersion(), //
3243 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3244 }
3245 }
3246 }
3247
// Deliver a proposed (unvalidated) transaction to real-time subscribers of
// any account the transaction mentions.
// NOTE(review): the function-name line, the tx parameter line, the
// notify-set declaration, and a lock line are missing from this rendering.
// Also note accountHistoryNotify is declared but never populated here, so
// the loop over it at the bottom appears to be dead code -- confirm.
3248 void
3250 std::shared_ptr<ReadView const> const& ledger,
3252 TER result)
3253 {
3255 int iProposed = 0;
3256
3257 std::vector<SubAccountHistoryInfo> accountHistoryNotify;
3258
3259 {
3261
// Fast exit: nothing to do with no real-time account subscribers.
3262 if (mSubRTAccount.empty())
3263 return;
3264
3265 if (!mSubAccount.empty() || !mSubRTAccount.empty() ||
3267 {
3268 for (auto const& affectedAccount : tx->getMentionedAccounts())
3269 {
3270 if (auto simiIt = mSubRTAccount.find(affectedAccount);
3271 simiIt != mSubRTAccount.end())
3272 {
3273 auto it = simiIt->second.begin();
3274
3275 while (it != simiIt->second.end())
3276 {
3277 InfoSub::pointer p = it->second.lock();
3278
3279 if (p)
3280 {
3281 notify.insert(p);
3282 ++it;
3283 ++iProposed;
3284 }
3285 else
3286 it = simiIt->second.erase(it);
3287 }
3288 }
3289 }
3290 }
3291 }
3292
3293 JLOG(m_journal.trace()) << "pubProposedAccountTransaction: " << iProposed;
3294
3295 if (!notify.empty() || !accountHistoryNotify.empty())
3296 {
3297 // Create two different Json objects, for different API versions
3298 MultiApiJson jvObj = transJson(tx, result, false, ledger, std::nullopt);
3299
3300 for (InfoSub::ref isrListener : notify)
3301 jvObj.visit(
3302 isrListener->getApiVersion(), //
3303 [&](Json::Value const& jv) { isrListener->send(jv, true); });
3304
3305 XRPL_ASSERT(
3306 jvObj.isMember(jss::account_history_tx_stream) ==
3308 "ripple::NetworkOPs::pubProposedAccountTransaction : "
3309 "account_history_tx_stream not set");
3310 for (auto& info : accountHistoryNotify)
3311 {
3312 auto& index = info.index_;
3313 if (index->forwardTxIndex_ == 0 && !index->haveHistorical_)
3314 jvObj.set(jss::account_history_tx_first, true);
3315 jvObj.set(jss::account_history_tx_index, index->forwardTxIndex_++);
3316 jvObj.visit(
3317 info.sink_->getApiVersion(), //
3318 [&](Json::Value const& jv) { info.sink_->send(jv, true); });
3319 }
3320 }
3321 }
3322
3323//
3324// Monitoring
3325//
3326
// Subscribe a listener to a set of accounts; rt selects the real-time
// (proposed) map versus the accepted-only map. Registers the accounts on
// the InfoSub itself, then in the server-side map keyed by listener seq.
// NOTE(review): the function-name line and a lock line before the second
// loop are missing from this rendering -- confirm against source.
3327 void
3329 InfoSub::ref isrListener,
3330 hash_set<AccountID> const& vnaAccountIDs,
3331 bool rt)
3332 {
3333 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3334
3335 for (auto const& naAccountID : vnaAccountIDs)
3336 {
3337 JLOG(m_journal.trace())
3338 << "subAccount: account: " << toBase58(naAccountID);
3339
3340 isrListener->insertSubAccountInfo(naAccountID, rt);
3341 }
3342
3344
3345 for (auto const& naAccountID : vnaAccountIDs)
3346 {
3347 auto simIterator = subMap.find(naAccountID);
3348 if (simIterator == subMap.end())
3349 {
3350 // Not found, note that account has a new single listner.
3351 SubMapType usisElement;
3352 usisElement[isrListener->getSeq()] = isrListener;
3353 // VFALCO NOTE This is making a needless copy of naAccountID
3354 subMap.insert(simIterator, make_pair(naAccountID, usisElement));
3355 }
3356 else
3357 {
3358 // Found, note that the account has another listener.
3359 simIterator->second[isrListener->getSeq()] = isrListener;
3360 }
3361 }
3362 }
3363
// Unsubscribe a listener from a set of accounts: first clear the entries
// on the InfoSub itself, then remove the server-side map entries.
// NOTE(review): the function-name line is missing from this rendering.
3364 void
3366 InfoSub::ref isrListener,
3367 hash_set<AccountID> const& vnaAccountIDs,
3368 bool rt)
3369 {
3370 for (auto const& naAccountID : vnaAccountIDs)
3371 {
3372 // Remove from the InfoSub
3373 isrListener->deleteSubAccountInfo(naAccountID, rt);
3374 }
3375
3376 // Remove from the server
3377 unsubAccountInternal(isrListener->getSeq(), vnaAccountIDs, rt);
3378 }
3379
// Remove a listener (by sequence number) from the server-side account
// subscription map, erasing an account's entry entirely once it has no
// listeners left.
// NOTE(review): the function-name line and a lock line are missing from
// this rendering -- confirm against source.
3380 void
3382 std::uint64_t uSeq,
3383 hash_set<AccountID> const& vnaAccountIDs,
3384 bool rt)
3385 {
3387
3388 SubInfoMapType& subMap = rt ? mSubRTAccount : mSubAccount;
3389
3390 for (auto const& naAccountID : vnaAccountIDs)
3391 {
3392 auto simIterator = subMap.find(naAccountID);
3393
3394 if (simIterator != subMap.end())
3395 {
3396 // Found
3397 simIterator->second.erase(uSeq);
3398
3399 if (simIterator->second.empty())
3400 {
3401 // Don't need hash entry.
3402 subMap.erase(simIterator);
3403 }
3404 }
3405 }
3406 }
3407
// Schedule a background job that walks an account's validated-ledger
// history backwards (in 1024-ledger windows, down to the genesis ledger)
// and streams each historical transaction to the account_history
// subscriber described by subInfo.
// NOTE(review): the function-name line and several interior lines (job
// scheduling, database accessor, marker declaration, reschedule call) are
// missing from this rendering -- confirm against the repository source.
3408 void
3410 {
3411 enum DatabaseType { Sqlite, None };
// Resolved once per process: which relational backend is available.
3412 static const auto databaseType = [&]() -> DatabaseType {
3413 // Use a dynamic_cast to return DatabaseType::None
3414 // on failure.
3415 if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
3416 {
3417 return DatabaseType::Sqlite;
3418 }
3419 return DatabaseType::None;
3420 }();
3421
// Without a usable database we cannot serve history: tell the client and
// tear down the subscription.
3422 if (databaseType == DatabaseType::None)
3423 {
3424 JLOG(m_journal.error())
3425 << "AccountHistory job for account "
3426 << toBase58(subInfo.index_->accountId_) << " no database";
3427 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3428 {
3429 sptr->send(rpcError(rpcINTERNAL), true);
3430 unsubAccountHistory(sptr, subInfo.index_->accountId_, false);
3431 }
3432 return;
3433 }
3434
3437 "AccountHistoryTxStream",
3438 [this, dbType = databaseType, subInfo]() {
3439 auto const& accountId = subInfo.index_->accountId_;
3440 auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_;
3441 auto& txHistoryIndex = subInfo.index_->historyTxIndex_;
3442
3443 JLOG(m_journal.trace())
3444 << "AccountHistory job for account " << toBase58(accountId)
3445 << " started. lastLedgerSeq=" << lastLedgerSeq;
3446
// Detect the account's earliest transaction, which terminates the
// backward walk.
3447 auto isFirstTx = [&](std::shared_ptr<Transaction> const& tx,
3448 std::shared_ptr<TxMeta> const& meta) -> bool {
3449 /*
3450 * genesis account: first tx is the one with seq 1
3451 * other account: first tx is the one created the account
3452 */
3453 if (accountId == genesisAccountId)
3454 {
3455 auto stx = tx->getSTransaction();
3456 if (stx->getAccountID(sfAccount) == accountId &&
3457 stx->getSeqProxy().value() == 1)
3458 return true;
3459 }
3460
// An ACCOUNT_ROOT created with this account in sfNewFields marks
// the account-creating transaction.
3461 for (auto& node : meta->getNodes())
3462 {
3463 if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT)
3464 continue;
3465
3466 if (node.isFieldPresent(sfNewFields))
3467 {
3468 if (auto inner = dynamic_cast<const STObject*>(
3469 node.peekAtPField(sfNewFields));
3470 inner)
3471 {
3472 if (inner->isFieldPresent(sfAccount) &&
3473 inner->getAccountID(sfAccount) == accountId)
3474 {
3475 return true;
3476 }
3477 }
3478 }
3479 }
3480
3481 return false;
3482 };
3483
// Send a plain JSON object to the subscriber; returns false when the
// subscriber is gone. Optionally unsubscribes afterwards.
3484 auto send = [&](Json::Value const& jvObj,
3485 bool unsubscribe) -> bool {
3486 if (auto sptr = subInfo.sinkWptr_.lock())
3487 {
3488 sptr->send(jvObj, true);
3489 if (unsubscribe)
3490 unsubAccountHistory(sptr, accountId, false);
3491 return true;
3492 }
3493
3494 return false;
3495 };
3496
// Same as send(), but chooses the JSON shape for the subscriber's
// API version.
3497 auto sendMultiApiJson = [&](MultiApiJson const& jvObj,
3498 bool unsubscribe) -> bool {
3499 if (auto sptr = subInfo.sinkWptr_.lock())
3500 {
3501 jvObj.visit(
3502 sptr->getApiVersion(), //
3503 [&](Json::Value const& jv) { sptr->send(jv, true); });
3504
3505 if (unsubscribe)
3506 unsubAccountHistory(sptr, accountId, false);
3507 return true;
3508 }
3509
3510 return false;
3511 };
3512
// Fetch the next page of transactions (newest first) for the current
// ledger window from the configured backend.
3513 auto getMoreTxns =
3514 [&](std::uint32_t minLedger,
3515 std::uint32_t maxLedger,
3520 switch (dbType)
3521 {
3522 case Sqlite: {
3523 auto db = static_cast<SQLiteDatabase*>(
3526 accountId, minLedger, maxLedger, marker, 0, true};
3527 return db->newestAccountTxPage(options);
3528 }
3529 default: {
3530 UNREACHABLE(
3531 "ripple::NetworkOPsImp::addAccountHistoryJob::"
3532 "getMoreTxns : invalid database type");
3533 return {};
3534 }
3535 }
3536 };
3537
3538 /*
3539 * search backward until the genesis ledger or asked to stop
3540 */
3541 while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_)
3542 {
// NOTE(review): feeChargeCount is reset every iteration, so the
// "Fee charged N times" log below always reports 0 -- looks like
// it was meant to live outside the loop; flagging, not fixing.
3543 int feeChargeCount = 0;
3544 if (auto sptr = subInfo.sinkWptr_.lock(); sptr)
3545 {
3546 sptr->getConsumer().charge(Resource::feeMediumBurdenRPC);
3547 ++feeChargeCount;
3548 }
3549 else
3550 {
3551 JLOG(m_journal.trace())
3552 << "AccountHistory job for account "
3553 << toBase58(accountId) << " no InfoSub. Fee charged "
3554 << feeChargeCount << " times.";
3555 return;
3556 }
3557
3558 // try to search in 1024 ledgers till reaching genesis ledgers
3559 auto startLedgerSeq =
3560 (lastLedgerSeq > 1024 + 2 ? lastLedgerSeq - 1024 : 2);
3561 JLOG(m_journal.trace())
3562 << "AccountHistory job for account " << toBase58(accountId)
3563 << ", working on ledger range [" << startLedgerSeq << ","
3564 << lastLedgerSeq << "]";
3565
// Only query the database when the whole window is covered by
// locally validated ledgers.
3566 auto haveRange = [&]() -> bool {
3567 std::uint32_t validatedMin = UINT_MAX;
3568 std::uint32_t validatedMax = 0;
3569 auto haveSomeValidatedLedgers =
3571 validatedMin, validatedMax);
3572
3573 return haveSomeValidatedLedgers &&
3574 validatedMin <= startLedgerSeq &&
3575 lastLedgerSeq <= validatedMax;
3576 }();
3577
3578 if (!haveRange)
3579 {
3580 JLOG(m_journal.debug())
3581 << "AccountHistory reschedule job for account "
3582 << toBase58(accountId) << ", incomplete ledger range ["
3583 << startLedgerSeq << "," << lastLedgerSeq << "]";
3585 return;
3586 }
3587
// Page through the window until the marker is exhausted.
3589 while (!subInfo.index_->stopHistorical_)
3590 {
3591 auto dbResult =
3592 getMoreTxns(startLedgerSeq, lastLedgerSeq, marker);
3593 if (!dbResult)
3594 {
3595 JLOG(m_journal.debug())
3596 << "AccountHistory job for account "
3597 << toBase58(accountId) << " getMoreTxns failed.";
3598 send(rpcError(rpcINTERNAL), true);
3599 return;
3600 }
3601
3602 auto const& txns = dbResult->first;
3603 marker = dbResult->second;
3604 size_t num_txns = txns.size();
3605 for (size_t i = 0; i < num_txns; ++i)
3606 {
3607 auto const& [tx, meta] = txns[i];
3608
3609 if (!tx || !meta)
3610 {
3611 JLOG(m_journal.debug())
3612 << "AccountHistory job for account "
3613 << toBase58(accountId) << " empty tx or meta.";
3614 send(rpcError(rpcINTERNAL), true);
3615 return;
3616 }
3617 auto curTxLedger =
3619 tx->getLedger());
3620 if (!curTxLedger)
3621 {
3622 JLOG(m_journal.debug())
3623 << "AccountHistory job for account "
3624 << toBase58(accountId) << " no ledger.";
3625 send(rpcError(rpcINTERNAL), true);
3626 return;
3627 }
3629 tx->getSTransaction();
3630 if (!stTxn)
3631 {
3632 JLOG(m_journal.debug())
3633 << "AccountHistory job for account "
3634 << toBase58(accountId)
3635 << " getSTransaction failed.";
3636 send(rpcError(rpcINTERNAL), true);
3637 return;
3638 }
3639
3640 auto const mRef = std::ref(*meta);
3641 auto const trR = meta->getResultTER();
3642 MultiApiJson jvTx =
3643 transJson(stTxn, trR, true, curTxLedger, mRef);
3644
// Historical indices count down (forward stream counts up).
3645 jvTx.set(
3646 jss::account_history_tx_index, txHistoryIndex--);
// Boundary flag marks the last tx reported for a ledger.
3647 if (i + 1 == num_txns ||
3648 txns[i + 1].first->getLedger() != tx->getLedger())
3649 jvTx.set(jss::account_history_boundary, true);
3650
3651 if (isFirstTx(tx, meta))
3652 {
3653 jvTx.set(jss::account_history_tx_first, true);
3654 sendMultiApiJson(jvTx, false);
3655
3656 JLOG(m_journal.trace())
3657 << "AccountHistory job for account "
3658 << toBase58(accountId)
3659 << " done, found last tx.";
3660 return;
3661 }
3662 else
3663 {
3664 sendMultiApiJson(jvTx, false);
3665 }
3666 }
3667
3668 if (marker)
3669 {
3670 JLOG(m_journal.trace())
3671 << "AccountHistory job for account "
3672 << toBase58(accountId)
3673 << " paging, marker=" << marker->ledgerSeq << ":"
3674 << marker->txnSeq;
3675 }
3676 else
3677 {
3678 break;
3679 }
3680 }
3681
// Advance the window downward; stop at the genesis ledger.
3682 if (!subInfo.index_->stopHistorical_)
3683 {
3684 lastLedgerSeq = startLedgerSeq - 1;
3685 if (lastLedgerSeq <= 1)
3686 {
3687 JLOG(m_journal.trace())
3688 << "AccountHistory job for account "
3689 << toBase58(accountId)
3690 << " done, reached genesis ledger.";
3691 return;
3692 }
3693 }
3694 }
3695 });
3696 }
3697
// Initialize an account_history subscription against a validated ledger:
// record the separation point, verify the account exists (and, for the
// genesis account, that it has ever transacted), then kick off the
// historical streaming job.
// NOTE(review): the function-name line and the subInfo parameter line are
// missing from this rendering -- confirm against source.
3698 void
3700 std::shared_ptr<ReadView const> const& ledger,
3702 {
// Transactions after this ledger go to the forward stream; the job
// below backfills everything at or before it.
3703 subInfo.index_->separationLedgerSeq_ = ledger->seq();
3704 auto const& accountId = subInfo.index_->accountId_;
3705 auto const accountKeylet = keylet::account(accountId);
3706 if (!ledger->exists(accountKeylet))
3707 {
3708 JLOG(m_journal.debug())
3709 << "subAccountHistoryStart, no account " << toBase58(accountId)
3710 << ", no need to add AccountHistory job.";
3711 return;
3712 }
3713 if (accountId == genesisAccountId)
3714 {
3715 if (auto const sleAcct = ledger->read(accountKeylet); sleAcct)
3716 {
// Sequence still 1 means the genesis account never sent a tx.
3717 if (sleAcct->getFieldU32(sfSequence) == 1)
3718 {
3719 JLOG(m_journal.debug())
3720 << "subAccountHistoryStart, genesis account "
3721 << toBase58(accountId)
3722 << " does not have tx, no need to add AccountHistory job.";
3723 return;
3724 }
3725 }
3726 else
3727 {
3728 UNREACHABLE(
3729 "ripple::NetworkOPsImp::subAccountHistoryStart : failed to "
3730 "access genesis account");
3731 return;
3732 }
3733 }
3734 subInfo.index_->historyLastLedgerSeq_ = ledger->seq();
3735 subInfo.index_->haveHistorical_ = true;
3736
3737 JLOG(m_journal.debug())
3738 << "subAccountHistoryStart, add AccountHistory job: accountId="
3739 << toBase58(accountId) << ", currentLedgerSeq=" << ledger->seq();
3740
3741 addAccountHistoryJob(subInfo);
3742 }
3743
// Register an account_history subscription for the listener. Returns
// rpcINVALID_PARAMS if the listener is already subscribed to the account,
// otherwise rpcSUCCESS. Streaming starts immediately when a validated
// ledger exists; otherwise it is deferred until the first one is published
// (see pubLedger's firstTime handling).
// NOTE(review): the return-type/name lines, a lock line, and the
// SubAccountHistoryInfoWeak declaration are missing from this rendering.
3746 InfoSub::ref isrListener,
3747 AccountID const& accountId)
3748 {
3749 if (!isrListener->insertSubAccountHistory(accountId))
3750 {
3751 JLOG(m_journal.debug())
3752 << "subAccountHistory, already subscribed to account "
3753 << toBase58(accountId);
3754 return rpcINVALID_PARAMS;
3755 }
3756
3759 isrListener, std::make_shared<SubAccountHistoryIndex>(accountId)};
3760 auto simIterator = mSubAccountHistory.find(accountId);
3761 if (simIterator == mSubAccountHistory.end())
3762 {
3764 inner.emplace(isrListener->getSeq(), ahi);
3766 simIterator, std::make_pair(accountId, inner));
3767 }
3768 else
3769 {
3770 simIterator->second.emplace(isrListener->getSeq(), ahi);
3771 }
3772
3773 auto const ledger = app_.getLedgerMaster().getValidatedLedger();
3774 if (ledger)
3775 {
3776 subAccountHistoryStart(ledger, ahi);
3777 }
3778 else
3779 {
3780 // The node does not have validated ledgers, so wait for
3781 // one before start streaming.
3782 // In this case, the subscription is also considered successful.
3783 JLOG(m_journal.debug())
3784 << "subAccountHistory, no validated ledger yet, delay start";
3785 }
3786
3787 return rpcSUCCESS;
3788 }
3789
// Unsubscribe a listener from an account's history stream. historyOnly
// stops only the historical backfill, keeping the forward subscription.
// NOTE(review): the function-name line is missing from this rendering.
3790 void
3792 InfoSub::ref isrListener,
3793 AccountID const& account,
3794 bool historyOnly)
3795 {
3796 if (!historyOnly)
3797 isrListener->deleteSubAccountHistory(account);
3798 unsubAccountHistoryInternal(isrListener->getSeq(), account, historyOnly);
3799 }
3800
// Server-side half of unsubAccountHistory: signal the background job to
// stop and, unless historyOnly, remove the map entry for this listener.
// NOTE(review): the function-name line and a lock line are missing from
// this rendering -- confirm against source.
3801 void
3803 std::uint64_t seq,
3804 const AccountID& account,
3805 bool historyOnly)
3806 {
3808 auto simIterator = mSubAccountHistory.find(account);
3809 if (simIterator != mSubAccountHistory.end())
3810 {
3811 auto& subInfoMap = simIterator->second;
3812 auto subInfoIter = subInfoMap.find(seq);
3813 if (subInfoIter != subInfoMap.end())
3814 {
// Cooperative stop flag polled by the AccountHistory job.
3815 subInfoIter->second.index_->stopHistorical_ = true;
3816 }
3817
3818 if (!historyOnly)
3819 {
3820 simIterator->second.erase(seq);
3821 if (simIterator->second.empty())
3822 {
3823 mSubAccountHistory.erase(simIterator);
3824 }
3825 }
3826 JLOG(m_journal.debug())
3827 << "unsubAccountHistory, account " << toBase58(account)
3828 << ", historyOnly = " << (historyOnly ? "true" : "false");
3829 }
3830 }
3831
// Subscribe a listener to an order book; always reports success.
// NOTE(review): the signature line is missing from this rendering; the
// body registers the listener with the book's (created-on-demand)
// BookListeners object.
3832 bool
3834 {
3835 if (auto listeners = app_.getOrderBookDB().makeBookListeners(book))
3836 listeners->addSubscriber(isrListener);
3837 else
3838 UNREACHABLE("ripple::NetworkOPsImp::subBook : null book listeners");
3839 return true;
3840 }
3841
// Unsubscribe a listener (by sequence) from an order book; always reports
// success, even when no listener set exists for the book.
// NOTE(review): the signature line is missing from this rendering.
3842 bool
3844 {
3845 if (auto listeners = app_.getOrderBookDB().getBookListeners(book))
3846 listeners->removeSubscriber(uSeq);
3847
3848 return true;
3849 }
3850
3854{
3855 // This code-path is exclusively used when the server is in standalone
3856 // mode via `ledger_accept`
3857 XRPL_ASSERT(
3858 m_standalone, "ripple::NetworkOPsImp::acceptLedger : is standalone");
3859
3860 if (!m_standalone)
3861 Throw<std::runtime_error>(
3862 "Operation only possible in STANDALONE mode.");
3863
3864 // FIXME Could we improve on this and remove the need for a specialized
3865 // API in Consensus?
3867 mConsensus.simulate(app_.timeKeeper().closeTime(), consensusDelay);
3868 return m_ledgerMaster.getCurrentLedger()->info().seq;
3869}
3870
3871// <-- bool: true=added, false=already there
// Subscribe to the ledger stream. Fills jvResult with a snapshot of the
// latest validated ledger (index, hash, close time, fees, reserves) and
// returns true if the listener was newly added.
// NOTE(review): the signature line, a lock line, and the condition
// guarding validated_ledgers are missing from this rendering.
3872 bool
3874 {
3875 if (auto lpClosed = m_ledgerMaster.getValidatedLedger())
3876 {
3877 jvResult[jss::ledger_index] = lpClosed->info().seq;
3878 jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
3879 jvResult[jss::ledger_time] = Json::Value::UInt(
3880 lpClosed->info().closeTime.time_since_epoch().count());
// fee_ref is only reported for pre-XRPFees ledgers (deprecated).
3881 if (!lpClosed->rules().enabled(featureXRPFees))
3882 jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
3883 jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();
3884 jvResult[jss::reserve_base] =
3885 lpClosed->fees().accountReserve(0).jsonClipped();
3886 jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped();
3887 }
3888
3890 {
3891 jvResult[jss::validated_ledgers] =
3893 }
3894
3896 return mStreamMaps[sLedger]
3897 .emplace(isrListener->getSeq(), isrListener)
3898 .second;
3899 }
3900
3901// <-- bool: true=added, false=already there
// Subscribe to the book_changes stream; true if newly added.
// NOTE(review): the signature, lock, and map-selection lines are missing
// from this rendering -- presumably mStreamMaps[sBookChanges]; confirm.
3902 bool
3904 {
3907 .emplace(isrListener->getSeq(), isrListener)
3908 .second;
3909 }
3910
3911// <-- bool: true=erased, false=was not there
// Unsubscribe from the ledger stream; true if an entry was erased.
// NOTE(review): the signature and lock lines are missing from this view.
3912 bool
3914 {
3916 return mStreamMaps[sLedger].erase(uSeq);
3917 }
3918
3919// <-- bool: true=erased, false=was not there
// Unsubscribe from the book_changes stream; true if an entry was erased.
// NOTE(review): the signature and lock lines are missing from this view.
3920 bool
3922 {
3924 return mStreamMaps[sBookChanges].erase(uSeq);
3925 }
3926
3927// <-- bool: true=added, false=already there
// Subscribe to the manifests stream; true if newly added.
// NOTE(review): the signature and lock lines are missing from this view.
3928 bool
3930 {
3932 return mStreamMaps[sManifests]
3933 .emplace(isrListener->getSeq(), isrListener)
3934 .second;
3935 }
3936
3937// <-- bool: true=erased, false=was not there
3938bool
3940{
3942 return mStreamMaps[sManifests].erase(uSeq);
3943}
3944
3945// <-- bool: true=added, false=already there
3946bool
3948 InfoSub::ref isrListener,
3949 Json::Value& jvResult,
3950 bool admin)
3951{
3952 uint256 uRandom;
3953
3954 if (m_standalone)
3955 jvResult[jss::stand_alone] = m_standalone;
3956
3957 // CHECKME: is it necessary to provide a random number here?
3958 beast::rngfill(uRandom.begin(), uRandom.size(), crypto_prng());
3959
3960 auto const& feeTrack = app_.getFeeTrack();
3961 jvResult[jss::random] = to_string(uRandom);
3962 jvResult[jss::server_status] = strOperatingMode(admin);
3963 jvResult[jss::load_base] = feeTrack.getLoadBase();
3964 jvResult[jss::load_factor] = feeTrack.getLoadFactor();
3965 jvResult[jss::hostid] = getHostId(admin);
3966 jvResult[jss::pubkey_node] =
3968
3970 return mStreamMaps[sServer]
3971 .emplace(isrListener->getSeq(), isrListener)
3972 .second;
3973}
3974
3975// <-- bool: true=erased, false=was not there
3976bool
3978{
3980 return mStreamMaps[sServer].erase(uSeq);
3981}
3982
3983// <-- bool: true=added, false=already there
3984bool
3986{
3989 .emplace(isrListener->getSeq(), isrListener)
3990 .second;
3991}
3992
3993// <-- bool: true=erased, false=was not there
3994bool
3996{
3998 return mStreamMaps[sTransactions].erase(uSeq);
3999}
4000
4001// <-- bool: true=added, false=already there
4002bool
4004{
4007 .emplace(isrListener->getSeq(), isrListener)
4008 .second;
4009}
4010
4011// <-- bool: true=erased, false=was not there
4012bool
4014{
4016 return mStreamMaps[sRTTransactions].erase(uSeq);
4017}
4018
4019// <-- bool: true=added, false=already there
4020bool
4022{
4025 .emplace(isrListener->getSeq(), isrListener)
4026 .second;
4027}
4028
4029void
4031{
4032 accounting_.json(obj);
4033}
4034
4035// <-- bool: true=erased, false=was not there
4036bool
4038{
4040 return mStreamMaps[sValidations].erase(uSeq);
4041}
4042
4043// <-- bool: true=added, false=already there
4044bool
4046{
4048 return mStreamMaps[sPeerStatus]
4049 .emplace(isrListener->getSeq(), isrListener)
4050 .second;
4051}
4052
4053// <-- bool: true=erased, false=was not there
4054bool
4056{
4058 return mStreamMaps[sPeerStatus].erase(uSeq);
4059}
4060
4061// <-- bool: true=added, false=already there
4062bool
4064{
4067 .emplace(isrListener->getSeq(), isrListener)
4068 .second;
4069}
4070
4071// <-- bool: true=erased, false=was not there
4072bool
4074{
4076 return mStreamMaps[sConsensusPhase].erase(uSeq);
4077}
4078
4081{
4083
4084 subRpcMapType::iterator it = mRpcSubMap.find(strUrl);
4085
4086 if (it != mRpcSubMap.end())
4087 return it->second;
4088
4089 return InfoSub::pointer();
4090}
4091
4094{
4096
4097 mRpcSubMap.emplace(strUrl, rspEntry);
4098
4099 return rspEntry;
4100}
4101
4102bool
4104{
4106 auto pInfo = findRpcSub(strUrl);
4107
4108 if (!pInfo)
4109 return false;
4110
4111 // check to see if any of the stream maps still hold a weak reference to
4112 // this entry before removing
4113 for (SubMapType const& map : mStreamMaps)
4114 {
4115 if (map.find(pInfo->getSeq()) != map.end())
4116 return false;
4117 }
4118 mRpcSubMap.erase(strUrl);
4119 return true;
4120}
4121
4122#ifndef USE_NEW_BOOK_PAGE
4123
4124// NIKB FIXME this should be looked at. There's no reason why this shouldn't
4125// work, but it demonstrated poor performance.
4126//
4127void
4130 Book const& book,
4131 AccountID const& uTakerID,
4132 bool const bProof,
4133 unsigned int iLimit,
4134 Json::Value const& jvMarker,
4135 Json::Value& jvResult)
4136{ // CAUTION: This is the old get book page logic
4137 Json::Value& jvOffers =
4138 (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4139
4141 const uint256 uBookBase = getBookBase(book);
4142 const uint256 uBookEnd = getQualityNext(uBookBase);
4143 uint256 uTipIndex = uBookBase;
4144
4145 if (auto stream = m_journal.trace())
4146 {
4147 stream << "getBookPage:" << book;
4148 stream << "getBookPage: uBookBase=" << uBookBase;
4149 stream << "getBookPage: uBookEnd=" << uBookEnd;
4150 stream << "getBookPage: uTipIndex=" << uTipIndex;
4151 }
4152
4153 ReadView const& view = *lpLedger;
4154
4155 bool const bGlobalFreeze = isGlobalFrozen(view, book.out.account) ||
4156 isGlobalFrozen(view, book.in.account);
4157
4158 bool bDone = false;
4159 bool bDirectAdvance = true;
4160
4161 std::shared_ptr<SLE const> sleOfferDir;
4162 uint256 offerIndex;
4163 unsigned int uBookEntry;
4164 STAmount saDirRate;
4165
4166 auto const rate = transferRate(view, book.out.account);
4167 auto viewJ = app_.journal("View");
4168
4169 while (!bDone && iLimit-- > 0)
4170 {
4171 if (bDirectAdvance)
4172 {
4173 bDirectAdvance = false;
4174
4175 JLOG(m_journal.trace()) << "getBookPage: bDirectAdvance";
4176
4177 auto const ledgerIndex = view.succ(uTipIndex, uBookEnd);
4178 if (ledgerIndex)
4179 sleOfferDir = view.read(keylet::page(*ledgerIndex));
4180 else
4181 sleOfferDir.reset();
4182
4183 if (!sleOfferDir)
4184 {
4185 JLOG(m_journal.trace()) << "getBookPage: bDone";
4186 bDone = true;
4187 }
4188 else
4189 {
4190 uTipIndex = sleOfferDir->key();
4191 saDirRate = amountFromQuality(getQuality(uTipIndex));
4192
4193 cdirFirst(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex);
4194
4195 JLOG(m_journal.trace())
4196 << "getBookPage: uTipIndex=" << uTipIndex;
4197 JLOG(m_journal.trace())
4198 << "getBookPage: offerIndex=" << offerIndex;
4199 }
4200 }
4201
4202 if (!bDone)
4203 {
4204 auto sleOffer = view.read(keylet::offer(offerIndex));
4205
4206 if (sleOffer)
4207 {
4208 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4209 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4210 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4211 STAmount saOwnerFunds;
4212 bool firstOwnerOffer(true);
4213
4214 if (book.out.account == uOfferOwnerID)
4215 {
4216 // If an offer is selling issuer's own IOUs, it is fully
4217 // funded.
4218 saOwnerFunds = saTakerGets;
4219 }
4220 else if (bGlobalFreeze)
4221 {
4222 // If either asset is globally frozen, consider all offers
4223 // that aren't ours to be totally unfunded
4224 saOwnerFunds.clear(book.out);
4225 }
4226 else
4227 {
4228 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4229 if (umBalanceEntry != umBalance.end())
4230 {
4231 // Found in running balance table.
4232
4233 saOwnerFunds = umBalanceEntry->second;
4234 firstOwnerOffer = false;
4235 }
4236 else
4237 {
4238 // Did not find balance in table.
4239
4240 saOwnerFunds = accountHolds(
4241 view,
4242 uOfferOwnerID,
4243 book.out.currency,
4244 book.out.account,
4246 viewJ);
4247
4248 if (saOwnerFunds < beast::zero)
4249 {
4250 // Treat negative funds as zero.
4251
4252 saOwnerFunds.clear();
4253 }
4254 }
4255 }
4256
4257 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4258
4259 STAmount saTakerGetsFunded;
4260 STAmount saOwnerFundsLimit = saOwnerFunds;
4261 Rate offerRate = parityRate;
4262
4263 if (rate != parityRate
4264 // Have a tranfer fee.
4265 && uTakerID != book.out.account
4266 // Not taking offers of own IOUs.
4267 && book.out.account != uOfferOwnerID)
4268 // Offer owner not issuing ownfunds
4269 {
4270 // Need to charge a transfer fee to offer owner.
4271 offerRate = rate;
4272 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4273 }
4274
4275 if (saOwnerFundsLimit >= saTakerGets)
4276 {
4277 // Sufficient funds no shenanigans.
4278 saTakerGetsFunded = saTakerGets;
4279 }
4280 else
4281 {
4282 // Only provide, if not fully funded.
4283
4284 saTakerGetsFunded = saOwnerFundsLimit;
4285
4286 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4287 std::min(
4288 saTakerPays,
4289 multiply(
4290 saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4291 .setJson(jvOffer[jss::taker_pays_funded]);
4292 }
4293
4294 STAmount saOwnerPays = (parityRate == offerRate)
4295 ? saTakerGetsFunded
4296 : std::min(
4297 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4298
4299 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4300
4301 // Include all offers funded and unfunded
4302 Json::Value& jvOf = jvOffers.append(jvOffer);
4303 jvOf[jss::quality] = saDirRate.getText();
4304
4305 if (firstOwnerOffer)
4306 jvOf[jss::owner_funds] = saOwnerFunds.getText();
4307 }
4308 else
4309 {
4310 JLOG(m_journal.warn()) << "Missing offer";
4311 }
4312
4313 if (!cdirNext(view, uTipIndex, sleOfferDir, uBookEntry, offerIndex))
4314 {
4315 bDirectAdvance = true;
4316 }
4317 else
4318 {
4319 JLOG(m_journal.trace())
4320 << "getBookPage: offerIndex=" << offerIndex;
4321 }
4322 }
4323 }
4324
4325 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4326 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4327}
4328
4329#else
4330
4331// This is the new code that uses the book iterators
4332// It has temporarily been disabled
4333
4334void
4337 Book const& book,
4338 AccountID const& uTakerID,
4339 bool const bProof,
4340 unsigned int iLimit,
4341 Json::Value const& jvMarker,
4342 Json::Value& jvResult)
4343{
4344 auto& jvOffers = (jvResult[jss::offers] = Json::Value(Json::arrayValue));
4345
4347
4348 MetaView lesActive(lpLedger, tapNONE, true);
4349 OrderBookIterator obIterator(lesActive, book);
4350
4351 auto const rate = transferRate(lesActive, book.out.account);
4352
4353 const bool bGlobalFreeze = lesActive.isGlobalFrozen(book.out.account) ||
4354 lesActive.isGlobalFrozen(book.in.account);
4355
4356 while (iLimit-- > 0 && obIterator.nextOffer())
4357 {
4358 SLE::pointer sleOffer = obIterator.getCurrentOffer();
4359 if (sleOffer)
4360 {
4361 auto const uOfferOwnerID = sleOffer->getAccountID(sfAccount);
4362 auto const& saTakerGets = sleOffer->getFieldAmount(sfTakerGets);
4363 auto const& saTakerPays = sleOffer->getFieldAmount(sfTakerPays);
4364 STAmount saDirRate = obIterator.getCurrentRate();
4365 STAmount saOwnerFunds;
4366
4367 if (book.out.account == uOfferOwnerID)
4368 {
4369 // If offer is selling issuer's own IOUs, it is fully funded.
4370 saOwnerFunds = saTakerGets;
4371 }
4372 else if (bGlobalFreeze)
4373 {
4374 // If either asset is globally frozen, consider all offers
4375 // that aren't ours to be totally unfunded
4376 saOwnerFunds.clear(book.out);
4377 }
4378 else
4379 {
4380 auto umBalanceEntry = umBalance.find(uOfferOwnerID);
4381
4382 if (umBalanceEntry != umBalance.end())
4383 {
4384 // Found in running balance table.
4385
4386 saOwnerFunds = umBalanceEntry->second;
4387 }
4388 else
4389 {
4390 // Did not find balance in table.
4391
4392 saOwnerFunds = lesActive.accountHolds(
4393 uOfferOwnerID,
4394 book.out.currency,
4395 book.out.account,
4397
4398 if (saOwnerFunds.isNegative())
4399 {
4400 // Treat negative funds as zero.
4401
4402 saOwnerFunds.zero();
4403 }
4404 }
4405 }
4406
4407 Json::Value jvOffer = sleOffer->getJson(JsonOptions::none);
4408
4409 STAmount saTakerGetsFunded;
4410 STAmount saOwnerFundsLimit = saOwnerFunds;
4411 Rate offerRate = parityRate;
4412
4413 if (rate != parityRate
4414 // Have a tranfer fee.
4415 && uTakerID != book.out.account
4416 // Not taking offers of own IOUs.
4417 && book.out.account != uOfferOwnerID)
4418 // Offer owner not issuing ownfunds
4419 {
4420 // Need to charge a transfer fee to offer owner.
4421 offerRate = rate;
4422 saOwnerFundsLimit = divide(saOwnerFunds, offerRate);
4423 }
4424
4425 if (saOwnerFundsLimit >= saTakerGets)
4426 {
4427 // Sufficient funds no shenanigans.
4428 saTakerGetsFunded = saTakerGets;
4429 }
4430 else
4431 {
4432 // Only provide, if not fully funded.
4433 saTakerGetsFunded = saOwnerFundsLimit;
4434
4435 saTakerGetsFunded.setJson(jvOffer[jss::taker_gets_funded]);
4436
4437 // TOOD(tom): The result of this expression is not used - what's
4438 // going on here?
4439 std::min(
4440 saTakerPays,
4441 multiply(saTakerGetsFunded, saDirRate, saTakerPays.issue()))
4442 .setJson(jvOffer[jss::taker_pays_funded]);
4443 }
4444
4445 STAmount saOwnerPays = (parityRate == offerRate)
4446 ? saTakerGetsFunded
4447 : std::min(
4448 saOwnerFunds, multiply(saTakerGetsFunded, offerRate));
4449
4450 umBalance[uOfferOwnerID] = saOwnerFunds - saOwnerPays;
4451
4452 if (!saOwnerFunds.isZero() || uOfferOwnerID == uTakerID)
4453 {
4454 // Only provide funded offers and offers of the taker.
4455 Json::Value& jvOf = jvOffers.append(jvOffer);
4456 jvOf[jss::quality] = saDirRate.getText();
4457 }
4458 }
4459 }
4460
4461 // jvResult[jss::marker] = Json::Value(Json::arrayValue);
4462 // jvResult[jss::nodes] = Json::Value(Json::arrayValue);
4463}
4464
4465#endif
4466
4467inline void
4469{
4470 auto [counters, mode, start, initialSync] = accounting_.getCounterData();
4471 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4473 counters[static_cast<std::size_t>(mode)].dur += current;
4474
4477 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4478 .dur.count());
4480 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4481 .dur.count());
4483 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].dur.count());
4485 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4486 .dur.count());
4488 counters[static_cast<std::size_t>(OperatingMode::FULL)].dur.count());
4489
4491 counters[static_cast<std::size_t>(OperatingMode::DISCONNECTED)]
4492 .transitions);
4494 counters[static_cast<std::size_t>(OperatingMode::CONNECTED)]
4495 .transitions);
4497 counters[static_cast<std::size_t>(OperatingMode::SYNCING)].transitions);
4499 counters[static_cast<std::size_t>(OperatingMode::TRACKING)]
4500 .transitions);
4502 counters[static_cast<std::size_t>(OperatingMode::FULL)].transitions);
4503}
4504
4505void
4507{
4508 auto now = std::chrono::steady_clock::now();
4509
4510 std::lock_guard lock(mutex_);
4511 ++counters_[static_cast<std::size_t>(om)].transitions;
4512 if (om == OperatingMode::FULL &&
4513 counters_[static_cast<std::size_t>(om)].transitions == 1)
4514 {
4515 initialSyncUs_ = std::chrono::duration_cast<std::chrono::microseconds>(
4516 now - processStart_)
4517 .count();
4518 }
4519 counters_[static_cast<std::size_t>(mode_)].dur +=
4520 std::chrono::duration_cast<std::chrono::microseconds>(now - start_);
4521
4522 mode_ = om;
4523 start_ = now;
4524}
4525
4526void
4528{
4529 auto [counters, mode, start, initialSync] = getCounterData();
4530 auto const current = std::chrono::duration_cast<std::chrono::microseconds>(
4532 counters[static_cast<std::size_t>(mode)].dur += current;
4533
4534 obj[jss::state_accounting] = Json::objectValue;
4536 i <= static_cast<std::size_t>(OperatingMode::FULL);
4537 ++i)
4538 {
4539 obj[jss::state_accounting][states_[i]] = Json::objectValue;
4540 auto& state = obj[jss::state_accounting][states_[i]];
4541 state[jss::transitions] = std::to_string(counters[i].transitions);
4542 state[jss::duration_us] = std::to_string(counters[i].dur.count());
4543 }
4544 obj[jss::server_state_duration_us] = std::to_string(current.count());
4545 if (initialSync)
4546 obj[jss::initial_sync_duration_us] = std::to_string(initialSync);
4547}
4548
4549//------------------------------------------------------------------------------
4550
4553 Application& app,
4555 bool standalone,
4556 std::size_t minPeerCount,
4557 bool startvalid,
4558 JobQueue& job_queue,
4560 ValidatorKeys const& validatorKeys,
4561 boost::asio::io_service& io_svc,
4562 beast::Journal journal,
4563 beast::insight::Collector::ptr const& collector)
4564{
4565 return std::make_unique<NetworkOPsImp>(
4566 app,
4567 clock,
4568 standalone,
4569 minPeerCount,
4570 startvalid,
4571 job_queue,
4573 validatorKeys,
4574 io_svc,
4575 journal,
4576 collector);
4577}
4578
4579} // namespace ripple
T back_inserter(T... args)
T begin(T... args)
Decorator for streaming out compact json.
Definition: json_writer.h:317
Lightweight wrapper to tag static string.
Definition: json_value.h:61
Represents a JSON value.
Definition: json_value.h:147
Json::UInt UInt
Definition: json_value.h:154
Value get(UInt index, const Value &defaultValue) const
If the array contains at least index+1 elements, returns the element value, otherwise returns default...
Definition: json_value.cpp:841
Value & append(const Value &value)
Append value to array at the end.
Definition: json_value.cpp:891
bool isMember(const char *key) const
Return true if the object has a member named key.
Definition: json_value.cpp:943
A generic endpoint for log messages.
Definition: Journal.h:59
Stream error() const
Definition: Journal.h:335
Stream debug() const
Definition: Journal.h:317
Stream info() const
Definition: Journal.h:323
Stream trace() const
Severity stream access functions.
Definition: Journal.h:311
Stream warn() const
Definition: Journal.h:329
A metric for measuring an integral value.
Definition: Gauge.h:40
void set(value_type value) const
Set the value on the gauge.
Definition: Gauge.h:68
A reference to a handler for performing polled collection.
Definition: Hook.h:32
A transaction that is in a closed ledger.
boost::container::flat_set< AccountID > const & getAffected() const
std::shared_ptr< STTx const > const & getTxn() const
TxMeta const & getMeta() const
virtual std::optional< NetClock::time_point > firstUnsupportedExpected() const =0
virtual Config & config()=0
virtual Overlay & overlay()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual OpenLedger & openLedger()=0
virtual beast::Journal journal(std::string const &name)=0
virtual NodeStore::Database & getNodeStore()=0
virtual ServerHandler & getServerHandler()=0
virtual std::chrono::milliseconds getIOLatency()=0
virtual OrderBookDB & getOrderBookDB()=0
virtual TimeKeeper & timeKeeper()=0
virtual TaggedCache< uint256, AcceptedLedger > & getAcceptedLedgerCache()=0
virtual JobQueue & getJobQueue()=0
virtual InboundLedgers & getInboundLedgers()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual RelationalDatabase & getRelationalDatabase()=0
virtual ManifestCache & validatorManifests()=0
virtual TxQ & getTxQ()=0
virtual perf::PerfLog & getPerfLog()=0
virtual Cluster & cluster()=0
virtual AmendmentTable & getAmendmentTable()=0
virtual std::pair< PublicKey, SecretKey > const & nodeIdentity()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Specifies an order book.
Definition: Book.h:34
Issue in
Definition: Book.h:36
Issue out
Definition: Book.h:37
Holds transactions which were deferred to the next pass of consensus.
The role of a ClosureCounter is to assist in shutdown by letting callers wait for the completion of c...
std::string const & name() const
Definition: ClusterNode.h:45
std::uint32_t getLoadFee() const
Definition: ClusterNode.h:51
NetClock::time_point getReportTime() const
Definition: ClusterNode.h:57
PublicKey const & identity() const
Definition: ClusterNode.h:63
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:48
std::string SERVER_DOMAIN
Definition: Config.h:286
std::size_t NODE_SIZE
Definition: Config.h:220
static constexpr std::uint32_t FEE_UNITS_DEPRECATED
Definition: Config.h:167
int RELAY_UNTRUSTED_VALIDATIONS
Definition: Config.h:176
virtual void clearFailures()=0
virtual Json::Value getInfo()=0
std::shared_ptr< InfoSub > pointer
Definition: InfoSub.h:54
AccountID account
Definition: Issue.h:39
Currency currency
Definition: Issue.h:38
A pool of threads to perform work.
Definition: JobQueue.h:56
Json::Value getJson(int c=0)
Definition: JobQueue.cpp:212
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
std::shared_ptr< Ledger const > getValidatedLedger()
bool haveValidated()
Whether we have ever fully validated a ledger.
Definition: LedgerMaster.h:266
std::shared_ptr< ReadView const > getCurrentLedger()
bool getValidatedRange(std::uint32_t &minVal, std::uint32_t &maxVal)
std::shared_ptr< Ledger const > getClosedLedger()
Definition: LedgerMaster.h:80
std::string getCompleteLedgers()
std::size_t getFetchPackCacheSize() const
std::shared_ptr< ReadView const > getPublishedLedger()
std::shared_ptr< Ledger const > getLedgerBySeq(std::uint32_t index)
std::chrono::seconds getValidatedLedgerAge()
Manages the current fee schedule.
Definition: LoadFeeTrack.h:46
std::uint32_t getClusterFee() const
Definition: LoadFeeTrack.h:83
std::uint32_t getLocalFee() const
Definition: LoadFeeTrack.h:76
std::uint32_t getLoadBase() const
Definition: LoadFeeTrack.h:90
std::uint32_t getRemoteFee() const
Definition: LoadFeeTrack.h:69
std::uint32_t getLoadFactor() const
Definition: LoadFeeTrack.h:96
Manages load sources.
Definition: LoadManager.h:46
void resetDeadlockDetector()
Reset the deadlock detection timer.
Definition: LoadManager.cpp:63
PublicKey getMasterKey(PublicKey const &pk) const
Returns ephemeral signing key's master public key.
State accounting records two attributes for each possible server state: 1) Amount of time spent in ea...
Definition: NetworkOPs.cpp:141
void mode(OperatingMode om)
Record state transition.
void json(Json::Value &obj) const
Output state counters in JSON format.
std::array< Counters, 5 > counters_
Definition: NetworkOPs.cpp:151
std::chrono::steady_clock::time_point start_
Definition: NetworkOPs.cpp:153
static std::array< Json::StaticString const, 5 > const states_
Definition: NetworkOPs.cpp:157
std::chrono::steady_clock::time_point const processStart_
Definition: NetworkOPs.cpp:155
Transaction with input flags and results to be applied in batches.
Definition: NetworkOPs.cpp:92
TransactionStatus(std::shared_ptr< Transaction > t, bool a, bool l, FailHard f)
Definition: NetworkOPs.cpp:101
std::shared_ptr< Transaction > const transaction
Definition: NetworkOPs.cpp:94
boost::asio::steady_timer accountHistoryTxTimer_
Definition: NetworkOPs.cpp:731
void pubProposedTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result) override
OperatingMode getOperatingMode() const override
Definition: NetworkOPs.cpp:863
std::string strOperatingMode(OperatingMode const mode, bool const admin) const override
std::vector< TransactionStatus > mTransactions
Definition: NetworkOPs.cpp:775
bool unsubBookChanges(std::uint64_t uListener) override
std::atomic< OperatingMode > mMode
Definition: NetworkOPs.cpp:721
Json::Value getLedgerFetchInfo() override
bool isUNLBlocked() override
RCLConsensus mConsensus
Definition: NetworkOPs.cpp:733
void unsubAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
Json::Value getOwnerInfo(std::shared_ptr< ReadView const > lpLedger, AccountID const &account) override
void setNeedNetworkLedger() override
Definition: NetworkOPs.cpp:881
void setUNLBlocked() override
void pubConsensus(ConsensusPhase phase)
void transactionBatch()
Apply transactions in batches.
void apply(std::unique_lock< std::mutex > &batchLock)
Attempt to apply transactions and post-process based on the results.
void setAmendmentBlocked() override
void processTransaction(std::shared_ptr< Transaction > &transaction, bool bUnlimited, bool bLocal, FailHard failType) override
Process transactions as they arrive from the network or which are submitted by clients.
void clearUNLBlocked() override
boost::asio::steady_timer heartbeatTimer_
Definition: NetworkOPs.cpp:729
void updateLocalTx(ReadView const &view) override
bool unsubManifests(std::uint64_t uListener) override
DispatchState
Synchronization states for transaction batches.
Definition: NetworkOPs.cpp:118
bool unsubTransactions(std::uint64_t uListener) override
void clearAmendmentWarned() override
std::size_t getLocalTxCount() override
std::unique_ptr< LocalTxs > m_localTX
Definition: NetworkOPs.cpp:717
bool subValidations(InfoSub::ref ispListener) override
bool subLedger(InfoSub::ref ispListener, Json::Value &jvResult) override
~NetworkOPsImp() override
Definition: NetworkOPs.cpp:262
bool isAmendmentBlocked() override
void unsubAccountHistoryInternal(std::uint64_t seq, AccountID const &account, bool historyOnly) override
SubAccountHistoryMapType mSubAccountHistory
Definition: NetworkOPs.cpp:744
void getBookPage(std::shared_ptr< ReadView const > &lpLedger, Book const &, AccountID const &uTakerID, const bool bProof, unsigned int iLimit, Json::Value const &jvMarker, Json::Value &jvResult) override
Json::Value getServerInfo(bool human, bool admin, bool counters) override
InfoSub::pointer addRpcSub(std::string const &strUrl, InfoSub::ref) override
boost::asio::steady_timer clusterTimer_
Definition: NetworkOPs.cpp:730
bool isAmendmentWarned() override
static std::array< char const *, 5 > const states_
Definition: NetworkOPs.cpp:124
bool subServer(InfoSub::ref ispListener, Json::Value &jvResult, bool admin) override
NetworkOPsImp(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool start_valid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:224
void unsubAccountInternal(std::uint64_t seq, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
std::atomic< bool > amendmentBlocked_
Definition: NetworkOPs.cpp:724
beast::Journal m_journal
Definition: NetworkOPs.cpp:715
SubInfoMapType mSubAccount
Definition: NetworkOPs.cpp:739
void unsubAccountHistory(InfoSub::ref ispListener, AccountID const &account, bool historyOnly) override
unsubscribe an account's transactions
std::set< uint256 > pendingValidations_
Definition: NetworkOPs.cpp:779
void doTransactionAsync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failtype)
For transactions not submitted by a locally connected client, fire and forget.
void setAccountHistoryJobTimer(SubAccountHistoryInfoWeak subInfo)
Definition: NetworkOPs.cpp:996
bool unsubValidations(std::uint64_t uListener) override
ClosureCounter< void, boost::system::error_code const & > waitHandlerCounter_
Definition: NetworkOPs.cpp:728
void pubLedger(std::shared_ptr< ReadView const > const &lpAccepted) override
void addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
void doTransactionSync(std::shared_ptr< Transaction > transaction, bool bUnlimited, FailHard failType)
For transactions submitted directly by a client, apply batch of transactions and wait for this transa...
void setTimer(boost::asio::steady_timer &timer, std::chrono::milliseconds const &expiry_time, std::function< void()> onExpire, std::function< void()> onError)
Definition: NetworkOPs.cpp:934
std::array< SubMapType, SubTypes::sLastEntry > mStreamMaps
Definition: NetworkOPs.cpp:759
bool unsubPeerStatus(std::uint64_t uListener) override
void pubValidation(std::shared_ptr< STValidation > const &val) override
std::size_t const minPeerCount_
Definition: NetworkOPs.cpp:769
std::atomic< bool > unlBlocked_
Definition: NetworkOPs.cpp:726
bool subBook(InfoSub::ref ispListener, Book const &) override
std::uint32_t acceptLedger(std::optional< std::chrono::milliseconds > consensusDelay) override
Accepts the current transaction tree, return the new ledger's sequence.
void stateAccounting(Json::Value &obj) override
void submitTransaction(std::shared_ptr< STTx const > const &) override
bool unsubRTTransactions(std::uint64_t uListener) override
Json::Value getConsensusInfo() override
std::recursive_mutex mSubLock
Definition: NetworkOPs.cpp:719
std::atomic< bool > needNetworkLedger_
Definition: NetworkOPs.cpp:723
bool recvValidation(std::shared_ptr< STValidation > const &val, std::string const &source) override
void switchLastClosedLedger(std::shared_ptr< Ledger const > const &newLCL)
StateAccounting accounting_
Definition: NetworkOPs.cpp:777
void reportConsensusStateChange(ConsensusPhase phase)
bool subConsensus(InfoSub::ref ispListener) override
bool isNeedNetworkLedger() override
Definition: NetworkOPs.cpp:893
void setAmendmentWarned() override
bool processTrustedProposal(RCLCxPeerPos proposal) override
void endConsensus() override
bool subPeerStatus(InfoSub::ref ispListener) override
void mapComplete(std::shared_ptr< SHAMap > const &map, bool fromAcquire) override
bool tryRemoveRpcSub(std::string const &strUrl) override
void pubAccountTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
LedgerMaster & m_ledgerMaster
Definition: NetworkOPs.cpp:737
void clearLedgerFetch() override
bool isBlocked() override
void consensusViewChange() override
void setStateTimer() override
Called to initially start our timers.
Definition: NetworkOPs.cpp:924
bool subManifests(InfoSub::ref ispListener) override
void pubValidatedTransaction(std::shared_ptr< ReadView const > const &ledger, AcceptedLedgerTx const &transaction, bool last)
void subAccount(InfoSub::ref ispListener, hash_set< AccountID > const &vnaAccountIDs, bool rt) override
bool unsubServer(std::uint64_t uListener) override
MultiApiJson transJson(std::shared_ptr< STTx const > const &transaction, TER result, bool validated, std::shared_ptr< ReadView const > const &ledger, std::optional< std::reference_wrapper< TxMeta const > > meta)
ServerFeeSummary mLastFeeSummary
Definition: NetworkOPs.cpp:761
void pubPeerStatus(std::function< Json::Value(void)> const &) override
void setStandAlone() override
Definition: NetworkOPs.cpp:875
bool subRTTransactions(InfoSub::ref ispListener) override
void pubProposedAccountTransaction(std::shared_ptr< ReadView const > const &ledger, std::shared_ptr< STTx const > const &transaction, TER result)
std::condition_variable mCond
Definition: NetworkOPs.cpp:772
void setMode(OperatingMode om) override
void stop() override
Definition: NetworkOPs.cpp:564
void clearNeedNetworkLedger() override
Definition: NetworkOPs.cpp:887
DispatchState mDispatchState
Definition: NetworkOPs.cpp:774
bool subBookChanges(InfoSub::ref ispListener) override
SubInfoMapType mSubRTAccount
Definition: NetworkOPs.cpp:740
bool checkLastClosedLedger(const Overlay::PeerSequence &, uint256 &networkClosed)
void reportFeeChange() override
bool unsubBook(std::uint64_t uListener, Book const &) override
void subAccountHistoryStart(std::shared_ptr< ReadView const > const &ledger, SubAccountHistoryInfoWeak &subInfo)
bool isFull() override
Definition: NetworkOPs.cpp:899
error_code_i subAccountHistory(InfoSub::ref ispListener, AccountID const &account) override
subscribe an account's new transactions and retrieve the account's historical transactions
std::mutex validationsMutex_
Definition: NetworkOPs.cpp:780
Application & app_
Definition: NetworkOPs.cpp:714
void pubManifest(Manifest const &) override
ConsensusPhase mLastConsensusPhase
Definition: NetworkOPs.cpp:735
bool subTransactions(InfoSub::ref ispListener) override
subRpcMapType mRpcSubMap
Definition: NetworkOPs.cpp:742
std::atomic< bool > amendmentWarned_
Definition: NetworkOPs.cpp:725
InfoSub::pointer findRpcSub(std::string const &strUrl) override
bool unsubLedger(std::uint64_t uListener) override
std::string getHostId(bool forAdmin)
Definition: NetworkOPs.cpp:905
bool beginConsensus(uint256 const &networkClosed) override
bool unsubConsensus(std::uint64_t uListener) override
Provides server functionality for clients.
Definition: NetworkOPs.h:87
void getCountsJson(Json::Value &obj)
Definition: Database.cpp:266
std::shared_ptr< OpenView const > current() const
Returns a view to the current open ledger.
Definition: OpenLedger.cpp:50
Writable ledger view that accumulates state and tx changes.
Definition: OpenView.h:56
BookListeners::pointer getBookListeners(Book const &)
BookListeners::pointer makeBookListeners(Book const &)
void processTxn(std::shared_ptr< ReadView const > const &ledger, const AcceptedLedgerTx &alTx, MultiApiJson const &jvObj)
virtual std::optional< std::uint32_t > networkID() const =0
Returns the ID of the network this server is configured for, if any.
virtual std::uint64_t getPeerDisconnect() const =0
virtual std::size_t size() const =0
Returns the number of active peers.
virtual std::uint64_t getJqTransOverflow() const =0
virtual std::uint64_t getPeerDisconnectCharges() const =0
Manages the generic consensus algorithm for use by the RCL.
Definition: RCLConsensus.h:51
std::size_t prevProposers() const
Get the number of proposing peers that participated in the previous round.
Definition: RCLConsensus.h:443
void simulate(NetClock::time_point const &now, std::optional< std::chrono::milliseconds > consensusDelay)
std::chrono::milliseconds prevRoundTime() const
Get duration of the previous round.
Definition: RCLConsensus.h:456
Json::Value getJson(bool full) const
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:44
Represents a set of transactions in RCLConsensus.
Definition: RCLCxTx.h:66
Wraps a ledger instance for use in generic Validations LedgerTrie.
static std::string getWordFromBlob(void const *blob, size_t bytes)
Chooses a single dictionary word from the data.
Definition: RFC1751.cpp:498
A view into a ledger.
Definition: ReadView.h:55
virtual std::shared_ptr< SLE const > read(Keylet const &k) const =0
Return the state item associated with a key.
virtual std::optional< key_type > succ(key_type const &key, std::optional< key_type > const &last=std::nullopt) const =0
Return the key of the next state item.
void setJson(Json::Value &) const
Definition: STAmount.cpp:475
std::string getText() const override
Definition: STAmount.cpp:515
Issue const & issue() const
Definition: STAmount.h:487
std::optional< T > get(std::string const &name) const
Definition: BasicConfig.h:138
std::size_t size() const noexcept
Definition: Serializer.h:72
void const * data() const noexcept
Definition: Serializer.h:78
void setup(Setup const &setup, beast::Journal journal)
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
std::chrono::seconds closeOffset() const
Definition: TimeKeeper.h:83
time_point closeTime() const
Returns the predicted close time, in network time.
Definition: TimeKeeper.h:76
Metrics getMetrics(OpenView const &view) const
Returns fee metrics in reference fee level units.
Definition: TxQ.cpp:1777
static time_point now()
Definition: UptimeClock.cpp:63
Validator keys and manifest as set in configuration file.
Definition: ValidatorKeys.h:37
std::size_t count() const
Return the number of configured validator list sites.
std::optional< PublicKey > localPublicKey() const
This function returns the local validator public key or a std::nullopt.
std::optional< TimeKeeper::time_point > expires() const
Return the time when the validator list will expire.
std::size_t quorum() const
Get quorum value for current trusted key set.
constexpr double decimalXRP() const
Definition: XRPAmount.h:262
Json::Value jsonClipped() const
Definition: XRPAmount.h:218
iterator begin()
Definition: base_uint.h:135
static constexpr std::size_t size()
Definition: base_uint.h:525
bool isZero() const
Definition: base_uint.h:539
bool isNonZero() const
Definition: base_uint.h:544
virtual Json::Value currentJson() const =0
Render currently executing jobs and RPC calls and durations in Json.
virtual Json::Value countersJson() const =0
Render performance counters in Json.
Automatically unlocks and re-locks a unique_lock object.
Definition: scope.h:231
T clear(T... args)
T emplace_back(T... args)
T emplace(T... args)
T empty(T... args)
T end(T... args)
T erase(T... args)
T find(T... args)
T get(T... args)
T insert(T... args)
T is_sorted(T... args)
T lock(T... args)
T make_pair(T... args)
T max(T... args)
T min(T... args)
@ arrayValue
array value (ordered list)
Definition: json_value.h:42
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:43
int Int
Definition: json_forwards.h:26
unsigned int UInt
Definition: json_forwards.h:27
void rngfill(void *buffer, std::size_t bytes, Generator &g)
Definition: rngfill.h:33
std::string const & getVersionString()
Server version.
Definition: BuildInfo.cpp:65
Json::Value computeBookChanges(std::shared_ptr< L const > const &lpAccepted)
Definition: BookChanges.h:47
void insertMPTokenIssuanceID(Json::Value &response, std::shared_ptr< STTx const > const &transaction, TxMeta const &transactionMeta)
void insertDeliveredAmount(Json::Value &meta, ReadView const &, std::shared_ptr< STTx const > const &serializedTx, TxMeta const &)
Add a delivered_amount field to the meta input/output parameter.
Charge const feeMediumBurdenRPC
TER valid(PreclaimContext const &ctx, AccountID const &src)
Keylet account(AccountID const &id) noexcept
AccountID root.
Definition: Indexes.cpp:160
Keylet page(uint256 const &root, std::uint64_t index=0) noexcept
A page in a directory.
Definition: Indexes.cpp:356
Keylet offer(AccountID const &id, std::uint32_t seq) noexcept
An offer from an account.
Definition: Indexes.cpp:250
Json::Value rate(Account const &account, double multiplier)
Set a transfer rate.
Definition: rate.cpp:30
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:106
STAmount divide(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:87
std::shared_ptr< STTx const > sterilize(STTx const &stx)
Sterilize a transaction.
Definition: STTx.cpp:604
STAmount accountFunds(ReadView const &view, AccountID const &id, STAmount const &saDefault, FreezeHandling freezeHandling, beast::Journal j)
Definition: View.cpp:366
@ fhZERO_IF_FROZEN
Definition: View.h:80
@ fhIGNORE_FREEZE
Definition: View.h:80
bool cdirFirst(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the first entry in the directory, advancing the index.
Definition: View.cpp:136
std::uint64_t getQuality(uint256 const &uBase)
Definition: Indexes.cpp:125
@ INCLUDED
Definition: Transaction.h:49
@ OBSOLETE
Definition: Transaction.h:54
@ INVALID
Definition: Transaction.h:48
error_code_i
Definition: ErrorCodes.h:40
@ rpcSUCCESS
Definition: ErrorCodes.h:44
@ rpcINVALID_PARAMS
Definition: ErrorCodes.h:84
@ rpcINTERNAL
Definition: ErrorCodes.h:130
std::pair< PublicKey, SecretKey > generateKeyPair(KeyType type, Seed const &seed)
Generate a key pair deterministically.
Definition: SecretKey.cpp:351
auto constexpr muldiv_max
Definition: mulDiv.h:29
std::unique_ptr< LocalTxs > make_LocalTxs()
Definition: LocalTxs.cpp:197
Rate transferRate(ReadView const &view, AccountID const &issuer)
Returns IOU issuer transfer fee as Rate.
Definition: View.cpp:573
STAmount amountFromQuality(std::uint64_t rate)
Definition: STAmount.cpp:822
void handleNewValidation(Application &app, std::shared_ptr< STValidation > const &val, std::string const &source, BypassAccept const bypassAccept, std::optional< beast::Journal > j)
Handle a new validation.
std::unique_ptr< NetworkOPs > make_NetworkOPs(Application &app, NetworkOPs::clock_type &clock, bool standalone, std::size_t minPeerCount, bool startvalid, JobQueue &job_queue, LedgerMaster &ledgerMaster, ValidatorKeys const &validatorKeys, boost::asio::io_service &io_svc, beast::Journal journal, beast::insight::Collector::ptr const &collector)
@ warnRPC_EXPIRED_VALIDATOR_LIST
Definition: ErrorCodes.h:164
@ warnRPC_UNSUPPORTED_MAJORITY
Definition: ErrorCodes.h:162
@ warnRPC_AMENDMENT_BLOCKED
Definition: ErrorCodes.h:163
std::unique_ptr< FeeVote > make_FeeVote(FeeSetup const &setup, beast::Journal journal)
Create an instance of the FeeVote logic.
OperatingMode
Specifies the mode under which the server believes it's operating.
Definition: NetworkOPs.h:66
@ TRACKING
convinced we agree with the network
@ DISCONNECTED
not ready to process requests
@ CONNECTED
convinced we are talking to the network
@ FULL
we have the ledger and can even validate
@ SYNCING
fallen slightly behind
STAmount multiply(STAmount const &amount, Rate const &rate)
Definition: Rate2.cpp:47
bool isTesSuccess(TER x)
Definition: TER.h:656
bool isTerRetry(TER x)
Definition: TER.h:650
AccountID calcAccountID(PublicKey const &pk)
Definition: AccountID.cpp:160
@ current
This was a new validation and was added.
csprng_engine & crypto_prng()
The default cryptographically secure PRNG.
Definition: csprng.cpp:99
Json::Value rpcError(int iError)
Definition: RPCErr.cpp:29
@ tefPAST_SEQ
Definition: TER.h:175
ConsensusPhase
Phases of consensus for a single ledger round.
static std::array< char const *, 5 > const stateNames
Definition: NetworkOPs.cpp:844
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
bool isTemMalformed(TER x)
Definition: TER.h:638
bool cdirNext(ReadView const &view, uint256 const &root, std::shared_ptr< SLE const > &page, unsigned int &index, uint256 &entry)
Returns the next entry in the directory, advancing the index.
Definition: View.cpp:147
void forAllApiVersions(Fn const &fn, Args &&... args)
Definition: ApiVersion.h:102
send_if_pred< Predicate > send_if(std::shared_ptr< Message > const &m, Predicate const &f)
Helper function to aid in type deduction.
Definition: predicates.h:75
@ tesSUCCESS
Definition: TER.h:242
uint256 getQualityNext(uint256 const &uBase)
Definition: Indexes.cpp:117
STAmount accountHolds(ReadView const &view, AccountID const &account, Currency const &currency, AccountID const &issuer, FreezeHandling zeroIfFrozen, beast::Journal j)
Definition: View.cpp:271
Rules makeRulesGivenLedger(DigestAwareReadView const &ledger, Rules const &current)
Definition: ReadView.cpp:69
std::string to_string_iso(date::sys_time< Duration > tp)
Definition: chrono.h:93
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:629
FeeSetup setup_FeeVote(Section const &section)
Definition: Config.cpp:1060
Number root(Number f, unsigned d)
Definition: Number.cpp:630
std::optional< std::uint64_t > mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div)
Return value*mul/div accurately.
Definition: mulDiv.cpp:27
ApplyFlags
Definition: ApplyView.h:30
@ tapFAIL_HARD
Definition: ApplyView.h:35
@ tapUNLIMITED
Definition: ApplyView.h:42
@ tapNONE
Definition: ApplyView.h:31
Json::Value getJson(LedgerFill const &fill)
Return a new Json::Value representing the ledger with given options.
@ ledgerMaster
ledger master data for signing
@ proposal
proposal for signing
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:37
Seed generateSeed(std::string const &passPhrase)
Generate a seed deterministically.
Definition: Seed.cpp:69
@ terQUEUED
Definition: TER.h:225
bool transResultInfo(TER code, std::string &token, std::string &text)
Definition: TER.cpp:236
@ jtNETOP_CLUSTER
Definition: Job.h:75
@ jtCLIENT_FEE_CHANGE
Definition: Job.h:47
@ jtTRANSACTION
Definition: Job.h:62
@ jtTXN_PROC
Definition: Job.h:82
@ jtCLIENT_CONSENSUS
Definition: Job.h:48
@ jtBATCH
Definition: Job.h:65
@ jtCLIENT_ACCT_HIST
Definition: Job.h:49
uint256 getBookBase(Book const &book)
Definition: Indexes.cpp:98
Rate const parityRate
A transfer rate signifying a 1:1 exchange.
bool isGlobalFrozen(ReadView const &view, AccountID const &issuer)
Definition: View.cpp:173
static std::uint32_t trunc32(std::uint64_t v)
@ temBAD_SIGNATURE
Definition: TER.h:105
static auto const genesisAccountId
Definition: NetworkOPs.cpp:857
STL namespace.
T ref(T... args)
T reset(T... args)
T set_intersection(T... args)
std::string serialized
The manifest in serialized form.
Definition: Manifest.h:83
std::uint32_t sequence
The sequence number of this manifest.
Definition: Manifest.h:95
std::string domain
The domain, if one was specified in the manifest; empty otherwise.
Definition: Manifest.h:98
std::optional< Blob > getSignature() const
Returns manifest signature.
std::optional< PublicKey > signingKey
The ephemeral key associated with this manifest.
Definition: Manifest.h:92
Blob getMasterSignature() const
Returns manifest master key signature.
PublicKey masterKey
The master key associated with this manifest.
Definition: Manifest.h:86
Server fees published on server subscription.
Definition: NetworkOPs.cpp:201
bool operator!=(ServerFeeSummary const &b) const
std::optional< TxQ::Metrics > em
Definition: NetworkOPs.cpp:220
bool operator==(ServerFeeSummary const &b) const
Definition: NetworkOPs.cpp:212
beast::insight::Gauge full_transitions
Definition: NetworkOPs.cpp:831
Stats(Handler const &handler, beast::insight::Collector::ptr const &collector)
Definition: NetworkOPs.cpp:786
beast::insight::Hook hook
Definition: NetworkOPs.cpp:820
beast::insight::Gauge connected_duration
Definition: NetworkOPs.cpp:822
beast::insight::Gauge tracking_duration
Definition: NetworkOPs.cpp:824
beast::insight::Gauge connected_transitions
Definition: NetworkOPs.cpp:828
beast::insight::Gauge disconnected_transitions
Definition: NetworkOPs.cpp:827
beast::insight::Gauge syncing_duration
Definition: NetworkOPs.cpp:823
beast::insight::Gauge tracking_transitions
Definition: NetworkOPs.cpp:830
beast::insight::Gauge full_duration
Definition: NetworkOPs.cpp:825
beast::insight::Gauge disconnected_duration
Definition: NetworkOPs.cpp:821
beast::insight::Gauge syncing_transitions
Definition: NetworkOPs.cpp:829
SubAccountHistoryIndex(AccountID const &accountId)
Definition: NetworkOPs.cpp:678
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:697
std::shared_ptr< SubAccountHistoryIndex > index_
Definition: NetworkOPs.cpp:692
Represents a transfer rate.
Definition: Rate.h:38
Data format for exchanging consumption information across peers.
Definition: Gossip.h:30
std::vector< Item > items
Definition: Gossip.h:42
Changes in trusted nodes after updating validator list.
hash_set< NodeID > added
hash_set< NodeID > removed
Structure returned by TxQ::getMetrics, expressed in reference fee level units.
Definition: TxQ.h:162
void set(const char *key, auto const &v)
Definition: MultiApiJson.h:83
IsMemberResult isMember(const char *key) const
Definition: MultiApiJson.h:94
Select all peers (except optional excluded) that are in our cluster.
Definition: predicates.h:137
Sends a message to all peers.
Definition: predicates.h:32
T swap(T... args)
T time_since_epoch(T... args)
T to_string(T... args)
T unlock(T... args)
T value_or(T... args)
T what(T... args)