PeerImp.cpp
1#include <xrpld/app/consensus/RCLValidations.h>
2#include <xrpld/app/ledger/InboundLedgers.h>
3#include <xrpld/app/ledger/InboundTransactions.h>
4#include <xrpld/app/ledger/LedgerMaster.h>
5#include <xrpld/app/ledger/TransactionMaster.h>
6#include <xrpld/app/misc/HashRouter.h>
7#include <xrpld/app/misc/LoadFeeTrack.h>
8#include <xrpld/app/misc/NetworkOPs.h>
9#include <xrpld/app/misc/Transaction.h>
10#include <xrpld/app/misc/ValidatorList.h>
11#include <xrpld/app/tx/apply.h>
12#include <xrpld/overlay/Cluster.h>
13#include <xrpld/overlay/detail/PeerImp.h>
14#include <xrpld/overlay/detail/Tuning.h>
15
16#include <xrpl/basics/UptimeClock.h>
17#include <xrpl/basics/base64.h>
18#include <xrpl/basics/random.h>
19#include <xrpl/basics/safe_cast.h>
20#include <xrpl/core/PerfLog.h>
21#include <xrpl/protocol/TxFlags.h>
22#include <xrpl/protocol/digest.h>
23
24#include <boost/algorithm/string/predicate.hpp>
25#include <boost/beast/core/ostream.hpp>
26
27#include <algorithm>
28#include <chrono>
29#include <memory>
30#include <mutex>
31#include <numeric>
32#include <sstream>
33
using namespace std::chrono_literals;

namespace xrpl {

namespace {
/** The threshold above which we treat a peer connection as high latency */
std::chrono::milliseconds constexpr peerHighLatency{300};

/** How often we PING the peer to check for latency and sendq probe */
std::chrono::seconds constexpr peerTimerInterval{60};

/** How long to wait for a graceful shutdown to complete before forcing the connection closed */
std::chrono::seconds constexpr shutdownTimerInterval{5};

} // namespace
49
// TODO: Remove this exclusion once unit tests are added after the hotfix
// release.
52
54 Application& app,
55 id_t id,
57 http_request_type&& request,
58 PublicKey const& publicKey,
60 Resource::Consumer consumer,
62 OverlayImpl& overlay)
63 : Child(overlay)
64 , app_(app)
65 , id_(id)
66 , fingerprint_(getFingerprint(slot->remote_endpoint(), publicKey, to_string(id)))
67 , prefix_(makePrefix(fingerprint_))
68 , sink_(app_.journal("Peer"), prefix_)
69 , p_sink_(app_.journal("Protocol"), prefix_)
70 , journal_(sink_)
71 , p_journal_(p_sink_)
72 , stream_ptr_(std::move(stream_ptr))
73 , socket_(stream_ptr_->next_layer().socket())
74 , stream_(*stream_ptr_)
75 , strand_(boost::asio::make_strand(socket_.get_executor()))
76 , timer_(waitable_timer{socket_.get_executor()})
77 , remote_address_(slot->remote_endpoint())
78 , overlay_(overlay)
79 , inbound_(true)
80 , protocol_(protocol)
81 , tracking_(Tracking::unknown)
82 , trackingTime_(clock_type::now())
83 , publicKey_(publicKey)
84 , lastPingTime_(clock_type::now())
85 , creationTime_(clock_type::now())
86 , squelch_(app_.journal("Squelch"))
87 , usage_(consumer)
88 , fee_{Resource::feeTrivialPeer, ""}
89 , slot_(slot)
90 , request_(std::move(request))
91 , headers_(request_)
92 , compressionEnabled_(
93 peerFeatureEnabled(headers_, FEATURE_COMPR, "lz4", app_.config().COMPRESSION) ? Compressed::On
94 : Compressed::Off)
95 , txReduceRelayEnabled_(peerFeatureEnabled(headers_, FEATURE_TXRR, app_.config().TX_REDUCE_RELAY_ENABLE))
96 , ledgerReplayEnabled_(peerFeatureEnabled(headers_, FEATURE_LEDGER_REPLAY, app_.config().LEDGER_REPLAY))
97 , ledgerReplayMsgHandler_(app, app.getLedgerReplayer())
98{
99 JLOG(journal_.info()) << "compression enabled " << (compressionEnabled_ == Compressed::On)
100 << " vp reduce-relay base squelch enabled "
103 << " tx reduce-relay enabled " << txReduceRelayEnabled_;
104}

PeerImp::~PeerImp()
{
    bool const inCluster{cluster()};

    if (inCluster)
    {
        JLOG(journal_.warn()) << name() << " left cluster";
    }
}
120
// Helper function to check for valid uint256 values in protobuf buffers
static bool
stringIsUint256Sized(std::string const& pBuffStr)
{
    return pBuffStr.size() == uint256::size();
}
127
void
PeerImp::run()
{
    if (!strand_.running_in_this_thread())
        return post(strand_, std::bind(&PeerImp::run, shared_from_this()));

    auto parseLedgerHash = [](std::string_view value) -> std::optional<uint256> {
        if (uint256 ret; ret.parseHex(value))
            return ret;

        if (auto const s = base64_decode(value); s.size() == uint256::size())
            return uint256{s};

        return std::nullopt;
    };

    std::optional<uint256> closed;
    std::optional<uint256> previous;

    if (auto const iter = headers_.find("Closed-Ledger"); iter != headers_.end())
    {
        closed = parseLedgerHash(iter->value());

        if (!closed)
            fail("Malformed handshake data (1)");
    }

    if (auto const iter = headers_.find("Previous-Ledger"); iter != headers_.end())
    {
        previous = parseLedgerHash(iter->value());

        if (!previous)
            fail("Malformed handshake data (2)");
    }

    if (previous && !closed)
        fail("Malformed handshake data (3)");

    {
        std::lock_guard sl(recentLock_);
        if (closed)
            closedLedgerHash_ = *closed;
        if (previous)
            previousLedgerHash_ = *previous;
    }

    if (inbound_)
        doAccept();
    else
        doProtocolStart();

    // Anything else that needs to be done with the connection should be
    // done in doProtocolStart
}
182
void
PeerImp::stop()
{
    if (!strand_.running_in_this_thread())
        return post(strand_, std::bind(&PeerImp::stop, shared_from_this()));

    if (!socket_.is_open())
        return;

    // The rationale for using different severity levels is that
    // outbound connections are under our control and may be logged
    // at a higher level, but inbound connections are more numerous and
    // uncontrolled, so to prevent log flooding the severity is reduced.
    JLOG(journal_.debug()) << "stop: Stop";

    shutdown();
}
200
201//------------------------------------------------------------------------------
202
void
PeerImp::send(std::shared_ptr<Message> const& m)
{
    if (!strand_.running_in_this_thread())
        return post(strand_, std::bind(&PeerImp::send, shared_from_this(), m));
208
209 if (!socket_.is_open())
210 return;
211
212 // we are in progress of closing the connection
213 if (shutdown_)
214 return tryAsyncShutdown();
215
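    // If the message is signed by a validator that we have been asked to
    // squelch and the squelch has not yet expired, count it as suppressed
    // traffic and drop it instead of sending it.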
216 auto validator = m->getValidatorKey();
217 if (validator && !squelch_.expireSquelch(*validator))
218 {
220 TrafficCount::category::squelch_suppressed, static_cast<int>(m->getBuffer(compressionEnabled_).size()));
221 return;
222 }
223
224 // report categorized outgoing traffic
226 safe_cast<TrafficCount::category>(m->getCategory()),
227 static_cast<int>(m->getBuffer(compressionEnabled_).size()));
228
229 // report total outgoing traffic
231 TrafficCount::category::total, static_cast<int>(m->getBuffer(compressionEnabled_).size()));
232
233 auto sendq_size = send_queue_.size();
234
235 if (sendq_size < Tuning::targetSendQueue)
236 {
        // To detect a peer that does not read from its side of the
        // connection, we expect the send queue to drain to a small size
        // periodically.
        large_sendq_ = 0;
241 }
242 else if (auto sink = journal_.debug(); sink && (sendq_size % Tuning::sendQueueLogFreq) == 0)
243 {
244 std::string const n = name();
245 sink << n << " sendq: " << sendq_size;
246 }
247
248 send_queue_.push(m);
249
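    // If the queue was not empty, an async_write is already in flight;
    // onWriteMessage will pick up the newly queued message when the
    // current write completes.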
250 if (sendq_size != 0)
251 return;
252
253 writePending_ = true;
254 boost::asio::async_write(
255 stream_,
256 boost::asio::buffer(send_queue_.front()->getBuffer(compressionEnabled_)),
257 bind_executor(
258 strand_,
259 std::bind(&PeerImp::onWriteMessage, shared_from_this(), std::placeholders::_1, std::placeholders::_2)));
260}
261
void
PeerImp::sendTxQueue()
{
    if (!strand_.running_in_this_thread())
        return post(strand_, std::bind(&PeerImp::sendTxQueue, shared_from_this()));

    if (!txQueue_.empty())
    {
        protocol::TMHaveTransactions ht;
        std::for_each(
            txQueue_.begin(), txQueue_.end(), [&](auto const& hash) { ht.add_hashes(hash.data(), hash.size()); });
        JLOG(p_journal_.trace()) << "sendTxQueue " << txQueue_.size();
        txQueue_.clear();
        send(std::make_shared<Message>(ht, protocol::mtHAVE_TRANSACTIONS));
    }
}
278
void
PeerImp::addTxQueue(uint256 const& hash)
{
    if (!strand_.running_in_this_thread())
        return post(strand_, std::bind(&PeerImp::addTxQueue, shared_from_this(), hash));
284
286 {
287 JLOG(p_journal_.warn()) << "addTxQueue exceeds the cap";
288 sendTxQueue();
289 }
290
291 txQueue_.insert(hash);
292 JLOG(p_journal_.trace()) << "addTxQueue " << txQueue_.size();
293}
294
void
PeerImp::removeTxQueue(uint256 const& hash)
{
    if (!strand_.running_in_this_thread())
        return post(strand_, std::bind(&PeerImp::removeTxQueue, shared_from_this(), hash));

301 auto removed = txQueue_.erase(hash);
302 JLOG(p_journal_.trace()) << "removeTxQueue " << removed;
303}
304
void
PeerImp::charge(Resource::Charge const& fee, std::string const& context)
{
308 if ((usage_.charge(fee, context) == Resource::drop) && usage_.disconnect(p_journal_) &&
309 strand_.running_in_this_thread())
310 {
311 // Sever the connection
313 fail("charge: Resources");
314 }
315}
316
317//------------------------------------------------------------------------------
318
bool
PeerImp::crawl() const
{
322 auto const iter = headers_.find("Crawl");
323 if (iter == headers_.end())
324 return false;
325 return boost::iequals(iter->value(), "public");
326}
327
bool
PeerImp::cluster() const
{
331 return static_cast<bool>(app_.cluster().member(publicKey_));
332}
333
std::string
PeerImp::getVersion() const
{
337 if (inbound_)
338 return headers_["User-Agent"];
339 return headers_["Server"];
340}
341
Json::Value
PeerImp::json()
{
    Json::Value ret(Json::objectValue);

347 ret[jss::public_key] = toBase58(TokenType::NodePublic, publicKey_);
348 ret[jss::address] = remote_address_.to_string();
349
350 if (inbound_)
351 ret[jss::inbound] = true;
352
353 if (cluster())
354 {
355 ret[jss::cluster] = true;
356
357 if (auto const n = name(); !n.empty())
358 // Could move here if Json::Value supported moving from a string
359 ret[jss::name] = n;
360 }
361
362 if (auto const d = domain(); !d.empty())
363 ret[jss::server_domain] = std::string{d};
364
365 if (auto const nid = headers_["Network-ID"]; !nid.empty())
366 ret[jss::network_id] = std::string{nid};
367
368 ret[jss::load] = usage_.balance();
369
370 if (auto const version = getVersion(); !version.empty())
371 ret[jss::version] = std::string{version};
372
373 ret[jss::protocol] = to_string(protocol_);
374
    {
        std::lock_guard sl(recentLock_);
        if (latency_)
            ret[jss::latency] = static_cast<Json::UInt>(latency_->count());
    }

    ret[jss::uptime] = static_cast<Json::UInt>(std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());

    std::uint32_t minSeq, maxSeq;
    ledgerRange(minSeq, maxSeq);

    if ((minSeq != 0) || (maxSeq != 0))
        ret[jss::complete_ledgers] = std::to_string(minSeq) + " - " + std::to_string(maxSeq);

    switch (tracking_.load())
    {
        case Tracking::diverged:
            ret[jss::track] = "diverged";
            break;

        case Tracking::unknown:
            ret[jss::track] = "unknown";
            break;

        case Tracking::converged:
            // Nothing to do here
            break;
    }

    uint256 closedLedgerHash;
    protocol::TMStatusChange last_status;
    {
        std::lock_guard sl(recentLock_);
        closedLedgerHash = closedLedgerHash_;
        last_status = last_status_;
    }
411
412 if (closedLedgerHash != beast::zero)
413 ret[jss::ledger] = to_string(closedLedgerHash);
414
415 if (last_status.has_newstatus())
416 {
417 switch (last_status.newstatus())
418 {
419 case protocol::nsCONNECTING:
420 ret[jss::status] = "connecting";
421 break;
422
423 case protocol::nsCONNECTED:
424 ret[jss::status] = "connected";
425 break;
426
427 case protocol::nsMONITORING:
428 ret[jss::status] = "monitoring";
429 break;
430
431 case protocol::nsVALIDATING:
432 ret[jss::status] = "validating";
433 break;
434
435 case protocol::nsSHUTTING:
436 ret[jss::status] = "shutting";
437 break;
438
439 default:
440 JLOG(p_journal_.warn()) << "Unknown status: " << last_status.newstatus();
441 }
442 }
443
444 ret[jss::metrics] = Json::Value(Json::objectValue);
445 ret[jss::metrics][jss::total_bytes_recv] = std::to_string(metrics_.recv.total_bytes());
446 ret[jss::metrics][jss::total_bytes_sent] = std::to_string(metrics_.sent.total_bytes());
447 ret[jss::metrics][jss::avg_bps_recv] = std::to_string(metrics_.recv.average_bytes());
448 ret[jss::metrics][jss::avg_bps_sent] = std::to_string(metrics_.sent.average_bytes());
449
450 return ret;
451}
452
bool
PeerImp::supportsFeature(ProtocolFeature f) const
{
    switch (f)
    {
        case ProtocolFeature::ValidatorListPropagation:
            return protocol_ >= make_protocol(2, 1);
        case ProtocolFeature::ValidatorList2Propagation:
            return protocol_ >= make_protocol(2, 2);
        case ProtocolFeature::LedgerReplay:
            return ledgerReplayEnabled_;
    }
    return false;
}
467
468//------------------------------------------------------------------------------
469
bool
PeerImp::hasLedger(uint256 const& hash, std::uint32_t seq) const
{
    {
        std::lock_guard sl(recentLock_);
        if ((seq != 0) && (seq >= minLedger_) && (seq <= maxLedger_) && (tracking_.load() == Tracking::converged))
            return true;
        if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) != recentLedgers_.end())
            return true;
    }
    return false;
}
482
void
PeerImp::ledgerRange(std::uint32_t& minSeq, std::uint32_t& maxSeq) const
{
    std::lock_guard sl(recentLock_);

    minSeq = minLedger_;
    maxSeq = maxLedger_;
}
491
bool
PeerImp::hasTxSet(uint256 const& hash) const
{
    std::lock_guard sl(recentLock_);
    return std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) != recentTxSets_.end();
}
498
499void
501{
502 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
503 // guarded by recentLock_.
507}
508
bool
PeerImp::hasRange(std::uint32_t uMin, std::uint32_t uMax)
{
    std::lock_guard sl(recentLock_);
    return (tracking_ != Tracking::diverged) && (uMin >= minLedger_) && (uMax <= maxLedger_);
}
515
516//------------------------------------------------------------------------------
517
void
PeerImp::fail(std::string const& name, error_code ec)
{
    XRPL_ASSERT(strand_.running_in_this_thread(), "xrpl::PeerImp::fail : strand in this thread");

    if (!socket_.is_open())
        return;

    JLOG(journal_.warn()) << name << ": " << ec.message();

    shutdown();
}
530
void
PeerImp::fail(std::string const& reason)
{
    if (!strand_.running_in_this_thread())
        return post(
            strand_, std::bind((void(Peer::*)(std::string const&)) & PeerImp::fail, shared_from_this(), reason));

    if (!socket_.is_open())
        return;

    // name() acquires a lock, so only call it if the message will be logged
    if (journal_.active(beast::severities::kWarning))
    {
        std::string const n = name();
        JLOG(journal_.warn()) << n << " failed: " << reason;
    }

    shutdown();
}
550
551void
553{
554 XRPL_ASSERT(strand_.running_in_this_thread(), "xrpl::PeerImp::tryAsyncShutdown : strand in this thread");
555
557 return;
558
560 return;
561
562 shutdownStarted_ = true;
563
564 setTimer(shutdownTimerInterval);
565
566 // gracefully shutdown the SSL socket, performing a shutdown handshake
567 stream_.async_shutdown(
568 bind_executor(strand_, std::bind(&PeerImp::onShutdown, shared_from_this(), std::placeholders::_1)));
569}
570
void
PeerImp::shutdown()
{
574 XRPL_ASSERT(strand_.running_in_this_thread(), "xrpl::PeerImp::shutdown: strand in this thread");
575
576 if (!socket_.is_open() || shutdown_)
577 return;
578
579 shutdown_ = true;
580
581 boost::beast::get_lowest_layer(stream_).cancel();
582
584}
585
void
PeerImp::onShutdown(error_code ec)
{
589 cancelTimer();
590 if (ec)
591 {
        // - eof: the stream was cleanly closed
        // - operation_aborted: an expired timer (slow shutdown)
        // - stream_truncated: the TCP connection closed without a shutdown
        //   handshake; this can occur if a peer does not disconnect gracefully
        // - broken_pipe: the peer is gone
        bool shouldLog =
            (ec != boost::asio::error::eof && ec != boost::asio::error::operation_aborted &&
             ec.message().find("application data after close notify") == std::string::npos);
600
601 if (shouldLog)
602 {
603 JLOG(journal_.debug()) << "onShutdown: " << ec.message();
604 }
605 }
606
607 close();
608}
609
void
PeerImp::close()
{
613 XRPL_ASSERT(strand_.running_in_this_thread(), "xrpl::PeerImp::close : strand in this thread");
614
615 if (!socket_.is_open())
616 return;
617
618 cancelTimer();
619
620 error_code ec;
621 socket_.close(ec);
622
624
625 // The rationale for using different severity levels is that
626 // outbound connections are under our control and may be logged
627 // at a higher level, but inbound connections are more numerous and
628 // uncontrolled so to prevent log flooding the severity is reduced.
629 JLOG((inbound_ ? journal_.debug() : journal_.info())) << "close: Closed";
630}
631
632//------------------------------------------------------------------------------
633
void
PeerImp::setTimer(std::chrono::seconds interval)
{
637 try
638 {
639 timer_.expires_after(interval);
640 }
641 catch (std::exception const& ex)
642 {
643 JLOG(journal_.error()) << "setTimer: " << ex.what();
644 return shutdown();
645 }
646
647 timer_.async_wait(bind_executor(strand_, std::bind(&PeerImp::onTimer, shared_from_this(), std::placeholders::_1)));
648}
649
650//------------------------------------------------------------------------------
651
654{
656 ss << "[" << fingerprint << "] ";
657 return ss.str();
658}
659
void
PeerImp::onTimer(error_code ec)
{
663 XRPL_ASSERT(strand_.running_in_this_thread(), "xrpl::PeerImp::onTimer : strand in this thread");
664
665 if (!socket_.is_open())
666 return;
667
668 if (ec)
669 {
670 // do not initiate shutdown, timers are frequently cancelled
671 if (ec == boost::asio::error::operation_aborted)
672 return;
673
674 // This should never happen
675 JLOG(journal_.error()) << "onTimer: " << ec.message();
676 return close();
677 }
678
679 // the timer expired before the shutdown completed
680 // force close the connection
681 if (shutdown_)
682 {
683 JLOG(journal_.debug()) << "onTimer: shutdown timer expired";
684 return close();
685 }
686
688 return fail("Large send queue");
689
690 if (auto const t = tracking_.load(); !inbound_ && t != Tracking::converged)
691 {
692 clock_type::duration duration;
693
694 {
696 duration = clock_type::now() - trackingTime_;
697 }
698
699 if ((t == Tracking::diverged && (duration > app_.config().MAX_DIVERGED_TIME)) ||
700 (t == Tracking::unknown && (duration > app_.config().MAX_UNKNOWN_TIME)))
701 {
703 return fail("Not useful");
704 }
705 }
706
    // A previous PING is still unanswered: the peer failed to respond
    // within the timer interval.
    if (lastPingSeq_)
        return fail("Ping Timeout");

    lastPingTime_ = clock_type::now();
    lastPingSeq_ = rand_int<std::uint32_t>();
713
714 protocol::TMPing message;
715 message.set_type(protocol::TMPing::ptPING);
716 message.set_seq(*lastPingSeq_);
717
718 send(std::make_shared<Message>(message, protocol::mtPING));
719
720 setTimer(peerTimerInterval);
721}
722
void
PeerImp::cancelTimer()
{
726 try
727 {
728 timer_.cancel();
729 }
730 catch (std::exception const& ex)
731 {
732 JLOG(journal_.error()) << "cancelTimer: " << ex.what();
733 }
734}
735
736//------------------------------------------------------------------------------
737void
739{
740 XRPL_ASSERT(read_buffer_.size() == 0, "xrpl::PeerImp::doAccept : empty read buffer");
741
742 JLOG(journal_.debug()) << "doAccept";
743
744 // a shutdown was initiated before the handshake, there is nothing to do
745 if (shutdown_)
746 return tryAsyncShutdown();
747
748 auto const sharedValue = makeSharedValue(*stream_ptr_, journal_);
749
750 // This shouldn't fail since we already computed
751 // the shared value successfully in OverlayImpl
752 if (!sharedValue)
753 return fail("makeSharedValue: Unexpected failure");
754
755 JLOG(journal_.debug()) << "Protocol: " << to_string(protocol_);
756
757 if (auto member = app_.cluster().member(publicKey_))
758 {
759 {
761 name_ = *member;
762 }
763 JLOG(journal_.info()) << "Cluster name: " << *member;
764 }
765
767
768 // XXX Set timer: connection is in grace period to be useful.
769 // XXX Set timer: connection idle (idle may vary depending on connection
770 // type.)
771
773
774 boost::beast::ostream(*write_buffer) << makeResponse(
776 request_,
779 *sharedValue,
781 protocol_,
782 app_);
783
784 // Write the whole buffer and only start protocol when that's done.
785 boost::asio::async_write(
786 stream_,
787 write_buffer->data(),
788 boost::asio::transfer_all(),
789 bind_executor(
790 strand_, [this, write_buffer, self = shared_from_this()](error_code ec, std::size_t bytes_transferred) {
791 if (!socket_.is_open())
792 return;
793 if (ec == boost::asio::error::operation_aborted)
794 return tryAsyncShutdown();
795 if (ec)
796 return fail("onWriteResponse", ec);
797 if (write_buffer->size() == bytes_transferred)
798 return doProtocolStart();
799 return fail("Failed to write header");
800 }));
801}
802
std::string
PeerImp::name() const
{
    std::shared_lock read_lock{nameMutex_};
    return name_;
}
809
std::string
PeerImp::domain() const
{
    return headers_["Server-Domain"];
}
815
816//------------------------------------------------------------------------------
817
818// Protocol logic
819
void
PeerImp::doProtocolStart()
{
    // a shutdown was initiated before the handshake, so there is nothing to do
    if (shutdown_)
        return tryAsyncShutdown();
826
828
829 // Send all the validator lists that have been loaded
831 {
833 std::uint32_t version,
835 PublicKey const& pubKey,
836 std::size_t maxSequence,
837 uint256 const& hash) {
839 *this, 0, pubKey, maxSequence, version, manifest, blobInfos, app_.getHashRouter(), p_journal_);
840
841 // Don't send it next time.
843 });
844 }
845
846 if (auto m = overlay_.getManifestsMessage())
847 send(m);
848
849 setTimer(peerTimerInterval);
850}
851
852// Called repeatedly with protocol message data
void
PeerImp::onReadMessage(error_code ec, std::size_t bytes_transferred)
{
856 XRPL_ASSERT(strand_.running_in_this_thread(), "xrpl::PeerImp::onReadMessage : strand in this thread");
857
858 readPending_ = false;
859
860 if (!socket_.is_open())
861 return;
862
863 if (ec)
864 {
865 if (ec == boost::asio::error::eof)
866 {
867 JLOG(journal_.debug()) << "EOF";
868 return shutdown();
869 }
870
871 if (ec == boost::asio::error::operation_aborted)
872 return tryAsyncShutdown();
873
874 return fail("onReadMessage", ec);
875 }
876 // we started shutdown, no reason to process further data
877 if (shutdown_)
878 return tryAsyncShutdown();
879
880 if (auto stream = journal_.trace())
881 {
882 stream << "onReadMessage: " << (bytes_transferred > 0 ? to_string(bytes_transferred) + " bytes" : "");
883 }
884
885 metrics_.recv.add_message(bytes_transferred);
886
887 read_buffer_.commit(bytes_transferred);
888
889 auto hint = Tuning::readBufferBytes;
890
891 while (read_buffer_.size() > 0)
892 {
893 std::size_t bytes_consumed;
894
895 using namespace std::chrono_literals;
896 std::tie(bytes_consumed, ec) = perf::measureDurationAndLog(
897 [&]() { return invokeProtocolMessage(read_buffer_.data(), *this, hint); },
898 "invokeProtocolMessage",
899 350ms,
900 journal_);
901
902 if (!socket_.is_open())
903 return;
904
905 // the error_code is produced by invokeProtocolMessage
906 // it could be due to a bad message
907 if (ec)
908 return fail("onReadMessage", ec);
909
910 if (bytes_consumed == 0)
911 break;
912
913 read_buffer_.consume(bytes_consumed);
914 }
915
916 // check if a shutdown was initiated while processing messages
917 if (shutdown_)
918 return tryAsyncShutdown();
919
920 readPending_ = true;
921
922 XRPL_ASSERT(!shutdownStarted_, "xrpl::PeerImp::onReadMessage : shutdown started");
923
924 // Timeout on writes only
925 stream_.async_read_some(
927 bind_executor(
928 strand_,
929 std::bind(&PeerImp::onReadMessage, shared_from_this(), std::placeholders::_1, std::placeholders::_2)));
930}
931
void
PeerImp::onWriteMessage(error_code ec, std::size_t bytes_transferred)
{
935 XRPL_ASSERT(strand_.running_in_this_thread(), "xrpl::PeerImp::onWriteMessage : strand in this thread");
936
937 writePending_ = false;
938
939 if (!socket_.is_open())
940 return;
941
942 if (ec)
943 {
944 if (ec == boost::asio::error::operation_aborted)
945 return tryAsyncShutdown();
946
947 return fail("onWriteMessage", ec);
948 }
949
950 if (auto stream = journal_.trace())
951 {
952 stream << "onWriteMessage: " << (bytes_transferred > 0 ? to_string(bytes_transferred) + " bytes" : "");
953 }
954
955 metrics_.sent.add_message(bytes_transferred);
956
957 XRPL_ASSERT(!send_queue_.empty(), "xrpl::PeerImp::onWriteMessage : non-empty send buffer");
958 send_queue_.pop();
959
960 if (shutdown_)
961 return tryAsyncShutdown();
962
963 if (!send_queue_.empty())
964 {
965 writePending_ = true;
966 XRPL_ASSERT(!shutdownStarted_, "xrpl::PeerImp::onWriteMessage : shutdown started");
967
968 // Timeout on writes only
969 return boost::asio::async_write(
970 stream_,
971 boost::asio::buffer(send_queue_.front()->getBuffer(compressionEnabled_)),
972 bind_executor(
973 strand_,
974 std::bind(&PeerImp::onWriteMessage, shared_from_this(), std::placeholders::_1, std::placeholders::_2)));
975 }
976}
977
978//------------------------------------------------------------------------------
979//
980// ProtocolHandler
981//
982//------------------------------------------------------------------------------
983
void
PeerImp::onMessageUnknown(std::uint16_t type)
{
    // TODO
}
989
void
PeerImp::onMessageBegin(
    std::uint16_t type,
    std::shared_ptr<::google::protobuf::Message> const& m,
    std::size_t size,
    std::size_t uncompressed_size,
    bool isCompressed)
{
998 auto const name = protocolMessageName(type);
1001
1002 auto const category = TrafficCount::categorize(*m, static_cast<protocol::MessageType>(type), true);
1003
1004 // report total incoming traffic
    overlay_.reportInboundTraffic(TrafficCount::category::total, static_cast<int>(size));

1007 // increase the traffic received for a specific category
1008 overlay_.reportInboundTraffic(category, static_cast<int>(size));
1009
1010 using namespace protocol;
1011 if ((type == MessageType::mtTRANSACTION || type == MessageType::mtHAVE_TRANSACTIONS ||
1012 type == MessageType::mtTRANSACTIONS ||
1013 // GET_OBJECTS
1015 // GET_LEDGER
1017 // LEDGER_DATA
1020 {
1021 overlay_.addTxMetrics(static_cast<MessageType>(type), static_cast<std::uint64_t>(size));
1022 }
1023 JLOG(journal_.trace()) << "onMessageBegin: " << type << " " << size << " " << uncompressed_size << " "
1024 << isCompressed;
1025}
1026
1027void
1033
1034void
1036{
1037 auto const s = m->list_size();
1038
1039 if (s == 0)
1040 {
1042 return;
1043 }
1044
1045 if (s > 100)
1047
1049 jtMANIFEST, "RcvManifests", [this, that = shared_from_this(), m]() { overlay_.onManifests(m, that); });
1050}
1051
void
PeerImp::onMessage(std::shared_ptr<protocol::TMPing> const& m)
{
1055 if (m->type() == protocol::TMPing::ptPING)
1056 {
1057 // We have received a ping request, reply with a pong
1059 m->set_type(protocol::TMPing::ptPONG);
1060 send(std::make_shared<Message>(*m, protocol::mtPING));
1061 return;
1062 }
1063
1064 if (m->type() == protocol::TMPing::ptPONG && m->has_seq())
1065 {
1066 // Only reset the ping sequence if we actually received a
1067 // PONG with the correct cookie. That way, any peers which
1068 // respond with incorrect cookies will eventually time out.
1069 if (m->seq() == lastPingSeq_)
1070 {
1072
1073 // Update latency estimate
1074 auto const rtt = std::chrono::round<std::chrono::milliseconds>(clock_type::now() - lastPingTime_);
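            // Smooth the estimate with an exponentially weighted moving
            // average: seven parts previous estimate, one part new sample.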
1075
1077
1078 if (latency_)
1079 latency_ = (*latency_ * 7 + rtt) / 8;
1080 else
1081 latency_ = rtt;
1082 }
1083
1084 return;
1085 }
1086}
1087
void
PeerImp::onMessage(std::shared_ptr<protocol::TMCluster> const& m)
{
1091 // VFALCO NOTE I think we should drop the peer immediately
1092 if (!cluster())
1093 {
1094 fee_.update(Resource::feeUselessData, "unknown cluster");
1095 return;
1096 }
1097
1098 for (int i = 0; i < m->clusternodes().size(); ++i)
1099 {
1100 protocol::TMClusterNode const& node = m->clusternodes(i);

        std::string name;
        if (node.has_nodename())
            name = node.nodename();
1105
1106 auto const publicKey = parseBase58<PublicKey>(TokenType::NodePublic, node.publickey());
1107
1108 // NIKB NOTE We should drop the peer immediately if
1109 // they send us a public key we can't parse
1110 if (publicKey)
1111 {
1112 auto const reportTime = NetClock::time_point{NetClock::duration{node.reporttime()}};
1113
1114 app_.cluster().update(*publicKey, name, node.nodeload(), reportTime);
1115 }
1116 }
1117
1118 int loadSources = m->loadsources().size();
1119 if (loadSources != 0)
1120 {
1121 Resource::Gossip gossip;
1122 gossip.items.reserve(loadSources);
1123 for (int i = 0; i < m->loadsources().size(); ++i)
1124 {
            protocol::TMLoadSource const& node = m->loadsources(i);
            Resource::Gossip::Item item;
            item.address = beast::IP::Endpoint::from_string(node.name());
1128 item.balance = node.cost();
1129 if (item.address != beast::IP::Endpoint())
1130 gossip.items.push_back(item);
1131 }
1133 }
1134
1135 // Calculate the cluster fee:
1136 auto const thresh = app_.timeKeeper().now() - 90s;
1137 std::uint32_t clusterFee = 0;

    std::vector<std::uint32_t> fees;
    fees.reserve(app_.cluster().size());
1141
1142 app_.cluster().for_each([&fees, thresh](ClusterNode const& status) {
1143 if (status.getReportTime() >= thresh)
1144 fees.push_back(status.getLoadFee());
1145 });
1146
1147 if (!fees.empty())
1148 {
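        // Use the median of the recently reported load fees (the upper of the
        // two middle values when the count is even).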
1149 auto const index = fees.size() / 2;
1150 std::nth_element(fees.begin(), fees.begin() + index, fees.end());
1151 clusterFee = fees[index];
1152 }
1153
1154 app_.getFeeTrack().setClusterFee(clusterFee);
1155}
1156
void
PeerImp::onMessage(std::shared_ptr<protocol::TMEndpoints> const& m)
{
    // Don't allow endpoints from peers that are not known to be tracking the
    // network, or that are not using a version of the message we support:
1162 if (tracking_.load() != Tracking::converged || m->version() != 2)
1163 return;
1164
1165 // The number is arbitrary and doesn't have any real significance or
1166 // implication for the protocol.
1167 if (m->endpoints_v2().size() >= 1024)
1168 {
1169 fee_.update(Resource::feeUselessData, "endpoints too large");
1170 return;
1171 }

    std::vector<PeerFinder::Endpoint> endpoints;
    endpoints.reserve(m->endpoints_v2().size());
1175
1176 auto malformed = 0;
1177 for (auto const& tm : m->endpoints_v2())
1178 {
1179 auto result = beast::IP::Endpoint::from_string_checked(tm.endpoint());
1180
1181 if (!result)
1182 {
1183 JLOG(p_journal_.error()) << "failed to parse incoming endpoint: {" << tm.endpoint() << "}";
1184 malformed++;
1185 continue;
1186 }
1187
        // If hops == 0, this Endpoint describes the peer we are connected
        // to -- in that case, we take the remote address seen on the
        // socket and store that in the IP::Endpoint. If this is the first
        // time, then we'll verify that their listener can receive incoming
        // connections by performing a connectivity test. If hops > 0, then
        // we just take the address/port we were given.
1194 if (tm.hops() == 0)
1195 result = remote_address_.at_port(result->port());
1196
1197 endpoints.emplace_back(*result, tm.hops());
1198 }
1199
1200 // Charge the peer for each malformed endpoint. As there still may be
1201 // multiple valid endpoints we don't return early.
1202 if (malformed > 0)
1203 {
1204 fee_.update(Resource::feeInvalidData * malformed, std::to_string(malformed) + " malformed endpoints");
1205 }
1206
1207 if (!endpoints.empty())
1208 overlay_.peerFinder().on_endpoints(slot_, endpoints);
1209}
1210
1211void
1216
void
PeerImp::handleTransaction(std::shared_ptr<protocol::TMTransaction> const& m, bool eraseTxQueue, bool batch)
{
1220 XRPL_ASSERT(eraseTxQueue != batch, ("xrpl::PeerImp::handleTransaction : valid inputs"));
1222 return;
1223
1225 {
        // If we've never been in sync, there's nothing we can do
        // with a transaction
1228 JLOG(p_journal_.debug()) << "Ignoring incoming transaction: Need network ledger";
1229 return;
1230 }
1231
1232 SerialIter sit(makeSlice(m->rawtransaction()));
1233
1234 try
1235 {
1236 auto stx = std::make_shared<STTx const>(sit);
1237 uint256 txID = stx->getTransactionID();
1238
1239 // Charge strongly for attempting to relay a txn with tfInnerBatchTxn
1240 // LCOV_EXCL_START
1241 /*
1242 There is no need to check whether the featureBatch amendment is
1243 enabled.
1244
1245 * If the `tfInnerBatchTxn` flag is set, and the amendment is
1246 enabled, then it's an invalid transaction because inner batch
1247 transactions should not be relayed.
1248 * If the `tfInnerBatchTxn` flag is set, and the amendment is *not*
1249 enabled, then the transaction is malformed because it's using an
1250 "unknown" flag. There's no need to waste the resources to send it
1251 to the transaction engine.
1252
1253 We don't normally check transaction validity at this level, but
1254 since we _need_ to check it when the amendment is enabled, we may as
1255 well drop it if the flag is set regardless.
1256 */
1257 if (stx->isFlag(tfInnerBatchTxn))
1258 {
1259 JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing "
1260 "tfInnerBatchTxn (handleTransaction).";
1261 fee_.update(Resource::feeModerateBurdenPeer, "inner batch txn");
1262 return;
1263 }
1264 // LCOV_EXCL_STOP
1265
1266 HashRouterFlags flags;
1267 constexpr std::chrono::seconds tx_interval = 10s;
1268
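        // Consult the HashRouter: a transaction ID that was already handled
        // within the last tx_interval is not processed again.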
1269 if (!app_.getHashRouter().shouldProcess(txID, id_, flags, tx_interval))
1270 {
1271 // we have seen this transaction recently
1272 if (any(flags & HashRouterFlags::BAD))
1273 {
1274 fee_.update(Resource::feeUselessData, "known bad");
1275 JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID;
1276 }
1277
            // Erase only if the server has seen this tx. If the server has not
            // seen this tx, then it could not have been queued for this peer.
1280 else if (eraseTxQueue && txReduceRelayEnabled())
1281 removeTxQueue(txID);
1282
1284
1285 return;
1286 }
1287
1288 JLOG(p_journal_.debug()) << "Got tx " << txID;
1289
1290 bool checkSignature = true;
1291 if (cluster())
1292 {
1293 if (!m->has_deferred() || !m->deferred())
1294 {
1295 // Skip local checks if a server we trust
1296 // put the transaction in its open ledger
1297 flags |= HashRouterFlags::TRUSTED;
1298 }
1299
1300 // for non-validator nodes only -- localPublicKey is set for
1301 // validators only
1303 {
1304 // For now, be paranoid and have each validator
1305 // check each transaction, regardless of source
1306 checkSignature = false;
1307 }
1308 }
1309
1311 {
1312 JLOG(p_journal_.trace()) << "No new transactions until synchronized";
1313 }
1315 {
1317 JLOG(p_journal_.info()) << "Transaction queue is full";
1318 }
1319 else
1320 {
1323 "RcvCheckTx",
1324 [weak = std::weak_ptr<PeerImp>(shared_from_this()), flags, checkSignature, batch, stx]() {
1325 if (auto peer = weak.lock())
1326 peer->checkTransaction(flags, checkSignature, stx, batch);
1327 });
1328 }
1329 }
1330 catch (std::exception const& ex)
1331 {
1332 JLOG(p_journal_.warn()) << "Transaction invalid: " << strHex(m->rawtransaction())
1333 << ". Exception: " << ex.what();
1334 }
1335}
1336
void
PeerImp::onMessage(std::shared_ptr<protocol::TMGetLedger> const& m)
{
1340 auto badData = [&](std::string const& msg) {
1341 fee_.update(Resource::feeInvalidData, "get_ledger " + msg);
1342 JLOG(p_journal_.warn()) << "TMGetLedger: " << msg;
1343 };
1344 auto const itype{m->itype()};
1345
1346 // Verify ledger info type
1347 if (itype < protocol::liBASE || itype > protocol::liTS_CANDIDATE)
1348 return badData("Invalid ledger info type");
1349
1350 auto const ltype = [&m]() -> std::optional<::protocol::TMLedgerType> {
1351 if (m->has_ltype())
1352 return m->ltype();
1353 return std::nullopt;
1354 }();
1355
1356 if (itype == protocol::liTS_CANDIDATE)
1357 {
1358 if (!m->has_ledgerhash())
1359 return badData("Invalid TX candidate set, missing TX set hash");
1360 }
1361 else if (!m->has_ledgerhash() && !m->has_ledgerseq() && !(ltype && *ltype == protocol::ltCLOSED))
1362 {
1363 return badData("Invalid request");
1364 }
1365
1366 // Verify ledger type
1367 if (ltype && (*ltype < protocol::ltACCEPTED || *ltype > protocol::ltCLOSED))
1368 return badData("Invalid ledger type");
1369
1370 // Verify ledger hash
1371 if (m->has_ledgerhash() && !stringIsUint256Sized(m->ledgerhash()))
1372 return badData("Invalid ledger hash");
1373
1374 // Verify ledger sequence
1375 if (m->has_ledgerseq())
1376 {
1377 auto const ledgerSeq{m->ledgerseq()};
1378
1379 // Check if within a reasonable range
1380 using namespace std::chrono_literals;
        if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s &&
            ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1383 {
1384 return badData("Invalid ledger sequence " + std::to_string(ledgerSeq));
1385 }
1386 }
1387
1388 // Verify ledger node IDs
1389 if (itype != protocol::liBASE)
1390 {
1391 if (m->nodeids_size() <= 0)
1392 return badData("Invalid ledger node IDs");
1393
1394 for (auto const& nodeId : m->nodeids())
1395 {
1397 return badData("Invalid SHAMap node ID");
1398 }
1399 }
1400
1401 // Verify query type
1402 if (m->has_querytype() && m->querytype() != protocol::qtINDIRECT)
1403 return badData("Invalid query type");
1404
1405 // Verify query depth
1406 if (m->has_querydepth())
1407 {
1408 if (m->querydepth() > Tuning::maxQueryDepth || itype == protocol::liBASE)
1409 {
1410 return badData("Invalid query depth");
1411 }
1412 }
1413
1414 // Queue a job to process the request
    std::weak_ptr<PeerImp> weak = shared_from_this();
    app_.getJobQueue().addJob(jtLEDGER_REQ, "RcvGetLedger", [weak, m]() {
1417 if (auto peer = weak.lock())
1418 peer->processLedgerRequest(m);
1419 });
1420}
1421
void
PeerImp::onMessage(std::shared_ptr<protocol::TMProofPathRequest> const& m)
{
1425 JLOG(p_journal_.trace()) << "onMessage, TMProofPathRequest";
    if (!ledgerReplayEnabled_)
    {
1428 fee_.update(Resource::feeMalformedRequest, "proof_path_request disabled");
1429 return;
1430 }
1431
1432 fee_.update(Resource::feeModerateBurdenPeer, "received a proof path request");
    std::weak_ptr<PeerImp> weak = shared_from_this();
    app_.getJobQueue().addJob(jtREPLAY_REQ, "RcvProofPReq", [weak, m]() {
1435 if (auto peer = weak.lock())
1436 {
1437 auto reply = peer->ledgerReplayMsgHandler_.processProofPathRequest(m);
1438 if (reply.has_error())
1439 {
1440 if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1441 peer->charge(Resource::feeMalformedRequest, "proof_path_request");
1442 else
1443 peer->charge(Resource::feeRequestNoReply, "proof_path_request");
1444 }
1445 else
1446 {
1447 peer->send(std::make_shared<Message>(reply, protocol::mtPROOF_PATH_RESPONSE));
1448 }
1449 }
1450 });
1451}
1452
void
PeerImp::onMessage(std::shared_ptr<protocol::TMProofPathResponse> const& m)
{
1456 if (!ledgerReplayEnabled_)
1457 {
1458 fee_.update(Resource::feeMalformedRequest, "proof_path_response disabled");
1459 return;
1460 }
1461
1462 if (!ledgerReplayMsgHandler_.processProofPathResponse(m))
1463 {
1464 fee_.update(Resource::feeInvalidData, "proof_path_response");
1465 }
1466}
1467
void
PeerImp::onMessage(std::shared_ptr<protocol::TMReplayDeltaRequest> const& m)
{
1471 JLOG(p_journal_.trace()) << "onMessage, TMReplayDeltaRequest";
1472 if (!ledgerReplayEnabled_)
1473 {
1474 fee_.update(Resource::feeMalformedRequest, "replay_delta_request disabled");
1475 return;
1476 }
1477
1478 fee_.fee = Resource::feeModerateBurdenPeer;
1479 std::weak_ptr<PeerImp> weak = shared_from_this();
1480 app_.getJobQueue().addJob(jtREPLAY_REQ, "RcvReplDReq", [weak, m]() {
1481 if (auto peer = weak.lock())
1482 {
1483 auto reply = peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m);
1484 if (reply.has_error())
1485 {
1486 if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1487 peer->charge(Resource::feeMalformedRequest, "replay_delta_request");
1488 else
1489 peer->charge(Resource::feeRequestNoReply, "replay_delta_request");
1490 }
1491 else
1492 {
1493 peer->send(std::make_shared<Message>(reply, protocol::mtREPLAY_DELTA_RESPONSE));
1494 }
1495 }
1496 });
1497}
1498
void
PeerImp::onMessage(std::shared_ptr<protocol::TMReplayDeltaResponse> const& m)
{
1502 if (!ledgerReplayEnabled_)
1503 {
1504 fee_.update(Resource::feeMalformedRequest, "replay_delta_response disabled");
1505 return;
1506 }
1507
1508 if (!ledgerReplayMsgHandler_.processReplayDeltaResponse(m))
1509 {
1510 fee_.update(Resource::feeInvalidData, "replay_delta_response");
1511 }
1512}
1513
void
PeerImp::onMessage(std::shared_ptr<protocol::TMLedgerData> const& m)
{
1517 auto badData = [&](std::string const& msg) {
1518 fee_.update(Resource::feeInvalidData, msg);
1519 JLOG(p_journal_.warn()) << "TMLedgerData: " << msg;
1520 };
1521
1522 // Verify ledger hash
1523 if (!stringIsUint256Sized(m->ledgerhash()))
1524 return badData("Invalid ledger hash");
1525
1526 // Verify ledger sequence
1527 {
1528 auto const ledgerSeq{m->ledgerseq()};
1529 if (m->type() == protocol::liTS_CANDIDATE)
1530 {
1531 if (ledgerSeq != 0)
1532 {
1533 return badData("Invalid ledger sequence " + std::to_string(ledgerSeq));
1534 }
1535 }
1536 else
1537 {
1538 // Check if within a reasonable range
1539 using namespace std::chrono_literals;
1540 if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s &&
1541 ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1542 {
1543 return badData("Invalid ledger sequence " + std::to_string(ledgerSeq));
1544 }
1545 }
1546 }
1547
1548 // Verify ledger info type
1549 if (m->type() < protocol::liBASE || m->type() > protocol::liTS_CANDIDATE)
1550 return badData("Invalid ledger info type");
1551
1552 // Verify reply error
1553 if (m->has_error() && (m->error() < protocol::reNO_LEDGER || m->error() > protocol::reBAD_REQUEST))
1554 {
1555 return badData("Invalid reply error");
1556 }
1557
1558 // Verify ledger nodes.
1559 if (m->nodes_size() <= 0 || m->nodes_size() > Tuning::hardMaxReplyNodes)
1560 {
1561 return badData("Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size()));
1562 }
1563
1564 // If there is a request cookie, attempt to relay the message
1565 if (m->has_requestcookie())
1566 {
1567 if (auto peer = overlay_.findPeerByShortID(m->requestcookie()))
1568 {
1569 m->clear_requestcookie();
1570 peer->send(std::make_shared<Message>(*m, protocol::mtLEDGER_DATA));
1571 }
1572 else
1573 {
1574 JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
1575 }
1576 return;
1577 }
1578
1579 uint256 const ledgerHash{m->ledgerhash()};
1580
    // Otherwise, check whether the received data is for a candidate transaction set
1582 if (m->type() == protocol::liTS_CANDIDATE)
1583 {
1584 std::weak_ptr<PeerImp> weak{shared_from_this()};
1585 app_.getJobQueue().addJob(jtTXN_DATA, "RcvPeerData", [weak, ledgerHash, m]() {
1586 if (auto peer = weak.lock())
1587 {
1588 peer->app_.getInboundTransactions().gotData(ledgerHash, peer, m);
1589 }
1590 });
1591 return;
1592 }
1593
1594 // Consume the message
1595 app_.getInboundLedgers().gotLedgerData(ledgerHash, shared_from_this(), m);
1596}
1597
void
PeerImp::onMessage(std::shared_ptr<protocol::TMProposeSet> const& m)
{
1601 protocol::TMProposeSet& set = *m;
1602
1603 auto const sig = makeSlice(set.signature());
1604
1605 // Preliminary check for the validity of the signature: A DER encoded
1606 // signature can't be longer than 72 bytes.
1607 if ((std::clamp<std::size_t>(sig.size(), 64, 72) != sig.size()) ||
1608 (publicKeyType(makeSlice(set.nodepubkey())) != KeyType::secp256k1))
1609 {
1610 JLOG(p_journal_.warn()) << "Proposal: malformed";
1611 fee_.update(Resource::feeInvalidSignature, " signature can't be longer than 72 bytes");
1612 return;
1613 }
1614
1615 if (!stringIsUint256Sized(set.currenttxhash()) || !stringIsUint256Sized(set.previousledger()))
1616 {
1617 JLOG(p_journal_.warn()) << "Proposal: malformed";
1618 fee_.update(Resource::feeMalformedRequest, "bad hashes");
1619 return;
1620 }
1621
1622 // RH TODO: when isTrusted = false we should probably also cache a key
1623 // suppression for 30 seconds to avoid doing a relatively expensive lookup
1624 // every time a spam packet is received
1625 PublicKey const publicKey{makeSlice(set.nodepubkey())};
1626 auto const isTrusted = app_.validators().trusted(publicKey);
1627
    // If the operator has specified that untrusted proposals be dropped, then
    // this happens here, i.e. before further wasting CPU verifying the
    // signature of an untrusted key
1631 if (!isTrusted)
1632 {
1633 // report untrusted proposal messages
1634 overlay_.reportInboundTraffic(TrafficCount::category::proposal_untrusted, Message::messageSize(*m));
1635
1636 if (app_.config().RELAY_UNTRUSTED_PROPOSALS == -1)
1637 return;
1638 }
1639
1640 uint256 const proposeHash{set.currenttxhash()};
1641 uint256 const prevLedger{set.previousledger()};
1642
1643 NetClock::time_point const closeTime{NetClock::duration{set.closetime()}};
1644
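    // The suppression ID is a hash over the proposal's fields and signature;
    // it lets the HashRouter recognize the same proposal arriving again,
    // possibly from other peers.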
1645 uint256 const suppression =
1646 proposalUniqueId(proposeHash, prevLedger, set.proposeseq(), closeTime, publicKey.slice(), sig);
1647
1648 if (auto [added, relayed] = app_.getHashRouter().addSuppressionPeerWithStatus(suppression, id_); !added)
1649 {
        // Count unique messages (Slots has its own 'HashRouter'), which a peer
        // receives within IDLED seconds since the message has been relayed.
1652 if (relayed && (stopwatch().now() - *relayed) < reduce_relay::IDLED)
1653 overlay_.updateSlotAndSquelch(suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);
1654
1655 // report duplicate proposal messages
1656 overlay_.reportInboundTraffic(TrafficCount::category::proposal_duplicate, Message::messageSize(*m));
1657
1658 JLOG(p_journal_.trace()) << "Proposal: duplicate";
1659
1660 return;
1661 }
1662
1663 if (!isTrusted)
1664 {
1665 if (tracking_.load() == Tracking::diverged)
1666 {
1667 JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (peer divergence)";
1668 return;
1669 }
1670
1671 if (!cluster() && app_.getFeeTrack().isLoadedLocal())
1672 {
1673 JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (load)";
1674 return;
1675 }
1676 }
1677
1678 JLOG(p_journal_.trace()) << "Proposal: " << (isTrusted ? "trusted" : "untrusted");
1679
1680 auto proposal = RCLCxPeerPos(
1681 publicKey,
1682 sig,
1683 suppression,
        RCLCxPeerPos::Proposal{
            prevLedger,
1686 set.proposeseq(),
1687 proposeHash,
1688 closeTime,
1689 app_.timeKeeper().closeTime(),
1690 calcNodeID(app_.validatorManifests().getMasterKey(publicKey))});
1691
1692 std::weak_ptr<PeerImp> weak = shared_from_this();
1693 app_.getJobQueue().addJob(
1694 isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut, "checkPropose", [weak, isTrusted, m, proposal]() {
1695 if (auto peer = weak.lock())
1696 peer->checkPropose(isTrusted, m, proposal);
1697 });
1698}
1699
void
PeerImp::onMessage(std::shared_ptr<protocol::TMStatusChange> const& m)
{
1703 JLOG(p_journal_.trace()) << "Status: Change";
1704
1705 if (!m->has_networktime())
1706 m->set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1707
1708 {
1709 std::lock_guard sl(recentLock_);
1710 if (!last_status_.has_newstatus() || m->has_newstatus())
1711 last_status_ = *m;
1712 else
1713 {
1714 // preserve old status
1715 protocol::NodeStatus status = last_status_.newstatus();
1716 last_status_ = *m;
1717 m->set_newstatus(status);
1718 }
1719 }
1720
1721 if (m->newevent() == protocol::neLOST_SYNC)
1722 {
1723 bool outOfSync{false};
1724 {
1725 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1726 // guarded by recentLock_.
1727 std::lock_guard sl(recentLock_);
1728 if (!closedLedgerHash_.isZero())
1729 {
1730 outOfSync = true;
1731 closedLedgerHash_.zero();
1732 }
1733 previousLedgerHash_.zero();
1734 }
1735 if (outOfSync)
1736 {
1737 JLOG(p_journal_.debug()) << "Status: Out of sync";
1738 }
1739 return;
1740 }
1741
1742 {
1743 uint256 closedLedgerHash{};
1744 bool const peerChangedLedgers{m->has_ledgerhash() && stringIsUint256Sized(m->ledgerhash())};
1745
1746 {
1747 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1748 // guarded by recentLock_.
1749 std::lock_guard sl(recentLock_);
1750 if (peerChangedLedgers)
1751 {
1752 closedLedgerHash_ = m->ledgerhash();
1753 closedLedgerHash = closedLedgerHash_;
1754 addLedger(closedLedgerHash, sl);
1755 }
1756 else
1757 {
1758 closedLedgerHash_.zero();
1759 }
1760
1761 if (m->has_ledgerhashprevious() && stringIsUint256Sized(m->ledgerhashprevious()))
1762 {
1763 previousLedgerHash_ = m->ledgerhashprevious();
1764 addLedger(previousLedgerHash_, sl);
1765 }
1766 else
1767 {
1768 previousLedgerHash_.zero();
1769 }
1770 }
1771 if (peerChangedLedgers)
1772 {
1773 JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;
1774 }
1775 else
1776 {
1777 JLOG(p_journal_.debug()) << "Status: No ledger";
1778 }
1779 }
1780
1781 if (m->has_firstseq() && m->has_lastseq())
1782 {
1783 std::lock_guard sl(recentLock_);
1784
1785 minLedger_ = m->firstseq();
1786 maxLedger_ = m->lastseq();
1787
1788 if ((maxLedger_ < minLedger_) || (minLedger_ == 0) || (maxLedger_ == 0))
1789 minLedger_ = maxLedger_ = 0;
1790 }
1791
1792 if (m->has_ledgerseq() && app_.getLedgerMaster().getValidatedLedgerAge() < 2min)
1793 {
1794 checkTracking(m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex());
1795 }
1796
1797 app_.getOPs().pubPeerStatus([=, this]() -> Json::Value {
        Json::Value j = Json::objectValue;

1800 if (m->has_newstatus())
1801 {
1802 switch (m->newstatus())
1803 {
1804 case protocol::nsCONNECTING:
1805 j[jss::status] = "CONNECTING";
1806 break;
1807 case protocol::nsCONNECTED:
1808 j[jss::status] = "CONNECTED";
1809 break;
1810 case protocol::nsMONITORING:
1811 j[jss::status] = "MONITORING";
1812 break;
1813 case protocol::nsVALIDATING:
1814 j[jss::status] = "VALIDATING";
1815 break;
1816 case protocol::nsSHUTTING:
1817 j[jss::status] = "SHUTTING";
1818 break;
1819 }
1820 }
1821
1822 if (m->has_newevent())
1823 {
1824 switch (m->newevent())
1825 {
1826 case protocol::neCLOSING_LEDGER:
1827 j[jss::action] = "CLOSING_LEDGER";
1828 break;
1829 case protocol::neACCEPTED_LEDGER:
1830 j[jss::action] = "ACCEPTED_LEDGER";
1831 break;
1832 case protocol::neSWITCHED_LEDGER:
1833 j[jss::action] = "SWITCHED_LEDGER";
1834 break;
1835 case protocol::neLOST_SYNC:
1836 j[jss::action] = "LOST_SYNC";
1837 break;
1838 }
1839 }
1840
1841 if (m->has_ledgerseq())
1842 {
1843 j[jss::ledger_index] = m->ledgerseq();
1844 }
1845
1846 if (m->has_ledgerhash())
1847 {
1848 uint256 closedLedgerHash{};
1849 {
1850 std::lock_guard sl(recentLock_);
1851 closedLedgerHash = closedLedgerHash_;
1852 }
1853 j[jss::ledger_hash] = to_string(closedLedgerHash);
1854 }
1855
1856 if (m->has_networktime())
1857 {
1858 j[jss::date] = Json::UInt(m->networktime());
1859 }
1860
1861 if (m->has_firstseq() && m->has_lastseq())
1862 {
1863 j[jss::ledger_index_min] = Json::UInt(m->firstseq());
1864 j[jss::ledger_index_max] = Json::UInt(m->lastseq());
1865 }
1866
1867 return j;
1868 });
1869}
1870
1871void
1872PeerImp::checkTracking(std::uint32_t validationSeq)
1873{
1874 std::uint32_t serverSeq;
1875 {
1876 // Extract the sequence number of the highest
1877 // ledger this peer has
1878 std::lock_guard sl(recentLock_);
1879
1880 serverSeq = maxLedger_;
1881 }
1882 if (serverSeq != 0)
1883 {
1884 // Compare the peer's ledger sequence to the
1885 // sequence of a recently-validated ledger
1886 checkTracking(serverSeq, validationSeq);
1887 }
1888}
1889
1890void
1891PeerImp::checkTracking(std::uint32_t seq1, std::uint32_t seq2)
1892{
1893 int diff = std::max(seq1, seq2) - std::min(seq1, seq2);
1894
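    // Values between the two limits leave the tracking state unchanged,
    // which avoids flapping around a single threshold.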
1895 if (diff < Tuning::convergedLedgerLimit)
1896 {
1897 // The peer's ledger sequence is close to the validation's
1898 tracking_ = Tracking::converged;
1899 }
1900
1901 if ((diff > Tuning::divergedLedgerLimit) && (tracking_.load() != Tracking::diverged))
1902 {
1903 // The peer's ledger sequence is way off the validation's
1904 std::lock_guard sl(recentLock_);
1905
1906 tracking_ = Tracking::diverged;
1907 trackingTime_ = clock_type::now();
1908 }
1909}
1910
void
PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactionSet> const& m)
{
1914 if (!stringIsUint256Sized(m->hash()))
1915 {
1916 fee_.update(Resource::feeMalformedRequest, "bad hash");
1917 return;
1918 }
1919
1920 uint256 const hash{m->hash()};
1921
1922 if (m->status() == protocol::tsHAVE)
1923 {
1924 std::lock_guard sl(recentLock_);
1925
1926 if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) != recentTxSets_.end())
1927 {
1928 fee_.update(Resource::feeUselessData, "duplicate (tsHAVE)");
1929 return;
1930 }
1931
1932 recentTxSets_.push_back(hash);
1933 }
1934}
1935
1936void
1937PeerImp::onValidatorListMessage(
1938 std::string const& messageType,
1939 std::string const& manifest,
1940 std::uint32_t version,
1941 std::vector<ValidatorBlobInfo> const& blobs)
1942{
1943 // If there are no blobs, the message is malformed (possibly because of
1944 // ValidatorList class rules), so charge accordingly and skip processing.
1945 if (blobs.empty())
1946 {
1947 JLOG(p_journal_.warn()) << "Ignored malformed " << messageType;
1948 // This shouldn't ever happen with a well-behaved peer
1949 fee_.update(Resource::feeHeavyBurdenPeer, "no blobs");
1950 return;
1951 }
1952
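    // The suppression hash covers the manifest, every blob, and the version,
    // so any change to the list content is treated as a new message.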
1953 auto const hash = sha512Half(manifest, blobs, version);
1954
1955 JLOG(p_journal_.debug()) << "Received " << messageType;
1956
1957 if (!app_.getHashRouter().addSuppressionPeer(hash, id_))
1958 {
1959 JLOG(p_journal_.debug()) << messageType << ": received duplicate " << messageType;
1960 // Charging this fee here won't hurt the peer in the normal
1961 // course of operation (ie. refresh every 5 minutes), but
1962 // will add up if the peer is misbehaving.
1963 fee_.update(Resource::feeUselessData, "duplicate");
1964 return;
1965 }
1966
1967 auto const applyResult = app_.validators().applyListsAndBroadcast(
1968 manifest,
1969 version,
1970 blobs,
1971 remote_address_.to_string(),
1972 hash,
1973 app_.overlay(),
1974 app_.getHashRouter(),
1975 app_.getOPs());
1976
1977 JLOG(p_journal_.debug()) << "Processed " << messageType << " version " << version << " from "
1978 << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
1979 : "unknown or invalid publisher")
1980 << " with best result " << to_string(applyResult.bestDisposition());
1981
1982 // Act based on the best result
1983 switch (applyResult.bestDisposition())
1984 {
1985 // New list
1986 case ListDisposition::accepted:
1987 // Newest list is expired, and that needs to be broadcast, too
1988 case ListDisposition::expired:
1989 // Future list
1990 case ListDisposition::pending: {
1991 std::lock_guard<std::mutex> sl(recentLock_);
1992
1993 XRPL_ASSERT(
1994 applyResult.publisherKey,
1995 "xrpl::PeerImp::onValidatorListMessage : publisher key is "
1996 "set");
1997 auto const& pubKey = *applyResult.publisherKey;
1998#ifndef NDEBUG
1999 if (auto const iter = publisherListSequences_.find(pubKey); iter != publisherListSequences_.end())
2000 {
2001 XRPL_ASSERT(
2002 iter->second < applyResult.sequence, "xrpl::PeerImp::onValidatorListMessage : lower sequence");
2003 }
2004#endif
2005 publisherListSequences_[pubKey] = applyResult.sequence;
2006 }
2007 break;
2008 case ListDisposition::same_sequence:
2009 case ListDisposition::known_sequence:
2010#ifndef NDEBUG
2011 {
2012 std::lock_guard<std::mutex> sl(recentLock_);
2013 XRPL_ASSERT(
2014 applyResult.sequence && applyResult.publisherKey,
2015 "xrpl::PeerImp::onValidatorListMessage : nonzero sequence "
2016 "and set publisher key");
2017 XRPL_ASSERT(
2018 publisherListSequences_[*applyResult.publisherKey] <= applyResult.sequence,
2019 "xrpl::PeerImp::onValidatorListMessage : maximum sequence");
2020 }
2021#endif // !NDEBUG
2022
2023 break;
2024 case ListDisposition::stale:
2025 case ListDisposition::untrusted:
2026 case ListDisposition::invalid:
2027 case ListDisposition::unsupported_version:
2028 break;
2029 // LCOV_EXCL_START
2030 default:
2031 UNREACHABLE(
2032 "xrpl::PeerImp::onValidatorListMessage : invalid best list "
2033 "disposition");
2034 // LCOV_EXCL_STOP
2035 }
2036
2037 // Charge based on the worst result
2038 switch (applyResult.worstDisposition())
2039 {
2040 case ListDisposition::accepted:
2041 case ListDisposition::expired:
2042 case ListDisposition::pending:
2043 // No charges for good data
2044 break;
2045 case ListDisposition::same_sequence:
2046 case ListDisposition::known_sequence:
2047 // Charging this fee here won't hurt the peer in the normal
2048 // course of operation (ie. refresh every 5 minutes), but
2049 // will add up if the peer is misbehaving.
2050 fee_.update(Resource::feeUselessData, " duplicate (same_sequence or known_sequence)");
2051 break;
2052 case ListDisposition::stale:
2053 // There are very few good reasons for a peer to send an
2054 // old list, particularly more than once.
2055 fee_.update(Resource::feeInvalidData, "expired");
2056 break;
2057 case ListDisposition::untrusted:
2058 // Charging this fee here won't hurt the peer in the normal
2059 // course of operation (ie. refresh every 5 minutes), but
2060 // will add up if the peer is misbehaving.
2061 fee_.update(Resource::feeUselessData, "untrusted");
2062 break;
2063 case ListDisposition::invalid:
2064 // This shouldn't ever happen with a well-behaved peer
2065 fee_.update(Resource::feeInvalidSignature, "invalid list disposition");
2066 break;
2067 case ListDisposition::unsupported_version:
2068 // During a version transition, this may be legitimate.
2069 // If it happens frequently, that's probably bad.
2070 fee_.update(Resource::feeInvalidData, "version");
2071 break;
2072 // LCOV_EXCL_START
2073 default:
2074 UNREACHABLE(
2075 "xrpl::PeerImp::onValidatorListMessage : invalid worst list "
2076 "disposition");
2077 // LCOV_EXCL_STOP
2078 }
2079
2080 // Log based on all the results.
2081 for (auto const& [disp, count] : applyResult.dispositions)
2082 {
2083 switch (disp)
2084 {
2085 // New list
2086 case ListDisposition::accepted:
2087 JLOG(p_journal_.debug()) << "Applied " << count << " new " << messageType;
2088 break;
2089 // Newest list is expired, and that needs to be broadcast, too
2090 case ListDisposition::expired:
2091 JLOG(p_journal_.debug()) << "Applied " << count << " expired " << messageType;
2092 break;
2093 // Future list
2094 case ListDisposition::pending:
2095 JLOG(p_journal_.debug()) << "Processed " << count << " future " << messageType;
2096 break;
2097 case ListDisposition::same_sequence:
2098 JLOG(p_journal_.warn()) << "Ignored " << count << " " << messageType << "(s) with current sequence";
2099 break;
2100 case ListDisposition::known_sequence:
2101 JLOG(p_journal_.warn()) << "Ignored " << count << " " << messageType << "(s) with future sequence";
2102 break;
2103 case ListDisposition::stale:
                JLOG(p_journal_.warn()) << "Ignored " << count << " stale " << messageType;
2105 break;
2106 case ListDisposition::untrusted:
2107 JLOG(p_journal_.warn()) << "Ignored " << count << " untrusted " << messageType;
2108 break;
            case ListDisposition::unsupported_version:
                JLOG(p_journal_.warn()) << "Ignored " << count << " unsupported version " << messageType;
                break;
            case ListDisposition::invalid:
                JLOG(p_journal_.warn()) << "Ignored " << count << " invalid " << messageType;
                break;
2115 // LCOV_EXCL_START
2116 default:
2117 UNREACHABLE(
2118 "xrpl::PeerImp::onValidatorListMessage : invalid list "
2119 "disposition");
2120 // LCOV_EXCL_STOP
2121 }
2122 }
2123}
2124
void
PeerImp::onMessage(std::shared_ptr<protocol::TMValidatorList> const& m)
{
2128 try
2129 {
2130 if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))
2131 {
2132 JLOG(p_journal_.debug()) << "ValidatorList: received validator list from peer using "
2133 << "protocol version " << to_string(protocol_)
2134 << " which shouldn't support this feature.";
2135 fee_.update(Resource::feeUselessData, "unsupported peer");
2136 return;
2137 }
2138 onValidatorListMessage("ValidatorList", m->manifest(), m->version(), ValidatorList::parseBlobs(*m));
2139 }
2140 catch (std::exception const& e)
2141 {
2142 JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what();
2143 using namespace std::string_literals;
2144 fee_.update(Resource::feeInvalidData, e.what());
2145 }
2146}
2147
void
PeerImp::onMessage(std::shared_ptr<protocol::TMValidatorListCollection> const& m)
{
2151 try
2152 {
2153 if (!supportsFeature(ProtocolFeature::ValidatorList2Propagation))
2154 {
2155 JLOG(p_journal_.debug()) << "ValidatorListCollection: received validator list from peer "
2156 << "using protocol version " << to_string(protocol_)
2157 << " which shouldn't support this feature.";
2158 fee_.update(Resource::feeUselessData, "unsupported peer");
2159 return;
2160 }
2161 else if (m->version() < 2)
2162 {
2163 JLOG(p_journal_.debug()) << "ValidatorListCollection: received invalid validator list "
2164 "version "
2165 << m->version() << " from peer using protocol version " << to_string(protocol_);
2166 fee_.update(Resource::feeInvalidData, "wrong version");
2167 return;
2168 }
2169 onValidatorListMessage("ValidatorListCollection", m->manifest(), m->version(), ValidatorList::parseBlobs(*m));
2170 }
2171 catch (std::exception const& e)
2172 {
2173 JLOG(p_journal_.warn()) << "ValidatorListCollection: Exception, " << e.what();
2174 using namespace std::string_literals;
2175 fee_.update(Resource::feeInvalidData, e.what());
2176 }
2177}
2178
2179 void
2180 PeerImp::onMessage(std::shared_ptr<protocol::TMValidation> const& m)
2181 {
2182 if (m->validation().size() < 50)
2183 {
2184 JLOG(p_journal_.warn()) << "Validation: Too small";
2185 fee_.update(Resource::feeMalformedRequest, "too small");
2186 return;
2187 }
2188
2189 try
2190 {
2191 auto const closeTime = app_.timeKeeper().closeTime();
2192
2193 std::shared_ptr<STValidation> val;
2194 {
2195 SerialIter sit(makeSlice(m->validation()));
2196 val = std::make_shared<STValidation>(
2197 std::ref(sit),
2198 [this](PublicKey const& pk) { return calcNodeID(app_.validatorManifests().getMasterKey(pk)); },
2199 false);
2200 val->setSeen(closeTime);
2201 }
2202
2203 if (!isCurrent(
2204 app_.getValidations().parms(), app_.timeKeeper().closeTime(), val->getSignTime(), val->getSeenTime()))
2205 {
2206 JLOG(p_journal_.trace()) << "Validation: Not current";
2207 fee_.update(Resource::feeUselessData, "not current");
2208 return;
2209 }
2210
2211 // RH TODO: when isTrusted = false we should probably also cache a key
2212 // suppression for 30 seconds to avoid doing a relatively expensive
2213 // lookup every time a spam packet is received
2214 auto const isTrusted = app_.validators().trusted(val->getSignerPublic());
2215
2216 // If the operator has specified that untrusted validations be
2217 // dropped, that happens here, i.e. before wasting further CPU
2218 // verifying the signature of an untrusted key.
2219 if (!isTrusted)
2220 {
2221 // increase untrusted validations received
2222 overlay_.reportInboundTraffic(TrafficCount::category::validation_untrusted, Message::messageSize(*m));
2223
2224 if (app_.config().RELAY_UNTRUSTED_VALIDATIONS == -1)
2225 return;
2226 }
2227
2228 auto key = sha512Half(makeSlice(m->validation()));
2229
2230 auto [added, relayed] = app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);
2231
2232 if (!added)
2233 {
2234 // Count unique messages (Slots has its own 'HashRouter') that a
2235 // peer receives within IDLED seconds after the message has been
2236 // relayed.
2237 if (relayed && (stopwatch().now() - *relayed) < reduce_relay::IDLED)
2238 overlay_.updateSlotAndSquelch(key, val->getSignerPublic(), id_, protocol::mtVALIDATION);
2239
2240 // increase duplicate validations received
2241 overlay_.reportInboundTraffic(TrafficCount::category::validation_duplicate, Message::messageSize(*m));
2242
2243 JLOG(p_journal_.trace()) << "Validation: duplicate";
2244 return;
2245 }
2246
2247 if (!isTrusted && (tracking_.load() == Tracking::diverged))
2248 {
2249 JLOG(p_journal_.debug()) << "Dropping untrusted validation from diverged peer";
2250 }
2251 else if (isTrusted || !app_.getFeeTrack().isLoadedLocal())
2252 {
2253 std::string const name = isTrusted ? "ChkTrust" : "ChkUntrust";
2254
2255 std::weak_ptr<PeerImp> weak = shared_from_this();
2256 app_.getJobQueue().addJob(isTrusted ? jtVALIDATION_t : jtVALIDATION_ut, name, [weak, val, m, key]() {
2257 if (auto peer = weak.lock())
2258 peer->checkValidation(val, key, m);
2259 });
2260 }
2261 else
2262 {
2263 JLOG(p_journal_.debug()) << "Dropping untrusted validation for load";
2264 }
2265 }
2266 catch (std::exception const& e)
2267 {
2268 JLOG(p_journal_.warn()) << "Exception processing validation: " << e.what();
2269 using namespace std::string_literals;
2270 fee_.update(Resource::feeMalformedRequest, e.what());
2271 }
2272}
2273
2274 void
2275 PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
2276 {
2277 protocol::TMGetObjectByHash& packet = *m;
2278
2279 JLOG(p_journal_.trace()) << "received TMGetObjectByHash " << packet.type() << " " << packet.objects_size();
2280
2281 if (packet.query())
2282 {
2283 // this is a query
2284 if (send_queue_.size() >= Tuning::dropSendQueue)
2285 {
2286 JLOG(p_journal_.debug()) << "GetObject: Large send queue";
2287 return;
2288 }
2289
2290 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2291 {
2292 doFetchPack(m);
2293 return;
2294 }
2295
2296 if (packet.type() == protocol::TMGetObjectByHash::otTRANSACTIONS)
2297 {
2298 if (!txReduceRelayEnabled())
2299 {
2300 JLOG(p_journal_.error()) << "TMGetObjectByHash: tx reduce-relay is disabled";
2301 fee_.update(Resource::feeMalformedRequest, "disabled");
2302 return;
2303 }
2304
2305 std::weak_ptr<PeerImp> weak = shared_from_this();
2306 app_.getJobQueue().addJob(jtREQUESTED_TXN, "DoTxs", [weak, m]() {
2307 if (auto peer = weak.lock())
2308 peer->doTransactions(m);
2309 });
2310 return;
2311 }
2312
2313 protocol::TMGetObjectByHash reply;
2314
2315 reply.set_query(false);
2316
2317 if (packet.has_seq())
2318 reply.set_seq(packet.seq());
2319
2320 reply.set_type(packet.type());
2321
2322 if (packet.has_ledgerhash())
2323 {
2324 if (!stringIsUint256Sized(packet.ledgerhash()))
2325 {
2326 fee_.update(Resource::feeMalformedRequest, "ledger hash");
2327 return;
2328 }
2329
2330 reply.set_ledgerhash(packet.ledgerhash());
2331 }
2332
2333 fee_.update(Resource::feeModerateBurdenPeer, " received a get object by hash request");
2334
2335 // This is a very minimal implementation
2336 for (int i = 0; i < packet.objects_size(); ++i)
2337 {
2338 auto const& obj = packet.objects(i);
2339 if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2340 {
2341 uint256 const hash{obj.hash()};
2342 // VFALCO TODO Move this someplace more sensible so we don't
2343 // need to inject the NodeStore interfaces.
2344 std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
2345 auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
2346 if (nodeObject)
2347 {
2348 protocol::TMIndexedObject& newObj = *reply.add_objects();
2349 newObj.set_hash(hash.begin(), hash.size());
2350 newObj.set_data(&nodeObject->getData().front(), nodeObject->getData().size());
2351
2352 if (obj.has_nodeid())
2353 newObj.set_index(obj.nodeid());
2354 if (obj.has_ledgerseq())
2355 newObj.set_ledgerseq(obj.ledgerseq());
2356
2357 // VFALCO NOTE "seq" in the message is obsolete
2358
2359 // Check if by adding this object, reply has reached its
2360 // limit
2361 if (reply.objects_size() >= Tuning::hardMaxReplyNodes)
2362 {
2363 fee_.update(Resource::feeModerateBurdenPeer, " Reply limit reached. Truncating reply.");
2364 break;
2365 }
2366 }
2367 }
2368 }
2369
2370 JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size() << " of " << packet.objects_size();
2371 send(std::make_shared<Message>(reply, protocol::mtGET_OBJECTS));
2372 }
2373 else
2374 {
2375 // this is a reply
2376 std::uint32_t pLSeq = 0;
2377 bool pLDo = true;
2378 bool progress = false;
2379
2380 for (int i = 0; i < packet.objects_size(); ++i)
2381 {
2382 protocol::TMIndexedObject const& obj = packet.objects(i);
2383
2384 if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2385 {
2386 if (obj.has_ledgerseq())
2387 {
2388 if (obj.ledgerseq() != pLSeq)
2389 {
2390 if (pLDo && (pLSeq != 0))
2391 {
2392 JLOG(p_journal_.debug()) << "GetObj: Full fetch pack for " << pLSeq;
2393 }
2394 pLSeq = obj.ledgerseq();
2395 pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);
2396
2397 if (!pLDo)
2398 {
2399 JLOG(p_journal_.debug()) << "GetObj: Late fetch pack for " << pLSeq;
2400 }
2401 else
2402 progress = true;
2403 }
2404 }
2405
2406 if (pLDo)
2407 {
2408 uint256 const hash{obj.hash()};
2409
2410 app_.getLedgerMaster().addFetchPack(
2411 hash, std::make_shared<Blob>(obj.data().begin(), obj.data().end()));
2412 }
2413 }
2414 }
2415
2416 if (pLDo && (pLSeq != 0))
2417 {
2418 JLOG(p_journal_.debug()) << "GetObj: Partial fetch pack for " << pLSeq;
2419 }
2420 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2421 app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
2422 }
2423}
2424
2425 void
2426 PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactions> const& m)
2427 {
2428 if (!txReduceRelayEnabled())
2429 {
2430 JLOG(p_journal_.error()) << "TMHaveTransactions: tx reduce-relay is disabled";
2431 fee_.update(Resource::feeMalformedRequest, "disabled");
2432 return;
2433 }
2434
2435 std::weak_ptr<PeerImp> weak = shared_from_this();
2436 app_.getJobQueue().addJob(jtMISSING_TXN, "HandleHaveTxs", [weak, m]() {
2437 if (auto peer = weak.lock())
2438 peer->handleHaveTransactions(m);
2439 });
2440}
2441
2442void
2443PeerImp::handleHaveTransactions(std::shared_ptr<protocol::TMHaveTransactions> const& m)
2444{
2445 protocol::TMGetObjectByHash tmBH;
2446 tmBH.set_type(protocol::TMGetObjectByHash_ObjectType_otTRANSACTIONS);
2447 tmBH.set_query(true);
2448
2449 JLOG(p_journal_.trace()) << "received TMHaveTransactions " << m->hashes_size();
2450
2451 for (std::uint32_t i = 0; i < m->hashes_size(); i++)
2452 {
2453 if (!stringIsUint256Sized(m->hashes(i)))
2454 {
2455 JLOG(p_journal_.error()) << "TMHaveTransactions with invalid hash size";
2456 fee_.update(Resource::feeMalformedRequest, "hash size");
2457 return;
2458 }
2459
2460 uint256 hash(m->hashes(i));
2461
2462 auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2463
2464 JLOG(p_journal_.trace()) << "checking transaction " << (bool)txn;
2465
2466 if (!txn)
2467 {
2468 JLOG(p_journal_.debug()) << "adding transaction to request";
2469
2470 auto obj = tmBH.add_objects();
2471 obj->set_hash(hash.data(), hash.size());
2472 }
2473 else
2474 {
2475 // Erase only if a peer has seen this tx. If the peer has not
2476 // seen this tx then the tx could not have been queued for this
2477 // peer.
2478 removeTxQueue(hash);
2479 }
2480 }
2481
2482 JLOG(p_journal_.trace()) << "transaction request object is " << tmBH.objects_size();
2483
2484 if (tmBH.objects_size() > 0)
2485 send(std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS));
2486}
2487
2488 void
2489 PeerImp::onMessage(std::shared_ptr<protocol::TMTransactions> const& m)
2490 {
2491 if (!txReduceRelayEnabled())
2492 {
2493 JLOG(p_journal_.error()) << "TMTransactions: tx reduce-relay is disabled";
2494 fee_.update(Resource::feeMalformedRequest, "disabled");
2495 return;
2496 }
2497
2498 JLOG(p_journal_.trace()) << "received TMTransactions " << m->transactions_size();
2499
2500 overlay_.addTxMetrics(m->transactions_size());
2501
2502 for (std::uint32_t i = 0; i < m->transactions_size(); ++i)
2503 handleTransaction(
2504 std::shared_ptr<protocol::TMTransaction>(m->mutable_transactions(i), [](protocol::TMTransaction*) {}),
2505 false,
2506 true);
2507}
2508
2509void
2510PeerImp::onMessage(std::shared_ptr<protocol::TMSquelch> const& m)
2511{
2512 using on_message_fn = void (PeerImp::*)(std::shared_ptr<protocol::TMSquelch> const&);
2513 if (!strand_.running_in_this_thread())
2514 return post(strand_, std::bind((on_message_fn)&PeerImp::onMessage, shared_from_this(), m));
2515
2516 if (!m->has_validatorpubkey())
2517 {
2518 fee_.update(Resource::feeInvalidData, "squelch no pubkey");
2519 return;
2520 }
2521 auto validator = m->validatorpubkey();
2522 auto const slice{makeSlice(validator)};
2523 if (!publicKeyType(slice))
2524 {
2525 fee_.update(Resource::feeInvalidData, "squelch bad pubkey");
2526 return;
2527 }
2528 PublicKey key(slice);
2529
2530 // Ignore the squelch for validator's own messages.
2531 if (key == app_.getValidationPublicKey())
2532 {
2533 JLOG(p_journal_.debug()) << "onMessage: TMSquelch discarding validator's squelch " << slice;
2534 return;
2535 }
2536
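// A squelch message either clears an existing squelch for this validator
// key (squelch == false) or installs one for the requested duration; if
// addSquelch() rejects the requested duration, the peer is charged for
// sending invalid data.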
2537 std::uint32_t duration = m->has_squelchduration() ? m->squelchduration() : 0;
2538 if (!m->squelch())
2539 squelch_.removeSquelch(key);
2540 else if (!squelch_.addSquelch(key, std::chrono::seconds{duration}))
2541 fee_.update(Resource::feeInvalidData, "squelch duration");
2542
2543 JLOG(p_journal_.debug()) << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;
2544}
2545
2546//--------------------------------------------------------------------------
2547
2548void
2549PeerImp::addLedger(uint256 const& hash, std::lock_guard<std::mutex> const& lockedRecentLock)
2550{
2551 // lockedRecentLock is passed as a reminder that recentLock_ must be
2552 // locked by the caller.
2553 (void)lockedRecentLock;
2554
2555 if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) != recentLedgers_.end())
2556 return;
2557
2558 recentLedgers_.push_back(hash);
2559}
2560
2561void
2562PeerImp::doFetchPack(std::shared_ptr<protocol::TMGetObjectByHash> const& packet)
2563{
2564 // VFALCO TODO Invert this dependency using an observer and shared state
2565 // object. Don't queue fetch pack jobs if we're under load or we already
2566 // have some queued.
2567 if (app_.getFeeTrack().isLoadedLocal() || (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
2568 (app_.getJobQueue().getJobCount(jtPACK) > 10))
2569 {
2570 JLOG(p_journal_.info()) << "Too busy to make fetch pack";
2571 return;
2572 }
2573
2574 if (!stringIsUint256Sized(packet->ledgerhash()))
2575 {
2576 JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
2577 fee_.update(Resource::feeMalformedRequest, "hash size");
2578 return;
2579 }
2580
2581 fee_.fee = Resource::feeHeavyBurdenPeer;
2582
2583 uint256 const hash{packet->ledgerhash()};
2584
2585 std::weak_ptr<PeerImp> weak = shared_from_this();
2586 auto elapsed = UptimeClock::now();
2587 auto const pap = &app_;
2588 app_.getJobQueue().addJob(jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed]() {
2589 pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
2590 });
2591}
2592
2593void
2594PeerImp::doTransactions(std::shared_ptr<protocol::TMGetObjectByHash> const& packet)
2595{
2596 protocol::TMTransactions reply;
2597
2598 JLOG(p_journal_.trace()) << "received TMGetObjectByHash requesting tx " << packet->objects_size();
2599
2600 if (packet->objects_size() > reduce_relay::MAX_TX_QUEUE_SIZE)
2601 {
2602 JLOG(p_journal_.error()) << "doTransactions, invalid number of hashes";
2603 fee_.update(Resource::feeMalformedRequest, "too big");
2604 return;
2605 }
2606
2607 for (std::uint32_t i = 0; i < packet->objects_size(); ++i)
2608 {
2609 auto const& obj = packet->objects(i);
2610
2611 if (!stringIsUint256Sized(obj.hash()))
2612 {
2613 fee_.update(Resource::feeMalformedRequest, "hash size");
2614 return;
2615 }
2616
2617 uint256 hash(obj.hash());
2618
2619 auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2620
2621 if (!txn)
2622 {
2623 JLOG(p_journal_.error()) << "doTransactions, transaction not found " << Slice(hash.data(), hash.size());
2624 fee_.update(Resource::feeMalformedRequest, "tx not found");
2625 return;
2626 }
2627
2628 Serializer s;
2629 auto tx = reply.add_transactions();
2630 auto sttx = txn->getSTransaction();
2631 sttx->add(s);
2632 tx->set_rawtransaction(s.data(), s.size());
2633 tx->set_status(txn->getStatus() == INCLUDED ? protocol::tsCURRENT : protocol::tsNEW);
2634 tx->set_receivetimestamp(app_.timeKeeper().now().time_since_epoch().count());
2635 tx->set_deferred(txn->getSubmitResult().queued);
2636 }
2637
2638 if (reply.transactions_size() > 0)
2639 send(std::make_shared<Message>(reply, protocol::mtTRANSACTIONS));
2640}
2641
2642void
2643PeerImp::checkTransaction(
2644 HashRouterFlags flags,
2645 bool checkSignature,
2646 std::shared_ptr<STTx const> const& stx,
2647 bool batch)
2648{
2649 // VFALCO TODO Rewrite to not use exceptions
2650 try
2651 {
2652 // charge strongly for relaying batch txns
2653 // LCOV_EXCL_START
2654 /*
2655 There is no need to check whether the featureBatch amendment is
2656 enabled.
2657
2658 * If the `tfInnerBatchTxn` flag is set, and the amendment is
2659 enabled, then it's an invalid transaction because inner batch
2660 transactions should not be relayed.
2661 * If the `tfInnerBatchTxn` flag is set, and the amendment is *not*
2662 enabled, then the transaction is malformed because it's using an
2663 "unknown" flag. There's no need to waste the resources to send it
2664 to the transaction engine.
2665
2666 We don't normally check transaction validity at this level, but
2667 since we _need_ to check it when the amendment is enabled, we may as
2668 well drop it if the flag is set regardless.
2669 */
2670 if (stx->isFlag(tfInnerBatchTxn))
2671 {
2672 JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing "
2673 "tfInnerBatchTxn (checkSignature).";
2674 charge(Resource::feeModerateBurdenPeer, "inner batch txn");
2675 return;
2676 }
2677 // LCOV_EXCL_STOP
2678
2679 // Expired?
2680 if (stx->isFieldPresent(sfLastLedgerSequence) &&
2681 (stx->getFieldU32(sfLastLedgerSequence) < app_.getLedgerMaster().getValidLedgerIndex()))
2682 {
2683 JLOG(p_journal_.info()) << "Marking transaction " << stx->getTransactionID()
2684 << "as BAD because it's expired";
2685 app_.getHashRouter().setFlags(stx->getTransactionID(), HashRouterFlags::BAD);
2686 charge(Resource::feeUselessData, "expired tx");
2687 return;
2688 }
2689
2690 if (isPseudoTx(*stx))
2691 {
2692 // Don't do anything with pseudo transactions except put them in the
2693 // TransactionMaster cache
2694 std::string reason;
2695 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2696 XRPL_ASSERT(
2697 tx->getStatus() == NEW,
2698 "xrpl::PeerImp::checkTransaction Transaction created "
2699 "correctly");
2700 if (tx->getStatus() == NEW)
2701 {
2702 JLOG(p_journal_.debug()) << "Processing " << (batch ? "batch" : "unsolicited")
2703 << " pseudo-transaction tx " << tx->getID();
2704
2705 app_.getMasterTransaction().canonicalize(&tx);
2706 // Tell the overlay about it, but don't relay it.
2707 auto const toSkip = app_.getHashRouter().shouldRelay(tx->getID());
2708 if (toSkip)
2709 {
2710 JLOG(p_journal_.debug()) << "Passing skipped pseudo-transaction tx " << tx->getID();
2711 app_.overlay().relay(tx->getID(), {}, *toSkip);
2712 }
2713 if (!batch)
2714 {
2715 JLOG(p_journal_.debug()) << "Charging for pseudo-transaction tx " << tx->getID();
2716 charge(Resource::feeUselessData, "pseudo tx");
2717 }
2718
2719 return;
2720 }
2721 }
2722
2723 if (checkSignature)
2724 {
2725 // Check the signature before handing off to the job queue.
2726 if (auto [valid, validReason] = checkValidity(
2727 app_.getHashRouter(), *stx, app_.getLedgerMaster().getValidatedRules(), app_.config());
2728 valid != Validity::Valid)
2729 {
2730 if (!validReason.empty())
2731 {
2732 JLOG(p_journal_.debug()) << "Exception checking transaction: " << validReason;
2733 }
2734
2735 // Probably not necessary to set HashRouterFlags::BAD, but
2736 // doesn't hurt.
2737 app_.getHashRouter().setFlags(stx->getTransactionID(), HashRouterFlags::BAD);
2738 charge(Resource::feeInvalidSignature, "check transaction signature failure");
2739 return;
2740 }
2741 }
2742 else
2743 {
2744 forceValidity(app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);
2745 }
2746
2747 std::string reason;
2748 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2749
2750 if (tx->getStatus() == INVALID)
2751 {
2752 if (!reason.empty())
2753 {
2754 JLOG(p_journal_.debug()) << "Exception checking transaction: " << reason;
2755 }
2756 app_.getHashRouter().setFlags(stx->getTransactionID(), HashRouterFlags::BAD);
2757 charge(Resource::feeInvalidSignature, "tx (impossible)");
2758 return;
2759 }
2760
2761 bool const trusted = any(flags & HashRouterFlags::TRUSTED);
2762 app_.getOPs().processTransaction(tx, trusted, false, NetworkOPs::FailHard::no);
2763 }
2764 catch (std::exception const& ex)
2765 {
2766 JLOG(p_journal_.warn()) << "Exception in " << __func__ << ": " << ex.what();
2767 app_.getHashRouter().setFlags(stx->getTransactionID(), HashRouterFlags::BAD);
2768 using namespace std::string_literals;
2769 charge(Resource::feeInvalidData, "tx "s + ex.what());
2770 }
2771}
2772
2773// Called from our JobQueue
2774void
2775PeerImp::checkPropose(bool isTrusted, std::shared_ptr<protocol::TMProposeSet> const& packet, RCLCxPeerPos peerPos)
2776{
2777 JLOG(p_journal_.trace()) << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";
2778
2779 XRPL_ASSERT(packet, "xrpl::PeerImp::checkPropose : non-null packet");
2780
2781 if (!cluster() && !peerPos.checkSign())
2782 {
2783 std::string desc{"Proposal fails sig check"};
2784 JLOG(p_journal_.warn()) << desc;
2785 charge(Resource::feeInvalidSignature, desc);
2786 return;
2787 }
2788
2789 bool relay;
2790
2791 if (isTrusted)
2792 relay = app_.getOPs().processTrustedProposal(peerPos);
2793 else
2794 relay = app_.config().RELAY_UNTRUSTED_PROPOSALS == 1 || cluster();
2795
2796 if (relay)
2797 {
2798 // haveMessage contains the peers that are suppressed, i.e. the peers
2799 // that are the source of the message; consequently the message should
2800 // not be relayed to these peers, but it must still be counted
2801 // as part of the squelch logic.
2802 auto haveMessage = app_.overlay().relay(*packet, peerPos.suppressionID(), peerPos.publicKey());
2803 if (!haveMessage.empty())
2804 overlay_.updateSlotAndSquelch(
2805 peerPos.suppressionID(), peerPos.publicKey(), std::move(haveMessage), protocol::mtPROPOSE_LEDGER);
2806 }
2807}
2808
2809 void
2810 PeerImp::checkValidation(
2811 std::shared_ptr<STValidation> const& val,
2812 uint256 const& key,
2813 std::shared_ptr<protocol::TMValidation> const& packet)
2814 {
2815 if (!val->isValid())
2816 {
2817 std::string desc{"Validation forwarded by peer is invalid"};
2818 JLOG(p_journal_.debug()) << desc;
2819 charge(Resource::feeInvalidSignature, desc);
2820 return;
2821 }
2822
2823 // FIXME it should be safe to remove this try/catch. Investigate codepaths.
2824 try
2825 {
2826 if (app_.getOPs().recvValidation(val, std::to_string(id())) || cluster())
2827 {
2828 // haveMessage contains the peers that are suppressed, i.e. the peers
2829 // that are the source of the message; consequently the message should
2830 // not be relayed to these peers, but it must still be counted
2831 // as part of the squelch logic.
2832 auto haveMessage = overlay_.relay(*packet, key, val->getSignerPublic());
2833 if (!haveMessage.empty())
2834 {
2835 overlay_.updateSlotAndSquelch(
2836 key, val->getSignerPublic(), std::move(haveMessage), protocol::mtVALIDATION);
2837 }
2838 }
2839 }
2840 catch (std::exception const& ex)
2841 {
2842 JLOG(p_journal_.trace()) << "Exception processing validation: " << ex.what();
2843 using namespace std::string_literals;
2844 charge(Resource::feeMalformedRequest, "validation "s + ex.what());
2845 }
2846}
2847
2848// Returns the set of peers that can help us get
2849// the TX tree with the specified root hash.
2850//
2852getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip)
2853{
2855 int retScore = 0;
2856
2858 if (p->hasTxSet(rootHash) && p.get() != skip)
2859 {
2860 auto score = p->getScore(true);
2861 if (!ret || (score > retScore))
2862 {
2863 ret = std::move(p);
2864 retScore = score;
2865 }
2866 }
2867 });
2868
2869 return ret;
2870}
2871
2872// Returns a random peer weighted by how likely to
2873// have the ledger and how responsive it is.
2874//
2876getPeerWithLedger(OverlayImpl& ov, uint256 const& ledgerHash, LedgerIndex ledger, PeerImp const* skip)
2877{
2879 int retScore = 0;
2880
2882 if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)
2883 {
2884 auto score = p->getScore(true);
2885 if (!ret || (score > retScore))
2886 {
2887 ret = std::move(p);
2888 retScore = score;
2889 }
2890 }
2891 });
2892
2893 return ret;
2894}
2895
2896void
2897PeerImp::sendLedgerBase(std::shared_ptr<Ledger const> const& ledger, protocol::TMLedgerData& ledgerData)
2898{
2899 JLOG(p_journal_.trace()) << "sendLedgerBase: Base data";
2900
2901 Serializer s(sizeof(LedgerHeader));
2902 addRaw(ledger->header(), s);
2903 ledgerData.add_nodes()->set_nodedata(s.getDataPtr(), s.getLength());
2904
2905 auto const& stateMap{ledger->stateMap()};
2906 if (stateMap.getHash() != beast::zero)
2907 {
2908 // Return account state root node if possible
2909 Serializer root(768);
2910
2911 stateMap.serializeRoot(root);
2912 ledgerData.add_nodes()->set_nodedata(root.getDataPtr(), root.getLength());
2913
2914 if (ledger->header().txHash != beast::zero)
2915 {
2916 auto const& txMap{ledger->txMap()};
2917 if (txMap.getHash() != beast::zero)
2918 {
2919 // Return TX root node if possible
2920 root.erase();
2921 txMap.serializeRoot(root);
2922 ledgerData.add_nodes()->set_nodedata(root.getDataPtr(), root.getLength());
2923 }
2924 }
2925 }
2926
2927 auto message{std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
2928 send(message);
2929}
2930
2931 std::shared_ptr<Ledger const>
2932 PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
2933{
2934 JLOG(p_journal_.trace()) << "getLedger: Ledger";
2936 std::shared_ptr<Ledger const> ledger;
2937
2937
2938 if (m->has_ledgerhash())
2939 {
2940 // Attempt to find ledger by hash
2941 uint256 const ledgerHash{m->ledgerhash()};
2942 ledger = app_.getLedgerMaster().getLedgerByHash(ledgerHash);
2943 if (!ledger)
2944 {
2945 JLOG(p_journal_.trace()) << "getLedger: Don't have ledger with hash " << ledgerHash;
2946
2947 if (m->has_querytype() && !m->has_requestcookie())
2948 {
2949 // Attempt to relay the request to a peer
2950 if (auto const peer =
2951 getPeerWithLedger(overlay_, ledgerHash, m->has_ledgerseq() ? m->ledgerseq() : 0, this))
2952 {
2953 m->set_requestcookie(id());
2954 peer->send(std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
2955 JLOG(p_journal_.debug()) << "getLedger: Request relayed to peer";
2956 return ledger;
2957 }
2958
2959 JLOG(p_journal_.trace()) << "getLedger: Failed to find peer to relay request";
2960 }
2961 }
2962 }
2963 else if (m->has_ledgerseq())
2964 {
2965 // Attempt to find ledger by sequence
2966 if (m->ledgerseq() < app_.getLedgerMaster().getEarliestFetch())
2967 {
2968 JLOG(p_journal_.debug()) << "getLedger: Early ledger sequence request";
2969 }
2970 else
2971 {
2972 ledger = app_.getLedgerMaster().getLedgerBySeq(m->ledgerseq());
2973 if (!ledger)
2974 {
2975 JLOG(p_journal_.debug()) << "getLedger: Don't have ledger with sequence " << m->ledgerseq();
2976 }
2977 }
2978 }
2979 else if (m->has_ltype() && m->ltype() == protocol::ltCLOSED)
2980 {
2981 ledger = app_.getLedgerMaster().getClosedLedger();
2982 }
2983
2984 if (ledger)
2985 {
2986 // Validate retrieved ledger sequence
2987 auto const ledgerSeq{ledger->header().seq};
2988 if (m->has_ledgerseq())
2989 {
2990 if (ledgerSeq != m->ledgerseq())
2991 {
2992 // Do not resource charge a peer responding to a relay
2993 if (!m->has_requestcookie())
2994 charge(Resource::feeMalformedRequest, "get_ledger ledgerSeq");
2995
2996 ledger.reset();
2997 JLOG(p_journal_.warn()) << "getLedger: Invalid ledger sequence " << ledgerSeq;
2998 }
2999 }
3000 else if (ledgerSeq < app_.getLedgerMaster().getEarliestFetch())
3001 {
3002 ledger.reset();
3003 JLOG(p_journal_.debug()) << "getLedger: Early ledger sequence request " << ledgerSeq;
3004 }
3005 }
3006 else
3007 {
3008 JLOG(p_journal_.debug()) << "getLedger: Unable to find ledger";
3009 }
3010
3011 return ledger;
3012}
3013
3014 std::shared_ptr<SHAMap>
3015 PeerImp::getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const
3016{
3017 JLOG(p_journal_.trace()) << "getTxSet: TX set";
3018
3019 uint256 const txSetHash{m->ledgerhash()};
3020 std::shared_ptr<SHAMap> shaMap{app_.getInboundTransactions().getSet(txSetHash, false)};
3021 if (!shaMap)
3022 {
3023 if (m->has_querytype() && !m->has_requestcookie())
3024 {
3025 // Attempt to relay the request to a peer
3026 if (auto const peer = getPeerWithTree(overlay_, txSetHash, this))
3027 {
3028 m->set_requestcookie(id());
3029 peer->send(std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3030 JLOG(p_journal_.debug()) << "getTxSet: Request relayed";
3031 }
3032 else
3033 {
3034 JLOG(p_journal_.debug()) << "getTxSet: Failed to find relay peer";
3035 }
3036 }
3037 else
3038 {
3039 JLOG(p_journal_.debug()) << "getTxSet: Failed to find TX set";
3040 }
3041 }
3042
3043 return shaMap;
3044}
3045
3046void
3047PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
3048{
3049 // Do not resource charge a peer responding to a relay
3050 if (!m->has_requestcookie())
3051 charge(Resource::feeModerateBurdenPeer, "received a get ledger request");
3052
3053 std::shared_ptr<Ledger const> ledger;
3054 std::shared_ptr<SHAMap> sharedMap;
3055 SHAMap const* map{nullptr};
3056 protocol::TMLedgerData ledgerData;
3057 bool fatLeaves{true};
3058 auto const itype{m->itype()};
3059
3060 if (itype == protocol::liTS_CANDIDATE)
3061 {
3062 if (sharedMap = getTxSet(m); !sharedMap)
3063 return;
3064 map = sharedMap.get();
3065
3066 // Fill out the reply
3067 ledgerData.set_ledgerseq(0);
3068 ledgerData.set_ledgerhash(m->ledgerhash());
3069 ledgerData.set_type(protocol::liTS_CANDIDATE);
3070 if (m->has_requestcookie())
3071 ledgerData.set_requestcookie(m->requestcookie());
3072
3073 // We'll already have most transactions
3074 fatLeaves = false;
3075 }
3076 else
3077 {
3078 if (send_queue_.size() >= Tuning::dropSendQueue)
3079 {
3080 JLOG(p_journal_.debug()) << "processLedgerRequest: Large send queue";
3081 return;
3082 }
3083 if (app_.getFeeTrack().isLoadedLocal() && !cluster())
3084 {
3085 JLOG(p_journal_.debug()) << "processLedgerRequest: Too busy";
3086 return;
3087 }
3088
3089 if (ledger = getLedger(m); !ledger)
3090 return;
3091
3092 // Fill out the reply
3093 auto const ledgerHash{ledger->header().hash};
3094 ledgerData.set_ledgerhash(ledgerHash.begin(), ledgerHash.size());
3095 ledgerData.set_ledgerseq(ledger->header().seq);
3096 ledgerData.set_type(itype);
3097 if (m->has_requestcookie())
3098 ledgerData.set_requestcookie(m->requestcookie());
3099
3100 switch (itype)
3101 {
3102 case protocol::liBASE:
3103 sendLedgerBase(ledger, ledgerData);
3104 return;
3105
3106 case protocol::liTX_NODE:
3107 map = &ledger->txMap();
3108 JLOG(p_journal_.trace()) << "processLedgerRequest: TX map hash " << to_string(map->getHash());
3109 break;
3110
3111 case protocol::liAS_NODE:
3112 map = &ledger->stateMap();
3113 JLOG(p_journal_.trace()) << "processLedgerRequest: Account state map hash "
3114 << to_string(map->getHash());
3115 break;
3116
3117 default:
3118 // This case should not be possible here
3119 JLOG(p_journal_.error()) << "processLedgerRequest: Invalid ledger info type";
3120 return;
3121 }
3122 }
3123
3124 if (!map)
3125 {
3126 JLOG(p_journal_.warn()) << "processLedgerRequest: Unable to find map";
3127 return;
3128 }
3129
3130 // Add requested node data to reply
3131 if (m->nodeids_size() > 0)
3132 {
3133 auto const queryDepth{m->has_querydepth() ? m->querydepth() : (isHighLatency() ? 2 : 1)};
3134
3135 std::vector<std::pair<SHAMapNodeID, Blob>> data;
3136
3137 for (int i = 0; i < m->nodeids_size() && ledgerData.nodes_size() < Tuning::softMaxReplyNodes; ++i)
3138 {
3139 auto const shaMapNodeId{deserializeSHAMapNodeID(m->nodeids(i))};
3140
3141 data.clear();
3142 data.reserve(Tuning::softMaxReplyNodes);
3143
3144 try
3145 {
3146 if (map->getNodeFat(*shaMapNodeId, data, fatLeaves, queryDepth))
3147 {
3148 JLOG(p_journal_.trace()) << "processLedgerRequest: getNodeFat got " << data.size() << " nodes";
3149
3150 for (auto const& d : data)
3151 {
3152 if (ledgerData.nodes_size() >= Tuning::hardMaxReplyNodes)
3153 break;
3154 protocol::TMLedgerNode* node{ledgerData.add_nodes()};
3155 node->set_nodeid(d.first.getRawString());
3156 node->set_nodedata(d.second.data(), d.second.size());
3157 }
3158 }
3159 else
3160 {
3161 JLOG(p_journal_.warn()) << "processLedgerRequest: getNodeFat returns false";
3162 }
3163 }
3164 catch (std::exception const& e)
3165 {
3166 std::string info;
3167 switch (itype)
3168 {
3169 case protocol::liBASE:
3170 // This case should not be possible here
3171 info = "Ledger base";
3172 break;
3173
3174 case protocol::liTX_NODE:
3175 info = "TX node";
3176 break;
3177
3178 case protocol::liAS_NODE:
3179 info = "AS node";
3180 break;
3181
3182 case protocol::liTS_CANDIDATE:
3183 info = "TS candidate";
3184 break;
3185
3186 default:
3187 info = "Invalid";
3188 break;
3189 }
3190
3191 if (!m->has_ledgerhash())
3192 info += ", no hash specified";
3193
3194 JLOG(p_journal_.warn()) << "processLedgerRequest: getNodeFat with nodeId " << *shaMapNodeId
3195 << " and ledger info type " << info << " throws exception: " << e.what();
3196 }
3197 }
3198
3199 JLOG(p_journal_.info()) << "processLedgerRequest: Got request for " << m->nodeids_size() << " nodes at depth "
3200 << queryDepth << ", return " << ledgerData.nodes_size() << " nodes";
3201 }
3202
3203 if (ledgerData.nodes_size() == 0)
3204 return;
3205
3206 send(std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA));
3207}
3208
3209int
3210PeerImp::getScore(bool haveItem) const
3211{
3212 // Random component of score, used to break ties and avoid
3213 // overloading the "best" peer
3214 static int const spRandomMax = 9999;
3215
3216 // Score for being very likely to have the thing we are
3217 // looking for; should be roughly spRandomMax
3218 static int const spHaveItem = 10000;
3219
3220 // Score reduction for each millisecond of latency; should
3221 // be roughly spRandomMax divided by the maximum reasonable
3222 // latency
3223 static int const spLatency = 30;
3224
3225 // Penalty for unknown latency; should be roughly spRandomMax
3226 static int const spNoLatency = 8000;
3227
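// For example, a peer that has the item and reports 100 ms of latency
// scores between 7000 and 16999 (random 0-9999, plus 10000, minus
// 100 * 30), while a peer without the item and with unknown latency
// scores between -8000 and 1999.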
3228 int score = rand_int(spRandomMax);
3229
3230 if (haveItem)
3231 score += spHaveItem;
3232
3233 std::optional<std::chrono::milliseconds> latency;
3234 {
3235 std::lock_guard sl(recentLock_);
3236 latency = latency_;
3237 }
3238
3239 if (latency)
3240 score -= latency->count() * spLatency;
3241 else
3242 score -= spNoLatency;
3243
3244 return score;
3245}
3246
3247bool
3248PeerImp::isHighLatency() const
3249{
3250 std::lock_guard sl(recentLock_);
3251 return latency_ >= peerHighLatency;
3252}
3253
3254void
3255PeerImp::Metrics::add_message(std::uint64_t bytes)
3256{
3257 using namespace std::chrono_literals;
3258 std::unique_lock lock{mutex_};
3259
3260 totalBytes_ += bytes;
3261 accumBytes_ += bytes;
3262 auto const timeElapsed = clock_type::now() - intervalStart_;
3263 auto const timeElapsedInSecs = std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);
3264
3265 if (timeElapsedInSecs >= 1s)
3266 {
3267 auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
3268 rollingAvg_.push_back(avgBytes);
3269
3270 auto const totalBytes = std::accumulate(rollingAvg_.begin(), rollingAvg_.end(), 0ull);
3271 rollingAvgBytes_ = totalBytes / rollingAvg_.size();
3272
3273 intervalStart_ = clock_type::now();
3274 accumBytes_ = 0;
3275 }
3276}
3277
3278 std::uint64_t
3279 PeerImp::Metrics::average_bytes() const
3280{
3281 std::shared_lock lock{mutex_};
3282 return rollingAvgBytes_;
3283}
3284
3285 std::uint64_t
3286 PeerImp::Metrics::total_bytes() const
3287{
3288 std::shared_lock lock{mutex_};
3289 return totalBytes_;
3290}
3291
3292} // namespace xrpl