rippled
Loading...
Searching...
No Matches
PeerImp.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLValidations.h>
21#include <xrpld/app/ledger/InboundLedgers.h>
22#include <xrpld/app/ledger/InboundTransactions.h>
23#include <xrpld/app/ledger/LedgerMaster.h>
24#include <xrpld/app/ledger/TransactionMaster.h>
25#include <xrpld/app/misc/HashRouter.h>
26#include <xrpld/app/misc/LoadFeeTrack.h>
27#include <xrpld/app/misc/NetworkOPs.h>
28#include <xrpld/app/misc/Transaction.h>
29#include <xrpld/app/misc/ValidatorList.h>
30#include <xrpld/app/tx/apply.h>
31#include <xrpld/overlay/Cluster.h>
32#include <xrpld/overlay/detail/PeerImp.h>
33#include <xrpld/overlay/detail/Tuning.h>
34#include <xrpld/perflog/PerfLog.h>
35
36#include <xrpl/basics/UptimeClock.h>
37#include <xrpl/basics/base64.h>
38#include <xrpl/basics/random.h>
39#include <xrpl/basics/safe_cast.h>
40#include <xrpl/protocol/TxFlags.h>
41#include <xrpl/protocol/digest.h>
42
43#include <boost/algorithm/string/predicate.hpp>
44#include <boost/beast/core/ostream.hpp>
45
46#include <algorithm>
47#include <memory>
48#include <mutex>
49#include <numeric>
50#include <sstream>
51
52using namespace std::chrono_literals;
53
54namespace ripple {
55
56namespace {
// Latency above which a peer is considered to have high latency.
// NOTE(review): not referenced in this capture; presumably consulted when
// reporting/acting on measured peer latency -- confirm against full file.
58std::chrono::milliseconds constexpr peerHighLatency{300};
59
// Period of the per-peer maintenance timer armed by setTimer() and
// serviced by onTimer().
61std::chrono::seconds constexpr peerTimerInterval{60};
62} // namespace
63
64// TODO: Remove this exclusion once unit tests are added after the hotfix
65// release.
66
68 Application& app,
69 id_t id,
71 http_request_type&& request,
72 PublicKey const& publicKey,
74 Resource::Consumer consumer,
76 OverlayImpl& overlay)
77 : Child(overlay)
78 , app_(app)
79 , id_(id)
80 , sink_(app_.journal("Peer"), makePrefix(id))
81 , p_sink_(app_.journal("Protocol"), makePrefix(id))
82 , journal_(sink_)
83 , p_journal_(p_sink_)
84 , stream_ptr_(std::move(stream_ptr))
85 , socket_(stream_ptr_->next_layer().socket())
86 , stream_(*stream_ptr_)
87 , strand_(socket_.get_executor())
88 , timer_(waitable_timer{socket_.get_executor()})
89 , remote_address_(slot->remote_endpoint())
90 , overlay_(overlay)
91 , inbound_(true)
92 , protocol_(protocol)
93 , tracking_(Tracking::unknown)
94 , trackingTime_(clock_type::now())
95 , publicKey_(publicKey)
96 , lastPingTime_(clock_type::now())
97 , creationTime_(clock_type::now())
98 , squelch_(app_.journal("Squelch"))
99 , usage_(consumer)
100 , fee_{Resource::feeTrivialPeer, ""}
101 , slot_(slot)
102 , request_(std::move(request))
103 , headers_(request_)
104 , compressionEnabled_(
106 headers_,
108 "lz4",
109 app_.config().COMPRESSION)
110 ? Compressed::On
111 : Compressed::Off)
112 , txReduceRelayEnabled_(peerFeatureEnabled(
113 headers_,
115 app_.config().TX_REDUCE_RELAY_ENABLE))
116 , ledgerReplayEnabled_(peerFeatureEnabled(
117 headers_,
119 app_.config().LEDGER_REPLAY))
120 , ledgerReplayMsgHandler_(app, app.getLedgerReplayer())
121{
122 JLOG(journal_.info())
123 << "compression enabled " << (compressionEnabled_ == Compressed::On)
124 << " vp reduce-relay base squelch enabled "
126 headers_,
129 << " tx reduce-relay enabled " << txReduceRelayEnabled_ << " on "
130 << remote_address_ << " " << id_;
131}
132
134{
135 bool const inCluster{cluster()};
136
141
142 if (inCluster)
143 {
144 JLOG(journal_.warn()) << name() << " left cluster";
145 }
146}
147
148// Helper function to check for valid uint256 values in protobuf buffers
// A protobuf `bytes` field may carry data of any length; this confirms the
// payload is exactly uint256::size() bytes before it is treated as a hash.
149static bool
151{
152 return pBuffStr.size() == uint256::size();
153}
154
155void
157{
158 if (!strand_.running_in_this_thread())
160
161 auto parseLedgerHash =
163 if (uint256 ret; ret.parseHex(value))
164 return ret;
165
166 if (auto const s = base64_decode(value); s.size() == uint256::size())
167 return uint256{s};
168
169 return std::nullopt;
170 };
171
173 std::optional<uint256> previous;
174
175 if (auto const iter = headers_.find("Closed-Ledger");
176 iter != headers_.end())
177 {
178 closed = parseLedgerHash(iter->value());
179
180 if (!closed)
181 fail("Malformed handshake data (1)");
182 }
183
184 if (auto const iter = headers_.find("Previous-Ledger");
185 iter != headers_.end())
186 {
187 previous = parseLedgerHash(iter->value());
188
189 if (!previous)
190 fail("Malformed handshake data (2)");
191 }
192
193 if (previous && !closed)
194 fail("Malformed handshake data (3)");
195
196 {
198 if (closed)
199 closedLedgerHash_ = *closed;
200 if (previous)
201 previousLedgerHash_ = *previous;
202 }
203
204 if (inbound_)
205 doAccept();
206 else
208
209 // Anything else that needs to be done with the connection should be
210 // done in doProtocolStart
211}
212
213void
215{
216 if (!strand_.running_in_this_thread())
218 if (socket_.is_open())
219 {
220 // The rationale for using different severity levels is that
221 // outbound connections are under our control and may be logged
222 // at a higher level, but inbound connections are more numerous and
223 // uncontrolled so to prevent log flooding the severity is reduced.
224 //
225 if (inbound_)
226 {
227 JLOG(journal_.debug()) << "Stop";
228 }
229 else
230 {
231 JLOG(journal_.info()) << "Stop";
232 }
233 }
234 close();
235}
236
237//------------------------------------------------------------------------------
238
239void
241{
242 if (!strand_.running_in_this_thread())
243 return post(strand_, std::bind(&PeerImp::send, shared_from_this(), m));
244 if (gracefulClose_)
245 return;
246 if (detaching_)
247 return;
248
249 auto validator = m->getValidatorKey();
250 if (validator && !squelch_.expireSquelch(*validator))
251 {
254 static_cast<int>(m->getBuffer(compressionEnabled_).size()));
255 return;
256 }
257
258 // report categorized outgoing traffic
260 safe_cast<TrafficCount::category>(m->getCategory()),
261 static_cast<int>(m->getBuffer(compressionEnabled_).size()));
262
263 // report total outgoing traffic
266 static_cast<int>(m->getBuffer(compressionEnabled_).size()));
267
268 auto sendq_size = send_queue_.size();
269
270 if (sendq_size < Tuning::targetSendQueue)
271 {
272 // To detect a peer that does not read from their
273 // side of the connection, we expect a peer to have
274 // a small senq periodically
275 large_sendq_ = 0;
276 }
277 else if (auto sink = journal_.debug();
278 sink && (sendq_size % Tuning::sendQueueLogFreq) == 0)
279 {
280 std::string const n = name();
281 sink << (n.empty() ? remote_address_.to_string() : n)
282 << " sendq: " << sendq_size;
283 }
284
285 send_queue_.push(m);
286
287 if (sendq_size != 0)
288 return;
289
290 boost::asio::async_write(
291 stream_,
292 boost::asio::buffer(
293 send_queue_.front()->getBuffer(compressionEnabled_)),
294 bind_executor(
295 strand_,
296 std::bind(
299 std::placeholders::_1,
300 std::placeholders::_2)));
301}
302
303void
305{
306 if (!strand_.running_in_this_thread())
307 return post(
309
310 if (!txQueue_.empty())
311 {
312 protocol::TMHaveTransactions ht;
313 std::for_each(txQueue_.begin(), txQueue_.end(), [&](auto const& hash) {
314 ht.add_hashes(hash.data(), hash.size());
315 });
316 JLOG(p_journal_.trace()) << "sendTxQueue " << txQueue_.size();
317 txQueue_.clear();
318 send(std::make_shared<Message>(ht, protocol::mtHAVE_TRANSACTIONS));
319 }
320}
321
322void
324{
325 if (!strand_.running_in_this_thread())
326 return post(
328
330 {
331 JLOG(p_journal_.warn()) << "addTxQueue exceeds the cap";
332 sendTxQueue();
333 }
334
335 txQueue_.insert(hash);
336 JLOG(p_journal_.trace()) << "addTxQueue " << txQueue_.size();
337}
338
339void
341{
342 if (!strand_.running_in_this_thread())
343 return post(
344 strand_,
346
347 auto removed = txQueue_.erase(hash);
348 JLOG(p_journal_.trace()) << "removeTxQueue " << removed;
349}
350
351void
// Charge this peer's resource consumer for `fee`. If the accumulated
// usage crosses the drop threshold and the consumer agrees the peer
// should be disconnected, sever the connection -- but only when we are
// already running on the strand.
353{
354 if ((usage_.charge(fee, context) == Resource::drop) &&
355 usage_.disconnect(p_journal_) && strand_.running_in_this_thread())
356 {
357 // Sever the connection
359 fail("charge: Resources");
360 }
361}
362
363//------------------------------------------------------------------------------
364
365bool
367{
// True when the peer's handshake included "Crawl: public", i.e. it
// presumably consents to being listed in crawl/overlay reports.
368 auto const iter = headers_.find("Crawl");
369 if (iter == headers_.end())
370 return false;
371 return boost::iequals(iter->value(), "public");
372}
373
374bool
376{
// True when this peer's public key belongs to a configured cluster node.
377 return static_cast<bool>(app_.cluster().member(publicKey_));
378}
379
382{
// Inbound peers carry their software version in the User-Agent request
// header; for outbound connections it arrives in the Server response
// header.
383 if (inbound_)
384 return headers_["User-Agent"];
385 return headers_["Server"];
386}
387
390{
392
393 ret[jss::public_key] = toBase58(TokenType::NodePublic, publicKey_);
394 ret[jss::address] = remote_address_.to_string();
395
396 if (inbound_)
397 ret[jss::inbound] = true;
398
399 if (cluster())
400 {
401 ret[jss::cluster] = true;
402
403 if (auto const n = name(); !n.empty())
404 // Could move here if Json::Value supported moving from a string
405 ret[jss::name] = n;
406 }
407
408 if (auto const d = domain(); !d.empty())
409 ret[jss::server_domain] = std::string{d};
410
411 if (auto const nid = headers_["Network-ID"]; !nid.empty())
412 ret[jss::network_id] = std::string{nid};
413
414 ret[jss::load] = usage_.balance();
415
416 if (auto const version = getVersion(); !version.empty())
417 ret[jss::version] = std::string{version};
418
419 ret[jss::protocol] = to_string(protocol_);
420
421 {
423 if (latency_)
424 ret[jss::latency] = static_cast<Json::UInt>(latency_->count());
425 }
426
427 ret[jss::uptime] = static_cast<Json::UInt>(
428 std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());
429
430 std::uint32_t minSeq, maxSeq;
431 ledgerRange(minSeq, maxSeq);
432
433 if ((minSeq != 0) || (maxSeq != 0))
434 ret[jss::complete_ledgers] =
435 std::to_string(minSeq) + " - " + std::to_string(maxSeq);
436
437 switch (tracking_.load())
438 {
440 ret[jss::track] = "diverged";
441 break;
442
444 ret[jss::track] = "unknown";
445 break;
446
448 // Nothing to do here
449 break;
450 }
451
452 uint256 closedLedgerHash;
453 protocol::TMStatusChange last_status;
454 {
456 closedLedgerHash = closedLedgerHash_;
457 last_status = last_status_;
458 }
459
460 if (closedLedgerHash != beast::zero)
461 ret[jss::ledger] = to_string(closedLedgerHash);
462
463 if (last_status.has_newstatus())
464 {
465 switch (last_status.newstatus())
466 {
467 case protocol::nsCONNECTING:
468 ret[jss::status] = "connecting";
469 break;
470
471 case protocol::nsCONNECTED:
472 ret[jss::status] = "connected";
473 break;
474
475 case protocol::nsMONITORING:
476 ret[jss::status] = "monitoring";
477 break;
478
479 case protocol::nsVALIDATING:
480 ret[jss::status] = "validating";
481 break;
482
483 case protocol::nsSHUTTING:
484 ret[jss::status] = "shutting";
485 break;
486
487 default:
488 JLOG(p_journal_.warn())
489 << "Unknown status: " << last_status.newstatus();
490 }
491 }
492
493 ret[jss::metrics] = Json::Value(Json::objectValue);
494 ret[jss::metrics][jss::total_bytes_recv] =
495 std::to_string(metrics_.recv.total_bytes());
496 ret[jss::metrics][jss::total_bytes_sent] =
497 std::to_string(metrics_.sent.total_bytes());
498 ret[jss::metrics][jss::avg_bps_recv] =
499 std::to_string(metrics_.recv.average_bytes());
500 ret[jss::metrics][jss::avg_bps_sent] =
501 std::to_string(metrics_.sent.average_bytes());
502
503 return ret;
504}
505
506bool
508{
509 switch (f)
510 {
512 return protocol_ >= make_protocol(2, 1);
514 return protocol_ >= make_protocol(2, 2);
517 }
518 return false;
519}
520
521//------------------------------------------------------------------------------
522
523bool
525{
526 {
// NOTE(review): a lock acquisition (presumably on recentLock_, guarding
// minLedger_/maxLedger_/recentLedgers_) is elided from this capture --
// confirm against the full source.
// A peer "has" a ledger if the sequence lies in its advertised range,
// or the hash is among its recently advertised ledgers.
528 if ((seq != 0) && (seq >= minLedger_) && (seq <= maxLedger_) &&
530 return true;
531 if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
532 recentLedgers_.end())
533 return true;
534 }
535 return false;
536}
537
538void
540{
// Report the [minLedger_, maxLedger_] sequence range this peer claims to
// have; presumably read under recentLock_ (elided line) -- TODO confirm.
542
543 minSeq = minLedger_;
544 maxSeq = maxLedger_;
545}
546
547bool
548PeerImp::hasTxSet(uint256 const& hash) const
549{
// True if `hash` is among the transaction-set hashes this peer has
// recently advertised; presumably read under a lock on an elided line --
// TODO confirm.
551 return std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
552 recentTxSets_.end();
553}
554
555void
557{
558 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
559 // guarded by recentLock_.
563}
564
565bool
567{
569 return (tracking_ != Tracking::diverged) && (uMin >= minLedger_) &&
570 (uMax <= maxLedger_);
571}
572
573//------------------------------------------------------------------------------
574
575void
577{
// Hard-close the connection: cancel the maintenance timer and close the
// socket, deliberately ignoring any error codes. Must run on the strand.
578 XRPL_ASSERT(
579 strand_.running_in_this_thread(),
580 "ripple::PeerImp::close : strand in this thread");
581 if (socket_.is_open())
582 {
583 detaching_ = true; // DEPRECATED
584 error_code ec;
585 timer_.cancel(ec);
586 socket_.close(ec);
// Inbound closes log at debug, outbound at info -- mirrors stop()'s
// rationale of keeping the numerous inbound connections quieter.
588 if (inbound_)
589 {
590 JLOG(journal_.debug()) << "Closed";
591 }
592 else
593 {
594 JLOG(journal_.info()) << "Closed";
595 }
596 }
597}
598
599void
601{
602 if (!strand_.running_in_this_thread())
603 return post(
604 strand_,
605 std::bind(
606 (void(Peer::*)(std::string const&)) & PeerImp::fail,
608 reason));
610 {
611 std::string const n = name();
612 JLOG(journal_.warn()) << (n.empty() ? remote_address_.to_string() : n)
613 << " failed: " << reason;
614 }
615 close();
616}
617
618void
620{
621 XRPL_ASSERT(
622 strand_.running_in_this_thread(),
623 "ripple::PeerImp::fail : strand in this thread");
624 if (socket_.is_open())
625 {
626 JLOG(journal_.warn())
628 << " at " << remote_address_.to_string() << ": " << ec.message();
629 }
630 close();
631}
632
633void
635{
636 XRPL_ASSERT(
637 strand_.running_in_this_thread(),
638 "ripple::PeerImp::gracefulClose : strand in this thread");
639 XRPL_ASSERT(
640 socket_.is_open(), "ripple::PeerImp::gracefulClose : socket is open");
641 XRPL_ASSERT(
643 "ripple::PeerImp::gracefulClose : socket is not closing");
644 gracefulClose_ = true;
645 if (send_queue_.size() > 0)
646 return;
647 setTimer();
648 stream_.async_shutdown(bind_executor(
649 strand_,
650 std::bind(
651 &PeerImp::onShutdown, shared_from_this(), std::placeholders::_1)));
652}
653
654void
656{
// Arm the periodic maintenance timer for peerTimerInterval; on expiry
// onTimer runs on the strand.
657 error_code ec;
658 timer_.expires_from_now(peerTimerInterval, ec);
659
660 if (ec)
661 {
662 JLOG(journal_.error()) << "setTimer: " << ec.message();
663 return;
664 }
665 timer_.async_wait(bind_executor(
666 strand_,
667 std::bind(
668 &PeerImp::onTimer, shared_from_this(), std::placeholders::_1)));
669}
670
671// convenience for ignoring the error code
// Cancel any pending maintenance timer; a failed cancel is harmless here.
672void
674{
675 error_code ec;
676 timer_.cancel(ec);
677}
678
679//------------------------------------------------------------------------------
680
683{
// Build the "[nnn] " journal prefix from the peer id, zero-padded to
// three digits. NOTE(review): the declaration of `ss` (presumably a
// stringstream) sits on a line elided from this capture -- confirm.
685 ss << "[" << std::setfill('0') << std::setw(3) << id << "] ";
686 return ss.str();
687}
688
689void
691{
// Periodic maintenance, run on the strand when the timer armed by
// setTimer() fires: drops peers with chronically large send queues,
// drops outbound peers stuck diverged/unknown too long, enforces ping
// timeouts, then issues a fresh PING with a random cookie and re-arms.
692 if (!socket_.is_open())
693 return;
694
695 if (ec == boost::asio::error::operation_aborted)
696 return;
697
698 if (ec)
699 {
700 // This should never happen
701 JLOG(journal_.error()) << "onTimer: " << ec.message();
702 return close();
703 }
704
// NOTE(review): the guard condition for the block below (presumably a
// send-queue size check, given the "Large send queue" failure text) is
// elided from this capture -- confirm against the full source.
706 {
707 fail("Large send queue");
708 return;
709 }
710
711 if (auto const t = tracking_.load(); !inbound_ && t != Tracking::converged)
712 {
713 clock_type::duration duration;
714
715 {
// NOTE(review): a lock acquisition guarding trackingTime_ appears to be
// elided here -- confirm.
717 duration = clock_type::now() - trackingTime_;
718 }
719
// Outbound peers that remain diverged/unknown past the configured
// limits are not useful and are dropped.
720 if ((t == Tracking::diverged &&
721 (duration > app_.config().MAX_DIVERGED_TIME)) ||
722 (t == Tracking::unknown &&
723 (duration > app_.config().MAX_UNKNOWN_TIME)))
724 {
726 fail("Not useful");
727 return;
728 }
729 }
730
731 // Already waiting for PONG
732 if (lastPingSeq_)
733 {
734 fail("Ping Timeout");
735 return;
736 }
737
// Random cookie; onMessage(TMPing) only clears lastPingSeq_ when the
// PONG echoes this exact sequence number.
739 lastPingSeq_ = rand_int<std::uint32_t>();
740
741 protocol::TMPing message;
742 message.set_type(protocol::TMPing::ptPING);
743 message.set_seq(*lastPingSeq_);
744
745 send(std::make_shared<Message>(message, protocol::mtPING));
746
747 setTimer();
748}
749
750void
752{
// Completion handler for the TLS async_shutdown: a clean remote close is
// reported as error::eof, so the *absence* of an error is the anomaly.
753 cancelTimer();
754 // If we don't get eof then something went wrong
755 if (!ec)
756 {
757 JLOG(journal_.error()) << "onShutdown: expected error condition";
758 return close();
759 }
760 if (ec != boost::asio::error::eof)
761 return fail("onShutdown", ec);
762 close();
763}
764
765//------------------------------------------------------------------------------
766void
768{
769 XRPL_ASSERT(
770 read_buffer_.size() == 0,
771 "ripple::PeerImp::doAccept : empty read buffer");
772
773 JLOG(journal_.debug()) << "doAccept: " << remote_address_;
774
775 auto const sharedValue = makeSharedValue(*stream_ptr_, journal_);
776
777 // This shouldn't fail since we already computed
778 // the shared value successfully in OverlayImpl
779 if (!sharedValue)
780 return fail("makeSharedValue: Unexpected failure");
781
782 JLOG(journal_.info()) << "Protocol: " << to_string(protocol_);
783 JLOG(journal_.info()) << "Public Key: "
785
786 if (auto member = app_.cluster().member(publicKey_))
787 {
788 {
790 name_ = *member;
791 }
792 JLOG(journal_.info()) << "Cluster name: " << *member;
793 }
794
796
797 // XXX Set timer: connection is in grace period to be useful.
798 // XXX Set timer: connection idle (idle may vary depending on connection
799 // type.)
800
801 auto write_buffer = std::make_shared<boost::beast::multi_buffer>();
802
803 boost::beast::ostream(*write_buffer) << makeResponse(
805 request_,
808 *sharedValue,
810 protocol_,
811 app_);
812
813 // Write the whole buffer and only start protocol when that's done.
814 boost::asio::async_write(
815 stream_,
816 write_buffer->data(),
817 boost::asio::transfer_all(),
818 bind_executor(
819 strand_,
820 [this, write_buffer, self = shared_from_this()](
821 error_code ec, std::size_t bytes_transferred) {
822 if (!socket_.is_open())
823 return;
824 if (ec == boost::asio::error::operation_aborted)
825 return;
826 if (ec)
827 return fail("onWriteResponse", ec);
828 if (write_buffer->size() == bytes_transferred)
829 return doProtocolStart();
830 return fail("Failed to write header");
831 }));
832}
833
836{
// Thread-safe read of the peer's name (set from cluster configuration in
// doAccept; may be empty for non-cluster peers).
837 std::shared_lock read_lock{nameMutex_};
838 return name_;
839}
840
843{
// The domain the peer claimed in its "Server-Domain" handshake header
// (empty if not supplied).
844 return headers_["Server-Domain"];
845}
846
847//------------------------------------------------------------------------------
848
849// Protocol logic
850
851void
853{
855
856 // Send all the validator lists that have been loaded
858 {
860 [&](std::string const& manifest,
861 std::uint32_t version,
863 PublicKey const& pubKey,
864 std::size_t maxSequence,
865 uint256 const& hash) {
867 *this,
868 0,
869 pubKey,
870 maxSequence,
871 version,
872 manifest,
873 blobInfos,
875 p_journal_);
876
877 // Don't send it next time.
879 });
880 }
881
882 if (auto m = overlay_.getManifestsMessage())
883 send(m);
884
885 setTimer();
886}
887
888// Called repeatedly with protocol message data
889void
891{
892 if (!socket_.is_open())
893 return;
894 if (ec == boost::asio::error::operation_aborted)
895 return;
896 if (ec == boost::asio::error::eof)
897 {
898 JLOG(journal_.info()) << "EOF";
899 return gracefulClose();
900 }
901 if (ec)
902 return fail("onReadMessage", ec);
903 if (auto stream = journal_.trace())
904 {
905 if (bytes_transferred > 0)
906 stream << "onReadMessage: " << bytes_transferred << " bytes";
907 else
908 stream << "onReadMessage";
909 }
910
911 metrics_.recv.add_message(bytes_transferred);
912
913 read_buffer_.commit(bytes_transferred);
914
915 auto hint = Tuning::readBufferBytes;
916
917 while (read_buffer_.size() > 0)
918 {
919 std::size_t bytes_consumed;
920
921 using namespace std::chrono_literals;
922 std::tie(bytes_consumed, ec) = perf::measureDurationAndLog(
923 [&]() {
924 return invokeProtocolMessage(read_buffer_.data(), *this, hint);
925 },
926 "invokeProtocolMessage",
927 350ms,
928 journal_);
929
930 if (ec)
931 return fail("onReadMessage", ec);
932 if (!socket_.is_open())
933 return;
934 if (gracefulClose_)
935 return;
936 if (bytes_consumed == 0)
937 break;
938 read_buffer_.consume(bytes_consumed);
939 }
940
941 // Timeout on writes only
942 stream_.async_read_some(
944 bind_executor(
945 strand_,
946 std::bind(
949 std::placeholders::_1,
950 std::placeholders::_2)));
951}
952
953void
955{
956 if (!socket_.is_open())
957 return;
958 if (ec == boost::asio::error::operation_aborted)
959 return;
960 if (ec)
961 return fail("onWriteMessage", ec);
962 if (auto stream = journal_.trace())
963 {
964 if (bytes_transferred > 0)
965 stream << "onWriteMessage: " << bytes_transferred << " bytes";
966 else
967 stream << "onWriteMessage";
968 }
969
970 metrics_.sent.add_message(bytes_transferred);
971
972 XRPL_ASSERT(
973 !send_queue_.empty(),
974 "ripple::PeerImp::onWriteMessage : non-empty send buffer");
975 send_queue_.pop();
976 if (!send_queue_.empty())
977 {
978 // Timeout on writes only
979 return boost::asio::async_write(
980 stream_,
981 boost::asio::buffer(
982 send_queue_.front()->getBuffer(compressionEnabled_)),
983 bind_executor(
984 strand_,
985 std::bind(
988 std::placeholders::_1,
989 std::placeholders::_2)));
990 }
991
992 if (gracefulClose_)
993 {
994 return stream_.async_shutdown(bind_executor(
995 strand_,
996 std::bind(
999 std::placeholders::_1)));
1000 }
1001}
1002
1003//------------------------------------------------------------------------------
1004//
1005// ProtocolHandler
1006//
1007//------------------------------------------------------------------------------
1008
1009void
1011{
1012 // TODO
1013}
1014
1015void
1017 std::uint16_t type,
1019 std::size_t size,
1020 std::size_t uncompressed_size,
1021 bool isCompressed)
1022{
1023 auto const name = protocolMessageName(type);
1026
1027 auto const category = TrafficCount::categorize(
1028 *m, static_cast<protocol::MessageType>(type), true);
1029
1030 // report total incoming traffic
1032 TrafficCount::category::total, static_cast<int>(size));
1033
1034 // increase the traffic received for a specific category
1035 overlay_.reportInboundTraffic(category, static_cast<int>(size));
1036
1037 using namespace protocol;
1038 if ((type == MessageType::mtTRANSACTION ||
1039 type == MessageType::mtHAVE_TRANSACTIONS ||
1040 type == MessageType::mtTRANSACTIONS ||
1041 // GET_OBJECTS
1043 // GET_LEDGER
1046 // LEDGER_DATA
1050 {
1052 static_cast<MessageType>(type), static_cast<std::uint64_t>(size));
1053 }
1054 JLOG(journal_.trace()) << "onMessageBegin: " << type << " " << size << " "
1055 << uncompressed_size << " " << isCompressed;
1056}
1057
1058void
1062{
1063 load_event_.reset();
1065}
1066
1067void
1069{
1070 auto const s = m->list_size();
1071
1072 if (s == 0)
1073 {
1075 return;
1076 }
1077
1078 if (s > 100)
1080
1082 jtMANIFEST, "receiveManifests", [this, that = shared_from_this(), m]() {
1083 overlay_.onManifests(m, that);
1084 });
1085}
1086
1087void
1089{
1090 if (m->type() == protocol::TMPing::ptPING)
1091 {
1092 // We have received a ping request, reply with a pong
1094 m->set_type(protocol::TMPing::ptPONG);
1095 send(std::make_shared<Message>(*m, protocol::mtPING));
1096 return;
1097 }
1098
1099 if (m->type() == protocol::TMPing::ptPONG && m->has_seq())
1100 {
1101 // Only reset the ping sequence if we actually received a
1102 // PONG with the correct cookie. That way, any peers which
1103 // respond with incorrect cookies will eventually time out.
1104 if (m->seq() == lastPingSeq_)
1105 {
1107
1108 // Update latency estimate
1109 auto const rtt = std::chrono::round<std::chrono::milliseconds>(
1111
1113
1114 if (latency_)
1115 latency_ = (*latency_ * 7 + rtt) / 8;
1116 else
1117 latency_ = rtt;
1118 }
1119
1120 return;
1121 }
1122}
1123
1124void
1126{
1127 // VFALCO NOTE I think we should drop the peer immediately
1128 if (!cluster())
1129 {
1130 fee_.update(Resource::feeUselessData, "unknown cluster");
1131 return;
1132 }
1133
1134 for (int i = 0; i < m->clusternodes().size(); ++i)
1135 {
1136 protocol::TMClusterNode const& node = m->clusternodes(i);
1137
1139 if (node.has_nodename())
1140 name = node.nodename();
1141
1142 auto const publicKey =
1143 parseBase58<PublicKey>(TokenType::NodePublic, node.publickey());
1144
1145 // NIKB NOTE We should drop the peer immediately if
1146 // they send us a public key we can't parse
1147 if (publicKey)
1148 {
1149 auto const reportTime =
1150 NetClock::time_point{NetClock::duration{node.reporttime()}};
1151
1153 *publicKey, name, node.nodeload(), reportTime);
1154 }
1155 }
1156
1157 int loadSources = m->loadsources().size();
1158 if (loadSources != 0)
1159 {
1160 Resource::Gossip gossip;
1161 gossip.items.reserve(loadSources);
1162 for (int i = 0; i < m->loadsources().size(); ++i)
1163 {
1164 protocol::TMLoadSource const& node = m->loadsources(i);
1166 item.address = beast::IP::Endpoint::from_string(node.name());
1167 item.balance = node.cost();
1168 if (item.address != beast::IP::Endpoint())
1169 gossip.items.push_back(item);
1170 }
1172 }
1173
1174 // Calculate the cluster fee:
1175 auto const thresh = app_.timeKeeper().now() - 90s;
1176 std::uint32_t clusterFee = 0;
1177
1179 fees.reserve(app_.cluster().size());
1180
1181 app_.cluster().for_each([&fees, thresh](ClusterNode const& status) {
1182 if (status.getReportTime() >= thresh)
1183 fees.push_back(status.getLoadFee());
1184 });
1185
1186 if (!fees.empty())
1187 {
1188 auto const index = fees.size() / 2;
1189 std::nth_element(fees.begin(), fees.begin() + index, fees.end());
1190 clusterFee = fees[index];
1191 }
1192
1193 app_.getFeeTrack().setClusterFee(clusterFee);
1194}
1195
1196void
1198{
1199 // Don't allow endpoints from peers that are not known tracking or are
1200 // not using a version of the message that we support:
1201 if (tracking_.load() != Tracking::converged || m->version() != 2)
1202 return;
1203
1204 // The number is arbitrary and doesn't have any real significance or
1205 // implication for the protocol.
1206 if (m->endpoints_v2().size() >= 1024)
1207 {
1208 fee_.update(Resource::feeUselessData, "endpoints too large");
1209 return;
1210 }
1211
1213 endpoints.reserve(m->endpoints_v2().size());
1214
1215 auto malformed = 0;
1216 for (auto const& tm : m->endpoints_v2())
1217 {
1218 auto result = beast::IP::Endpoint::from_string_checked(tm.endpoint());
1219
1220 if (!result)
1221 {
1222 JLOG(p_journal_.error()) << "failed to parse incoming endpoint: {"
1223 << tm.endpoint() << "}";
1224 malformed++;
1225 continue;
1226 }
1227
1228 // If hops == 0, this Endpoint describes the peer we are connected
1229 // to -- in that case, we take the remote address seen on the
1230 // socket and store that in the IP::Endpoint. If this is the first
1231 // time, then we'll verify that their listener can receive incoming
1232 // by performing a connectivity test. if hops > 0, then we just
1233 // take the address/port we were given
1234 if (tm.hops() == 0)
1235 result = remote_address_.at_port(result->port());
1236
1237 endpoints.emplace_back(*result, tm.hops());
1238 }
1239
1240 // Charge the peer for each malformed endpoint. As there still may be
1241 // multiple valid endpoints we don't return early.
1242 if (malformed > 0)
1243 {
1244 fee_.update(
1245 Resource::feeInvalidData * malformed,
1246 std::to_string(malformed) + " malformed endpoints");
1247 }
1248
1249 if (!endpoints.empty())
1250 overlay_.peerFinder().on_endpoints(slot_, endpoints);
1251}
1252
1253void
1255{
1256 handleTransaction(m, true, false);
1257}
1258
1259void
1262 bool eraseTxQueue,
1263 bool batch)
1264{
1265 XRPL_ASSERT(
1266 eraseTxQueue != batch,
1267 ("ripple::PeerImp::handleTransaction : valid inputs"));
1269 return;
1270
1272 {
1273 // If we've never been in synch, there's nothing we can do
1274 // with a transaction
1275 JLOG(p_journal_.debug()) << "Ignoring incoming transaction: "
1276 << "Need network ledger";
1277 return;
1278 }
1279
1280 SerialIter sit(makeSlice(m->rawtransaction()));
1281
1282 try
1283 {
1284 auto stx = std::make_shared<STTx const>(sit);
1285 uint256 txID = stx->getTransactionID();
1286
1287 // Charge strongly for attempting to relay a txn with tfInnerBatchTxn
1288 // LCOV_EXCL_START
1289 if (stx->isFlag(tfInnerBatchTxn) &&
1290 getCurrentTransactionRules()->enabled(featureBatch))
1291 {
1292 JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing "
1293 "tfInnerBatchTxn (handleTransaction).";
1294 fee_.update(Resource::feeModerateBurdenPeer, "inner batch txn");
1295 return;
1296 }
1297 // LCOV_EXCL_STOP
1298
1299 int flags;
1300 constexpr std::chrono::seconds tx_interval = 10s;
1301
1302 if (!app_.getHashRouter().shouldProcess(txID, id_, flags, tx_interval))
1303 {
1304 // we have seen this transaction recently
1305 if (flags & SF_BAD)
1306 {
1307 fee_.update(Resource::feeUselessData, "known bad");
1308 JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID;
1309 }
1310
1311 // Erase only if the server has seen this tx. If the server has not
1312 // seen this tx then the tx could not has been queued for this peer.
1313 else if (eraseTxQueue && txReduceRelayEnabled())
1314 removeTxQueue(txID);
1315
1319
1320 return;
1321 }
1322
1323 JLOG(p_journal_.debug()) << "Got tx " << txID;
1324
1325 bool checkSignature = true;
1326 if (cluster())
1327 {
1328 if (!m->has_deferred() || !m->deferred())
1329 {
1330 // Skip local checks if a server we trust
1331 // put the transaction in its open ledger
1332 flags |= SF_TRUSTED;
1333 }
1334
1335 // for non-validator nodes only -- localPublicKey is set for
1336 // validators only
1338 {
1339 // For now, be paranoid and have each validator
1340 // check each transaction, regardless of source
1341 checkSignature = false;
1342 }
1343 }
1344
1346 {
1347 JLOG(p_journal_.trace())
1348 << "No new transactions until synchronized";
1349 }
1350 else if (
1353 {
1355 JLOG(p_journal_.info()) << "Transaction queue is full";
1356 }
1357 else
1358 {
1361 "recvTransaction->checkTransaction",
1363 flags,
1364 checkSignature,
1365 batch,
1366 stx]() {
1367 if (auto peer = weak.lock())
1368 peer->checkTransaction(
1369 flags, checkSignature, stx, batch);
1370 });
1371 }
1372 }
1373 catch (std::exception const& ex)
1374 {
1375 JLOG(p_journal_.warn())
1376 << "Transaction invalid: " << strHex(m->rawtransaction())
1377 << ". Exception: " << ex.what();
1378 }
1379}
1380
1381void
1383{
1384 auto badData = [&](std::string const& msg) {
1385 fee_.update(Resource::feeInvalidData, "get_ledger " + msg);
1386 JLOG(p_journal_.warn()) << "TMGetLedger: " << msg;
1387 };
1388 auto const itype{m->itype()};
1389
1390 // Verify ledger info type
1391 if (itype < protocol::liBASE || itype > protocol::liTS_CANDIDATE)
1392 return badData("Invalid ledger info type");
1393
1394 auto const ltype = [&m]() -> std::optional<::protocol::TMLedgerType> {
1395 if (m->has_ltype())
1396 return m->ltype();
1397 return std::nullopt;
1398 }();
1399
1400 if (itype == protocol::liTS_CANDIDATE)
1401 {
1402 if (!m->has_ledgerhash())
1403 return badData("Invalid TX candidate set, missing TX set hash");
1404 }
1405 else if (
1406 !m->has_ledgerhash() && !m->has_ledgerseq() &&
1407 !(ltype && *ltype == protocol::ltCLOSED))
1408 {
1409 return badData("Invalid request");
1410 }
1411
1412 // Verify ledger type
1413 if (ltype && (*ltype < protocol::ltACCEPTED || *ltype > protocol::ltCLOSED))
1414 return badData("Invalid ledger type");
1415
1416 // Verify ledger hash
1417 if (m->has_ledgerhash() && !stringIsUint256Sized(m->ledgerhash()))
1418 return badData("Invalid ledger hash");
1419
1420 // Verify ledger sequence
1421 if (m->has_ledgerseq())
1422 {
1423 auto const ledgerSeq{m->ledgerseq()};
1424
1425 // Check if within a reasonable range
1426 using namespace std::chrono_literals;
1428 ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1429 {
1430 return badData(
1431 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1432 }
1433 }
1434
1435 // Verify ledger node IDs
1436 if (itype != protocol::liBASE)
1437 {
1438 if (m->nodeids_size() <= 0)
1439 return badData("Invalid ledger node IDs");
1440
1441 for (auto const& nodeId : m->nodeids())
1442 {
1443 if (deserializeSHAMapNodeID(nodeId) == std::nullopt)
1444 return badData("Invalid SHAMap node ID");
1445 }
1446 }
1447
1448 // Verify query type
1449 if (m->has_querytype() && m->querytype() != protocol::qtINDIRECT)
1450 return badData("Invalid query type");
1451
1452 // Verify query depth
1453 if (m->has_querydepth())
1454 {
1455 if (m->querydepth() > Tuning::maxQueryDepth ||
1456 itype == protocol::liBASE)
1457 {
1458 return badData("Invalid query depth");
1459 }
1460 }
1461
1462 // Queue a job to process the request
1464 app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m]() {
1465 if (auto peer = weak.lock())
1466 peer->processLedgerRequest(m);
1467 });
1468}
1469
1470void
1472{
1473 JLOG(p_journal_.trace()) << "onMessage, TMProofPathRequest";
1475 {
1476 fee_.update(
1477 Resource::feeMalformedRequest, "proof_path_request disabled");
1478 return;
1479 }
1480
1481 fee_.update(
1482 Resource::feeModerateBurdenPeer, "received a proof path request");
1485 jtREPLAY_REQ, "recvProofPathRequest", [weak, m]() {
1486 if (auto peer = weak.lock())
1487 {
1488 auto reply =
1489 peer->ledgerReplayMsgHandler_.processProofPathRequest(m);
1490 if (reply.has_error())
1491 {
1492 if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1493 peer->charge(
1494 Resource::feeMalformedRequest,
1495 "proof_path_request");
1496 else
1497 peer->charge(
1498 Resource::feeRequestNoReply, "proof_path_request");
1499 }
1500 else
1501 {
1502 peer->send(std::make_shared<Message>(
1503 reply, protocol::mtPROOF_PATH_RESPONSE));
1504 }
1505 }
1506 });
1507}
1508
1509void
1511{
1512 if (!ledgerReplayEnabled_)
1513 {
1514 fee_.update(
1515 Resource::feeMalformedRequest, "proof_path_response disabled");
1516 return;
1517 }
1518
1519 if (!ledgerReplayMsgHandler_.processProofPathResponse(m))
1520 {
1521 fee_.update(Resource::feeInvalidData, "proof_path_response");
1522 }
1523}
1524
1525void
1527{
1528 JLOG(p_journal_.trace()) << "onMessage, TMReplayDeltaRequest";
1529 if (!ledgerReplayEnabled_)
1530 {
1531 fee_.update(
1532 Resource::feeMalformedRequest, "replay_delta_request disabled");
1533 return;
1534 }
1535
1536 fee_.fee = Resource::feeModerateBurdenPeer;
1537 std::weak_ptr<PeerImp> weak = shared_from_this();
1538 app_.getJobQueue().addJob(
1539 jtREPLAY_REQ, "recvReplayDeltaRequest", [weak, m]() {
1540 if (auto peer = weak.lock())
1541 {
1542 auto reply =
1543 peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m);
1544 if (reply.has_error())
1545 {
1546 if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1547 peer->charge(
1548 Resource::feeMalformedRequest,
1549 "replay_delta_request");
1550 else
1551 peer->charge(
1552 Resource::feeRequestNoReply,
1553 "replay_delta_request");
1554 }
1555 else
1556 {
1557 peer->send(std::make_shared<Message>(
1558 reply, protocol::mtREPLAY_DELTA_RESPONSE));
1559 }
1560 }
1561 });
1562}
1563
1564void
1566{
1567 if (!ledgerReplayEnabled_)
1568 {
1569 fee_.update(
1570 Resource::feeMalformedRequest, "replay_delta_response disabled");
1571 return;
1572 }
1573
1574 if (!ledgerReplayMsgHandler_.processReplayDeltaResponse(m))
1575 {
1576 fee_.update(Resource::feeInvalidData, "replay_delta_response");
1577 }
1578}
1579
1580void
1582{
1583 auto badData = [&](std::string const& msg) {
1584 fee_.update(Resource::feeInvalidData, msg);
1585 JLOG(p_journal_.warn()) << "TMLedgerData: " << msg;
1586 };
1587
1588 // Verify ledger hash
1589 if (!stringIsUint256Sized(m->ledgerhash()))
1590 return badData("Invalid ledger hash");
1591
1592 // Verify ledger sequence
1593 {
1594 auto const ledgerSeq{m->ledgerseq()};
1595 if (m->type() == protocol::liTS_CANDIDATE)
1596 {
1597 if (ledgerSeq != 0)
1598 {
1599 return badData(
1600 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1601 }
1602 }
1603 else
1604 {
1605 // Check if within a reasonable range
1606 using namespace std::chrono_literals;
1607 if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s &&
1608 ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1609 {
1610 return badData(
1611 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1612 }
1613 }
1614 }
1615
1616 // Verify ledger info type
1617 if (m->type() < protocol::liBASE || m->type() > protocol::liTS_CANDIDATE)
1618 return badData("Invalid ledger info type");
1619
1620 // Verify reply error
1621 if (m->has_error() &&
1622 (m->error() < protocol::reNO_LEDGER ||
1623 m->error() > protocol::reBAD_REQUEST))
1624 {
1625 return badData("Invalid reply error");
1626 }
1627
1628 // Verify ledger nodes.
1629 if (m->nodes_size() <= 0 || m->nodes_size() > Tuning::hardMaxReplyNodes)
1630 {
1631 return badData(
1632 "Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size()));
1633 }
1634
1635 // If there is a request cookie, attempt to relay the message
1636 if (m->has_requestcookie())
1637 {
1638 if (auto peer = overlay_.findPeerByShortID(m->requestcookie()))
1639 {
1640 m->clear_requestcookie();
1641 peer->send(std::make_shared<Message>(*m, protocol::mtLEDGER_DATA));
1642 }
1643 else
1644 {
1645 JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
1646 }
1647 return;
1648 }
1649
1650 uint256 const ledgerHash{m->ledgerhash()};
1651
1652 // Otherwise check if received data for a candidate transaction set
1653 if (m->type() == protocol::liTS_CANDIDATE)
1654 {
1655 std::weak_ptr<PeerImp> weak{shared_from_this()};
1656 app_.getJobQueue().addJob(
1657 jtTXN_DATA, "recvPeerData", [weak, ledgerHash, m]() {
1658 if (auto peer = weak.lock())
1659 {
1660 peer->app_.getInboundTransactions().gotData(
1661 ledgerHash, peer, m);
1662 }
1663 });
1664 return;
1665 }
1666
1667 // Consume the message
1668 app_.getInboundLedgers().gotLedgerData(ledgerHash, shared_from_this(), m);
1669}
1670
1671void
1673{
1674 protocol::TMProposeSet& set = *m;
1675
1676 auto const sig = makeSlice(set.signature());
1677
1678 // Preliminary check for the validity of the signature: A DER encoded
1679 // signature can't be longer than 72 bytes.
1680 if ((std::clamp<std::size_t>(sig.size(), 64, 72) != sig.size()) ||
1681 (publicKeyType(makeSlice(set.nodepubkey())) != KeyType::secp256k1))
1682 {
1683 JLOG(p_journal_.warn()) << "Proposal: malformed";
1684 fee_.update(
1685 Resource::feeInvalidSignature,
1686 " signature can't be longer than 72 bytes");
1687 return;
1688 }
1689
1690 if (!stringIsUint256Sized(set.currenttxhash()) ||
1691 !stringIsUint256Sized(set.previousledger()))
1692 {
1693 JLOG(p_journal_.warn()) << "Proposal: malformed";
1694 fee_.update(Resource::feeMalformedRequest, "bad hashes");
1695 return;
1696 }
1697
1698 // RH TODO: when isTrusted = false we should probably also cache a key
1699 // suppression for 30 seconds to avoid doing a relatively expensive lookup
1700 // every time a spam packet is received
1701 PublicKey const publicKey{makeSlice(set.nodepubkey())};
1702 auto const isTrusted = app_.validators().trusted(publicKey);
1703
1704 // If the operator has specified that untrusted proposals be dropped then
1705 // this happens here I.e. before further wasting CPU verifying the signature
1706 // of an untrusted key
1707 if (!isTrusted)
1708 {
1709 // report untrusted proposal messages
1710 overlay_.reportInboundTraffic(
1711 TrafficCount::category::proposal_untrusted,
1712 Message::messageSize(*m));
1713
1714 if (app_.config().RELAY_UNTRUSTED_PROPOSALS == -1)
1715 return;
1716 }
1717
1718 uint256 const proposeHash{set.currenttxhash()};
1719 uint256 const prevLedger{set.previousledger()};
1720
1721 NetClock::time_point const closeTime{NetClock::duration{set.closetime()}};
1722
1723 uint256 const suppression = proposalUniqueId(
1724 proposeHash,
1725 prevLedger,
1726 set.proposeseq(),
1727 closeTime,
1728 publicKey.slice(),
1729 sig);
1730
1731 if (auto [added, relayed] =
1732 app_.getHashRouter().addSuppressionPeerWithStatus(suppression, id_);
1733 !added)
1734 {
1735 // Count unique messages (Slots has it's own 'HashRouter'), which a peer
1736 // receives within IDLED seconds since the message has been relayed.
1737 if (relayed && (stopwatch().now() - *relayed) < reduce_relay::IDLED)
1738 overlay_.updateSlotAndSquelch(
1739 suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);
1740
1741 // report duplicate proposal messages
1742 overlay_.reportInboundTraffic(
1743 TrafficCount::category::proposal_duplicate,
1744 Message::messageSize(*m));
1745
1746 JLOG(p_journal_.trace()) << "Proposal: duplicate";
1747
1748 return;
1749 }
1750
1751 if (!isTrusted)
1752 {
1753 if (tracking_.load() == Tracking::diverged)
1754 {
1755 JLOG(p_journal_.debug())
1756 << "Proposal: Dropping untrusted (peer divergence)";
1757 return;
1758 }
1759
1760 if (!cluster() && app_.getFeeTrack().isLoadedLocal())
1761 {
1762 JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (load)";
1763 return;
1764 }
1765 }
1766
1767 JLOG(p_journal_.trace())
1768 << "Proposal: " << (isTrusted ? "trusted" : "untrusted");
1769
1770 auto proposal = RCLCxPeerPos(
1771 publicKey,
1772 sig,
1773 suppression,
1775 prevLedger,
1776 set.proposeseq(),
1777 proposeHash,
1778 closeTime,
1779 app_.timeKeeper().closeTime(),
1780 calcNodeID(app_.validatorManifests().getMasterKey(publicKey))});
1781
1782 std::weak_ptr<PeerImp> weak = shared_from_this();
1783 app_.getJobQueue().addJob(
1784 isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut,
1785 "recvPropose->checkPropose",
1786 [weak, isTrusted, m, proposal]() {
1787 if (auto peer = weak.lock())
1788 peer->checkPropose(isTrusted, m, proposal);
1789 });
1790}
1791
1792void
1794{
1795 JLOG(p_journal_.trace()) << "Status: Change";
1796
1797 if (!m->has_networktime())
1798 m->set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1799
1800 {
1801 std::lock_guard sl(recentLock_);
1802 if (!last_status_.has_newstatus() || m->has_newstatus())
1803 last_status_ = *m;
1804 else
1805 {
1806 // preserve old status
1807 protocol::NodeStatus status = last_status_.newstatus();
1808 last_status_ = *m;
1809 m->set_newstatus(status);
1810 }
1811 }
1812
1813 if (m->newevent() == protocol::neLOST_SYNC)
1814 {
1815 bool outOfSync{false};
1816 {
1817 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1818 // guarded by recentLock_.
1819 std::lock_guard sl(recentLock_);
1820 if (!closedLedgerHash_.isZero())
1821 {
1822 outOfSync = true;
1823 closedLedgerHash_.zero();
1824 }
1825 previousLedgerHash_.zero();
1826 }
1827 if (outOfSync)
1828 {
1829 JLOG(p_journal_.debug()) << "Status: Out of sync";
1830 }
1831 return;
1832 }
1833
1834 {
1835 uint256 closedLedgerHash{};
1836 bool const peerChangedLedgers{
1837 m->has_ledgerhash() && stringIsUint256Sized(m->ledgerhash())};
1838
1839 {
1840 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1841 // guarded by recentLock_.
1842 std::lock_guard sl(recentLock_);
1843 if (peerChangedLedgers)
1844 {
1845 closedLedgerHash_ = m->ledgerhash();
1846 closedLedgerHash = closedLedgerHash_;
1847 addLedger(closedLedgerHash, sl);
1848 }
1849 else
1850 {
1851 closedLedgerHash_.zero();
1852 }
1853
1854 if (m->has_ledgerhashprevious() &&
1855 stringIsUint256Sized(m->ledgerhashprevious()))
1856 {
1857 previousLedgerHash_ = m->ledgerhashprevious();
1858 addLedger(previousLedgerHash_, sl);
1859 }
1860 else
1861 {
1862 previousLedgerHash_.zero();
1863 }
1864 }
1865 if (peerChangedLedgers)
1866 {
1867 JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;
1868 }
1869 else
1870 {
1871 JLOG(p_journal_.debug()) << "Status: No ledger";
1872 }
1873 }
1874
1875 if (m->has_firstseq() && m->has_lastseq())
1876 {
1877 std::lock_guard sl(recentLock_);
1878
1879 minLedger_ = m->firstseq();
1880 maxLedger_ = m->lastseq();
1881
1882 if ((maxLedger_ < minLedger_) || (minLedger_ == 0) || (maxLedger_ == 0))
1883 minLedger_ = maxLedger_ = 0;
1884 }
1885
1886 if (m->has_ledgerseq() &&
1887 app_.getLedgerMaster().getValidatedLedgerAge() < 2min)
1888 {
1889 checkTracking(
1890 m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex());
1891 }
1892
1893 app_.getOPs().pubPeerStatus([=, this]() -> Json::Value {
1895
1896 if (m->has_newstatus())
1897 {
1898 switch (m->newstatus())
1899 {
1900 case protocol::nsCONNECTING:
1901 j[jss::status] = "CONNECTING";
1902 break;
1903 case protocol::nsCONNECTED:
1904 j[jss::status] = "CONNECTED";
1905 break;
1906 case protocol::nsMONITORING:
1907 j[jss::status] = "MONITORING";
1908 break;
1909 case protocol::nsVALIDATING:
1910 j[jss::status] = "VALIDATING";
1911 break;
1912 case protocol::nsSHUTTING:
1913 j[jss::status] = "SHUTTING";
1914 break;
1915 }
1916 }
1917
1918 if (m->has_newevent())
1919 {
1920 switch (m->newevent())
1921 {
1922 case protocol::neCLOSING_LEDGER:
1923 j[jss::action] = "CLOSING_LEDGER";
1924 break;
1925 case protocol::neACCEPTED_LEDGER:
1926 j[jss::action] = "ACCEPTED_LEDGER";
1927 break;
1928 case protocol::neSWITCHED_LEDGER:
1929 j[jss::action] = "SWITCHED_LEDGER";
1930 break;
1931 case protocol::neLOST_SYNC:
1932 j[jss::action] = "LOST_SYNC";
1933 break;
1934 }
1935 }
1936
1937 if (m->has_ledgerseq())
1938 {
1939 j[jss::ledger_index] = m->ledgerseq();
1940 }
1941
1942 if (m->has_ledgerhash())
1943 {
1944 uint256 closedLedgerHash{};
1945 {
1946 std::lock_guard sl(recentLock_);
1947 closedLedgerHash = closedLedgerHash_;
1948 }
1949 j[jss::ledger_hash] = to_string(closedLedgerHash);
1950 }
1951
1952 if (m->has_networktime())
1953 {
1954 j[jss::date] = Json::UInt(m->networktime());
1955 }
1956
1957 if (m->has_firstseq() && m->has_lastseq())
1958 {
1959 j[jss::ledger_index_min] = Json::UInt(m->firstseq());
1960 j[jss::ledger_index_max] = Json::UInt(m->lastseq());
1961 }
1962
1963 return j;
1964 });
1965}
1966
1967void
1968PeerImp::checkTracking(std::uint32_t validationSeq)
1969{
1970 std::uint32_t serverSeq;
1971 {
1972 // Extract the sequence number of the highest
1973 // ledger this peer has
1974 std::lock_guard sl(recentLock_);
1975
1976 serverSeq = maxLedger_;
1977 }
1978 if (serverSeq != 0)
1979 {
1980 // Compare the peer's ledger sequence to the
1981 // sequence of a recently-validated ledger
1982 checkTracking(serverSeq, validationSeq);
1983 }
1984}
1985
1986void
1987PeerImp::checkTracking(std::uint32_t seq1, std::uint32_t seq2)
1988{
1989 int diff = std::max(seq1, seq2) - std::min(seq1, seq2);
1990
1991 if (diff < Tuning::convergedLedgerLimit)
1992 {
1993 // The peer's ledger sequence is close to the validation's
1994 tracking_ = Tracking::converged;
1995 }
1996
1997 if ((diff > Tuning::divergedLedgerLimit) &&
1998 (tracking_.load() != Tracking::diverged))
1999 {
2000 // The peer's ledger sequence is way off the validation's
2001 std::lock_guard sl(recentLock_);
2002
2003 tracking_ = Tracking::diverged;
2004 trackingTime_ = clock_type::now();
2005 }
2006}
2007
2008void
2010{
2011 if (!stringIsUint256Sized(m->hash()))
2012 {
2013 fee_.update(Resource::feeMalformedRequest, "bad hash");
2014 return;
2015 }
2016
2017 uint256 const hash{m->hash()};
2018
2019 if (m->status() == protocol::tsHAVE)
2020 {
2021 std::lock_guard sl(recentLock_);
2022
2023 if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
2024 recentTxSets_.end())
2025 {
2026 fee_.update(Resource::feeUselessData, "duplicate (tsHAVE)");
2027 return;
2028 }
2029
2030 recentTxSets_.push_back(hash);
2031 }
2032}
2033
2034void
2035PeerImp::onValidatorListMessage(
2036 std::string const& messageType,
2037 std::string const& manifest,
2038 std::uint32_t version,
2039 std::vector<ValidatorBlobInfo> const& blobs)
2040{
2041 // If there are no blobs, the message is malformed (possibly because of
2042 // ValidatorList class rules), so charge accordingly and skip processing.
2043 if (blobs.empty())
2044 {
2045 JLOG(p_journal_.warn()) << "Ignored malformed " << messageType
2046 << " from peer " << remote_address_;
2047 // This shouldn't ever happen with a well-behaved peer
2048 fee_.update(Resource::feeHeavyBurdenPeer, "no blobs");
2049 return;
2050 }
2051
2052 auto const hash = sha512Half(manifest, blobs, version);
2053
2054 JLOG(p_journal_.debug())
2055 << "Received " << messageType << " from " << remote_address_.to_string()
2056 << " (" << id_ << ")";
2057
2058 if (!app_.getHashRouter().addSuppressionPeer(hash, id_))
2059 {
2060 JLOG(p_journal_.debug())
2061 << messageType << ": received duplicate " << messageType;
2062 // Charging this fee here won't hurt the peer in the normal
2063 // course of operation (ie. refresh every 5 minutes), but
2064 // will add up if the peer is misbehaving.
2065 fee_.update(Resource::feeUselessData, "duplicate");
2066 return;
2067 }
2068
2069 auto const applyResult = app_.validators().applyListsAndBroadcast(
2070 manifest,
2071 version,
2072 blobs,
2073 remote_address_.to_string(),
2074 hash,
2075 app_.overlay(),
2076 app_.getHashRouter(),
2077 app_.getOPs());
2078
2079 JLOG(p_journal_.debug())
2080 << "Processed " << messageType << " version " << version << " from "
2081 << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
2082 : "unknown or invalid publisher")
2083 << " from " << remote_address_.to_string() << " (" << id_
2084 << ") with best result " << to_string(applyResult.bestDisposition());
2085
2086 // Act based on the best result
2087 switch (applyResult.bestDisposition())
2088 {
2089 // New list
2090 case ListDisposition::accepted:
2091 // Newest list is expired, and that needs to be broadcast, too
2092 case ListDisposition::expired:
2093 // Future list
2094 case ListDisposition::pending: {
2095 std::lock_guard<std::mutex> sl(recentLock_);
2096
2097 XRPL_ASSERT(
2098 applyResult.publisherKey,
2099 "ripple::PeerImp::onValidatorListMessage : publisher key is "
2100 "set");
2101 auto const& pubKey = *applyResult.publisherKey;
2102#ifndef NDEBUG
2103 if (auto const iter = publisherListSequences_.find(pubKey);
2104 iter != publisherListSequences_.end())
2105 {
2106 XRPL_ASSERT(
2107 iter->second < applyResult.sequence,
2108 "ripple::PeerImp::onValidatorListMessage : lower sequence");
2109 }
2110#endif
2111 publisherListSequences_[pubKey] = applyResult.sequence;
2112 }
2113 break;
2114 case ListDisposition::same_sequence:
2115 case ListDisposition::known_sequence:
2116#ifndef NDEBUG
2117 {
2118 std::lock_guard<std::mutex> sl(recentLock_);
2119 XRPL_ASSERT(
2120 applyResult.sequence && applyResult.publisherKey,
2121 "ripple::PeerImp::onValidatorListMessage : nonzero sequence "
2122 "and set publisher key");
2123 XRPL_ASSERT(
2124 publisherListSequences_[*applyResult.publisherKey] <=
2125 applyResult.sequence,
2126 "ripple::PeerImp::onValidatorListMessage : maximum sequence");
2127 }
2128#endif // !NDEBUG
2129
2130 break;
2131 case ListDisposition::stale:
2132 case ListDisposition::untrusted:
2133 case ListDisposition::invalid:
2134 case ListDisposition::unsupported_version:
2135 break;
2136 default:
2137 UNREACHABLE(
2138 "ripple::PeerImp::onValidatorListMessage : invalid best list "
2139 "disposition");
2140 }
2141
2142 // Charge based on the worst result
2143 switch (applyResult.worstDisposition())
2144 {
2145 case ListDisposition::accepted:
2146 case ListDisposition::expired:
2147 case ListDisposition::pending:
2148 // No charges for good data
2149 break;
2150 case ListDisposition::same_sequence:
2151 case ListDisposition::known_sequence:
2152 // Charging this fee here won't hurt the peer in the normal
2153 // course of operation (ie. refresh every 5 minutes), but
2154 // will add up if the peer is misbehaving.
2155 fee_.update(
2156 Resource::feeUselessData,
2157 " duplicate (same_sequence or known_sequence)");
2158 break;
2159 case ListDisposition::stale:
2160 // There are very few good reasons for a peer to send an
2161 // old list, particularly more than once.
2162 fee_.update(Resource::feeInvalidData, "expired");
2163 break;
2164 case ListDisposition::untrusted:
2165 // Charging this fee here won't hurt the peer in the normal
2166 // course of operation (ie. refresh every 5 minutes), but
2167 // will add up if the peer is misbehaving.
2168 fee_.update(Resource::feeUselessData, "untrusted");
2169 break;
2170 case ListDisposition::invalid:
2171 // This shouldn't ever happen with a well-behaved peer
2172 fee_.update(
2173 Resource::feeInvalidSignature, "invalid list disposition");
2174 break;
2175 case ListDisposition::unsupported_version:
2176 // During a version transition, this may be legitimate.
2177 // If it happens frequently, that's probably bad.
2178 fee_.update(Resource::feeInvalidData, "version");
2179 break;
2180 default:
2181 UNREACHABLE(
2182 "ripple::PeerImp::onValidatorListMessage : invalid worst list "
2183 "disposition");
2184 }
2185
2186 // Log based on all the results.
2187 for (auto const& [disp, count] : applyResult.dispositions)
2188 {
2189 switch (disp)
2190 {
2191 // New list
2192 case ListDisposition::accepted:
2193 JLOG(p_journal_.debug())
2194 << "Applied " << count << " new " << messageType
2195 << "(s) from peer " << remote_address_;
2196 break;
2197 // Newest list is expired, and that needs to be broadcast, too
2198 case ListDisposition::expired:
2199 JLOG(p_journal_.debug())
2200 << "Applied " << count << " expired " << messageType
2201 << "(s) from peer " << remote_address_;
2202 break;
2203 // Future list
2204 case ListDisposition::pending:
2205 JLOG(p_journal_.debug())
2206 << "Processed " << count << " future " << messageType
2207 << "(s) from peer " << remote_address_;
2208 break;
2209 case ListDisposition::same_sequence:
2210 JLOG(p_journal_.warn())
2211 << "Ignored " << count << " " << messageType
2212 << "(s) with current sequence from peer "
2213 << remote_address_;
2214 break;
2215 case ListDisposition::known_sequence:
2216 JLOG(p_journal_.warn())
2217 << "Ignored " << count << " " << messageType
2218 << "(s) with future sequence from peer " << remote_address_;
2219 break;
2220 case ListDisposition::stale:
2221 JLOG(p_journal_.warn())
2222 << "Ignored " << count << "stale " << messageType
2223 << "(s) from peer " << remote_address_;
2224 break;
2225 case ListDisposition::untrusted:
2226 JLOG(p_journal_.warn())
2227 << "Ignored " << count << " untrusted " << messageType
2228 << "(s) from peer " << remote_address_;
2229 break;
2230 case ListDisposition::unsupported_version:
2231 JLOG(p_journal_.warn())
2232 << "Ignored " << count << "unsupported version "
2233 << messageType << "(s) from peer " << remote_address_;
2234 break;
2235 case ListDisposition::invalid:
2236 JLOG(p_journal_.warn())
2237 << "Ignored " << count << "invalid " << messageType
2238 << "(s) from peer " << remote_address_;
2239 break;
2240 default:
2241 UNREACHABLE(
2242 "ripple::PeerImp::onValidatorListMessage : invalid list "
2243 "disposition");
2244 }
2245 }
2246}
2247
2248void
2250{
2251 try
2252 {
2253 if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))
2254 {
2255 JLOG(p_journal_.debug())
2256 << "ValidatorList: received validator list from peer using "
2257 << "protocol version " << to_string(protocol_)
2258 << " which shouldn't support this feature.";
2259 fee_.update(Resource::feeUselessData, "unsupported peer");
2260 return;
2261 }
2262 onValidatorListMessage(
2263 "ValidatorList",
2264 m->manifest(),
2265 m->version(),
2266 ValidatorList::parseBlobs(*m));
2267 }
2268 catch (std::exception const& e)
2269 {
2270 JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what()
2271 << " from peer " << remote_address_;
2272 using namespace std::string_literals;
2273 fee_.update(Resource::feeInvalidData, e.what());
2274 }
2275}
2276
2277void
2278PeerImp::onMessage(
2280{
2281 try
2282 {
2283 if (!supportsFeature(ProtocolFeature::ValidatorList2Propagation))
2284 {
2285 JLOG(p_journal_.debug())
2286 << "ValidatorListCollection: received validator list from peer "
2287 << "using protocol version " << to_string(protocol_)
2288 << " which shouldn't support this feature.";
2289 fee_.update(Resource::feeUselessData, "unsupported peer");
2290 return;
2291 }
2292 else if (m->version() < 2)
2293 {
2294 JLOG(p_journal_.debug())
2295 << "ValidatorListCollection: received invalid validator list "
2296 "version "
2297 << m->version() << " from peer using protocol version "
2298 << to_string(protocol_);
2299 fee_.update(Resource::feeInvalidData, "wrong version");
2300 return;
2301 }
2302 onValidatorListMessage(
2303 "ValidatorListCollection",
2304 m->manifest(),
2305 m->version(),
2306 ValidatorList::parseBlobs(*m));
2307 }
2308 catch (std::exception const& e)
2309 {
2310 JLOG(p_journal_.warn()) << "ValidatorListCollection: Exception, "
2311 << e.what() << " from peer " << remote_address_;
2312 using namespace std::string_literals;
2313 fee_.update(Resource::feeInvalidData, e.what());
2314 }
2315}
2316
2317void
2319{
2320 if (m->validation().size() < 50)
2321 {
2322 JLOG(p_journal_.warn()) << "Validation: Too small";
2323 fee_.update(Resource::feeMalformedRequest, "too small");
2324 return;
2325 }
2326
2327 try
2328 {
2329 auto const closeTime = app_.timeKeeper().closeTime();
2330
2332 {
2333 SerialIter sit(makeSlice(m->validation()));
2334 val = std::make_shared<STValidation>(
2335 std::ref(sit),
2336 [this](PublicKey const& pk) {
2337 return calcNodeID(
2338 app_.validatorManifests().getMasterKey(pk));
2339 },
2340 false);
2341 val->setSeen(closeTime);
2342 }
2343
2344 if (!isCurrent(
2345 app_.getValidations().parms(),
2346 app_.timeKeeper().closeTime(),
2347 val->getSignTime(),
2348 val->getSeenTime()))
2349 {
2350 JLOG(p_journal_.trace()) << "Validation: Not current";
2351 fee_.update(Resource::feeUselessData, "not current");
2352 return;
2353 }
2354
2355 // RH TODO: when isTrusted = false we should probably also cache a key
2356 // suppression for 30 seconds to avoid doing a relatively expensive
2357 // lookup every time a spam packet is received
2358 auto const isTrusted =
2359 app_.validators().trusted(val->getSignerPublic());
2360
2361 // If the operator has specified that untrusted validations be
2362 // dropped then this happens here I.e. before further wasting CPU
2363 // verifying the signature of an untrusted key
2364 if (!isTrusted)
2365 {
2366 // increase untrusted validations received
2367 overlay_.reportInboundTraffic(
2368 TrafficCount::category::validation_untrusted,
2369 Message::messageSize(*m));
2370
2371 if (app_.config().RELAY_UNTRUSTED_VALIDATIONS == -1)
2372 return;
2373 }
2374
2375 auto key = sha512Half(makeSlice(m->validation()));
2376
2377 auto [added, relayed] =
2378 app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);
2379
2380 if (!added)
2381 {
2382 // Count unique messages (Slots has it's own 'HashRouter'), which a
2383 // peer receives within IDLED seconds since the message has been
2384 // relayed.
2385 if (relayed && (stopwatch().now() - *relayed) < reduce_relay::IDLED)
2386 overlay_.updateSlotAndSquelch(
2387 key, val->getSignerPublic(), id_, protocol::mtVALIDATION);
2388
2389 // increase duplicate validations received
2390 overlay_.reportInboundTraffic(
2391 TrafficCount::category::validation_duplicate,
2392 Message::messageSize(*m));
2393
2394 JLOG(p_journal_.trace()) << "Validation: duplicate";
2395 return;
2396 }
2397
2398 if (!isTrusted && (tracking_.load() == Tracking::diverged))
2399 {
2400 JLOG(p_journal_.debug())
2401 << "Dropping untrusted validation from diverged peer";
2402 }
2403 else if (isTrusted || !app_.getFeeTrack().isLoadedLocal())
2404 {
2405 std::string const name = [isTrusted, val]() {
2406 std::string ret =
2407 isTrusted ? "Trusted validation" : "Untrusted validation";
2408
2409#ifdef DEBUG
2410 ret += " " +
2411 std::to_string(val->getFieldU32(sfLedgerSequence)) + ": " +
2412 to_string(val->getNodeID());
2413#endif
2414
2415 return ret;
2416 }();
2417
2418 std::weak_ptr<PeerImp> weak = shared_from_this();
2419 app_.getJobQueue().addJob(
2420 isTrusted ? jtVALIDATION_t : jtVALIDATION_ut,
2421 name,
2422 [weak, val, m, key]() {
2423 if (auto peer = weak.lock())
2424 peer->checkValidation(val, key, m);
2425 });
2426 }
2427 else
2428 {
2429 JLOG(p_journal_.debug())
2430 << "Dropping untrusted validation for load";
2431 }
2432 }
2433 catch (std::exception const& e)
2434 {
2435 JLOG(p_journal_.warn())
2436 << "Exception processing validation: " << e.what();
2437 using namespace std::string_literals;
2438 fee_.update(Resource::feeMalformedRequest, e.what());
2439 }
2440}
2441
2442void
2444{
2445 protocol::TMGetObjectByHash& packet = *m;
2446
2447 JLOG(p_journal_.trace()) << "received TMGetObjectByHash " << packet.type()
2448 << " " << packet.objects_size();
2449
2450 if (packet.query())
2451 {
2452 // this is a query
2453 if (send_queue_.size() >= Tuning::dropSendQueue)
2454 {
2455 JLOG(p_journal_.debug()) << "GetObject: Large send queue";
2456 return;
2457 }
2458
2459 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2460 {
2461 doFetchPack(m);
2462 return;
2463 }
2464
2465 if (packet.type() == protocol::TMGetObjectByHash::otTRANSACTIONS)
2466 {
2467 if (!txReduceRelayEnabled())
2468 {
2469 JLOG(p_journal_.error())
2470 << "TMGetObjectByHash: tx reduce-relay is disabled";
2471 fee_.update(Resource::feeMalformedRequest, "disabled");
2472 return;
2473 }
2474
2475 std::weak_ptr<PeerImp> weak = shared_from_this();
2476 app_.getJobQueue().addJob(
2477 jtREQUESTED_TXN, "doTransactions", [weak, m]() {
2478 if (auto peer = weak.lock())
2479 peer->doTransactions(m);
2480 });
2481 return;
2482 }
2483
2484 protocol::TMGetObjectByHash reply;
2485
2486 reply.set_query(false);
2487
2488 if (packet.has_seq())
2489 reply.set_seq(packet.seq());
2490
2491 reply.set_type(packet.type());
2492
2493 if (packet.has_ledgerhash())
2494 {
2495 if (!stringIsUint256Sized(packet.ledgerhash()))
2496 {
2497 fee_.update(Resource::feeMalformedRequest, "ledger hash");
2498 return;
2499 }
2500
2501 reply.set_ledgerhash(packet.ledgerhash());
2502 }
2503
2504 fee_.update(
2505 Resource::feeModerateBurdenPeer,
2506 " received a get object by hash request");
2507
2508 // This is a very minimal implementation
2509 for (int i = 0; i < packet.objects_size(); ++i)
2510 {
2511 auto const& obj = packet.objects(i);
2512 if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2513 {
2514 uint256 const hash{obj.hash()};
2515 // VFALCO TODO Move this someplace more sensible so we dont
2516 // need to inject the NodeStore interfaces.
2517 std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
2518 auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
2519 if (nodeObject)
2520 {
2521 protocol::TMIndexedObject& newObj = *reply.add_objects();
2522 newObj.set_hash(hash.begin(), hash.size());
2523 newObj.set_data(
2524 &nodeObject->getData().front(),
2525 nodeObject->getData().size());
2526
2527 if (obj.has_nodeid())
2528 newObj.set_index(obj.nodeid());
2529 if (obj.has_ledgerseq())
2530 newObj.set_ledgerseq(obj.ledgerseq());
2531
2532 // VFALCO NOTE "seq" in the message is obsolete
2533 }
2534 }
2535 }
2536
2537 JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size() << " of "
2538 << packet.objects_size();
2539 send(std::make_shared<Message>(reply, protocol::mtGET_OBJECTS));
2540 }
2541 else
2542 {
2543 // this is a reply
2544 std::uint32_t pLSeq = 0;
2545 bool pLDo = true;
2546 bool progress = false;
2547
2548 for (int i = 0; i < packet.objects_size(); ++i)
2549 {
2550 protocol::TMIndexedObject const& obj = packet.objects(i);
2551
2552 if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2553 {
2554 if (obj.has_ledgerseq())
2555 {
2556 if (obj.ledgerseq() != pLSeq)
2557 {
2558 if (pLDo && (pLSeq != 0))
2559 {
2560 JLOG(p_journal_.debug())
2561 << "GetObj: Full fetch pack for " << pLSeq;
2562 }
2563 pLSeq = obj.ledgerseq();
2564 pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);
2565
2566 if (!pLDo)
2567 {
2568 JLOG(p_journal_.debug())
2569 << "GetObj: Late fetch pack for " << pLSeq;
2570 }
2571 else
2572 progress = true;
2573 }
2574 }
2575
2576 if (pLDo)
2577 {
2578 uint256 const hash{obj.hash()};
2579
2580 app_.getLedgerMaster().addFetchPack(
2581 hash,
2582 std::make_shared<Blob>(
2583 obj.data().begin(), obj.data().end()));
2584 }
2585 }
2586 }
2587
2588 if (pLDo && (pLSeq != 0))
2589 {
2590 JLOG(p_journal_.debug())
2591 << "GetObj: Partial fetch pack for " << pLSeq;
2592 }
2593 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2594 app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
2595 }
2596}
2597
2598void
2600{
2601 if (!txReduceRelayEnabled())
2602 {
2603 JLOG(p_journal_.error())
2604 << "TMHaveTransactions: tx reduce-relay is disabled";
2605 fee_.update(Resource::feeMalformedRequest, "disabled");
2606 return;
2607 }
2608
2609 std::weak_ptr<PeerImp> weak = shared_from_this();
2610 app_.getJobQueue().addJob(
2611 jtMISSING_TXN, "handleHaveTransactions", [weak, m]() {
2612 if (auto peer = weak.lock())
2613 peer->handleHaveTransactions(m);
2614 });
2615}
2616
2617void
2618PeerImp::handleHaveTransactions(
2620{
2621 protocol::TMGetObjectByHash tmBH;
2622 tmBH.set_type(protocol::TMGetObjectByHash_ObjectType_otTRANSACTIONS);
2623 tmBH.set_query(true);
2624
2625 JLOG(p_journal_.trace())
2626 << "received TMHaveTransactions " << m->hashes_size();
2627
2628 for (std::uint32_t i = 0; i < m->hashes_size(); i++)
2629 {
2630 if (!stringIsUint256Sized(m->hashes(i)))
2631 {
2632 JLOG(p_journal_.error())
2633 << "TMHaveTransactions with invalid hash size";
2634 fee_.update(Resource::feeMalformedRequest, "hash size");
2635 return;
2636 }
2637
2638 uint256 hash(m->hashes(i));
2639
2640 auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2641
2642 JLOG(p_journal_.trace()) << "checking transaction " << (bool)txn;
2643
2644 if (!txn)
2645 {
2646 JLOG(p_journal_.debug()) << "adding transaction to request";
2647
2648 auto obj = tmBH.add_objects();
2649 obj->set_hash(hash.data(), hash.size());
2650 }
2651 else
2652 {
2653 // Erase only if a peer has seen this tx. If the peer has not
2654 // seen this tx then the tx could not has been queued for this
2655 // peer.
2656 removeTxQueue(hash);
2657 }
2658 }
2659
2660 JLOG(p_journal_.trace())
2661 << "transaction request object is " << tmBH.objects_size();
2662
2663 if (tmBH.objects_size() > 0)
2664 send(std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS));
2665}
2666
2667void
2669{
2670 if (!txReduceRelayEnabled())
2671 {
2672 JLOG(p_journal_.error())
2673 << "TMTransactions: tx reduce-relay is disabled";
2674 fee_.update(Resource::feeMalformedRequest, "disabled");
2675 return;
2676 }
2677
2678 JLOG(p_journal_.trace())
2679 << "received TMTransactions " << m->transactions_size();
2680
2681 overlay_.addTxMetrics(m->transactions_size());
2682
2683 for (std::uint32_t i = 0; i < m->transactions_size(); ++i)
2684 handleTransaction(
2686 m->mutable_transactions(i), [](protocol::TMTransaction*) {}),
2687 false,
2688 true);
2689}
2690
2691void
2692PeerImp::onMessage(std::shared_ptr<protocol::TMSquelch> const& m)
2693{
2694 using on_message_fn =
2696 if (!strand_.running_in_this_thread())
2697 return post(
2698 strand_,
2699 std::bind(
2700 (on_message_fn)&PeerImp::onMessage, shared_from_this(), m));
2701
2702 if (!m->has_validatorpubkey())
2703 {
2704 fee_.update(Resource::feeInvalidData, "squelch no pubkey");
2705 return;
2706 }
2707 auto validator = m->validatorpubkey();
2708 auto const slice{makeSlice(validator)};
2709 if (!publicKeyType(slice))
2710 {
2711 fee_.update(Resource::feeInvalidData, "squelch bad pubkey");
2712 return;
2713 }
2714 PublicKey key(slice);
2715
2716 // Ignore the squelch for validator's own messages.
2717 if (key == app_.getValidationPublicKey())
2718 {
2719 JLOG(p_journal_.debug())
2720 << "onMessage: TMSquelch discarding validator's squelch " << slice;
2721 return;
2722 }
2723
2724 std::uint32_t duration =
2725 m->has_squelchduration() ? m->squelchduration() : 0;
2726 if (!m->squelch())
2727 squelch_.removeSquelch(key);
2728 else if (!squelch_.addSquelch(key, std::chrono::seconds{duration}))
2729 fee_.update(Resource::feeInvalidData, "squelch duration");
2730
2731 JLOG(p_journal_.debug())
2732 << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;
2733}
2734
2735//--------------------------------------------------------------------------
2736
2737void
2738PeerImp::addLedger(
2739 uint256 const& hash,
2740 std::lock_guard<std::mutex> const& lockedRecentLock)
2741{
2742 // lockedRecentLock is passed as a reminder that recentLock_ must be
2743 // locked by the caller.
2744 (void)lockedRecentLock;
2745
2746 if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
2747 recentLedgers_.end())
2748 return;
2749
2750 recentLedgers_.push_back(hash);
2751}
2752
2753void
2754PeerImp::doFetchPack(std::shared_ptr<protocol::TMGetObjectByHash> const& packet)
2755{
2756 // VFALCO TODO Invert this dependency using an observer and shared state
2757 // object. Don't queue fetch pack jobs if we're under load or we already
2758 // have some queued.
2759 if (app_.getFeeTrack().isLoadedLocal() ||
2760 (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
2761 (app_.getJobQueue().getJobCount(jtPACK) > 10))
2762 {
2763 JLOG(p_journal_.info()) << "Too busy to make fetch pack";
2764 return;
2765 }
2766
2767 if (!stringIsUint256Sized(packet->ledgerhash()))
2768 {
2769 JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
2770 fee_.update(Resource::feeMalformedRequest, "hash size");
2771 return;
2772 }
2773
2774 fee_.fee = Resource::feeHeavyBurdenPeer;
2775
2776 uint256 const hash{packet->ledgerhash()};
2777
2778 std::weak_ptr<PeerImp> weak = shared_from_this();
2779 auto elapsed = UptimeClock::now();
2780 auto const pap = &app_;
2781 app_.getJobQueue().addJob(
2782 jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed]() {
2783 pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
2784 });
2785}
2786
2787void
2788PeerImp::doTransactions(
2790{
2791 protocol::TMTransactions reply;
2792
2793 JLOG(p_journal_.trace()) << "received TMGetObjectByHash requesting tx "
2794 << packet->objects_size();
2795
2796 if (packet->objects_size() > reduce_relay::MAX_TX_QUEUE_SIZE)
2797 {
2798 JLOG(p_journal_.error()) << "doTransactions, invalid number of hashes";
2799 fee_.update(Resource::feeMalformedRequest, "too big");
2800 return;
2801 }
2802
2803 for (std::uint32_t i = 0; i < packet->objects_size(); ++i)
2804 {
2805 auto const& obj = packet->objects(i);
2806
2807 if (!stringIsUint256Sized(obj.hash()))
2808 {
2809 fee_.update(Resource::feeMalformedRequest, "hash size");
2810 return;
2811 }
2812
2813 uint256 hash(obj.hash());
2814
2815 auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2816
2817 if (!txn)
2818 {
2819 JLOG(p_journal_.error()) << "doTransactions, transaction not found "
2820 << Slice(hash.data(), hash.size());
2821 fee_.update(Resource::feeMalformedRequest, "tx not found");
2822 return;
2823 }
2824
2825 Serializer s;
2826 auto tx = reply.add_transactions();
2827 auto sttx = txn->getSTransaction();
2828 sttx->add(s);
2829 tx->set_rawtransaction(s.data(), s.size());
2830 tx->set_status(
2831 txn->getStatus() == INCLUDED ? protocol::tsCURRENT
2832 : protocol::tsNEW);
2833 tx->set_receivetimestamp(
2834 app_.timeKeeper().now().time_since_epoch().count());
2835 tx->set_deferred(txn->getSubmitResult().queued);
2836 }
2837
2838 if (reply.transactions_size() > 0)
2839 send(std::make_shared<Message>(reply, protocol::mtTRANSACTIONS));
2840}
2841
2842void
2843PeerImp::checkTransaction(
2844 int flags,
2845 bool checkSignature,
2846 std::shared_ptr<STTx const> const& stx,
2847 bool batch)
2848{
2849 // VFALCO TODO Rewrite to not use exceptions
2850 try
2851 {
2852 // charge strongly for relaying batch txns
2853 // LCOV_EXCL_START
2854 if (stx->isFlag(tfInnerBatchTxn) &&
2855 getCurrentTransactionRules()->enabled(featureBatch))
2856 {
2857 JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing "
2858 "tfInnerBatchTxn (checkSignature).";
2859 charge(Resource::feeModerateBurdenPeer, "inner batch txn");
2860 return;
2861 }
2862 // LCOV_EXCL_STOP
2863
2864 // Expired?
2865 if (stx->isFieldPresent(sfLastLedgerSequence) &&
2866 (stx->getFieldU32(sfLastLedgerSequence) <
2867 app_.getLedgerMaster().getValidLedgerIndex()))
2868 {
2869 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2870 charge(Resource::feeUselessData, "expired tx");
2871 return;
2872 }
2873
2874 if (isPseudoTx(*stx))
2875 {
2876 // Don't do anything with pseudo transactions except put them in the
2877 // TransactionMaster cache
2878 std::string reason;
2879 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2880 XRPL_ASSERT(
2881 tx->getStatus() == NEW,
2882 "ripple::PeerImp::checkTransaction Transaction created "
2883 "correctly");
2884 if (tx->getStatus() == NEW)
2885 {
2886 JLOG(p_journal_.debug())
2887 << "Processing " << (batch ? "batch" : "unsolicited")
2888 << " pseudo-transaction tx " << tx->getID();
2889
2890 app_.getMasterTransaction().canonicalize(&tx);
2891 // Tell the overlay about it, but don't relay it.
2892 auto const toSkip =
2893 app_.getHashRouter().shouldRelay(tx->getID());
2894 if (toSkip)
2895 {
2896 JLOG(p_journal_.debug())
2897 << "Passing skipped pseudo pseudo-transaction tx "
2898 << tx->getID();
2899 app_.overlay().relay(tx->getID(), {}, *toSkip);
2900 }
2901 if (!batch)
2902 {
2903 JLOG(p_journal_.debug())
2904 << "Charging for pseudo-transaction tx " << tx->getID();
2905 charge(Resource::feeUselessData, "pseudo tx");
2906 }
2907
2908 return;
2909 }
2910 }
2911
2912 if (checkSignature)
2913 {
2914 // Check the signature before handing off to the job queue.
2915 if (auto [valid, validReason] = checkValidity(
2916 app_.getHashRouter(),
2917 *stx,
2918 app_.getLedgerMaster().getValidatedRules(),
2919 app_.config());
2920 valid != Validity::Valid)
2921 {
2922 if (!validReason.empty())
2923 {
2924 JLOG(p_journal_.trace())
2925 << "Exception checking transaction: " << validReason;
2926 }
2927
2928 // Probably not necessary to set SF_BAD, but doesn't hurt.
2929 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2930 charge(
2931 Resource::feeInvalidSignature,
2932 "check transaction signature failure");
2933 return;
2934 }
2935 }
2936 else
2937 {
2939 app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);
2940 }
2941
2942 std::string reason;
2943 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2944
2945 if (tx->getStatus() == INVALID)
2946 {
2947 if (!reason.empty())
2948 {
2949 JLOG(p_journal_.trace())
2950 << "Exception checking transaction: " << reason;
2951 }
2952 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2953 charge(Resource::feeInvalidSignature, "tx (impossible)");
2954 return;
2955 }
2956
2957 bool const trusted(flags & SF_TRUSTED);
2958 app_.getOPs().processTransaction(
2959 tx, trusted, false, NetworkOPs::FailHard::no);
2960 }
2961 catch (std::exception const& ex)
2962 {
2963 JLOG(p_journal_.warn())
2964 << "Exception in " << __func__ << ": " << ex.what();
2965 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2966 using namespace std::string_literals;
2967 charge(Resource::feeInvalidData, "tx "s + ex.what());
2968 }
2969}
2970
2971// Called from our JobQueue
2972void
2973PeerImp::checkPropose(
2974 bool isTrusted,
2976 RCLCxPeerPos peerPos)
2977{
2978 JLOG(p_journal_.trace())
2979 << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";
2980
2981 XRPL_ASSERT(packet, "ripple::PeerImp::checkPropose : non-null packet");
2982
2983 if (!cluster() && !peerPos.checkSign())
2984 {
2985 std::string desc{"Proposal fails sig check"};
2986 JLOG(p_journal_.warn()) << desc;
2987 charge(Resource::feeInvalidSignature, desc);
2988 return;
2989 }
2990
2991 bool relay;
2992
2993 if (isTrusted)
2994 relay = app_.getOPs().processTrustedProposal(peerPos);
2995 else
2996 relay = app_.config().RELAY_UNTRUSTED_PROPOSALS == 1 || cluster();
2997
2998 if (relay)
2999 {
3000 // haveMessage contains peers, which are suppressed; i.e. the peers
3001 // are the source of the message, consequently the message should
3002 // not be relayed to these peers. But the message must be counted
3003 // as part of the squelch logic.
3004 auto haveMessage = app_.overlay().relay(
3005 *packet, peerPos.suppressionID(), peerPos.publicKey());
3006 if (!haveMessage.empty())
3007 overlay_.updateSlotAndSquelch(
3008 peerPos.suppressionID(),
3009 peerPos.publicKey(),
3010 std::move(haveMessage),
3011 protocol::mtPROPOSE_LEDGER);
3012 }
3013}
3014
3015void
3016PeerImp::checkValidation(
3018 uint256 const& key,
3020{
3021 if (!val->isValid())
3022 {
3023 std::string desc{"Validation forwarded by peer is invalid"};
3024 JLOG(p_journal_.debug()) << desc;
3025 charge(Resource::feeInvalidSignature, desc);
3026 return;
3027 }
3028
3029 // FIXME it should be safe to remove this try/catch. Investigate codepaths.
3030 try
3031 {
3032 if (app_.getOPs().recvValidation(val, std::to_string(id())) ||
3033 cluster())
3034 {
3035 // haveMessage contains peers, which are suppressed; i.e. the peers
3036 // are the source of the message, consequently the message should
3037 // not be relayed to these peers. But the message must be counted
3038 // as part of the squelch logic.
3039 auto haveMessage =
3040 overlay_.relay(*packet, key, val->getSignerPublic());
3041 if (!haveMessage.empty())
3042 {
3043 overlay_.updateSlotAndSquelch(
3044 key,
3045 val->getSignerPublic(),
3046 std::move(haveMessage),
3047 protocol::mtVALIDATION);
3048 }
3049 }
3050 }
3051 catch (std::exception const& ex)
3052 {
3053 JLOG(p_journal_.trace())
3054 << "Exception processing validation: " << ex.what();
3055 using namespace std::string_literals;
3056 charge(Resource::feeMalformedRequest, "validation "s + ex.what());
3057 }
3058}
3059
3060// Returns the set of peers that can help us get
3061// the TX tree with the specified root hash.
3062//
3064getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip)
3065{
3067 int retScore = 0;
3068
3070 if (p->hasTxSet(rootHash) && p.get() != skip)
3071 {
3072 auto score = p->getScore(true);
3073 if (!ret || (score > retScore))
3074 {
3075 ret = std::move(p);
3076 retScore = score;
3077 }
3078 }
3079 });
3080
3081 return ret;
3082}
3083
3084// Returns a random peer weighted by how likely to
3085// have the ledger and how responsive it is.
3086//
3089 OverlayImpl& ov,
3090 uint256 const& ledgerHash,
3091 LedgerIndex ledger,
3092 PeerImp const* skip)
3093{
3095 int retScore = 0;
3096
3098 if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)
3099 {
3100 auto score = p->getScore(true);
3101 if (!ret || (score > retScore))
3102 {
3103 ret = std::move(p);
3104 retScore = score;
3105 }
3106 }
3107 });
3108
3109 return ret;
3110}
3111
3112void
3113PeerImp::sendLedgerBase(
3114 std::shared_ptr<Ledger const> const& ledger,
3115 protocol::TMLedgerData& ledgerData)
3116{
3117 JLOG(p_journal_.trace()) << "sendLedgerBase: Base data";
3118
3119 Serializer s(sizeof(LedgerInfo));
3120 addRaw(ledger->info(), s);
3121 ledgerData.add_nodes()->set_nodedata(s.getDataPtr(), s.getLength());
3122
3123 auto const& stateMap{ledger->stateMap()};
3124 if (stateMap.getHash() != beast::zero)
3125 {
3126 // Return account state root node if possible
3127 Serializer root(768);
3128
3129 stateMap.serializeRoot(root);
3130 ledgerData.add_nodes()->set_nodedata(
3131 root.getDataPtr(), root.getLength());
3132
3133 if (ledger->info().txHash != beast::zero)
3134 {
3135 auto const& txMap{ledger->txMap()};
3136 if (txMap.getHash() != beast::zero)
3137 {
3138 // Return TX root node if possible
3139 root.erase();
3140 txMap.serializeRoot(root);
3141 ledgerData.add_nodes()->set_nodedata(
3142 root.getDataPtr(), root.getLength());
3143 }
3144 }
3145 }
3146
3147 auto message{
3148 std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
3149 send(message);
3150}
3151
3153PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
3154{
3155 JLOG(p_journal_.trace()) << "getLedger: Ledger";
3156
3158
3159 if (m->has_ledgerhash())
3160 {
3161 // Attempt to find ledger by hash
3162 uint256 const ledgerHash{m->ledgerhash()};
3163 ledger = app_.getLedgerMaster().getLedgerByHash(ledgerHash);
3164 if (!ledger)
3165 {
3166 JLOG(p_journal_.trace())
3167 << "getLedger: Don't have ledger with hash " << ledgerHash;
3168
3169 if (m->has_querytype() && !m->has_requestcookie())
3170 {
3171 // Attempt to relay the request to a peer
3172 if (auto const peer = getPeerWithLedger(
3173 overlay_,
3174 ledgerHash,
3175 m->has_ledgerseq() ? m->ledgerseq() : 0,
3176 this))
3177 {
3178 m->set_requestcookie(id());
3179 peer->send(
3180 std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3181 JLOG(p_journal_.debug())
3182 << "getLedger: Request relayed to peer";
3183 return ledger;
3184 }
3185
3186 JLOG(p_journal_.trace())
3187 << "getLedger: Failed to find peer to relay request";
3188 }
3189 }
3190 }
3191 else if (m->has_ledgerseq())
3192 {
3193 // Attempt to find ledger by sequence
3194 if (m->ledgerseq() < app_.getLedgerMaster().getEarliestFetch())
3195 {
3196 JLOG(p_journal_.debug())
3197 << "getLedger: Early ledger sequence request";
3198 }
3199 else
3200 {
3201 ledger = app_.getLedgerMaster().getLedgerBySeq(m->ledgerseq());
3202 if (!ledger)
3203 {
3204 JLOG(p_journal_.debug())
3205 << "getLedger: Don't have ledger with sequence "
3206 << m->ledgerseq();
3207 }
3208 }
3209 }
3210 else if (m->has_ltype() && m->ltype() == protocol::ltCLOSED)
3211 {
3212 ledger = app_.getLedgerMaster().getClosedLedger();
3213 }
3214
3215 if (ledger)
3216 {
3217 // Validate retrieved ledger sequence
3218 auto const ledgerSeq{ledger->info().seq};
3219 if (m->has_ledgerseq())
3220 {
3221 if (ledgerSeq != m->ledgerseq())
3222 {
3223 // Do not resource charge a peer responding to a relay
3224 if (!m->has_requestcookie())
3225 charge(
3226 Resource::feeMalformedRequest, "get_ledger ledgerSeq");
3227
3228 ledger.reset();
3229 JLOG(p_journal_.warn())
3230 << "getLedger: Invalid ledger sequence " << ledgerSeq;
3231 }
3232 }
3233 else if (ledgerSeq < app_.getLedgerMaster().getEarliestFetch())
3234 {
3235 ledger.reset();
3236 JLOG(p_journal_.debug())
3237 << "getLedger: Early ledger sequence request " << ledgerSeq;
3238 }
3239 }
3240 else
3241 {
3242 JLOG(p_journal_.debug()) << "getLedger: Unable to find ledger";
3243 }
3244
3245 return ledger;
3246}
3247
3249PeerImp::getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const
3250{
3251 JLOG(p_journal_.trace()) << "getTxSet: TX set";
3252
3253 uint256 const txSetHash{m->ledgerhash()};
3255 app_.getInboundTransactions().getSet(txSetHash, false)};
3256 if (!shaMap)
3257 {
3258 if (m->has_querytype() && !m->has_requestcookie())
3259 {
3260 // Attempt to relay the request to a peer
3261 if (auto const peer = getPeerWithTree(overlay_, txSetHash, this))
3262 {
3263 m->set_requestcookie(id());
3264 peer->send(
3265 std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3266 JLOG(p_journal_.debug()) << "getTxSet: Request relayed";
3267 }
3268 else
3269 {
3270 JLOG(p_journal_.debug())
3271 << "getTxSet: Failed to find relay peer";
3272 }
3273 }
3274 else
3275 {
3276 JLOG(p_journal_.debug()) << "getTxSet: Failed to find TX set";
3277 }
3278 }
3279
3280 return shaMap;
3281}
3282
3283void
3284PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
3285{
3286 // Do not resource charge a peer responding to a relay
3287 if (!m->has_requestcookie())
3288 charge(
3289 Resource::feeModerateBurdenPeer, "received a get ledger request");
3290
3293 SHAMap const* map{nullptr};
3294 protocol::TMLedgerData ledgerData;
3295 bool fatLeaves{true};
3296 auto const itype{m->itype()};
3297
3298 if (itype == protocol::liTS_CANDIDATE)
3299 {
3300 if (sharedMap = getTxSet(m); !sharedMap)
3301 return;
3302 map = sharedMap.get();
3303
3304 // Fill out the reply
3305 ledgerData.set_ledgerseq(0);
3306 ledgerData.set_ledgerhash(m->ledgerhash());
3307 ledgerData.set_type(protocol::liTS_CANDIDATE);
3308 if (m->has_requestcookie())
3309 ledgerData.set_requestcookie(m->requestcookie());
3310
3311 // We'll already have most transactions
3312 fatLeaves = false;
3313 }
3314 else
3315 {
3316 if (send_queue_.size() >= Tuning::dropSendQueue)
3317 {
3318 JLOG(p_journal_.debug())
3319 << "processLedgerRequest: Large send queue";
3320 return;
3321 }
3322 if (app_.getFeeTrack().isLoadedLocal() && !cluster())
3323 {
3324 JLOG(p_journal_.debug()) << "processLedgerRequest: Too busy";
3325 return;
3326 }
3327
3328 if (ledger = getLedger(m); !ledger)
3329 return;
3330
3331 // Fill out the reply
3332 auto const ledgerHash{ledger->info().hash};
3333 ledgerData.set_ledgerhash(ledgerHash.begin(), ledgerHash.size());
3334 ledgerData.set_ledgerseq(ledger->info().seq);
3335 ledgerData.set_type(itype);
3336 if (m->has_requestcookie())
3337 ledgerData.set_requestcookie(m->requestcookie());
3338
3339 switch (itype)
3340 {
3341 case protocol::liBASE:
3342 sendLedgerBase(ledger, ledgerData);
3343 return;
3344
3345 case protocol::liTX_NODE:
3346 map = &ledger->txMap();
3347 JLOG(p_journal_.trace()) << "processLedgerRequest: TX map hash "
3348 << to_string(map->getHash());
3349 break;
3350
3351 case protocol::liAS_NODE:
3352 map = &ledger->stateMap();
3353 JLOG(p_journal_.trace())
3354 << "processLedgerRequest: Account state map hash "
3355 << to_string(map->getHash());
3356 break;
3357
3358 default:
3359 // This case should not be possible here
3360 JLOG(p_journal_.error())
3361 << "processLedgerRequest: Invalid ledger info type";
3362 return;
3363 }
3364 }
3365
3366 if (!map)
3367 {
3368 JLOG(p_journal_.warn()) << "processLedgerRequest: Unable to find map";
3369 return;
3370 }
3371
3372 // Add requested node data to reply
3373 if (m->nodeids_size() > 0)
3374 {
3375 auto const queryDepth{
3376 m->has_querydepth() ? m->querydepth() : (isHighLatency() ? 2 : 1)};
3377
3379
3380 for (int i = 0; i < m->nodeids_size() &&
3381 ledgerData.nodes_size() < Tuning::softMaxReplyNodes;
3382 ++i)
3383 {
3384 auto const shaMapNodeId{deserializeSHAMapNodeID(m->nodeids(i))};
3385
3386 data.clear();
3387 data.reserve(Tuning::softMaxReplyNodes);
3388
3389 try
3390 {
3391 if (map->getNodeFat(*shaMapNodeId, data, fatLeaves, queryDepth))
3392 {
3393 JLOG(p_journal_.trace())
3394 << "processLedgerRequest: getNodeFat got "
3395 << data.size() << " nodes";
3396
3397 for (auto const& d : data)
3398 {
3399 if (ledgerData.nodes_size() >=
3400 Tuning::hardMaxReplyNodes)
3401 break;
3402 protocol::TMLedgerNode* node{ledgerData.add_nodes()};
3403 node->set_nodeid(d.first.getRawString());
3404 node->set_nodedata(d.second.data(), d.second.size());
3405 }
3406 }
3407 else
3408 {
3409 JLOG(p_journal_.warn())
3410 << "processLedgerRequest: getNodeFat returns false";
3411 }
3412 }
3413 catch (std::exception const& e)
3414 {
3415 std::string info;
3416 switch (itype)
3417 {
3418 case protocol::liBASE:
3419 // This case should not be possible here
3420 info = "Ledger base";
3421 break;
3422
3423 case protocol::liTX_NODE:
3424 info = "TX node";
3425 break;
3426
3427 case protocol::liAS_NODE:
3428 info = "AS node";
3429 break;
3430
3431 case protocol::liTS_CANDIDATE:
3432 info = "TS candidate";
3433 break;
3434
3435 default:
3436 info = "Invalid";
3437 break;
3438 }
3439
3440 if (!m->has_ledgerhash())
3441 info += ", no hash specified";
3442
3443 JLOG(p_journal_.error())
3444 << "processLedgerRequest: getNodeFat with nodeId "
3445 << *shaMapNodeId << " and ledger info type " << info
3446 << " throws exception: " << e.what();
3447 }
3448 }
3449
3450 JLOG(p_journal_.info())
3451 << "processLedgerRequest: Got request for " << m->nodeids_size()
3452 << " nodes at depth " << queryDepth << ", return "
3453 << ledgerData.nodes_size() << " nodes";
3454 }
3455
3456 if (ledgerData.nodes_size() == 0)
3457 return;
3458
3459 send(std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA));
3460}
3461
3462int
3463PeerImp::getScore(bool haveItem) const
3464{
3465 // Random component of score, used to break ties and avoid
3466 // overloading the "best" peer
3467 static int const spRandomMax = 9999;
3468
3469 // Score for being very likely to have the thing we are
3470 // look for; should be roughly spRandomMax
3471 static int const spHaveItem = 10000;
3472
3473 // Score reduction for each millisecond of latency; should
3474 // be roughly spRandomMax divided by the maximum reasonable
3475 // latency
3476 static int const spLatency = 30;
3477
3478 // Penalty for unknown latency; should be roughly spRandomMax
3479 static int const spNoLatency = 8000;
3480
3481 int score = rand_int(spRandomMax);
3482
3483 if (haveItem)
3484 score += spHaveItem;
3485
3487 {
3488 std::lock_guard sl(recentLock_);
3489 latency = latency_;
3490 }
3491
3492 if (latency)
3493 score -= latency->count() * spLatency;
3494 else
3495 score -= spNoLatency;
3496
3497 return score;
3498}
3499
3500bool
3501PeerImp::isHighLatency() const
3502{
3503 std::lock_guard sl(recentLock_);
3504 return latency_ >= peerHighLatency;
3505}
3506
3507void
3508PeerImp::Metrics::add_message(std::uint64_t bytes)
3509{
3510 using namespace std::chrono_literals;
3511 std::unique_lock lock{mutex_};
3512
3513 totalBytes_ += bytes;
3514 accumBytes_ += bytes;
3515 auto const timeElapsed = clock_type::now() - intervalStart_;
3516 auto const timeElapsedInSecs =
3517 std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);
3518
3519 if (timeElapsedInSecs >= 1s)
3520 {
3521 auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
3522 rollingAvg_.push_back(avgBytes);
3523
3524 auto const totalBytes =
3525 std::accumulate(rollingAvg_.begin(), rollingAvg_.end(), 0ull);
3526 rollingAvgBytes_ = totalBytes / rollingAvg_.size();
3527
3528 intervalStart_ = clock_type::now();
3529 accumBytes_ = 0;
3530 }
3531}
3532
3534PeerImp::Metrics::average_bytes() const
3535{
3536 std::shared_lock lock{mutex_};
3537 return rollingAvgBytes_;
3538}
3539
3541PeerImp::Metrics::total_bytes() const
3542{
3543 std::shared_lock lock{mutex_};
3544 return totalBytes_;
3545}
3546
3547} // namespace ripple
T accumulate(T... args)
T bind(T... args)
Represents a JSON value.
Definition: json_value.h:150
A version-independent IP address and port combination.
Definition: IPEndpoint.h:39
Address const & address() const
Returns the address portion of this endpoint.
Definition: IPEndpoint.h:76
static std::optional< Endpoint > from_string_checked(std::string const &s)
Create an Endpoint from a string.
Definition: IPEndpoint.cpp:45
Endpoint at_port(Port port) const
Returns a new Endpoint with a different port.
Definition: IPEndpoint.h:69
static Endpoint from_string(std::string const &s)
Definition: IPEndpoint.cpp:59
std::string to_string() const
Returns a string representing the endpoint.
Definition: IPEndpoint.cpp:67
Stream error() const
Definition: Journal.h:346
Stream debug() const
Definition: Journal.h:328
bool active(Severity level) const
Returns true if any message would be logged at this severity level.
Definition: Journal.h:314
Stream info() const
Definition: Journal.h:334
Stream trace() const
Severity stream access functions.
Definition: Journal.h:322
Stream warn() const
Definition: Journal.h:340
virtual Config & config()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual TimeKeeper & timeKeeper()=0
virtual JobQueue & getJobQueue()=0
virtual NetworkOPs & getOPs()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual Cluster & cluster()=0
virtual HashRouter & getHashRouter()=0
void for_each(std::function< void(ClusterNode const &)> func) const
Invokes the callback once for every cluster node.
Definition: Cluster.cpp:83
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:49
bool update(PublicKey const &identity, std::string name, std::uint32_t loadFee=0, NetClock::time_point reportTime=NetClock::time_point{})
Store information about the state of a cluster node.
Definition: Cluster.cpp:57
std::optional< std::string > member(PublicKey const &node) const
Determines whether a node belongs in the cluster.
Definition: Cluster.cpp:38
bool VP_REDUCE_RELAY_BASE_SQUELCH_ENABLE
Definition: Config.h:248
bool TX_REDUCE_RELAY_METRICS
Definition: Config.h:265
int MAX_TRANSACTIONS
Definition: Config.h:226
std::chrono::seconds MAX_DIVERGED_TIME
Definition: Config.h:284
std::chrono::seconds MAX_UNKNOWN_TIME
Definition: Config.h:281
bool shouldProcess(uint256 const &key, PeerShortID peer, int &flags, std::chrono::seconds tx_interval)
Definition: HashRouter.cpp:79
bool addSuppressionPeer(uint256 const &key, PeerShortID peer)
Definition: HashRouter.cpp:52
std::unique_ptr< LoadEvent > makeLoadEvent(JobType t, std::string const &name)
Return a scoped LoadEvent.
Definition: JobQueue.cpp:179
int getJobCount(JobType t) const
Jobs waiting at this priority.
Definition: JobQueue.cpp:142
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
LedgerIndex getValidLedgerIndex()
std::chrono::seconds getValidatedLedgerAge()
void setClusterFee(std::uint32_t fee)
Definition: LoadFeeTrack.h:113
static std::size_t messageSize(::google::protobuf::Message const &message)
Definition: Message.cpp:55
virtual bool isNeedNetworkLedger()=0
PeerFinder::Manager & peerFinder()
Definition: OverlayImpl.h:161
void activate(std::shared_ptr< PeerImp > const &peer)
Called when a peer has connected successfully This is called after the peer handshake has been comple...
void deletePeer(Peer::id_t id)
Called when the peer is deleted.
void incPeerDisconnect() override
Increment and retrieve counters for total peer disconnects, and disconnects we initiate for excessive...
Definition: OverlayImpl.h:366
void addTxMetrics(Args... args)
Add tx reduce-relay metrics.
Definition: OverlayImpl.h:437
void onPeerDeactivate(Peer::id_t id)
void remove(std::shared_ptr< PeerFinder::Slot > const &slot)
void reportOutboundTraffic(TrafficCount::category cat, int bytes)
void for_each(UnaryFunc &&f) const
Definition: OverlayImpl.h:278
Resource::Manager & resourceManager()
Definition: OverlayImpl.h:167
void reportInboundTraffic(TrafficCount::category cat, int bytes)
void onManifests(std::shared_ptr< protocol::TMManifests > const &m, std::shared_ptr< PeerImp > const &from)
Setup const & setup() const
Definition: OverlayImpl.h:173
std::shared_ptr< Message > getManifestsMessage()
void incPeerDisconnectCharges() override
Definition: OverlayImpl.h:378
void incJqTransOverflow() override
Increment and retrieve counter for transaction job queue overflows.
Definition: OverlayImpl.h:354
virtual void on_endpoints(std::shared_ptr< Slot > const &slot, Endpoints const &endpoints)=0
Called when mtENDPOINTS is received.
virtual Config config()=0
Returns the configuration for the manager.
virtual void on_closed(std::shared_ptr< Slot > const &slot)=0
Called when the slot is closed.
virtual void on_failure(std::shared_ptr< Slot > const &slot)=0
Called when an outbound connection is deemed to have failed.
std::queue< std::shared_ptr< Message > > send_queue_
Definition: PeerImp.h:176
std::unique_ptr< LoadEvent > load_event_
Definition: PeerImp.h:179
boost::beast::http::fields const & headers_
Definition: PeerImp.h:175
void onMessageEnd(std::uint16_t type, std::shared_ptr<::google::protobuf::Message > const &m)
Definition: PeerImp.cpp:1059
bool hasLedger(uint256 const &hash, std::uint32_t seq) const override
Definition: PeerImp.cpp:524
clock_type::duration uptime() const
Definition: PeerImp.h:364
void removeTxQueue(uint256 const &hash) override
Remove transaction's hash from the transactions' hashes queue.
Definition: PeerImp.cpp:340
protocol::TMStatusChange last_status_
Definition: PeerImp.h:168
boost::shared_mutex nameMutex_
Definition: PeerImp.h:101
std::string name_
Definition: PeerImp.h:100
boost::circular_buffer< uint256 > recentTxSets_
Definition: PeerImp.h:111
std::unique_ptr< stream_type > stream_ptr_
Definition: PeerImp.h:77
void onMessage(std::shared_ptr< protocol::TMManifests > const &m)
Definition: PeerImp.cpp:1068
bool detaching_
Definition: PeerImp.h:97
Tracking
Whether the peer's view of the ledger converges or diverges from ours.
Definition: PeerImp.h:57
Compressed compressionEnabled_
Definition: PeerImp.h:184
uint256 closedLedgerHash_
Definition: PeerImp.h:107
std::string domain() const
Definition: PeerImp.cpp:842
std::optional< std::uint32_t > lastPingSeq_
Definition: PeerImp.h:114
void onTimer(boost::system::error_code const &ec)
Definition: PeerImp.cpp:690
bool gracefulClose_
Definition: PeerImp.h:177
beast::Journal const journal_
Definition: PeerImp.h:75
virtual void run()
Definition: PeerImp.cpp:156
struct ripple::PeerImp::@21 metrics_
void gracefulClose()
Definition: PeerImp.cpp:634
LedgerIndex maxLedger_
Definition: PeerImp.h:106
beast::Journal const p_journal_
Definition: PeerImp.h:76
void cancelTimer()
Definition: PeerImp.cpp:673
bool const inbound_
Definition: PeerImp.h:90
PeerImp(PeerImp const &)=delete
Application & app_
Definition: PeerImp.h:71
void stop() override
Definition: PeerImp.cpp:214
bool hasRange(std::uint32_t uMin, std::uint32_t uMax) override
Definition: PeerImp.cpp:566
bool hasTxSet(uint256 const &hash) const override
Definition: PeerImp.cpp:548
clock_type::time_point lastPingTime_
Definition: PeerImp.h:115
void onMessageUnknown(std::uint16_t type)
Definition: PeerImp.cpp:1010
std::shared_ptr< PeerFinder::Slot > const slot_
Definition: PeerImp.h:171
boost::circular_buffer< uint256 > recentLedgers_
Definition: PeerImp.h:110
id_t const id_
Definition: PeerImp.h:72
std::optional< std::chrono::milliseconds > latency_
Definition: PeerImp.h:113
void handleTransaction(std::shared_ptr< protocol::TMTransaction > const &m, bool eraseTxQueue, bool batch)
Called from onMessage(TMTransaction(s)).
Definition: PeerImp.cpp:1260
beast::IP::Endpoint const remote_address_
Definition: PeerImp.h:85
Json::Value json() override
Definition: PeerImp.cpp:389
PublicKey const publicKey_
Definition: PeerImp.h:99
hash_set< uint256 > txQueue_
Definition: PeerImp.h:189
std::mutex recentLock_
Definition: PeerImp.h:167
void doAccept()
Definition: PeerImp.cpp:767
void onMessageBegin(std::uint16_t type, std::shared_ptr<::google::protobuf::Message > const &m, std::size_t size, std::size_t uncompressed_size, bool isCompressed)
Definition: PeerImp.cpp:1016
bool txReduceRelayEnabled_
Definition: PeerImp.h:191
clock_type::time_point trackingTime_
Definition: PeerImp.h:96
socket_type & socket_
Definition: PeerImp.h:78
ProtocolVersion protocol_
Definition: PeerImp.h:93
reduce_relay::Squelch< UptimeClock > squelch_
Definition: PeerImp.h:118
std::string getVersion() const
Return the version of rippled that the peer is running, if reported.
Definition: PeerImp.cpp:381
uint256 previousLedgerHash_
Definition: PeerImp.h:108
void charge(Resource::Charge const &fee, std::string const &context) override
Adjust this peer's load balance based on the type of load imposed.
Definition: PeerImp.cpp:352
void setTimer()
Definition: PeerImp.cpp:655
void send(std::shared_ptr< Message > const &m) override
Definition: PeerImp.cpp:240
static std::string makePrefix(id_t id)
Definition: PeerImp.cpp:682
std::string name() const
Definition: PeerImp.cpp:835
boost::system::error_code error_code
Definition: PeerImp.h:61
void onReadMessage(error_code ec, std::size_t bytes_transferred)
Definition: PeerImp.cpp:890
bool ledgerReplayEnabled_
Definition: PeerImp.h:193
boost::asio::basic_waitable_timer< std::chrono::steady_clock > waitable_timer
Definition: PeerImp.h:68
bool crawl() const
Returns true if this connection will publicly share its IP address.
Definition: PeerImp.cpp:366
waitable_timer timer_
Definition: PeerImp.h:81
void sendTxQueue() override
Send aggregated transactions' hashes.
Definition: PeerImp.cpp:304
bool txReduceRelayEnabled() const override
Definition: PeerImp.h:437
bool supportsFeature(ProtocolFeature f) const override
Definition: PeerImp.cpp:507
ChargeWithContext fee_
Definition: PeerImp.h:170
void onWriteMessage(error_code ec, std::size_t bytes_transferred)
Definition: PeerImp.cpp:954
http_request_type request_
Definition: PeerImp.h:173
OverlayImpl & overlay_
Definition: PeerImp.h:89
LedgerIndex minLedger_
Definition: PeerImp.h:105
virtual ~PeerImp()
Definition: PeerImp.cpp:133
void addTxQueue(uint256 const &hash) override
Add transaction's hash to the transactions' hashes queue.
Definition: PeerImp.cpp:323
int large_sendq_
Definition: PeerImp.h:178
stream_type & stream_
Definition: PeerImp.h:79
bool cluster() const override
Returns true if this connection is a member of the cluster.
Definition: PeerImp.cpp:375
void onShutdown(error_code ec)
Definition: PeerImp.cpp:751
boost::asio::strand< boost::asio::executor > strand_
Definition: PeerImp.h:80
void cycleStatus() override
Definition: PeerImp.cpp:556
boost::beast::multi_buffer read_buffer_
Definition: PeerImp.h:172
Resource::Consumer usage_
Definition: PeerImp.h:169
void ledgerRange(std::uint32_t &minSeq, std::uint32_t &maxSeq) const override
Definition: PeerImp.cpp:539
void doProtocolStart()
Definition: PeerImp.cpp:852
void fail(std::string const &reason)
Definition: PeerImp.cpp:600
std::atomic< Tracking > tracking_
Definition: PeerImp.h:95
Represents a peer connection in the overlay.
A public key.
Definition: PublicKey.h:62
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:44
bool checkSign() const
Verify the signing hash of the proposal.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Definition: RCLCxPeerPos.h:78
uint256 const & suppressionID() const
Unique id used by hash router to suppress duplicates.
Definition: RCLCxPeerPos.h:85
A consumption charge.
Definition: Charge.h:31
An endpoint that consumes resources.
Definition: Consumer.h:35
int balance()
Returns the credit balance representing consumption.
Definition: Consumer.cpp:137
bool disconnect(beast::Journal const &j)
Returns true if the consumer should be disconnected.
Definition: Consumer.cpp:124
Disposition charge(Charge const &fee, std::string const &context={})
Apply a load charge to the consumer.
Definition: Consumer.cpp:106
virtual void importConsumers(std::string const &origin, Gossip const &gossip)=0
Import packaged consumer information.
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition: SHAMap.h:98
std::size_t size() const noexcept
Definition: Serializer.h:73
void const * data() const noexcept
Definition: Serializer.h:79
int getLength() const
Definition: Serializer.h:234
void const * getDataPtr() const
Definition: Serializer.h:224
An immutable linear range of bytes.
Definition: Slice.h:46
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
static category categorize(::google::protobuf::Message const &message, protocol::MessageType type, bool inbound)
Given a protocol message, determine which traffic category it belongs to.
static void sendValidatorList(Peer &peer, std::uint64_t peerSequence, PublicKey const &publisherKey, std::size_t maxSequence, std::uint32_t rawVersion, std::string const &rawManifest, std::map< std::size_t, ValidatorBlobInfo > const &blobInfos, HashRouter &hashRouter, beast::Journal j)
void for_each_available(std::function< void(std::string const &manifest, std::uint32_t version, std::map< std::size_t, ValidatorBlobInfo > const &blobInfos, PublicKey const &pubKey, std::size_t maxSequence, uint256 const &hash)> func) const
Invokes the callback once for every available publisher list's raw data members.
pointer data()
Definition: base_uint.h:125
static constexpr std::size_t size()
Definition: base_uint.h:526
constexpr bool parseHex(std::string_view sv)
Parse a hex string into a base_uint.
Definition: base_uint.h:503
T emplace_back(T... args)
T empty(T... args)
T find(T... args)
T for_each(T... args)
T get(T... args)
T load(T... args)
T lock(T... args)
T max(T... args)
T min(T... args)
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:46
unsigned int UInt
Definition: json_forwards.h:27
Charge const feeMalformedRequest
Schedule of fees charged for imposing load on the server.
Charge const feeInvalidData
Charge const feeUselessData
Charge const feeTrivialPeer
Charge const feeModerateBurdenPeer
std::size_t constexpr readBufferBytes
Size of buffer used to read from the socket.
@ targetSendQueue
How many messages we consider reasonable sustained on a send queue.
@ maxQueryDepth
The maximum number of levels to search.
@ sendqIntervals
How many timer intervals a sendq has to stay large before we disconnect.
@ sendQueueLogFreq
How often to log send queue size.
TER valid(PreclaimContext const &ctx, AccountID const &src)
auto measureDurationAndLog(Func &&func, std::string const &actionDescription, std::chrono::duration< Rep, Period > maxDelay, beast::Journal const &journal)
Definition: PerfLog.h:187
static constexpr std::size_t MAX_TX_QUEUE_SIZE
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::string protocolMessageName(int type)
Returns the name of a protocol message given its type.
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:114
uint256 proposalUniqueId(uint256 const &proposeHash, uint256 const &previousLedger, std::uint32_t proposeSeq, NetClock::time_point closeTime, Slice const &publicKey, Slice const &signature)
Calculate a unique identifier for a signed proposal.
constexpr ProtocolVersion make_protocol(std::uint16_t major, std::uint16_t minor)
bool isPseudoTx(STObject const &tx)
Check whether a transaction is a pseudo-transaction.
Definition: STTx.cpp:814
@ INCLUDED
Definition: Transaction.h:49
@ INVALID
Definition: Transaction.h:48
std::optional< SHAMapNodeID > deserializeSHAMapNodeID(void const *data, std::size_t size)
Return an object representing a serialized SHAMap Node ID.
static constexpr char FEATURE_COMPR[]
Definition: Handshake.h:141
bool isCurrent(ValidationParms const &p, NetClock::time_point now, NetClock::time_point signTime, NetClock::time_point seenTime)
Whether a validation is still current.
Definition: Validations.h:149
std::string base64_decode(std::string_view data)
Definition: base64.cpp:248
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
Definition: BasicConfig.h:315
http_response_type makeResponse(bool crawlPublic, http_request_type const &req, beast::IP::Address public_ip, beast::IP::Address remote_ip, uint256 const &sharedValue, std::optional< std::uint32_t > networkID, ProtocolVersion protocol, Application &app)
Make http response.
Definition: Handshake.cpp:392
static bool stringIsUint256Sized(std::string const &pBuffStr)
Definition: PeerImp.cpp:150
static constexpr char FEATURE_LEDGER_REPLAY[]
Definition: Handshake.h:147
std::pair< std::size_t, boost::system::error_code > invokeProtocolMessage(Buffers const &buffers, Handler &handler, std::size_t &hint)
Calls the handler for up to one protocol message in the passed buffers.
std::optional< uint256 > makeSharedValue(stream_type &ssl, beast::Journal journal)
Computes a shared value based on the SSL connection state.
Definition: Handshake.cpp:146
std::optional< KeyType > publicKeyType(Slice const &slice)
Returns the type of public key.
Definition: PublicKey.cpp:223
std::enable_if_t< std::is_integral< Integral >::value &&detail::is_engine< Engine >::value, Integral > rand_int(Engine &engine, Integral min, Integral max)
Return a uniformly distributed random integer.
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
static std::shared_ptr< PeerImp > getPeerWithLedger(OverlayImpl &ov, uint256 const &ledgerHash, LedgerIndex ledger, PeerImp const *skip)
Definition: PeerImp.cpp:3088
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:244
Stopwatch & stopwatch()
Returns an instance of a wall clock.
Definition: chrono.h:119
boost::beast::http::request< boost::beast::http::dynamic_body > http_request_type
Definition: Handoff.h:33
NodeID calcNodeID(PublicKey const &)
Calculate the 160-bit node ID from a node public key.
Definition: PublicKey.cpp:319
static std::shared_ptr< PeerImp > getPeerWithTree(OverlayImpl &ov, uint256 const &rootHash, PeerImp const *skip)
Definition: PeerImp.cpp:3064
bool peerFeatureEnabled(headers const &request, std::string const &feature, std::string value, bool config)
Check if a feature should be enabled for a peer.
Definition: Handshake.h:198
void forceValidity(HashRouter &router, uint256 const &txid, Validity validity)
Sets the validity of a given transaction in the cache.
Definition: apply.cpp:113
static constexpr char FEATURE_TXRR[]
Definition: Handshake.h:145
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:630
std::optional< Rules > const & getCurrentTransactionRules()
Definition: Rules.cpp:47
Number root(Number f, unsigned d)
Definition: Number.cpp:636
@ manifest
Manifest.
@ proposal
proposal for signing
void addRaw(LedgerHeader const &, Serializer &, bool includeHash=false)
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:39
@ jtLEDGER_REQ
Definition: Job.h:59
@ jtPROPOSAL_ut
Definition: Job.h:60
@ jtREPLAY_REQ
Definition: Job.h:58
@ jtTRANSACTION
Definition: Job.h:62
@ jtPEER
Definition: Job.h:80
@ jtREQUESTED_TXN
Definition: Job.h:64
@ jtMISSING_TXN
Definition: Job.h:63
@ jtVALIDATION_t
Definition: Job.h:71
@ jtMANIFEST
Definition: Job.h:55
@ jtTXN_DATA
Definition: Job.h:69
@ jtPACK
Definition: Job.h:43
@ jtVALIDATION_ut
Definition: Job.h:54
@ jtPROPOSAL_t
Definition: Job.h:74
sha512_half_hasher::result_type sha512Half(Args const &... args)
Returns the SHA512-Half of a series of objects.
Definition: digest.h:225
static constexpr char FEATURE_VPRR[]
Definition: Handshake.h:143
constexpr std::uint32_t tfInnerBatchTxn
Definition: TxFlags.h:61
STL namespace.
T nth_element(T... args)
T ref(T... args)
T reserve(T... args)
T reset(T... args)
T setfill(T... args)
T setw(T... args)
T size(T... args)
T str(T... args)
Information about the notional ledger backing the view.
Definition: LedgerHeader.h:34
beast::IP::Address public_ip
Definition: Overlay.h:69
std::optional< std::uint32_t > networkID
Definition: Overlay.h:72
bool peerPrivate
true if we want our IP address kept private.
void update(Resource::Charge f, std::string const &add)
Definition: PeerImp.h:153
Describes a single consumer.
Definition: Gossip.h:35
beast::IP::Endpoint address
Definition: Gossip.h:39
Data format for exchanging consumption information across peers.
Definition: Gossip.h:30
std::vector< Item > items
Definition: Gossip.h:42
T tie(T... args)
T to_string(T... args)
T what(T... args)