PeerImp.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLValidations.h>
21#include <xrpld/app/ledger/InboundLedgers.h>
22#include <xrpld/app/ledger/InboundTransactions.h>
23#include <xrpld/app/ledger/LedgerMaster.h>
24#include <xrpld/app/ledger/TransactionMaster.h>
25#include <xrpld/app/misc/HashRouter.h>
26#include <xrpld/app/misc/LoadFeeTrack.h>
27#include <xrpld/app/misc/NetworkOPs.h>
28#include <xrpld/app/misc/Transaction.h>
29#include <xrpld/app/misc/ValidatorList.h>
30#include <xrpld/app/tx/apply.h>
31#include <xrpld/overlay/Cluster.h>
32#include <xrpld/overlay/detail/PeerImp.h>
33#include <xrpld/overlay/detail/Tuning.h>
34#include <xrpld/perflog/PerfLog.h>
35#include <xrpl/basics/UptimeClock.h>
36#include <xrpl/basics/base64.h>
37#include <xrpl/basics/random.h>
38#include <xrpl/basics/safe_cast.h>
39#include <xrpl/protocol/digest.h>
40
41#include <boost/algorithm/string/predicate.hpp>
42#include <boost/beast/core/ostream.hpp>
43
44#include <algorithm>
45#include <memory>
46#include <mutex>
47#include <numeric>
48#include <sstream>
49
50using namespace std::chrono_literals;
51
52namespace ripple {
53
54namespace {
56std::chrono::milliseconds constexpr peerHighLatency{300};
57
59std::chrono::seconds constexpr peerTimerInterval{60};
60} // namespace
61
62// TODO: Remove this exclusion once unit tests are added after the hotfix
63// release.
64
65PeerImp::PeerImp(
66 Application& app,
67 id_t id,
68 std::shared_ptr<PeerFinder::Slot> const& slot,
69 http_request_type&& request,
70 PublicKey const& publicKey,
71 ProtocolVersion protocol,
72 Resource::Consumer consumer,
73 std::unique_ptr<stream_type>&& stream_ptr,
74 OverlayImpl& overlay)
75 : Child(overlay)
76 , app_(app)
77 , id_(id)
78 , sink_(app_.journal("Peer"), makePrefix(id))
79 , p_sink_(app_.journal("Protocol"), makePrefix(id))
80 , journal_(sink_)
81 , p_journal_(p_sink_)
82 , stream_ptr_(std::move(stream_ptr))
83 , socket_(stream_ptr_->next_layer().socket())
84 , stream_(*stream_ptr_)
85 , strand_(socket_.get_executor())
86 , timer_(waitable_timer{socket_.get_executor()})
87 , remote_address_(slot->remote_endpoint())
88 , overlay_(overlay)
89 , inbound_(true)
90 , protocol_(protocol)
91 , tracking_(Tracking::unknown)
92 , trackingTime_(clock_type::now())
93 , publicKey_(publicKey)
94 , lastPingTime_(clock_type::now())
95 , creationTime_(clock_type::now())
96 , squelch_(app_.journal("Squelch"))
97 , usage_(consumer)
98 , fee_{Resource::feeTrivialPeer, ""}
99 , slot_(slot)
100 , request_(std::move(request))
101 , headers_(request_)
102 , compressionEnabled_(
104 headers_,
106 "lz4",
107 app_.config().COMPRESSION)
108 ? Compressed::On
109 : Compressed::Off)
110 , txReduceRelayEnabled_(peerFeatureEnabled(
111 headers_,
113 app_.config().TX_REDUCE_RELAY_ENABLE))
114 , vpReduceRelayEnabled_(peerFeatureEnabled(
115 headers_,
117 app_.config().VP_REDUCE_RELAY_ENABLE))
118 , ledgerReplayEnabled_(peerFeatureEnabled(
119 headers_,
121 app_.config().LEDGER_REPLAY))
122 , ledgerReplayMsgHandler_(app, app.getLedgerReplayer())
123{
124 JLOG(journal_.info()) << "compression enabled "
125 << (compressionEnabled_ == Compressed::On)
126 << " vp reduce-relay enabled "
128 << " tx reduce-relay enabled "
130 << " " << id_;
131}
132
134{
135 const bool inCluster{cluster()};
136
141
142 if (inCluster)
143 {
144 JLOG(journal_.warn()) << name() << " left cluster";
145 }
146}
147
148// Helper function to check for valid uint256 values in protobuf buffers
149static bool
150stringIsUint256Sized(std::string const& pBuffStr)
151{
152 return pBuffStr.size() == uint256::size();
153}
154
155void
157{
158 if (!strand_.running_in_this_thread())
160
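    // The Closed-Ledger / Previous-Ledger handshake headers may carry the hash either hex-encoded or base64-encoded; this parser accepts both forms.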
161 auto parseLedgerHash =
162 [](std::string_view value) -> std::optional<uint256> {
163 if (uint256 ret; ret.parseHex(value))
164 return ret;
165
166 if (auto const s = base64_decode(value); s.size() == uint256::size())
167 return uint256{s};
168
169 return std::nullopt;
170 };
171
173 std::optional<uint256> previous;
174
175 if (auto const iter = headers_.find("Closed-Ledger");
176 iter != headers_.end())
177 {
178 closed = parseLedgerHash(iter->value());
179
180 if (!closed)
181 fail("Malformed handshake data (1)");
182 }
183
184 if (auto const iter = headers_.find("Previous-Ledger");
185 iter != headers_.end())
186 {
187 previous = parseLedgerHash(iter->value());
188
189 if (!previous)
190 fail("Malformed handshake data (2)");
191 }
192
193 if (previous && !closed)
194 fail("Malformed handshake data (3)");
195
196 {
198 if (closed)
199 closedLedgerHash_ = *closed;
200 if (previous)
201 previousLedgerHash_ = *previous;
202 }
203
204 if (inbound_)
205 doAccept();
206 else
208
209 // Anything else that needs to be done with the connection should be
210 // done in doProtocolStart
211}
212
213void
215{
216 if (!strand_.running_in_this_thread())
218 if (socket_.is_open())
219 {
220 // The rationale for using different severity levels is that
221 // outbound connections are under our control and may be logged
222 // at a higher level, but inbound connections are more numerous and
223 // uncontrolled so to prevent log flooding the severity is reduced.
224 //
225 if (inbound_)
226 {
227 JLOG(journal_.debug()) << "Stop";
228 }
229 else
230 {
231 JLOG(journal_.info()) << "Stop";
232 }
233 }
234 close();
235}
236
237//------------------------------------------------------------------------------
238
239void
241{
242 if (!strand_.running_in_this_thread())
243 return post(strand_, std::bind(&PeerImp::send, shared_from_this(), m));
244 if (gracefulClose_)
245 return;
246 if (detaching_)
247 return;
248
249 auto validator = m->getValidatorKey();
250 if (validator && !squelch_.expireSquelch(*validator))
251 return;
252
254 safe_cast<TrafficCount::category>(m->getCategory()),
255 false,
256 static_cast<int>(m->getBuffer(compressionEnabled_).size()));
257
258 auto sendq_size = send_queue_.size();
259
260 if (sendq_size < Tuning::targetSendQueue)
261 {
262 // To detect a peer that does not read from their
263 // side of the connection, we expect a peer to have
264 // a small sendq periodically
265 large_sendq_ = 0;
266 }
267 else if (auto sink = journal_.debug();
268 sink && (sendq_size % Tuning::sendQueueLogFreq) == 0)
269 {
270 std::string const n = name();
271 sink << (n.empty() ? remote_address_.to_string() : n)
272 << " sendq: " << sendq_size;
273 }
274
275 send_queue_.push(m);
276
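    // If the queue already held messages, a write is in flight and onWriteMessage will chain the next send; only start a new async_write when the queue was previously empty.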
277 if (sendq_size != 0)
278 return;
279
280 boost::asio::async_write(
281 stream_,
282 boost::asio::buffer(
283 send_queue_.front()->getBuffer(compressionEnabled_)),
284 bind_executor(
285 strand_,
286 std::bind(
289 std::placeholders::_1,
290 std::placeholders::_2)));
291}
292
293void
295{
296 if (!strand_.running_in_this_thread())
297 return post(
299
300 if (!txQueue_.empty())
301 {
302 protocol::TMHaveTransactions ht;
303 std::for_each(txQueue_.begin(), txQueue_.end(), [&](auto const& hash) {
304 ht.add_hashes(hash.data(), hash.size());
305 });
306 JLOG(p_journal_.trace()) << "sendTxQueue " << txQueue_.size();
307 txQueue_.clear();
308 send(std::make_shared<Message>(ht, protocol::mtHAVE_TRANSACTIONS));
309 }
310}
311
312void
314{
315 if (!strand_.running_in_this_thread())
316 return post(
318
320 {
321 JLOG(p_journal_.warn()) << "addTxQueue exceeds the cap";
322 sendTxQueue();
323 }
324
325 txQueue_.insert(hash);
326 JLOG(p_journal_.trace()) << "addTxQueue " << txQueue_.size();
327}
328
329void
331{
332 if (!strand_.running_in_this_thread())
333 return post(
334 strand_,
336
337 auto removed = txQueue_.erase(hash);
338 JLOG(p_journal_.trace()) << "removeTxQueue " << removed;
339}
340
341void
343{
344 if ((usage_.charge(fee, context) == Resource::drop) &&
345 usage_.disconnect(p_journal_) && strand_.running_in_this_thread())
346 {
347 // Sever the connection
349 fail("charge: Resources");
350 }
351}
352
353//------------------------------------------------------------------------------
354
355bool
357{
358 auto const iter = headers_.find("Crawl");
359 if (iter == headers_.end())
360 return false;
361 return boost::iequals(iter->value(), "public");
362}
363
364bool
366{
367 return static_cast<bool>(app_.cluster().member(publicKey_));
368}
369
372{
373 if (inbound_)
374 return headers_["User-Agent"];
375 return headers_["Server"];
376}
377
380{
382
383 ret[jss::public_key] = toBase58(TokenType::NodePublic, publicKey_);
384 ret[jss::address] = remote_address_.to_string();
385
386 if (inbound_)
387 ret[jss::inbound] = true;
388
389 if (cluster())
390 {
391 ret[jss::cluster] = true;
392
393 if (auto const n = name(); !n.empty())
394 // Could move here if Json::Value supported moving from a string
395 ret[jss::name] = n;
396 }
397
398 if (auto const d = domain(); !d.empty())
399 ret[jss::server_domain] = std::string{d};
400
401 if (auto const nid = headers_["Network-ID"]; !nid.empty())
402 ret[jss::network_id] = std::string{nid};
403
404 ret[jss::load] = usage_.balance();
405
406 if (auto const version = getVersion(); !version.empty())
407 ret[jss::version] = std::string{version};
408
409 ret[jss::protocol] = to_string(protocol_);
410
411 {
413 if (latency_)
414 ret[jss::latency] = static_cast<Json::UInt>(latency_->count());
415 }
416
417 ret[jss::uptime] = static_cast<Json::UInt>(
418 std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());
419
420 std::uint32_t minSeq, maxSeq;
421 ledgerRange(minSeq, maxSeq);
422
423 if ((minSeq != 0) || (maxSeq != 0))
424 ret[jss::complete_ledgers] =
425 std::to_string(minSeq) + " - " + std::to_string(maxSeq);
426
427 switch (tracking_.load())
428 {
430 ret[jss::track] = "diverged";
431 break;
432
434 ret[jss::track] = "unknown";
435 break;
436
438 // Nothing to do here
439 break;
440 }
441
442 uint256 closedLedgerHash;
443 protocol::TMStatusChange last_status;
444 {
446 closedLedgerHash = closedLedgerHash_;
447 last_status = last_status_;
448 }
449
450 if (closedLedgerHash != beast::zero)
451 ret[jss::ledger] = to_string(closedLedgerHash);
452
453 if (last_status.has_newstatus())
454 {
455 switch (last_status.newstatus())
456 {
457 case protocol::nsCONNECTING:
458 ret[jss::status] = "connecting";
459 break;
460
461 case protocol::nsCONNECTED:
462 ret[jss::status] = "connected";
463 break;
464
465 case protocol::nsMONITORING:
466 ret[jss::status] = "monitoring";
467 break;
468
469 case protocol::nsVALIDATING:
470 ret[jss::status] = "validating";
471 break;
472
473 case protocol::nsSHUTTING:
474 ret[jss::status] = "shutting";
475 break;
476
477 default:
478 JLOG(p_journal_.warn())
479 << "Unknown status: " << last_status.newstatus();
480 }
481 }
482
483 ret[jss::metrics] = Json::Value(Json::objectValue);
484 ret[jss::metrics][jss::total_bytes_recv] =
485 std::to_string(metrics_.recv.total_bytes());
486 ret[jss::metrics][jss::total_bytes_sent] =
487 std::to_string(metrics_.sent.total_bytes());
488 ret[jss::metrics][jss::avg_bps_recv] =
489 std::to_string(metrics_.recv.average_bytes());
490 ret[jss::metrics][jss::avg_bps_sent] =
491 std::to_string(metrics_.sent.average_bytes());
492
493 return ret;
494}
495
496bool
498{
499 switch (f)
500 {
502 return protocol_ >= make_protocol(2, 1);
504 return protocol_ >= make_protocol(2, 2);
507 }
508 return false;
509}
510
511//------------------------------------------------------------------------------
512
513bool
515{
516 {
518 if ((seq != 0) && (seq >= minLedger_) && (seq <= maxLedger_) &&
520 return true;
521 if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
522 recentLedgers_.end())
523 return true;
524 }
525 return false;
526}
527
528void
530{
532
533 minSeq = minLedger_;
534 maxSeq = maxLedger_;
535}
536
537bool
538PeerImp::hasTxSet(uint256 const& hash) const
539{
541 return std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
542 recentTxSets_.end();
543}
544
545void
547{
548 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
549 // guarded by recentLock_.
553}
554
555bool
557{
559 return (tracking_ != Tracking::diverged) && (uMin >= minLedger_) &&
560 (uMax <= maxLedger_);
561}
562
563//------------------------------------------------------------------------------
564
565void
567{
568 XRPL_ASSERT(
569 strand_.running_in_this_thread(),
570 "ripple::PeerImp::close : strand in this thread");
571 if (socket_.is_open())
572 {
573 detaching_ = true; // DEPRECATED
574 error_code ec;
575 timer_.cancel(ec);
576 socket_.close(ec);
578 if (inbound_)
579 {
580 JLOG(journal_.debug()) << "Closed";
581 }
582 else
583 {
584 JLOG(journal_.info()) << "Closed";
585 }
586 }
587}
588
589void
591{
592 if (!strand_.running_in_this_thread())
593 return post(
594 strand_,
595 std::bind(
596 (void(Peer::*)(std::string const&)) & PeerImp::fail,
598 reason));
600 {
601 std::string const n = name();
602 JLOG(journal_.warn()) << (n.empty() ? remote_address_.to_string() : n)
603 << " failed: " << reason;
604 }
605 close();
606}
607
608void
610{
611 XRPL_ASSERT(
612 strand_.running_in_this_thread(),
613 "ripple::PeerImp::fail : strand in this thread");
614 if (socket_.is_open())
615 {
616 JLOG(journal_.warn())
618 << " at " << remote_address_.to_string() << ": " << ec.message();
619 }
620 close();
621}
622
623void
625{
626 XRPL_ASSERT(
627 strand_.running_in_this_thread(),
628 "ripple::PeerImp::gracefulClose : strand in this thread");
629 XRPL_ASSERT(
630 socket_.is_open(), "ripple::PeerImp::gracefulClose : socket is open");
631 XRPL_ASSERT(
633 "ripple::PeerImp::gracefulClose : socket is not closing");
634 gracefulClose_ = true;
635 if (send_queue_.size() > 0)
636 return;
637 setTimer();
638 stream_.async_shutdown(bind_executor(
639 strand_,
640 std::bind(
641 &PeerImp::onShutdown, shared_from_this(), std::placeholders::_1)));
642}
643
644void
646{
647 error_code ec;
648 timer_.expires_from_now(peerTimerInterval, ec);
649
650 if (ec)
651 {
652 JLOG(journal_.error()) << "setTimer: " << ec.message();
653 return;
654 }
655 timer_.async_wait(bind_executor(
656 strand_,
657 std::bind(
658 &PeerImp::onTimer, shared_from_this(), std::placeholders::_1)));
659}
660
661// convenience for ignoring the error code
662void
664{
665 error_code ec;
666 timer_.cancel(ec);
667}
668
669//------------------------------------------------------------------------------
670
673{
675 ss << "[" << std::setfill('0') << std::setw(3) << id << "] ";
676 return ss.str();
677}
678
679void
681{
682 if (!socket_.is_open())
683 return;
684
685 if (ec == boost::asio::error::operation_aborted)
686 return;
687
688 if (ec)
689 {
690 // This should never happen
691 JLOG(journal_.error()) << "onTimer: " << ec.message();
692 return close();
693 }
694
696 {
697 fail("Large send queue");
698 return;
699 }
700
701 if (auto const t = tracking_.load(); !inbound_ && t != Tracking::converged)
702 {
703 clock_type::duration duration;
704
705 {
707 duration = clock_type::now() - trackingTime_;
708 }
709
710 if ((t == Tracking::diverged &&
711 (duration > app_.config().MAX_DIVERGED_TIME)) ||
712 (t == Tracking::unknown &&
713 (duration > app_.config().MAX_UNKNOWN_TIME)))
714 {
716 fail("Not useful");
717 return;
718 }
719 }
720
721 // Already waiting for PONG
722 if (lastPingSeq_)
723 {
724 fail("Ping Timeout");
725 return;
726 }
727
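    // Send a PING carrying a random sequence number; the peer is expected to echo it in its PONG (checked in onMessage(TMPing)).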
729 lastPingSeq_ = rand_int<std::uint32_t>();
730
731 protocol::TMPing message;
732 message.set_type(protocol::TMPing::ptPING);
733 message.set_seq(*lastPingSeq_);
734
735 send(std::make_shared<Message>(message, protocol::mtPING));
736
737 setTimer();
738}
739
740void
742{
743 cancelTimer();
744 // If we don't get eof then something went wrong
745 if (!ec)
746 {
747 JLOG(journal_.error()) << "onShutdown: expected error condition";
748 return close();
749 }
750 if (ec != boost::asio::error::eof)
751 return fail("onShutdown", ec);
752 close();
753}
754
755//------------------------------------------------------------------------------
756void
758{
759 XRPL_ASSERT(
760 read_buffer_.size() == 0,
761 "ripple::PeerImp::doAccept : empty read buffer");
762
763 JLOG(journal_.debug()) << "doAccept: " << remote_address_;
764
765 auto const sharedValue = makeSharedValue(*stream_ptr_, journal_);
766
767 // This shouldn't fail since we already computed
768 // the shared value successfully in OverlayImpl
769 if (!sharedValue)
770 return fail("makeSharedValue: Unexpected failure");
771
772 JLOG(journal_.info()) << "Protocol: " << to_string(protocol_);
773 JLOG(journal_.info()) << "Public Key: "
775
776 if (auto member = app_.cluster().member(publicKey_))
777 {
778 {
780 name_ = *member;
781 }
782 JLOG(journal_.info()) << "Cluster name: " << *member;
783 }
784
786
787 // XXX Set timer: connection is in grace period to be useful.
788 // XXX Set timer: connection idle (idle may vary depending on connection
789 // type.)
790
791 auto write_buffer = std::make_shared<boost::beast::multi_buffer>();
792
793 boost::beast::ostream(*write_buffer) << makeResponse(
795 request_,
798 *sharedValue,
800 protocol_,
801 app_);
802
803 // Write the whole buffer and only start protocol when that's done.
804 boost::asio::async_write(
805 stream_,
806 write_buffer->data(),
807 boost::asio::transfer_all(),
808 bind_executor(
809 strand_,
810 [this, write_buffer, self = shared_from_this()](
811 error_code ec, std::size_t bytes_transferred) {
812 if (!socket_.is_open())
813 return;
814 if (ec == boost::asio::error::operation_aborted)
815 return;
816 if (ec)
817 return fail("onWriteResponse", ec);
818 if (write_buffer->size() == bytes_transferred)
819 return doProtocolStart();
820 return fail("Failed to write header");
821 }));
822}
823
826{
827 std::shared_lock read_lock{nameMutex_};
828 return name_;
829}
830
833{
834 return headers_["Server-Domain"];
835}
836
837//------------------------------------------------------------------------------
838
839// Protocol logic
840
841void
843{
845
846 // Send all the validator lists that have been loaded
848 {
850 [&](std::string const& manifest,
851 std::uint32_t version,
853 PublicKey const& pubKey,
854 std::size_t maxSequence,
855 uint256 const& hash) {
857 *this,
858 0,
859 pubKey,
860 maxSequence,
861 version,
862 manifest,
863 blobInfos,
865 p_journal_);
866
867 // Don't send it next time.
869 });
870 }
871
872 if (auto m = overlay_.getManifestsMessage())
873 send(m);
874
875 setTimer();
876}
877
878// Called repeatedly with protocol message data
879void
881{
882 if (!socket_.is_open())
883 return;
884 if (ec == boost::asio::error::operation_aborted)
885 return;
886 if (ec == boost::asio::error::eof)
887 {
888 JLOG(journal_.info()) << "EOF";
889 return gracefulClose();
890 }
891 if (ec)
892 return fail("onReadMessage", ec);
893 if (auto stream = journal_.trace())
894 {
895 if (bytes_transferred > 0)
896 stream << "onReadMessage: " << bytes_transferred << " bytes";
897 else
898 stream << "onReadMessage";
899 }
900
901 metrics_.recv.add_message(bytes_transferred);
902
903 read_buffer_.commit(bytes_transferred);
904
905 auto hint = Tuning::readBufferBytes;
906
907 while (read_buffer_.size() > 0)
908 {
909 std::size_t bytes_consumed;
910
911 using namespace std::chrono_literals;
912 std::tie(bytes_consumed, ec) = perf::measureDurationAndLog(
913 [&]() {
914 return invokeProtocolMessage(read_buffer_.data(), *this, hint);
915 },
916 "invokeProtocolMessage",
917 350ms,
918 journal_);
919
920 if (ec)
921 return fail("onReadMessage", ec);
922 if (!socket_.is_open())
923 return;
924 if (gracefulClose_)
925 return;
926 if (bytes_consumed == 0)
927 break;
928 read_buffer_.consume(bytes_consumed);
929 }
930
931 // Timeout on writes only
932 stream_.async_read_some(
934 bind_executor(
935 strand_,
936 std::bind(
939 std::placeholders::_1,
940 std::placeholders::_2)));
941}
942
943void
945{
946 if (!socket_.is_open())
947 return;
948 if (ec == boost::asio::error::operation_aborted)
949 return;
950 if (ec)
951 return fail("onWriteMessage", ec);
952 if (auto stream = journal_.trace())
953 {
954 if (bytes_transferred > 0)
955 stream << "onWriteMessage: " << bytes_transferred << " bytes";
956 else
957 stream << "onWriteMessage";
958 }
959
960 metrics_.sent.add_message(bytes_transferred);
961
962 XRPL_ASSERT(
963 !send_queue_.empty(),
964 "ripple::PeerImp::onWriteMessage : non-empty send buffer");
965 send_queue_.pop();
966 if (!send_queue_.empty())
967 {
968 // Timeout on writes only
969 return boost::asio::async_write(
970 stream_,
971 boost::asio::buffer(
972 send_queue_.front()->getBuffer(compressionEnabled_)),
973 bind_executor(
974 strand_,
975 std::bind(
978 std::placeholders::_1,
979 std::placeholders::_2)));
980 }
981
982 if (gracefulClose_)
983 {
984 return stream_.async_shutdown(bind_executor(
985 strand_,
986 std::bind(
989 std::placeholders::_1)));
990 }
991}
992
993//------------------------------------------------------------------------------
994//
995// ProtocolHandler
996//
997//------------------------------------------------------------------------------
998
999void
1001{
1002 // TODO
1003}
1004
1005void
1007 std::uint16_t type,
1009 std::size_t size,
1010 std::size_t uncompressed_size,
1011 bool isCompressed)
1012{
1013 auto const name = protocolMessageName(type);
1016 auto const category = TrafficCount::categorize(*m, type, true);
1017 overlay_.reportTraffic(category, true, static_cast<int>(size));
1018 using namespace protocol;
1019 if ((type == MessageType::mtTRANSACTION ||
1020 type == MessageType::mtHAVE_TRANSACTIONS ||
1021 type == MessageType::mtTRANSACTIONS ||
1022 // GET_OBJECTS
1024 // GET_LEDGER
1027 // LEDGER_DATA
1031 {
1033 static_cast<MessageType>(type), static_cast<std::uint64_t>(size));
1034 }
1035 JLOG(journal_.trace()) << "onMessageBegin: " << type << " " << size << " "
1036 << uncompressed_size << " " << isCompressed;
1037}
1038
1039void
1043{
1044 load_event_.reset();
1046}
1047
1048void
1050{
1051 auto const s = m->list_size();
1052
1053 if (s == 0)
1054 {
1056 return;
1057 }
1058
1059 if (s > 100)
1061
1063 jtMANIFEST, "receiveManifests", [this, that = shared_from_this(), m]() {
1064 overlay_.onManifests(m, that);
1065 });
1066}
1067
1068void
1070{
1071 if (m->type() == protocol::TMPing::ptPING)
1072 {
1073 // We have received a ping request, reply with a pong
1075 m->set_type(protocol::TMPing::ptPONG);
1076 send(std::make_shared<Message>(*m, protocol::mtPING));
1077 return;
1078 }
1079
1080 if (m->type() == protocol::TMPing::ptPONG && m->has_seq())
1081 {
1082 // Only reset the ping sequence if we actually received a
1083 // PONG with the correct cookie. That way, any peers which
1084 // respond with incorrect cookies will eventually time out.
1085 if (m->seq() == lastPingSeq_)
1086 {
1088
1089 // Update latency estimate
1090 auto const rtt = std::chrono::round<std::chrono::milliseconds>(
1092
1094
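            // Exponentially weighted moving average: keep 7/8 of the previous estimate and fold in 1/8 of the new round-trip sample.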
1095 if (latency_)
1096 latency_ = (*latency_ * 7 + rtt) / 8;
1097 else
1098 latency_ = rtt;
1099 }
1100
1101 return;
1102 }
1103}
1104
1105void
1107{
1108 // VFALCO NOTE I think we should drop the peer immediately
1109 if (!cluster())
1110 {
1111 fee_.update(Resource::feeUselessData, "unknown cluster");
1112 return;
1113 }
1114
1115 for (int i = 0; i < m->clusternodes().size(); ++i)
1116 {
1117 protocol::TMClusterNode const& node = m->clusternodes(i);
1118
1120 if (node.has_nodename())
1121 name = node.nodename();
1122
1123 auto const publicKey =
1124 parseBase58<PublicKey>(TokenType::NodePublic, node.publickey());
1125
1126 // NIKB NOTE We should drop the peer immediately if
1127 // they send us a public key we can't parse
1128 if (publicKey)
1129 {
1130 auto const reportTime =
1131 NetClock::time_point{NetClock::duration{node.reporttime()}};
1132
1134 *publicKey, name, node.nodeload(), reportTime);
1135 }
1136 }
1137
1138 int loadSources = m->loadsources().size();
1139 if (loadSources != 0)
1140 {
1141 Resource::Gossip gossip;
1142 gossip.items.reserve(loadSources);
1143 for (int i = 0; i < m->loadsources().size(); ++i)
1144 {
1145 protocol::TMLoadSource const& node = m->loadsources(i);
1147 item.address = beast::IP::Endpoint::from_string(node.name());
1148 item.balance = node.cost();
1149 if (item.address != beast::IP::Endpoint())
1150 gossip.items.push_back(item);
1151 }
1153 }
1154
1155 // Calculate the cluster fee:
1156 auto const thresh = app_.timeKeeper().now() - 90s;
1157 std::uint32_t clusterFee = 0;
1158
1160 fees.reserve(app_.cluster().size());
1161
1162 app_.cluster().for_each([&fees, thresh](ClusterNode const& status) {
1163 if (status.getReportTime() >= thresh)
1164 fees.push_back(status.getLoadFee());
1165 });
1166
1167 if (!fees.empty())
1168 {
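        // Use the median (upper median for an even count) of recently reported cluster load fees; nth_element partially sorts just enough to place that element at `index`.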
1169 auto const index = fees.size() / 2;
1170 std::nth_element(fees.begin(), fees.begin() + index, fees.end());
1171 clusterFee = fees[index];
1172 }
1173
1174 app_.getFeeTrack().setClusterFee(clusterFee);
1175}
1176
1177void
1179{
1180 // Don't allow endpoints from peers that are not known tracking or are
1181 // not using a version of the message that we support:
1182 if (tracking_.load() != Tracking::converged || m->version() != 2)
1183 return;
1184
1185 // The number is arbitrary and doesn't have any real significance or
1186 // implication for the protocol.
1187 if (m->endpoints_v2().size() >= 1024)
1188 {
1189 fee_.update(Resource::feeUselessData, "endpoints too large");
1190 return;
1191 }
1192
1194 endpoints.reserve(m->endpoints_v2().size());
1195
1196 auto malformed = 0;
1197 for (auto const& tm : m->endpoints_v2())
1198 {
1199 auto result = beast::IP::Endpoint::from_string_checked(tm.endpoint());
1200
1201 if (!result)
1202 {
1203 JLOG(p_journal_.error()) << "failed to parse incoming endpoint: {"
1204 << tm.endpoint() << "}";
1205 malformed++;
1206 continue;
1207 }
1208
1209 // If hops == 0, this Endpoint describes the peer we are connected
1210 // to -- in that case, we take the remote address seen on the
1211 // socket and store that in the IP::Endpoint. If this is the first
1212 // time, then we'll verify that their listener can receive incoming
1213 // by performing a connectivity test. If hops > 0, then we just
1214 // take the address/port we were given
1215 if (tm.hops() == 0)
1216 result = remote_address_.at_port(result->port());
1217
1218 endpoints.emplace_back(*result, tm.hops());
1219 }
1220
1221 // Charge the peer for each malformed endpoint. As there still may be
1222 // multiple valid endpoints we don't return early.
1223 if (malformed > 0)
1224 {
1225 fee_.update(
1226 Resource::feeInvalidData * malformed,
1227 std::to_string(malformed) + " malformed endpoints");
1228 }
1229
1230 if (!endpoints.empty())
1231 overlay_.peerFinder().on_endpoints(slot_, endpoints);
1232}
1233
1234void
1236{
1237 handleTransaction(m, true, false);
1238}
1239
1240void
1243 bool eraseTxQueue,
1244 bool batch)
1245{
1246 XRPL_ASSERT(
1247 eraseTxQueue != batch,
1248 ("ripple::PeerImp::handleTransaction correct function params"));
1250 return;
1251
1253 {
1254 // If we've never been in synch, there's nothing we can do
1255 // with a transaction
1256 JLOG(p_journal_.debug())
1257 << "Ignoring incoming transaction: " << "Need network ledger";
1258 return;
1259 }
1260
1261 SerialIter sit(makeSlice(m->rawtransaction()));
1262
1263 try
1264 {
1265 auto stx = std::make_shared<STTx const>(sit);
1266 uint256 txID = stx->getTransactionID();
1267
1268 int flags;
1269 constexpr std::chrono::seconds tx_interval = 10s;
1270
1271 if (!app_.getHashRouter().shouldProcess(txID, id_, flags, tx_interval))
1272 {
1273 // we have seen this transaction recently
1274 if (flags & SF_BAD)
1275 {
1276 fee_.update(Resource::feeUselessData, "known bad");
1277 JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID;
1278 }
1279
1280 // Erase only if the server has seen this tx. If the server has not
1281 // seen this tx then the tx could not have been queued for this peer.
1282 else if (eraseTxQueue && txReduceRelayEnabled())
1283 removeTxQueue(txID);
1284
1285 return;
1286 }
1287
1288 JLOG(p_journal_.debug()) << "Got tx " << txID;
1289
1290 bool checkSignature = true;
1291 if (cluster())
1292 {
1293 if (!m->has_deferred() || !m->deferred())
1294 {
1295 // Skip local checks if a server we trust
1296 // put the transaction in its open ledger
1297 flags |= SF_TRUSTED;
1298 }
1299
1300 // for non-validator nodes only -- localPublicKey is set for
1301 // validators only
1303 {
1304 // For now, be paranoid and have each validator
1305 // check each transaction, regardless of source
1306 checkSignature = false;
1307 }
1308 }
1309
1311 {
1312 JLOG(p_journal_.trace())
1313 << "No new transactions until synchronized";
1314 }
1315 else if (
1318 {
1320 JLOG(p_journal_.info()) << "Transaction queue is full";
1321 }
1322 else
1323 {
1326 "recvTransaction->checkTransaction",
1328 flags,
1329 checkSignature,
1330 batch,
1331 stx]() {
1332 if (auto peer = weak.lock())
1333 peer->checkTransaction(
1334 flags, checkSignature, stx, batch);
1335 });
1336 }
1337 }
1338 catch (std::exception const& ex)
1339 {
1340 JLOG(p_journal_.warn())
1341 << "Transaction invalid: " << strHex(m->rawtransaction())
1342 << ". Exception: " << ex.what();
1343 }
1344}
1345
1346void
1348{
1349 auto badData = [&](std::string const& msg) {
1350 fee_.update(Resource::feeInvalidData, "get_ledger " + msg);
1351 JLOG(p_journal_.warn()) << "TMGetLedger: " << msg;
1352 };
1353 auto const itype{m->itype()};
1354
1355 // Verify ledger info type
1356 if (itype < protocol::liBASE || itype > protocol::liTS_CANDIDATE)
1357 return badData("Invalid ledger info type");
1358
1359 auto const ltype = [&m]() -> std::optional<::protocol::TMLedgerType> {
1360 if (m->has_ltype())
1361 return m->ltype();
1362 return std::nullopt;
1363 }();
1364
1365 if (itype == protocol::liTS_CANDIDATE)
1366 {
1367 if (!m->has_ledgerhash())
1368 return badData("Invalid TX candidate set, missing TX set hash");
1369 }
1370 else if (
1371 !m->has_ledgerhash() && !m->has_ledgerseq() &&
1372 !(ltype && *ltype == protocol::ltCLOSED))
1373 {
1374 return badData("Invalid request");
1375 }
1376
1377 // Verify ledger type
1378 if (ltype && (*ltype < protocol::ltACCEPTED || *ltype > protocol::ltCLOSED))
1379 return badData("Invalid ledger type");
1380
1381 // Verify ledger hash
1382 if (m->has_ledgerhash() && !stringIsUint256Sized(m->ledgerhash()))
1383 return badData("Invalid ledger hash");
1384
1385 // Verify ledger sequence
1386 if (m->has_ledgerseq())
1387 {
1388 auto const ledgerSeq{m->ledgerseq()};
1389
1390 // Check if within a reasonable range
1391 using namespace std::chrono_literals;
1393 ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1394 {
1395 return badData(
1396 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1397 }
1398 }
1399
1400 // Verify ledger node IDs
1401 if (itype != protocol::liBASE)
1402 {
1403 if (m->nodeids_size() <= 0)
1404 return badData("Invalid ledger node IDs");
1405
1406 for (auto const& nodeId : m->nodeids())
1407 {
1408 if (deserializeSHAMapNodeID(nodeId) == std::nullopt)
1409 return badData("Invalid SHAMap node ID");
1410 }
1411 }
1412
1413 // Verify query type
1414 if (m->has_querytype() && m->querytype() != protocol::qtINDIRECT)
1415 return badData("Invalid query type");
1416
1417 // Verify query depth
1418 if (m->has_querydepth())
1419 {
1420 if (m->querydepth() > Tuning::maxQueryDepth ||
1421 itype == protocol::liBASE)
1422 {
1423 return badData("Invalid query depth");
1424 }
1425 }
1426
1427 // Queue a job to process the request
1429 app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m]() {
1430 if (auto peer = weak.lock())
1431 peer->processLedgerRequest(m);
1432 });
1433}
1434
1435void
1437{
1438 JLOG(p_journal_.trace()) << "onMessage, TMProofPathRequest";
1440 {
1441 fee_.update(
1442 Resource::feeMalformedRequest, "proof_path_request disabled");
1443 return;
1444 }
1445
1446 fee_.update(
1447 Resource::feeModerateBurdenPeer, "received a proof path request");
1450 jtREPLAY_REQ, "recvProofPathRequest", [weak, m]() {
1451 if (auto peer = weak.lock())
1452 {
1453 auto reply =
1454 peer->ledgerReplayMsgHandler_.processProofPathRequest(m);
1455 if (reply.has_error())
1456 {
1457 if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1458 peer->charge(
1459 Resource::feeMalformedRequest,
1460 "proof_path_request");
1461 else
1462 peer->charge(
1463 Resource::feeRequestNoReply, "proof_path_request");
1464 }
1465 else
1466 {
1467 peer->send(std::make_shared<Message>(
1468 reply, protocol::mtPROOF_PATH_RESPONSE));
1469 }
1470 }
1471 });
1472}
1473
1474void
1476{
1477 if (!ledgerReplayEnabled_)
1478 {
1479 fee_.update(
1480 Resource::feeMalformedRequest, "proof_path_response disabled");
1481 return;
1482 }
1483
1484 if (!ledgerReplayMsgHandler_.processProofPathResponse(m))
1485 {
1486 fee_.update(Resource::feeInvalidData, "proof_path_response");
1487 }
1488}
1489
1490void
1492{
1493 JLOG(p_journal_.trace()) << "onMessage, TMReplayDeltaRequest";
1494 if (!ledgerReplayEnabled_)
1495 {
1496 fee_.update(
1497 Resource::feeMalformedRequest, "replay_delta_request disabled");
1498 return;
1499 }
1500
1501 fee_.fee = Resource::feeModerateBurdenPeer;
1502 std::weak_ptr<PeerImp> weak = shared_from_this();
1503 app_.getJobQueue().addJob(
1504 jtREPLAY_REQ, "recvReplayDeltaRequest", [weak, m]() {
1505 if (auto peer = weak.lock())
1506 {
1507 auto reply =
1508 peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m);
1509 if (reply.has_error())
1510 {
1511 if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1512 peer->charge(
1513 Resource::feeMalformedRequest,
1514 "replay_delta_request");
1515 else
1516 peer->charge(
1517 Resource::feeRequestNoReply,
1518 "replay_delta_request");
1519 }
1520 else
1521 {
1522 peer->send(std::make_shared<Message>(
1523 reply, protocol::mtREPLAY_DELTA_RESPONSE));
1524 }
1525 }
1526 });
1527}
1528
1529void
1531{
1532 if (!ledgerReplayEnabled_)
1533 {
1534 fee_.update(
1535 Resource::feeMalformedRequest, "replay_delta_response disabled");
1536 return;
1537 }
1538
1539 if (!ledgerReplayMsgHandler_.processReplayDeltaResponse(m))
1540 {
1541 fee_.update(Resource::feeInvalidData, "replay_delta_response");
1542 }
1543}
1544
1545void
1547{
1548 auto badData = [&](std::string const& msg) {
1549 fee_.update(Resource::feeInvalidData, msg);
1550 JLOG(p_journal_.warn()) << "TMLedgerData: " << msg;
1551 };
1552
1553 // Verify ledger hash
1554 if (!stringIsUint256Sized(m->ledgerhash()))
1555 return badData("Invalid ledger hash");
1556
1557 // Verify ledger sequence
1558 {
1559 auto const ledgerSeq{m->ledgerseq()};
1560 if (m->type() == protocol::liTS_CANDIDATE)
1561 {
1562 if (ledgerSeq != 0)
1563 {
1564 return badData(
1565 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1566 }
1567 }
1568 else
1569 {
1570 // Check if within a reasonable range
1571 using namespace std::chrono_literals;
1572 if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s &&
1573 ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1574 {
1575 return badData(
1576 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1577 }
1578 }
1579 }
1580
1581 // Verify ledger info type
1582 if (m->type() < protocol::liBASE || m->type() > protocol::liTS_CANDIDATE)
1583 return badData("Invalid ledger info type");
1584
1585 // Verify reply error
1586 if (m->has_error() &&
1587 (m->error() < protocol::reNO_LEDGER ||
1588 m->error() > protocol::reBAD_REQUEST))
1589 {
1590 return badData("Invalid reply error");
1591 }
1592
1593 // Verify ledger nodes.
1594 if (m->nodes_size() <= 0 || m->nodes_size() > Tuning::hardMaxReplyNodes)
1595 {
1596 return badData(
1597 "Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size()));
1598 }
1599
1600 // If there is a request cookie, attempt to relay the message
1601 if (m->has_requestcookie())
1602 {
1603 if (auto peer = overlay_.findPeerByShortID(m->requestcookie()))
1604 {
1605 m->clear_requestcookie();
1606 peer->send(std::make_shared<Message>(*m, protocol::mtLEDGER_DATA));
1607 }
1608 else
1609 {
1610 JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
1611 }
1612 return;
1613 }
1614
1615 uint256 const ledgerHash{m->ledgerhash()};
1616
1617 // Otherwise check if received data for a candidate transaction set
1618 if (m->type() == protocol::liTS_CANDIDATE)
1619 {
1620 std::weak_ptr<PeerImp> weak{shared_from_this()};
1621 app_.getJobQueue().addJob(
1622 jtTXN_DATA, "recvPeerData", [weak, ledgerHash, m]() {
1623 if (auto peer = weak.lock())
1624 {
1625 peer->app_.getInboundTransactions().gotData(
1626 ledgerHash, peer, m);
1627 }
1628 });
1629 return;
1630 }
1631
1632 // Consume the message
1633 app_.getInboundLedgers().gotLedgerData(ledgerHash, shared_from_this(), m);
1634}
1635
1636void
1638{
1639 protocol::TMProposeSet& set = *m;
1640
1641 auto const sig = makeSlice(set.signature());
1642
1643 // Preliminary check for the validity of the signature: A DER encoded
1644 // signature can't be longer than 72 bytes.
1645 if ((std::clamp<std::size_t>(sig.size(), 64, 72) != sig.size()) ||
1646 (publicKeyType(makeSlice(set.nodepubkey())) != KeyType::secp256k1))
1647 {
1648 JLOG(p_journal_.warn()) << "Proposal: malformed";
1649 fee_.update(
1650 Resource::feeInvalidSignature,
1651 " signature can't be longer than 72 bytes");
1652 return;
1653 }
1654
1655 if (!stringIsUint256Sized(set.currenttxhash()) ||
1656 !stringIsUint256Sized(set.previousledger()))
1657 {
1658 JLOG(p_journal_.warn()) << "Proposal: malformed";
1659 fee_.update(Resource::feeMalformedRequest, "bad hashes");
1660 return;
1661 }
1662
1663 // RH TODO: when isTrusted = false we should probably also cache a key
1664 // suppression for 30 seconds to avoid doing a relatively expensive lookup
1665 // every time a spam packet is received
1666 PublicKey const publicKey{makeSlice(set.nodepubkey())};
1667 auto const isTrusted = app_.validators().trusted(publicKey);
1668
1669 // If the operator has specified that untrusted proposals be dropped, then
1670 // this happens here, i.e. before wasting further CPU verifying the signature
1671 // of an untrusted key.
1672 if (!isTrusted && app_.config().RELAY_UNTRUSTED_PROPOSALS == -1)
1673 return;
1674
1675 uint256 const proposeHash{set.currenttxhash()};
1676 uint256 const prevLedger{set.previousledger()};
1677
1678 NetClock::time_point const closeTime{NetClock::duration{set.closetime()}};
1679
1680 uint256 const suppression = proposalUniqueId(
1681 proposeHash,
1682 prevLedger,
1683 set.proposeseq(),
1684 closeTime,
1685 publicKey.slice(),
1686 sig);
1687
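    // `added` is false when this proposal was already seen; `relayed`, if set, records when the suppressed message was last relayed.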
1688 if (auto [added, relayed] =
1689 app_.getHashRouter().addSuppressionPeerWithStatus(suppression, id_);
1690 !added)
1691 {
1692 // Count unique messages (Slots has its own 'HashRouter'), which a peer
1693 // receives within IDLED seconds since the message has been relayed.
1694 if (reduceRelayReady() && relayed &&
1695 (stopwatch().now() - *relayed) < reduce_relay::IDLED)
1696 overlay_.updateSlotAndSquelch(
1697 suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);
1698 JLOG(p_journal_.trace()) << "Proposal: duplicate";
1699 return;
1700 }
1701
1702 if (!isTrusted)
1703 {
1704 if (tracking_.load() == Tracking::diverged)
1705 {
1706 JLOG(p_journal_.debug())
1707 << "Proposal: Dropping untrusted (peer divergence)";
1708 return;
1709 }
1710
1711 if (!cluster() && app_.getFeeTrack().isLoadedLocal())
1712 {
1713 JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (load)";
1714 return;
1715 }
1716 }
1717
1718 JLOG(p_journal_.trace())
1719 << "Proposal: " << (isTrusted ? "trusted" : "untrusted");
1720
1721 auto proposal = RCLCxPeerPos(
1722 publicKey,
1723 sig,
1724 suppression,
1726 prevLedger,
1727 set.proposeseq(),
1728 proposeHash,
1729 closeTime,
1730 app_.timeKeeper().closeTime(),
1731 calcNodeID(app_.validatorManifests().getMasterKey(publicKey))});
1732
1733 std::weak_ptr<PeerImp> weak = shared_from_this();
1734 app_.getJobQueue().addJob(
1735 isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut,
1736 "recvPropose->checkPropose",
1737 [weak, isTrusted, m, proposal]() {
1738 if (auto peer = weak.lock())
1739 peer->checkPropose(isTrusted, m, proposal);
1740 });
1741}
1742
1743void
1745{
1746 JLOG(p_journal_.trace()) << "Status: Change";
1747
1748 if (!m->has_networktime())
1749 m->set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1750
1751 {
1752 std::lock_guard sl(recentLock_);
1753 if (!last_status_.has_newstatus() || m->has_newstatus())
1754 last_status_ = *m;
1755 else
1756 {
1757 // preserve old status
1758 protocol::NodeStatus status = last_status_.newstatus();
1759 last_status_ = *m;
1760 m->set_newstatus(status);
1761 }
1762 }
1763
1764 if (m->newevent() == protocol::neLOST_SYNC)
1765 {
1766 bool outOfSync{false};
1767 {
1768 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1769 // guarded by recentLock_.
1770 std::lock_guard sl(recentLock_);
1771 if (!closedLedgerHash_.isZero())
1772 {
1773 outOfSync = true;
1774 closedLedgerHash_.zero();
1775 }
1776 previousLedgerHash_.zero();
1777 }
1778 if (outOfSync)
1779 {
1780 JLOG(p_journal_.debug()) << "Status: Out of sync";
1781 }
1782 return;
1783 }
1784
1785 {
1786 uint256 closedLedgerHash{};
1787 bool const peerChangedLedgers{
1788 m->has_ledgerhash() && stringIsUint256Sized(m->ledgerhash())};
1789
1790 {
1791 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1792 // guarded by recentLock_.
1793 std::lock_guard sl(recentLock_);
1794 if (peerChangedLedgers)
1795 {
1796 closedLedgerHash_ = m->ledgerhash();
1797 closedLedgerHash = closedLedgerHash_;
1798 addLedger(closedLedgerHash, sl);
1799 }
1800 else
1801 {
1802 closedLedgerHash_.zero();
1803 }
1804
1805 if (m->has_ledgerhashprevious() &&
1806 stringIsUint256Sized(m->ledgerhashprevious()))
1807 {
1808 previousLedgerHash_ = m->ledgerhashprevious();
1809 addLedger(previousLedgerHash_, sl);
1810 }
1811 else
1812 {
1813 previousLedgerHash_.zero();
1814 }
1815 }
1816 if (peerChangedLedgers)
1817 {
1818 JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;
1819 }
1820 else
1821 {
1822 JLOG(p_journal_.debug()) << "Status: No ledger";
1823 }
1824 }
1825
1826 if (m->has_firstseq() && m->has_lastseq())
1827 {
1828 std::lock_guard sl(recentLock_);
1829
1830 minLedger_ = m->firstseq();
1831 maxLedger_ = m->lastseq();
1832
1833 if ((maxLedger_ < minLedger_) || (minLedger_ == 0) || (maxLedger_ == 0))
1834 minLedger_ = maxLedger_ = 0;
1835 }
1836
1837 if (m->has_ledgerseq() &&
1838 app_.getLedgerMaster().getValidatedLedgerAge() < 2min)
1839 {
1840 checkTracking(
1841 m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex());
1842 }
1843
1844 app_.getOPs().pubPeerStatus([=, this]() -> Json::Value {
1846
1847 if (m->has_newstatus())
1848 {
1849 switch (m->newstatus())
1850 {
1851 case protocol::nsCONNECTING:
1852 j[jss::status] = "CONNECTING";
1853 break;
1854 case protocol::nsCONNECTED:
1855 j[jss::status] = "CONNECTED";
1856 break;
1857 case protocol::nsMONITORING:
1858 j[jss::status] = "MONITORING";
1859 break;
1860 case protocol::nsVALIDATING:
1861 j[jss::status] = "VALIDATING";
1862 break;
1863 case protocol::nsSHUTTING:
1864 j[jss::status] = "SHUTTING";
1865 break;
1866 }
1867 }
1868
1869 if (m->has_newevent())
1870 {
1871 switch (m->newevent())
1872 {
1873 case protocol::neCLOSING_LEDGER:
1874 j[jss::action] = "CLOSING_LEDGER";
1875 break;
1876 case protocol::neACCEPTED_LEDGER:
1877 j[jss::action] = "ACCEPTED_LEDGER";
1878 break;
1879 case protocol::neSWITCHED_LEDGER:
1880 j[jss::action] = "SWITCHED_LEDGER";
1881 break;
1882 case protocol::neLOST_SYNC:
1883 j[jss::action] = "LOST_SYNC";
1884 break;
1885 }
1886 }
1887
1888 if (m->has_ledgerseq())
1889 {
1890 j[jss::ledger_index] = m->ledgerseq();
1891 }
1892
1893 if (m->has_ledgerhash())
1894 {
1895 uint256 closedLedgerHash{};
1896 {
1897 std::lock_guard sl(recentLock_);
1898 closedLedgerHash = closedLedgerHash_;
1899 }
1900 j[jss::ledger_hash] = to_string(closedLedgerHash);
1901 }
1902
1903 if (m->has_networktime())
1904 {
1905 j[jss::date] = Json::UInt(m->networktime());
1906 }
1907
1908 if (m->has_firstseq() && m->has_lastseq())
1909 {
1910 j[jss::ledger_index_min] = Json::UInt(m->firstseq());
1911 j[jss::ledger_index_max] = Json::UInt(m->lastseq());
1912 }
1913
1914 return j;
1915 });
1916}
1917
1918void
1919PeerImp::checkTracking(std::uint32_t validationSeq)
1920{
1921 std::uint32_t serverSeq;
1922 {
1923 // Extract the sequence number of the highest
1924 // ledger this peer has
1925 std::lock_guard sl(recentLock_);
1926
1927 serverSeq = maxLedger_;
1928 }
1929 if (serverSeq != 0)
1930 {
1931 // Compare the peer's ledger sequence to the
1932 // sequence of a recently-validated ledger
1933 checkTracking(serverSeq, validationSeq);
1934 }
1935}
1936
1937void
1938PeerImp::checkTracking(std::uint32_t seq1, std::uint32_t seq2)
1939{
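    // Absolute difference between the peer's reported ledger sequence and the reference (recently validated) sequence.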
1940 int diff = std::max(seq1, seq2) - std::min(seq1, seq2);
1941
1942 if (diff < Tuning::convergedLedgerLimit)
1943 {
1944 // The peer's ledger sequence is close to the validation's
1945 tracking_ = Tracking::converged;
1946 }
1947
1948 if ((diff > Tuning::divergedLedgerLimit) &&
1949 (tracking_.load() != Tracking::diverged))
1950 {
1951 // The peer's ledger sequence is way off the validation's
1952 std::lock_guard sl(recentLock_);
1953
1954 tracking_ = Tracking::diverged;
1955 trackingTime_ = clock_type::now();
1956 }
1957}
1958
1959void
1961{
1962 if (!stringIsUint256Sized(m->hash()))
1963 {
1964 fee_.update(Resource::feeMalformedRequest, "bad hash");
1965 return;
1966 }
1967
1968 uint256 const hash{m->hash()};
1969
1970 if (m->status() == protocol::tsHAVE)
1971 {
1972 std::lock_guard sl(recentLock_);
1973
1974 if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
1975 recentTxSets_.end())
1976 {
1977 fee_.update(Resource::feeUselessData, "duplicate (tsHAVE)");
1978 return;
1979 }
1980
1981 recentTxSets_.push_back(hash);
1982 }
1983}
1984
1985void
1986PeerImp::onValidatorListMessage(
1987 std::string const& messageType,
1988 std::string const& manifest,
1989 std::uint32_t version,
1990 std::vector<ValidatorBlobInfo> const& blobs)
1991{
1992 // If there are no blobs, the message is malformed (possibly because of
1993 // ValidatorList class rules), so charge accordingly and skip processing.
1994 if (blobs.empty())
1995 {
1996 JLOG(p_journal_.warn()) << "Ignored malformed " << messageType
1997 << " from peer " << remote_address_;
1998 // This shouldn't ever happen with a well-behaved peer
1999 fee_.update(Resource::feeHeavyBurdenPeer, "no blobs");
2000 return;
2001 }
2002
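    // Digest over the full payload (manifest, blobs, version); used below as the HashRouter suppression key so duplicate lists are processed only once.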
2003 auto const hash = sha512Half(manifest, blobs, version);
2004
2005 JLOG(p_journal_.debug())
2006 << "Received " << messageType << " from " << remote_address_.to_string()
2007 << " (" << id_ << ")";
2008
2009 if (!app_.getHashRouter().addSuppressionPeer(hash, id_))
2010 {
2011 JLOG(p_journal_.debug())
2012 << messageType << ": received duplicate " << messageType;
2013 // Charging this fee here won't hurt the peer in the normal
2014 // course of operation (i.e. refresh every 5 minutes), but
2015 // will add up if the peer is misbehaving.
2016 fee_.update(Resource::feeUselessData, "duplicate");
2017 return;
2018 }
2019
2020 auto const applyResult = app_.validators().applyListsAndBroadcast(
2021 manifest,
2022 version,
2023 blobs,
2024 remote_address_.to_string(),
2025 hash,
2026 app_.overlay(),
2027 app_.getHashRouter(),
2028 app_.getOPs());
2029
2030 JLOG(p_journal_.debug())
2031 << "Processed " << messageType << " version " << version << " from "
2032 << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
2033 : "unknown or invalid publisher")
2034 << " from " << remote_address_.to_string() << " (" << id_
2035 << ") with best result " << to_string(applyResult.bestDisposition());
2036
2037 // Act based on the best result
2038 switch (applyResult.bestDisposition())
2039 {
2040 // New list
2041 case ListDisposition::accepted:
2042 // Newest list is expired, and that needs to be broadcast, too
2043 case ListDisposition::expired:
2044 // Future list
2045 case ListDisposition::pending: {
2046 std::lock_guard<std::mutex> sl(recentLock_);
2047
2048 XRPL_ASSERT(
2049 applyResult.publisherKey,
2050 "ripple::PeerImp::onValidatorListMessage : publisher key is "
2051 "set");
2052 auto const& pubKey = *applyResult.publisherKey;
2053#ifndef NDEBUG
2054 if (auto const iter = publisherListSequences_.find(pubKey);
2055 iter != publisherListSequences_.end())
2056 {
2057 XRPL_ASSERT(
2058 iter->second < applyResult.sequence,
2059 "ripple::PeerImp::onValidatorListMessage : lower sequence");
2060 }
2061#endif
2062 publisherListSequences_[pubKey] = applyResult.sequence;
2063 }
2064 break;
2065 case ListDisposition::same_sequence:
2066 case ListDisposition::known_sequence:
2067#ifndef NDEBUG
2068 {
2069 std::lock_guard<std::mutex> sl(recentLock_);
2070 XRPL_ASSERT(
2071 applyResult.sequence && applyResult.publisherKey,
2072 "ripple::PeerImp::onValidatorListMessage : nonzero sequence "
2073 "and set publisher key");
2074 XRPL_ASSERT(
2075 publisherListSequences_[*applyResult.publisherKey] <=
2076 applyResult.sequence,
2077 "ripple::PeerImp::onValidatorListMessage : maximum sequence");
2078 }
2079#endif // !NDEBUG
2080
2081 break;
2082 case ListDisposition::stale:
2083 case ListDisposition::untrusted:
2084 case ListDisposition::invalid:
2085 case ListDisposition::unsupported_version:
2086 break;
2087 default:
2088 UNREACHABLE(
2089 "ripple::PeerImp::onValidatorListMessage : invalid best list "
2090 "disposition");
2091 }
2092
2093 // Charge based on the worst result
2094 switch (applyResult.worstDisposition())
2095 {
2096 case ListDisposition::accepted:
2097 case ListDisposition::expired:
2098 case ListDisposition::pending:
2099 // No charges for good data
2100 break;
2101 case ListDisposition::same_sequence:
2102 case ListDisposition::known_sequence:
2103 // Charging this fee here won't hurt the peer in the normal
2104 // course of operation (i.e. refresh every 5 minutes), but
2105 // will add up if the peer is misbehaving.
2106 fee_.update(
2107 Resource::feeUselessData,
2108 " duplicate (same_sequence or known_sequence)");
2109 break;
2110 case ListDisposition::stale:
2111 // There are very few good reasons for a peer to send an
2112 // old list, particularly more than once.
2113 fee_.update(Resource::feeInvalidData, "expired");
2114 break;
2115 case ListDisposition::untrusted:
2116 // Charging this fee here won't hurt the peer in the normal
2117 // course of operation (i.e. refresh every 5 minutes), but
2118 // will add up if the peer is misbehaving.
2119 fee_.update(Resource::feeUselessData, "untrusted");
2120 break;
2121 case ListDisposition::invalid:
2122 // This shouldn't ever happen with a well-behaved peer
2123 fee_.update(
2124 Resource::feeInvalidSignature, "invalid list disposition");
2125 break;
2126 case ListDisposition::unsupported_version:
2127 // During a version transition, this may be legitimate.
2128 // If it happens frequently, that's probably bad.
2129 fee_.update(Resource::feeInvalidData, "version");
2130 break;
2131 default:
2132 UNREACHABLE(
2133 "ripple::PeerImp::onValidatorListMessage : invalid worst list "
2134 "disposition");
2135 }
2136
2137 // Log based on all the results.
2138 for (auto const& [disp, count] : applyResult.dispositions)
2139 {
2140 switch (disp)
2141 {
2142 // New list
2143 case ListDisposition::accepted:
2144 JLOG(p_journal_.debug())
2145 << "Applied " << count << " new " << messageType
2146 << "(s) from peer " << remote_address_;
2147 break;
2148 // Newest list is expired, and that needs to be broadcast, too
2149 case ListDisposition::expired:
2150 JLOG(p_journal_.debug())
2151 << "Applied " << count << " expired " << messageType
2152 << "(s) from peer " << remote_address_;
2153 break;
2154 // Future list
2155 case ListDisposition::pending:
2156 JLOG(p_journal_.debug())
2157 << "Processed " << count << " future " << messageType
2158 << "(s) from peer " << remote_address_;
2159 break;
2160 case ListDisposition::same_sequence:
2161 JLOG(p_journal_.warn())
2162 << "Ignored " << count << " " << messageType
2163 << "(s) with current sequence from peer "
2164 << remote_address_;
2165 break;
2166 case ListDisposition::known_sequence:
2167 JLOG(p_journal_.warn())
2168 << "Ignored " << count << " " << messageType
2169 << "(s) with future sequence from peer " << remote_address_;
2170 break;
2171 case ListDisposition::stale:
2172 JLOG(p_journal_.warn())
2173 << "Ignored " << count << "stale " << messageType
2174 << "(s) from peer " << remote_address_;
2175 break;
2176 case ListDisposition::untrusted:
2177 JLOG(p_journal_.warn())
2178 << "Ignored " << count << " untrusted " << messageType
2179 << "(s) from peer " << remote_address_;
2180 break;
2181 case ListDisposition::unsupported_version:
2182 JLOG(p_journal_.warn())
2183 << "Ignored " << count << "unsupported version "
2184 << messageType << "(s) from peer " << remote_address_;
2185 break;
2186 case ListDisposition::invalid:
2187 JLOG(p_journal_.warn())
2188 << "Ignored " << count << "invalid " << messageType
2189 << "(s) from peer " << remote_address_;
2190 break;
2191 default:
2192 UNREACHABLE(
2193 "ripple::PeerImp::onValidatorListMessage : invalid list "
2194 "disposition");
2195 }
2196 }
2197}
2198
2199void
2201{
2202 try
2203 {
2204 if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))
2205 {
2206 JLOG(p_journal_.debug())
2207 << "ValidatorList: received validator list from peer using "
2208 << "protocol version " << to_string(protocol_)
2209 << " which shouldn't support this feature.";
2210 fee_.update(Resource::feeUselessData, "unsupported peer");
2211 return;
2212 }
2213 onValidatorListMessage(
2214 "ValidatorList",
2215 m->manifest(),
2216 m->version(),
2217 ValidatorList::parseBlobs(*m));
2218 }
2219 catch (std::exception const& e)
2220 {
2221 JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what()
2222 << " from peer " << remote_address_;
2223 using namespace std::string_literals;
2224 fee_.update(Resource::feeInvalidData, e.what());
2225 }
2226}
2227
2228void
2229PeerImp::onMessage(
2231{
2232 try
2233 {
2234 if (!supportsFeature(ProtocolFeature::ValidatorList2Propagation))
2235 {
2236 JLOG(p_journal_.debug())
2237 << "ValidatorListCollection: received validator list from peer "
2238 << "using protocol version " << to_string(protocol_)
2239 << " which shouldn't support this feature.";
2240 fee_.update(Resource::feeUselessData, "unsupported peer");
2241 return;
2242 }
2243 else if (m->version() < 2)
2244 {
2245 JLOG(p_journal_.debug())
2246 << "ValidatorListCollection: received invalid validator list "
2247 "version "
2248 << m->version() << " from peer using protocol version "
2249 << to_string(protocol_);
2250 fee_.update(Resource::feeInvalidData, "wrong version");
2251 return;
2252 }
2253 onValidatorListMessage(
2254 "ValidatorListCollection",
2255 m->manifest(),
2256 m->version(),
2257 ValidatorList::parseBlobs(*m));
2258 }
2259 catch (std::exception const& e)
2260 {
2261 JLOG(p_journal_.warn()) << "ValidatorListCollection: Exception, "
2262 << e.what() << " from peer " << remote_address_;
2263 using namespace std::string_literals;
2264 fee_.update(Resource::feeInvalidData, e.what());
2265 }
2266}
2267
2268void
2270{
2271 if (m->validation().size() < 50)
2272 {
2273 JLOG(p_journal_.warn()) << "Validation: Too small";
2274 fee_.update(Resource::feeMalformedRequest, "too small");
2275 return;
2276 }
2277
2278 try
2279 {
2280 auto const closeTime = app_.timeKeeper().closeTime();
2281
2283 {
2284 SerialIter sit(makeSlice(m->validation()));
2285 val = std::make_shared<STValidation>(
2286 std::ref(sit),
2287 [this](PublicKey const& pk) {
2288 return calcNodeID(
2289 app_.validatorManifests().getMasterKey(pk));
2290 },
2291 false);
2292 val->setSeen(closeTime);
2293 }
2294
2295 if (!isCurrent(
2296 app_.getValidations().parms(),
2297 app_.timeKeeper().closeTime(),
2298 val->getSignTime(),
2299 val->getSeenTime()))
2300 {
2301 JLOG(p_journal_.trace()) << "Validation: Not current";
2302 fee_.update(Resource::feeUselessData, "not current");
2303 return;
2304 }
2305
2306 // RH TODO: when isTrusted = false we should probably also cache a key
2307 // suppression for 30 seconds to avoid doing a relatively expensive
2308 // lookup every time a spam packet is received
2309 auto const isTrusted =
2310 app_.validators().trusted(val->getSignerPublic());
2311
2312 // If the operator has specified that untrusted validations be dropped
2313 // then this happens here, i.e. before wasting further CPU verifying the
2314 // signature of an untrusted key.
2315 if (!isTrusted && app_.config().RELAY_UNTRUSTED_VALIDATIONS == -1)
2316 return;
2317
2318 auto key = sha512Half(makeSlice(m->validation()));
2319
2320 if (auto [added, relayed] =
2321 app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);
2322 !added)
2323 {
2324            // Count unique messages (Slots has its own 'HashRouter') that a
2325            // peer receives within IDLED seconds of the message having been
2326 // relayed. Wait WAIT_ON_BOOTUP time to let the server establish
2327 // connections to peers.
2328 if (reduceRelayReady() && relayed &&
2329 (stopwatch().now() - *relayed) < reduce_relay::IDLED)
2330 overlay_.updateSlotAndSquelch(
2331 key, val->getSignerPublic(), id_, protocol::mtVALIDATION);
2332 JLOG(p_journal_.trace()) << "Validation: duplicate";
2333 return;
2334 }
2335
2336 if (!isTrusted && (tracking_.load() == Tracking::diverged))
2337 {
2338 JLOG(p_journal_.debug())
2339 << "Dropping untrusted validation from diverged peer";
2340 }
2341 else if (isTrusted || !app_.getFeeTrack().isLoadedLocal())
2342 {
2343 std::string const name = [isTrusted, val]() {
2344 std::string ret =
2345 isTrusted ? "Trusted validation" : "Untrusted validation";
2346
2347#ifdef DEBUG
2348 ret += " " +
2349 std::to_string(val->getFieldU32(sfLedgerSequence)) + ": " +
2350 to_string(val->getNodeID());
2351#endif
2352
2353 return ret;
2354 }();
2355
2356 std::weak_ptr<PeerImp> weak = shared_from_this();
2357 app_.getJobQueue().addJob(
2358 isTrusted ? jtVALIDATION_t : jtVALIDATION_ut,
2359 name,
2360 [weak, val, m, key]() {
2361 if (auto peer = weak.lock())
2362 peer->checkValidation(val, key, m);
2363 });
2364 }
2365 else
2366 {
2367 JLOG(p_journal_.debug())
2368 << "Dropping untrusted validation for load";
2369 }
2370 }
2371 catch (std::exception const& e)
2372 {
2373 JLOG(p_journal_.warn())
2374 << "Exception processing validation: " << e.what();
2375 using namespace std::string_literals;
2376 fee_.update(Resource::feeMalformedRequest, e.what());
2377 }
2378}
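// --- Illustrative sketch (editor's addition, not part of PeerImp.cpp) ---
// A minimal, standard-library-only model of the suppress-then-queue pattern
// used by the validation handler above: the first arrival of a message,
// keyed by its hash, wins; duplicates are dropped; the expensive signature
// work is deferred to a worker. All names below are hypothetical.
#include <future>
#include <mutex>
#include <string>
#include <unordered_set>

class SuppressionTable
{
    std::mutex mutex_;
    std::unordered_set<std::string> seen_;

public:
    // Returns true only for the first caller presenting this key.
    bool
    add(std::string const& key)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        return seen_.insert(key).second;
    }
};

int
main()
{
    SuppressionTable suppressed;
    auto const key = std::string{"sha512half-of-validation-blob"};

    if (suppressed.add(key))
    {
        // Only the first delivery reaches the (hypothetical) expensive check.
        auto done = std::async(std::launch::async, [&key] {
            /* verify signature, then hand off to consensus */
            (void)key;
        });
        done.wait();
    }
}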
2379
2380void
2381PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
2382{
2383 protocol::TMGetObjectByHash& packet = *m;
2384
2385 JLOG(p_journal_.trace()) << "received TMGetObjectByHash " << packet.type()
2386 << " " << packet.objects_size();
2387
2388 if (packet.query())
2389 {
2390 // this is a query
2391 if (send_queue_.size() >= Tuning::dropSendQueue)
2392 {
2393 JLOG(p_journal_.debug()) << "GetObject: Large send queue";
2394 return;
2395 }
2396
2397 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2398 {
2399 doFetchPack(m);
2400 return;
2401 }
2402
2403 if (packet.type() == protocol::TMGetObjectByHash::otTRANSACTIONS)
2404 {
2405 if (!txReduceRelayEnabled())
2406 {
2407 JLOG(p_journal_.error())
2408 << "TMGetObjectByHash: tx reduce-relay is disabled";
2409 fee_.update(Resource::feeMalformedRequest, "disabled");
2410 return;
2411 }
2412
2413 std::weak_ptr<PeerImp> weak = shared_from_this();
2414 app_.getJobQueue().addJob(
2415 jtREQUESTED_TXN, "doTransactions", [weak, m]() {
2416 if (auto peer = weak.lock())
2417 peer->doTransactions(m);
2418 });
2419 return;
2420 }
2421
2422 protocol::TMGetObjectByHash reply;
2423
2424 reply.set_query(false);
2425
2426 if (packet.has_seq())
2427 reply.set_seq(packet.seq());
2428
2429 reply.set_type(packet.type());
2430
2431 if (packet.has_ledgerhash())
2432 {
2433 if (!stringIsUint256Sized(packet.ledgerhash()))
2434 {
2435 fee_.update(Resource::feeMalformedRequest, "ledger hash");
2436 return;
2437 }
2438
2439 reply.set_ledgerhash(packet.ledgerhash());
2440 }
2441
2442 fee_.update(
2443 Resource::feeModerateBurdenPeer,
2444 " received a get object by hash request");
2445
2446 // This is a very minimal implementation
2447 for (int i = 0; i < packet.objects_size(); ++i)
2448 {
2449 auto const& obj = packet.objects(i);
2450 if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2451 {
2452 uint256 const hash{obj.hash()};
2453                // VFALCO TODO Move this someplace more sensible so we don't
2454 // need to inject the NodeStore interfaces.
2455 std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
2456 auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
2457 if (nodeObject)
2458 {
2459 protocol::TMIndexedObject& newObj = *reply.add_objects();
2460 newObj.set_hash(hash.begin(), hash.size());
2461 newObj.set_data(
2462 &nodeObject->getData().front(),
2463 nodeObject->getData().size());
2464
2465 if (obj.has_nodeid())
2466 newObj.set_index(obj.nodeid());
2467 if (obj.has_ledgerseq())
2468 newObj.set_ledgerseq(obj.ledgerseq());
2469
2470 // VFALCO NOTE "seq" in the message is obsolete
2471 }
2472 }
2473 }
2474
2475 JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size() << " of "
2476 << packet.objects_size();
2477 send(std::make_shared<Message>(reply, protocol::mtGET_OBJECTS));
2478 }
2479 else
2480 {
2481 // this is a reply
2482 std::uint32_t pLSeq = 0;
2483 bool pLDo = true;
2484 bool progress = false;
2485
2486 for (int i = 0; i < packet.objects_size(); ++i)
2487 {
2488 const protocol::TMIndexedObject& obj = packet.objects(i);
2489
2490 if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2491 {
2492 if (obj.has_ledgerseq())
2493 {
2494 if (obj.ledgerseq() != pLSeq)
2495 {
2496 if (pLDo && (pLSeq != 0))
2497 {
2498 JLOG(p_journal_.debug())
2499 << "GetObj: Full fetch pack for " << pLSeq;
2500 }
2501 pLSeq = obj.ledgerseq();
2502 pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);
2503
2504 if (!pLDo)
2505 {
2506 JLOG(p_journal_.debug())
2507 << "GetObj: Late fetch pack for " << pLSeq;
2508 }
2509 else
2510 progress = true;
2511 }
2512 }
2513
2514 if (pLDo)
2515 {
2516 uint256 const hash{obj.hash()};
2517
2518 app_.getLedgerMaster().addFetchPack(
2519 hash,
2520 std::make_shared<Blob>(
2521 obj.data().begin(), obj.data().end()));
2522 }
2523 }
2524 }
2525
2526 if (pLDo && (pLSeq != 0))
2527 {
2528 JLOG(p_journal_.debug())
2529 << "GetObj: Partial fetch pack for " << pLSeq;
2530 }
2531 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2532 app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
2533 }
2534}
2535
2536void
2537PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactions> const& m)
2538{
2539 if (!txReduceRelayEnabled())
2540 {
2541 JLOG(p_journal_.error())
2542 << "TMHaveTransactions: tx reduce-relay is disabled";
2543 fee_.update(Resource::feeMalformedRequest, "disabled");
2544 return;
2545 }
2546
2547 std::weak_ptr<PeerImp> weak = shared_from_this();
2548 app_.getJobQueue().addJob(
2549 jtMISSING_TXN, "handleHaveTransactions", [weak, m]() {
2550 if (auto peer = weak.lock())
2551 peer->handleHaveTransactions(m);
2552 });
2553}
2554
2555void
2556PeerImp::handleHaveTransactions(
2557    std::shared_ptr<protocol::TMHaveTransactions> const& m)
2558{
2559 protocol::TMGetObjectByHash tmBH;
2560 tmBH.set_type(protocol::TMGetObjectByHash_ObjectType_otTRANSACTIONS);
2561 tmBH.set_query(true);
2562
2563 JLOG(p_journal_.trace())
2564 << "received TMHaveTransactions " << m->hashes_size();
2565
2566 for (std::uint32_t i = 0; i < m->hashes_size(); i++)
2567 {
2568 if (!stringIsUint256Sized(m->hashes(i)))
2569 {
2570 JLOG(p_journal_.error())
2571 << "TMHaveTransactions with invalid hash size";
2572 fee_.update(Resource::feeMalformedRequest, "hash size");
2573 return;
2574 }
2575
2576 uint256 hash(m->hashes(i));
2577
2578 auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2579
2580 JLOG(p_journal_.trace()) << "checking transaction " << (bool)txn;
2581
2582 if (!txn)
2583 {
2584 JLOG(p_journal_.debug()) << "adding transaction to request";
2585
2586 auto obj = tmBH.add_objects();
2587 obj->set_hash(hash.data(), hash.size());
2588 }
2589 else
2590 {
2591 // Erase only if a peer has seen this tx. If the peer has not
2592            // seen this tx then the tx could not have been queued for this
2593 // peer.
2594 removeTxQueue(hash);
2595 }
2596 }
2597
2598 JLOG(p_journal_.trace())
2599 << "transaction request object is " << tmBH.objects_size();
2600
2601 if (tmBH.objects_size() > 0)
2602 send(std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS));
2603}
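// --- Illustrative sketch (editor's addition, not part of PeerImp.cpp) ---
// Standard-library model of the pattern in handleHaveTransactions above:
// look each announced hash up in a local cache and batch-request only the
// ones we do not already have. Names are hypothetical, not rippled APIs.
#include <string>
#include <unordered_map>
#include <vector>

std::vector<std::string>
hashesToRequest(
    std::unordered_map<std::string, std::string> const& txCache,
    std::vector<std::string> const& announced)
{
    std::vector<std::string> request;
    for (auto const& hash : announced)
    {
        if (txCache.find(hash) == txCache.end())
            request.push_back(hash);  // unknown: ask the announcing peer
        // else: already cached, nothing to request (the real code also
        // clears the hash from this peer's outbound tx queue here)
    }
    return request;
}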
2604
2605void
2606PeerImp::onMessage(std::shared_ptr<protocol::TMTransactions> const& m)
2607{
2608 if (!txReduceRelayEnabled())
2609 {
2610 JLOG(p_journal_.error())
2611 << "TMTransactions: tx reduce-relay is disabled";
2612 fee_.update(Resource::feeMalformedRequest, "disabled");
2613 return;
2614 }
2615
2616 JLOG(p_journal_.trace())
2617 << "received TMTransactions " << m->transactions_size();
2618
2619 overlay_.addTxMetrics(m->transactions_size());
2620
2621 for (std::uint32_t i = 0; i < m->transactions_size(); ++i)
2622 handleTransaction(
2623            std::shared_ptr<protocol::TMTransaction>(
2624                m->mutable_transactions(i), [](protocol::TMTransaction*) {}),
2625 false,
2626 true);
2627}
2628
2629void
2630PeerImp::onMessage(std::shared_ptr<protocol::TMSquelch> const& m)
2631{
2632 using on_message_fn =
2633        void (PeerImp::*)(std::shared_ptr<protocol::TMSquelch> const&);
2634    if (!strand_.running_in_this_thread())
2635 return post(
2636 strand_,
2637 std::bind(
2638 (on_message_fn)&PeerImp::onMessage, shared_from_this(), m));
2639
2640 if (!m->has_validatorpubkey())
2641 {
2642 fee_.update(Resource::feeInvalidData, "squelch no pubkey");
2643 return;
2644 }
2645 auto validator = m->validatorpubkey();
2646 auto const slice{makeSlice(validator)};
2647 if (!publicKeyType(slice))
2648 {
2649 fee_.update(Resource::feeInvalidData, "squelch bad pubkey");
2650 return;
2651 }
2652 PublicKey key(slice);
2653
2654 // Ignore non-validator squelch
2655 if (!app_.validators().listed(key))
2656 {
2657 fee_.update(Resource::feeInvalidData, "squelch non-validator");
2658 JLOG(p_journal_.debug())
2659 << "onMessage: TMSquelch discarding non-validator squelch "
2660 << slice;
2661 return;
2662 }
2663
2664 // Ignore the squelch for validator's own messages.
2665 if (key == app_.getValidationPublicKey())
2666 {
2667 JLOG(p_journal_.debug())
2668 << "onMessage: TMSquelch discarding validator's squelch " << slice;
2669 return;
2670 }
2671
2672 std::uint32_t duration =
2673 m->has_squelchduration() ? m->squelchduration() : 0;
2674 if (!m->squelch())
2675 squelch_.removeSquelch(key);
2676 else if (!squelch_.addSquelch(key, std::chrono::seconds{duration}))
2677 fee_.update(Resource::feeInvalidData, "squelch duration");
2678
2679 JLOG(p_journal_.debug())
2680 << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;
2681}
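// --- Illustrative sketch (editor's addition, not part of PeerImp.cpp) ---
// Minimal Boost.Asio model of the re-dispatch at the top of the TMSquelch
// handler above: if the caller is not already running on the peer's strand,
// the same call is posted to the strand so all per-peer state is only ever
// touched from that strand. The free function below is hypothetical.
#include <boost/asio.hpp>
#include <iostream>

using strand_type = boost::asio::strand<boost::asio::io_context::executor_type>;

void
onSquelchLike(strand_type& strand, int value)
{
    if (!strand.running_in_this_thread())
    {
        // Not on the strand yet: repost ourselves and return immediately.
        boost::asio::post(
            strand, [&strand, value] { onSquelchLike(strand, value); });
        return;
    }
    std::cout << "processed " << value << " on the strand\n";
}

int
main()
{
    boost::asio::io_context ioc;
    strand_type strand{ioc.get_executor()};
    onSquelchLike(strand, 42);  // reposts, since main() is not on the strand
    ioc.run();                  // runs the posted call on the strand
}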
2682
2683//--------------------------------------------------------------------------
2684
2685void
2686PeerImp::addLedger(
2687 uint256 const& hash,
2688 std::lock_guard<std::mutex> const& lockedRecentLock)
2689{
2690 // lockedRecentLock is passed as a reminder that recentLock_ must be
2691 // locked by the caller.
2692 (void)lockedRecentLock;
2693
2694 if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
2695 recentLedgers_.end())
2696 return;
2697
2698 recentLedgers_.push_back(hash);
2699}
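// --- Illustrative sketch (editor's addition, not part of PeerImp.cpp) ---
// Self-contained model of the idiom used by addLedger above: taking an
// unused std::lock_guard reference forces every caller to prove, at compile
// time and at zero runtime cost, that it already holds the right mutex.
// The class and member names below are hypothetical.
#include <mutex>
#include <vector>

class RecentHashes
{
    std::mutex mutex_;
    std::vector<int> recent_;

    // May only be called with mutex_ held; the parameter documents and
    // enforces that contract at each call site.
    void
    addLocked(int value, std::lock_guard<std::mutex> const&)
    {
        recent_.push_back(value);
    }

public:
    void
    add(int value)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        addLocked(value, lock);
    }
};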
2700
2701void
2702PeerImp::doFetchPack(const std::shared_ptr<protocol::TMGetObjectByHash>& packet)
2703{
2704 // VFALCO TODO Invert this dependency using an observer and shared state
2705 // object. Don't queue fetch pack jobs if we're under load or we already
2706 // have some queued.
2707 if (app_.getFeeTrack().isLoadedLocal() ||
2708 (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
2709 (app_.getJobQueue().getJobCount(jtPACK) > 10))
2710 {
2711 JLOG(p_journal_.info()) << "Too busy to make fetch pack";
2712 return;
2713 }
2714
2715 if (!stringIsUint256Sized(packet->ledgerhash()))
2716 {
2717 JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
2718 fee_.update(Resource::feeMalformedRequest, "hash size");
2719 return;
2720 }
2721
2722 fee_.fee = Resource::feeHeavyBurdenPeer;
2723
2724 uint256 const hash{packet->ledgerhash()};
2725
2726 std::weak_ptr<PeerImp> weak = shared_from_this();
2727 auto elapsed = UptimeClock::now();
2728 auto const pap = &app_;
2729 app_.getJobQueue().addJob(
2730 jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed]() {
2731 pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
2732 });
2733}
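// --- Illustrative sketch (editor's addition, not part of PeerImp.cpp) ---
// Standard-library model of the weak_ptr capture used when queueing the
// fetch-pack job above: the queued work holds only a weak reference to the
// peer-like object, so the object can be destroyed (e.g. on disconnect)
// while jobs are still pending, and those jobs then become no-ops.
#include <functional>
#include <iostream>
#include <memory>
#include <vector>

struct Worker : std::enable_shared_from_this<Worker>
{
    void
    doWork() const
    {
        std::cout << "job ran while the worker was still alive\n";
    }
};

int
main()
{
    std::vector<std::function<void()>> jobQueue;

    auto worker = std::make_shared<Worker>();
    std::weak_ptr<Worker> weak = worker;
    jobQueue.push_back([weak] {
        if (auto w = weak.lock())  // skip silently if the worker is gone
            w->doWork();
    });

    // worker.reset();  // uncommenting this would turn the job into a no-op
    for (auto const& job : jobQueue)
        job();
}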
2734
2735void
2736PeerImp::doTransactions(
2737    std::shared_ptr<protocol::TMGetObjectByHash> const& packet)
2738{
2739 protocol::TMTransactions reply;
2740
2741 JLOG(p_journal_.trace()) << "received TMGetObjectByHash requesting tx "
2742 << packet->objects_size();
2743
2744 if (packet->objects_size() > reduce_relay::MAX_TX_QUEUE_SIZE)
2745 {
2746 JLOG(p_journal_.error()) << "doTransactions, invalid number of hashes";
2747 fee_.update(Resource::feeMalformedRequest, "too big");
2748 return;
2749 }
2750
2751 for (std::uint32_t i = 0; i < packet->objects_size(); ++i)
2752 {
2753 auto const& obj = packet->objects(i);
2754
2755 if (!stringIsUint256Sized(obj.hash()))
2756 {
2757 fee_.update(Resource::feeMalformedRequest, "hash size");
2758 return;
2759 }
2760
2761 uint256 hash(obj.hash());
2762
2763 auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2764
2765 if (!txn)
2766 {
2767 JLOG(p_journal_.error()) << "doTransactions, transaction not found "
2768 << Slice(hash.data(), hash.size());
2769 fee_.update(Resource::feeMalformedRequest, "tx not found");
2770 return;
2771 }
2772
2773 Serializer s;
2774 auto tx = reply.add_transactions();
2775 auto sttx = txn->getSTransaction();
2776 sttx->add(s);
2777 tx->set_rawtransaction(s.data(), s.size());
2778 tx->set_status(
2779 txn->getStatus() == INCLUDED ? protocol::tsCURRENT
2780 : protocol::tsNEW);
2781 tx->set_receivetimestamp(
2782 app_.timeKeeper().now().time_since_epoch().count());
2783 tx->set_deferred(txn->getSubmitResult().queued);
2784 }
2785
2786 if (reply.transactions_size() > 0)
2787 send(std::make_shared<Message>(reply, protocol::mtTRANSACTIONS));
2788}
2789
2790void
2791PeerImp::checkTransaction(
2792 int flags,
2793 bool checkSignature,
2794 std::shared_ptr<STTx const> const& stx,
2795 bool batch)
2796{
2797 // VFALCO TODO Rewrite to not use exceptions
2798 try
2799 {
2800 // Expired?
2801 if (stx->isFieldPresent(sfLastLedgerSequence) &&
2802 (stx->getFieldU32(sfLastLedgerSequence) <
2803 app_.getLedgerMaster().getValidLedgerIndex()))
2804 {
2805 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2806 charge(Resource::feeUselessData, "expired tx");
2807 return;
2808 }
2809
2810 if (isPseudoTx(*stx))
2811 {
2812 // Don't do anything with pseudo transactions except put them in the
2813 // TransactionMaster cache
2814 std::string reason;
2815 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2816 XRPL_ASSERT(
2817 tx->getStatus() == NEW,
2818 "ripple::PeerImp::checkTransaction Transaction created "
2819 "correctly");
2820 if (tx->getStatus() == NEW)
2821 {
2822 JLOG(p_journal_.debug())
2823 << "Processing " << (batch ? "batch" : "unsolicited")
2824 << " pseudo-transaction tx " << tx->getID();
2825
2826 app_.getMasterTransaction().canonicalize(&tx);
2827 // Tell the overlay about it, but don't relay it.
2828 auto const toSkip =
2829 app_.getHashRouter().shouldRelay(tx->getID());
2830 if (toSkip)
2831 {
2832 JLOG(p_journal_.debug())
2833                        << "Passing skipped pseudo-transaction tx "
2834 << tx->getID();
2835 app_.overlay().relay(tx->getID(), {}, *toSkip);
2836 }
2837 if (!batch)
2838 {
2839 JLOG(p_journal_.debug())
2840 << "Charging for pseudo-transaction tx " << tx->getID();
2841 charge(Resource::feeUselessData, "pseudo tx");
2842 }
2843
2844 return;
2845 }
2846 }
2847
2848 if (checkSignature)
2849 {
2850 // Check the signature before handing off to the job queue.
2851 if (auto [valid, validReason] = checkValidity(
2852 app_.getHashRouter(),
2853 *stx,
2854 app_.getLedgerMaster().getValidatedRules(),
2855 app_.config());
2856 valid != Validity::Valid)
2857 {
2858 if (!validReason.empty())
2859 {
2860 JLOG(p_journal_.trace())
2861 << "Exception checking transaction: " << validReason;
2862 }
2863
2864 // Probably not necessary to set SF_BAD, but doesn't hurt.
2865 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2866 charge(
2867 Resource::feeInvalidSignature,
2868 "check transaction signature failure");
2869 return;
2870 }
2871 }
2872 else
2873 {
2874            forceValidity(
2875                app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);
2876 }
2877
2878 std::string reason;
2879 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2880
2881 if (tx->getStatus() == INVALID)
2882 {
2883 if (!reason.empty())
2884 {
2885 JLOG(p_journal_.trace())
2886 << "Exception checking transaction: " << reason;
2887 }
2888 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2889 charge(Resource::feeInvalidSignature, "tx (impossible)");
2890 return;
2891 }
2892
2893 bool const trusted(flags & SF_TRUSTED);
2894 app_.getOPs().processTransaction(
2895 tx, trusted, false, NetworkOPs::FailHard::no);
2896 }
2897 catch (std::exception const& ex)
2898 {
2899 JLOG(p_journal_.warn())
2900 << "Exception in " << __func__ << ": " << ex.what();
2901 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2902 using namespace std::string_literals;
2903 charge(Resource::feeInvalidData, "tx "s + ex.what());
2904 }
2905}
2906
2907// Called from our JobQueue
2908void
2909PeerImp::checkPropose(
2910 bool isTrusted,
2911    std::shared_ptr<protocol::TMProposeSet> const& packet,
2912    RCLCxPeerPos peerPos)
2913{
2914 JLOG(p_journal_.trace())
2915 << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";
2916
2917 XRPL_ASSERT(packet, "ripple::PeerImp::checkPropose : non-null packet");
2918
2919 if (!cluster() && !peerPos.checkSign())
2920 {
2921 std::string desc{"Proposal fails sig check"};
2922 JLOG(p_journal_.warn()) << desc;
2923 charge(Resource::feeInvalidSignature, desc);
2924 return;
2925 }
2926
2927 bool relay;
2928
2929 if (isTrusted)
2930 relay = app_.getOPs().processTrustedProposal(peerPos);
2931 else
2932 relay = app_.config().RELAY_UNTRUSTED_PROPOSALS == 1 || cluster();
2933
2934 if (relay)
2935 {
2936        // haveMessage contains the peers that are suppressed, i.e. the
2937        // peers that were the source of this message. The message should
2938        // not be relayed back to them, but it must still be counted as
2939        // part of the squelch logic.
2940 auto haveMessage = app_.overlay().relay(
2941 *packet, peerPos.suppressionID(), peerPos.publicKey());
2942 if (reduceRelayReady() && !haveMessage.empty())
2943 overlay_.updateSlotAndSquelch(
2944 peerPos.suppressionID(),
2945 peerPos.publicKey(),
2946 std::move(haveMessage),
2947 protocol::mtPROPOSE_LEDGER);
2948 }
2949}
2950
2951void
2952PeerImp::checkValidation(
2953    std::shared_ptr<STValidation> const& val,
2954    uint256 const& key,
2955    std::shared_ptr<protocol::TMValidation> const& packet)
2956{
2957 if (!val->isValid())
2958 {
2959 std::string desc{"Validation forwarded by peer is invalid"};
2960 JLOG(p_journal_.debug()) << desc;
2961 charge(Resource::feeInvalidSignature, desc);
2962 return;
2963 }
2964
2965 // FIXME it should be safe to remove this try/catch. Investigate codepaths.
2966 try
2967 {
2968 if (app_.getOPs().recvValidation(val, std::to_string(id())) ||
2969 cluster())
2970 {
2971            // haveMessage contains the peers that are suppressed, i.e. the
2972            // peers that were the source of this message. The message should
2973            // not be relayed back to them, but it must still be counted as
2974            // part of the squelch logic.
2975 auto haveMessage =
2976 overlay_.relay(*packet, key, val->getSignerPublic());
2977 if (reduceRelayReady() && !haveMessage.empty())
2978 {
2979 overlay_.updateSlotAndSquelch(
2980 key,
2981 val->getSignerPublic(),
2982 std::move(haveMessage),
2983 protocol::mtVALIDATION);
2984 }
2985 }
2986 }
2987 catch (std::exception const& ex)
2988 {
2989 JLOG(p_journal_.trace())
2990 << "Exception processing validation: " << ex.what();
2991 using namespace std::string_literals;
2992 charge(Resource::feeMalformedRequest, "validation "s + ex.what());
2993 }
2994}
2995
2996// Returns the best-scoring peer that can help us get
2997// the TX tree with the specified root hash.
2998//
2999static std::shared_ptr<PeerImp>
3000getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip)
3001{
3002    std::shared_ptr<PeerImp> ret;
3003    int retScore = 0;
3004
3005    ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
3006        if (p->hasTxSet(rootHash) && p.get() != skip)
3007 {
3008 auto score = p->getScore(true);
3009 if (!ret || (score > retScore))
3010 {
3011 ret = std::move(p);
3012 retScore = score;
3013 }
3014 }
3015 });
3016
3017 return ret;
3018}
3019
3020// Returns a random peer weighted by how likely to
3021// have the ledger and how responsive it is.
3022//
3023static std::shared_ptr<PeerImp>
3024getPeerWithLedger(
3025    OverlayImpl& ov,
3026    uint256 const& ledgerHash,
3027    LedgerIndex ledger,
3028    PeerImp const* skip)
3029{
3030    std::shared_ptr<PeerImp> ret;
3031    int retScore = 0;
3032
3033    ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
3034        if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)
3035 {
3036 auto score = p->getScore(true);
3037 if (!ret || (score > retScore))
3038 {
3039 ret = std::move(p);
3040 retScore = score;
3041 }
3042 }
3043 });
3044
3045 return ret;
3046}
3047
3048void
3049PeerImp::sendLedgerBase(
3050 std::shared_ptr<Ledger const> const& ledger,
3051 protocol::TMLedgerData& ledgerData)
3052{
3053 JLOG(p_journal_.trace()) << "sendLedgerBase: Base data";
3054
3055 Serializer s(sizeof(LedgerInfo));
3056 addRaw(ledger->info(), s);
3057 ledgerData.add_nodes()->set_nodedata(s.getDataPtr(), s.getLength());
3058
3059 auto const& stateMap{ledger->stateMap()};
3060 if (stateMap.getHash() != beast::zero)
3061 {
3062 // Return account state root node if possible
3063 Serializer root(768);
3064
3065 stateMap.serializeRoot(root);
3066 ledgerData.add_nodes()->set_nodedata(
3067 root.getDataPtr(), root.getLength());
3068
3069 if (ledger->info().txHash != beast::zero)
3070 {
3071 auto const& txMap{ledger->txMap()};
3072 if (txMap.getHash() != beast::zero)
3073 {
3074 // Return TX root node if possible
3075 root.erase();
3076 txMap.serializeRoot(root);
3077 ledgerData.add_nodes()->set_nodedata(
3078 root.getDataPtr(), root.getLength());
3079 }
3080 }
3081 }
3082
3083 auto message{
3084 std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
3085 send(message);
3086}
3087
3088std::shared_ptr<Ledger const>
3089PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
3090{
3091    JLOG(p_journal_.trace()) << "getLedger: Ledger";
3092
3093    std::shared_ptr<Ledger const> ledger;
3094
3094
3095 if (m->has_ledgerhash())
3096 {
3097 // Attempt to find ledger by hash
3098 uint256 const ledgerHash{m->ledgerhash()};
3099 ledger = app_.getLedgerMaster().getLedgerByHash(ledgerHash);
3100 if (!ledger)
3101 {
3102 JLOG(p_journal_.trace())
3103 << "getLedger: Don't have ledger with hash " << ledgerHash;
3104
3105 if (m->has_querytype() && !m->has_requestcookie())
3106 {
3107 // Attempt to relay the request to a peer
3108 if (auto const peer = getPeerWithLedger(
3109 overlay_,
3110 ledgerHash,
3111 m->has_ledgerseq() ? m->ledgerseq() : 0,
3112 this))
3113 {
3114 m->set_requestcookie(id());
3115 peer->send(
3116 std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3117 JLOG(p_journal_.debug())
3118 << "getLedger: Request relayed to peer";
3119 return ledger;
3120 }
3121
3122 JLOG(p_journal_.trace())
3123 << "getLedger: Failed to find peer to relay request";
3124 }
3125 }
3126 }
3127 else if (m->has_ledgerseq())
3128 {
3129 // Attempt to find ledger by sequence
3130 if (m->ledgerseq() < app_.getLedgerMaster().getEarliestFetch())
3131 {
3132 JLOG(p_journal_.debug())
3133 << "getLedger: Early ledger sequence request";
3134 }
3135 else
3136 {
3137 ledger = app_.getLedgerMaster().getLedgerBySeq(m->ledgerseq());
3138 if (!ledger)
3139 {
3140 JLOG(p_journal_.debug())
3141 << "getLedger: Don't have ledger with sequence "
3142 << m->ledgerseq();
3143 }
3144 }
3145 }
3146 else if (m->has_ltype() && m->ltype() == protocol::ltCLOSED)
3147 {
3148 ledger = app_.getLedgerMaster().getClosedLedger();
3149 }
3150
3151 if (ledger)
3152 {
3153 // Validate retrieved ledger sequence
3154 auto const ledgerSeq{ledger->info().seq};
3155 if (m->has_ledgerseq())
3156 {
3157 if (ledgerSeq != m->ledgerseq())
3158 {
3159 // Do not resource charge a peer responding to a relay
3160 if (!m->has_requestcookie())
3161 charge(
3162 Resource::feeMalformedRequest, "get_ledger ledgerSeq");
3163
3164 ledger.reset();
3165 JLOG(p_journal_.warn())
3166 << "getLedger: Invalid ledger sequence " << ledgerSeq;
3167 }
3168 }
3169 else if (ledgerSeq < app_.getLedgerMaster().getEarliestFetch())
3170 {
3171 ledger.reset();
3172 JLOG(p_journal_.debug())
3173 << "getLedger: Early ledger sequence request " << ledgerSeq;
3174 }
3175 }
3176 else
3177 {
3178 JLOG(p_journal_.debug()) << "getLedger: Unable to find ledger";
3179 }
3180
3181 return ledger;
3182}
3183
3184std::shared_ptr<SHAMap const>
3185PeerImp::getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const
3186{
3187 JLOG(p_journal_.trace()) << "getTxSet: TX set";
3188
3189    uint256 const txSetHash{m->ledgerhash()};
3190    std::shared_ptr<SHAMap> shaMap{
3191        app_.getInboundTransactions().getSet(txSetHash, false)};
3192 if (!shaMap)
3193 {
3194 if (m->has_querytype() && !m->has_requestcookie())
3195 {
3196 // Attempt to relay the request to a peer
3197 if (auto const peer = getPeerWithTree(overlay_, txSetHash, this))
3198 {
3199 m->set_requestcookie(id());
3200 peer->send(
3201 std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3202 JLOG(p_journal_.debug()) << "getTxSet: Request relayed";
3203 }
3204 else
3205 {
3206 JLOG(p_journal_.debug())
3207 << "getTxSet: Failed to find relay peer";
3208 }
3209 }
3210 else
3211 {
3212 JLOG(p_journal_.debug()) << "getTxSet: Failed to find TX set";
3213 }
3214 }
3215
3216 return shaMap;
3217}
3218
3219void
3220PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
3221{
3222 // Do not resource charge a peer responding to a relay
3223 if (!m->has_requestcookie())
3224 charge(
3225 Resource::feeModerateBurdenPeer, "received a get ledger request");
3226
3227    std::shared_ptr<Ledger const> ledger;
3228    std::shared_ptr<SHAMap const> sharedMap;
3229    SHAMap const* map{nullptr};
3230 protocol::TMLedgerData ledgerData;
3231 bool fatLeaves{true};
3232 auto const itype{m->itype()};
3233
3234 if (itype == protocol::liTS_CANDIDATE)
3235 {
3236 if (sharedMap = getTxSet(m); !sharedMap)
3237 return;
3238 map = sharedMap.get();
3239
3240 // Fill out the reply
3241 ledgerData.set_ledgerseq(0);
3242 ledgerData.set_ledgerhash(m->ledgerhash());
3243 ledgerData.set_type(protocol::liTS_CANDIDATE);
3244 if (m->has_requestcookie())
3245 ledgerData.set_requestcookie(m->requestcookie());
3246
3247 // We'll already have most transactions
3248 fatLeaves = false;
3249 }
3250 else
3251 {
3252 if (send_queue_.size() >= Tuning::dropSendQueue)
3253 {
3254 JLOG(p_journal_.debug())
3255 << "processLedgerRequest: Large send queue";
3256 return;
3257 }
3258 if (app_.getFeeTrack().isLoadedLocal() && !cluster())
3259 {
3260 JLOG(p_journal_.debug()) << "processLedgerRequest: Too busy";
3261 return;
3262 }
3263
3264 if (ledger = getLedger(m); !ledger)
3265 return;
3266
3267 // Fill out the reply
3268 auto const ledgerHash{ledger->info().hash};
3269 ledgerData.set_ledgerhash(ledgerHash.begin(), ledgerHash.size());
3270 ledgerData.set_ledgerseq(ledger->info().seq);
3271 ledgerData.set_type(itype);
3272 if (m->has_requestcookie())
3273 ledgerData.set_requestcookie(m->requestcookie());
3274
3275 switch (itype)
3276 {
3277 case protocol::liBASE:
3278 sendLedgerBase(ledger, ledgerData);
3279 return;
3280
3281 case protocol::liTX_NODE:
3282 map = &ledger->txMap();
3283 JLOG(p_journal_.trace()) << "processLedgerRequest: TX map hash "
3284 << to_string(map->getHash());
3285 break;
3286
3287 case protocol::liAS_NODE:
3288 map = &ledger->stateMap();
3289 JLOG(p_journal_.trace())
3290 << "processLedgerRequest: Account state map hash "
3291 << to_string(map->getHash());
3292 break;
3293
3294 default:
3295 // This case should not be possible here
3296 JLOG(p_journal_.error())
3297 << "processLedgerRequest: Invalid ledger info type";
3298 return;
3299 }
3300 }
3301
3302 if (!map)
3303 {
3304 JLOG(p_journal_.warn()) << "processLedgerRequest: Unable to find map";
3305 return;
3306 }
3307
3308 // Add requested node data to reply
3309 if (m->nodeids_size() > 0)
3310 {
3311 auto const queryDepth{
3312 m->has_querydepth() ? m->querydepth() : (isHighLatency() ? 2 : 1)};
3313
3314        std::vector<std::pair<SHAMapNodeID, Blob>> data;
3315
3316 for (int i = 0; i < m->nodeids_size() &&
3317 ledgerData.nodes_size() < Tuning::softMaxReplyNodes;
3318 ++i)
3319 {
3320 auto const shaMapNodeId{deserializeSHAMapNodeID(m->nodeids(i))};
3321
3322 data.clear();
3323 data.reserve(Tuning::softMaxReplyNodes);
3324
3325 try
3326 {
3327 if (map->getNodeFat(*shaMapNodeId, data, fatLeaves, queryDepth))
3328 {
3329 JLOG(p_journal_.trace())
3330 << "processLedgerRequest: getNodeFat got "
3331 << data.size() << " nodes";
3332
3333 for (auto const& d : data)
3334 {
3335 if (ledgerData.nodes_size() >=
3336 Tuning::hardMaxReplyNodes)
3337 break;
3338 protocol::TMLedgerNode* node{ledgerData.add_nodes()};
3339 node->set_nodeid(d.first.getRawString());
3340 node->set_nodedata(d.second.data(), d.second.size());
3341 }
3342 }
3343 else
3344 {
3345 JLOG(p_journal_.warn())
3346 << "processLedgerRequest: getNodeFat returns false";
3347 }
3348 }
3349 catch (std::exception const& e)
3350 {
3351 std::string info;
3352 switch (itype)
3353 {
3354 case protocol::liBASE:
3355 // This case should not be possible here
3356 info = "Ledger base";
3357 break;
3358
3359 case protocol::liTX_NODE:
3360 info = "TX node";
3361 break;
3362
3363 case protocol::liAS_NODE:
3364 info = "AS node";
3365 break;
3366
3367 case protocol::liTS_CANDIDATE:
3368 info = "TS candidate";
3369 break;
3370
3371 default:
3372 info = "Invalid";
3373 break;
3374 }
3375
3376 if (!m->has_ledgerhash())
3377 info += ", no hash specified";
3378
3379 JLOG(p_journal_.error())
3380 << "processLedgerRequest: getNodeFat with nodeId "
3381 << *shaMapNodeId << " and ledger info type " << info
3382 << " throws exception: " << e.what();
3383 }
3384 }
3385
3386 JLOG(p_journal_.info())
3387 << "processLedgerRequest: Got request for " << m->nodeids_size()
3388 << " nodes at depth " << queryDepth << ", return "
3389 << ledgerData.nodes_size() << " nodes";
3390 }
3391
3392 if (ledgerData.nodes_size() == 0)
3393 return;
3394
3395 send(std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA));
3396}
3397
3398int
3399PeerImp::getScore(bool haveItem) const
3400{
3401 // Random component of score, used to break ties and avoid
3402 // overloading the "best" peer
3403 static const int spRandomMax = 9999;
3404
3405 // Score for being very likely to have the thing we are
3406    // looking for; should be roughly spRandomMax
3407 static const int spHaveItem = 10000;
3408
3409 // Score reduction for each millisecond of latency; should
3410 // be roughly spRandomMax divided by the maximum reasonable
3411 // latency
3412 static const int spLatency = 30;
3413
3414 // Penalty for unknown latency; should be roughly spRandomMax
3415 static const int spNoLatency = 8000;
3416
3417 int score = rand_int(spRandomMax);
3418
3419 if (haveItem)
3420 score += spHaveItem;
3421
3422    std::optional<std::chrono::milliseconds> latency;
3423    {
3424 std::lock_guard sl(recentLock_);
3425 latency = latency_;
3426 }
3427
3428 if (latency)
3429 score -= latency->count() * spLatency;
3430 else
3431 score -= spNoLatency;
3432
3433 return score;
3434}
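// --- Illustrative sketch (editor's addition, not part of PeerImp.cpp) ---
// Standalone restatement of the weighting implemented by getScore above:
// a random tie-breaker in [0, 9999], a +10000 bonus when the peer is known
// to have the item, and a penalty of 30 per millisecond of latency (or a
// flat 8000 when latency is unknown). The function name is hypothetical.
#include <chrono>
#include <optional>
#include <random>

int
peerScoreSketch(bool haveItem, std::optional<std::chrono::milliseconds> latency)
{
    static std::mt19937 gen{std::random_device{}()};
    std::uniform_int_distribution<int> tieBreak(0, 9999);

    int score = tieBreak(gen);
    if (haveItem)
        score += 10000;
    if (latency)
        score -= static_cast<int>(latency->count()) * 30;
    else
        score -= 8000;
    return score;
}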
3435
3436bool
3437PeerImp::isHighLatency() const
3438{
3439 std::lock_guard sl(recentLock_);
3440 return latency_ >= peerHighLatency;
3441}
3442
3443bool
3444PeerImp::reduceRelayReady()
3445{
3446 if (!reduceRelayReady_)
3447 reduceRelayReady_ =
3448 reduce_relay::epoch<std::chrono::minutes>(UptimeClock::now()) >
3449 reduce_relay::WAIT_ON_BOOTUP;
3450 return vpReduceRelayEnabled_ && reduceRelayReady_;
3451}
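// --- Illustrative sketch (editor's addition, not part of PeerImp.cpp) ---
// Minimal model of the bootup gate in reduceRelayReady above: the feature
// only switches on once the process has been up longer than a configured
// number of minutes, and once true it stays true. Names are hypothetical.
#include <chrono>

class BootupGate
{
    std::chrono::steady_clock::time_point const start_ =
        std::chrono::steady_clock::now();
    std::chrono::minutes const wait_;
    bool ready_ = false;

public:
    explicit BootupGate(std::chrono::minutes wait) : wait_(wait)
    {
    }

    bool
    ready()
    {
        if (!ready_)
            ready_ = (std::chrono::steady_clock::now() - start_) > wait_;
        return ready_;
    }
};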
3452
3453void
3454PeerImp::Metrics::add_message(std::uint64_t bytes)
3455{
3456 using namespace std::chrono_literals;
3457 std::unique_lock lock{mutex_};
3458
3459 totalBytes_ += bytes;
3460 accumBytes_ += bytes;
3461 auto const timeElapsed = clock_type::now() - intervalStart_;
3462 auto const timeElapsedInSecs =
3463 std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);
3464
3465 if (timeElapsedInSecs >= 1s)
3466 {
3467 auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
3468 rollingAvg_.push_back(avgBytes);
3469
3470 auto const totalBytes =
3471 std::accumulate(rollingAvg_.begin(), rollingAvg_.end(), 0ull);
3472 rollingAvgBytes_ = totalBytes / rollingAvg_.size();
3473
3474 intervalStart_ = clock_type::now();
3475 accumBytes_ = 0;
3476 }
3477}
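// --- Illustrative sketch (editor's addition, not part of PeerImp.cpp) ---
// Standard-library model of the rolling byte-rate computed by add_message
// above: once at least one second has elapsed, the bytes accumulated in the
// interval are pushed into a fixed-size window (boost::circular_buffer in
// the real code) and the average of the window is reported. The window size
// below is an arbitrary assumption.
#include <cstddef>
#include <cstdint>
#include <deque>
#include <numeric>

class RollingRateSketch
{
    static constexpr std::size_t windowSize = 30;  // intervals kept (assumed)
    std::deque<std::uint64_t> samples_;

public:
    // Push one completed interval's byte count and return the new average.
    std::uint64_t
    push(std::uint64_t bytesInInterval)
    {
        samples_.push_back(bytesInInterval);
        if (samples_.size() > windowSize)
            samples_.pop_front();
        auto const total =
            std::accumulate(samples_.begin(), samples_.end(), 0ull);
        return total / samples_.size();
    }
};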
3478
3479std::uint64_t
3480PeerImp::Metrics::average_bytes() const
3481{
3482 std::shared_lock lock{mutex_};
3483 return rollingAvgBytes_;
3484}
3485
3486std::uint64_t
3487PeerImp::Metrics::total_bytes() const
3488{
3489 std::shared_lock lock{mutex_};
3490 return totalBytes_;
3491}
3492
3493} // namespace ripple