// PeerImp.cpp — the five lines previously here ("rippled", "Loading...",
// "Searching...", "No Matches", "PeerImp.cpp") were documentation-viewer
// artifacts, not source code; converted to this comment so the file parses.
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLValidations.h>
21#include <xrpld/app/ledger/InboundLedgers.h>
22#include <xrpld/app/ledger/InboundTransactions.h>
23#include <xrpld/app/ledger/LedgerMaster.h>
24#include <xrpld/app/ledger/TransactionMaster.h>
25#include <xrpld/app/misc/HashRouter.h>
26#include <xrpld/app/misc/LoadFeeTrack.h>
27#include <xrpld/app/misc/NetworkOPs.h>
28#include <xrpld/app/misc/Transaction.h>
29#include <xrpld/app/misc/ValidatorList.h>
30#include <xrpld/app/tx/apply.h>
31#include <xrpld/overlay/Cluster.h>
32#include <xrpld/overlay/detail/PeerImp.h>
33#include <xrpld/overlay/detail/Tuning.h>
34#include <xrpld/perflog/PerfLog.h>
35
36#include <xrpl/basics/UptimeClock.h>
37#include <xrpl/basics/base64.h>
38#include <xrpl/basics/random.h>
39#include <xrpl/basics/safe_cast.h>
40#include <xrpl/protocol/TxFlags.h>
41#include <xrpl/protocol/digest.h>
42
43#include <boost/algorithm/string/predicate.hpp>
44#include <boost/beast/core/ostream.hpp>
45
46#include <algorithm>
47#include <memory>
48#include <mutex>
49#include <numeric>
50#include <sstream>
51
52using namespace std::chrono_literals;
53
54namespace ripple {
55
namespace {

// A connection whose measured round-trip latency exceeds this value is
// treated as a high-latency peer.
std::chrono::milliseconds constexpr peerHighLatency{300};

// How often the per-peer maintenance timer (ping / health checks) fires.
std::chrono::seconds constexpr peerTimerInterval{60};

}  // namespace
63
// TODO: Remove this exclusion once unit tests are added after the hotfix
// release.

// Constructor for an inbound peer connection: adopts the already-handshaken
// stream, records negotiated per-peer features, and wires up logging sinks.
// NOTE(review): this capture is missing several source lines (the
// "PeerImp::PeerImp(" signature and some parameter/argument lines); gaps are
// flagged below — verify against the upstream file.
    Application& app,
    id_t id,
    // [capture gap: stream_ptr parameter line]
    http_request_type&& request,
    PublicKey const& publicKey,
    // [capture gap: parameter line]
    Resource::Consumer consumer,
    // [capture gap: slot parameter line]
    OverlayImpl& overlay)
    : Child(overlay)
    , app_(app)
    , id_(id)
    // Both journal sinks share a "[nnn] " prefix so every log line can be
    // attributed to this peer.
    , sink_(app_.journal("Peer"), makePrefix(id))
    , p_sink_(app_.journal("Protocol"), makePrefix(id))
    , journal_(sink_)
    , p_journal_(p_sink_)
    , stream_ptr_(std::move(stream_ptr))
    , socket_(stream_ptr_->next_layer().socket())
    , stream_(*stream_ptr_)
    , strand_(socket_.get_executor())
    , timer_(waitable_timer{socket_.get_executor()})
    , remote_address_(slot->remote_endpoint())
    , overlay_(overlay)
    , inbound_(true)  // this overload constructs inbound peers
    , protocol_(protocol)
    , tracking_(Tracking::unknown)
    , trackingTime_(clock_type::now())
    , publicKey_(publicKey)
    , lastPingTime_(clock_type::now())
    , creationTime_(clock_type::now())
    , squelch_(app_.journal("Squelch"))
    , usage_(consumer)
    , fee_{Resource::feeTrivialPeer, ""}
    , slot_(slot)
    , request_(std::move(request))
    , headers_(request_)
    // Compression negotiated via handshake headers, gated by configuration.
    , compressionEnabled_(
          // [capture gap: presumably a peerFeatureEnabled( call line]
          headers_,
          // [capture gap: argument line]
          "lz4",
          app_.config().COMPRESSION)
          ? Compressed::On
          : Compressed::Off)
    , txReduceRelayEnabled_(peerFeatureEnabled(
          headers_,
          // [capture gap: feature-name argument line]
          app_.config().TX_REDUCE_RELAY_ENABLE))
    , vpReduceRelayEnabled_(app_.config().VP_REDUCE_RELAY_ENABLE)
    , ledgerReplayEnabled_(peerFeatureEnabled(
          headers_,
          // [capture gap: feature-name argument line]
          app_.config().LEDGER_REPLAY))
    , ledgerReplayMsgHandler_(app, app.getLedgerReplayer())
{
    // Log the negotiated per-peer feature set once at construction time.
    JLOG(journal_.info()) << "compression enabled "
                          << (compressionEnabled_ == Compressed::On)
                          << " vp reduce-relay enabled "
                          // [capture gap: streamed flag line]
                          << " tx reduce-relay enabled "
                          // [capture gap: streamed flag line]
                          << " " << id_;
}
131
// Destructor: performs final bookkeeping for the departing peer.
// NOTE(review): the signature line (presumably "PeerImp::~PeerImp()") and
// several body lines are missing from this capture — verify upstream.
{
    bool const inCluster{cluster()};

    // [capture gap: cleanup/bookkeeping lines missing from this capture]

    if (inCluster)
    {
        JLOG(journal_.warn()) << name() << " left cluster";
    }
}
146
147// Helper function to check for valid uint256 values in protobuf buffers
148static bool
150{
151 return pBuffStr.size() == uint256::size();
152}
153
// Entry point after the handshake: parses the Closed-Ledger/Previous-Ledger
// handshake headers and kicks off the protocol (accept path for inbound).
void
// [capture gap: "PeerImp::run()" signature line]
{
    if (!strand_.running_in_this_thread())
        // [capture gap: re-dispatch onto the strand via post(...)]

    // Parse a ledger hash from a header value: hex first, then base64.
    auto parseLedgerHash =
        // [capture gap: lambda parameter-list line]
        if (uint256 ret; ret.parseHex(value))
            return ret;

        if (auto const s = base64_decode(value); s.size() == uint256::size())
            return uint256{s};

        return std::nullopt;
    };

    // [capture gap: declaration of `closed` optional]
    std::optional<uint256> previous;

    if (auto const iter = headers_.find("Closed-Ledger");
        iter != headers_.end())
    {
        closed = parseLedgerHash(iter->value());

        if (!closed)
            fail("Malformed handshake data (1)");
    }

    if (auto const iter = headers_.find("Previous-Ledger");
        iter != headers_.end())
    {
        previous = parseLedgerHash(iter->value());

        if (!previous)
            fail("Malformed handshake data (2)");
    }

    // A previous ledger without a closed ledger is inconsistent.
    if (previous && !closed)
        fail("Malformed handshake data (3)");

    {
        // [capture gap: lock guarding the ledger-hash members]
        if (closed)
            closedLedgerHash_ = *closed;
        if (previous)
            previousLedgerHash_ = *previous;
    }

    if (inbound_)
        doAccept();
    else
        // [capture gap: outbound protocol start call]

    // Anything else that needs to be done with the connection should be
    // done in doProtocolStart
}
211
// Stops the peer: logs (severity depends on direction) and closes the socket.
void
// [capture gap: "PeerImp::stop()" signature line]
{
    if (!strand_.running_in_this_thread())
        // [capture gap: re-dispatch onto the strand via post(...)]
    if (socket_.is_open())
    {
        // The rationale for using different severity levels is that
        // outbound connections are under our control and may be logged
        // at a higher level, but inbound connections are more numerous and
        // uncontrolled so to prevent log flooding the severity is reduced.
        //
        if (inbound_)
        {
            JLOG(journal_.debug()) << "Stop";
        }
        else
        {
            JLOG(journal_.info()) << "Stop";
        }
    }
    close();
}
235
236//------------------------------------------------------------------------------
237
// Queues a message for transmission; starts an async_write only when the
// queue was previously empty (an in-flight write drains the rest).
void
// [capture gap: "PeerImp::send(std::shared_ptr<Message> const& m)" signature]
{
    if (!strand_.running_in_this_thread())
        return post(strand_, std::bind(&PeerImp::send, shared_from_this(), m));
    if (gracefulClose_)
        return;
    if (detaching_)
        return;

    // Drop messages from validators this peer has squelched.
    auto validator = m->getValidatorKey();
    if (validator && !squelch_.expireSquelch(*validator))
    {
        // [capture gap: squelched-traffic accounting call line(s)]
            static_cast<int>(m->getBuffer(compressionEnabled_).size()));
        return;
    }

    // report categorized outgoing traffic
    // [capture gap: reportOutboundTraffic(...) call line]
        safe_cast<TrafficCount::category>(m->getCategory()),
        static_cast<int>(m->getBuffer(compressionEnabled_).size()));

    // report total outgoing traffic
    // [capture gap: reportOutboundTraffic(total, ...) call line(s)]
        static_cast<int>(m->getBuffer(compressionEnabled_).size()));

    auto sendq_size = send_queue_.size();

    if (sendq_size < Tuning::targetSendQueue)
    {
        // To detect a peer that does not read from their
        // side of the connection, we expect a peer to have
        // a small senq periodically
        large_sendq_ = 0;
    }
    else if (auto sink = journal_.debug();
             sink && (sendq_size % Tuning::sendQueueLogFreq) == 0)
    {
        std::string const n = name();
        sink << (n.empty() ? remote_address_.to_string() : n)
             << " sendq: " << sendq_size;
    }

    send_queue_.push(m);

    // Non-empty queue means a write is already in flight; it will pick up
    // the newly-pushed message.
    if (sendq_size != 0)
        return;

    boost::asio::async_write(
        stream_,
        boost::asio::buffer(
            send_queue_.front()->getBuffer(compressionEnabled_)),
        bind_executor(
            strand_,
            std::bind(
                // [capture gap: completion handler target + shared_from_this()]
                std::placeholders::_1,
                std::placeholders::_2)));
}
301
// Flushes the queued transaction hashes to the peer as a single
// TMHaveTransactions message (tx reduce-relay support).
void
// [capture gap: "PeerImp::sendTxQueue()" signature line]
{
    if (!strand_.running_in_this_thread())
        return post(
            // [capture gap: strand + bind argument lines]

    if (!txQueue_.empty())
    {
        protocol::TMHaveTransactions ht;
        std::for_each(txQueue_.begin(), txQueue_.end(), [&](auto const& hash) {
            ht.add_hashes(hash.data(), hash.size());
        });
        JLOG(p_journal_.trace()) << "sendTxQueue " << txQueue_.size();
        txQueue_.clear();
        send(std::make_shared<Message>(ht, protocol::mtHAVE_TRANSACTIONS));
    }
}

// Adds a transaction hash to the relay queue, flushing first if the queue
// has hit its cap.
void
// [capture gap: "PeerImp::addTxQueue(...)" signature line]
{
    if (!strand_.running_in_this_thread())
        return post(
            // [capture gap: strand + bind argument lines]

    // [capture gap: queue-size cap condition line]
    {
        JLOG(p_journal_.warn()) << "addTxQueue exceeds the cap";
        sendTxQueue();
    }

    txQueue_.insert(hash);
    JLOG(p_journal_.trace()) << "addTxQueue " << txQueue_.size();
}

// Removes a transaction hash from the relay queue (e.g. already seen).
void
// [capture gap: "PeerImp::removeTxQueue(...)" signature line]
{
    if (!strand_.running_in_this_thread())
        return post(
            strand_,
            // [capture gap: bind argument line]

    auto removed = txQueue_.erase(hash);
    JLOG(p_journal_.trace()) << "removeTxQueue " << removed;
}
349
// Charges the peer's resource consumer; drops the connection when usage
// crosses the disconnect threshold (only when already on the strand).
void
// [capture gap: "PeerImp::charge(fee, context)" signature line]
{
    if ((usage_.charge(fee, context) == Resource::drop) &&
        usage_.disconnect(p_journal_) && strand_.running_in_this_thread())
    {
        // Sever the connection
        // [capture gap: disconnect-charge accounting line]
        fail("charge: Resources");
    }
}
361
362//------------------------------------------------------------------------------
363
// True when the peer opted in to being publicly listed ("Crawl: public").
bool
// [capture gap: "PeerImp::crawl() const" signature line]
{
    auto const iter = headers_.find("Crawl");
    if (iter == headers_.end())
        return false;
    return boost::iequals(iter->value(), "public");
}

// True when the peer's public key belongs to our configured cluster.
bool
// [capture gap: "PeerImp::cluster() const" signature line]
{
    return static_cast<bool>(app_.cluster().member(publicKey_));
}

// Returns the peer's software version string; inbound peers report it in
// "User-Agent", outbound in "Server".
// [capture gap: return type + "PeerImp::getVersion() const" signature]
{
    if (inbound_)
        return headers_["User-Agent"];
    return headers_["Server"];
}
386
// Builds the JSON status object for this peer (keys, address, negotiated
// protocol, latency, ledger range, tracking state, last status, metrics) as
// surfaced by e.g. the "peers" RPC.
// [capture gap: return type, signature and `ret` declaration lines]

    ret[jss::public_key] = toBase58(TokenType::NodePublic, publicKey_);
    ret[jss::address] = remote_address_.to_string();

    if (inbound_)
        ret[jss::inbound] = true;

    if (cluster())
    {
        ret[jss::cluster] = true;

        if (auto const n = name(); !n.empty())
            // Could move here if Json::Value supported moving from a string
            ret[jss::name] = n;
    }

    if (auto const d = domain(); !d.empty())
        ret[jss::server_domain] = std::string{d};

    if (auto const nid = headers_["Network-ID"]; !nid.empty())
        ret[jss::network_id] = std::string{nid};

    ret[jss::load] = usage_.balance();

    if (auto const version = getVersion(); !version.empty())
        ret[jss::version] = std::string{version};

    ret[jss::protocol] = to_string(protocol_);

    {
        // [capture gap: lock guarding latency_]
        if (latency_)
            ret[jss::latency] = static_cast<Json::UInt>(latency_->count());
    }

    ret[jss::uptime] = static_cast<Json::UInt>(
        std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());

    std::uint32_t minSeq, maxSeq;
    ledgerRange(minSeq, maxSeq);

    if ((minSeq != 0) || (maxSeq != 0))
        ret[jss::complete_ledgers] =
            std::to_string(minSeq) + " - " + std::to_string(maxSeq);

    switch (tracking_.load())
    {
        // [capture gap: case label (diverged)]
            ret[jss::track] = "diverged";
            break;

        // [capture gap: case label (unknown)]
            ret[jss::track] = "unknown";
            break;

        // [capture gap: case label (converged)]
            // Nothing to do here
            break;
    }

    uint256 closedLedgerHash;
    protocol::TMStatusChange last_status;
    {
        // [capture gap: lock guarding the copied state]
        closedLedgerHash = closedLedgerHash_;
        last_status = last_status_;
    }

    if (closedLedgerHash != beast::zero)
        ret[jss::ledger] = to_string(closedLedgerHash);

    if (last_status.has_newstatus())
    {
        switch (last_status.newstatus())
        {
            case protocol::nsCONNECTING:
                ret[jss::status] = "connecting";
                break;

            case protocol::nsCONNECTED:
                ret[jss::status] = "connected";
                break;

            case protocol::nsMONITORING:
                ret[jss::status] = "monitoring";
                break;

            case protocol::nsVALIDATING:
                ret[jss::status] = "validating";
                break;

            case protocol::nsSHUTTING:
                ret[jss::status] = "shutting";
                break;

            default:
                JLOG(p_journal_.warn())
                    << "Unknown status: " << last_status.newstatus();
        }
    }

    ret[jss::metrics] = Json::Value(Json::objectValue);
    ret[jss::metrics][jss::total_bytes_recv] =
        std::to_string(metrics_.recv.total_bytes());
    ret[jss::metrics][jss::total_bytes_sent] =
        std::to_string(metrics_.sent.total_bytes());
    ret[jss::metrics][jss::avg_bps_recv] =
        std::to_string(metrics_.recv.average_bytes());
    ret[jss::metrics][jss::avg_bps_sent] =
        std::to_string(metrics_.sent.average_bytes());

    return ret;
}
504
// Maps optional protocol features to the minimum negotiated protocol
// version that provides them; unknown features are unsupported.
bool
// [capture gap: "PeerImp::supportsFeature(ProtocolFeature f) const"]
{
    switch (f)
    {
        // [capture gap: case label]
            return protocol_ >= make_protocol(2, 1);
        // [capture gap: case label]
            return protocol_ >= make_protocol(2, 2);
        // [capture gap: case label(s)]
    }
    return false;
}
519
520//------------------------------------------------------------------------------
521
// True when this peer is believed to have the given ledger, either by
// sequence (within its advertised range) or by recently-seen hash.
bool
// [capture gap: "PeerImp::hasLedger(uint256 const& hash, ...) const"]
{
    {
        // [capture gap: lock guarding recent-ledger state]
        if ((seq != 0) && (seq >= minLedger_) && (seq <= maxLedger_) &&
            // [capture gap: remainder of the condition]
            return true;
        if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
            recentLedgers_.end())
            return true;
    }
    return false;
}

// Copies out the peer's advertised complete-ledger range.
void
// [capture gap: "PeerImp::ledgerRange(minSeq, maxSeq) const" + lock line]
{
    minSeq = minLedger_;
    maxSeq = maxLedger_;
}

// True when the peer recently advertised the given transaction set.
bool
PeerImp::hasTxSet(uint256 const& hash) const
{
    // [capture gap: lock line]
    return std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
        recentTxSets_.end();
}

void
// [capture gap: "PeerImp::cycleStatus()" signature line]
{
    // Operations on closedLedgerHash_ and previousLedgerHash_ must be
    // guarded by recentLock_.
    // [capture gap: entire body missing — presumably rotates the
    // previous/closed ledger hashes under recentLock_; verify upstream]
}

// True when the peer (while not diverged) claims the full [uMin, uMax] range.
bool
// [capture gap: "PeerImp::hasRange(uMin, uMax)" signature + lock line]
{
    return (tracking_ != Tracking::diverged) && (uMin >= minLedger_) &&
        (uMax <= maxLedger_);
}
571
572//------------------------------------------------------------------------------
573
// Hard-closes the socket and cancels the timer. Must run on the strand.
void
// [capture gap: "PeerImp::close()" signature line]
{
    XRPL_ASSERT(
        strand_.running_in_this_thread(),
        "ripple::PeerImp::close : strand in this thread");
    if (socket_.is_open())
    {
        detaching_ = true;  // DEPRECATED
        error_code ec;
        timer_.cancel(ec);
        socket_.close(ec);
        // [capture gap: overlay disconnect accounting line]
        if (inbound_)
        {
            JLOG(journal_.debug()) << "Closed";
        }
        else
        {
            JLOG(journal_.info()) << "Closed";
        }
    }
}

// Fails the connection with a human-readable reason; re-dispatches onto the
// strand when called from elsewhere.
void
// [capture gap: "PeerImp::fail(std::string const& reason)" signature]
{
    if (!strand_.running_in_this_thread())
        return post(
            strand_,
            std::bind(
                (void(Peer::*)(std::string const&)) & PeerImp::fail,
                // [capture gap: shared_from_this() argument line]
                reason));
    // [capture gap: condition guarding the warning block below]
    {
        std::string const n = name();
        JLOG(journal_.warn()) << (n.empty() ? remote_address_.to_string() : n)
                              << " failed: " << reason;
    }
    close();
}

// Fails the connection due to an error code from a named operation.
void
// [capture gap: "PeerImp::fail(std::string const& name, error_code ec)"]
{
    XRPL_ASSERT(
        strand_.running_in_this_thread(),
        "ripple::PeerImp::fail : strand in this thread");
    if (socket_.is_open())
    {
        JLOG(journal_.warn())
            // [capture gap: streamed operation-name line]
            << " at " << remote_address_.to_string() << ": " << ec.message();
    }
    close();
}

// Starts a graceful (TLS shutdown) close once the send queue drains.
void
// [capture gap: "PeerImp::gracefulClose()" signature line]
{
    XRPL_ASSERT(
        strand_.running_in_this_thread(),
        "ripple::PeerImp::gracefulClose : strand in this thread");
    XRPL_ASSERT(
        socket_.is_open(), "ripple::PeerImp::gracefulClose : socket is open");
    XRPL_ASSERT(
        // [capture gap: asserted expression line]
        "ripple::PeerImp::gracefulClose : socket is not closing");
    gracefulClose_ = true;
    if (send_queue_.size() > 0)
        return;
    setTimer();
    stream_.async_shutdown(bind_executor(
        strand_,
        std::bind(
            &PeerImp::onShutdown, shared_from_this(), std::placeholders::_1)));
}

// Arms the per-peer maintenance timer for one peerTimerInterval period.
void
// [capture gap: "PeerImp::setTimer()" signature line]
{
    error_code ec;
    timer_.expires_from_now(peerTimerInterval, ec);

    if (ec)
    {
        JLOG(journal_.error()) << "setTimer: " << ec.message();
        return;
    }
    timer_.async_wait(bind_executor(
        strand_,
        std::bind(
            &PeerImp::onTimer, shared_from_this(), std::placeholders::_1)));
}

// convenience for ignoring the error code
void
// [capture gap: "PeerImp::cancelTimer()" signature line]
{
    error_code ec;
    timer_.cancel(ec);
}
677
678//------------------------------------------------------------------------------
679
682{
684 ss << "[" << std::setfill('0') << std::setw(3) << id << "] ";
685 return ss.str();
686}
687
// Periodic maintenance: checks send-queue health and tracking state, then
// sends a PING with a random cookie and re-arms the timer.
void
// [capture gap: "PeerImp::onTimer(error_code const& ec)" signature]
{
    if (!socket_.is_open())
        return;

    if (ec == boost::asio::error::operation_aborted)
        return;

    if (ec)
    {
        // This should never happen
        JLOG(journal_.error()) << "onTimer: " << ec.message();
        return close();
    }

    // [capture gap: persistent large send-queue condition]
    {
        fail("Large send queue");
        return;
    }

    // Outbound peers that never converge get dropped after a grace period.
    if (auto const t = tracking_.load(); !inbound_ && t != Tracking::converged)
    {
        clock_type::duration duration;

        {
            // [capture gap: lock guarding trackingTime_]
            duration = clock_type::now() - trackingTime_;
        }

        if ((t == Tracking::diverged &&
             (duration > app_.config().MAX_DIVERGED_TIME)) ||
            (t == Tracking::unknown &&
             (duration > app_.config().MAX_UNKNOWN_TIME)))
        {
            // [capture gap: peer-finder failure accounting line]
            fail("Not useful");
            return;
        }
    }

    // Already waiting for PONG
    if (lastPingSeq_)
    {
        fail("Ping Timeout");
        return;
    }

    // [capture gap: lastPingTime_ update line]
    lastPingSeq_ = rand_int<std::uint32_t>();

    protocol::TMPing message;
    message.set_type(protocol::TMPing::ptPING);
    message.set_seq(*lastPingSeq_);

    send(std::make_shared<Message>(message, protocol::mtPING));

    setTimer();
}
748
// Completion handler for the TLS async_shutdown; a clean shutdown surfaces
// as boost::asio::error::eof.
void
// [capture gap: "PeerImp::onShutdown(error_code ec)" signature]
{
    cancelTimer();
    // If we don't get eof then something went wrong
    if (!ec)
    {
        JLOG(journal_.error()) << "onShutdown: expected error condition";
        return close();
    }
    if (ec != boost::asio::error::eof)
        return fail("onShutdown", ec);
    close();
}
763
764//------------------------------------------------------------------------------
// Inbound accept path: writes the HTTP handshake response, then starts the
// peer protocol once the whole response is on the wire.
void
// [capture gap: "PeerImp::doAccept()" signature line]
{
    XRPL_ASSERT(
        read_buffer_.size() == 0,
        "ripple::PeerImp::doAccept : empty read buffer");

    JLOG(journal_.debug()) << "doAccept: " << remote_address_;

    auto const sharedValue = makeSharedValue(*stream_ptr_, journal_);

    // This shouldn't fail since we already computed
    // the shared value successfully in OverlayImpl
    if (!sharedValue)
        return fail("makeSharedValue: Unexpected failure");

    JLOG(journal_.info()) << "Protocol: " << to_string(protocol_);
    JLOG(journal_.info()) << "Public Key: "
                          // [capture gap: streamed base58 public key line]

    if (auto member = app_.cluster().member(publicKey_))
    {
        {
            // [capture gap: lock guarding name_]
            name_ = *member;
        }
        JLOG(journal_.info()) << "Cluster name: " << *member;
    }

    // [capture gap: overlay activation line]

    // XXX Set timer: connection is in grace period to be useful.
    // XXX Set timer: connection idle (idle may vary depending on connection
    // type.)

    auto write_buffer = std::make_shared<boost::beast::multi_buffer>();

    boost::beast::ostream(*write_buffer) << makeResponse(
        // [capture gap: argument line]
        request_,
        // [capture gap: argument lines]
        *sharedValue,
        // [capture gap: argument line]
        protocol_,
        app_);

    // Write the whole buffer and only start protocol when that's done.
    boost::asio::async_write(
        stream_,
        write_buffer->data(),
        boost::asio::transfer_all(),
        bind_executor(
            strand_,
            [this, write_buffer, self = shared_from_this()](
                error_code ec, std::size_t bytes_transferred) {
                if (!socket_.is_open())
                    return;
                if (ec == boost::asio::error::operation_aborted)
                    return;
                if (ec)
                    return fail("onWriteResponse", ec);
                if (write_buffer->size() == bytes_transferred)
                    return doProtocolStart();
                return fail("Failed to write header");
            }));
}
832
// Returns the peer's (cluster) name; reads name_ under a shared lock.
// [capture gap: return type + "PeerImp::name() const" signature]
{
    std::shared_lock read_lock{nameMutex_};
    return name_;
}

// Returns the value of the "Server-Domain" handshake header.
// [capture gap: return type + "PeerImp::domain() const" signature]
{
    return headers_["Server-Domain"];
}
845
846//------------------------------------------------------------------------------
847
848// Protocol logic
849
// Starts the peer protocol: pushes loaded validator lists and cached
// manifests to the new peer, then arms the maintenance timer.
// NOTE(review): this capture is missing the signature and several interior
// lines; gaps are flagged below — verify against upstream.
void
// [capture gap: "PeerImp::doProtocolStart()" signature line]
{
    // [capture gap: initial read kickoff line]

    // Send all the validator lists that have been loaded
    // [capture gap: enclosing feature/support condition]
    {
        // [capture gap: validator-list enumeration call line]
            [&](std::string const& manifest,
                std::uint32_t version,
                // [capture gap: parameter line]
                PublicKey const& pubKey,
                std::size_t maxSequence,
                uint256 const& hash) {
                // [capture gap: send-validator-list call line]
                    *this,
                    0,
                    pubKey,
                    maxSequence,
                    version,
                    manifest,
                    blobInfos,
                    // [capture gap: argument line]
                    p_journal_);

                // Don't send it next time.
                // [capture gap: suppression line]
            });
    }

    if (auto m = overlay_.getManifestsMessage())
        send(m);

    setTimer();
}
886
// Called repeatedly with protocol message data
// Drains the read buffer through invokeProtocolMessage (with duration
// logging), then re-arms the async read.
void
// [capture gap: "PeerImp::onReadMessage(error_code, std::size_t)" signature]
{
    if (!socket_.is_open())
        return;
    if (ec == boost::asio::error::operation_aborted)
        return;
    if (ec == boost::asio::error::eof)
    {
        JLOG(journal_.info()) << "EOF";
        return gracefulClose();
    }
    if (ec)
        return fail("onReadMessage", ec);
    if (auto stream = journal_.trace())
    {
        if (bytes_transferred > 0)
            stream << "onReadMessage: " << bytes_transferred << " bytes";
        else
            stream << "onReadMessage";
    }

    metrics_.recv.add_message(bytes_transferred);

    read_buffer_.commit(bytes_transferred);

    auto hint = Tuning::readBufferBytes;

    while (read_buffer_.size() > 0)
    {
        std::size_t bytes_consumed;

        using namespace std::chrono_literals;
        // Log slow message handling (> 350ms) for diagnostics.
        std::tie(bytes_consumed, ec) = perf::measureDurationAndLog(
            [&]() {
                return invokeProtocolMessage(read_buffer_.data(), *this, hint);
            },
            "invokeProtocolMessage",
            350ms,
            journal_);

        if (ec)
            return fail("onReadMessage", ec);
        if (!socket_.is_open())
            return;
        if (gracefulClose_)
            return;
        if (bytes_consumed == 0)
            break;
        read_buffer_.consume(bytes_consumed);
    }

    // Timeout on writes only
    stream_.async_read_some(
        // [capture gap: read_buffer_.prepare(...) argument line]
        bind_executor(
            strand_,
            std::bind(
                // [capture gap: handler target + shared_from_this() line]
                std::placeholders::_1,
                std::placeholders::_2)));
}
951
// Completion handler for an async_write: pops the sent message and either
// continues draining the queue or finishes a pending graceful close.
void
// [capture gap: "PeerImp::onWriteMessage(error_code, std::size_t)" signature]
{
    if (!socket_.is_open())
        return;
    if (ec == boost::asio::error::operation_aborted)
        return;
    if (ec)
        return fail("onWriteMessage", ec);
    if (auto stream = journal_.trace())
    {
        if (bytes_transferred > 0)
            stream << "onWriteMessage: " << bytes_transferred << " bytes";
        else
            stream << "onWriteMessage";
    }

    metrics_.sent.add_message(bytes_transferred);

    XRPL_ASSERT(
        !send_queue_.empty(),
        "ripple::PeerImp::onWriteMessage : non-empty send buffer");
    send_queue_.pop();
    if (!send_queue_.empty())
    {
        // Timeout on writes only
        return boost::asio::async_write(
            stream_,
            boost::asio::buffer(
                send_queue_.front()->getBuffer(compressionEnabled_)),
            bind_executor(
                strand_,
                std::bind(
                    // [capture gap: handler target + shared_from_this()]
                    std::placeholders::_1,
                    std::placeholders::_2)));
    }

    if (gracefulClose_)
    {
        return stream_.async_shutdown(bind_executor(
            strand_,
            std::bind(
                // [capture gap: shutdown handler target line(s)]
                std::placeholders::_1)));
    }
}
1001
1002//------------------------------------------------------------------------------
1003//
1004// ProtocolHandler
1005//
1006//------------------------------------------------------------------------------
1007
// Unimplemented protocol message handler.
void
// [capture gap: onMessage(...) signature — message type not visible here]
{
    // TODO
}

// Invoked before dispatching each protocol message: records traffic
// accounting (total + per-category) and per-type transaction metrics.
void
// [capture gap: "PeerImp::onMessageBegin(" + first parameter line]
    std::uint16_t type,
    // [capture gap: protobuf message parameter line]
    std::size_t size,
    std::size_t uncompressed_size,
    bool isCompressed)
{
    auto const name = protocolMessageName(type);
    // [capture gap: load-event / accounting line(s)]

    auto const category = TrafficCount::categorize(
        *m, static_cast<protocol::MessageType>(type), true);

    // report total incoming traffic
    // [capture gap: reportInboundTraffic( call line]
        TrafficCount::category::total, static_cast<int>(size));

    // increase the traffic received for a specific category
    overlay_.reportInboundTraffic(category, static_cast<int>(size));

    using namespace protocol;
    if ((type == MessageType::mtTRANSACTION ||
         type == MessageType::mtHAVE_TRANSACTIONS ||
         type == MessageType::mtTRANSACTIONS ||
         // GET_OBJECTS
         // [capture gap: condition line(s)]
         // GET_LEDGER
         // [capture gap: condition line(s)]
         // LEDGER_DATA
         // [capture gap: condition line(s)]
    {
        // [capture gap: tx-metrics call line]
            static_cast<MessageType>(type), static_cast<std::uint64_t>(size));
    }
    JLOG(journal_.trace()) << "onMessageBegin: " << type << " " << size << " "
                           << uncompressed_size << " " << isCompressed;
}

// Invoked after a protocol message has been dispatched.
void
// [capture gap: "PeerImp::onMessageEnd(" signature + parameter lines]
{
    load_event_.reset();
    // [capture gap: trailing line(s) — possibly fee accounting]
}
1065
// Handles TMManifests: hands the (bounded) manifest list off to the overlay
// on a job-queue thread.
void
// [capture gap: onMessage(TMManifests) signature line]
{
    auto const s = m->list_size();

    if (s == 0)
    {
        // [capture gap: fee update for an empty list]
        return;
    }

    if (s > 100)
        // [capture gap: fee update for an oversized list]

    // [capture gap: job-queue addJob( call line]
        jtMANIFEST, "receiveManifests", [this, that = shared_from_this(), m]() {
            overlay_.onManifests(m, that);
        });
}
1085
// Handles TMPing: answers PINGs with a PONG, and uses matching PONGs to
// maintain an exponentially-weighted latency estimate.
void
// [capture gap: onMessage(TMPing) signature line]
{
    if (m->type() == protocol::TMPing::ptPING)
    {
        // We have received a ping request, reply with a pong
        // [capture gap: fee/charge line]
        m->set_type(protocol::TMPing::ptPONG);
        send(std::make_shared<Message>(*m, protocol::mtPING));
        return;
    }

    if (m->type() == protocol::TMPing::ptPONG && m->has_seq())
    {
        // Only reset the ping sequence if we actually received a
        // PONG with the correct cookie. That way, any peers which
        // respond with incorrect cookies will eventually time out.
        if (m->seq() == lastPingSeq_)
        {
            // [capture gap: lastPingSeq_ reset line]

            // Update latency estimate
            auto const rtt = std::chrono::round<std::chrono::milliseconds>(
                // [capture gap: elapsed-time expression line]

            // [capture gap: lock line]

            // EWMA: keep 7/8 of the old estimate, blend in 1/8 of the new.
            if (latency_)
                latency_ = (*latency_ * 7 + rtt) / 8;
            else
                latency_ = rtt;
        }

        return;
    }
}
1122
// Handles TMCluster: updates cluster node status/load reports, imports load
// gossip, and recomputes the cluster fee as the median of recent reports.
void
// [capture gap: onMessage(TMCluster) signature line]
{
    // VFALCO NOTE I think we should drop the peer immediately
    if (!cluster())
    {
        fee_.update(Resource::feeUselessData, "unknown cluster");
        return;
    }

    for (int i = 0; i < m->clusternodes().size(); ++i)
    {
        protocol::TMClusterNode const& node = m->clusternodes(i);

        // [capture gap: local `name` declaration line]
        if (node.has_nodename())
            name = node.nodename();

        auto const publicKey =
            parseBase58<PublicKey>(TokenType::NodePublic, node.publickey());

        // NIKB NOTE We should drop the peer immediately if
        // they send us a public key we can't parse
        if (publicKey)
        {
            auto const reportTime =
                NetClock::time_point{NetClock::duration{node.reporttime()}};

            // [capture gap: cluster update call line]
                *publicKey, name, node.nodeload(), reportTime);
        }
    }

    int loadSources = m->loadsources().size();
    if (loadSources != 0)
    {
        Resource::Gossip gossip;
        gossip.items.reserve(loadSources);
        for (int i = 0; i < m->loadsources().size(); ++i)
        {
            protocol::TMLoadSource const& node = m->loadsources(i);
            // [capture gap: gossip item declaration line]
            item.address = beast::IP::Endpoint::from_string(node.name());
            item.balance = node.cost();
            if (item.address != beast::IP::Endpoint())
                gossip.items.push_back(item);
        }
        // [capture gap: gossip import call line]
    }

    // Calculate the cluster fee:
    auto const thresh = app_.timeKeeper().now() - 90s;
    std::uint32_t clusterFee = 0;

    // [capture gap: `fees` vector declaration line]
    fees.reserve(app_.cluster().size());

    app_.cluster().for_each([&fees, thresh](ClusterNode const& status) {
        if (status.getReportTime() >= thresh)
            fees.push_back(status.getLoadFee());
    });

    if (!fees.empty())
    {
        // Median of the recently-reported fees via nth_element.
        auto const index = fees.size() / 2;
        std::nth_element(fees.begin(), fees.begin() + index, fees.end());
        clusterFee = fees[index];
    }

    app_.getFeeTrack().setClusterFee(clusterFee);
}
1194
// Handles TMEndpoints (v2): validates, normalizes (hops==0 uses the socket's
// remote address), charges for malformed entries, and forwards the rest to
// the peer finder.
void
// [capture gap: onMessage(TMEndpoints) signature line]
{
    // Don't allow endpoints from peers that are not known tracking or are
    // not using a version of the message that we support:
    if (tracking_.load() != Tracking::converged || m->version() != 2)
        return;

    // The number is arbitrary and doesn't have any real significance or
    // implication for the protocol.
    if (m->endpoints_v2().size() >= 1024)
    {
        fee_.update(Resource::feeUselessData, "endpoints too large");
        return;
    }

    // [capture gap: `endpoints` vector declaration line]
    endpoints.reserve(m->endpoints_v2().size());

    auto malformed = 0;
    for (auto const& tm : m->endpoints_v2())
    {
        auto result = beast::IP::Endpoint::from_string_checked(tm.endpoint());

        if (!result)
        {
            JLOG(p_journal_.error()) << "failed to parse incoming endpoint: {"
                                     << tm.endpoint() << "}";
            malformed++;
            continue;
        }

        // If hops == 0, this Endpoint describes the peer we are connected
        // to -- in that case, we take the remote address seen on the
        // socket and store that in the IP::Endpoint. If this is the first
        // time, then we'll verify that their listener can receive incoming
        // by performing a connectivity test. if hops > 0, then we just
        // take the address/port we were given
        if (tm.hops() == 0)
            result = remote_address_.at_port(result->port());

        endpoints.emplace_back(*result, tm.hops());
    }

    // Charge the peer for each malformed endpoint. As there still may be
    // multiple valid endpoints we don't return early.
    if (malformed > 0)
    {
        fee_.update(
            Resource::feeInvalidData * malformed,
            std::to_string(malformed) + " malformed endpoints");
    }

    if (!endpoints.empty())
        overlay_.peerFinder().on_endpoints(slot_, endpoints);
}
1251
// Handles TMTransaction: a relayed transaction is processed (not batch).
void
// [capture gap: onMessage(TMTransaction) signature line]
{
    handleTransaction(m, true, false);
}

// Common transaction-processing path: deserializes the transaction, applies
// relay/duplication policy via the hash router, and queues signature checks.
// eraseTxQueue and batch are mutually exclusive (asserted below).
void
// [capture gap: "PeerImp::handleTransaction(" + message parameter line]
    bool eraseTxQueue,
    bool batch)
{
    XRPL_ASSERT(
        eraseTxQueue != batch,
        ("ripple::PeerImp::handleTransaction : valid inputs"));
    // [capture gap: early-return guard condition]
        return;

    // [capture gap: "need network ledger" condition]
    {
        // If we've never been in synch, there's nothing we can do
        // with a transaction
        JLOG(p_journal_.debug()) << "Ignoring incoming transaction: "
                                 << "Need network ledger";
        return;
    }

    SerialIter sit(makeSlice(m->rawtransaction()));

    try
    {
        auto stx = std::make_shared<STTx const>(sit);
        uint256 txID = stx->getTransactionID();

        // Charge strongly for attempting to relay a txn with tfInnerBatchTxn
        // LCOV_EXCL_START
        if (stx->isFlag(tfInnerBatchTxn) &&
            getCurrentTransactionRules()->enabled(featureBatch))
        {
            JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing "
                                       "tfInnerBatchTxn (handleTransaction).";
            fee_.update(Resource::feeModerateBurdenPeer, "inner batch txn");
            return;
        }
        // LCOV_EXCL_STOP

        int flags;
        constexpr std::chrono::seconds tx_interval = 10s;

        if (!app_.getHashRouter().shouldProcess(txID, id_, flags, tx_interval))
        {
            // we have seen this transaction recently
            if (flags & SF_BAD)
            {
                fee_.update(Resource::feeUselessData, "known bad");
                JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID;
            }

            // Erase only if the server has seen this tx. If the server has not
            // seen this tx then the tx could not has been queued for this peer.
            else if (eraseTxQueue && txReduceRelayEnabled())
                removeTxQueue(txID);

            // [capture gap: additional line(s)]

            return;
        }

        JLOG(p_journal_.debug()) << "Got tx " << txID;

        bool checkSignature = true;
        if (cluster())
        {
            if (!m->has_deferred() || !m->deferred())
            {
                // Skip local checks if a server we trust
                // put the transaction in its open ledger
                flags |= SF_TRUSTED;
            }

            // for non-validator nodes only -- localPublicKey is set for
            // validators only
            // [capture gap: condition line]
            {
                // For now, be paranoid and have each validator
                // check each transaction, regardless of source
                checkSignature = false;
            }
        }

        // [capture gap: "not synchronized" condition line]
        {
            JLOG(p_journal_.trace())
                << "No new transactions until synchronized";
        }
        else if (
            // [capture gap: job-queue overload condition line(s)]
        {
            // [capture gap: overload accounting line]
            JLOG(p_journal_.info()) << "Transaction queue is full";
        }
        else
        {
            // [capture gap: addJob call + start of lambda capture list]
                "recvTransaction->checkTransaction",
                // [capture gap: captured weak-pointer line]
                flags,
                checkSignature,
                batch,
                stx]() {
                    if (auto peer = weak.lock())
                        peer->checkTransaction(
                            flags, checkSignature, stx, batch);
                });
        }
    }
    catch (std::exception const& ex)
    {
        JLOG(p_journal_.warn())
            << "Transaction invalid: " << strHex(m->rawtransaction())
            << ". Exception: " << ex.what();
    }
}
1379
// Handles TMGetLedger: strictly validates every request field (charging the
// peer for malformed data) before queuing the actual ledger request job.
void
// [capture gap: onMessage(TMGetLedger) signature line]
{
    // Charge the peer and log for any malformed request field.
    auto badData = [&](std::string const& msg) {
        fee_.update(Resource::feeInvalidData, "get_ledger " + msg);
        JLOG(p_journal_.warn()) << "TMGetLedger: " << msg;
    };
    auto const itype{m->itype()};

    // Verify ledger info type
    if (itype < protocol::liBASE || itype > protocol::liTS_CANDIDATE)
        return badData("Invalid ledger info type");

    auto const ltype = [&m]() -> std::optional<::protocol::TMLedgerType> {
        if (m->has_ltype())
            return m->ltype();
        return std::nullopt;
    }();

    if (itype == protocol::liTS_CANDIDATE)
    {
        if (!m->has_ledgerhash())
            return badData("Invalid TX candidate set, missing TX set hash");
    }
    else if (
        !m->has_ledgerhash() && !m->has_ledgerseq() &&
        !(ltype && *ltype == protocol::ltCLOSED))
    {
        return badData("Invalid request");
    }

    // Verify ledger type
    if (ltype && (*ltype < protocol::ltACCEPTED || *ltype > protocol::ltCLOSED))
        return badData("Invalid ledger type");

    // Verify ledger hash
    if (m->has_ledgerhash() && !stringIsUint256Sized(m->ledgerhash()))
        return badData("Invalid ledger hash");

    // Verify ledger sequence
    if (m->has_ledgerseq())
    {
        auto const ledgerSeq{m->ledgerseq()};

        // Check if within a reasonable range
        using namespace std::chrono_literals;
        // [capture gap: lower-bound half of the range condition]
            ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
        {
            return badData(
                "Invalid ledger sequence " + std::to_string(ledgerSeq));
        }
    }

    // Verify ledger node IDs
    if (itype != protocol::liBASE)
    {
        if (m->nodeids_size() <= 0)
            return badData("Invalid ledger node IDs");

        for (auto const& nodeId : m->nodeids())
        {
            if (deserializeSHAMapNodeID(nodeId) == std::nullopt)
                return badData("Invalid SHAMap node ID");
        }
    }

    // Verify query type
    if (m->has_querytype() && m->querytype() != protocol::qtINDIRECT)
        return badData("Invalid query type");

    // Verify query depth
    if (m->has_querydepth())
    {
        if (m->querydepth() > Tuning::maxQueryDepth ||
            itype == protocol::liBASE)
        {
            return badData("Invalid query depth");
        }
    }

    // Queue a job to process the request
    // [capture gap: weak_ptr declaration line]
    app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m]() {
        if (auto peer = weak.lock())
            peer->processLedgerRequest(m);
    });
}
1468
1469void
1471{
1472 JLOG(p_journal_.trace()) << "onMessage, TMProofPathRequest";
1474 {
1475 fee_.update(
1476 Resource::feeMalformedRequest, "proof_path_request disabled");
1477 return;
1478 }
1479
1480 fee_.update(
1481 Resource::feeModerateBurdenPeer, "received a proof path request");
1484 jtREPLAY_REQ, "recvProofPathRequest", [weak, m]() {
1485 if (auto peer = weak.lock())
1486 {
1487 auto reply =
1488 peer->ledgerReplayMsgHandler_.processProofPathRequest(m);
1489 if (reply.has_error())
1490 {
1491 if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1492 peer->charge(
1493 Resource::feeMalformedRequest,
1494 "proof_path_request");
1495 else
1496 peer->charge(
1497 Resource::feeRequestNoReply, "proof_path_request");
1498 }
1499 else
1500 {
1501 peer->send(std::make_shared<Message>(
1502 reply, protocol::mtPROOF_PATH_RESPONSE));
1503 }
1504 }
1505 });
1506}
1507
1508void
1510{
1511 if (!ledgerReplayEnabled_)
1512 {
1513 fee_.update(
1514 Resource::feeMalformedRequest, "proof_path_response disabled");
1515 return;
1516 }
1517
1518 if (!ledgerReplayMsgHandler_.processProofPathResponse(m))
1519 {
1520 fee_.update(Resource::feeInvalidData, "proof_path_response");
1521 }
1522}
1523
1524void
1526{
1527 JLOG(p_journal_.trace()) << "onMessage, TMReplayDeltaRequest";
1528 if (!ledgerReplayEnabled_)
1529 {
1530 fee_.update(
1531 Resource::feeMalformedRequest, "replay_delta_request disabled");
1532 return;
1533 }
1534
1535 fee_.fee = Resource::feeModerateBurdenPeer;
1536 std::weak_ptr<PeerImp> weak = shared_from_this();
1537 app_.getJobQueue().addJob(
1538 jtREPLAY_REQ, "recvReplayDeltaRequest", [weak, m]() {
1539 if (auto peer = weak.lock())
1540 {
1541 auto reply =
1542 peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m);
1543 if (reply.has_error())
1544 {
1545 if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1546 peer->charge(
1547 Resource::feeMalformedRequest,
1548 "replay_delta_request");
1549 else
1550 peer->charge(
1551 Resource::feeRequestNoReply,
1552 "replay_delta_request");
1553 }
1554 else
1555 {
1556 peer->send(std::make_shared<Message>(
1557 reply, protocol::mtREPLAY_DELTA_RESPONSE));
1558 }
1559 }
1560 });
1561}
1562
1563void
1565{
1566 if (!ledgerReplayEnabled_)
1567 {
1568 fee_.update(
1569 Resource::feeMalformedRequest, "replay_delta_response disabled");
1570 return;
1571 }
1572
1573 if (!ledgerReplayMsgHandler_.processReplayDeltaResponse(m))
1574 {
1575 fee_.update(Resource::feeInvalidData, "replay_delta_response");
1576 }
1577}
1578
1579void
1581{
1582 auto badData = [&](std::string const& msg) {
1583 fee_.update(Resource::feeInvalidData, msg);
1584 JLOG(p_journal_.warn()) << "TMLedgerData: " << msg;
1585 };
1586
1587 // Verify ledger hash
1588 if (!stringIsUint256Sized(m->ledgerhash()))
1589 return badData("Invalid ledger hash");
1590
1591 // Verify ledger sequence
1592 {
1593 auto const ledgerSeq{m->ledgerseq()};
1594 if (m->type() == protocol::liTS_CANDIDATE)
1595 {
1596 if (ledgerSeq != 0)
1597 {
1598 return badData(
1599 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1600 }
1601 }
1602 else
1603 {
1604 // Check if within a reasonable range
1605 using namespace std::chrono_literals;
1606 if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s &&
1607 ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1608 {
1609 return badData(
1610 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1611 }
1612 }
1613 }
1614
1615 // Verify ledger info type
1616 if (m->type() < protocol::liBASE || m->type() > protocol::liTS_CANDIDATE)
1617 return badData("Invalid ledger info type");
1618
1619 // Verify reply error
1620 if (m->has_error() &&
1621 (m->error() < protocol::reNO_LEDGER ||
1622 m->error() > protocol::reBAD_REQUEST))
1623 {
1624 return badData("Invalid reply error");
1625 }
1626
1627 // Verify ledger nodes.
1628 if (m->nodes_size() <= 0 || m->nodes_size() > Tuning::hardMaxReplyNodes)
1629 {
1630 return badData(
1631 "Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size()));
1632 }
1633
1634 // If there is a request cookie, attempt to relay the message
1635 if (m->has_requestcookie())
1636 {
1637 if (auto peer = overlay_.findPeerByShortID(m->requestcookie()))
1638 {
1639 m->clear_requestcookie();
1640 peer->send(std::make_shared<Message>(*m, protocol::mtLEDGER_DATA));
1641 }
1642 else
1643 {
1644 JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
1645 }
1646 return;
1647 }
1648
1649 uint256 const ledgerHash{m->ledgerhash()};
1650
1651 // Otherwise check if received data for a candidate transaction set
1652 if (m->type() == protocol::liTS_CANDIDATE)
1653 {
1654 std::weak_ptr<PeerImp> weak{shared_from_this()};
1655 app_.getJobQueue().addJob(
1656 jtTXN_DATA, "recvPeerData", [weak, ledgerHash, m]() {
1657 if (auto peer = weak.lock())
1658 {
1659 peer->app_.getInboundTransactions().gotData(
1660 ledgerHash, peer, m);
1661 }
1662 });
1663 return;
1664 }
1665
1666 // Consume the message
1667 app_.getInboundLedgers().gotLedgerData(ledgerHash, shared_from_this(), m);
1668}
1669
1670void
1672{
1673 protocol::TMProposeSet& set = *m;
1674
1675 auto const sig = makeSlice(set.signature());
1676
1677 // Preliminary check for the validity of the signature: A DER encoded
1678 // signature can't be longer than 72 bytes.
1679 if ((std::clamp<std::size_t>(sig.size(), 64, 72) != sig.size()) ||
1680 (publicKeyType(makeSlice(set.nodepubkey())) != KeyType::secp256k1))
1681 {
1682 JLOG(p_journal_.warn()) << "Proposal: malformed";
1683 fee_.update(
1684 Resource::feeInvalidSignature,
1685 " signature can't be longer than 72 bytes");
1686 return;
1687 }
1688
1689 if (!stringIsUint256Sized(set.currenttxhash()) ||
1690 !stringIsUint256Sized(set.previousledger()))
1691 {
1692 JLOG(p_journal_.warn()) << "Proposal: malformed";
1693 fee_.update(Resource::feeMalformedRequest, "bad hashes");
1694 return;
1695 }
1696
1697 // RH TODO: when isTrusted = false we should probably also cache a key
1698 // suppression for 30 seconds to avoid doing a relatively expensive lookup
1699 // every time a spam packet is received
1700 PublicKey const publicKey{makeSlice(set.nodepubkey())};
1701 auto const isTrusted = app_.validators().trusted(publicKey);
1702
1703 // If the operator has specified that untrusted proposals be dropped then
1704 // this happens here I.e. before further wasting CPU verifying the signature
1705 // of an untrusted key
1706 if (!isTrusted)
1707 {
1708 // report untrusted proposal messages
1709 overlay_.reportInboundTraffic(
1710 TrafficCount::category::proposal_untrusted,
1711 Message::messageSize(*m));
1712
1713 if (app_.config().RELAY_UNTRUSTED_PROPOSALS == -1)
1714 return;
1715 }
1716
1717 uint256 const proposeHash{set.currenttxhash()};
1718 uint256 const prevLedger{set.previousledger()};
1719
1720 NetClock::time_point const closeTime{NetClock::duration{set.closetime()}};
1721
1722 uint256 const suppression = proposalUniqueId(
1723 proposeHash,
1724 prevLedger,
1725 set.proposeseq(),
1726 closeTime,
1727 publicKey.slice(),
1728 sig);
1729
1730 if (auto [added, relayed] =
1731 app_.getHashRouter().addSuppressionPeerWithStatus(suppression, id_);
1732 !added)
1733 {
1734 // Count unique messages (Slots has it's own 'HashRouter'), which a peer
1735 // receives within IDLED seconds since the message has been relayed.
1736 if (reduceRelayReady() && relayed &&
1737 (stopwatch().now() - *relayed) < reduce_relay::IDLED)
1738 overlay_.updateSlotAndSquelch(
1739 suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);
1740
1741 // report duplicate proposal messages
1742 overlay_.reportInboundTraffic(
1743 TrafficCount::category::proposal_duplicate,
1744 Message::messageSize(*m));
1745
1746 JLOG(p_journal_.trace()) << "Proposal: duplicate";
1747
1748 return;
1749 }
1750
1751 if (!isTrusted)
1752 {
1753 if (tracking_.load() == Tracking::diverged)
1754 {
1755 JLOG(p_journal_.debug())
1756 << "Proposal: Dropping untrusted (peer divergence)";
1757 return;
1758 }
1759
1760 if (!cluster() && app_.getFeeTrack().isLoadedLocal())
1761 {
1762 JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (load)";
1763 return;
1764 }
1765 }
1766
1767 JLOG(p_journal_.trace())
1768 << "Proposal: " << (isTrusted ? "trusted" : "untrusted");
1769
1770 auto proposal = RCLCxPeerPos(
1771 publicKey,
1772 sig,
1773 suppression,
1775 prevLedger,
1776 set.proposeseq(),
1777 proposeHash,
1778 closeTime,
1779 app_.timeKeeper().closeTime(),
1780 calcNodeID(app_.validatorManifests().getMasterKey(publicKey))});
1781
1782 std::weak_ptr<PeerImp> weak = shared_from_this();
1783 app_.getJobQueue().addJob(
1784 isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut,
1785 "recvPropose->checkPropose",
1786 [weak, isTrusted, m, proposal]() {
1787 if (auto peer = weak.lock())
1788 peer->checkPropose(isTrusted, m, proposal);
1789 });
1790}
1791
1792void
1794{
1795 JLOG(p_journal_.trace()) << "Status: Change";
1796
1797 if (!m->has_networktime())
1798 m->set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1799
1800 {
1801 std::lock_guard sl(recentLock_);
1802 if (!last_status_.has_newstatus() || m->has_newstatus())
1803 last_status_ = *m;
1804 else
1805 {
1806 // preserve old status
1807 protocol::NodeStatus status = last_status_.newstatus();
1808 last_status_ = *m;
1809 m->set_newstatus(status);
1810 }
1811 }
1812
1813 if (m->newevent() == protocol::neLOST_SYNC)
1814 {
1815 bool outOfSync{false};
1816 {
1817 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1818 // guarded by recentLock_.
1819 std::lock_guard sl(recentLock_);
1820 if (!closedLedgerHash_.isZero())
1821 {
1822 outOfSync = true;
1823 closedLedgerHash_.zero();
1824 }
1825 previousLedgerHash_.zero();
1826 }
1827 if (outOfSync)
1828 {
1829 JLOG(p_journal_.debug()) << "Status: Out of sync";
1830 }
1831 return;
1832 }
1833
1834 {
1835 uint256 closedLedgerHash{};
1836 bool const peerChangedLedgers{
1837 m->has_ledgerhash() && stringIsUint256Sized(m->ledgerhash())};
1838
1839 {
1840 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1841 // guarded by recentLock_.
1842 std::lock_guard sl(recentLock_);
1843 if (peerChangedLedgers)
1844 {
1845 closedLedgerHash_ = m->ledgerhash();
1846 closedLedgerHash = closedLedgerHash_;
1847 addLedger(closedLedgerHash, sl);
1848 }
1849 else
1850 {
1851 closedLedgerHash_.zero();
1852 }
1853
1854 if (m->has_ledgerhashprevious() &&
1855 stringIsUint256Sized(m->ledgerhashprevious()))
1856 {
1857 previousLedgerHash_ = m->ledgerhashprevious();
1858 addLedger(previousLedgerHash_, sl);
1859 }
1860 else
1861 {
1862 previousLedgerHash_.zero();
1863 }
1864 }
1865 if (peerChangedLedgers)
1866 {
1867 JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;
1868 }
1869 else
1870 {
1871 JLOG(p_journal_.debug()) << "Status: No ledger";
1872 }
1873 }
1874
1875 if (m->has_firstseq() && m->has_lastseq())
1876 {
1877 std::lock_guard sl(recentLock_);
1878
1879 minLedger_ = m->firstseq();
1880 maxLedger_ = m->lastseq();
1881
1882 if ((maxLedger_ < minLedger_) || (minLedger_ == 0) || (maxLedger_ == 0))
1883 minLedger_ = maxLedger_ = 0;
1884 }
1885
1886 if (m->has_ledgerseq() &&
1887 app_.getLedgerMaster().getValidatedLedgerAge() < 2min)
1888 {
1889 checkTracking(
1890 m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex());
1891 }
1892
1893 app_.getOPs().pubPeerStatus([=, this]() -> Json::Value {
1895
1896 if (m->has_newstatus())
1897 {
1898 switch (m->newstatus())
1899 {
1900 case protocol::nsCONNECTING:
1901 j[jss::status] = "CONNECTING";
1902 break;
1903 case protocol::nsCONNECTED:
1904 j[jss::status] = "CONNECTED";
1905 break;
1906 case protocol::nsMONITORING:
1907 j[jss::status] = "MONITORING";
1908 break;
1909 case protocol::nsVALIDATING:
1910 j[jss::status] = "VALIDATING";
1911 break;
1912 case protocol::nsSHUTTING:
1913 j[jss::status] = "SHUTTING";
1914 break;
1915 }
1916 }
1917
1918 if (m->has_newevent())
1919 {
1920 switch (m->newevent())
1921 {
1922 case protocol::neCLOSING_LEDGER:
1923 j[jss::action] = "CLOSING_LEDGER";
1924 break;
1925 case protocol::neACCEPTED_LEDGER:
1926 j[jss::action] = "ACCEPTED_LEDGER";
1927 break;
1928 case protocol::neSWITCHED_LEDGER:
1929 j[jss::action] = "SWITCHED_LEDGER";
1930 break;
1931 case protocol::neLOST_SYNC:
1932 j[jss::action] = "LOST_SYNC";
1933 break;
1934 }
1935 }
1936
1937 if (m->has_ledgerseq())
1938 {
1939 j[jss::ledger_index] = m->ledgerseq();
1940 }
1941
1942 if (m->has_ledgerhash())
1943 {
1944 uint256 closedLedgerHash{};
1945 {
1946 std::lock_guard sl(recentLock_);
1947 closedLedgerHash = closedLedgerHash_;
1948 }
1949 j[jss::ledger_hash] = to_string(closedLedgerHash);
1950 }
1951
1952 if (m->has_networktime())
1953 {
1954 j[jss::date] = Json::UInt(m->networktime());
1955 }
1956
1957 if (m->has_firstseq() && m->has_lastseq())
1958 {
1959 j[jss::ledger_index_min] = Json::UInt(m->firstseq());
1960 j[jss::ledger_index_max] = Json::UInt(m->lastseq());
1961 }
1962
1963 return j;
1964 });
1965}
1966
1967void
1968PeerImp::checkTracking(std::uint32_t validationSeq)
1969{
1970 std::uint32_t serverSeq;
1971 {
1972 // Extract the sequence number of the highest
1973 // ledger this peer has
1974 std::lock_guard sl(recentLock_);
1975
1976 serverSeq = maxLedger_;
1977 }
1978 if (serverSeq != 0)
1979 {
1980 // Compare the peer's ledger sequence to the
1981 // sequence of a recently-validated ledger
1982 checkTracking(serverSeq, validationSeq);
1983 }
1984}
1985
1986void
1987PeerImp::checkTracking(std::uint32_t seq1, std::uint32_t seq2)
1988{
1989 int diff = std::max(seq1, seq2) - std::min(seq1, seq2);
1990
1991 if (diff < Tuning::convergedLedgerLimit)
1992 {
1993 // The peer's ledger sequence is close to the validation's
1994 tracking_ = Tracking::converged;
1995 }
1996
1997 if ((diff > Tuning::divergedLedgerLimit) &&
1998 (tracking_.load() != Tracking::diverged))
1999 {
2000 // The peer's ledger sequence is way off the validation's
2001 std::lock_guard sl(recentLock_);
2002
2003 tracking_ = Tracking::diverged;
2004 trackingTime_ = clock_type::now();
2005 }
2006}
2007
2008void
2010{
2011 if (!stringIsUint256Sized(m->hash()))
2012 {
2013 fee_.update(Resource::feeMalformedRequest, "bad hash");
2014 return;
2015 }
2016
2017 uint256 const hash{m->hash()};
2018
2019 if (m->status() == protocol::tsHAVE)
2020 {
2021 std::lock_guard sl(recentLock_);
2022
2023 if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
2024 recentTxSets_.end())
2025 {
2026 fee_.update(Resource::feeUselessData, "duplicate (tsHAVE)");
2027 return;
2028 }
2029
2030 recentTxSets_.push_back(hash);
2031 }
2032}
2033
// Common handler for TMValidatorList / TMValidatorListCollection payloads:
// validates the blobs, applies and broadcasts them via the ValidatorList,
// then adjusts fees and logging based on how each blob was disposed of.
void
PeerImp::onValidatorListMessage(
    std::string const& messageType,
    std::string const& manifest,
    std::uint32_t version,
    std::vector<ValidatorBlobInfo> const& blobs)
{
    // If there are no blobs, the message is malformed (possibly because of
    // ValidatorList class rules), so charge accordingly and skip processing.
    if (blobs.empty())
    {
        JLOG(p_journal_.warn()) << "Ignored malformed " << messageType
                                << " from peer " << remote_address_;
        // This shouldn't ever happen with a well-behaved peer
        fee_.update(Resource::feeHeavyBurdenPeer, "no blobs");
        return;
    }

    // Hash of the full payload, used to suppress duplicates.
    auto const hash = sha512Half(manifest, blobs, version);

    JLOG(p_journal_.debug())
        << "Received " << messageType << " from " << remote_address_.to_string()
        << " (" << id_ << ")";

    if (!app_.getHashRouter().addSuppressionPeer(hash, id_))
    {
        JLOG(p_journal_.debug())
            << messageType << ": received duplicate " << messageType;
        // Charging this fee here won't hurt the peer in the normal
        // course of operation (ie. refresh every 5 minutes), but
        // will add up if the peer is misbehaving.
        fee_.update(Resource::feeUselessData, "duplicate");
        return;
    }

    auto const applyResult = app_.validators().applyListsAndBroadcast(
        manifest,
        version,
        blobs,
        remote_address_.to_string(),
        hash,
        app_.overlay(),
        app_.getHashRouter(),
        app_.getOPs());

    JLOG(p_journal_.debug())
        << "Processed " << messageType << " version " << version << " from "
        << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
                                     : "unknown or invalid publisher")
        << " from " << remote_address_.to_string() << " (" << id_
        << ") with best result " << to_string(applyResult.bestDisposition());

    // Act based on the best result
    switch (applyResult.bestDisposition())
    {
        // New list
        case ListDisposition::accepted:
        // Newest list is expired, and that needs to be broadcast, too
        case ListDisposition::expired:
        // Future list
        case ListDisposition::pending: {
            // publisherListSequences_ is guarded by recentLock_.
            std::lock_guard<std::mutex> sl(recentLock_);

            XRPL_ASSERT(
                applyResult.publisherKey,
                "ripple::PeerImp::onValidatorListMessage : publisher key is "
                "set");
            auto const& pubKey = *applyResult.publisherKey;
#ifndef NDEBUG
            if (auto const iter = publisherListSequences_.find(pubKey);
                iter != publisherListSequences_.end())
            {
                XRPL_ASSERT(
                    iter->second < applyResult.sequence,
                    "ripple::PeerImp::onValidatorListMessage : lower sequence");
            }
#endif
            publisherListSequences_[pubKey] = applyResult.sequence;
        }
        break;
        case ListDisposition::same_sequence:
        case ListDisposition::known_sequence:
#ifndef NDEBUG
        {
            std::lock_guard<std::mutex> sl(recentLock_);
            XRPL_ASSERT(
                applyResult.sequence && applyResult.publisherKey,
                "ripple::PeerImp::onValidatorListMessage : nonzero sequence "
                "and set publisher key");
            XRPL_ASSERT(
                publisherListSequences_[*applyResult.publisherKey] <=
                    applyResult.sequence,
                "ripple::PeerImp::onValidatorListMessage : maximum sequence");
        }
#endif  // !NDEBUG

            break;
        case ListDisposition::stale:
        case ListDisposition::untrusted:
        case ListDisposition::invalid:
        case ListDisposition::unsupported_version:
            break;
        default:
            UNREACHABLE(
                "ripple::PeerImp::onValidatorListMessage : invalid best list "
                "disposition");
    }

    // Charge based on the worst result
    switch (applyResult.worstDisposition())
    {
        case ListDisposition::accepted:
        case ListDisposition::expired:
        case ListDisposition::pending:
            // No charges for good data
            break;
        case ListDisposition::same_sequence:
        case ListDisposition::known_sequence:
            // Charging this fee here won't hurt the peer in the normal
            // course of operation (ie. refresh every 5 minutes), but
            // will add up if the peer is misbehaving.
            fee_.update(
                Resource::feeUselessData,
                " duplicate (same_sequence or known_sequence)");
            break;
        case ListDisposition::stale:
            // There are very few good reasons for a peer to send an
            // old list, particularly more than once.
            fee_.update(Resource::feeInvalidData, "expired");
            break;
        case ListDisposition::untrusted:
            // Charging this fee here won't hurt the peer in the normal
            // course of operation (ie. refresh every 5 minutes), but
            // will add up if the peer is misbehaving.
            fee_.update(Resource::feeUselessData, "untrusted");
            break;
        case ListDisposition::invalid:
            // This shouldn't ever happen with a well-behaved peer
            fee_.update(
                Resource::feeInvalidSignature, "invalid list disposition");
            break;
        case ListDisposition::unsupported_version:
            // During a version transition, this may be legitimate.
            // If it happens frequently, that's probably bad.
            fee_.update(Resource::feeInvalidData, "version");
            break;
        default:
            UNREACHABLE(
                "ripple::PeerImp::onValidatorListMessage : invalid worst list "
                "disposition");
    }

    // Log based on all the results.
    for (auto const& [disp, count] : applyResult.dispositions)
    {
        switch (disp)
        {
            // New list
            case ListDisposition::accepted:
                JLOG(p_journal_.debug())
                    << "Applied " << count << " new " << messageType
                    << "(s) from peer " << remote_address_;
                break;
            // Newest list is expired, and that needs to be broadcast, too
            case ListDisposition::expired:
                JLOG(p_journal_.debug())
                    << "Applied " << count << " expired " << messageType
                    << "(s) from peer " << remote_address_;
                break;
            // Future list
            case ListDisposition::pending:
                JLOG(p_journal_.debug())
                    << "Processed " << count << " future " << messageType
                    << "(s) from peer " << remote_address_;
                break;
            case ListDisposition::same_sequence:
                JLOG(p_journal_.warn())
                    << "Ignored " << count << " " << messageType
                    << "(s) with current sequence from peer "
                    << remote_address_;
                break;
            case ListDisposition::known_sequence:
                JLOG(p_journal_.warn())
                    << "Ignored " << count << " " << messageType
                    << "(s) with future sequence from peer " << remote_address_;
                break;
            case ListDisposition::stale:
                JLOG(p_journal_.warn())
                    << "Ignored " << count << "stale " << messageType
                    << "(s) from peer " << remote_address_;
                break;
            case ListDisposition::untrusted:
                JLOG(p_journal_.warn())
                    << "Ignored " << count << " untrusted " << messageType
                    << "(s) from peer " << remote_address_;
                break;
            case ListDisposition::unsupported_version:
                JLOG(p_journal_.warn())
                    << "Ignored " << count << "unsupported version "
                    << messageType << "(s) from peer " << remote_address_;
                break;
            case ListDisposition::invalid:
                JLOG(p_journal_.warn())
                    << "Ignored " << count << "invalid " << messageType
                    << "(s) from peer " << remote_address_;
                break;
            default:
                UNREACHABLE(
                    "ripple::PeerImp::onValidatorListMessage : invalid list "
                    "disposition");
        }
    }
}
2247
2248void
2250{
2251 try
2252 {
2253 if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))
2254 {
2255 JLOG(p_journal_.debug())
2256 << "ValidatorList: received validator list from peer using "
2257 << "protocol version " << to_string(protocol_)
2258 << " which shouldn't support this feature.";
2259 fee_.update(Resource::feeUselessData, "unsupported peer");
2260 return;
2261 }
2262 onValidatorListMessage(
2263 "ValidatorList",
2264 m->manifest(),
2265 m->version(),
2266 ValidatorList::parseBlobs(*m));
2267 }
2268 catch (std::exception const& e)
2269 {
2270 JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what()
2271 << " from peer " << remote_address_;
2272 using namespace std::string_literals;
2273 fee_.update(Resource::feeInvalidData, e.what());
2274 }
2275}
2276
2277void
2278PeerImp::onMessage(
2280{
2281 try
2282 {
2283 if (!supportsFeature(ProtocolFeature::ValidatorList2Propagation))
2284 {
2285 JLOG(p_journal_.debug())
2286 << "ValidatorListCollection: received validator list from peer "
2287 << "using protocol version " << to_string(protocol_)
2288 << " which shouldn't support this feature.";
2289 fee_.update(Resource::feeUselessData, "unsupported peer");
2290 return;
2291 }
2292 else if (m->version() < 2)
2293 {
2294 JLOG(p_journal_.debug())
2295 << "ValidatorListCollection: received invalid validator list "
2296 "version "
2297 << m->version() << " from peer using protocol version "
2298 << to_string(protocol_);
2299 fee_.update(Resource::feeInvalidData, "wrong version");
2300 return;
2301 }
2302 onValidatorListMessage(
2303 "ValidatorListCollection",
2304 m->manifest(),
2305 m->version(),
2306 ValidatorList::parseBlobs(*m));
2307 }
2308 catch (std::exception const& e)
2309 {
2310 JLOG(p_journal_.warn()) << "ValidatorListCollection: Exception, "
2311 << e.what() << " from peer " << remote_address_;
2312 using namespace std::string_literals;
2313 fee_.update(Resource::feeInvalidData, e.what());
2314 }
2315}
2316
2317void
2319{
2320 if (m->validation().size() < 50)
2321 {
2322 JLOG(p_journal_.warn()) << "Validation: Too small";
2323 fee_.update(Resource::feeMalformedRequest, "too small");
2324 return;
2325 }
2326
2327 try
2328 {
2329 auto const closeTime = app_.timeKeeper().closeTime();
2330
2332 {
2333 SerialIter sit(makeSlice(m->validation()));
2334 val = std::make_shared<STValidation>(
2335 std::ref(sit),
2336 [this](PublicKey const& pk) {
2337 return calcNodeID(
2338 app_.validatorManifests().getMasterKey(pk));
2339 },
2340 false);
2341 val->setSeen(closeTime);
2342 }
2343
2344 if (!isCurrent(
2345 app_.getValidations().parms(),
2346 app_.timeKeeper().closeTime(),
2347 val->getSignTime(),
2348 val->getSeenTime()))
2349 {
2350 JLOG(p_journal_.trace()) << "Validation: Not current";
2351 fee_.update(Resource::feeUselessData, "not current");
2352 return;
2353 }
2354
2355 // RH TODO: when isTrusted = false we should probably also cache a key
2356 // suppression for 30 seconds to avoid doing a relatively expensive
2357 // lookup every time a spam packet is received
2358 auto const isTrusted =
2359 app_.validators().trusted(val->getSignerPublic());
2360
2361 // If the operator has specified that untrusted validations be
2362 // dropped then this happens here I.e. before further wasting CPU
2363 // verifying the signature of an untrusted key
2364 if (!isTrusted)
2365 {
2366 // increase untrusted validations received
2367 overlay_.reportInboundTraffic(
2368 TrafficCount::category::validation_untrusted,
2369 Message::messageSize(*m));
2370
2371 if (app_.config().RELAY_UNTRUSTED_VALIDATIONS == -1)
2372 return;
2373 }
2374
2375 auto key = sha512Half(makeSlice(m->validation()));
2376
2377 auto [added, relayed] =
2378 app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);
2379
2380 if (!added)
2381 {
2382 // Count unique messages (Slots has it's own 'HashRouter'), which a
2383 // peer receives within IDLED seconds since the message has been
2384 // relayed. Wait WAIT_ON_BOOTUP time to let the server establish
2385 // connections to peers.
2386 if (reduceRelayReady() && relayed &&
2387 (stopwatch().now() - *relayed) < reduce_relay::IDLED)
2388 overlay_.updateSlotAndSquelch(
2389 key, val->getSignerPublic(), id_, protocol::mtVALIDATION);
2390
2391 // increase duplicate validations received
2392 overlay_.reportInboundTraffic(
2393 TrafficCount::category::validation_duplicate,
2394 Message::messageSize(*m));
2395
2396 JLOG(p_journal_.trace()) << "Validation: duplicate";
2397 return;
2398 }
2399
2400 if (!isTrusted && (tracking_.load() == Tracking::diverged))
2401 {
2402 JLOG(p_journal_.debug())
2403 << "Dropping untrusted validation from diverged peer";
2404 }
2405 else if (isTrusted || !app_.getFeeTrack().isLoadedLocal())
2406 {
2407 std::string const name = [isTrusted, val]() {
2408 std::string ret =
2409 isTrusted ? "Trusted validation" : "Untrusted validation";
2410
2411#ifdef DEBUG
2412 ret += " " +
2413 std::to_string(val->getFieldU32(sfLedgerSequence)) + ": " +
2414 to_string(val->getNodeID());
2415#endif
2416
2417 return ret;
2418 }();
2419
2420 std::weak_ptr<PeerImp> weak = shared_from_this();
2421 app_.getJobQueue().addJob(
2422 isTrusted ? jtVALIDATION_t : jtVALIDATION_ut,
2423 name,
2424 [weak, val, m, key]() {
2425 if (auto peer = weak.lock())
2426 peer->checkValidation(val, key, m);
2427 });
2428 }
2429 else
2430 {
2431 JLOG(p_journal_.debug())
2432 << "Dropping untrusted validation for load";
2433 }
2434 }
2435 catch (std::exception const& e)
2436 {
2437 JLOG(p_journal_.warn())
2438 << "Exception processing validation: " << e.what();
2439 using namespace std::string_literals;
2440 fee_.update(Resource::feeMalformedRequest, e.what());
2441 }
2442}
2443
2444void
2446{
2447 protocol::TMGetObjectByHash& packet = *m;
2448
2449 JLOG(p_journal_.trace()) << "received TMGetObjectByHash " << packet.type()
2450 << " " << packet.objects_size();
2451
2452 if (packet.query())
2453 {
2454 // this is a query
2455 if (send_queue_.size() >= Tuning::dropSendQueue)
2456 {
2457 JLOG(p_journal_.debug()) << "GetObject: Large send queue";
2458 return;
2459 }
2460
2461 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2462 {
2463 doFetchPack(m);
2464 return;
2465 }
2466
2467 if (packet.type() == protocol::TMGetObjectByHash::otTRANSACTIONS)
2468 {
2469 if (!txReduceRelayEnabled())
2470 {
2471 JLOG(p_journal_.error())
2472 << "TMGetObjectByHash: tx reduce-relay is disabled";
2473 fee_.update(Resource::feeMalformedRequest, "disabled");
2474 return;
2475 }
2476
2477 std::weak_ptr<PeerImp> weak = shared_from_this();
2478 app_.getJobQueue().addJob(
2479 jtREQUESTED_TXN, "doTransactions", [weak, m]() {
2480 if (auto peer = weak.lock())
2481 peer->doTransactions(m);
2482 });
2483 return;
2484 }
2485
2486 protocol::TMGetObjectByHash reply;
2487
2488 reply.set_query(false);
2489
2490 if (packet.has_seq())
2491 reply.set_seq(packet.seq());
2492
2493 reply.set_type(packet.type());
2494
2495 if (packet.has_ledgerhash())
2496 {
2497 if (!stringIsUint256Sized(packet.ledgerhash()))
2498 {
2499 fee_.update(Resource::feeMalformedRequest, "ledger hash");
2500 return;
2501 }
2502
2503 reply.set_ledgerhash(packet.ledgerhash());
2504 }
2505
2506 fee_.update(
2507 Resource::feeModerateBurdenPeer,
2508 " received a get object by hash request");
2509
2510 // This is a very minimal implementation
2511 for (int i = 0; i < packet.objects_size(); ++i)
2512 {
2513 auto const& obj = packet.objects(i);
2514 if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2515 {
2516 uint256 const hash{obj.hash()};
2517 // VFALCO TODO Move this someplace more sensible so we dont
2518 // need to inject the NodeStore interfaces.
2519 std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
2520 auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
2521 if (nodeObject)
2522 {
2523 protocol::TMIndexedObject& newObj = *reply.add_objects();
2524 newObj.set_hash(hash.begin(), hash.size());
2525 newObj.set_data(
2526 &nodeObject->getData().front(),
2527 nodeObject->getData().size());
2528
2529 if (obj.has_nodeid())
2530 newObj.set_index(obj.nodeid());
2531 if (obj.has_ledgerseq())
2532 newObj.set_ledgerseq(obj.ledgerseq());
2533
2534 // VFALCO NOTE "seq" in the message is obsolete
2535 }
2536 }
2537 }
2538
2539 JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size() << " of "
2540 << packet.objects_size();
2541 send(std::make_shared<Message>(reply, protocol::mtGET_OBJECTS));
2542 }
2543 else
2544 {
2545 // this is a reply
2546 std::uint32_t pLSeq = 0;
2547 bool pLDo = true;
2548 bool progress = false;
2549
2550 for (int i = 0; i < packet.objects_size(); ++i)
2551 {
2552 protocol::TMIndexedObject const& obj = packet.objects(i);
2553
2554 if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2555 {
2556 if (obj.has_ledgerseq())
2557 {
2558 if (obj.ledgerseq() != pLSeq)
2559 {
2560 if (pLDo && (pLSeq != 0))
2561 {
2562 JLOG(p_journal_.debug())
2563 << "GetObj: Full fetch pack for " << pLSeq;
2564 }
2565 pLSeq = obj.ledgerseq();
2566 pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);
2567
2568 if (!pLDo)
2569 {
2570 JLOG(p_journal_.debug())
2571 << "GetObj: Late fetch pack for " << pLSeq;
2572 }
2573 else
2574 progress = true;
2575 }
2576 }
2577
2578 if (pLDo)
2579 {
2580 uint256 const hash{obj.hash()};
2581
2582 app_.getLedgerMaster().addFetchPack(
2583 hash,
2584 std::make_shared<Blob>(
2585 obj.data().begin(), obj.data().end()));
2586 }
2587 }
2588 }
2589
2590 if (pLDo && (pLSeq != 0))
2591 {
2592 JLOG(p_journal_.debug())
2593 << "GetObj: Partial fetch pack for " << pLSeq;
2594 }
2595 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2596 app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
2597 }
2598}
2599
2600void
2602{
2603 if (!txReduceRelayEnabled())
2604 {
2605 JLOG(p_journal_.error())
2606 << "TMHaveTransactions: tx reduce-relay is disabled";
2607 fee_.update(Resource::feeMalformedRequest, "disabled");
2608 return;
2609 }
2610
2611 std::weak_ptr<PeerImp> weak = shared_from_this();
2612 app_.getJobQueue().addJob(
2613 jtMISSING_TXN, "handleHaveTransactions", [weak, m]() {
2614 if (auto peer = weak.lock())
2615 peer->handleHaveTransactions(m);
2616 });
2617}
2618
2619void
2620PeerImp::handleHaveTransactions(
2622{
2623 protocol::TMGetObjectByHash tmBH;
2624 tmBH.set_type(protocol::TMGetObjectByHash_ObjectType_otTRANSACTIONS);
2625 tmBH.set_query(true);
2626
2627 JLOG(p_journal_.trace())
2628 << "received TMHaveTransactions " << m->hashes_size();
2629
2630 for (std::uint32_t i = 0; i < m->hashes_size(); i++)
2631 {
2632 if (!stringIsUint256Sized(m->hashes(i)))
2633 {
2634 JLOG(p_journal_.error())
2635 << "TMHaveTransactions with invalid hash size";
2636 fee_.update(Resource::feeMalformedRequest, "hash size");
2637 return;
2638 }
2639
2640 uint256 hash(m->hashes(i));
2641
2642 auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2643
2644 JLOG(p_journal_.trace()) << "checking transaction " << (bool)txn;
2645
2646 if (!txn)
2647 {
2648 JLOG(p_journal_.debug()) << "adding transaction to request";
2649
2650 auto obj = tmBH.add_objects();
2651 obj->set_hash(hash.data(), hash.size());
2652 }
2653 else
2654 {
2655 // Erase only if a peer has seen this tx. If the peer has not
2656 // seen this tx then the tx could not has been queued for this
2657 // peer.
2658 removeTxQueue(hash);
2659 }
2660 }
2661
2662 JLOG(p_journal_.trace())
2663 << "transaction request object is " << tmBH.objects_size();
2664
2665 if (tmBH.objects_size() > 0)
2666 send(std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS));
2667}
2668
2669void
2671{
2672 if (!txReduceRelayEnabled())
2673 {
2674 JLOG(p_journal_.error())
2675 << "TMTransactions: tx reduce-relay is disabled";
2676 fee_.update(Resource::feeMalformedRequest, "disabled");
2677 return;
2678 }
2679
2680 JLOG(p_journal_.trace())
2681 << "received TMTransactions " << m->transactions_size();
2682
2683 overlay_.addTxMetrics(m->transactions_size());
2684
2685 for (std::uint32_t i = 0; i < m->transactions_size(); ++i)
2686 handleTransaction(
2688 m->mutable_transactions(i), [](protocol::TMTransaction*) {}),
2689 false,
2690 true);
2691}
2692
2693void
2694PeerImp::onMessage(std::shared_ptr<protocol::TMSquelch> const& m)
2695{
2696 using on_message_fn =
2698 if (!strand_.running_in_this_thread())
2699 return post(
2700 strand_,
2701 std::bind(
2702 (on_message_fn)&PeerImp::onMessage, shared_from_this(), m));
2703
2704 if (!m->has_validatorpubkey())
2705 {
2706 fee_.update(Resource::feeInvalidData, "squelch no pubkey");
2707 return;
2708 }
2709 auto validator = m->validatorpubkey();
2710 auto const slice{makeSlice(validator)};
2711 if (!publicKeyType(slice))
2712 {
2713 fee_.update(Resource::feeInvalidData, "squelch bad pubkey");
2714 return;
2715 }
2716 PublicKey key(slice);
2717
2718 // Ignore the squelch for validator's own messages.
2719 if (key == app_.getValidationPublicKey())
2720 {
2721 JLOG(p_journal_.debug())
2722 << "onMessage: TMSquelch discarding validator's squelch " << slice;
2723 return;
2724 }
2725
2726 std::uint32_t duration =
2727 m->has_squelchduration() ? m->squelchduration() : 0;
2728 if (!m->squelch())
2729 squelch_.removeSquelch(key);
2730 else if (!squelch_.addSquelch(key, std::chrono::seconds{duration}))
2731 fee_.update(Resource::feeInvalidData, "squelch duration");
2732
2733 JLOG(p_journal_.debug())
2734 << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;
2735}
2736
2737//--------------------------------------------------------------------------
2738
2739void
2740PeerImp::addLedger(
2741 uint256 const& hash,
2742 std::lock_guard<std::mutex> const& lockedRecentLock)
2743{
2744 // lockedRecentLock is passed as a reminder that recentLock_ must be
2745 // locked by the caller.
2746 (void)lockedRecentLock;
2747
2748 if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
2749 recentLedgers_.end())
2750 return;
2751
2752 recentLedgers_.push_back(hash);
2753}
2754
2755void
2756PeerImp::doFetchPack(std::shared_ptr<protocol::TMGetObjectByHash> const& packet)
2757{
2758 // VFALCO TODO Invert this dependency using an observer and shared state
2759 // object. Don't queue fetch pack jobs if we're under load or we already
2760 // have some queued.
2761 if (app_.getFeeTrack().isLoadedLocal() ||
2762 (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
2763 (app_.getJobQueue().getJobCount(jtPACK) > 10))
2764 {
2765 JLOG(p_journal_.info()) << "Too busy to make fetch pack";
2766 return;
2767 }
2768
2769 if (!stringIsUint256Sized(packet->ledgerhash()))
2770 {
2771 JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
2772 fee_.update(Resource::feeMalformedRequest, "hash size");
2773 return;
2774 }
2775
2776 fee_.fee = Resource::feeHeavyBurdenPeer;
2777
2778 uint256 const hash{packet->ledgerhash()};
2779
2780 std::weak_ptr<PeerImp> weak = shared_from_this();
2781 auto elapsed = UptimeClock::now();
2782 auto const pap = &app_;
2783 app_.getJobQueue().addJob(
2784 jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed]() {
2785 pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
2786 });
2787}
2788
2789void
2790PeerImp::doTransactions(
2792{
2793 protocol::TMTransactions reply;
2794
2795 JLOG(p_journal_.trace()) << "received TMGetObjectByHash requesting tx "
2796 << packet->objects_size();
2797
2798 if (packet->objects_size() > reduce_relay::MAX_TX_QUEUE_SIZE)
2799 {
2800 JLOG(p_journal_.error()) << "doTransactions, invalid number of hashes";
2801 fee_.update(Resource::feeMalformedRequest, "too big");
2802 return;
2803 }
2804
2805 for (std::uint32_t i = 0; i < packet->objects_size(); ++i)
2806 {
2807 auto const& obj = packet->objects(i);
2808
2809 if (!stringIsUint256Sized(obj.hash()))
2810 {
2811 fee_.update(Resource::feeMalformedRequest, "hash size");
2812 return;
2813 }
2814
2815 uint256 hash(obj.hash());
2816
2817 auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2818
2819 if (!txn)
2820 {
2821 JLOG(p_journal_.error()) << "doTransactions, transaction not found "
2822 << Slice(hash.data(), hash.size());
2823 fee_.update(Resource::feeMalformedRequest, "tx not found");
2824 return;
2825 }
2826
2827 Serializer s;
2828 auto tx = reply.add_transactions();
2829 auto sttx = txn->getSTransaction();
2830 sttx->add(s);
2831 tx->set_rawtransaction(s.data(), s.size());
2832 tx->set_status(
2833 txn->getStatus() == INCLUDED ? protocol::tsCURRENT
2834 : protocol::tsNEW);
2835 tx->set_receivetimestamp(
2836 app_.timeKeeper().now().time_since_epoch().count());
2837 tx->set_deferred(txn->getSubmitResult().queued);
2838 }
2839
2840 if (reply.transactions_size() > 0)
2841 send(std::make_shared<Message>(reply, protocol::mtTRANSACTIONS));
2842}
2843
2844void
2845PeerImp::checkTransaction(
2846 int flags,
2847 bool checkSignature,
2848 std::shared_ptr<STTx const> const& stx,
2849 bool batch)
2850{
2851 // VFALCO TODO Rewrite to not use exceptions
2852 try
2853 {
2854 // charge strongly for relaying batch txns
2855 // LCOV_EXCL_START
2856 if (stx->isFlag(tfInnerBatchTxn) &&
2857 getCurrentTransactionRules()->enabled(featureBatch))
2858 {
2859 JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing "
2860 "tfInnerBatchTxn (checkSignature).";
2861 charge(Resource::feeModerateBurdenPeer, "inner batch txn");
2862 return;
2863 }
2864 // LCOV_EXCL_STOP
2865
2866 // Expired?
2867 if (stx->isFieldPresent(sfLastLedgerSequence) &&
2868 (stx->getFieldU32(sfLastLedgerSequence) <
2869 app_.getLedgerMaster().getValidLedgerIndex()))
2870 {
2871 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2872 charge(Resource::feeUselessData, "expired tx");
2873 return;
2874 }
2875
2876 if (isPseudoTx(*stx))
2877 {
2878 // Don't do anything with pseudo transactions except put them in the
2879 // TransactionMaster cache
2880 std::string reason;
2881 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2882 XRPL_ASSERT(
2883 tx->getStatus() == NEW,
2884 "ripple::PeerImp::checkTransaction Transaction created "
2885 "correctly");
2886 if (tx->getStatus() == NEW)
2887 {
2888 JLOG(p_journal_.debug())
2889 << "Processing " << (batch ? "batch" : "unsolicited")
2890 << " pseudo-transaction tx " << tx->getID();
2891
2892 app_.getMasterTransaction().canonicalize(&tx);
2893 // Tell the overlay about it, but don't relay it.
2894 auto const toSkip =
2895 app_.getHashRouter().shouldRelay(tx->getID());
2896 if (toSkip)
2897 {
2898 JLOG(p_journal_.debug())
2899 << "Passing skipped pseudo pseudo-transaction tx "
2900 << tx->getID();
2901 app_.overlay().relay(tx->getID(), {}, *toSkip);
2902 }
2903 if (!batch)
2904 {
2905 JLOG(p_journal_.debug())
2906 << "Charging for pseudo-transaction tx " << tx->getID();
2907 charge(Resource::feeUselessData, "pseudo tx");
2908 }
2909
2910 return;
2911 }
2912 }
2913
2914 if (checkSignature)
2915 {
2916 // Check the signature before handing off to the job queue.
2917 if (auto [valid, validReason] = checkValidity(
2918 app_.getHashRouter(),
2919 *stx,
2920 app_.getLedgerMaster().getValidatedRules(),
2921 app_.config());
2922 valid != Validity::Valid)
2923 {
2924 if (!validReason.empty())
2925 {
2926 JLOG(p_journal_.trace())
2927 << "Exception checking transaction: " << validReason;
2928 }
2929
2930 // Probably not necessary to set SF_BAD, but doesn't hurt.
2931 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2932 charge(
2933 Resource::feeInvalidSignature,
2934 "check transaction signature failure");
2935 return;
2936 }
2937 }
2938 else
2939 {
2941 app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);
2942 }
2943
2944 std::string reason;
2945 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2946
2947 if (tx->getStatus() == INVALID)
2948 {
2949 if (!reason.empty())
2950 {
2951 JLOG(p_journal_.trace())
2952 << "Exception checking transaction: " << reason;
2953 }
2954 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2955 charge(Resource::feeInvalidSignature, "tx (impossible)");
2956 return;
2957 }
2958
2959 bool const trusted(flags & SF_TRUSTED);
2960 app_.getOPs().processTransaction(
2961 tx, trusted, false, NetworkOPs::FailHard::no);
2962 }
2963 catch (std::exception const& ex)
2964 {
2965 JLOG(p_journal_.warn())
2966 << "Exception in " << __func__ << ": " << ex.what();
2967 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2968 using namespace std::string_literals;
2969 charge(Resource::feeInvalidData, "tx "s + ex.what());
2970 }
2971}
2972
2973// Called from our JobQueue
2974void
2975PeerImp::checkPropose(
2976 bool isTrusted,
2978 RCLCxPeerPos peerPos)
2979{
2980 JLOG(p_journal_.trace())
2981 << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";
2982
2983 XRPL_ASSERT(packet, "ripple::PeerImp::checkPropose : non-null packet");
2984
2985 if (!cluster() && !peerPos.checkSign())
2986 {
2987 std::string desc{"Proposal fails sig check"};
2988 JLOG(p_journal_.warn()) << desc;
2989 charge(Resource::feeInvalidSignature, desc);
2990 return;
2991 }
2992
2993 bool relay;
2994
2995 if (isTrusted)
2996 relay = app_.getOPs().processTrustedProposal(peerPos);
2997 else
2998 relay = app_.config().RELAY_UNTRUSTED_PROPOSALS == 1 || cluster();
2999
3000 if (relay)
3001 {
3002 // haveMessage contains peers, which are suppressed; i.e. the peers
3003 // are the source of the message, consequently the message should
3004 // not be relayed to these peers. But the message must be counted
3005 // as part of the squelch logic.
3006 auto haveMessage = app_.overlay().relay(
3007 *packet, peerPos.suppressionID(), peerPos.publicKey());
3008 if (reduceRelayReady() && !haveMessage.empty())
3009 overlay_.updateSlotAndSquelch(
3010 peerPos.suppressionID(),
3011 peerPos.publicKey(),
3012 std::move(haveMessage),
3013 protocol::mtPROPOSE_LEDGER);
3014 }
3015}
3016
3017void
3018PeerImp::checkValidation(
3020 uint256 const& key,
3022{
3023 if (!val->isValid())
3024 {
3025 std::string desc{"Validation forwarded by peer is invalid"};
3026 JLOG(p_journal_.debug()) << desc;
3027 charge(Resource::feeInvalidSignature, desc);
3028 return;
3029 }
3030
3031 // FIXME it should be safe to remove this try/catch. Investigate codepaths.
3032 try
3033 {
3034 if (app_.getOPs().recvValidation(val, std::to_string(id())) ||
3035 cluster())
3036 {
3037 // haveMessage contains peers, which are suppressed; i.e. the peers
3038 // are the source of the message, consequently the message should
3039 // not be relayed to these peers. But the message must be counted
3040 // as part of the squelch logic.
3041 auto haveMessage =
3042 overlay_.relay(*packet, key, val->getSignerPublic());
3043 if (reduceRelayReady() && !haveMessage.empty())
3044 {
3045 overlay_.updateSlotAndSquelch(
3046 key,
3047 val->getSignerPublic(),
3048 std::move(haveMessage),
3049 protocol::mtVALIDATION);
3050 }
3051 }
3052 }
3053 catch (std::exception const& ex)
3054 {
3055 JLOG(p_journal_.trace())
3056 << "Exception processing validation: " << ex.what();
3057 using namespace std::string_literals;
3058 charge(Resource::feeMalformedRequest, "validation "s + ex.what());
3059 }
3060}
3061
3062// Returns the set of peers that can help us get
3063// the TX tree with the specified root hash.
3064//
3066getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip)
3067{
3069 int retScore = 0;
3070
3072 if (p->hasTxSet(rootHash) && p.get() != skip)
3073 {
3074 auto score = p->getScore(true);
3075 if (!ret || (score > retScore))
3076 {
3077 ret = std::move(p);
3078 retScore = score;
3079 }
3080 }
3081 });
3082
3083 return ret;
3084}
3085
3086// Returns a random peer weighted by how likely to
3087// have the ledger and how responsive it is.
3088//
3091 OverlayImpl& ov,
3092 uint256 const& ledgerHash,
3093 LedgerIndex ledger,
3094 PeerImp const* skip)
3095{
3097 int retScore = 0;
3098
3100 if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)
3101 {
3102 auto score = p->getScore(true);
3103 if (!ret || (score > retScore))
3104 {
3105 ret = std::move(p);
3106 retScore = score;
3107 }
3108 }
3109 });
3110
3111 return ret;
3112}
3113
3114void
3115PeerImp::sendLedgerBase(
3116 std::shared_ptr<Ledger const> const& ledger,
3117 protocol::TMLedgerData& ledgerData)
3118{
3119 JLOG(p_journal_.trace()) << "sendLedgerBase: Base data";
3120
3121 Serializer s(sizeof(LedgerInfo));
3122 addRaw(ledger->info(), s);
3123 ledgerData.add_nodes()->set_nodedata(s.getDataPtr(), s.getLength());
3124
3125 auto const& stateMap{ledger->stateMap()};
3126 if (stateMap.getHash() != beast::zero)
3127 {
3128 // Return account state root node if possible
3129 Serializer root(768);
3130
3131 stateMap.serializeRoot(root);
3132 ledgerData.add_nodes()->set_nodedata(
3133 root.getDataPtr(), root.getLength());
3134
3135 if (ledger->info().txHash != beast::zero)
3136 {
3137 auto const& txMap{ledger->txMap()};
3138 if (txMap.getHash() != beast::zero)
3139 {
3140 // Return TX root node if possible
3141 root.erase();
3142 txMap.serializeRoot(root);
3143 ledgerData.add_nodes()->set_nodedata(
3144 root.getDataPtr(), root.getLength());
3145 }
3146 }
3147 }
3148
3149 auto message{
3150 std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
3151 send(message);
3152}
3153
3155PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
3156{
3157 JLOG(p_journal_.trace()) << "getLedger: Ledger";
3158
3160
3161 if (m->has_ledgerhash())
3162 {
3163 // Attempt to find ledger by hash
3164 uint256 const ledgerHash{m->ledgerhash()};
3165 ledger = app_.getLedgerMaster().getLedgerByHash(ledgerHash);
3166 if (!ledger)
3167 {
3168 JLOG(p_journal_.trace())
3169 << "getLedger: Don't have ledger with hash " << ledgerHash;
3170
3171 if (m->has_querytype() && !m->has_requestcookie())
3172 {
3173 // Attempt to relay the request to a peer
3174 if (auto const peer = getPeerWithLedger(
3175 overlay_,
3176 ledgerHash,
3177 m->has_ledgerseq() ? m->ledgerseq() : 0,
3178 this))
3179 {
3180 m->set_requestcookie(id());
3181 peer->send(
3182 std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3183 JLOG(p_journal_.debug())
3184 << "getLedger: Request relayed to peer";
3185 return ledger;
3186 }
3187
3188 JLOG(p_journal_.trace())
3189 << "getLedger: Failed to find peer to relay request";
3190 }
3191 }
3192 }
3193 else if (m->has_ledgerseq())
3194 {
3195 // Attempt to find ledger by sequence
3196 if (m->ledgerseq() < app_.getLedgerMaster().getEarliestFetch())
3197 {
3198 JLOG(p_journal_.debug())
3199 << "getLedger: Early ledger sequence request";
3200 }
3201 else
3202 {
3203 ledger = app_.getLedgerMaster().getLedgerBySeq(m->ledgerseq());
3204 if (!ledger)
3205 {
3206 JLOG(p_journal_.debug())
3207 << "getLedger: Don't have ledger with sequence "
3208 << m->ledgerseq();
3209 }
3210 }
3211 }
3212 else if (m->has_ltype() && m->ltype() == protocol::ltCLOSED)
3213 {
3214 ledger = app_.getLedgerMaster().getClosedLedger();
3215 }
3216
3217 if (ledger)
3218 {
3219 // Validate retrieved ledger sequence
3220 auto const ledgerSeq{ledger->info().seq};
3221 if (m->has_ledgerseq())
3222 {
3223 if (ledgerSeq != m->ledgerseq())
3224 {
3225 // Do not resource charge a peer responding to a relay
3226 if (!m->has_requestcookie())
3227 charge(
3228 Resource::feeMalformedRequest, "get_ledger ledgerSeq");
3229
3230 ledger.reset();
3231 JLOG(p_journal_.warn())
3232 << "getLedger: Invalid ledger sequence " << ledgerSeq;
3233 }
3234 }
3235 else if (ledgerSeq < app_.getLedgerMaster().getEarliestFetch())
3236 {
3237 ledger.reset();
3238 JLOG(p_journal_.debug())
3239 << "getLedger: Early ledger sequence request " << ledgerSeq;
3240 }
3241 }
3242 else
3243 {
3244 JLOG(p_journal_.debug()) << "getLedger: Unable to find ledger";
3245 }
3246
3247 return ledger;
3248}
3249
3251PeerImp::getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const
3252{
3253 JLOG(p_journal_.trace()) << "getTxSet: TX set";
3254
3255 uint256 const txSetHash{m->ledgerhash()};
3257 app_.getInboundTransactions().getSet(txSetHash, false)};
3258 if (!shaMap)
3259 {
3260 if (m->has_querytype() && !m->has_requestcookie())
3261 {
3262 // Attempt to relay the request to a peer
3263 if (auto const peer = getPeerWithTree(overlay_, txSetHash, this))
3264 {
3265 m->set_requestcookie(id());
3266 peer->send(
3267 std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3268 JLOG(p_journal_.debug()) << "getTxSet: Request relayed";
3269 }
3270 else
3271 {
3272 JLOG(p_journal_.debug())
3273 << "getTxSet: Failed to find relay peer";
3274 }
3275 }
3276 else
3277 {
3278 JLOG(p_journal_.debug()) << "getTxSet: Failed to find TX set";
3279 }
3280 }
3281
3282 return shaMap;
3283}
3284
3285void
3286PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
3287{
3288 // Do not resource charge a peer responding to a relay
3289 if (!m->has_requestcookie())
3290 charge(
3291 Resource::feeModerateBurdenPeer, "received a get ledger request");
3292
3295 SHAMap const* map{nullptr};
3296 protocol::TMLedgerData ledgerData;
3297 bool fatLeaves{true};
3298 auto const itype{m->itype()};
3299
3300 if (itype == protocol::liTS_CANDIDATE)
3301 {
3302 if (sharedMap = getTxSet(m); !sharedMap)
3303 return;
3304 map = sharedMap.get();
3305
3306 // Fill out the reply
3307 ledgerData.set_ledgerseq(0);
3308 ledgerData.set_ledgerhash(m->ledgerhash());
3309 ledgerData.set_type(protocol::liTS_CANDIDATE);
3310 if (m->has_requestcookie())
3311 ledgerData.set_requestcookie(m->requestcookie());
3312
3313 // We'll already have most transactions
3314 fatLeaves = false;
3315 }
3316 else
3317 {
3318 if (send_queue_.size() >= Tuning::dropSendQueue)
3319 {
3320 JLOG(p_journal_.debug())
3321 << "processLedgerRequest: Large send queue";
3322 return;
3323 }
3324 if (app_.getFeeTrack().isLoadedLocal() && !cluster())
3325 {
3326 JLOG(p_journal_.debug()) << "processLedgerRequest: Too busy";
3327 return;
3328 }
3329
3330 if (ledger = getLedger(m); !ledger)
3331 return;
3332
3333 // Fill out the reply
3334 auto const ledgerHash{ledger->info().hash};
3335 ledgerData.set_ledgerhash(ledgerHash.begin(), ledgerHash.size());
3336 ledgerData.set_ledgerseq(ledger->info().seq);
3337 ledgerData.set_type(itype);
3338 if (m->has_requestcookie())
3339 ledgerData.set_requestcookie(m->requestcookie());
3340
3341 switch (itype)
3342 {
3343 case protocol::liBASE:
3344 sendLedgerBase(ledger, ledgerData);
3345 return;
3346
3347 case protocol::liTX_NODE:
3348 map = &ledger->txMap();
3349 JLOG(p_journal_.trace()) << "processLedgerRequest: TX map hash "
3350 << to_string(map->getHash());
3351 break;
3352
3353 case protocol::liAS_NODE:
3354 map = &ledger->stateMap();
3355 JLOG(p_journal_.trace())
3356 << "processLedgerRequest: Account state map hash "
3357 << to_string(map->getHash());
3358 break;
3359
3360 default:
3361 // This case should not be possible here
3362 JLOG(p_journal_.error())
3363 << "processLedgerRequest: Invalid ledger info type";
3364 return;
3365 }
3366 }
3367
3368 if (!map)
3369 {
3370 JLOG(p_journal_.warn()) << "processLedgerRequest: Unable to find map";
3371 return;
3372 }
3373
3374 // Add requested node data to reply
3375 if (m->nodeids_size() > 0)
3376 {
3377 auto const queryDepth{
3378 m->has_querydepth() ? m->querydepth() : (isHighLatency() ? 2 : 1)};
3379
3381
3382 for (int i = 0; i < m->nodeids_size() &&
3383 ledgerData.nodes_size() < Tuning::softMaxReplyNodes;
3384 ++i)
3385 {
3386 auto const shaMapNodeId{deserializeSHAMapNodeID(m->nodeids(i))};
3387
3388 data.clear();
3389 data.reserve(Tuning::softMaxReplyNodes);
3390
3391 try
3392 {
3393 if (map->getNodeFat(*shaMapNodeId, data, fatLeaves, queryDepth))
3394 {
3395 JLOG(p_journal_.trace())
3396 << "processLedgerRequest: getNodeFat got "
3397 << data.size() << " nodes";
3398
3399 for (auto const& d : data)
3400 {
3401 if (ledgerData.nodes_size() >=
3402 Tuning::hardMaxReplyNodes)
3403 break;
3404 protocol::TMLedgerNode* node{ledgerData.add_nodes()};
3405 node->set_nodeid(d.first.getRawString());
3406 node->set_nodedata(d.second.data(), d.second.size());
3407 }
3408 }
3409 else
3410 {
3411 JLOG(p_journal_.warn())
3412 << "processLedgerRequest: getNodeFat returns false";
3413 }
3414 }
3415 catch (std::exception const& e)
3416 {
3417 std::string info;
3418 switch (itype)
3419 {
3420 case protocol::liBASE:
3421 // This case should not be possible here
3422 info = "Ledger base";
3423 break;
3424
3425 case protocol::liTX_NODE:
3426 info = "TX node";
3427 break;
3428
3429 case protocol::liAS_NODE:
3430 info = "AS node";
3431 break;
3432
3433 case protocol::liTS_CANDIDATE:
3434 info = "TS candidate";
3435 break;
3436
3437 default:
3438 info = "Invalid";
3439 break;
3440 }
3441
3442 if (!m->has_ledgerhash())
3443 info += ", no hash specified";
3444
3445 JLOG(p_journal_.error())
3446 << "processLedgerRequest: getNodeFat with nodeId "
3447 << *shaMapNodeId << " and ledger info type " << info
3448 << " throws exception: " << e.what();
3449 }
3450 }
3451
3452 JLOG(p_journal_.info())
3453 << "processLedgerRequest: Got request for " << m->nodeids_size()
3454 << " nodes at depth " << queryDepth << ", return "
3455 << ledgerData.nodes_size() << " nodes";
3456 }
3457
3458 if (ledgerData.nodes_size() == 0)
3459 return;
3460
3461 send(std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA));
3462}
3463
3464int
3465PeerImp::getScore(bool haveItem) const
3466{
3467 // Random component of score, used to break ties and avoid
3468 // overloading the "best" peer
3469 static int const spRandomMax = 9999;
3470
3471 // Score for being very likely to have the thing we are
3472 // look for; should be roughly spRandomMax
3473 static int const spHaveItem = 10000;
3474
3475 // Score reduction for each millisecond of latency; should
3476 // be roughly spRandomMax divided by the maximum reasonable
3477 // latency
3478 static int const spLatency = 30;
3479
3480 // Penalty for unknown latency; should be roughly spRandomMax
3481 static int const spNoLatency = 8000;
3482
3483 int score = rand_int(spRandomMax);
3484
3485 if (haveItem)
3486 score += spHaveItem;
3487
3489 {
3490 std::lock_guard sl(recentLock_);
3491 latency = latency_;
3492 }
3493
3494 if (latency)
3495 score -= latency->count() * spLatency;
3496 else
3497 score -= spNoLatency;
3498
3499 return score;
3500}
3501
3502bool
3503PeerImp::isHighLatency() const
3504{
3505 std::lock_guard sl(recentLock_);
3506 return latency_ >= peerHighLatency;
3507}
3508
3509bool
3510PeerImp::reduceRelayReady()
3511{
3512 if (!reduceRelayReady_)
3513 reduceRelayReady_ =
3514 reduce_relay::epoch<std::chrono::minutes>(UptimeClock::now()) >
3515 reduce_relay::WAIT_ON_BOOTUP;
3516 return vpReduceRelayEnabled_ && reduceRelayReady_;
3517}
3518
3519void
3520PeerImp::Metrics::add_message(std::uint64_t bytes)
3521{
3522 using namespace std::chrono_literals;
3523 std::unique_lock lock{mutex_};
3524
3525 totalBytes_ += bytes;
3526 accumBytes_ += bytes;
3527 auto const timeElapsed = clock_type::now() - intervalStart_;
3528 auto const timeElapsedInSecs =
3529 std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);
3530
3531 if (timeElapsedInSecs >= 1s)
3532 {
3533 auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
3534 rollingAvg_.push_back(avgBytes);
3535
3536 auto const totalBytes =
3537 std::accumulate(rollingAvg_.begin(), rollingAvg_.end(), 0ull);
3538 rollingAvgBytes_ = totalBytes / rollingAvg_.size();
3539
3540 intervalStart_ = clock_type::now();
3541 accumBytes_ = 0;
3542 }
3543}
3544
3546PeerImp::Metrics::average_bytes() const
3547{
3548 std::shared_lock lock{mutex_};
3549 return rollingAvgBytes_;
3550}
3551
3553PeerImp::Metrics::total_bytes() const
3554{
3555 std::shared_lock lock{mutex_};
3556 return totalBytes_;
3557}
3558
3559} // namespace ripple
T accumulate(T... args)
T bind(T... args)
Represents a JSON value.
Definition: json_value.h:150
A version-independent IP address and port combination.
Definition: IPEndpoint.h:39
Address const & address() const
Returns the address portion of this endpoint.
Definition: IPEndpoint.h:76
static std::optional< Endpoint > from_string_checked(std::string const &s)
Create an Endpoint from a string.
Definition: IPEndpoint.cpp:45
Endpoint at_port(Port port) const
Returns a new Endpoint with a different port.
Definition: IPEndpoint.h:69
static Endpoint from_string(std::string const &s)
Definition: IPEndpoint.cpp:59
std::string to_string() const
Returns a string representing the endpoint.
Definition: IPEndpoint.cpp:67
Stream error() const
Definition: Journal.h:346
Stream debug() const
Definition: Journal.h:328
bool active(Severity level) const
Returns true if any message would be logged at this severity level.
Definition: Journal.h:314
Stream info() const
Definition: Journal.h:334
Stream trace() const
Severity stream access functions.
Definition: Journal.h:322
Stream warn() const
Definition: Journal.h:340
virtual Config & config()=0
virtual LoadFeeTrack & getFeeTrack()=0
virtual TimeKeeper & timeKeeper()=0
virtual JobQueue & getJobQueue()=0
virtual NetworkOPs & getOPs()=0
virtual ValidatorList & validators()=0
virtual std::optional< PublicKey const > getValidationPublicKey() const =0
virtual LedgerMaster & getLedgerMaster()=0
virtual Cluster & cluster()=0
virtual HashRouter & getHashRouter()=0
void for_each(std::function< void(ClusterNode const &)> func) const
Invokes the callback once for every cluster node.
Definition: Cluster.cpp:83
std::size_t size() const
The number of nodes in the cluster list.
Definition: Cluster.cpp:49
bool update(PublicKey const &identity, std::string name, std::uint32_t loadFee=0, NetClock::time_point reportTime=NetClock::time_point{})
Store information about the state of a cluster node.
Definition: Cluster.cpp:57
std::optional< std::string > member(PublicKey const &node) const
Determines whether a node belongs in the cluster.
Definition: Cluster.cpp:38
bool TX_REDUCE_RELAY_METRICS
Definition: Config.h:266
int MAX_TRANSACTIONS
Definition: Config.h:226
std::chrono::seconds MAX_DIVERGED_TIME
Definition: Config.h:285
std::chrono::seconds MAX_UNKNOWN_TIME
Definition: Config.h:282
bool shouldProcess(uint256 const &key, PeerShortID peer, int &flags, std::chrono::seconds tx_interval)
Definition: HashRouter.cpp:79
bool addSuppressionPeer(uint256 const &key, PeerShortID peer)
Definition: HashRouter.cpp:52
std::unique_ptr< LoadEvent > makeLoadEvent(JobType t, std::string const &name)
Return a scoped LoadEvent.
Definition: JobQueue.cpp:179
int getJobCount(JobType t) const
Jobs waiting at this priority.
Definition: JobQueue.cpp:142
bool addJob(JobType type, std::string const &name, JobHandler &&jobHandler)
Adds a job to the JobQueue.
Definition: JobQueue.h:166
LedgerIndex getValidLedgerIndex()
std::chrono::seconds getValidatedLedgerAge()
void setClusterFee(std::uint32_t fee)
Definition: LoadFeeTrack.h:113
static std::size_t messageSize(::google::protobuf::Message const &message)
Definition: Message.cpp:55
virtual bool isNeedNetworkLedger()=0
PeerFinder::Manager & peerFinder()
Definition: OverlayImpl.h:161
void activate(std::shared_ptr< PeerImp > const &peer)
Called when a peer has connected successfully This is called after the peer handshake has been comple...
void deletePeer(Peer::id_t id)
Called when the peer is deleted.
void incPeerDisconnect() override
Increment and retrieve counters for total peer disconnects, and disconnects we initiate for excessive...
Definition: OverlayImpl.h:366
void addTxMetrics(Args... args)
Add tx reduce-relay metrics.
Definition: OverlayImpl.h:437
void onPeerDeactivate(Peer::id_t id)
void remove(std::shared_ptr< PeerFinder::Slot > const &slot)
void reportOutboundTraffic(TrafficCount::category cat, int bytes)
void for_each(UnaryFunc &&f) const
Definition: OverlayImpl.h:278
Resource::Manager & resourceManager()
Definition: OverlayImpl.h:167
void reportInboundTraffic(TrafficCount::category cat, int bytes)
void onManifests(std::shared_ptr< protocol::TMManifests > const &m, std::shared_ptr< PeerImp > const &from)
Setup const & setup() const
Definition: OverlayImpl.h:173
std::shared_ptr< Message > getManifestsMessage()
void incPeerDisconnectCharges() override
Definition: OverlayImpl.h:378
void incJqTransOverflow() override
Increment and retrieve counter for transaction job queue overflows.
Definition: OverlayImpl.h:354
virtual void on_endpoints(std::shared_ptr< Slot > const &slot, Endpoints const &endpoints)=0
Called when mtENDPOINTS is received.
virtual Config config()=0
Returns the configuration for the manager.
virtual void on_closed(std::shared_ptr< Slot > const &slot)=0
Called when the slot is closed.
virtual void on_failure(std::shared_ptr< Slot > const &slot)=0
Called when an outbound connection is deemed to have failed.
std::queue< std::shared_ptr< Message > > send_queue_
Definition: PeerImp.h:177
bool vpReduceRelayEnabled_
Definition: PeerImp.h:195
std::unique_ptr< LoadEvent > load_event_
Definition: PeerImp.h:180
boost::beast::http::fields const & headers_
Definition: PeerImp.h:176
void onMessageEnd(std::uint16_t type, std::shared_ptr<::google::protobuf::Message > const &m)
Definition: PeerImp.cpp:1058
bool hasLedger(uint256 const &hash, std::uint32_t seq) const override
Definition: PeerImp.cpp:523
clock_type::duration uptime() const
Definition: PeerImp.h:367
void removeTxQueue(uint256 const &hash) override
Remove transaction's hash from the transactions' hashes queue.
Definition: PeerImp.cpp:339
protocol::TMStatusChange last_status_
Definition: PeerImp.h:169
boost::shared_mutex nameMutex_
Definition: PeerImp.h:101
std::string name_
Definition: PeerImp.h:100
boost::circular_buffer< uint256 > recentTxSets_
Definition: PeerImp.h:111
std::unique_ptr< stream_type > stream_ptr_
Definition: PeerImp.h:77
void onMessage(std::shared_ptr< protocol::TMManifests > const &m)
Definition: PeerImp.cpp:1067
bool detaching_
Definition: PeerImp.h:97
Tracking
Whether the peer's view of the ledger converges or diverges from ours.
Definition: PeerImp.h:57
Compressed compressionEnabled_
Definition: PeerImp.h:185
uint256 closedLedgerHash_
Definition: PeerImp.h:107
std::string domain() const
Definition: PeerImp.cpp:841
std::optional< std::uint32_t > lastPingSeq_
Definition: PeerImp.h:114
void onTimer(boost::system::error_code const &ec)
Definition: PeerImp.cpp:689
bool gracefulClose_
Definition: PeerImp.h:178
beast::Journal const journal_
Definition: PeerImp.h:75
virtual void run()
Definition: PeerImp.cpp:155
struct ripple::PeerImp::@21 metrics_
void gracefulClose()
Definition: PeerImp.cpp:633
LedgerIndex maxLedger_
Definition: PeerImp.h:106
beast::Journal const p_journal_
Definition: PeerImp.h:76
void cancelTimer()
Definition: PeerImp.cpp:672
bool const inbound_
Definition: PeerImp.h:90
PeerImp(PeerImp const &)=delete
Application & app_
Definition: PeerImp.h:71
void stop() override
Definition: PeerImp.cpp:213
bool hasRange(std::uint32_t uMin, std::uint32_t uMax) override
Definition: PeerImp.cpp:565
bool hasTxSet(uint256 const &hash) const override
Definition: PeerImp.cpp:547
clock_type::time_point lastPingTime_
Definition: PeerImp.h:115
void onMessageUnknown(std::uint16_t type)
Definition: PeerImp.cpp:1009
std::shared_ptr< PeerFinder::Slot > const slot_
Definition: PeerImp.h:172
boost::circular_buffer< uint256 > recentLedgers_
Definition: PeerImp.h:110
id_t const id_
Definition: PeerImp.h:72
std::optional< std::chrono::milliseconds > latency_
Definition: PeerImp.h:113
void handleTransaction(std::shared_ptr< protocol::TMTransaction > const &m, bool eraseTxQueue, bool batch)
Called from onMessage(TMTransaction(s)).
Definition: PeerImp.cpp:1259
beast::IP::Endpoint const remote_address_
Definition: PeerImp.h:85
Json::Value json() override
Definition: PeerImp.cpp:388
PublicKey const publicKey_
Definition: PeerImp.h:99
hash_set< uint256 > txQueue_
Definition: PeerImp.h:190
std::mutex recentLock_
Definition: PeerImp.h:168
void doAccept()
Definition: PeerImp.cpp:766
void onMessageBegin(std::uint16_t type, std::shared_ptr<::google::protobuf::Message > const &m, std::size_t size, std::size_t uncompressed_size, bool isCompressed)
Definition: PeerImp.cpp:1015
bool txReduceRelayEnabled_
Definition: PeerImp.h:192
clock_type::time_point trackingTime_
Definition: PeerImp.h:96
socket_type & socket_
Definition: PeerImp.h:78
ProtocolVersion protocol_
Definition: PeerImp.h:93
reduce_relay::Squelch< UptimeClock > squelch_
Definition: PeerImp.h:118
std::string getVersion() const
Return the version of rippled that the peer is running, if reported.
Definition: PeerImp.cpp:380
uint256 previousLedgerHash_
Definition: PeerImp.h:108
void charge(Resource::Charge const &fee, std::string const &context) override
Adjust this peer's load balance based on the type of load imposed.
Definition: PeerImp.cpp:351
void setTimer()
Definition: PeerImp.cpp:654
void send(std::shared_ptr< Message > const &m) override
Definition: PeerImp.cpp:239
static std::string makePrefix(id_t id)
Definition: PeerImp.cpp:681
std::string name() const
Definition: PeerImp.cpp:834
boost::system::error_code error_code
Definition: PeerImp.h:61
void onReadMessage(error_code ec, std::size_t bytes_transferred)
Definition: PeerImp.cpp:889
bool ledgerReplayEnabled_
Definition: PeerImp.h:196
boost::asio::basic_waitable_timer< std::chrono::steady_clock > waitable_timer
Definition: PeerImp.h:68
bool crawl() const
Returns true if this connection will publicly share its IP address.
Definition: PeerImp.cpp:365
waitable_timer timer_
Definition: PeerImp.h:81
void sendTxQueue() override
Send aggregated transactions' hashes.
Definition: PeerImp.cpp:303
bool txReduceRelayEnabled() const override
Definition: PeerImp.h:440
bool supportsFeature(ProtocolFeature f) const override
Definition: PeerImp.cpp:506
ChargeWithContext fee_
Definition: PeerImp.h:171
void onWriteMessage(error_code ec, std::size_t bytes_transferred)
Definition: PeerImp.cpp:953
http_request_type request_
Definition: PeerImp.h:174
OverlayImpl & overlay_
Definition: PeerImp.h:89
LedgerIndex minLedger_
Definition: PeerImp.h:105
virtual ~PeerImp()
Definition: PeerImp.cpp:132
void addTxQueue(uint256 const &hash) override
Add transaction's hash to the transactions' hashes queue.
Definition: PeerImp.cpp:322
int large_sendq_
Definition: PeerImp.h:179
stream_type & stream_
Definition: PeerImp.h:79
bool cluster() const override
Returns true if this connection is a member of the cluster.
Definition: PeerImp.cpp:374
void onShutdown(error_code ec)
Definition: PeerImp.cpp:750
boost::asio::strand< boost::asio::executor > strand_
Definition: PeerImp.h:80
void cycleStatus() override
Definition: PeerImp.cpp:555
boost::beast::multi_buffer read_buffer_
Definition: PeerImp.h:173
Resource::Consumer usage_
Definition: PeerImp.h:170
void ledgerRange(std::uint32_t &minSeq, std::uint32_t &maxSeq) const override
Definition: PeerImp.cpp:538
void doProtocolStart()
Definition: PeerImp.cpp:851
void fail(std::string const &reason)
Definition: PeerImp.cpp:599
std::atomic< Tracking > tracking_
Definition: PeerImp.h:95
Represents a peer connection in the overlay.
A public key.
Definition: PublicKey.h:62
A peer's signed, proposed position for use in RCLConsensus.
Definition: RCLCxPeerPos.h:44
bool checkSign() const
Verify the signing hash of the proposal.
PublicKey const & publicKey() const
Public key of peer that sent the proposal.
Definition: RCLCxPeerPos.h:78
uint256 const & suppressionID() const
Unique id used by hash router to suppress duplicates.
Definition: RCLCxPeerPos.h:85
A consumption charge.
Definition: Charge.h:31
An endpoint that consumes resources.
Definition: Consumer.h:35
int balance()
Returns the credit balance representing consumption.
Definition: Consumer.cpp:137
bool disconnect(beast::Journal const &j)
Returns true if the consumer should be disconnected.
Definition: Consumer.cpp:124
Disposition charge(Charge const &fee, std::string const &context={})
Apply a load charge to the consumer.
Definition: Consumer.cpp:106
virtual void importConsumers(std::string const &origin, Gossip const &gossip)=0
Import packaged consumer information.
A SHAMap is both a radix tree with a fan-out of 16 and a Merkle tree.
Definition: SHAMap.h:98
std::size_t size() const noexcept
Definition: Serializer.h:73
void const * data() const noexcept
Definition: Serializer.h:79
int getLength() const
Definition: Serializer.h:234
void const * getDataPtr() const
Definition: Serializer.h:224
An immutable linear range of bytes.
Definition: Slice.h:46
time_point now() const override
Returns the current time, using the server's clock.
Definition: TimeKeeper.h:64
static category categorize(::google::protobuf::Message const &message, protocol::MessageType type, bool inbound)
Given a protocol message, determine which traffic category it belongs to.
static void sendValidatorList(Peer &peer, std::uint64_t peerSequence, PublicKey const &publisherKey, std::size_t maxSequence, std::uint32_t rawVersion, std::string const &rawManifest, std::map< std::size_t, ValidatorBlobInfo > const &blobInfos, HashRouter &hashRouter, beast::Journal j)
void for_each_available(std::function< void(std::string const &manifest, std::uint32_t version, std::map< std::size_t, ValidatorBlobInfo > const &blobInfos, PublicKey const &pubKey, std::size_t maxSequence, uint256 const &hash)> func) const
Invokes the callback once for every available publisher list's raw data members.
pointer data()
Definition: base_uint.h:125
static constexpr std::size_t size()
Definition: base_uint.h:526
constexpr bool parseHex(std::string_view sv)
Parse a hex string into a base_uint.
Definition: base_uint.h:503
Set the fee on a JTx.
Definition: fee.h:37
Match set account flags.
Definition: flags.h:125
Set the regular signature on a JTx.
Definition: sig.h:35
T emplace_back(T... args)
T empty(T... args)
T find(T... args)
T for_each(T... args)
T get(T... args)
T load(T... args)
T lock(T... args)
T max(T... args)
T min(T... args)
@ objectValue
object value (collection of name/value pairs).
Definition: json_value.h:46
unsigned int UInt
Definition: json_forwards.h:27
Charge const feeMalformedRequest
Schedule of fees charged for imposing load on the server.
Charge const feeInvalidData
Charge const feeUselessData
Charge const feeTrivialPeer
Charge const feeModerateBurdenPeer
std::size_t constexpr readBufferBytes
Size of buffer used to read from the socket.
@ targetSendQueue
How many messages we consider reasonable sustained on a send queue.
@ maxQueryDepth
The maximum number of levels to search.
@ sendqIntervals
How many timer intervals a sendq has to stay large before we disconnect.
@ sendQueueLogFreq
How often to log send queue size.
TER valid(PreclaimContext const &ctx, AccountID const &src)
auto measureDurationAndLog(Func &&func, std::string const &actionDescription, std::chrono::duration< Rep, Period > maxDelay, beast::Journal const &journal)
Definition: PerfLog.h:187
static constexpr std::size_t MAX_TX_QUEUE_SIZE
std::unique_ptr< Config > validator(std::unique_ptr< Config >, std::string const &)
adjust configuration with params needed to be a validator
Definition: envconfig.cpp:113
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
std::string protocolMessageName(int type)
Returns the name of a protocol message given its type.
std::string toBase58(AccountID const &v)
Convert AccountID to base58 checked string.
Definition: AccountID.cpp:114
uint256 proposalUniqueId(uint256 const &proposeHash, uint256 const &previousLedger, std::uint32_t proposeSeq, NetClock::time_point closeTime, Slice const &publicKey, Slice const &signature)
Calculate a unique identifier for a signed proposal.
constexpr ProtocolVersion make_protocol(std::uint16_t major, std::uint16_t minor)
bool isPseudoTx(STObject const &tx)
Check whether a transaction is a pseudo-transaction.
Definition: STTx.cpp:814
@ INCLUDED
Definition: Transaction.h:49
@ INVALID
Definition: Transaction.h:48
std::optional< SHAMapNodeID > deserializeSHAMapNodeID(void const *data, std::size_t size)
Return an object representing a serialized SHAMap Node ID.
static constexpr char FEATURE_COMPR[]
Definition: Handshake.h:141
bool isCurrent(ValidationParms const &p, NetClock::time_point now, NetClock::time_point signTime, NetClock::time_point seenTime)
Whether a validation is still current.
Definition: Validations.h:149
std::string base64_decode(std::string_view data)
Definition: base64.cpp:248
bool set(T &target, std::string const &name, Section const &section)
Set a value from a configuration Section If the named value is not found or doesn't parse as a T,...
Definition: BasicConfig.h:315
http_response_type makeResponse(bool crawlPublic, http_request_type const &req, beast::IP::Address public_ip, beast::IP::Address remote_ip, uint256 const &sharedValue, std::optional< std::uint32_t > networkID, ProtocolVersion protocol, Application &app)
Make http response.
Definition: Handshake.cpp:392
static bool stringIsUint256Sized(std::string const &pBuffStr)
Definition: PeerImp.cpp:149
static constexpr char FEATURE_LEDGER_REPLAY[]
Definition: Handshake.h:147
std::pair< std::size_t, boost::system::error_code > invokeProtocolMessage(Buffers const &buffers, Handler &handler, std::size_t &hint)
Calls the handler for up to one protocol message in the passed buffers.
std::optional< uint256 > makeSharedValue(stream_type &ssl, beast::Journal journal)
Computes a shared value based on the SSL connection state.
Definition: Handshake.cpp:146
std::optional< KeyType > publicKeyType(Slice const &slice)
Returns the type of public key.
Definition: PublicKey.cpp:223
std::enable_if_t< std::is_integral< Integral >::value &&detail::is_engine< Engine >::value, Integral > rand_int(Engine &engine, Integral min, Integral max)
Return a uniformly distributed random integer.
std::string strHex(FwdIt begin, FwdIt end)
Definition: strHex.h:30
static std::shared_ptr< PeerImp > getPeerWithLedger(OverlayImpl &ov, uint256 const &ledgerHash, LedgerIndex ledger, PeerImp const *skip)
Definition: PeerImp.cpp:3090
std::enable_if_t< std::is_same< T, char >::value||std::is_same< T, unsigned char >::value, Slice > makeSlice(std::array< T, N > const &a)
Definition: Slice.h:244
Stopwatch & stopwatch()
Returns an instance of a wall clock.
Definition: chrono.h:119
boost::beast::http::request< boost::beast::http::dynamic_body > http_request_type
Definition: Handoff.h:33
NodeID calcNodeID(PublicKey const &)
Calculate the 160-bit node ID from a node public key.
Definition: PublicKey.cpp:319
static std::shared_ptr< PeerImp > getPeerWithTree(OverlayImpl &ov, uint256 const &rootHash, PeerImp const *skip)
Definition: PeerImp.cpp:3066
bool peerFeatureEnabled(headers const &request, std::string const &feature, std::string value, bool config)
Check if a feature should be enabled for a peer.
Definition: Handshake.h:198
void forceValidity(HashRouter &router, uint256 const &txid, Validity validity)
Sets the validity of a given transaction in the cache.
Definition: apply.cpp:113
static constexpr char FEATURE_TXRR[]
Definition: Handshake.h:145
std::string to_string(base_uint< Bits, Tag > const &a)
Definition: base_uint.h:630
std::optional< Rules > const & getCurrentTransactionRules()
Definition: Rules.cpp:47
Number root(Number f, unsigned d)
Definition: Number.cpp:636
@ manifest
Manifest.
@ proposal
proposal for signing
void addRaw(LedgerHeader const &, Serializer &, bool includeHash=false)
std::pair< Validity, std::string > checkValidity(HashRouter &router, STTx const &tx, Rules const &rules, Config const &config)
Checks transaction signature and local checks.
Definition: apply.cpp:39
@ jtLEDGER_REQ
Definition: Job.h:59
@ jtPROPOSAL_ut
Definition: Job.h:60
@ jtREPLAY_REQ
Definition: Job.h:58
@ jtTRANSACTION
Definition: Job.h:62
@ jtPEER
Definition: Job.h:80
@ jtREQUESTED_TXN
Definition: Job.h:64
@ jtMISSING_TXN
Definition: Job.h:63
@ jtVALIDATION_t
Definition: Job.h:71
@ jtMANIFEST
Definition: Job.h:55
@ jtTXN_DATA
Definition: Job.h:69
@ jtPACK
Definition: Job.h:43
@ jtVALIDATION_ut
Definition: Job.h:54
@ jtPROPOSAL_t
Definition: Job.h:74
sha512_half_hasher::result_type sha512Half(Args const &... args)
Returns the SHA512-Half of a series of objects.
Definition: digest.h:225
constexpr std::uint32_t tfInnerBatchTxn
Definition: TxFlags.h:61
STL namespace.
T nth_element(T... args)
T ref(T... args)
T reserve(T... args)
T reset(T... args)
T setfill(T... args)
T setw(T... args)
T size(T... args)
T str(T... args)
Information about the notional ledger backing the view.
Definition: LedgerHeader.h:34
beast::IP::Address public_ip
Definition: Overlay.h:69
std::optional< std::uint32_t > networkID
Definition: Overlay.h:72
bool peerPrivate
true if we want our IP address kept private.
void update(Resource::Charge f, std::string const &add)
Definition: PeerImp.h:154
Describes a single consumer.
Definition: Gossip.h:35
beast::IP::Endpoint address
Definition: Gossip.h:39
Data format for exchanging consumption information across peers.
Definition: Gossip.h:30
std::vector< Item > items
Definition: Gossip.h:42
Set the sequence number on a JTx.
Definition: seq.h:34
T tie(T... args)
T to_string(T... args)
T what(T... args)