rippled
Loading...
Searching...
No Matches
PeerImp.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLValidations.h>
21#include <xrpld/app/ledger/InboundLedgers.h>
22#include <xrpld/app/ledger/InboundTransactions.h>
23#include <xrpld/app/ledger/LedgerMaster.h>
24#include <xrpld/app/ledger/TransactionMaster.h>
25#include <xrpld/app/misc/HashRouter.h>
26#include <xrpld/app/misc/LoadFeeTrack.h>
27#include <xrpld/app/misc/NetworkOPs.h>
28#include <xrpld/app/misc/Transaction.h>
29#include <xrpld/app/misc/ValidatorList.h>
30#include <xrpld/app/tx/apply.h>
31#include <xrpld/overlay/Cluster.h>
32#include <xrpld/overlay/detail/PeerImp.h>
33#include <xrpld/overlay/detail/Tuning.h>
34#include <xrpld/perflog/PerfLog.h>
35
36#include <xrpl/basics/UptimeClock.h>
37#include <xrpl/basics/base64.h>
38#include <xrpl/basics/random.h>
39#include <xrpl/basics/safe_cast.h>
40#include <xrpl/protocol/digest.h>
41
42#include <boost/algorithm/string/predicate.hpp>
43#include <boost/beast/core/ostream.hpp>
44
45#include <algorithm>
46#include <memory>
47#include <mutex>
48#include <numeric>
49#include <sstream>
50
51using namespace std::chrono_literals;
52
53namespace ripple {
54
55namespace {
57std::chrono::milliseconds constexpr peerHighLatency{300};
58
60std::chrono::seconds constexpr peerTimerInterval{60};
61} // namespace
62
63// TODO: Remove this exclusion once unit tests are added after the hotfix
64// release.
65
67 Application& app,
68 id_t id,
70 http_request_type&& request,
71 PublicKey const& publicKey,
73 Resource::Consumer consumer,
75 OverlayImpl& overlay)
76 : Child(overlay)
77 , app_(app)
78 , id_(id)
79 , sink_(app_.journal("Peer"), makePrefix(id))
80 , p_sink_(app_.journal("Protocol"), makePrefix(id))
81 , journal_(sink_)
82 , p_journal_(p_sink_)
83 , stream_ptr_(std::move(stream_ptr))
84 , socket_(stream_ptr_->next_layer().socket())
85 , stream_(*stream_ptr_)
86 , strand_(socket_.get_executor())
87 , timer_(waitable_timer{socket_.get_executor()})
88 , remote_address_(slot->remote_endpoint())
89 , overlay_(overlay)
90 , inbound_(true)
91 , protocol_(protocol)
92 , tracking_(Tracking::unknown)
93 , trackingTime_(clock_type::now())
94 , publicKey_(publicKey)
95 , lastPingTime_(clock_type::now())
96 , creationTime_(clock_type::now())
97 , squelch_(app_.journal("Squelch"))
98 , usage_(consumer)
99 , fee_{Resource::feeTrivialPeer, ""}
100 , slot_(slot)
101 , request_(std::move(request))
102 , headers_(request_)
103 , compressionEnabled_(
105 headers_,
107 "lz4",
108 app_.config().COMPRESSION)
109 ? Compressed::On
110 : Compressed::Off)
111 , txReduceRelayEnabled_(peerFeatureEnabled(
112 headers_,
114 app_.config().TX_REDUCE_RELAY_ENABLE))
115 , vpReduceRelayEnabled_(app_.config().VP_REDUCE_RELAY_ENABLE)
116 , ledgerReplayEnabled_(peerFeatureEnabled(
117 headers_,
119 app_.config().LEDGER_REPLAY))
120 , ledgerReplayMsgHandler_(app, app.getLedgerReplayer())
121{
122 JLOG(journal_.info()) << "compression enabled "
123 << (compressionEnabled_ == Compressed::On)
124 << " vp reduce-relay enabled "
126 << " tx reduce-relay enabled "
128 << " " << id_;
129}
130
132{
133 bool const inCluster{cluster()};
134
139
140 if (inCluster)
141 {
142 JLOG(journal_.warn()) << name() << " left cluster";
143 }
144}
145
146// Helper function to check for valid uint256 values in protobuf buffers
147static bool
149{
150 return pBuffStr.size() == uint256::size();
151}
152
153void
155{
156 if (!strand_.running_in_this_thread())
158
159 auto parseLedgerHash =
161 if (uint256 ret; ret.parseHex(value))
162 return ret;
163
164 if (auto const s = base64_decode(value); s.size() == uint256::size())
165 return uint256{s};
166
167 return std::nullopt;
168 };
169
171 std::optional<uint256> previous;
172
173 if (auto const iter = headers_.find("Closed-Ledger");
174 iter != headers_.end())
175 {
176 closed = parseLedgerHash(iter->value());
177
178 if (!closed)
179 fail("Malformed handshake data (1)");
180 }
181
182 if (auto const iter = headers_.find("Previous-Ledger");
183 iter != headers_.end())
184 {
185 previous = parseLedgerHash(iter->value());
186
187 if (!previous)
188 fail("Malformed handshake data (2)");
189 }
190
191 if (previous && !closed)
192 fail("Malformed handshake data (3)");
193
194 {
196 if (closed)
197 closedLedgerHash_ = *closed;
198 if (previous)
199 previousLedgerHash_ = *previous;
200 }
201
202 if (inbound_)
203 doAccept();
204 else
206
207 // Anything else that needs to be done with the connection should be
208 // done in doProtocolStart
209}
210
211void
213{
214 if (!strand_.running_in_this_thread())
216 if (socket_.is_open())
217 {
218 // The rationale for using different severity levels is that
219 // outbound connections are under our control and may be logged
220 // at a higher level, but inbound connections are more numerous and
221 // uncontrolled so to prevent log flooding the severity is reduced.
222 //
223 if (inbound_)
224 {
225 JLOG(journal_.debug()) << "Stop";
226 }
227 else
228 {
229 JLOG(journal_.info()) << "Stop";
230 }
231 }
232 close();
233}
234
235//------------------------------------------------------------------------------
236
237void
239{
240 if (!strand_.running_in_this_thread())
241 return post(strand_, std::bind(&PeerImp::send, shared_from_this(), m));
242 if (gracefulClose_)
243 return;
244 if (detaching_)
245 return;
246
247 auto validator = m->getValidatorKey();
248 if (validator && !squelch_.expireSquelch(*validator))
249 {
252 static_cast<int>(m->getBuffer(compressionEnabled_).size()));
253 return;
254 }
255
256 // report categorized outgoing traffic
258 safe_cast<TrafficCount::category>(m->getCategory()),
259 static_cast<int>(m->getBuffer(compressionEnabled_).size()));
260
261 // report total outgoing traffic
264 static_cast<int>(m->getBuffer(compressionEnabled_).size()));
265
266 auto sendq_size = send_queue_.size();
267
268 if (sendq_size < Tuning::targetSendQueue)
269 {
270 // To detect a peer that does not read from their
271 // side of the connection, we expect a peer to have
272 // a small senq periodically
273 large_sendq_ = 0;
274 }
275 else if (auto sink = journal_.debug();
276 sink && (sendq_size % Tuning::sendQueueLogFreq) == 0)
277 {
278 std::string const n = name();
279 sink << (n.empty() ? remote_address_.to_string() : n)
280 << " sendq: " << sendq_size;
281 }
282
283 send_queue_.push(m);
284
285 if (sendq_size != 0)
286 return;
287
288 boost::asio::async_write(
289 stream_,
290 boost::asio::buffer(
291 send_queue_.front()->getBuffer(compressionEnabled_)),
292 bind_executor(
293 strand_,
294 std::bind(
297 std::placeholders::_1,
298 std::placeholders::_2)));
299}
300
301void
303{
304 if (!strand_.running_in_this_thread())
305 return post(
307
308 if (!txQueue_.empty())
309 {
310 protocol::TMHaveTransactions ht;
311 std::for_each(txQueue_.begin(), txQueue_.end(), [&](auto const& hash) {
312 ht.add_hashes(hash.data(), hash.size());
313 });
314 JLOG(p_journal_.trace()) << "sendTxQueue " << txQueue_.size();
315 txQueue_.clear();
316 send(std::make_shared<Message>(ht, protocol::mtHAVE_TRANSACTIONS));
317 }
318}
319
320void
322{
323 if (!strand_.running_in_this_thread())
324 return post(
326
328 {
329 JLOG(p_journal_.warn()) << "addTxQueue exceeds the cap";
330 sendTxQueue();
331 }
332
333 txQueue_.insert(hash);
334 JLOG(p_journal_.trace()) << "addTxQueue " << txQueue_.size();
335}
336
337void
339{
340 if (!strand_.running_in_this_thread())
341 return post(
342 strand_,
344
345 auto removed = txQueue_.erase(hash);
346 JLOG(p_journal_.trace()) << "removeTxQueue " << removed;
347}
348
349void
351{
352 if ((usage_.charge(fee, context) == Resource::drop) &&
353 usage_.disconnect(p_journal_) && strand_.running_in_this_thread())
354 {
355 // Sever the connection
357 fail("charge: Resources");
358 }
359}
360
361//------------------------------------------------------------------------------
362
363bool
365{
366 auto const iter = headers_.find("Crawl");
367 if (iter == headers_.end())
368 return false;
369 return boost::iequals(iter->value(), "public");
370}
371
372bool
374{
375 return static_cast<bool>(app_.cluster().member(publicKey_));
376}
377
380{
381 if (inbound_)
382 return headers_["User-Agent"];
383 return headers_["Server"];
384}
385
388{
390
391 ret[jss::public_key] = toBase58(TokenType::NodePublic, publicKey_);
392 ret[jss::address] = remote_address_.to_string();
393
394 if (inbound_)
395 ret[jss::inbound] = true;
396
397 if (cluster())
398 {
399 ret[jss::cluster] = true;
400
401 if (auto const n = name(); !n.empty())
402 // Could move here if Json::Value supported moving from a string
403 ret[jss::name] = n;
404 }
405
406 if (auto const d = domain(); !d.empty())
407 ret[jss::server_domain] = std::string{d};
408
409 if (auto const nid = headers_["Network-ID"]; !nid.empty())
410 ret[jss::network_id] = std::string{nid};
411
412 ret[jss::load] = usage_.balance();
413
414 if (auto const version = getVersion(); !version.empty())
415 ret[jss::version] = std::string{version};
416
417 ret[jss::protocol] = to_string(protocol_);
418
419 {
421 if (latency_)
422 ret[jss::latency] = static_cast<Json::UInt>(latency_->count());
423 }
424
425 ret[jss::uptime] = static_cast<Json::UInt>(
426 std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());
427
428 std::uint32_t minSeq, maxSeq;
429 ledgerRange(minSeq, maxSeq);
430
431 if ((minSeq != 0) || (maxSeq != 0))
432 ret[jss::complete_ledgers] =
433 std::to_string(minSeq) + " - " + std::to_string(maxSeq);
434
435 switch (tracking_.load())
436 {
438 ret[jss::track] = "diverged";
439 break;
440
442 ret[jss::track] = "unknown";
443 break;
444
446 // Nothing to do here
447 break;
448 }
449
450 uint256 closedLedgerHash;
451 protocol::TMStatusChange last_status;
452 {
454 closedLedgerHash = closedLedgerHash_;
455 last_status = last_status_;
456 }
457
458 if (closedLedgerHash != beast::zero)
459 ret[jss::ledger] = to_string(closedLedgerHash);
460
461 if (last_status.has_newstatus())
462 {
463 switch (last_status.newstatus())
464 {
465 case protocol::nsCONNECTING:
466 ret[jss::status] = "connecting";
467 break;
468
469 case protocol::nsCONNECTED:
470 ret[jss::status] = "connected";
471 break;
472
473 case protocol::nsMONITORING:
474 ret[jss::status] = "monitoring";
475 break;
476
477 case protocol::nsVALIDATING:
478 ret[jss::status] = "validating";
479 break;
480
481 case protocol::nsSHUTTING:
482 ret[jss::status] = "shutting";
483 break;
484
485 default:
486 JLOG(p_journal_.warn())
487 << "Unknown status: " << last_status.newstatus();
488 }
489 }
490
491 ret[jss::metrics] = Json::Value(Json::objectValue);
492 ret[jss::metrics][jss::total_bytes_recv] =
493 std::to_string(metrics_.recv.total_bytes());
494 ret[jss::metrics][jss::total_bytes_sent] =
495 std::to_string(metrics_.sent.total_bytes());
496 ret[jss::metrics][jss::avg_bps_recv] =
497 std::to_string(metrics_.recv.average_bytes());
498 ret[jss::metrics][jss::avg_bps_sent] =
499 std::to_string(metrics_.sent.average_bytes());
500
501 return ret;
502}
503
504bool
506{
507 switch (f)
508 {
510 return protocol_ >= make_protocol(2, 1);
512 return protocol_ >= make_protocol(2, 2);
515 }
516 return false;
517}
518
519//------------------------------------------------------------------------------
520
521bool
523{
524 {
526 if ((seq != 0) && (seq >= minLedger_) && (seq <= maxLedger_) &&
528 return true;
529 if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
530 recentLedgers_.end())
531 return true;
532 }
533 return false;
534}
535
536void
538{
540
541 minSeq = minLedger_;
542 maxSeq = maxLedger_;
543}
544
545bool
546PeerImp::hasTxSet(uint256 const& hash) const
547{
549 return std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
550 recentTxSets_.end();
551}
552
553void
555{
556 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
557 // guarded by recentLock_.
561}
562
563bool
565{
567 return (tracking_ != Tracking::diverged) && (uMin >= minLedger_) &&
568 (uMax <= maxLedger_);
569}
570
571//------------------------------------------------------------------------------
572
573void
575{
576 XRPL_ASSERT(
577 strand_.running_in_this_thread(),
578 "ripple::PeerImp::close : strand in this thread");
579 if (socket_.is_open())
580 {
581 detaching_ = true; // DEPRECATED
582 error_code ec;
583 timer_.cancel(ec);
584 socket_.close(ec);
586 if (inbound_)
587 {
588 JLOG(journal_.debug()) << "Closed";
589 }
590 else
591 {
592 JLOG(journal_.info()) << "Closed";
593 }
594 }
595}
596
597void
599{
600 if (!strand_.running_in_this_thread())
601 return post(
602 strand_,
603 std::bind(
604 (void(Peer::*)(std::string const&)) & PeerImp::fail,
606 reason));
608 {
609 std::string const n = name();
610 JLOG(journal_.warn()) << (n.empty() ? remote_address_.to_string() : n)
611 << " failed: " << reason;
612 }
613 close();
614}
615
616void
618{
619 XRPL_ASSERT(
620 strand_.running_in_this_thread(),
621 "ripple::PeerImp::fail : strand in this thread");
622 if (socket_.is_open())
623 {
624 JLOG(journal_.warn())
626 << " at " << remote_address_.to_string() << ": " << ec.message();
627 }
628 close();
629}
630
631void
633{
634 XRPL_ASSERT(
635 strand_.running_in_this_thread(),
636 "ripple::PeerImp::gracefulClose : strand in this thread");
637 XRPL_ASSERT(
638 socket_.is_open(), "ripple::PeerImp::gracefulClose : socket is open");
639 XRPL_ASSERT(
641 "ripple::PeerImp::gracefulClose : socket is not closing");
642 gracefulClose_ = true;
643 if (send_queue_.size() > 0)
644 return;
645 setTimer();
646 stream_.async_shutdown(bind_executor(
647 strand_,
648 std::bind(
649 &PeerImp::onShutdown, shared_from_this(), std::placeholders::_1)));
650}
651
652void
654{
655 error_code ec;
656 timer_.expires_from_now(peerTimerInterval, ec);
657
658 if (ec)
659 {
660 JLOG(journal_.error()) << "setTimer: " << ec.message();
661 return;
662 }
663 timer_.async_wait(bind_executor(
664 strand_,
665 std::bind(
666 &PeerImp::onTimer, shared_from_this(), std::placeholders::_1)));
667}
668
669// convenience for ignoring the error code
670void
672{
673 error_code ec;
674 timer_.cancel(ec);
675}
676
677//------------------------------------------------------------------------------
678
681{
683 ss << "[" << std::setfill('0') << std::setw(3) << id << "] ";
684 return ss.str();
685}
686
687void
689{
690 if (!socket_.is_open())
691 return;
692
693 if (ec == boost::asio::error::operation_aborted)
694 return;
695
696 if (ec)
697 {
698 // This should never happen
699 JLOG(journal_.error()) << "onTimer: " << ec.message();
700 return close();
701 }
702
704 {
705 fail("Large send queue");
706 return;
707 }
708
709 if (auto const t = tracking_.load(); !inbound_ && t != Tracking::converged)
710 {
711 clock_type::duration duration;
712
713 {
715 duration = clock_type::now() - trackingTime_;
716 }
717
718 if ((t == Tracking::diverged &&
719 (duration > app_.config().MAX_DIVERGED_TIME)) ||
720 (t == Tracking::unknown &&
721 (duration > app_.config().MAX_UNKNOWN_TIME)))
722 {
724 fail("Not useful");
725 return;
726 }
727 }
728
729 // Already waiting for PONG
730 if (lastPingSeq_)
731 {
732 fail("Ping Timeout");
733 return;
734 }
735
737 lastPingSeq_ = rand_int<std::uint32_t>();
738
739 protocol::TMPing message;
740 message.set_type(protocol::TMPing::ptPING);
741 message.set_seq(*lastPingSeq_);
742
743 send(std::make_shared<Message>(message, protocol::mtPING));
744
745 setTimer();
746}
747
748void
750{
751 cancelTimer();
752 // If we don't get eof then something went wrong
753 if (!ec)
754 {
755 JLOG(journal_.error()) << "onShutdown: expected error condition";
756 return close();
757 }
758 if (ec != boost::asio::error::eof)
759 return fail("onShutdown", ec);
760 close();
761}
762
763//------------------------------------------------------------------------------
764void
766{
767 XRPL_ASSERT(
768 read_buffer_.size() == 0,
769 "ripple::PeerImp::doAccept : empty read buffer");
770
771 JLOG(journal_.debug()) << "doAccept: " << remote_address_;
772
773 auto const sharedValue = makeSharedValue(*stream_ptr_, journal_);
774
775 // This shouldn't fail since we already computed
776 // the shared value successfully in OverlayImpl
777 if (!sharedValue)
778 return fail("makeSharedValue: Unexpected failure");
779
780 JLOG(journal_.info()) << "Protocol: " << to_string(protocol_);
781 JLOG(journal_.info()) << "Public Key: "
783
784 if (auto member = app_.cluster().member(publicKey_))
785 {
786 {
788 name_ = *member;
789 }
790 JLOG(journal_.info()) << "Cluster name: " << *member;
791 }
792
794
795 // XXX Set timer: connection is in grace period to be useful.
796 // XXX Set timer: connection idle (idle may vary depending on connection
797 // type.)
798
799 auto write_buffer = std::make_shared<boost::beast::multi_buffer>();
800
801 boost::beast::ostream(*write_buffer) << makeResponse(
803 request_,
806 *sharedValue,
808 protocol_,
809 app_);
810
811 // Write the whole buffer and only start protocol when that's done.
812 boost::asio::async_write(
813 stream_,
814 write_buffer->data(),
815 boost::asio::transfer_all(),
816 bind_executor(
817 strand_,
818 [this, write_buffer, self = shared_from_this()](
819 error_code ec, std::size_t bytes_transferred) {
820 if (!socket_.is_open())
821 return;
822 if (ec == boost::asio::error::operation_aborted)
823 return;
824 if (ec)
825 return fail("onWriteResponse", ec);
826 if (write_buffer->size() == bytes_transferred)
827 return doProtocolStart();
828 return fail("Failed to write header");
829 }));
830}
831
834{
835 std::shared_lock read_lock{nameMutex_};
836 return name_;
837}
838
841{
842 return headers_["Server-Domain"];
843}
844
845//------------------------------------------------------------------------------
846
847// Protocol logic
848
849void
851{
853
854 // Send all the validator lists that have been loaded
856 {
858 [&](std::string const& manifest,
859 std::uint32_t version,
861 PublicKey const& pubKey,
862 std::size_t maxSequence,
863 uint256 const& hash) {
865 *this,
866 0,
867 pubKey,
868 maxSequence,
869 version,
870 manifest,
871 blobInfos,
873 p_journal_);
874
875 // Don't send it next time.
877 });
878 }
879
880 if (auto m = overlay_.getManifestsMessage())
881 send(m);
882
883 setTimer();
884}
885
886// Called repeatedly with protocol message data
887void
889{
890 if (!socket_.is_open())
891 return;
892 if (ec == boost::asio::error::operation_aborted)
893 return;
894 if (ec == boost::asio::error::eof)
895 {
896 JLOG(journal_.info()) << "EOF";
897 return gracefulClose();
898 }
899 if (ec)
900 return fail("onReadMessage", ec);
901 if (auto stream = journal_.trace())
902 {
903 if (bytes_transferred > 0)
904 stream << "onReadMessage: " << bytes_transferred << " bytes";
905 else
906 stream << "onReadMessage";
907 }
908
909 metrics_.recv.add_message(bytes_transferred);
910
911 read_buffer_.commit(bytes_transferred);
912
913 auto hint = Tuning::readBufferBytes;
914
915 while (read_buffer_.size() > 0)
916 {
917 std::size_t bytes_consumed;
918
919 using namespace std::chrono_literals;
920 std::tie(bytes_consumed, ec) = perf::measureDurationAndLog(
921 [&]() {
922 return invokeProtocolMessage(read_buffer_.data(), *this, hint);
923 },
924 "invokeProtocolMessage",
925 350ms,
926 journal_);
927
928 if (ec)
929 return fail("onReadMessage", ec);
930 if (!socket_.is_open())
931 return;
932 if (gracefulClose_)
933 return;
934 if (bytes_consumed == 0)
935 break;
936 read_buffer_.consume(bytes_consumed);
937 }
938
939 // Timeout on writes only
940 stream_.async_read_some(
942 bind_executor(
943 strand_,
944 std::bind(
947 std::placeholders::_1,
948 std::placeholders::_2)));
949}
950
951void
953{
954 if (!socket_.is_open())
955 return;
956 if (ec == boost::asio::error::operation_aborted)
957 return;
958 if (ec)
959 return fail("onWriteMessage", ec);
960 if (auto stream = journal_.trace())
961 {
962 if (bytes_transferred > 0)
963 stream << "onWriteMessage: " << bytes_transferred << " bytes";
964 else
965 stream << "onWriteMessage";
966 }
967
968 metrics_.sent.add_message(bytes_transferred);
969
970 XRPL_ASSERT(
971 !send_queue_.empty(),
972 "ripple::PeerImp::onWriteMessage : non-empty send buffer");
973 send_queue_.pop();
974 if (!send_queue_.empty())
975 {
976 // Timeout on writes only
977 return boost::asio::async_write(
978 stream_,
979 boost::asio::buffer(
980 send_queue_.front()->getBuffer(compressionEnabled_)),
981 bind_executor(
982 strand_,
983 std::bind(
986 std::placeholders::_1,
987 std::placeholders::_2)));
988 }
989
990 if (gracefulClose_)
991 {
992 return stream_.async_shutdown(bind_executor(
993 strand_,
994 std::bind(
997 std::placeholders::_1)));
998 }
999}
1000
1001//------------------------------------------------------------------------------
1002//
1003// ProtocolHandler
1004//
1005//------------------------------------------------------------------------------
1006
1007void
1009{
1010 // TODO
1011}
1012
1013void
1015 std::uint16_t type,
1017 std::size_t size,
1018 std::size_t uncompressed_size,
1019 bool isCompressed)
1020{
1021 auto const name = protocolMessageName(type);
1024
1025 auto const category = TrafficCount::categorize(
1026 *m, static_cast<protocol::MessageType>(type), true);
1027
1028 // report total incoming traffic
1030 TrafficCount::category::total, static_cast<int>(size));
1031
1032 // increase the traffic received for a specific category
1033 overlay_.reportInboundTraffic(category, static_cast<int>(size));
1034
1035 using namespace protocol;
1036 if ((type == MessageType::mtTRANSACTION ||
1037 type == MessageType::mtHAVE_TRANSACTIONS ||
1038 type == MessageType::mtTRANSACTIONS ||
1039 // GET_OBJECTS
1041 // GET_LEDGER
1044 // LEDGER_DATA
1048 {
1050 static_cast<MessageType>(type), static_cast<std::uint64_t>(size));
1051 }
1052 JLOG(journal_.trace()) << "onMessageBegin: " << type << " " << size << " "
1053 << uncompressed_size << " " << isCompressed;
1054}
1055
1056void
1060{
1061 load_event_.reset();
1063}
1064
1065void
1067{
1068 auto const s = m->list_size();
1069
1070 if (s == 0)
1071 {
1073 return;
1074 }
1075
1076 if (s > 100)
1078
1080 jtMANIFEST, "receiveManifests", [this, that = shared_from_this(), m]() {
1081 overlay_.onManifests(m, that);
1082 });
1083}
1084
1085void
1087{
1088 if (m->type() == protocol::TMPing::ptPING)
1089 {
1090 // We have received a ping request, reply with a pong
1092 m->set_type(protocol::TMPing::ptPONG);
1093 send(std::make_shared<Message>(*m, protocol::mtPING));
1094 return;
1095 }
1096
1097 if (m->type() == protocol::TMPing::ptPONG && m->has_seq())
1098 {
1099 // Only reset the ping sequence if we actually received a
1100 // PONG with the correct cookie. That way, any peers which
1101 // respond with incorrect cookies will eventually time out.
1102 if (m->seq() == lastPingSeq_)
1103 {
1105
1106 // Update latency estimate
1107 auto const rtt = std::chrono::round<std::chrono::milliseconds>(
1109
1111
1112 if (latency_)
1113 latency_ = (*latency_ * 7 + rtt) / 8;
1114 else
1115 latency_ = rtt;
1116 }
1117
1118 return;
1119 }
1120}
1121
1122void
1124{
1125 // VFALCO NOTE I think we should drop the peer immediately
1126 if (!cluster())
1127 {
1128 fee_.update(Resource::feeUselessData, "unknown cluster");
1129 return;
1130 }
1131
1132 for (int i = 0; i < m->clusternodes().size(); ++i)
1133 {
1134 protocol::TMClusterNode const& node = m->clusternodes(i);
1135
1137 if (node.has_nodename())
1138 name = node.nodename();
1139
1140 auto const publicKey =
1141 parseBase58<PublicKey>(TokenType::NodePublic, node.publickey());
1142
1143 // NIKB NOTE We should drop the peer immediately if
1144 // they send us a public key we can't parse
1145 if (publicKey)
1146 {
1147 auto const reportTime =
1148 NetClock::time_point{NetClock::duration{node.reporttime()}};
1149
1151 *publicKey, name, node.nodeload(), reportTime);
1152 }
1153 }
1154
1155 int loadSources = m->loadsources().size();
1156 if (loadSources != 0)
1157 {
1158 Resource::Gossip gossip;
1159 gossip.items.reserve(loadSources);
1160 for (int i = 0; i < m->loadsources().size(); ++i)
1161 {
1162 protocol::TMLoadSource const& node = m->loadsources(i);
1164 item.address = beast::IP::Endpoint::from_string(node.name());
1165 item.balance = node.cost();
1166 if (item.address != beast::IP::Endpoint())
1167 gossip.items.push_back(item);
1168 }
1170 }
1171
1172 // Calculate the cluster fee:
1173 auto const thresh = app_.timeKeeper().now() - 90s;
1174 std::uint32_t clusterFee = 0;
1175
1177 fees.reserve(app_.cluster().size());
1178
1179 app_.cluster().for_each([&fees, thresh](ClusterNode const& status) {
1180 if (status.getReportTime() >= thresh)
1181 fees.push_back(status.getLoadFee());
1182 });
1183
1184 if (!fees.empty())
1185 {
1186 auto const index = fees.size() / 2;
1187 std::nth_element(fees.begin(), fees.begin() + index, fees.end());
1188 clusterFee = fees[index];
1189 }
1190
1191 app_.getFeeTrack().setClusterFee(clusterFee);
1192}
1193
1194void
1196{
1197 // Don't allow endpoints from peers that are not known tracking or are
1198 // not using a version of the message that we support:
1199 if (tracking_.load() != Tracking::converged || m->version() != 2)
1200 return;
1201
1202 // The number is arbitrary and doesn't have any real significance or
1203 // implication for the protocol.
1204 if (m->endpoints_v2().size() >= 1024)
1205 {
1206 fee_.update(Resource::feeUselessData, "endpoints too large");
1207 return;
1208 }
1209
1211 endpoints.reserve(m->endpoints_v2().size());
1212
1213 auto malformed = 0;
1214 for (auto const& tm : m->endpoints_v2())
1215 {
1216 auto result = beast::IP::Endpoint::from_string_checked(tm.endpoint());
1217
1218 if (!result)
1219 {
1220 JLOG(p_journal_.error()) << "failed to parse incoming endpoint: {"
1221 << tm.endpoint() << "}";
1222 malformed++;
1223 continue;
1224 }
1225
1226 // If hops == 0, this Endpoint describes the peer we are connected
1227 // to -- in that case, we take the remote address seen on the
1228 // socket and store that in the IP::Endpoint. If this is the first
1229 // time, then we'll verify that their listener can receive incoming
1230 // by performing a connectivity test. if hops > 0, then we just
1231 // take the address/port we were given
1232 if (tm.hops() == 0)
1233 result = remote_address_.at_port(result->port());
1234
1235 endpoints.emplace_back(*result, tm.hops());
1236 }
1237
1238 // Charge the peer for each malformed endpoint. As there still may be
1239 // multiple valid endpoints we don't return early.
1240 if (malformed > 0)
1241 {
1242 fee_.update(
1243 Resource::feeInvalidData * malformed,
1244 std::to_string(malformed) + " malformed endpoints");
1245 }
1246
1247 if (!endpoints.empty())
1248 overlay_.peerFinder().on_endpoints(slot_, endpoints);
1249}
1250
1251void
1253{
1254 handleTransaction(m, true, false);
1255}
1256
1257void
1260 bool eraseTxQueue,
1261 bool batch)
1262{
1263 XRPL_ASSERT(
1264 eraseTxQueue != batch,
1265 ("ripple::PeerImp::handleTransaction : valid inputs"));
1267 return;
1268
1270 {
1271 // If we've never been in synch, there's nothing we can do
1272 // with a transaction
1273 JLOG(p_journal_.debug()) << "Ignoring incoming transaction: "
1274 << "Need network ledger";
1275 return;
1276 }
1277
1278 SerialIter sit(makeSlice(m->rawtransaction()));
1279
1280 try
1281 {
1282 auto stx = std::make_shared<STTx const>(sit);
1283 uint256 txID = stx->getTransactionID();
1284
1285 int flags;
1286 constexpr std::chrono::seconds tx_interval = 10s;
1287
1288 if (!app_.getHashRouter().shouldProcess(txID, id_, flags, tx_interval))
1289 {
1290 // we have seen this transaction recently
1291 if (flags & SF_BAD)
1292 {
1293 fee_.update(Resource::feeUselessData, "known bad");
1294 JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID;
1295 }
1296
1297 // Erase only if the server has seen this tx. If the server has not
1298 // seen this tx then the tx could not has been queued for this peer.
1299 else if (eraseTxQueue && txReduceRelayEnabled())
1300 removeTxQueue(txID);
1301
1305
1306 return;
1307 }
1308
1309 JLOG(p_journal_.debug()) << "Got tx " << txID;
1310
1311 bool checkSignature = true;
1312 if (cluster())
1313 {
1314 if (!m->has_deferred() || !m->deferred())
1315 {
1316 // Skip local checks if a server we trust
1317 // put the transaction in its open ledger
1318 flags |= SF_TRUSTED;
1319 }
1320
1321 // for non-validator nodes only -- localPublicKey is set for
1322 // validators only
1324 {
1325 // For now, be paranoid and have each validator
1326 // check each transaction, regardless of source
1327 checkSignature = false;
1328 }
1329 }
1330
1332 {
1333 JLOG(p_journal_.trace())
1334 << "No new transactions until synchronized";
1335 }
1336 else if (
1339 {
1341 JLOG(p_journal_.info()) << "Transaction queue is full";
1342 }
1343 else
1344 {
1347 "recvTransaction->checkTransaction",
1349 flags,
1350 checkSignature,
1351 batch,
1352 stx]() {
1353 if (auto peer = weak.lock())
1354 peer->checkTransaction(
1355 flags, checkSignature, stx, batch);
1356 });
1357 }
1358 }
1359 catch (std::exception const& ex)
1360 {
1361 JLOG(p_journal_.warn())
1362 << "Transaction invalid: " << strHex(m->rawtransaction())
1363 << ". Exception: " << ex.what();
1364 }
1365}
1366
1367void
1369{
1370 auto badData = [&](std::string const& msg) {
1371 fee_.update(Resource::feeInvalidData, "get_ledger " + msg);
1372 JLOG(p_journal_.warn()) << "TMGetLedger: " << msg;
1373 };
1374 auto const itype{m->itype()};
1375
1376 // Verify ledger info type
1377 if (itype < protocol::liBASE || itype > protocol::liTS_CANDIDATE)
1378 return badData("Invalid ledger info type");
1379
1380 auto const ltype = [&m]() -> std::optional<::protocol::TMLedgerType> {
1381 if (m->has_ltype())
1382 return m->ltype();
1383 return std::nullopt;
1384 }();
1385
1386 if (itype == protocol::liTS_CANDIDATE)
1387 {
1388 if (!m->has_ledgerhash())
1389 return badData("Invalid TX candidate set, missing TX set hash");
1390 }
1391 else if (
1392 !m->has_ledgerhash() && !m->has_ledgerseq() &&
1393 !(ltype && *ltype == protocol::ltCLOSED))
1394 {
1395 return badData("Invalid request");
1396 }
1397
1398 // Verify ledger type
1399 if (ltype && (*ltype < protocol::ltACCEPTED || *ltype > protocol::ltCLOSED))
1400 return badData("Invalid ledger type");
1401
1402 // Verify ledger hash
1403 if (m->has_ledgerhash() && !stringIsUint256Sized(m->ledgerhash()))
1404 return badData("Invalid ledger hash");
1405
1406 // Verify ledger sequence
1407 if (m->has_ledgerseq())
1408 {
1409 auto const ledgerSeq{m->ledgerseq()};
1410
1411 // Check if within a reasonable range
1412 using namespace std::chrono_literals;
1414 ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1415 {
1416 return badData(
1417 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1418 }
1419 }
1420
1421 // Verify ledger node IDs
1422 if (itype != protocol::liBASE)
1423 {
1424 if (m->nodeids_size() <= 0)
1425 return badData("Invalid ledger node IDs");
1426
1427 for (auto const& nodeId : m->nodeids())
1428 {
1429 if (deserializeSHAMapNodeID(nodeId) == std::nullopt)
1430 return badData("Invalid SHAMap node ID");
1431 }
1432 }
1433
1434 // Verify query type
1435 if (m->has_querytype() && m->querytype() != protocol::qtINDIRECT)
1436 return badData("Invalid query type");
1437
1438 // Verify query depth
1439 if (m->has_querydepth())
1440 {
1441 if (m->querydepth() > Tuning::maxQueryDepth ||
1442 itype == protocol::liBASE)
1443 {
1444 return badData("Invalid query depth");
1445 }
1446 }
1447
1448 // Queue a job to process the request
1450 app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m]() {
1451 if (auto peer = weak.lock())
1452 peer->processLedgerRequest(m);
1453 });
1454}
1455
1456void
1458{
1459 JLOG(p_journal_.trace()) << "onMessage, TMProofPathRequest";
1461 {
1462 fee_.update(
1463 Resource::feeMalformedRequest, "proof_path_request disabled");
1464 return;
1465 }
1466
1467 fee_.update(
1468 Resource::feeModerateBurdenPeer, "received a proof path request");
1471 jtREPLAY_REQ, "recvProofPathRequest", [weak, m]() {
1472 if (auto peer = weak.lock())
1473 {
1474 auto reply =
1475 peer->ledgerReplayMsgHandler_.processProofPathRequest(m);
1476 if (reply.has_error())
1477 {
1478 if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1479 peer->charge(
1480 Resource::feeMalformedRequest,
1481 "proof_path_request");
1482 else
1483 peer->charge(
1484 Resource::feeRequestNoReply, "proof_path_request");
1485 }
1486 else
1487 {
1488 peer->send(std::make_shared<Message>(
1489 reply, protocol::mtPROOF_PATH_RESPONSE));
1490 }
1491 }
1492 });
1493}
1494
1495void
1497{
1498 if (!ledgerReplayEnabled_)
1499 {
1500 fee_.update(
1501 Resource::feeMalformedRequest, "proof_path_response disabled");
1502 return;
1503 }
1504
1505 if (!ledgerReplayMsgHandler_.processProofPathResponse(m))
1506 {
1507 fee_.update(Resource::feeInvalidData, "proof_path_response");
1508 }
1509}
1510
1511void
1513{
1514 JLOG(p_journal_.trace()) << "onMessage, TMReplayDeltaRequest";
1515 if (!ledgerReplayEnabled_)
1516 {
1517 fee_.update(
1518 Resource::feeMalformedRequest, "replay_delta_request disabled");
1519 return;
1520 }
1521
1522 fee_.fee = Resource::feeModerateBurdenPeer;
1523 std::weak_ptr<PeerImp> weak = shared_from_this();
1524 app_.getJobQueue().addJob(
1525 jtREPLAY_REQ, "recvReplayDeltaRequest", [weak, m]() {
1526 if (auto peer = weak.lock())
1527 {
1528 auto reply =
1529 peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m);
1530 if (reply.has_error())
1531 {
1532 if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1533 peer->charge(
1534 Resource::feeMalformedRequest,
1535 "replay_delta_request");
1536 else
1537 peer->charge(
1538 Resource::feeRequestNoReply,
1539 "replay_delta_request");
1540 }
1541 else
1542 {
1543 peer->send(std::make_shared<Message>(
1544 reply, protocol::mtREPLAY_DELTA_RESPONSE));
1545 }
1546 }
1547 });
1548}
1549
1550void
1552{
1553 if (!ledgerReplayEnabled_)
1554 {
1555 fee_.update(
1556 Resource::feeMalformedRequest, "replay_delta_response disabled");
1557 return;
1558 }
1559
1560 if (!ledgerReplayMsgHandler_.processReplayDeltaResponse(m))
1561 {
1562 fee_.update(Resource::feeInvalidData, "replay_delta_response");
1563 }
1564}
1565
1566void
1568{
1569 auto badData = [&](std::string const& msg) {
1570 fee_.update(Resource::feeInvalidData, msg);
1571 JLOG(p_journal_.warn()) << "TMLedgerData: " << msg;
1572 };
1573
1574 // Verify ledger hash
1575 if (!stringIsUint256Sized(m->ledgerhash()))
1576 return badData("Invalid ledger hash");
1577
1578 // Verify ledger sequence
1579 {
1580 auto const ledgerSeq{m->ledgerseq()};
1581 if (m->type() == protocol::liTS_CANDIDATE)
1582 {
1583 if (ledgerSeq != 0)
1584 {
1585 return badData(
1586 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1587 }
1588 }
1589 else
1590 {
1591 // Check if within a reasonable range
1592 using namespace std::chrono_literals;
1593 if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s &&
1594 ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1595 {
1596 return badData(
1597 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1598 }
1599 }
1600 }
1601
1602 // Verify ledger info type
1603 if (m->type() < protocol::liBASE || m->type() > protocol::liTS_CANDIDATE)
1604 return badData("Invalid ledger info type");
1605
1606 // Verify reply error
1607 if (m->has_error() &&
1608 (m->error() < protocol::reNO_LEDGER ||
1609 m->error() > protocol::reBAD_REQUEST))
1610 {
1611 return badData("Invalid reply error");
1612 }
1613
1614 // Verify ledger nodes.
1615 if (m->nodes_size() <= 0 || m->nodes_size() > Tuning::hardMaxReplyNodes)
1616 {
1617 return badData(
1618 "Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size()));
1619 }
1620
1621 // If there is a request cookie, attempt to relay the message
1622 if (m->has_requestcookie())
1623 {
1624 if (auto peer = overlay_.findPeerByShortID(m->requestcookie()))
1625 {
1626 m->clear_requestcookie();
1627 peer->send(std::make_shared<Message>(*m, protocol::mtLEDGER_DATA));
1628 }
1629 else
1630 {
1631 JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
1632 }
1633 return;
1634 }
1635
1636 uint256 const ledgerHash{m->ledgerhash()};
1637
1638 // Otherwise check if received data for a candidate transaction set
1639 if (m->type() == protocol::liTS_CANDIDATE)
1640 {
1641 std::weak_ptr<PeerImp> weak{shared_from_this()};
1642 app_.getJobQueue().addJob(
1643 jtTXN_DATA, "recvPeerData", [weak, ledgerHash, m]() {
1644 if (auto peer = weak.lock())
1645 {
1646 peer->app_.getInboundTransactions().gotData(
1647 ledgerHash, peer, m);
1648 }
1649 });
1650 return;
1651 }
1652
1653 // Consume the message
1654 app_.getInboundLedgers().gotLedgerData(ledgerHash, shared_from_this(), m);
1655}
1656
1657void
1659{
1660 protocol::TMProposeSet& set = *m;
1661
1662 auto const sig = makeSlice(set.signature());
1663
1664 // Preliminary check for the validity of the signature: A DER encoded
1665 // signature can't be longer than 72 bytes.
1666 if ((std::clamp<std::size_t>(sig.size(), 64, 72) != sig.size()) ||
1667 (publicKeyType(makeSlice(set.nodepubkey())) != KeyType::secp256k1))
1668 {
1669 JLOG(p_journal_.warn()) << "Proposal: malformed";
1670 fee_.update(
1671 Resource::feeInvalidSignature,
1672 " signature can't be longer than 72 bytes");
1673 return;
1674 }
1675
1676 if (!stringIsUint256Sized(set.currenttxhash()) ||
1677 !stringIsUint256Sized(set.previousledger()))
1678 {
1679 JLOG(p_journal_.warn()) << "Proposal: malformed";
1680 fee_.update(Resource::feeMalformedRequest, "bad hashes");
1681 return;
1682 }
1683
1684 // RH TODO: when isTrusted = false we should probably also cache a key
1685 // suppression for 30 seconds to avoid doing a relatively expensive lookup
1686 // every time a spam packet is received
1687 PublicKey const publicKey{makeSlice(set.nodepubkey())};
1688 auto const isTrusted = app_.validators().trusted(publicKey);
1689
1690 // If the operator has specified that untrusted proposals be dropped then
1691 // this happens here I.e. before further wasting CPU verifying the signature
1692 // of an untrusted key
1693 if (!isTrusted)
1694 {
1695 // report untrusted proposal messages
1696 overlay_.reportInboundTraffic(
1697 TrafficCount::category::proposal_untrusted,
1698 Message::messageSize(*m));
1699
1700 if (app_.config().RELAY_UNTRUSTED_PROPOSALS == -1)
1701 return;
1702 }
1703
1704 uint256 const proposeHash{set.currenttxhash()};
1705 uint256 const prevLedger{set.previousledger()};
1706
1707 NetClock::time_point const closeTime{NetClock::duration{set.closetime()}};
1708
1709 uint256 const suppression = proposalUniqueId(
1710 proposeHash,
1711 prevLedger,
1712 set.proposeseq(),
1713 closeTime,
1714 publicKey.slice(),
1715 sig);
1716
1717 if (auto [added, relayed] =
1718 app_.getHashRouter().addSuppressionPeerWithStatus(suppression, id_);
1719 !added)
1720 {
1721 // Count unique messages (Slots has it's own 'HashRouter'), which a peer
1722 // receives within IDLED seconds since the message has been relayed.
1723 if (reduceRelayReady() && relayed &&
1724 (stopwatch().now() - *relayed) < reduce_relay::IDLED)
1725 overlay_.updateSlotAndSquelch(
1726 suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);
1727
1728 // report duplicate proposal messages
1729 overlay_.reportInboundTraffic(
1730 TrafficCount::category::proposal_duplicate,
1731 Message::messageSize(*m));
1732
1733 JLOG(p_journal_.trace()) << "Proposal: duplicate";
1734
1735 return;
1736 }
1737
1738 if (!isTrusted)
1739 {
1740 if (tracking_.load() == Tracking::diverged)
1741 {
1742 JLOG(p_journal_.debug())
1743 << "Proposal: Dropping untrusted (peer divergence)";
1744 return;
1745 }
1746
1747 if (!cluster() && app_.getFeeTrack().isLoadedLocal())
1748 {
1749 JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (load)";
1750 return;
1751 }
1752 }
1753
1754 JLOG(p_journal_.trace())
1755 << "Proposal: " << (isTrusted ? "trusted" : "untrusted");
1756
1757 auto proposal = RCLCxPeerPos(
1758 publicKey,
1759 sig,
1760 suppression,
1762 prevLedger,
1763 set.proposeseq(),
1764 proposeHash,
1765 closeTime,
1766 app_.timeKeeper().closeTime(),
1767 calcNodeID(app_.validatorManifests().getMasterKey(publicKey))});
1768
1769 std::weak_ptr<PeerImp> weak = shared_from_this();
1770 app_.getJobQueue().addJob(
1771 isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut,
1772 "recvPropose->checkPropose",
1773 [weak, isTrusted, m, proposal]() {
1774 if (auto peer = weak.lock())
1775 peer->checkPropose(isTrusted, m, proposal);
1776 });
1777}
1778
1779void
1781{
1782 JLOG(p_journal_.trace()) << "Status: Change";
1783
1784 if (!m->has_networktime())
1785 m->set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1786
1787 {
1788 std::lock_guard sl(recentLock_);
1789 if (!last_status_.has_newstatus() || m->has_newstatus())
1790 last_status_ = *m;
1791 else
1792 {
1793 // preserve old status
1794 protocol::NodeStatus status = last_status_.newstatus();
1795 last_status_ = *m;
1796 m->set_newstatus(status);
1797 }
1798 }
1799
1800 if (m->newevent() == protocol::neLOST_SYNC)
1801 {
1802 bool outOfSync{false};
1803 {
1804 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1805 // guarded by recentLock_.
1806 std::lock_guard sl(recentLock_);
1807 if (!closedLedgerHash_.isZero())
1808 {
1809 outOfSync = true;
1810 closedLedgerHash_.zero();
1811 }
1812 previousLedgerHash_.zero();
1813 }
1814 if (outOfSync)
1815 {
1816 JLOG(p_journal_.debug()) << "Status: Out of sync";
1817 }
1818 return;
1819 }
1820
1821 {
1822 uint256 closedLedgerHash{};
1823 bool const peerChangedLedgers{
1824 m->has_ledgerhash() && stringIsUint256Sized(m->ledgerhash())};
1825
1826 {
1827 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1828 // guarded by recentLock_.
1829 std::lock_guard sl(recentLock_);
1830 if (peerChangedLedgers)
1831 {
1832 closedLedgerHash_ = m->ledgerhash();
1833 closedLedgerHash = closedLedgerHash_;
1834 addLedger(closedLedgerHash, sl);
1835 }
1836 else
1837 {
1838 closedLedgerHash_.zero();
1839 }
1840
1841 if (m->has_ledgerhashprevious() &&
1842 stringIsUint256Sized(m->ledgerhashprevious()))
1843 {
1844 previousLedgerHash_ = m->ledgerhashprevious();
1845 addLedger(previousLedgerHash_, sl);
1846 }
1847 else
1848 {
1849 previousLedgerHash_.zero();
1850 }
1851 }
1852 if (peerChangedLedgers)
1853 {
1854 JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;
1855 }
1856 else
1857 {
1858 JLOG(p_journal_.debug()) << "Status: No ledger";
1859 }
1860 }
1861
1862 if (m->has_firstseq() && m->has_lastseq())
1863 {
1864 std::lock_guard sl(recentLock_);
1865
1866 minLedger_ = m->firstseq();
1867 maxLedger_ = m->lastseq();
1868
1869 if ((maxLedger_ < minLedger_) || (minLedger_ == 0) || (maxLedger_ == 0))
1870 minLedger_ = maxLedger_ = 0;
1871 }
1872
1873 if (m->has_ledgerseq() &&
1874 app_.getLedgerMaster().getValidatedLedgerAge() < 2min)
1875 {
1876 checkTracking(
1877 m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex());
1878 }
1879
1880 app_.getOPs().pubPeerStatus([=, this]() -> Json::Value {
1882
1883 if (m->has_newstatus())
1884 {
1885 switch (m->newstatus())
1886 {
1887 case protocol::nsCONNECTING:
1888 j[jss::status] = "CONNECTING";
1889 break;
1890 case protocol::nsCONNECTED:
1891 j[jss::status] = "CONNECTED";
1892 break;
1893 case protocol::nsMONITORING:
1894 j[jss::status] = "MONITORING";
1895 break;
1896 case protocol::nsVALIDATING:
1897 j[jss::status] = "VALIDATING";
1898 break;
1899 case protocol::nsSHUTTING:
1900 j[jss::status] = "SHUTTING";
1901 break;
1902 }
1903 }
1904
1905 if (m->has_newevent())
1906 {
1907 switch (m->newevent())
1908 {
1909 case protocol::neCLOSING_LEDGER:
1910 j[jss::action] = "CLOSING_LEDGER";
1911 break;
1912 case protocol::neACCEPTED_LEDGER:
1913 j[jss::action] = "ACCEPTED_LEDGER";
1914 break;
1915 case protocol::neSWITCHED_LEDGER:
1916 j[jss::action] = "SWITCHED_LEDGER";
1917 break;
1918 case protocol::neLOST_SYNC:
1919 j[jss::action] = "LOST_SYNC";
1920 break;
1921 }
1922 }
1923
1924 if (m->has_ledgerseq())
1925 {
1926 j[jss::ledger_index] = m->ledgerseq();
1927 }
1928
1929 if (m->has_ledgerhash())
1930 {
1931 uint256 closedLedgerHash{};
1932 {
1933 std::lock_guard sl(recentLock_);
1934 closedLedgerHash = closedLedgerHash_;
1935 }
1936 j[jss::ledger_hash] = to_string(closedLedgerHash);
1937 }
1938
1939 if (m->has_networktime())
1940 {
1941 j[jss::date] = Json::UInt(m->networktime());
1942 }
1943
1944 if (m->has_firstseq() && m->has_lastseq())
1945 {
1946 j[jss::ledger_index_min] = Json::UInt(m->firstseq());
1947 j[jss::ledger_index_max] = Json::UInt(m->lastseq());
1948 }
1949
1950 return j;
1951 });
1952}
1953
1954void
1955PeerImp::checkTracking(std::uint32_t validationSeq)
1956{
1957 std::uint32_t serverSeq;
1958 {
1959 // Extract the sequence number of the highest
1960 // ledger this peer has
1961 std::lock_guard sl(recentLock_);
1962
1963 serverSeq = maxLedger_;
1964 }
1965 if (serverSeq != 0)
1966 {
1967 // Compare the peer's ledger sequence to the
1968 // sequence of a recently-validated ledger
1969 checkTracking(serverSeq, validationSeq);
1970 }
1971}
1972
1973void
1974PeerImp::checkTracking(std::uint32_t seq1, std::uint32_t seq2)
1975{
1976 int diff = std::max(seq1, seq2) - std::min(seq1, seq2);
1977
1978 if (diff < Tuning::convergedLedgerLimit)
1979 {
1980 // The peer's ledger sequence is close to the validation's
1981 tracking_ = Tracking::converged;
1982 }
1983
1984 if ((diff > Tuning::divergedLedgerLimit) &&
1985 (tracking_.load() != Tracking::diverged))
1986 {
1987 // The peer's ledger sequence is way off the validation's
1988 std::lock_guard sl(recentLock_);
1989
1990 tracking_ = Tracking::diverged;
1991 trackingTime_ = clock_type::now();
1992 }
1993}
1994
1995void
1997{
1998 if (!stringIsUint256Sized(m->hash()))
1999 {
2000 fee_.update(Resource::feeMalformedRequest, "bad hash");
2001 return;
2002 }
2003
2004 uint256 const hash{m->hash()};
2005
2006 if (m->status() == protocol::tsHAVE)
2007 {
2008 std::lock_guard sl(recentLock_);
2009
2010 if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
2011 recentTxSets_.end())
2012 {
2013 fee_.update(Resource::feeUselessData, "duplicate (tsHAVE)");
2014 return;
2015 }
2016
2017 recentTxSets_.push_back(hash);
2018 }
2019}
2020
2021void
2022PeerImp::onValidatorListMessage(
2023 std::string const& messageType,
2024 std::string const& manifest,
2025 std::uint32_t version,
2026 std::vector<ValidatorBlobInfo> const& blobs)
2027{
2028 // If there are no blobs, the message is malformed (possibly because of
2029 // ValidatorList class rules), so charge accordingly and skip processing.
2030 if (blobs.empty())
2031 {
2032 JLOG(p_journal_.warn()) << "Ignored malformed " << messageType
2033 << " from peer " << remote_address_;
2034 // This shouldn't ever happen with a well-behaved peer
2035 fee_.update(Resource::feeHeavyBurdenPeer, "no blobs");
2036 return;
2037 }
2038
2039 auto const hash = sha512Half(manifest, blobs, version);
2040
2041 JLOG(p_journal_.debug())
2042 << "Received " << messageType << " from " << remote_address_.to_string()
2043 << " (" << id_ << ")";
2044
2045 if (!app_.getHashRouter().addSuppressionPeer(hash, id_))
2046 {
2047 JLOG(p_journal_.debug())
2048 << messageType << ": received duplicate " << messageType;
2049 // Charging this fee here won't hurt the peer in the normal
2050 // course of operation (ie. refresh every 5 minutes), but
2051 // will add up if the peer is misbehaving.
2052 fee_.update(Resource::feeUselessData, "duplicate");
2053 return;
2054 }
2055
2056 auto const applyResult = app_.validators().applyListsAndBroadcast(
2057 manifest,
2058 version,
2059 blobs,
2060 remote_address_.to_string(),
2061 hash,
2062 app_.overlay(),
2063 app_.getHashRouter(),
2064 app_.getOPs());
2065
2066 JLOG(p_journal_.debug())
2067 << "Processed " << messageType << " version " << version << " from "
2068 << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
2069 : "unknown or invalid publisher")
2070 << " from " << remote_address_.to_string() << " (" << id_
2071 << ") with best result " << to_string(applyResult.bestDisposition());
2072
2073 // Act based on the best result
2074 switch (applyResult.bestDisposition())
2075 {
2076 // New list
2077 case ListDisposition::accepted:
2078 // Newest list is expired, and that needs to be broadcast, too
2079 case ListDisposition::expired:
2080 // Future list
2081 case ListDisposition::pending: {
2082 std::lock_guard<std::mutex> sl(recentLock_);
2083
2084 XRPL_ASSERT(
2085 applyResult.publisherKey,
2086 "ripple::PeerImp::onValidatorListMessage : publisher key is "
2087 "set");
2088 auto const& pubKey = *applyResult.publisherKey;
2089#ifndef NDEBUG
2090 if (auto const iter = publisherListSequences_.find(pubKey);
2091 iter != publisherListSequences_.end())
2092 {
2093 XRPL_ASSERT(
2094 iter->second < applyResult.sequence,
2095 "ripple::PeerImp::onValidatorListMessage : lower sequence");
2096 }
2097#endif
2098 publisherListSequences_[pubKey] = applyResult.sequence;
2099 }
2100 break;
2101 case ListDisposition::same_sequence:
2102 case ListDisposition::known_sequence:
2103#ifndef NDEBUG
2104 {
2105 std::lock_guard<std::mutex> sl(recentLock_);
2106 XRPL_ASSERT(
2107 applyResult.sequence && applyResult.publisherKey,
2108 "ripple::PeerImp::onValidatorListMessage : nonzero sequence "
2109 "and set publisher key");
2110 XRPL_ASSERT(
2111 publisherListSequences_[*applyResult.publisherKey] <=
2112 applyResult.sequence,
2113 "ripple::PeerImp::onValidatorListMessage : maximum sequence");
2114 }
2115#endif // !NDEBUG
2116
2117 break;
2118 case ListDisposition::stale:
2119 case ListDisposition::untrusted:
2120 case ListDisposition::invalid:
2121 case ListDisposition::unsupported_version:
2122 break;
2123 default:
2124 UNREACHABLE(
2125 "ripple::PeerImp::onValidatorListMessage : invalid best list "
2126 "disposition");
2127 }
2128
2129 // Charge based on the worst result
2130 switch (applyResult.worstDisposition())
2131 {
2132 case ListDisposition::accepted:
2133 case ListDisposition::expired:
2134 case ListDisposition::pending:
2135 // No charges for good data
2136 break;
2137 case ListDisposition::same_sequence:
2138 case ListDisposition::known_sequence:
2139 // Charging this fee here won't hurt the peer in the normal
2140 // course of operation (ie. refresh every 5 minutes), but
2141 // will add up if the peer is misbehaving.
2142 fee_.update(
2143 Resource::feeUselessData,
2144 " duplicate (same_sequence or known_sequence)");
2145 break;
2146 case ListDisposition::stale:
2147 // There are very few good reasons for a peer to send an
2148 // old list, particularly more than once.
2149 fee_.update(Resource::feeInvalidData, "expired");
2150 break;
2151 case ListDisposition::untrusted:
2152 // Charging this fee here won't hurt the peer in the normal
2153 // course of operation (ie. refresh every 5 minutes), but
2154 // will add up if the peer is misbehaving.
2155 fee_.update(Resource::feeUselessData, "untrusted");
2156 break;
2157 case ListDisposition::invalid:
2158 // This shouldn't ever happen with a well-behaved peer
2159 fee_.update(
2160 Resource::feeInvalidSignature, "invalid list disposition");
2161 break;
2162 case ListDisposition::unsupported_version:
2163 // During a version transition, this may be legitimate.
2164 // If it happens frequently, that's probably bad.
2165 fee_.update(Resource::feeInvalidData, "version");
2166 break;
2167 default:
2168 UNREACHABLE(
2169 "ripple::PeerImp::onValidatorListMessage : invalid worst list "
2170 "disposition");
2171 }
2172
2173 // Log based on all the results.
2174 for (auto const& [disp, count] : applyResult.dispositions)
2175 {
2176 switch (disp)
2177 {
2178 // New list
2179 case ListDisposition::accepted:
2180 JLOG(p_journal_.debug())
2181 << "Applied " << count << " new " << messageType
2182 << "(s) from peer " << remote_address_;
2183 break;
2184 // Newest list is expired, and that needs to be broadcast, too
2185 case ListDisposition::expired:
2186 JLOG(p_journal_.debug())
2187 << "Applied " << count << " expired " << messageType
2188 << "(s) from peer " << remote_address_;
2189 break;
2190 // Future list
2191 case ListDisposition::pending:
2192 JLOG(p_journal_.debug())
2193 << "Processed " << count << " future " << messageType
2194 << "(s) from peer " << remote_address_;
2195 break;
2196 case ListDisposition::same_sequence:
2197 JLOG(p_journal_.warn())
2198 << "Ignored " << count << " " << messageType
2199 << "(s) with current sequence from peer "
2200 << remote_address_;
2201 break;
2202 case ListDisposition::known_sequence:
2203 JLOG(p_journal_.warn())
2204 << "Ignored " << count << " " << messageType
2205 << "(s) with future sequence from peer " << remote_address_;
2206 break;
2207 case ListDisposition::stale:
2208 JLOG(p_journal_.warn())
2209 << "Ignored " << count << "stale " << messageType
2210 << "(s) from peer " << remote_address_;
2211 break;
2212 case ListDisposition::untrusted:
2213 JLOG(p_journal_.warn())
2214 << "Ignored " << count << " untrusted " << messageType
2215 << "(s) from peer " << remote_address_;
2216 break;
2217 case ListDisposition::unsupported_version:
2218 JLOG(p_journal_.warn())
2219 << "Ignored " << count << "unsupported version "
2220 << messageType << "(s) from peer " << remote_address_;
2221 break;
2222 case ListDisposition::invalid:
2223 JLOG(p_journal_.warn())
2224 << "Ignored " << count << "invalid " << messageType
2225 << "(s) from peer " << remote_address_;
2226 break;
2227 default:
2228 UNREACHABLE(
2229 "ripple::PeerImp::onValidatorListMessage : invalid list "
2230 "disposition");
2231 }
2232 }
2233}
2234
2235void
2237{
2238 try
2239 {
2240 if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))
2241 {
2242 JLOG(p_journal_.debug())
2243 << "ValidatorList: received validator list from peer using "
2244 << "protocol version " << to_string(protocol_)
2245 << " which shouldn't support this feature.";
2246 fee_.update(Resource::feeUselessData, "unsupported peer");
2247 return;
2248 }
2249 onValidatorListMessage(
2250 "ValidatorList",
2251 m->manifest(),
2252 m->version(),
2253 ValidatorList::parseBlobs(*m));
2254 }
2255 catch (std::exception const& e)
2256 {
2257 JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what()
2258 << " from peer " << remote_address_;
2259 using namespace std::string_literals;
2260 fee_.update(Resource::feeInvalidData, e.what());
2261 }
2262}
2263
2264void
2265PeerImp::onMessage(
2267{
2268 try
2269 {
2270 if (!supportsFeature(ProtocolFeature::ValidatorList2Propagation))
2271 {
2272 JLOG(p_journal_.debug())
2273 << "ValidatorListCollection: received validator list from peer "
2274 << "using protocol version " << to_string(protocol_)
2275 << " which shouldn't support this feature.";
2276 fee_.update(Resource::feeUselessData, "unsupported peer");
2277 return;
2278 }
2279 else if (m->version() < 2)
2280 {
2281 JLOG(p_journal_.debug())
2282 << "ValidatorListCollection: received invalid validator list "
2283 "version "
2284 << m->version() << " from peer using protocol version "
2285 << to_string(protocol_);
2286 fee_.update(Resource::feeInvalidData, "wrong version");
2287 return;
2288 }
2289 onValidatorListMessage(
2290 "ValidatorListCollection",
2291 m->manifest(),
2292 m->version(),
2293 ValidatorList::parseBlobs(*m));
2294 }
2295 catch (std::exception const& e)
2296 {
2297 JLOG(p_journal_.warn()) << "ValidatorListCollection: Exception, "
2298 << e.what() << " from peer " << remote_address_;
2299 using namespace std::string_literals;
2300 fee_.update(Resource::feeInvalidData, e.what());
2301 }
2302}
2303
2304void
2306{
2307 if (m->validation().size() < 50)
2308 {
2309 JLOG(p_journal_.warn()) << "Validation: Too small";
2310 fee_.update(Resource::feeMalformedRequest, "too small");
2311 return;
2312 }
2313
2314 try
2315 {
2316 auto const closeTime = app_.timeKeeper().closeTime();
2317
2319 {
2320 SerialIter sit(makeSlice(m->validation()));
2321 val = std::make_shared<STValidation>(
2322 std::ref(sit),
2323 [this](PublicKey const& pk) {
2324 return calcNodeID(
2325 app_.validatorManifests().getMasterKey(pk));
2326 },
2327 false);
2328 val->setSeen(closeTime);
2329 }
2330
2331 if (!isCurrent(
2332 app_.getValidations().parms(),
2333 app_.timeKeeper().closeTime(),
2334 val->getSignTime(),
2335 val->getSeenTime()))
2336 {
2337 JLOG(p_journal_.trace()) << "Validation: Not current";
2338 fee_.update(Resource::feeUselessData, "not current");
2339 return;
2340 }
2341
2342 // RH TODO: when isTrusted = false we should probably also cache a key
2343 // suppression for 30 seconds to avoid doing a relatively expensive
2344 // lookup every time a spam packet is received
2345 auto const isTrusted =
2346 app_.validators().trusted(val->getSignerPublic());
2347
2348 // If the operator has specified that untrusted validations be
2349 // dropped, then that happens here, i.e. before wasting further CPU
2350 // verifying the signature of an untrusted key
2351 if (!isTrusted)
2352 {
2353 // increase untrusted validations received
2354 overlay_.reportInboundTraffic(
2355 TrafficCount::category::validation_untrusted,
2356 Message::messageSize(*m));
2357
2358 if (app_.config().RELAY_UNTRUSTED_VALIDATIONS == -1)
2359 return;
2360 }
2361
2362 auto key = sha512Half(makeSlice(m->validation()));
2363
2364 auto [added, relayed] =
2365 app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);
2366
2367 if (!added)
2368 {
2369 // Count unique messages (Slots has its own 'HashRouter'), which a
2370 // peer receives within IDLED seconds since the message has been
2371 // relayed. Wait WAIT_ON_BOOTUP time to let the server establish
2372 // connections to peers.
2373 if (reduceRelayReady() && relayed &&
2374 (stopwatch().now() - *relayed) < reduce_relay::IDLED)
2375 overlay_.updateSlotAndSquelch(
2376 key, val->getSignerPublic(), id_, protocol::mtVALIDATION);
2377
2378 // increase duplicate validations received
2379 overlay_.reportInboundTraffic(
2380 TrafficCount::category::validation_duplicate,
2381 Message::messageSize(*m));
2382
2383 JLOG(p_journal_.trace()) << "Validation: duplicate";
2384 return;
2385 }
2386
2387 if (!isTrusted && (tracking_.load() == Tracking::diverged))
2388 {
2389 JLOG(p_journal_.debug())
2390 << "Dropping untrusted validation from diverged peer";
2391 }
2392 else if (isTrusted || !app_.getFeeTrack().isLoadedLocal())
2393 {
2394 std::string const name = [isTrusted, val]() {
2395 std::string ret =
2396 isTrusted ? "Trusted validation" : "Untrusted validation";
2397
2398#ifdef DEBUG
2399 ret += " " +
2400 std::to_string(val->getFieldU32(sfLedgerSequence)) + ": " +
2401 to_string(val->getNodeID());
2402#endif
2403
2404 return ret;
2405 }();
2406
2407 std::weak_ptr<PeerImp> weak = shared_from_this();
2408 app_.getJobQueue().addJob(
2409 isTrusted ? jtVALIDATION_t : jtVALIDATION_ut,
2410 name,
2411 [weak, val, m, key]() {
2412 if (auto peer = weak.lock())
2413 peer->checkValidation(val, key, m);
2414 });
2415 }
2416 else
2417 {
2418 JLOG(p_journal_.debug())
2419 << "Dropping untrusted validation for load";
2420 }
2421 }
2422 catch (std::exception const& e)
2423 {
2424 JLOG(p_journal_.warn())
2425 << "Exception processing validation: " << e.what();
2426 using namespace std::string_literals;
2427 fee_.update(Resource::feeMalformedRequest, e.what());
2428 }
2429}
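The duplicate branch above only counts a message toward squelching when the original was relayed recently. A minimal sketch of that timing test, assuming the relay timestamp and reduce_relay::IDLED are measured against the same steady clock (the helper name is illustrative, not part of PeerImp):

#include <chrono>
#include <optional>

// Sketch: a duplicate feeds the squelch slots only if the message was
// relayed within the last 'idled' seconds (reduce_relay::IDLED).
bool countsTowardSquelch(
    std::optional<std::chrono::steady_clock::time_point> relayedAt,
    std::chrono::steady_clock::time_point now,
    std::chrono::seconds idled)
{
    return relayedAt && (now - *relayedAt) < idled;
}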
2430
2431 void
2432 PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
2433{
2434 protocol::TMGetObjectByHash& packet = *m;
2435
2436 JLOG(p_journal_.trace()) << "received TMGetObjectByHash " << packet.type()
2437 << " " << packet.objects_size();
2438
2439 if (packet.query())
2440 {
2441 // this is a query
2442 if (send_queue_.size() >= Tuning::dropSendQueue)
2443 {
2444 JLOG(p_journal_.debug()) << "GetObject: Large send queue";
2445 return;
2446 }
2447
2448 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2449 {
2450 doFetchPack(m);
2451 return;
2452 }
2453
2454 if (packet.type() == protocol::TMGetObjectByHash::otTRANSACTIONS)
2455 {
2456 if (!txReduceRelayEnabled())
2457 {
2458 JLOG(p_journal_.error())
2459 << "TMGetObjectByHash: tx reduce-relay is disabled";
2460 fee_.update(Resource::feeMalformedRequest, "disabled");
2461 return;
2462 }
2463
2464 std::weak_ptr<PeerImp> weak = shared_from_this();
2465 app_.getJobQueue().addJob(
2466 jtREQUESTED_TXN, "doTransactions", [weak, m]() {
2467 if (auto peer = weak.lock())
2468 peer->doTransactions(m);
2469 });
2470 return;
2471 }
2472
2473 protocol::TMGetObjectByHash reply;
2474
2475 reply.set_query(false);
2476
2477 if (packet.has_seq())
2478 reply.set_seq(packet.seq());
2479
2480 reply.set_type(packet.type());
2481
2482 if (packet.has_ledgerhash())
2483 {
2484 if (!stringIsUint256Sized(packet.ledgerhash()))
2485 {
2486 fee_.update(Resource::feeMalformedRequest, "ledger hash");
2487 return;
2488 }
2489
2490 reply.set_ledgerhash(packet.ledgerhash());
2491 }
2492
2493 fee_.update(
2494 Resource::feeModerateBurdenPeer,
2495 " received a get object by hash request");
2496
2497 // This is a very minimal implementation
2498 for (int i = 0; i < packet.objects_size(); ++i)
2499 {
2500 auto const& obj = packet.objects(i);
2501 if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2502 {
2503 uint256 const hash{obj.hash()};
2504 // VFALCO TODO Move this someplace more sensible so we don't
2505 // need to inject the NodeStore interfaces.
2506 std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
2507 auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
2508 if (nodeObject)
2509 {
2510 protocol::TMIndexedObject& newObj = *reply.add_objects();
2511 newObj.set_hash(hash.begin(), hash.size());
2512 newObj.set_data(
2513 &nodeObject->getData().front(),
2514 nodeObject->getData().size());
2515
2516 if (obj.has_nodeid())
2517 newObj.set_index(obj.nodeid());
2518 if (obj.has_ledgerseq())
2519 newObj.set_ledgerseq(obj.ledgerseq());
2520
2521 // VFALCO NOTE "seq" in the message is obsolete
2522 }
2523 }
2524 }
2525
2526 JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size() << " of "
2527 << packet.objects_size();
2528 send(std::make_shared<Message>(reply, protocol::mtGET_OBJECTS));
2529 }
2530 else
2531 {
2532 // this is a reply
2533 std::uint32_t pLSeq = 0;
2534 bool pLDo = true;
2535 bool progress = false;
2536
2537 for (int i = 0; i < packet.objects_size(); ++i)
2538 {
2539 protocol::TMIndexedObject const& obj = packet.objects(i);
2540
2541 if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2542 {
2543 if (obj.has_ledgerseq())
2544 {
2545 if (obj.ledgerseq() != pLSeq)
2546 {
2547 if (pLDo && (pLSeq != 0))
2548 {
2549 JLOG(p_journal_.debug())
2550 << "GetObj: Full fetch pack for " << pLSeq;
2551 }
2552 pLSeq = obj.ledgerseq();
2553 pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);
2554
2555 if (!pLDo)
2556 {
2557 JLOG(p_journal_.debug())
2558 << "GetObj: Late fetch pack for " << pLSeq;
2559 }
2560 else
2561 progress = true;
2562 }
2563 }
2564
2565 if (pLDo)
2566 {
2567 uint256 const hash{obj.hash()};
2568
2569 app_.getLedgerMaster().addFetchPack(
2570 hash,
2571 std::make_shared<Blob>(
2572 obj.data().begin(), obj.data().end()));
2573 }
2574 }
2575 }
2576
2577 if (pLDo && (pLSeq != 0))
2578 {
2579 JLOG(p_journal_.debug())
2580 << "GetObj: Partial fetch pack for " << pLSeq;
2581 }
2582 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2583 app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
2584 }
2585}
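For reference, the query side of this exchange is assembled the same way as the reply above, with query set to true. A hedged sketch using the same protobuf setters this file already uses; the hash value and the enclosing PeerImp context (send) are assumed to come from the caller:

// Sketch: request one object by hash (mirrors handleHaveTransactions below).
protocol::TMGetObjectByHash request;
request.set_query(true);
request.set_type(protocol::TMGetObjectByHash::otTRANSACTIONS);
auto* obj = request.add_objects();
obj->set_hash(hash.data(), hash.size());  // 'hash' is a uint256 supplied by the caller
send(std::make_shared<Message>(request, protocol::mtGET_OBJECTS));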
2586
2587 void
2588 PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactions> const& m)
2589{
2590 if (!txReduceRelayEnabled())
2591 {
2592 JLOG(p_journal_.error())
2593 << "TMHaveTransactions: tx reduce-relay is disabled";
2594 fee_.update(Resource::feeMalformedRequest, "disabled");
2595 return;
2596 }
2597
2598 std::weak_ptr<PeerImp> weak = shared_from_this();
2599 app_.getJobQueue().addJob(
2600 jtMISSING_TXN, "handleHaveTransactions", [weak, m]() {
2601 if (auto peer = weak.lock())
2602 peer->handleHaveTransactions(m);
2603 });
2604}
2605
2606void
2607 PeerImp::handleHaveTransactions(
2608 std::shared_ptr<protocol::TMHaveTransactions> const& m)
2609{
2610 protocol::TMGetObjectByHash tmBH;
2611 tmBH.set_type(protocol::TMGetObjectByHash_ObjectType_otTRANSACTIONS);
2612 tmBH.set_query(true);
2613
2614 JLOG(p_journal_.trace())
2615 << "received TMHaveTransactions " << m->hashes_size();
2616
2617 for (std::uint32_t i = 0; i < m->hashes_size(); i++)
2618 {
2619 if (!stringIsUint256Sized(m->hashes(i)))
2620 {
2621 JLOG(p_journal_.error())
2622 << "TMHaveTransactions with invalid hash size";
2623 fee_.update(Resource::feeMalformedRequest, "hash size");
2624 return;
2625 }
2626
2627 uint256 hash(m->hashes(i));
2628
2629 auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2630
2631 JLOG(p_journal_.trace()) << "checking transaction " << (bool)txn;
2632
2633 if (!txn)
2634 {
2635 JLOG(p_journal_.debug()) << "adding transaction to request";
2636
2637 auto obj = tmBH.add_objects();
2638 obj->set_hash(hash.data(), hash.size());
2639 }
2640 else
2641 {
2642 // Erase only if a peer has seen this tx. If the peer has not
2643 // seen this tx, then the tx could not have been queued for this
2644 // peer.
2645 removeTxQueue(hash);
2646 }
2647 }
2648
2649 JLOG(p_journal_.trace())
2650 << "transaction request object is " << tmBH.objects_size();
2651
2652 if (tmBH.objects_size() > 0)
2653 send(std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS));
2654}
2655
2656 void
2657 PeerImp::onMessage(std::shared_ptr<protocol::TMTransactions> const& m)
2658{
2659 if (!txReduceRelayEnabled())
2660 {
2661 JLOG(p_journal_.error())
2662 << "TMTransactions: tx reduce-relay is disabled";
2663 fee_.update(Resource::feeMalformedRequest, "disabled");
2664 return;
2665 }
2666
2667 JLOG(p_journal_.trace())
2668 << "received TMTransactions " << m->transactions_size();
2669
2670 overlay_.addTxMetrics(m->transactions_size());
2671
2672 for (std::uint32_t i = 0; i < m->transactions_size(); ++i)
2673 handleTransaction(
2674 std::shared_ptr<protocol::TMTransaction>(
2675 m->mutable_transactions(i), [](protocol::TMTransaction*) {}),
2676 false,
2677 true);
2678}
2679
2680void
2681PeerImp::onMessage(std::shared_ptr<protocol::TMSquelch> const& m)
2682{
2683 using on_message_fn =
2684 void (PeerImp::*)(std::shared_ptr<protocol::TMSquelch> const&);
2685 if (!strand_.running_in_this_thread())
2686 return post(
2687 strand_,
2688 std::bind(
2689 (on_message_fn)&PeerImp::onMessage, shared_from_this(), m));
2690
2691 if (!m->has_validatorpubkey())
2692 {
2693 fee_.update(Resource::feeInvalidData, "squelch no pubkey");
2694 return;
2695 }
2696 auto validator = m->validatorpubkey();
2697 auto const slice{makeSlice(validator)};
2698 if (!publicKeyType(slice))
2699 {
2700 fee_.update(Resource::feeInvalidData, "squelch bad pubkey");
2701 return;
2702 }
2703 PublicKey key(slice);
2704
2705 // Ignore the squelch for validator's own messages.
2706 if (key == app_.getValidationPublicKey())
2707 {
2708 JLOG(p_journal_.debug())
2709 << "onMessage: TMSquelch discarding validator's squelch " << slice;
2710 return;
2711 }
2712
2713 std::uint32_t duration =
2714 m->has_squelchduration() ? m->squelchduration() : 0;
2715 if (!m->squelch())
2716 squelch_.removeSquelch(key);
2717 else if (!squelch_.addSquelch(key, std::chrono::seconds{duration}))
2718 fee_.update(Resource::feeInvalidData, "squelch duration");
2719
2720 JLOG(p_journal_.debug())
2721 << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;
2722}
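The sending side of this handler is a TMSquelch populated with the fields validated above. A hedged sketch, assuming the usual protobuf setters for these fields and the mtSQUELCH message type used by the overlay; validatorKey and the 300-second duration are illustrative:

// Sketch: ask a peer to suppress a validator's messages for 300 seconds.
protocol::TMSquelch squelch;
squelch.set_squelch(true);
squelch.set_validatorpubkey(validatorKey.data(), validatorKey.size());
squelch.set_squelchduration(300);
send(std::make_shared<Message>(squelch, protocol::mtSQUELCH));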
2723
2724//--------------------------------------------------------------------------
2725
2726void
2727PeerImp::addLedger(
2728 uint256 const& hash,
2729 std::lock_guard<std::mutex> const& lockedRecentLock)
2730{
2731 // lockedRecentLock is passed as a reminder that recentLock_ must be
2732 // locked by the caller.
2733 (void)lockedRecentLock;
2734
2735 if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
2736 recentLedgers_.end())
2737 return;
2738
2739 recentLedgers_.push_back(hash);
2740}
2741
2742void
2743PeerImp::doFetchPack(std::shared_ptr<protocol::TMGetObjectByHash> const& packet)
2744{
2745 // VFALCO TODO Invert this dependency using an observer and shared state
2746 // object. Don't queue fetch pack jobs if we're under load or we already
2747 // have some queued.
2748 if (app_.getFeeTrack().isLoadedLocal() ||
2749 (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
2750 (app_.getJobQueue().getJobCount(jtPACK) > 10))
2751 {
2752 JLOG(p_journal_.info()) << "Too busy to make fetch pack";
2753 return;
2754 }
2755
2756 if (!stringIsUint256Sized(packet->ledgerhash()))
2757 {
2758 JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
2759 fee_.update(Resource::feeMalformedRequest, "hash size");
2760 return;
2761 }
2762
2763 fee_.fee = Resource::feeHeavyBurdenPeer;
2764
2765 uint256 const hash{packet->ledgerhash()};
2766
2767 std::weak_ptr<PeerImp> weak = shared_from_this();
2768 auto elapsed = UptimeClock::now();
2769 auto const pap = &app_;
2770 app_.getJobQueue().addJob(
2771 jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed]() {
2772 pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
2773 });
2774}
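The load gate above declines fetch-pack work under any of three conditions; restated as a small predicate (a sketch, with the thresholds copied from the code above):

#include <chrono>

// Sketch: doFetchPack refuses work when the server is locally loaded,
// the validated ledger is older than 40s, or more than 10 jtPACK jobs wait.
bool tooBusyForFetchPack(
    bool loadedLocal,
    std::chrono::seconds validatedLedgerAge,
    int queuedPackJobs)
{
    using namespace std::chrono_literals;
    return loadedLocal || validatedLedgerAge > 40s || queuedPackJobs > 10;
}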
2775
2776void
2777 PeerImp::doTransactions(
2778 std::shared_ptr<protocol::TMGetObjectByHash> const& packet)
2779{
2780 protocol::TMTransactions reply;
2781
2782 JLOG(p_journal_.trace()) << "received TMGetObjectByHash requesting tx "
2783 << packet->objects_size();
2784
2785 if (packet->objects_size() > reduce_relay::MAX_TX_QUEUE_SIZE)
2786 {
2787 JLOG(p_journal_.error()) << "doTransactions, invalid number of hashes";
2788 fee_.update(Resource::feeMalformedRequest, "too big");
2789 return;
2790 }
2791
2792 for (std::uint32_t i = 0; i < packet->objects_size(); ++i)
2793 {
2794 auto const& obj = packet->objects(i);
2795
2796 if (!stringIsUint256Sized(obj.hash()))
2797 {
2798 fee_.update(Resource::feeMalformedRequest, "hash size");
2799 return;
2800 }
2801
2802 uint256 hash(obj.hash());
2803
2804 auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2805
2806 if (!txn)
2807 {
2808 JLOG(p_journal_.error()) << "doTransactions, transaction not found "
2809 << Slice(hash.data(), hash.size());
2810 fee_.update(Resource::feeMalformedRequest, "tx not found");
2811 return;
2812 }
2813
2814 Serializer s;
2815 auto tx = reply.add_transactions();
2816 auto sttx = txn->getSTransaction();
2817 sttx->add(s);
2818 tx->set_rawtransaction(s.data(), s.size());
2819 tx->set_status(
2820 txn->getStatus() == INCLUDED ? protocol::tsCURRENT
2821 : protocol::tsNEW);
2822 tx->set_receivetimestamp(
2823 app_.timeKeeper().now().time_since_epoch().count());
2824 tx->set_deferred(txn->getSubmitResult().queued);
2825 }
2826
2827 if (reply.transactions_size() > 0)
2828 send(std::make_shared<Message>(reply, protocol::mtTRANSACTIONS));
2829}
2830
2831void
2832PeerImp::checkTransaction(
2833 int flags,
2834 bool checkSignature,
2835 std::shared_ptr<STTx const> const& stx,
2836 bool batch)
2837{
2838 // VFALCO TODO Rewrite to not use exceptions
2839 try
2840 {
2841 // Expired?
2842 if (stx->isFieldPresent(sfLastLedgerSequence) &&
2843 (stx->getFieldU32(sfLastLedgerSequence) <
2844 app_.getLedgerMaster().getValidLedgerIndex()))
2845 {
2846 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2847 charge(Resource::feeUselessData, "expired tx");
2848 return;
2849 }
2850
2851 if (isPseudoTx(*stx))
2852 {
2853 // Don't do anything with pseudo transactions except put them in the
2854 // TransactionMaster cache
2855 std::string reason;
2856 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2857 XRPL_ASSERT(
2858 tx->getStatus() == NEW,
2859 "ripple::PeerImp::checkTransaction Transaction created "
2860 "correctly");
2861 if (tx->getStatus() == NEW)
2862 {
2863 JLOG(p_journal_.debug())
2864 << "Processing " << (batch ? "batch" : "unsolicited")
2865 << " pseudo-transaction tx " << tx->getID();
2866
2867 app_.getMasterTransaction().canonicalize(&tx);
2868 // Tell the overlay about it, but don't relay it.
2869 auto const toSkip =
2870 app_.getHashRouter().shouldRelay(tx->getID());
2871 if (toSkip)
2872 {
2873 JLOG(p_journal_.debug())
2874 << "Passing skipped pseudo pseudo-transaction tx "
2875 << tx->getID();
2876 app_.overlay().relay(tx->getID(), {}, *toSkip);
2877 }
2878 if (!batch)
2879 {
2880 JLOG(p_journal_.debug())
2881 << "Charging for pseudo-transaction tx " << tx->getID();
2882 charge(Resource::feeUselessData, "pseudo tx");
2883 }
2884
2885 return;
2886 }
2887 }
2888
2889 if (checkSignature)
2890 {
2891 // Check the signature before handing off to the job queue.
2892 if (auto [valid, validReason] = checkValidity(
2893 app_.getHashRouter(),
2894 *stx,
2895 app_.getLedgerMaster().getValidatedRules(),
2896 app_.config());
2897 valid != Validity::Valid)
2898 {
2899 if (!validReason.empty())
2900 {
2901 JLOG(p_journal_.trace())
2902 << "Exception checking transaction: " << validReason;
2903 }
2904
2905 // Probably not necessary to set SF_BAD, but doesn't hurt.
2906 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2907 charge(
2908 Resource::feeInvalidSignature,
2909 "check transaction signature failure");
2910 return;
2911 }
2912 }
2913 else
2914 {
2915 forceValidity(
2916 app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);
2917 }
2918
2919 std::string reason;
2920 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2921
2922 if (tx->getStatus() == INVALID)
2923 {
2924 if (!reason.empty())
2925 {
2926 JLOG(p_journal_.trace())
2927 << "Exception checking transaction: " << reason;
2928 }
2929 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2930 charge(Resource::feeInvalidSignature, "tx (impossible)");
2931 return;
2932 }
2933
2934 bool const trusted(flags & SF_TRUSTED);
2935 app_.getOPs().processTransaction(
2936 tx, trusted, false, NetworkOPs::FailHard::no);
2937 }
2938 catch (std::exception const& ex)
2939 {
2940 JLOG(p_journal_.warn())
2941 << "Exception in " << __func__ << ": " << ex.what();
2942 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2943 using namespace std::string_literals;
2944 charge(Resource::feeInvalidData, "tx "s + ex.what());
2945 }
2946}
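The expiration test at the top of checkTransaction is a strict comparison of the transaction's LastLedgerSequence against the newest validated ledger index; a small worked illustration:

#include <cstdint>

// Sketch: a transaction whose LastLedgerSequence is below the validated
// ledger index can no longer be included and is treated as expired.
bool isExpired(std::uint32_t lastLedgerSeq, std::uint32_t validatedIndex)
{
    return lastLedgerSeq < validatedIndex;
}
// isExpired(1000, 1001) == true, isExpired(1000, 1000) == false.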
2947
2948// Called from our JobQueue
2949void
2950PeerImp::checkPropose(
2951 bool isTrusted,
2952 std::shared_ptr<protocol::TMProposeSet> const& packet,
2953 RCLCxPeerPos peerPos)
2954{
2955 JLOG(p_journal_.trace())
2956 << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";
2957
2958 XRPL_ASSERT(packet, "ripple::PeerImp::checkPropose : non-null packet");
2959
2960 if (!cluster() && !peerPos.checkSign())
2961 {
2962 std::string desc{"Proposal fails sig check"};
2963 JLOG(p_journal_.warn()) << desc;
2964 charge(Resource::feeInvalidSignature, desc);
2965 return;
2966 }
2967
2968 bool relay;
2969
2970 if (isTrusted)
2971 relay = app_.getOPs().processTrustedProposal(peerPos);
2972 else
2973 relay = app_.config().RELAY_UNTRUSTED_PROPOSALS == 1 || cluster();
2974
2975 if (relay)
2976 {
2977 // haveMessage contains peers which are suppressed; i.e. the peers
2978 // are the source of the message, so the message should not be
2979 // relayed to these peers. But the message must still be counted
2980 // as part of the squelch logic.
2981 auto haveMessage = app_.overlay().relay(
2982 *packet, peerPos.suppressionID(), peerPos.publicKey());
2983 if (reduceRelayReady() && !haveMessage.empty())
2984 overlay_.updateSlotAndSquelch(
2985 peerPos.suppressionID(),
2986 peerPos.publicKey(),
2987 std::move(haveMessage),
2988 protocol::mtPROPOSE_LEDGER);
2989 }
2990}
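The relay decision above reduces to a small predicate over trust, the processing result, and configuration; a sketch with illustrative parameter names:

// Sketch: mirrors the relay decision in checkPropose above.
bool shouldRelayProposal(
    bool isTrusted,
    bool trustedProcessedOk,      // result of processTrustedProposal
    int relayUntrustedProposals,  // config RELAY_UNTRUSTED_PROPOSALS
    bool inCluster)
{
    if (isTrusted)
        return trustedProcessedOk;
    return relayUntrustedProposals == 1 || inCluster;
}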
2991
2992void
2993PeerImp::checkValidation(
2994 std::shared_ptr<STValidation> const& val,
2995 uint256 const& key,
2996 std::shared_ptr<protocol::TMValidation> const& packet)
2997{
2998 if (!val->isValid())
2999 {
3000 std::string desc{"Validation forwarded by peer is invalid"};
3001 JLOG(p_journal_.debug()) << desc;
3002 charge(Resource::feeInvalidSignature, desc);
3003 return;
3004 }
3005
3006 // FIXME it should be safe to remove this try/catch. Investigate codepaths.
3007 try
3008 {
3009 if (app_.getOPs().recvValidation(val, std::to_string(id())) ||
3010 cluster())
3011 {
3012 // haveMessage contains peers which are suppressed; i.e. the peers
3013 // are the source of the message, so the message should not be
3014 // relayed to these peers. But the message must still be counted
3015 // as part of the squelch logic.
3016 auto haveMessage =
3017 overlay_.relay(*packet, key, val->getSignerPublic());
3018 if (reduceRelayReady() && !haveMessage.empty())
3019 {
3020 overlay_.updateSlotAndSquelch(
3021 key,
3022 val->getSignerPublic(),
3023 std::move(haveMessage),
3024 protocol::mtVALIDATION);
3025 }
3026 }
3027 }
3028 catch (std::exception const& ex)
3029 {
3030 JLOG(p_journal_.trace())
3031 << "Exception processing validation: " << ex.what();
3032 using namespace std::string_literals;
3033 charge(Resource::feeMalformedRequest, "validation "s + ex.what());
3034 }
3035}
3036
3037// Returns the set of peers that can help us get
3038// the TX tree with the specified root hash.
3039//
3040 static std::shared_ptr<PeerImp>
3041 getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip)
3042{
3043 std::shared_ptr<PeerImp> ret;
3044 int retScore = 0;
3045
3046 ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
3047 if (p->hasTxSet(rootHash) && p.get() != skip)
3048 {
3049 auto score = p->getScore(true);
3050 if (!ret || (score > retScore))
3051 {
3052 ret = std::move(p);
3053 retScore = score;
3054 }
3055 }
3056 });
3057
3058 return ret;
3059}
3060
3061// Returns a random peer weighted by how likely to
3062// have the ledger and how responsive it is.
3063//
3064 static std::shared_ptr<PeerImp>
3065 getPeerWithLedger(
3066 OverlayImpl& ov,
3067 uint256 const& ledgerHash,
3068 LedgerIndex ledger,
3069 PeerImp const* skip)
3070{
3071 std::shared_ptr<PeerImp> ret;
3072 int retScore = 0;
3073
3074 ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
3075 if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)
3076 {
3077 auto score = p->getScore(true);
3078 if (!ret || (score > retScore))
3079 {
3080 ret = std::move(p);
3081 retScore = score;
3082 }
3083 }
3084 });
3085
3086 return ret;
3087}
3088
3089void
3090PeerImp::sendLedgerBase(
3091 std::shared_ptr<Ledger const> const& ledger,
3092 protocol::TMLedgerData& ledgerData)
3093{
3094 JLOG(p_journal_.trace()) << "sendLedgerBase: Base data";
3095
3096 Serializer s(sizeof(LedgerInfo));
3097 addRaw(ledger->info(), s);
3098 ledgerData.add_nodes()->set_nodedata(s.getDataPtr(), s.getLength());
3099
3100 auto const& stateMap{ledger->stateMap()};
3101 if (stateMap.getHash() != beast::zero)
3102 {
3103 // Return account state root node if possible
3104 Serializer root(768);
3105
3106 stateMap.serializeRoot(root);
3107 ledgerData.add_nodes()->set_nodedata(
3108 root.getDataPtr(), root.getLength());
3109
3110 if (ledger->info().txHash != beast::zero)
3111 {
3112 auto const& txMap{ledger->txMap()};
3113 if (txMap.getHash() != beast::zero)
3114 {
3115 // Return TX root node if possible
3116 root.erase();
3117 txMap.serializeRoot(root);
3118 ledgerData.add_nodes()->set_nodedata(
3119 root.getDataPtr(), root.getLength());
3120 }
3121 }
3122 }
3123
3124 auto message{
3125 std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
3126 send(message);
3127}
3128
3129 std::shared_ptr<Ledger const>
3130 PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
3131{
3132 JLOG(p_journal_.trace()) << "getLedger: Ledger";
3133
3134 std::shared_ptr<Ledger const> ledger;
3135
3136 if (m->has_ledgerhash())
3137 {
3138 // Attempt to find ledger by hash
3139 uint256 const ledgerHash{m->ledgerhash()};
3140 ledger = app_.getLedgerMaster().getLedgerByHash(ledgerHash);
3141 if (!ledger)
3142 {
3143 JLOG(p_journal_.trace())
3144 << "getLedger: Don't have ledger with hash " << ledgerHash;
3145
3146 if (m->has_querytype() && !m->has_requestcookie())
3147 {
3148 // Attempt to relay the request to a peer
3149 if (auto const peer = getPeerWithLedger(
3150 overlay_,
3151 ledgerHash,
3152 m->has_ledgerseq() ? m->ledgerseq() : 0,
3153 this))
3154 {
3155 m->set_requestcookie(id());
3156 peer->send(
3157 std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3158 JLOG(p_journal_.debug())
3159 << "getLedger: Request relayed to peer";
3160 return ledger;
3161 }
3162
3163 JLOG(p_journal_.trace())
3164 << "getLedger: Failed to find peer to relay request";
3165 }
3166 }
3167 }
3168 else if (m->has_ledgerseq())
3169 {
3170 // Attempt to find ledger by sequence
3171 if (m->ledgerseq() < app_.getLedgerMaster().getEarliestFetch())
3172 {
3173 JLOG(p_journal_.debug())
3174 << "getLedger: Early ledger sequence request";
3175 }
3176 else
3177 {
3178 ledger = app_.getLedgerMaster().getLedgerBySeq(m->ledgerseq());
3179 if (!ledger)
3180 {
3181 JLOG(p_journal_.debug())
3182 << "getLedger: Don't have ledger with sequence "
3183 << m->ledgerseq();
3184 }
3185 }
3186 }
3187 else if (m->has_ltype() && m->ltype() == protocol::ltCLOSED)
3188 {
3189 ledger = app_.getLedgerMaster().getClosedLedger();
3190 }
3191
3192 if (ledger)
3193 {
3194 // Validate retrieved ledger sequence
3195 auto const ledgerSeq{ledger->info().seq};
3196 if (m->has_ledgerseq())
3197 {
3198 if (ledgerSeq != m->ledgerseq())
3199 {
3200 // Do not resource charge a peer responding to a relay
3201 if (!m->has_requestcookie())
3202 charge(
3203 Resource::feeMalformedRequest, "get_ledger ledgerSeq");
3204
3205 ledger.reset();
3206 JLOG(p_journal_.warn())
3207 << "getLedger: Invalid ledger sequence " << ledgerSeq;
3208 }
3209 }
3210 else if (ledgerSeq < app_.getLedgerMaster().getEarliestFetch())
3211 {
3212 ledger.reset();
3213 JLOG(p_journal_.debug())
3214 << "getLedger: Early ledger sequence request " << ledgerSeq;
3215 }
3216 }
3217 else
3218 {
3219 JLOG(p_journal_.debug()) << "getLedger: Unable to find ledger";
3220 }
3221
3222 return ledger;
3223}
3224
3225 std::shared_ptr<SHAMap const>
3226 PeerImp::getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const
3227{
3228 JLOG(p_journal_.trace()) << "getTxSet: TX set";
3229
3230 uint256 const txSetHash{m->ledgerhash()};
3231 std::shared_ptr<SHAMap const> shaMap{
3232 app_.getInboundTransactions().getSet(txSetHash, false)};
3233 if (!shaMap)
3234 {
3235 if (m->has_querytype() && !m->has_requestcookie())
3236 {
3237 // Attempt to relay the request to a peer
3238 if (auto const peer = getPeerWithTree(overlay_, txSetHash, this))
3239 {
3240 m->set_requestcookie(id());
3241 peer->send(
3242 std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3243 JLOG(p_journal_.debug()) << "getTxSet: Request relayed";
3244 }
3245 else
3246 {
3247 JLOG(p_journal_.debug())
3248 << "getTxSet: Failed to find relay peer";
3249 }
3250 }
3251 else
3252 {
3253 JLOG(p_journal_.debug()) << "getTxSet: Failed to find TX set";
3254 }
3255 }
3256
3257 return shaMap;
3258}
3259
3260void
3261PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
3262{
3263 // Do not resource charge a peer responding to a relay
3264 if (!m->has_requestcookie())
3265 charge(
3266 Resource::feeModerateBurdenPeer, "received a get ledger request");
3267
3268 std::shared_ptr<Ledger const> ledger;
3269 std::shared_ptr<SHAMap const> sharedMap;
3270 SHAMap const* map{nullptr};
3271 protocol::TMLedgerData ledgerData;
3272 bool fatLeaves{true};
3273 auto const itype{m->itype()};
3274
3275 if (itype == protocol::liTS_CANDIDATE)
3276 {
3277 if (sharedMap = getTxSet(m); !sharedMap)
3278 return;
3279 map = sharedMap.get();
3280
3281 // Fill out the reply
3282 ledgerData.set_ledgerseq(0);
3283 ledgerData.set_ledgerhash(m->ledgerhash());
3284 ledgerData.set_type(protocol::liTS_CANDIDATE);
3285 if (m->has_requestcookie())
3286 ledgerData.set_requestcookie(m->requestcookie());
3287
3288 // We'll already have most transactions
3289 fatLeaves = false;
3290 }
3291 else
3292 {
3293 if (send_queue_.size() >= Tuning::dropSendQueue)
3294 {
3295 JLOG(p_journal_.debug())
3296 << "processLedgerRequest: Large send queue";
3297 return;
3298 }
3299 if (app_.getFeeTrack().isLoadedLocal() && !cluster())
3300 {
3301 JLOG(p_journal_.debug()) << "processLedgerRequest: Too busy";
3302 return;
3303 }
3304
3305 if (ledger = getLedger(m); !ledger)
3306 return;
3307
3308 // Fill out the reply
3309 auto const ledgerHash{ledger->info().hash};
3310 ledgerData.set_ledgerhash(ledgerHash.begin(), ledgerHash.size());
3311 ledgerData.set_ledgerseq(ledger->info().seq);
3312 ledgerData.set_type(itype);
3313 if (m->has_requestcookie())
3314 ledgerData.set_requestcookie(m->requestcookie());
3315
3316 switch (itype)
3317 {
3318 case protocol::liBASE:
3319 sendLedgerBase(ledger, ledgerData);
3320 return;
3321
3322 case protocol::liTX_NODE:
3323 map = &ledger->txMap();
3324 JLOG(p_journal_.trace()) << "processLedgerRequest: TX map hash "
3325 << to_string(map->getHash());
3326 break;
3327
3328 case protocol::liAS_NODE:
3329 map = &ledger->stateMap();
3330 JLOG(p_journal_.trace())
3331 << "processLedgerRequest: Account state map hash "
3332 << to_string(map->getHash());
3333 break;
3334
3335 default:
3336 // This case should not be possible here
3337 JLOG(p_journal_.error())
3338 << "processLedgerRequest: Invalid ledger info type";
3339 return;
3340 }
3341 }
3342
3343 if (!map)
3344 {
3345 JLOG(p_journal_.warn()) << "processLedgerRequest: Unable to find map";
3346 return;
3347 }
3348
3349 // Add requested node data to reply
3350 if (m->nodeids_size() > 0)
3351 {
3352 auto const queryDepth{
3353 m->has_querydepth() ? m->querydepth() : (isHighLatency() ? 2 : 1)};
3354
3355 std::vector<std::pair<SHAMapNodeID, Blob>> data;
3356
3357 for (int i = 0; i < m->nodeids_size() &&
3358 ledgerData.nodes_size() < Tuning::softMaxReplyNodes;
3359 ++i)
3360 {
3361 auto const shaMapNodeId{deserializeSHAMapNodeID(m->nodeids(i))};
3362
3363 data.clear();
3364 data.reserve(Tuning::softMaxReplyNodes);
3365
3366 try
3367 {
3368 if (map->getNodeFat(*shaMapNodeId, data, fatLeaves, queryDepth))
3369 {
3370 JLOG(p_journal_.trace())
3371 << "processLedgerRequest: getNodeFat got "
3372 << data.size() << " nodes";
3373
3374 for (auto const& d : data)
3375 {
3376 if (ledgerData.nodes_size() >=
3377 Tuning::hardMaxReplyNodes)
3378 break;
3379 protocol::TMLedgerNode* node{ledgerData.add_nodes()};
3380 node->set_nodeid(d.first.getRawString());
3381 node->set_nodedata(d.second.data(), d.second.size());
3382 }
3383 }
3384 else
3385 {
3386 JLOG(p_journal_.warn())
3387 << "processLedgerRequest: getNodeFat returns false";
3388 }
3389 }
3390 catch (std::exception const& e)
3391 {
3392 std::string info;
3393 switch (itype)
3394 {
3395 case protocol::liBASE:
3396 // This case should not be possible here
3397 info = "Ledger base";
3398 break;
3399
3400 case protocol::liTX_NODE:
3401 info = "TX node";
3402 break;
3403
3404 case protocol::liAS_NODE:
3405 info = "AS node";
3406 break;
3407
3408 case protocol::liTS_CANDIDATE:
3409 info = "TS candidate";
3410 break;
3411
3412 default:
3413 info = "Invalid";
3414 break;
3415 }
3416
3417 if (!m->has_ledgerhash())
3418 info += ", no hash specified";
3419
3420 JLOG(p_journal_.error())
3421 << "processLedgerRequest: getNodeFat with nodeId "
3422 << *shaMapNodeId << " and ledger info type " << info
3423 << " throws exception: " << e.what();
3424 }
3425 }
3426
3427 JLOG(p_journal_.info())
3428 << "processLedgerRequest: Got request for " << m->nodeids_size()
3429 << " nodes at depth " << queryDepth << ", return "
3430 << ledgerData.nodes_size() << " nodes";
3431 }
3432
3433 if (ledgerData.nodes_size() == 0)
3434 return;
3435
3436 send(std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA));
3437}
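Reply size in processLedgerRequest is bounded twice: the outer loop stops expanding once Tuning::softMaxReplyNodes is reached, and the inner loop stops appending at Tuning::hardMaxReplyNodes; the query depth defaults to 2 for high-latency peers and 1 otherwise. A compact sketch of those bounds, with illustrative names:

#include <algorithm>
#include <cstddef>

// Sketch: default query depth and the clamp applied while filling the reply.
unsigned defaultQueryDepth(bool highLatency)
{
    return highLatency ? 2 : 1;
}

std::size_t clampReplyNodes(std::size_t current, std::size_t toAdd, std::size_t hardMax)
{
    return std::min(hardMax, current + toAdd);
}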
3438
3439int
3440PeerImp::getScore(bool haveItem) const
3441{
3442 // Random component of score, used to break ties and avoid
3443 // overloading the "best" peer
3444 static int const spRandomMax = 9999;
3445
3446 // Score for being very likely to have the thing we are
3447 // looking for; should be roughly spRandomMax
3448 static int const spHaveItem = 10000;
3449
3450 // Score reduction for each millisecond of latency; should
3451 // be roughly spRandomMax divided by the maximum reasonable
3452 // latency
3453 static int const spLatency = 30;
3454
3455 // Penalty for unknown latency; should be roughly spRandomMax
3456 static int const spNoLatency = 8000;
3457
3458 int score = rand_int(spRandomMax);
3459
3460 if (haveItem)
3461 score += spHaveItem;
3462
3463 std::optional<std::chrono::milliseconds> latency;
3464 {
3465 std::lock_guard sl(recentLock_);
3466 latency = latency_;
3467 }
3468
3469 if (latency)
3470 score -= latency->count() * spLatency;
3471 else
3472 score -= spNoLatency;
3473
3474 return score;
3475}
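A worked example of the score: with haveItem true and a measured latency of 120 ms, the deterministic part is 10000 - 120 * 30 = 6400; with unknown latency it is 10000 - 8000 = 2000; a random value in [0, 9999] is then added on top. A sketch of the deterministic part only:

#include <chrono>
#include <optional>

// Sketch: deterministic portion of PeerImp::getScore (random component omitted).
int deterministicScore(bool haveItem, std::optional<std::chrono::milliseconds> latency)
{
    int score = haveItem ? 10000 : 0;
    if (latency)
        score -= static_cast<int>(latency->count()) * 30;
    else
        score -= 8000;
    return score;
}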
3476
3477bool
3478PeerImp::isHighLatency() const
3479{
3480 std::lock_guard sl(recentLock_);
3481 return latency_ >= peerHighLatency;
3482}
3483
3484bool
3485PeerImp::reduceRelayReady()
3486{
3487 if (!reduceRelayReady_)
3488 reduceRelayReady_ =
3489 reduce_relay::epoch<std::chrono::minutes>(UptimeClock::now()) >
3490 reduce_relay::WAIT_ON_BOOTUP;
3491 return vpReduceRelayEnabled_ && reduceRelayReady_;
3492}
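Once the uptime threshold has been crossed the cached flag stays true, so the check is effectively a one-way gate on uptime plus the feature switch; a sketch with illustrative names:

#include <chrono>

// Sketch: reduce-relay traffic is only counted after the server has been up
// longer than reduce_relay::WAIT_ON_BOOTUP and the feature is enabled.
bool reduceRelayGateOpen(
    std::chrono::minutes uptime,
    std::chrono::minutes waitOnBootup,
    bool featureEnabled)
{
    return featureEnabled && uptime > waitOnBootup;
}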
3493
3494void
3495PeerImp::Metrics::add_message(std::uint64_t bytes)
3496{
3497 using namespace std::chrono_literals;
3498 std::unique_lock lock{mutex_};
3499
3500 totalBytes_ += bytes;
3501 accumBytes_ += bytes;
3502 auto const timeElapsed = clock_type::now() - intervalStart_;
3503 auto const timeElapsedInSecs =
3504 std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);
3505
3506 if (timeElapsedInSecs >= 1s)
3507 {
3508 auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
3509 rollingAvg_.push_back(avgBytes);
3510
3511 auto const totalBytes =
3512 std::accumulate(rollingAvg_.begin(), rollingAvg_.end(), 0ull);
3513 rollingAvgBytes_ = totalBytes / rollingAvg_.size();
3514
3515 intervalStart_ = clock_type::now();
3516 accumBytes_ = 0;
3517 }
3518}
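A worked example of the rolling average: 4096 bytes accumulated over a 2-second interval contributes a 2048 bytes/second sample; the reported figure is the mean of the buffered samples. A sketch of that final step, using std::vector in place of the circular buffer:

#include <cstdint>
#include <numeric>
#include <vector>

// Sketch: mean of the per-interval byte-rate samples kept by Metrics.
std::uint64_t rollingAverage(std::vector<std::uint64_t> const& samples)
{
    if (samples.empty())
        return 0;
    return std::accumulate(samples.begin(), samples.end(), std::uint64_t{0}) /
        samples.size();
}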
3519
3520 std::uint64_t
3521 PeerImp::Metrics::average_bytes() const
3522{
3523 std::shared_lock lock{mutex_};
3524 return rollingAvgBytes_;
3525}
3526
3527 std::uint64_t
3528 PeerImp::Metrics::total_bytes() const
3529{
3530 std::shared_lock lock{mutex_};
3531 return totalBytes_;
3532}
3533
3534} // namespace ripple