PeerImp.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLValidations.h>
21#include <xrpld/app/ledger/InboundLedgers.h>
22#include <xrpld/app/ledger/InboundTransactions.h>
23#include <xrpld/app/ledger/LedgerMaster.h>
24#include <xrpld/app/ledger/TransactionMaster.h>
25#include <xrpld/app/misc/HashRouter.h>
26#include <xrpld/app/misc/LoadFeeTrack.h>
27#include <xrpld/app/misc/NetworkOPs.h>
28#include <xrpld/app/misc/Transaction.h>
29#include <xrpld/app/misc/ValidatorList.h>
30#include <xrpld/app/tx/apply.h>
31#include <xrpld/overlay/Cluster.h>
32#include <xrpld/overlay/detail/PeerImp.h>
33#include <xrpld/overlay/detail/Tuning.h>
34#include <xrpld/perflog/PerfLog.h>
35
36#include <xrpl/basics/UptimeClock.h>
37#include <xrpl/basics/base64.h>
38#include <xrpl/basics/random.h>
39#include <xrpl/basics/safe_cast.h>
40#include <xrpl/protocol/digest.h>
41
42#include <boost/algorithm/string/predicate.hpp>
43#include <boost/beast/core/ostream.hpp>
44
45#include <algorithm>
46#include <memory>
47#include <mutex>
48#include <numeric>
49#include <sstream>
50
51using namespace std::chrono_literals;
52
53namespace ripple {
54
55namespace {
56/** Round-trip time above which a peer connection is treated as high latency. */
57std::chrono::milliseconds constexpr peerHighLatency{300};
58
59/** How often the peer timer fires to send a PING probe and run idle checks. */
60std::chrono::seconds constexpr peerTimerInterval{60};
61} // namespace
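// Note: these two constants work together. The peer timer (onTimer() below)
// fires every peerTimerInterval and sends a PING probe; the PONG handler folds
// the measured round-trip time into latency_, and peerHighLatency is the
// threshold that estimate can be compared against elsewhere in the overlay to
// treat a connection as slow.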
62
63// TODO: Remove this exclusion once unit tests are added after the hotfix
64// release.
65
66PeerImp::PeerImp(
67 Application& app,
68 id_t id,
69 std::shared_ptr<PeerFinder::Slot> const& slot,
70 http_request_type&& request,
71 PublicKey const& publicKey,
72 ProtocolVersion protocol,
73 Resource::Consumer consumer,
74 std::unique_ptr<stream_type>&& stream_ptr,
75 OverlayImpl& overlay)
76 : Child(overlay)
77 , app_(app)
78 , id_(id)
79 , sink_(app_.journal("Peer"), makePrefix(id))
80 , p_sink_(app_.journal("Protocol"), makePrefix(id))
81 , journal_(sink_)
82 , p_journal_(p_sink_)
83 , stream_ptr_(std::move(stream_ptr))
84 , socket_(stream_ptr_->next_layer().socket())
85 , stream_(*stream_ptr_)
86 , strand_(socket_.get_executor())
87 , timer_(waitable_timer{socket_.get_executor()})
88 , remote_address_(slot->remote_endpoint())
89 , overlay_(overlay)
90 , inbound_(true)
91 , protocol_(protocol)
92 , tracking_(Tracking::unknown)
93 , trackingTime_(clock_type::now())
94 , publicKey_(publicKey)
95 , lastPingTime_(clock_type::now())
96 , creationTime_(clock_type::now())
97 , squelch_(app_.journal("Squelch"))
98 , usage_(consumer)
99 , fee_{Resource::feeTrivialPeer, ""}
100 , slot_(slot)
101 , request_(std::move(request))
102 , headers_(request_)
103 , compressionEnabled_(
104 peerFeatureEnabled(
105 headers_,
106 FEATURE_COMPR,
107 "lz4",
108 app_.config().COMPRESSION)
109 ? Compressed::On
110 : Compressed::Off)
111 , txReduceRelayEnabled_(peerFeatureEnabled(
112 headers_,
113 FEATURE_TXRR,
114 app_.config().TX_REDUCE_RELAY_ENABLE))
115 , vpReduceRelayEnabled_(peerFeatureEnabled(
116 headers_,
117 FEATURE_VPRR,
118 app_.config().VP_REDUCE_RELAY_ENABLE))
119 , ledgerReplayEnabled_(peerFeatureEnabled(
120 headers_,
121 FEATURE_LEDGER_REPLAY,
122 app_.config().LEDGER_REPLAY))
123 , ledgerReplayMsgHandler_(app, app.getLedgerReplayer())
124{
125 JLOG(journal_.info()) << "compression enabled "
126 << (compressionEnabled_ == Compressed::On)
127 << " vp reduce-relay enabled "
128 << vpReduceRelayEnabled_
129 << " tx reduce-relay enabled "
130 << txReduceRelayEnabled_
131 << " " << id_;
132}
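// Note on the initializer list above: every optional feature (compression,
// tx/vp reduce-relay, ledger replay) is negotiated, not assumed.
// peerFeatureEnabled() only reports true when the peer advertised the feature
// in its handshake headers *and* the corresponding switch in the local config
// is on, so a flag being false here can mean either side declined it.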
133
134PeerImp::~PeerImp()
135{
136 const bool inCluster{cluster()};
137
138 overlay_.deletePeer(id_);
139 overlay_.onPeerDeactivate(id_);
140 overlay_.peerFinder().on_closed(slot_);
141 overlay_.remove(slot_);
142
143 if (inCluster)
144 {
145 JLOG(journal_.warn()) << name() << " left cluster";
146 }
147}
148
149// Helper function to check for valid uint256 values in protobuf buffers
150static bool
151stringIsUint256Sized(std::string const& pBuffStr)
152{
153 return pBuffStr.size() == uint256::size();
154}
155
156void
157PeerImp::run()
158{
159 if (!strand_.running_in_this_thread())
160 return post(strand_, std::bind(&PeerImp::run, shared_from_this()));
161
162 auto parseLedgerHash =
163 [](std::string_view value) -> std::optional<uint256> {
164 if (uint256 ret; ret.parseHex(value))
165 return ret;
166
167 if (auto const s = base64_decode(value); s.size() == uint256::size())
168 return uint256{s};
169
170 return std::nullopt;
171 };
172
173 std::optional<uint256> closed;
174 std::optional<uint256> previous;
175
176 if (auto const iter = headers_.find("Closed-Ledger");
177 iter != headers_.end())
178 {
179 closed = parseLedgerHash(iter->value());
180
181 if (!closed)
182 fail("Malformed handshake data (1)");
183 }
184
185 if (auto const iter = headers_.find("Previous-Ledger");
186 iter != headers_.end())
187 {
188 previous = parseLedgerHash(iter->value());
189
190 if (!previous)
191 fail("Malformed handshake data (2)");
192 }
193
194 if (previous && !closed)
195 fail("Malformed handshake data (3)");
196
197 {
198 std::lock_guard<std::mutex> sl(recentLock_);
199 if (closed)
200 closedLedgerHash_ = *closed;
201 if (previous)
202 previousLedgerHash_ = *previous;
203 }
204
205 if (inbound_)
206 doAccept();
207 else
208 doProtocolStart();
209
210 // Anything else that needs to be done with the connection should be
211 // done in doProtocolStart
212}
213
214void
215PeerImp::stop()
216{
217 if (!strand_.running_in_this_thread())
218 return post(strand_, std::bind(&PeerImp::stop, shared_from_this()));
219 if (socket_.is_open())
220 {
221 // The rationale for using different severity levels is that
222 // outbound connections are under our control and may be logged
223 // at a higher level, but inbound connections are more numerous and
224 // uncontrolled so to prevent log flooding the severity is reduced.
225 //
226 if (inbound_)
227 {
228 JLOG(journal_.debug()) << "Stop";
229 }
230 else
231 {
232 JLOG(journal_.info()) << "Stop";
233 }
234 }
235 close();
236}
237
238//------------------------------------------------------------------------------
239
240void
241PeerImp::send(std::shared_ptr<Message> const& m)
242{
243 if (!strand_.running_in_this_thread())
244 return post(strand_, std::bind(&PeerImp::send, shared_from_this(), m));
245 if (gracefulClose_)
246 return;
247 if (detaching_)
248 return;
249
250 auto validator = m->getValidatorKey();
251 if (validator && !squelch_.expireSquelch(*validator))
252 return;
253
254 overlay_.reportTraffic(
255 safe_cast<TrafficCount::category>(m->getCategory()),
256 false,
257 static_cast<int>(m->getBuffer(compressionEnabled_).size()));
258
259 auto sendq_size = send_queue_.size();
260
261 if (sendq_size < Tuning::targetSendQueue)
262 {
263 // To detect a peer that does not read from their
264 // side of the connection, we expect a peer to have
265 // a small sendq periodically
266 large_sendq_ = 0;
267 }
268 else if (auto sink = journal_.debug();
269 sink && (sendq_size % Tuning::sendQueueLogFreq) == 0)
270 {
271 std::string const n = name();
272 sink << (n.empty() ? remote_address_.to_string() : n)
273 << " sendq: " << sendq_size;
274 }
275
276 send_queue_.push(m);
277
278 if (sendq_size != 0)
279 return;
280
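// Only the call that pushed onto an *empty* queue starts an async_write;
// while the queue is non-empty an onWriteMessage() completion handler is
// already in flight and keeps draining send_queue_ (see onWriteMessage()
// below), so at most one write is ever outstanding on the stream.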
281 boost::asio::async_write(
282 stream_,
283 boost::asio::buffer(
284 send_queue_.front()->getBuffer(compressionEnabled_)),
285 bind_executor(
286 strand_,
287 std::bind(
288 &PeerImp::onWriteMessage,
289 shared_from_this(),
290 std::placeholders::_1,
291 std::placeholders::_2)));
292}
293
294void
296{
297 if (!strand_.running_in_this_thread())
298 return post(
300
301 if (!txQueue_.empty())
302 {
303 protocol::TMHaveTransactions ht;
304 std::for_each(txQueue_.begin(), txQueue_.end(), [&](auto const& hash) {
305 ht.add_hashes(hash.data(), hash.size());
306 });
307 JLOG(p_journal_.trace()) << "sendTxQueue " << txQueue_.size();
308 txQueue_.clear();
309 send(std::make_shared<Message>(ht, protocol::mtHAVE_TRANSACTIONS));
310 }
311}
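// Transaction reduce-relay in brief: instead of relaying full transactions to
// every peer, hashes are batched in txQueue_ (see addTxQueue() below) and
// flushed here as one TMHaveTransactions message; a peer missing any of them
// requests the bodies with TMGetObjectByHash(otTRANSACTIONS), handled further
// down in this file.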
312
313void
315{
316 if (!strand_.running_in_this_thread())
317 return post(
319
321 {
322 JLOG(p_journal_.warn()) << "addTxQueue exceeds the cap";
323 sendTxQueue();
324 }
325
326 txQueue_.insert(hash);
327 JLOG(p_journal_.trace()) << "addTxQueue " << txQueue_.size();
328}
329
330void
332{
333 if (!strand_.running_in_this_thread())
334 return post(
335 strand_,
337
338 auto removed = txQueue_.erase(hash);
339 JLOG(p_journal_.trace()) << "removeTxQueue " << removed;
340}
341
342void
344{
345 if ((usage_.charge(fee, context) == Resource::drop) &&
346 usage_.disconnect(p_journal_) && strand_.running_in_this_thread())
347 {
348 // Sever the connection
350 fail("charge: Resources");
351 }
352}
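// Resource accounting sketch: usage_.charge() adds the fee to this peer's
// load balance, which decays over time. Only when the accumulated balance
// crosses the drop threshold (Resource::drop) and the consumer agrees to
// disconnect is the connection actually failed.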
353
354//------------------------------------------------------------------------------
355
356bool
358{
359 auto const iter = headers_.find("Crawl");
360 if (iter == headers_.end())
361 return false;
362 return boost::iequals(iter->value(), "public");
363}
364
365bool
367{
368 return static_cast<bool>(app_.cluster().member(publicKey_));
369}
370
373{
374 if (inbound_)
375 return headers_["User-Agent"];
376 return headers_["Server"];
377}
378
380Json::Value
381PeerImp::json()
382{
383 Json::Value ret(Json::objectValue);
384 ret[jss::public_key] = toBase58(TokenType::NodePublic, publicKey_);
385 ret[jss::address] = remote_address_.to_string();
386
387 if (inbound_)
388 ret[jss::inbound] = true;
389
390 if (cluster())
391 {
392 ret[jss::cluster] = true;
393
394 if (auto const n = name(); !n.empty())
395 // Could move here if Json::Value supported moving from a string
396 ret[jss::name] = n;
397 }
398
399 if (auto const d = domain(); !d.empty())
400 ret[jss::server_domain] = std::string{d};
401
402 if (auto const nid = headers_["Network-ID"]; !nid.empty())
403 ret[jss::network_id] = std::string{nid};
404
405 ret[jss::load] = usage_.balance();
406
407 if (auto const version = getVersion(); !version.empty())
408 ret[jss::version] = std::string{version};
409
410 ret[jss::protocol] = to_string(protocol_);
411
412 {
414 if (latency_)
415 ret[jss::latency] = static_cast<Json::UInt>(latency_->count());
416 }
417
418 ret[jss::uptime] = static_cast<Json::UInt>(
419 std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());
420
421 std::uint32_t minSeq, maxSeq;
422 ledgerRange(minSeq, maxSeq);
423
424 if ((minSeq != 0) || (maxSeq != 0))
425 ret[jss::complete_ledgers] =
426 std::to_string(minSeq) + " - " + std::to_string(maxSeq);
427
428 switch (tracking_.load())
429 {
430 case Tracking::diverged:
431 ret[jss::track] = "diverged";
432 break;
433
434 case Tracking::unknown:
435 ret[jss::track] = "unknown";
436 break;
437
438 case Tracking::converged:
439 // Nothing to do here
440 break;
441 }
442
443 uint256 closedLedgerHash;
444 protocol::TMStatusChange last_status;
445 {
447 closedLedgerHash = closedLedgerHash_;
448 last_status = last_status_;
449 }
450
451 if (closedLedgerHash != beast::zero)
452 ret[jss::ledger] = to_string(closedLedgerHash);
453
454 if (last_status.has_newstatus())
455 {
456 switch (last_status.newstatus())
457 {
458 case protocol::nsCONNECTING:
459 ret[jss::status] = "connecting";
460 break;
461
462 case protocol::nsCONNECTED:
463 ret[jss::status] = "connected";
464 break;
465
466 case protocol::nsMONITORING:
467 ret[jss::status] = "monitoring";
468 break;
469
470 case protocol::nsVALIDATING:
471 ret[jss::status] = "validating";
472 break;
473
474 case protocol::nsSHUTTING:
475 ret[jss::status] = "shutting";
476 break;
477
478 default:
479 JLOG(p_journal_.warn())
480 << "Unknown status: " << last_status.newstatus();
481 }
482 }
483
484 ret[jss::metrics] = Json::Value(Json::objectValue);
485 ret[jss::metrics][jss::total_bytes_recv] =
486 std::to_string(metrics_.recv.total_bytes());
487 ret[jss::metrics][jss::total_bytes_sent] =
488 std::to_string(metrics_.sent.total_bytes());
489 ret[jss::metrics][jss::avg_bps_recv] =
490 std::to_string(metrics_.recv.average_bytes());
491 ret[jss::metrics][jss::avg_bps_sent] =
492 std::to_string(metrics_.sent.average_bytes());
493
494 return ret;
495}
496
497bool
498PeerImp::supportsFeature(ProtocolFeature f) const
499{
500 switch (f)
501 {
502 case ProtocolFeature::ValidatorListPropagation:
503 return protocol_ >= make_protocol(2, 1);
504 case ProtocolFeature::ValidatorList2Propagation:
505 return protocol_ >= make_protocol(2, 2);
506 case ProtocolFeature::LedgerReplay:
507 return ledgerReplayEnabled_;
508 }
509 return false;
510}
511
512//------------------------------------------------------------------------------
513
514bool
516{
517 {
519 if ((seq != 0) && (seq >= minLedger_) && (seq <= maxLedger_) &&
521 return true;
522 if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
523 recentLedgers_.end())
524 return true;
525 }
526 return false;
527}
528
529void
531{
533
534 minSeq = minLedger_;
535 maxSeq = maxLedger_;
536}
537
538bool
539PeerImp::hasTxSet(uint256 const& hash) const
540{
542 return std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
543 recentTxSets_.end();
544}
545
546void
548{
549 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
550 // guarded by recentLock_.
554}
555
556bool
558{
560 return (tracking_ != Tracking::diverged) && (uMin >= minLedger_) &&
561 (uMax <= maxLedger_);
562}
563
564//------------------------------------------------------------------------------
565
566void
568{
569 XRPL_ASSERT(
570 strand_.running_in_this_thread(),
571 "ripple::PeerImp::close : strand in this thread");
572 if (socket_.is_open())
573 {
574 detaching_ = true; // DEPRECATED
575 error_code ec;
576 timer_.cancel(ec);
577 socket_.close(ec);
579 if (inbound_)
580 {
581 JLOG(journal_.debug()) << "Closed";
582 }
583 else
584 {
585 JLOG(journal_.info()) << "Closed";
586 }
587 }
588}
589
590void
592{
593 if (!strand_.running_in_this_thread())
594 return post(
595 strand_,
596 std::bind(
597 (void(Peer::*)(std::string const&)) & PeerImp::fail,
599 reason));
601 {
602 std::string const n = name();
603 JLOG(journal_.warn()) << (n.empty() ? remote_address_.to_string() : n)
604 << " failed: " << reason;
605 }
606 close();
607}
608
609void
611{
612 XRPL_ASSERT(
613 strand_.running_in_this_thread(),
614 "ripple::PeerImp::fail : strand in this thread");
615 if (socket_.is_open())
616 {
617 JLOG(journal_.warn())
619 << " at " << remote_address_.to_string() << ": " << ec.message();
620 }
621 close();
622}
623
624void
626{
627 XRPL_ASSERT(
628 strand_.running_in_this_thread(),
629 "ripple::PeerImp::gracefulClose : strand in this thread");
630 XRPL_ASSERT(
631 socket_.is_open(), "ripple::PeerImp::gracefulClose : socket is open");
632 XRPL_ASSERT(
634 "ripple::PeerImp::gracefulClose : socket is not closing");
635 gracefulClose_ = true;
636 if (send_queue_.size() > 0)
637 return;
638 setTimer();
639 stream_.async_shutdown(bind_executor(
640 strand_,
641 std::bind(
642 &PeerImp::onShutdown, shared_from_this(), std::placeholders::_1)));
643}
644
645void
647{
648 error_code ec;
649 timer_.expires_from_now(peerTimerInterval, ec);
650
651 if (ec)
652 {
653 JLOG(journal_.error()) << "setTimer: " << ec.message();
654 return;
655 }
656 timer_.async_wait(bind_executor(
657 strand_,
658 std::bind(
659 &PeerImp::onTimer, shared_from_this(), std::placeholders::_1)));
660}
661
662// convenience for ignoring the error code
663void
665{
666 error_code ec;
667 timer_.cancel(ec);
668}
669
670//------------------------------------------------------------------------------
671
672std::string
673PeerImp::makePrefix(id_t id)
674{
675 std::stringstream ss;
676 ss << "[" << std::setfill('0') << std::setw(3) << id << "] ";
677 return ss.str();
678}
679
680void
682{
683 if (!socket_.is_open())
684 return;
685
686 if (ec == boost::asio::error::operation_aborted)
687 return;
688
689 if (ec)
690 {
691 // This should never happen
692 JLOG(journal_.error()) << "onTimer: " << ec.message();
693 return close();
694 }
695
697 {
698 fail("Large send queue");
699 return;
700 }
701
702 if (auto const t = tracking_.load(); !inbound_ && t != Tracking::converged)
703 {
704 clock_type::duration duration;
705
706 {
708 duration = clock_type::now() - trackingTime_;
709 }
710
711 if ((t == Tracking::diverged &&
712 (duration > app_.config().MAX_DIVERGED_TIME)) ||
713 (t == Tracking::unknown &&
714 (duration > app_.config().MAX_UNKNOWN_TIME)))
715 {
717 fail("Not useful");
718 return;
719 }
720 }
721
722 // Already waiting for PONG
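// (i.e. the probe sent on the previous timer tick was never answered within
// peerTimerInterval, so the peer is treated as unresponsive and dropped)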
723 if (lastPingSeq_)
724 {
725 fail("Ping Timeout");
726 return;
727 }
728
730 lastPingSeq_ = rand_int<std::uint32_t>();
731
732 protocol::TMPing message;
733 message.set_type(protocol::TMPing::ptPING);
734 message.set_seq(*lastPingSeq_);
735
736 send(std::make_shared<Message>(message, protocol::mtPING));
737
738 setTimer();
739}
740
741void
743{
744 cancelTimer();
745 // If we don't get eof then something went wrong
746 if (!ec)
747 {
748 JLOG(journal_.error()) << "onShutdown: expected error condition";
749 return close();
750 }
751 if (ec != boost::asio::error::eof)
752 return fail("onShutdown", ec);
753 close();
754}
755
756//------------------------------------------------------------------------------
757void
759{
760 XRPL_ASSERT(
761 read_buffer_.size() == 0,
762 "ripple::PeerImp::doAccept : empty read buffer");
763
764 JLOG(journal_.debug()) << "doAccept: " << remote_address_;
765
766 auto const sharedValue = makeSharedValue(*stream_ptr_, journal_);
767
768 // This shouldn't fail since we already computed
769 // the shared value successfully in OverlayImpl
770 if (!sharedValue)
771 return fail("makeSharedValue: Unexpected failure");
772
773 JLOG(journal_.info()) << "Protocol: " << to_string(protocol_);
774 JLOG(journal_.info()) << "Public Key: "
776
777 if (auto member = app_.cluster().member(publicKey_))
778 {
779 {
781 name_ = *member;
782 }
783 JLOG(journal_.info()) << "Cluster name: " << *member;
784 }
785
787
788 // XXX Set timer: connection is in grace period to be useful.
789 // XXX Set timer: connection idle (idle may vary depending on connection
790 // type.)
791
792 auto write_buffer = std::make_shared<boost::beast::multi_buffer>();
793
794 boost::beast::ostream(*write_buffer) << makeResponse(
796 request_,
799 *sharedValue,
801 protocol_,
802 app_);
803
804 // Write the whole buffer and only start protocol when that's done.
805 boost::asio::async_write(
806 stream_,
807 write_buffer->data(),
808 boost::asio::transfer_all(),
809 bind_executor(
810 strand_,
811 [this, write_buffer, self = shared_from_this()](
812 error_code ec, std::size_t bytes_transferred) {
813 if (!socket_.is_open())
814 return;
815 if (ec == boost::asio::error::operation_aborted)
816 return;
817 if (ec)
818 return fail("onWriteResponse", ec);
819 if (write_buffer->size() == bytes_transferred)
820 return doProtocolStart();
821 return fail("Failed to write header");
822 }));
823}
824
825std::string
826PeerImp::name() const
827{
828 std::shared_lock read_lock{nameMutex_};
829 return name_;
830}
831
834{
835 return headers_["Server-Domain"];
836}
837
838//------------------------------------------------------------------------------
839
840// Protocol logic
841
842void
844{
846
847 // Send all the validator lists that have been loaded
849 {
851 [&](std::string const& manifest,
852 std::uint32_t version,
854 PublicKey const& pubKey,
855 std::size_t maxSequence,
856 uint256 const& hash) {
858 *this,
859 0,
860 pubKey,
861 maxSequence,
862 version,
863 manifest,
864 blobInfos,
866 p_journal_);
867
868 // Don't send it next time.
870 });
871 }
872
873 if (auto m = overlay_.getManifestsMessage())
874 send(m);
875
876 setTimer();
877}
878
879// Called repeatedly with protocol message data
880void
882{
883 if (!socket_.is_open())
884 return;
885 if (ec == boost::asio::error::operation_aborted)
886 return;
887 if (ec == boost::asio::error::eof)
888 {
889 JLOG(journal_.info()) << "EOF";
890 return gracefulClose();
891 }
892 if (ec)
893 return fail("onReadMessage", ec);
894 if (auto stream = journal_.trace())
895 {
896 if (bytes_transferred > 0)
897 stream << "onReadMessage: " << bytes_transferred << " bytes";
898 else
899 stream << "onReadMessage";
900 }
901
902 metrics_.recv.add_message(bytes_transferred);
903
904 read_buffer_.commit(bytes_transferred);
905
906 auto hint = Tuning::readBufferBytes;
907
908 while (read_buffer_.size() > 0)
909 {
910 std::size_t bytes_consumed;
911
912 using namespace std::chrono_literals;
913 std::tie(bytes_consumed, ec) = perf::measureDurationAndLog(
914 [&]() {
915 return invokeProtocolMessage(read_buffer_.data(), *this, hint);
916 },
917 "invokeProtocolMessage",
918 350ms,
919 journal_);
920
921 if (ec)
922 return fail("onReadMessage", ec);
923 if (!socket_.is_open())
924 return;
925 if (gracefulClose_)
926 return;
927 if (bytes_consumed == 0)
928 break;
929 read_buffer_.consume(bytes_consumed);
930 }
931
932 // Timeout on writes only
933 stream_.async_read_some(
935 bind_executor(
936 strand_,
937 std::bind(
940 std::placeholders::_1,
941 std::placeholders::_2)));
942}
943
944void
946{
947 if (!socket_.is_open())
948 return;
949 if (ec == boost::asio::error::operation_aborted)
950 return;
951 if (ec)
952 return fail("onWriteMessage", ec);
953 if (auto stream = journal_.trace())
954 {
955 if (bytes_transferred > 0)
956 stream << "onWriteMessage: " << bytes_transferred << " bytes";
957 else
958 stream << "onWriteMessage";
959 }
960
961 metrics_.sent.add_message(bytes_transferred);
962
963 XRPL_ASSERT(
964 !send_queue_.empty(),
965 "ripple::PeerImp::onWriteMessage : non-empty send buffer");
966 send_queue_.pop();
967 if (!send_queue_.empty())
968 {
969 // Timeout on writes only
970 return boost::asio::async_write(
971 stream_,
972 boost::asio::buffer(
973 send_queue_.front()->getBuffer(compressionEnabled_)),
974 bind_executor(
975 strand_,
976 std::bind(
979 std::placeholders::_1,
980 std::placeholders::_2)));
981 }
982
983 if (gracefulClose_)
984 {
985 return stream_.async_shutdown(bind_executor(
986 strand_,
987 std::bind(
990 std::placeholders::_1)));
991 }
992}
993
994//------------------------------------------------------------------------------
995//
996// ProtocolHandler
997//
998//------------------------------------------------------------------------------
999
1000void
1002{
1003 // TODO
1004}
1005
1006void
1008 std::uint16_t type,
1010 std::size_t size,
1011 std::size_t uncompressed_size,
1012 bool isCompressed)
1013{
1014 auto const name = protocolMessageName(type);
1017 auto const category = TrafficCount::categorize(*m, type, true);
1018 overlay_.reportTraffic(category, true, static_cast<int>(size));
1019 using namespace protocol;
1020 if ((type == MessageType::mtTRANSACTION ||
1021 type == MessageType::mtHAVE_TRANSACTIONS ||
1022 type == MessageType::mtTRANSACTIONS ||
1023 // GET_OBJECTS
1025 // GET_LEDGER
1028 // LEDGER_DATA
1032 {
1034 static_cast<MessageType>(type), static_cast<std::uint64_t>(size));
1035 }
1036 JLOG(journal_.trace()) << "onMessageBegin: " << type << " " << size << " "
1037 << uncompressed_size << " " << isCompressed;
1038}
1039
1040void
1044{
1045 load_event_.reset();
1047}
1048
1049void
1051{
1052 auto const s = m->list_size();
1053
1054 if (s == 0)
1055 {
1057 return;
1058 }
1059
1060 if (s > 100)
1062
1064 jtMANIFEST, "receiveManifests", [this, that = shared_from_this(), m]() {
1065 overlay_.onManifests(m, that);
1066 });
1067}
1068
1069void
1071{
1072 if (m->type() == protocol::TMPing::ptPING)
1073 {
1074 // We have received a ping request, reply with a pong
1076 m->set_type(protocol::TMPing::ptPONG);
1077 send(std::make_shared<Message>(*m, protocol::mtPING));
1078 return;
1079 }
1080
1081 if (m->type() == protocol::TMPing::ptPONG && m->has_seq())
1082 {
1083 // Only reset the ping sequence if we actually received a
1084 // PONG with the correct cookie. That way, any peers which
1085 // respond with incorrect cookies will eventually time out.
1086 if (m->seq() == lastPingSeq_)
1087 {
1089
1090 // Update latency estimate
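// (exponentially weighted moving average: new = (7 * old + sample) / 8, so a
// single sample moves the estimate only 1/8 of the way toward it; e.g. an 80ms
// estimate and a 160ms sample give (80 * 7 + 160) / 8 = 90ms)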
1091 auto const rtt = std::chrono::round<std::chrono::milliseconds>(
1093
1095
1096 if (latency_)
1097 latency_ = (*latency_ * 7 + rtt) / 8;
1098 else
1099 latency_ = rtt;
1100 }
1101
1102 return;
1103 }
1104}
1105
1106void
1108{
1109 // VFALCO NOTE I think we should drop the peer immediately
1110 if (!cluster())
1111 {
1112 fee_.update(Resource::feeUselessData, "unknown cluster");
1113 return;
1114 }
1115
1116 for (int i = 0; i < m->clusternodes().size(); ++i)
1117 {
1118 protocol::TMClusterNode const& node = m->clusternodes(i);
1119
1121 if (node.has_nodename())
1122 name = node.nodename();
1123
1124 auto const publicKey =
1125 parseBase58<PublicKey>(TokenType::NodePublic, node.publickey());
1126
1127 // NIKB NOTE We should drop the peer immediately if
1128 // they send us a public key we can't parse
1129 if (publicKey)
1130 {
1131 auto const reportTime =
1132 NetClock::time_point{NetClock::duration{node.reporttime()}};
1133
1135 *publicKey, name, node.nodeload(), reportTime);
1136 }
1137 }
1138
1139 int loadSources = m->loadsources().size();
1140 if (loadSources != 0)
1141 {
1142 Resource::Gossip gossip;
1143 gossip.items.reserve(loadSources);
1144 for (int i = 0; i < m->loadsources().size(); ++i)
1145 {
1146 protocol::TMLoadSource const& node = m->loadsources(i);
1148 item.address = beast::IP::Endpoint::from_string(node.name());
1149 item.balance = node.cost();
1150 if (item.address != beast::IP::Endpoint())
1151 gossip.items.push_back(item);
1152 }
1154 }
1155
1156 // Calculate the cluster fee:
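// (the cluster fee is the median of the load fees reported by cluster nodes
// within the last 90 seconds; std::nth_element below places the middle element
// at index fees.size() / 2 without fully sorting, which for an even count
// selects the upper of the two middle values)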
1157 auto const thresh = app_.timeKeeper().now() - 90s;
1158 std::uint32_t clusterFee = 0;
1159
1161 fees.reserve(app_.cluster().size());
1162
1163 app_.cluster().for_each([&fees, thresh](ClusterNode const& status) {
1164 if (status.getReportTime() >= thresh)
1165 fees.push_back(status.getLoadFee());
1166 });
1167
1168 if (!fees.empty())
1169 {
1170 auto const index = fees.size() / 2;
1171 std::nth_element(fees.begin(), fees.begin() + index, fees.end());
1172 clusterFee = fees[index];
1173 }
1174
1175 app_.getFeeTrack().setClusterFee(clusterFee);
1176}
1177
1178void
1180{
1181 // Don't allow endpoints from peers that are not known tracking or are
1182 // not using a version of the message that we support:
1183 if (tracking_.load() != Tracking::converged || m->version() != 2)
1184 return;
1185
1186 // The number is arbitrary and doesn't have any real significance or
1187 // implication for the protocol.
1188 if (m->endpoints_v2().size() >= 1024)
1189 {
1190 fee_.update(Resource::feeUselessData, "endpoints too large");
1191 return;
1192 }
1193
1195 endpoints.reserve(m->endpoints_v2().size());
1196
1197 auto malformed = 0;
1198 for (auto const& tm : m->endpoints_v2())
1199 {
1200 auto result = beast::IP::Endpoint::from_string_checked(tm.endpoint());
1201
1202 if (!result)
1203 {
1204 JLOG(p_journal_.error()) << "failed to parse incoming endpoint: {"
1205 << tm.endpoint() << "}";
1206 malformed++;
1207 continue;
1208 }
1209
1210 // If hops == 0, this Endpoint describes the peer we are connected
1211 // to -- in that case, we take the remote address seen on the
1212 // socket and store that in the IP::Endpoint. If this is the first
1213 // time, then we'll verify that their listener can receive incoming
1214 // by performing a connectivity test. if hops > 0, then we just
1215 // take the address/port we were given
1216 if (tm.hops() == 0)
1217 result = remote_address_.at_port(result->port());
1218
1219 endpoints.emplace_back(*result, tm.hops());
1220 }
1221
1222 // Charge the peer for each malformed endpoint. As there still may be
1223 // multiple valid endpoints we don't return early.
1224 if (malformed > 0)
1225 {
1226 fee_.update(
1227 Resource::feeInvalidData * malformed,
1228 std::to_string(malformed) + " malformed endpoints");
1229 }
1230
1231 if (!endpoints.empty())
1232 overlay_.peerFinder().on_endpoints(slot_, endpoints);
1233}
1234
1235void
1237{
1238 handleTransaction(m, true, false);
1239}
1240
1241void
1244 bool eraseTxQueue,
1245 bool batch)
1246{
1247 XRPL_ASSERT(
1248 eraseTxQueue != batch,
1249 ("ripple::PeerImp::handleTransaction : valid inputs"));
1251 return;
1252
1254 {
1255 // If we've never been in synch, there's nothing we can do
1256 // with a transaction
1257 JLOG(p_journal_.debug())
1258 << "Ignoring incoming transaction: " << "Need network ledger";
1259 return;
1260 }
1261
1262 SerialIter sit(makeSlice(m->rawtransaction()));
1263
1264 try
1265 {
1266 auto stx = std::make_shared<STTx const>(sit);
1267 uint256 txID = stx->getTransactionID();
1268
1269 int flags;
1270 constexpr std::chrono::seconds tx_interval = 10s;
1271
1272 if (!app_.getHashRouter().shouldProcess(txID, id_, flags, tx_interval))
1273 {
1274 // we have seen this transaction recently
1275 if (flags & SF_BAD)
1276 {
1277 fee_.update(Resource::feeUselessData, "known bad");
1278 JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID;
1279 }
1280
1281 // Erase only if the server has seen this tx. If the server has not
1282 // seen this tx then the tx could not have been queued for this peer.
1283 else if (eraseTxQueue && txReduceRelayEnabled())
1284 removeTxQueue(txID);
1285
1286 return;
1287 }
1288
1289 JLOG(p_journal_.debug()) << "Got tx " << txID;
1290
1291 bool checkSignature = true;
1292 if (cluster())
1293 {
1294 if (!m->has_deferred() || !m->deferred())
1295 {
1296 // Skip local checks if a server we trust
1297 // put the transaction in its open ledger
1298 flags |= SF_TRUSTED;
1299 }
1300
1301 // for non-validator nodes only -- localPublicKey is set for
1302 // validators only
1304 {
1305 // For now, be paranoid and have each validator
1306 // check each transaction, regardless of source
1307 checkSignature = false;
1308 }
1309 }
1310
1312 {
1313 JLOG(p_journal_.trace())
1314 << "No new transactions until synchronized";
1315 }
1316 else if (
1319 {
1321 JLOG(p_journal_.info()) << "Transaction queue is full";
1322 }
1323 else
1324 {
1327 "recvTransaction->checkTransaction",
1329 flags,
1330 checkSignature,
1331 batch,
1332 stx]() {
1333 if (auto peer = weak.lock())
1334 peer->checkTransaction(
1335 flags, checkSignature, stx, batch);
1336 });
1337 }
1338 }
1339 catch (std::exception const& ex)
1340 {
1341 JLOG(p_journal_.warn())
1342 << "Transaction invalid: " << strHex(m->rawtransaction())
1343 << ". Exception: " << ex.what();
1344 }
1345}
1346
1347void
1349{
1350 auto badData = [&](std::string const& msg) {
1351 fee_.update(Resource::feeInvalidData, "get_ledger " + msg);
1352 JLOG(p_journal_.warn()) << "TMGetLedger: " << msg;
1353 };
1354 auto const itype{m->itype()};
1355
1356 // Verify ledger info type
1357 if (itype < protocol::liBASE || itype > protocol::liTS_CANDIDATE)
1358 return badData("Invalid ledger info type");
1359
1360 auto const ltype = [&m]() -> std::optional<::protocol::TMLedgerType> {
1361 if (m->has_ltype())
1362 return m->ltype();
1363 return std::nullopt;
1364 }();
1365
1366 if (itype == protocol::liTS_CANDIDATE)
1367 {
1368 if (!m->has_ledgerhash())
1369 return badData("Invalid TX candidate set, missing TX set hash");
1370 }
1371 else if (
1372 !m->has_ledgerhash() && !m->has_ledgerseq() &&
1373 !(ltype && *ltype == protocol::ltCLOSED))
1374 {
1375 return badData("Invalid request");
1376 }
1377
1378 // Verify ledger type
1379 if (ltype && (*ltype < protocol::ltACCEPTED || *ltype > protocol::ltCLOSED))
1380 return badData("Invalid ledger type");
1381
1382 // Verify ledger hash
1383 if (m->has_ledgerhash() && !stringIsUint256Sized(m->ledgerhash()))
1384 return badData("Invalid ledger hash");
1385
1386 // Verify ledger sequence
1387 if (m->has_ledgerseq())
1388 {
1389 auto const ledgerSeq{m->ledgerseq()};
1390
1391 // Check if within a reasonable range
1392 using namespace std::chrono_literals;
1394 ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1395 {
1396 return badData(
1397 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1398 }
1399 }
1400
1401 // Verify ledger node IDs
1402 if (itype != protocol::liBASE)
1403 {
1404 if (m->nodeids_size() <= 0)
1405 return badData("Invalid ledger node IDs");
1406
1407 for (auto const& nodeId : m->nodeids())
1408 {
1409 if (deserializeSHAMapNodeID(nodeId) == std::nullopt)
1410 return badData("Invalid SHAMap node ID");
1411 }
1412 }
1413
1414 // Verify query type
1415 if (m->has_querytype() && m->querytype() != protocol::qtINDIRECT)
1416 return badData("Invalid query type");
1417
1418 // Verify query depth
1419 if (m->has_querydepth())
1420 {
1421 if (m->querydepth() > Tuning::maxQueryDepth ||
1422 itype == protocol::liBASE)
1423 {
1424 return badData("Invalid query depth");
1425 }
1426 }
1427
1428 // Queue a job to process the request
1430 app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m]() {
1431 if (auto peer = weak.lock())
1432 peer->processLedgerRequest(m);
1433 });
1434}
1435
1436void
1438{
1439 JLOG(p_journal_.trace()) << "onMessage, TMProofPathRequest";
1441 {
1442 fee_.update(
1443 Resource::feeMalformedRequest, "proof_path_request disabled");
1444 return;
1445 }
1446
1447 fee_.update(
1448 Resource::feeModerateBurdenPeer, "received a proof path request");
1451 jtREPLAY_REQ, "recvProofPathRequest", [weak, m]() {
1452 if (auto peer = weak.lock())
1453 {
1454 auto reply =
1455 peer->ledgerReplayMsgHandler_.processProofPathRequest(m);
1456 if (reply.has_error())
1457 {
1458 if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1459 peer->charge(
1460 Resource::feeMalformedRequest,
1461 "proof_path_request");
1462 else
1463 peer->charge(
1464 Resource::feeRequestNoReply, "proof_path_request");
1465 }
1466 else
1467 {
1468 peer->send(std::make_shared<Message>(
1469 reply, protocol::mtPROOF_PATH_RESPONSE));
1470 }
1471 }
1472 });
1473}
1474
1475void
1477{
1478 if (!ledgerReplayEnabled_)
1479 {
1480 fee_.update(
1481 Resource::feeMalformedRequest, "proof_path_response disabled");
1482 return;
1483 }
1484
1485 if (!ledgerReplayMsgHandler_.processProofPathResponse(m))
1486 {
1487 fee_.update(Resource::feeInvalidData, "proof_path_response");
1488 }
1489}
1490
1491void
1493{
1494 JLOG(p_journal_.trace()) << "onMessage, TMReplayDeltaRequest";
1495 if (!ledgerReplayEnabled_)
1496 {
1497 fee_.update(
1498 Resource::feeMalformedRequest, "replay_delta_request disabled");
1499 return;
1500 }
1501
1502 fee_.fee = Resource::feeModerateBurdenPeer;
1503 std::weak_ptr<PeerImp> weak = shared_from_this();
1504 app_.getJobQueue().addJob(
1505 jtREPLAY_REQ, "recvReplayDeltaRequest", [weak, m]() {
1506 if (auto peer = weak.lock())
1507 {
1508 auto reply =
1509 peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m);
1510 if (reply.has_error())
1511 {
1512 if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1513 peer->charge(
1514 Resource::feeMalformedRequest,
1515 "replay_delta_request");
1516 else
1517 peer->charge(
1518 Resource::feeRequestNoReply,
1519 "replay_delta_request");
1520 }
1521 else
1522 {
1523 peer->send(std::make_shared<Message>(
1524 reply, protocol::mtREPLAY_DELTA_RESPONSE));
1525 }
1526 }
1527 });
1528}
1529
1530void
1532{
1533 if (!ledgerReplayEnabled_)
1534 {
1535 fee_.update(
1536 Resource::feeMalformedRequest, "replay_delta_response disabled");
1537 return;
1538 }
1539
1540 if (!ledgerReplayMsgHandler_.processReplayDeltaResponse(m))
1541 {
1542 fee_.update(Resource::feeInvalidData, "replay_delta_response");
1543 }
1544}
1545
1546void
1548{
1549 auto badData = [&](std::string const& msg) {
1550 fee_.update(Resource::feeInvalidData, msg);
1551 JLOG(p_journal_.warn()) << "TMLedgerData: " << msg;
1552 };
1553
1554 // Verify ledger hash
1555 if (!stringIsUint256Sized(m->ledgerhash()))
1556 return badData("Invalid ledger hash");
1557
1558 // Verify ledger sequence
1559 {
1560 auto const ledgerSeq{m->ledgerseq()};
1561 if (m->type() == protocol::liTS_CANDIDATE)
1562 {
1563 if (ledgerSeq != 0)
1564 {
1565 return badData(
1566 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1567 }
1568 }
1569 else
1570 {
1571 // Check if within a reasonable range
1572 using namespace std::chrono_literals;
1573 if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s &&
1574 ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1575 {
1576 return badData(
1577 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1578 }
1579 }
1580 }
1581
1582 // Verify ledger info type
1583 if (m->type() < protocol::liBASE || m->type() > protocol::liTS_CANDIDATE)
1584 return badData("Invalid ledger info type");
1585
1586 // Verify reply error
1587 if (m->has_error() &&
1588 (m->error() < protocol::reNO_LEDGER ||
1589 m->error() > protocol::reBAD_REQUEST))
1590 {
1591 return badData("Invalid reply error");
1592 }
1593
1594 // Verify ledger nodes.
1595 if (m->nodes_size() <= 0 || m->nodes_size() > Tuning::hardMaxReplyNodes)
1596 {
1597 return badData(
1598 "Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size()));
1599 }
1600
1601 // If there is a request cookie, attempt to relay the message
1602 if (m->has_requestcookie())
1603 {
1604 if (auto peer = overlay_.findPeerByShortID(m->requestcookie()))
1605 {
1606 m->clear_requestcookie();
1607 peer->send(std::make_shared<Message>(*m, protocol::mtLEDGER_DATA));
1608 }
1609 else
1610 {
1611 JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
1612 }
1613 return;
1614 }
1615
1616 uint256 const ledgerHash{m->ledgerhash()};
1617
1618 // Otherwise check if received data for a candidate transaction set
1619 if (m->type() == protocol::liTS_CANDIDATE)
1620 {
1621 std::weak_ptr<PeerImp> weak{shared_from_this()};
1622 app_.getJobQueue().addJob(
1623 jtTXN_DATA, "recvPeerData", [weak, ledgerHash, m]() {
1624 if (auto peer = weak.lock())
1625 {
1626 peer->app_.getInboundTransactions().gotData(
1627 ledgerHash, peer, m);
1628 }
1629 });
1630 return;
1631 }
1632
1633 // Consume the message
1634 app_.getInboundLedgers().gotLedgerData(ledgerHash, shared_from_this(), m);
1635}
1636
1637void
1639{
1640 protocol::TMProposeSet& set = *m;
1641
1642 auto const sig = makeSlice(set.signature());
1643
1644 // Preliminary check for the validity of the signature: A DER encoded
1645 // signature can't be longer than 72 bytes.
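// (the std::clamp check below rejects any size outside [64, 72]: if sig.size()
// falls outside that range, clamping changes the value and the comparison
// fails)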
1646 if ((std::clamp<std::size_t>(sig.size(), 64, 72) != sig.size()) ||
1647 (publicKeyType(makeSlice(set.nodepubkey())) != KeyType::secp256k1))
1648 {
1649 JLOG(p_journal_.warn()) << "Proposal: malformed";
1650 fee_.update(
1651 Resource::feeInvalidSignature,
1652 " signature can't be longer than 72 bytes");
1653 return;
1654 }
1655
1656 if (!stringIsUint256Sized(set.currenttxhash()) ||
1657 !stringIsUint256Sized(set.previousledger()))
1658 {
1659 JLOG(p_journal_.warn()) << "Proposal: malformed";
1660 fee_.update(Resource::feeMalformedRequest, "bad hashes");
1661 return;
1662 }
1663
1664 // RH TODO: when isTrusted = false we should probably also cache a key
1665 // suppression for 30 seconds to avoid doing a relatively expensive lookup
1666 // every time a spam packet is received
1667 PublicKey const publicKey{makeSlice(set.nodepubkey())};
1668 auto const isTrusted = app_.validators().trusted(publicKey);
1669
1670 // If the operator has specified that untrusted proposals be dropped then
1671 // this happens here, i.e. before wasting further CPU verifying the signature
1672 // of an untrusted key
1673 if (!isTrusted && app_.config().RELAY_UNTRUSTED_PROPOSALS == -1)
1674 return;
1675
1676 uint256 const proposeHash{set.currenttxhash()};
1677 uint256 const prevLedger{set.previousledger()};
1678
1679 NetClock::time_point const closeTime{NetClock::duration{set.closetime()}};
1680
1681 uint256 const suppression = proposalUniqueId(
1682 proposeHash,
1683 prevLedger,
1684 set.proposeseq(),
1685 closeTime,
1686 publicKey.slice(),
1687 sig);
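// The suppression ID uniquely identifies this proposal; the
// addSuppressionPeerWithStatus() call below uses it so a proposal relayed by
// many peers is processed only once, and repeat arrivals within IDLED seconds
// feed the squelching statistics instead.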
1688
1689 if (auto [added, relayed] =
1690 app_.getHashRouter().addSuppressionPeerWithStatus(suppression, id_);
1691 !added)
1692 {
1693 // Count unique messages (Slots has its own 'HashRouter'), which a peer
1694 // receives within IDLED seconds since the message has been relayed.
1695 if (reduceRelayReady() && relayed &&
1696 (stopwatch().now() - *relayed) < reduce_relay::IDLED)
1697 overlay_.updateSlotAndSquelch(
1698 suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);
1699 JLOG(p_journal_.trace()) << "Proposal: duplicate";
1700 return;
1701 }
1702
1703 if (!isTrusted)
1704 {
1705 if (tracking_.load() == Tracking::diverged)
1706 {
1707 JLOG(p_journal_.debug())
1708 << "Proposal: Dropping untrusted (peer divergence)";
1709 return;
1710 }
1711
1712 if (!cluster() && app_.getFeeTrack().isLoadedLocal())
1713 {
1714 JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (load)";
1715 return;
1716 }
1717 }
1718
1719 JLOG(p_journal_.trace())
1720 << "Proposal: " << (isTrusted ? "trusted" : "untrusted");
1721
1722 auto proposal = RCLCxPeerPos(
1723 publicKey,
1724 sig,
1725 suppression,
1727 prevLedger,
1728 set.proposeseq(),
1729 proposeHash,
1730 closeTime,
1731 app_.timeKeeper().closeTime(),
1732 calcNodeID(app_.validatorManifests().getMasterKey(publicKey))});
1733
1734 std::weak_ptr<PeerImp> weak = shared_from_this();
1735 app_.getJobQueue().addJob(
1736 isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut,
1737 "recvPropose->checkPropose",
1738 [weak, isTrusted, m, proposal]() {
1739 if (auto peer = weak.lock())
1740 peer->checkPropose(isTrusted, m, proposal);
1741 });
1742}
1743
1744void
1746{
1747 JLOG(p_journal_.trace()) << "Status: Change";
1748
1749 if (!m->has_networktime())
1750 m->set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1751
1752 {
1753 std::lock_guard sl(recentLock_);
1754 if (!last_status_.has_newstatus() || m->has_newstatus())
1755 last_status_ = *m;
1756 else
1757 {
1758 // preserve old status
1759 protocol::NodeStatus status = last_status_.newstatus();
1760 last_status_ = *m;
1761 m->set_newstatus(status);
1762 }
1763 }
1764
1765 if (m->newevent() == protocol::neLOST_SYNC)
1766 {
1767 bool outOfSync{false};
1768 {
1769 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1770 // guarded by recentLock_.
1771 std::lock_guard sl(recentLock_);
1772 if (!closedLedgerHash_.isZero())
1773 {
1774 outOfSync = true;
1775 closedLedgerHash_.zero();
1776 }
1777 previousLedgerHash_.zero();
1778 }
1779 if (outOfSync)
1780 {
1781 JLOG(p_journal_.debug()) << "Status: Out of sync";
1782 }
1783 return;
1784 }
1785
1786 {
1787 uint256 closedLedgerHash{};
1788 bool const peerChangedLedgers{
1789 m->has_ledgerhash() && stringIsUint256Sized(m->ledgerhash())};
1790
1791 {
1792 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1793 // guarded by recentLock_.
1794 std::lock_guard sl(recentLock_);
1795 if (peerChangedLedgers)
1796 {
1797 closedLedgerHash_ = m->ledgerhash();
1798 closedLedgerHash = closedLedgerHash_;
1799 addLedger(closedLedgerHash, sl);
1800 }
1801 else
1802 {
1803 closedLedgerHash_.zero();
1804 }
1805
1806 if (m->has_ledgerhashprevious() &&
1807 stringIsUint256Sized(m->ledgerhashprevious()))
1808 {
1809 previousLedgerHash_ = m->ledgerhashprevious();
1810 addLedger(previousLedgerHash_, sl);
1811 }
1812 else
1813 {
1814 previousLedgerHash_.zero();
1815 }
1816 }
1817 if (peerChangedLedgers)
1818 {
1819 JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;
1820 }
1821 else
1822 {
1823 JLOG(p_journal_.debug()) << "Status: No ledger";
1824 }
1825 }
1826
1827 if (m->has_firstseq() && m->has_lastseq())
1828 {
1829 std::lock_guard sl(recentLock_);
1830
1831 minLedger_ = m->firstseq();
1832 maxLedger_ = m->lastseq();
1833
1834 if ((maxLedger_ < minLedger_) || (minLedger_ == 0) || (maxLedger_ == 0))
1835 minLedger_ = maxLedger_ = 0;
1836 }
1837
1838 if (m->has_ledgerseq() &&
1839 app_.getLedgerMaster().getValidatedLedgerAge() < 2min)
1840 {
1841 checkTracking(
1842 m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex());
1843 }
1844
1845 app_.getOPs().pubPeerStatus([=, this]() -> Json::Value {
1847
1848 if (m->has_newstatus())
1849 {
1850 switch (m->newstatus())
1851 {
1852 case protocol::nsCONNECTING:
1853 j[jss::status] = "CONNECTING";
1854 break;
1855 case protocol::nsCONNECTED:
1856 j[jss::status] = "CONNECTED";
1857 break;
1858 case protocol::nsMONITORING:
1859 j[jss::status] = "MONITORING";
1860 break;
1861 case protocol::nsVALIDATING:
1862 j[jss::status] = "VALIDATING";
1863 break;
1864 case protocol::nsSHUTTING:
1865 j[jss::status] = "SHUTTING";
1866 break;
1867 }
1868 }
1869
1870 if (m->has_newevent())
1871 {
1872 switch (m->newevent())
1873 {
1874 case protocol::neCLOSING_LEDGER:
1875 j[jss::action] = "CLOSING_LEDGER";
1876 break;
1877 case protocol::neACCEPTED_LEDGER:
1878 j[jss::action] = "ACCEPTED_LEDGER";
1879 break;
1880 case protocol::neSWITCHED_LEDGER:
1881 j[jss::action] = "SWITCHED_LEDGER";
1882 break;
1883 case protocol::neLOST_SYNC:
1884 j[jss::action] = "LOST_SYNC";
1885 break;
1886 }
1887 }
1888
1889 if (m->has_ledgerseq())
1890 {
1891 j[jss::ledger_index] = m->ledgerseq();
1892 }
1893
1894 if (m->has_ledgerhash())
1895 {
1896 uint256 closedLedgerHash{};
1897 {
1898 std::lock_guard sl(recentLock_);
1899 closedLedgerHash = closedLedgerHash_;
1900 }
1901 j[jss::ledger_hash] = to_string(closedLedgerHash);
1902 }
1903
1904 if (m->has_networktime())
1905 {
1906 j[jss::date] = Json::UInt(m->networktime());
1907 }
1908
1909 if (m->has_firstseq() && m->has_lastseq())
1910 {
1911 j[jss::ledger_index_min] = Json::UInt(m->firstseq());
1912 j[jss::ledger_index_max] = Json::UInt(m->lastseq());
1913 }
1914
1915 return j;
1916 });
1917}
1918
1919void
1920PeerImp::checkTracking(std::uint32_t validationSeq)
1921{
1922 std::uint32_t serverSeq;
1923 {
1924 // Extract the sequence number of the highest
1925 // ledger this peer has
1926 std::lock_guard sl(recentLock_);
1927
1928 serverSeq = maxLedger_;
1929 }
1930 if (serverSeq != 0)
1931 {
1932 // Compare the peer's ledger sequence to the
1933 // sequence of a recently-validated ledger
1934 checkTracking(serverSeq, validationSeq);
1935 }
1936}
1937
1938void
1939PeerImp::checkTracking(std::uint32_t seq1, std::uint32_t seq2)
1940{
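// The max/min form below computes |seq1 - seq2| without risking unsigned
// wrap-around from a direct subtraction.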
1941 int diff = std::max(seq1, seq2) - std::min(seq1, seq2);
1942
1943 if (diff < Tuning::convergedLedgerLimit)
1944 {
1945 // The peer's ledger sequence is close to the validation's
1946 tracking_ = Tracking::converged;
1947 }
1948
1949 if ((diff > Tuning::divergedLedgerLimit) &&
1950 (tracking_.load() != Tracking::diverged))
1951 {
1952 // The peer's ledger sequence is way off the validation's
1953 std::lock_guard sl(recentLock_);
1954
1955 tracking_ = Tracking::diverged;
1956 trackingTime_ = clock_type::now();
1957 }
1958}
1959
1960void
1962{
1963 if (!stringIsUint256Sized(m->hash()))
1964 {
1965 fee_.update(Resource::feeMalformedRequest, "bad hash");
1966 return;
1967 }
1968
1969 uint256 const hash{m->hash()};
1970
1971 if (m->status() == protocol::tsHAVE)
1972 {
1973 std::lock_guard sl(recentLock_);
1974
1975 if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
1976 recentTxSets_.end())
1977 {
1978 fee_.update(Resource::feeUselessData, "duplicate (tsHAVE)");
1979 return;
1980 }
1981
1982 recentTxSets_.push_back(hash);
1983 }
1984}
1985
1986void
1987PeerImp::onValidatorListMessage(
1988 std::string const& messageType,
1989 std::string const& manifest,
1990 std::uint32_t version,
1991 std::vector<ValidatorBlobInfo> const& blobs)
1992{
1993 // If there are no blobs, the message is malformed (possibly because of
1994 // ValidatorList class rules), so charge accordingly and skip processing.
1995 if (blobs.empty())
1996 {
1997 JLOG(p_journal_.warn()) << "Ignored malformed " << messageType
1998 << " from peer " << remote_address_;
1999 // This shouldn't ever happen with a well-behaved peer
2000 fee_.update(Resource::feeHeavyBurdenPeer, "no blobs");
2001 return;
2002 }
2003
2004 auto const hash = sha512Half(manifest, blobs, version);
2005
2006 JLOG(p_journal_.debug())
2007 << "Received " << messageType << " from " << remote_address_.to_string()
2008 << " (" << id_ << ")";
2009
2010 if (!app_.getHashRouter().addSuppressionPeer(hash, id_))
2011 {
2012 JLOG(p_journal_.debug())
2013 << messageType << ": received duplicate " << messageType;
2014 // Charging this fee here won't hurt the peer in the normal
2015 // course of operation (ie. refresh every 5 minutes), but
2016 // will add up if the peer is misbehaving.
2017 fee_.update(Resource::feeUselessData, "duplicate");
2018 return;
2019 }
2020
2021 auto const applyResult = app_.validators().applyListsAndBroadcast(
2022 manifest,
2023 version,
2024 blobs,
2025 remote_address_.to_string(),
2026 hash,
2027 app_.overlay(),
2028 app_.getHashRouter(),
2029 app_.getOPs());
2030
2031 JLOG(p_journal_.debug())
2032 << "Processed " << messageType << " version " << version << " from "
2033 << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
2034 : "unknown or invalid publisher")
2035 << " from " << remote_address_.to_string() << " (" << id_
2036 << ") with best result " << to_string(applyResult.bestDisposition());
2037
2038 // Act based on the best result
2039 switch (applyResult.bestDisposition())
2040 {
2041 // New list
2042 case ListDisposition::accepted:
2043 // Newest list is expired, and that needs to be broadcast, too
2044 case ListDisposition::expired:
2045 // Future list
2046 case ListDisposition::pending: {
2047 std::lock_guard<std::mutex> sl(recentLock_);
2048
2049 XRPL_ASSERT(
2050 applyResult.publisherKey,
2051 "ripple::PeerImp::onValidatorListMessage : publisher key is "
2052 "set");
2053 auto const& pubKey = *applyResult.publisherKey;
2054#ifndef NDEBUG
2055 if (auto const iter = publisherListSequences_.find(pubKey);
2056 iter != publisherListSequences_.end())
2057 {
2058 XRPL_ASSERT(
2059 iter->second < applyResult.sequence,
2060 "ripple::PeerImp::onValidatorListMessage : lower sequence");
2061 }
2062#endif
2063 publisherListSequences_[pubKey] = applyResult.sequence;
2064 }
2065 break;
2066 case ListDisposition::same_sequence:
2067 case ListDisposition::known_sequence:
2068#ifndef NDEBUG
2069 {
2070 std::lock_guard<std::mutex> sl(recentLock_);
2071 XRPL_ASSERT(
2072 applyResult.sequence && applyResult.publisherKey,
2073 "ripple::PeerImp::onValidatorListMessage : nonzero sequence "
2074 "and set publisher key");
2075 XRPL_ASSERT(
2076 publisherListSequences_[*applyResult.publisherKey] <=
2077 applyResult.sequence,
2078 "ripple::PeerImp::onValidatorListMessage : maximum sequence");
2079 }
2080#endif // !NDEBUG
2081
2082 break;
2083 case ListDisposition::stale:
2084 case ListDisposition::untrusted:
2085 case ListDisposition::invalid:
2086 case ListDisposition::unsupported_version:
2087 break;
2088 default:
2089 UNREACHABLE(
2090 "ripple::PeerImp::onValidatorListMessage : invalid best list "
2091 "disposition");
2092 }
2093
2094 // Charge based on the worst result
2095 switch (applyResult.worstDisposition())
2096 {
2097 case ListDisposition::accepted:
2098 case ListDisposition::expired:
2099 case ListDisposition::pending:
2100 // No charges for good data
2101 break;
2102 case ListDisposition::same_sequence:
2103 case ListDisposition::known_sequence:
2104 // Charging this fee here won't hurt the peer in the normal
2105 // course of operation (ie. refresh every 5 minutes), but
2106 // will add up if the peer is misbehaving.
2107 fee_.update(
2108 Resource::feeUselessData,
2109 " duplicate (same_sequence or known_sequence)");
2110 break;
2111 case ListDisposition::stale:
2112 // There are very few good reasons for a peer to send an
2113 // old list, particularly more than once.
2114 fee_.update(Resource::feeInvalidData, "expired");
2115 break;
2116 case ListDisposition::untrusted:
2117 // Charging this fee here won't hurt the peer in the normal
2118 // course of operation (ie. refresh every 5 minutes), but
2119 // will add up if the peer is misbehaving.
2120 fee_.update(Resource::feeUselessData, "untrusted");
2121 break;
2122 case ListDisposition::invalid:
2123 // This shouldn't ever happen with a well-behaved peer
2124 fee_.update(
2125 Resource::feeInvalidSignature, "invalid list disposition");
2126 break;
2127 case ListDisposition::unsupported_version:
2128 // During a version transition, this may be legitimate.
2129 // If it happens frequently, that's probably bad.
2130 fee_.update(Resource::feeInvalidData, "version");
2131 break;
2132 default:
2133 UNREACHABLE(
2134 "ripple::PeerImp::onValidatorListMessage : invalid worst list "
2135 "disposition");
2136 }
2137
2138 // Log based on all the results.
2139 for (auto const& [disp, count] : applyResult.dispositions)
2140 {
2141 switch (disp)
2142 {
2143 // New list
2144 case ListDisposition::accepted:
2145 JLOG(p_journal_.debug())
2146 << "Applied " << count << " new " << messageType
2147 << "(s) from peer " << remote_address_;
2148 break;
2149 // Newest list is expired, and that needs to be broadcast, too
2150 case ListDisposition::expired:
2151 JLOG(p_journal_.debug())
2152 << "Applied " << count << " expired " << messageType
2153 << "(s) from peer " << remote_address_;
2154 break;
2155 // Future list
2156 case ListDisposition::pending:
2157 JLOG(p_journal_.debug())
2158 << "Processed " << count << " future " << messageType
2159 << "(s) from peer " << remote_address_;
2160 break;
2161 case ListDisposition::same_sequence:
2162 JLOG(p_journal_.warn())
2163 << "Ignored " << count << " " << messageType
2164 << "(s) with current sequence from peer "
2165 << remote_address_;
2166 break;
2167 case ListDisposition::known_sequence:
2168 JLOG(p_journal_.warn())
2169 << "Ignored " << count << " " << messageType
2170 << "(s) with future sequence from peer " << remote_address_;
2171 break;
2172 case ListDisposition::stale:
2173 JLOG(p_journal_.warn())
2174 << "Ignored " << count << "stale " << messageType
2175 << "(s) from peer " << remote_address_;
2176 break;
2177 case ListDisposition::untrusted:
2178 JLOG(p_journal_.warn())
2179 << "Ignored " << count << " untrusted " << messageType
2180 << "(s) from peer " << remote_address_;
2181 break;
2182 case ListDisposition::unsupported_version:
2183 JLOG(p_journal_.warn())
2184 << "Ignored " << count << "unsupported version "
2185 << messageType << "(s) from peer " << remote_address_;
2186 break;
2187 case ListDisposition::invalid:
2188 JLOG(p_journal_.warn())
2189 << "Ignored " << count << "invalid " << messageType
2190 << "(s) from peer " << remote_address_;
2191 break;
2192 default:
2193 UNREACHABLE(
2194 "ripple::PeerImp::onValidatorListMessage : invalid list "
2195 "disposition");
2196 }
2197 }
2198}
2199
2200void
2202{
2203 try
2204 {
2205 if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))
2206 {
2207 JLOG(p_journal_.debug())
2208 << "ValidatorList: received validator list from peer using "
2209 << "protocol version " << to_string(protocol_)
2210 << " which shouldn't support this feature.";
2211 fee_.update(Resource::feeUselessData, "unsupported peer");
2212 return;
2213 }
2214 onValidatorListMessage(
2215 "ValidatorList",
2216 m->manifest(),
2217 m->version(),
2218 ValidatorList::parseBlobs(*m));
2219 }
2220 catch (std::exception const& e)
2221 {
2222 JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what()
2223 << " from peer " << remote_address_;
2224 using namespace std::string_literals;
2225 fee_.update(Resource::feeInvalidData, e.what());
2226 }
2227}
2228
2229void
2230PeerImp::onMessage(
2232{
2233 try
2234 {
2235 if (!supportsFeature(ProtocolFeature::ValidatorList2Propagation))
2236 {
2237 JLOG(p_journal_.debug())
2238 << "ValidatorListCollection: received validator list from peer "
2239 << "using protocol version " << to_string(protocol_)
2240 << " which shouldn't support this feature.";
2241 fee_.update(Resource::feeUselessData, "unsupported peer");
2242 return;
2243 }
2244 else if (m->version() < 2)
2245 {
2246 JLOG(p_journal_.debug())
2247 << "ValidatorListCollection: received invalid validator list "
2248 "version "
2249 << m->version() << " from peer using protocol version "
2250 << to_string(protocol_);
2251 fee_.update(Resource::feeInvalidData, "wrong version");
2252 return;
2253 }
2254 onValidatorListMessage(
2255 "ValidatorListCollection",
2256 m->manifest(),
2257 m->version(),
2258 ValidatorList::parseBlobs(*m));
2259 }
2260 catch (std::exception const& e)
2261 {
2262 JLOG(p_journal_.warn()) << "ValidatorListCollection: Exception, "
2263 << e.what() << " from peer " << remote_address_;
2264 using namespace std::string_literals;
2265 fee_.update(Resource::feeInvalidData, e.what());
2266 }
2267}
2268
2269void
2271{
2272 if (m->validation().size() < 50)
2273 {
2274 JLOG(p_journal_.warn()) << "Validation: Too small";
2275 fee_.update(Resource::feeMalformedRequest, "too small");
2276 return;
2277 }
2278
2279 try
2280 {
2281 auto const closeTime = app_.timeKeeper().closeTime();
2282
2284 {
2285 SerialIter sit(makeSlice(m->validation()));
2286 val = std::make_shared<STValidation>(
2287 std::ref(sit),
2288 [this](PublicKey const& pk) {
2289 return calcNodeID(
2290 app_.validatorManifests().getMasterKey(pk));
2291 },
2292 false);
2293 val->setSeen(closeTime);
2294 }
2295
2296 if (!isCurrent(
2297 app_.getValidations().parms(),
2298 app_.timeKeeper().closeTime(),
2299 val->getSignTime(),
2300 val->getSeenTime()))
2301 {
2302 JLOG(p_journal_.trace()) << "Validation: Not current";
2303 fee_.update(Resource::feeUselessData, "not current");
2304 return;
2305 }
2306
2307 // RH TODO: when isTrusted = false we should probably also cache a key
2308 // suppression for 30 seconds to avoid doing a relatively expensive
2309 // lookup every time a spam packet is received
2310 auto const isTrusted =
2311 app_.validators().trusted(val->getSignerPublic());
2312
2313 // If the operator has specified that untrusted validations be dropped
2314 // then this happens here, i.e. before wasting further CPU verifying the
2315 // signature of an untrusted key
2316 if (!isTrusted && app_.config().RELAY_UNTRUSTED_VALIDATIONS == -1)
2317 return;
2318
2319 auto key = sha512Half(makeSlice(m->validation()));
2320
2321 if (auto [added, relayed] =
2322 app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);
2323 !added)
2324 {
2325            // Count unique messages (Slots has its own 'HashRouter') that a
2326            // peer receives within IDLED seconds since the message was
2327 // relayed. Wait WAIT_ON_BOOTUP time to let the server establish
2328 // connections to peers.
2329 if (reduceRelayReady() && relayed &&
2330 (stopwatch().now() - *relayed) < reduce_relay::IDLED)
2331 overlay_.updateSlotAndSquelch(
2332 key, val->getSignerPublic(), id_, protocol::mtVALIDATION);
2333 JLOG(p_journal_.trace()) << "Validation: duplicate";
2334 return;
2335 }
2336
2337 if (!isTrusted && (tracking_.load() == Tracking::diverged))
2338 {
2339 JLOG(p_journal_.debug())
2340 << "Dropping untrusted validation from diverged peer";
2341 }
2342 else if (isTrusted || !app_.getFeeTrack().isLoadedLocal())
2343 {
2344 std::string const name = [isTrusted, val]() {
2345 std::string ret =
2346 isTrusted ? "Trusted validation" : "Untrusted validation";
2347
2348#ifdef DEBUG
2349 ret += " " +
2350 std::to_string(val->getFieldU32(sfLedgerSequence)) + ": " +
2351 to_string(val->getNodeID());
2352#endif
2353
2354 return ret;
2355 }();
2356
2357 std::weak_ptr<PeerImp> weak = shared_from_this();
2358 app_.getJobQueue().addJob(
2359 isTrusted ? jtVALIDATION_t : jtVALIDATION_ut,
2360 name,
2361 [weak, val, m, key]() {
2362 if (auto peer = weak.lock())
2363 peer->checkValidation(val, key, m);
2364 });
2365 }
2366 else
2367 {
2368 JLOG(p_journal_.debug())
2369 << "Dropping untrusted validation for load";
2370 }
2371 }
2372 catch (std::exception const& e)
2373 {
2374 JLOG(p_journal_.warn())
2375 << "Exception processing validation: " << e.what();
2376 using namespace std::string_literals;
2377 fee_.update(Resource::feeMalformedRequest, e.what());
2378 }
2379}
2380
2381void
2382PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
2383{
2384 protocol::TMGetObjectByHash& packet = *m;
2385
2386 JLOG(p_journal_.trace()) << "received TMGetObjectByHash " << packet.type()
2387 << " " << packet.objects_size();
2388
2389 if (packet.query())
2390 {
2391 // this is a query
2392 if (send_queue_.size() >= Tuning::dropSendQueue)
2393 {
2394 JLOG(p_journal_.debug()) << "GetObject: Large send queue";
2395 return;
2396 }
2397
2398 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2399 {
2400 doFetchPack(m);
2401 return;
2402 }
2403
2404 if (packet.type() == protocol::TMGetObjectByHash::otTRANSACTIONS)
2405 {
2406 if (!txReduceRelayEnabled())
2407 {
2408 JLOG(p_journal_.error())
2409 << "TMGetObjectByHash: tx reduce-relay is disabled";
2410 fee_.update(Resource::feeMalformedRequest, "disabled");
2411 return;
2412 }
2413
2414 std::weak_ptr<PeerImp> weak = shared_from_this();
2415 app_.getJobQueue().addJob(
2416 jtREQUESTED_TXN, "doTransactions", [weak, m]() {
2417 if (auto peer = weak.lock())
2418 peer->doTransactions(m);
2419 });
2420 return;
2421 }
2422
2423 protocol::TMGetObjectByHash reply;
2424
2425 reply.set_query(false);
2426
2427 if (packet.has_seq())
2428 reply.set_seq(packet.seq());
2429
2430 reply.set_type(packet.type());
2431
2432 if (packet.has_ledgerhash())
2433 {
2434 if (!stringIsUint256Sized(packet.ledgerhash()))
2435 {
2436 fee_.update(Resource::feeMalformedRequest, "ledger hash");
2437 return;
2438 }
2439
2440 reply.set_ledgerhash(packet.ledgerhash());
2441 }
2442
2443 fee_.update(
2444 Resource::feeModerateBurdenPeer,
2445 " received a get object by hash request");
2446
2447 // This is a very minimal implementation
2448 for (int i = 0; i < packet.objects_size(); ++i)
2449 {
2450 auto const& obj = packet.objects(i);
2451 if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2452 {
2453 uint256 const hash{obj.hash()};
2454                // VFALCO TODO Move this someplace more sensible so we don't
2455 // need to inject the NodeStore interfaces.
2456 std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
2457 auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
2458 if (nodeObject)
2459 {
2460 protocol::TMIndexedObject& newObj = *reply.add_objects();
2461 newObj.set_hash(hash.begin(), hash.size());
2462 newObj.set_data(
2463 &nodeObject->getData().front(),
2464 nodeObject->getData().size());
2465
2466 if (obj.has_nodeid())
2467 newObj.set_index(obj.nodeid());
2468 if (obj.has_ledgerseq())
2469 newObj.set_ledgerseq(obj.ledgerseq());
2470
2471 // VFALCO NOTE "seq" in the message is obsolete
2472 }
2473 }
2474 }
2475
2476 JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size() << " of "
2477 << packet.objects_size();
2478 send(std::make_shared<Message>(reply, protocol::mtGET_OBJECTS));
2479 }
2480 else
2481 {
2482 // this is a reply
2483 std::uint32_t pLSeq = 0;
2484 bool pLDo = true;
2485 bool progress = false;
2486
2487 for (int i = 0; i < packet.objects_size(); ++i)
2488 {
2489 const protocol::TMIndexedObject& obj = packet.objects(i);
2490
2491 if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2492 {
2493 if (obj.has_ledgerseq())
2494 {
2495 if (obj.ledgerseq() != pLSeq)
2496 {
2497 if (pLDo && (pLSeq != 0))
2498 {
2499 JLOG(p_journal_.debug())
2500 << "GetObj: Full fetch pack for " << pLSeq;
2501 }
2502 pLSeq = obj.ledgerseq();
2503 pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);
2504
2505 if (!pLDo)
2506 {
2507 JLOG(p_journal_.debug())
2508 << "GetObj: Late fetch pack for " << pLSeq;
2509 }
2510 else
2511 progress = true;
2512 }
2513 }
2514
2515 if (pLDo)
2516 {
2517 uint256 const hash{obj.hash()};
2518
2519 app_.getLedgerMaster().addFetchPack(
2520 hash,
2521 std::make_shared<Blob>(
2522 obj.data().begin(), obj.data().end()));
2523 }
2524 }
2525 }
2526
2527 if (pLDo && (pLSeq != 0))
2528 {
2529 JLOG(p_journal_.debug())
2530 << "GetObj: Partial fetch pack for " << pLSeq;
2531 }
2532 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2533 app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
2534 }
2535}
2536
2537void
2538PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactions> const& m)
2539{
2540 if (!txReduceRelayEnabled())
2541 {
2542 JLOG(p_journal_.error())
2543 << "TMHaveTransactions: tx reduce-relay is disabled";
2544 fee_.update(Resource::feeMalformedRequest, "disabled");
2545 return;
2546 }
2547
2548 std::weak_ptr<PeerImp> weak = shared_from_this();
2549 app_.getJobQueue().addJob(
2550 jtMISSING_TXN, "handleHaveTransactions", [weak, m]() {
2551 if (auto peer = weak.lock())
2552 peer->handleHaveTransactions(m);
2553 });
2554}
2555
2556void
2557PeerImp::handleHaveTransactions(
2558    std::shared_ptr<protocol::TMHaveTransactions> const& m)
2559{
2560 protocol::TMGetObjectByHash tmBH;
2561 tmBH.set_type(protocol::TMGetObjectByHash_ObjectType_otTRANSACTIONS);
2562 tmBH.set_query(true);
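    // Ask the peer only for the transactions we do not already hold in the
    // transaction master cache; hashes we already know are instead removed
    // from the per-peer transaction queue below.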
2563
2564 JLOG(p_journal_.trace())
2565 << "received TMHaveTransactions " << m->hashes_size();
2566
2567 for (std::uint32_t i = 0; i < m->hashes_size(); i++)
2568 {
2569 if (!stringIsUint256Sized(m->hashes(i)))
2570 {
2571 JLOG(p_journal_.error())
2572 << "TMHaveTransactions with invalid hash size";
2573 fee_.update(Resource::feeMalformedRequest, "hash size");
2574 return;
2575 }
2576
2577 uint256 hash(m->hashes(i));
2578
2579 auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2580
2581 JLOG(p_journal_.trace()) << "checking transaction " << (bool)txn;
2582
2583 if (!txn)
2584 {
2585 JLOG(p_journal_.debug()) << "adding transaction to request";
2586
2587 auto obj = tmBH.add_objects();
2588 obj->set_hash(hash.data(), hash.size());
2589 }
2590 else
2591 {
2592 // Erase only if a peer has seen this tx. If the peer has not
2593            // seen this tx then the tx could not have been queued for this
2594 // peer.
2595 removeTxQueue(hash);
2596 }
2597 }
2598
2599 JLOG(p_journal_.trace())
2600 << "transaction request object is " << tmBH.objects_size();
2601
2602 if (tmBH.objects_size() > 0)
2603 send(std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS));
2604}
2605
2606void
2607PeerImp::onMessage(std::shared_ptr<protocol::TMTransactions> const& m)
2608{
2609 if (!txReduceRelayEnabled())
2610 {
2611 JLOG(p_journal_.error())
2612 << "TMTransactions: tx reduce-relay is disabled";
2613 fee_.update(Resource::feeMalformedRequest, "disabled");
2614 return;
2615 }
2616
2617 JLOG(p_journal_.trace())
2618 << "received TMTransactions " << m->transactions_size();
2619
2620 overlay_.addTxMetrics(m->transactions_size());
2621
2622 for (std::uint32_t i = 0; i < m->transactions_size(); ++i)
2623 handleTransaction(
2624            std::shared_ptr<protocol::TMTransaction>(
2625                m->mutable_transactions(i), [](protocol::TMTransaction*) {}),
2626 false,
2627 true);
2628}
2629
2630void
2631PeerImp::onMessage(std::shared_ptr<protocol::TMSquelch> const& m)
2632{
2633 using on_message_fn =
2634        void (PeerImp::*)(std::shared_ptr<protocol::TMSquelch> const&);
2635    if (!strand_.running_in_this_thread())
2636 return post(
2637 strand_,
2638 std::bind(
2639 (on_message_fn)&PeerImp::onMessage, shared_from_this(), m));
2640
2641 if (!m->has_validatorpubkey())
2642 {
2643 fee_.update(Resource::feeInvalidData, "squelch no pubkey");
2644 return;
2645 }
2646 auto validator = m->validatorpubkey();
2647 auto const slice{makeSlice(validator)};
2648 if (!publicKeyType(slice))
2649 {
2650 fee_.update(Resource::feeInvalidData, "squelch bad pubkey");
2651 return;
2652 }
2653 PublicKey key(slice);
2654
2655 // Ignore non-validator squelch
2656 if (!app_.validators().listed(key))
2657 {
2658 fee_.update(Resource::feeInvalidData, "squelch non-validator");
2659 JLOG(p_journal_.debug())
2660 << "onMessage: TMSquelch discarding non-validator squelch "
2661 << slice;
2662 return;
2663 }
2664
2665 // Ignore the squelch for validator's own messages.
2666 if (key == app_.getValidationPublicKey())
2667 {
2668 JLOG(p_journal_.debug())
2669 << "onMessage: TMSquelch discarding validator's squelch " << slice;
2670 return;
2671 }
2672
2673 std::uint32_t duration =
2674 m->has_squelchduration() ? m->squelchduration() : 0;
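    // squelch() == false lifts any existing squelch for this validator;
    // otherwise its messages are suppressed for the requested duration.
    // addSquelch() returns false for an unacceptable duration, which is
    // treated as invalid data.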
2675 if (!m->squelch())
2676 squelch_.removeSquelch(key);
2677 else if (!squelch_.addSquelch(key, std::chrono::seconds{duration}))
2678 fee_.update(Resource::feeInvalidData, "squelch duration");
2679
2680 JLOG(p_journal_.debug())
2681 << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;
2682}
2683
2684//--------------------------------------------------------------------------
2685
2686void
2687PeerImp::addLedger(
2688 uint256 const& hash,
2689 std::lock_guard<std::mutex> const& lockedRecentLock)
2690{
2691 // lockedRecentLock is passed as a reminder that recentLock_ must be
2692 // locked by the caller.
2693 (void)lockedRecentLock;
2694
2695 if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
2696 recentLedgers_.end())
2697 return;
2698
2699 recentLedgers_.push_back(hash);
2700}
2701
2702void
2703PeerImp::doFetchPack(const std::shared_ptr<protocol::TMGetObjectByHash>& packet)
2704{
2705 // VFALCO TODO Invert this dependency using an observer and shared state
2706 // object. Don't queue fetch pack jobs if we're under load or we already
2707 // have some queued.
2708 if (app_.getFeeTrack().isLoadedLocal() ||
2709 (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
2710 (app_.getJobQueue().getJobCount(jtPACK) > 10))
2711 {
2712 JLOG(p_journal_.info()) << "Too busy to make fetch pack";
2713 return;
2714 }
2715
2716 if (!stringIsUint256Sized(packet->ledgerhash()))
2717 {
2718 JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
2719 fee_.update(Resource::feeMalformedRequest, "hash size");
2720 return;
2721 }
2722
2723 fee_.fee = Resource::feeHeavyBurdenPeer;
2724
2725 uint256 const hash{packet->ledgerhash()};
2726
2727 std::weak_ptr<PeerImp> weak = shared_from_this();
2728 auto elapsed = UptimeClock::now();
2729 auto const pap = &app_;
2730 app_.getJobQueue().addJob(
2731 jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed]() {
2732 pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
2733 });
2734}
2735
2736void
2737PeerImp::doTransactions(
2738    std::shared_ptr<protocol::TMGetObjectByHash> const& packet)
2739{
2740 protocol::TMTransactions reply;
2741
2742 JLOG(p_journal_.trace()) << "received TMGetObjectByHash requesting tx "
2743 << packet->objects_size();
2744
2745 if (packet->objects_size() > reduce_relay::MAX_TX_QUEUE_SIZE)
2746 {
2747 JLOG(p_journal_.error()) << "doTransactions, invalid number of hashes";
2748 fee_.update(Resource::feeMalformedRequest, "too big");
2749 return;
2750 }
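    // Serialize each requested transaction from the local cache into the
    // reply; any hash that is malformed or cannot be found aborts the entire
    // reply and charges the requester.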
2751
2752 for (std::uint32_t i = 0; i < packet->objects_size(); ++i)
2753 {
2754 auto const& obj = packet->objects(i);
2755
2756 if (!stringIsUint256Sized(obj.hash()))
2757 {
2758 fee_.update(Resource::feeMalformedRequest, "hash size");
2759 return;
2760 }
2761
2762 uint256 hash(obj.hash());
2763
2764 auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2765
2766 if (!txn)
2767 {
2768 JLOG(p_journal_.error()) << "doTransactions, transaction not found "
2769 << Slice(hash.data(), hash.size());
2770 fee_.update(Resource::feeMalformedRequest, "tx not found");
2771 return;
2772 }
2773
2774 Serializer s;
2775 auto tx = reply.add_transactions();
2776 auto sttx = txn->getSTransaction();
2777 sttx->add(s);
2778 tx->set_rawtransaction(s.data(), s.size());
2779 tx->set_status(
2780 txn->getStatus() == INCLUDED ? protocol::tsCURRENT
2781 : protocol::tsNEW);
2782 tx->set_receivetimestamp(
2783 app_.timeKeeper().now().time_since_epoch().count());
2784 tx->set_deferred(txn->getSubmitResult().queued);
2785 }
2786
2787 if (reply.transactions_size() > 0)
2788 send(std::make_shared<Message>(reply, protocol::mtTRANSACTIONS));
2789}
2790
2791void
2792PeerImp::checkTransaction(
2793 int flags,
2794 bool checkSignature,
2795 std::shared_ptr<STTx const> const& stx,
2796 bool batch)
2797{
2798 // VFALCO TODO Rewrite to not use exceptions
2799 try
2800 {
2801 // Expired?
2802 if (stx->isFieldPresent(sfLastLedgerSequence) &&
2803 (stx->getFieldU32(sfLastLedgerSequence) <
2804 app_.getLedgerMaster().getValidLedgerIndex()))
2805 {
2806 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2807 charge(Resource::feeUselessData, "expired tx");
2808 return;
2809 }
2810
2811 if (isPseudoTx(*stx))
2812 {
2813 // Don't do anything with pseudo transactions except put them in the
2814 // TransactionMaster cache
2815 std::string reason;
2816 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2817 XRPL_ASSERT(
2818 tx->getStatus() == NEW,
2819 "ripple::PeerImp::checkTransaction Transaction created "
2820 "correctly");
2821 if (tx->getStatus() == NEW)
2822 {
2823 JLOG(p_journal_.debug())
2824 << "Processing " << (batch ? "batch" : "unsolicited")
2825 << " pseudo-transaction tx " << tx->getID();
2826
2827 app_.getMasterTransaction().canonicalize(&tx);
2828 // Tell the overlay about it, but don't relay it.
2829 auto const toSkip =
2830 app_.getHashRouter().shouldRelay(tx->getID());
2831 if (toSkip)
2832 {
2833 JLOG(p_journal_.debug())
2834                        << "Passing skipped pseudo-transaction tx "
2835 << tx->getID();
2836 app_.overlay().relay(tx->getID(), {}, *toSkip);
2837 }
2838 if (!batch)
2839 {
2840 JLOG(p_journal_.debug())
2841 << "Charging for pseudo-transaction tx " << tx->getID();
2842 charge(Resource::feeUselessData, "pseudo tx");
2843 }
2844
2845 return;
2846 }
2847 }
2848
2849 if (checkSignature)
2850 {
2851 // Check the signature before handing off to the job queue.
2852 if (auto [valid, validReason] = checkValidity(
2853 app_.getHashRouter(),
2854 *stx,
2855 app_.getLedgerMaster().getValidatedRules(),
2856 app_.config());
2857 valid != Validity::Valid)
2858 {
2859 if (!validReason.empty())
2860 {
2861 JLOG(p_journal_.trace())
2862 << "Exception checking transaction: " << validReason;
2863 }
2864
2865 // Probably not necessary to set SF_BAD, but doesn't hurt.
2866 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2867 charge(
2868 Resource::feeInvalidSignature,
2869 "check transaction signature failure");
2870 return;
2871 }
2872 }
2873 else
2874 {
2875            forceValidity(
2876                app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);
2877 }
2878
2879 std::string reason;
2880 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2881
2882 if (tx->getStatus() == INVALID)
2883 {
2884 if (!reason.empty())
2885 {
2886 JLOG(p_journal_.trace())
2887 << "Exception checking transaction: " << reason;
2888 }
2889 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2890 charge(Resource::feeInvalidSignature, "tx (impossible)");
2891 return;
2892 }
2893
2894 bool const trusted(flags & SF_TRUSTED);
2895 app_.getOPs().processTransaction(
2896 tx, trusted, false, NetworkOPs::FailHard::no);
2897 }
2898 catch (std::exception const& ex)
2899 {
2900 JLOG(p_journal_.warn())
2901 << "Exception in " << __func__ << ": " << ex.what();
2902 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2903 using namespace std::string_literals;
2904 charge(Resource::feeInvalidData, "tx "s + ex.what());
2905 }
2906}
2907
2908// Called from our JobQueue
2909void
2910PeerImp::checkPropose(
2911 bool isTrusted,
2912    std::shared_ptr<protocol::TMProposeSet> const& packet,
2913    RCLCxPeerPos peerPos)
2914{
2915 JLOG(p_journal_.trace())
2916 << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";
2917
2918 XRPL_ASSERT(packet, "ripple::PeerImp::checkPropose : non-null packet");
2919
2920 if (!cluster() && !peerPos.checkSign())
2921 {
2922 std::string desc{"Proposal fails sig check"};
2923 JLOG(p_journal_.warn()) << desc;
2924 charge(Resource::feeInvalidSignature, desc);
2925 return;
2926 }
2927
2928 bool relay;
2929
2930 if (isTrusted)
2931 relay = app_.getOPs().processTrustedProposal(peerPos);
2932 else
2933 relay = app_.config().RELAY_UNTRUSTED_PROPOSALS == 1 || cluster();
2934
2935 if (relay)
2936 {
2937        // haveMessage contains the peers that are suppressed because they
2938        // are the source of this message; the message should not be relayed
2939        // to them, but it must still be counted as part of the squelch
2940        // logic.
2941 auto haveMessage = app_.overlay().relay(
2942 *packet, peerPos.suppressionID(), peerPos.publicKey());
2943 if (reduceRelayReady() && !haveMessage.empty())
2944 overlay_.updateSlotAndSquelch(
2945 peerPos.suppressionID(),
2946 peerPos.publicKey(),
2947 std::move(haveMessage),
2948 protocol::mtPROPOSE_LEDGER);
2949 }
2950}
2951
2952void
2953PeerImp::checkValidation(
2954    std::shared_ptr<STValidation> const& val,
2955    uint256 const& key,
2956    std::shared_ptr<protocol::TMValidation> const& packet)
2957{
2958 if (!val->isValid())
2959 {
2960 std::string desc{"Validation forwarded by peer is invalid"};
2961 JLOG(p_journal_.debug()) << desc;
2962 charge(Resource::feeInvalidSignature, desc);
2963 return;
2964 }
2965
2966 // FIXME it should be safe to remove this try/catch. Investigate codepaths.
2967 try
2968 {
2969 if (app_.getOPs().recvValidation(val, std::to_string(id())) ||
2970 cluster())
2971 {
2972            // haveMessage contains the peers that are suppressed because
2973            // they are the source of this message; the message should not
2974            // be relayed to them, but it must still be counted as part of
2975            // the squelch logic.
2976 auto haveMessage =
2977 overlay_.relay(*packet, key, val->getSignerPublic());
2978 if (reduceRelayReady() && !haveMessage.empty())
2979 {
2980 overlay_.updateSlotAndSquelch(
2981 key,
2982 val->getSignerPublic(),
2983 std::move(haveMessage),
2984 protocol::mtVALIDATION);
2985 }
2986 }
2987 }
2988 catch (std::exception const& ex)
2989 {
2990 JLOG(p_journal_.trace())
2991 << "Exception processing validation: " << ex.what();
2992 using namespace std::string_literals;
2993 charge(Resource::feeMalformedRequest, "validation "s + ex.what());
2994 }
2995}
2996
2997// Returns a peer that can help us get
2998// the TX tree with the specified root hash.
2999//
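// Among connected peers that advertise the requested item, the one with the
// highest getScore() value is chosen; the peer passed as 'skip' is excluded.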
3000static std::shared_ptr<PeerImp>
3001getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip)
3002{
3003    std::shared_ptr<PeerImp> ret;
3004    int retScore = 0;
3005
3006    ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
3007        if (p->hasTxSet(rootHash) && p.get() != skip)
3008 {
3009 auto score = p->getScore(true);
3010 if (!ret || (score > retScore))
3011 {
3012 ret = std::move(p);
3013 retScore = score;
3014 }
3015 }
3016 });
3017
3018 return ret;
3019}
3020
3021// Returns a random peer weighted by how likely to
3022// have the ledger and how responsive it is.
3023//
3024static std::shared_ptr<PeerImp>
3025getPeerWithLedger(
3026    OverlayImpl& ov,
3027    uint256 const& ledgerHash,
3028    LedgerIndex ledger,
3029    PeerImp const* skip)
3030{
3031    std::shared_ptr<PeerImp> ret;
3032    int retScore = 0;
3033
3034    ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
3035        if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)
3036 {
3037 auto score = p->getScore(true);
3038 if (!ret || (score > retScore))
3039 {
3040 ret = std::move(p);
3041 retScore = score;
3042 }
3043 }
3044 });
3045
3046 return ret;
3047}
3048
3049void
3050PeerImp::sendLedgerBase(
3051 std::shared_ptr<Ledger const> const& ledger,
3052 protocol::TMLedgerData& ledgerData)
3053{
3054 JLOG(p_journal_.trace()) << "sendLedgerBase: Base data";
3055
3056 Serializer s(sizeof(LedgerInfo));
3057 addRaw(ledger->info(), s);
3058 ledgerData.add_nodes()->set_nodedata(s.getDataPtr(), s.getLength());
3059
3060 auto const& stateMap{ledger->stateMap()};
3061 if (stateMap.getHash() != beast::zero)
3062 {
3063 // Return account state root node if possible
3064 Serializer root(768);
3065
3066 stateMap.serializeRoot(root);
3067 ledgerData.add_nodes()->set_nodedata(
3068 root.getDataPtr(), root.getLength());
3069
3070 if (ledger->info().txHash != beast::zero)
3071 {
3072 auto const& txMap{ledger->txMap()};
3073 if (txMap.getHash() != beast::zero)
3074 {
3075 // Return TX root node if possible
3076 root.erase();
3077 txMap.serializeRoot(root);
3078 ledgerData.add_nodes()->set_nodedata(
3079 root.getDataPtr(), root.getLength());
3080 }
3081 }
3082 }
3083
3084 auto message{
3085 std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
3086 send(message);
3087}
3088
3089std::shared_ptr<Ledger const>
3090PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
3091{
3092 JLOG(p_journal_.trace()) << "getLedger: Ledger";
3093
3094    std::shared_ptr<Ledger const> ledger;
3095
3096 if (m->has_ledgerhash())
3097 {
3098 // Attempt to find ledger by hash
3099 uint256 const ledgerHash{m->ledgerhash()};
3100 ledger = app_.getLedgerMaster().getLedgerByHash(ledgerHash);
3101 if (!ledger)
3102 {
3103 JLOG(p_journal_.trace())
3104 << "getLedger: Don't have ledger with hash " << ledgerHash;
3105
3106 if (m->has_querytype() && !m->has_requestcookie())
3107 {
3108 // Attempt to relay the request to a peer
3109 if (auto const peer = getPeerWithLedger(
3110 overlay_,
3111 ledgerHash,
3112 m->has_ledgerseq() ? m->ledgerseq() : 0,
3113 this))
3114 {
3115 m->set_requestcookie(id());
3116 peer->send(
3117 std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3118 JLOG(p_journal_.debug())
3119 << "getLedger: Request relayed to peer";
3120 return ledger;
3121 }
3122
3123 JLOG(p_journal_.trace())
3124 << "getLedger: Failed to find peer to relay request";
3125 }
3126 }
3127 }
3128 else if (m->has_ledgerseq())
3129 {
3130 // Attempt to find ledger by sequence
3131 if (m->ledgerseq() < app_.getLedgerMaster().getEarliestFetch())
3132 {
3133 JLOG(p_journal_.debug())
3134 << "getLedger: Early ledger sequence request";
3135 }
3136 else
3137 {
3138 ledger = app_.getLedgerMaster().getLedgerBySeq(m->ledgerseq());
3139 if (!ledger)
3140 {
3141 JLOG(p_journal_.debug())
3142 << "getLedger: Don't have ledger with sequence "
3143 << m->ledgerseq();
3144 }
3145 }
3146 }
3147 else if (m->has_ltype() && m->ltype() == protocol::ltCLOSED)
3148 {
3149 ledger = app_.getLedgerMaster().getClosedLedger();
3150 }
3151
3152 if (ledger)
3153 {
3154 // Validate retrieved ledger sequence
3155 auto const ledgerSeq{ledger->info().seq};
3156 if (m->has_ledgerseq())
3157 {
3158 if (ledgerSeq != m->ledgerseq())
3159 {
3160 // Do not resource charge a peer responding to a relay
3161 if (!m->has_requestcookie())
3162 charge(
3163 Resource::feeMalformedRequest, "get_ledger ledgerSeq");
3164
3165 ledger.reset();
3166 JLOG(p_journal_.warn())
3167 << "getLedger: Invalid ledger sequence " << ledgerSeq;
3168 }
3169 }
3170 else if (ledgerSeq < app_.getLedgerMaster().getEarliestFetch())
3171 {
3172 ledger.reset();
3173 JLOG(p_journal_.debug())
3174 << "getLedger: Early ledger sequence request " << ledgerSeq;
3175 }
3176 }
3177 else
3178 {
3179 JLOG(p_journal_.debug()) << "getLedger: Unable to find ledger";
3180 }
3181
3182 return ledger;
3183}
3184
3185std::shared_ptr<SHAMap const>
3186PeerImp::getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const
3187{
3188 JLOG(p_journal_.trace()) << "getTxSet: TX set";
3189
3190 uint256 const txSetHash{m->ledgerhash()};
3191    std::shared_ptr<SHAMap const> shaMap{
3192        app_.getInboundTransactions().getSet(txSetHash, false)};
3193 if (!shaMap)
3194 {
3195 if (m->has_querytype() && !m->has_requestcookie())
3196 {
3197 // Attempt to relay the request to a peer
3198 if (auto const peer = getPeerWithTree(overlay_, txSetHash, this))
3199 {
3200 m->set_requestcookie(id());
3201 peer->send(
3202 std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3203 JLOG(p_journal_.debug()) << "getTxSet: Request relayed";
3204 }
3205 else
3206 {
3207 JLOG(p_journal_.debug())
3208 << "getTxSet: Failed to find relay peer";
3209 }
3210 }
3211 else
3212 {
3213 JLOG(p_journal_.debug()) << "getTxSet: Failed to find TX set";
3214 }
3215 }
3216
3217 return shaMap;
3218}
3219
3220void
3221PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
3222{
3223 // Do not resource charge a peer responding to a relay
3224 if (!m->has_requestcookie())
3225 charge(
3226 Resource::feeModerateBurdenPeer, "received a get ledger request");
3227
3228    std::shared_ptr<Ledger const> ledger;
3229    std::shared_ptr<SHAMap const> sharedMap;
3230    SHAMap const* map{nullptr};
3231 protocol::TMLedgerData ledgerData;
3232 bool fatLeaves{true};
3233 auto const itype{m->itype()};
3234
3235 if (itype == protocol::liTS_CANDIDATE)
3236 {
3237 if (sharedMap = getTxSet(m); !sharedMap)
3238 return;
3239 map = sharedMap.get();
3240
3241 // Fill out the reply
3242 ledgerData.set_ledgerseq(0);
3243 ledgerData.set_ledgerhash(m->ledgerhash());
3244 ledgerData.set_type(protocol::liTS_CANDIDATE);
3245 if (m->has_requestcookie())
3246 ledgerData.set_requestcookie(m->requestcookie());
3247
3248 // We'll already have most transactions
3249 fatLeaves = false;
3250 }
3251 else
3252 {
3253 if (send_queue_.size() >= Tuning::dropSendQueue)
3254 {
3255 JLOG(p_journal_.debug())
3256 << "processLedgerRequest: Large send queue";
3257 return;
3258 }
3259 if (app_.getFeeTrack().isLoadedLocal() && !cluster())
3260 {
3261 JLOG(p_journal_.debug()) << "processLedgerRequest: Too busy";
3262 return;
3263 }
3264
3265 if (ledger = getLedger(m); !ledger)
3266 return;
3267
3268 // Fill out the reply
3269 auto const ledgerHash{ledger->info().hash};
3270 ledgerData.set_ledgerhash(ledgerHash.begin(), ledgerHash.size());
3271 ledgerData.set_ledgerseq(ledger->info().seq);
3272 ledgerData.set_type(itype);
3273 if (m->has_requestcookie())
3274 ledgerData.set_requestcookie(m->requestcookie());
3275
3276 switch (itype)
3277 {
3278 case protocol::liBASE:
3279 sendLedgerBase(ledger, ledgerData);
3280 return;
3281
3282 case protocol::liTX_NODE:
3283 map = &ledger->txMap();
3284 JLOG(p_journal_.trace()) << "processLedgerRequest: TX map hash "
3285 << to_string(map->getHash());
3286 break;
3287
3288 case protocol::liAS_NODE:
3289 map = &ledger->stateMap();
3290 JLOG(p_journal_.trace())
3291 << "processLedgerRequest: Account state map hash "
3292 << to_string(map->getHash());
3293 break;
3294
3295 default:
3296 // This case should not be possible here
3297 JLOG(p_journal_.error())
3298 << "processLedgerRequest: Invalid ledger info type";
3299 return;
3300 }
3301 }
3302
3303 if (!map)
3304 {
3305 JLOG(p_journal_.warn()) << "processLedgerRequest: Unable to find map";
3306 return;
3307 }
3308
3309 // Add requested node data to reply
3310 if (m->nodeids_size() > 0)
3311 {
3312 auto const queryDepth{
3313 m->has_querydepth() ? m->querydepth() : (isHighLatency() ? 2 : 1)};
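        // When the requester does not specify a query depth, reply with a
        // deeper subtree (2 levels instead of 1) to high-latency peers so
        // they can make progress with fewer round trips.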
3314
3315        std::vector<std::pair<SHAMapNodeID, Blob>> data;
3316
3317 for (int i = 0; i < m->nodeids_size() &&
3318 ledgerData.nodes_size() < Tuning::softMaxReplyNodes;
3319 ++i)
3320 {
3321 auto const shaMapNodeId{deserializeSHAMapNodeID(m->nodeids(i))};
3322
3323 data.clear();
3324 data.reserve(Tuning::softMaxReplyNodes);
3325
3326 try
3327 {
3328 if (map->getNodeFat(*shaMapNodeId, data, fatLeaves, queryDepth))
3329 {
3330 JLOG(p_journal_.trace())
3331 << "processLedgerRequest: getNodeFat got "
3332 << data.size() << " nodes";
3333
3334 for (auto const& d : data)
3335 {
3336 if (ledgerData.nodes_size() >=
3337 Tuning::hardMaxReplyNodes)
3338 break;
3339 protocol::TMLedgerNode* node{ledgerData.add_nodes()};
3340 node->set_nodeid(d.first.getRawString());
3341 node->set_nodedata(d.second.data(), d.second.size());
3342 }
3343 }
3344 else
3345 {
3346 JLOG(p_journal_.warn())
3347 << "processLedgerRequest: getNodeFat returns false";
3348 }
3349 }
3350 catch (std::exception const& e)
3351 {
3352 std::string info;
3353 switch (itype)
3354 {
3355 case protocol::liBASE:
3356 // This case should not be possible here
3357 info = "Ledger base";
3358 break;
3359
3360 case protocol::liTX_NODE:
3361 info = "TX node";
3362 break;
3363
3364 case protocol::liAS_NODE:
3365 info = "AS node";
3366 break;
3367
3368 case protocol::liTS_CANDIDATE:
3369 info = "TS candidate";
3370 break;
3371
3372 default:
3373 info = "Invalid";
3374 break;
3375 }
3376
3377 if (!m->has_ledgerhash())
3378 info += ", no hash specified";
3379
3380 JLOG(p_journal_.error())
3381 << "processLedgerRequest: getNodeFat with nodeId "
3382 << *shaMapNodeId << " and ledger info type " << info
3383 << " throws exception: " << e.what();
3384 }
3385 }
3386
3387 JLOG(p_journal_.info())
3388 << "processLedgerRequest: Got request for " << m->nodeids_size()
3389 << " nodes at depth " << queryDepth << ", return "
3390 << ledgerData.nodes_size() << " nodes";
3391 }
3392
3393 if (ledgerData.nodes_size() == 0)
3394 return;
3395
3396 send(std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA));
3397}
3398
3399int
3400PeerImp::getScore(bool haveItem) const
3401{
3402 // Random component of score, used to break ties and avoid
3403 // overloading the "best" peer
3404 static const int spRandomMax = 9999;
3405
3406 // Score for being very likely to have the thing we are
3407    // looking for; should be roughly spRandomMax
3408 static const int spHaveItem = 10000;
3409
3410 // Score reduction for each millisecond of latency; should
3411 // be roughly spRandomMax divided by the maximum reasonable
3412 // latency
3413 static const int spLatency = 30;
3414
3415 // Penalty for unknown latency; should be roughly spRandomMax
3416 static const int spNoLatency = 8000;
3417
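    // Illustrative example: a peer known to have the item with a measured
    // latency of 100ms scores rand(0..9999) + 10000 - 100 * 30, while a peer
    // with unknown latency scores rand(0..9999) + 10000 - 8000.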
3418 int score = rand_int(spRandomMax);
3419
3420 if (haveItem)
3421 score += spHaveItem;
3422
3423    std::optional<std::chrono::milliseconds> latency;
3424    {
3425 std::lock_guard sl(recentLock_);
3426 latency = latency_;
3427 }
3428
3429 if (latency)
3430 score -= latency->count() * spLatency;
3431 else
3432 score -= spNoLatency;
3433
3434 return score;
3435}
3436
3437bool
3438PeerImp::isHighLatency() const
3439{
3440 std::lock_guard sl(recentLock_);
3441 return latency_ >= peerHighLatency;
3442}
3443
3444bool
3445PeerImp::reduceRelayReady()
3446{
3447 if (!reduceRelayReady_)
3448 reduceRelayReady_ =
3449 reduce_relay::epoch<std::chrono::minutes>(UptimeClock::now()) >
3450 reduce_relay::WAIT_ON_BOOTUP;
3451 return vpReduceRelayEnabled_ && reduceRelayReady_;
3452}
3453
3454void
3455PeerImp::Metrics::add_message(std::uint64_t bytes)
3456{
3457 using namespace std::chrono_literals;
3458 std::unique_lock lock{mutex_};
3459
3460 totalBytes_ += bytes;
3461 accumBytes_ += bytes;
3462 auto const timeElapsed = clock_type::now() - intervalStart_;
3463 auto const timeElapsedInSecs =
3464 std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);
3465
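    // Once at least one second has passed, convert the accumulated bytes into
    // a per-second figure, append it to the rolling window, and recompute the
    // average over the window before starting a new accumulation interval.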
3466 if (timeElapsedInSecs >= 1s)
3467 {
3468 auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
3469 rollingAvg_.push_back(avgBytes);
3470
3471 auto const totalBytes =
3472 std::accumulate(rollingAvg_.begin(), rollingAvg_.end(), 0ull);
3473 rollingAvgBytes_ = totalBytes / rollingAvg_.size();
3474
3475 intervalStart_ = clock_type::now();
3476 accumBytes_ = 0;
3477 }
3478}
3479
3480std::uint64_t
3481PeerImp::Metrics::average_bytes() const
3482{
3483 std::shared_lock lock{mutex_};
3484 return rollingAvgBytes_;
3485}
3486
3487std::uint64_t
3488PeerImp::Metrics::total_bytes() const
3489{
3490 std::shared_lock lock{mutex_};
3491 return totalBytes_;
3492}
3493
3494} // namespace ripple