1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2012, 2013 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/consensus/RCLValidations.h>
21#include <xrpld/app/ledger/InboundLedgers.h>
22#include <xrpld/app/ledger/InboundTransactions.h>
23#include <xrpld/app/ledger/LedgerMaster.h>
24#include <xrpld/app/ledger/TransactionMaster.h>
25#include <xrpld/app/misc/HashRouter.h>
26#include <xrpld/app/misc/LoadFeeTrack.h>
27#include <xrpld/app/misc/NetworkOPs.h>
28#include <xrpld/app/misc/Transaction.h>
29#include <xrpld/app/misc/ValidatorList.h>
30#include <xrpld/app/tx/apply.h>
31#include <xrpld/overlay/Cluster.h>
32#include <xrpld/overlay/detail/PeerImp.h>
33#include <xrpld/overlay/detail/Tuning.h>
34#include <xrpld/overlay/predicates.h>
35#include <xrpld/perflog/PerfLog.h>
36#include <xrpl/basics/UptimeClock.h>
37#include <xrpl/basics/base64.h>
38#include <xrpl/basics/random.h>
39#include <xrpl/basics/safe_cast.h>
40#include <xrpl/beast/core/LexicalCast.h>
41// #include <xrpl/beast/core/SemanticVersion.h>
42#include <xrpl/protocol/digest.h>
43
44#include <boost/algorithm/string/predicate.hpp>
45#include <boost/beast/core/ostream.hpp>
46
47#include <algorithm>
48#include <memory>
49#include <mutex>
50#include <numeric>
51#include <sstream>
52
53using namespace std::chrono_literals;
54
55namespace ripple {
56
57namespace {
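// Latency at or above this value is considered unusually high for a peer.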
59std::chrono::milliseconds constexpr peerHighLatency{300};
60
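// How often the per-peer timer fires to run health checks (ping, send queue
// and tracking checks; see onTimer).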
62std::chrono::seconds constexpr peerTimerInterval{60};
63} // namespace
64
65// TODO: Remove this exclusion once unit tests are added after the hotfix
66// release.
67
68PeerImp::PeerImp(
69    Application& app,
70    id_t id,
71    std::shared_ptr<PeerFinder::Slot> const& slot,
72    http_request_type&& request,
73    PublicKey const& publicKey,
74    ProtocolVersion protocol,
75    Resource::Consumer consumer,
76    std::unique_ptr<stream_type>&& stream_ptr,
77    OverlayImpl& overlay)
78 : Child(overlay)
79 , app_(app)
80 , id_(id)
81 , sink_(app_.journal("Peer"), makePrefix(id))
82 , p_sink_(app_.journal("Protocol"), makePrefix(id))
83 , journal_(sink_)
84 , p_journal_(p_sink_)
85 , stream_ptr_(std::move(stream_ptr))
86 , socket_(stream_ptr_->next_layer().socket())
87 , stream_(*stream_ptr_)
88 , strand_(socket_.get_executor())
89 , timer_(waitable_timer{socket_.get_executor()})
90 , remote_address_(slot->remote_endpoint())
91 , overlay_(overlay)
92 , inbound_(true)
93 , protocol_(protocol)
94 , tracking_(Tracking::unknown)
95 , trackingTime_(clock_type::now())
96 , publicKey_(publicKey)
97 , lastPingTime_(clock_type::now())
98 , creationTime_(clock_type::now())
99 , squelch_(app_.journal("Squelch"))
100 , usage_(consumer)
101 , fee_{Resource::feeTrivialPeer, ""}
102 , slot_(slot)
103 , request_(std::move(request))
104 , headers_(request_)
105    , compressionEnabled_(
106          peerFeatureEnabled(
107              headers_,
109 "lz4",
110 app_.config().COMPRESSION)
111 ? Compressed::On
112 : Compressed::Off)
113 , txReduceRelayEnabled_(peerFeatureEnabled(
114 headers_,
116 app_.config().TX_REDUCE_RELAY_ENABLE))
117 , vpReduceRelayEnabled_(peerFeatureEnabled(
118 headers_,
120 app_.config().VP_REDUCE_RELAY_ENABLE))
121 , ledgerReplayEnabled_(peerFeatureEnabled(
122 headers_,
124 app_.config().LEDGER_REPLAY))
125 , ledgerReplayMsgHandler_(app, app.getLedgerReplayer())
126{
127    JLOG(journal_.info()) << "compression enabled "
128                          << (compressionEnabled_ == Compressed::On)
129                          << " vp reduce-relay enabled "
130                          << vpReduceRelayEnabled_
131                          << " tx reduce-relay enabled "
132                          << txReduceRelayEnabled_
133                          << " " << id_;
134}
135
136PeerImp::~PeerImp()
137{
138 const bool inCluster{cluster()};
139
144
145 if (inCluster)
146 {
147 JLOG(journal_.warn()) << name() << " left cluster";
148 }
149}
150
151// Helper function to check for valid uint256 values in protobuf buffers
152static bool
153stringIsUint256Sized(std::string const& pBuffStr)
154{
155 return pBuffStr.size() == uint256::size();
156}
157
158void
159PeerImp::run()
160{
161    if (!strand_.running_in_this_thread())
162        return post(strand_, std::bind(&PeerImp::run, shared_from_this()));
163
164    auto parseLedgerHash =
165        [](std::string_view value) -> std::optional<uint256> {
166        if (uint256 ret; ret.parseHex(value))
167 return ret;
168
169 if (auto const s = base64_decode(value); s.size() == uint256::size())
170 return uint256{s};
171
172 return std::nullopt;
173 };
174
175    std::optional<uint256> closed;
176    std::optional<uint256> previous;
177
178 if (auto const iter = headers_.find("Closed-Ledger");
179 iter != headers_.end())
180 {
181 closed = parseLedgerHash(iter->value());
182
183 if (!closed)
184 fail("Malformed handshake data (1)");
185 }
186
187 if (auto const iter = headers_.find("Previous-Ledger");
188 iter != headers_.end())
189 {
190 previous = parseLedgerHash(iter->value());
191
192 if (!previous)
193 fail("Malformed handshake data (2)");
194 }
195
196 if (previous && !closed)
197 fail("Malformed handshake data (3)");
198
199    {
200        std::lock_guard sl(recentLock_);
201        if (closed)
202 closedLedgerHash_ = *closed;
203 if (previous)
204 previousLedgerHash_ = *previous;
205 }
206
207 if (inbound_)
208 doAccept();
209    else
210        doProtocolStart();
211
212 // Anything else that needs to be done with the connection should be
213 // done in doProtocolStart
214}
215
216void
217PeerImp::stop()
218{
219    if (!strand_.running_in_this_thread())
220        return post(strand_, std::bind(&PeerImp::stop, shared_from_this()));
221    if (socket_.is_open())
222 {
223 // The rationale for using different severity levels is that
224 // outbound connections are under our control and may be logged
225 // at a higher level, but inbound connections are more numerous and
226 // uncontrolled so to prevent log flooding the severity is reduced.
227 //
228 if (inbound_)
229 {
230 JLOG(journal_.debug()) << "Stop";
231 }
232 else
233 {
234 JLOG(journal_.info()) << "Stop";
235 }
236 }
237 close();
238}
239
240//------------------------------------------------------------------------------
241
242void
243PeerImp::send(std::shared_ptr<Message> const& m)
244{
245 if (!strand_.running_in_this_thread())
246 return post(strand_, std::bind(&PeerImp::send, shared_from_this(), m));
247 if (gracefulClose_)
248 return;
249 if (detaching_)
250 return;
251
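    // If this message carries a validator's key and we have been asked to
    // squelch that validator (proposal/validation reduce-relay), skip sending.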
252 auto validator = m->getValidatorKey();
253 if (validator && !squelch_.expireSquelch(*validator))
254 return;
255
256    overlay_.reportTraffic(
257        safe_cast<TrafficCount::category>(m->getCategory()),
258 false,
259 static_cast<int>(m->getBuffer(compressionEnabled_).size()));
260
261 auto sendq_size = send_queue_.size();
262
263 if (sendq_size < Tuning::targetSendQueue)
264 {
265        // To detect a peer that does not read from its
266        // side of the connection, we expect the peer to have
267        // a small sendq periodically
268 large_sendq_ = 0;
269 }
270 else if (auto sink = journal_.debug();
271 sink && (sendq_size % Tuning::sendQueueLogFreq) == 0)
272 {
273 std::string const n = name();
274 sink << (n.empty() ? remote_address_.to_string() : n)
275 << " sendq: " << sendq_size;
276 }
277
278 send_queue_.push(m);
279
280 if (sendq_size != 0)
281 return;
282
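    // Only the first queued message starts an async_write; onWriteMessage()
    // sends the remaining queued messages as each write completes.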
283 boost::asio::async_write(
284 stream_,
285 boost::asio::buffer(
286 send_queue_.front()->getBuffer(compressionEnabled_)),
287 bind_executor(
288 strand_,
289            std::bind(
290                &PeerImp::onWriteMessage,
291                shared_from_this(),
292                std::placeholders::_1,
293 std::placeholders::_2)));
294}
295
296void
297PeerImp::sendTxQueue()
298{
299    if (!strand_.running_in_this_thread())
300        return post(
301            strand_, std::bind(&PeerImp::sendTxQueue, shared_from_this()));
302
303 if (!txQueue_.empty())
304 {
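        // Tx reduce-relay: announce all queued hashes in a single
        // TMHaveTransactions message rather than relaying each transaction.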
305 protocol::TMHaveTransactions ht;
306 std::for_each(txQueue_.begin(), txQueue_.end(), [&](auto const& hash) {
307 ht.add_hashes(hash.data(), hash.size());
308 });
309 JLOG(p_journal_.trace()) << "sendTxQueue " << txQueue_.size();
310 txQueue_.clear();
311 send(std::make_shared<Message>(ht, protocol::mtHAVE_TRANSACTIONS));
312 }
313}
314
315void
316PeerImp::addTxQueue(uint256 const& hash)
317{
318    if (!strand_.running_in_this_thread())
319        return post(
320            strand_, std::bind(&PeerImp::addTxQueue, shared_from_this(), hash));
321
323 {
324 JLOG(p_journal_.warn()) << "addTxQueue exceeds the cap";
325 sendTxQueue();
326 }
327
328 txQueue_.insert(hash);
329 JLOG(p_journal_.trace()) << "addTxQueue " << txQueue_.size();
330}
331
332void
333PeerImp::removeTxQueue(uint256 const& hash)
334{
335    if (!strand_.running_in_this_thread())
336        return post(
337            strand_,
338            std::bind(&PeerImp::removeTxQueue, shared_from_this(), hash));
339
340 auto removed = txQueue_.erase(hash);
341 JLOG(p_journal_.trace()) << "removeTxQueue " << removed;
342}
343
344void
345PeerImp::charge(Resource::Charge const& fee, std::string const& context)
346{
347 if ((usage_.charge(fee, context) == Resource::drop) &&
348 usage_.disconnect(p_journal_) && strand_.running_in_this_thread())
349 {
350 // Sever the connection
352 fail("charge: Resources");
353 }
354}
355
356//------------------------------------------------------------------------------
357
358bool
359PeerImp::crawl() const
360{
361 auto const iter = headers_.find("Crawl");
362 if (iter == headers_.end())
363 return false;
364 return boost::iequals(iter->value(), "public");
365}
366
367bool
368PeerImp::cluster() const
369{
370 return static_cast<bool>(app_.cluster().member(publicKey_));
371}
372
373std::string
374PeerImp::getVersion() const
375{
376 if (inbound_)
377 return headers_["User-Agent"];
378 return headers_["Server"];
379}
380
381Json::Value
382PeerImp::json()
383{
384    Json::Value ret(Json::objectValue);
385
386 ret[jss::public_key] = toBase58(TokenType::NodePublic, publicKey_);
387 ret[jss::address] = remote_address_.to_string();
388
389 if (inbound_)
390 ret[jss::inbound] = true;
391
392 if (cluster())
393 {
394 ret[jss::cluster] = true;
395
396 if (auto const n = name(); !n.empty())
397 // Could move here if Json::Value supported moving from a string
398 ret[jss::name] = n;
399 }
400
401 if (auto const d = domain(); !d.empty())
402 ret[jss::server_domain] = std::string{d};
403
404 if (auto const nid = headers_["Network-ID"]; !nid.empty())
405 ret[jss::network_id] = std::string{nid};
406
407 ret[jss::load] = usage_.balance();
408
409 if (auto const version = getVersion(); !version.empty())
410 ret[jss::version] = std::string{version};
411
412 ret[jss::protocol] = to_string(protocol_);
413
414    {
415        std::lock_guard sl(recentLock_);
416        if (latency_)
417 ret[jss::latency] = static_cast<Json::UInt>(latency_->count());
418 }
419
420 ret[jss::uptime] = static_cast<Json::UInt>(
421 std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());
422
423 std::uint32_t minSeq, maxSeq;
424 ledgerRange(minSeq, maxSeq);
425
426 if ((minSeq != 0) || (maxSeq != 0))
427 ret[jss::complete_ledgers] =
428 std::to_string(minSeq) + " - " + std::to_string(maxSeq);
429
430 switch (tracking_.load())
431    {
432        case Tracking::diverged:
433            ret[jss::track] = "diverged";
434            break;
435
436        case Tracking::unknown:
437            ret[jss::track] = "unknown";
438            break;
439
440        case Tracking::converged:
441            // Nothing to do here
442 break;
443 }
444
445 uint256 closedLedgerHash;
446 protocol::TMStatusChange last_status;
447    {
448        std::lock_guard sl(recentLock_);
449        closedLedgerHash = closedLedgerHash_;
450 last_status = last_status_;
451 }
452
453 if (closedLedgerHash != beast::zero)
454 ret[jss::ledger] = to_string(closedLedgerHash);
455
456 if (last_status.has_newstatus())
457 {
458 switch (last_status.newstatus())
459 {
460 case protocol::nsCONNECTING:
461 ret[jss::status] = "connecting";
462 break;
463
464 case protocol::nsCONNECTED:
465 ret[jss::status] = "connected";
466 break;
467
468 case protocol::nsMONITORING:
469 ret[jss::status] = "monitoring";
470 break;
471
472 case protocol::nsVALIDATING:
473 ret[jss::status] = "validating";
474 break;
475
476 case protocol::nsSHUTTING:
477 ret[jss::status] = "shutting";
478 break;
479
480 default:
481 JLOG(p_journal_.warn())
482 << "Unknown status: " << last_status.newstatus();
483 }
484 }
485
486 ret[jss::metrics] = Json::Value(Json::objectValue);
487 ret[jss::metrics][jss::total_bytes_recv] =
488 std::to_string(metrics_.recv.total_bytes());
489 ret[jss::metrics][jss::total_bytes_sent] =
490 std::to_string(metrics_.sent.total_bytes());
491 ret[jss::metrics][jss::avg_bps_recv] =
492 std::to_string(metrics_.recv.average_bytes());
493 ret[jss::metrics][jss::avg_bps_sent] =
494 std::to_string(metrics_.sent.average_bytes());
495
496 return ret;
497}
498
499bool
500PeerImp::supportsFeature(ProtocolFeature f) const
501{
502    switch (f)
503    {
504        case ProtocolFeature::ValidatorListPropagation:
505            return protocol_ >= make_protocol(2, 1);
506        case ProtocolFeature::ValidatorList2Propagation:
507            return protocol_ >= make_protocol(2, 2);
508        case ProtocolFeature::LedgerReplay:
509            return ledgerReplayEnabled_;
510    }
511 return false;
512}
513
514//------------------------------------------------------------------------------
515
516bool
517PeerImp::hasLedger(uint256 const& hash, std::uint32_t seq) const
518{
519    {
520        std::lock_guard sl(recentLock_);
521        if ((seq != 0) && (seq >= minLedger_) && (seq <= maxLedger_) &&
522            (tracking_.load() == Tracking::converged))
523            return true;
524 if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
525 recentLedgers_.end())
526 return true;
527 }
528 return false;
529}
530
531void
532PeerImp::ledgerRange(std::uint32_t& minSeq, std::uint32_t& maxSeq) const
533{
534    std::lock_guard sl(recentLock_);
535
536 minSeq = minLedger_;
537 maxSeq = maxLedger_;
538}
539
540bool
541PeerImp::hasTxSet(uint256 const& hash) const
542{
543    std::lock_guard sl(recentLock_);
544    return std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
545 recentTxSets_.end();
546}
547
548void
549PeerImp::cycleStatus()
550{
551    // Operations on closedLedgerHash_ and previousLedgerHash_ must be
552    // guarded by recentLock_.
553    std::lock_guard sl(recentLock_);
554    previousLedgerHash_ = closedLedgerHash_;
555    closedLedgerHash_.zero();
556}
557
558bool
559PeerImp::hasRange(std::uint32_t uMin, std::uint32_t uMax)
560{
561    std::lock_guard sl(recentLock_);
562    return (tracking_ != Tracking::diverged) && (uMin >= minLedger_) &&
563 (uMax <= maxLedger_);
564}
565
566//------------------------------------------------------------------------------
567
568void
569PeerImp::close()
570{
571 XRPL_ASSERT(
572 strand_.running_in_this_thread(),
573 "ripple::PeerImp::close : strand in this thread");
574 if (socket_.is_open())
575 {
576 detaching_ = true; // DEPRECATED
577 error_code ec;
578 timer_.cancel(ec);
579 socket_.close(ec);
581 if (inbound_)
582 {
583 JLOG(journal_.debug()) << "Closed";
584 }
585 else
586 {
587 JLOG(journal_.info()) << "Closed";
588 }
589 }
590}
591
592void
593PeerImp::fail(std::string const& reason)
594{
595 if (!strand_.running_in_this_thread())
596 return post(
597 strand_,
598 std::bind(
599 (void(Peer::*)(std::string const&)) & PeerImp::fail,
601 reason));
603 {
604 std::string const n = name();
605 JLOG(journal_.warn()) << (n.empty() ? remote_address_.to_string() : n)
606 << " failed: " << reason;
607 }
608 close();
609}
610
611void
612PeerImp::fail(std::string const& name, error_code ec)
613{
614 XRPL_ASSERT(
615 strand_.running_in_this_thread(),
616 "ripple::PeerImp::fail : strand in this thread");
617 if (socket_.is_open())
618 {
619        JLOG(journal_.warn())
620            << name << " from " << toBase58(TokenType::NodePublic, publicKey_)
621            << " at " << remote_address_.to_string() << ": " << ec.message();
622 }
623 close();
624}
625
626void
627PeerImp::gracefulClose()
628{
629 XRPL_ASSERT(
630 strand_.running_in_this_thread(),
631 "ripple::PeerImp::gracefulClose : strand in this thread");
632 XRPL_ASSERT(
633 socket_.is_open(), "ripple::PeerImp::gracefulClose : socket is open");
634    XRPL_ASSERT(
635        !gracefulClose_,
636        "ripple::PeerImp::gracefulClose : socket is not closing");
637 gracefulClose_ = true;
638 if (send_queue_.size() > 0)
639 return;
640 setTimer();
641 stream_.async_shutdown(bind_executor(
642 strand_,
643 std::bind(
644 &PeerImp::onShutdown, shared_from_this(), std::placeholders::_1)));
645}
646
647void
648PeerImp::setTimer()
649{
650 error_code ec;
651 timer_.expires_from_now(peerTimerInterval, ec);
652
653 if (ec)
654 {
655 JLOG(journal_.error()) << "setTimer: " << ec.message();
656 return;
657 }
658 timer_.async_wait(bind_executor(
659 strand_,
660 std::bind(
661 &PeerImp::onTimer, shared_from_this(), std::placeholders::_1)));
662}
663
664// convenience for ignoring the error code
665void
666PeerImp::cancelTimer()
667{
668 error_code ec;
669 timer_.cancel(ec);
670}
671
672//------------------------------------------------------------------------------
673
674std::string
675PeerImp::makePrefix(id_t id)
676{
677    std::stringstream ss;
678    ss << "[" << std::setfill('0') << std::setw(3) << id << "] ";
679 return ss.str();
680}
681
682void
683PeerImp::onTimer(error_code const& ec)
684{
685 if (!socket_.is_open())
686 return;
687
688 if (ec == boost::asio::error::operation_aborted)
689 return;
690
691 if (ec)
692 {
693 // This should never happen
694 JLOG(journal_.error()) << "onTimer: " << ec.message();
695 return close();
696 }
697
698    if (large_sendq_++ >= Tuning::sendqIntervals)
699    {
700 fail("Large send queue");
701 return;
702 }
703
704 if (auto const t = tracking_.load(); !inbound_ && t != Tracking::converged)
705 {
706 clock_type::duration duration;
707
708        {
709            std::lock_guard sl(recentLock_);
710            duration = clock_type::now() - trackingTime_;
711 }
712
713 if ((t == Tracking::diverged &&
714 (duration > app_.config().MAX_DIVERGED_TIME)) ||
715 (t == Tracking::unknown &&
716 (duration > app_.config().MAX_UNKNOWN_TIME)))
717 {
719 fail("Not useful");
720 return;
721 }
722 }
723
724 // Already waiting for PONG
725 if (lastPingSeq_)
726 {
727 fail("Ping Timeout");
728 return;
729 }
730
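    // Use an unpredictable cookie so that only a genuine PONG can match it.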
732 lastPingSeq_ = rand_int<std::uint32_t>();
733
734 protocol::TMPing message;
735 message.set_type(protocol::TMPing::ptPING);
736 message.set_seq(*lastPingSeq_);
737
738 send(std::make_shared<Message>(message, protocol::mtPING));
739
740 setTimer();
741}
742
743void
744PeerImp::onShutdown(error_code ec)
745{
746 cancelTimer();
747 // If we don't get eof then something went wrong
748 if (!ec)
749 {
750 JLOG(journal_.error()) << "onShutdown: expected error condition";
751 return close();
752 }
753 if (ec != boost::asio::error::eof)
754 return fail("onShutdown", ec);
755 close();
756}
757
758//------------------------------------------------------------------------------
759void
760PeerImp::doAccept()
761{
762 XRPL_ASSERT(
763 read_buffer_.size() == 0,
764 "ripple::PeerImp::doAccept : empty read buffer");
765
766 JLOG(journal_.debug()) << "doAccept: " << remote_address_;
767
768 auto const sharedValue = makeSharedValue(*stream_ptr_, journal_);
769
770 // This shouldn't fail since we already computed
771 // the shared value successfully in OverlayImpl
772 if (!sharedValue)
773 return fail("makeSharedValue: Unexpected failure");
774
775 JLOG(journal_.info()) << "Protocol: " << to_string(protocol_);
776    JLOG(journal_.info()) << "Public Key: "
777                          << toBase58(TokenType::NodePublic, publicKey_);
778
779 if (auto member = app_.cluster().member(publicKey_))
780 {
781        {
782            std::unique_lock write_lock{nameMutex_};
783            name_ = *member;
784 }
785 JLOG(journal_.info()) << "Cluster name: " << *member;
786 }
787
789
790 // XXX Set timer: connection is in grace period to be useful.
791 // XXX Set timer: connection idle (idle may vary depending on connection
792 // type.)
793
794 auto write_buffer = std::make_shared<boost::beast::multi_buffer>();
795
796    boost::beast::ostream(*write_buffer) << makeResponse(
797        !overlay_.peerFinder().config().peerPrivate,
798        request_,
799        overlay_.setup().public_ip,
800        remote_address_.address(),
801        *sharedValue,
802        overlay_.setup().networkID,
803        protocol_,
804        app_);
805
806 // Write the whole buffer and only start protocol when that's done.
807 boost::asio::async_write(
808 stream_,
809 write_buffer->data(),
810 boost::asio::transfer_all(),
811 bind_executor(
812 strand_,
813 [this, write_buffer, self = shared_from_this()](
814 error_code ec, std::size_t bytes_transferred) {
815 if (!socket_.is_open())
816 return;
817 if (ec == boost::asio::error::operation_aborted)
818 return;
819 if (ec)
820 return fail("onWriteResponse", ec);
821 if (write_buffer->size() == bytes_transferred)
822 return doProtocolStart();
823 return fail("Failed to write header");
824 }));
825}
826
827std::string
828PeerImp::name() const
829{
830 std::shared_lock read_lock{nameMutex_};
831 return name_;
832}
833
834std::string
835PeerImp::domain() const
836{
837 return headers_["Server-Domain"];
838}
839
840//------------------------------------------------------------------------------
841
842// Protocol logic
843
844void
845PeerImp::doProtocolStart()
846{
847    onReadMessage(error_code(), 0);
848
849    // Send all the validator lists that have been loaded
850    if (inbound_ && supportsFeature(ProtocolFeature::ValidatorListPropagation))
851    {
852        app_.validators().for_each_available(
853            [&](std::string const& manifest,
854                std::uint32_t version,
855                std::map<std::size_t, ValidatorBlobInfo> const& blobInfos,
856                PublicKey const& pubKey,
857                std::size_t maxSequence,
858                uint256 const& hash) {
859                ValidatorList::sendValidatorList(
860                    *this,
861 0,
862 pubKey,
863 maxSequence,
864 version,
865 manifest,
866 blobInfos,
868 p_journal_);
869
870 // Don't send it next time.
872 });
873 }
874
875 if (auto m = overlay_.getManifestsMessage())
876 send(m);
877
878 setTimer();
879}
880
881// Called repeatedly with protocol message data
882void
883PeerImp::onReadMessage(error_code ec, std::size_t bytes_transferred)
884{
885 if (!socket_.is_open())
886 return;
887 if (ec == boost::asio::error::operation_aborted)
888 return;
889 if (ec == boost::asio::error::eof)
890 {
891 JLOG(journal_.info()) << "EOF";
892 return gracefulClose();
893 }
894 if (ec)
895 return fail("onReadMessage", ec);
896 if (auto stream = journal_.trace())
897 {
898 if (bytes_transferred > 0)
899 stream << "onReadMessage: " << bytes_transferred << " bytes";
900 else
901 stream << "onReadMessage";
902 }
903
904 metrics_.recv.add_message(bytes_transferred);
905
906 read_buffer_.commit(bytes_transferred);
907
908 auto hint = Tuning::readBufferBytes;
909
910 while (read_buffer_.size() > 0)
911 {
912 std::size_t bytes_consumed;
913
914 using namespace std::chrono_literals;
915 std::tie(bytes_consumed, ec) = perf::measureDurationAndLog(
916 [&]() {
917 return invokeProtocolMessage(read_buffer_.data(), *this, hint);
918 },
919 "invokeProtocolMessage",
920 350ms,
921 journal_);
922
923 if (ec)
924 return fail("onReadMessage", ec);
925 if (!socket_.is_open())
926 return;
927 if (gracefulClose_)
928 return;
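        // Zero bytes consumed means only a partial protocol message is
        // buffered; wait for more data to arrive.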
929 if (bytes_consumed == 0)
930 break;
931 read_buffer_.consume(bytes_consumed);
932 }
933
934 // Timeout on writes only
935    stream_.async_read_some(
936        read_buffer_.prepare(Tuning::readBufferBytes),
937        bind_executor(
938            strand_,
939            std::bind(
940                &PeerImp::onReadMessage,
941                shared_from_this(),
942                std::placeholders::_1,
943 std::placeholders::_2)));
944}
945
946void
947PeerImp::onWriteMessage(error_code ec, std::size_t bytes_transferred)
948{
949 if (!socket_.is_open())
950 return;
951 if (ec == boost::asio::error::operation_aborted)
952 return;
953 if (ec)
954 return fail("onWriteMessage", ec);
955 if (auto stream = journal_.trace())
956 {
957 if (bytes_transferred > 0)
958 stream << "onWriteMessage: " << bytes_transferred << " bytes";
959 else
960 stream << "onWriteMessage";
961 }
962
963 metrics_.sent.add_message(bytes_transferred);
964
965 XRPL_ASSERT(
966 !send_queue_.empty(),
967 "ripple::PeerImp::onWriteMessage : non-empty send buffer");
968 send_queue_.pop();
969 if (!send_queue_.empty())
970 {
971 // Timeout on writes only
972 return boost::asio::async_write(
973 stream_,
974 boost::asio::buffer(
975 send_queue_.front()->getBuffer(compressionEnabled_)),
976 bind_executor(
977 strand_,
978            std::bind(
979                &PeerImp::onWriteMessage,
980                shared_from_this(),
981                std::placeholders::_1,
982 std::placeholders::_2)));
983 }
984
985 if (gracefulClose_)
986 {
987 return stream_.async_shutdown(bind_executor(
988 strand_,
989            std::bind(
990                &PeerImp::onShutdown,
991                shared_from_this(),
992                std::placeholders::_1)));
993 }
994}
995
996//------------------------------------------------------------------------------
997//
998// ProtocolHandler
999//
1000//------------------------------------------------------------------------------
1001
1002void
1003PeerImp::onMessageUnknown(std::uint16_t type)
1004{
1005 // TODO
1006}
1007
1008void
1009PeerImp::onMessageBegin(
1010    std::uint16_t type,
1011    std::shared_ptr<::google::protobuf::Message> const& m,
1012    std::size_t size,
1013 std::size_t uncompressed_size,
1014 bool isCompressed)
1015{
1016 auto const name = protocolMessageName(type);
1019 auto const category = TrafficCount::categorize(*m, type, true);
1020 overlay_.reportTraffic(category, true, static_cast<int>(size));
1021 using namespace protocol;
1022 if ((type == MessageType::mtTRANSACTION ||
1023 type == MessageType::mtHAVE_TRANSACTIONS ||
1024 type == MessageType::mtTRANSACTIONS ||
1025 // GET_OBJECTS
1027 // GET_LEDGER
1030 // LEDGER_DATA
1034 {
1036 static_cast<MessageType>(type), static_cast<std::uint64_t>(size));
1037 }
1038 JLOG(journal_.trace()) << "onMessageBegin: " << type << " " << size << " "
1039 << uncompressed_size << " " << isCompressed;
1040}
1041
1042void
1043PeerImp::onMessageEnd(
1044    std::uint16_t,
1045    std::shared_ptr<::google::protobuf::Message> const&)
1046{
1047 load_event_.reset();
1049}
1050
1051void
1052PeerImp::onMessage(std::shared_ptr<protocol::TMManifests> const& m)
1053{
1054 auto const s = m->list_size();
1055
1056 if (s == 0)
1057 {
1059 return;
1060 }
1061
1062 if (s > 100)
1064
1065    app_.getJobQueue().addJob(
1066        jtMANIFEST, "receiveManifests", [this, that = shared_from_this(), m]() {
1067 overlay_.onManifests(m, that);
1068 });
1069}
1070
1071void
1072PeerImp::onMessage(std::shared_ptr<protocol::TMPing> const& m)
1073{
1074 if (m->type() == protocol::TMPing::ptPING)
1075 {
1076 // We have received a ping request, reply with a pong
1078 m->set_type(protocol::TMPing::ptPONG);
1079 send(std::make_shared<Message>(*m, protocol::mtPING));
1080 return;
1081 }
1082
1083 if (m->type() == protocol::TMPing::ptPONG && m->has_seq())
1084 {
1085 // Only reset the ping sequence if we actually received a
1086 // PONG with the correct cookie. That way, any peers which
1087 // respond with incorrect cookies will eventually time out.
1088 if (m->seq() == lastPingSeq_)
1089        {
1090            lastPingSeq_.reset();
1091
1092            // Update latency estimate
1093            auto const rtt = std::chrono::round<std::chrono::milliseconds>(
1094                clock_type::now() - lastPingTime_);
1095
1096            std::lock_guard sl(recentLock_);
1097
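            // Exponentially weighted moving average: keep 7/8 of the old
            // estimate and blend in 1/8 of the new round-trip sample.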
1098 if (latency_)
1099 latency_ = (*latency_ * 7 + rtt) / 8;
1100 else
1101 latency_ = rtt;
1102 }
1103
1104 return;
1105 }
1106}
1107
1108void
1109PeerImp::onMessage(std::shared_ptr<protocol::TMCluster> const& m)
1110{
1111 // VFALCO NOTE I think we should drop the peer immediately
1112 if (!cluster())
1113 {
1115 return;
1116 }
1117
1118 for (int i = 0; i < m->clusternodes().size(); ++i)
1119 {
1120 protocol::TMClusterNode const& node = m->clusternodes(i);
1121
1122        std::string name;
1123        if (node.has_nodename())
1124 name = node.nodename();
1125
1126 auto const publicKey =
1127 parseBase58<PublicKey>(TokenType::NodePublic, node.publickey());
1128
1129 // NIKB NOTE We should drop the peer immediately if
1130 // they send us a public key we can't parse
1131 if (publicKey)
1132 {
1133 auto const reportTime =
1134 NetClock::time_point{NetClock::duration{node.reporttime()}};
1135
1136            app_.cluster().update(
1137                *publicKey, name, node.nodeload(), reportTime);
1138 }
1139 }
1140
1141 int loadSources = m->loadsources().size();
1142 if (loadSources != 0)
1143 {
1144 Resource::Gossip gossip;
1145 gossip.items.reserve(loadSources);
1146 for (int i = 0; i < m->loadsources().size(); ++i)
1147 {
1148            protocol::TMLoadSource const& node = m->loadsources(i);
1149            Resource::Gossip::Item item;
1150            item.address = beast::IP::Endpoint::from_string(node.name());
1151 item.balance = node.cost();
1152 if (item.address != beast::IP::Endpoint())
1153 gossip.items.push_back(item);
1154        }
1155        overlay_.resourceManager().importConsumers(name(), gossip);
1156    }
1157
1158 // Calculate the cluster fee:
1159 auto const thresh = app_.timeKeeper().now() - 90s;
1160 std::uint32_t clusterFee = 0;
1161
1162    std::vector<std::uint32_t> fees;
1163    fees.reserve(app_.cluster().size());
1164
1165 app_.cluster().for_each([&fees, thresh](ClusterNode const& status) {
1166 if (status.getReportTime() >= thresh)
1167 fees.push_back(status.getLoadFee());
1168 });
1169
1170 if (!fees.empty())
1171 {
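        // Take the median of the recently reported cluster fees (the middle
        // element after a partial sort).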
1172 auto const index = fees.size() / 2;
1173 std::nth_element(fees.begin(), fees.begin() + index, fees.end());
1174 clusterFee = fees[index];
1175 }
1176
1177 app_.getFeeTrack().setClusterFee(clusterFee);
1178}
1179
1180void
1181PeerImp::onMessage(std::shared_ptr<protocol::TMEndpoints> const& m)
1182{
1183 // Don't allow endpoints from peers that are not known tracking or are
1184 // not using a version of the message that we support:
1185 if (tracking_.load() != Tracking::converged || m->version() != 2)
1186 return;
1187
1188 // The number is arbitrary and doesn't have any real significance or
1189 // implication for the protocol.
1190 if (m->endpoints_v2().size() >= 1024)
1191 {
1192 charge(Resource::feeInvalidData, "endpoints too large");
1193 return;
1194 }
1195
1196    std::vector<PeerFinder::Endpoint> endpoints;
1197    endpoints.reserve(m->endpoints_v2().size());
1198
1199 for (auto const& tm : m->endpoints_v2())
1200 {
1201 auto result = beast::IP::Endpoint::from_string_checked(tm.endpoint());
1202
1203 if (!result)
1204 {
1205 JLOG(p_journal_.error()) << "failed to parse incoming endpoint: {"
1206 << tm.endpoint() << "}";
1207 charge(Resource::feeInvalidData, "endpoints malformed");
1208 continue;
1209 }
1210
1211 // If hops == 0, this Endpoint describes the peer we are connected
1212 // to -- in that case, we take the remote address seen on the
1213 // socket and store that in the IP::Endpoint. If this is the first
1214 // time, then we'll verify that their listener can receive incoming
1215 // by performing a connectivity test. if hops > 0, then we just
1216 // take the address/port we were given
1217 if (tm.hops() == 0)
1218 result = remote_address_.at_port(result->port());
1219
1220 endpoints.emplace_back(*result, tm.hops());
1221 }
1222
1223 if (!endpoints.empty())
1224 overlay_.peerFinder().on_endpoints(slot_, endpoints);
1225}
1226
1227void
1228PeerImp::onMessage(std::shared_ptr<protocol::TMTransaction> const& m)
1229{
1230 handleTransaction(m, true, false);
1231}
1232
1233void
1234PeerImp::handleTransaction(
1235    std::shared_ptr<protocol::TMTransaction> const& m,
1236    bool eraseTxQueue,
1237 bool batch)
1238{
1239 XRPL_ASSERT(
1240 eraseTxQueue != batch,
1241 ("ripple::PeerImp::handleTransaction correct function params"));
1243 return;
1244
1245    if (app_.getOPs().isNeedNetworkLedger())
1246    {
1247        // If we've never been in sync, there's nothing we can do
1248 // with a transaction
1249 JLOG(p_journal_.debug())
1250 << "Ignoring incoming transaction: " << "Need network ledger";
1251 return;
1252 }
1253
1254 SerialIter sit(makeSlice(m->rawtransaction()));
1255
1256 try
1257 {
1258 auto stx = std::make_shared<STTx const>(sit);
1259 uint256 txID = stx->getTransactionID();
1260
1261 int flags;
1262 constexpr std::chrono::seconds tx_interval = 10s;
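        // shouldProcess() returns false if this transaction was already
        // handled recently (within tx_interval); flags carries the cached
        // state (e.g. SF_BAD).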
1263
1264 if (!app_.getHashRouter().shouldProcess(txID, id_, flags, tx_interval))
1265 {
1266 // we have seen this transaction recently
1267 if (flags & SF_BAD)
1268 {
1269 fee_.update(Resource::feeUselessData, "known bad");
1270 JLOG(p_journal_.debug()) << "Ignoring known bad tx " << txID;
1271 }
1272
1273 // Erase only if the server has seen this tx. If the server has not
1274            // seen this tx then the tx could not have been queued for this peer.
1275 else if (eraseTxQueue && txReduceRelayEnabled())
1276 removeTxQueue(txID);
1277
1278 return;
1279 }
1280
1281 JLOG(p_journal_.debug()) << "Got tx " << txID;
1282
1283 bool checkSignature = true;
1284 if (cluster())
1285 {
1286 if (!m->has_deferred() || !m->deferred())
1287 {
1288 // Skip local checks if a server we trust
1289 // put the transaction in its open ledger
1290 flags |= SF_TRUSTED;
1291 }
1292
1293 // for non-validator nodes only -- localPublicKey is set for
1294 // validators only
1296 {
1297 // For now, be paranoid and have each validator
1298 // check each transaction, regardless of source
1299 checkSignature = false;
1300 }
1301 }
1302
1304 {
1305 JLOG(p_journal_.trace())
1306 << "No new transactions until synchronized";
1307 }
1308 else if (
1311 {
1313 JLOG(p_journal_.info()) << "Transaction queue is full";
1314 }
1315 else
1316 {
1319 "recvTransaction->checkTransaction",
1321 flags,
1322 checkSignature,
1323 batch,
1324 stx]() {
1325 if (auto peer = weak.lock())
1326 peer->checkTransaction(
1327 flags, checkSignature, stx, batch);
1328 });
1329 }
1330 }
1331 catch (std::exception const& ex)
1332 {
1333 JLOG(p_journal_.warn())
1334 << "Transaction invalid: " << strHex(m->rawtransaction())
1335 << ". Exception: " << ex.what();
1336 }
1337}
1338
1339void
1340PeerImp::onMessage(std::shared_ptr<protocol::TMGetLedger> const& m)
1341{
1342 auto badData = [&](std::string const& msg) {
1343 charge(Resource::feeInvalidData, "get_ledger " + msg);
1344 JLOG(p_journal_.warn()) << "TMGetLedger: " << msg;
1345 };
1346 auto const itype{m->itype()};
1347
1348 // Verify ledger info type
1349 if (itype < protocol::liBASE || itype > protocol::liTS_CANDIDATE)
1350 return badData("Invalid ledger info type");
1351
1352 auto const ltype = [&m]() -> std::optional<::protocol::TMLedgerType> {
1353 if (m->has_ltype())
1354 return m->ltype();
1355 return std::nullopt;
1356 }();
1357
1358 if (itype == protocol::liTS_CANDIDATE)
1359 {
1360 if (!m->has_ledgerhash())
1361 return badData("Invalid TX candidate set, missing TX set hash");
1362 }
1363 else if (
1364 !m->has_ledgerhash() && !m->has_ledgerseq() &&
1365 !(ltype && *ltype == protocol::ltCLOSED))
1366 {
1367 return badData("Invalid request");
1368 }
1369
1370 // Verify ledger type
1371 if (ltype && (*ltype < protocol::ltACCEPTED || *ltype > protocol::ltCLOSED))
1372 return badData("Invalid ledger type");
1373
1374 // Verify ledger hash
1375 if (m->has_ledgerhash() && !stringIsUint256Sized(m->ledgerhash()))
1376 return badData("Invalid ledger hash");
1377
1378 // Verify ledger sequence
1379 if (m->has_ledgerseq())
1380 {
1381 auto const ledgerSeq{m->ledgerseq()};
1382
1383 // Check if within a reasonable range
1384        using namespace std::chrono_literals;
1385        if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s &&
1386            ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1387 {
1388 return badData(
1389 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1390 }
1391 }
1392
1393 // Verify ledger node IDs
1394 if (itype != protocol::liBASE)
1395 {
1396 if (m->nodeids_size() <= 0)
1397 return badData("Invalid ledger node IDs");
1398
1399 for (auto const& nodeId : m->nodeids())
1400 {
1401 if (deserializeSHAMapNodeID(nodeId) == std::nullopt)
1402 return badData("Invalid SHAMap node ID");
1403 }
1404 }
1405
1406 // Verify query type
1407 if (m->has_querytype() && m->querytype() != protocol::qtINDIRECT)
1408 return badData("Invalid query type");
1409
1410 // Verify query depth
1411 if (m->has_querydepth())
1412 {
1413 if (m->querydepth() > Tuning::maxQueryDepth ||
1414 itype == protocol::liBASE)
1415 {
1416 return badData("Invalid query depth");
1417 }
1418 }
1419
1420    // Queue a job to process the request
1421    std::weak_ptr<PeerImp> weak = shared_from_this();
1422    app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m]() {
1423 if (auto peer = weak.lock())
1424 peer->processLedgerRequest(m);
1425 });
1426}
1427
1428void
1429PeerImp::onMessage(std::shared_ptr<protocol::TMProofPathRequest> const& m)
1430{
1431    JLOG(p_journal_.trace()) << "onMessage, TMProofPathRequest";
1432    if (!ledgerReplayEnabled_)
1433    {
1434 charge(Resource::feeMalformedRequest, "proof_path_request disabled");
1435 return;
1436 }
1437
1438 fee_.update(
1439        Resource::feeModerateBurdenPeer, "received a proof path request");
1440    std::weak_ptr<PeerImp> weak = shared_from_this();
1441    app_.getJobQueue().addJob(
1442        jtREPLAY_REQ, "recvProofPathRequest", [weak, m]() {
1443 if (auto peer = weak.lock())
1444 {
1445 auto reply =
1446 peer->ledgerReplayMsgHandler_.processProofPathRequest(m);
1447 if (reply.has_error())
1448 {
1449 if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1450 peer->charge(
1451 Resource::feeMalformedRequest,
1452 "proof_path_request");
1453 else
1454 peer->charge(
1455 Resource::feeRequestNoReply, "proof_path_request");
1456 }
1457 else
1458 {
1459 peer->send(std::make_shared<Message>(
1460 reply, protocol::mtPROOF_PATH_RESPONSE));
1461 }
1462 }
1463 });
1464}
1465
1466void
1467PeerImp::onMessage(std::shared_ptr<protocol::TMProofPathResponse> const& m)
1468{
1469 if (!ledgerReplayEnabled_)
1470 {
1471 charge(Resource::feeMalformedRequest, "proof_path_response disabled");
1472 return;
1473 }
1474
1475 if (!ledgerReplayMsgHandler_.processProofPathResponse(m))
1476 {
1477 charge(Resource::feeInvalidData, "proof_path_response");
1478 }
1479}
1480
1481void
1482PeerImp::onMessage(std::shared_ptr<protocol::TMReplayDeltaRequest> const& m)
1483{
1484 JLOG(p_journal_.trace()) << "onMessage, TMReplayDeltaRequest";
1485 if (!ledgerReplayEnabled_)
1486 {
1487 charge(Resource::feeMalformedRequest, "replay_delta_request disabled");
1488 return;
1489 }
1490
1491 fee_.fee = Resource::feeModerateBurdenPeer;
1492 std::weak_ptr<PeerImp> weak = shared_from_this();
1493 app_.getJobQueue().addJob(
1494 jtREPLAY_REQ, "recvReplayDeltaRequest", [weak, m]() {
1495 if (auto peer = weak.lock())
1496 {
1497 auto reply =
1498 peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m);
1499 if (reply.has_error())
1500 {
1501 if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)
1502 peer->charge(
1503 Resource::feeMalformedRequest,
1504 "replay_delta_request");
1505 else
1506 peer->charge(
1507 Resource::feeRequestNoReply,
1508 "replay_delta_request");
1509 }
1510 else
1511 {
1512 peer->send(std::make_shared<Message>(
1513 reply, protocol::mtREPLAY_DELTA_RESPONSE));
1514 }
1515 }
1516 });
1517}
1518
1519void
1520PeerImp::onMessage(std::shared_ptr<protocol::TMReplayDeltaResponse> const& m)
1521{
1522 if (!ledgerReplayEnabled_)
1523 {
1524 charge(Resource::feeMalformedRequest, "replay_delta_response disabled");
1525 return;
1526 }
1527
1528 if (!ledgerReplayMsgHandler_.processReplayDeltaResponse(m))
1529 {
1530 charge(Resource::feeInvalidData, "replay_delta_response");
1531 }
1532}
1533
1534void
1535PeerImp::onMessage(std::shared_ptr<protocol::TMLedgerData> const& m)
1536{
1537 auto badData = [&](std::string const& msg) {
1538 fee_.update(Resource::feeInvalidData, msg);
1539 JLOG(p_journal_.warn()) << "TMLedgerData: " << msg;
1540 };
1541
1542 // Verify ledger hash
1543 if (!stringIsUint256Sized(m->ledgerhash()))
1544 return badData("Invalid ledger hash");
1545
1546 // Verify ledger sequence
1547 {
1548 auto const ledgerSeq{m->ledgerseq()};
1549 if (m->type() == protocol::liTS_CANDIDATE)
1550 {
1551 if (ledgerSeq != 0)
1552 {
1553 return badData(
1554 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1555 }
1556 }
1557 else
1558 {
1559 // Check if within a reasonable range
1560 using namespace std::chrono_literals;
1561 if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s &&
1562 ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)
1563 {
1564 return badData(
1565 "Invalid ledger sequence " + std::to_string(ledgerSeq));
1566 }
1567 }
1568 }
1569
1570 // Verify ledger info type
1571 if (m->type() < protocol::liBASE || m->type() > protocol::liTS_CANDIDATE)
1572 return badData("Invalid ledger info type");
1573
1574 // Verify reply error
1575 if (m->has_error() &&
1576 (m->error() < protocol::reNO_LEDGER ||
1577 m->error() > protocol::reBAD_REQUEST))
1578 {
1579 return badData("Invalid reply error");
1580 }
1581
1582 // Verify ledger nodes.
1583 if (m->nodes_size() <= 0 || m->nodes_size() > Tuning::hardMaxReplyNodes)
1584 {
1585 return badData(
1586 "Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size()));
1587 }
1588
1589 // If there is a request cookie, attempt to relay the message
1590 if (m->has_requestcookie())
1591 {
1592 if (auto peer = overlay_.findPeerByShortID(m->requestcookie()))
1593 {
1594 m->clear_requestcookie();
1595 peer->send(std::make_shared<Message>(*m, protocol::mtLEDGER_DATA));
1596 }
1597 else
1598 {
1599 JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
1600 }
1601 return;
1602 }
1603
1604 uint256 const ledgerHash{m->ledgerhash()};
1605
1606 // Otherwise check if received data for a candidate transaction set
1607 if (m->type() == protocol::liTS_CANDIDATE)
1608 {
1609 std::weak_ptr<PeerImp> weak{shared_from_this()};
1610 app_.getJobQueue().addJob(
1611 jtTXN_DATA, "recvPeerData", [weak, ledgerHash, m]() {
1612 if (auto peer = weak.lock())
1613 {
1614 peer->app_.getInboundTransactions().gotData(
1615 ledgerHash, peer, m);
1616 }
1617 });
1618 return;
1619 }
1620
1621 // Consume the message
1622 app_.getInboundLedgers().gotLedgerData(ledgerHash, shared_from_this(), m);
1623}
1624
1625void
1626PeerImp::onMessage(std::shared_ptr<protocol::TMProposeSet> const& m)
1627{
1628 protocol::TMProposeSet& set = *m;
1629
1630 auto const sig = makeSlice(set.signature());
1631
1632 // Preliminary check for the validity of the signature: A DER encoded
1633 // signature can't be longer than 72 bytes.
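    // (std::clamp returns sig.size() unchanged only when it is already in the
    // range [64, 72], so the comparison rejects out-of-range sizes.)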
1634 if ((std::clamp<std::size_t>(sig.size(), 64, 72) != sig.size()) ||
1635 (publicKeyType(makeSlice(set.nodepubkey())) != KeyType::secp256k1))
1636 {
1637 JLOG(p_journal_.warn()) << "Proposal: malformed";
1638 fee_.update(
1639 Resource::feeInvalidSignature,
1640 " signature can't be longer than 72 bytes");
1641 return;
1642 }
1643
1644 if (!stringIsUint256Sized(set.currenttxhash()) ||
1645 !stringIsUint256Sized(set.previousledger()))
1646 {
1647 JLOG(p_journal_.warn()) << "Proposal: malformed";
1648 fee_.update(Resource::feeMalformedRequest, "bad hashes");
1649 return;
1650 }
1651
1652 // RH TODO: when isTrusted = false we should probably also cache a key
1653 // suppression for 30 seconds to avoid doing a relatively expensive lookup
1654 // every time a spam packet is received
1655 PublicKey const publicKey{makeSlice(set.nodepubkey())};
1656 auto const isTrusted = app_.validators().trusted(publicKey);
1657
1658    // If the operator has specified that untrusted proposals be dropped, then
1659    // that happens here, i.e. before wasting further CPU verifying the
1660    // signature of an untrusted key.
1661 if (!isTrusted && app_.config().RELAY_UNTRUSTED_PROPOSALS == -1)
1662 return;
1663
1664 uint256 const proposeHash{set.currenttxhash()};
1665 uint256 const prevLedger{set.previousledger()};
1666
1667 NetClock::time_point const closeTime{NetClock::duration{set.closetime()}};
1668
1669 uint256 const suppression = proposalUniqueId(
1670 proposeHash,
1671 prevLedger,
1672 set.proposeseq(),
1673 closeTime,
1674 publicKey.slice(),
1675 sig);
1676
1677 if (auto [added, relayed] =
1678 app_.getHashRouter().addSuppressionPeerWithStatus(suppression, id_);
1679 !added)
1680 {
1681        // Count unique messages (Slots has its own 'HashRouter'), which a peer
1682 // receives within IDLED seconds since the message has been relayed.
1683 if (reduceRelayReady() && relayed &&
1684 (stopwatch().now() - *relayed) < reduce_relay::IDLED)
1685 overlay_.updateSlotAndSquelch(
1686 suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);
1687 JLOG(p_journal_.trace()) << "Proposal: duplicate";
1688 return;
1689 }
1690
1691 if (!isTrusted)
1692 {
1693 if (tracking_.load() == Tracking::diverged)
1694 {
1695 JLOG(p_journal_.debug())
1696 << "Proposal: Dropping untrusted (peer divergence)";
1697 return;
1698 }
1699
1700 if (!cluster() && app_.getFeeTrack().isLoadedLocal())
1701 {
1702 JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (load)";
1703 return;
1704 }
1705 }
1706
1707 JLOG(p_journal_.trace())
1708 << "Proposal: " << (isTrusted ? "trusted" : "untrusted");
1709
1710 auto proposal = RCLCxPeerPos(
1711 publicKey,
1712 sig,
1713        suppression,
1714        RCLCxPeerPos::Proposal{
1715            prevLedger,
1716 set.proposeseq(),
1717 proposeHash,
1718 closeTime,
1719 app_.timeKeeper().closeTime(),
1720 calcNodeID(app_.validatorManifests().getMasterKey(publicKey))});
1721
1722 std::weak_ptr<PeerImp> weak = shared_from_this();
1723 app_.getJobQueue().addJob(
1724 isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut,
1725 "recvPropose->checkPropose",
1726 [weak, isTrusted, m, proposal]() {
1727 if (auto peer = weak.lock())
1728 peer->checkPropose(isTrusted, m, proposal);
1729 });
1730}
1731
1732void
1733PeerImp::onMessage(std::shared_ptr<protocol::TMStatusChange> const& m)
1734{
1735 JLOG(p_journal_.trace()) << "Status: Change";
1736
1737 if (!m->has_networktime())
1738 m->set_networktime(app_.timeKeeper().now().time_since_epoch().count());
1739
1740 {
1741 std::lock_guard sl(recentLock_);
1742 if (!last_status_.has_newstatus() || m->has_newstatus())
1743 last_status_ = *m;
1744 else
1745 {
1746 // preserve old status
1747 protocol::NodeStatus status = last_status_.newstatus();
1748 last_status_ = *m;
1749 m->set_newstatus(status);
1750 }
1751 }
1752
1753 if (m->newevent() == protocol::neLOST_SYNC)
1754 {
1755 bool outOfSync{false};
1756 {
1757 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1758 // guarded by recentLock_.
1759 std::lock_guard sl(recentLock_);
1760 if (!closedLedgerHash_.isZero())
1761 {
1762 outOfSync = true;
1763 closedLedgerHash_.zero();
1764 }
1765 previousLedgerHash_.zero();
1766 }
1767 if (outOfSync)
1768 {
1769 JLOG(p_journal_.debug()) << "Status: Out of sync";
1770 }
1771 return;
1772 }
1773
1774 {
1775 uint256 closedLedgerHash{};
1776 bool const peerChangedLedgers{
1777 m->has_ledgerhash() && stringIsUint256Sized(m->ledgerhash())};
1778
1779 {
1780 // Operations on closedLedgerHash_ and previousLedgerHash_ must be
1781 // guarded by recentLock_.
1782 std::lock_guard sl(recentLock_);
1783 if (peerChangedLedgers)
1784 {
1785 closedLedgerHash_ = m->ledgerhash();
1786 closedLedgerHash = closedLedgerHash_;
1787 addLedger(closedLedgerHash, sl);
1788 }
1789 else
1790 {
1791 closedLedgerHash_.zero();
1792 }
1793
1794 if (m->has_ledgerhashprevious() &&
1795 stringIsUint256Sized(m->ledgerhashprevious()))
1796 {
1797 previousLedgerHash_ = m->ledgerhashprevious();
1798 addLedger(previousLedgerHash_, sl);
1799 }
1800 else
1801 {
1802 previousLedgerHash_.zero();
1803 }
1804 }
1805 if (peerChangedLedgers)
1806 {
1807 JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;
1808 }
1809 else
1810 {
1811 JLOG(p_journal_.debug()) << "Status: No ledger";
1812 }
1813 }
1814
1815 if (m->has_firstseq() && m->has_lastseq())
1816 {
1817 std::lock_guard sl(recentLock_);
1818
1819 minLedger_ = m->firstseq();
1820 maxLedger_ = m->lastseq();
1821
1822 if ((maxLedger_ < minLedger_) || (minLedger_ == 0) || (maxLedger_ == 0))
1823 minLedger_ = maxLedger_ = 0;
1824 }
1825
1826 if (m->has_ledgerseq() &&
1827 app_.getLedgerMaster().getValidatedLedgerAge() < 2min)
1828 {
1829 checkTracking(
1830 m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex());
1831 }
1832
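    // Publish this status change to any subscribers of the peer status stream.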
1833 app_.getOPs().pubPeerStatus([=, this]() -> Json::Value {
1834        Json::Value j(Json::objectValue);
1835
1836 if (m->has_newstatus())
1837 {
1838 switch (m->newstatus())
1839 {
1840 case protocol::nsCONNECTING:
1841 j[jss::status] = "CONNECTING";
1842 break;
1843 case protocol::nsCONNECTED:
1844 j[jss::status] = "CONNECTED";
1845 break;
1846 case protocol::nsMONITORING:
1847 j[jss::status] = "MONITORING";
1848 break;
1849 case protocol::nsVALIDATING:
1850 j[jss::status] = "VALIDATING";
1851 break;
1852 case protocol::nsSHUTTING:
1853 j[jss::status] = "SHUTTING";
1854 break;
1855 }
1856 }
1857
1858 if (m->has_newevent())
1859 {
1860 switch (m->newevent())
1861 {
1862 case protocol::neCLOSING_LEDGER:
1863 j[jss::action] = "CLOSING_LEDGER";
1864 break;
1865 case protocol::neACCEPTED_LEDGER:
1866 j[jss::action] = "ACCEPTED_LEDGER";
1867 break;
1868 case protocol::neSWITCHED_LEDGER:
1869 j[jss::action] = "SWITCHED_LEDGER";
1870 break;
1871 case protocol::neLOST_SYNC:
1872 j[jss::action] = "LOST_SYNC";
1873 break;
1874 }
1875 }
1876
1877 if (m->has_ledgerseq())
1878 {
1879 j[jss::ledger_index] = m->ledgerseq();
1880 }
1881
1882 if (m->has_ledgerhash())
1883 {
1884 uint256 closedLedgerHash{};
1885 {
1886 std::lock_guard sl(recentLock_);
1887 closedLedgerHash = closedLedgerHash_;
1888 }
1889 j[jss::ledger_hash] = to_string(closedLedgerHash);
1890 }
1891
1892 if (m->has_networktime())
1893 {
1894 j[jss::date] = Json::UInt(m->networktime());
1895 }
1896
1897 if (m->has_firstseq() && m->has_lastseq())
1898 {
1899 j[jss::ledger_index_min] = Json::UInt(m->firstseq());
1900 j[jss::ledger_index_max] = Json::UInt(m->lastseq());
1901 }
1902
1903 return j;
1904 });
1905}
1906
1907void
1908PeerImp::checkTracking(std::uint32_t validationSeq)
1909{
1910 std::uint32_t serverSeq;
1911 {
1912 // Extract the sequence number of the highest
1913 // ledger this peer has
1914 std::lock_guard sl(recentLock_);
1915
1916 serverSeq = maxLedger_;
1917 }
1918 if (serverSeq != 0)
1919 {
1920 // Compare the peer's ledger sequence to the
1921 // sequence of a recently-validated ledger
1922 checkTracking(serverSeq, validationSeq);
1923 }
1924}
1925
1926void
1927PeerImp::checkTracking(std::uint32_t seq1, std::uint32_t seq2)
1928{
1929 int diff = std::max(seq1, seq2) - std::min(seq1, seq2);
1930
1931 if (diff < Tuning::convergedLedgerLimit)
1932 {
1933 // The peer's ledger sequence is close to the validation's
1934 tracking_ = Tracking::converged;
1935 }
1936
1937 if ((diff > Tuning::divergedLedgerLimit) &&
1938 (tracking_.load() != Tracking::diverged))
1939 {
1940 // The peer's ledger sequence is way off the validation's
1941 std::lock_guard sl(recentLock_);
1942
1943 tracking_ = Tracking::diverged;
1944 trackingTime_ = clock_type::now();
1945 }
1946}
1947
1948void
1949PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactionSet> const& m)
1950{
1951 if (!stringIsUint256Sized(m->hash()))
1952 {
1953 fee_.update(Resource::feeMalformedRequest, "bad hash");
1954 return;
1955 }
1956
1957 uint256 const hash{m->hash()};
1958
1959 if (m->status() == protocol::tsHAVE)
1960 {
1961 std::lock_guard sl(recentLock_);
1962
1963 if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
1964 recentTxSets_.end())
1965 {
1966 fee_.update(Resource::feeUselessData, "duplicate (tsHAVE)");
1967 return;
1968 }
1969
1970 recentTxSets_.push_back(hash);
1971 }
1972}
1973
1974void
1975PeerImp::onValidatorListMessage(
1976 std::string const& messageType,
1977 std::string const& manifest,
1978 std::uint32_t version,
1979 std::vector<ValidatorBlobInfo> const& blobs)
1980{
1981 // If there are no blobs, the message is malformed (possibly because of
1982 // ValidatorList class rules), so charge accordingly and skip processing.
1983 if (blobs.empty())
1984 {
1985 JLOG(p_journal_.warn()) << "Ignored malformed " << messageType
1986 << " from peer " << remote_address_;
1987 // This shouldn't ever happen with a well-behaved peer
1988 fee_.update(Resource::feeHeavyBurdenPeer, "no blobs");
1989 return;
1990 }
1991
1992 auto const hash = sha512Half(manifest, blobs, version);
1993
1994 JLOG(p_journal_.debug())
1995 << "Received " << messageType << " from " << remote_address_.to_string()
1996 << " (" << id_ << ")";
1997
1998 if (!app_.getHashRouter().addSuppressionPeer(hash, id_))
1999 {
2000 JLOG(p_journal_.debug())
2001 << messageType << ": received duplicate " << messageType;
2002 // Charging this fee here won't hurt the peer in the normal
2003 // course of operation (ie. refresh every 5 minutes), but
2004 // will add up if the peer is misbehaving.
2005 fee_.update(Resource::feeUselessData, "duplicate");
2006 return;
2007 }
2008
2009 auto const applyResult = app_.validators().applyListsAndBroadcast(
2010 manifest,
2011 version,
2012 blobs,
2013 remote_address_.to_string(),
2014 hash,
2015 app_.overlay(),
2016 app_.getHashRouter(),
2017 app_.getOPs());
2018
2019 JLOG(p_journal_.debug())
2020 << "Processed " << messageType << " version " << version << " from "
2021 << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
2022 : "unknown or invalid publisher")
2023 << " from " << remote_address_.to_string() << " (" << id_
2024 << ") with best result " << to_string(applyResult.bestDisposition());
2025
2026 // Act based on the best result
2027 switch (applyResult.bestDisposition())
2028 {
2029 // New list
2030 case ListDisposition::accepted:
2031 // Newest list is expired, and that needs to be broadcast, too
2032 case ListDisposition::expired:
2033 // Future list
2034 case ListDisposition::pending: {
2035 std::lock_guard<std::mutex> sl(recentLock_);
2036
2037 XRPL_ASSERT(
2038 applyResult.publisherKey,
2039 "ripple::PeerImp::onValidatorListMessage : publisher key is "
2040 "set");
2041 auto const& pubKey = *applyResult.publisherKey;
2042#ifndef NDEBUG
2043 if (auto const iter = publisherListSequences_.find(pubKey);
2044 iter != publisherListSequences_.end())
2045 {
2046 XRPL_ASSERT(
2047 iter->second < applyResult.sequence,
2048 "ripple::PeerImp::onValidatorListMessage : lower sequence");
2049 }
2050#endif
2051 publisherListSequences_[pubKey] = applyResult.sequence;
2052 }
2053 break;
2054 case ListDisposition::same_sequence:
2055 case ListDisposition::known_sequence:
2056#ifndef NDEBUG
2057 {
2058 std::lock_guard<std::mutex> sl(recentLock_);
2059 XRPL_ASSERT(
2060 applyResult.sequence && applyResult.publisherKey,
2061 "ripple::PeerImp::onValidatorListMessage : nonzero sequence "
2062 "and set publisher key");
2063 XRPL_ASSERT(
2064 publisherListSequences_[*applyResult.publisherKey] <=
2065 applyResult.sequence,
2066 "ripple::PeerImp::onValidatorListMessage : maximum sequence");
2067 }
2068#endif // !NDEBUG
2069
2070 break;
2071 case ListDisposition::stale:
2072 case ListDisposition::untrusted:
2073 case ListDisposition::invalid:
2074 case ListDisposition::unsupported_version:
2075 break;
2076 default:
2077 UNREACHABLE(
2078 "ripple::PeerImp::onValidatorListMessage : invalid best list "
2079 "disposition");
2080 }
2081
2082 // Charge based on the worst result
2083 switch (applyResult.worstDisposition())
2084 {
2085 case ListDisposition::accepted:
2086 case ListDisposition::expired:
2087 case ListDisposition::pending:
2088 // No charges for good data
2089 break;
2090 case ListDisposition::same_sequence:
2091 case ListDisposition::known_sequence:
2092 // Charging this fee here won't hurt the peer in the normal
2093 // course of operation (ie. refresh every 5 minutes), but
2094 // will add up if the peer is misbehaving.
2095 fee_.update(
2096 Resource::feeUselessData,
2097 " duplicate (same_sequence or known_sequence)");
2098 break;
2099 case ListDisposition::stale:
2100 // There are very few good reasons for a peer to send an
2101 // old list, particularly more than once.
2102 fee_.update(Resource::feeInvalidData, "expired");
2103 break;
2104 case ListDisposition::untrusted:
2105 // Charging this fee here won't hurt the peer in the normal
2106 // course of operation (ie. refresh every 5 minutes), but
2107 // will add up if the peer is misbehaving.
2108 fee_.update(Resource::feeUselessData, "untrusted");
2109 break;
2110 case ListDisposition::invalid:
2111 // This shouldn't ever happen with a well-behaved peer
2112 fee_.update(
2113 Resource::feeInvalidSignature, "invalid list disposition");
2114 break;
2115 case ListDisposition::unsupported_version:
2116 // During a version transition, this may be legitimate.
2117 // If it happens frequently, that's probably bad.
2118 fee_.update(Resource::feeInvalidData, "version");
2119 break;
2120 default:
2121 UNREACHABLE(
2122 "ripple::PeerImp::onValidatorListMessage : invalid worst list "
2123 "disposition");
2124 }
2125
2126 // Log based on all the results.
2127 for (auto const& [disp, count] : applyResult.dispositions)
2128 {
2129 switch (disp)
2130 {
2131 // New list
2132 case ListDisposition::accepted:
2133 JLOG(p_journal_.debug())
2134 << "Applied " << count << " new " << messageType
2135 << "(s) from peer " << remote_address_;
2136 break;
2137 // Newest list is expired, and that needs to be broadcast, too
2138 case ListDisposition::expired:
2139 JLOG(p_journal_.debug())
2140 << "Applied " << count << " expired " << messageType
2141 << "(s) from peer " << remote_address_;
2142 break;
2143 // Future list
2144 case ListDisposition::pending:
2145 JLOG(p_journal_.debug())
2146 << "Processed " << count << " future " << messageType
2147 << "(s) from peer " << remote_address_;
2148 break;
2149 case ListDisposition::same_sequence:
2150 JLOG(p_journal_.warn())
2151 << "Ignored " << count << " " << messageType
2152 << "(s) with current sequence from peer "
2153 << remote_address_;
2154 break;
2155 case ListDisposition::known_sequence:
2156 JLOG(p_journal_.warn())
2157 << "Ignored " << count << " " << messageType
2158 << "(s) with future sequence from peer " << remote_address_;
2159 break;
2160 case ListDisposition::stale:
2161 JLOG(p_journal_.warn())
2162                    << "Ignored " << count << " stale " << messageType
2163 << "(s) from peer " << remote_address_;
2164 break;
2165 case ListDisposition::untrusted:
2166 JLOG(p_journal_.warn())
2167 << "Ignored " << count << " untrusted " << messageType
2168 << "(s) from peer " << remote_address_;
2169 break;
2170 case ListDisposition::unsupported_version:
2171 JLOG(p_journal_.warn())
2172                    << "Ignored " << count << " unsupported version "
2173 << messageType << "(s) from peer " << remote_address_;
2174 break;
2175 case ListDisposition::invalid:
2176 JLOG(p_journal_.warn())
2177                    << "Ignored " << count << " invalid " << messageType
2178 << "(s) from peer " << remote_address_;
2179 break;
2180 default:
2181 UNREACHABLE(
2182 "ripple::PeerImp::onValidatorListMessage : invalid list "
2183 "disposition");
2184 }
2185 }
2186}
2187
2188void
2189PeerImp::onMessage(std::shared_ptr<protocol::TMValidatorList> const& m)
2190{
2191 try
2192 {
2193 if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))
2194 {
2195 JLOG(p_journal_.debug())
2196 << "ValidatorList: received validator list from peer using "
2197 << "protocol version " << to_string(protocol_)
2198 << " which shouldn't support this feature.";
2199 fee_.update(Resource::feeUselessData, "unsupported peer");
2200 return;
2201 }
2202 onValidatorListMessage(
2203 "ValidatorList",
2204 m->manifest(),
2205 m->version(),
2206 ValidatorList::parseBlobs(*m));
2207 }
2208 catch (std::exception const& e)
2209 {
2210 JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what()
2211 << " from peer " << remote_address_;
2212 using namespace std::string_literals;
2213 fee_.update(Resource::feeInvalidData, e.what());
2214 }
2215}
2216
2217void
2218PeerImp::onMessage(
2219    std::shared_ptr<protocol::TMValidatorListCollection> const& m)
2220{
2221 try
2222 {
2223 if (!supportsFeature(ProtocolFeature::ValidatorList2Propagation))
2224 {
2225 JLOG(p_journal_.debug())
2226 << "ValidatorListCollection: received validator list from peer "
2227 << "using protocol version " << to_string(protocol_)
2228 << " which shouldn't support this feature.";
2229 fee_.update(Resource::feeUselessData, "unsupported peer");
2230 return;
2231 }
2232 else if (m->version() < 2)
2233 {
2234 JLOG(p_journal_.debug())
2235 << "ValidatorListCollection: received invalid validator list "
2236 "version "
2237 << m->version() << " from peer using protocol version "
2238 << to_string(protocol_);
2239 fee_.update(Resource::feeInvalidData, "wrong version");
2240 return;
2241 }
2242 onValidatorListMessage(
2243 "ValidatorListCollection",
2244 m->manifest(),
2245 m->version(),
2246 ValidatorList::parseBlobs(*m));
2247 }
2248 catch (std::exception const& e)
2249 {
2250 JLOG(p_journal_.warn()) << "ValidatorListCollection: Exception, "
2251 << e.what() << " from peer " << remote_address_;
2252 using namespace std::string_literals;
2253 fee_.update(Resource::feeInvalidData, e.what());
2254 }
2255}
2256
2257void
2258PeerImp::onMessage(std::shared_ptr<protocol::TMValidation> const& m)
2259{
2260 if (m->validation().size() < 50)
2261 {
2262 JLOG(p_journal_.warn()) << "Validation: Too small";
2263 fee_.update(Resource::feeMalformedRequest, "too small");
2264 return;
2265 }
2266
2267 try
2268 {
2269 auto const closeTime = app_.timeKeeper().closeTime();
2270
2271        std::shared_ptr<STValidation> val;
2272        {
2273 SerialIter sit(makeSlice(m->validation()));
2274 val = std::make_shared<STValidation>(
2275 std::ref(sit),
2276 [this](PublicKey const& pk) {
2277 return calcNodeID(
2278 app_.validatorManifests().getMasterKey(pk));
2279 },
2280 false);
2281 val->setSeen(closeTime);
2282 }
2283
2284 if (!isCurrent(
2285 app_.getValidations().parms(),
2286 app_.timeKeeper().closeTime(),
2287 val->getSignTime(),
2288 val->getSeenTime()))
2289 {
2290 JLOG(p_journal_.trace()) << "Validation: Not current";
2291 fee_.update(Resource::feeUselessData, "not current");
2292 return;
2293 }
2294
2295 // RH TODO: when isTrusted = false we should probably also cache a key
2296 // suppression for 30 seconds to avoid doing a relatively expensive
2297 // lookup every time a spam packet is received
2298 auto const isTrusted =
2299 app_.validators().trusted(val->getSignerPublic());
2300
2301        // If the operator has specified that untrusted validations be
2302        // dropped, then that happens here, i.e. before wasting further CPU
2303        // verifying the signature of an untrusted key.
2304 if (!isTrusted && app_.config().RELAY_UNTRUSTED_VALIDATIONS == -1)
2305 return;
2306
2307 auto key = sha512Half(makeSlice(m->validation()));
2308
2309 if (auto [added, relayed] =
2310 app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);
2311 !added)
2312 {
2313 // Count unique messages (Slots has its own 'HashRouter') that a
2314 // peer receives within IDLED seconds of the message having been
2315 // relayed. Wait WAIT_ON_BOOTUP time to let the server establish
2316 // connections to peers.
2317 if (reduceRelayReady() && relayed &&
2318 (stopwatch().now() - *relayed) < reduce_relay::IDLED)
2319 overlay_.updateSlotAndSquelch(
2320 key, val->getSignerPublic(), id_, protocol::mtVALIDATION);
2321 JLOG(p_journal_.trace()) << "Validation: duplicate";
2322 return;
2323 }
2324
2325 if (!isTrusted && (tracking_.load() == Tracking::diverged))
2326 {
2327 JLOG(p_journal_.debug())
2328 << "Dropping untrusted validation from diverged peer";
2329 }
2330 else if (isTrusted || !app_.getFeeTrack().isLoadedLocal())
2331 {
2332 std::string const name = [isTrusted, val]() {
2333 std::string ret =
2334 isTrusted ? "Trusted validation" : "Untrusted validation";
2335
2336#ifdef DEBUG
2337 ret += " " +
2338 std::to_string(val->getFieldU32(sfLedgerSequence)) + ": " +
2339 to_string(val->getNodeID());
2340#endif
2341
2342 return ret;
2343 }();
2344
2345 std::weak_ptr<PeerImp> weak = shared_from_this();
2346 app_.getJobQueue().addJob(
2347 isTrusted ? jtVALIDATION_t : jtVALIDATION_ut,
2348 name,
2349 [weak, val, m, key]() {
2350 if (auto peer = weak.lock())
2351 peer->checkValidation(val, key, m);
2352 });
2353 }
2354 else
2355 {
2356 JLOG(p_journal_.debug())
2357 << "Dropping untrusted validation for load";
2358 }
2359 }
2360 catch (std::exception const& e)
2361 {
2362 JLOG(p_journal_.warn())
2363 << "Exception processing validation: " << e.what();
2364 using namespace std::string_literals;
2365 fee_.update(Resource::feeMalformedRequest, e.what());
2366 }
2367}
2368
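// TMGetObjectByHash is used in both directions. When query() is set the
// peer is asking us for objects (fetch packs, transactions, or arbitrary
// node-store objects); otherwise the message is a reply carrying fetch
// pack data that is handed to the LedgerMaster.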
2369void
2370PeerImp::onMessage(std::shared_ptr<protocol::TMGetObjectByHash> const& m)
2371{
2372 protocol::TMGetObjectByHash& packet = *m;
2373
2374 JLOG(p_journal_.trace()) << "received TMGetObjectByHash " << packet.type()
2375 << " " << packet.objects_size();
2376
2377 if (packet.query())
2378 {
2379 // this is a query
2380 if (send_queue_.size() >= Tuning::dropSendQueue)
2381 {
2382 JLOG(p_journal_.debug()) << "GetObject: Large send queue";
2383 return;
2384 }
2385
2386 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2387 {
2388 doFetchPack(m);
2389 return;
2390 }
2391
2392 if (packet.type() == protocol::TMGetObjectByHash::otTRANSACTIONS)
2393 {
2394 if (!txReduceRelayEnabled())
2395 {
2396 JLOG(p_journal_.error())
2397 << "TMGetObjectByHash: tx reduce-relay is disabled";
2398 fee_.update(Resource::feeMalformedRequest, "disabled");
2399 return;
2400 }
2401
2402 std::weak_ptr<PeerImp> weak = shared_from_this();
2403 app_.getJobQueue().addJob(
2404 jtREQUESTED_TXN, "doTransactions", [weak, m]() {
2405 if (auto peer = weak.lock())
2406 peer->doTransactions(m);
2407 });
2408 return;
2409 }
2410
2411 fee_.update(
2412 Resource::feeModerateBurdenPeer,
2413 " received a get object by hash request");
2414
2415 protocol::TMGetObjectByHash reply;
2416
2417 reply.set_query(false);
2418
2419 if (packet.has_seq())
2420 reply.set_seq(packet.seq());
2421
2422 reply.set_type(packet.type());
2423
2424 if (packet.has_ledgerhash())
2425 {
2426 if (!stringIsUint256Sized(packet.ledgerhash()))
2427 {
2428 fee_.update(Resource::feeMalformedRequest, "ledger hash");
2429 return;
2430 }
2431
2432 reply.set_ledgerhash(packet.ledgerhash());
2433 }
2434
2435 // This is a very minimal implementation
2436 for (int i = 0; i < packet.objects_size(); ++i)
2437 {
2438 auto const& obj = packet.objects(i);
2439 if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2440 {
2441 uint256 const hash{obj.hash()};
2442 // VFALCO TODO Move this someplace more sensible so we don't
2443 // need to inject the NodeStore interfaces.
2444 std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
2445 auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
2446 if (nodeObject)
2447 {
2448 protocol::TMIndexedObject& newObj = *reply.add_objects();
2449 newObj.set_hash(hash.begin(), hash.size());
2450 newObj.set_data(
2451 &nodeObject->getData().front(),
2452 nodeObject->getData().size());
2453
2454 if (obj.has_nodeid())
2455 newObj.set_index(obj.nodeid());
2456 if (obj.has_ledgerseq())
2457 newObj.set_ledgerseq(obj.ledgerseq());
2458
2459 // VFALCO NOTE "seq" in the message is obsolete
2460 }
2461 }
2462 }
2463
2464 JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size() << " of "
2465 << packet.objects_size();
2466 send(std::make_shared<Message>(reply, protocol::mtGET_OBJECTS));
2467 }
2468 else
2469 {
2470 // this is a reply
2471 std::uint32_t pLSeq = 0;
2472 bool pLDo = true;
2473 bool progress = false;
2474
2475 for (int i = 0; i < packet.objects_size(); ++i)
2476 {
2477 const protocol::TMIndexedObject& obj = packet.objects(i);
2478
2479 if (obj.has_hash() && stringIsUint256Sized(obj.hash()))
2480 {
2481 if (obj.has_ledgerseq())
2482 {
2483 if (obj.ledgerseq() != pLSeq)
2484 {
2485 if (pLDo && (pLSeq != 0))
2486 {
2487 JLOG(p_journal_.debug())
2488 << "GetObj: Full fetch pack for " << pLSeq;
2489 }
2490 pLSeq = obj.ledgerseq();
2491 pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);
2492
2493 if (!pLDo)
2494 {
2495 JLOG(p_journal_.debug())
2496 << "GetObj: Late fetch pack for " << pLSeq;
2497 }
2498 else
2499 progress = true;
2500 }
2501 }
2502
2503 if (pLDo)
2504 {
2505 uint256 const hash{obj.hash()};
2506
2507 app_.getLedgerMaster().addFetchPack(
2508 hash,
2509 std::make_shared<Blob>(
2510 obj.data().begin(), obj.data().end()));
2511 }
2512 }
2513 }
2514
2515 if (pLDo && (pLSeq != 0))
2516 {
2517 JLOG(p_journal_.debug())
2518 << "GetObj: Partial fetch pack for " << pLSeq;
2519 }
2520 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2521 app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
2522 }
2523}
2524
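// Part of transaction reduce-relay: a peer advertises transaction hashes
// it holds, and handleHaveTransactions() (run on the job queue) requests
// only the transactions missing from our cache via TMGetObjectByHash.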
2525void
2526PeerImp::onMessage(std::shared_ptr<protocol::TMHaveTransactions> const& m)
2527{
2528 if (!txReduceRelayEnabled())
2529 {
2530 JLOG(p_journal_.error())
2531 << "TMHaveTransactions: tx reduce-relay is disabled";
2532 fee_.update(Resource::feeMalformedRequest, "disabled");
2533 return;
2534 }
2535
2536 std::weak_ptr<PeerImp> weak = shared_from_this();
2537 app_.getJobQueue().addJob(
2538 jtMISSING_TXN, "handleHaveTransactions", [weak, m]() {
2539 if (auto peer = weak.lock())
2540 peer->handleHaveTransactions(m);
2541 });
2542}
2543
2544void
2545PeerImp::handleHaveTransactions(
2546 std::shared_ptr<protocol::TMHaveTransactions> const& m)
2547{
2548 protocol::TMGetObjectByHash tmBH;
2549 tmBH.set_type(protocol::TMGetObjectByHash_ObjectType_otTRANSACTIONS);
2550 tmBH.set_query(true);
2551
2552 JLOG(p_journal_.trace())
2553 << "received TMHaveTransactions " << m->hashes_size();
2554
2555 for (std::uint32_t i = 0; i < m->hashes_size(); i++)
2556 {
2557 if (!stringIsUint256Sized(m->hashes(i)))
2558 {
2559 JLOG(p_journal_.error())
2560 << "TMHaveTransactions with invalid hash size";
2561 fee_.update(Resource::feeMalformedRequest, "hash size");
2562 return;
2563 }
2564
2565 uint256 hash(m->hashes(i));
2566
2567 auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2568
2569 JLOG(p_journal_.trace()) << "checking transaction " << (bool)txn;
2570
2571 if (!txn)
2572 {
2573 JLOG(p_journal_.debug()) << "adding transaction to request";
2574
2575 auto obj = tmBH.add_objects();
2576 obj->set_hash(hash.data(), hash.size());
2577 }
2578 else
2579 {
2580 // Erase only if the peer has seen this tx. If the peer has not
2581 // seen this tx then the tx could not have been queued for this
2582 // peer.
2583 removeTxQueue(hash);
2584 }
2585 }
2586
2587 JLOG(p_journal_.trace())
2588 << "transaction request object is " << tmBH.objects_size();
2589
2590 if (tmBH.objects_size() > 0)
2591 send(std::make_shared<Message>(tmBH, protocol::mtGET_OBJECTS));
2592}
2593
2594void
2595PeerImp::onMessage(std::shared_ptr<protocol::TMTransactions> const& m)
2596{
2597 if (!txReduceRelayEnabled())
2598 {
2599 JLOG(p_journal_.error())
2600 << "TMTransactions: tx reduce-relay is disabled";
2601 fee_.update(Resource::feeMalformedRequest, "disabled");
2602 return;
2603 }
2604
2605 JLOG(p_journal_.trace())
2606 << "received TMTransactions " << m->transactions_size();
2607
2608 overlay_.addTxMetrics(m->transactions_size());
2609
2610 for (std::uint32_t i = 0; i < m->transactions_size(); ++i)
2611 handleTransaction(
2612 std::shared_ptr<protocol::TMTransaction>(
2613 m->mutable_transactions(i), [](protocol::TMTransaction*) {}),
2614 false,
2615 true);
2616}
2617
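// Squelch requests are processed on the strand. Only squelches naming a
// listed validator key are honored, squelches for our own validation key
// are ignored, and a duration rejected by addSquelch results in a fee
// charge.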
2618void
2619PeerImp::onMessage(std::shared_ptr<protocol::TMSquelch> const& m)
2620{
2621 using on_message_fn =
2622 void (PeerImp::*)(std::shared_ptr<protocol::TMSquelch> const&);
2623 if (!strand_.running_in_this_thread())
2624 return post(
2625 strand_,
2626 std::bind(
2627 (on_message_fn)&PeerImp::onMessage, shared_from_this(), m));
2628
2629 if (!m->has_validatorpubkey())
2630 {
2631 charge(Resource::feeInvalidData, "squelch no pubkey");
2632 return;
2633 }
2634 auto validator = m->validatorpubkey();
2635 auto const slice{makeSlice(validator)};
2636 if (!publicKeyType(slice))
2637 {
2638 charge(Resource::feeInvalidData, "squelch bad pubkey");
2639 return;
2640 }
2641 PublicKey key(slice);
2642
2643 // Ignore non-validator squelch
2644 if (!app_.validators().listed(key))
2645 {
2646 charge(Resource::feeInvalidData, "squelch non-validator");
2647 JLOG(p_journal_.debug())
2648 << "onMessage: TMSquelch discarding non-validator squelch "
2649 << slice;
2650 return;
2651 }
2652
2653 // Ignore the squelch for validator's own messages.
2654 if (key == app_.getValidationPublicKey())
2655 {
2656 JLOG(p_journal_.debug())
2657 << "onMessage: TMSquelch discarding validator's squelch " << slice;
2658 return;
2659 }
2660
2661 std::uint32_t duration =
2662 m->has_squelchduration() ? m->squelchduration() : 0;
2663 if (!m->squelch())
2664 squelch_.removeSquelch(key);
2665 else if (!squelch_.addSquelch(key, std::chrono::seconds{duration}))
2666 charge(Resource::feeInvalidData, "squelch duration");
2667
2668 JLOG(p_journal_.debug())
2669 << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;
2670}
2671
2672//--------------------------------------------------------------------------
2673
2674void
2675PeerImp::addLedger(
2676 uint256 const& hash,
2677 std::lock_guard<std::mutex> const& lockedRecentLock)
2678{
2679 // lockedRecentLock is passed as a reminder that recentLock_ must be
2680 // locked by the caller.
2681 (void)lockedRecentLock;
2682
2683 if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
2684 recentLedgers_.end())
2685 return;
2686
2687 recentLedgers_.push_back(hash);
2688}
2689
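// Building a fetch pack is comparatively expensive, so the request is
// refused when the server is under load and otherwise deferred to a
// jtPACK job handled by the LedgerMaster.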
2690void
2691PeerImp::doFetchPack(const std::shared_ptr<protocol::TMGetObjectByHash>& packet)
2692{
2693 // VFALCO TODO Invert this dependency using an observer and shared state
2694 // object. Don't queue fetch pack jobs if we're under load or we already
2695 // have some queued.
2696 if (app_.getFeeTrack().isLoadedLocal() ||
2697 (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
2698 (app_.getJobQueue().getJobCount(jtPACK) > 10))
2699 {
2700 JLOG(p_journal_.info()) << "Too busy to make fetch pack";
2701 return;
2702 }
2703
2704 if (!stringIsUint256Sized(packet->ledgerhash()))
2705 {
2706 JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
2707 fee_.update(Resource::feeMalformedRequest, "hash size");
2708 return;
2709 }
2710
2711 fee_.fee = Resource::feeHeavyBurdenPeer;
2712
2713 uint256 const hash{packet->ledgerhash()};
2714
2715 std::weak_ptr<PeerImp> weak = shared_from_this();
2716 auto elapsed = UptimeClock::now();
2717 auto const pap = &app_;
2718 app_.getJobQueue().addJob(
2719 jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed]() {
2720 pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
2721 });
2722}
2723
2724void
2725PeerImp::doTransactions(
2727{
2728 protocol::TMTransactions reply;
2729
2730 JLOG(p_journal_.trace()) << "received TMGetObjectByHash requesting tx "
2731 << packet->objects_size();
2732
2733 if (packet->objects_size() > reduce_relay::MAX_TX_QUEUE_SIZE)
2734 {
2735 JLOG(p_journal_.error()) << "doTransactions, invalid number of hashes";
2736 fee_.update(Resource::feeMalformedRequest, "too big");
2737 return;
2738 }
2739
2740 for (std::uint32_t i = 0; i < packet->objects_size(); ++i)
2741 {
2742 auto const& obj = packet->objects(i);
2743
2744 if (!stringIsUint256Sized(obj.hash()))
2745 {
2746 fee_.update(Resource::feeMalformedRequest, "hash size");
2747 return;
2748 }
2749
2750 uint256 hash(obj.hash());
2751
2752 auto txn = app_.getMasterTransaction().fetch_from_cache(hash);
2753
2754 if (!txn)
2755 {
2756 JLOG(p_journal_.error()) << "doTransactions, transaction not found "
2757 << Slice(hash.data(), hash.size());
2758 fee_.update(Resource::feeMalformedRequest, "tx not found");
2759 return;
2760 }
2761
2762 Serializer s;
2763 auto tx = reply.add_transactions();
2764 auto sttx = txn->getSTransaction();
2765 sttx->add(s);
2766 tx->set_rawtransaction(s.data(), s.size());
2767 tx->set_status(
2768 txn->getStatus() == INCLUDED ? protocol::tsCURRENT
2769 : protocol::tsNEW);
2770 tx->set_receivetimestamp(
2771 app_.timeKeeper().now().time_since_epoch().count());
2772 tx->set_deferred(txn->getSubmitResult().queued);
2773 }
2774
2775 if (reply.transactions_size() > 0)
2776 send(std::make_shared<Message>(reply, protocol::mtTRANSACTIONS));
2777}
2778
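// Job-queue callback for received transactions. It drops transactions
// whose LastLedgerSequence has already passed, caches pseudo-transactions
// in the TransactionMaster without relaying them further, verifies the
// signature unless it was already checked upstream, and forwards
// acceptable transactions to NetworkOPs for processing.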
2779void
2780PeerImp::checkTransaction(
2781 int flags,
2782 bool checkSignature,
2783 std::shared_ptr<STTx const> const& stx,
2784 bool batch)
2785{
2786 // VFALCO TODO Rewrite to not use exceptions
2787 try
2788 {
2789 // Expired?
2790 if (stx->isFieldPresent(sfLastLedgerSequence) &&
2791 (stx->getFieldU32(sfLastLedgerSequence) <
2792 app_.getLedgerMaster().getValidLedgerIndex()))
2793 {
2794 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2795 charge(Resource::feeUselessData, "expired tx");
2796 return;
2797 }
2798
2799 if (isPseudoTx(*stx))
2800 {
2801 // Don't do anything with pseudo transactions except put them in the
2802 // TransactionMaster cache
2803 std::string reason;
2804 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2805 XRPL_ASSERT(
2806 tx->getStatus() == NEW,
2807 "ripple::PeerImp::checkTransaction Transaction created "
2808 "correctly");
2809 if (tx->getStatus() == NEW)
2810 {
2811 JLOG(p_journal_.debug())
2812 << "Processing " << (batch ? "batch" : "unsolicited")
2813 << " pseudo-transaction tx " << tx->getID();
2814
2815 app_.getMasterTransaction().canonicalize(&tx);
2816 // Tell the overlay about it, but don't relay it.
2817 auto const toSkip =
2818 app_.getHashRouter().shouldRelay(tx->getID());
2819 if (toSkip)
2820 {
2821 JLOG(p_journal_.debug())
2822 << "Passing skipped pseudo-transaction tx "
2823 << tx->getID();
2824 app_.overlay().relay(tx->getID(), {}, *toSkip);
2825 }
2826 if (!batch)
2827 {
2828 JLOG(p_journal_.debug())
2829 << "Charging for pseudo-transaction tx " << tx->getID();
2830 charge(Resource::feeUselessData, "pseudo tx");
2831 }
2832
2833 return;
2834 }
2835 }
2836
2837 if (checkSignature)
2838 {
2839 // Check the signature before handing off to the job queue.
2840 if (auto [valid, validReason] = checkValidity(
2841 app_.getHashRouter(),
2842 *stx,
2843 app_.getLedgerMaster().getValidatedRules(),
2844 app_.config());
2845 valid != Validity::Valid)
2846 {
2847 if (!validReason.empty())
2848 {
2849 JLOG(p_journal_.trace())
2850 << "Exception checking transaction: " << validReason;
2851 }
2852
2853 // Probably not necessary to set SF_BAD, but doesn't hurt.
2854 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2855 charge(
2856 Resource::feeInvalidSignature,
2857 "check transaction signature failure");
2858 return;
2859 }
2860 }
2861 else
2862 {
2863 forceValidity(
2864 app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);
2865 }
2866
2867 std::string reason;
2868 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2869
2870 if (tx->getStatus() == INVALID)
2871 {
2872 if (!reason.empty())
2873 {
2874 JLOG(p_journal_.trace())
2875 << "Exception checking transaction: " << reason;
2876 }
2877 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2878 charge(Resource::feeInvalidSignature, "tx (impossible)");
2879 return;
2880 }
2881
2882 bool const trusted(flags & SF_TRUSTED);
2883 app_.getOPs().processTransaction(
2884 tx, trusted, false, NetworkOPs::FailHard::no);
2885 }
2886 catch (std::exception const& ex)
2887 {
2888 JLOG(p_journal_.warn())
2889 << "Exception in " << __func__ << ": " << ex.what();
2890 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2891 using namespace std::string_literals;
2892 charge(Resource::feeInvalidData, "tx "s + ex.what());
2893 }
2894}
2895
2896// Called from our JobQueue
2897void
2898PeerImp::checkPropose(
2899 bool isTrusted,
2900 std::shared_ptr<protocol::TMProposeSet> const& packet,
2901 RCLCxPeerPos peerPos)
2902{
2903 JLOG(p_journal_.trace())
2904 << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";
2905
2906 XRPL_ASSERT(packet, "ripple::PeerImp::checkPropose : non-null packet");
2907
2908 if (!cluster() && !peerPos.checkSign())
2909 {
2910 std::string desc{"Proposal fails sig check"};
2911 JLOG(p_journal_.warn()) << desc;
2912 charge(Resource::feeInvalidSignature, desc);
2913 return;
2914 }
2915
2916 bool relay;
2917
2918 if (isTrusted)
2919 relay = app_.getOPs().processTrustedProposal(peerPos);
2920 else
2921 relay = app_.config().RELAY_UNTRUSTED_PROPOSALS == 1 || cluster();
2922
2923 if (relay)
2924 {
2925 // haveMessage contains the peers that are suppressed, i.e. the peers
2926 // that are the source of the message; consequently the message should
2927 // not be relayed to these peers, but it must still be counted as part
2928 // of the squelch logic.
2929 auto haveMessage = app_.overlay().relay(
2930 *packet, peerPos.suppressionID(), peerPos.publicKey());
2931 if (reduceRelayReady() && !haveMessage.empty())
2932 overlay_.updateSlotAndSquelch(
2933 peerPos.suppressionID(),
2934 peerPos.publicKey(),
2935 std::move(haveMessage),
2936 protocol::mtPROPOSE_LEDGER);
2937 }
2938}
2939
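// Job-queue callback for received validations: the signature is verified,
// the validation is handed to NetworkOPs, and if accepted (or if the peer
// is a cluster member) it is relayed to peers that have not already seen
// it, feeding the squelch bookkeeping.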
2940void
2941PeerImp::checkValidation(
2942 std::shared_ptr<STValidation> const& val,
2943 uint256 const& key,
2944 std::shared_ptr<protocol::TMValidation> const& packet)
2945{
2946 if (!val->isValid())
2947 {
2948 std::string desc{"Validation forwarded by peer is invalid"};
2949 JLOG(p_journal_.debug()) << desc;
2950 charge(Resource::feeInvalidSignature, desc);
2951 return;
2952 }
2953
2954 // FIXME it should be safe to remove this try/catch. Investigate codepaths.
2955 try
2956 {
2957 if (app_.getOPs().recvValidation(val, std::to_string(id())) ||
2958 cluster())
2959 {
2960 // haveMessage contains the peers that are suppressed, i.e. the peers
2961 // that are the source of the message; consequently the message should
2962 // not be relayed to these peers, but it must still be counted as part
2963 // of the squelch logic.
2964 auto haveMessage =
2965 overlay_.relay(*packet, key, val->getSignerPublic());
2966 if (reduceRelayReady() && !haveMessage.empty())
2967 {
2968 overlay_.updateSlotAndSquelch(
2969 key,
2970 val->getSignerPublic(),
2971 std::move(haveMessage),
2972 protocol::mtVALIDATION);
2973 }
2974 }
2975 }
2976 catch (std::exception const& ex)
2977 {
2978 JLOG(p_journal_.trace())
2979 << "Exception processing validation: " << ex.what();
2980 using namespace std::string_literals;
2981 charge(Resource::feeMalformedRequest, "validation "s + ex.what());
2982 }
2983}
2984
2985// Returns the best-scoring peer that can help us get
2986// the TX tree with the specified root hash.
2987//
2988static std::shared_ptr<PeerImp>
2989getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip)
2990{
2991 std::shared_ptr<PeerImp> ret;
2992 int retScore = 0;
2993
2994 ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
2995 if (p->hasTxSet(rootHash) && p.get() != skip)
2996 {
2997 auto score = p->getScore(true);
2998 if (!ret || (score > retScore))
2999 {
3000 ret = std::move(p);
3001 retScore = score;
3002 }
3003 }
3004 });
3005
3006 return ret;
3007}
3008
3009// Returns a random peer weighted by how likely it is to
3010// have the ledger and how responsive it is.
3011//
3012static std::shared_ptr<PeerImp>
3013getPeerWithLedger(
3014 OverlayImpl& ov,
3015 uint256 const& ledgerHash,
3016 LedgerIndex ledger,
3017 PeerImp const* skip)
3018{
3019 std::shared_ptr<PeerImp> ret;
3020 int retScore = 0;
3021
3022 ov.for_each([&](std::shared_ptr<PeerImp>&& p) {
3023 if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)
3024 {
3025 auto score = p->getScore(true);
3026 if (!ret || (score > retScore))
3027 {
3028 ret = std::move(p);
3029 retScore = score;
3030 }
3031 }
3032 });
3033
3034 return ret;
3035}
3036
3037void
3038PeerImp::sendLedgerBase(
3039 std::shared_ptr<Ledger const> const& ledger,
3040 protocol::TMLedgerData& ledgerData)
3041{
3042 JLOG(p_journal_.trace()) << "sendLedgerBase: Base data";
3043
3044 Serializer s(sizeof(LedgerInfo));
3045 addRaw(ledger->info(), s);
3046 ledgerData.add_nodes()->set_nodedata(s.getDataPtr(), s.getLength());
3047
3048 auto const& stateMap{ledger->stateMap()};
3049 if (stateMap.getHash() != beast::zero)
3050 {
3051 // Return account state root node if possible
3052 Serializer root(768);
3053
3054 stateMap.serializeRoot(root);
3055 ledgerData.add_nodes()->set_nodedata(
3056 root.getDataPtr(), root.getLength());
3057
3058 if (ledger->info().txHash != beast::zero)
3059 {
3060 auto const& txMap{ledger->txMap()};
3061 if (txMap.getHash() != beast::zero)
3062 {
3063 // Return TX root node if possible
3064 root.erase();
3065 txMap.serializeRoot(root);
3066 ledgerData.add_nodes()->set_nodedata(
3067 root.getDataPtr(), root.getLength());
3068 }
3069 }
3070 }
3071
3072 auto message{
3073 std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA)};
3074 send(message);
3075}
3076
3077std::shared_ptr<Ledger const>
3078PeerImp::getLedger(std::shared_ptr<protocol::TMGetLedger> const& m)
3079{
3080 JLOG(p_journal_.trace()) << "getLedger: Ledger";
3081
3082 std::shared_ptr<Ledger const> ledger;
3083
3084 if (m->has_ledgerhash())
3085 {
3086 // Attempt to find ledger by hash
3087 uint256 const ledgerHash{m->ledgerhash()};
3088 ledger = app_.getLedgerMaster().getLedgerByHash(ledgerHash);
3089 if (!ledger)
3090 {
3091 JLOG(p_journal_.trace())
3092 << "getLedger: Don't have ledger with hash " << ledgerHash;
3093
3094 if (m->has_querytype() && !m->has_requestcookie())
3095 {
3096 // Attempt to relay the request to a peer
3097 if (auto const peer = getPeerWithLedger(
3098 overlay_,
3099 ledgerHash,
3100 m->has_ledgerseq() ? m->ledgerseq() : 0,
3101 this))
3102 {
3103 m->set_requestcookie(id());
3104 peer->send(
3105 std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3106 JLOG(p_journal_.debug())
3107 << "getLedger: Request relayed to peer";
3108 return ledger;
3109 }
3110
3111 JLOG(p_journal_.trace())
3112 << "getLedger: Failed to find peer to relay request";
3113 }
3114 }
3115 }
3116 else if (m->has_ledgerseq())
3117 {
3118 // Attempt to find ledger by sequence
3119 if (m->ledgerseq() < app_.getLedgerMaster().getEarliestFetch())
3120 {
3121 JLOG(p_journal_.debug())
3122 << "getLedger: Early ledger sequence request";
3123 }
3124 else
3125 {
3126 ledger = app_.getLedgerMaster().getLedgerBySeq(m->ledgerseq());
3127 if (!ledger)
3128 {
3129 JLOG(p_journal_.debug())
3130 << "getLedger: Don't have ledger with sequence "
3131 << m->ledgerseq();
3132 }
3133 }
3134 }
3135 else if (m->has_ltype() && m->ltype() == protocol::ltCLOSED)
3136 {
3137 ledger = app_.getLedgerMaster().getClosedLedger();
3138 }
3139
3140 if (ledger)
3141 {
3142 // Validate retrieved ledger sequence
3143 auto const ledgerSeq{ledger->info().seq};
3144 if (m->has_ledgerseq())
3145 {
3146 if (ledgerSeq != m->ledgerseq())
3147 {
3148 // Do not resource charge a peer responding to a relay
3149 if (!m->has_requestcookie())
3150 charge(
3151 Resource::feeMalformedRequest, "get_ledger ledgerSeq");
3152
3153 ledger.reset();
3154 JLOG(p_journal_.warn())
3155 << "getLedger: Invalid ledger sequence " << ledgerSeq;
3156 }
3157 }
3158 else if (ledgerSeq < app_.getLedgerMaster().getEarliestFetch())
3159 {
3160 ledger.reset();
3161 JLOG(p_journal_.debug())
3162 << "getLedger: Early ledger sequence request " << ledgerSeq;
3163 }
3164 }
3165 else
3166 {
3167 JLOG(p_journal_.debug()) << "getLedger: Unable to find ledger";
3168 }
3169
3170 return ledger;
3171}
3172
3173std::shared_ptr<SHAMap const>
3174PeerImp::getTxSet(std::shared_ptr<protocol::TMGetLedger> const& m) const
3175{
3176 JLOG(p_journal_.trace()) << "getTxSet: TX set";
3177
3178 uint256 const txSetHash{m->ledgerhash()};
3179 std::shared_ptr<SHAMap const> shaMap{
3180 app_.getInboundTransactions().getSet(txSetHash, false)};
3181 if (!shaMap)
3182 {
3183 if (m->has_querytype() && !m->has_requestcookie())
3184 {
3185 // Attempt to relay the request to a peer
3186 if (auto const peer = getPeerWithTree(overlay_, txSetHash, this))
3187 {
3188 m->set_requestcookie(id());
3189 peer->send(
3190 std::make_shared<Message>(*m, protocol::mtGET_LEDGER));
3191 JLOG(p_journal_.debug()) << "getTxSet: Request relayed";
3192 }
3193 else
3194 {
3195 JLOG(p_journal_.debug())
3196 << "getTxSet: Failed to find relay peer";
3197 }
3198 }
3199 else
3200 {
3201 JLOG(p_journal_.debug()) << "getTxSet: Failed to find TX set";
3202 }
3203 }
3204
3205 return shaMap;
3206}
3207
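// Serves a TMGetLedger request. Depending on the requested info type the
// reply is built from a candidate transaction set or from the ledger's
// header, transaction map, or state map, with the number of returned
// nodes capped by softMaxReplyNodes/hardMaxReplyNodes.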
3208void
3209PeerImp::processLedgerRequest(std::shared_ptr<protocol::TMGetLedger> const& m)
3210{
3211 // Do not resource charge a peer responding to a relay
3212 if (!m->has_requestcookie())
3213 charge(
3214 Resource::feeModerateBurdenPeer, "received a get ledger request");
3215
3216 std::shared_ptr<Ledger const> ledger;
3217 std::shared_ptr<SHAMap const> sharedMap;
3218 SHAMap const* map{nullptr};
3219 protocol::TMLedgerData ledgerData;
3220 bool fatLeaves{true};
3221 auto const itype{m->itype()};
3222
3223 if (itype == protocol::liTS_CANDIDATE)
3224 {
3225 if (sharedMap = getTxSet(m); !sharedMap)
3226 return;
3227 map = sharedMap.get();
3228
3229 // Fill out the reply
3230 ledgerData.set_ledgerseq(0);
3231 ledgerData.set_ledgerhash(m->ledgerhash());
3232 ledgerData.set_type(protocol::liTS_CANDIDATE);
3233 if (m->has_requestcookie())
3234 ledgerData.set_requestcookie(m->requestcookie());
3235
3236 // We'll already have most transactions
3237 fatLeaves = false;
3238 }
3239 else
3240 {
3241 if (send_queue_.size() >= Tuning::dropSendQueue)
3242 {
3243 JLOG(p_journal_.debug())
3244 << "processLedgerRequest: Large send queue";
3245 return;
3246 }
3247 if (app_.getFeeTrack().isLoadedLocal() && !cluster())
3248 {
3249 JLOG(p_journal_.debug()) << "processLedgerRequest: Too busy";
3250 return;
3251 }
3252
3253 if (ledger = getLedger(m); !ledger)
3254 return;
3255
3256 // Fill out the reply
3257 auto const ledgerHash{ledger->info().hash};
3258 ledgerData.set_ledgerhash(ledgerHash.begin(), ledgerHash.size());
3259 ledgerData.set_ledgerseq(ledger->info().seq);
3260 ledgerData.set_type(itype);
3261 if (m->has_requestcookie())
3262 ledgerData.set_requestcookie(m->requestcookie());
3263
3264 switch (itype)
3265 {
3266 case protocol::liBASE:
3267 sendLedgerBase(ledger, ledgerData);
3268 return;
3269
3270 case protocol::liTX_NODE:
3271 map = &ledger->txMap();
3272 JLOG(p_journal_.trace()) << "processLedgerRequest: TX map hash "
3273 << to_string(map->getHash());
3274 break;
3275
3276 case protocol::liAS_NODE:
3277 map = &ledger->stateMap();
3278 JLOG(p_journal_.trace())
3279 << "processLedgerRequest: Account state map hash "
3280 << to_string(map->getHash());
3281 break;
3282
3283 default:
3284 // This case should not be possible here
3285 JLOG(p_journal_.error())
3286 << "processLedgerRequest: Invalid ledger info type";
3287 return;
3288 }
3289 }
3290
3291 if (!map)
3292 {
3293 JLOG(p_journal_.warn()) << "processLedgerRequest: Unable to find map";
3294 return;
3295 }
3296
3297 // Add requested node data to reply
3298 if (m->nodeids_size() > 0)
3299 {
3300 auto const queryDepth{
3301 m->has_querydepth() ? m->querydepth() : (isHighLatency() ? 2 : 1)};
3302
3303 std::vector<std::pair<SHAMapNodeID, Blob>> data;
3304
3305 for (int i = 0; i < m->nodeids_size() &&
3306 ledgerData.nodes_size() < Tuning::softMaxReplyNodes;
3307 ++i)
3308 {
3309 auto const shaMapNodeId{deserializeSHAMapNodeID(m->nodeids(i))};
3310
3311 data.clear();
3312 data.reserve(Tuning::softMaxReplyNodes);
3313
3314 try
3315 {
3316 if (map->getNodeFat(*shaMapNodeId, data, fatLeaves, queryDepth))
3317 {
3318 JLOG(p_journal_.trace())
3319 << "processLedgerRequest: getNodeFat got "
3320 << data.size() << " nodes";
3321
3322 for (auto const& d : data)
3323 {
3324 if (ledgerData.nodes_size() >=
3325 Tuning::hardMaxReplyNodes)
3326 break;
3327 protocol::TMLedgerNode* node{ledgerData.add_nodes()};
3328 node->set_nodeid(d.first.getRawString());
3329 node->set_nodedata(d.second.data(), d.second.size());
3330 }
3331 }
3332 else
3333 {
3334 JLOG(p_journal_.warn())
3335 << "processLedgerRequest: getNodeFat returns false";
3336 }
3337 }
3338 catch (std::exception const& e)
3339 {
3340 std::string info;
3341 switch (itype)
3342 {
3343 case protocol::liBASE:
3344 // This case should not be possible here
3345 info = "Ledger base";
3346 break;
3347
3348 case protocol::liTX_NODE:
3349 info = "TX node";
3350 break;
3351
3352 case protocol::liAS_NODE:
3353 info = "AS node";
3354 break;
3355
3356 case protocol::liTS_CANDIDATE:
3357 info = "TS candidate";
3358 break;
3359
3360 default:
3361 info = "Invalid";
3362 break;
3363 }
3364
3365 if (!m->has_ledgerhash())
3366 info += ", no hash specified";
3367
3368 JLOG(p_journal_.error())
3369 << "processLedgerRequest: getNodeFat with nodeId "
3370 << *shaMapNodeId << " and ledger info type " << info
3371 << " throws exception: " << e.what();
3372 }
3373 }
3374
3375 JLOG(p_journal_.info())
3376 << "processLedgerRequest: Got request for " << m->nodeids_size()
3377 << " nodes at depth " << queryDepth << ", return "
3378 << ledgerData.nodes_size() << " nodes";
3379 }
3380
3381 if (ledgerData.nodes_size() == 0)
3382 return;
3383
3384 send(std::make_shared<Message>(ledgerData, protocol::mtLEDGER_DATA));
3385}
3386
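// Peer selection score: a random component breaks ties, having the sought
// item adds a large bonus, and measured latency (or the lack of a
// measurement) subtracts from the score.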
3387int
3388PeerImp::getScore(bool haveItem) const
3389{
3390 // Random component of score, used to break ties and avoid
3391 // overloading the "best" peer
3392 static const int spRandomMax = 9999;
3393
3394 // Score for being very likely to have the thing we are
3395 // looking for; should be roughly spRandomMax
3396 static const int spHaveItem = 10000;
3397
3398 // Score reduction for each millisecond of latency; should
3399 // be roughly spRandomMax divided by the maximum reasonable
3400 // latency
3401 static const int spLatency = 30;
3402
3403 // Penalty for unknown latency; should be roughly spRandomMax
3404 static const int spNoLatency = 8000;
3405
3406 int score = rand_int(spRandomMax);
3407
3408 if (haveItem)
3409 score += spHaveItem;
3410
3411 std::optional<std::chrono::milliseconds> latency;
3412 {
3413 std::lock_guard sl(recentLock_);
3414 latency = latency_;
3415 }
3416
3417 if (latency)
3418 score -= latency->count() * spLatency;
3419 else
3420 score -= spNoLatency;
3421
3422 return score;
3423}
3424
3425bool
3426PeerImp::isHighLatency() const
3427{
3428 std::lock_guard sl(recentLock_);
3429 return latency_ >= peerHighLatency;
3430}
3431
3432bool
3433PeerImp::reduceRelayReady()
3434{
3435 if (!reduceRelayReady_)
3436 reduceRelayReady_ =
3437 reduce_relay::epoch<std::chrono::minutes>(UptimeClock::now()) >
3438 reduce_relay::WAIT_ON_BOOTUP;
3439 return vpReduceRelayEnabled_ && reduceRelayReady_;
3440}
3441
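// Bandwidth metrics: bytes are accumulated and, roughly once per second,
// folded into a rolling average of recent per-second samples;
// total_bytes() and average_bytes() expose the counters under a shared
// lock.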
3442void
3443PeerImp::Metrics::add_message(std::uint64_t bytes)
3444{
3445 using namespace std::chrono_literals;
3446 std::unique_lock lock{mutex_};
3447
3448 totalBytes_ += bytes;
3449 accumBytes_ += bytes;
3450 auto const timeElapsed = clock_type::now() - intervalStart_;
3451 auto const timeElapsedInSecs =
3452 std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);
3453
3454 if (timeElapsedInSecs >= 1s)
3455 {
3456 auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
3457 rollingAvg_.push_back(avgBytes);
3458
3459 auto const totalBytes =
3460 std::accumulate(rollingAvg_.begin(), rollingAvg_.end(), 0ull);
3461 rollingAvgBytes_ = totalBytes / rollingAvg_.size();
3462
3463 intervalStart_ = clock_type::now();
3464 accumBytes_ = 0;
3465 }
3466}
3467
3468std::uint64_t
3469PeerImp::Metrics::average_bytes() const
3470{
3471 std::shared_lock lock{mutex_};
3472 return rollingAvgBytes_;
3473}
3474
3475std::uint64_t
3476PeerImp::Metrics::total_bytes() const
3477{
3478 std::shared_lock lock{mutex_};
3479 return totalBytes_;
3480}
3481
3482} // namespace ripple