#include <xrpld/app/consensus/RCLValidations.h>
#include <xrpld/app/ledger/InboundLedgers.h>
#include <xrpld/app/ledger/InboundTransactions.h>
#include <xrpld/app/ledger/LedgerMaster.h>
#include <xrpld/app/ledger/TransactionMaster.h>
#include <xrpld/app/misc/HashRouter.h>
#include <xrpld/app/misc/LoadFeeTrack.h>
#include <xrpld/app/misc/NetworkOPs.h>
#include <xrpld/app/misc/Transaction.h>
#include <xrpld/app/misc/ValidatorList.h>
#include <xrpld/app/tx/apply.h>
#include <xrpld/overlay/Cluster.h>
#include <xrpld/overlay/detail/PeerImp.h>
#include <xrpld/overlay/detail/Tuning.h>
#include <xrpld/perflog/PerfLog.h>

#include <xrpl/basics/UptimeClock.h>
#include <xrpl/basics/base64.h>
#include <xrpl/basics/random.h>
#include <xrpl/basics/safe_cast.h>
#include <xrpl/protocol/TxFlags.h>
#include <xrpl/protocol/digest.h>

#include <boost/algorithm/string/predicate.hpp>
#include <boost/beast/core/ostream.hpp>

using namespace std::chrono_literals;
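// PeerImp constructor: wires up the per-peer logging sinks, the SSL stream and
// its strand, the remote endpoint and public key, and the feature flags
// (compression, tx reduce-relay, ledger replay) negotiated in the handshake.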
    , sink_(app_.journal("Peer"), makePrefix(id))
    , p_sink_(app_.journal("Protocol"), makePrefix(id))
    , stream_ptr_(std::move(stream_ptr))
    , socket_(stream_ptr_->next_layer().socket())
    , stream_(*stream_ptr_)
    , strand_(boost::asio::make_strand(socket_.get_executor()))
    , remote_address_(slot->remote_endpoint())
    , publicKey_(publicKey)
    , squelch_(app_.journal("Squelch"))
    , fee_{Resource::feeTrivialPeer, ""}
    , request_(std::move(request))
    , compressionEnabled_(
          peerFeatureEnabled(
              headers_, FEATURE_COMPR, "lz4", app_.config().COMPRESSION)
              ? Compressed::On
              : Compressed::Off)
    , txReduceRelayEnabled_(peerFeatureEnabled(
          headers_, FEATURE_TXRR, app_.config().TX_REDUCE_RELAY_ENABLE))
    , ledgerReplayEnabled_(peerFeatureEnabled(
          headers_, FEATURE_LEDGER_REPLAY, app_.config().LEDGER_REPLAY))
    , ledgerReplayMsgHandler_(app, app.getLedgerReplayer())
        << " vp reduce-relay base squelch enabled "

    bool const inCluster{cluster()};

    if (!strand_.running_in_this_thread())

    auto parseLedgerHash =
    if (auto const iter = headers_.find("Closed-Ledger");
        iter != headers_.end())
    {
        closed = parseLedgerHash(iter->value());

        if (!closed)
            fail("Malformed handshake data (1)");
    }

    if (auto const iter = headers_.find("Previous-Ledger");
        iter != headers_.end())
    {
        previous = parseLedgerHash(iter->value());

        if (!previous)
            fail("Malformed handshake data (2)");
    }

    if (previous && !closed)
        fail("Malformed handshake data (3)");
    if (!strand_.running_in_this_thread())

    if (!strand_.running_in_this_thread())

    auto validator = m->getValidatorKey();
    if (validator && !squelch_.expireSquelch(*validator))

        safe_cast<TrafficCount::category>(m->getCategory()),

            << " sendq: " << sendq_size;

    boost::asio::async_write(

            std::placeholders::_1,
            std::placeholders::_2)));
    if (!strand_.running_in_this_thread())

    protocol::TMHaveTransactions ht;

        ht.add_hashes(hash.data(), hash.size());

    if (!strand_.running_in_this_thread())

    if (!strand_.running_in_this_thread())

    auto removed = txQueue_.erase(hash);

        fail("charge: Resources");
    auto const iter = headers_.find("Crawl");

    return boost::iequals(iter->value(), "public");
    ret[jss::inbound] = true;

        ret[jss::cluster] = true;

    if (auto const nid = headers_["Network-ID"]; !nid.empty())

        std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());

    if ((minSeq != 0) || (maxSeq != 0))
        ret[jss::complete_ledgers] =

        ret[jss::track] = "diverged";

        ret[jss::track] = "unknown";
    protocol::TMStatusChange last_status;

    if (closedLedgerHash != beast::zero)
        ret[jss::ledger] = to_string(closedLedgerHash);

    if (last_status.has_newstatus())

        switch (last_status.newstatus())

            case protocol::nsCONNECTING:
                ret[jss::status] = "connecting";

            case protocol::nsCONNECTED:
                ret[jss::status] = "connected";

            case protocol::nsMONITORING:
                ret[jss::status] = "monitoring";

            case protocol::nsVALIDATING:
                ret[jss::status] = "validating";

            case protocol::nsSHUTTING:
                ret[jss::status] = "shutting";

                    << "Unknown status: " << last_status.newstatus();

    ret[jss::metrics][jss::total_bytes_recv] =

    ret[jss::metrics][jss::total_bytes_sent] =

    ret[jss::metrics][jss::avg_bps_recv] =

    ret[jss::metrics][jss::avg_bps_sent] =
        strand_.running_in_this_thread(),
        "ripple::PeerImp::fail : strand in this thread");

    if (!strand_.running_in_this_thread())

            << " failed: " << reason;

        strand_.running_in_this_thread(),
        "ripple::PeerImp::tryAsyncShutdown : strand in this thread");

    stream_.async_shutdown(bind_executor(

        strand_.running_in_this_thread(),
        "ripple::PeerImp::shutdown: strand in this thread");

    boost::beast::get_lowest_layer(stream_).cancel();
        (ec != boost::asio::error::eof &&
         ec != boost::asio::error::operation_aborted &&
         ec.message().find("application data after close notify") ==

        strand_.running_in_this_thread(),
        "ripple::PeerImp::close : strand in this thread");
    timer_.expires_after(interval);

    timer_.async_wait(bind_executor(

        strand_.running_in_this_thread(),
        "ripple::PeerImp::onTimer : strand in this thread");

    if (ec == boost::asio::error::operation_aborted)

            return fail("Large send queue");

    clock_type::duration duration;

            return fail("Not useful");

            return fail("Ping Timeout");

    protocol::TMPing message;
    message.set_type(protocol::TMPing::ptPING);
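// doAccept / onWriteResponse: compute the shared handshake value, write the
// HTTP upgrade response, and fail the connection if the header cannot be
// written in full.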
827 "ripple::PeerImp::doAccept : empty read buffer");
840 return fail(
"makeSharedValue: Unexpected failure");
874 boost::asio::async_write(
876 write_buffer->data(),
877 boost::asio::transfer_all(),
884 if (ec == boost::asio::error::operation_aborted)
887 return fail(
"onWriteResponse", ec);
888 if (write_buffer->size() == bytes_transferred)
890 return fail(
"Failed to write header");
        strand_.running_in_this_thread(),
        "ripple::PeerImp::onReadMessage : strand in this thread");

    if (ec == boost::asio::error::eof)

    if (ec == boost::asio::error::operation_aborted)

        return fail("onReadMessage", ec);

        stream << "onReadMessage: "
               << (bytes_transferred > 0
                       ? to_string(bytes_transferred) + " bytes"

    metrics_.recv.add_message(bytes_transferred);

        using namespace std::chrono_literals;

            "invokeProtocolMessage",

            return fail("onReadMessage", ec);

        if (bytes_consumed == 0)

            std::placeholders::_1,
            std::placeholders::_2)));
        strand_.running_in_this_thread(),
        "ripple::PeerImp::onWriteMessage : strand in this thread");

    if (ec == boost::asio::error::operation_aborted)

        return fail("onWriteMessage", ec);

        stream << "onWriteMessage: "
               << (bytes_transferred > 0
                       ? to_string(bytes_transferred) + " bytes"

    metrics_.sent.add_message(bytes_transferred);

        "ripple::PeerImp::onWriteMessage : non-empty send buffer");

            "ripple::PeerImp::onWriteMessage : shutdown started");

        return boost::asio::async_write(

            boost::asio::buffer(

                std::placeholders::_1,
                std::placeholders::_2)));
        *m,
        static_cast<protocol::MessageType>(type),
        true);

    if ((type == MessageType::mtTRANSACTION ||
         type == MessageType::mtHAVE_TRANSACTIONS ||
         type == MessageType::mtTRANSACTIONS ||

            static_cast<MessageType>(type),
            static_cast<std::uint64_t>(size));

    JLOG(journal_.trace()) << "onMessageBegin: " << type << " " << size << " "
                           << uncompressed_size << " " << isCompressed;
    auto const s = m->list_size();

    if (m->type() == protocol::TMPing::ptPING)

        m->set_type(protocol::TMPing::ptPONG);

    if (m->type() == protocol::TMPing::ptPONG && m->has_seq())

        auto const rtt = std::chrono::round<std::chrono::milliseconds>(
    for (int i = 0; i < m->clusternodes().size(); ++i)

        protocol::TMClusterNode const& node = m->clusternodes(i);

        if (node.has_nodename())
            name = node.nodename();

        auto const publicKey =

        auto const reportTime =

            *publicKey, name, node.nodeload(), reportTime);

    int loadSources = m->loadsources().size();
    if (loadSources != 0)

        gossip.items.reserve(loadSources);
        for (int i = 0; i < m->loadsources().size(); ++i)

            protocol::TMLoadSource const& node = m->loadsources(i);

            gossip.items.push_back(item);

        if (status.getReportTime() >= thresh)
            fees.push_back(status.getLoadFee());

        auto const index = fees.size() / 2;

        clusterFee = fees[index];
    if (m->endpoints_v2().size() >= 1024)

    endpoints.reserve(m->endpoints_v2().size());

    for (auto const& tm : m->endpoints_v2())

            << tm.endpoint() << "}";

    if (!endpoints.empty())
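// handleTransaction: common entry point for transactions received directly or
// fetched via tx reduce-relay; network-relayed transactions carrying
// tfInnerBatchTxn are ignored, and accepted ones are queued for
// checkTransaction on the job queue.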
        eraseTxQueue != batch,
        ("ripple::PeerImp::handleTransaction : valid inputs"));

            << "Need network ledger";

    uint256 txID = stx->getTransactionID();

        JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing "
                                   "tfInnerBatchTxn (handleTransaction).";

    bool checkSignature = true;

    if (!m->has_deferred() || !m->deferred())

            checkSignature = false;

                << "No new transactions until synchronized";

        "recvTransaction->checkTransaction",

            if (auto peer = weak.lock())
                peer->checkTransaction(
                    flags, checkSignature, stx, batch);

            << "Transaction invalid: " << strHex(m->rawtransaction())
            << ". Exception: " << ex.what();
    auto const itype{m->itype()};

    if (itype < protocol::liBASE || itype > protocol::liTS_CANDIDATE)
        return badData("Invalid ledger info type");

    if (itype == protocol::liTS_CANDIDATE)

        if (!m->has_ledgerhash())
            return badData("Invalid TX candidate set, missing TX set hash");

        !m->has_ledgerhash() && !m->has_ledgerseq() &&
        !(ltype && *ltype == protocol::ltCLOSED))

        return badData("Invalid request");

    if (ltype && (*ltype < protocol::ltACCEPTED || *ltype > protocol::ltCLOSED))
        return badData("Invalid ledger type");

        return badData("Invalid ledger hash");

    if (m->has_ledgerseq())

        auto const ledgerSeq{m->ledgerseq()};

        using namespace std::chrono_literals;

    if (itype != protocol::liBASE)

        if (m->nodeids_size() <= 0)
            return badData("Invalid ledger node IDs");

        for (auto const& nodeId : m->nodeids())

                return badData("Invalid SHAMap node ID");

    if (m->has_querytype() && m->querytype() != protocol::qtINDIRECT)
        return badData("Invalid query type");

    if (m->has_querydepth())

            itype == protocol::liBASE)

            return badData("Invalid query depth");

        if (auto peer = weak.lock())
            peer->processLedgerRequest(m);
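// Ledger replay handlers: proof-path and replay-delta requests are answered
// through ledgerReplayMsgHandler_ on job-queue jobs; the corresponding
// responses charge the peer if the feature is disabled or the payload fails
// to process.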
        if (auto peer = weak.lock())

            peer->ledgerReplayMsgHandler_.processProofPathRequest(m);
            if (reply.has_error())

                if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)

                        Resource::feeMalformedRequest,
                        "proof_path_request");

                        Resource::feeRequestNoReply, "proof_path_request");

                peer->send(std::make_shared<Message>(
                    reply, protocol::mtPROOF_PATH_RESPONSE));
    if (!ledgerReplayEnabled_)

            Resource::feeMalformedRequest, "proof_path_response disabled");

    if (!ledgerReplayMsgHandler_.processProofPathResponse(m))

        fee_.update(Resource::feeInvalidData, "proof_path_response");
    JLOG(p_journal_.trace()) << "onMessage, TMReplayDeltaRequest";
    if (!ledgerReplayEnabled_)

            Resource::feeMalformedRequest, "replay_delta_request disabled");

    fee_.fee = Resource::feeModerateBurdenPeer;

    app_.getJobQueue().addJob(

        if (auto peer = weak.lock())

            peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m);
            if (reply.has_error())

                if (reply.error() == protocol::TMReplyError::reBAD_REQUEST)

                        Resource::feeMalformedRequest,
                        "replay_delta_request");

                        Resource::feeRequestNoReply,
                        "replay_delta_request");

                peer->send(std::make_shared<Message>(
                    reply, protocol::mtREPLAY_DELTA_RESPONSE));

    if (!ledgerReplayEnabled_)

            Resource::feeMalformedRequest, "replay_delta_response disabled");

    if (!ledgerReplayMsgHandler_.processReplayDeltaResponse(m))

        fee_.update(Resource::feeInvalidData, "replay_delta_response");
        fee_.update(Resource::feeInvalidData, msg);
        JLOG(p_journal_.warn()) << "TMLedgerData: " << msg;

        return badData("Invalid ledger hash");

    auto const ledgerSeq{m->ledgerseq()};
    if (m->type() == protocol::liTS_CANDIDATE)

        using namespace std::chrono_literals;
        if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s &&
            ledgerSeq > app_.getLedgerMaster().getValidLedgerIndex() + 10)

    if (m->type() < protocol::liBASE || m->type() > protocol::liTS_CANDIDATE)
        return badData("Invalid ledger info type");

    if (m->has_error() &&
        (m->error() < protocol::reNO_LEDGER ||
         m->error() > protocol::reBAD_REQUEST))

        return badData("Invalid reply error");

    if (m->nodes_size() <= 0 || m->nodes_size() > Tuning::hardMaxReplyNodes)

            "Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size()));

    if (m->has_requestcookie())

        if (auto peer = overlay_.findPeerByShortID(m->requestcookie()))

            m->clear_requestcookie();

            JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";

    uint256 const ledgerHash{m->ledgerhash()};

    if (m->type() == protocol::liTS_CANDIDATE)

        app_.getJobQueue().addJob(
            jtTXN_DATA, "recvPeerData", [weak, ledgerHash, m]() {
                if (auto peer = weak.lock())
                    peer->app_.getInboundTransactions().gotData(
                        ledgerHash, peer, m);

    app_.getInboundLedgers().gotLedgerData(ledgerHash, shared_from_this(), m);
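// onMessage(TMProposeSet): sanity-check signature and hash sizes, decide
// whether the proposer is trusted, suppress duplicates via the HashRouter and
// squelch slots, then schedule checkPropose on the job queue.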
    protocol::TMProposeSet& set = *m;

        JLOG(p_journal_.warn()) << "Proposal: malformed";

            Resource::feeInvalidSignature,
            " signature can't be longer than 72 bytes");

        JLOG(p_journal_.warn()) << "Proposal: malformed";
        fee_.update(Resource::feeMalformedRequest, "bad hashes");

    auto const isTrusted = app_.validators().trusted(publicKey);

        overlay_.reportInboundTraffic(
            TrafficCount::category::proposal_untrusted,
            Message::messageSize(*m));

        if (app_.config().RELAY_UNTRUSTED_PROPOSALS == -1)

    uint256 const proposeHash{set.currenttxhash()};
    uint256 const prevLedger{set.previousledger()};

    if (auto [added, relayed] =
            app_.getHashRouter().addSuppressionPeerWithStatus(suppression, id_);

        if (relayed && (stopwatch().now() - *relayed) < reduce_relay::IDLED)
            overlay_.updateSlotAndSquelch(
                suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);

        overlay_.reportInboundTraffic(
            TrafficCount::category::proposal_duplicate,
            Message::messageSize(*m));

        JLOG(p_journal_.trace()) << "Proposal: duplicate";

        if (tracking_.load() == Tracking::diverged)

            JLOG(p_journal_.debug())
                << "Proposal: Dropping untrusted (peer divergence)";

        if (!cluster() && app_.getFeeTrack().isLoadedLocal())

            JLOG(p_journal_.debug()) << "Proposal: Dropping untrusted (load)";

    JLOG(p_journal_.trace())
        << "Proposal: " << (isTrusted ? "trusted" : "untrusted");

        app_.timeKeeper().closeTime(),
        calcNodeID(app_.validatorManifests().getMasterKey(publicKey))});

    app_.getJobQueue().addJob(

        "recvPropose->checkPropose",

            if (auto peer = weak.lock())
                peer->checkPropose(isTrusted, m, proposal);
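// onMessage(TMStatusChange): record the peer's reported status and its
// closed/previous ledger hashes and sequence range, then publish a peer-status
// JSON event through NetworkOPs.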
    JLOG(p_journal_.trace()) << "Status: Change";

    if (!m->has_networktime())
        m->set_networktime(app_.timeKeeper().now().time_since_epoch().count());

    if (!last_status_.has_newstatus() || m->has_newstatus())

        protocol::NodeStatus status = last_status_.newstatus();

        m->set_newstatus(status);

    if (m->newevent() == protocol::neLOST_SYNC)

        bool outOfSync{false};

            if (!closedLedgerHash_.isZero())

                closedLedgerHash_.zero();

            previousLedgerHash_.zero();

            JLOG(p_journal_.debug()) << "Status: Out of sync";

    bool const peerChangedLedgers{

        if (peerChangedLedgers)

            closedLedgerHash_ = m->ledgerhash();
            closedLedgerHash = closedLedgerHash_;
            addLedger(closedLedgerHash, sl);

            closedLedgerHash_.zero();

        if (m->has_ledgerhashprevious() &&

            previousLedgerHash_ = m->ledgerhashprevious();
            addLedger(previousLedgerHash_, sl);

            previousLedgerHash_.zero();

    if (peerChangedLedgers)

        JLOG(p_journal_.debug()) << "LCL is " << closedLedgerHash;

        JLOG(p_journal_.debug()) << "Status: No ledger";

    if (m->has_firstseq() && m->has_lastseq())

        minLedger_ = m->firstseq();
        maxLedger_ = m->lastseq();

        if ((maxLedger_ < minLedger_) || (minLedger_ == 0) || (maxLedger_ == 0))
            minLedger_ = maxLedger_ = 0;

    if (m->has_ledgerseq() &&
        app_.getLedgerMaster().getValidatedLedgerAge() < 2min)

            m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex());

    app_.getOPs().pubPeerStatus([=, this]() -> Json::Value {

        if (m->has_newstatus())

            switch (m->newstatus())

                case protocol::nsCONNECTING:
                    j[jss::status] = "CONNECTING";

                case protocol::nsCONNECTED:
                    j[jss::status] = "CONNECTED";

                case protocol::nsMONITORING:
                    j[jss::status] = "MONITORING";

                case protocol::nsVALIDATING:
                    j[jss::status] = "VALIDATING";

                case protocol::nsSHUTTING:
                    j[jss::status] = "SHUTTING";

        if (m->has_newevent())

            switch (m->newevent())

                case protocol::neCLOSING_LEDGER:
                    j[jss::action] = "CLOSING_LEDGER";

                case protocol::neACCEPTED_LEDGER:
                    j[jss::action] = "ACCEPTED_LEDGER";

                case protocol::neSWITCHED_LEDGER:
                    j[jss::action] = "SWITCHED_LEDGER";

                case protocol::neLOST_SYNC:
                    j[jss::action] = "LOST_SYNC";

        if (m->has_ledgerseq())

            j[jss::ledger_index] = m->ledgerseq();

        if (m->has_ledgerhash())

            uint256 closedLedgerHash{};

                closedLedgerHash = closedLedgerHash_;

            j[jss::ledger_hash] = to_string(closedLedgerHash);

        if (m->has_networktime())

        if (m->has_firstseq() && m->has_lastseq())

            j[jss::ledger_index_min] = Json::UInt(m->firstseq());
            j[jss::ledger_index_max] = Json::UInt(m->lastseq());
        serverSeq = maxLedger_;

        checkTracking(serverSeq, validationSeq);

    if (diff < Tuning::convergedLedgerLimit)

        tracking_ = Tracking::converged;

    if ((diff > Tuning::divergedLedgerLimit) &&
        (tracking_.load() != Tracking::diverged))

        tracking_ = Tracking::diverged;
        trackingTime_ = clock_type::now();

        fee_.update(Resource::feeMalformedRequest, "bad hash");

    uint256 const hash{m->hash()};

    if (m->status() == protocol::tsHAVE)

        if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
            recentTxSets_.end())

            fee_.update(Resource::feeUselessData, "duplicate (tsHAVE)");

        recentTxSets_.push_back(hash);
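// onValidatorListMessage: shared handling for validator-list payloads;
// duplicates are suppressed, the lists are applied and broadcast, and the peer
// is charged or logged according to the best and worst ListDisposition.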
PeerImp::onValidatorListMessage(

        JLOG(p_journal_.warn()) << "Ignored malformed " << messageType
                                << " from peer " << remote_address_;

        fee_.update(Resource::feeHeavyBurdenPeer, "no blobs");

    JLOG(p_journal_.debug())
        << "Received " << messageType << " from "
        << remote_address_.to_string() << " (" << id_ << ")";

    if (!app_.getHashRouter().addSuppressionPeer(hash, id_))

        JLOG(p_journal_.debug())
            << messageType << ": received duplicate " << messageType;

        fee_.update(Resource::feeUselessData, "duplicate");

    auto const applyResult = app_.validators().applyListsAndBroadcast(

        remote_address_.to_string(),

        app_.getHashRouter(),

    JLOG(p_journal_.debug())
        << "Processed " << messageType << " version " << version << " from "
        << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
                                     : "unknown or invalid publisher")
        << " from " << remote_address_.to_string() << " (" << id_
        << ") with best result " << to_string(applyResult.bestDisposition());

    switch (applyResult.bestDisposition())

        case ListDisposition::accepted:

        case ListDisposition::expired:

        case ListDisposition::pending: {

                applyResult.publisherKey,
                "ripple::PeerImp::onValidatorListMessage : publisher key is "

            auto const& pubKey = *applyResult.publisherKey;

            if (auto const iter = publisherListSequences_.find(pubKey);
                iter != publisherListSequences_.end())

                    iter->second < applyResult.sequence,
                    "ripple::PeerImp::onValidatorListMessage : lower sequence");

            publisherListSequences_[pubKey] = applyResult.sequence;

        case ListDisposition::same_sequence:
        case ListDisposition::known_sequence:

                applyResult.sequence && applyResult.publisherKey,
                "ripple::PeerImp::onValidatorListMessage : nonzero sequence "
                "and set publisher key");

                publisherListSequences_[*applyResult.publisherKey] <=
                    applyResult.sequence,
                "ripple::PeerImp::onValidatorListMessage : maximum sequence");

        case ListDisposition::stale:
        case ListDisposition::untrusted:
        case ListDisposition::invalid:
        case ListDisposition::unsupported_version:

                "ripple::PeerImp::onValidatorListMessage : invalid best list "

    switch (applyResult.worstDisposition())

        case ListDisposition::accepted:
        case ListDisposition::expired:
        case ListDisposition::pending:

        case ListDisposition::same_sequence:
        case ListDisposition::known_sequence:

                Resource::feeUselessData,
                " duplicate (same_sequence or known_sequence)");

        case ListDisposition::stale:

            fee_.update(Resource::feeInvalidData, "expired");

        case ListDisposition::untrusted:

            fee_.update(Resource::feeUselessData, "untrusted");

        case ListDisposition::invalid:

                Resource::feeInvalidSignature, "invalid list disposition");

        case ListDisposition::unsupported_version:

            fee_.update(Resource::feeInvalidData, "version");

                "ripple::PeerImp::onValidatorListMessage : invalid worst list "

    for (auto const& [disp, count] : applyResult.dispositions)

            case ListDisposition::accepted:
                JLOG(p_journal_.debug())
                    << "Applied " << count << " new " << messageType
                    << "(s) from peer " << remote_address_;

            case ListDisposition::expired:
                JLOG(p_journal_.debug())
                    << "Applied " << count << " expired " << messageType
                    << "(s) from peer " << remote_address_;

            case ListDisposition::pending:
                JLOG(p_journal_.debug())
                    << "Processed " << count << " future " << messageType
                    << "(s) from peer " << remote_address_;

            case ListDisposition::same_sequence:
                JLOG(p_journal_.warn())
                    << "Ignored " << count << " " << messageType
                    << "(s) with current sequence from peer "

            case ListDisposition::known_sequence:
                JLOG(p_journal_.warn())
                    << "Ignored " << count << " " << messageType
                    << "(s) with future sequence from peer " << remote_address_;

            case ListDisposition::stale:
                JLOG(p_journal_.warn())
                    << "Ignored " << count << " stale " << messageType
                    << "(s) from peer " << remote_address_;

            case ListDisposition::untrusted:
                JLOG(p_journal_.warn())
                    << "Ignored " << count << " untrusted " << messageType
                    << "(s) from peer " << remote_address_;

            case ListDisposition::unsupported_version:
                JLOG(p_journal_.warn())
                    << "Ignored " << count << " unsupported version "
                    << messageType << "(s) from peer " << remote_address_;

            case ListDisposition::invalid:
                JLOG(p_journal_.warn())
                    << "Ignored " << count << " invalid " << messageType
                    << "(s) from peer " << remote_address_;

                "ripple::PeerImp::onValidatorListMessage : invalid list "
    if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))

        JLOG(p_journal_.debug())
            << "ValidatorList: received validator list from peer using "
            << "protocol version " << to_string(protocol_)
            << " which shouldn't support this feature.";
        fee_.update(Resource::feeUselessData, "unsupported peer");

    onValidatorListMessage(

        ValidatorList::parseBlobs(*m));

        JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what()
                                << " from peer " << remote_address_;
        using namespace std::string_literals;
        fee_.update(Resource::feeInvalidData, e.what());
    if (!supportsFeature(ProtocolFeature::ValidatorList2Propagation))

        JLOG(p_journal_.debug())
            << "ValidatorListCollection: received validator list from peer "
            << "using protocol version " << to_string(protocol_)
            << " which shouldn't support this feature.";
        fee_.update(Resource::feeUselessData, "unsupported peer");

    else if (m->version() < 2)

        JLOG(p_journal_.debug())
            << "ValidatorListCollection: received invalid validator list "
            << m->version() << " from peer using protocol version "
            << to_string(protocol_);
        fee_.update(Resource::feeInvalidData, "wrong version");

    onValidatorListMessage(
        "ValidatorListCollection",

        ValidatorList::parseBlobs(*m));

        JLOG(p_journal_.warn()) << "ValidatorListCollection: Exception, "
                                << e.what() << " from peer " << remote_address_;
        using namespace std::string_literals;
        fee_.update(Resource::feeInvalidData, e.what());
    if (m->validation().size() < 50)

        JLOG(p_journal_.warn()) << "Validation: Too small";
        fee_.update(Resource::feeMalformedRequest, "too small");

        auto const closeTime = app_.timeKeeper().closeTime();

            app_.validatorManifests().getMasterKey(pk));

        val->setSeen(closeTime);

                app_.getValidations().parms(),
                app_.timeKeeper().closeTime(),

                val->getSeenTime()))

            JLOG(p_journal_.trace()) << "Validation: Not current";
            fee_.update(Resource::feeUselessData, "not current");

        auto const isTrusted =
            app_.validators().trusted(val->getSignerPublic());

            overlay_.reportInboundTraffic(
                TrafficCount::category::validation_untrusted,
                Message::messageSize(*m));

            if (app_.config().RELAY_UNTRUSTED_VALIDATIONS == -1)

        auto [added, relayed] =
            app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);

            if (relayed &&
                (stopwatch().now() - *relayed) < reduce_relay::IDLED)
                overlay_.updateSlotAndSquelch(
                    key, val->getSignerPublic(), id_, protocol::mtVALIDATION);

            overlay_.reportInboundTraffic(
                TrafficCount::category::validation_duplicate,
                Message::messageSize(*m));

            JLOG(p_journal_.trace()) << "Validation: duplicate";

        if (!isTrusted && (tracking_.load() == Tracking::diverged))

            JLOG(p_journal_.debug())
                << "Dropping untrusted validation from diverged peer";

        else if (isTrusted || !app_.getFeeTrack().isLoadedLocal())

                isTrusted ? "Trusted validation" : "Untrusted validation";

                to_string(val->getNodeID());

            app_.getJobQueue().addJob(

                [weak, val, m, key]() {
                    if (auto peer = weak.lock())
                        peer->checkValidation(val, key, m);

            JLOG(p_journal_.debug())
                << "Dropping untrusted validation for load";

        JLOG(p_journal_.warn())
            << "Exception processing validation: " << e.what();
        using namespace std::string_literals;
        fee_.update(Resource::feeMalformedRequest, e.what());
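// onMessage(TMGetObjectByHash): answer object queries from the node store,
// route otTRANSACTIONS queries through the tx reduce-relay path, and feed
// received fetch-pack objects to the LedgerMaster.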
    protocol::TMGetObjectByHash& packet = *m;

    JLOG(p_journal_.trace()) << "received TMGetObjectByHash " << packet.type()
                             << " " << packet.objects_size();

        if (send_queue_.size() >= Tuning::dropSendQueue)

            JLOG(p_journal_.debug()) << "GetObject: Large send queue";

        if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)

        if (packet.type() == protocol::TMGetObjectByHash::otTRANSACTIONS)

            if (!txReduceRelayEnabled())

                JLOG(p_journal_.error())
                    << "TMGetObjectByHash: tx reduce-relay is disabled";
                fee_.update(Resource::feeMalformedRequest, "disabled");

            app_.getJobQueue().addJob(

                if (auto peer = weak.lock())
                    peer->doTransactions(m);

        protocol::TMGetObjectByHash reply;

        reply.set_query(false);

        if (packet.has_seq())
            reply.set_seq(packet.seq());

        reply.set_type(packet.type());

        if (packet.has_ledgerhash())

                fee_.update(Resource::feeMalformedRequest, "ledger hash");

            reply.set_ledgerhash(packet.ledgerhash());

            Resource::feeModerateBurdenPeer,
            " received a get object by hash request");

        for (int i = 0; i < packet.objects_size(); ++i)

            auto const& obj = packet.objects(i);

                uint256 const hash{obj.hash()};

                std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
                auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};

                    protocol::TMIndexedObject& newObj = *reply.add_objects();
                    newObj.set_hash(hash.begin(), hash.size());

                        &nodeObject->getData().front(),
                        nodeObject->getData().size());

                    if (obj.has_nodeid())
                        newObj.set_index(obj.nodeid());
                    if (obj.has_ledgerseq())
                        newObj.set_ledgerseq(obj.ledgerseq());

        JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size()
                                 << " of " << packet.objects_size();

        bool progress = false;

        for (int i = 0; i < packet.objects_size(); ++i)

            protocol::TMIndexedObject const& obj = packet.objects(i);

                if (obj.has_ledgerseq())

                    if (obj.ledgerseq() != pLSeq)

                        if (pLDo && (pLSeq != 0))

                            JLOG(p_journal_.debug())
                                << "GetObj: Full fetch pack for " << pLSeq;

                        pLSeq = obj.ledgerseq();
                        pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);

                            JLOG(p_journal_.debug())
                                << "GetObj: Late fetch pack for " << pLSeq;

                    uint256 const hash{obj.hash()};

                    app_.getLedgerMaster().addFetchPack(

                            obj.data().begin(), obj.data().end()));

        if (pLDo && (pLSeq != 0))

            JLOG(p_journal_.debug())
                << "GetObj: Partial fetch pack for " << pLSeq;

        if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
            app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
    if (!txReduceRelayEnabled())

        JLOG(p_journal_.error())
            << "TMHaveTransactions: tx reduce-relay is disabled";
        fee_.update(Resource::feeMalformedRequest, "disabled");

    app_.getJobQueue().addJob(

        if (auto peer = weak.lock())
            peer->handleHaveTransactions(m);
PeerImp::handleHaveTransactions(

    protocol::TMGetObjectByHash tmBH;
    tmBH.set_type(protocol::TMGetObjectByHash_ObjectType_otTRANSACTIONS);
    tmBH.set_query(true);

    JLOG(p_journal_.trace())
        << "received TMHaveTransactions " << m->hashes_size();

            JLOG(p_journal_.error())
                << "TMHaveTransactions with invalid hash size";
            fee_.update(Resource::feeMalformedRequest, "hash size");

        auto txn = app_.getMasterTransaction().fetch_from_cache(hash);

        JLOG(p_journal_.trace()) << "checking transaction " << (bool)txn;

            JLOG(p_journal_.debug()) << "adding transaction to request";

            auto obj = tmBH.add_objects();
            obj->set_hash(hash.data(), hash.size());

            removeTxQueue(hash);

    JLOG(p_journal_.trace())
        << "transaction request object is " << tmBH.objects_size();

    if (tmBH.objects_size() > 0)
    if (!txReduceRelayEnabled())

        JLOG(p_journal_.error())
            << "TMTransactions: tx reduce-relay is disabled";
        fee_.update(Resource::feeMalformedRequest, "disabled");

    JLOG(p_journal_.trace())
        << "received TMTransactions " << m->transactions_size();

    overlay_.addTxMetrics(m->transactions_size());

            m->mutable_transactions(i), [](protocol::TMTransaction*) {}),
    using on_message_fn =

    if (!strand_.running_in_this_thread())

            (on_message_fn)&PeerImp::onMessage, shared_from_this(), m));

    if (!m->has_validatorpubkey())

        fee_.update(Resource::feeInvalidData, "squelch no pubkey");

    auto validator = m->validatorpubkey();

        fee_.update(Resource::feeInvalidData, "squelch bad pubkey");

    if (key == app_.getValidationPublicKey())

        JLOG(p_journal_.debug())
            << "onMessage: TMSquelch discarding validator's squelch " << slice;

        m->has_squelchduration() ? m->squelchduration() : 0;

        squelch_.removeSquelch(key);

        fee_.update(Resource::feeInvalidData, "squelch duration");

    JLOG(p_journal_.debug())
        << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;
    (void)lockedRecentLock;

    if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
        recentLedgers_.end())

    recentLedgers_.push_back(hash);
    if (app_.getFeeTrack().isLoadedLocal() ||
        (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
        (app_.getJobQueue().getJobCount(jtPACK) > 10))

        JLOG(p_journal_.info()) << "Too busy to make fetch pack";

        JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
        fee_.update(Resource::feeMalformedRequest, "hash size");

    fee_.fee = Resource::feeHeavyBurdenPeer;

    uint256 const hash{packet->ledgerhash()};

    auto elapsed = UptimeClock::now();
    auto const pap = &app_;
    app_.getJobQueue().addJob(
        jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed]() {
            pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
PeerImp::doTransactions(

    protocol::TMTransactions reply;

    JLOG(p_journal_.trace()) << "received TMGetObjectByHash requesting tx "
                             << packet->objects_size();

    if (packet->objects_size() > reduce_relay::MAX_TX_QUEUE_SIZE)

        JLOG(p_journal_.error()) << "doTransactions, invalid number of hashes";
        fee_.update(Resource::feeMalformedRequest, "too big");

        auto const& obj = packet->objects(i);

            fee_.update(Resource::feeMalformedRequest, "hash size");

        auto txn = app_.getMasterTransaction().fetch_from_cache(hash);

            JLOG(p_journal_.error())
                << "doTransactions, transaction not found "

            fee_.update(Resource::feeMalformedRequest, "tx not found");

        auto tx = reply.add_transactions();
        auto sttx = txn->getSTransaction();

        tx->set_rawtransaction(s.data(), s.size());

            txn->getStatus() == INCLUDED ? protocol::tsCURRENT

        tx->set_receivetimestamp(
            app_.timeKeeper().now().time_since_epoch().count());
        tx->set_deferred(txn->getSubmitResult().queued);

    if (reply.transactions_size() > 0)
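// checkTransaction (job-queue callback): reject expired or inner-batch
// transactions, canonicalize pseudo-transactions, verify validity via
// checkValidity, and hand accepted transactions to
// NetworkOPs::processTransaction.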
PeerImp::checkTransaction(

    bool checkSignature,

            JLOG(p_journal_.warn()) << "Ignoring Network relayed Tx containing "
                                       "tfInnerBatchTxn (checkSignature).";
            charge(Resource::feeModerateBurdenPeer, "inner batch txn");

        if (stx->isFieldPresent(sfLastLedgerSequence) &&
            (stx->getFieldU32(sfLastLedgerSequence) <
             app_.getLedgerMaster().getValidLedgerIndex()))

            JLOG(p_journal_.info())
                << "Marking transaction " << stx->getTransactionID()
                << " as BAD because it's expired";
            app_.getHashRouter().setFlags(
                stx->getTransactionID(), HashRouterFlags::BAD);
            charge(Resource::feeUselessData, "expired tx");

                tx->getStatus() == NEW,
                "ripple::PeerImp::checkTransaction Transaction created "

            if (tx->getStatus() == NEW)

                JLOG(p_journal_.debug())
                    << "Processing " << (batch ? "batch" : "unsolicited")
                    << " pseudo-transaction tx " << tx->getID();

                app_.getMasterTransaction().canonicalize(&tx);

                    app_.getHashRouter().shouldRelay(tx->getID());

                    JLOG(p_journal_.debug())
                        << "Passing skipped pseudo-transaction tx "

                    app_.overlay().relay(tx->getID(), {}, *toSkip);

                JLOG(p_journal_.debug())
                    << "Charging for pseudo-transaction tx " << tx->getID();
                charge(Resource::feeUselessData, "pseudo tx");

                    app_.getHashRouter(),

                    app_.getLedgerMaster().getValidatedRules(),

                valid != Validity::Valid)

                if (!validReason.empty())

                    JLOG(p_journal_.debug())
                        << "Exception checking transaction: " << validReason;

                app_.getHashRouter().setFlags(
                    stx->getTransactionID(), HashRouterFlags::BAD);

                    Resource::feeInvalidSignature,
                    "check transaction signature failure");

                app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);

        if (tx->getStatus() == INVALID)

            if (!reason.empty())

                JLOG(p_journal_.debug())
                    << "Exception checking transaction: " << reason;

            app_.getHashRouter().setFlags(
                stx->getTransactionID(), HashRouterFlags::BAD);
            charge(Resource::feeInvalidSignature, "tx (impossible)");

        bool const trusted = any(flags & HashRouterFlags::TRUSTED);
        app_.getOPs().processTransaction(
            tx, trusted, false, NetworkOPs::FailHard::no);

        JLOG(p_journal_.warn())
            << "Exception in " << __func__ << ": " << ex.what();
        app_.getHashRouter().setFlags(
            stx->getTransactionID(), HashRouterFlags::BAD);
        using namespace std::string_literals;
        charge(Resource::feeInvalidData, "tx "s + ex.what());
PeerImp::checkPropose(

    JLOG(p_journal_.trace())
        << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";

    XRPL_ASSERT(packet, "ripple::PeerImp::checkPropose : non-null packet");

        JLOG(p_journal_.warn()) << desc;
        charge(Resource::feeInvalidSignature, desc);

        relay = app_.getOPs().processTrustedProposal(peerPos);

        relay = app_.config().RELAY_UNTRUSTED_PROPOSALS == 1 || cluster();

        auto haveMessage = app_.overlay().relay(

        if (!haveMessage.empty())
            overlay_.updateSlotAndSquelch(

                std::move(haveMessage),
                protocol::mtPROPOSE_LEDGER);
PeerImp::checkValidation(

    if (!val->isValid())

        std::string desc{"Validation forwarded by peer is invalid"};
        JLOG(p_journal_.debug()) << desc;
        charge(Resource::feeInvalidSignature, desc);

            overlay_.relay(*packet, key, val->getSignerPublic());
        if (!haveMessage.empty())

            overlay_.updateSlotAndSquelch(

                val->getSignerPublic(),
                std::move(haveMessage),
                protocol::mtVALIDATION);

        JLOG(p_journal_.trace())
            << "Exception processing validation: " << ex.what();
        using namespace std::string_literals;
        charge(Resource::feeMalformedRequest, "validation "s + ex.what());
        if (p->hasTxSet(rootHash) && p.get() != skip)

            auto score = p->getScore(true);
            if (!ret || (score > retScore))

        if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)

            auto score = p->getScore(true);
            if (!ret || (score > retScore))
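// sendLedgerBase: serialize the ledger header plus the state-map and
// transaction-map root nodes into the TMLedgerData reply.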
PeerImp::sendLedgerBase(

    protocol::TMLedgerData& ledgerData)

    JLOG(p_journal_.trace()) << "sendLedgerBase: Base data";

    addRaw(ledger->info(), s);

    auto const& stateMap{ledger->stateMap()};
    if (stateMap.getHash() != beast::zero)

        stateMap.serializeRoot(root);
        ledgerData.add_nodes()->set_nodedata(
            root.getDataPtr(), root.getLength());

        if (ledger->info().txHash != beast::zero)

            auto const& txMap{ledger->txMap()};
            if (txMap.getHash() != beast::zero)

                txMap.serializeRoot(root);
                ledgerData.add_nodes()->set_nodedata(
                    root.getDataPtr(), root.getLength());
    JLOG(p_journal_.trace()) << "getLedger: Ledger";

    if (m->has_ledgerhash())

        uint256 const ledgerHash{m->ledgerhash()};
        ledger = app_.getLedgerMaster().getLedgerByHash(ledgerHash);

            JLOG(p_journal_.trace())
                << "getLedger: Don't have ledger with hash " << ledgerHash;

            if (m->has_querytype() && !m->has_requestcookie())

                    m->has_ledgerseq() ? m->ledgerseq() : 0,

                    m->set_requestcookie(id());

                    JLOG(p_journal_.debug())
                        << "getLedger: Request relayed to peer";

                JLOG(p_journal_.trace())
                    << "getLedger: Failed to find peer to relay request";

    else if (m->has_ledgerseq())

        if (m->ledgerseq() < app_.getLedgerMaster().getEarliestFetch())

            JLOG(p_journal_.debug())
                << "getLedger: Early ledger sequence request";

        ledger = app_.getLedgerMaster().getLedgerBySeq(m->ledgerseq());

            JLOG(p_journal_.debug())
                << "getLedger: Don't have ledger with sequence "

    else if (m->has_ltype() && m->ltype() == protocol::ltCLOSED)

        ledger = app_.getLedgerMaster().getClosedLedger();

        auto const ledgerSeq{ledger->info().seq};
        if (m->has_ledgerseq())

            if (ledgerSeq != m->ledgerseq())

                if (!m->has_requestcookie())

                        Resource::feeMalformedRequest, "get_ledger ledgerSeq");

                JLOG(p_journal_.warn())
                    << "getLedger: Invalid ledger sequence " << ledgerSeq;

        else if (ledgerSeq < app_.getLedgerMaster().getEarliestFetch())

            JLOG(p_journal_.debug())
                << "getLedger: Early ledger sequence request " << ledgerSeq;

        JLOG(p_journal_.debug()) << "getLedger: Unable to find ledger";
    JLOG(p_journal_.trace()) << "getTxSet: TX set";

    uint256 const txSetHash{m->ledgerhash()};

        app_.getInboundTransactions().getSet(txSetHash, false)};

        if (m->has_querytype() && !m->has_requestcookie())

                m->set_requestcookie(id());

                JLOG(p_journal_.debug()) << "getTxSet: Request relayed";

                JLOG(p_journal_.debug())
                    << "getTxSet: Failed to find relay peer";

            JLOG(p_journal_.debug()) << "getTxSet: Failed to find TX set";
    if (!m->has_requestcookie())

            Resource::feeModerateBurdenPeer, "received a get ledger request");

    SHAMap const* map{nullptr};
    protocol::TMLedgerData ledgerData;
    bool fatLeaves{true};
    auto const itype{m->itype()};

    if (itype == protocol::liTS_CANDIDATE)

        if (sharedMap = getTxSet(m); !sharedMap)

        map = sharedMap.get();

        ledgerData.set_ledgerseq(0);
        ledgerData.set_ledgerhash(m->ledgerhash());
        ledgerData.set_type(protocol::liTS_CANDIDATE);
        if (m->has_requestcookie())
            ledgerData.set_requestcookie(m->requestcookie());

        if (send_queue_.size() >= Tuning::dropSendQueue)

            JLOG(p_journal_.debug())
                << "processLedgerRequest: Large send queue";

        if (app_.getFeeTrack().isLoadedLocal() && !cluster())

            JLOG(p_journal_.debug()) << "processLedgerRequest: Too busy";

        if (ledger = getLedger(m); !ledger)

        auto const ledgerHash{ledger->info().hash};
        ledgerData.set_ledgerhash(ledgerHash.begin(), ledgerHash.size());
        ledgerData.set_ledgerseq(ledger->info().seq);
        ledgerData.set_type(itype);
        if (m->has_requestcookie())
            ledgerData.set_requestcookie(m->requestcookie());

            case protocol::liBASE:
                sendLedgerBase(ledger, ledgerData);

            case protocol::liTX_NODE:
                map = &ledger->txMap();
                JLOG(p_journal_.trace()) << "processLedgerRequest: TX map hash "
                                         << to_string(map->getHash());

            case protocol::liAS_NODE:
                map = &ledger->stateMap();
                JLOG(p_journal_.trace())
                    << "processLedgerRequest: Account state map hash "
                    << to_string(map->getHash());

                JLOG(p_journal_.error())
                    << "processLedgerRequest: Invalid ledger info type";

        JLOG(p_journal_.warn()) << "processLedgerRequest: Unable to find map";

    if (m->nodeids_size() > 0)

        auto const queryDepth{
            m->has_querydepth() ? m->querydepth() : (isHighLatency() ? 2 : 1)};

        for (int i = 0; i < m->nodeids_size() &&
             ledgerData.nodes_size() < Tuning::softMaxReplyNodes;

                data.reserve(Tuning::softMaxReplyNodes);

                if (map->getNodeFat(*shaMapNodeId, data, fatLeaves, queryDepth))

                    JLOG(p_journal_.trace())
                        << "processLedgerRequest: getNodeFat got "
                        << data.size() << " nodes";

                    for (auto const& d : data)

                        if (ledgerData.nodes_size() >=
                            Tuning::hardMaxReplyNodes)

                        protocol::TMLedgerNode* node{ledgerData.add_nodes()};
                        node->set_nodeid(d.first.getRawString());
                        node->set_nodedata(d.second.data(), d.second.size());

                    JLOG(p_journal_.warn())
                        << "processLedgerRequest: getNodeFat returns false";

                    case protocol::liBASE:
                        info = "Ledger base";

                    case protocol::liTX_NODE:

                    case protocol::liAS_NODE:

                    case protocol::liTS_CANDIDATE:
                        info = "TS candidate";

                if (!m->has_ledgerhash())
                    info += ", no hash specified";

                JLOG(p_journal_.warn())
                    << "processLedgerRequest: getNodeFat with nodeId "
                    << *shaMapNodeId << " and ledger info type " << info
                    << " throws exception: " << e.what();

    JLOG(p_journal_.info())
        << "processLedgerRequest: Got request for " << m->nodeids_size()
        << " nodes at depth " << queryDepth << ", return "
        << ledgerData.nodes_size() << " nodes";

    if (ledgerData.nodes_size() == 0)
PeerImp::getScore(bool haveItem) const

    static int const spRandomMax = 9999;

    static int const spHaveItem = 10000;

    static int const spLatency = 30;

    static int const spNoLatency = 8000;

        score += spHaveItem;

            score -= latency->count() * spLatency;

            score -= spNoLatency;

PeerImp::isHighLatency() const

    return latency_ >= peerHighLatency;
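// Metrics::add_message: accumulate total and per-interval byte counts and
// refresh the one-second rolling average used for the avg_bps metrics.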
    using namespace std::chrono_literals;

    totalBytes_ += bytes;
    accumBytes_ += bytes;
    auto const timeElapsed = clock_type::now() - intervalStart_;
    auto const timeElapsedInSecs =
        std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);

    if (timeElapsedInSecs >= 1s)

        auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
        rollingAvg_.push_back(avgBytes);

        auto const totalBytes =

        rollingAvgBytes_ = totalBytes / rollingAvg_.size();

        intervalStart_ = clock_type::now();

PeerImp::Metrics::average_bytes() const

    return rollingAvgBytes_;

PeerImp::Metrics::total_bytes() const