#include <ripple/app/consensus/RCLValidations.h>
#include <ripple/app/ledger/InboundLedgers.h>
#include <ripple/app/ledger/InboundTransactions.h>
#include <ripple/app/ledger/LedgerMaster.h>
#include <ripple/app/misc/HashRouter.h>
#include <ripple/app/misc/LoadFeeTrack.h>
#include <ripple/app/misc/NetworkOPs.h>
#include <ripple/app/misc/Transaction.h>
#include <ripple/app/misc/ValidatorList.h>
#include <ripple/app/tx/apply.h>
#include <ripple/basics/UptimeClock.h>
#include <ripple/basics/base64.h>
#include <ripple/basics/random.h>
#include <ripple/basics/safe_cast.h>
#include <ripple/beast/core/LexicalCast.h>
#include <ripple/beast/core/SemanticVersion.h>
#include <ripple/nodestore/DatabaseShard.h>
#include <ripple/overlay/Cluster.h>
#include <ripple/overlay/impl/PeerImp.h>
#include <ripple/overlay/impl/Tuning.h>
#include <ripple/overlay/predicates.h>
#include <ripple/protocol/digest.h>

#include <boost/algorithm/clamp.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/beast/core/ostream.hpp>

using namespace std::chrono_literals;
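// PeerImp constructor (excerpt). The initializer list below wires up the
// peer's log sinks, the TLS stream and its strand, the remote endpoint taken
// from the slot, and handshake-derived state; compression is enabled only if
// the peer offered lz4 via the "X-Offer-Compression" header.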
    , sink_(app_.journal("Peer"), makePrefix(id))
    , p_sink_(app_.journal("Protocol"), makePrefix(id))
    , stream_ptr_(std::move(stream_ptr))
    , socket_(stream_ptr_->next_layer().socket())
    , stream_(*stream_ptr_)
    , strand_(socket_.get_executor())
    , remote_address_(slot->remote_endpoint())
    , tracking_(Tracking::unknown)
    , trackingTime_(clock_type::now())
    , publicKey_(publicKey)
    , lastPingTime_(clock_type::now())
    , creationTime_(clock_type::now())
    , request_(std::move(request))
    , compressionEnabled_(
          headers_["X-Offer-Compression"] == "lz4" ? Compressed::On
                                                   : Compressed::Off)
    const bool inCluster{cluster()};
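// PeerImp::run() (excerpt): work is marshalled onto the strand, then the
// optional "Closed-Ledger" and "Previous-Ledger" handshake headers are parsed
// into ledger hashes; inconsistent combinations fail the connection, and a
// TMGetPeerShardInfo query is sent once the handshake data is accepted.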
    if (!strand_.running_in_this_thread())

    auto parseLedgerHash =
        [](std::string const& value) -> boost::optional<uint256> {

    boost::optional<uint256> closed;
    boost::optional<uint256> previous;

    if (auto const iter = headers_.find("Closed-Ledger");
        closed = parseLedgerHash(iter->value().to_string());
            fail("Malformed handshake data (1)");

    if (auto const iter = headers_.find("Previous-Ledger");
        previous = parseLedgerHash(iter->value().to_string());
            fail("Malformed handshake data (2)");

    if (previous && !closed)
        fail("Malformed handshake data (3)");

    protocol::TMGetPeerShardInfo tmGPS;
    send(std::make_shared<Message>(tmGPS, protocol::mtGET_PEER_SHARD_INFO));
    if (!strand_.running_in_this_thread())
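// PeerImp::send() (excerpt): outbound messages from a currently squelched
// validator are dropped early, otherwise the message is accounted against the
// peer's TrafficCount category and queued for delivery; the pending queue
// size is logged as "sendq" and the write is performed with
// boost::asio::async_write.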
    if (!strand_.running_in_this_thread())

    auto validator = m->getValidatorKey();
    if (validator && squelch_.isSquelched(*validator))

        safe_cast<TrafficCount::category>(m->getCategory()),

        << " sendq: " << sendq_size;

    boost::asio::async_write(
            std::placeholders::_1,
            std::placeholders::_2)));
        strand_.running_in_this_thread())

        fail("charge: Resources");
    auto const iter = headers_.find("Crawl");

    return boost::iequals(iter->value(), "public");
        return headers_["User-Agent"].to_string();
    return headers_["Server"].to_string();
        ret[jss::inbound] = true;

        ret[jss::cluster] = true;

        ret[jss::server_domain] = domain();

        ret[jss::network_id] = nid;

        ret[jss::version] = version;

        std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());

    if ((minSeq != 0) || (maxSeq != 0))
        ret[jss::complete_ledgers] =

        ret[jss::track] = "diverged";

        ret[jss::track] = "unknown";

    protocol::TMStatusChange last_status;

    if (closedLedgerHash != beast::zero)
        ret[jss::ledger] = to_string(closedLedgerHash);

    if (last_status.has_newstatus())
    {
        switch (last_status.newstatus())
        {
            case protocol::nsCONNECTING:
                ret[jss::status] = "connecting";
                break;

            case protocol::nsCONNECTED:
                ret[jss::status] = "connected";
                break;

            case protocol::nsMONITORING:
                ret[jss::status] = "monitoring";
                break;

            case protocol::nsVALIDATING:
                ret[jss::status] = "validating";
                break;

            case protocol::nsSHUTTING:
                ret[jss::status] = "shutting";
                break;

                    << "Unknown status: " << last_status.newstatus();
        }
    }

    ret[jss::metrics][jss::total_bytes_recv] =
    ret[jss::metrics][jss::total_bytes_sent] =
    ret[jss::metrics][jss::avg_bps_recv] =
    ret[jss::metrics][jss::avg_bps_sent] =
    return boost::icl::contains(it->second.shardIndexes, shardIndex);
    assert(strand_.running_in_this_thread());

    if (!strand_.running_in_this_thread())

        << " failed: " << reason;

    assert(strand_.running_in_this_thread());

boost::optional<RangeSet<std::uint32_t>>

        return it->second.shardIndexes;

boost::optional<hash_map<PublicKey, PeerImp::ShardInfo>>

    assert(strand_.running_in_this_thread());
    stream_.async_shutdown(bind_executor(

    timer_.expires_from_now(peerTimerInterval, ec);

    timer_.async_wait(bind_executor(
    if (ec == boost::asio::error::operation_aborted)

        fail("Large send queue");

    clock_type::duration duration;

        fail("Ping Timeout");

    protocol::TMPing message;
    message.set_type(protocol::TMPing::ptPING);

    send(std::make_shared<Message>(message, protocol::mtPING));
        JLOG(journal_.error()) << "onShutdown: expected error condition";

    if (ec != boost::asio::error::eof)
        return fail("onShutdown", ec);

        return fail("makeSharedValue: Unexpected failure");
    auto write_buffer = [this, sharedValue]() {
        auto buf = std::make_shared<boost::beast::multi_buffer>();

        resp.result(boost::beast::http::status::switching_protocols);

        resp.insert("Connection", "Upgrade");

        resp.insert("Connect-As", "Peer");

        if (request_["X-Offer-Compression"] == "lz4" &&

            resp.insert("X-Offer-Compression", "lz4");

        boost::beast::ostream(*buf) << resp;

    boost::asio::async_write(
        write_buffer->data(),
        boost::asio::transfer_all(),

            if (ec == boost::asio::error::operation_aborted)

                return fail("onWriteResponse", ec);
            if (write_buffer->size() == bytes_transferred)

            return fail("Failed to write header");
    return headers_["Server-Domain"].to_string();
    protocol::TMValidatorList vl;

    vl.set_signature(signature);
    vl.set_version(version);

        << "Sending validator list for " << strHex(pubKey)
        << " with sequence " << sequence << " to "

    send(std::make_shared<Message>(vl, protocol::mtVALIDATORLIST));
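// onReadMessage()/onWriteMessage() (excerpt): the asynchronous read loop adds
// received bytes to the receive metrics, consumes complete protocol messages,
// and re-arms the boost::asio read until EOF or error; the write loop drains
// the send queue one Message at a time, updates the send metrics, and starts
// the TLS shutdown once a graceful close has been requested.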
    if (ec == boost::asio::error::operation_aborted)

    if (ec == boost::asio::error::eof)

        return fail("onReadMessage", ec);

    if (bytes_transferred > 0)
        stream << "onReadMessage: " << bytes_transferred << " bytes";
    else
        stream << "onReadMessage";

    metrics_.recv.add_message(bytes_transferred);

        return fail("onReadMessage", ec);

        if (bytes_consumed == 0)

            std::placeholders::_1,
            std::placeholders::_2)));
    if (ec == boost::asio::error::operation_aborted)

        return fail("onWriteMessage", ec);

    if (bytes_transferred > 0)
        stream << "onWriteMessage: " << bytes_transferred << " bytes";
    else
        stream << "onWriteMessage";

    metrics_.sent.add_message(bytes_transferred);

        return boost::asio::async_write(
                std::placeholders::_1,
                std::placeholders::_2)));

        return stream_.async_shutdown(bind_executor(
                std::placeholders::_1)));
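// Protocol message handlers. onMessage(TMPing) answers a ptPING query with a
// ptPONG carrying the same payload and, for a ptPONG whose sequence number
// matches the outstanding ping, records the measured round-trip time.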
    if (m->type() == protocol::TMPing::ptPING)

        m->set_type(protocol::TMPing::ptPONG);
        send(std::make_shared<Message>(*m, protocol::mtPING));

    if (m->type() == protocol::TMPing::ptPONG && m->has_seq())

        auto const rtt = std::chrono::round<std::chrono::milliseconds>(
    for (int i = 0; i < m->clusternodes().size(); ++i)

        protocol::TMClusterNode const& node = m->clusternodes(i);

        if (node.has_nodename())
            name = node.nodename();

        auto const publicKey =

        auto const reportTime =

            *publicKey, name, node.nodeload(), reportTime);

    int loadSources = m->loadsources().size();
    if (loadSources != 0)

        gossip.items.reserve(loadSources);
        for (int i = 0; i < m->loadsources().size(); ++i)

            protocol::TMLoadSource const& node = m->loadsources(i);

            gossip.items.push_back(item);

        if (status.getReportTime() >= thresh)
            fees.push_back(status.getLoadFee());

    auto const index = fees.size() / 2;

        clusterFee = fees[index];
        return badData("Invalid peer chain");

        auto shards{shardStore->getCompleteShards()};
        if (!shards.empty())

            protocol::TMPeerShardInfo reply;
            reply.set_shardindexes(shards);

            if (m->has_lastlink())
                reply.set_lastlink(true);

            if (m->peerchain_size() > 0)

                for (int i = 0; i < m->peerchain_size(); ++i)

                        return badData("Invalid peer chain public key");

                *reply.mutable_peerchain() = m->peerchain();

            send(std::make_shared<Message>(reply, protocol::mtPEER_SHARD_INFO));

        m->set_hops(m->hops() - 1);

            m->set_lastlink(true);

        m->add_peerchain()->set_nodepubkey(

            std::make_shared<Message>(*m, protocol::mtGET_PEER_SHARD_INFO),
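// onMessage(TMPeerShardInfo) (excerpt): validates the reported shard index
// ranges against the node's earliest shard and current ledger sequence,
// records the originator's endpoint and shard ranges in shardInfo_, and either
// consumes the message or relays it back along the recorded peer chain.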
    if (m->shardindexes().empty())
        return badData("Missing shard indexes");

        return badData("Invalid peer chain");

        return badData("Invalid public key");

    if (m->peerchain_size() > 0)

            makeSlice(m->peerchain(m->peerchain_size() - 1).nodepubkey())};

            return badData("Invalid pubKey");

        if (!m->has_nodepubkey())

        if (!m->has_endpoint())

            m->set_endpoint("0");

        m->mutable_peerchain()->RemoveLast();

            std::make_shared<Message>(*m, protocol::mtPEER_SHARD_INFO));

            << "Relayed TMPeerShardInfo to peer with IP "

    if (!from_string(shardIndexes, m->shardindexes()))
        return badData("Invalid shard indexes");

    boost::optional<std::uint32_t> latestShard;

        auto const curLedgerSeq{

            earliestShard = shardStore->earliestShardIndex();
            if (curLedgerSeq >= shardStore->earliestLedgerSeq())
                latestShard = shardStore->seqToShardIndex(curLedgerSeq);

            auto const earliestLedgerSeq{

            if (curLedgerSeq >= earliestLedgerSeq)

    if (boost::icl::first(shardIndexes) < earliestShard ||
        (latestShard && boost::icl::last(shardIndexes) > latestShard))

        return badData("Invalid shard indexes");

    if (m->has_endpoint())

        if (m->endpoint() != "0")

                return badData("Invalid incoming endpoint: " + m->endpoint());
            endpoint = std::move(*result);

    if (m->has_nodepubkey())

            it->second.endpoint = std::move(endpoint);

            it->second.shardIndexes += shardIndexes;

            shardInfo.endpoint = std::move(endpoint);

            shardInfo_.emplace(publicKey, std::move(shardInfo));

        << "Consumed TMPeerShardInfo originating from public key "

        << m->shardindexes();

    if (m->has_lastlink())
    endpoints.reserve(m->endpoints_v2().size());

    for (auto const& tm : m->endpoints_v2())

            << tm.endpoint() << "}";

    if (!endpoints.empty())
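// onMessage(TMTransaction) (excerpt): deserializes the transaction, decides
// whether it can be treated as trusted and whether the signature check can be
// skipped, and hands the work off to the job queue as
// "recvTransaction->checkTransaction"; nothing is processed while the server
// is still acquiring the network ledger or is not yet synchronized.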
            << "Need network ledger";

        auto stx = std::make_shared<STTx const>(sit);
        uint256 txID = stx->getTransactionID();

        bool checkSignature = true;

            if (!m->has_deferred() || !m->deferred())

                flags |= SF_TRUSTED;

                checkSignature = false;

                << "No new transactions until synchronized";

            "recvTransaction->checkTransaction",

                if (auto peer = weak.lock())
                    peer->checkTransaction(flags, checkSignature, stx);

            << "Transaction invalid: " << strHex(m->rawtransaction());

        if (auto peer = weak.lock())
    protocol::TMLedgerData& packet = *m;

    if (m->nodes().size() <= 0)

    if (m->has_requestcookie())

            m->clear_requestcookie();

                std::make_shared<Message>(packet, protocol::mtLEDGER_DATA));

            JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";

        JLOG(p_journal_.warn()) << "TX candidate reply with invalid hash size";

    uint256 const hash{m->ledgerhash()};

    if (m->type() == protocol::liTS_CANDIDATE)

                if (auto peer = weak.lock())
                    peer->app_.getInboundTransactions().gotData(hash, peer, m);
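// onMessage(TMProposeSet) (excerpt): performs basic size checks on the
// signature and hashes, deduplicates the proposal through the HashRouter
// suppression map, drops untrusted proposals from diverged peers, and queues
// "recvPropose->checkPropose" for signature verification and relay.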
    protocol::TMProposeSet& set = *m;

    if ((boost::algorithm::clamp(sig.size(), 64, 72) != sig.size()) ||

    uint256 const proposeHash{set.currenttxhash()};
    uint256 const prevLedger{set.previousledger()};

    if (auto [added, relayed] =

            suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);

            << "Proposal: Dropping untrusted (peer divergence)";

        << "Proposal: " << (isTrusted ? "trusted" : "untrusted");

        "recvPropose->checkPropose",

            if (auto peer = weak.lock())
                peer->checkPropose(job, m, proposal);
    if (!m->has_networktime())

    if (!last_status_.has_newstatus() || m->has_newstatus())

        protocol::NodeStatus status = last_status_.newstatus();

        m->set_newstatus(status);

    if (m->newevent() == protocol::neLOST_SYNC)

        bool outOfSync{false};

    bool const peerChangedLedgers{

    if (peerChangedLedgers)

    if (m->has_ledgerhashprevious() &&

    if (peerChangedLedgers)

    if (m->has_firstseq() && m->has_lastseq())

    if (m->has_ledgerseq() &&
    if (m->has_newstatus())
    {
        switch (m->newstatus())
        {
            case protocol::nsCONNECTING:
                j[jss::status] = "CONNECTING";
                break;
            case protocol::nsCONNECTED:
                j[jss::status] = "CONNECTED";
                break;
            case protocol::nsMONITORING:
                j[jss::status] = "MONITORING";
                break;
            case protocol::nsVALIDATING:
                j[jss::status] = "VALIDATING";
                break;
            case protocol::nsSHUTTING:
                j[jss::status] = "SHUTTING";
                break;
        }
    }

    if (m->has_newevent())
    {
        switch (m->newevent())
        {
            case protocol::neCLOSING_LEDGER:
                j[jss::action] = "CLOSING_LEDGER";
                break;
            case protocol::neACCEPTED_LEDGER:
                j[jss::action] = "ACCEPTED_LEDGER";
                break;
            case protocol::neSWITCHED_LEDGER:
                j[jss::action] = "SWITCHED_LEDGER";
                break;
            case protocol::neLOST_SYNC:
                j[jss::action] = "LOST_SYNC";
                break;
        }
    }

    if (m->has_ledgerseq())
    {
        j[jss::ledger_index] = m->ledgerseq();
    }

    if (m->has_ledgerhash())
    {
        uint256 closedLedgerHash{};
        {
            std::lock_guard sl(recentLock_);
            closedLedgerHash = closedLedgerHash_;
        }
        j[jss::ledger_hash] = to_string(closedLedgerHash);
    }

    if (m->has_networktime())
    {
        j[jss::date] = Json::UInt(m->networktime());
    }

    if (m->has_firstseq() && m->has_lastseq())
    {
        j[jss::ledger_index_min] = Json::UInt(m->firstseq());
        j[jss::ledger_index_max] = Json::UInt(m->lastseq());
    }
        serverSeq = maxLedger_;

        checkTracking(serverSeq, validationSeq);

    if (diff < Tuning::convergedLedgerLimit)

        tracking_ = Tracking::converged;

    if ((diff > Tuning::divergedLedgerLimit) &&
        (tracking_.load() != Tracking::diverged))

        tracking_ = Tracking::diverged;
        trackingTime_ = clock_type::now();
        fee_ = Resource::feeInvalidRequest;

    uint256 const hash{m->hash()};

    if (m->status() == protocol::tsHAVE)

        if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
            recentTxSets_.end())

            fee_ = Resource::feeUnwantedData;

        recentTxSets_.push_back(hash);
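// onMessage(TMValidatorList) (excerpt): rejects lists from peers whose
// negotiated protocol version does not support validator-list propagation,
// drops duplicates via the HashRouter, then applies the list through
// ValidatorList::applyListAndBroadcast and charges the peer according to the
// resulting ListDisposition (accepted, same_sequence, stale, untrusted,
// invalid, or unsupported_version).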
    if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))

        JLOG(p_journal_.debug())
            << "ValidatorList: received validator list from peer using "
            << "protocol version " << to_string(protocol_)
            << " which shouldn't support this feature.";
        fee_ = Resource::feeUnwantedData;

    auto const& manifest = m->manifest();
    auto const& blob = m->blob();
    auto const& signature = m->signature();
    auto const version = m->version();

    JLOG(p_journal_.debug())
        << "Received validator list from " << remote_address_.to_string()
        << " (" << id_ << ")";

    if (!app_.getHashRouter().addSuppressionPeer(hash, id_))

        JLOG(p_journal_.debug())
            << "ValidatorList: received duplicate validator list";

        fee_ = Resource::feeUnwantedData;

    auto const applyResult = app_.validators().applyListAndBroadcast(
        remote_address_.to_string(),
        app_.getHashRouter());
    auto const disp = applyResult.disposition;

    JLOG(p_journal_.debug())
        << "Processed validator list from "
        << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
                                     : "unknown or invalid publisher")
        << " from " << remote_address_.to_string() << " (" << id_
        << ") with result " << to_string(disp);
        case ListDisposition::accepted:
            JLOG(p_journal_.debug())
                << "Applied new validator list from peer "

            assert(applyResult.sequence && applyResult.publisherKey);
            auto const& pubKey = *applyResult.publisherKey;

            if (auto const iter = publisherListSequences_.find(pubKey);
                iter != publisherListSequences_.end())

                assert(iter->second < *applyResult.sequence);

            publisherListSequences_[pubKey] = *applyResult.sequence;

        case ListDisposition::same_sequence:
            JLOG(p_journal_.warn())
                << "Validator list with current sequence from peer "

            fee_ = Resource::feeUnwantedData;

            assert(applyResult.sequence && applyResult.publisherKey);
                publisherListSequences_[*applyResult.publisherKey] ==
                    *applyResult.sequence);

        case ListDisposition::stale:
            JLOG(p_journal_.warn())
                << "Stale validator list from peer " << remote_address_;

            fee_ = Resource::feeBadData;

        case ListDisposition::untrusted:
            JLOG(p_journal_.warn())
                << "Untrusted validator list from peer " << remote_address_;

            fee_ = Resource::feeUnwantedData;

        case ListDisposition::invalid:
            JLOG(p_journal_.warn())
                << "Invalid validator list from peer " << remote_address_;

            fee_ = Resource::feeInvalidSignature;

        case ListDisposition::unsupported_version:
            JLOG(p_journal_.warn())
                << "Unsupported version validator list from peer "

            fee_ = Resource::feeBadData;

    JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what()
                            << " from peer " << remote_address_;
    fee_ = Resource::feeBadData;
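// onMessage(TMValidation) (excerpt): enforces a minimum message size,
// reconstructs the STValidation, rejects stale ("not current") validations,
// deduplicates via the HashRouter (optionally feeding the squelch slots when
// reduce-relay is enabled), and queues "recvValidation->checkValidation" for
// trusted or cluster validations, or for any validation while local load is
// low.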
    auto const closeTime = app_.timeKeeper().closeTime();

    if (m->validation().size() < 50)

        JLOG(p_journal_.warn()) << "Validation: Too small";
        fee_ = Resource::feeInvalidRequest;

        val = std::make_shared<STValidation>(
            app_.validatorManifests().getMasterKey(pk));
        val->setSeen(closeTime);

                app_.getValidations().parms(),
                app_.timeKeeper().closeTime(),
                val->getSeenTime()))

            JLOG(p_journal_.trace()) << "Validation: Not current";
            fee_ = Resource::feeUnwantedData;

        if (auto [added, relayed] =
                app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);

            if (app_.config().REDUCE_RELAY_ENABLE && (bool)relayed &&
                (stopwatch().now() - *relayed) < squelch::IDLED &&
                squelch::epoch<std::chrono::minutes>(UptimeClock::now()) >
                    squelch::WAIT_ON_BOOTUP)
                overlay_.updateSlotAndSquelch(
                    key, val->getSignerPublic(), id_, protocol::mtVALIDATION);
            JLOG(p_journal_.trace()) << "Validation: duplicate";

        auto const isTrusted =
            app_.validators().trusted(val->getSignerPublic());

        if (!isTrusted && (tracking_.load() == Tracking::diverged))

            JLOG(p_journal_.debug())
                << "Validation: dropping untrusted from diverged peer";

        if (isTrusted || cluster() || !app_.getFeeTrack().isLoadedLocal())

            app_.getJobQueue().addJob(
                "recvValidation->checkValidation",
                [weak, val, m](Job&) {
                    if (auto peer = weak.lock())
                        peer->checkValidation(val, m);

            JLOG(p_journal_.debug()) << "Validation: Dropping UNTRUSTED (load)";

        JLOG(p_journal_.warn())
            << "Exception processing validation: " << e.what();
        fee_ = Resource::feeInvalidRequest;
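// onMessage(TMGetObjectByHash) (excerpt): for a query, each requested hash is
// looked up in the node store (falling back to the shard store when the
// sequence is in shard range) and the found objects are returned in one
// mtGET_OBJECTS reply; for a reply carrying fetch-pack data, the objects are
// grouped by ledger sequence and handed to LedgerMaster::addFetchPack and
// gotFetchPack.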
    protocol::TMGetObjectByHash& packet = *m;

        if (send_queue_.size() >= Tuning::dropSendQueue)

            JLOG(p_journal_.debug()) << "GetObject: Large send queue";

        if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)

        fee_ = Resource::feeMediumBurdenPeer;

        protocol::TMGetObjectByHash reply;

        reply.set_query(false);

        if (packet.has_seq())
            reply.set_seq(packet.seq());

        reply.set_type(packet.type());

        if (packet.has_ledgerhash())

                fee_ = Resource::feeInvalidRequest;

            reply.set_ledgerhash(packet.ledgerhash());

        for (int i = 0; i < packet.objects_size(); ++i)

            auto const& obj = packet.objects(i);

                uint256 const hash{obj.hash()};

                std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
                auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};

                    if (auto shardStore = app_.getShardStore())

                        if (seq >= shardStore->earliestLedgerSeq())
                            nodeObject = shardStore->fetchNodeObject(hash, seq);

                    protocol::TMIndexedObject& newObj = *reply.add_objects();
                    newObj.set_hash(hash.begin(), hash.size());
                        &nodeObject->getData().front(),
                        nodeObject->getData().size());

                    if (obj.has_nodeid())
                        newObj.set_index(obj.nodeid());
                    if (obj.has_ledgerseq())
                        newObj.set_ledgerseq(obj.ledgerseq());

        JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size() << " of "
                                 << packet.objects_size();
        send(std::make_shared<Message>(reply, protocol::mtGET_OBJECTS));
        bool progress = false;

        for (int i = 0; i < packet.objects_size(); ++i)

            const protocol::TMIndexedObject& obj = packet.objects(i);

                if (obj.has_ledgerseq())

                    if (obj.ledgerseq() != pLSeq)

                        if (pLDo && (pLSeq != 0))

                            JLOG(p_journal_.debug())
                                << "GetObj: Full fetch pack for " << pLSeq;

                        pLSeq = obj.ledgerseq();
                        pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);

                            JLOG(p_journal_.debug())
                                << "GetObj: Late fetch pack for " << pLSeq;

                    uint256 const hash{obj.hash()};

                    app_.getLedgerMaster().addFetchPack(
                        std::make_shared<Blob>(
                            obj.data().begin(), obj.data().end()));

        if (pLDo && (pLSeq != 0))

            JLOG(p_journal_.debug())
                << "GetObj: Partial fetch pack for " << pLSeq;

        if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
            app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
    if (!m->has_validatorpubkey())

        charge(Resource::feeBadData);

    auto validator = m->validatorpubkey();

        charge(Resource::feeBadData);

    auto squelch = m->squelch();
    auto duration = m->has_squelchduration() ? m->squelchduration() : 0;
    auto sp = shared_from_this();

    if (key == app_.getValidationPublicKey())

        JLOG(p_journal_.debug())
            << "onMessage: TMSquelch discarding validator's squelch " << slice;

    if (!strand_.running_in_this_thread())
        return post(strand_, [sp, key, squelch, duration]() {
            sp->squelch_.squelch(key, squelch, duration);

    JLOG(p_journal_.debug())
        << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;

    squelch_.squelch(key, squelch, duration);
    (void)lockedRecentLock;

    if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
        recentLedgers_.end())

    recentLedgers_.push_back(hash);
    if (app_.getFeeTrack().isLoadedLocal() ||
        (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
        (app_.getJobQueue().getJobCount(jtPACK) > 10))

        JLOG(p_journal_.info()) << "Too busy to make fetch pack";

        JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
        fee_ = Resource::feeInvalidRequest;

    fee_ = Resource::feeHighBurdenPeer;

    uint256 const hash{packet->ledgerhash()};

    auto elapsed = UptimeClock::now();
    auto const pap = &app_;
    app_.getJobQueue().addJob(
        jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed](Job&) {
            pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
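// PeerImp::checkTransaction() runs on the job queue: it flags and charges for
// transactions that can no longer apply against the current validated ledger
// index, runs the local validity and signature checks when a signature check
// is required, records the Valid result in the HashRouter, and submits the
// transaction to NetworkOPs::processTransaction with FailHard::no.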
PeerImp::checkTransaction(
    bool checkSignature,

            app_.getLedgerMaster().getValidLedgerIndex()))

            app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
            charge(Resource::feeUnwantedData);

                app_.getHashRouter(),
                app_.getLedgerMaster().getValidatedRules(),
                valid != Validity::Valid)

                if (!validReason.empty())

                    JLOG(p_journal_.trace())
                        << "Exception checking transaction: " << validReason;

                app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
                charge(Resource::feeInvalidSignature);

                app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);

        auto tx = std::make_shared<Transaction>(stx, reason, app_);

        if (tx->getStatus() == INVALID)

            if (!reason.empty())

                JLOG(p_journal_.trace())
                    << "Exception checking transaction: " << reason;

            app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
            charge(Resource::feeInvalidSignature);

        bool const trusted(flags & SF_TRUSTED);
        app_.getOPs().processTransaction(
            tx, trusted, false, NetworkOPs::FailHard::no);

        app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
        charge(Resource::feeBadData);
PeerImp::checkPropose(

    JLOG(p_journal_.trace())
        << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";

        JLOG(p_journal_.warn()) << "Proposal fails sig check";
        charge(Resource::feeInvalidSignature);

        relay = app_.getOPs().processTrustedProposal(peerPos);

        relay = app_.config().RELAY_UNTRUSTED_PROPOSALS || cluster();

        auto haveMessage = app_.overlay().relay(

        if (app_.config().REDUCE_RELAY_ENABLE && !haveMessage.empty() &&
            squelch::epoch<std::chrono::minutes>(UptimeClock::now()) >
                squelch::WAIT_ON_BOOTUP)
            overlay_.updateSlotAndSquelch(
                std::move(haveMessage),
                protocol::mtPROPOSE_LEDGER);
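// PeerImp::checkValidation() runs on the job queue: an invalid signature from
// a non-cluster peer is charged as an invalid request; otherwise the
// validation is relayed to the other peers through the overlay, feeding the
// squelch slots when reduce-relay is enabled.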
PeerImp::checkValidation(

    if (!cluster() && !val->isValid())

        JLOG(p_journal_.warn()) << "Validation is invalid";
        charge(Resource::feeInvalidRequest);

        auto const suppression =

            overlay_.relay(*packet, suppression, val->getSignerPublic());
        if (app_.config().REDUCE_RELAY_ENABLE && !haveMessage.empty() &&
            squelch::epoch<std::chrono::minutes>(UptimeClock::now()) >
                squelch::WAIT_ON_BOOTUP)

            overlay_.updateSlotAndSquelch(
                val->getSignerPublic(),
                std::move(haveMessage),
                protocol::mtVALIDATION);

        JLOG(p_journal_.trace()) << "Exception processing validation";
        charge(Resource::feeInvalidRequest);
        if (p->hasTxSet(rootHash) && p.get() != skip)

            auto score = p->getScore(true);
            if (!ret || (score > retScore))

        if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)

            auto score = p->getScore(true);
            if (!ret || (score > retScore))
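// onMessage(TMGetLedger) (excerpt): serves ledger data to a peer. Transaction
// candidate set (liTS_CANDIDATE) requests are answered from
// InboundTransactions or routed to a peer that has the set; other requests
// locate the ledger by hash, sequence, or the closed ledger, then return
// either the serialized header plus SHAMap roots (liBASE) or the requested
// state/transaction tree nodes via getNodeFat, bounded by the query depth and
// Tuning::maxReplyNodes.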
    protocol::TMGetLedger& packet = *m;

    SHAMap const* map = nullptr;
    protocol::TMLedgerData reply;
    bool fatLeaves = true;

    if (packet.has_requestcookie())
        reply.set_requestcookie(packet.requestcookie());

    if (packet.itype() == protocol::liTS_CANDIDATE)

        JLOG(p_journal_.trace()) << "GetLedger: Tx candidate set";

        if (!packet.has_ledgerhash() ||

            charge(Resource::feeInvalidRequest);
            JLOG(p_journal_.warn()) << "GetLedger: Tx candidate set invalid";

        uint256 const txHash{packet.ledgerhash()};

        shared = app_.getInboundTransactions().getSet(txHash, false);

            if (packet.has_querytype() && !packet.has_requestcookie())

                JLOG(p_journal_.debug()) << "GetLedger: Routing Tx set request";

                    packet.set_requestcookie(id());
                    v->send(std::make_shared<Message>(
                        packet, protocol::mtGET_LEDGER));

                JLOG(p_journal_.info()) << "GetLedger: Route TX set failed";

            JLOG(p_journal_.debug()) << "GetLedger: Can't provide map ";
            charge(Resource::feeInvalidRequest);

        reply.set_ledgerseq(0);
        reply.set_ledgerhash(txHash.begin(), txHash.size());
        reply.set_type(protocol::liTS_CANDIDATE);
        if (send_queue_.size() >= Tuning::dropSendQueue)

            JLOG(p_journal_.debug()) << "GetLedger: Large send queue";

        if (app_.getFeeTrack().isLoadedLocal() && !cluster())

            JLOG(p_journal_.debug()) << "GetLedger: Too busy";

        JLOG(p_journal_.trace()) << "GetLedger: Received";

        if (packet.has_ledgerhash())

                charge(Resource::feeInvalidRequest);
                JLOG(p_journal_.warn()) << "GetLedger: Invalid request";

            uint256 const ledgerhash{packet.ledgerhash()};
            logMe += "LedgerHash:";
            logMe += to_string(ledgerhash);
            ledger = app_.getLedgerMaster().getLedgerByHash(ledgerhash);

            if (!ledger && packet.has_ledgerseq())

                if (auto shardStore = app_.getShardStore())

                    auto seq = packet.ledgerseq();
                    if (seq >= shardStore->earliestLedgerSeq())
                        ledger = shardStore->fetchLedger(ledgerhash, seq);

                JLOG(p_journal_.trace())
                    << "GetLedger: Don't have " << ledgerhash;

                (packet.has_querytype() && !packet.has_requestcookie()))

                    packet.has_ledgerseq() ? packet.ledgerseq() : 0,

                    JLOG(p_journal_.trace()) << "GetLedger: Cannot route";

                packet.set_requestcookie(id());
                    std::make_shared<Message>(packet, protocol::mtGET_LEDGER));
                JLOG(p_journal_.debug()) << "GetLedger: Request routed";

        else if (packet.has_ledgerseq())

            if (packet.ledgerseq() < app_.getLedgerMaster().getEarliestFetch())

                JLOG(p_journal_.debug()) << "GetLedger: Early ledger request";

            ledger = app_.getLedgerMaster().getLedgerBySeq(packet.ledgerseq());

                JLOG(p_journal_.debug())
                    << "GetLedger: Don't have " << packet.ledgerseq();

        else if (packet.has_ltype() && (packet.ltype() == protocol::ltCLOSED))

            ledger = app_.getLedgerMaster().getClosedLedger();
            assert(!ledger->open());

            if (ledger && ledger->info().open)
                ledger = app_.getLedgerMaster().getLedgerBySeq(
                    ledger->info().seq - 1);

            charge(Resource::feeInvalidRequest);
            JLOG(p_journal_.warn()) << "GetLedger: Unknown request";

            (packet.has_ledgerseq() &&
             (packet.ledgerseq() != ledger->info().seq)))

            charge(Resource::feeInvalidRequest);

            JLOG(p_journal_.warn()) << "GetLedger: Invalid sequence";

        if (!packet.has_ledgerseq() &&
            (ledger->info().seq < app_.getLedgerMaster().getEarliestFetch()))

            JLOG(p_journal_.debug()) << "GetLedger: Early ledger request";

        auto const lHash = ledger->info().hash;
        reply.set_ledgerhash(lHash.begin(), lHash.size());
        reply.set_ledgerseq(ledger->info().seq);
        reply.set_type(packet.itype());
    if (packet.itype() == protocol::liBASE)

        JLOG(p_journal_.trace()) << "GetLedger: Base data";

        addRaw(ledger->info(), nData);
        reply.add_nodes()->set_nodedata(

        auto const& stateMap = ledger->stateMap();
        if (stateMap.getHash() != beast::zero)

            stateMap.serializeRoot(rootNode);
            reply.add_nodes()->set_nodedata(

            if (ledger->info().txHash != beast::zero)

                auto const& txMap = ledger->txMap();
                if (txMap.getHash() != beast::zero)

                    txMap.serializeRoot(rootNode);
                    reply.add_nodes()->set_nodedata(

            std::make_shared<Message>(reply, protocol::mtLEDGER_DATA);
    if (packet.itype() == protocol::liTX_NODE)

        map = &ledger->txMap();
        logMe += to_string(map->getHash());

    else if (packet.itype() == protocol::liAS_NODE)

        map = &ledger->stateMap();
        logMe += to_string(map->getHash());

    if (!map || (packet.nodeids_size() == 0))

        JLOG(p_journal_.warn()) << "GetLedger: Can't find map or empty request";
        charge(Resource::feeInvalidRequest);

    JLOG(p_journal_.trace()) << "GetLedger: " << logMe;

    auto const depth = packet.has_querydepth()
        ? (std::min(packet.querydepth(), 3u))
        : (isHighLatency() ? 2 : 1);

         (i < packet.nodeids().size() &&
          (reply.nodes().size() < Tuning::maxReplyNodes));

            JLOG(p_journal_.warn()) << "GetLedger: Invalid node " << logMe;
            charge(Resource::feeBadData);

            if (map->getNodeFat(*mn, nodeIDs, rawNodes, fatLeaves, depth))

                assert(nodeIDs.size() == rawNodes.size());
                JLOG(p_journal_.trace()) << "GetLedger: getNodeFat got "
                                         << rawNodes.size() << " nodes";

                for (nodeIDIterator = nodeIDs.begin(),
                     rawNodeIterator = rawNodes.begin();
                     nodeIDIterator != nodeIDs.end();
                     ++nodeIDIterator, ++rawNodeIterator)

                    protocol::TMLedgerNode* node = reply.add_nodes();
                    node->set_nodeid(nodeIDIterator->getRawString());
                        &rawNodeIterator->front(), rawNodeIterator->size());

                JLOG(p_journal_.warn())
                    << "GetLedger: getNodeFat returns false";

            if (packet.itype() == protocol::liTS_CANDIDATE)
                info = "TS candidate";
            else if (packet.itype() == protocol::liBASE)
                info = "Ledger base";
            else if (packet.itype() == protocol::liTX_NODE)
            else if (packet.itype() == protocol::liAS_NODE)

            if (!packet.has_ledgerhash())
                info += ", no hash specified";

            JLOG(p_journal_.warn())
                << "getNodeFat( " << *mn << ") throws exception: " << info;

    JLOG(p_journal_.info())
        << "Got request for " << packet.nodeids().size() << " nodes at depth "
        << depth << ", return " << reply.nodes().size() << " nodes";

    auto oPacket = std::make_shared<Message>(reply, protocol::mtLEDGER_DATA);
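// PeerImp::getScore() ranks peers when choosing whom to query: a random
// component breaks ties, having the requested item adds a large bonus, and
// measured latency subtracts proportionally, with a heavy penalty when no
// latency measurement is available. isHighLatency() compares the smoothed
// latency against the peerHighLatency threshold.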
PeerImp::getScore(bool haveItem) const

    static const int spRandomMax = 9999;

    static const int spHaveItem = 10000;

    static const int spLatency = 30;

    static const int spNoLatency = 8000;

        score += spHaveItem;

    boost::optional<std::chrono::milliseconds> latency;

        score -= latency->count() * spLatency;

        score -= spNoLatency;

PeerImp::isHighLatency() const

    return latency_ >= peerHighLatency;
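// PeerImp::Metrics: add_message() accumulates per-interval byte counts and,
// once at least one second has elapsed, pushes the interval's average into a
// rolling window and recomputes the rolling average; average_bytes() and
// total_bytes() expose the smoothed rate and the lifetime byte count.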
    using namespace std::chrono_literals;

    totalBytes_ += bytes;
    accumBytes_ += bytes;
    auto const timeElapsed = clock_type::now() - intervalStart_;
    auto const timeElapsedInSecs =
        std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);

    if (timeElapsedInSecs >= 1s)

        auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
        rollingAvg_.push_back(avgBytes);

        auto const totalBytes =

        rollingAvgBytes_ = totalBytes / rollingAvg_.size();

        intervalStart_ = clock_type::now();

PeerImp::Metrics::average_bytes() const

    return rollingAvgBytes_;

PeerImp::Metrics::total_bytes() const