20 #include <ripple/app/consensus/RCLValidations.h>
21 #include <ripple/app/ledger/InboundLedgers.h>
22 #include <ripple/app/ledger/InboundTransactions.h>
23 #include <ripple/app/ledger/LedgerMaster.h>
24 #include <ripple/app/misc/HashRouter.h>
25 #include <ripple/app/misc/LoadFeeTrack.h>
26 #include <ripple/app/misc/NetworkOPs.h>
27 #include <ripple/app/misc/Transaction.h>
28 #include <ripple/app/misc/ValidatorList.h>
29 #include <ripple/app/tx/apply.h>
30 #include <ripple/basics/UptimeClock.h>
31 #include <ripple/basics/base64.h>
32 #include <ripple/basics/random.h>
33 #include <ripple/basics/safe_cast.h>
34 #include <ripple/beast/core/LexicalCast.h>
35 #include <ripple/beast/core/SemanticVersion.h>
36 #include <ripple/nodestore/DatabaseShard.h>
37 #include <ripple/overlay/Cluster.h>
38 #include <ripple/overlay/impl/PeerImp.h>
39 #include <ripple/overlay/impl/Tuning.h>
40 #include <ripple/overlay/predicates.h>
41 #include <ripple/protocol/digest.h>
43 #include <boost/algorithm/clamp.hpp>
44 #include <boost/algorithm/string.hpp>
45 #include <boost/algorithm/string/predicate.hpp>
46 #include <boost/beast/core/ostream.hpp>
54 using namespace std::chrono_literals;
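// PeerImp constructor: journal sinks, stream/socket handles, handshake request data, and lz4 compression negotiated via the X-Offer-Compression header.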
79 , sink_(app_.journal("Peer"), makePrefix(id))
80 , p_sink_(app_.journal("Protocol"), makePrefix(id))
83 , stream_ptr_(std::move(stream_ptr))
84 , socket_(stream_ptr_->next_layer().socket())
85 , stream_(*stream_ptr_)
86 , strand_(socket_.get_executor())
88 , remote_address_(slot->remote_endpoint())
92 , tracking_(Tracking::unknown)
93 , trackingTime_(clock_type::now())
94 , publicKey_(publicKey)
95 , lastPingTime_(clock_type::now())
96 , creationTime_(clock_type::now())
100 , request_(std::move(request))
102 , compressionEnabled_(
103 headers_["X-Offer-Compression"] == "lz4" && app_.config().COMPRESSION
111 const bool inCluster{cluster()};
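// run(): executed on the strand; parse the Closed-Ledger / Previous-Ledger handshake headers, then request peer shard info (TMGetPeerShardInfo).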
134 if (!strand_.running_in_this_thread())
137 auto parseLedgerHash =
138 [](std::string const& value) -> boost::optional<uint256> {
148 boost::optional<uint256> closed;
149 boost::optional<uint256> previous;
151 if (auto const iter = headers_.find("Closed-Ledger");
154 closed = parseLedgerHash(iter->value().to_string());
157 fail("Malformed handshake data (1)");
160 if (auto const iter = headers_.find("Previous-Ledger");
163 previous = parseLedgerHash(iter->value().to_string());
166 fail("Malformed handshake data (2)");
169 if (previous && !closed)
170 fail("Malformed handshake data (3)");
186 protocol::TMGetPeerShardInfo tmGPS;
188 send(std::make_shared<Message>(tmGPS, protocol::mtGET_PEER_SHARD_INFO));
196 if (!strand_.running_in_this_thread())
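// send(): never queue traffic for a squelched validator; track traffic, enforce the send-queue limit, and write asynchronously.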
222 if (!strand_.running_in_this_thread())
229 auto validator = m->getValidatorKey();
230 if (validator && squelch_.isSquelched(*validator))
234 safe_cast<TrafficCount::category>(m->getCategory()),
253 << " sendq: " << sendq_size;
261 boost::asio::async_write(
270 std::placeholders::_1,
271 std::placeholders::_2)));
278 strand_.running_in_this_thread())
282 fail("charge: Resources");
291 auto const iter = headers_.find("Crawl");
294 return boost::iequals(iter->value(), "public");
307 return headers_["User-Agent"].to_string();
308 return headers_["Server"].to_string();
320 ret[jss::inbound] = true;
324 ret[jss::cluster] = true;
332 ret[jss::server_domain] = domain();
335 ret[jss::network_id] = nid;
340 ret[jss::version] = version;
351 std::chrono::duration_cast<std::chrono::seconds>(uptime()).count());
356 if ((minSeq != 0) || (maxSeq != 0))
357 ret[jss::complete_ledgers] =
363 ret[jss::track] = "diverged";
367 ret[jss::track] = "unknown";
376 protocol::TMStatusChange last_status;
383 if (closedLedgerHash != beast::zero)
384 ret[jss::ledger] = to_string(closedLedgerHash);
386 if (last_status.has_newstatus())
388 switch (last_status.newstatus())
390 case protocol::nsCONNECTING:
391 ret[jss::status] = "connecting";
394 case protocol::nsCONNECTED:
395 ret[jss::status] = "connected";
398 case protocol::nsMONITORING:
399 ret[jss::status] = "monitoring";
402 case protocol::nsVALIDATING:
403 ret[jss::status] = "validating";
406 case protocol::nsSHUTTING:
407 ret[jss::status] = "shutting";
412 << "Unknown status: " << last_status.newstatus();
417 ret[jss::metrics][jss::total_bytes_recv] =
419 ret[jss::metrics][jss::total_bytes_sent] =
421 ret[jss::metrics][jss::avg_bps_recv] =
423 ret[jss::metrics][jss::avg_bps_sent] =
474 return boost::icl::contains(it->second.shardIndexes, shardIndex);
509 assert(strand_.running_in_this_thread());
531 if (!strand_.running_in_this_thread())
542 << " failed: " << reason;
550 assert(strand_.running_in_this_thread());
560 boost::optional<RangeSet<std::uint32_t>>
566 return it->second.shardIndexes;
570 boost::optional<hash_map<PublicKey, PeerImp::ShardInfo>>
582 assert(strand_.running_in_this_thread());
594 stream_.async_shutdown(bind_executor(
604 timer_.expires_from_now(peerTimerInterval, ec);
611 timer_.async_wait(bind_executor(
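// Timer handler: drop peers with an oversized send queue or an unanswered ping, then issue a fresh TMPing.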
641 if (ec == boost::asio::error::operation_aborted)
653 fail("Large send queue");
659 clock_type::duration duration;
680 fail("Ping Timeout");
687 protocol::TMPing message;
688 message.set_type(protocol::TMPing::ptPING);
691 send(std::make_shared<Message>(message, protocol::mtPING));
703 JLOG(journal_.error()) << "onShutdown: expected error condition";
706 if (ec != boost::asio::error::eof)
707 return fail("onShutdown", ec);
724 return fail("makeSharedValue: Unexpected failure");
745 auto write_buffer = [this, sharedValue]() {
746 auto buf = std::make_shared<boost::beast::multi_buffer>();
749 resp.result(boost::beast::http::status::switching_protocols);
751 resp.insert("Connection", "Upgrade");
753 resp.insert("Connect-As", "Peer");
759 if (request_["X-Offer-Compression"] == "lz4" &&
761 resp.insert("X-Offer-Compression", "lz4");
771 boost::beast::ostream(*buf) << resp;
777 boost::asio::async_write(
779 write_buffer->data(),
780 boost::asio::transfer_all(),
785 if (ec == boost::asio::error::operation_aborted)
788 return fail("onWriteResponse", ec);
789 if (write_buffer->size() == bytes_transferred)
791 return fail("Failed to write header");
805 return headers_["Server-Domain"].to_string();
827 protocol::TMValidatorList vl;
831 vl.set_signature(signature);
832 vl.set_version(version);
835 << "Sending validator list for " << strHex(pubKey)
836 << " with sequence " << sequence << " to "
838 send(std::make_shared<Message>(vl, protocol::mtVALIDATORLIST));
855 if (ec == boost::asio::error::operation_aborted)
857 if (ec == boost::asio::error::eof)
863 return fail("onReadMessage", ec);
866 if (bytes_transferred > 0)
867 stream << "onReadMessage: " << bytes_transferred << " bytes";
869 stream << "onReadMessage";
872 metrics_.recv.add_message(bytes_transferred);
884 return fail("onReadMessage", ec);
889 if (bytes_consumed == 0)
902 std::placeholders::_1,
903 std::placeholders::_2)));
911 if (ec == boost::asio::error::operation_aborted)
914 return fail("onWriteMessage", ec);
917 if (bytes_transferred > 0)
918 stream << "onWriteMessage: " << bytes_transferred << " bytes";
920 stream << "onWriteMessage";
923 metrics_.sent.add_message(bytes_transferred);
930 return boost::asio::async_write(
939 std::placeholders::_1,
940 std::placeholders::_2)));
945 return stream_.async_shutdown(bind_executor(
950 std::placeholders::_1)));
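// TMPing handler: answer ptPING with ptPONG and use returning pongs to measure round-trip latency.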
1002 if (m->type() == protocol::TMPing::ptPING)
1006 m->set_type(protocol::TMPing::ptPONG);
1007 send(std::make_shared<Message>(*m, protocol::mtPING));
1011 if (m->type() == protocol::TMPing::ptPONG && m->has_seq())
1021 auto const rtt = std::chrono::round<std::chrono::milliseconds>(
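// TMCluster handler: refresh cluster-node status, forward load gossip, and use the median reported fee as the cluster fee.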
1046 for (int i = 0; i < m->clusternodes().size(); ++i)
1048 protocol::TMClusterNode const& node = m->clusternodes(i);
1051 if (node.has_nodename())
1052 name = node.nodename();
1054 auto const publicKey =
1061 auto const reportTime =
1065 *publicKey, name, node.nodeload(), reportTime);
1069 int loadSources = m->loadsources().size();
1070 if (loadSources != 0)
1073 gossip.items.reserve(loadSources);
1074 for (int i = 0; i < m->loadsources().size(); ++i)
1076 protocol::TMLoadSource const& node = m->loadsources(i);
1081 gossip.items.push_back(item);
1094 if (status.getReportTime() >= thresh)
1095 fees.push_back(status.getLoadFee());
1100 auto const index = fees.size() / 2;
1102 clusterFee = fees[index];
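// TMGetPeerShardInfo handler: reply with our complete shard indexes and, while hops remain, relay the query along the recorded peer chain.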
1131 return badData("Invalid peer chain");
1137 auto shards{shardStore->getCompleteShards()};
1138 if (!shards.empty())
1140 protocol::TMPeerShardInfo reply;
1141 reply.set_shardindexes(shards);
1143 if (m->has_lastlink())
1144 reply.set_lastlink(true);
1146 if (m->peerchain_size() > 0)
1148 for (int i = 0; i < m->peerchain_size(); ++i)
1151 return badData("Invalid peer chain public key");
1154 *reply.mutable_peerchain() = m->peerchain();
1157 send(std::make_shared<Message>(reply, protocol::mtPEER_SHARD_INFO));
1168 m->set_hops(m->hops() - 1);
1170 m->set_lastlink(true);
1172 m->add_peerchain()->set_nodepubkey(
1176 std::make_shared<Message>(*m, protocol::mtGET_PEER_SHARD_INFO),
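// TMPeerShardInfo handler: validate the shard indexes, peer chain, and endpoint, route the reply back toward the requester, and record the sender's ShardInfo.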
1189 if (m->shardindexes().empty())
1190 return badData("Missing shard indexes");
1192 return badData("Invalid peer chain");
1194 return badData("Invalid public key");
1197 if (m->peerchain_size() > 0)
1201 makeSlice(m->peerchain(m->peerchain_size() - 1).nodepubkey())};
1203 return badData("Invalid pubKey");
1208 if (!m->has_nodepubkey())
1211 if (!m->has_endpoint())
1217 m->set_endpoint("0");
1220 m->mutable_peerchain()->RemoveLast();
1222 std::make_shared<Message>(*m, protocol::mtPEER_SHARD_INFO));
1225 << "Relayed TMPeerShardInfo to peer with IP "
1240 if (!from_string(shardIndexes, m->shardindexes()))
1241 return badData("Invalid shard indexes");
1244 boost::optional<std::uint32_t> latestShard;
1246 auto const curLedgerSeq{
1250 earliestShard = shardStore->earliestShardIndex();
1251 if (curLedgerSeq >= shardStore->earliestLedgerSeq())
1252 latestShard = shardStore->seqToShardIndex(curLedgerSeq);
1256 auto const earliestLedgerSeq{
1259 if (curLedgerSeq >= earliestLedgerSeq)
1264 if (boost::icl::first(shardIndexes) < earliestShard ||
1265 (latestShard && boost::icl::last(shardIndexes) > latestShard))
1267 return badData("Invalid shard indexes");
1273 if (m->has_endpoint())
1275 if (m->endpoint() != "0")
1280 return badData("Invalid incoming endpoint: " + m->endpoint());
1281 endpoint = std::move(*result);
1291 if (m->has_nodepubkey())
1302 it->second.endpoint = std::move(endpoint);
1305 it->second.shardIndexes += shardIndexes;
1311 shardInfo.endpoint = std::move(endpoint);
1313 shardInfo_.emplace(publicKey, std::move(shardInfo));
1318 << "Consumed TMPeerShardInfo originating from public key "
1320 << m->shardindexes();
1322 if (m->has_lastlink())
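// TMEndpoints handler: collect the advertised endpoints and pass any usable ones to the peer finder.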
1335 endpoints.reserve(m->endpoints_v2().size());
1337 for (auto const& tm : m->endpoints_v2())
1343 << tm.endpoint() << "}";
1359 if (!endpoints.empty())
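// TMTransaction handler: deserialize the transaction and queue checkTransaction(); skip new transactions while we are not synchronized.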
1374 << "Need network ledger";
1382 auto stx = std::make_shared<STTx const>(sit);
1383 uint256 txID = stx->getTransactionID();
1402 bool checkSignature = true;
1405 if (!m->has_deferred() || !m->deferred())
1409 flags |= SF_TRUSTED;
1416 checkSignature = false;
1429 << "No new transactions until synchronized";
1435 "recvTransaction->checkTransaction",
1440 if (auto peer = weak.lock())
1441 peer->checkTransaction(flags, checkSignature, stx);
1448 << "Transaction invalid: " << strHex(m->rawtransaction());
1458 if (auto peer = weak.lock())
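// TMLedgerData handler: route cookie-tagged replies back to the requesting peer and hand transaction-candidate data to InboundTransactions.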
1466 protocol::TMLedgerData& packet = *m;
1468 if (m->nodes().size() <= 0)
1474 if (m->has_requestcookie())
1480 m->clear_requestcookie();
1482 std::make_shared<Message>(packet, protocol::mtLEDGER_DATA));
1486 JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply";
1494 JLOG(p_journal_.warn()) << "TX candidate reply with invalid hash size";
1499 uint256 const hash{m->ledgerhash()};
1501 if (m->type() == protocol::liTS_CANDIDATE)
1507 if (auto peer = weak.lock())
1508 peer->app_.getInboundTransactions().gotData(hash, peer, m);
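// TMProposeSet handler: sanity-check the signature size, suppress duplicates, drop untrusted proposals from diverged peers, and queue checkPropose().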
1523 protocol::TMProposeSet& set = *m;
1529 if ((boost::algorithm::clamp(sig.size(), 64, 72) != sig.size()) ||
1545 uint256 const proposeHash{set.currenttxhash()};
1546 uint256 const prevLedger{set.previousledger()};
1559 if (auto [added, relayed] =
1572 suppression, publicKey, id_, protocol::mtPROPOSE_LEDGER);
1584 << "Proposal: Dropping untrusted (peer divergence)";
1596 << "Proposal: " << (isTrusted ? "trusted" : "untrusted");
1613 "recvPropose->checkPropose",
1615 if (auto peer = weak.lock())
1616 peer->checkPropose(job, m, proposal);
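// TMStatusChange handler: record the peer's latest status, closed/previous ledger hashes, and ledger range, then re-evaluate sync tracking (converged vs. diverged).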
1625 if (!m->has_networktime())
1630 if (!last_status_.has_newstatus() || m->has_newstatus())
1635 protocol::NodeStatus status = last_status_.newstatus();
1637 m->set_newstatus(status);
1641 if (m->newevent() == protocol::neLOST_SYNC)
1643 bool outOfSync{false};
1664 bool const peerChangedLedgers{
1671 if (peerChangedLedgers)
1682 if (m->has_ledgerhashprevious() &&
1693 if (peerChangedLedgers)
1703 if (m->has_firstseq() && m->has_lastseq())
1714 if (m->has_ledgerseq() &&
1724 if (m->has_newstatus())
1726 switch (m->newstatus())
1728 case protocol::nsCONNECTING:
1729 j[jss::status] = "CONNECTING";
1731 case protocol::nsCONNECTED:
1732 j[jss::status] = "CONNECTED";
1734 case protocol::nsMONITORING:
1735 j[jss::status] = "MONITORING";
1737 case protocol::nsVALIDATING:
1738 j[jss::status] = "VALIDATING";
1740 case protocol::nsSHUTTING:
1741 j[jss::status] = "SHUTTING";
1746 if (m->has_newevent())
1748 switch (m->newevent())
1750 case protocol::neCLOSING_LEDGER:
1751 j[jss::action] = "CLOSING_LEDGER";
1753 case protocol::neACCEPTED_LEDGER:
1754 j[jss::action] = "ACCEPTED_LEDGER";
1756 case protocol::neSWITCHED_LEDGER:
1757 j[jss::action] = "SWITCHED_LEDGER";
1759 case protocol::neLOST_SYNC:
1760 j[jss::action] = "LOST_SYNC";
1765 if (m->has_ledgerseq())
1767 j[jss::ledger_index] = m->ledgerseq();
1770 if (m->has_ledgerhash())
1772 uint256 closedLedgerHash{};
1774 std::lock_guard sl(recentLock_);
1775 closedLedgerHash = closedLedgerHash_;
1777 j[jss::ledger_hash] = to_string(closedLedgerHash);
1780 if (m->has_networktime())
1782 j[jss::date] = Json::UInt(m->networktime());
1785 if (m->has_firstseq() && m->has_lastseq())
1787 j[jss::ledger_index_min] = Json::UInt(m->firstseq());
1788 j[jss::ledger_index_max] = Json::UInt(m->lastseq());
1804 serverSeq = maxLedger_;
1810 checkTracking(serverSeq, validationSeq);
1819 if (diff < Tuning::convergedLedgerLimit)
1822 tracking_ = Tracking::converged;
1825 if ((diff > Tuning::divergedLedgerLimit) &&
1826 (tracking_.load() != Tracking::diverged))
1831 tracking_ = Tracking::diverged;
1832 trackingTime_ = clock_type::now();
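// TMHaveTransactionSet handler: charge for malformed hashes and remember recently announced transaction sets.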
1841 fee_ = Resource::feeInvalidRequest;
1845 uint256 const hash{m->hash()};
1847 if (m->status() == protocol::tsHAVE)
1851 if (std::find(recentTxSets_.begin(), recentTxSets_.end(), hash) !=
1852 recentTxSets_.end())
1854 fee_ = Resource::feeUnwantedData;
1858 recentTxSets_.push_back(hash);
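// TMValidatorList handler: require protocol support, suppress duplicates, apply and broadcast the list, and charge the peer according to the disposition.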
1867 if (!supportsFeature(ProtocolFeature::ValidatorListPropagation))
1869 JLOG(p_journal_.debug())
1870 << "ValidatorList: received validator list from peer using "
1871 << "protocol version " << to_string(protocol_)
1872 << " which shouldn't support this feature.";
1873 fee_ = Resource::feeUnwantedData;
1876 auto const& manifest = m->manifest();
1877 auto const& blob = m->blob();
1878 auto const& signature = m->signature();
1879 auto const version = m->version();
1882 JLOG(p_journal_.debug())
1883 << "Received validator list from " << remote_address_.to_string()
1884 << " (" << id_ << ")";
1886 if (!app_.getHashRouter().addSuppressionPeer(hash, id_))
1888 JLOG(p_journal_.debug())
1889 << "ValidatorList: received duplicate validator list";
1893 fee_ = Resource::feeUnwantedData;
1897 auto const applyResult = app_.validators().applyListAndBroadcast(
1902 remote_address_.to_string(),
1905 app_.getHashRouter());
1906 auto const disp = applyResult.disposition;
1908 JLOG(p_journal_.debug())
1909 << "Processed validator list from "
1910 << (applyResult.publisherKey ? strHex(*applyResult.publisherKey)
1911 : "unknown or invalid publisher")
1912 << " from " << remote_address_.to_string() << " (" << id_
1913 << ") with result " << to_string(disp);
1917 case ListDisposition::accepted:
1918 JLOG(p_journal_.debug())
1919 << "Applied new validator list from peer "
1924 assert(applyResult.sequence && applyResult.publisherKey);
1925 auto const& pubKey = *applyResult.publisherKey;
1927 if (auto const iter = publisherListSequences_.find(pubKey);
1928 iter != publisherListSequences_.end())
1930 assert(iter->second < *applyResult.sequence);
1933 publisherListSequences_[pubKey] = *applyResult.sequence;
1936 case ListDisposition::same_sequence:
1937 JLOG(p_journal_.warn())
1938 << "Validator list with current sequence from peer "
1943 fee_ = Resource::feeUnwantedData;
1947 assert(applyResult.sequence && applyResult.publisherKey);
1949 publisherListSequences_[*applyResult.publisherKey] ==
1950 *applyResult.sequence);
1955 case ListDisposition::stale:
1956 JLOG(p_journal_.warn())
1957 << "Stale validator list from peer " << remote_address_;
1960 fee_ = Resource::feeBadData;
1962 case ListDisposition::untrusted:
1963 JLOG(p_journal_.warn())
1964 << "Untrusted validator list from peer " << remote_address_;
1968 fee_ = Resource::feeUnwantedData;
1970 case ListDisposition::invalid:
1971 JLOG(p_journal_.warn())
1972 << "Invalid validator list from peer " << remote_address_;
1974 fee_ = Resource::feeInvalidSignature;
1976 case ListDisposition::unsupported_version:
1977 JLOG(p_journal_.warn())
1978 << "Unsupported version validator list from peer "
1982 fee_ = Resource::feeBadData;
1990 JLOG(p_journal_.warn()) << "ValidatorList: Exception, " << e.what()
1991 << " from peer " << remote_address_;
1992 fee_ = Resource::feeBadData;
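// TMValidation handler: reject undersized or stale validations, suppress duplicates (with squelch accounting), and queue checkValidation() when trusted or lightly loaded.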
1999 auto const closeTime = app_.timeKeeper().closeTime();
2001 if (m->validation().size() < 50)
2003 JLOG(p_journal_.warn()) << "Validation: Too small";
2004 fee_ = Resource::feeInvalidRequest;
2013 val = std::make_shared<STValidation>(
2017 app_.validatorManifests().getMasterKey(pk));
2020 val->setSeen(closeTime);
2024 app_.getValidations().parms(),
2025 app_.timeKeeper().closeTime(),
2027 val->getSeenTime()))
2029 JLOG(p_journal_.trace()) << "Validation: Not current";
2030 fee_ = Resource::feeUnwantedData;
2035 if (auto [added, relayed] =
2036 app_.getHashRouter().addSuppressionPeerWithStatus(key, id_);
2043 if (app_.config().REDUCE_RELAY_ENABLE && (bool)relayed &&
2044 (stopwatch().now() - *relayed) < squelch::IDLED &&
2045 squelch::epoch<std::chrono::minutes>(UptimeClock::now()) >
2046 squelch::WAIT_ON_BOOTUP)
2047 overlay_.updateSlotAndSquelch(
2048 key, val->getSignerPublic(), id_, protocol::mtVALIDATION);
2049 JLOG(p_journal_.trace()) << "Validation: duplicate";
2053 auto const isTrusted =
2054 app_.validators().trusted(val->getSignerPublic());
2056 if (!isTrusted && (tracking_.load() == Tracking::diverged))
2058 JLOG(p_journal_.debug())
2059 << "Validation: dropping untrusted from diverged peer";
2061 if (isTrusted || cluster() || !app_.getFeeTrack().isLoadedLocal())
2064 app_.getJobQueue().addJob(
2066 "recvValidation->checkValidation",
2067 [weak, val, m](Job&) {
2068 if (auto peer = weak.lock())
2069 peer->checkValidation(val, m);
2074 JLOG(p_journal_.debug()) << "Validation: Dropping UNTRUSTED (load)";
2079 JLOG(p_journal_.warn())
2080 << "Exception processing validation: " << e.what();
2081 fee_ = Resource::feeInvalidRequest;
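// TMGetObjectByHash handler: serve requested objects from the node store (or shard store), or consume a reply as fetch-pack data.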
2088 protocol::TMGetObjectByHash& packet = *m;
2093 if (send_queue_.size() >= Tuning::dropSendQueue)
2095 JLOG(p_journal_.debug()) << "GetObject: Large send queue";
2099 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2105 fee_ = Resource::feeMediumBurdenPeer;
2107 protocol::TMGetObjectByHash reply;
2109 reply.set_query(false);
2111 if (packet.has_seq())
2112 reply.set_seq(packet.seq());
2114 reply.set_type(packet.type());
2116 if (packet.has_ledgerhash())
2120 fee_ = Resource::feeInvalidRequest;
2124 reply.set_ledgerhash(packet.ledgerhash());
2128 for (int i = 0; i < packet.objects_size(); ++i)
2130 auto const& obj = packet.objects(i);
2133 uint256 const hash{obj.hash()};
2136 std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0};
2137 auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)};
2140 if (auto shardStore = app_.getShardStore())
2142 if (seq >= shardStore->earliestLedgerSeq())
2143 nodeObject = shardStore->fetchNodeObject(hash, seq);
2148 protocol::TMIndexedObject& newObj = *reply.add_objects();
2149 newObj.set_hash(hash.begin(), hash.size());
2151 &nodeObject->getData().front(),
2152 nodeObject->getData().size());
2154 if (obj.has_nodeid())
2155 newObj.set_index(obj.nodeid());
2156 if (obj.has_ledgerseq())
2157 newObj.set_ledgerseq(obj.ledgerseq());
2164 JLOG(p_journal_.trace()) << "GetObj: " << reply.objects_size() << " of "
2165 << packet.objects_size();
2166 send(std::make_shared<Message>(reply, protocol::mtGET_OBJECTS));
2173 bool progress = false;
2175 for (int i = 0; i < packet.objects_size(); ++i)
2177 const protocol::TMIndexedObject& obj = packet.objects(i);
2181 if (obj.has_ledgerseq())
2183 if (obj.ledgerseq() != pLSeq)
2185 if (pLDo && (pLSeq != 0))
2187 JLOG(p_journal_.debug())
2188 << "GetObj: Full fetch pack for " << pLSeq;
2190 pLSeq = obj.ledgerseq();
2191 pLDo = !app_.getLedgerMaster().haveLedger(pLSeq);
2195 JLOG(p_journal_.debug())
2196 << "GetObj: Late fetch pack for " << pLSeq;
2205 uint256 const hash{obj.hash()};
2207 app_.getLedgerMaster().addFetchPack(
2209 std::make_shared<Blob>(
2210 obj.data().begin(), obj.data().end()));
2215 if (pLDo && (pLSeq != 0))
2217 JLOG(p_journal_.debug())
2218 << "GetObj: Partial fetch pack for " << pLSeq;
2220 if (packet.type() == protocol::TMGetObjectByHash::otFETCH_PACK)
2221 app_.getLedgerMaster().gotFetchPack(progress, pLSeq);
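// TMSquelch handler: ignore squelches naming our own validator key; otherwise apply the squelch on the strand.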
2228 if (!m->has_validatorpubkey())
2230 charge(Resource::feeBadData);
2233 auto validator = m->validatorpubkey();
2237 charge(Resource::feeBadData);
2241 auto squelch = m->squelch();
2242 auto duration = m->has_squelchduration() ? m->squelchduration() : 0;
2243 auto sp = shared_from_this();
2246 if (key == app_.getValidationPublicKey())
2248 JLOG(p_journal_.debug())
2249 << "onMessage: TMSquelch discarding validator's squelch " << slice;
2253 if (!strand_.running_in_this_thread())
2254 return post(strand_, [sp, key, squelch, duration]() {
2255 sp->squelch_.squelch(key, squelch, duration);
2258 JLOG(p_journal_.debug())
2259 << "onMessage: TMSquelch " << slice << " " << id() << " " << duration;
2261 squelch_.squelch(key, squelch, duration);
2273 (void)lockedRecentLock;
2275 if (std::find(recentLedgers_.begin(), recentLedgers_.end(), hash) !=
2276 recentLedgers_.end())
2279 recentLedgers_.push_back(hash);
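// Fetch-pack requests: refuse while the server is loaded, otherwise build the pack asynchronously on the job queue.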
2288 if (app_.getFeeTrack().isLoadedLocal() ||
2289 (app_.getLedgerMaster().getValidatedLedgerAge() > 40s) ||
2290 (app_.getJobQueue().getJobCount(jtPACK) > 10))
2292 JLOG(p_journal_.info()) << "Too busy to make fetch pack";
2298 JLOG(p_journal_.warn()) << "FetchPack hash size malformed";
2299 fee_ = Resource::feeInvalidRequest;
2303 fee_ = Resource::feeHighBurdenPeer;
2305 uint256 const hash{packet->ledgerhash()};
2308 auto elapsed = UptimeClock::now();
2309 auto const pap = &app_;
2310 app_.getJobQueue().addJob(
2311 jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed](Job&) {
2312 pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed);
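// checkTransaction(): flag expired or badly signed transactions as SF_BAD and charge the peer; otherwise submit to NetworkOPs for processing.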
2317 PeerImp::checkTransaction(
2319 bool checkSignature,
2328 app_.getLedgerMaster().getValidLedgerIndex()))
2330 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2331 charge(Resource::feeUnwantedData);
2339 app_.getHashRouter(),
2341 app_.getLedgerMaster().getValidatedRules(),
2343 valid != Validity::Valid)
2345 if (!validReason.empty())
2347 JLOG(p_journal_.trace())
2348 << "Exception checking transaction: " << validReason;
2352 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2353 charge(Resource::feeInvalidSignature);
2360 app_.getHashRouter(), stx->getTransactionID(), Validity::Valid);
2364 auto tx = std::make_shared<Transaction>(stx, reason, app_);
2366 if (tx->getStatus() == INVALID)
2368 if (!reason.empty())
2370 JLOG(p_journal_.trace())
2371 << "Exception checking transaction: " << reason;
2373 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2374 charge(Resource::feeInvalidSignature);
2378 bool const trusted(flags & SF_TRUSTED);
2379 app_.getOPs().processTransaction(
2380 tx, trusted, false, NetworkOPs::FailHard::no);
2384 app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD);
2385 charge(Resource::feeBadData);
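// checkPropose(): verify the proposal signature, process trusted proposals, and relay with squelch accounting when appropriate.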
2391 PeerImp::checkPropose(
2398 JLOG(p_journal_.trace())
2399 << "Checking " << (isTrusted ? "trusted" : "UNTRUSTED") << " proposal";
2405 JLOG(p_journal_.warn()) << "Proposal fails sig check";
2406 charge(Resource::feeInvalidSignature);
2413 relay = app_.getOPs().processTrustedProposal(peerPos);
2415 relay = app_.config().RELAY_UNTRUSTED_PROPOSALS || cluster();
2423 auto haveMessage = app_.overlay().relay(
2425 if (app_.config().REDUCE_RELAY_ENABLE && !haveMessage.empty() &&
2426 squelch::epoch<std::chrono::minutes>(UptimeClock::now()) >
2427 squelch::WAIT_ON_BOOTUP)
2428 overlay_.updateSlotAndSquelch(
2431 std::move(haveMessage),
2432 protocol::mtPROPOSE_LEDGER);
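// checkValidation(): drop invalid validations, then relay to peers that have not seen it and update squelch slots.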
2437 PeerImp::checkValidation(
2444 if (!cluster() && !val->isValid())
2446 JLOG(p_journal_.warn()) << "Validation is invalid";
2447 charge(Resource::feeInvalidRequest);
2454 auto const suppression =
2461 overlay_.relay(*packet, suppression, val->getSignerPublic());
2462 if (app_.config().REDUCE_RELAY_ENABLE && !haveMessage.empty() &&
2463 squelch::epoch<std::chrono::minutes>(UptimeClock::now()) >
2464 squelch::WAIT_ON_BOOTUP)
2466 overlay_.updateSlotAndSquelch(
2468 val->getSignerPublic(),
2469 std::move(haveMessage),
2470 protocol::mtVALIDATION);
2476 JLOG(p_journal_.trace()) << "Exception processing validation";
2477 charge(Resource::feeInvalidRequest);
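// Peer selection helpers: among peers that already have the item, prefer the one with the best score.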
2491 if (p->hasTxSet(rootHash) && p.get() != skip)
2493 auto score = p->getScore(true);
2494 if (!ret || (score > retScore))
2519 if (p->hasLedger(ledgerHash, ledger) && p.get() != skip)
2521 auto score = p->getScore(true);
2522 if (!ret || (score > retScore))
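// TMGetLedger handler: serve transaction-candidate sets, ledger base data, or SHAMap nodes, routing requests we cannot satisfy to a better-suited peer.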
2537 protocol::TMGetLedger& packet = *m;
2539 SHAMap const* map = nullptr;
2540 protocol::TMLedgerData reply;
2541 bool fatLeaves = true;
2544 if (packet.has_requestcookie())
2545 reply.set_requestcookie(packet.requestcookie());
2549 if (packet.itype() == protocol::liTS_CANDIDATE)
2552 JLOG(p_journal_.trace()) << "GetLedger: Tx candidate set";
2554 if (!packet.has_ledgerhash() ||
2557 charge(Resource::feeInvalidRequest);
2558 JLOG(p_journal_.warn()) << "GetLedger: Tx candidate set invalid";
2562 uint256 const txHash{packet.ledgerhash()};
2564 shared = app_.getInboundTransactions().getSet(txHash, false);
2569 if (packet.has_querytype() && !packet.has_requestcookie())
2571 JLOG(p_journal_.debug()) << "GetLedger: Routing Tx set request";
2575 packet.set_requestcookie(id());
2576 v->send(std::make_shared<Message>(
2577 packet, protocol::mtGET_LEDGER));
2581 JLOG(p_journal_.info()) << "GetLedger: Route TX set failed";
2585 JLOG(p_journal_.debug()) << "GetLedger: Can't provide map ";
2586 charge(Resource::feeInvalidRequest);
2590 reply.set_ledgerseq(0);
2591 reply.set_ledgerhash(txHash.begin(), txHash.size());
2592 reply.set_type(protocol::liTS_CANDIDATE);
2597 if (send_queue_.size() >= Tuning::dropSendQueue)
2599 JLOG(p_journal_.debug()) << "GetLedger: Large send queue";
2603 if (app_.getFeeTrack().isLoadedLocal() && !cluster())
2605 JLOG(p_journal_.debug()) << "GetLedger: Too busy";
2610 JLOG(p_journal_.trace()) << "GetLedger: Received";
2612 if (packet.has_ledgerhash())
2616 charge(Resource::feeInvalidRequest);
2617 JLOG(p_journal_.warn()) << "GetLedger: Invalid request";
2621 uint256 const ledgerhash{packet.ledgerhash()};
2622 logMe += "LedgerHash:";
2623 logMe += to_string(ledgerhash);
2624 ledger = app_.getLedgerMaster().getLedgerByHash(ledgerhash);
2626 if (!ledger && packet.has_ledgerseq())
2628 if (auto shardStore = app_.getShardStore())
2630 auto seq = packet.ledgerseq();
2631 if (seq >= shardStore->earliestLedgerSeq())
2632 ledger = shardStore->fetchLedger(ledgerhash, seq);
2638 JLOG(p_journal_.trace())
2639 << "GetLedger: Don't have " << ledgerhash;
2643 (packet.has_querytype() && !packet.has_requestcookie()))
2650 packet.has_ledgerseq() ? packet.ledgerseq() : 0,
2654 JLOG(p_journal_.trace()) << "GetLedger: Cannot route";
2658 packet.set_requestcookie(id());
2660 std::make_shared<Message>(packet, protocol::mtGET_LEDGER));
2661 JLOG(p_journal_.debug()) << "GetLedger: Request routed";
2665 else if (packet.has_ledgerseq())
2667 if (packet.ledgerseq() < app_.getLedgerMaster().getEarliestFetch())
2669 JLOG(p_journal_.debug()) << "GetLedger: Early ledger request";
2672 ledger = app_.getLedgerMaster().getLedgerBySeq(packet.ledgerseq());
2675 JLOG(p_journal_.debug())
2676 << "GetLedger: Don't have " << packet.ledgerseq();
2679 else if (packet.has_ltype() && (packet.ltype() == protocol::ltCLOSED))
2681 ledger = app_.getLedgerMaster().getClosedLedger();
2682 assert(!ledger->open());
2686 if (ledger && ledger->info().open)
2687 ledger = app_.getLedgerMaster().getLedgerBySeq(
2688 ledger->info().seq - 1);
2693 charge(Resource::feeInvalidRequest);
2694 JLOG(p_journal_.warn()) << "GetLedger: Unknown request";
2699 (packet.has_ledgerseq() &&
2700 (packet.ledgerseq() != ledger->info().seq)))
2702 charge(Resource::feeInvalidRequest);
2706 JLOG(p_journal_.warn()) << "GetLedger: Invalid sequence";
2711 if (!packet.has_ledgerseq() &&
2712 (ledger->info().seq < app_.getLedgerMaster().getEarliestFetch()))
2714 JLOG(p_journal_.debug()) << "GetLedger: Early ledger request";
2719 auto const lHash = ledger->info().hash;
2720 reply.set_ledgerhash(lHash.begin(), lHash.size());
2721 reply.set_ledgerseq(ledger->info().seq);
2722 reply.set_type(packet.itype());
2724 if (packet.itype() == protocol::liBASE)
2727 JLOG(p_journal_.trace()) << "GetLedger: Base data";
2729 addRaw(ledger->info(), nData);
2730 reply.add_nodes()->set_nodedata(
2733 auto const& stateMap = ledger->stateMap();
2734 if (stateMap.getHash() != beast::zero)
2739 stateMap.serializeRoot(rootNode);
2740 reply.add_nodes()->set_nodedata(
2743 if (ledger->info().txHash != beast::zero)
2745 auto const& txMap = ledger->txMap();
2746 if (txMap.getHash() != beast::zero)
2750 txMap.serializeRoot(rootNode);
2751 reply.add_nodes()->set_nodedata(
2758 std::make_shared<Message>(reply, protocol::mtLEDGER_DATA);
2763 if (packet.itype() == protocol::liTX_NODE)
2765 map = &ledger->txMap();
2767 logMe += to_string(map->getHash());
2769 else if (packet.itype() == protocol::liAS_NODE)
2771 map = &ledger->stateMap();
2773 logMe += to_string(map->getHash());
2777 if (!map || (packet.nodeids_size() == 0))
2779 JLOG(p_journal_.warn()) << "GetLedger: Can't find map or empty request";
2780 charge(Resource::feeInvalidRequest);
2784 JLOG(p_journal_.trace()) << "GetLedger: " << logMe;
2786 auto const depth = packet.has_querydepth()
2787 ? (std::min(packet.querydepth(), 3u))
2788 : (isHighLatency() ? 2 : 1);
2791 (i < packet.nodeids().size() &&
2792 (reply.nodes().size() < Tuning::maxReplyNodes));
2799 JLOG(p_journal_.warn()) << "GetLedger: Invalid node " << logMe;
2800 charge(Resource::feeBadData);
2809 if (map->getNodeFat(*mn, nodeIDs, rawNodes, fatLeaves, depth))
2811 assert(nodeIDs.size() == rawNodes.size());
2812 JLOG(p_journal_.trace()) << "GetLedger: getNodeFat got "
2813 << rawNodes.size() << " nodes";
2817 for (nodeIDIterator = nodeIDs.begin(),
2818 rawNodeIterator = rawNodes.begin();
2819 nodeIDIterator != nodeIDs.end();
2820 ++nodeIDIterator, ++rawNodeIterator)
2822 protocol::TMLedgerNode* node = reply.add_nodes();
2823 node->set_nodeid(nodeIDIterator->getRawString());
2825 &rawNodeIterator->front(), rawNodeIterator->size());
2830 JLOG(p_journal_.warn())
2831 << "GetLedger: getNodeFat returns false";
2838 if (packet.itype() == protocol::liTS_CANDIDATE)
2839 info = "TS candidate";
2840 else if (packet.itype() == protocol::liBASE)
2841 info = "Ledger base";
2842 else if (packet.itype() == protocol::liTX_NODE)
2844 else if (packet.itype() == protocol::liAS_NODE)
2847 if (!packet.has_ledgerhash())
2848 info += ", no hash specified";
2850 JLOG(p_journal_.warn())
2851 << "getNodeFat( " << *mn << ") throws exception: " << info;
2855 JLOG(p_journal_.info())
2856 << "Got request for " << packet.nodeids().size() << " nodes at depth "
2857 << depth << ", return " << reply.nodes().size() << " nodes";
2859 auto oPacket = std::make_shared<Message>(reply, protocol::mtLEDGER_DATA);
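// getScore(): random base score, a large bonus if the peer already has the item, and a penalty proportional to measured latency.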
2864 PeerImp::getScore(bool haveItem) const
2868 static const int spRandomMax = 9999;
2872 static const int spHaveItem = 10000;
2877 static const int spLatency = 30;
2880 static const int spNoLatency = 8000;
2885 score += spHaveItem;
2887 boost::optional<std::chrono::milliseconds> latency;
2894 score -= latency->count() * spLatency;
2896 score -= spNoLatency;
2902 PeerImp::isHighLatency() const
2905 return latency_ >= peerHighLatency;
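// Metrics::add_message(): accumulate byte counts and refresh the one-second rolling average of bytes per second.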
2911 using namespace std::chrono_literals;
2914 totalBytes_ += bytes;
2915 accumBytes_ += bytes;
2916 auto const timeElapsed = clock_type::now() - intervalStart_;
2917 auto const timeElapsedInSecs =
2918 std::chrono::duration_cast<std::chrono::seconds>(timeElapsed);
2920 if (timeElapsedInSecs >= 1s)
2922 auto const avgBytes = accumBytes_ / timeElapsedInSecs.count();
2923 rollingAvg_.push_back(avgBytes);
2925 auto const totalBytes =
2927 rollingAvgBytes_ = totalBytes / rollingAvg_.size();
2929 intervalStart_ = clock_type::now();
2935 PeerImp::Metrics::average_bytes() const
2938 return rollingAvgBytes_;
2942 PeerImp::Metrics::total_bytes() const